content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def get_min_max(data_column):
    """Return the smallest and largest values found in a column.
    Args:
        data_column (pandas.Series): column of data (must support idxmin/idxmax)
    Returns:
        tuple: (min_value, max_value) of the column
    """
    lowest = data_column[data_column.idxmin()]
    highest = data_column[data_column.idxmax()]
    return lowest, highest
def folder_name(unit_name):
    """
    Extract folder name from full unit name.
    Example:
        Unit 01 - Example unit
    returns:
        Unit 01

    The part before the first " - " separator is returned; when no
    separator is present the whole stripped name is returned.
    """
    # The original returned the full stripped name, contradicting the
    # documented example; take the part before the first " - " separator.
    return unit_name.split(" - ", 1)[0].strip()
def find_property(n_block, property_type):
    """Return the first property of *property_type* found on *n_block*.

    Searches ``n_block.properties`` first, then ``n_block.bs_properties``;
    returns None when no matching property exists (or neither attribute does).
    """
    for attr_name in ("properties", "bs_properties"):
        for candidate in getattr(n_block, attr_name, ()):
            if isinstance(candidate, property_type):
                return candidate
    return None
def blocks_to_string(blocks):
    """Convert a list of unencrypted 64-bit integer blocks to a string.

    Each block is split into its 8 big-endian bytes, which are then
    decoded as UTF-8 and concatenated in order.
    """
    pieces = []
    for block in blocks:
        raw = bytes((block >> (8 * (7 - i))) & 0xFF for i in range(8))
        pieces.append(raw.decode('utf-8'))
    return ''.join(pieces)
def format_header(sample, reference, accession, length):
    """Return a newly formatted FASTA-style header line."""
    title = 'Pseudo-seq with called substitutions and low coverage masked'
    return ('>gnl|{acc}|{sample} {title} '
            '[assembly_accession={ref}] [length={length}]').format(
                acc=accession, sample=sample, title=title,
                ref=reference, length=length)
import os
def get_qrels(qrel_file='test_collection/qrels-all.txt'):
    """
    Return qrels as a dict mapping qiddoc -> relevance, where qiddoc is
    the qid concatenated with the doc id.

    Returns an empty dict when the qrel file does not exist.
    """
    qrels = {}
    if not os.path.exists(qrel_file):
        return qrels
    with open(qrel_file) as handle:
        for line in handle:
            qid, _zero, doc, relevance = line.strip().split()
            qrels[qid + doc] = relevance
    return qrels
import sys
def str_to_bool(s):
    """
    Convert a string representation of truth into a boolean value.
    Args:
        s [str]: string representing a truth value (case-insensitive)
    Returns:
        b [bool]: boolean value of the string representation

    Exits the program with status 1 when the string is not recognised.
    """
    lowered = s.lower()
    if lowered in ("true", "yes", "y", "t", "1"):
        return True
    if lowered in ("false", "no", "f", "n", "0"):
        return False
    print("boolean string not correctly specified")
    sys.exit(1)
def _mock_post_target(request, dynamic_value=None):
    """Return True when the request's validated data matches the expected
    test payload, False otherwise."""
    return request.validated_data == {"testkey": "testvalue"}
def checkanswer(response, correct_code, json):
    """Check an API response against the expected status code.

    Appends an 'API_status' key to the answer indicating whether the
    response was correct.
    :param response: response object returned by an API call
    :param correct_code: int, result code for no errors
    :param json: bool, whether to include the decoded JSON body
    :return: dict with 'API_status' plus 'error_code' (on failure) or
        'response' (on success with json=True)
    """
    if response.status_code != correct_code:
        return {'API_status': False, 'error_code': response.status_code}
    result = {'API_status': True}
    if json:
        result['response'] = response.json()
    return result
import os
def get_db_path(relpath):
    """
    Return a sqlite URI for *relpath* resolved against this package.

    SQLAlchemy needs 4 /'s for an absolute path
    (e.g. sqlite:////fidash/databases/TickerScrape.db) and 3 for a
    relative one; *relpath* supplies everything after 'sqlite:///'.
    """
    package_dir = os.path.abspath(os.path.dirname(__file__))
    uri = 'sqlite:///' + os.path.join(package_dir, relpath)
    print(uri)
    return uri
def HallucinateNegatives(pos_list):
    """
    Reads a list of positive examples and returns a list of negative examples
    based on the provided content.
    @method HallucinateNegatives
    @param  {list}  pos_list    list of positive examples
    @return {list}  neg_list    list of negative examples
    Example:
    >>> HallucinateNegatives(['a("1","2").', 'a("3","4").'])
    ['a("1","4").', 'a("3","2").']
    """
    def parse(predicate_string):
        """
        Source:
        https://github.com/hayesall/Mode-Inference/blob/master/inferModes.py
        License:
        BSD 2-Clause License
        Copyright (c) 2018 Alexander L. Hayes
        All rights reserved.
        Redistribution and use in source and binary forms, with or without
        modification, are permitted provided that the following conditions are met:
        * Redistributions of source code must retain the above copyright notice, this
        list of conditions and the following disclaimer.
        * Redistributions in binary form must reproduce the above copyright notice,
        this list of conditions and the following disclaimer in the documentation
        and/or other materials provided with the distribution.
        THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
        AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
        IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
        DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
        FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
        DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
        SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
        CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
        OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
        OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
        Input a string of the format:
        'father(harrypotter,jamespotter).'
        Returns a list where [0] is the name of the literal and [1] is the
        list of variables in the rule.
        ['father', ['harrypotter', 'jamespotter']]
        """
        predicate_list = predicate_string.replace(' ', '').split(')', 1)[0].split('(')
        predicate_list[1] = predicate_list[1].split(',')
        return predicate_list
    # Nothing to hallucinate from an empty example set (the original would
    # have produced an empty list here too, but only implicitly).
    if not pos_list:
        return []
    # Create some structures which we will use to infer what is false.
    true_examples = {}
    all_authors = []
    all_stories = []
    # Use the predicate name from the data itself. The original hardcoded
    # 'liked(', contradicting the docstring example above.
    predicate_name = parse(pos_list[0])[0]
    for example in pos_list:
        pred_list = parse(example)
        author = pred_list[1][0]
        story = pred_list[1][1]
        # Update the structures.
        all_authors.append(author)
        all_stories.append(story)
        true_examples[(author, story)] = True
    # Iterate over all authors and stories. If an author did not write a story,
    # the predicate is false.
    neg_list = []
    for author in all_authors:
        for story in all_stories:
            if not true_examples.get((author, story)):
                neg_list.append(predicate_name + '(' + author + ',' + story + ').')
    # The length of the false_examples will be massive. On a set I was experi-
    # menting with, 423 positives resulted in 177,874 negatives.
    return neg_list
import math
def get_prime_number(num):
    """Enumerate primes using the sieve of Eratosthenes.

    Args:
        num (int): upper bound (inclusive)
    Returns:
        list: ascending list of all primes <= num (empty when num < 2)
    """
    if num < 2:
        # The original crashed here: min() of an empty candidate list.
        return []
    # Boolean sieve: is_prime[i] says whether i is (still) considered prime.
    is_prime = [True] * (num + 1)
    is_prime[0] = is_prime[1] = False
    for candidate in range(2, int(math.sqrt(num)) + 1):
        if is_prime[candidate]:
            # Strike out every multiple, starting at candidate^2 (smaller
            # multiples were struck by smaller primes already).
            for multiple in range(candidate * candidate, num + 1, candidate):
                is_prime[multiple] = False
    return [i for i, flag in enumerate(is_prime) if flag]
import re
def get_boundingbox(s) -> dict:
    """
    Parse the ``bbox x1 y1 x2 y2`` coordinates out of the input's 'title'
    value and return them as a left/top/right/bottom dictionary.

    All four values default to 0 when no bbox pattern is found.
    """
    box = dict.fromkeys(("left", "top", "right", "bottom"), 0)
    pattern = r'''(?<=bbox )([0-9]{0,4}) ([0-9]{0,4}) ([0-9]{0,4}) ([0-9]{0,4})'''
    found = re.search(pattern, s.get('title'))
    if found:
        box["left"] = int(found.group(1))
        box["top"] = int(found.group(2))
        box["right"] = int(found.group(3))
        box["bottom"] = int(found.group(4))
    return box
def searchsorted(arr, N, x):
    """Binary search for *x* in the sorted list *arr* of length *N*.

    Returns the lower bound index held when the search terminates: the
    insertion point when *x* is absent, or the current lower bound when a
    matching element is hit.
    """
    lo, hi = 0, N - 1
    mid = (lo + hi) // 2
    while True:
        if arr[mid] < x:
            lo = mid + 1
        elif arr[mid] > x:
            hi = mid - 1
        else:
            # Match found: report the lower bound, as the original did.
            return lo
        mid = (lo + hi) // 2
        if lo > hi:
            return lo
def get_column(su, i):
    """Return the i-th column of the sudoku grid as a list."""
    return [row[i] for row in su]
from typing import Any
def is_integer(thing: Any) -> bool:
    """
    Return whether the given value is an integer. Unlike plain Python
    semantics, booleans are explicitly rejected, so ``True`` yields
    ``False``.
    :param thing: the value to check.
    :return: ``True`` if the value is an int and not a bool.
    """
    if isinstance(thing, bool):
        return False
    return isinstance(thing, int)
import functools
def as_root(func):
    """Decorator: run the wrapped coroutine as the 'root' user instead of
    self.user, via the object's _set_user context manager."""
    @functools.wraps(func)
    async def run_as_root(self, *args, **kwargs):
        with self._set_user('root'):
            result = await func(self, *args, **kwargs)
        return result
    return run_as_root
import os
import argparse
def existent(path):
    """
    Argparse type helper: validate that *path* exists on disk.
    :param path: Path to check
    :return: the same path string when it exists
    :raises argparse.ArgumentTypeError: when the path does not exist
    """
    if os.path.exists(path):
        return path
    raise argparse.ArgumentTypeError(f"{path} does not exist")
def suffix_array_to_suffix_tree(sa, lcp, text):
    """
    Build suffix tree of the string text given its suffix array suffix_array
    and LCP array lcp_array. Return the tree as a mapping from a node ID
    to the list of all outgoing edges of the corresponding node. The edges in
    the list must be sorted in the ascending order by the first character of
    the edge label. Root must have node ID = 0, and all other node IDs must
    be different nonnegative integers. Each edge must be represented by a
    tuple (node, start, end), where
        * node is the node ID of the ending node of the edge
        * start is the starting position (0-based) of the substring of text
          corresponding to the edge label
        * end is the first position (0-based) after the end of the substring
          corresponding to the edge label
    For example, if text = "ACACAA$", an edge with label "$" from root to a
    node with ID 1 must be represented by a tuple (1, 6, 7). This edge must
    be present in the list tree[0] (corresponding to the root node), and it
    should be the first edge in the list (because it has the smallest first
    character of all edges outgoing from the root).
    """
    tree = {}
    # Implement this function yourself
    # NOTE(review): unimplemented stub — always returns an empty mapping,
    # which does not satisfy the contract described in the docstring.
    return tree
def compute_errors(model, s, im):
    """
    Compute errors between each sentence and caption using the model's
    pre-built 'f_err' function.
    """
    error_fn = model['f_err']
    return error_fn(s, im)
def multiply_operator(word: str, factor: int = 1):
    """
    Repeat the string 'word' 'factor' times.
    :param word: string to be multiplied
    :param factor: repetition count, defaults to 1
    :return: the repeated string
    """
    return factor * word
from typing import Any
def is_greater_than(value: Any, *, lower_bound: Any = 0) -> bool:
    """Check whether *value* is strictly greater than *lower_bound*.
    :param value: the value under test
    :param lower_bound: the bound to compare against (defaults to 0)
    :return: True when value > lower_bound
    """
    result = value > lower_bound
    return result
def place_crop(crop, image, center_x, center_y):
    """Place the crop in the image at the specified location.

    The crop is clipped to the image bounds, then blended in by first
    zeroing the destination pixels wherever the crop is non-zero and
    adding the crop values on top, so the crop overwrites the background
    instead of summing with it. *image* is modified in place and returned.

    Args:
        crop: array of shape (H, W, C); parts outside the image are dropped.
        image: destination array, mutated in place.
        center_x: x coordinate (column) of the crop's center in the image.
        center_y: y coordinate (row) of the crop's center in the image.

    Returns:
        The mutated *image*.

    NOTE(review): crop_mask sums a per-channel indicator, so a pixel with
    several non-zero channels gets a mask value > 1, making (1 - crop_mask)
    negative — confirm callers only pass single-channel-positive crops, or
    that this is intended. Also assumes crop/image dtypes support in-place
    *= and += together — verify.
    """
    im_height, im_width = image.shape[:2]
    crop_height, crop_width = crop.shape[:2]
    # Destination rectangle, centered on (center_x, center_y).
    left = center_x - crop_width // 2
    right = left + crop_width
    top = center_y - crop_height // 2
    bottom = top + crop_height
    adjusted_crop = crop  # remove regions of crop that go beyond image bounds
    if left < 0:
        adjusted_crop = adjusted_crop[:, -left:]
    if right > im_width:
        # Negative stop trims (right - im_width) columns off the end.
        adjusted_crop = adjusted_crop[:, :(im_width - right)]
    if top < 0:
        adjusted_crop = adjusted_crop[-top:]
    if bottom > im_height:
        adjusted_crop = adjusted_crop[:(im_height - bottom)]
    # Mask of destination pixels covered by non-zero crop content.
    crop_mask = (adjusted_crop > 0).astype(crop.dtype).sum(-1, keepdims=True)
    # Clear the destination under the mask, then add the crop content.
    image[max(0, top):min(im_height, bottom), max(0, left):min(im_width, right)] *= (1 - crop_mask)
    image[max(0, top):min(im_height, bottom), max(0, left):min(im_width, right)] += adjusted_crop
    return image
def is_hdfs_path(path):
    """
    Check if a given path is an HDFS uri.
    Args:
        path (str): input path
    Returns:
        bool: True if input is a HDFS path, False otherwise
    >>>is_hdfs_path("/tdk")
    False
    >>>is_hdfs_path("hdfs://aa:123/bb/cc")
    True
    """
    scheme = "hdfs://"
    return path[:len(scheme)] == scheme
import pickle
def separate_dataset(path_list):
    """
    Load attack data from a list of pickle files and bucket the records
    by class label.

    Each pickled file holds (label, logits, in_out) triples; the result
    maps each label 0-9 to a list of (logits, in_out) pairs.
    """
    cls_dict = {label: [] for label in range(10)}
    for path in path_list:
        with open(path, 'rb') as handle:
            records = pickle.load(handle)
        for label, logits, in_out in records:
            cls_dict[label].append((logits, in_out))
    return cls_dict
def _check_bal_args(cash, bank, reason) -> bool:
    """Validate the types and content of arguments for the edit- and
    set-balance methods; returns True when everything checks out."""
    if cash is None and bank is None:
        raise ValueError('An amount or "Infinity" must be specified for either cash or bank')
    for name, value in {'cash': cash, 'bank': bank}.items():
        if value is None:
            continue
        kind = type(value)
        if kind not in [int, str]:
            raise TypeError(f"{name} can only be int or str but was {kind}")
        if kind is str and value not in ['Infinity', '-Infinity']:
            raise ValueError(f'When {name} is a String "Infinity" is expected but "{value}" was received')
    reason_kind = type(reason)
    if reason_kind is not str:
        raise TypeError(f'Reason can only be str but was "{reason_kind}"')
    return True
def _inverse_move(move):
    """Invert a move: strip the prime marker when present, add it otherwise."""
    if '\'' not in move:
        return move + '\''
    return move.strip('\'')
def err_func(p, x, y, yerr, function):
    """
    Weighted difference between data and a model.

    :param p: model parameters
    :param x: independent variable values
    :param y: observed data
    :param yerr: uncertainty on y
    :param function: model callable, invoked as function(x, p)
    :return: (y - function(x, p)) / yerr**2

    NOTE(review): residuals are divided by yerr**2; a conventional chi
    residual would divide by yerr — confirm this weighting is intended.
    """
    # Evaluate the model once and reuse it (the original called function()
    # twice, discarding the first result).
    fit = function(x, p)
    return (y - fit) / yerr**2
def say(number):
    """Return the look-and-say reading of the digit string *number*.

    Each maximal run of identical characters is emitted as
    "<run length><character>", e.g. "1211" -> "111221".

    The original implementation dropped the trailing run when it was
    longer than one character ("11" produced "" instead of "21") and
    raised IndexError on single-character input; both are fixed here.
    """
    if not number:
        return ''
    said = []
    run_char = number[0]
    run_len = 0
    for char in number:
        if char == run_char:
            run_len += 1
        else:
            # Run ended: emit "<count><char>" and start a new run.
            said.append(str(run_len))
            said.append(run_char)
            run_char = char
            run_len = 1
    # Emit the final run (always present for non-empty input).
    said.append(str(run_len))
    said.append(run_char)
    return ''.join(said)
def dotProduct(vector1, vector2):
    """Caclulate and return dot product of vectors.
    Calculate and return the dot product (inner product) of the two vectors
    (Python dict type).
    :param vector1: vector 1
    :type vector1: dict
    :param vector2: vector 2
    :type vector2: dict
    :returns: dot product
    >>> dotProduct({1:1,2:2,3:3,4:4}, {2:2,3:3})
    13
    >>> dotProduct({1:1,2:2,3:3,4:4}, {2:2.0,3:3.0})
    13.0
    """
    # order doesn't affect result - just need to compare the two
    # should be faster to iterate over shorter then search in longer dict
    a, b = sorted([vector1, vector2], key=len)
    # dict.iteritems() is Python 2 only and raised AttributeError on
    # Python 3; use items() instead.
    return sum(v * b.get(k, 0) for k, v in a.items())
import random
def _gsa_update_velocity(velocity, acceleration):
    """Stochastically update velocity given acceleration.
    In the GSA paper, velocity is v_i and acceleration is a_i.

    Per the algorithm, each dimension's new velocity is a uniformly random
    fraction of the current velocity plus the acceleration in that
    dimension — which is why dimensions are combined individually rather
    than with a plain vector sum.
    """
    return [random.uniform(0.0, 1.0) * vel + acc
            for vel, acc in zip(velocity, acceleration)]
import copy
def agent_to_json(agent, trace_aware=False):
    """
    Convert an agent to a JSON-serialisable dict.
    :param agent: the agent
    :param trace_aware: also include a deep copy of the agent's trace
    :return: dict with x, y, direction, speed, active (and optionally trace)
    """
    pos_x, pos_y = agent.pos
    result = {
        "x": pos_x,
        "y": pos_y,
        "direction": str(agent.direction),
        "speed": agent.speed,
        "active": agent.active,
    }
    if trace_aware:
        result["trace"] = copy.deepcopy(agent.trace)
    return result
def _parseAndSum(line, numCommas, toggle):
    """Given a line from an excel csv file, add all the numbers
    after comma1 before numCommas+1.
    Example: if numCommas = 30, then summate all numbers between comma 1 and comma 31

    For toggle == "Occ", the first sum is discarded and the NEXT numCommas
    fields are summed instead (see note below).
    Return summation"""
    # First pass: sum the numCommas fields that follow the first comma.
    Sum = 0
    track = 0
    while track < numCommas:
        comma1 = line.find(",")
        comma2 = line.find(",", comma1+1)
        Sum += float(line[comma1+1:comma2])
        # Advance past the consumed field; comma2 becomes the new leading comma.
        line = line[comma2:]
        track += 1
    if toggle=="Occ":
        # NOTE(review): because `line` was already consumed above, this second
        # pass sums the SECOND group of numCommas fields — presumably the
        # "Occ" columns live there. Confirm this offset is intended and that
        # every line has at least 2*numCommas+1 fields.
        Sum = 0
        track = 0
        while track < numCommas:
            comma1 = line.find(",")
            comma2 = line.find(",", comma1+1)
            Sum += float(line[comma1+1:comma2])
            line = line[comma2:]
            track += 1
    return Sum
def extract_cands(mystr, candd):
    """Split an underscore-separated string into candidate names and
    increment each name's count in *candd* (mutated in place).

    Returns the updated dict.
    """
    for name in mystr.split("_"):
        candd[name] = candd.get(name, 0) + 1
    return candd
def clean_postcode(postcode):
    """Normalise a postcode string from boston_massachusetts.osm to a
    5-digit code; unfixable or out-of-area values become "00000"."""
    # Postcodes known to lie outside the mapped area.
    out_of_area = ("01125", "20052", "01238", "01240", "01250")
    if "-" in postcode:
        # Drop the -XXXX ZIP+4 suffix.
        return postcode.split("-")[0]
    if "MA" in postcode:
        # Drop a "MA " state prefix; reject anything not 5 digits long.
        stripped = postcode.replace("MA ", "")
        return stripped if len(stripped) == 5 else "00000"
    if len(postcode) < 5:
        return "00000"
    if postcode in out_of_area:
        return "00000"
    return postcode
def try_get(src, getter, expected_type=None):
    """Getter for an object with optional type checking.
    Args:
        src (object): object to extract a value from.
        getter: a callable (or list/tuple of callables) applied to *src*;
            the first one that succeeds and matches *expected_type* wins.
        expected_type (type, optional): required type of the result.
            Defaults to None (any type accepted).
    Returns:
        The first matching value, or None when every getter fails.
    """
    getters = getter if isinstance(getter, (list, tuple)) else [getter]
    for fn in getters:
        try:
            value = fn(src)
        except (AttributeError, KeyError, TypeError, IndexError):
            continue
        if expected_type is None or isinstance(value, expected_type):
            return value
def is_pure_list(arg):
    """Return True when *arg* is a plain list or tuple."""
    return isinstance(arg, list) or isinstance(arg, tuple)
def GetArithmeticMembers(code, operator):
    """Get the operators and operands used in an equation.

    Each line's last token is the operand; the remaining tokens form the
    operator name, which is looked up in the *operator* mapping. Lines
    whose operator name is unknown are skipped.
    """
    op_list, arg_list = [], []
    for line in code:
        tokens = line.split()
        op_name = " ".join(tokens[:-1])
        operand = tokens[-1]
        if op_name in operator.keys():
            arg_list.append(operand)
            op_list.append(operator[op_name])
    return op_list, arg_list
def perimeter(n):
    """Return 4 * (1 + the sum of consecutive Fibonacci pair sums over n
    steps) — the total perimeter after n growth steps."""
    total = 1
    prev, curr = 0, 1
    for _ in range(n):
        total += prev + curr
        prev, curr = curr, prev + curr
    return 4 * total
def to_sentiment(rating) -> int:
    """
    Map a review rating to a sentiment class.
    :param rating: rating of the review (int or numeric string)
    :return: 0 for ratings <= 2, 1 for a rating of 3, 2 for anything higher
    """
    value = int(rating)
    if value <= 2:
        return 0
    return 1 if value == 3 else 2
import struct
import socket
import binascii
def compact(ip, port, ascii=False):
    """
    Compact an IPv4 address and port into 6 bytes (network byte order).

    When *ascii* is true the packed bytes are returned hex-encoded as a
    ``bytes`` object instead. (Docstring examples updated for Python 3;
    the old ones used Python 2 ``str``/``.decode('hex')`` semantics.)

    >>> compact('127.0.0.1', 6667, ascii=True)
    b'7f0000011a0b'
    >>> compact('127.0.0.1', 6667) == bytes.fromhex('7f0000011a0b')
    True
    """
    compacted = struct.pack('!4sH', socket.inet_aton(ip), port)
    return binascii.hexlify(compacted) if ascii else compacted
def return_occurrences(pattern, bwt, starts, occ_counts_before):
    """
    Find occurrences of a string pattern in a text,
    given only Burrows-Wheeler Transform, bwt, of the text, and additional
    information we get from the preprocessing stage - starts and occ_counts_before.

    Performs backward search: the pattern is consumed right-to-left while a
    [top, bottom] row range of the BWT matrix is narrowed via LF-mapping.

    Args:
        pattern: string to search for.
        bwt: Burrows-Wheeler transform of the text.
        starts: mapping symbol -> first row index of that symbol in the
            sorted first column.
        occ_counts_before: mapping symbol -> list of occurrence counts,
            where occ_counts_before[c][i] is the number of occurrences of c
            in bwt[:i] (assumed from usage — confirm with the preprocessing
            stage).

    Returns:
        A half-open row range (top, bottom + 1) when the pattern occurs,
        or None when it does not.
    """
    dim = len(bwt)
    top = 0
    bottom = dim - 1
    i = len(pattern) - 1
    while top <= bottom:
        if i >= 0:
            # print(f"pattern = {pattern[:i+1]}")
            symbol = pattern[i]
            # print(f"\tpattern = {pattern[:i]}, symbol = {symbol}")
            # LF-mapping: narrow the row range to rows preceded by `symbol`.
            top = starts[symbol] + occ_counts_before[symbol][top]
            bottom = starts[symbol] + occ_counts_before[symbol][bottom + 1] - 1
            # print(f"top = {top}, bottom = {bottom}")
        else:
            # Entire pattern consumed: rows [top, bottom] all match.
            # print(f"--> EMPTY. bottom = {bottom}, top = {top}\n")
            return top, bottom + 1
        i -= 1
    # Range became empty before the pattern was consumed: no occurrence.
    # print(f"--> NO OCCURRENCE of pattern '{pattern}'.\n")
    return None
def get_number_of_packages_maintained_by_maintainers(pypi_profiles):
    """Retrieve the number of PyPI packages maintained by each maintainer.

    Scrapes the "<N> projects" heading out of each beautiful-souped
    maintainer profile page in pypi_profiles["maintainers_data"] and
    returns the counts (as strings, in page order).
    """
    num_packages = []
    # Loop thru beautiful-souped maintainer list
    for soup in pypi_profiles["maintainers_data"]:
        for element in soup.findAll("div", {"class": "left-layout__main"}):
            heading = element.find("h2")
            # Strip whitespace, then keep only the leading number of
            # e.g. "12 projects".
            count = heading.contents[0].strip().split(" ")[0]
            num_packages.append(count)
    return num_packages
import numpy
import math
def angle_between(dest, base=(1, 0, 0)):
    """Return the angle in positive degrees from *base* to *dest*,
    measured in the xy-plane.
    dest: A vector (only x and y components are used)
    base: A vector defining the zero-angle direction
    Returns:
        Angle in degrees [0, 360)
    """
    point = (dest[0], dest[1])
    # Basis along *base* and its perpendicular (rotated +90 degrees).
    along_axis = (base[0], base[1])
    perp_axis = (-base[1], base[0])
    along = numpy.dot(point, along_axis)
    across = numpy.dot(point, perp_axis)
    angle = math.degrees(math.atan2(across, along))
    return (angle + 360) % 360
def vectorize(data):
    """
    Turn one flow-record row (a dict of strings) into a numeric feature
    vector.

    Returns (vector, label, labels) where vector is
    [sip octets, sport, dip octets, dport, byte_count, tos, proto flag
    (0 for TCP, 1 otherwise), duration, totalbytes], label is the int
    class label, and labels is [sip, dip, ts].
    """
    vector = []
    # Source IP octets and port.
    vector.extend(int(octet) for octet in data['sip'].split('.'))
    vector.append(int(data['sport']))
    # Destination IP octets and port.
    vector.extend(int(octet) for octet in data['dip'].split('.'))
    vector.append(int(data['dport']))
    vector.append(int(data['byte_count']))
    vector.append(int(data['tos']))
    # Protocol flag: 0 for TCP, 1 for anything else.
    vector.append(0 if data['proto'] == 'TCP' else 1)
    vector.append(float(data['duration']))
    vector.append(int(data['totalbytes']))
    labels = [data['sip'], data['dip'], data['ts']]
    return vector, int(data['label']), labels
import re
def remove_tags_and_content(s, tag):
    """Remove all of the specified tags from the string including their
    children.

    Greedily matches from the first opening to the last closing of *tag*
    and removes everything between the two.
    **Note**: Not intended to remove multiple sibling nodes with content
    in between — the greedy match removes that content too.
    Args:
        s (:obj:`str`): The HTML to parse.
        tag (:obj:`str`): The tag to be removed.
    Returns:
        :obj:`str`: A string with the specified tags and content removed.
    """
    pattern = rf'<\s*{tag}.*?>(.|\r|\n)*<\s*/\s*{tag}\s*>'
    return re.sub(pattern, '', s)
def get_A_door_180_f_i():
    """Entrance-door area of unit i on floor f facing 180 degrees clockwise
    from the main opening direction — equation (13c).

    Returns:
        float: entrance door area (m2)
    """
    DOOR_AREA_M2 = 1.6
    return DOOR_AREA_M2
def snake_to_camel(text: str) -> str:
    """ Convert snake_case to CamelCase """
    return "".join(part.capitalize() for part in text.split("_"))
def calc_f1(precision, recall):
    """
    Compute F1 metric from precision and recall.
    inputs:
        precision       float with the precision value
        recall          float with the recall value
    output:
        f1              float with the F1 value; 0.0 when both inputs are 0
                        (the original raised ZeroDivisionError there)
    """
    denominator = precision + recall
    if denominator == 0:
        return 0.0
    return (2 * precision * recall) / denominator
def check_intents(test_dict, observed_intents):
    """
    Count intent errors: returns 1 when a predicted_intent is present in
    *test_dict* but missing from *observed_intents*, otherwise 0.
    """
    predicted = test_dict.get("predicted_intent")
    if predicted and predicted not in observed_intents:
        return 1
    return 0
import time
def program_timer(func):
    """Print the runtime of the decorated function."""
    def timed_call(*args, **kwargs):
        started = time.perf_counter()
        result = func(*args, **kwargs)
        elapsed = time.perf_counter() - started
        # Break the duration down into hours, minutes and seconds.
        minutes, seconds = divmod(elapsed, 60)
        hours, minutes = divmod(minutes, 60)
        if hours > 0:
            print(
                "{}{} HOURS {} MINUTES {:6.3f} SECONDS".format(
                    "TOTAL RUN TIME: ", hours, minutes, seconds
                )
            )
        elif minutes > 0:
            print(
                "{}{} MINUTES {:6.3f} SECONDS".format(
                    "TOTAL RUN TIME: ", minutes, seconds
                )
            )
        else:
            print("{}{:6.3f} SECONDS".format("TOTAL RUN TIME: ", seconds))
        return result
    return timed_call
import binascii
def mac_str_to_bytes(mac):
    """Convert a string MAC address to bytes; bytes pass through unchanged.

    Accepts ':', '-', or '.' separated hex digits; raises TypeError for
    anything that is neither bytes nor str.
    """
    if isinstance(mac, bytes):
        return mac
    if not isinstance(mac, str):
        raise TypeError('MAC address given must be a string')
    hex_digits = mac
    for separator in (':', '-', '.'):
        hex_digits = hex_digits.replace(separator, '')
    return binascii.unhexlify(hex_digits)
import json
def read_config(filename: str) -> dict:
    """
    Read a JSON config file and return its contents as a dict.
    """
    with open(filename) as handle:
        return json.load(handle)
def div(a: int, b: int) -> float:
    """
    Division, mind the zero!
    >>> div(10, 2)
    5.0
    :param a: An integer
    :param b: Another integer
    :return: the result of dividing a and b
    :raises:
        ZeroDivisionError: if parameter b is 0
    """
    quotient = a / b
    return quotient
def make_list(iterable):
    """
    Return *iterable* as a list — but pass lists and tuples through
    unchanged (no new object is created for them).
    :param Iterable iterable: Some iterable entity to convert into a list.
    """
    return iterable if isinstance(iterable, (list, tuple)) else list(iterable)
def get_filter_count(layer_idx, filters_root):
    """
    Feature-channel count at a given depth: the number of channels doubles
    at each downsampling step.
    """
    return filters_root * (2 ** layer_idx)
def _GenerateUserPayload(users):
    """Generate the user payload data for all users.

    Deliberately exposes only each user's urlsafe key and email — not the
    full property set, which includes a private key and other user data.
    Args:
        users: A list of users with associated properties from the datastore.
    Returns:
        user_token_payloads: dict mapping urlsafe user key -> email.
    """
    return {user.key.urlsafe(): user.email for user in users}
def add_traffic_column(df):
    """Add a TRAFFIC column that is the sum of the Entries and Exits for a
    station, filtering outlier rows.
    Args:
        df (pandas.DataFrame): The original pandas dataframe
    Returns:
        df (pandas.DataFrame): dataframe with rows whose TIMEFRAME_ENTRIES
            or TIMEFRAME_EXITS fall outside [0, 5000] removed, a TRAFFIC
            column added, and the two source columns dropped
    """
    keep = (
        df['TIMEFRAME_ENTRIES'].between(0, 5000)
        & df['TIMEFRAME_EXITS'].between(0, 5000)
    )
    # .copy() avoids SettingWithCopyWarning on the assignment below.
    df = df[keep].copy()
    df['TRAFFIC'] = df['TIMEFRAME_ENTRIES'] + df['TIMEFRAME_EXITS']
    # df.drop(col, 1) used the positional axis argument, which was removed
    # in pandas 2.0; use the columns= keyword instead.
    return df.drop(columns=['TIMEFRAME_ENTRIES', 'TIMEFRAME_EXITS'])
import time
def get_runtime_s(job):
    """Return the job runtime in milliseconds.

    Uses the current wall-clock time when the job has not finished yet.
    (Note: despite the _s suffix in the name, the value is milliseconds.)
    """
    metadata = job.metadata
    start_time = metadata.get("scrapystats").get("start_time")
    finished_time = metadata.get("finished_time")
    if finished_time:
        return finished_time - start_time
    now_ms = int(round(time.time() * 1000))
    return now_ms - start_time
import os
def load_manifest(upath):
    """
    Return the manifest as a dictionary.

    Reads images/manifest.txt under *upath*; each data line holds
    "target repo_hash url sha256_hash". Blank lines and '#' comments are
    ignored; malformed lines are reported and skipped.

    Args:
        upath: base directory containing the images/ subdirectory.
    Returns:
        dict: target -> {"repo_hash", "url", "sha256_hash"}.
    """
    manifest_path = os.path.join(upath, 'images', 'manifest.txt')
    print("### Reading manifest from {}...".format(manifest_path))
    # Use a context manager so the file handle is closed (the original
    # leaked the handle from a bare open().read()).
    with open(manifest_path) as handle:
        manifest_contents = handle.read()
    manifest = {}
    for line in manifest_contents.split('\n'):
        line_unpacked = line.split()
        # Check that the line isn't empty or a comment
        if not line_unpacked or line.strip().startswith('#'):
            continue
        try:
            target, repo_hash, url, sha256_hash = line_unpacked
        except ValueError:
            print("WARNING: Invalid line in manifest file:\n"
                  " {}".format(line))
            continue
        manifest[target] = {
            "repo_hash": repo_hash,
            "url": url,
            "sha256_hash": sha256_hash,
        }
    return manifest
import torch
def get_output_dim(model, pooling_type="gem"):
    """Dynamically compute the output size of a model by running a dummy
    (2, 3, 224, 224) batch through it.
    """
    dummy_batch = torch.ones([2, 3, 224, 224])
    output_dim = model(dummy_batch).shape[1]
    if pooling_type == "netvlad":
        # NetVLAD layer has 64x bigger output dimensions
        output_dim *= 64
    return output_dim
def compute_center(box):
    """
    Return the (x, y) center of a box.
    :param box: (box[0], box[1]) is the left low corner coordinate;
        (box[2], box[3]) is the right top corner coordinate.
    :return: (center_x, center_y) as floats
    """
    center_x = (box[0] + box[2]) / 2.0
    center_y = (box[1] + box[3]) / 2.0
    return center_x, center_y
def merge_schemes(alist, novalue=True):
    """Merges schemes of list of objects and generates final data schema.

    Folds every schema in *alist* into the first one (which is MUTATED and
    returned). When *novalue* is False, the 'value' fields are accumulated
    as well; nested dict/array schemas are merged recursively.

    Args:
        alist: list of schema dicts; each maps a field name to a dict with
            'type' and optionally 'subtype', 'value', 'schema' keys
            (assumed from usage — confirm with the producer of these dicts).
        novalue: when True, only structure is merged and 'value' is ignored.

    Returns:
        The merged schema (the mutated first element), or None when the
        list is empty.
    """
    if len(alist) == 0:
        return None
    obj = alist[0]
    okeys = obj.keys()
    for item in alist[1:]:
        for k in item.keys():
            # print(obj[k]['type'])
            if k not in okeys:
                # Field unseen so far: adopt it wholesale.
                obj[k] = item[k]
            elif obj[k]['type'] in ['integer', 'float', 'string', 'datetime']:
                if not novalue:
                    obj[k]['value'] += item[k]['value']
            elif obj[k]['type'] == 'dict':
                if not novalue:
                    obj[k]['value'] += item[k]['value']
                if 'schema' in item[k].keys():
                    # Recursively merge the nested dict schema.
                    obj[k]['schema'] = merge_schemes([obj[k]['schema'], item[k]['schema']])
            elif obj[k]['type'] == 'array':
                # if 'subtype' not in obj[k].keys():
                # logging.info(str(obj[k]))
                if 'subtype' in obj[k].keys() and obj[k]['subtype'] == 'dict':
                    if not novalue:
                        obj[k]['value'] += item[k]['value']
                    if 'schema' in item[k].keys():
                        obj[k]['schema'] = merge_schemes([obj[k]['schema'], item[k]['schema']])
                else:
                    # NOTE(review): this reads item['value'], not
                    # item[k]['value'] — looks like a typo; confirm against
                    # callers before changing.
                    if not novalue:
                        obj[k]['value'] += item['value']
    return obj
def extrair_palavras(frases_com_stemmer):
    """Collect every word in the dataset into a single flat list.
    Args:
        frases_com_stemmer: (words, class) pairs with stemming applied.
    Returns:
        todas_palavras: list of all words, in order of appearance.
    """
    todas_palavras = []
    for palavras, _classe in frases_com_stemmer:
        todas_palavras += palavras
    return todas_palavras
def get_attribute(node, attr, default=None):
    """Return a requested attribute from a graph node.

    Construction (CX) graphs contain other CX objects but also
    integers (Text-Fabric node numbers). This method enables
    attribute calls on CX objects without erroring out on
    integer objects.

    Args:
        node: a node in a CX graph (a CX object or an int)
        attr: an attribute string to look up on node
        default: value to return when node is an int or attr is absent

    Returns:
        The attribute's value, or ``default``.
    """
    # isinstance instead of type(...) == int; ints carry no attributes.
    if isinstance(node, int):
        return default
    # Look only at the instance __dict__ (not class attributes), preserving
    # the original try/except-KeyError semantics.
    return node.__dict__.get(attr, default)
from collections import namedtuple
from ipaddress import ip_address, ip_network
def validate(request, validation_data):
    """Check payload from GitHub: the origin IP must be genuine; the repo owner and title must be valid.
    :param request: `CherryPy request <http://docs.cherrypy.org/en/latest/pkg/cherrypy.html#cherrypy._cprequest.Request>`_ instance representing incoming request
    :param validation_data: dict with the keys ``owner``, ``repo``, and optionally ``branches``
        (list of allowed branch names; when absent, the pushed branch is accepted), parsed from the config
    :returns: namedtuple(status, message, list of extracted params as dicts), e.g. ``Response(status=200, message='Payload validated. Branch: main', [{'branch': 'main'}])``
    """
    response = namedtuple('Response', ('status', 'message', 'param_dicts'))
    if request.method != 'POST':
        return response(405, 'Payload validation failed: Wrong method, POST expected, got %s.' % request.method, [])
    # GitHub webhook source range; reject anything else.
    trusted_ips = ip_network('192.30.252.0/22')
    remote_ip = ip_address(request.remote.ip)
    if remote_ip not in trusted_ips:
        return response(403, 'Payload validation failed: Unverified remote IP: %s.' % remote_ip, [])
    try:
        payload = request.json
        is_ping = 'zen' in payload
        # Ping payloads nest the owner under 'login'; push payloads under 'name'.
        if is_ping:
            owner = payload['repository']['owner']['login']
        else:
            owner = payload['repository']['owner']['name']
        if owner != validation_data['owner']:
            return response(403, 'Payload validation failed: wrong owner: %s' % owner, [])
        repo = payload['repository']['name']
        if repo != validation_data['repo']:
            return response(403, 'Payload validation failed: wrong repository: %s' % repo, [])
        if is_ping:
            return response(200, 'Ping payload validated', [])
        branch = payload['ref'].split('/')[-1]
        # BUG FIX: the default must be a one-element list. Passing the bare
        # string to set() built a set of its *characters*, which rejected
        # every multi-character branch name when 'branches' was not configured.
        allowed_branches = set(validation_data.get('branches', [branch]))
        if branch not in allowed_branches:
            return response(403, 'Payload validation failed: wrong branch: %s' % branch, [])
        return response(200, 'Payload validated. Branch: %s' % branch, [{'branch': branch}])
    except Exception as e:
        return response(400, 'Payload validation failed: %s' % e, [])
def value_for_key(membersuite_object_data, key):
    """Return the value stored under ``key`` in membersuite_object_data.

    Raises KeyError when the key is absent; when a key occurs more than
    once, the last occurrence wins.
    """
    pairs = membersuite_object_data["Fields"]["KeyValueOfstringanyType"]
    lookup = dict((item["Key"], item["Value"]) for item in pairs)
    return lookup[key]
def intersection(A, B):
    """Return a list of the elements of A that also occur in B.

    Preserves A's order and keeps duplicates from A. Fixed for Python 3:
    the original returned a lazy ``filter`` object rather than a list.
    """
    return [x for x in A if x in B]
def v9_add(*matrices):
    """Add corresponding entries of the given 2-D matrices.

    Each row of the result is the element-wise sum (via ``sum``) of the
    matching rows across all input matrices.
    """
    return [
        [sum(cells) for cells in zip(*row_group)]
        for row_group in zip(*matrices)
    ]
import re

# Compiled once at import time; reused by every call.
_WHITESPACE_RE = re.compile(r'\s+')


def clean_sentence(sentence):
    """Collapse every run of whitespace in `sentence` into a single space."""
    return _WHITESPACE_RE.sub(' ', sentence)
def collapse_epitopes(epitopes):
    """Invert the epitope map, merging entries that share the same peptide.

    Keys of ``epitopes`` are 7-tuples
    (enst, ensg, name, pos, chrom, genome_pos, fasta_name); values are the
    epitope peptides. Entries with the same peptide collapse into a single
    record that accumulates every (enst, str(pos)) pair.
    """
    output_epitopes = {}
    for key, peptide in epitopes.items():
        enst, ensg, name, pos, chrom, genome_pos, fasta_name = key
        record = output_epitopes.get(peptide)
        if record is None:
            # First sighting of this peptide: start a new record.
            output_epitopes[peptide] = [name, chrom, genome_pos,
                                        [(enst, str(pos))], ensg, fasta_name]
        else:
            record[3].append((enst, str(pos)))
    return output_epitopes
from typing import Any
def as_text(value: Any) -> str:
    """
    Convert the given value to a string. :py:obj:`None` is converted to ``''``.
    :param value: The value to convert to a string.
    :rtype:
    .. versionchanged:: 0.8.0
    Moved from :mod:`domdf_python_tools.utils`.
    """
    return '' if value is None else str(value)
def _validate_str_with_equals(input_string):
"""
make sure an input string is of the format {0}={1} {2}={3} {4}={5} ...
Some software programs put spaces after the equals sign and that's not
cool. So we make the string into a readable format
:param input_string: input string from an edi file
:type input_string: string
:returns line_list: list of lines as ['key_00=value_00',
'key_01=value_01']
:rtype line_list: list
"""
input_string = input_string.strip()
# remove the first >XXXXX
if ">" in input_string:
input_string = input_string[input_string.find(" ") :]
# check if there is a // at the end of the line
if input_string.find("//") > 0:
input_string = input_string[0 : input_string.find("//")]
# split the line by =
l_list = input_string.strip().split("=")
# split the remaining strings
str_list = []
for line in l_list:
s_list = line.strip().split()
for l_str in s_list:
str_list.append(l_str.strip())
# probably not a good return
if len(str_list) % 2 != 0:
# _logger.info(
# 'The number of entries in {0} is not even'.format(str_list))
return str_list
line_list = [
"{0}={1}".format(str_list[ii], str_list[ii + 1])
for ii in range(0, len(str_list), 2)
]
return line_list | f137ddabfea9671e693373edd43505e7955c2510 | 38,753 |
def get_gwf_row_color(conf_list, id):
    """Return the double-quoted row colour for entry ``id``; -1 means black."""
    if id == -1:
        return '"black"'
    return '"{}"'.format(conf_list[id].color_text)
import re
def remove_tags(html_str: str, tags):
    """
    Remove the requested tag blocks / comments (and their content) from HTML.

    :param html_str: HTML source to clean
    :param tags: a tag name or list of names; supported values are
        'js'/'javascript' (script blocks), 'css' (style blocks), and anything
        containing 'comment' or equal to '#' or '//' (HTML comments)
    :return: the cleaned HTML string
    """
    if isinstance(tags, str):
        tags = [tags]
    if isinstance(tags, list):
        for tag in tags:
            if tag == 'js' or tag == 'javascript':
                # BUG FIX: use re.DOTALL instead of a trailing inline '(?s)';
                # global flags not at the start of a pattern raise re.error
                # on Python 3.11+.
                scripts = re.compile(r'<(script).*?</\1>', re.DOTALL)
                html_str = scripts.sub('', html_str)
            if tag == 'css':
                css = re.compile(r'<(style).*?</\1>', re.DOTALL)
                html_str = css.sub('', html_str)
            if 'comment' in tag or tag == '#' or tag == '//':
                comments = re.compile(r'<!--(.|\s)*?-->')
                html_str = comments.sub('', html_str)
    return html_str
def make_select(
    name,
    selected,
    data,
    jscallback=None,
    cssclass=None,
    multiple=False,
    showvalue=True,
) -> str:
    """Generate a HTML select.

    The trick here is what `data` looks like. The basic form is a dict;
    dict (or list/tuple) values produce `optgroup` sections.

    Args:
        name (str): The select[name] to assign.
        selected (mixed): The option value(s) that should be set to selected.
        data (dict): The structure to build our select from.
        jscallback (str): javascript to place in the `onChange` attribute.
        cssclass (str): CSS class to assign to the select element.
        multiple (bool): Whether to emit a MULTIPLE select.
        showvalue (bool): Should option label be prepended by [key].

    Returns:
        html_string
    """
    if not isinstance(selected, (list, tuple)):
        selected = [selected]
    onchange = "" if jscallback is None else f' onChange="{jscallback}(this.value)"'
    klass = "" if cssclass is None else f' class="{cssclass}"'
    mult = " MULTIPLE" if multiple else ""

    def render_option(key, label):
        # One <option> line, marked selected and/or prefixed with its key.
        chosen = ' selected="selected"' if key in selected else ""
        prefix = f"[{key}] " if showvalue else ""
        return f'<option value="{key}"{chosen}>{prefix}{label}</option>\n'

    parts = [f'<select name="{name}"{onchange}{klass}{mult}>\n']
    for key, val in data.items():
        if isinstance(val, (tuple, list)):
            # A flat sequence means value == label for each entry.
            val = dict(zip(val, val))
        if not isinstance(val, dict):  # simple option
            parts.append(render_option(key, val))
            continue
        parts.append(f'<optgroup label="{key}">\n')
        for key2, val2 in val.items():
            parts.append(render_option(key2, val2))
        parts.append("</optgroup>\n")
    parts.append("</select>\n")
    return "".join(parts)
def response(response_code, schema=None, description=None):
    """A decorator that records a swagger response on the handler.

    Entries accumulate in the handler's ``swagger_responses`` dict keyed by
    status code, so the decorator can be stacked for several codes.
    """
    def decorator(handler):
        registry = getattr(handler, 'swagger_responses', {})
        entry = {}
        if schema is not None:
            entry['schema'] = schema
        if description is not None:
            entry['description'] = description
        registry[response_code] = entry
        handler.swagger_responses = registry
        return handler
    return decorator
def create_file_name(path, start):
    """Create the name of an rst file from a module path.

    Example:
        resources.libraries.python.honeycomb.rst
        tests.perf.rst

    :param path: Path to a module to be documented.
    :param start: The first directory in path which is used in the file name.
    :type path: str
    :type start: str
    :returns: File name.
    :rtype: str
    """
    parts = path.split('/')
    first = parts.index(start)
    # Drop the module filename itself (last component) and join with dots.
    return '.'.join(parts[first:-1]) + '.rst'
import re
def get_compound_id(microstate_id):
    """
    Extract the compound ID from a microstate ID by stripping the wart
    suffix (e.g. '_1', '_2').

    Parameters
    ----------
    microstate_id : str
        The microstate ID, which may include a wart suffix (e.g. 'MAT-POS-8a69d52e-7_1')

    Returns
    -------
    compound_id : str
        The compound ID without the wart (e.g. 'MAT-POS-8a69d52e-7')
    """
    # Raw string literal: '\S' and '\d' in a plain string are invalid escape
    # sequences and warn on recent Python versions.
    match = re.match(r'^(?P<compound_id>\S+)_(?P<microstate_suffix>\d+)$',
                     microstate_id)
    if match is None:
        # No wart; compound and microstate IDs are identical.
        return microstate_id
    # Remove the wart.
    return match.group('compound_id')
import inspect
def get_methods_defined_in_class(cls):
    """
    Get all functions defined in a given class. This includes all
    non-inherited methods, static methods and class methods.

    Args:
        cls (Type): Class for lookup

    Returns:
        List[Tuple[str, Union[FunctionType, MethodType]]]
    """
    candidates = (inspect.getmembers(cls, inspect.isfunction)
                  + inspect.getmembers(cls, inspect.ismethod))
    # Drop anything inherited: keep only names in cls's own __dict__.
    own_names = cls.__dict__
    return [member for member in candidates if member[0] in own_names]
def initial_graph_properties(rlist, qlist):
    """Initial processing of sequence names for network construction.

    Args:
        rlist (list)
            List of reference sequence labels
        qlist (list)
            List of query sequence labels

    Returns:
        vertex_labels (list)
            Ordered list of sequences in network
        self_comparison (bool)
            Whether the network is being constructed from all-v-all distances
            or reference-v-query information
    """
    self_comparison = rlist == qlist
    # All-v-all: reference labels already cover everything; otherwise append
    # the query labels after the references.
    vertex_labels = rlist if self_comparison else rlist + qlist
    return vertex_labels, self_comparison
import sys
def forceUnicode(var, encoding=None):
    """Return ``var`` as a unicode/str object, decoding it if necessary.

    On Python 2, byte strings are decoded with ``encoding`` when one is
    given; everything else goes through ``unicode()``. On Python 3 this is
    simply ``str(var)``.
    """
    if sys.version_info >= (3, 0):
        return str(var)
    # Python 2 only below; ``unicode`` does not exist on Python 3.
    if isinstance(var, unicode):
        return var
    if isinstance(var, str) and encoding:
        return var.decode(encoding)
    return unicode(var)
def regex_for_range(min,max):
    """A recursive function to generate a regular expression that matches
    any number in the range between min and max inclusive.
    Note: the parameter names shadow the builtin min/max inside this function.
    Usage / doctests:
    >>> regex_for_range(13,57)
    '4[0-9]|3[0-9]|2[0-9]|1[3-9]|5[0-7]'
    >>> regex_for_range(1983,2011)
    '200[0-9]|199[0-9]|198[3-9]|201[0-1]'
    >>> regex_for_range(99,112)
    '99|10[0-9]|11[0-2]'
    Note: doctests are order sensitive, while regular expression engines don't care. So you may need to rewrite these
    doctests if making changes.
    """
    #overhead
    #assert (max>=min) and (min>=0)
    _min,_max=str(min),str(max)
    #calculations
    if min==max:
        # Degenerate range: match the single value literally.
        return '%s' % str(max)
    if len(_max)>len(_min):
        #more digits in max than min, so we pair it down into sub ranges
        #that are the same number of digits. If applicable we also create a pattern to
        #cover the cases of values with number of digits in between that of
        #max and min.
        re_middle_range=None
        if len(_max)>len(_min)+2:
            #digits more than 2 off, create mid range
            re_middle_range='[0-9]{%s,%s}' % (len(_min)+1,len(_max)-1)
        elif len(_max)>len(_min)+1:
            #digits more than 1 off, create mid range
            #assert len(_min)+1==len(_max)-1 #temp: remove
            re_middle_range='[0-9]{%s}' % (len(_min)+1)
        #pair off into sub ranges
        # High side: from the smallest value with as many digits as max, up to max.
        max_big=max
        min_big=int('1'+('0'*(len(_max)-1)))
        re_big=regex_for_range(min_big,max_big)
        # Low side: from min up to the largest value with as many digits as min.
        max_small=int('9'*len(_min))
        min_small=min
        re_small=regex_for_range(min_small,max_small)
        if re_middle_range:
            return '|'.join([re_small,re_middle_range,re_big])
        else:
            return '|'.join([re_small,re_big])
    elif len(_max)==len(_min):
        def naive_range(min,max):
            """Simply matches min, to max digits by position. Should create a
            valid regex when min and max have same num digits and has same 10s
            place digit."""
            _min,_max=str(min),str(max)
            pattern=''
            for i in range(len(_min)):
                if _min[i]==_max[i]:
                    pattern+=_min[i]
                else:
                    pattern+='[%s-%s]' % (_min[i],_max[i])
            return '%s' % pattern
        if len(_max)==1:
            patterns=[naive_range(min,max)]
        else:
            #this is probably the trickiest part so we'll follow the example of
            #1336 to 1821 through this section
            patterns=[]
            distance=str(max-min) #e.g., distance = 1821-1336 = 485
            increment=int('1'+('0'*(len(distance)-1))) #e.g., 100 when distance is 485
            if increment==1:
                #it's safe to do a naive_range see, see def since 10's place is the same for min and max
                patterns=[naive_range(min,max)]
            else:
                #create a function to return a floor to the correct digit position
                #e.g., floor_digit_n(1336) => 1300 when increment is 100
                # NOTE(review): this uses round(), not a true floor -- e.g.
                # round(18.56) is 19, so some inputs round *up* to the next
                # increment. Confirm intended; the doctests above do pass.
                floor_digit_n=lambda x:int(round(x/increment,0)*increment)
                #capture a safe middle range
                #e.g., create regex patterns to cover range between 1400 to 1800 inclusive
                #so in example we should get: 14[0-9]{2}|15[0-9]{2}|16[0-9]{2}|17[0-9]{2}
                for i in range(floor_digit_n(max)-increment,floor_digit_n(min),-increment):
                    len_end_to_replace=len(str(increment))-1
                    if len_end_to_replace==1:
                        pattern='%s[0-9]' % str(i)[:-(len_end_to_replace)]
                    else:
                        pattern='%s[0-9]{%s}' % (str(i)[:-(len_end_to_replace)],len_end_to_replace)
                    patterns.append(pattern)
                #split off ranges outside of increment digits, i.e., what isn't covered in last step.
                #low side: e.g., 1336 -> min=1336, max=1300+(100-1) = 1399
                patterns.append(regex_for_range(min,floor_digit_n(min)+(increment-1)))
                #high side: e.g., 1821 -> min=1800 max=1821
                patterns.append(regex_for_range(floor_digit_n(max),max))
        return '|'.join(patterns)
    else:
        raise ValueError('max value must have more or the same num digits as min')
import tempfile
def write_temp_return_filename(data):
    """
    Write out data to a temporary file and return that file's name.
    This file will need to be deleted by the caller.
    :param data: str: data to be written to a file
    :return: str: temp filename we just created
    """
    # Context manager guarantees the handle is closed even if write() fails.
    with tempfile.NamedTemporaryFile(delete=False) as file:
        file.write(data.encode('utf-8'))
    return file.name
from typing import List
import os
def get_all_files_recursively_from_directory(dirname: str) -> List[str]:
    """
    Get all files recursively from a directory.
    :param dirname: The directory name.
    :type dirname: str
    :return: The list of found files (full paths).
    :rtype: List[str]
    """
    files = []
    for dirpath, _dirnames, filenames in os.walk(dirname):
        # os.path.join per file is clearer than the original zip/map dance.
        files.extend(os.path.join(dirpath, filename) for filename in filenames)
    return files
import inspect
def protected_method(func):
    """Decorator that restricts an instance method to its own class hierarchy.

    The wrapped method may only be invoked from the instance itself or from a
    method whose ``self`` shares the target's class hierarchy; any other
    caller gets a RuntimeError.
    """
    def func_wrapper(*args, **kwargs):
        """Decorator wrapper function."""
        # Frame of the immediate caller of the protected method.
        outer_frame = inspect.stack()[1][0]
        # MRO (minus ``object``) of the caller's ``self`` and of the target.
        # NOTE(review): if the calling frame has no local named 'self', this
        # line raises KeyError before the explicit guard below runs -- the
        # "'self' not in f_locals" check looks unreachable; confirm intent.
        caller = inspect.getmro(outer_frame.f_locals['self'].__class__)[:-1]
        target = inspect.getmro(args[0].__class__)[:-1]
        share_subsclass = False
        for cls_ in target:
            if issubclass(caller[0], cls_) or caller[0] is cls_:
                share_subsclass = True
                break
        if ('self' not in outer_frame.f_locals or
                outer_frame.f_locals['self'] is not args[0]) and (not share_subsclass):
            raise RuntimeError('%s.%s is a protected method' % (args[0].__class__.__name__, func.__name__))
        return func(*args, **kwargs)
    return func_wrapper
def is_isomorphic(word_1: str, word_2: str):
    """
    Check whether two words are isomorphic, i.e. a one-to-one character
    mapping exists between them.
    Example:
        Input: s = "egg", t = "add"
        Output: true
    @param word_1: <str> word 1
    @param word_2: <str> word 2
    @return: <bool> whether two words are isomorphic
    """
    distinct_1 = len(set(word_1))
    distinct_2 = len(set(word_2))
    # If the number of distinct character pairs equals the number of distinct
    # characters on each side, the mapping is a bijection.
    distinct_pairs = len(set(zip(word_1, word_2)))
    return distinct_1 == distinct_2 == distinct_pairs
def same_base_index(a, b):
    """Check whether the base parts (everything before the final ``_``
    segment) of two index names are the same."""
    base_a = a.split("_")[:-1]
    base_b = b.split("_")[:-1]
    return base_a == base_b
import re
def quote_type_string(type_string: str) -> str:
    """Quotes a type representation for use in messages."""
    # These forms are easier to read unquoted; the regex covers the
    # placeholder strings with variable contents.
    unquoted = ('Module', 'overloaded function', '<nothing>', '<deleted>')
    if type_string in unquoted or type_string.endswith('?'):
        return type_string
    if re.match(r'^<(tuple|union): \d+ items>$', type_string) is not None:
        return type_string
    return '"{}"'.format(type_string)
def reddit_user(reddit_factories):
    """Override the user fixture to use reddit_factories."""
    contributor = reddit_factories.user("contributor")
    return contributor
def split_by_pos(data_input: str) -> list:
    """Split the string data_input into a list of its characters."""
    return [*data_input]
def season():
    """Return the hard-coded season year used by this module.

    NOTE(review): presumably the active data season; confirm whether this
    should be configurable rather than fixed at 2016.
    """
    return 2016
import uuid
def make_uuid(df, name='uuid'):
    """
    Create a list of random hex UUIDs, one per row of ``df``.

    NOTE(review): the ``name`` parameter is currently unused; it is kept
    for backward compatibility with existing callers.
    """
    return [uuid.uuid4().hex for _ in range(len(df))]
def list_to_streamdict(list):
    """Create a dictionary out of a flat list of alternating keys and args.

    Given ``[kwarg, arg, kwarg, arg, ...]`` the result is
    ``{kwarg: [arg], ...}`` with each value wrapped in a one-element list.

    Fixes: removes the debug ``print`` calls and no longer mutates the
    caller's list in place.
    """
    # NOTE: the parameter shadows the builtin ``list``; the name is kept
    # only for interface compatibility.
    return {list[i]: [list[i + 1]] for i in range(0, len(list), 2)}
def get_collection_sizes(net, bus_size=1.0, ext_grid_size=1.0, trafo_size=1.0, load_size=1.0,
                         sgen_size=1.0, switch_size=2.0, switch_distance=1.0):
    """
    Calculate the drawing sizes for most collection types from the spread
    between min and max geocoord so that the collections fit the plot nicely.

    .. note: This is implemented because a fixed value (e.g. bus_size = 0.2)\
        could be too small for large networks and vice versa

    :param net: pandapower network for which to create plot
    :type net: pandapowerNet
    :param bus_size: relative bus size
    :param ext_grid_size: relative external grid size
    :param trafo_size: relative trafo size
    :param load_size: relative load size
    :param sgen_size: relative static generator size
    :param switch_size: relative switch size
    :param switch_distance: relative distance between switches
    :return: sizes (dict) - dictionary containing all scaled sizes
    """
    geodata = net['bus_geodata']
    # Mean spread of the coordinates, scaled down by 200.
    scale = sum((geodata.max() - geodata.min()).dropna() / 200)
    return {
        "bus": bus_size * scale,
        "ext_grid": ext_grid_size * scale * 1.5,
        "switch": switch_size * scale * 1,
        "switch_distance": switch_distance * scale * 2,
        "load": load_size * scale,
        "sgen": sgen_size * scale,
        "trafo": trafo_size * scale,
    }
import glob
import re
def get_all_pod_uids():
    """Return a mapping of pid -> Kubernetes pod uid for running processes.

    Scans every /proc/<pid>/cgroup file and extracts the pod uid from any
    cgroup path segment of the form '/pod<uid>'. Processes that exit during
    the scan or that do not belong to a pod are skipped.
    """
    pod_uids = {}
    for cgroup_path in glob.glob("/proc/[0-9]*/cgroup"):
        pid = int(cgroup_path.split("/")[-2])
        try:
            with open(cgroup_path) as handle:
                contents = handle.read()
        except FileNotFoundError:
            # Process exited between the glob and the open; ignore it.
            continue
        match = re.search("/pod([^/]+)", contents)
        if match is not None:
            pod_uids[pid] = match.group(1)
    return pod_uids
def constant_density_2D(R, constant):
    """
    return a constant value at every input r

    Parameters
    ----------
    R: [Mpc]
        distance from the center (unused; accepted so this profile can be
        called with the same signature as radius-dependent density profiles)
    constant:
        multiplicative constant

    Returns
    -------
    constant
    """
    return constant
import shelve
def read_population_assignation_cp(namefile):
    """Read a pre-computed population assignation from a shelve file.

    :param namefile: path of the shelve database to open
    :returns: tuple (cps, population_value, methodvalues)
    """
    # Context manager ensures the db is closed even if a key lookup raises
    # (the original leaked the handle on KeyError).
    with shelve.open(namefile) as db:
        cps = db['cps']
        population_value = db['population_value']
        methodvalues = db['methodvalues']
    return cps, population_value, methodvalues
import random
def sample_coal(k, n):
    """
    Return a sample coalescent time for 'k' individuals in a population 'n',
    drawn from an exponential distribution with rate C(k, 2) / n.
    """
    pairs = k * (k - 1) / 2  # k choose 2
    rate = pairs / n
    return random.expovariate(rate)
import functools


def cached(fn):
    """
    Cache decorator keyed on both positional and keyword arguments.

    The original keyed only on ``*args``, so two calls differing only in
    keyword arguments wrongly shared one cache entry. All argument values
    must be hashable.

    If you are using python3, use functools.lru_cache() instead.
    """
    cache = {}

    @functools.wraps(fn)
    def cached_decorator(*args, **kwargs):
        # frozenset makes the kwargs part order-insensitive and hashable.
        key = (args, frozenset(kwargs.items()))
        if key in cache:
            return cache[key]
        value = fn(*args, **kwargs)
        cache[key] = value
        return value

    return cached_decorator
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.