| content (string, 39-14.9k chars) | sha1 (string, 40 chars) | id (int64, 0-710k) |
|---|---|---|
import re
def latin_lower(s):
""" Convert string to lower case,
replace all non-latin or non-digit symbols with dashes,
deduplicate and trim dashes
"""
result = s.lower()
result = re.sub("[^a-z0-9]", '-', result)
result = re.sub("-+", '-', result)
result = re.sub("^-", '', result)
result = re.sub("-$", '', result)
    return result
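# Usage sketch (illustrative input, not part of the original snippet):
assert latin_lower("Hello, World! 2024") == "hello-world-2024"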
|
e08cd0b3c18ddca1dfe5c89f8f562ed19213c39a
| 54,564
|
import math
def apply_deadzone(value: float, threshold: float) -> float:
"""Apply a deadzone to a value in [-1,1].
This ensures that the rest of the input space maps to [-1,1].
"""
assert 0 <= threshold < 1
if abs(value) < threshold:
return 0
return (value - math.copysign(threshold, value)) / (1 - threshold)
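# Usage sketch (values chosen to be exact in binary floating point; illustrative only):
assert apply_deadzone(0.25, 0.5) == 0       # inside the deadzone
assert apply_deadzone(0.75, 0.5) == 0.5     # rescaled so the remaining range maps to [-1, 1]
assert apply_deadzone(-1.0, 0.5) == -1.0    # endpoints are preserved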
|
49a0e0c642059865cb1de838bddf14e7ec3a8b9b
| 54,568
|
def get_meta_value(meta, key):
"""Get a value from MRD Meta Attributes (returns None if key not found)"""
    if key in meta:
return meta[key]
else:
return None
|
4d1f211475d09e1e345c20620a117dfc45723fe5
| 54,570
|
def movePos(pos, modifiers):
"""
Determine the coordinates of a new move
    The move is based on the current turn of the game, i.e. if red is moving,
    then the coordinates are from red's perspective
    :param pos: A 2-tuple (x, y) of positive integers, the grid coordinates of the piece to move
    :param modifiers: A list of booleans, (left, forward, jump)
        left: True to move left, False to move right
        forward: True to move forward, False to move backwards
        jump: True if this move is a jump, False if it is a normal move
    :return: The coordinates of the location of the new move, as a 2-tuple (x, y)
"""
x, y = pos
left, forward, jump = modifiers
if jump:
# determine direction for y axis
newY = y - 2 if forward else y + 2
# determine direction for x axis
if left:
newX = x - 1
else:
newX = x + 1
else:
# determine direction for y axis
newY = y - 1 if forward else y + 1
# determine direction for x axis
if left:
newX = x - 1 if y % 2 == 1 else x
else:
newX = x + 1 if y % 2 == 0 else x
return newX, newY
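# Usage sketch on an assumed checkers-style grid (coordinates are illustrative):
assert movePos((3, 4), (False, True, False)) == (4, 3)  # simple move: right and forward
assert movePos((3, 4), (True, True, True)) == (2, 2)    # jump: left and forward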
|
171653472e7c4f4ea6caeaa9014352b2f5e89610
| 54,574
|
def get_config_xml_tail(tail_xpath):
"""Get xml tail string when config.
Args:
tail_xpath: The string of xpath_key.
Returns:
xml_tail_str: The xml tail str.
"""
xml_tail_str = ""
tail_xpath_list = tail_xpath.split("/")[1:]
for item in reversed(tail_xpath_list):
xml_tail_str = xml_tail_str + "</" + item + ">"
xml_tail_str += "</config>"
return xml_tail_str
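# Usage sketch (the xpath below is an assumed example):
assert get_config_xml_tail("/ifm/interfaces/interface") == "</interface></interfaces></ifm></config>"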
|
a72c85096633bade103df8e48b5c99ee300b28a0
| 54,575
|
def getChildForRequest(resource, request):
"""
Traverse resource tree to find who will handle the request.
"""
while request.postpath and not resource.isLeaf:
pathElement = request.postpath.pop(0)
request.prepath.append(pathElement)
resource = resource.getChildWithDefault(pathElement, request)
return resource
|
80d1a034e7b2fbeea6d62198146238d4682f3c5b
| 54,577
|
def caps_are_open(caps):
"""Return true if all caps are open"""
    return all(c.isOpened() for c in caps)
|
036c2fdb71c6700e8698c0d0c56df643b2e9c42c
| 54,582
|
def compute_displacement(point, pointf):
"""To compute a displacement between two point arrays"""
assert len(point)==len(pointf)
values = []
for i, pt0 in enumerate(point):
pt1 = pointf[i]
values.append((pt1[0]-pt0[0], pt1[1]-pt0[1]))
return values
|
48a8950a89d5f1e4558b0dfbf0010f4f912fc571
| 54,591
|
import inspect
import six
def pp_class(obj):
"""
Prettify and return the passed class, or the class of the instance if an
instance is passed in.
"""
if not inspect.isclass(obj):
obj = obj.__class__
if (
(six.PY2 and obj.__module__ == "__builtin__") or
(six.PY3 and obj.__module__ == "builtins")
):
return "{}".format(obj.__name__)
else:
return "{}.{}".format(obj.__module__, obj.__name__)
|
fdd4d540c330b5c91a7231074912db6425abca7c
| 54,592
|
def _dictionary_to_list_values(dictionary):
"""
Replaces a dictionary with a list of just the values
Example input:
"interfaces": {
"1/1/21": "/rest/v10.04/system/interfaces/1%2F1%2F21",
"1/1/22": "/rest/v10.04/system/interfaces/1%2F1%2F22"
}
Example output:
"interfaces": [
"/rest/v10.04/system/interfaces/1%2F1%2F21",
"/rest/v10.04/system/interfaces/1%2F1%2F22"
]
:param dictionary: A Non-empty dictionary that will have its values added to a list
:return: A new list with the values from the dictionary
"""
new_list = []
for x in dictionary:
new_list.append(dictionary[x])
return new_list
|
ee7decffaf305d272a140ef6bbffe2c696150ef5
| 54,594
|
def str_to_bin(string):
""" Converts string of 1s and 0s to binary integer """
return int(string, 2)
|
606b71c676beb4cc45cc23f9dfdbf8ff90ee186b
| 54,596
|
def get_inf_rate(mod_year):
"""
    Estimate infiltration rate in 1/h, dependent on the last year of modernization.
If no modernization has been performed, use year of construction instead.
Parameters
----------
mod_year : int
Year of last modernization
Returns
-------
inf_rate : float
Infiltration rate in 1/h (related to building air volume)
"""
if mod_year < 1951:
inf_rate = 1
elif mod_year < 1969:
inf_rate = 0.5
elif mod_year < 1982:
inf_rate = 0.3
elif mod_year < 1994:
inf_rate = 0.25
elif mod_year < 2001:
inf_rate = 0.2
elif mod_year < 2009:
inf_rate = 0.15
elif mod_year < 2014:
inf_rate = 0.1
else:
inf_rate = 0.1
return inf_rate
|
6b23f9beb598f28bda33dcc04254516288918b5c
| 54,609
|
import json
import traceback
def get_json_from_file(json_file):
"""
    Reads and returns a JSON file as a dictionary object.
Parameters:
json_file (string): path to json file
Returns:
json_object (dictionary): json dictionary
"""
try:
with open(json_file) as json_file_object:
return json.load(json_file_object)
except Exception:
print(traceback.format_exc())
|
f50ef489d44e5c82edc06f1ae633c4ecca4820c7
| 54,610
|
def extract_message_received_time(input_json):
"""Extract the departure date from the EOBD field
and return as datestring in the form 'YYYY-MM-DD'
"""
date_str = input_json['messageReceievedTime']
return f'msg_date=20{date_str[0:2]}-{date_str[2:4]}-{date_str[4:6]}'
|
6170958fea30e7aac1ee92325859835fae0a0526
| 54,611
|
def invert_bitstring_with_mask(string: int, masklen: int) -> int:
"""Invert a bitstring with a mask.
Args:
string (bitstring) - the bitstring to invert
        masklen (int) - the number of low-order bits to keep after inversion
Returns:
(bitstring) - a bitstring inverted up to the masking length
"""
mask = (1 << masklen) - 1
return ~string & mask
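# Usage sketch: invert only the low `masklen` bits of the integer bitstring.
assert invert_bitstring_with_mask(0b1010, 4) == 0b0101
assert invert_bitstring_with_mask(0b1111, 6) == 0b110000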
|
6dc8a0499c959695b828a71976cd5020be306b2c
| 54,621
|
from typing import Type
from typing import Any
import typing
from typing import Union
def _is_optional(field: Type[Any]) -> bool:
"""Returns True when the given type annotation is Optional[...]."""
return typing.get_origin(field) is Union and type(None) in typing.get_args(field)
|
275cf4b2846af34a24b99818a3ed73bda7eab800
| 54,627
|
def sample_from_data(args, device, data_loader):
"""Sample real images and labels from data_loader.
Args:
args (argparse object)
device (torch.device)
data_loader (DataLoader)
Returns:
real, y
"""
real, y = next(data_loader)
real, y = real.to(device), y.to(device)
if not args.cGAN:
y = None
return real, y
|
73e3427b1adf1b4da233e5ac70756b7c3804e8af
| 54,628
|
def create_sub_G(proj_nodes, G):
"""
    Create a new graph from the nodes in the initial embedding so that the initial embedding can be computed on it
    :param proj_nodes: The nodes in the initial embedding
    :param G: Our graph
    :return: A subgraph of G whose nodes are the nodes in the initial embedding.
"""
sub_G = G.subgraph(list(proj_nodes))
return sub_G
|
c0f10c1631dc3dc26c384362a96342dbe1e04712
| 54,630
|
def dot(vec1, vec2):
""" Return the dot product of vec1 and vec2 """
return vec1[0]*vec2[0] + vec1[1]*vec2[1]
|
3b1b49365214e995d6c3e7f444e114ebe2d588f6
| 54,631
|
def parent(heap, i):
"""
    Returns the parent element of the node at the given index (0 if the index is the root).
"""
if i == 0:
return 0
return heap.array[(i-1)//2]
|
df1e7fa83a6376e394f53bdc465d77d3dd3c6b2f
| 54,635
|
import requests
def download_image(url):
"""Downloads an image, given an url
The image is saved in the download.jpg file
Args:
url: source from where download the image
"""
filename = "download.jpg"
response = requests.get(url)
file = open(filename, "wb")
file.write(response.content)
file.close()
return filename
|
7d938ef60d3ac197683adfc6bbdb38b3e8890b89
| 54,636
|
def parse_db_uri(conf):
"""
Parse input database config into database URI format
:param conf: input database config
:type conf: dict
:return: string of database config in URI format
:rtype: str
"""
# Input config must be a dict
assert isinstance(conf, dict)
# Key 'dbname' is required in config
if 'dbname' not in conf:
raise ValueError('No database specified')
# Read and parse config
dbname = str(conf['dbname'])
host = str(conf.get('host', '127.0.0.1') or '127.0.0.1')
port = str(conf.get('port', ''))
user = str(conf.get('user', ''))
passwd = str(conf.get('passwd', ''))
driver = str(conf.get('driver', 'postgresql')).lower() or 'postgresql'
if user and passwd:
user = '%s:%s@' % (user, passwd)
elif user:
user = '%s@' % user
elif passwd:
        raise ValueError('Password given without a user')
if port:
if not port.isdigit():
raise ValueError('Database port must be a number')
host = '%s:%s' % (host, port)
# Return parsed config in URI format
return '{}://{}{}/{}'.format(driver, user, host, dbname)
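# Usage sketch (config values are invented for illustration):
example_conf = {'dbname': 'mydb', 'user': 'alice', 'passwd': 's3cret', 'host': 'db.example.com', 'port': 5432}
assert parse_db_uri(example_conf) == 'postgresql://alice:s3cret@db.example.com:5432/mydb'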
|
6637f47e00730ea22716eede8901d64790f2e2ae
| 54,637
|
from bs4 import BeautifulSoup
def new_float(float_class: str, content, caption, id=None):
"""Creates a float using the <float> tag.
Parameters:
- float_class: A string specifying the type of float, e.g. "Figure".
    - content: Whatever you want to put in this float. It can be anything
      accepted by the "append" method of BeautifulSoup (e.g. a string with
      the HTML code as well as a tag object).
    - caption: The caption. It can be anything accepted by the "append"
      method of BeautifulSoup (e.g. a string with the HTML as well as a
      tag object).
    - id: Optional string to use as the "id" attribute of the float tag.
    Returns:
    - The float tag.
    """
if not isinstance(float_class, str):
raise TypeError(f'<float_class> must be a string, received object of type {type(float_class)}.')
float_tag = BeautifulSoup(features='lxml').new_tag('float')
float_tag['class'] = float_class
float_tag.append(content)
caption_tag = BeautifulSoup(features='lxml').new_tag('floatcaption')
caption_tag.append(caption)
float_tag.append(caption_tag)
if isinstance(id, str):
float_tag['id'] = id
return float_tag
|
7a660dff8da7adcff8d4083e1f2559801da02f46
| 54,646
|
def confusionMatrix(predicted, actual, threshold):
"""
    Compute the confusion matrix values for the given data
    :param predicted: predicted values
    :param actual: ground-truth values
    :param threshold: decision threshold
    :return: values in the form [tp, fn, fp, tn, rate]
    """
    # Check data lengths to avoid invalid input
    if len(predicted) != len(actual):
        return -1
    # Define the four entries of the confusion matrix
    tp = 0.0  # true positives
    fp = 0.0  # false positives
    tn = 0.0  # true negatives
    fn = 0.0  # false negatives
for i in range(len(actual)):
if actual[i] > 0.5:
if predicted[i] > threshold:
tp += 1.0
else:
fn += 1.0
else:
if predicted[i] < threshold:
tn += 1.0
else:
fp += 1.0
    # misclassification rate
    rate = (fn + fp) / (tp + fn + tn + fp)
rtn = [tp, fn, fp, tn, rate]
return rtn
|
866360e041f0870b88e53ea2cc6cd2ebdd568d6d
| 54,648
|
import six
def _decode(entity):
"""
Decodes all unicode characters to avoid the `u` prefix from
proliferating in complex data structures. We should probably
instead JSON encode everything, but for now, this is fine.
This is only needed as long as Python 2 support is necessary.
:param entity: The entity to decode.
:return: The iterable without unicode.
"""
# Only necessary for Python 2
if six.PY3:
return entity
if isinstance(entity, tuple):
return tuple(_decode(e) for e in entity)
elif isinstance(entity, list):
return list(_decode(e) for e in entity)
elif isinstance(entity, dict):
return {_decode(k): _decode(v) for k, v in entity.items()}
elif isinstance(entity, six.text_type):
return entity.encode('utf8')
return entity
|
7e972a4f20d7257de49dafa29b080145eb30d695
| 54,650
|
def full_type(o):
"""
Returns the fully qualified type name of a given object.
"""
cls = o.__class__
module = cls.__module__
if module == 'builtins':
return cls.__qualname__
return module + '.' + cls.__qualname__
|
6acfbe62fab9bf7fcf4cae95ecdf2ae49cbad20e
| 54,651
|
import re
def identifier(filename):
"""
File names are of the format: 09.44.32 hrs __[0000312].jpeg and we need
to filter out the 0000312 part to use as identifier
"""
    match = re.search(r'\[(\d+)\]', filename)
if match:
return 'attachment' + match.group(1)
else:
# if we can't match, lets hope returning the filename works
return filename
|
aa8c231b0351c9e88db06b234bc3261930080d85
| 54,652
|
def convert_size(size, input_unit="B", output_unit="KB"):
"""
    Converts bytes from one unit to another, rounded to 2 decimals
:param size: The initial amount of bytes
:type size: int or float
:param str input_unit: The current unit for the given amount. Defaults to "B".
:param str output_unit: The desired unit to convert to. Defaults to "KB".
:return: The converted amount of bytes, in the desired unit, rounded to 2 digits
:rtype: float
    :raises IndexError: if 'input_unit' or 'output_unit' are not in the valid unit list
"""
units = ["B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"]
# Errors
if input_unit not in units:
units_as_text = ", ".join(units)
raise IndexError(
"'input_unit' must be one of the following: {}".format(units_as_text)
)
if output_unit not in units:
units_as_text = ", ".join(units)
raise IndexError(
"'output_unit' must be one of the following: {}".format(units_as_text)
)
# Setup
input_index = units.index(input_unit)
output_index = units.index(output_unit)
n = output_index - input_index
# Maths
output_size = round(size / (1024 ** n), 2)
return output_size
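# Usage sketch (illustrative values):
assert convert_size(2048) == 2.0                  # bytes -> KB with the defaults
assert convert_size(1.5, "GB", "MB") == 1536.0    # converting to a smaller unit multiplies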
|
b3b7c9a3ebce0dbc531808dead9674c931e809f4
| 54,656
|
def init_cal_coeff_dict(obstreemodel):
"""
    Initialize a dict for storing meter calibration coefficients.
Parameters
----------
obstreemodel : ObsTreeModel
Returns
-------
dict
key: Meter (str), value: float
"""
try:
meter_list = {}
for i in range(obstreemodel.invisibleRootItem().rowCount()):
survey = obstreemodel.invisibleRootItem().child(i)
for ii in range(survey.rowCount()):
loop = survey.child(ii)
if loop.meter not in meter_list:
meter_list[loop.meter] = 1.000
return meter_list
except Exception:
return None
|
8bebb7c751c13f923f9b4f5f5f0fcf300b50e6e8
| 54,662
|
import re
def get_casa_version_from_calibration_script(script):
"""Pulls CASA pipeline version from calibration script
Again, taken from Daizhong's A3COSMOS stuff, thanks Daizhong!
"""
casa_pipeline_version = None
with open(script, 'r') as fp:
for line in fp:
if line.find('PLEASE USE THE SAME VERSION OF CASA') >= 0:
if re.match(r'^.*PLEASE USE THE SAME VERSION OF CASA.*:[ \t]*([0-9.]+)\b.*$', line):
casa_pipeline_version = re.sub(r'^.*PLEASE USE THE SAME VERSION OF CASA.*:[ \t]*([0-9.]+)\b.*$',
r'\1', line)
# Trim any whitespace
casa_pipeline_version = casa_pipeline_version.strip()
break
return casa_pipeline_version
|
44e6c7ef8453280552c1b8aae588544a317eeeae
| 54,665
|
def app_config(app_config):
"""Override pytest-invenio app_config fixture to disable CSRF check."""
    # Variable not used. We set it to silence warnings
app_config["REST_CSRF_ENABLED"] = False
return app_config
|
8c3023b4dc2b16121955643bb3e13ee193f72db9
| 54,667
|
def open_bcfile(opts, cmdline):
"""
open BC file for writing and write header with command line
"""
BCFILE = open(opts.bcfile, 'w')
BCFILE.write("$ Generated using %s with the following options:\n" %
cmdline)
BCFILE.write("$ %s\n" % opts)
return BCFILE
|
bd37fed97be2736ecc30c2b07dad26431dc7043d
| 54,668
|
def gcd(a, b):
"""
Calculates the greatest common divisor of two positive integers.
The gcd of two or more integers, when at least one of them is not zero,
is the largest positive integer that divides the numbers without a remainder.
a, b: two positive integers
Returns the greatest common divisor of a and b
"""
    if b == 0:
        return a
    else:
        return gcd(b, a % b)
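# Usage sketch: Euclid's algorithm, e.g. gcd(48, 36) -> gcd(36, 12) -> gcd(12, 0) -> 12.
assert gcd(48, 36) == 12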
|
3f47e398a457c9c94b6c6d93492790c6171ce741
| 54,669
|
def bold(text):
"""Bold.
Args:
text (str): text to make bold.
Returns:
str: bold text.
"""
return '**' + text + '**'
|
daf2abb139e7fc9a4d128d8aba5fef4d70e1f29c
| 54,670
|
import posixpath
def get_path(*args):
"""
construct a path from arguments provided.
:param args: <tuple> array of arguments to concatenate.
:return: <str> path
"""
return posixpath.join(*args)
|
e718ab9cdee6755827fe17df0b19b0f6048a80dc
| 54,672
|
def bubble_sort(array):
"""
Def: Bubble sort, sometimes referred to as sinking sort, is a simple
sorting algorithm that repeatedly steps through the list to be sorted,
compares each pair of adjacent items and swaps them if they are in the
wrong order (ascending or descending arrangement). The pass through
the list is repeated until no swaps are needed, which indicates that
the list is sorted.
Complexity: Best: n | Average: n^2 | Worst: n^2
"""
    for i in range(0, len(array) - 1, 1):
        # reset the flag each pass so the early-exit check below works as intended
        swapped = False
        for j in range(0, len(array) - 1, 1):
            if array[j] > array[j+1]:
                # swap the elements
                array[j], array[j+1] = array[j+1], array[j]
                swapped = True
if swapped is False:
break
return array
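# Usage sketch (sorts the list in place and also returns it):
assert bubble_sort([5, 1, 4, 2, 8]) == [1, 2, 4, 5, 8]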
|
5cedc8f302a0a87d54fa1a2c25d813d82640010d
| 54,675
|
def mat2lst(m):
"""
Convert 3x3 matrix to list
:param m: ndarray object
:return: list
"""
return m.flatten().tolist()
|
9c11603a55013474e1b61577654c2ccde0842735
| 54,678
|
def is_float(x):
"""Return true if X can be coerced to a float. Otherwise, return false."""
try:
float(x)
return True
except ValueError:
return False
|
0450c3c6b2024de07e6ea9ec09c5a3955b744ef9
| 54,681
|
def calculate_rectangle_area(length, width):
"""
Computes the area of a rectangle, given its length and width.
Params:
length (int or float) like 10
width (int or float) like 3
Examples:
calculate_rectangle_area(10, 3)
calculate_rectangle_area(length=10, width=3)
calculate_rectangle_area(width=3, length=10)
"""
return length * width
|
4975a6dbdc0987194a036b876b3fe0d1447b5e55
| 54,687
|
def renormalize(a, from_range, to_range):
"""Renormalize `a` from one range to another."""
delta1 = from_range[1] - from_range[0]
delta2 = to_range[1] - to_range[0]
return (delta2 * (a - from_range[0]) / delta1) + to_range[0]
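# Usage sketch (ranges are illustrative):
assert renormalize(5, (0, 10), (-1, 1)) == 0.0    # midpoint maps to midpoint
assert renormalize(0.5, (0, 1), (0, 100)) == 50.0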
|
59d06098d362e764ae461ceda69879d703713a11
| 54,689
|
from typing import List
def sieve_of_eratosthenes(n: int) -> List[int]:
"""Return a list of prime numbers which are less than the given integer using sieve of eratosthenes method
>>> sieve_of_eratosthenes(1)
[]
>>> sieve_of_eratosthenes(10)
[2, 3, 5, 7]
>>> sieve_of_eratosthenes(50)
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]
>>> sieve_of_eratosthenes(-37)
Traceback (most recent call last):
...
ValueError: n must be >= 1
"""
if n < 1:
raise ValueError("n must be >= 1")
sieve = [True] * n
sieve[0] = False
for i in range(2, n + 1):
if sieve[i - 1]:
for j in range(i ** 2, n + 1, i):
sieve[j - 1] = False
prime_list = []
for i, is_prime in enumerate(sieve):
if is_prime:
prime_list.append(i + 1)
return prime_list
|
c2b2973d6d09c4cc90e8c82e2f1bc0987a5d288a
| 54,692
|
import importlib
def get_executor(config, model):
"""
according the config['executor'] to create the executor
Args:
config(ConfigParser): config
model(AbstractModel): model
Returns:
AbstractExecutor: the loaded executor
"""
try:
return getattr(importlib.import_module('libcity.executor'),
config['executor'])(config, model)
except AttributeError:
raise AttributeError('executor is not found')
|
b45b4f8dc8be81a667ab3aa4caa89c6195403b5f
| 54,693
|
def transpose(data):
"""Transpose a 2-dimensional list."""
return list(zip(*data))
|
5b63c51e0d5856a7a9632b848b5149c814795fca
| 54,703
|
import getpass
def get_opal_password(opal_password, password_file):
"""
Retrieve the OPAL password form the user, either from the command line arguments, the file they specified or
by asking them to input it
:param opal_password: The actual password, if provided via the command line
:param password_file: The file containing the password, if provided.
:return: The password
"""
if opal_password:
return opal_password
if password_file:
with open(password_file, 'r') as fd:
password = fd.readlines()[0].strip()
else:
password = getpass.getpass(str("Enter your OPAL password: "))
return password
|
e6c13644c120beab3a629634c433343a43c92f00
| 54,704
|
def IIf(cond, truepart, falsepart):
"""Conditional operator"""
if cond:
return truepart
else:
return falsepart
|
ac12fb349871bc7ae63a16239119c760fe9f1856
| 54,707
|
def _include_chooser_msg_wildcard_docs(f):
"""
    Combines the basic Chooser options (wildcard, message) docstring
    with the wrapped function's docstring.
"""
_doc = """:param wildcard: Sets the wildcard, which can contain multiple file types, for
example: "BMP files (.bmp)|.bmp|GIF files (.gif)|.gif"
:param message: Sets the message that will be displayed on the dialog.
"""
f.__doc__ = (f.__doc__ or '') + _doc
return f
|
cd824d05049c28a6b31aeba60e63739d4971554f
| 54,708
|
def _xpath_union(*xpath_list):
"""Form union of xpath expressions"""
return ' | '.join(xpath_list)
|
61cfa08c48ef5f2259786f72b156c10207603e9a
| 54,709
|
def get_facet_mask(function_space, mode='geometric', facet='bottom'):
"""
Returns the top/bottom nodes of extruded 3D elements.
:arg function_space: Firedrake :class:`FunctionSpace` object
:kwarg str mode: 'topological', to retrieve nodes that lie on the facet, or
'geometric' for nodes whose basis functions do not vanish on the facet.
:kwarg str facet: 'top' or 'bottom'
.. note::
The definition of top/bottom depends on the direction of the extrusion.
Here we assume that the mesh has been extruded upwards (along positive
z axis).
"""
section, iset, facets = function_space.cell_boundary_masks[mode]
ifacet = -2 if facet == 'bottom' else -1
off = section.getOffset(facets[ifacet])
dof = section.getDof(facets[ifacet])
indices = iset[off:off+dof]
return indices
|
b8e4936e64c7c299e80e8d60cb403129cd790ad7
| 54,710
|
def make_error(errors):
"""
Creates a JSON response with the provided errors. The status
is determined by the highest code in the provided list of errors.
The response object is returned to the caller
:errors a single error or list of errors
"""
error_list = []
if type(errors) is not list:
errors = [errors]
for error in errors:
error_list.append(error.to_dict())
status = max(error.status for error in errors)
response = { "meta": { 'status': status }, "errors": error_list }
return response
|
2fafefa822c981b3ee97ccd34ead96cbedbf0322
| 54,711
|
def to_format(date):
"""
Turns a date object into the format used by http://www.market-holidays.com/
:param date: date object
:return: string in the format of "January 17, 2022"
"""
month = date.strftime("%B")
day = str(date.day)
year = str(date.year)
return "%s %s, %s" % (month, day, year)
|
83fe48532fc983e300464914c38c0c00070b8ce6
| 54,717
|
def pop_all(items):
"""
Clear items in place and return a copy of items.
>>> items = [1, 2, 3]
>>> popped = pop_all(items)
>>> popped is items
False
>>> popped
[1, 2, 3]
>>> items
[]
"""
result, items[:] = items[:], []
return result
|
2c9e80c21c012f838f3a11c596f8628b544a72a0
| 54,719
|
def getAnsText(answer_id_text, task_uuid, question_label):
"""
Returns a list of answers under the question label
"""
task_data = answer_id_text[answer_id_text["quiz_task_uuid"] == task_uuid]
ans = task_data[task_data["question_label"] == question_label].drop_duplicates(subset = ["contributor_uuid", "answer_uuid"])
return ans["answer_text"].unique().tolist()
|
5c9dbb04cc857419eb84e655a6a683810817a4f1
| 54,720
|
from typing import Dict
from typing import Any
def _dummy_task_description(data_config: Dict[str, Any]) -> Dict[str, Any]:
"""Generate a dummy task description to use for hypothesis space generation."""
# data_config {
# "gendnf_num_objects": 4,
# "gendnf_num_nullary": 1,
# "gendnf_num_unary": 1,
# "gendnf_num_binary": 2,
# "gendnf_num_variables": 2,
# "gendnf_num_conjuncts": 2,
task_description = {
"inputs": {
"nullary": {"shape": [data_config["gendnf_num_nullary"]]},
"unary": {
"shape": [
data_config["gendnf_num_objects"],
data_config["gendnf_num_unary"],
]
},
"binary": {"shape": [data_config["gendnf_num_binary"]]},
},
"metadata": {"num_variables": data_config["gendnf_num_variables"]},
}
return task_description
|
9788dda9993ef86e75ea91af96fafc4697799941
| 54,721
|
def correct(value, ty):
""" Correct a value to the given bits """
bits = ty.bits
signed = ty.signed
base = 1 << bits
value %= base
return value - base if signed and value.bit_length() == bits else value
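# Usage sketch: `IntType` is an assumed stand-in for `ty`, which only needs `bits` and `signed`.
from collections import namedtuple
IntType = namedtuple("IntType", ["bits", "signed"])
assert correct(200, IntType(bits=8, signed=True)) == -56   # wraps into the signed 8-bit range
assert correct(300, IntType(bits=8, signed=False)) == 44   # plain 300 % 256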
|
aac7544b4a92b72ea24b76205d1536875c2fdc7e
| 54,724
|
def accuracy(scores, targets, k):
"""
Computes top-k accuracy, from predicted and true labels.
:param scores: scores from the model
:param targets: true labels
:param k: k in top-k accuracy
:return: top-k accuracy
"""
batch_size = targets.size(0)
_, ind = scores.topk(k, 1, True, True)
correct = ind.eq(targets.view(-1, 1).expand_as(ind))
correct_total = correct.view(-1).float().sum() # 0D tensor
return correct_total.item() * (100.0 / batch_size)
'''Dataset
The dataset has already been pre-processed and split for you. There are 5000 images and captions for training, 1000 for validation and 1000 for testing. The images, captions, length of captions and vocabulary have been saved.
The images have a resolution of (3, 256, 256) (channels, width, height). The captions have a maximum length of 100. Each image has 5 captions and the minimum word frequency is 5.
The functions below facilitate the loading of the dataset.
'''
|
f6ab7926f1b5d5af6268595ed00179759868e256
| 54,726
|
def get_category_to_id_mapping(data, column):
"""Creates two mappings for id and categorical value and vice verse for given column.
Id is a unique identifier of categorical value. Starting from 0.
Args:
data: dataframe that contains categorical values
column: a column of dataframe that contains categorical values for which a mapping from categorical value
to id is needed
Returns:
id_to_category: dictionary of ids and categories
category_to_id: dictionary of categories and ids
"""
categories = sorted(data[column].unique())
print("There are {} unique categories".format(len(categories)))
id_to_category = {i: categories[i] for i in range(len(categories))}
category_to_id = {categories[i]: i for i in range(len(categories))}
return id_to_category, category_to_id
|
c06d829536ce2a1fa8f24e8388e25102c053e311
| 54,727
|
from typing import Counter
def get_words(texts):
"""
Construct a word counter from words.
"""
word_counter = Counter()
for t in texts:
word_counter.update(t)
return word_counter
|
d5f228137161ea74798dc54e84f3b75e00e36bfa
| 54,731
|
def str_to_gast(node):
"""
    Python's native string class is the type of function names and arguments; string literals are handled by str_to_gast().
    This function takes the name of an identifier and turns it into a gast node
"""
return {"type": "name", "value": node}
|
d4a79af72ee89034d62eeab5d697a548380204fa
| 54,737
|
from collections import namedtuple
def tol_cset(colorset=None):
"""
Discrete color sets for qualitative data.
Define a namedtuple instance with the colors.
Examples for: cset = tol_cset(<scheme>)
- cset.red and cset[1] give the same color (in default 'bright' colorset)
- cset._fields gives a tuple with all color names
- list(cset) gives a list with all colors
"""
namelist = ('bright', 'high-contrast', 'vibrant', 'muted', 'medium-contrast', 'light')
if colorset is None:
return namelist
if colorset not in namelist:
colorset = 'bright'
print('*** Warning: requested colorset not defined,',
'known colorsets are {}.'.format(namelist),
'Using {}.'.format(colorset))
if colorset == 'bright':
cset = namedtuple('Bcset',
'blue cyan green yellow red purple grey black')
return cset('#4477AA', '#66CCEE', '#228833', '#CCBB44', '#EE6677',
'#AA3377', '#BBBBBB', '#000000')
if colorset == 'high-contrast':
cset = namedtuple('Hcset',
'blue yellow red black')
return cset('#004488', '#DDAA33', '#BB5566', '#000000')
if colorset == 'vibrant':
cset = namedtuple('Vcset',
'orange blue cyan magenta red teal grey black')
return cset('#EE7733', '#0077BB', '#33BBEE', '#EE3377', '#CC3311',
'#009988', '#BBBBBB', '#000000')
if colorset == 'muted':
cset = namedtuple('Mcset',
'rose indigo sand green cyan wine teal olive purple pale_grey black')
return cset('#CC6677', '#332288', '#DDCC77', '#117733', '#88CCEE',
'#882255', '#44AA99', '#999933', '#AA4499', '#DDDDDD',
'#000000')
if colorset == 'medium-contrast':
cset = namedtuple('Mcset',
'light_blue dark_blue light_yellow dark_red dark_yellow light_red black')
return cset('#6699CC', '#004488', '#EECC66', '#994455', '#997700',
'#EE99AA', '#000000')
if colorset == 'light':
cset = namedtuple('Lcset',
'light_blue orange light_yellow pink light_cyan mint pear olive pale_grey black')
return cset('#77AADD', '#EE8866', '#EEDD88', '#FFAABB', '#99DDFF',
'#44BB99', '#BBCC33', '#AAAA00', '#DDDDDD', '#000000')
|
c6edc57be76c8f53cc897f7be58156a1b326fb40
| 54,738
|
def is_import(x: str) -> bool:
"""
Whether line contains an import statement.
"""
return x.startswith('import ') or x.startswith('from ') and ' .' not in x
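# Usage sketch: relative imports (containing " .") are deliberately excluded.
assert is_import("import os")
assert is_import("from os import path")
assert not is_import("from . import sibling")
assert not is_import("x = 1")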
|
a2067b0f044b371267aaa3912147cac835ca8d2a
| 54,739
|
from datetime import datetime
import time
from dateutil.parser import parse as dateutil_parse  # missing import implied by the call below
def str_datetime_to_unixtime(input_str: str) -> float:
    """
    Parse a datetime from a string. Return a Unix timestamp.
    """
    datetime_obj: datetime = dateutil_parse(input_str)
return time.mktime(datetime_obj.timetuple())
|
53667dc3b88570faf748feaf59f867754cb7aa00
| 54,742
|
def next_mro_class(cls, current):
"""Find the next class in `cls.mro()` after `current`."""
mro = cls.mro()
return mro[mro.index(current) + 1]
|
c5a0fab08baf1c64959203c86ecaee227f35988b
| 54,744
|
import hashlib
def hash_SHA256_twice(by):
"""
Use hashlib to hash with SHA256 twice, expects binary input
"""
h1 = hashlib.sha256(by).digest()
h2 = hashlib.sha256(h1).digest()
return h2
|
f098e8e0b87ed55ccda542832072fce68390fb97
| 54,748
|
def dict_to_str(props):
"""Returns the given dictionary as a string of space
separated key/value pairs sorted by keys.
"""
ret = []
for k in sorted(props.keys()):
ret += [k,props[k]]
return " ".join(ret)
|
134ca7ae7d42bdef76624b9b6b217a4fee8c1a34
| 54,750
|
def flake8(context):
"""Run the flake8 task
The ``context`` argument is automatically passed in by invoke and
represents the context the commands is to be "invoked" in. See
http://docs.pyinvoke.org/en/stable/api/context.html for details.
"""
print("# flake8")
return context.run("flake8").return_code
|
2733650a89830abccf78bfd044e931c27577098a
| 54,758
|
def get_named_people_from_sen(sen):
"""
returns a list of annotated words that correspond to named entities in sen.
sen must be an annotated sentence dict.
each element of the returned list is a list of annotated words. Each list
corresponds to a single entity.
result looks like:
[[[u'Dan',
{u'CharacterOffsetBegin': u'0',
u'CharacterOffsetEnd': u'3',
u'Lemma': u'Dan',
u'NamedEntityTag': u'PERSON',
u'PartOfSpeech': u'NNP'}],
[u'Calacci',
{u'CharacterOffsetBegin': u'4',
u'CharacterOffsetEnd': u'11',
u'Lemma': u'Calacci',
u'NamedEntityTag': u'PERSON',
u'PartOfSpeech': u'NNP'}]],
[[u'Shane',
{u'CharacterOffsetBegin': u'18',
u'CharacterOffsetEnd': u'23',
u'Lemma': u'Shane',
u'NamedEntityTag': u'PERSON',
u'PartOfSpeech': u'NNP'}],
[u'Boissiere',
{u'CharacterOffsetBegin': u'24',
u'CharacterOffsetEnd': u'33',
u'Lemma': u'Boissiere',
u'NamedEntityTag': u'PERSON',
u'PartOfSpeech': u'NNP'}]]]
"""
wordlist = sen['words']
entities = []
named = []
for index, word in enumerate(wordlist):
if word[1]['NamedEntityTag'] == 'PERSON':
named.append(word)
            try:
                next_word = wordlist[index + 1]
            except IndexError:
                # last word of the sentence: flush any entity collected so far
                if named:
                    entities.append(named)
                named = []
                break
            if next_word[1]['NamedEntityTag'] != 'PERSON':
if named:
entities.append(named)
named = []
return entities
|
a3db265ed8e8d06848ed5cbfa786477ea04b8345
| 54,769
|
def simpson(f, a, b, n):
"""
Integral of f on [a,b] using composite Simpson's rule.
"""
    if n % 2 != 0:
        raise ValueError("n must be even, got %d" % n)
    h = (b - a) / n
    s = f(a) + f(b)
    for i in range(1, n, 1):
        # odd interior nodes get weight 4, even interior nodes get weight 2
        if i % 2 == 1:
            s += 4 * f(a + i * h)
        else:
            s += 2 * f(a + i * h)
return s * h / 3
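# Usage sketch: composite Simpson's rule is exact for polynomials up to degree 3,
# so integrating x**2 over [0, 2] should give 8/3 up to rounding.
assert abs(simpson(lambda x: x ** 2, 0, 2, 10) - 8 / 3) < 1e-12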
|
a62b02cc18006f3ca5635638daf8f98b0a7baa61
| 54,773
|
def flatten_nested_dict_to_pairs(nested_dict):
"""
Given a nested dict of arbitrary depth, this function returns a
list of pairs (nested_key, final value).
:Example:
>>> nested_dict = {'k1': {'ka' : 'v1', 'kb': {'kα': 'v2'}}, 'k2': 'v3'}
>>> flatten_nested_dict_to_pairs(nested_dict)
    [('k2','v3'), ('k1_kb_kα','v2'), ('k1_ka', 'v1')]
    :param nested_dict: A dictionary
    :return: A list of (flattened_key, value) pairs
"""
pairs = []
for key, value in nested_dict.items():
if isinstance(value, dict):
nested_pairs = flatten_nested_dict_to_pairs(value)
for nested_pair in nested_pairs:
k, v = nested_pair
pairs.append((u'%s_%s' % (key, k), v))
else:
pairs.append((u'%s' % key, value))
return pairs
|
20ea5b93ca3c19fbaba836b7b7b495aec768c9fa
| 54,776
|
def capitalize(val: str) -> str:
"""Make sure the first character of a string is upper case and the rest
lowercase."""
return val.capitalize()
|
09c22e53f2bd333359a61eae3eaedf70b0fc44e2
| 54,777
|
def change_waveunit(wave, oldunits, newunits):
"""Return wavelengths converted to new wavelength units.
Parameters
----------
wave : float or list of floats
Input wavelength(s) in units specified by `oldunits`.
oldunits : str
Input units for `wave`. Allowed values are described in the table
below. Values are case insensitive.
newunits : str
Units for returned wavelength(s). Allowed values are described in
the table below. Values are case insensitive.
Returns
-------
float or list of floats
Output wavelength(s) in units specified by `newunits`.
Raises
------
ValueError
If either `oldunits` or `newunits` is not a recognized wavelength
unit, as described in the table below.
The following table defines allowed values for `newunits` and `oldunits`.
+-----------------+---------------------+
| Unit | Description |
+=================+=====================+
| :kbd:`'A'` | Angstroms |
+-----------------+---------------------+
| :kbd:`'nm'` | nanometers |
+-----------------+---------------------+
| :kbd:`'um'` | micrometers |
+-----------------+---------------------+
| :kbd:`'micron'` | micrometers |
+-----------------+---------------------+
| :kbd:`'cm^-1'` | inverse centimeters |
+-----------------+---------------------+
| :kbd:`'1/cm'` | inverse centimeters |
+-----------------+---------------------+
Examples
--------
>>> from sme.util import change_waveunit
>>> change_waveunit(5000, 'A', 'nm')
500.0
>>> change_waveunit([10000, 20000], 'cm^-1', 'a')
[10000.0, 5000.0]
"""
oldlow = oldunits.lower()
newlow = newunits.lower()
if newlow == oldlow:
return wave
factor = {
'a': 1.0, 'nm': 10.0, 'um': 1e4, 'micron': 1e4,
'cm^-1': 1e8, '1/cm': 1e8}
try:
old_to_A = factor[oldlow]
A_to_new = factor[newlow]
except KeyError:
raise ValueError(
f"invalid wavelength unit: old='{oldunits}', new='{newunits}'\n"
f"Valid wavelength units: '" + "', '".join(factor.keys()) + "'")
old_new = old_to_A / A_to_new
if oldlow in ['cm^-1', '1/cm']:
try:
return [old_new / w for w in wave]
except TypeError:
return old_new / wave
elif newlow in ['cm^-1', '1/cm']:
try:
return [1.0 / old_new / w for w in wave]
except TypeError:
return 1.0 / old_new / wave
else:
try:
return [old_new * w for w in wave]
except TypeError:
return old_new * wave
|
40375a696d4b8bf01a75ae55349c3dbeca16a260
| 54,781
|
def score_point(score):
"""
Scores a point for use in plotting.
Args:
score: float score of point
Returns:
color: matplotlib color
"""
col = 'darkred'
if score > .9:
col = 'limegreen'
elif score > .8:
col = 'green'
elif score > .7:
col = 'teal'
elif score > .6:
col = 'dodgerblue'
elif score > .5:
col = 'blue'
elif score > .4:
col = 'yellow'
elif score > .3:
col = 'orange'
elif score > .2:
col = 'peru'
elif score > .1:
col = 'red'
return col
|
15ef51d024c3d6b00192bb689721208c68b1bd9b
| 54,784
|
def fibonacci(n):
""" Returns n elements of the fibonacci series"""
fibo = [1, 1]
if n < 1 or n > 2_000_000:
return []
elif n < 2:
return fibo[:n]
for i in range(2, n):
fibo.append(sum(fibo[i - 2 : i]))
return fibo
|
e894ebadc56f132fc436fe5ef3639a4b8363bf72
| 54,785
|
import random
def sample_wr(data, n_samples=1):
"""
Create a list of data by random sampling from a data set with replacement
Inputs:
data List of data
n_samples number of samples to draw
Return:
sample list of samples drawn
"""
sample = []
for ind in range(0, n_samples):
sample.append(random.choice(data))
return sample
|
985dfde725f71f572638845ccb48b662cf4e1ba4
| 54,787
|
def rank_obj_data(serData):
"""
Parameters
#---------
serData pandas.Series containing one Column from data set
Returns
#------
serFirstTwentyData pandas.Series with first twenty highest counts
of classes from column.
Description
#----------
    Group by the classes in the column. Count the number per class. Sort values descending.
    Take only the first twenty. Helper method for plotting only the most important
    classes of a feature. Helps keep an overview.
"""
serCount = serData.groupby(by = serData).count()
serSortData = serCount.sort_values(ascending = False)
serFirstTwentyData = serSortData[:20]
return serFirstTwentyData
|
6ff61ef1da1d08e9ea372c61360cbab1799b28b6
| 54,791
|
def vectorized_regularization(theta, lambda_):
"""
Computes the regularization term of a non-empty numpy.ndarray,
vector wise.
Args:
theta: has to be numpy.ndarray, a vector of dimensions n*1.
lambda_: has to be a float.
Returns:
The regularization term of theta.
None if theta is empty.
Raises:
This function should not raise any Exception.
"""
try:
if theta.size == 0 or type(lambda_) != float:
return None
return lambda_ * theta.T.dot(theta)
except Exception:
return None
|
64309524d3942a1053cf0339d21532fe41f86939
| 54,799
|
import random
import string
def random_string(length=5):
"""Return a random string of the desired length."""
return "".join(random.choices(string.ascii_uppercase + string.digits, k=length))
|
5ba86b3f959a070de628b813cda3a17e1875caf9
| 54,800
|
def _reshape_nd(arr, ndim, axis):
"""Promote a 1d array to ndim with size > 1 at the specified axis."""
if arr.ndim != 1:
raise ValueError("expected a 1d array")
if axis < -ndim or axis > ndim - 1:
raise ValueError("invalid axis")
if ndim < 1:
raise ValueError("ndim must be >= 1")
axis = axis % ndim
nd_shape = (1,) * axis + (arr.size,) + (1,) * (ndim - axis - 1)
return arr.reshape(nd_shape)
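# Usage sketch (assumes NumPy arrays, as implied by the `ndim`/`reshape` usage above):
import numpy as np
assert _reshape_nd(np.arange(3), ndim=3, axis=1).shape == (1, 3, 1)
assert _reshape_nd(np.arange(4), ndim=2, axis=-1).shape == (1, 4)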
|
4b19a5b6ee1a93f221c70ff2744430bb744dada9
| 54,804
|
def rps_logic(p1_move, p2_move):
"""
Execute logic of RPS Game.
Rock < Paper < Scissors < Rock
Args:
p1_move (int): Encoded player1 move.
p2_move (int): Encoded player2 move.
Returns:
0 if its a tie.
1 if Player1 wins.
2 if Player2 wins.
"""
if p1_move == p2_move:
# Same move, its a draw
return 0
if (p1_move - p2_move) == 1 or (p1_move - p2_move) == -2:
# Player1 wins (Paper vs Rock or Scissors vs Paper or Rock vs Scissors)
return 1
# Player2 wins
return 2
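# Usage sketch, assuming the encoding implied by the comments: 0 = Rock, 1 = Paper, 2 = Scissors.
assert rps_logic(1, 0) == 1   # Paper beats Rock
assert rps_logic(0, 2) == 1   # Rock beats Scissors
assert rps_logic(0, 1) == 2   # player 2's Paper wins
assert rps_logic(2, 2) == 0   # tie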
|
5f5032f13606145108b7a44e8e1e7e30ecfb8e83
| 54,809
|
def infer_separation_gain3(distance: float) -> float:
"""Compute separation gain from desired distance
Args:
distance: Desired minimum inter-agent distance
Returns:
separation_gain: Separation gain to produce (at least) the desired distance
Note: This is the analytical equilibrium distance of the Reynolds equations for N = 3
"""
return distance ** 2 / 2
|
368fd188982fb2175dc04aaca1bc1efbd10686c7
| 54,810
|
import re
def CamelCaseToOutputFriendly(string):
"""Converts camel case text into output friendly text.
Args:
string: The string to convert.
Returns:
The string converted from CamelCase to output friendly text.
Examples:
'camelCase' -> 'camel case'
'CamelCase' -> 'camel case'
'camelTLA' -> 'camel tla'
"""
return re.sub('([A-Z]+)', r' \1', string).strip().lower()
|
a1d726fe68649efe0c0b6cf17e95e55ad4a8183f
| 54,813
|
def metric_batch(model, metric_func, xb, yb):
"""Compute metric of model on a batch data.
metric_batch provide a way to compute model metric with mini-batch data.
The metric can be accuracy. It is necessary that metric_batch should be
implemented under torch.no_grad().
Args:
model (Model): a model with weights which can be updated
metric_func (function): a function used to compute metric
xb (tensor): features of a mini-batch data
yb (tensor): labels of a mini-batch data corresponding to xb
Example::
>>> import torch
>>> x = torch.tensor([[1, 2, 3], [3, 4, 5]])
>>> y = torch.tensor([0, 1])
>>> def accuracy(out, y):
>>> preds = torch.argmax(out, dim=1)
>>> return (preds == y).float().mean()
>>> model = Mnist_CNN()
>>> with torch.no_grad():
>>> acc = metric_batch(model, accuracy, x, y)
Returns:
(float): a metric of model w.r.t the pair of xb and yb
"""
metric = metric_func(model(xb), yb)
return metric.item()
|
ce1879dc511e29ac49ceb8465dbe31bcf9dae5cf
| 54,815
|
def sample_patches(inputs, patch_size=3, stride=1):
"""Extract sliding local patches from an input feature tensor.
    The sampled patches are row-major.
Args:
inputs (Tensor): the input feature maps, shape: (c, h, w).
patch_size (int): the spatial size of sampled patches. Default: 3.
stride (int): the stride of sampling. Default: 1.
Returns:
patches (Tensor): extracted patches, shape: (c, patch_size,
patch_size, n_patches).
"""
c, h, w = inputs.shape
patches = inputs.unfold(1, patch_size, stride)\
.unfold(2, patch_size, stride)\
.reshape(c, -1, patch_size, patch_size)\
.permute(0, 2, 3, 1)
return patches
|
728a53e08563d5d64073deced3689f65850e1214
| 54,825
|
def always_valid(value):
"""A validator that always says 'Valid'."""
return True
|
beb5fa0ba2a04ef8ea76c6cae2751cdcd1b3db2c
| 54,828
|
def get_block_lines(b, mdata):
"""
Return the number of lines based on the block index in the RAW file.
"""
line_counts = [1, 1, 1, 1, 1, 4, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0]
if b == 5: # for transformer
if mdata[0][2] == 0: # two-winding transformer
return 4
else: # three-winding transformer
return 5
return line_counts[b]
|
f00a8da81dde73b75564b8968be02c8597c0b1bc
| 54,830
|
def resolve_xpath(d, xp):
"""Resolves XPath 'xp' in dictionary 'd' and return the value."""
value = d
try:
# Strip the leading /.
xp = xp.strip("/")
for k in xp.split("/"):
try:
idx = int(k)
value = value[idx]
except ValueError:
value = value.get(k)
    except Exception:
        value = None
return value
|
a90ec5c81a733baf18ddf5c5b2628ddc8e1ce88d
| 54,831
|
def readahead_file_path(path, unused_readahead=None):
"""Readahead files not implemented; simply returns given path."""
return path
|
7921795670d460a819a74c4112d9ed321971ed13
| 54,832
|
def _in_order(expected, received):
"""Determine whether or not the received queue is in the order
that we expect. A rule's destination port is used as its ID.
:param expected: list of rules in the expected order.
:param received: list of rules in ACLSwitch's order
:return: True if in order, False otherwise.
"""
list_size = len(expected)
for i in range(list_size):
if str(expected[i]["rule"]["port_dst"]) != str(received[i]):
return False
return True
|
5e2c16517cb943e3607852285a7b4033b5d82e05
| 54,833
|
def trim_downbeats(downbeat_times, downbeat_labels, ini_bar, num_bars):
""" Trim downbeats to select certain section within the recording.
Parameters
----------
downbeat_times : np.ndarray
time instants of the downbeats
downbeat_labels : list
labels at the downbeats
ini_bar : int
initial bar to trim from
num_bars : int
number of bars to trim
Returns
-------
downbeat_times_trimmed : np.ndarray
time instants of the downbeats after trimming
downbeat_labels_trimmed : list
labels at the downbeats after trimming
"""
downbeat_times_trimmed = downbeat_times[ini_bar-1:ini_bar-1+num_bars]
downbeat_labels_trimmed = downbeat_labels[ini_bar-1:ini_bar-1+num_bars]
return downbeat_times_trimmed, downbeat_labels_trimmed
|
59648cc4cfabb7bb21e3f4e9446b2514a6a71223
| 54,838
|
def get_priority_elem_in_set(obj_set, priority_list):
"""Returns the highest priority element in a set.
The set will be searched for objects in the order they appear in the
priority list, and the first one to be found will be returned. None is
returned if no such object is found.
Parameters
---------
obj_set : set, list
A set or list of objects.
priority_list : list
A list of objects in descending order of priority.
Returns
-------
object
The highest priority object in the given set.
Example:
--------
>>> obj_set = set([3, 2, 7, 8])
>>> priority_list = [4, 8, 1, 3]
>>> print(get_priority_elem_in_set(obj_set, priority_list))
8
"""
for obj in priority_list:
if obj in obj_set:
return obj
return None
|
3a5775d0feab15bae33cbb79212f9b049d2abfce
| 54,839
|
def compute_block(name: str) -> str:
"""
Compute block for a name.
Override for other definition of blocks. This function gives block as first initial + last name.
Parameters
----------
name: string
the name to block
Returns
-------
string: the block string
"""
if len(name) == 0:
return ""
name_parts = name.split(" ")
if len(name_parts) == 1:
return name_parts[0]
block = name_parts[0][0] + " " + name_parts[-1]
return block
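# Usage sketch (names are illustrative): the block is first initial + last name.
assert compute_block("Jane Q Doe") == "J Doe"
assert compute_block("Plato") == "Plato"
assert compute_block("") == ""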
|
3c4217dbbfa977447315ded3d4d3792c5a29c091
| 54,844
|
import itertools
def flatten(ls):
"""
flatten a nested list
"""
flat_ls = list(itertools.chain.from_iterable(ls))
return flat_ls
|
46c9ce5b089cae4ee67876b4f5e798dbf334036d
| 54,845
|
def _TransposeRnaSeqTable(rnaseq):
"""Convert the RnaSeq table, indexed by gene, to be indexed by specimen."""
rnaseq.index = rnaseq.Gene
return rnaseq[rnaseq.columns[2:]].T
|
49dda6eea77e96bc3a7d296644ca2dfaf39ecace
| 54,848
|
def notas(*notas,sit):
"""
    -> Function that analyses the grades and status of several students.
    :param notas: one or more student grades (accepts several grades)
    :param sit: optional flag indicating whether or not to add the pass/fail status
    :return: dictionary with various pieces of information about the class's situation
"""
notasdict = {}
notasdict["Total"] = len(notas)
notasdict["Maior"] = max(notas)
notasdict["Menor"] = min(notas)
notasdict['Media'] = sum(notas)/len(notas)
    if sit:
        if notasdict["Media"] < 5:
            notasdict['Situacao'] = "REPROVADO"
        elif 5 < notasdict["Media"] < 6:
notasdict["Situacao"] = "RECUPERACAO"
else:
notasdict["Situacao"] = "APROVADO"
return notasdict
|
cf1916eb8a60d01afc9f7e4139f7afb38cdc0b3b
| 54,849
|
def calc_det_dzb(theta):
"""Calculate detector vertical offset of the bottom raw
.. note:: formula taken from Table 2 on pag. 68 of CE document
vol. 3 (Olivier Proux et al.), theta in deg
"""
return -677.96 + 19.121 * theta - 0.17315 * theta ** 2 + 0.00049335 * theta ** 3
|
ee8030280399e4944808a1d04ceca456bddd46ac
| 54,856
|
import re
def read_conf_file(file: str) -> dict:
"""Convert a configuration file with key = value format to a dict"""
DICT_INFO = {}
try:
with open(file) as rel_file:
for line in rel_file:
line = line.partition('#')[0].rstrip()
name, var = line.partition("=")[::2]
var = re.sub(r"^\"", '', str(var.strip()))
var = re.sub(r"\"$", '', var)
DICT_INFO[name.strip()] = var
except Exception:
pass
return DICT_INFO
|
63d06d41793279c9caba337b855453518fbc33b7
| 54,860
|
from typing import List
from typing import Dict
from typing import Any
def _get_trimming_index(sysmon: List[Dict[str, Any]]) -> int:
"""
Find index after which isn't mainly noise
:param sysmon: List of Sysmon events
:return: The index in the list of Sysmon events where events that we care about start
"""
index = 0
for event in sysmon:
event_data = event["EventData"]
for data in event_data["Data"]:
val = data["@Name"]
if not data.get("#text"):
continue
if val == "ParentCommandLine" and 'C:\\Users\\buddy\\AppData\\Local\\Temp\\' in data["#text"]:
# Okay now we have our baseline, everything before this was noise
# get index of eventdata
index = sysmon.index(event)
return index
return index
|
820f17f755e26313297b6c6ef61ab01601f3f021
| 54,862
|
def _format_size( fsize_b ):
"""
Formats the bytes value into a string with KiB, MiB or GiB units. This code has been copied from :py:meth:`deluge's format_size <deluge.ui.console.utils.format_utils.format_size>`.
:param int fsize_b: the filesize in bytes.
:returns: formatted string in KiB, MiB or GiB units.
:rtype: str
**Usage**
    >>> _format_size( 112245 )
    '109.61 KiB'
"""
fsize_kb = fsize_b / 1024.0
if fsize_kb < 1024:
return "%.2f KiB" % fsize_kb
fsize_mb = fsize_kb / 1024.0
if fsize_mb < 1024:
return "%.2f MiB" % fsize_mb
fsize_gb = fsize_mb / 1024.0
return "%.2f GiB" % fsize_gb
|
508f98a9fad744d3a4644b079f9bb9a142bb3131
| 54,868
|
import hashlib
def get_file_hash(path, algorithm='md5'):
"""Get hash from a given file and hashing algorithm.
Args:
path: str.
Full path to a file.
algorithm: str, optional.
Name of hashing algorithm. See `hashlib.algorithms_available`
for list of available hashing algorithms in python.
Returns:
str. File hash computed from the file using the specified algorithm.
#### Examples
```python
get_file_hash('train.gz')
## '5503d900f6902c61682e6b6f408202cb'
```
"""
hash_alg = hashlib.new(algorithm)
with open(path, 'rb') as f:
read_size = 1024 * 1024 * 4
data = f.read(read_size)
while data:
hash_alg.update(data)
data = f.read(read_size)
return hash_alg.hexdigest()
|
9961dd567dafc72f30361fa66d7e987ee919a430
| 54,871
|
def pretty_str(element, encoding="us-ascii", xml_declaration=True, indent=4):
"""
Gets a string of the provided XML element.
Args:
element (xml.etree.ElementTree.Element): The element to get as a string.
encoding (str): The encoding of the XML string.
xml_declaration (bool): If the declaration line is required or not.
indent (int): The number of spaces to use in the indentation.
Returns:
str -- A pretty string ready to be written in a file.
"""
def print_node(nb_indents, node):
node_str = " " * indent * nb_indents
has_children = False
for element in list(node):
has_children = True
close_it = False
        if (node.text is None or node.text == "") and not has_children:
close_it = True
node_str += "<" + str(node.tag)
for name, value in node.items():
node_str += " " + name + '="' + str(value) + '"'
if close_it:
node_str += "/>\n"
elif not has_children:
node_str += ">" + str(node.text) + "</" + node.tag + ">\n"
else:
node_str += ">\n"
for element in list(node):
node_str += print_node(nb_indents + 1, element)
node_str += " " * indent * nb_indents + "</" + node.tag + ">\n"
return node_str
result = ""
#xml declaration and encoding:
    if xml_declaration:
        result += "<?xml version='1.0'"
        if encoding:
            # use the requested encoding rather than a hard-coded value
            result += " encoding='" + encoding + "'"
result += "?>\n"
#Tree:
result += print_node(0, element)
return result
|
a4c08eb04a31f7c53e0a90c57fb338bf63ae2204
| 54,877
|
from typing import List
def occupied_in_axis(seating: List[List[str]], row: int, col: int,
delta_row: int, delta_col: int, recurse: bool) -> int:
"""Find if there is an occupied seat in an axis."""
row += delta_row
col += delta_col
occupied = 0
if 0 <= row < len(seating) and 0 <= col < len(seating[0]):
if seating[row][col] == '#':
occupied = 1
elif seating[row][col] == '.' and recurse:
occupied = (
occupied_in_axis(seating, row, col, delta_row, delta_col,
recurse)
)
return occupied
|
fa5aacb51ab3c760bcb412101d23d49564bbe2ea
| 54,882
|