content | sha1 | id
|---|---|---|
from datetime import datetime
def set_metadata(redis_db, db_revision):
"""
set:
Current revision
Date of update
Update type - e.g. forced update, or new Manifest revision
successful update - set upon completion, to test for errors.
"""
destiny_version = "D2"
# print("date: ", datetime.now())
# print("revision:", db_revision)
# set metadata to empty:
redis_db.set(destiny_version + ":" + "metadata:date", str(datetime.now()))
redis_db.set(destiny_version + ":" + "metadata:revision", db_revision)
redis_db.set(destiny_version + ":" + "metadata:update_type", "forced")
redis_db.set(destiny_version + ":" + "metadata:successful", "True")
return True
|
e852a5981809579b8d0240b26dc7eb7771274882
| 336,346
|
def window_on_island(island, window):
    """
    Decide whether a window lies entirely on an island.
    Returns 1 if it does, 0 otherwise.
    """
    if window.start >= island.start and window.end <= island.end:
        return 1
    return 0
|
9a15e109c6a4e0998a500ad5921903ca7280b1e8
| 206,795
|
import math
def coordinates_on_circle(n):
    """Returns x, y coordinates of n points on a unit circle with spacing 2π/n"""
    if not isinstance(n, int):
        raise TypeError("Wrong input: the argument must be an integer number of points on the unit circle!")
    x, y = [], []
    step_angle = 2 * math.pi / n
    for i in range(n):
        x.append(math.cos(i * step_angle))
        y.append(math.sin(i * step_angle))
    return x, y
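# Illustrative usage (added example, not part of the original snippet):
# with n=4 the points land on the axes of the unit circle.
_xs, _ys = coordinates_on_circle(4)
assert [round(v, 6) for v in _xs] == [1.0, 0.0, -1.0, 0.0]
assert [round(v, 6) for v in _ys] == [0.0, 1.0, 0.0, -1.0]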
|
9907b942f4c2ac26b215b2106e04d07e47009521
| 525,833
|
import torch
def transform2h(x, y, m):
"""Applies 2d homogeneous transformation."""
A = torch.matmul(m, torch.stack([x, y, torch.ones(len(x))]))
xt = A[0, :] / A[2, :]
yt = A[1, :] / A[2, :]
return xt, yt
|
4fdf757d2383d278660eb4b8e984b8571858f06e
| 420,519
|
def has_vdom(cnfs):
"""
:param cnfs: A list of fortios config objects, [{"config": ...}]
:return: True if vdoms are found in given configurations
"""
    return any(c.get("config") == "vdom" for c in cnfs)
|
cc28f424031ef95688a0593242457e9bc541c726
| 340,282
|
def CountTupleTree(tu):
"""Count the nodes in a tuple parse tree."""
if isinstance(tu, tuple):
s = 0
for entry in tu:
s += CountTupleTree(entry)
return s
elif isinstance(tu, int):
return 1
elif isinstance(tu, str):
return 1
else:
raise AssertionError(tu)
|
eea38c376f2aba4ecf3e832607bc597e66b250e3
| 118,416
|
import re
def load_uses(config_path):
"""Read the set of features that Pently is configured to use."""
    useRE = re.compile(r"PENTLY_USE_([a-zA-Z0-9_]+)\s*=\s*([0-9]+)\s*(?:;.*)?")
with open(config_path, "r") as infp:
uses = [useRE.match(line.strip()) for line in infp]
uses = [m.groups() for m in uses if m]
return {name for name, value in uses if int(value)}
|
85d26dbfe8a13b4f1a27aaafc821d325ba2dd1f0
| 447,430
|
def location_to_query(location, wildcard=True, tag='i4x'):
"""
Takes a Location and returns a SON object that will query for that location by subfields
rather than subdoc.
Fields in location that are None are ignored in the query.
If `wildcard` is True, then a None in a location is treated as a wildcard
query. Otherwise, it is searched for literally
"""
query = location.to_deprecated_son(prefix='_id.', tag=tag)
    if wildcard:
        # Iterate over a copy so keys can be deleted while looping.
        for key, value in list(query.items()):
            # don't allow wildcards on revision, since public is set as None, so
            # it's ambiguous between None as a real value versus None=wildcard
            if value is None and key != '_id.revision':
                del query[key]
return query
|
7710d26f1f7cc6e66df4d3c24c9b3a558f300810
| 330,173
|
def lessthan(obj1, obj2):
"""
    Return True if the span of obj1 ends before the span of obj2 begins.
"""
return obj1.span[1] < obj2.span[0]
|
2dbf51442b5d2eca43b1fd82044082f17489297d
| 596,601
|
def month_string_to_int(solar_dataframe):
"""This function translates string values for months into their integer representations"""
#Use a dictionary to map text to integer values for the months - HDF5 needs no strings
month_change = {'January':1,'February':2,'March':3,'April':4, 'May':5,'June':6,'July':7,
'August':8,'September':9,'October':10, 'November':11,'December':12, 'Jan': 1,
'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6, 'Jul': 7, 'Aug': 8,
'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12}
month_list = list(solar_dataframe['Month'].map(month_change).fillna(solar_dataframe['Month']))
return month_list
|
44bf490327babf844a11caaa34356eff3b229fc2
| 605,196
|
def retrieve_block(model_storage, weight_idx, block_x_idx, block_y_idx, block_size_x, block_size_y):
"""Retrieve a block with given weight_idx, block_x_idx, and block_y_idx
Args:
model_storage (model_storage): a model's model_storage
weight_idx (int): weight index
block_x_idx (int): block index in x-axis
block_y_idx (int): block index in y-axis
block_size_x (int): block size in x-axis
block_size_y (int): block size in y-axis
Return:
numpy array: a block
"""
b = model_storage['weights_padded'][weight_idx][block_x_idx*block_size_x: (block_x_idx+1)*block_size_x,
block_y_idx*block_size_y: (block_y_idx+1)*block_size_y]
return b
|
28118eb869fb350397bed0afbea402a9375db834
| 692,717
|
from typing import List
def read_ta_list(fname: str) -> List[str]:
    """Read the total list of TAs from a file, one TA roll number per line."""
    tas = []
    with open(fname, "r") as tfile:
        for line in tfile:
            s = line.strip()
            if len(s) == 0:
                continue
            tas.append(s.lower())
    return tas
|
7d683a32a9381a7efc80223cf2cea200238dc0a8
| 629,203
|
def move(string, pos1, pos2):
"""
move position X to position Y means that the letter which is at index X
should be removed from the string, then inserted such that it ends up at
index Y.
"""
new_string = list(string)
new_string.insert(pos2, new_string.pop(pos1))
return ''.join(new_string)
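# Illustrative usage (added example, not part of the original snippet):
assert move("bcdea", 1, 4) == "bdeac"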
|
e43cd94938a311fe0b42fe055a4f00d51f388bc6
| 263,712
|
import pathlib
def setupoutdir(outdir=None):
"""Create directory structure for csvs and logs.
Args:
outdir (path like, optional): Path to top level directory. Defaults to None.
If outdir is None <home>/Documents/TadoLogs/ is used.
"""
if outdir is None:
outdir = pathlib.Path.home() / "Documents" / "TadoLogs"
else:
outdir = pathlib.Path(outdir)
outdir.mkdir(exist_ok=True)
return outdir
|
4233c15ad76a30ed1dc2bcf4ef7834142678931d
| 485,966
|
def ordered_json(json_dict):
"""Creates a ordered json for comparison"""
if isinstance(json_dict, dict):
return sorted((k, ordered_json(v)) for k, v in json_dict.items())
if isinstance(json_dict, list):
return sorted(ordered_json(x) for x in json_dict)
return json_dict
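# Illustrative usage (added example, not part of the original snippet):
assert ordered_json({"a": 1, "b": [2, 1]}) == ordered_json({"b": [1, 2], "a": 1})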
|
716f86745b7388e87d2e72d21b3644ef57e545b0
| 206,582
|
def short_str(s, max_len=32):
"""
    Convert `s` to string and cut it off to at most `max_len` chars (with an ellipsis).
"""
if not isinstance(s, str):
s = str(s)
if len(s) > max_len:
s = s[:27] + "[...]"
return s
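# Illustrative usage (added example, not part of the original snippet):
assert short_str("x" * 100) == "x" * 27 + "[...]"
assert short_str("short") == "short"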
|
da9e8c76925b6278d82e4fba35d5872fe4d31cd6
| 371,541
|
def inorder_traversal(root):
"""Function to traverse a binary tree inorder
Args:
root (Node): The root of a binary tree
Returns:
(list): List containing all the values of the tree from an inorder search
"""
res = []
if root:
res = inorder_traversal(root.left)
res.append(root.data)
res = res + inorder_traversal(root.right)
return res
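# Illustrative usage (added example; assumes a simple node type with
# `left`, `data` and `right` attributes, as the docstring implies):
class _Node:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right
assert inorder_traversal(_Node(2, _Node(1), _Node(3))) == [1, 2, 3]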
|
f6d5141cbe9f39da609bd515133b367975e56688
| 701
|
import types
def attributed_subclass(cls: type, name: str, **attributes):
"""
Create a subclass of ``cls`` identical to the original,
but with additional or redefined attributes
:param cls: the class to be subclassed
:param name: name of the new class
:param attributes: new attributes
:return: a subclass of ``cls``
::
class Animal:
pass # implementation goes here
Bird = attributed_subclass(Animal, 'Bird', wings=2)
pelican = Bird() # pelican.wings == 2
.. note::
Existence of ``__slots__``
(and, consequently, the absence of the instance ``__dict__``)
is preserved during subclassing
"""
def set_subclass_attrs(ns):
ns.update(attributes.items())
if hasattr(cls, '__slots__'):
ns['__slots__'] = () # no new instance attributes, so empty slots
return types.new_class(name, (cls,), exec_body=set_subclass_attrs)
|
286be37364100ae35c7275e5a509c6f2138e000d
| 206,514
|
def take(l, indexes):
"""take(l, indexes) -> list of just the indexes from l"""
items = []
for i in indexes:
items.append(l[i])
return items
|
d1fc9b9515add2f682103af58e08c1fee29d1d65
| 293,640
|
from bs4 import BeautifulSoup
def footnote(content):
"""Creates a footnote using the <footnote> tag.
Parameters:
- content: Whatever you want to place in the footnote. It can be a
string or a tag element.
Returns:
- The footnote tag."""
tag = BeautifulSoup(features='lxml').new_tag('footnote')
tag.append(content)
return tag
|
21e8fb55a6fe74664c14af2a799b8d74f7a7b2ea
| 562,166
|
def check_rows(board):
"""
list -> bool
    This function checks if every row contains only distinct numbers and returns
    True if so, and False if not.
>>> check_rows(["**** ****", "***1 ****", "** 3****", "* 4 1****", \
" 9 5 ", " 6 83 *", "3 1 **", " 8 2***", " 2 ****"])
True
>>> check_rows(["**** ****", "***1 ****", "** 3****", "* 4 4****", \
" 9 5 ", " 6 83 *", "1 1 **", " 8 2***", " 2 ****"])
False
"""
for row in board:
numbers = []
for elem in row:
if elem == '*' or elem == ' ':
continue
else:
if elem in numbers:
return False
numbers.append(elem)
return True
|
0c459e9b393892fc2beea9e4f616b8984347cd03
| 352,220
|
def get_node_exec_options(profile_string, exec_node_id):
""" Return a list with all of the ExecOption strings for the given exec node id. """
results = []
matched_node = False
id_string = "(id={0})".format(exec_node_id)
for line in profile_string.splitlines():
if matched_node and line.strip().startswith("ExecOption:"):
results.append(line.strip())
matched_node = False
if id_string in line:
# Check for the ExecOption string on the next line.
matched_node = True
return results
|
c7b6329b9caca6feb3bf00c1e9559887d03e4139
| 21,242
|
def parse_version(bundle):
"""
Parse ds8k version number from bundle.
rule is:
87.51.34.0 => 7.5.1
87.50.21.1 => 7.5.0
88.0.151.0 => 8.0.0
"""
v1, v2, _ = bundle.split('.', 2)
v2 = '{0:0{1}}'.format(int(v2), 2)
return '.'.join([v1[-1], v2[0], v2[1]])
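# Quick checks against the rules in the docstring (illustrative addition):
assert parse_version("87.51.34.0") == "7.5.1"
assert parse_version("88.0.151.0") == "8.0.0"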
|
5d22a25513d81530e4177503e54adc9eab270c54
| 260,557
|
def faultratio(I, Ipickup, CTR=1):
"""
Fault Multiple of Pickup (Ratio) Calculator.
Evaluates the CTR-scaled pickup measured to pickup current ratio.
M = meas / pickup
Parameters
----------
I: float
Measured Current in Amps
Ipickup: float
Fault Current Pickup Setting (in Amps)
CTR: float, optional
Current Transformer Ratio for relay,
default=1
Returns
-------
M: float
The measured-to-pickup ratio
"""
M = I / (CTR * Ipickup)
return M
|
9c844ab7add58fb23ce9754ab54b409012ba1cee
| 235,427
|
def paged_object_to_list(paged_object):
"""
Extract all pages within a paged object as a list of dictionaries
"""
paged_return = []
while True:
try:
page = next(paged_object)
paged_return.append(page.as_dict())
except StopIteration:
break
return paged_return
|
1215250bb951d10f45576424475ea21792d01e78
| 377,981
|
def norm_vec(lst, idx):
"""Get the normal vector pointing to cube at idx (from previous cube)."""
assert len(lst)>1 and 0 < idx < len(lst)
return [a-a1 for (a,a1) in zip(lst[idx], lst[idx-1])]
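# Illustrative usage (added example, not part of the original snippet):
# moving from cube (0, 0, 0) to (1, 0, 0) gives the +x normal.
assert norm_vec([(0, 0, 0), (1, 0, 0)], 1) == [1, 0, 0]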
|
76fb86ac29b2cd8b5ceab0ba83b9bb8360914764
| 24,016
|
import typing
def build_address_dict(street_name: str, house_number: str) \
-> typing.Dict[str, str]:
"""Builds a dict with the street name and house number
Parameters
----------
street_name : str
Street name
house_number: str
House number
Returns
-------
typing.Dict[str, str]
a dict with the following structure:
{
"street": "Street name",
"housenumber": "House number"
}
"""
return {
"street": street_name,
"housenumber": house_number
}
|
ea6e096341c415b972d93a0b0c63da476a177e21
| 499,553
|
def split(l, n):
"""
Split a list in n parts
"""
if l is None:
return []
avg = len(l) / float(n)
out = []
last = 0.0
while last < len(l):
out.append(l[int(last):int(last + avg)])
last += avg
return out
# return [l[i::n] for i in range(n)] # This doesn't keep the order
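# Illustrative usage (added example, not part of the original snippet):
assert split([1, 2, 3, 4, 5], 2) == [[1, 2], [3, 4, 5]]
assert split(None, 3) == []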
|
6d2b5bd6baeb0402726209fa285486052fcf783f
| 255,566
|
def word_count_dict_to_tuples(counts, decrease=True):
"""
Given a dictionary of word counts (mapping words to counts of their
frequencies), convert this into an ordered list of tuples (word,
    count). The list is ordered by decreasing count, unless `decrease` is
    False.
"""
return sorted(list(counts.items()), key=lambda key_value: key_value[1],
reverse=decrease)
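# Illustrative usage (added example, not part of the original snippet):
assert word_count_dict_to_tuples({"a": 1, "b": 3, "c": 2}) == [("b", 3), ("c", 2), ("a", 1)]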
|
b8abfff7b74d2e1b2724bfbe29d1823f6682bd64
| 22,478
|
def check_hair_colour(val):
"""hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f."""
return len(val) == 7 and val[0] == '#' and all(c in "0123456789abcdef" for c in val[1:])
|
9a42c34bbf16fba6444b3541aca3f89ebc408acc
| 391,444
|
def _fortran_to_val(val):
"""
Transform a Fortran value to a Python one.
Args:
val (str): The value to translate.
Returns:
A Python representation.
"""
if val[0] == val[-1] == '"': # A string
return val[1:-1]
if val == ".true.":
return True
if val == ".false.":
return False
# Assume a number
try:
return int(val)
except ValueError:
pass
# Assume a float
val = val.replace("d", "e").replace("D", "e")
try:
return float(val)
except ValueError:
pass
raise TypeError("Unable to parse: " + val)
|
b1066419fe93d5cfccbdad0ee28f7572f12c748e
| 388,630
|
def is_set_nickname(string, nickname):
"""
Test if this is a nickname setting message
"""
if string.startswith(f"{nickname} set the nickname for "):
return True
if string.startswith(f"{nickname} set his own nickname to"):
return True
if string.startswith(f"{nickname} set her own nickname to"):
return True
if string.startswith(f"{nickname} set your nickname to"):
return True
return False
|
aec4909f3c4d3dea689383cc73d26623a16dfd85
| 15,719
|
import math
def refractive_index_nir(angle_incidence,angle_refraction):
"""Usage: Find refractive index using angle of incidence and angle of refraction"""
refractive_index_nir_resul=math.sin(angle_incidence)/math.sin(angle_refraction)
return refractive_index_nir_resul
|
bd4c8ae2f4d704f2caef493e585a44e690c46882
| 427,063
|
def identity_function(image):
"""
Default image transformation function. Does nothing.
:param image: The image to transform.
:return: The original image.
"""
return image
|
ae13be245a325b96d8d4cdbb3c7099acd4b5a47f
| 475,180
|
import yaml
from pathlib import Path
def themeFromFile(themeFile: str) -> list[str]:
"""Set the base24 theme from a base24 scheme.yaml to the application.
Args:
themeFile (str): path to file
Returns:
list[str]: theme to set
"""
schemeDictTheme = yaml.safe_load(Path(themeFile).read_text(encoding="utf-8"))
return ["#" + schemeDictTheme[f"base{x:02X}"] for x in range(0, 24)]
|
c645ca03d611208e264db50544647b5428d03ed5
| 489,276
|
import socket
def check_port(ip, port, timeout=None):
"""
Checks if the port is open on a specific IP
@param ip: IP of the remote host
@param port: The port to check
@param timeout: Timeout, in seconds
@return bool: True if the port is open, False if closed
"""
    socket_port = socket.socket()
    if timeout is not None:
        socket_port.settimeout(timeout)
    try:
        socket_port.connect((ip, int(port)))
    except socket.error:
        return False
    else:
        return True
    finally:
        # Always release the socket, whether or not the connection succeeded.
        socket_port.close()
|
43a4696ca002f96e9b6c28d67326dd4c0c285e5e
| 21,860
|
def ping(data_type, event_type, ping_data=None):
"""
Construct a 'PING' message to send either a 'PING_REQUEST' or 'PING_RESPONSE' to the server.
:param data_type: int the RTMP datatype.
:param event_type: int the type of message you want to send (PING_REQUEST = 6 or PING_RESPONSE = 7).
:param ping_data: bytes the data you want to send to the server (DEFAULT: blank bytes.)
"""
if ping_data is None:
ping_data = b"\x00\x00\x00\x00"
msg = {'msg': data_type,
'event_type': event_type,
'event_data': ping_data}
return msg
|
5dd36c7dae9d6fcf3fe12897bc3a598720f5d606
| 130,894
|
def _set_train_steps(max_steps, save_checkpoints_steps, estimator):
""" Set the train steps for each iteration in the train/eval loop. If the estimator
has been trained before, the initial step will be updated accordingly.
Args:
max_steps: (int) number of steps to run training.
save_checkpoints_steps: (int) frequency in steps to save checkpoints.
estimator: tf.Estimator.estimator initialized object.
Returns:
list containing the train steps for each iteration of the train/eval loop.
"""
# Get initial steps from *estimator* if a model has already been saved.
try:
initial_step = estimator.get_variable_value("global_step")
remain_steps = max_steps - initial_step
num_loops, remain = divmod(remain_steps, save_checkpoints_steps)
except ValueError:
initial_step = 0
num_loops, remain = divmod(max_steps, save_checkpoints_steps)
train_steps = [initial_step + (i * save_checkpoints_steps) for i in range(1, num_loops + 1)]
if remain > 0:
train_steps.append(train_steps[-1] + remain)
return train_steps
|
880e8dce487c9bd2ffef98f12ac41890221cb2ae
| 245,636
|
def _split_line(s, parts):
"""
Parameters
----------
s: string
Fixed-length string to split
parts: list of (name, length) pairs
Used to break up string, name '_' will be filtered from output.
Returns
-------
Dict of name:contents of string at given location.
"""
out = {}
start = 0
for name, length in parts:
out[name] = s[start:start + length].strip()
start += length
    # '_' entries are placeholders for ignored fields; drop them if present.
    out.pop('_', None)
return out
|
d9d1b1c4d92041ce9bdaa594c47c248aa115603d
| 226,151
|
from typing import List
import torch
def encode_ordinal(labels: List[int], num_labels: int) -> torch.Tensor:
"""
Given a list of class labels, transforms them into an ordinal tensor.
:param labels: list of class labels to transform.
:param num_labels: number of class labels. It is not inferred directly from parameter `labels` because this function
could be called in batches.
:return: an ordinal tensor from the labels passed.
"""
return torch.tensor(
[
([1] * label + [0] * (num_labels - label - 1))
for label in labels
]
).float()
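# Illustrative usage (added example, not part of the original snippet):
# with 4 classes each label becomes a length-3 ordinal vector.
assert encode_ordinal([0, 2], 4).tolist() == [[0.0, 0.0, 0.0], [1.0, 1.0, 0.0]]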
|
51d43be251a7d96b502c4931bd79b15760f0542b
| 345,155
|
def is_palindrome(s):
"""
Determine whether or not given string is valid palindrome
:param s: given string
:type s: str
:return: whether or not given string is valid palindrome
:rtype: bool
"""
# basic case
if s == '':
return True
# two pointers
# one from left, one from right
i = 0
j = len(s) - 1
while i < j:
# find left alphanumeric character
if not s[i].isalnum():
i += 1
continue
# find right alphanumeric character
elif not s[j].isalnum():
j -= 1
continue
# case insensitive compare
if s[i].lower() != s[j].lower():
return False
i += 1
j -= 1
return True
|
a9d700a2e7907e551cb5060f61de5c829ab77291
| 701,208
|
import json
def translate_xiaomi_aqara_contact(topic, data, srv=None):
"""Translate the Xiaomi Aqara's contact sensor JSON data to a
human-readable description of whether it's open or closed."""
payload = json.loads(data["payload"])
if "contact" in payload:
if payload["contact"]:
return dict(status="closed")
else:
return dict(status="opened")
return None
|
94a745339cc7893cc5df16d69b01edfc66ab057a
| 623,989
|
def right_strip_lines(lines):
"""Remove trailing spaces on each line"""
return [line.rstrip() for line in lines]
|
76fe5480248b7fef05c0ed8094d608b0b8fb70f6
| 608,676
|
def _escapeAnchor(name):
"""
Escape name to be usable as HTML anchor (URL fragment)
"""
result = []
for c in name:
oc = ord(c)
if oc < 48 or (57 < oc < 65) or (90 < oc < 97) or oc > 122:
if oc > 255:
result.append("$%04x" % oc)
else:
result.append("=%02x" % oc)
else:
result.append(c)
return u"".join(result)
|
84c995916d4399b99d0379ad4879f46d94881f52
| 67,428
|
def is_before(d1, d2):
"""
Return True if d1 is strictly before d2.
:param datetime.date d1: date 1
:param datetime.date d2: date 2
:return: True is d1 is before d2.
:rtype: bool
"""
return d1 < d2
|
194026215e35ce026142e7d0734aaa3ef6b4d254
| 36,993
|
import random
def get_batch(source, i, args, train=True):
"""
get_batch subdivides the source data into chunks of length args.bptt.
If source is equal to the example output of the batchify function, with
a bptt-limit of 2, we'd get the following two Variables for i = 0:
┌ a g m s ┐ ┌ b h n t ┐
└ b h n t ┘ └ c i o u ┘
    Note that despite the name of the function, the subdivision of data is not
done along the batch dimension (i.e. dimension 1), since that was handled
by the batchify function. The chunks are along dimension 0, corresponding
to the seq_len dimension in the LSTM.
"""
if train:
seq_len = random.randint(2, args.bptt)
seq_len = min(seq_len, len(source) - 1 - i)
else:
seq_len = min(32, len(source) - 1 - i)
data = source[i:i + seq_len]
target = source[i + 1:i + 1 + seq_len].view(-1)
return data, target, seq_len
|
53d86f02eff38a5c41b78264d96fd0a28d87a854
| 598,285
|
def parse_bool(data):
"""Parse a string value to bool"""
if data.lower() in ('yes', 'true',):
return True
elif data.lower() in ('no', 'false',):
return False
else:
err = f'"{data}" could not be interpreted as a boolean'
raise TypeError(err)
|
60f35d69178fa6322f3ff29b64b96deb7bd43f94
| 25,919
|
import pathlib
def get_path_name(name: pathlib.Path | str | None) -> pathlib.Path | str:
"""Get the full name from a directory name or path and a file name or path.
Args:
name (pathlib.Path | str | None): Directory name or file name.
Returns:
str: Full file name.
"""
if name is None:
return ""
return pathlib.Path(name)
|
0eb4abd9cf82f022d0a0c3d380fab3e750c183d3
| 25,184
|
def get_frame_size(data):
""" Gets frame height and width from data. """
for cls, cls_data in data.items():
for timestep, t_data in enumerate(cls_data):
            if len(t_data['im_hs']) > 0:
im_h = t_data['im_hs'][0]
im_w = t_data['im_ws'][0]
return im_h, im_w
return None
|
0e294b158b9e2a914a9cebe18c24c2608a9cacfe
| 187,747
|
def getHeaders(lst, filterOne, filterTwo):
"""
Find indexes of desired values.
Gets a list and finds index for values which exist either in filter one or filter two.
Parameters:
lst (list): Main list which includes the values.
filterOne (list): A list containing values to find indexes of in the main list.
filterTwo (list): A list to check by for the same index of a value from filterOne if it does not exist in the main list.
Returns:
(list): A list containing indexes of values either from filterOne or filterTwo.
"""
headers = []
for i in range(len(filterOne)):
if filterOne[i] in lst:
headers.append(lst.index(filterOne[i]))
else:
headers.append(lst.index(filterTwo[i]))
return headers
|
0aa3612b15114b0e0dcdf55eb3643df58157239b
| 7,966
|
import math
def repeatedString(s, n):
"""
Returns the number of 'a's in the first n letters of a string consisting of
infinitely repeating strings, s.
"""
count = 0
s_count_a = s.count('a')
count += math.floor(n / len(s)) * s_count_a
for _ in range(n % len(s)):
if s[_] == 'a':
count += 1
return count
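# Illustrative usage (added example, not part of the original snippet):
# "aba" repeated to 10 characters is "abaabaabaa", which contains 7 'a's.
assert repeatedString("aba", 10) == 7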
|
b35bf491ed532e5d70cf273ea3d30af0bb184ff4
| 61,310
|
def add_attrs(da, name, units, long_name):
""" gives name to xr.DataArray `da` and adds attributes `units` and `long_name` """
da.name = name
da.attrs = {'units':units, 'long_name':long_name}
return da
|
1663ebcf5c194b40a8cb92cc3c0050681c09cbd3
| 246,643
|
def divide_list(list_file):
"""Checks if every element in provided list is a HRS image and divides the original list into red and blue frames lists
Parameters
----------
list_file: list of str
List containing names of HRS files
Returns
-------
blue_files: list of str
List of HRS blue frames in the running directory
red_files: list of str
List of HRS red frames in the running directory
"""
blue_files=[]
red_files=[]
f=open(list_file,"r")
for line in f.readlines():
line_s=line.replace("\n", "")
if (line_s[-5:]==".fits") and ("H201" in line_s) and not("pH201" in line_s):
blue_files.append(line_s)
elif (line_s[-5:]==".fits") and ("R201" in line_s) and not("pR201" in line_s):
red_files.append(line_s)
else:
print("This does not look like a name of a raw HRS file: %s"%line_s)
f.close()
return blue_files,red_files
|
a3ee580228efa09bb883e493456bb83a3d8a297d
| 447,842
|
from typing import List
def sequence_to_int(line: str) -> List[int]:
""" Transform a sequence of digits to a list of integers. """
return list(map(int, line))
|
650e3744b1f667e166df3122630ca123c61d728e
| 612,836
|
def scale_vars(var_seq, scale):
"""
Scale a variable sequence.
"""
return [v * scale for v in var_seq]
|
f6167b2075f099a7392533fe2d97ab5d4401d7e1
| 171,892
|
def split_line_num(line):
"""Split each line into line number and remaining line text
Args:
line (str): Text of each line to split
Returns:
tuple consisting of:
line number (int): Line number split from the beginning of line
remaining text (str): Text for remainder of line with whitespace
stripped
"""
line = line.lstrip()
acc = []
while line and line[0].isdigit():
acc.append(line[0])
line = line[1:]
return (int(''.join(acc)), line.lstrip())
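# Illustrative usage (added example, not part of the original snippet):
assert split_line_num("10 PRINT HELLO") == (10, "PRINT HELLO")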
|
7157bf72eb868ddd8c80705bd51c549de08fc126
| 134,714
|
def field_to_attr(field_name):
"""Convert a field name to an attribute name
Make the field all lowercase and replace ' ' with '_'
(replace space with underscore)
"""
result = field_name.lower()
if result[0:1].isdigit():
result = "n_" + result
result = result.replace(' ', '_')
result = result.replace('/', '_')
result = result.replace('-', '_')
result = result.replace('.', '_')
result = result.replace('+', '_plus')
return result
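# Illustrative usage (added example, not part of the original snippet):
assert field_to_attr("3D Model") == "n_3d_model"
assert field_to_attr("Net Weight") == "net_weight"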
|
b6aa8a55f660648e3d259a6b6df0f560550c7d8a
| 19,406
|
import random
def random_dependency_generator(columns, n=10):
""" Generates n random dependencies from a list of columns.
Returns a dictionary with rhs's as keys and associated with it lhs
combinations, representing a total of <= n dependencies.
Note that the returned lhs-combinations are neither randomly
distributed, nor are there exactly n dependencies returned.
The way rand_length is generated makes it more likely for short
lhs-combinations to be generated over long lhs-combinations.
    Keyword arguments:
columns -- list of columns-names
n -- int, indicating how many dependencies shall be returned
"""
dependencies_dict = {}
for _ in range(0, n):
# at least 2 columns are necessary to form a dependency
rand_length = random.randint(2, len(columns))
lhs = random.sample(columns, rand_length)
rhs = lhs.pop()
lhs.sort()
if rhs in dependencies_dict:
if lhs not in dependencies_dict[rhs]:
dependencies_dict[rhs].append(lhs)
else:
dependencies_dict[rhs] = [lhs]
return dependencies_dict
|
bbdab053c582438560934f1139aafec2a3f1f4f3
| 100,171
|
def clean_uid(uid):
"""
Return a uid with all unacceptable characters replaced with underscores
"""
if not hasattr(uid, 'replace'):
return clean_uid(str(uid.astype('S')))
try:
return uid.decode('utf-8').replace(u"/", u"_").replace(u":", u"_")
except AttributeError:
return uid.replace("/", "_").replace(":", "_")
|
479384a6afc948e8fe5ea833478a2889f7aa2fc1
| 593,619
|
def location(block):
"""Returns the location component in block in title case."""
    # The location component starts immediately after the second space.
sub = block[block.find('BEGIN')+6:block.find('END')-1]
locationString = sub[sub.find(' ')+1:]
    # Lower-case it, reverse it, and return it in title case.
locationString = locationString.lower()[len(locationString)::-1]
return locationString.title()
|
9a3fdbe7b3fa28c0e2ca70d34111ad65e83d2e5c
| 159,977
|
def colorspace_prefixed_name(colorspace):
"""
Returns given *OCIO* colorspace prefixed name with its family name.
Parameters
----------
colorspace : ColorSpace
ColorSpace to prefix.
Returns
-------
str or unicode
Family prefixed *OCIO* colorspace name.
"""
prefix = colorspace.family.replace('/', ' - ')
return '%s - %s' % (prefix, colorspace.name)
|
5bda395c9c67476333d3a52eefa90aff861c05d3
| 628,898
|
def lgmres_params(inner_m=30, outer_k=3, maxiter=100, tol_coef=0.01):
"""
Bundles parameters for the LGMRES linear solver. These control the
expense of finding the left and right environment Hamiltonians.
PARAMETERS
----------
inner_m (int, 30): Number of gmres iterations per outer k loop.
outer_k (int, 3) : Number of vectors to carry between inner iterations.
    maxiter (int, 100) : lgmres terminates after this many iterations.
    tol_coef (float, 0.01): This number times the MPS gradient will set the
        convergence threshold of the linear solve.
RETURNS
-------
A dictionary storing each of these parameters.
"""
return {"solver": "lgmres", "inner_m": inner_m, "maxiter": maxiter,
"outer_k": outer_k, "tol_coef": tol_coef}
|
553dea0b28646baf40148affda1f1898e2e8fc43
| 326,097
|
import re
def mask_url(txt):
"""
Finds an url pattern and then masks it.
Parameters
-----------
txt: str
A piece of text containing the url pattern
Returns
-----------
str
masked url string as ' [url] '
list
list with the found pattern(s)
"""
    pattern = r'http\S+'
    pattern2 = r'www\S+'
sub=' [url] '
txt, find = re.sub(pattern, sub, txt, flags=re.I), re.findall(pattern, txt, flags=re.I)
txt, find2 = re.sub(pattern2, sub, txt, flags=re.I), re.findall(pattern2, txt, flags=re.I)
return txt, find+find2
|
c34329bbf75261373fbd40b345138c92fd88511d
| 445,852
|
def tidy_color_name(word: str) -> str:
"""Tidy up a string to make it suitable for a color name in a palette
This replaces whitespace with underscores, and lowers the text
Args:
word (str): Color name to tidy up
Returns:
str: Tidied color name
"""
return "".join([c for c in word if c.isalnum()]).lower()
|
bc56adcdb8ddf1b821a9221739ead8acae2f53b0
| 311,523
|
def lerp(input1, input2, mask):
"""Lerp between two values based on a 0-1 mask value.
When mask is 0, return input1. When mask is 1, return input2.
Otherwise blend between the two."""
return ((1 - mask) * input1) + (mask * input2)
|
4cc241659fa577b84fb0bed87a0d3f36643b7fb8
| 655,728
|
def xorGate(argumentValues):
"""
Method that evaluates the XOR gate
The XOR gate gives a true (1 or HIGH) output when the number of true inputs is odd.
https://electronics.stackexchange.com/questions/93713/how-is-an-xor-with-more-than-2-inputs-supposed-to-work
@ In, argumentValues, list, list of values
@ Out, outcome, float, calculated outcome of the gate
"""
if argumentValues.count(1.) % 2 != 0:
outcome = 1
else:
outcome = 0
return outcome
|
943a2ce6c86909f88d66ec39ad9e45bb7b86e333
| 548,400
|
def create_mock_coro(mocker, monkeypatch):
"""Create a mock-coro pair.
The coro can be used to patch an async method while the mock can
be used to assert calls to the mocked out method.
"""
def _create_mock_coro_pair(to_patch=None):
mock = mocker.Mock()
async def _coro(*args, **kwargs):
return mock(*args, **kwargs)
if to_patch:
monkeypatch.setattr(to_patch, _coro)
return mock, _coro
return _create_mock_coro_pair
|
03da2680c164357534e2d2ffcc8a37a04e49ebf6
| 359,727
|
def remove_escapes(msg) -> str:
"""
Returns a filtered string
    removing literal '\r' (backslash + r) sequences
"""
filtered = msg.replace(r'\r', '')
return filtered
|
84edc1920c8d9bc4376d63561a91ce56eba19102
| 609,017
|
def getname(method):
"""Descriptive name for the function.
A name combining the function name and module.
Parameters
----------
method: callable
Returns
-------
name: str
"""
module = method.__module__
group = module.split(".")[-1]
variant = method.__name__
return "{group:s} ({variant:s})".format(group=group,
variant=variant)
|
ba7d02f762ab2e86f51746f7e1a26e978d0b1a3e
| 126,842
|
import re
def sanitize(msg):
"""Attempt to sanitize out known-bad patterns."""
# Remove any dictionary references with "encrypted_password", e.g. lines like:
# {'some_key': ..., 'encrypted_password': b'asdf', 'some_other_key': ...}
msg = re.sub(r"('encrypted_password': b?').+?('(?:,|}))", r'\1<REDACTED>\2', msg)
return msg
|
531ef599c5744f333bf80f136a865e8fbbfa1b9e
| 387,234
|
def encrypt(passwd: str) -> str:
"""
The encrypt method used to encrypt password, hacked from the following js:
.. code-block:: javascript
function securityEncode(b) {
a = 'RDpbLfCPsJZ7fiv'
c = 'yLwVl0zKqws7LgKPRQ84Mdt708T1qQ3Ha7xv3H7NyU84p21BriUWBU43odz3iP4rBL3cD02KZciXTysVXiV8ngg6vL48rPJyAUw0HurW20xqxv9aYb4M9wK1Ae0wlro510qXeU07kV57fQMc8L6aLgMLwygtc0F10a0Dg70TOoouyFhdysuRMO51yY5ZlOZZLEal1h0t9YQW0Ko7oBwmCAHoic4HYbUyVeU3sfQ1xtXcPcf1aT303wAQhv66qzW'
var e = "", f, g, h, k, l = 187, n = 187;
g = a.length;
h = b.length;
k = c.length;
f = g > h ? g : h;
for (var p = 0; p < f; p++) {
n = l = 187,
p >= g ? n = b.charCodeAt(p) : p >= h ? l = a.charCodeAt(p) : (l = a.charCodeAt(p),
n = b.charCodeAt(p)),
e += c.charAt((l ^ n) % k);
}
return e
}
:param passwd: password string
:return: encrypted password
""" # noqa
b = passwd
a = "RDpbLfCPsJZ7fiv"
c = ("yLwVl0zKqws7LgKPRQ84Mdt708T1qQ3Ha7xv3H7NyU84p21BriUWBU43"
"odz3iP4rBL3cD02KZciXTysVXiV8ngg6vL48rPJyAUw0HurW20xqxv9a"
"Yb4M9wK1Ae0wlro510qXeU07kV57fQMc8L6aLgMLwygtc0F10a0Dg70T"
"OoouyFhdysuRMO51yY5ZlOZZLEal1h0t9YQW0Ko7oBwmCAHoic4HYbUy"
"VeU3sfQ1xtXcPcf1aT303wAQhv66qzW")
g = len(a)
h = len(b)
k = len(c)
e = ''
for p in range(max(g, h)):
n = l = 187 # noqa
if p >= g:
l = ord(b[p]) # noqa
elif p >= h:
n = ord(a[p])
else:
n = ord(a[p])
l = ord(b[p]) # noqa
        e += c[(n ^ l) % k]
return e
|
da8d7695603ea897d4dff7aff3fcc22af98ed9b6
| 572,966
|
import math
def _get_distance(pos1, pos2):
""" Returns distance between two points
Params :
pos1 : tuple in (x,y) format
pos2 : tuple in (x,y) format
"""
return math.sqrt((pos2[0]-pos1[0])**2 + (pos2[1]-pos1[1])**2)
|
aac00ed10c2e627a2a67e08b09ab1b957655bd13
| 572,511
|
def relative_roughness(roughness, diameter):
"""Returns relative roughness.
:param roughness: roughness of pipe [mm] or [m] (but the same as diameter)
:param diameter: diameter of duct or pipe [mm] or [m]
"""
return round(roughness / diameter, 8)
|
da00c75f49e3ba22a6c0e574658ae2ee842e20b8
| 477,195
|
def cell_list_to_set(cell_list):
"""
Golly represents a cell list as [x1, y1, ..., xN, yN]. We
convert this to a set of the form ((x1, y1), ..., (xN, yN)).
Checking set membership is faster than checking list
membership.
"""
cell_set = set()
for i in range(0, len(cell_list), 2):
cell_set.add((cell_list[i], cell_list[i+1]))
return cell_set
|
d1aa5f30c63a45ce902e0b42d216d56795111e46
| 308,223
|
def gf_rshift(f, n, K):
"""
Efficiently divide ``f`` by ``x**n``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_rshift
>>> gf_rshift([1, 2, 3, 4, 0], 3, ZZ)
([1, 2], [3, 4, 0])
"""
if not n:
return f, []
else:
return f[:-n], f[-n:]
|
20915df69fed8c360c72d8117f725a075e1b8ff7
| 460,970
|
from typing import List
def median_of_three(arr: List[int], lo: int, hi: int) -> int:
"""Get the approximate median of three items in the subarray specified by `lo` and `hi` indices, inclusive.
Given only one distinct item, return itself.
Given two distinct items, return either item.
Given three distinct items, return the one in the middle.
"""
mid = int(lo / 2 + hi / 2)
if arr[lo] <= arr[mid] <= arr[hi] or arr[hi] <= arr[mid] <= arr[lo]:
return arr[mid]
if arr[mid] <= arr[lo] <= arr[hi] or arr[hi] <= arr[lo] <= arr[mid]:
return arr[lo]
return arr[hi]
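# Illustrative usage (added example, not part of the original snippet):
# of arr[0]=3, arr[1]=9 and arr[2]=4, the middle value is 4.
assert median_of_three([3, 9, 4], 0, 2) == 4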
|
e6e082556b657d63ed6291f5ccdeea300111aded
| 430,938
|
def convert_truthy_or_falsy_to_bool(x):
"""Convert truthy or falsy value in .ini to Python boolean."""
if x in [True, "True", "true", "1"]:
out = True
elif x in [False, "False", "false", "0"]:
out = False
elif x in [None, "None", "none"]:
out = None
else:
raise ValueError(
f"Input '{x}' is neither truthy (True, true, 1) or falsy (False, false, 0)."
)
return out
|
875083962ae680bceb67a2637cfc9850cb913211
| 83,497
|
import functools
def with_post_processing(fn, post_process_fn):
"""Generates a function that applies `post_process_fn` to outputs of `fn`."""
@functools.wraps(fn)
def apply_fn(*args, **kwargs):
return post_process_fn(*fn(*args, **kwargs))
return apply_fn
|
ac951c4e8b9edd7364144e6b3006c2adbe3e3367
| 113,167
|
def list_users(iam_conn, path_prefix):
"""List IAM users."""
users = []
marker = None
while True:
if marker:
response = iam_conn.list_users(PathPrefix=path_prefix,
Marker=marker)
else:
response = iam_conn.list_users(PathPrefix=path_prefix)
for user in response['Users']:
users.append(user)
if not response['IsTruncated']:
break
marker = response['Marker']
return users
|
1d849e928177d13cc6c300c138dc6bed01651ab0
| 672,064
|
def get_abbrev(frame, lower_case=True):
"""
Args:
frame: A Frame from a FrameNet object
lower_case: boolean value. If true, it converts the frame abbreviation to lower case
Returns:
A dictionary mapping abbreviation to Frame Element name
"""
out = dict()
for fe in frame.coreFEs + frame.peripheralFEs:
if len(fe.abbrev) > 0:
if lower_case:
out[fe.abbrev.lower()] = fe.name
else:
out[fe.abbrev] = fe.name
return out
|
29f9b8fa94b0dcc5b53d98fb35c5a29db2da55f4
| 340,652
|
import warnings
def check_model(model: str) -> str:
"""Check model is 'aces' or 'btsettl'.
Parameters
----------
model: str
Model name. Should be either 'aces' or 'btsettl'.
Returns
-------
model: str
Valid model output; either 'aces' or 'btsettl'.
"""
if model == "phoenix":
warnings.warn(
"The model name 'phoenix' is depreciated, use 'aces' instead.",
DeprecationWarning,
)
model = "aces"
if model not in ["aces", "btsettl"]:
raise ValueError(
"Model name error of '{}'. Valid choices are 'aces' and 'btsettl'".format(
model
)
)
return model
|
8ebafa33f25de2dc50340e9526494b03bf90165a
| 648,963
|
def _sec_to_usec(t_sec):
""" Converts a time in seconds to usec since the epoch.
Args:
t_sec: Time in seconds since the unix epoch.
Returns:
An integer containing the number of usec since the unix epoch.
"""
return int(t_sec * 1e6)
|
76d05f9b3be4456bbde46de2f4f7d2a705b033d6
| 546,504
|
def assign_indent_numbers(lst, inum, dic):
""" Associate keywords with their respective indentation numbers
"""
for i in lst:
dic[i] = inum
return dic
|
fc3a32692288ce3640fc41b40ee8b5dc9d472829
| 52,874
|
from typing import Tuple
def _hydra_merge_order(dotlist_entry: str) -> Tuple:
"""
The override list needs to be ordered as the last one wins in case of
duplicates: https://hydra.cc/docs/advanced/defaults_list#composition-order
This function arranges the list so that _base_ is at the top, and we
proceed with overrides from top to bottom.
"""
key = dotlist_entry.split("=")[0]
# presence of "@" => it is a _base_ override
default_list_item_indicator = key.count("@") # 1 if true, 0 otherwise
# level in hierarchy; based on number of "."
hierarchy_level = key.count(".")
# multiply by -1 to keep the default list items on top
return (-1 * default_list_item_indicator, hierarchy_level, dotlist_entry)
|
15d201577b5481c5bddee23d8ed8964190bfc041
| 482,022
|
def check_faces_in_caption(photo):
"""Checks if all faces are mentioned in the caption."""
comment = photo.comment
if photo.getfaces() and not comment:
return False
for face in photo.getfaces():
parts = face.split(" ")
# Look for the full name or just the first name.
if (comment.find(face) == -1 and
(len(parts) <= 1 or comment.find(parts[0]) == -1)):
return False
return True
|
eed03439df84a1ddd4cb7bcbb269af7c60adfcb5
| 11,998
|
import torch
def collate_fn(batch):
"""Collates a batch of padded variable length inputs"""
ids, inputs, labels, texts = zip(*batch)
labels = torch.tensor(labels)
# get sequence lengths
lengths = torch.tensor([x.shape[0] for x in inputs])
# padding
inputs = torch.nn.utils.rnn.pad_sequence(inputs, batch_first=True)
return ids, inputs, labels, texts, lengths
|
fa229077330b97b4cefd6283de3399b64824fd70
| 561,335
|
def edge_overlap(low1, high1, low2, high2):
""" Returns true if two lines have >0 overlap
>>> edge_overlap(0, 1, 1, 2)
False
>>> edge_overlap(0, 2, 1, 2)
True
>>> edge_overlap(1, 2, 1, 2)
True
>>> edge_overlap(1, 2, 0, 1)
False
>>> edge_overlap(1, 2, 0, 2)
True
>>> edge_overlap(0, 1, 0, 1)
True
"""
if low1 < low2:
return low2 < high1
else:
return low1 < high2
|
d81a77d8afd93d2909be8aa6067f871869a04b3a
| 184,366
|
def q_ntu(epsilon, c_min, temp_hot_in, temp_cold_in):
"""Computes the q value for the NTU method
Args:
        epsilon (int, float): The value of the effectiveness for the HX.
        c_min (int, float): minimum C value for NTU calculations.
        temp_hot_in (int, float): Hot side inlet temperature.
        temp_cold_in (int, float): Cold side inlet temperature.
Returns:
int, float: The value of the removal from the NTU method.
"""
return epsilon*c_min*(temp_hot_in-temp_cold_in)
|
77481e584feb5b99daaf8e6a303703983cd44759
| 114,555
|
def cls2dotname(cls):
"""
The full Python name (i.e. `pkg.subpkg.mod.cls`) of a class
"""
return '%s.%s' % (cls.__module__, cls.__name__)
|
d94feca0c97caa9651f05da7069ef4adda6b29b2
| 158,414
|
import copy
def recursive_update(d1, d2):
"""
Recursively merge two dictionaries and return a copy.
    d2 is merged into a copy of d1, so d2 wins on conflicts. Keys in d1 that
    are not present in d2 are preserved at all levels, whereas the built-in
    dict.update() only preserves keys at the top level.
"""
d3 = copy.deepcopy(d1) # Copy perhaps not needed. Test.
for k, v in d2.items():
if k in d3:
orig_v = d3[k]
if isinstance(v, dict):
                if not isinstance(orig_v, dict):
d3[k] = v
else:
t = recursive_update(d3.get(k, {}), v)
d3[k] = t
else:
d3[k] = d2[k]
else:
d3[k] = v
return d3
|
9c9d5aff2fafcda9eb20bf24e364eadb3c745dff
| 393,857
|
def dict_to_paths(dict_):
"""Convert a dict to metric paths.
>>> dict_to_paths({'foo': {'bar': 1}, 'baz': 2})
{
'foo.bar': 1,
'baz': 2,
}
"""
metrics = {}
for k, v in dict_.items():
if isinstance(v, dict):
submetrics = dict_to_paths(v)
for subk, subv in submetrics.items():
metrics['.'.join([str(k), str(subk)])] = subv
else:
metrics[k] = v
return metrics
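# Quick check matching the docstring example (illustrative addition):
assert dict_to_paths({"foo": {"bar": 1}, "baz": 2}) == {"foo.bar": 1, "baz": 2}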
|
fe3358079f28ad06e16a5227a1ea761f0495923c
| 143,598
|
import random
def getRandomColor(colorsList):
"""Returns a random color from <colorsList> or OFF"""
    if len(colorsList) > 0:
        return random.choice(colorsList)
    return 0, 0, 0
|
71ffb37f96281ee0978367ec983893dc9749d797
| 681,313
|
def get_D(uv_hat, n_channels):
"""Compute the rank 1 dictionary associated with the given uv
    Parameters
    ----------
    uv_hat : array, shape (n_atoms, n_channels + n_times_atom)
    n_channels : int
        number of channels in the original multivariate series
    Returns
    -------
    D : array, shape (n_atoms, n_channels, n_times_atom)
"""
return uv_hat[:, :n_channels, None] * uv_hat[:, None, n_channels:]
|
0c6518d119ace13179d86a1f7ff832245c4501c5
| 589,161
|
def get_tolerance_min_max(value, expected_tolerance):
"""
Get minimum and maximum tolerance range
Args:
        value(int): value to find minimum and maximum range
        expected_tolerance ('int'): Expected tolerance percentage
Returns:
minimum and maximum value of tolerance
"""
# Convert it to int
value = int(value)
# Expected tolerance %
tolerance = (value * expected_tolerance) / 100
# Minimum tolerance value
min_value = abs(value - tolerance)
# Maximum tolerance value
max_value = abs(value + tolerance)
return (min_value, max_value)
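# Illustrative usage (added example, not part of the original snippet):
assert get_tolerance_min_max(100, 10) == (90.0, 110.0)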
|
6ab5ffe1cc78e952c2a7dff01d406adc5ce30d07
| 615,415
|
def subfrom(r="A"):
"""Subtract the value from the value in a register (A by default)."""
def _inner(state, d, *args):
return getattr(state.cpu.reg, r) - d
return _inner
|
5b75da7be3772b49ce3a8c9c3e4c587a67ed1340
| 314,824
|
import copy
def _convert_tvdb_episode_metadata(imdb_id, season_metadata, episode, banners=True):
"""
converts tvdb episode metadata to format suited for kodiswift
Args:
season_metadata: dictionary of tvdb season metadata
episode: dictionary of tvdb episode metadata
banners: whether to get the poster image from metadata
Returns:
episode metadata
"""
info = copy.deepcopy(season_metadata)
info['episode'] = episode.get('episodenumber')
info['title'] = episode.get('episodename', '')
info['aired'] = episode.get('firstaired', '')
info['premiered'] = episode.get('firstaired', '')
info['rating'] = episode.get('rating', '')
info['plot'] = episode.get('overview', '')
info['plotoutline'] = episode.get('overview', '')
info['votes'] = episode.get('ratingcount', '')
info['imdb_id'] = imdb_id
if banners:
info['poster'] = episode['filename']
return info
|
00c91e0a35e4b29493d27a31c0a6f1c7d71f9cdb
| 277,748
|
def id_for_settings(val):
"""Generate an id for a settings entry."""
if val in ["DEFAULT_CFG", "USER_CFG"]:
return f"others={val}"
if val == "ansible-navigator_empty.yml":
return "empty settings file"
if val == "ansible-navigator.yml":
return "full settings file"
return val
|
f9e7de9e4330f94dbab2fac22d1df1af011cbdb5
| 346,132
|
def check_table_exists(connection, table_name):
"""
Returns a Boolean to tell if a certain table exists already.
"""
data = None
with connection.cursor() as cursor:
        # Note: table_name is interpolated directly into the query; only pass trusted values here.
        cursor.execute(
            f'SELECT * '
            f'FROM information_schema.tables '
            f"WHERE table_schema = 'public' AND table_name = '{table_name}' "
            'LIMIT 1;')
data = cursor.fetchone()
return data is not None
|
3580dbe9b84d521fb9f16da7b82f400525852e23
| 23,841
|
import time
def now(_reactor=None):
"""Get the time, using reactor.seconds or time.time"""
if _reactor and hasattr(_reactor, "seconds"):
return _reactor.seconds()
else:
return time.time()
|
185b7508081bc1e344b72a8c5ccc28f30e086011
| 415,284
|
def indexof(needle, haystack):
"""
Find an index of ``needle`` in ``haystack`` by looking for exact same item by pointer ids
vs usual ``list.index()`` which finds by object comparison.
For example::
>>> a = {}
>>> b = {}
>>> haystack = [1, a, 2, b]
>>> indexof(b, haystack)
3
>>> indexof(None, haystack)
Traceback (most recent call last):
...
ValueError: None is not in [1, {}, 2, {}]
"""
for i, item in enumerate(haystack):
if needle is item:
return i
raise ValueError("{!r} is not in {!r}".format(needle, haystack))
|
b9e5085584a7bad43a4df465aff6ab5db4273bc1
| 549,847
|