content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def __mode2offset(voxel_size, mode='c'):
"""Modes
'c': center
'b': boundary
"""
if mode == 'c':
return voxel_size / 2
elif mode == 'b':
return 0
else:
raise NotImplementedError(f"Unknown offset mode{mode}") | 9bc8ab87e0e60820290d745daf94cf420852d7e0 | 110,432 |
def word(l, h):
    """
    Recombine a low byte and a high byte into a single 16-bit word.
    """
    return l + (h << 8)
def fut_has_callback(fut, callback):
    """
    Return ``True`` if ``callback`` is among the callbacks registered on
    future ``fut``, ``False`` otherwise.
    """
    for entry in fut._callbacks or ():
        # asyncio may store callbacks as (callback, context) tuples.
        candidate = entry[0] if type(entry) is tuple and entry else entry
        if candidate == callback:
            return True
    return False
def parse_length(text):
    """ Parses a file length
    >>> parse_length(None)
    -1
    >>> parse_length('0')
    -1
    >>> parse_length('unknown')
    -1
    >>> parse_length('100')
    100
    """
    if text is None:
        return -1
    try:
        value = int(text.strip())
    except ValueError:
        return -1
    # Zero is treated as "unknown" just like unparseable input.
    return value if value else -1
import string
import random
def generate_random_string(
    length=25, allowed_chars=string.ascii_letters + string.digits
):
    """
    Generate a random string.
    :param length: The length of the desired string
    :type length: int
    :param allowed_chars: The set of allowed characters
    :type allowed_chars: str
    :returns: Random string
    :rtype: str
    """
    picks = [random.choice(allowed_chars) for _ in range(length)]
    return "".join(picks)
def K(eps):
    """ Rayleigh complex dielectric factor K = (eps - 1) / (eps + 2).

    The radar dielectric factor is |K|**2; the imaginary part of K is
    used in Rayleigh theory to derive the absorption cross section.

    Parameters
    ----------
    eps : complex
        nd array of complex relative dielectric constants

    Returns
    -------
    nd - float
        Rayleigh complex dielectric factor K
    """
    numerator = eps - 1.0
    denominator = eps + 2.0
    return numerator / denominator
def get_pos_from_nltk_tagged_sents(o) :
    """ Converts NLTK's tagged sentences into list of list of
    universal pos tags.

    Each sentence is a sequence of (token, tag) pairs; only the tag
    (index 1) of every pair is kept.
    """
    return [[token[1] for token in sentence] for sentence in o]
def modify_spec(config):
    """modify spec values with the sampled config values.

    Every entry except the reserved keys is written into the matching
    setting under ``config["spec"]``; the mutated config is returned.
    """
    reserved = ("steps", "mlflow", "spec")
    for name, sampled in config.items():
        if name not in reserved:
            config["spec"].settings[name].value = sampled
    return config
from typing import Set
def slate_teams(slate: dict) -> Set[str]:
    """Gets teams on given slate
    Args:
        slate (dict): the slate document
    Returns:
        Set[str]: the distinct team codes of all slate players
    """
    return {player['team'] for player in slate['slatePlayers']}
def calculate_field_width(grid):
    """Given a grid with width and height, what should the fixed width of fields be for printing"""
    width_digits = len(str(grid.width))
    height_digits = len(str(grid.height))
    return max(width_digits, height_digits)
def convertToURL( cPathname ):
    """Convert a Windows or Linux pathname into an OOo URL."""
    if len( cPathname ) > 1 and cPathname[1:2] == ":":
        # Turn a drive letter like "C:" into the OOo "/C|" form.
        cPathname = "/" + cPathname[0] + "|" + cPathname[2:]
    return "file://" + cPathname.replace( "\\", "/" )
def set_difference(in_this_set1, but_not_in_this_set2):
    """**set_difference(in_this_set, but_not_in_this_set)** -> Returns the elements that are in the set1 but not on the set2
    <code>
    Example:
        set_difference(set([1, 2, 3, 4]), set([2, 3]))
    Returns:
        set([1, 4])
    </code>
    """
    # Both inputs are normalized to sets, so any iterable is accepted.
    return set(in_this_set1) - set(but_not_in_this_set2)
import random
import string
def generate_test_cases(pattern, length, k):
    """
    Generates <k> test cases with text of length <length> containing <pattern>
    Args:
        pattern (str): A pattern within the text.
        length (int): The desired text length
        k (int): The number of test cases
    Returns:
        A list of test cases, i.e. strings that contain <pattern>
    """
    cases = []
    for _ in range(k):
        text = pattern
        while len(text) < length:
            # Coin flip first, then draw a letter -- keeps the RNG call
            # order identical to a seeded reference run.
            side = random.choice((0, 1))
            if side == 0:
                text = random.choice(string.ascii_lowercase) + text
            else:
                text = text + random.choice(string.ascii_lowercase)
        cases.append(text)
    return cases
def cleanup_labels(np_data, labels):
    """
    Validate (or create) one label per data column.

    If ``labels`` is None, integer labels 0..n_features-1 are generated;
    otherwise a ValueError is raised when the count does not match the
    number of columns in ``np_data``.
    """
    n_features = np_data.shape[1]
    if labels is None:
        return list(range(n_features))
    if len(labels) != n_features:
        err_text = (
            'Number of labels, {}, does not match '
            'number of data features, {}'
        ).format(len(labels), n_features)
        raise ValueError(err_text)
    return labels
def create_warning(scrub_id, file, line, description, tool, priority='Low', query='', suppress=False):
    """Build the internal dictionary representation of a static-analysis warning.

    Inputs:
        - scrub_id: Finding identifier of the format <tool><count> [string]
        - file: Absolute path to the source file referenced by the finding [string]
        - line: Line number being referenced [int]
        - description: Finding description [list of strings]
        - tool: Tool that generated the finding [string]
        - priority: Priority marking (Low/Med/High) [string]
        - query: Tool query name that generated the finding [string]
        - suppress: Has this finding been suppressed? [bool]
    Outputs:
        - Dictionary of warning data [dict]
    """
    return {
        'id': scrub_id,
        'file': file,
        'line': line,
        'description': description,
        'tool': tool,
        'priority': priority,
        'query': query,
        'suppress': suppress,
    }
def alcohol_by_volume_alternative(og, fg):
    """
    Alcohol by Volume Alternative Calculation
    :param float og: Original Gravity
    :param float fg: Final Gravity
    :return: Alcohol by Volume decimal percentage
    :rtype: float

    Uses the Ritchie Products / Hall variant of Balling's formula:
    ABV = 76.08 * (og - fg) / (1.775 - og) * (fg / 0.794)
    which gives Alcohol by Weight and converts to Alcohol by Volume via
    the ratio of final gravity to the density of ethanol. Reports higher
    ABV than the simple formula for high-gravity beers.

    Source:
    * http://www.brewersfriend.com/2011/06/16/alcohol-by-volume-calculator-updated/
    """
    # Density listed (possibly incorrectly) from Zymergy Mag
    ethanol_density = 0.794
    abw = 76.08 * (og - fg) / (1.775 - og)
    return abw * (fg / ethanol_density) / 100.0
from typing import List
import calendar
def get_days_of_week(first_day: int = calendar.MONDAY) -> List[str]:
    """Returns a list of ordered days, starting with the specified day.

    NOTE: mutates the module-wide ``calendar`` first-weekday setting.
    """
    calendar.setfirstweekday(first_day)
    start = calendar.firstweekday()
    names = list(calendar.day_name)
    return names[start:] + names[:start]
import re
def parse_host_interfaces(hifs):
    """
    parse host interfaces
    Support 3 formats:
    1. Legacy format (non multi-dut): vlan_index
    2. new format (multi-dut): dut_index.vlan_index,dut_index.vlan_index
    3. new format (multi-dut): dut_index.vlan_index@ptf_port_index,dut_index.vlan_index@ptf_port_index
    """
    if isinstance(hifs, int):
        # Legacy single-DUT form: a bare vlan index belongs to dut 0.
        return [(0, int(hifs))]
    parsed = []
    for token in hifs.split(','):
        parts = re.split(r'\.|@', token.strip())
        # (dut_index, vlan_index) or (dut_index, vlan_index, ptf_port_index)
        parsed.append(tuple(int(p) for p in parts))
    return parsed
def default_message() -> str:
    """
    Returns a default message
    """
    return ("Seems I do not understand since i only speak 0 and 1.\n"
            "Just in case, if you need any help, just type 'python main.py [KEYWORD]' on your terminal.")
def getRawInput(display):
    """
    Wrapper around raw_input; put into separate function so that it
    can be easily mocked for tests.

    :param display: prompt string shown to the user on stdin.
    :returns: the line read from stdin, without the trailing newline.
    """
    # Python 3 name is ``input``; the docstring's "raw_input" is the
    # historical Python 2 name for the same behavior.
    return input(display)
def getcheckinterval(space):
    """Return the current check interval; see setcheckinterval().

    :param space: the interpreter object space (PyPy-style); the value is
        read from ``space.sys.checkinterval`` and wrapped into an
        application-level object via ``space.wrap``.
    """
    return space.wrap(space.sys.checkinterval)
def wooqi(request):
    """
    Check whether the test run was started with wooqi.

    Reads the ``--wooqi`` command-line option from the pytest config;
    truthy when the option was supplied.
    """
    return request.config.getoption("--wooqi")
def lettergrade(value):
    """
    Maps grade point average to letter grade.
    """
    # Exclusive lower bounds, checked from best grade to worst.
    scale = (
        (3.85, 'A'), (3.5, 'A-'), (3.15, 'B+'), (2.85, 'B'), (2.5, 'B-'),
        (2.15, 'C+'), (1.85, 'C'), (1.5, 'C-'), (1.15, 'D+'), (0.85, 'D'),
        (0.5, 'D-'),
    )
    for cutoff, grade in scale:
        if value > cutoff:
            return grade
    return 'F'
from typing import Union
import math
def _float_to_json(value) -> Union[None, str, float]:
"""Coerce 'value' to an JSON-compatible representation."""
if value is None:
return None
if isinstance(value, str):
value = float(value)
return str(value) if (math.isnan(value) or math.isinf(value)) else float(value) | b1ef0c7af5754d56435ac2f7b8116f03dd4502d9 | 110,504 |
def collatz_sequence(num):
    """collatz sequence - start with positive integer. If it's even, next term
    is n/2, otherwise (3n + 1). 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1.
    Always ends in 1. This will return [num, n2, n3, n4, ... , 1]

    :param num: positive integer starting value.
    :returns: the full sequence down to 1, including the starting value.
    """
    seq = [num]
    current = num
    while current != 1:
        if current % 2 == 0:
            # Bug fix: int(current/2) went through float division and
            # silently corrupted values above 2**53; floor division keeps
            # exact integer arithmetic for arbitrarily large inputs.
            current = current // 2
        else:
            current = 3 * current + 1
        seq.append(current)
    return seq
def make_oh_cols_names(cat_cols_names, categories):
    """
    Build names for OneHot encoded categories.

    Produces one "<column>_<category>" name per (column, category) pair,
    with the category value coerced to int.
    """
    assert(len(cat_cols_names) == len(categories))
    return [
        "{}_{}".format(name, int(cat))
        for name, cats in zip(cat_cols_names, categories)
        for cat in cats
    ]
def szuszik(a, b):
    """
    Szudzik's pairing algorithm maps a pair of natural numbers to a
    unique natural number:
    $\\mathcal{N}\times\\mathcal{N}\\mapsto N$.
    Args:
        a: int
            first integer
        b: int
            second integer
    Returns:
        int
            Szudzik pairing result
    """
    # a != max(a, b) is equivalent to a < b for naturals.
    if a < b:
        return b ** 2 + a
    return a ** 2 + a + b
import urllib.request, urllib.error, urllib.parse, socket
def internet_access(timeout=1):
    """Return True if internet is on, else False."""
    try:
        # Probe a site that should respond quickly; any HTTP-level
        # answer within the timeout counts as connectivity.
        urllib.request.urlopen('http://vg.no', timeout=timeout)
    except (urllib.error.URLError, socket.timeout):
        return False
    return True
def intersect_lines(p1, p2, p3, p4):
    """
    Return the intersection point of the two infinite lines defined by
    (p1, p2) and (p3, p4).

    Args:
        p1, p2, p3, p4: (float, float)
            Points as (x, y) coordinates; p1-p2 define line A, p3-p4 line B.

    Returns:
        (float, float) or None
            Intersection coordinates, or None when the lines are parallel.
    """
    (x1, y1), (x2, y2) = p1, p2
    (x3, y3), (x4, y4) = p3, p4
    # Each line in implicit form: dy*x + dx*y = sq (cross-product terms).
    a_dx, a_dy = x2 - x1, y1 - y2
    a_sq = x2 * y1 - x1 * y2
    b_dx, b_dy = x4 - x3, y3 - y4
    b_sq = x4 * y3 - x3 * y4
    denom = a_dy * b_dx - a_dx * b_dy
    if denom == 0:
        return None
    num_x = a_sq * b_dx - a_dx * b_sq
    num_y = a_dy * b_sq - a_sq * b_dy
    return num_x / denom, num_y / denom
from typing import Tuple
def get_data_rules(config: dict) -> Tuple[list, list, list, list]:
    """
    Sort configured columns into numeric, date, categorical and drop
    buckets based on their configured type string.

    Args:
        config (dict): column name -> one of "numeric"/"date"/
            "categorical"/"drop" (other values are ignored).

    Returns:
        Tuple[list, list, list, list]:
            Columns that are numeric, dates, categoricals, and drops.
    """
    buckets = {"numeric": [], "date": [], "categorical": [], "drop": []}
    for column, kind in config.items():
        if kind in buckets:
            buckets[kind].append(column)
    return (buckets["numeric"], buckets["date"],
            buckets["categorical"], buckets["drop"])
import itertools
def read_metadata(filename, skip, treename):
    """Read the metadata CSV file and return matching rows as a dictionary.

    :param filename: path to the comma-separated metadata file.
    :param skip: number of leading header lines to skip.
    :param treename: only rows whose first column equals this are kept.
    :returns: dict keyed by column 4 (presumably a sequence/accession id --
        TODO confirm against the real file), mapping to a dict of the
        remaining columns.
    """
    temptreedict = {}
    with open(filename, 'r') as f:
        # Skip the first `skip` lines; iterate over the remainder lazily.
        result = itertools.islice(f, skip, None)
        for line in result:
            # NOTE(review): naive split(",") breaks on quoted fields
            # containing commas -- assumes the file never quotes fields.
            ll = line.strip().split(",")
            if ll[0] == treename:
                # Column layout: 0=tree, 1=tax, 2=genome, 3=protein, 4=key,
                # 5=fullname, 6=genus, 7=host_phylum, 8=host_order,
                # 9=host_class, 10=host_common_name, 11=new.
                temptreedict[ll[4]] = {'tree':ll[0], 'tax':ll[1], 'genome':ll[2],
                                       'protein':ll[3], 'fullname':ll[5],
                                       'genus':ll[6],
                                       'host_phylum':ll[7], 'host_order':ll[8],
                                       'host_class':ll[9],'host_common_name':ll[10], 'new':ll[11]}
    return temptreedict
from typing import List
def removeStringEndings( originalText:str, endingsList:List[str] ) -> str:
"""
Go through the given list of endings (in order)
and remove any endings from the end of the string.
"""
newText = originalText
for ending in endingsList:
if newText.endswith( ending ):
newText = newText[:-len(ending)]
return newText | b1452f656547277e642cd84cd47256fda736e43a | 110,529 |
from pathlib import Path
def _load(exclude_past_answers=False):
    """
    Load the full corpus from file,

    :param exclude_past_answers: when True, remove every word listed in
        the past-answers file from the returned corpus.
    :returns: list of stripped words, one per corpus line.
    """
    # NOTE(review): Path(__name__) resolves relative to the *module name*
    # and the current working directory, not __file__ -- presumably this
    # only works when launched from the project root; verify.
    pkg = Path(__name__).resolve()
    path_data = pkg.parent / "mordle" / "data" / "wordle-corpus.txt"
    with open(path_data, "r") as f:
        lines = f.readlines()
    result = [line.strip() for line in lines]
    if not exclude_past_answers:
        return result
    path_answers = pkg.parent / "mordle" / "data" / "past-answers.txt"
    with open(path_answers, "r") as f:
        past = f.readlines()
    # list.remove raises ValueError if a past answer is missing from the
    # corpus -- assumes past-answers is always a subset of the corpus.
    for word in past:
        result.remove(word.strip())
    return result
def _format_files(files):
"""Format the Mordor files data for display."""
return [
(f"({file['file_type']}) {file['file_path'].split('/')[-1]}", file["file_path"])
for file in files
] | 540993a54debe03d5aad298581c6df0c74fbc80e | 110,532 |
def valid_event(event: list):
    """
    Filter out some events that don't provide meaningful information.
    The HTML PBP has 8 columns:
    ['#', 'Per', 'Str', 'Time:El', 'Event', 'Description', 'AWAY On Ice', 'HOME On Ice']
    :param event: list of stuff in pbp
    :return: boolean
    """
    uninformative = {'GOFF', 'EGT', 'PGSTR', 'PGEND', 'ANTHEM'}
    return event[0] != '#' and event[4] not in uninformative
def remove_duplicates(package_leaflets):
    """
    Keep only leaflets with unique product_name.

    The first leaflet seen for each product_name wins; later leaflets
    with the same name are counted and dropped.
    :param package_leaflets: array of processed leaflets
    :return: array of unique leaflets
    """
    unique_leaflets = []
    seen_names = set()  # O(1) membership checks
    duplicate_count = 0
    for leaflet in package_leaflets:
        if leaflet.product_name in seen_names:
            duplicate_count += 1
            continue
        seen_names.add(leaflet.product_name)
        unique_leaflets.append(leaflet)
    print("Number of *unique* leaflets: ", len(unique_leaflets))
    print("Number of *duplicate* leaflets (by product names): ", duplicate_count)
    return unique_leaflets
import random
def create_nodes(n=300, seed=None):
    """Sample $n$ nodes uniformly at random in the unit square, each with
    a random time drawn uniformly from [0, n].

    # Arguments
        n: number of nodes to be sampled
        seed: seed for the randomness
    # Result
        lists ts, xs, ys of time-, x- and y-coordinates,
        sorted increasingly in time.
    """
    random.seed(seed)
    # Draw xs, ys, ts in that order to keep the RNG stream stable.
    samples = [[random.random() for _ in range(n)] for _ in range(3)]
    xs, ys, ts = samples
    ts = [n * t for t in ts]
    nodes = sorted(zip(ts, xs, ys))
    return list(zip(*nodes))
def set_bits_high(bits_to_set_high: int, bitwise_data: int):
    """
    Set high every bit of ``bitwise_data`` that is high in
    ``bits_to_set_high``.
    :param bits_to_set_high: mask of bits to force high
    :param bitwise_data: the value to modify
    :return: ``bitwise_data`` with the masked bits set high
    """
    return bitwise_data | bits_to_set_high
import re
def get_dtm_from_backup_name(fname):
    """
    Return the "<date> <time>" string embedded in an automated backup
    filename of the form ``db-v<version>_<date>_<time>...dump``.
    """
    match = re.search(r"^db\-v[^_]+_(?P<dtm>[\d\-_]+).*\.dump$", fname)
    if not match:
        raise ValueError(
            "Tried to get date component of unparsed filename: {}".format(fname)
        )
    parts = match.group("dtm").split("_")
    # Times are stored with dashes; convert to the usual colon form.
    return "{date} {time}".format(date=parts[0], time=parts[1].replace("-", ":"))
def annotations_with_overlaps_with_clip(df, begin, end):
    """Select annotation rows that overlap the [begin, end) segment.

    Args:
        df: A dataframe containing a Raven annotation file
        begin: The begin time of the current segment (unit: seconds)
        end: The end time of the current segment (unit: seconds)
    Returns:
        A dataframe of annotations whose begin falls in [begin, end) or
        whose end falls in (begin, end].
    """
    starts_inside = (df["begin time (s)"] >= begin) & (df["begin time (s)"] < end)
    ends_inside = (df["end time (s)"] > begin) & (df["end time (s)"] <= end)
    return df[starts_inside | ends_inside]
def all_bits_for_opcodes(instructions):
    """ Returns bit numbers occupied by any opcode of at least one of the
    instructions.

    :param instructions: sequence of objects exposing an ``opcode_bits`` set.
    :returns: union of all ``opcode_bits``; an empty set for an empty
        sequence (previously raised IndexError).
    """
    if not instructions:
        return set()
    head, *tail = instructions
    return head.opcode_bits.union(*[i.opcode_bits for i in tail])
import struct
def get_uint16(s: bytes) -> int:
    """
    Get unsigned int16 value from bytes
    :param s: bytes array containing exactly one unsigned int16 value
        (struct.error is raised for any other length)
    :return: unsigned int16 value from bytes array
    """
    # NOTE(review): 'H' uses *native* byte order; for data coming off a
    # wire an explicit '<H' or '>H' would be unambiguous -- confirm the
    # intended endianness before changing.
    return struct.unpack('H', s)[0]
def applies(inverter, section):
    """Returns whether the inverter applies for given configuration section.

    A section with no (or an empty) 'IP address' applies to every
    inverter; otherwise the configured address must match the inverter's.
    """
    if 'IP address' in section and section['IP address']:
        return section['IP address'] == inverter.addr[0]
    return True
def _content_length(line):
"""Extract the content length from an input line."""
if line.startswith(b'Content-Length: '):
_, value = line.split(b'Content-Length: ')
value = value.strip()
try:
return int(value)
except ValueError:
raise ValueError("Invalid Content-Length header: {}".format(value))
return None | bbb03c475fa71d3b70bbfd5ce15af7de502b18a2 | 110,563 |
def add_ordinal(num):
    """Returns a number with ordinal suffix, e.g., 1st, 2nd, 3rd.
    Args:
        num (int): a number
    Returns:
        (str): a number with the ordinal suffix
    Examples:
        >>> add_ordinal(11) == '11th'
        True
        >>> add_ordinal(132) == '132nd'
        True
    """
    # 11/12/13 (and 111, 212, ...) always take "th".
    if num % 100 in (11, 12, 13):
        suffix = "th"
    else:
        suffix = {1: "st", 2: "nd", 3: "rd"}.get(num % 10, "th")
    return "%i%s" % (num, suffix)
def GEOMETRIC_PROPERTIES_0(COORDINATES, ELEMENTS, SECTIONS, I_ELEMENT, AUX_2):
    """
    Assign the geometric properties of frame element I_ELEMENT
    (TYPE_ELEMENT = 0).

    Input:
        COORDINATES : per-node x (col 0) and y (col 1) | Numpy array
        ELEMENTS    : element table; cols 0-1 are node ids, col AUX_2
                      holds the section id | Numpy array
        SECTIONS    : per-section Area (0), Inertia 1 (1),
                      Inertia frame bar (2) | Numpy array
        I_ELEMENT   : element index in the loop | Integer
        AUX_2       : column of ELEMENTS containing the section id | Integer
    Output:
        Py list [Length, Sine, Cosine, Area, Inertia aux, Inertia frame]
    """
    node_a = int(ELEMENTS[I_ELEMENT, 0])
    node_b = int(ELEMENTS[I_ELEMENT, 1])
    dx = COORDINATES[node_b, 0] - COORDINATES[node_a, 0]
    dy = COORDINATES[node_b, 1] - COORDINATES[node_a, 1]
    length = (dx ** 2 + dy ** 2) ** 0.50
    section_id = int(ELEMENTS[I_ELEMENT, AUX_2])
    return [length, dy / length, dx / length,
            SECTIONS[section_id, 0], SECTIONS[section_id, 1],
            SECTIONS[section_id, 2]]
def get_column(name: str, worksheet) -> int:
    """Search the first worksheet row for a cell with the given text.

    Args:
        name: The text to search for
        worksheet: openpyxl-style worksheet exposing ``iter_rows``
    Returns:
        The 1-based column index of the matching cell, or 0 when no
        cell in the header row matches.
    """
    for row in worksheet.iter_rows(min_row=1, max_row=1, min_col=1):
        for cell in row:
            if cell.value == name:
                return cell.col_idx
    return 0
import string
def sanitize_word(word):
    """
    Sanitize unicode word for use as filename
    Ascii letters and underscore are kept unchanged.
    Other characters are replaced with "-u{charcode}-" strings.
    """
    keep = set(string.ascii_letters + '_')
    pieces = []
    for char in word:
        pieces.append(char if char in keep else '-u' + str(ord(char)) + '-')
    return ''.join(pieces)
import re
def add_hl_to_url(url):
    """ A function that adds or change language query to english for the given url. """
    # Why: YouTube serves the same page in many languages, which breaks
    # scraping -- forcing hl=en pins the page language.
    # NOTE: used before is_playlist_url() / is_video_url(); a leading
    # "?hl=..." parameter is not stripped, only "&hl=..." forms.
    stripped = re.sub(r'&hl=[^&]+', '', url)
    return stripped + '&hl=en'
from typing import Callable
from typing import Any
import click
def _get(func: Callable, pred: Callable, no_error: bool = False) -> Any:
"""
Retrieve the first instance from `func` that matches `pred`.
"""
value = next((elm for elm in func() if pred(elm)), None)
if value or no_error:
return value
name = func.__name__.replace("list_", "").replace("_", "-").rstrip("s")
raise click.ClickException("invalid {}".format(name)) | 7823ff5f7e288b844cd2d73462d3a09f3caafa51 | 110,574 |
from typing import Optional
def try_get_only_key_from_collection(collection: Optional[dict]) -> Optional[str]:
    """
    Return the single key of ``collection``.

    3 scenarios:
    1 - dict is None or empty -> return None
    2 - dict has exactly one item -> return its key
    3 - dict has more than one item -> raise ValueError
    """
    if not collection:
        return None
    if len(collection) > 1:
        raise ValueError(f'{collection.keys()} has more than 1 key, cannot decide which to pick')
    return next(iter(collection))
import math
def get_width(length, full_width):
    """
    Returns the number of bits used by Minecraft to represent indices
    into a list of the given length.

    The palette index width is clamped to at least 4 bits, and falls
    back to ``full_width`` once more than 8 bits would be needed.
    """
    bits = int(math.ceil(math.log(length, 2)))
    if bits > 8:
        return full_width
    return max(bits, 4)
def read_docs_file(filepath):
    """Read a docs file.
    Arguments
        filepath -- path to the docs file to read
    Returns
        the file contents as a (UTF-8 decoded) string
    """
    with open(filepath, encoding='utf-8') as docs_file:
        contents = docs_file.read()
    return contents
import socket
import time
def wait_for(ip, port, timeout, _type='tcp'):
    """Wait for service by attempting socket connection to a tuple addr pair.
    :param ip: str. an IP address to test if it's up
    :param port: int. an associated port to test if a server is up
    :param timeout: int. number of attempts; each failed attempt waits
        up to 2s on the socket plus a 1s sleep
    :param _type: can be either tcp or udp
    :returns: bool. True if a connection was made, False otherwise
    :raises AttributeError: for an unknown _type
    """
    if _type == 'tcp':
        sock_type = socket.SOCK_STREAM
    elif _type == 'udp':
        sock_type = socket.SOCK_DGRAM
    else:
        raise AttributeError('Invalid socket type specified: {}'.format(_type))
    for _ in range(timeout):
        # Bug fix: a socket object cannot be reliably re-connected after a
        # failed connect (or after close()); create a fresh one per try
        # and always close it to avoid leaking descriptors.
        sock = socket.socket(socket.AF_INET, sock_type)
        sock.settimeout(2)
        try:
            sock.connect((ip, int(port)))
            return True
        except socket.error:
            time.sleep(1)
        finally:
            sock.close()
    return False
def checkInputDir(inputDir):
    """
    Resolve the default value for inputDir.
    Args:
        inputDir: path to the directory where the binaries are stored,
            or None to use the default
    Returns:
        "input-binaries" when inputDir is None, otherwise inputDir unchanged
    """
    return "input-binaries" if inputDir is None else inputDir
def _h5_moments_to_dict(h5obj, fmd):
    """
    Convert gamic HDF5 moment Dataset and attached Attributes to a dictionary.

    :param h5obj: h5py Dataset holding the raw (integer-scaled) moment data,
        with dyn_range_min/dyn_range_max/moment/format attributes.
    :param fmd: metadata helper providing get_metadata('default')
        (presumably a Py-ART FileMetadata -- TODO confirm).
    :returns: dict with range metadata and the linearly rescaled 'data'.
    """
    d = fmd.get_metadata('default')
    d['valid_min'] = h5obj.attrs['dyn_range_min']
    d['valid_max'] = h5obj.attrs['dyn_range_max']
    d['standard_name'] = h5obj.attrs['moment']
    d['long_name'] = h5obj.attrs['moment']
    #d['units'] =
    # UV8 moments are stored as 8-bit counts (256 levels); anything else
    # is assumed to be 16-bit (65536 levels).
    if h5obj.attrs['format'] == 'UV8':
        div = 256.0
    else:
        div = 65536.0
    # Linearly map raw counts onto the [valid_min, valid_max] range.
    d['data'] = (d['valid_min'] + h5obj[...] *
                 (d['valid_max'] - d['valid_min']) / div)
    return d
def parsing_name_file(filename):
    """
    Parse the name of the data file to extract l and set number.

    Inferred filename pattern: "...t<set>l0<digits>.<ext>", e.g.
    "set3l05.dat" -> (3, 0.5) -- TODO confirm against real filenames;
    the split-based parsing assumes exactly this shape.
    """
    str1 = filename.split("t")      # take everything after the first 't'
    str2 = str1[1].split("l")       # -> ["<set>", "0<digits>.<ext>"]
    str3 = str2[1].split(".")
    str4 = str3[0][1:]              # drop the leading '0' placeholder digit
    str5 = '0.'+ str4               # rebuild the decimal, e.g. '0.5'
    nset = int(str2[0])
    nl = float(str5)
    return nset, nl
def get_filelines(filename):
    """ Returns a list of all stripped non-empty lines in filename in order
    """
    print("Parsing SSM file %s" % filename)
    with open(filename, 'r') as read_file:
        stripped = (line.strip() for line in read_file)
        return [line for line in stripped if line]
def jac_shape(n_ant, n_chan, n_dir):
    """Calculate the jacobian matrix shape based
    on the given data dimensions."""
    n_rows = n_chan * n_ant * (n_ant - 1)
    n_cols = 2 * n_chan * n_dir * n_ant
    return (n_rows, n_cols)
def rsc_to_geotransform(rsc_data, half_shift=True):
    """Convert the data in an .rsc file to a 6 element geotransform for GDAL.

    See https://gdal.org/user/raster_data_model.html#affine-geotransform:
        Xgeo = GT(0) + Xpixel*GT(1) + Yline*GT(2)
        Ygeo = GT(3) + Xpixel*GT(4) + Yline*GT(5)
    The rotation terms GT(2)/GT(4) are 0 for north-up rasters.

    NOTE: ``half_shift`` exists because GDAL references a pixel by its
    top-left corner, while .rsc files for SAR focusing often reference
    the pixel center, so the origin moves back by half a pixel.
    """
    x_step = rsc_data["x_step"]
    y_step = rsc_data["y_step"]
    x_origin = rsc_data["x_first"]
    y_origin = rsc_data["y_first"]
    if half_shift:
        x_origin -= 0.5 * x_step
        y_origin -= 0.5 * y_step
    return (x_origin, x_step, 0.0, y_origin, 0.0, y_step)
def expected_future_worth(x, percent_return=0.07, years=20, annual_payment=None):
    """
    Estimate the future value of a current investment via compound
    interest, optionally including a constant annual contribution
    (annuity future-value formula). Prints a summary and returns the
    value rounded to cents.

    :param x: the present value of your account
    :param percent_return: annual rate of return (market average ~0.07)
    :param years: the number of years over which to compound interest
    :param annual_payment: optional constant annual contribution
    :return type: float
    """
    rate = percent_return
    n_years = years
    future = x * (1 + rate) ** n_years
    if annual_payment is None:
        annual_payment = 0
    else:
        future += (annual_payment / rate) * (((1 + rate) ** n_years) - 1)
    print('\n'.join(['', 'With annual contribution of ${0:,.2f} and',
                     '\t{1:.2}% rate of return,',
                     '\texpected account value in {2} years: ${3:,.2f}',
                     '']).format(annual_payment, rate*100, n_years, future))
    return round(future, 2)
def resolve_attr(attr_name, dct, mro):
    """ Resolve an attribute from a MRO.

    The most-derived class body ``dct`` is searched first, then each
    base's ``__dict__`` in MRO order. Returns None if the attribute is
    not present anywhere.

    Parameters:
        attr_name: The name of the attribute to resolve
        dct: The dictionary of the most derived class body
        mro: The C3 linearised list of bases
    """
    if attr_name in dct:
        return dct[attr_name]
    for base in mro:
        if attr_name in base.__dict__:
            return base.__dict__[attr_name]
    return None
def union_favourites_and_cards(favourites, cards):
    """
    Annotate a queryset of cards with the user's favourites.

    Each card in ``cards.all()`` gets a ``favourite`` boolean attribute
    marking whether it appears among the user's favourites.
    """
    # Set membership is O(1), unlike scanning the favourites per card.
    favourite_ids = {favour.card.id for favour in favourites}
    resulting_cards = cards.all()
    for card in resulting_cards:
        card.favourite = card.id in favourite_ids
    return resulting_cards
def get_latitude_direction(latitude_degrees):
    """
    Returns the direction for the given latitude degrees.
    :param latitude_degrees: The degrees (not minutes) as an integer.
    :return: "N" for positive, "S" for negative, "" for zero.
    :raises ValueError: when no value is provided.
    """
    if latitude_degrees is None:
        raise ValueError('No value provided for <latitude_degrees>')
    if latitude_degrees == 0:
        return ""
    return "N" if latitude_degrees > 0 else "S"
def mergeInterval(intervals, h=1):
    """
    Merge intervals that are less or equal than "h" hours away from each other
    (e.g., for h=1, intervals [1,3] and [4,5] need to be merged into [1,5]).

    NOTE: the input sublists may be mutated in place while merging.
    """
    merged = []
    active = None
    for interval in intervals:
        if active is None:
            active = interval
        elif interval[0] - active[1] <= h:
            # Close enough: extend the active interval in place.
            active[1] = interval[1]
        else:
            merged.append(active)
            active = interval
    if active is not None:
        merged.append(active)
    return merged
def to_metric(amount, unit):
"""
Used to convert common (amount, unit) pairs to metric versions, e.g.,
(5, 'GALLON') -> (18.9270589, 'LITER')
"""
if unit == 'POUND':
kgs = (amount * 0.45359237, 'KILOGRAM')
if kgs[0] < 1.0:
return (kgs[0] * 1000, 'GRAM')
return kgs
if unit == 'OUNCE':
return (amount * 28.3495231, 'GRAM')
if unit == 'GALLON':
return (amount * 3.78541178, 'LITER')
if unit == 'TEASPOON':
return (amount * 0.00492892159, 'LITER')
if unit == 'TABLESPOON':
return (amount * 0.0147867648, 'LITER')
return (amount, unit) | 02012f202f4f7f29fabd18a855fc0af1ec08ac76 | 110,615 |
def find_dict_if_matched_key_val(dict_tmp, key, value):
    """
    Check whether a given key/value pair is present in a dictionary.

    Parameters
    ----------
    dict_tmp: (dict) the dictionary to be tested
    key: (str) a key string to look for in dict_tmp
    value: (object) any python object

    Returns
    -------
    bool: True if the key/value pair has been found in dict_tmp
    """
    # Guard clause: a missing key can never match.
    if key not in dict_tmp:
        return False
    if dict_tmp[key] == value:
        return True
    return False
import typing
def compute_prefixes(strings: typing.Iterable[str]):
    """Given an iterable of strings, returns a dict of unique shorter prefixes.
    Note in case of collision, the string is not included in the results, eg.:
    $> compute_prefixes(["foo", "foobar"])
    {"foobar": "foob"}
    """
    # Maps prefix -> string currently "owning" that prefix. A value of None
    # marks a prefix that has collided and can no longer uniquely identify
    # a single string.
    prefixes = {}
    for s in strings:
        # Try progressively longer prefixes of s until one is free.
        for hashlen in range(1, len(s) + 1):
            prefix = s[:hashlen]
            if prefix not in prefixes:
                # First claimant of this prefix owns it (until a collision).
                prefixes[prefix] = s
                break
            if not prefixes[prefix]:
                # this prefix is already shorter than another unique prefix
                continue
            # Collision with an existing owner: push that owner one
            # character deeper, then mark this prefix as dead.
            # {prefix: string} -> {prefix: None, longer_prefix: string}
            # for parallelization, these write operations must be atomic as a whole
            prefixes[prefixes[prefix][: hashlen + 1]] = prefixes[prefix]
            prefixes[prefix] = None
    # Invert to {string: shortest_unique_prefix}; None values are dead
    # (collided) prefixes and are dropped, which is why a string that is a
    # full prefix of another string ends up excluded from the result.
    return {v: k for k, v in prefixes.items() if v}
def log_line() -> str:
    """
    Return a fixed, reusable Apache-style access-log line for tests/demos.
    """
    return (
        '77.179.66.156 - - [07/Dec/2016:10:34:43 +0100] "GET /favicon.ico HTTP/1.1" '
        '404 571 "http://localhost:8080/" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) '
        'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36"'
    )
import fnmatch
def _expand_wildcard_method_list_items(base_class_methods, test_methods):
    """Expand wildcard entries in a test/skip method list.

    Every entry in ``test_methods`` is treated as an ``fnmatch`` pattern and
    replaced by the matching names from ``base_class_methods``; duplicates
    introduced by the expansion are removed while preserving order.

    Note: ``test_methods`` is also mutated in place (patterns removed,
    expanded matches appended), mirroring the original behavior.

    :param base_class_methods: List of all test methods in the test case class
        (i.e. result of loader.getTestCaseNames())
    :param test_methods: List of test case method names (or wildcard strings)
    :return: The deduplicated expanded list, or ``None`` if no pattern
        matched any test method.
    """
    patterns = list(test_methods)
    matched = []
    for pattern in patterns:
        # Collect every base-class method matching this pattern, then drop
        # the pattern itself from the caller's list.
        matched.extend(fnmatch.filter(base_class_methods, pattern))
        test_methods.remove(pattern)
    test_methods.extend(matched)
    # dict.fromkeys removes duplicates while keeping first-seen order.
    deduplicated = list(dict.fromkeys(test_methods))
    return deduplicated if deduplicated else None
def count(coll):
    """
    Returns the number of items in the collection. Also works on strings.
    """
    # Sized collections answer in O(1); anything else is consumed lazily.
    if hasattr(coll, "__len__"):
        return len(coll)
    return sum(1 for _ in coll)
def is_packed_layout(layout):
    """Check if layout is a packed layout.

    Packed layouts contain lowercase 'n' and 'c' markers (e.g. "NCHW4c");
    the plain "NCHW" layout is explicitly not packed.
    """
    return layout != "NCHW" and "n" in layout and "c" in layout
def MaximumElementSize( C_B_Array, C_B_eff_Array, Frequency ):
    """
    Compute wavelengths and a maximum element size (quarter of the effective
    wavelength) for each frequency.
    Developer: Christoph Winter ( christoph.winter@tum.de )
    :param C_B_Array: 1D list of floats
    :param C_B_eff_Array: 1D list of floats
    :param Frequency: 1D list of floats
    :return: dictionary with the following keys: Lamda (1D list of floats),
             Lamda_Eff (1D list of floats), ElementSize (1D list of floats)
    """
    lamda = []
    lamda_eff = []
    element_size = []
    # wavelength = speed / frequency; element size is a quarter of the
    # effective wavelength.
    for c_b, c_b_eff, freq in zip( C_B_Array, C_B_eff_Array, Frequency ):
        lamda.append( c_b / freq )
        effective = c_b_eff / freq
        lamda_eff.append( effective )
        element_size.append( 0.25 * effective )
    return { "Lamda" : lamda,
             "Lamda_Eff" : lamda_eff,
             "ElementSize" : element_size }
def is_camel_case_with_acronyms(s: str):
    """
    Check whether the string is Camel Case (capitalised acronyms allowed).

    :param s: string to check
    :return: True if the name looks cool as a Class name.
    """
    # Mixed case rules out all-lower, all-upper, and the empty string.
    has_mixed_case = s != s.lower() and s != s.upper()
    starts_uppercase = bool(s) and s[0] == s[0].upper()
    return has_mixed_case and "_" not in s and starts_uppercase
def raw_tmp(raw_clean_detrend):
    """Return an unmodified copy of the `raw_clean_detrend` fixture.

    This is run once per NoisyChannels test, so that modifications made to
    `raw_tmp` inside one test cannot leak into any other test.
    """
    return raw_clean_detrend.copy()
def ana_dyads(connectivity):
    """
    Calculate proportion symmetric dyads (PTCMUT), asymmetric dyads (PTCASY) and mutuality index (RHO2).

    :param connectivity: square directed adjacency matrix, indexable as
        ``connectivity[i][j]`` and exposing ``.shape`` (e.g. a numpy array);
        an entry of 1 marks a connection i -> j
    :return: tuple ``(PTCMUT, PTCASY, RHO2)``
    # NOTE(review): raises ZeroDivisionError when the matrix has no
    # connections at all (total_d == 0) -- presumably callers guarantee a
    # non-empty network; confirm.
    """
    nodes = connectivity.shape[0]
    # mutual_d counts reciprocated pairs; each is seen twice, hence += 0.5.
    mutual_d = 0
    asym_d = 0
    out_degrees = []
    # count out_degree connections per individual and mutual and asymmetrix connections whole network
    for i in range(connectivity.shape[0]) :
        out_degree = 0
        for j in range(connectivity.shape[1]) :
            if connectivity[i][j] == 1 :
                out_degree += 1
                if connectivity[j][i] == 1 :
                    mutual_d += 0.5
                elif connectivity[j][i] != 1 :
                    asym_d += 1
        out_degrees.append(out_degree)
    total_d = mutual_d + asym_d
    # calculate proportion symmetric dyads (PTCMUT) and asymmetric dyads (PTCASY)
    PTCMUT = mutual_d / total_d
    PTCASY = asym_d / total_d
    # calculate mutuality index (RHO2) (according to Katz and Powell’s (1955))
    sum_out_degrees = sum(out_degrees)
    mean_out_degrees = sum_out_degrees / len(out_degrees)
    # Sum of squared deviations of out-degrees from their mean.
    sum_squares_out = 0
    for i in range(len(out_degrees)) :
        sum_squares_out += (mean_out_degrees - out_degrees[i]) ** 2
    RHO2 = (2 * (nodes - 1) ** 2 * mutual_d - sum_out_degrees ** 2 + sum_squares_out) / (
            sum_out_degrees * (nodes - 1) ** 2 - sum_out_degrees ** 2 + sum_squares_out)
    return PTCMUT, PTCASY, RHO2
def name_in_initials(name):
    """Abbreviate all but the last word: "John Doe" -> "J. Doe"."""
    *given_names, surname = name.split()
    initials = [part[0] + "." for part in given_names]
    return " ".join(initials + [surname])
def coeff_size(degree):
    """ Size of the full coefficient array. """
    # Triangular-number count: pairs (i, j) with i + j <= degree.
    return (degree + 1) * (degree + 2) // 2
def is_product_bookable(
    product_code: str,
    availability: bool,
    durability: int,
) -> bool:  # noqa E125
    """Checks if a product is available for booking.

    A product is bookable when it is flagged available and still has
    positive durability.

    :param product_code: product identifier (currently unused; kept for
        interface compatibility)
    :param availability: whether the product is marked available
    :param durability: remaining durability; must be > 0 to book
    :return: True if the product can be booked, False otherwise
    """
    # Returning the condition directly replaces the redundant
    # ``if cond: return True else: return False`` pattern.
    return bool(availability and durability > 0)
def check_desired_parameters(desired_port: dict, port_results: dict) -> bool:
    """
    Check if port meets given desired parameters and if then yes return True or else return False.
    :param desired_port: Desired parameters that port should meet
    :param port_results: Parameters of specific port found by scan
    :return: Bool value representing whether desired port parameters match any found port in scan result
    """
    # NOTE(review): a desired key that is absent from port_results is
    # silently skipped (treated as matching) -- confirm this is intended.
    for key, value in desired_port.items():
        if key in port_results.keys():
            if key == 'service':
                # 'service' holds a nested dict; each requested sub-field is
                # compared case-insensitively. Missing sub-fields are skipped.
                for service_key, service_value in desired_port[key].items():
                    if service_key in port_results[key].keys():
                        if service_value.lower() != port_results[key][service_key].lower():
                            return False
            else:
                if key == 'cpe':
                    # 'cpe' in the scan result is a list of {'cpe': ...}
                    # dicts; the desired value must appear among them
                    # (case-sensitive membership, unlike the other fields).
                    cpes = []
                    for cpe in port_results['cpe']:
                        cpes.append(cpe['cpe'])
                    if value not in cpes:
                        return False
                else:
                    # Plain scalar field: case-insensitive string comparison.
                    if value.lower() != port_results[key].lower():
                        return False
    return True
def colnames_to_yeo_7(colnames: list, order: bool = True) -> list:
    """
    takes a list of colnames in the brainnetome format/naming and converts them to yeo_7 regions

    Examples:
        >>> print(colnames_to_yeo_7(["108_110", "1_2", "200_218", "148_140"]))
        >>> print(colnames_to_yeo_7(["108_110", "1_2", "200_218", "148_140"], order=False))

    Args:
        colnames: list of colnames of the form "<id>_<id>" where each id is a
            Brainnetome region number (1-246, keys of the lookup below)
        order: whether the two mapped values in each resulting colname should
            be sorted ascending (True) or kept in input order (False)

    Returns:
        list of yeo_7 converted colnames, each of the form "<net>_<net>"
    """
    # Static mapping: Brainnetome region id (1-246) -> Yeo 7-network label
    # (0 appears to be "no network" -- confirm against the atlas source).
    lookup = {
        1: 6, 2: 4, 3: 7, 4: 6, 5: 7, 6: 7, 7: 3, 8: 3, 9: 2, 10: 2,
        11: 7, 12: 6, 13: 7, 14: 7, 15: 4, 16: 6, 17: 6, 18: 6, 19: 6,
        20: 6, 21: 6, 22: 6, 23: 7, 24: 6, 25: 3, 26: 3, 27: 5, 28: 6,
        29: 6, 30: 3, 31: 6, 32: 6, 33: 7, 34: 7, 35: 7, 36: 6, 37: 4,
        38: 4, 39: 4, 40: 4, 41: 7, 42: 7, 43: 7, 44: 7, 45: 5, 46: 6,
        47: 5, 48: 5, 49: 5, 50: 5, 51: 7, 52: 7, 53: 2, 54: 2, 55: 3,
        56: 3, 57: 2, 58: 2, 59: 2, 60: 2, 61: 4, 62: 4, 63: 3, 64: 3,
        65: 4, 66: 2, 67: 2, 68: 2, 69: 5, 70: 5, 71: 2, 72: 2, 73: 2,
        74: 2, 75: 2, 76: 2, 77: 5, 78: 5, 79: 7, 80: 7, 81: 7, 82: 6,
        83: 7, 84: 7, 85: 3, 86: 3, 87: 7, 88: 7, 89: 5, 90: 5, 91: 3,
        92: 3, 93: 5, 94: 5, 95: 7, 96: 5, 97: 3, 98: 3, 99: 6, 100: 6,
        101: 5, 102: 5, 103: 5, 104: 5, 105: 1, 106: 1, 107: 3, 108: 1,
        109: 5, 110: 5, 111: 5, 112: 1, 113: 1, 114: 1, 115: 5, 116: 5,
        117: 5, 118: 5, 119: 1, 120: 1, 121: 7, 122: 7, 123: 4, 124: 4,
        125: 3, 126: 3, 127: 3, 128: 3, 129: 3, 130: 3, 131: 2, 132: 2,
        133: 3, 134: 3, 135: 1, 136: 1, 137: 6, 138: 6, 139: 3, 140: 3,
        141: 7, 142: 6, 143: 3, 144: 7, 145: 2, 146: 2, 147: 6, 148: 6,
        149: 2, 150: 3, 151: 1, 152: 1, 153: 7, 154: 7, 155: 2, 156: 2,
        157: 2, 158: 2, 159: 3, 160: 2, 161: 2, 162: 2, 163: 2, 164: 2,
        165: 0, 166: 6, 167: 4, 168: 4, 169: 4, 170: 4, 171: 2, 172: 2,
        173: 4, 174: 4, 175: 7, 176: 7, 177: 0, 178: 0, 179: 7, 180: 4,
        181: 7, 182: 1, 183: 4, 184: 4, 185: 4, 186: 4, 187: 7, 188: 7,
        189: 1, 190: 1, 191: 1, 192: 1, 193: 1, 194: 1, 195: 1, 196: 1,
        197: 1, 198: 1, 199: 1, 200: 1, 201: 3, 202: 1, 203: 1, 204: 1,
        205: 1, 206: 1, 207: 1, 208: 1, 209: 1, 210: 1, 211: 0, 212: 0,
        213: 0, 214: 0, 215: 0, 216: 0, 217: 0, 218: 0, 219: 0, 220: 0,
        221: 0, 222: 0, 223: 0, 224: 0, 225: 0, 226: 0, 227: 0, 228: 0,
        229: 0, 230: 0, 231: 0, 232: 0, 233: 0, 234: 0, 235: 0, 236: 0,
        237: 0, 238: 0, 239: 0, 240: 0, 241: 0, 242: 0, 243: 0, 244: 0,
        245: 0, 246: 0
    }
    # "108_110" -> [108, 110]
    splitted = [[int(j) for j in i.split("_")] for i in colnames]
    # Map each region id to its network; optionally sort the pair.
    new_names = [sorted([lookup[i] for i in j]) if order else [lookup[i] for i in j] for j in splitted]
    # Re-join as "<net>_<net>" strings (assumes exactly two ids per colname).
    return [str(i[0]) + "_" + str(i[1]) for i in new_names]
def _get_sort_and_permutation(lst: list):
    """
    Sorts a list, returning the sorted list along with a permutation-index
    list which can be used for cursored access to data which was indexed by
    the unsorted list. Nominally for chunking of CSR matrices into TileDB
    which needs sorted string dimension-values for efficient fragmentation.
    """
    # permutation[i] is the original index of the i-th smallest element;
    # Python's stable sort keeps ties in original-index order, matching the
    # previous pair-based implementation.
    permutation = sorted(range(len(lst)), key=lambda idx: lst[idx])
    lst_sorted = [lst[idx] for idx in permutation]
    return (lst_sorted, permutation)
def _get_optimized_params(params, n_cameras, n_points):
    """Parse a flat optimization result vector into camera params and 3D points.

    The first ``n_cameras * 6`` entries hold 6 parameters per camera; the
    remaining ``n_points * 3`` entries hold x/y/z per point.
    """
    split_at = n_cameras * 6
    camera_params = params[:split_at].reshape((n_cameras, 6))
    points_3d = params[split_at:].reshape((n_points, 3))
    return camera_params, points_3d
def hex_line8(chunk):
    """Create 8 bit hex string from bytes in chunk"""
    pairs = ['%02x' % byte for byte in chunk]
    # Pad to the width of a full 16-byte row: 16 pairs plus 15 separators.
    return ' '.join(pairs).ljust(16 * 3 - 1)
def tensor_size_bytes(tensor, unit='MB'):
    """
    Get the size of a tensor in bytes, or in a unit that is a multiple of bytes.

    :param tensor: the pytorch tensor
    :param unit: GigaBytes or GB (assumes GB=1e9 Bytes), MegaBytes or MB
        (assumes MB=1e6 Bytes), KiloBytes or KB (assumes KB=1e3 Bytes),
        Bytes or B
    :return: the size of the tensor in the desired unit
    """
    unit_upper = unit.upper()
    # Match on the first scale letter found; anything else means plain bytes.
    if 'G' in unit_upper:
        divisor = 1e9
    elif 'M' in unit_upper:
        divisor = 1e6
    elif 'K' in unit_upper:
        divisor = 1e3
    else:
        divisor = 1.0
    total_bytes = tensor.element_size() * tensor.nelement()
    return total_bytes / divisor
import bisect
def FindLatestProfile(target, versions):
    """Find latest profile that is usable by the target.

    Args:
        target: the target version
        versions: a sorted list of versions

    Returns:
        The newest entry in ``versions`` that is <= ``target``, or None when
        every version exceeds the target.
    """
    # bisect_right locates the first version strictly greater than target.
    idx = bisect.bisect_right(versions, target)
    return versions[idx - 1] if idx > 0 else None
import re
def parser_regex(regex, group, ignorecase=False):
    """
    Build a parser that recovers one group from a regex search.

    :param regex: regex pattern
    :param group: index of the group to recover
    :param ignorecase: False by default
    :return: a callable taking a string and returning the matched group
    """
    flags = re.I if ignorecase else 0
    return lambda text: re.search(regex, text, flags).groups()[group]
def dict_to_seconds(dict_duration):
    """
    Convert a Replicon API duration dict to an integer of the total seconds
    """
    hours = int(dict_duration['hours'])
    minutes = int(dict_duration['minutes'])
    seconds = int(dict_duration['seconds'])
    return hours * 3600 + minutes * 60 + seconds
def is_callable(attribute, instance=None):
    """Check if value or attribute of instance are callable."""
    try:
        # With a (truthy) instance, test the named attribute on it;
        # otherwise test the value itself.
        target = getattr(instance, attribute) if instance else attribute
    except AttributeError:
        return False
    return callable(target)
import torch
def ones_like(tensor: torch.Tensor) -> torch.Tensor:
    """
    Build a tensor of ones with the same shape/dtype as ``tensor``.

    clone() + fill_() is used to make sure the ones tensor ends up on the
    right device at runtime.
    """
    result = tensor.clone()
    result.fill_(1)
    return result
def get_error_description(code: int) -> str:
    """
    Look up the human-readable description for an IRBIS server return code.

    :param code: return code (any non-negative code means success)
    :return: description string (Russian, matching the server documentation)
    """
    errors = {
        -100: 'Заданный MFN вне пределов БД',
        -101: 'Ошибочный размер полки',
        -102: 'Ошибочный номер полки',
        -140: 'MFN вне пределов БД',
        -141: 'Ошибка чтения',
        -200: 'Указанное поле отсутствует',
        -201: 'Предыдущая версия записи отсутствует',
        -202: 'Заданный термин не найден (термин не существует)',
        -203: 'Последний термин в списке',
        -204: 'Первый термин в списке',
        -300: 'База данных монопольно заблокирована',
        -301: 'База данных монопольно заблокирована',
        -400: 'Ошибка при открытии файлов MST или XRF (ошибка файла данных)',
        -401: 'Ошибка при открытии файлов IFP (ошибка файла индекса)',
        -402: 'Ошибка при записи',
        -403: 'Ошибка при актуализации',
        -600: 'Запись логически удалена',
        -601: 'Запись физически удалена',
        -602: 'Запись заблокирована на ввод',
        -603: 'Запись логически удалена',
        -605: 'Запись физически удалена',
        -607: 'Ошибка autoin.gbl',
        -608: 'Ошибка версии записи',
        -700: 'Ошибка создания резервной копии',
        -701: 'Ошибка восстановления из резервной копии',
        -702: 'Ошибка сортировки',
        -703: 'Ошибочный термин',
        -704: 'Ошибка создания словаря',
        -705: 'Ошибка загрузки словаря',
        -800: 'Ошибка в параметрах глобальной корректировки',
        -801: 'ERR_GBL_REP',
        # BUG FIX: this entry was a duplicate -801 key, silently overwriting
        # ERR_GBL_REP. ERR_GBL_MET is code -802 in the IRBIS64 protocol.
        -802: 'ERR_GBL_MET',
        -1111: 'Ошибка исполнения сервера (SERVER_EXECUTE_ERROR)',
        -2222: 'Ошибка в протоколе (WRONG_PROTOCOL)',
        -3333: 'Незарегистрированный клиент (ошибка входа на сервер) ' +
               '(клиент не в списке)',
        -3334: 'Клиент не выполнил вход на сервер (клиент не используется)',
        -3335: 'Неправильный уникальный идентификатор клиента',
        -3336: 'Нет доступа к командам АРМ',
        -3337: 'Клиент уже зарегистрирован',
        -3338: 'Недопустимый клиент',
        -4444: 'Неверный пароль',
        -5555: 'Файл не существует',
        -6666: 'Сервер перегружен. Достигнуто максимальное число ' +
               'потоков обработки',
        -7777: 'Не удалось запустить/прервать поток администратора ' +
               '(ошибка процесса)',
        -8888: 'Общая ошибка',
    }
    # Non-negative return codes always signal success.
    if code >= 0:
        return 'Нормальное завершение'
    if code not in errors:
        return 'Неизвестная ошибка'
    return errors[code]
from datetime import datetime
def pytime_to_datetime(pytime):
    """
    Convert a PyTime object to a naive ``datetime``.

    Only the year/month/day/hour/minute fields are carried over; seconds
    (and finer resolution) are discarded.
    """
    return datetime(
        year=pytime.year,
        month=pytime.month,
        day=pytime.day,
        hour=pytime.hour,
        minute=pytime.minute,
    )
from typing import Union
from pathlib import Path
def bump_version(path: Union[str, Path]) -> Path:
    """Bumps the version number for a path if it already exists

    Example::

        bump_version("folder/new_file.json") == Path("folder/new_file.json)
        bump_version("folder/old_file.json") == Path("folder/old_file_1.json)
        bump_version("folder/old_file_1.json") == Path("folder/old_file_2.json)
    """
    path = Path(path)
    # Nothing to bump if the path is still free.
    if not path.exists():
        return path
    # Check for already bumped versions
    prev_version = None
    try:
        # Among sibling files whose stem starts with this stem, take the
        # largest trailing "_<int>" suffix as the current version.
        # NOTE(review): any sibling whose stem ends in digits after "_"
        # counts here, even unrelated files sharing the prefix -- confirm
        # this matches the intended naming scheme.
        prev_version = max(
            map(
                int,
                filter(
                    lambda s: s.isdigit(),
                    [f.stem.split("_")[-1] for f in path.parent.glob(f"{path.stem}*")],
                ),
            )
        )
        new_version = prev_version + 1
    except ValueError:  # max() arg is an empty sequence
        new_version = 1
    if prev_version and path.stem.endswith(f"_{prev_version}"):
        # The input already carries a version suffix: replace it.
        suffix = f"_{prev_version}"
        new_name = f"{path.stem[:-len(suffix)]}_{new_version}{path.suffix}"
    else:
        # Otherwise append the new version suffix to the stem.
        new_name = f"{path.stem}_{new_version}{path.suffix}"
    return path.parent / new_name
def is_command(meth):
    """ Return True if method is an exposed Lua command """
    try:
        return meth._is_command
    except AttributeError:
        # Undecorated callables carry no marker attribute.
        return False
import typing
import enum
def _separate(values: typing.Iterable[typing.Any], sep: str = ":") -> str:
    """Join a sequence of values into one ``sep``-delimited string.

    ``None`` renders as ``"null"``, enum members render as their value,
    tuples render recursively (empty tuples contribute nothing), and
    everything else is stringified.
    """
    rendered: typing.List[str] = []
    for item in values:
        if item is None:
            rendered.append("null")
        elif isinstance(item, enum.Enum):
            rendered.append(str(item.value))
        elif isinstance(item, tuple):
            # Empty tuples are skipped entirely (no separator emitted).
            if item:
                rendered.append(_separate(item))  # pyright: ignore[reportUnknownArgumentType]
        else:
            rendered.append(str(item))
    return sep.join(rendered)
def get_campaigns(api, assessment_id=""):
    """Return a dictionary containing all campaigns.

    When called with a blank string for the assessment_id (the default),
    all campaigns in all assessments are returned, since every name starts
    with "". With an assessment_id, only that assessment's campaigns are
    returned.

    Args:
        api (Gophish API): Connection to Gophish server via the API.
        assessment_id (string): Assessment identifier to get campaigns from.

    Raises:
        LookupError: No campaigns found for the provided assessment id.

    Returns:
        dict: Campaign id as key, campaign name as value.
    """
    matching = {
        campaign.id: campaign.name
        for campaign in api.campaigns.get()
        if campaign.name.startswith(assessment_id)
    }
    if not matching:
        raise LookupError(f"No campaigns found for assessment {assessment_id}")
    return matching
def maskname_normalize(s):
    """
    @param s (str): Mask name with or without the prefix "MP_". Case insensitive.
    @return (str): "MP_..."
    """
    normalized = s.upper()
    return normalized if normalized.startswith("MP_") else "MP_" + normalized
def broadcasted_shape(*shapes):
    """
    Compute the broadcasted shape for the given set of shapes, following
    NumPy's broadcasting rules. Raises ValueError if the shapes do not
    broadcast.
    """
    # The result rank is the largest rank among the inputs.
    ndim = max((len(shape) for shape in shapes), default=0)
    result = []
    # Walk axes from most to least significant, aligning shapes at their
    # trailing dimensions as NumPy does.
    for axis in range(-ndim, 0):
        size = 1
        for shape in shapes:
            if -axis <= len(shape):
                dim = shape[axis]
                if size == 1:
                    size = dim
                elif dim != 1 and dim != size:
                    raise ValueError("Shapes %s do not broadcast" % (shapes,))
        result.append(size)
    return tuple(result)
def cummulative_continuation(df_delta, initial_value):
    """Column-wise cumulative sums offset by an initial value.

    Given a DataFrame whose columns are time series sharing a common
    datetime index, returns the column-wise running totals shifted so that
    they start from ``initial_value``.

    Parameters
    ----------
    df_delta : DataFrame
        A DataFrame with time series as columns.
    initial_value : float or list
        An initial value or a list of initial values (one per column).

    Returns
    -------
    DataFrame
        Cumulative values starting from the initial value(s).
    """
    cumulative = df_delta.cumsum(axis=0)
    return cumulative + initial_value
def bu8(u):
    """Convert an 8-bit integer to bytes.

    Example:
        bu8(0x12) == b'\x12'
    """
    # bytes() over an int iterable validates the 0..255 range (ValueError
    # otherwise), matching bytes([u]).
    return bytes((u,))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.