content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def date_to_millis(dt):
    """
    Convert a datetime object to the number of milliseconds since the unix
    epoch.

    :param dt: datetime to convert (naive datetimes use the local timezone,
        per ``datetime.timestamp``).
    :return: int, milliseconds since the epoch.
    """
    # Multiply before truncating so sub-second precision is kept; the
    # original ``int(dt.timestamp()) * 1000`` dropped the milliseconds.
    return int(dt.timestamp() * 1000)
def dataframe_to_geojson_points(df, lat_col='lat', lon_col='lon', idx_col=None, properties_fn=None):
    """
    Create a GeoJSON FeatureCollection with a Point for each row of the DataFrame.

    :param df: source pandas DataFrame.
    :param lat_col: latitude column (y)
    :param lon_col: longitude column (x)
    :param idx_col: optional column to use as the feature id; defaults to the
        DataFrame index.
    :param properties_fn: optional callable ``(idx, row) -> dict`` whose result
        is merged into each feature's properties.
    :return: GeoJSON FeatureCollection dict.
    """
    geojson = {
        'type': 'FeatureCollection',
        'features': []
    }
    for idx, row in df.iterrows():
        # Compute the feature id once instead of duplicating the expression.
        feature_id = idx if idx_col is None else row[idx_col]
        feature = {
            'type': 'Feature',
            'geometry': {
                'type': 'Point',
                # GeoJSON coordinate order is [longitude, latitude].
                'coordinates': [row[lon_col], row[lat_col]]
            },
            'properties': {
                'id': feature_id
            },
            'id': feature_id
        }
        if callable(properties_fn):
            feature['properties'].update(properties_fn(idx, row))
        geojson['features'].append(feature)
    return geojson
def button_url(title, url, webview_height_ratio='full'):
    """
    Build a web_url button dict suitable for send_buttons.

    :param title: Button title
    :param url: Button URL
    :param webview_height_ratio: Height of the Webview. Valid values: compact, tall, full.
    :return: dict describing the button
    """
    button = dict(
        type='web_url',
        title=title,
        url=url,
        webview_height_ratio=webview_height_ratio,
    )
    return button
import re
def _verify_host_data(host_hash, zone, host_name):
"""
Check if hosts conifguration is sane.
Check if hosts configuration follows some basic rules on which the script's
main loop depends.
Args:
host_hash: A hash containing host's configuration. Fields are as follows:
ip: mandatory. IP address of the host
port: optional, default is 53. Port to use while connecting.
key-id: optional. The ID of the key that should be send to the host.
key-data: optional. The TSIG key itself.
master: optional, default is False. Flag depicting whether host is
master for the zone or not.
zone: The zone to which host's configuration belongs to.
host_name: The hostname of the host.
Returns:
A string containing description of problems found or an empty string
if there are none.
"""
msg = []
if 'ip' in host_hash:
ip = host_hash['ip']
if not re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
msg.append('Zonehost {0} from zone {1} has malformed IP: {2}.'.format(
host_name, zone, ip))
else:
msg.append('Zonehost {0} from zone {1} '.format(host_name, zone) +
'does not have mandatory "ip" field.')
if 'port' in host_hash:
port = host_hash['port']
if not isinstance(port, int) or port < 1 or port > 65535:
msg.append('Zonehost {0} of zone {1} '.format(host_name, zone) +
'has malformed port: {0}.'.format(port))
if "key-id" in host_hash or "key-data" in host_hash:
if not ("key-id" in host_hash and "key-data" in host_hash):
msg.append('Zonehost {0} from zone {1} '.format(host_name, zone) +
'should have both "key-id" and "key-data" keys ' +
'defined or none of them.')
else:
if not re.match(r"^[a-zA-Z0-9-\.]+$", host_hash['key-id']):
msg.append('Zone {0}, zonekey for host {1}'.format(zone, host_name) +
' has invalid "id" entry: {0}.'.format(
host_hash['key-id']))
if not re.match(
"^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$",
host_hash['key-data']):
msg.append('Zone {0}, zonekey for host {1}'.format(zone, host_name) +
' has non-base64 "data" entry.')
return msg | eae02db2c98c2380ce1a180a9d56b83d9eb5bb53 | 129,163 |
def get_nested(d: dict, *keys):
    """Get nested dict values, returning None if any key in the chain is
    missing or maps to None.

    :param d: dictionary to walk.
    :param keys: one or more keys, applied left to right.
    :return: the nested value, or None.
    """
    if len(keys) == 1:
        # Use .get() so a missing final key yields None instead of raising
        # KeyError, as the docstring promises (the original used d[keys[0]]).
        return d.get(keys[0])
    nested = d.get(keys[0])
    if nested is None:
        return None
    return get_nested(nested, *keys[1:])
def make_menu_dict_from_dict(orig_dict, dict_key_for_display):
    """
    Create a menu dictionary with sub dictionary.

    :type orig_dict: Dict
    :param orig_dict: Dictionary you want to make a menu from
    :type dict_key_for_display: String
    :param dict_key_for_display: Dictionary item to become the menu
    :rtype: Dict
    :return: A dictionary keyed 1..N with menu text and the original entry
    """
    menu = {}
    # Number the entries starting at 1, in the dict's insertion order.
    for position, entry in enumerate(orig_dict.values(), start=1):
        menu[position] = {'MENU': entry[dict_key_for_display], 'SUBDIC': entry}
    return menu
from functools import reduce
def getattr_orm(instance, key):
    """
    Recursive getattr for ORM-style lookups: the key is split on every
    occurrence of ``__`` and each part is resolved with getattr in turn.

    :param instance: object to start the lookup from.
    :param key: attribute path, e.g. ``"parent__child__name"``.
    :return: the resolved attribute value.
    """
    target = instance
    for part in key.split('__'):
        target = getattr(target, part)
    return target
import time
def time_func(func):
    """Decorator that prints the execution time of the decorated function.

    The wrapper forwards positional and keyword arguments and returns the
    wrapped function's result; the original accepted no arguments and
    discarded the return value, breaking any decorated function that takes
    parameters or returns something.
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped function's name/docstring
    def wrapper(*args, **kwargs):
        t1 = time.time()
        result = func(*args, **kwargs)
        t2 = time.time()
        print(f'Function: "{func.__name__}", completed in: "{t2 - t1}" s')
        return result
    return wrapper
from pathlib import Path
def is_path_exists(path: str) -> Path:
    """Validate *path* as a location for a NEW file.

    Despite the name, this succeeds only when the path does not yet exist
    but its parent directory does.

    :param path: candidate file path.
    :return: the path as a ``Path`` when it is usable for a new file.
    :raises FileExistsError: if something already exists at *path*.
    :raises TypeError: if the parent directory does not exist (i.e. *path*
        is not a usable file path).
    """
    filepath = Path(path)
    if filepath.exists():
        raise FileExistsError(f"{filepath} already exists")
    elif not filepath.parent.exists():
        raise TypeError(f"{filepath} is not a valid path, provide path with file name")
    else:
        return filepath
def base64_add_padding(string):
    """
    Pad a URL-safe base64 string to a multiple of four characters.

    :param string: Non-padded Url-safe Base64 string.
    :return: Padded Url-safe Base64 string.
    """
    remainder = len(string) % 4
    if remainder:
        return string + "=" * (4 - remainder)
    return string
def name_list(string):
    """Convert a Zotero name list to a Python name list.

    Input is a string of semicolon separated 'Lastname, Firstname' names.
    Output is a Python list of 'Firstname Lastname' names.
    """
    names = []
    for raw in string.split('; '):
        if ', ' in raw:
            parts = raw.split(', ', 2)
            last, first = parts[0].strip(), parts[1].strip()
            names.append('{} {}'.format(first, last))
        else:
            names.append(raw.strip())
    return names
def getContour(semitones):
    """Return semitones in Parsons Code.

    Given a list of integers defining the size and direction of a series of
    musical intervals in semitones, encode the contour of the melody with
    Parsons code for musical contour: u=up, d=down, r=repeat.
    """
    symbols = []
    for interval in semitones:
        if interval > 0:
            symbols.append('u')   # up
        elif interval < 0:
            symbols.append('d')   # down
        else:
            symbols.append('r')   # repeated
    return ''.join(symbols)
def compute_polynomial(coefficients, value):
    """
    Horner's method of computation for polynomial functions.

    :param coefficients: vector of coefficients, highest degree first,
        e.g. [3, 2, 1, 0] for f(x) = 3*(x**3) + 2*(x**2) + 1*(x**1) + 0*(x**0)
    :param value: number
    :return result: f(value); None when *coefficients* is empty (preserved
        from the original behaviour)
    """
    result = None
    for coefficient in coefficients:
        if result is None:
            # Seed the accumulator with the highest-order coefficient.  The
            # original tested ``not result``, which also matched a legitimate
            # accumulator value of 0; that happened to be numerically harmless
            # (0 * value + c == c) but is an accident, fixed here for clarity.
            result = coefficient
        else:
            result = result * value + coefficient
    return result
def create_test(base, new_name, **attrs):
    """
    Register a new test class derived from *base*.

    Accepts a base test class, sets the given attributes on a subclass, and
    returns the new type.  Handy for quickly registering many tests that
    share the same logic but differ on a few parameters or combinations of
    parameters.
    """
    full_name = '{}_{}'.format(base.__name__, new_name)
    attrs['name'] = full_name
    return type(full_name, (base,), attrs)
def setattr_validate_property(traito, traitd, obj, name, value):
    """Validate *value*, then assign it to the named property trait attribute.

    The definition trait's C-level hooks do the work: ``validate`` checks the
    value and ``post_setattr`` performs the actual assignment.
    """
    checked = traitd.c_attrs.validate(traitd, obj, name, value)
    return traitd.c_attrs.post_setattr(traito, traitd, obj, name, checked)
def dot(im_a, im_b):
    """Compute the dot product between two images of the same size."""
    flat_a = im_a.ravel()
    flat_b = im_b.ravel()
    return flat_a.dot(flat_b)
def netids_above_cutoff(grades, cutoff):
    """
    Return the list of netids with grades above or equal to cutoff.

    Parameter grades: The dictionary of student grades
    Precondition: grades has netids as keys, ints as values.

    Parameter cutoff: The grade cutoff (for, say, a passing grade)
    Precondition: cutoff is an int
    """
    return [netid for netid, grade in grades.items() if grade >= cutoff]
def unroll_rids(rids):
    """Recursively unroll rid id ranges into individual rids.

    Each element of *rids* is a pair; when the second item is truthy the pair
    is an inclusive numeric range, otherwise only the first item is used.

    :param rids: list of (start, end) pairs; may be empty or None.
    :return: flat list of 'R<n>' strings.
    """
    if not rids:
        return []
    m = rids[0]
    if m[1]:
        return ['R%d' % r for r in range(int(m[0]),
                                         int(m[1]) + 1)] + unroll_rids(rids[1:])
    return ['R%s' % m[0]] + unroll_rids(rids[1:])
    # NOTE: an unreachable trailing ``return []`` (dead code after the two
    # returns above) was removed.
def strip_suffix(name, split='_'):
    """
    Return the portion of name minus the last element separated by the
    splitter character.

    :param name: str, name to strip the suffix from
    :param split: str, split character
    :return: str

    The previous implementation used ``str.replace``, which removed EVERY
    occurrence of the suffix, e.g. ``strip_suffix('a_b_a_b')`` returned
    ``'aa'`` instead of ``'a_b_a'``.
    """
    if not name.count(split):
        return name
    # rsplit removes only the trailing element.
    return name.rsplit(split, 1)[0]
from datetime import datetime
def parse_date(datestr):
    """
    Parse a string of the form 1982-06-22 into year, month and day,
    returned as strings (month/day without zero padding).
    """
    parsed = datetime.strptime(datestr, '%Y-%m-%d')
    return str(parsed.year), str(parsed.month), str(parsed.day)
from typing import Iterable
def suma_cubo_pares_for(numeros: Iterable[int]) -> int:
    """Cube every number in *numeros* and return the sum of the even cubes.

    Restriction (kept from the original exercise statement): use two for
    loops, one to cube the numbers and one to pick out the even values.
    """
    cubed = []
    for valor in numeros:
        cubed.append(valor ** 3)
    total = 0
    for valor in cubed:
        if valor % 2 == 0:
            total += valor
    return total
def get_raster_origin(coords):
    """Return raster origin.

    Parameters
    ----------
    coords : :class:`numpy:numpy.ndarray`
        3 dimensional array (rows, cols, 2) of xy-coordinates

    Returns
    -------
    out : str
        'lower' or 'upper'
    """
    # Positive y-step between the first two rows means the raster grows
    # upward, so the origin sits at the lower edge.
    dy = (coords[1, 1] - coords[0, 0])[1]
    if dy > 0:
        return 'lower'
    return 'upper'
def convert_to_html_table(example):
    """
    Return string representation of an HTML table for an Example object.

    Illustrates how to extract information from Example objects: the 'src',
    'imt' and 'translation' lines are selected explicitly, and if an Example
    lacks such lines the corresponding table rows will simply be empty.
    """
    src = example.get_words("src")
    imt = example.get_words("imt")
    trans = example.get_line("translation")

    def row(cells):
        # One <td> per cell.
        return "<tr><td>{}</td></tr>".format("</td><td>".join(cells))

    # The translation spans all source columns.
    merged = '<tr><td colspan="{}">{}</tr>'.format(len(src), trans)
    return "<table>\n{}\n</table>".format("\n".join([row(src), row(imt), merged]))
def isValidWord(word, hand, wordList):
    """
    Returns True if word is in the wordList and is entirely
    composed of letters in the hand. Otherwise, returns False.

    Does not mutate hand or wordList.

    word: string
    hand: dictionary (string -> int) mapping letters to available counts
    wordList: list of lowercase strings
    """
    # Guard clause: an unknown word can never be valid.
    if word not in wordList:
        return False
    # Work on a copy so the caller's hand is untouched.
    remaining = hand.copy()
    for letter in word:
        if remaining.get(letter, 0) == 0:
            return False
        remaining[letter] -= 1
    return True
    # (A 14-line commented-out alternate implementation was removed.)
def binary_search(lis, value):
    """
    Binary search over an ascending sorted list.

    Time complexity: O(log n).

    :param lis: ascending sorted list.
    :param value: value to locate.
    :return: index of *value* in *lis*, or None when absent.
    """
    lo, hi = 0, len(lis) - 1
    while lo <= hi:
        # Note: Python's floor division rounds toward negative infinity.
        mid = (lo + hi) // 2
        if lis[mid] == value:
            return mid          # found it
        if lis[mid] > value:
            hi = mid - 1        # target lies left of mid
        else:
            lo = mid + 1        # target lies right of mid
    return None                 # not present
def fixedlengthstr(b, i):
    """Decode the first *i* bytes of *b* as a UTF-8 string."""
    prefix = b[:i]
    return prefix.decode('utf-8')
def _char_to_int(char):
"""Converts a potential character to its scalar value."""
if type(char) in [str, type(u"")]:
return ord(char)
else:
return char | 70b91fb63af67598fa283902c57d5fe188d21aff | 129,231 |
def compress565(r: int, g: int, b: int):
    """Compress an RGB triplet into 565-packed data.

    Bit layout across the returned byte pair: RRRRRGGG GGGBBBBB.
    """
    high = (r & 0b11111000) | (g >> 5)
    low = ((g << 5) & 0b11100000) | (b >> 3)
    return (high, low)
from typing import List
from typing import Dict
import torch
from typing import Iterable
def _concat_step_output(outputs: List[Dict[str, torch.Tensor]], dim: int = 0, skip: Iterable = ('loss',))\
-> Dict[str, torch.Tensor]:
"""Concatenate step output into single batch tensor.
Args:
outputs: Outputs from train/validation/test_step.
dim: Dimension to be concatenated.
skip: Keys to be skipped in outputs.
"""
data = {}
for output in outputs:
for key, var in output.items():
if key in skip:
continue
if key in data:
data[key].append(var)
else:
data[key] = [var]
for key in data:
data[key] = torch.cat(data[key], dim=dim).clone().detach().cpu()
return data | 7be136ed31a1714d52fdd44b6398cf29fd50c5f9 | 129,233 |
import re
def mathics_split(patt, string, flags):
    """
    Split *string* on every match of *patt*.

    Python's re.split includes the text of groups if they are capturing, and
    its handling of splits on empty matches changed across versions (returns
    the original string before 3.5, raises ValueError on some versions, works
    on later ones).  For these reasons we compute the split slices ourselves
    from the match positions.
    """
    # Spans of the separator matches.
    cut_points = [(m.start(), m.end()) for m in re.finditer(patt, string, flags)]
    # Add sentinels for the very beginning and end of the string, then keep
    # the text between the end of one separator and the start of the next.
    bounds = [(None, 0)] + cut_points + [(len(string), None)]
    pieces = []
    for (_, keep_from), (keep_to, _) in zip(bounds, bounds[1:]):
        pieces.append(string[keep_from:keep_to])
    return pieces
def chunk(value, size=2):
    """Split a sequence into consecutive pieces of length *size*.

    The final piece is shorter when the length is not a multiple of *size*:

    size=2: 'abcdefgh' -> ['ab', 'cd', 'ef', 'gh']
    size=2: 'abcdefg'  -> ['ab', 'cd', 'ef', 'g']

    (The original docstring example showed a 7-character input producing a
    phantom 'gh' chunk.)
    """
    return [value[i:size + i] for i in range(0, len(value), size)]
def avg_salaries(departments, employees):
    """
    Calculate the average salary of every department.

    :param departments: list of departments in the db (each with .department)
    :param employees: list of employees in the db (each with .department and .salary)
    :return: dict mapping department name to its average salary rounded to 3
        decimals, or the string 'No employees' for an empty department
    """
    salaries_by_dept = {dept.department: [] for dept in departments}
    # Collect each employee's salary under the matching department.
    for emp in employees:
        for dept in departments:
            if emp.department == dept.department:
                salaries_by_dept[dept.department].append(emp.salary)
    result = {}
    for dept_name, salaries in salaries_by_dept.items():
        if salaries:
            result[dept_name] = round(sum(salaries) / len(salaries), 3)
        else:
            result[dept_name] = 'No employees'
    return result
from typing import Optional
import requests
from bs4 import BeautifulSoup
def scrape_discord_username(nation_id: int, /) -> Optional[str]:
    """Scrape a nation page for the discord username

    Parameters
    ----------
    nation_id : int
        The nation ID to scrape.

    Returns
    -------
    Optional[str]
        The discord username, or None if not found.
    """
    try:
        # Fetch the public nation page (no API key needed for this view).
        response = requests.request(
            "GET", f"https://politicsandwar.com/nation/id={nation_id}"
        )
        # The page lays nation attributes out as <tr class="notranslate">
        # rows; keep the rows whose cells mention "Discord Username:" and
        # return the text of the second child (the value cell) of the first.
        return [
            i.contents[1].text  # type: ignore
            for i in BeautifulSoup(response.text, "html.parser").find_all(
                "tr", class_="notranslate"
            )
            if any("Discord Username:" in str(j) for j in i.contents)  # type: ignore
        ][0]
    except IndexError:
        # No matching row: the nation has no discord username listed.
        return None
def timeoutDeferred(reactor, deferred, seconds):
    """
    Cancel a L{Deferred} if it does not have a result available within the
    given amount of time.

    @see: L{Deferred.cancel}.

    The timeout only waits for callbacks that were added before
    L{timeoutDeferred} was called. If the L{Deferred} is fired then the
    timeout will be removed, even if callbacks added after
    L{timeoutDeferred} are still waiting for a result to become available.

    @type reactor: L{IReactorTime}
    @param reactor: A provider of L{twisted.internet.interfaces.IReactorTime}.

    @type deferred: L{Deferred}
    @param deferred: The L{Deferred} to time out.

    @type seconds: C{float}
    @param seconds: The number of seconds before the timeout will happen.

    @rtype: L{twisted.internet.interfaces.IDelayedCall}
    @return: The scheduled timeout call.
    """
    # Schedule the cancellation to run after the timeout elapses.
    delayed_call = reactor.callLater(seconds, deferred.cancel)

    def _cancel_timeout(result):
        # The deferred fired first: withdraw the pending timeout and pass
        # the result through unchanged.
        if delayed_call.active():
            delayed_call.cancel()
        return result

    deferred.addBoth(_cancel_timeout)
    return delayed_call
def pascal_triangle(n):
    """Return the nth line (1-indexed) in Pascal's triangle."""
    if n == 1:
        return [1]
    prev = pascal_triangle(n - 1)
    # Each interior entry is the sum of the two entries above it.
    interior = [a + b for a, b in zip(prev, prev[1:])]
    return [1] + interior + [1]
def _remove_images(cells):
"""Remove markdown cells with images."""
return [cell for cell in cells
if not cell.get('source', '').startswith('![')] | 4ebeefc8a11b872d63936a5756649cb04388fddd | 129,255 |
def safe_cast(invar, totype):
    """Performs a "safe" typecast.

    Ensures that `invar` properly casts to `totype`. Checks after casting
    that the result is actually of type `totype`. Any exceptions raised by
    the typecast itself are unhandled.

    Parameters
    ----------
    invar
        (arbitrary) -- Value to be typecast.
    totype
        |type| -- Type to which `invar` is to be cast.

    Returns
    -------
    outvar
        `type 'totype'` -- Typecast version of `invar`

    Raises
    ------
    ~exceptions.TypeError
        If result of typecast is not of type `totype`
    """
    # Perform the cast, letting any exception from it propagate.
    outvar = totype(invar)
    # Verify the produced value really is of the requested type.
    if not isinstance(outvar, totype):
        raise TypeError(
            "Result of cast to '{0}' is '{1}'".format(totype, type(outvar)))
    return outvar
def pretty_request(request):
    """
    Convert a Django request to a string the way requests are meant to be
    printed.

    Source: https://gist.github.com/defrex/6140951

    Returns
    -------
    str: A displayable request as a string.
    """
    header_lines = []
    for meta_key, value in request.META.items():
        if not meta_key.startswith('HTTP'):
            continue
        # META stores headers as e.g. HTTP_ACCEPT_ENCODING; rebuild the
        # canonical Accept-Encoding form.
        pretty_name = '-'.join(
            part.capitalize() for part in meta_key[5:].lower().split('_'))
        header_lines.append('{}: {}\n'.format(pretty_name, value))
    headers = ''.join(header_lines)
    return (
        '{method} HTTP/1.1\n'
        'Content-Length: {content_length}\n'
        'Content-Type: {content_type}\n'
        '{headers}\n\n'
        '{body}'
    ).format(
        method=request.method,
        content_length=request.META['CONTENT_LENGTH'],
        content_type=request.META['CONTENT_TYPE'],
        headers=headers,
        body=request.body,
    )
def db_uri(db_conf):
    """Return the database URI, given the database config mapping."""
    engine = db_conf.get('engine')
    if engine == 'memory':
        return 'sqlite:///:memory:'
    if engine == 'sqlite':
        return '{engine}:///{host}'.format(**dict(db_conf))
    # Any other engine uses the full network-style URI.
    return '{engine}://{user}:{password}@{host}:{port}/{name}'.format(
        **dict(db_conf))
import base64
def base64_to_bytes(value):
    """
    Decode the given base64 encoded bytes value into raw bytes.

    :param bytes value: base64 encoded bytes value to be decoded into raw bytes.
    :returns: raw bytes.
    :rtype: bytes
    """
    decoded = base64.b64decode(value)
    return decoded
import sqlite3
def list_db_vars(dbname):
    """
    Return the set of the names of the variables found in the specified case
    DB file.

    dbname: str
        The name of the sqlite DB file.

    The previous version returned a set of 1-tuples (raw cursor rows) rather
    than the names its docstring promised, and never closed the connection;
    both are fixed here.
    """
    connection = sqlite3.connect(dbname)
    try:
        varcur = connection.cursor()
        varcur.execute("SELECT name from casevars")
        # Each row is a 1-tuple; extract the bare name.
        return {row[0] for row in varcur}
    finally:
        connection.close()
def first_line_match(regexp, file):
    """Return the first match of the regular expression in file (by line),
    or None.

    Technically applicable to any iterable containing strings, not just
    files opened for reading.
    """
    return next(
        (match for line in file if (match := regexp.search(line)) is not None),
        None,
    )
from pathlib import Path
def get_project_root() -> Path:
    """
    Return the root path location of the project (two levels above this
    file), which may differ depending on where the project was placed.

    :rtype: Path
    """
    return Path(__file__).parents[1]
def from_feedback(text: str) -> str:
    """
    Generate GIFT-ready text from a feedback string.

    Parameters
    ----------
    text : str
        Input text.

    Returns
    -------
    out: str
        GIFT-ready text: the feedback prefixed with '####'.
    """
    return '####{}'.format(text)
def _is_geo_valid(lat, lng):
""" Checks if geographical point valid """
if abs(lat) > 90 or abs(lng) > 180:
return False
return True | 5ae5cbad8432529a9f1e7c8e45f1707394ad46c1 | 129,277 |
def is_local_variable(variable_name):
    """
    Is a variable name a local or fully qualified?

    >>> is_local_variable('FOO')
    True
    >>> is_local_variable('HH__FOO')
    False

    :param variable_name: str
    :return: bool
    """
    return variable_name.find('__') == -1
import json
def read_one_node(key, file_name='log_file.json'):
    """
    Return the dictionary in JSON file under the key=key.

    Args:
        key (string): dictionary key to address the node
        file_name (string): JSON file name to read

    Returns:
        Dictionary
    """
    with open(file_name, 'r', encoding='utf8') as handle:
        document = json.load(handle)
    return document[key]
def build_category_index(data):
    """
    Return mappings from index to category and vice versa.

    Args:
        data: list of dicts, each dict is one sample with a 'category' key
    """
    # Sorted unique categories give a stable numbering.
    categories = sorted({sample['category'] for sample in data})
    idx2cat = dict(enumerate(categories))
    cat2idx = {category: index for index, category in idx2cat.items()}
    return idx2cat, cat2idx
def make_response(resp):
    """Return a Flask tuple `resp, code` per
    http://flask.pocoo.org/docs/0.10/quickstart/#about-responses

    Error 162 pertains to "Historical data request pacing violation" and is
    mapped, like a missing error code, to HTTP 429; any other error code
    becomes a 400 (bad request if an arg which made it to TWS wasn't right).
    Responses without an 'errorMsg' key are passed through unchanged.
    """
    if 'errorMsg' not in resp:
        return resp
    if resp['errorCode'] in (None, 162):
        return resp, 429
    return resp, 400
from typing import List
import re
def tokenize(arg: str) -> List[str]:
    """
    Tokenize.

    >>> tokenize("1+2+42")
    ['1', '+', '2', '+', '42']
    >>> tokenize("(1 + 2) * 3")
    ['(', '1', '+', '2', ')', '*', '3']
    """
    # Surround every number run with spaces, then space out the punctuation
    # we care about and let split() collapse the whitespace.
    spaced = re.sub("([0-9]+)", r" \1 ", arg)
    for token, replacement in (("(", " ( "), (")", " ) "), (",", " ")):
        spaced = spaced.replace(token, replacement)
    return spaced.split()
def split(list_a: list, length_of_first: int):
    """Problem 17: Split a list in two parts; the length of the first list is given.

    Parameters
    ----------
    list_a : list
        The input list
    length_of_first : int
        The desired length of the first output list

    Returns
    -------
    (list, list)
        A tuple of lists

    Raises
    ------
    TypeError
        If the given argument is not of `list` type
    ValueError
        If `length_of_first` is negative or greater than the list's length
    """
    if not isinstance(list_a, list):
        raise TypeError('The argument given is not of `list` type.')
    if not 0 <= length_of_first <= len(list_a):
        raise ValueError('The value of `l` is not valid.')
    head = list_a[:length_of_first]
    tail = list_a[length_of_first:]
    return head, tail
def readable_time_delta(seconds):
    """
    Convert a number of seconds into readable days, hours, and minutes.
    """
    days, seconds = divmod(seconds, 86400)
    hours, seconds = divmod(seconds, 3600)
    minutes = seconds // 60

    def plural(n):
        # 's' for everything except exactly one unit.
        return '' if n == 1 else 's'

    text = u'{0} minute{1}'.format(minutes, plural(minutes))
    if hours != 0:
        text = u'{0} hour{1} and {2}'.format(hours, plural(hours), text)
    if days != 0:
        text = u'{0} day{1}, {2}'.format(days, plural(days), text)
    return text
def get_sequence_frequencies(sequences):
    """
    Computes the frequencies of different sequences in a collection, returning
    a dictionary of their string representations and counts, ordered from
    most to least frequent.

    Example
    --------
    >>> s1 = [1,1,2,2,3]
    >>> s2 = [1,2,2,3,3]
    >>> s3 = [1,1,2,2,2]
    >>> sequences = [s1,s2,s2,s3,s3,s3]
    >>> ps.get_sequence_frequencies(sequences) #doctest: +NORMALIZE_WHITESPACE
    {'[1, 1, 2, 2, 2]': 3,
     '[1, 2, 2, 3, 3]': 2,
     '[1, 1, 2, 2, 3]': 1}
    """
    # String representations make the sequences hashable and easy to compare.
    reprs = [str(seq) for seq in sequences]
    counts = {}
    for rep in reprs:
        counts[rep] = counts.get(rep, 0) + 1
    ordered = sorted(counts.items(), key=lambda item: item[1], reverse=True)
    return dict(ordered)
def generate_volmount_args(src_str, dst_str, labels):
    """Generate a podman style volmount argument pair (``-v src:dst[:labels]``)."""
    spec = '{}:{}'.format(src_str, dst_str)
    if labels:
        # Only add the separating colon when the labels don't bring their own.
        separator = '' if labels.startswith(':') else ':'
        spec = spec + separator + labels
    return ['-v', spec]
from pathlib import Path
from typing import Any
import json
def get_section_index(metapath: Path) -> list[dict[str, Any]]:
    """Return content of sections index file.

    Returns an empty list when the sections folder has not been created in
    the first place.
    """
    sections_dir = metapath.parent.absolute()
    if not sections_dir.is_dir():
        return []
    with metapath.open() as handle:
        return json.load(handle)
import csv
def load_lex(filename):
    """Load a lexicon (key: float value) from a CSV file."""
    with open(filename, 'r') as infile:
        reader = csv.reader(infile)
        return {row[0]: float(row[1]) for row in reader}
from typing import Callable
def expand_dtypes(func: Callable) -> Callable:
    """
    Generate test cases for `int32` and `int64` dtypes by copying the
    test cases of `int` dtype, and for `float32`/`float64` by copying the
    `float` cases.  New cases are appended in place to
    ``func.pytestmark[0].args[1]``.
    """
    def _with_dtype(arg: tuple, generic: str, concrete: str) -> tuple:
        # Copy the case, substituting the generic dtype for the concrete one.
        # (The original built ``x if x != s else s``, an identity expression,
        # so cases were duplicated without the dtype ever being renamed.)
        return tuple(concrete if x == generic else x for x in arg)

    result = []
    for arg in func.pytestmark[0].args[1]:  # type: ignore
        if 'int' in arg:
            result.append(_with_dtype(arg, 'int', 'int64'))
            result.append(_with_dtype(arg, 'int', 'int32'))
        elif 'float' in arg:
            result.append(_with_dtype(arg, 'float', 'float64'))
            result.append(_with_dtype(arg, 'float', 'float32'))
    for arg in result:
        func.pytestmark[0].args[1].append(arg)  # type: ignore
    return func
from typing import Union
def _is_line_empty(line: Union[str, bytes]) -> bool:
"""
Returns whether line is not relevant in an OFF file.
"""
line = line.strip()
return len(line) == 0 or line[:1] == b"#" | 05f25d9418dc1a5506d75b35a35a39c9fa0f0c8d | 129,320 |
def min_conf_filter_predictions(filter_dict, preds, confs, label_dict=None):
    """Filter predictions based on per-label confidence thresholds.

    Args:
        filter_dict (dict): A dict of strings or ints that get mapped to a
            minimum confidence value. If the keys are strings, label_dict is
            required.
        preds (list): A list of predicted labels (strings) or classes (ints)
        confs (list): A list of floats associated to confidence
            values for each predicted class
        label_dict (dict): A dict mapping the classifier output index to a string

    Returns:
        qualifying_preds (list): A list of elements from preds
            that have qualifying confidence
    """
    if label_dict is None:
        label_dict = {}
    kept = []
    for pred, conf in zip(preds, confs):
        # Look the threshold up by the raw prediction first, then by its
        # string label; absent both, no minimum applies.
        threshold = filter_dict.get(pred)
        if threshold is None:
            threshold = filter_dict.get(label_dict.get(pred))
        if threshold is None:
            threshold = 0
        if conf >= threshold:
            kept.append(pred)
    return kept
def find_max_tech_capacity(capacity):
    """
    Calculate the technology with greatest capacity at each plant.

    Parameters
    ----------
    capacity : DataFrame
        dataframe with capacity by generator at every plant

    Returns
    -------
    dictionary
        mapping of plant_id to the technology with largest total capacity
    """
    # Total capacity per (plant, technology) pair.
    totals = capacity.groupby(['plant_id', 'technology']).sum()
    return {
        plant: totals.loc[plant, 'nameplate_capacity_mw'].idxmax()
        for plant in capacity['plant_id'].unique()
    }
import re
def parse_agilent33220a_configuration_str(str):
    """Parse the Agilent 33220A configuration string.

    The string looks like ``"SIN +1.0E+03,+2.0E+00,-5.0E-01"`` (with a
    leading quote and a two-character tail after the offset).

    Returns 4 elements: function name, frequency, amplitude, offset.
    """
    parts = str.split(",")
    function_frequency = parts[0]
    amplitude = parts[1]
    offset = parts[2]
    words = function_frequency.split(" ")
    function = words[0]
    frequency = words[1]
    return (
        function[1:],        # drop the leading quote
        float(frequency),
        float(amplitude),
        float(offset[:-2]),  # drop the two trailing characters
    )
def couple(lst1, lst2):
    """Return a list that contains lists with i-th elements of two sequences
    coupled together.

    >>> lst1 = [1, 2, 3]
    >>> lst2 = [4, 5, 6]
    >>> couple(lst1, lst2)
    [[1, 4], [2, 5], [3, 6]]
    >>> lst3 = ['c', 6]
    >>> lst4 = ['s', '1']
    >>> couple(lst3, lst4)
    [['c', 's'], [6, '1']]
    """
    assert len(lst1) == len(lst2)
    return [[a, b] for a, b in zip(lst1, lst2)]
def is_isogram(string):
    """
    Test whether the given string parameter is an isogram (no repeated
    letters, ignoring case, hyphens and spaces).
    """
    letters = [c for c in string.lower() if c not in '- ']
    return len(set(letters)) == len(letters)
import itertools
def self_product(iterable):
    """
    Return the cross product of the iterable with itself.

    >>> list(self_product([1, 2, 3]))
    [(1, 1), (1, 2), ..., (3, 3)]
    """
    return itertools.product(iterable, repeat=2)
def is_comment(line):
    """
    Check if a line consists only of whitespace and
    (optionally) a comment.
    """
    stripped = line.strip()
    return not stripped or stripped.startswith('#')
def fetch_db_info(mongo):
    """Run the MongoDB ``listDatabases`` administration command.

    Arguments:
        (input) mongo -> Database instance.
        (output) -> Returns a document from the listDatabases command.
    """
    command = "listDatabases"
    return mongo.adm_cmd(command)
def get_fasttext_train_calls(train_file_path, param_dict, fasttext_path, model_path, thread=1,
                             pretrained_vectors_path=None):
    """
    Build the fastText command-line invocations for supervised training and
    for quantizing (compressing) the resulting model.
    :param train_file_path: path to the training dataset
    :param param_dict: dictionary mapping fasttext hyperparameters to their values
    :param fasttext_path: path to the fastText executable
    :param model_path: str, path to output model
    :param thread: int, the number of threads to use
    :param pretrained_vectors_path: str, path to pre-trained `.vec` file with word embeddings
    :return tuple of str - fastText calls for training and quantizing
    """
    train_call = [fasttext_path, 'supervised', '-input', train_file_path, '-output', model_path]
    # Hyperparameters are appended in sorted-key order so calls are reproducible.
    for key in sorted(param_dict):
        train_call.extend([key, str(param_dict[key])])
    train_call.extend(['-thread', str(thread)])
    if pretrained_vectors_path is not None:
        train_call.extend(['-pretrainedVectors', pretrained_vectors_path])
    compress_call = [fasttext_path, 'quantize', '-input', model_path, '-output', model_path]
    return train_call, compress_call
import torch
def getclassAccuracy(output, target, nclasses, topk=(1,)):
    """
    Computes the top-k accuracy between output and target and aggregates it by class
    :param output: output vector from the network (per-sample class scores, shape [batch, nclasses] — TODO confirm)
    :param target: ground-truth class index per sample
    :param nclasses: nclasses in the problem
    :param topk: Top-k results desired, i.e. top1, top2, top5
    :return: topk vectors aggregated by class — one [1, nclasses] uint8 CUDA
        tensor per requested k, holding per-class correct-prediction counts
    """
    maxk = max(topk)
    # One topk call at the largest k covers every requested k via slicing below.
    score, label_index = output.topk(k=maxk, dim=1, largest=True, sorted=True)
    # correct[n, j] is True iff the j-th ranked prediction of sample n equals its target.
    correct = label_index.eq(torch.unsqueeze(target, 1))
    ClassAccuracyRes = []
    for k in topk:
        # Per-class hit counter.  NOTE(review): hard-coded .cuda() requires a GPU,
        # and uint8 overflows silently past 255 hits per class — confirm batch sizes.
        ClassAccuracy = torch.zeros([1, nclasses], dtype=torch.uint8).cuda()
        # 1 if the target appears among the first k ranked predictions of the sample.
        correct_k = correct[:, :k].sum(1)
        for n in range(target.shape[0]):
            ClassAccuracy[0, target[n]] += correct_k[n].byte()
        ClassAccuracyRes.append(ClassAccuracy)
    return ClassAccuracyRes
def to_ttpt(alpha):
    """
    Convert an alphanumeric grade to the 22-point scale.

    Args:
        alpha (str): The alphanumeric grade to convert
    Returns:
        The 22pt grade as an integer
    Raises:
        KeyError: if `alpha` is not a recognised grade.
    """
    # The lettered bands map onto a descending 22..1 scale.
    banded = ("A1", "A2", "A3", "A4", "A5",
              "B1", "B2", "B3",
              "C1", "C2", "C3",
              "D1", "D2", "D3",
              "E1", "E2", "E3",
              "F1", "F2", "F3",
              "G1", "G2")
    scale = dict(zip(banded, range(22, 0, -1)))
    # H, CW, CR and MV all map to zero.
    scale.update({"H": 0, "CW": 0, "CR": 0, "MV": 0})
    return scale[alpha]
from pathlib import Path
def path_to_url(path: str | Path) -> str:
"""
Convert a path to a file: URL. The path will be made absolute unless otherwise
specified and have quoted path parts.
"""
return Path(path).absolute().as_uri() | 0a794a88778e069e5b09d62e8d5c4ee023f4c79e | 129,365 |
def extract_sequences(references):
    """
    Map each reference id to its sequence string.

    :param references: Dictionary with reference models.
    :rtype: dict
    :return: Reference ids as keys and their corresponding sequences as values
    """
    return {
        ref_id: model["sequence"]["seq"]
        for ref_id, model in references.items()
    }
def squared_error(y_orig, y_line):
    """
    Total sum of squared residuals between fitted and original points.

    :param y_orig: original y points (array-like supporting elementwise '-')
    :param y_line: fitted y points
    :return: scalar sum of squared errors
    """
    residuals = y_line - y_orig
    return sum(residuals ** 2)
def ghi_clear_to_ghi(ghi_clear: float, cloud_coverage: float) -> float:
    """Compute global horizontal irradiance (GHI) from clear-sky GHI, given a cloud coverage between 0 and 1.
    References
    ----------
    Perez, R., Moore, K., Wilcox, S., Renne, D., Zelenka, A., 2007.
    Forecasting solar radiation – preliminary evaluation of an
    approach based upon the national forecast database. Solar Energy
    81, 809–812.
    """
    if cloud_coverage < 0 or cloud_coverage > 1:
        raise ValueError("cloud_coverage should lie in the interval [0, 1]")
    # Attenuation term from the Perez et al. (2007) empirical fit.
    attenuation = 0.87 * cloud_coverage ** 1.9
    return (1 - attenuation) * ghi_clear
def get_alphabetical_topics(course_module):
    """Return the course's team topics ordered case-insensitively by name.

    Arguments:
        course_module (xmodule): the course which owns the team topics
    Returns:
        list: a list of sorted team topics
    """
    team_sets = course_module.teams_configuration.cleaned_data['team_sets']
    return sorted(team_sets, key=lambda team_set: team_set['name'].lower())
def mystery_2a_no_if(x: int, y: int, z: set[int]) -> bool:
    """Return the same value as mystery_2a_if, but without using any if statements."""
    # When x >= y the answer is just "x in z"; otherwise both x and y must be
    # absent from z.  Encoded as: membership of x must agree with (x >= y),
    # plus the extra y-absence requirement in the x < y case.
    ge = x >= y
    return (ge == (x in z)) and (ge or y not in z)
def pad_binary_string(binary_string, required_length):
    """
    Left-pad a binary string with zeros to the requested width.

    Example: pad_binary_string('101', 5) -> '00101'

    If `binary_string` already has `required_length` characters or more it is
    returned unchanged (same as the original manual-padding behaviour).

    :param binary_string: a binary representation as a string
    :param required_length: the number of digits required in the output binary string
    :return: a binary representation as a string with additional zeros
    """
    # str.rjust does exactly the manual join-of-zeros padding, in C.
    return binary_string.rjust(required_length, '0')
def mutate_image_dict_to_v1(image):
    """
    Replaces a v2-style image dictionary's 'visibility' member with the
    equivalent v1-style 'is_public' member.

    The dict is mutated in place and also returned.
    """
    image['is_public'] = image.pop('visibility') == 'public'
    return image
import random
def _sample_vocab(tft_output, vocab_name, label, k):
    """Randomly sample `k` entries of a TFT vocabulary.

    Args:
      tft_output: a TFTransformOutput object.
      vocab_name: the name of the embedding vocabulary made with tft.
      label: a label to assign each sample of the vocab.
      k: the number of samples to take.

    Returns:
      (indices, metadata): sampled vocab indices, plus a [label, token]
      pair for each sampled index.

    Raises:
      RuntimeError: k is larger than the vocab size.
    """
    vocab = tft_output.vocabulary_by_name(vocab_name)
    vocab_size = len(vocab)
    if k > vocab_size:
        raise RuntimeError("{0} num samples too high, must be at most {1}"
                           .format(label, vocab_size))
    sampled = random.sample(range(vocab_size), k)
    metadata = [[label, vocab[index]] for index in sampled]
    return sampled, metadata
def generate_output(shortest_path, starting_node, ending_node):
    """
    Generates the message to the user detailing the result of the
    breadth first search to find the shortest path.

    Args (inferred from use):
        shortest_path: int path length; negative means no path was found,
            0 means start and end coincide.
        starting_node: start node identifier (only interpolated into the message).
        ending_node: end node identifier (only interpolated into the message).

    Returns:
        str: human-readable description of the search result.
    """
    # A negative value is the BFS sentinel for "no route between the nodes".
    if shortest_path < 0:
        output_message = "Nodes {} and {} are not connected.".format(starting_node, ending_node)
    elif shortest_path == 0:
        output_message = "The shortest path is 0. If you're at home, you don't have to \
cross the street to get home!"
    else:
        output_message = "The shortest path between nodes {} and {} is {}.".format(starting_node, ending_node, shortest_path)
    return output_message
import math
def image_normalize(image):
    """Standardize a numpy array similar to tf.image.per_image_standardization.

    Subtracts the mean and divides by the standard deviation, flooring the
    divisor at 1/sqrt(N) (N = element count) so near-uniform images do not
    blow up.
    """
    stddev_floor = 1.0 / math.sqrt(image.size)
    adjusted_stddev = max(image.std(), stddev_floor)
    return (image - image.mean()) / adjusted_stddev
def _is_s3_notif(event):
    """
    Check whether `event` looks like an S3 bucket notification: a non-empty
    "Records" list whose first record has an "s3" key.

    :param event: parsed event payload (dict-like)
    :return: True/False
    """
    records = event.get("Records")
    # bool() fixes the falsy-but-not-False cases (missing/empty "Records"
    # previously returned None or []), matching the documented True/False.
    return bool(
        records
        and isinstance(records, list)
        and "s3" in records[0]
    )
def read_paths(path):
    """Return the list of paths stored one-per-line in the file at `path`,
    with surrounding whitespace (including the newline) stripped."""
    with open(path, "r", encoding="utf-8") as in_file:
        return [line.strip() for line in in_file]
def h_misplaced(node):
    """ Heuristic h(n) = number of misplaced tiles, comparing the node's
    state against the goal (1,2,3,4,5,6,7,8,0).  The blank tile (0) is
    never counted as misplaced. """
    goal_state = (1, 2, 3, 4, 5, 6, 7, 8, 0)
    return sum(
        1
        for i in range(9)
        if node.state[i] != 0 and node.state[i] != goal_state[i]
    )
def merge_dfs(df_list, keys_for_merge):
    """Joins all the dataframes on the keys.
    Does an outer join, does not check that all rows exist in all dataframes.
    Args:
      df_list: A list of pandas dataframes. Each needs to have the keys_for_merge
        as columns.
      keys_for_merge: A string list of column names to merge on. Typically
        ['plate', 'well'] aka WELL_MERGE_KEYS.
    Returns:
      The joined pandas dataframe.
    Raises:
      ValueError: if df_list is empty, or any dataframe lacks a merge key.
    """
    if len(df_list) < 1:
        raise ValueError('I need at least 1 df to merge')
    for k in keys_for_merge:
        if k not in df_list[0]:
            raise ValueError('df missing column %s. Has: %s' %
                             (k, df_list[0].columns))
    merged_df = df_list[0]
    for df in df_list[1:]:
        # Keep the merge keys plus only the columns not already present, so
        # shared non-key columns don't pick up _x/_y suffixes.
        cols_to_add = keys_for_merge + [c for c in df.columns if c not in merged_df]
        for k in keys_for_merge:
            if k not in df:
                # BUG FIX: was `df[0].columns`, which raised a KeyError (looking
                # up column 0) instead of reporting this dataframe's columns.
                raise ValueError('df missing column %s. Has: %s' % (k, df.columns))
        merged_df = merged_df.merge(df[cols_to_add], on=keys_for_merge, how='outer')
    return merged_df
import hashlib
def getMD5(string):
    """
    Return the hex MD5 digest of `string` (encoded as UTF-8).

    :param string: The string to deal with.
    :return: The md5 value of the string.
    """
    return hashlib.md5(string.encode('UTF-8')).hexdigest()
def _default_to_dict(mappings):
    """
    Recursively convert a (default)dict tree into plain dicts.
    Non-dict values are returned unchanged.
    """
    if not isinstance(mappings, dict):
        return mappings
    converted = {}
    for key, value in mappings.items():
        converted[key] = _default_to_dict(value)
    return converted
import random
def data_split(num_data: int, train_data: float, val_data: float, shuffle: bool = True) -> tuple:
    """
    Split `num_data` indices into training / validation (/ test) index lists.
    Any indices not assigned to training or validation form the test split.

    Parameters
    ----------
    num_data: Total number of available data.
    train_data: Fraction of data, which should be used for training.
    val_data: Fraction of data, which should be used for validation.
    shuffle: Boolean, which indicates whether to shuffle the data or not.

    Returns
    -------
    tuple of index lists: (train, val, test) when leftover data exist,
    otherwise (train, val).
    """
    assert train_data + val_data <= 1, "The amount of training and validation data needs to be smaller ot equal to 1!"
    indices = list(range(num_data))
    if shuffle:
        random.shuffle(indices)
    # Split sizes are floored, so a remainder may be left over for testing.
    n_train = int(train_data * num_data)
    n_val = int(num_data * val_data)
    train_indices = indices[:n_train]
    val_indices = indices[n_train:n_train + n_val]
    test_indices = indices[n_train + n_val:]
    if test_indices:
        return train_indices, val_indices, test_indices
    return train_indices, val_indices
from bs4 import BeautifulSoup
import re
def __getAddresses(parsed: BeautifulSoup) -> list:
    """Function to extract company addresses from the parsed HTML EDGAR page.
    Searches for address information in divs with class name 'mailer'.
    Arguments:
        parsed {BeautifulSoup} -- Parsed HTML from company EDGAR filing.
    Returns:
        list -- One dict per 'mailer' div with keys: 'type' (the div's first
            line), 'street_address' (remaining non-phone lines joined by
            single spaces), and 'phone' (digits only) when a phone line exists.
    """
    # Addresses container
    address_divs = parsed.find_all('div', class_='mailer')
    # RegEx matching phone numbers in any of these formats:
    # 1. (###) ###-####
    # 2. ###-###-####
    # 3. ##########
    phone_number_regex = re.compile(
        r'(\(\d{3}\) \d{3}-\d{4}|\d{3}-\d{3}-\d{4}|\d{10})')
    # List for final addresses
    addresses = list()
    for address in address_divs:
        # Create dict for address
        address_parsed = dict()
        # Split text by newline
        address_items = address.text.split('\n')
        # Removing leading and trailing spaces
        address_items = [i.strip() for i in address_items]
        # Accumulates the non-phone lines as the street address
        street_address = ''
        # Iterate through each line
        for idx, address_item in enumerate(address_items):
            # First line is the address type/label
            if idx == 0:
                address_parsed['type'] = address_item
                continue
            # Check if line has phone number
            phone_matches = phone_number_regex.findall(address_item)
            # NOTE(review): a line with two or more phone matches falls through
            # to the street address — confirm that is intended.
            if len(phone_matches) == 1:
                # Stripping non-digit characters from phone number
                phone_number = re.sub('[^0-9]', '', phone_matches[0])
                address_parsed['phone'] = phone_number
                continue
            # If no number, add to address line
            street_address += address_item.strip() + ' '
        # Adding street address to parsed address
        address_parsed['street_address'] = street_address.strip()
        # Adding parsed address to addresses master list
        addresses += [address_parsed]
    return addresses
def _modulo_ab(x: float, a: float, b: float) -> float:
    """Map a real number onto the half-open interval [a, b)."""
    if a >= b:
        raise ValueError("Incorrect interval ends.")
    remainder = (x - a) % (b - a)
    # Python's % with a positive divisor is non-negative; the guard mirrors
    # the original formulation for safety.
    return remainder + b if remainder < 0 else remainder + a
def filter_unique_builds(builds):
    """ Keep only the first build seen for each distinct job name
    (also prints how many unique builds were found). """
    builds_by_job = {}
    for build in builds:
        builds_by_job.setdefault(build["job_name"], build)
    unique_builds = list(builds_by_job.values())
    print("Found %d unique job builds" % len(unique_builds))
    return unique_builds
def get_timestamp(volatile_status_file):
    """Get BUILD_TIMESTAMP as an integer.

    Reads a workspace-status file of "name<space>value" lines
    (https://docs.bazel.build/versions/master/user-manual.html#workspace_status)
    and returns the BUILD_TIMESTAMP value.

    Args:
      volatile_status_file: path to input file. Typically ctx.version_file.path.
    Returns:
      int: value of BUILD_TIMESTAMP
    Exceptions:
      Exception: Raised if there is no BUILD_TIMESTAMP or if it is not a number.
    """
    with open(volatile_status_file, 'r') as status_f:
        for line in status_f:
            fields = line.strip().split(' ')
            if fields[0] == 'BUILD_TIMESTAMP' and len(fields) > 1:
                return int(fields[1])
    raise Exception(
        "Invalid status file <%s>. Expected to find BUILD_TIMESTAMP" % volatile_status_file)
def subtractive_combination_validity(pairs):
    """
    Validate Roman-numeral subtractive pairs.  True iff every pair is valid:
    I may only precede V or X, X only L or C, and C only D or M.

    PARAMETERS:
        pairs : ( (str pair, int index), ... ) output of find_subtractive_combinations()
    RETURNS: bool
    """
    # Only I, X and C may lead a subtractive pair, each with two legal followers.
    allowed_followers = {"I": ("V", "X"), "X": ("L", "C"), "C": ("D", "M")}
    for (leading_numeral, second_numeral), _ in pairs:
        if second_numeral not in allowed_followers.get(leading_numeral, ()):
            return False
    return True
def Bin_minutes(val):
    """
    Bucket a minute count into half-hour bins: 0.5 when the value is at
    least half an hour, otherwise 0.0.
    """
    return 0.5 if val / 60. >= 0.5 else 0.0
def process_latitude(cell):
    """Parse the latitude portion of a cell (text before the first '/')
    as a DDMMSS string and return decimal degrees."""
    raw = cell.strip().split("/")[0].strip()
    # Character pairs: degrees, minutes, seconds.
    degrees = int(raw[0] + raw[1])
    minutes = int(raw[2] + raw[3])
    seconds = int(raw[4] + raw[5])
    return float(degrees + minutes / 60 + seconds / 3600)
import struct
def vox_size_content( x_size: int, y_size: int, z_size: int ) -> bytes:
    """Produce the payload of a VOX SIZE chunk: one model's dimensions packed
    as three little-endian uint32 values.  Each dimension must be in 1..126.
    """
    for dimension in (x_size, y_size, z_size):
        assert 0 < dimension <= 126
    return struct.pack('<III', x_size, y_size, z_size)
def gene(ncrna):
    """
    Return the part of the record's geneId after the first ':'
    (e.g. 'HGNC:5' -> '5'), or None when no geneId is present.
    """
    gene_id = ncrna.get("gene", {}).get("geneId")
    if not gene_id:
        return None
    return gene_id.split(":", 1)[1]
import torch
def dict_to_torchGMM(gmm: dict) -> torch.distributions.MixtureSameFamily:
    """Build a PyTorch Gaussian-mixture distribution from a parameter dict.

    Args:
        gmm: dict with keys 'pi' (mixture weights), 'mean' (component means)
            and 'cov' (component covariance matrices), as torch tensors.
    Return: the corresponding MixtureSameFamily distribution.
    """
    weights = torch.distributions.Categorical(probs=gmm['pi'])
    components = torch.distributions.multivariate_normal.MultivariateNormal(
        loc=gmm['mean'],
        covariance_matrix=gmm['cov'],
    )
    return torch.distributions.mixture_same_family.MixtureSameFamily(
        mixture_distribution=weights,
        component_distribution=components,
    )
def chr_dict(d):
    """
    Regroup a {'<chromosome>_<position>': ...} dict (as produced by
    file2dict) into {chromosome: [position, ...]} with int positions.

    The key is split on its LAST underscore, so chromosome names that
    themselves contain underscores (e.g. 'chr1_alt_50') are handled
    correctly; the previous whole-string split crashed on them.
    """
    new_dict = {}
    for key in d:
        chromosome, pos = key.rsplit('_', 1)
        new_dict.setdefault(chromosome, []).append(int(pos))
    return new_dict
from datetime import datetime
def convert_string_to_date(date_string):
    """
    Parse a '%Y/%m/%d' string into a datetime object (time fields zeroed).

    Args:
        date_string: A date string in format '%Y/%m/%d'
    Returns:
        A datetime object.
    Notes:
        Help on the datetime format: https://docs.python.org/2/library/time.html
    """
    date_format = '%Y/%m/%d'
    return datetime.strptime(date_string, date_format)
def relative_luminance(red, green, blue):
    """
    Calculate the relative luminance of an RGB colour using the
    ITU-R BT.709 / WCAG coefficients.

    :param red: Red, value in range [0.0, 1.0]
    :param green: Green, value in range [0.0, 1.0]
    :param blue: Blue, value in range [0.0, 1.0]
    :return: Relative luminance in [0.0, 1.0]
    """
    # BUG FIX: the green coefficient was mistyped as 0.7512.  The standard
    # BT.709 weights are 0.2126 R + 0.7152 G + 0.0722 B (they sum to 1,
    # so pure white maps to luminance 1.0).
    return 0.2126 * red + 0.7152 * green + 0.0722 * blue
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.