content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
import json
def create_diff_json(image_diff, file_output_name):
    """Serialize an image diff object to ``<file_output_name>.diff.json``.

    Args:
        image_diff: object exposing ``attributes`` (iterable of attribute
            names whose values are (before, after) pairs) and ``pixel_diff``.
        file_output_name (str): output path prefix; ".diff.json" is appended.

    Returns:
        str: path of the JSON file that was written.
    """
    diff_content = {}
    for attr in image_diff.attributes:
        values = getattr(image_diff, attr)
        entry = {"before": values[0], "after": values[1]}
        # Only flag attributes whose value actually changed.
        if entry["before"] != entry["after"]:
            entry["diff"] = True
        diff_content[attr] = entry
    diff_content["pixel"] = image_diff.pixel_diff
    out_name = file_output_name + ".diff.json"
    with open(out_name, "w") as diff_file:
        json.dump(diff_content, diff_file, indent=4)
    return out_name
|
0e5b506b7acbc15ca26f21640ee2748598858008
| 11,230
|
def format_class_name(spider_name):
    """Format the spider name into a class name, e.g. "my" -> "MySpider"."""
    return f"{spider_name.capitalize()}Spider"
|
a477c02873347e8df975dade64b8c316ab8dfe67
| 11,231
|
def ordinal(value):
    """Cardinal to ordinal conversion for the edition field.

    :param value: a string holding a cardinal number (e.g. "2" -> "2nd").
        Non-numeric strings are returned as their first whitespace token.
    :return: the value with an ordinal suffix appended, the int itself when
        it is < 1, or the first token of a non-numeric string.
    """
    try:
        digit = int(value)
    except ValueError:
        # Was a bare `except:` — narrowed so unrelated errors still surface.
        # Not a plain number (e.g. "2nd ed."): give back the first token.
        return value.split(' ')[0]
    if digit < 1:
        # Non-positive values are passed through unchanged (as int).
        return digit
    # 11, 12, 13 take "th" despite ending in 1/2/3.
    if digit % 100 in (11, 12, 13):
        return value + 'th'
    last = digit % 10
    if last == 3:
        return value + 'rd'
    if last == 2:
        return value + 'nd'
    if last == 1:
        return value + 'st'
    return value + 'th'
|
b97accfe96f214ffe9878c24bd78ab63083c9329
| 11,233
|
import torch
def complex_abs(tensor):
    """Compute the magnitude of a two-channel complex image tensor.

    Parameters
    ----------
    tensor : torch.Tensor
        Tensor of shape (batch, 2, height, width); channel 0 is the real
        part, channel 1 the imaginary part.

    Returns
    -------
    torch.Tensor of shape (batch, 1, height, width) holding the magnitudes.
    """
    real = tensor[:, 0]
    imag = tensor[:, 1]
    magnitude = torch.sqrt(real * real + imag * imag)
    return magnitude.unsqueeze(1)
|
dd206d9fb58b819d5ed1c58af8e9fc559430ac3a
| 11,234
|
import win32security
import os
def isUserAdmin():
    """Check if the current OS user is an Administrator or root.

    :return: True if the current user is an 'Administrator', otherwise False.
        (On Windows, the raw CheckTokenMembership result is returned.)
    """
    # The original referenced a module-level `log` that was never defined,
    # so both Windows branches raised NameError; create a named logger here.
    import logging
    log = logging.getLogger(__name__)
    if os.name == 'nt':
        try:
            adminSid = win32security.CreateWellKnownSid(
                win32security.WinBuiltinAdministratorsSid, None)
            rv = win32security.CheckTokenMembership(None, adminSid)
            log.info("isUserAdmin - CheckTokenMembership returned: %r", rv)
            return rv
        except Exception as e:
            log.warning("Admin check failed, assuming not an admin.", exc_info=e)
            return False
    else:
        # Check for root on Posix
        return os.getuid() == 0
|
9214ef749c6cfeeee0305f2e1d0bfdbf2b5cedcc
| 11,235
|
def format_docstring(usage: str, arg_list, kwarg_dict):
    """Build a Python docstring from a usage line plus argument metadata.

    :param usage: raw usage text (surrounding quotes/whitespace stripped)
    :param arg_list: dicts with 'input', 'name', 'type' keys
    :param kwarg_dict: dicts with 'name', 'type', optional 'desc'/'help' keys
    :return: the assembled docstring text
    """
    header = usage.strip().strip("\"")
    pieces = [f'{header}\n\n']
    for arg in arg_list:
        # Only document arguments flagged as inputs.
        if arg['input']:
            pieces.append(f"\t:param {arg['name']} {arg['type']}:\n")
    for kwarg in kwarg_dict:
        if 'help' not in kwarg:
            desc = kwarg['desc'] if 'desc' in kwarg else None
            pieces.append(f"\t:param {kwarg['name']} {kwarg['type']}: {desc} \n")
    return ''.join(pieces)
|
a68c0d03aaf851ffb039b722affbdc60f7d05c1c
| 11,236
|
from pathlib import Path
import shutil
def temp_article_dir(temp_cwd: Path) -> Path:
    """Populate the current working directory with the "tests/data/article"
    dataset and return the temporary cwd fixture unchanged.
    """
    article_root = Path(__file__).parent / "data" / "article"
    for entry in article_root.iterdir():
        destination = Path.cwd() / entry.relative_to(article_root)
        # Directories need a recursive copy; plain files a simple one.
        if entry.is_dir():
            shutil.copytree(entry, destination)
        else:
            shutil.copy(entry, destination)
    return temp_cwd
|
643b2c7f1183f19f263eaba0f3e930ac18fa6988
| 11,237
|
def test_pointer_indexing(pointer_value, type_p):
"""
>>> a = np.array([1, 2, 3, 4], dtype=np.float32)
>>> test_pointer_indexing(a.ctypes.data, float32.pointer())
(1.0, 2.0, 3.0, 4.0)
>>> a = np.array([1, 2, 3, 4], dtype=np.int64)
>>> test_pointer_indexing(a.ctypes.data, int64.pointer())
(1L, 2L, 3L, 4L)
"""
p = type_p(pointer_value)
return p[0], p[1], p[2], p[3]
|
21f87a5ec840e3fd789c9e1ca9382d96f451e1e5
| 11,239
|
def find_by_id(object_id, items):
    """Return the first item in *items* whose "id" field equals *object_id*.

    Raises Exception when no item matches.
    """
    for candidate in items:
        if candidate["id"] == object_id:
            return candidate
    raise Exception(f"Item with {object_id} not found")
|
822bf82ea68bd94d0bb1ed2dd5db754aee9b0cba
| 11,240
|
import requests
import json
def create_snippet(data, baseurl, timeout, raw):
    """
    Creates snippet with the given data on the haste server specified by the
    baseurl and returns URL of the created snippet.

    :param data: snippet text to upload (str, sent UTF-8 encoded)
    :param baseurl: haste server base URL, without trailing slash
    :param timeout: request timeout in seconds (anything float() accepts)
    :param raw: if truthy, return the /raw/ URL variant
    :raises SystemExit: when the connection times out
    """
    try:
        url = baseurl + "/documents"
        response = requests.post(url, data.encode('utf-8'), timeout=float(timeout))
    except requests.exceptions.Timeout:
        # raise SystemExit directly instead of calling the `exit` helper:
        # `exit` is injected by the `site` module and is absent under
        # `python -S` and in frozen applications.
        raise SystemExit("Error: connection timed out")
    dockey = json.loads(response.text)['key']
    return baseurl + ("/raw/" if raw else "/") + dockey
|
eb34861909b61749ef2f29da16450a82dcc8d83e
| 11,241
|
import torch
def fuse_epic_sliding_windows(maps_dict):
    """
    Fuse overlapping sliding-window predictions by averaging each timeline
    position over the number of windows that cover it.

    Args:
        maps_dict (dict): expects at least the keys
            "start", "end": lists of per-window 1-D tensors,
            "confidence_map", "verb_map", "noun_map": lists of per-window
                3-D tensors indexed [:, :, t],
            "feat_mask_in_global": list of boolean masks locating each
                window on the global timeline (used with stride 2),
            "map_mask": a 2-D mask shared by all windows.
        NOTE(review): the key shapes above are inferred from the indexing in
        this function — confirm against the caller.

    Returns:
        dict with averaged "start", "end", "confidence_map", "verb_map"
        and "noun_map" tensors covering the full timeline.
    """
    # Global timeline length taken from the last window's mask, subsampled by 2.
    total_len = maps_dict['feat_mask_in_global'][-1][::2].shape[0]
    # Per-position window coverage counts (denominators for the averages).
    temporal_sum_count = torch.zeros(total_len)
    map_sum_count = torch.zeros(maps_dict['confidence_map'][0].size(1), total_len)
    # Accumulators for each prediction map.
    sum_start = torch.zeros(total_len)
    sum_end = torch.zeros(total_len)
    sum_confidence_map = torch.zeros(maps_dict['confidence_map'][0].size(0), maps_dict['confidence_map'][0].size(1), total_len)
    sum_noun_map = torch.zeros(maps_dict['noun_map'][0].size(0), maps_dict['noun_map'][0].size(1), total_len)
    sum_verb_map = torch.zeros(maps_dict['verb_map'][0].size(0), maps_dict['verb_map'][0].size(1), total_len)
    for idx in range(len(maps_dict['feat_mask_in_global'])):
        # Subsample the window mask by 2 and pad it to the global length.
        mask = maps_dict['feat_mask_in_global'][idx][::2]
        mask = torch.cat([mask, torch.zeros(total_len-mask.size(0), dtype=torch.bool)])
        temporal_sum_count[mask] += 1
        # Scatter each window's predictions onto the positions it covers.
        sum_start[mask] += maps_dict['start'][idx][:mask.sum()]
        sum_end[mask] += maps_dict['end'][idx][:mask.sum()]
        sum_confidence_map[:, :, mask] += maps_dict['confidence_map'][idx][:, :, :mask.sum()]
        sum_noun_map[:, :, mask] += maps_dict['noun_map'][idx][:, :, :mask.sum()]
        sum_verb_map[:, :, mask] += maps_dict['verb_map'][idx][:, :, :mask.sum()]
        map_sum_count[:, mask] += maps_dict['map_mask'][:, :mask.sum()]
    # Avoid division by zero for positions no window covered.
    temporal_sum_count[temporal_sum_count < 0.01] = 1.0
    map_sum_count[map_sum_count < 0.01] = 1.0
    results = {"start": sum_start / temporal_sum_count,
               "end": sum_end / temporal_sum_count,
               "confidence_map": sum_confidence_map / map_sum_count,
               "verb_map": sum_verb_map / map_sum_count,
               "noun_map": sum_noun_map / map_sum_count}
    return results
|
205ac18633fcfddd9ba9af6f7b82d89345bf96c3
| 11,242
|
import argparse
def get_args():
    """Process command-line arguments.

    Returns the parsed argparse namespace, with `verbose` added and the
    `list`/`audit`/`find` flags normalized (see comments below).
    """
    parser = argparse.ArgumentParser(
        description='Tool to list all masters along with their hosts and ports.')
    parser.add_argument(
        '-l', '--list', action='store_true', default=False,
        help='Output a list of all ports in use by all masters. Default behavior'
             ' if no other options are given.')
    parser.add_argument(
        '--sort-by', action='store',
        help='Define the primary key by which rows are sorted. Possible values '
             'are: "port", "alt_port", "slave_port", "host", and "name". Only '
             'one value is allowed (for now).')
    parser.add_argument(
        '--find', action='store', metavar='NAME',
        help='Outputs three available ports for the given master class.')
    parser.add_argument(
        '--audit', action='store_true', default=False,
        help='Output conflict diagnostics and return an error code if '
             'misconfigurations are found.')
    parser.add_argument(
        '--presubmit', action='store_true', default=False,
        help='The same as --audit, but prints no output. Overrides all other '
             'options.')
    parser.add_argument(
        '-f', '--format', choices=['human', 'csv', 'json'],
        default='human', help='Print output in the given format')
    parser.add_argument(
        '--full-host-names', action='store_true', default=False,
        help='Refrain from truncating the master host names')
    opts = parser.parse_args()
    # `verbose` is not a CLI flag: it is derived here and only disabled in
    # presubmit mode, which must print nothing.
    opts.verbose = True
    # --list is the default action when no other mode was requested.
    if not (opts.find or opts.audit or opts.presubmit):
        opts.list = True
    # --presubmit overrides everything: silent audit only.
    if opts.presubmit:
        opts.list = False
        opts.audit = True
        opts.find = False
        opts.verbose = False
    return opts
|
e2434453d3bb8a430966f78bfd7b7978628d0443
| 11,243
|
def wh_to_kwh(wh):
    """
    Convert watt hours to kilowatt hours, rounded to two decimal places.

    :param wh: integer or decimal watt-hour value
    :return: kilowatt hours as a float with two decimal places
    """
    return float(f"{wh / 1000.00:.2f}")
|
19488960a9c7a4d2fc748f4d897d082cfdaee2b8
| 11,244
|
def julio (string, number):
    """Caesar-style cipher over a 21-letter Latin alphabet.

    Shifts each alphabet letter of *string* by *number* positions (wrapping
    around); characters outside the alphabet's A-Z ord range pass through
    unchanged. Letters in the A-Z range that are not in the alphabet
    (J, U, W, Y, Z) raise ValueError, as in the original behavior.
    """
    alpha = "ABCDEFGHIKLMNOPQRSTVX"
    size = len(alpha)
    # Build the shifted alphabet: position i maps to alpha[(i + number) % size].
    alpha_cod = "".join(alpha[(idx + number) % size] for idx in range(size))
    encoded = []
    for ch in string:
        if "A" <= ch <= "Z":
            encoded.append(alpha_cod[alpha.index(ch)])
        else:
            encoded.append(ch)
    return "".join(encoded)
|
676b6f12ddf5897171cc809de2275ec4972fa4a0
| 11,247
|
import torch
def recall_at_k_batch_torch(X_pred, heldout_batch, k=100):
    """
    Recall@k for prediction scores [B, I] against ground truth [B, I].

    The denominator is min(k, #heldout items per user); users with no
    heldout items yield NaN, matching the original behavior.
    """
    n_users = X_pred.shape[0]
    _, top_indices = torch.topk(X_pred, k, dim=1, sorted=False)  # [B, K]
    # One-hot mask of the top-k predicted items per user.
    pred_mask = torch.zeros_like(X_pred)
    if torch.cuda.is_available():
        pred_mask = pred_mask.cuda()
    pred_mask[torch.arange(n_users).unsqueeze(1), top_indices] = 1
    true_mask = (heldout_batch > 0).float()  # [B, I]
    k_value = torch.tensor([k], dtype=torch.float32)
    if torch.cuda.is_available():
        true_mask = true_mask.cuda()
        k_value = k_value.cuda()
    hits = (true_mask * pred_mask).sum(dim=1).float()
    return hits / torch.min(k_value, true_mask.sum(dim=1).float())
|
580305c0ed43a22e8cf64bee23e3e207516e9e2a
| 11,248
|
def widthHeightDividedBy(image, value):
    """Divide the first two dimensions of an image's shape by *value*.

    NOTE(review): for a typical array image, shape[:2] is (rows, cols),
    i.e. (height, width) — the original w/h naming may have been swapped;
    confirm what callers expect.
    """
    first_dim, second_dim = image.shape[:2]
    return int(first_dim / value), int(second_dim / value)
|
78bbe60c43a1bbf362c98125bfddc080cf568861
| 11,249
|
def analytical_pulse_energy(q, ekev):
    """
    Analytical estimate of pulse energy from electron bunch charge and
    radiation energy.

    :param q: electron bunch charge [nC]
    :param ekev: radiation energy [keV]
    :return: pulse energy [J]
    """
    pulse_mj = 19 * q / ekev  # empirical estimate in millijoules
    return pulse_mj / 1e3     # mJ -> J
|
d81ddafcc41e0e8619922dce0583bf19579112bc
| 11,250
|
def get_file_date(tree):
    """
    Get publication date from dta file xml tree.
    :param tree: the xml tree (an ElementTree/Element supporting ``.find``)
    :return: str, the text of the publication date element. (The original
        docstring claimed int, but ``.text`` is a string — callers must
        convert if they need a number.)

    NOTE(review): if the element is absent, ``find`` returns None and the
    ``.text`` access raises AttributeError — confirm inputs always contain
    this path.
    """
    date = tree.find("{http://www.dspin.de/data/metadata}MetaData/{http://www.dspin.de/data/metadata}source/{http://www.clarin.eu/cmd/}CMD/{http://www.clarin.eu/cmd/}Components/{http://www.clarin.eu/cmd/}teiHeader/{http://www.clarin.eu/cmd/}fileDesc/{http://www.clarin.eu/cmd/}sourceDesc/{http://www.clarin.eu/cmd/}biblFull/{http://www.clarin.eu/cmd/}publicationStmt/{http://www.clarin.eu/cmd/}date").text
    return date
|
64b0e43b9926d94f1cec066af5bf58ecc10d5044
| 11,251
|
import sys
def open_file(path, mode='r'):
    """
    Open *path*, mapping the special name '-' to stdin (read modes) or
    stdout (write modes).
    """
    if path != '-':
        return open(path, mode)
    return sys.stdin if 'r' in mode else sys.stdout
|
b304a8dc68f2b558e0d0dbd69158128bac103cc6
| 11,253
|
def format_version(version):
    """Convert a version given as a list of integers to a dotted string.

    e.g. [1, 2, 3] -> '1.2.3'

    Args:
        version: ([int, ...]) Version as a list of integers.
    Returns:
        (str) Stringified version.
    """
    return '.'.join(map(str, version))
|
fa85b122ce6fed4919ec87ad29847b0997493806
| 11,254
|
import yaml
import os
def read_config_file(filename):
    """
    Read a YAML config file.

    Parameters
    ----------
    filename (string): path to the YAML file

    Returns
    -------
    dict: parsed contents, or {} when the file does not exist
    boolean: whether the file exists
    """
    exists = os.path.isfile(filename)
    config = {}
    if exists:
        with open(filename, 'r') as handle:
            config = yaml.safe_load(handle.read())
    return config, exists
|
28d20708faa26db9a1040444f04ee92af5523d9d
| 11,255
|
def even_or_odd(n):
    """Return 'Even' for even values of n, 'Odd' otherwise."""
    return 'Even' if n % 2 == 0 else 'Odd'
|
956e071eeb4be5b9ec2851fc1b566ff1e3e2ef98
| 11,256
|
def SetTriggerMode(mode):
    """ This function will set the trigger mode that the camera will
        operate in.
        Valid values:  0. Internal
                       1. External
                       6. External Start
                       7. External Exposure (Bulb)
                       9. External FVB EM (only valid for EM Newton models
                          in FVB mode)
                      10. Software Trigger
                      12. External Charge Shifting """
    # Stub: no driver/SDK call is made here and `mode` is ignored; always
    # returns None. Presumably the real camera binding is provided
    # elsewhere — TODO confirm.
    return None
|
9fcb6f3524de7eed5cdda02ce56a5077a97c2d75
| 11,258
|
def L(n, ri, c):
    """Build the Gaussian-elimination matrix Li for column c.

    ri: list of ratios a[i][c] / a[c][c]
    c : zero-based column index

    Returns an n x n identity matrix (floats) with -ri[k] placed in
    column c for every row k below c.
    """
    matrix = [[1.0 if row == col else 0.0 for col in range(n)]
              for row in range(n)]
    for row in range(c + 1, n):
        matrix[row][c] = -ri[row]
    return matrix
|
d6f7293691842d7175facc540c35cd1c1a43ee09
| 11,259
|
import os
import zipfile
def unzip(src_path, dst_path = None):
    """Extract a zip archive and return its member name list.

    Extraction defaults to the archive's own directory when *dst_path*
    is None.
    """
    destination = os.path.dirname(src_path) if dst_path is None else dst_path
    with zipfile.ZipFile(src_path) as archive:
        members = archive.namelist()
        archive.extractall(destination)
    return members
|
8947922c1026e669fa494d369f8beaedce49bea5
| 11,260
|
def get_type(type_f):
    """
    Categorize a formula string into a one-letter code.

    Returns 'T' for 'trad', 'P' for 'TCONG', 'R' for 'DRA', 'W' for 'WDBA';
    anything else is returned unchanged. Checks are applied in that order.
    """
    for token, code in (('trad', 'T'), ('TCONG', 'P'),
                        ('DRA', 'R'), ('WDBA', 'W')):
        if token in type_f:
            return code
    return type_f
|
438c3c3c7fc22fbd09f97727bd30890519efa918
| 11,261
|
def hex_to_int(input: str) -> int:
    """Parse a hexadecimal string (with or without a "0x" prefix) into an int.

    Note: the parameter name `input` shadows the builtin but is kept for
    keyword-call compatibility.
    """
    return int(input, base=16)
|
844714eaec1a2b2804cb4725e458980555516ce2
| 11,262
|
import string
def filter_filename(filename):
    """Filter a string down to a valid filename.

    Keeps ASCII letters, digits, and "-_.() " (including the space);
    every other character is dropped.
    """
    allowed = set("-_.() " + string.ascii_letters + string.digits)
    return ''.join(ch for ch in filename if ch in allowed)
|
8ab97e9a3d9b806090a4b55f6a47d2f011f99ede
| 11,266
|
from warnings import warn
import logging
def logger():
    """Access global logger.

    .. deprecated:: 3.3
       To control logging from ixmp, instead use :mod:`logging` to retrieve it:

       .. code-block:: python

          import logging
          ixmp_logger = logging.getLogger("ixmp")
          # Example: set the level to INFO
          ixmp_logger.setLevel(logging.INFO)
    """
    message = (
        "ixmp.utils.logger() is deprecated as of 3.3.0, and will be removed in ixmp "
        '5.0. Use logging.getLogger("ixmp").'
    )
    warn(message, DeprecationWarning)
    return logging.getLogger("ixmp")
|
28b8eb99d41171e36163e89799c1cc313c311184
| 11,267
|
def dot_product(A, B):
    """
    Computes the dot product of vectors A and B.
    @type A: vector
    @type B: vector
    @rtype: number
    @return: dot product of A and B
    @raise ValueError: if A and B differ in length
    """
    if len(A) != len(B):
        raise ValueError("Length of operands do not match")
    # sum(..., 0.0) starts from a float so the result is always a float,
    # matching the original accumulator; zip replaces the index loop whose
    # enumerate value was unused.
    return sum((a * b for a, b in zip(A, B)), 0.0)
|
f63528bda5d3890137a35de5fd639086262b5c93
| 11,268
|
def has_sources(target, extension=None):
    """Return True if the target carries the 'sources' label.

    When *extension* is supplied, additionally require at least one source
    ending with that extension.
    """
    if not target.has_label('sources'):
        return False
    if not extension:
        return True
    if not hasattr(target, 'sources'):
        return False
    return any(source.endswith(extension) for source in target.sources)
|
71853d034f6fe8283f2178daf3648ac75457ec2e
| 11,269
|
def fibo(n):
    """Returns nth fibonacci number (fibo(1) == fibo(2) == 1)."""
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
|
b9a8d3960cc01f1745151eda250fea97c0e62b14
| 11,270
|
def check_accuracy_single(dat, label):
    """NOTE: OLD! UNUSED. Use sklearn funcs.

    Prediction accuracy of a list of results against a single correct label.

    Parameters
    ----------
    dat : list of int
        Predicted labels of a set of test data.
    label : int
        The correct label.

    Returns
    -------
    float
        Fraction of predictions equal to *label*.
    """
    return dat.count(label) / len(dat)
|
26fec27fcd03eb637dd2c94559c182a172d693a5
| 11,271
|
from typing import Dict
from typing import List
from typing import Iterable
def get_comments(pf) -> Dict[str, List[str]]:
    """Retrieve trailing comments from a proto file descriptor, keyed by the
    dotted path of the symbol each comment was attached to.

    :param pf: a FileDescriptorProto-like object with ``source_code_info``
    :return: {"Outer.field_name": ["comment line", ...], ...}
    """
    def _get_inner():
        # Walk each commented source location's path down through the
        # descriptor tree to find the symbol it belongs to.
        for location in pf.source_code_info.location:
            if location.trailing_comments:
                pf_p = pf
                cls_path = []
                for path in location.path:
                    # Record enclosing message/service names as we descend.
                    if hasattr(pf_p, "field") or hasattr(pf_p, "method"):
                        cls_path.append(pf_p.name)
                    if isinstance(pf_p, Iterable):
                        pf_p = list(pf_p)[path]
                    else:
                        pf_p = getattr(pf_p, pf_p.DESCRIPTOR.fields_by_number[path].name)
                # cls_path - parent object names, pf_p - message from decoder
                # location.trailing_comments - at the end of the line
                yield ".".join(cls_path + [pf_p.name]), location.trailing_comments.splitlines()
    # (Removed a dead `i` counter and commented-out code from the original.)
    return dict(_get_inner())
|
e2617546f62f05c71a2113d06dc31f89cd6b688c
| 11,273
|
def or_(*args):
    """Compute the logic OR between expressions.

    Returns True if any of the given expressions is truthy; False when none
    are, or when called with no arguments.
    """
    # The original `any(*args)` raised TypeError for more than one argument,
    # since any() takes a single iterable — pass the args tuple itself.
    return any(args)
|
17b775029490d5ca4ea1574186bd02a77a92782a
| 11,275
|
def decimalHour(time_string):
    """Convert a 24-hour "hh:mm:ss" string to decimal hours (HH.hhh)."""
    hours, minutes, seconds = (float(part) for part in time_string.split(':'))
    return ((seconds / 60) + minutes) / 60 + hours
|
b931ce0bd1c57c0d2b47a5d51331685e6c478b61
| 11,276
|
def getFileName(fileRequest):
    """Extract the file name field from a file request message.

    The name byte length is a 16-bit big-endian value at offsets 3-4;
    the name bytes start at offset 5.
    """
    # int() kept for inputs whose elements are not already ints.
    name_length = int((fileRequest[3] << 8) | (fileRequest[4]))
    return fileRequest[5:name_length * 2]
|
d6f500b4101677ab2655650f33f0b3f206c55e9c
| 11,277
|
def _generate_flame_clip_name(context, publish_fields):
"""
Generates a name which will be displayed in the dropdown in Flame.
:param publish_fields: Publish fields
:returns: name string
"""
# this implementation generates names on the following form:
#
# Comp, scene.nk (output background), v023
# Comp, Nuke, v023
# Lighting CBBs, final.nk, v034
#
# (depending on what pieces are available in context and names, names
# may vary)
name = ""
# the shot will already be implied by the clip inside Flame (the clip
# file which we are updating is a per-shot file. But if the context
# contains a task or a step, we can display that:
if context.task:
name += "%s, " % context.task["name"].capitalize()
elif context.step:
name += "%s, " % context.step["name"].capitalize()
# if we have a channel set for the write node or a name for the scene,
# add those
rp_name = publish_fields.get("name")
rp_channel = publish_fields.get("channel")
if rp_name and rp_channel:
name += "%s.nk (output %s), " % (rp_name, rp_channel)
elif not rp_name:
name += "Nuke output %s, " % rp_channel
elif not rp_channel:
name += "%s.nk, " % rp_name
else:
name += "Nuke, "
# and finish with version number
name += "v%03d" % (publish_fields.get("version") or 0)
return name
|
91d49612815830c3260920f6f87a6d4eada1006f
| 11,279
|
from typing import List
from typing import Tuple
from typing import Optional
def aggregate_changes(
    query_result: List[Tuple[int, int]], initial: Optional[Tuple[int, int, int]] = None
) -> Tuple[int, int, int]:
    """Accumulate per-kind change counts into a 3-tuple.

    :param query_result: (kind, count) pairs where kind is 0, 1 or 2
    :param initial: optional starting totals (defaults to zeros)
    :return: the aggregated (kind0, kind1, kind2) totals
    """
    totals = list(initial) if initial else [0, 0, 0]
    for kind, kind_count in query_result:
        assert kind in (0, 1, 2)
        totals[kind] += kind_count
    return totals[0], totals[1], totals[2]
|
ac31c0424ca26fd2bfb5d035b1fea436cbe7d008
| 11,280
|
def calc_air_density(temperature, pressure, elevation_ref=None, elevation_site=None, lapse_rate=-0.113,
                     specific_gas_constant=286.9):
    """
    Calculate air density [kg/m^3] from temperature and pressure, optionally
    extrapolating from a reference elevation to a site elevation.

    :param temperature: temperature values in degrees Celsius
    :type temperature: float or pandas.Series or pandas.DataFrame
    :param pressure: pressure values in hectopascal, hPa (1,013.25 hPa =
        101,325 Pa = 101.325 kPa = 1 atm = 1013.25 mbar)
    :type pressure: float or pandas.Series or pandas.DataFrame
    :param elevation_ref: elevation, in meters, of the reference
        temperature and pressure location
    :param elevation_site: elevation, in meters, of the site to calculate
        air density for
    :param lapse_rate: air density lapse rate kg/m^3/km, default -0.113
    :param specific_gas_constant: specific gas constant, R, for humid air
        J/(kg.K), default 286.9
    :return: air density in kg/m^3 (rounded to 3 decimals when extrapolated
        to a site elevation)
    :rtype: float or pandas.Series depending on the input
    :raises TypeError: if only one of elevation_ref / elevation_site is given

    **Example usage**
    ::
        import brightwind as bw
        # single value
        bw.calc_air_density(15, 1013)
        # with ref and site elevation
        bw.calc_air_density(15, 1013, elevation_ref=0, elevation_site=200)
    """
    kelvin = temperature + 273.15  # degC -> K
    pascals = pressure * 100       # hPa -> Pa
    ref_air_density = pascals / (specific_gas_constant * kelvin)
    have_ref = elevation_ref is not None
    have_site = elevation_site is not None
    if have_ref and have_site:
        delta_km = (elevation_site - elevation_ref) / 1000
        return round(ref_air_density + delta_km * lapse_rate, 3)
    if have_ref:
        raise TypeError('elevation_site should be a number')
    if have_site:
        raise TypeError('elevation_ref should be a number')
    return ref_air_density
|
964bff72d67354abeff9a355788d3624d7ec230c
| 11,282
|
import os
import requests
def get_source_repo(target, auth=None):
    """Return the source (upstream) repo slug for a GitHub repo.

    Parameters
    ----------
    target : str
        The GitHub organization/repo
    auth : str, optional
        The GitHub authorization token; falls back to $GITHUB_ACCESS_TOKEN

    Returns
    -------
    str
        The upstream "org/repo", or *target* itself when it is not a fork.
    """
    token = auth or os.environ.get("GITHUB_ACCESS_TOKEN")
    response = requests.get(
        f"https://api.github.com/repos/{target}",
        headers={"Authorization": "token %s" % token},
    )
    payload = response.json()
    # Non-forks carry no "source" entry: the repo itself is the source.
    if "source" not in payload:
        return target
    return payload["source"]["full_name"]
|
285636537155979fbaa31f53f313de75540223a2
| 11,284
|
import numpy
def replace_df_nulls(DF, null_list):
    """Replace every value in *null_list* with NaN across all columns of DF.

    Args:
        DF: DataFrame to clean (mutated in place).
        null_list: values to replace with NaN.
    Returns:
        The same DataFrame, with a per-column summary printed.
    """
    for col in list(DF):
        nulls_before = DF[col].isnull().sum()
        for sentinel in null_list:
            DF[col] = DF[col].replace(sentinel, numpy.nan)
        nulls_after = DF[col].isnull().sum()
        delta = nulls_after - nulls_before
        print(str(col) + ": " + str(delta) + " nulls found, now "
              + str(nulls_after) + " (" + str(nulls_after / len(DF[col]))
              + "%) nulls total.")
    return DF
|
62637bf2ef33884bc6bb4dd6dd0d8270e3a4e84b
| 11,285
|
def market_keys():
    """Standard keys used for market storage records."""
    return "player_id player_name price amount".split()
|
0a514f5ae1d138a1e84700ab15b88380186f70b0
| 11,286
|
def get_ads_from_path(path_i):
    """Infer the adsorbate label from a run path.

    Returns "o" for o-covered runs, the placeholder "difjsi" for bare
    oh-covered or explicitly tagged paths, and None otherwise.
    """
    ads = None
    if "run_o_covered" in path_i:
        ads = "o"
    elif "run_bare_oh_covered" in path_i:
        # Only the "/bare/" sub-runs get a label; others stay None.
        if "/bare/" in path_i:
            ads = "difjsi"
    elif "difjsi" in path_i:
        ads = "difjsi"
    return ads
|
910ede094dd92c180724a950bce80c2d3e69f9b0
| 11,287
|
import pathlib
def module_to_path(module: str, suffix=".py") -> pathlib.Path:
"""convert module a.b.c to path(a/b/c)"""
return pathlib.Path(*module.split(".")).with_suffix(suffix)
|
682fd05379e81a5d8d24f5d0f4ab8134cbbce0e7
| 11,288
|
def byte(value):
    """Converts a one-character string or an int to its signed-byte value.

    Ints are repeatedly wrapped into [-128, 127]; other inputs yield None
    implicitly, matching the original behavior.
    """
    if isinstance(value, str) and len(value) == 1:
        return ord(value)
    if isinstance(value, int):
        # Iterative form of the original's step-by-256 recursion.
        while value > 127:
            value -= 256
        while value < -128:
            value += 256
        return value
|
6ef59f49a5d0d49ee7222387a8615567ff9d6267
| 11,289
|
def select_frame(**params):
    """Return the Selenium statements that switch into frame ``params['target']``."""
    frame_line = "self.driver.switch_to.frame('{target}')".format(**params)
    return ['self.driver.switch_to.default_content()', frame_line]
|
76939cadb680554fecd5bf481970a70a37be13a7
| 11,291
|
def mlist(self, node1="", node2="", ninc="", **kwargs):
    """List the master degrees of freedom (APDL Command: MLIST).

    Parameters
    ----------
    node1, node2, ninc
        List master degrees of freedom from NODE1 to NODE2 (defaults to
        NODE1) in steps of NINC (defaults to 1). If NODE1 = ALL (default),
        NODE2 and NINC are ignored and masters for all selected nodes
        [NSEL] are listed. If NODE1 = P, graphical picking is enabled
        (valid only in the GUI). A component name may also be substituted
        for NODE1 (NODE2 and NINC are ignored).

    Notes
    -----
    Lists the master degrees of freedom.
    """
    return self.run(f"MLIST,{node1},{node2},{ninc}", **kwargs)
|
611cd462d34cd4de0e34cb262549fc6cff40127d
| 11,292
|
import yaml
def _get_benchmark(platform: str, command: str) -> dict:
    """Load the structured benchmark YAML fixture for a platform/command pair.

    The fixture lives at tests/<platform>_<command>/<platform>_<command>.yml
    with spaces in the command replaced by underscores.
    """
    slug = str(command.replace(" ", "_"))
    fixture_path = f"tests/{platform}_{slug}/{platform}_{slug}.yml"
    with open(fixture_path, "r") as structured:
        return yaml.safe_load(structured.read())
|
4d98fac166a979343d0bb849eb2000e434edea78
| 11,293
|
def formatstr(text):
    """Extract all letters from a string and uppercase them."""
    return "".join(ch.upper() for ch in text if ch.isalpha())
|
749aeec962e3e39760c028bfb3e3a27ad8c10188
| 11,294
|
def reflexive_missing_elements(relation, set):
    """Return the (x, x) pairs missing from a reflexive relation.

    :param relation: iterable of pairs; pairs may be stored as lists or tuples
    :param set: the elements the relation should be reflexive over
        (parameter name shadows the builtin; kept for API compatibility)
    :return: list of (element, element) tuples absent from relation
    """
    missingElements = []
    for element in set:
        # Check both pair representations: the original only tested the
        # list form [e, e], which never matches tuple-based relations.
        if ([element, element] not in relation
                and (element, element) not in relation):
            missingElements.append((element, element))
    return missingElements
|
c31ce237c9156f17cc87a12ac80e595f81b08889
| 11,295
|
def form2_list_comprehension(items):
    """
    Remove duplicates, keeping the LAST occurrence of each item in order.

    :return: list with unique items.
    """
    unique = []
    for position, item in enumerate(items):
        # Keep an item only if it never appears again later.
        if item not in items[position + 1:]:
            unique.append(item)
    return unique
|
02d095c86de1b7d52d53cdb4bfe77db08523ea3a
| 11,296
|
def calcMass(volume, density):
    """
    Calculate the mass of a given volume from its density.

    Args:
        volume (float): in m^3
        density (float): in kg/m^3
    Returns:
        :class:`float` mass in kg
    """
    return volume * density
|
38f0ef780704c634e572c092eab5b92347edccd3
| 11,297
|
def rescale(arr, vmin, vmax):
    """Rescale values sampled in [0, 1] onto [vmin, vmax].

    Parameters
    ----------
    arr : array
        Sampled values in the unit interval.
    vmin : float
        Minimal value to rescale to.
    vmax : float
        Maximum value to rescale to.
    """
    span = vmax - vmin
    return span * arr + vmin
|
d6debc0eeccd9fe19ed72d869d0de270559c9ce8
| 11,298
|
def get_extreme(extreme, range_minimum, range_maximum):
    """Parse and clamp a min/max bound supplied as a string.

    extreme: a string holding an int ("" means "use range_minimum");
        any non-string input yields range_minimum.
    range_minimum (int): smallest allowed result
    range_maximum (int): largest allowed result
    """
    if not isinstance(extreme, str):
        # Non-string inputs fall back to the range minimum.
        return range_minimum
    value = range_minimum if extreme == "" else int(extreme)
    if value > range_maximum:
        return range_maximum
    if value < range_minimum:
        return range_minimum
    return int(value)
|
d44d15310c83b1e2fcf03939cd1e7625f68f0b11
| 11,299
|
import shutil
def rename(path: str, target: str):
    """Move *path* to *target* and return the destination path.

    (shutil.move renames in place, falling back to copy-and-delete when
    crossing filesystems — the original docstring's "Copy" was inaccurate.)
    """
    return shutil.move(path, target)
|
00f46f2557f4e0e7bd07610889a8c2deb028f24a
| 11,300
|
import socket
def check_port_occupied(port, address="127.0.0.1"):
"""
Check if a port is occupied by attempting to bind the socket
:return: socket.error if the port is in use, otherwise False
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind((address, port))
except socket.error as e:
return e
finally:
s.close()
return False
|
5919e228c835ceb96e62507b9891c6a1ce5948fa
| 11,301
|
def inverseExtend(boundMethod, *args, **kargs):
    """Iterate downward through a hierarchy calling a method at each step.

    boundMethod -- This is the bound method of the object you're interested in.
    args, kargs -- The arguments and keyword arguments to pass to the
        top-level method.

    You can call this method via something like this:
        inverseExtend(object.method, myArg, myOtherArg)

    When calling the method at each step, I'll call it like this:
        Class.method(object, callNext, *args, **kargs)
    However, the lowest level class's method has no callNext parameter,
    since it has no one else to call:
        Class.method(object, *args, **kargs)
    In the method:
        callNext(*args, **kargs)
    should be called when it is time to transfer control to the subclass. This
    may even be in the middle of the method. Naturally, you don't have to pass
    *args, **kargs, but a common idiom is for the parent class to just receive
    *args and **kargs and pass them on unmodified.
    """
    # Build all the necessary data structures.
    # __self__/__func__ replace the Python 2-only im_self/im_func attributes,
    # which raise AttributeError on Python 3.
    obj = boundMethod.__self__
    methodName = boundMethod.__func__.__name__
    # Figure out the classes in the class hierarchy. "classes" will
    # contain the most senior classes first. (Only the first base of each
    # class is followed, as in the original.)
    Class = obj.__class__
    classes = [Class]
    while Class.__bases__:
        Class = Class.__bases__[0]
        classes.insert(0, Class)
    # Skip classes that don't define the method. Be careful with getattr
    # since it automatically looks in parent classes.
    last = None
    methods = []
    for Class in classes:
        if (hasattr(Class, methodName) and
                getattr(Class, methodName) != last):
            last = getattr(Class, methodName)
            methods.insert(0, last)
    def callNext(*args, **kargs):
        """This closure is like super(), but it calls the subclass's method."""
        method = methods.pop()
        if len(methods):
            return method(obj, callNext, *args, **kargs)
        else:
            return method(obj, *args, **kargs)
    return callNext(*args, **kargs)
|
a511470f6cd3d86de19cc95e489498e649bea7a5
| 11,303
|
from typing import Dict
def determine_postAST_from_filename(filenamesplit) -> Dict:
    """Determine the post-AST tag from a filename split into tokens.

    :param filenamesplit: sequence of filename tokens
    :return: {"postAST": tag} where tag is one of the known markers,
        a "?"-suffixed fallback, or None when nothing matches.
    """
    tokens = filenamesplit
    postAST = None
    if "post" in tokens and "AST" in tokens:
        # Both "post" and "AST" tokens present: pick the most specific tag.
        if "LC" in tokens:
            if "postAST_LC" in tokens:
                postAST = "postAST_LC"
            elif "postAST-LC" in tokens:
                postAST = "postAST-LC"
            else:
                postAST = "postAST?LC"
        elif "postAST_sHA" in tokens:
            postAST = "postAST_sHA"
        elif "post_AST" in tokens:
            postAST = "post_AST"
        elif "postAST" in tokens:
            postAST = "postAST"
        else:
            postAST = "postAST?"
    elif "pAST-sHA" in tokens:
        postAST = "pAST-sHA"
    elif any("AFTER" in token.upper() for token in filenamesplit) or "postORR" in tokens:
        postAST = "postORR"
    elif "POST-EXP" in tokens:
        postAST = "POST-EXP"
    return {"postAST": postAST}
|
3787dcef3ae65538f2fe66f98be60cc0f71f7790
| 11,306
|
def pretty(data, corner = '+', separator='|', joins='-'):
    """
    Render a dataframe as an ASCII grid table string.

    Parameters :
        ~ data : Accepts a dataframe object.
        ~ corner : Accepts character to be shown on corner points (default value is "+").
        ~ separator : Accepts character to be shown in place to the line separating two values (default value is "|").
        ~ joins : Accepts character to be shown in place to the line joining two rows (default value is "-").
    Example : PandasPretty.pretty(data = df, corner='%', separator=';', joins='=')
    Output :
          %================%=========%===========%===========%
          ;         Name   ; Class   ; Roll_no   ; Section   ;
          %================%=========%===========%===========%
          ;  Ayush kumar   ;    12   ;       8   ;       A   ;
          %================%=========%===========%===========%
          ; Prince kumar   ;    12   ;      23   ;       A   ;
          %================%=========%===========%===========%
          ; Khushi singh   ;    12   ;      18   ;       B   ;
          %================%=========%===========%===========%
          ;    Prathisha   ;    12   ;      23   ;       B   ;
          %================%=========%===========%===========%
    """
    # Compute the widest cell per column (header included) by walking the
    # transposed data with the column names prepended.
    max_len = []
    data_t = data.T
    data_t_columns = data_t.index.tolist()
    data_t = data.T.values.tolist()
    for i,j in zip(data_t, data_t_columns):
        i.insert(0,j)
    for i in data_t:
        len_lst = map(lambda x : len(str(x)), i)
        max_len.append(max(list(len_lst)))
    # Rows to print: header row first, then the values.
    data_columns = data.columns.tolist()
    data = data.values.tolist()
    data.insert(0, data_columns)
    def line_simple(lst, corner, joins):
        # Horizontal rule: each column is its max width + 4 join chars wide.
        string = ''
        for i in lst:
            string += (i+4)*joins+corner
        return corner+string
    def line_advans(lst, max_len, separator):
        # One table row: right-align each cell inside its padded column.
        string = ''
        def white_spc(word, max_ln, separator):
            return separator+((max_ln+4) - len(str(word))-1)*' '+str(word)+' '
        for i, j in zip(lst, max_len):
            string += white_spc(i, j, separator)
        return(string)
    # Interleave rules and rows, closing with a final rule.
    main_str = ''
    for i in data:
        main_str += line_simple(max_len, corner, joins)+'\n'
        main_str += line_advans(i, max_len, separator)+separator+'\n'
    main_str += line_simple(max_len, corner, joins)
    return main_str
|
812fb5d255c5e44c1d026d307e2a39c7b3c31f14
| 11,307
|
import csv
def write_rows(rows, filename, sep="\t"):
    """Write ``rows`` (a list of lists) to ``filename`` as a delimited file.

    Uses '|' as the quote character and minimal quoting; the delimiter
    defaults to a tab. Returns the filename for chaining.
    """
    with open(filename, "w", newline="") as handle:
        # writerows is equivalent to writing each row individually.
        csv.writer(handle, delimiter=sep, quotechar="|",
                   quoting=csv.QUOTE_MINIMAL).writerows(rows)
    return filename
|
41e04ee6203baf7db2d08722aed76dfacbc9e838
| 11,309
|
import collections
def context():
    """Build a stand-in for the AWS Lambda handler's ``context`` argument."""
    LambdaContext = collections.namedtuple('Context', 'function_name function_version')
    return LambdaContext(function_name='TestFunction', function_version='1')
|
b7d65ca7ff87aea1bd22e87dba4cb2d0e5ec411d
| 11,310
|
def _auth_handler():
"""
Requrired JWT method
"""
return None
|
9ec5301d6e32c7b3016a92ba9ae89d22fed3bad5
| 11,312
|
def HKL2string(hkl):
    """
    Convert hkl indices into a comma-separated string of integers.
    [-10.0, -0.0, 5.0] -> '-10,0,5'
    """
    parts = []
    for component in hkl:
        text = str(int(component))
        if text == "-0":  # normalise negative zero
            text = "0"
        parts.append(text)
    return ",".join(parts)
|
325ddfcb2243ce9a30328c5b21626a4c93a219de
| 11,313
|
import logging
import sys
def getLogger(nm):
    """Return a logger named ``nm``, basic-configured to write INFO+ to stdout."""
    # basicConfig is a no-op if the root logger already has handlers.
    logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(levelname).1s: %(message)s')
    return logging.getLogger(nm)
|
a6fc4f0fe6d2104c0e2487d6038401306ad3e3ba
| 11,314
|
import re
def splitAtUppercase(a_string):
    """assumes a_string is a string
    returns a list of strings, a_string split at each uppercase letter;
    the capturing group keeps each uppercase letter as its own list entry"""
    return re.split("([A-Z])", a_string)
|
5cbf4673ee46db81b8acfb30bfe64a44d10d0d9a
| 11,315
|
from typing import List
def should_expand_range(numbers: List[int], street_is_even_odd: bool) -> bool:
    """Decides if an x-y range should be expanded.

    May mutate ``numbers``: a descending range like 42-1 has its second
    element zeroed out (the "-1" is just a suffix to be ignored).
    """
    if len(numbers) != 2:
        return False
    start, end = numbers
    if end < start:
        # E.g. 42-1: treat the smaller trailing number as noise.
        numbers[1] = 0
        return True
    if street_is_even_odd and start % 2 != end % 2:
        # Parity mismatch on an even/odd street: ignore.
        return False
    if start == 0:
        # Assume that 0 is just noise.
        return False
    # Ranges larger than these bounds are typically just noise in the input data.
    return end <= 1000 and end - start <= 24
|
fcd5a8e027120ef5cc52d23f67de4ab149b19a2c
| 11,316
|
def zyx_to_yxz_dimension_only(data, z=0, y=0, x=0):
    """
    Compute the shape a (z, y, x) volume would have after a y-x-z reorder,
    without touching any data. A nonzero keyword overrides the corresponding
    entry of ``data``.
    :param data: sequence holding (z, y, x) sizes
    :param z: optional z override (0 means "use data[0]")
    :param y: optional y override (0 means "use data[1]")
    :param x: optional x override (0 means "use data[2]")
    :return: (y, x, z) tuple
    """
    depth = z if z != 0 else data[0]
    height = y if y != 0 else data[1]
    width = x if x != 0 else data[2]
    return (height, width, depth)
|
2477976f795f5650e45214c24aaf73e40d9fa4eb
| 11,317
|
def gen_anonymous_varname(column_number: int) -> str:
    """Generate a Stata varname based on the column number.
    Stata columns are 1-indexed.
    """
    return 'v{}'.format(column_number)
|
f0e150300f7d767112d2d9e9a1b139f3e0e07878
| 11,318
|
def create_city_map(n: int) -> set:
    """
    Generate the full n x n grid of (row, col) coordinates for the city.
    :param n: side length of the square grid Bassi needs to hide in
    :return: set of all (row, col) pairs with 0 <= row, col < n
    """
    return {(row, col) for row in range(n) for col in range(n)}
|
fd30b936647bae64ccf2894ffbb68d9ad7ec9c6b
| 11,320
|
def nullable(datatype):
    """Return the signature of a scalar value that is allowed to be NULL (in
    SQL parlance).
    Parameters
    ----------
    datatype : ibis.expr.datatypes.DataType
    Returns
    -------
    Tuple[Type]
        ``(NoneType,)`` when the type is nullable, otherwise an empty tuple.
    """
    if datatype.nullable:
        return (type(None),)
    return ()
|
fe5935d1b4b1df736933455e97da29a901d6acb1
| 11,322
|
import os
def TraverseByDepth(root, include_extensions):
    """
    Return the set of descendant directories of ``root`` that contain at
    least one file whose extension is in ``include_extensions``.
    NOTE:
      1. ``root`` itself is never part of the result.
      2. If ``include_extensions`` is falsy (None/empty), every
         subdirectory is included.
      3. Each entry in ``include_extensions`` must begin with '.'.
    """
    matching_dirs = set()
    walker = os.walk(root)
    next(walker, None)  # the first yield is root itself -- excluded by contract
    for dir_path, _subdirs, files in walker:
        if not include_extensions:
            matching_dirs.add(dir_path)
            continue
        splitext = os.path.splitext
        extensions_here = {splitext(name)[-1] for name in files if splitext(name)[-1]}
        if extensions_here & include_extensions:
            matching_dirs.add(dir_path)
    return matching_dirs
|
68f510a0014c3ad8b511a2b024e6c1d4aef05732
| 11,323
|
def build_facets(data):
    """Extract non-deprecated service ids from facets-serves.json data.

    Data structure:
      { "services" : [
          {"(type)": "quolab...ClassName",
           "id": "<name>"}
      ] }
    """
    deprecated = {"casetree", "cache-id", "indirect"}
    kept = []
    for service in data["services"]:
        service_id = service["id"]
        if service_id not in deprecated:
            kept.append(service_id)
    return kept
|
1b617180a3a5d3115c5300b91eebb3d6ca079942
| 11,324
|
from typing import Dict
def make_country_dict(
        csv_file:str = 'data/country_list.csv') -> Dict[str, str]:
    """Read a two-column CSV into a dict mapping ISO 3166 two-letter
    country codes (first column) to country names (second column).
    """
    mapping: Dict[str, str] = {}
    with open(csv_file, encoding='utf-8') as handle:
        for raw_line in handle:
            code, name = raw_line.strip().split(',')
            mapping[code] = name
    return mapping
|
0334a2b2c918f407d1df682f56c14943f1e66c43
| 11,325
|
import platform
def host_arch_target():
    """
    Translate the host machine architecture into the first component of a
    target triple; unknown architectures pass through unchanged.
    :return: Target host string
    """
    machine = platform.machine()
    translations = {
        "armv7l": "arm",
        "ppc": "powerpc",
        "ppc64": "powerpc64",
        "ppc64le": "powerpc64le",
    }
    return translations.get(machine, machine)
|
7cae48c32e03cecb42e29279a93013ad3529c978
| 11,326
|
import requests
import json
def ocr_space_url(overlay=False, api_key='must specify your own apikey', language='eng'):
    """OCR.space API request for a locally stored image.

    Despite the historical name, no URL parameter exists: the function
    prompts the user on stdin (via ``input``) for a local file path and
    uploads that file to the OCR.space parse endpoint.

    :param overlay: Whether the OCR.space text overlay is required in the
        response. Defaults to False.
    :param api_key: OCR.space API key (register at https://ocr.space/ocrapi
        to get a free one).
    :param language: Language code used for OCR; the list of available codes
        is on https://ocr.space/OCRAPI. Defaults to 'eng'.
    :return: Parsed text of the first OCR result, or None when the file
        could not be opened (the function then only prints a message).
    """
    print("------------------------------To convert Image to text-----------------------------")
    # Form fields sent alongside the uploaded file.
    payload = {'isOverlayRequired': overlay,
               'apikey': api_key,
               'language': language,
               }
    filename = input("Enter file path: -> ")
    try:
        with open(filename, 'rb') as f:
            print("Parsing data: -> https://api.ocr.space/parse/image")
            r = requests.post('https://api.ocr.space/parse/image',
                              files={filename: f},
                              data=payload,
                              )
    except:
        # NOTE(review): bare except -- this swallows *any* error (not just a
        # missing file) and reports it as "File not found"; r = 0 marks failure.
        r = 0
        print("Oops! File not found.. Provide a valid path")
    # r is the requests.Response on success, the int 0 on failure.
    # (Original author note: requests is used here instead of urllib for
    # simpler multipart POSTs.)
    if r != 0:
        js = json.loads(r.content.decode())
        return js['ParsedResults'][0]['ParsedText']
|
ff16f026262383ccfca4b3de91d2b1fa09bbde32
| 11,327
|
from typing import List
def get_reputation_data_statuses(reputation_data: List) -> List[str]:
    """
    collects reported statuses of reputation data
    Args:
        reputation_data: returned data list of a certain reputation command
    Returns: a list of the truthy 'status' values, in order
    """
    statuses = []
    for data_entry in reputation_data:
        status = data_entry.get('status')
        if status:  # falsy statuses (missing, empty) are dropped
            statuses.append(status)
    return statuses
|
2c41344f2970243af83521aecb8a9374f74a40ac
| 11,328
|
def is_number(val):
    """Check if a value is a number by attempting to cast to ``float``.
    Args:
        val: Value to check.
    Returns:
        True if the value was successfully cast to ``float``; False otherwise.
    """
    try:
        float(val)
    except (ValueError, TypeError):
        return False
    else:
        return True
|
89728d3d199c3ef5885529da5a2d13dd94fa590f
| 11,329
|
def get_local_name(element):
    """
    Just the element name with the schema URI (if any) removed
    @type element: Element
    @param element: The XML element
    @rtype: string
    @return: the base name of the element or None if it could not be determined
    """
    if element is None:
        return None
    tag = element.tag
    # Namespaced tags look like '{uri}local' -- keep only the part after '}'.
    if '}' in tag:
        return str(tag).rsplit('}', 1)[1]
    return tag
|
cef42ed650b2798c623f220686364f257e5f385f
| 11,330
|
def format_rfc3339(datetime_instance):
    """Format a datetime per RFC 3339.

    NOTE(review): the 'Z' suffix is appended unconditionally -- this assumes
    the caller passes a UTC (naive) datetime; verify at call sites.
    """
    return "{}Z".format(datetime_instance.isoformat(sep="T"))
|
ad60a9e306e82554601efc1ae317296a59b009b0
| 11,331
|
def get_metadata_keys(options):
    """Return the metadata keys listed (one per line) in options['keyfile'],
    or None when no keyfile is configured."""
    keyfile_path = options.get("keyfile")
    if not keyfile_path:
        return None
    with open(keyfile_path, "r") as handle:
        return handle.read().splitlines()
|
2451d54ae49690691e5b0ce019178fd119e3d269
| 11,332
|
def hex_reflect_y(x, y, z):
    """Reflect the given hex through the x-axis by swapping its y and z
    cube coordinates; returns the new (x, y, z) triple."""
    return (x, z, y)
|
92f36567d26ae8e942d3dad269cc3f196c8fbe16
| 11,333
|
import json
def getFileTree(frame, dir):
    """
    Get the TreeView structure (recursive way).

    Drives a remote REPL through ``frame.exec_cmd`` (presumably a serial
    console to a MicroPython-style device -- TODO confirm): lists ``dir``,
    stats each entry, and recurses into subdirectories.

    Returns {dir: [...]} where each entry is either a file name (str) or a
    nested {subdir: [...]} dict; returns the string "err" if the initial
    listing fails.
    """
    frame.cmd_return = ""
    frame.exec_cmd("\r\n")
    # Ask the remote side for the directory listing; the reply is raw text.
    result = frame.exec_cmd("os.listdir(\'%s\')\r\n" % dir)
    if result == "err":
        return result
    # Keep only the '[...]' list portion of the echoed output.
    filemsg = result[result.find("["):result.find("]")+1]
    ret = json.loads("{}")
    ret[dir] = []
    if filemsg == "[]":
        return ret
    filelist = []
    # Split on single quotes; the name tokens are the pieces that contain
    # no list punctuation ('[', ',', ']').
    filemsg = filemsg.split("'")
    for i in filemsg:
        if i.find("[") >= 0 or i.find(",") >= 0 or i.find("]") >= 0:
            pass
        else:
            filelist.append(i)
    for i in filelist:
        # Stat each entry remotely to find out whether it is a directory.
        res = frame.exec_cmd("os.stat(\'%s\')\r\n" % (dir + "/" + i))
        if res == "err":
            print("Error Build TreeView: ", "os.stat(./)")
        # Second line of the echoed os.stat output holds the tuple; its
        # first field is the st_mode value.
        isdir = res.split("\n")[1]
        isdir = isdir.split(", ")
        try:
            adir = isdir[0]
            # Strip the surrounding parentheses of the echoed tuple.
            if adir.find("(") >= 0:
                adir = adir[1:]
            if adir.find(")") >= 0:
                adir = adir[:-1]
            # 0o040000 is the S_IFDIR bit: the entry is a directory.
            if int(adir) == 0o040000:
                if i == "System Volume Information":
                    pass  # skip the Windows system folder
                else:
                    ret[dir].append(getFileTree(frame, dir+"/"+i))
            else:
                ret[dir].append(i)
        except Exception as e:
            print("Error Build TreeView: ", e)
    return ret
|
64f7d9aba936476560b506b907cb36a6e1bee734
| 11,335
|
def _unpickle_method(name, im_self, im_class):
""" Unpickles an instancemethod object. """
if im_self is not None:
return getattr(im_self, name)
else:
return getattr(im_class, name)
|
71b7b81be74088f7060c78e706b4646c1f2e613d
| 11,336
|
def is_unexpected(actual, expected):
    """Evaluate which parameters in ``actual`` do not exist in ``expected``.

    NOTE: returns ``actual`` itself when ``expected`` is None, otherwise a
    lazy ``filter`` object -- callers must iterate the result either way.
    """
    if expected is None:
        return actual
    return filter(lambda item: item not in expected, actual)
|
d0de1d2505e5ad78b5d465b2496b0a154fe0c237
| 11,337
|
def _remove_self(d):
"""Return a copy of d without the 'self' entry."""
d = d.copy()
del d['self']
return d
|
a80eca3886d0499d1871998501c19e9cdae93673
| 11,338
|
def deltanpq(phino, phinoopt=0.2):
    """Calculate deltaNPQ = (1/phinoopt) - (1/phino).
    :param phino: PhiNO
    :param phinoopt: Optimum PhiNO (default: 0.2)
    :returns: deltaNPQ (float)
    """
    return 1.0 / phinoopt - 1.0 / phino
|
34d44a732bd87eb78e8ddee92b4555bea67789be
| 11,339
|
def html_string(request, html_file):
    """NYU Academic Calendar HTML, read fully from ``html_file`` as a string."""
    contents = html_file.read()
    return contents
|
c0879d51a97b643f110ad2134dc7afe9dd279b66
| 11,341
|
def opt_in_argv_tail(argv_tail, concise, mnemonic):
    """Say if an optional argument is plainly present.

    Give me a concise "-" dash opt, or a "--" double-dash opt, or both;
    either may be None/empty as long as at least one is supplied.
    """
    assert concise or mnemonic
    if concise:
        assert concise.startswith("-") and not concise.startswith("--")
    if mnemonic:
        assert mnemonic.startswith("--") and not mnemonic.startswith("---")
    # Detect the plain use of the concise or mnemonic opts
    # Drop the ambiguities silently, like see "-xh" always as "-x '-h'" never as "-x -h"
    for arg in argv_tail:
        # Guard each probe: the assert above permits supplying only one of
        # the two spellings, but the original crashed (AttributeError) when
        # the other was None.
        if mnemonic and mnemonic.startswith(arg) and (arg > "--"):
            return True
        if concise and arg.startswith(concise) and not arg.startswith("--"):
            return True
    return False
|
17333a2a526799d6db5e78746d3ecb7111cd8cd6
| 11,343
|
def add_side_effect_wrapper(session_mock):
    """Side effect for the mocked ``add``: records ``table`` in
    ``session_mock.tables`` and assigns it a fresh id on collision."""
    def add_side_effect(table):
        name = table.__tablename__
        populated = name in session_mock.tables and len(session_mock.tables[name]) > 0
        if populated:
            rows = session_mock.tables[name]
            # Floor of 0 keeps parity with the original max-scan loop.
            highest = max([0] + [row.id for row in rows])
            if any(row.id == table.id for row in rows):
                table.id = highest + 1
        else:
            session_mock.tables[name] = []
            if table.id < 1:
                table.id = 1
        session_mock.tables[name].append(table)
        return table
    return add_side_effect
|
b85052f962c6437934fa9cd310eec7c559d5222d
| 11,345
|
def tie_amps_FIR_to_SIL1(model):
    """
    Tie the FIR amplitude to the SIL1 amplitude via the fixed
    0.012/0.002 ratio.
    """
    ratio = 0.012 / 0.002
    return ratio * model.SIL1_amp_0
|
13ae408cc618c118a7ad1059422fabad0036dff2
| 11,346
|
def split_by_unicode_char(input_strs):
    """
    Split utf-8 strings to unicode characters: each input string becomes
    a list of its single-character substrings.
    """
    return [[ch for ch in s] for s in input_strs]
|
290bb5ec82c78e9ec23ee1269fe9227c5198405e
| 11,347
|
def filter(lst):
    """
    Clean a collection of scraped items: drop duplicates and junk entries,
    strip URL-encoding artifacts, and lower-case everything.
    NOTE: this function shadows the ``filter`` builtin within its module.
    :param lst: iterable of items (or None)
    :return: new filtered list (order unspecified -- deduping goes via a set)
    """
    if lst is None:
        return []
    if not isinstance(lst, set):
        lst = set(lst)  # remove duplicates
    cleaned = []
    for raw in lst:
        text = str(raw)
        if not (text[0].isalpha() or text[0].isdigit()):
            continue
        if 'xxx' in text or '..' in text:
            continue
        text = text.replace('252f', '').replace('2F', '').replace('2f', '')
        cleaned.append(text.lower())
    return cleaned
|
06c96962910b77a78615861822c76bbc9cfb52ce
| 11,348
|
def calc_hvac_energy(surface_area, building_type, Tin, Tout):
    """
    Estimate HVAC energy use from the heat transfer equation.
    Notes
    -----
    Q = U x SA x (Tin - Tout)
    Q    - Heat lost or gained due to outside temperature (kJ·h−1)
    U    - Overall heat transfer coefficient (kJ·h−1·m−2·°C−1)
    SA   - Surface Area of the space
    Tin  - Inside air set point temperature (°C)
    Tout - Outside air temperature (°C)
    Returns (kwh_per_day, kwh_per_week, kwh_per_month, kwh_per_year).
    """
    # Basement walls transfer far less heat than the generic envelope.
    heat_transfer_coeff = 0.5 if building_type == 'basement' else 24
    heat_rate = heat_transfer_coeff * surface_area * (Tin - Tout)
    # 0.00666667 converts kJ/h to kWh here (value kept from the original --
    # TODO confirm its derivation), scaled over 24 hours for a daily figure.
    kwh_per_day = heat_rate * 0.00666667 * 24 * 1
    kwh_per_week = kwh_per_day * 7
    kwh_per_month = kwh_per_day * 30.417  # 365 days / 12 months
    kwh_per_year = kwh_per_day * 365
    return kwh_per_day, kwh_per_week, kwh_per_month, kwh_per_year
|
1616196b36f6c512763362292ceb296ad4acbf96
| 11,349
|
def getValidData(filelist=None):
    """Filter the None entries out of a collection.

    :param filelist: iterable of items, possibly containing None
        (defaults to empty; the original's mutable ``[]`` default is
        replaced by the None idiom).
    :return: new list of the non-None items, in order.
    """
    if filelist is None:
        return []
    # 'is not None' instead of '!= None': identity is the correct None test.
    return [fp for fp in filelist if fp is not None]
|
57bccc440a056deee3bd16a4785823ab4f0845c2
| 11,350
|
def sorted_dict(d : dict) -> dict:
    """
    Return a new dict whose entries are ordered by value; on any failure
    (e.g. unorderable value types) the input dict is returned unchanged.
    """
    try:
        return dict(sorted(d.items(), key=lambda pair: pair[1]))
    except Exception:
        # Deliberate best-effort: fall back to the original mapping.
        return d
|
ec8b38805ee26f2d2e2fe0d876c8cbda88098026
| 11,351
|
import configparser
def export_story_as_config(from_slide, with_fields):
    """ Returns the story items for a story slide in the config format
    Each slide contains placeholder with a text frame which in turn contains
    paragraphs with text. Only ASCII characters are allowed in the story text
    files to overcome the different encodings on computers.
    Each placeholder is associated with a story item. They become a 'section' in
    the config file with the 'key' text associated with the parsed text.

    :param from_slide: a python-pptx slide whose placeholders hold the story
    :param with_fields: mapping of story item name -> placeholder index
    :return: a RawConfigParser with one section per item, or None when a
        placeholder index is missing from the slide
    """
    story = configparser.RawConfigParser()
    for item, index in with_fields.items():
        try:
            shape = from_slide.placeholders[int(index)]
        except KeyError:
            # A missing placeholder invalidates the whole export.
            return None
        if not shape.has_text_frame:
            continue
        text = ""
        paras = shape.text_frame.paragraphs
        for p in paras:
            for r in p.runs:
                link = r.hyperlink
                has_link = link is not None and (link.address is not None)
                if has_link:
                    # Hyperlinked runs are wrapped as <address>text</>.
                    text += '<'
                    for c in link.address:
                        # Drop non-ASCII characters (see docstring).
                        text += c if ord(c) < 128 else ''
                    text += '>'
                for c in r.text:
                    text += c if ord(c) < 128 else ''
                if has_link:
                    text += '</>'
            if( len(paras) > 1):
                # Preserve paragraph breaks only for multi-paragraph frames.
                text += "\n"
        story.add_section(item)
        story.set(item, "text", text.strip())
    return story
|
529893dc5b238e86a9480d983afe5a229d762dd0
| 11,353
|
def sort_by_type_and_size(layer_nodes):
    """
    Sort critical-path leaf nodes by resource type, then by size.
    Args:
        layer_nodes: list of node dicts carrying 'mime_type' and 'size'
    Returns:
        A new list: HTML documents first, then stylesheets, then woff2
        fonts, then everything else -- each group in ascending size order.
    """
    html_docs, stylesheets, fonts, others = [], [], [], []
    bucket_for = {
        'text/html': html_docs,
        'text/css': stylesheets,
        'font/woff2': fonts,
    }
    for node in layer_nodes:
        bucket_for.get(node['mime_type'], others).append(node)
    ordered = []
    for bucket in (html_docs, stylesheets, fonts, others):
        # sorted() is stable, matching the original in-place sort per group.
        ordered.extend(sorted(bucket, key=lambda element: element['size']))
    return ordered
|
02026778cfe0ff1481e24a9b2f9304fb2a5a4fc3
| 11,356
|
def unescape_html(html):
    """
    *Unescape a string previously escaped with cgi.escape()*

    **Key Arguments:**
        - ``html`` -- the string to be unescaped

    **Return:**
        - ``html`` -- the unescaped string

    NOTE(review): the stored version of this function had its own entity
    literals HTML-unescaped (e.g. ``replace("<", "<")``), turning it into a
    no-op / syntax error; the intended ``&lt;``/``&gt;``/``&quot;``/``&amp;``
    literals are restored here.
    """
    html = html.replace("&lt;", "<")
    html = html.replace("&gt;", ">")
    html = html.replace("&quot;", '"')
    # '&amp;' must be handled last so freshly produced '&' characters are
    # not re-read as the start of another entity.
    html = html.replace("&amp;", "&")
    return html
|
cafa4eb7ecc68b6221250947173c579e125ede07
| 11,357
|
def get_lr(optimizer):
"""get learning rate from optimizer
Args:
optimizer (torch.optim.Optimizer): the optimizer
Returns:
float: the learning rate
"""
for param_group in optimizer.param_groups:
return param_group['lr']
|
fc8293cc29c2aff01ca98e568515b6943e93ea50
| 11,358
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.