content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
from typing import Tuple
def load_test_data(columns: pd.Index, filename: str) -> Tuple[pd.Series, pd.DataFrame]:
    """
    Load test data, process it in the same manner as the train data, and
    return it as a pandas DataFrame.

    :param columns: column names of the training set the model was fitted on
    :param filename: csv of data to load
    :return: tuple of (booking_datetime series, processed feature frame)
    """
    raw = pd.read_csv(filename, parse_dates=['booking_datetime'])
    booking_datetime, features = process_features(raw)
    # Any training column absent from the test set gets a default of 0.
    for missing_col in set(columns) - set(features.columns):
        features[missing_col] = 0
    # Copy to defragment after the repeated column insertions.
    features = features.copy()
    # Reorder columns so they match the training set exactly.
    features = features[columns]
    return booking_datetime, features
def check_ancestors(page):
    """
    Decide whether a page should be indexed based on its ancestry.

    A page qualifies when its direct parent is listed in
    constants.SEARCH_CHILDREN_OF, or when any of its ancestors appears in
    constants.SEARCH_DESCENDANTS_OF.

    :arg obj page: A page returned from a database query
    :returns obj page: The same page that was passed in, or None
    """
    ancestors = page.get_ancestors()
    parent = page.get_parent()
    if parent.url_path in constants.SEARCH_CHILDREN_OF:
        return page
    for ancestor in ancestors:
        if ancestor.url_path in constants.SEARCH_DESCENDANTS_OF:
            return page
    return None
def tobin(data, width):
    """Convert an integer to a fixed-width list of bits (MSB first).

    Negative values are interpreted in two's complement within `width` bits.

    :param data: integer value to convert
    :param width: number of bits in the result
    :return: list of ints (0 or 1) of length ``width``
    """
    # Mask to `width` bits so negative numbers wrap to two's complement.
    masked = data & (2 ** width - 1)
    bits = bin(masked)[2:].zfill(width)
    # A string is already iterable; no intermediate tuple() is needed.
    return [int(b) for b in bits]
def bisect_last_true(arr):
    """Binary search for the last True occurrence.

    Assumes ``arr`` is a (possibly empty) prefix of Trues followed by
    Falses. Returns the index of the last True, or -1 if there is none.
    """
    left, right = -1, len(arr) - 1
    while left < right:
        # Bias the midpoint upward so the loop always makes progress.
        middle = (left + right + 1) // 2
        if arr[middle]:
            left = middle
        else:
            right = middle - 1
    return left
def logLikelihood(theta, times, flux, fluxErr):
    """
    Calculates the log likelihood based on the difference between the model and the data
    Used by logProbability

    Args:
        theta (list) - parameters of the model
        times (list) - time array of the light curve
        flux (list) - array of flux data points
        fluxErr (list) - array of errors for the flux data points
    Returns:
        lnl (float) - log likelihood for the given theta values
    """
    xdim, ydim, velocity, opacity, impact, tRef = theta
    # Simulate a transit with the proposed parameters to evaluate them.
    fluxPredicted = transitSim(ydim, xdim, 50, velocity, times, tRef, opacity, int(impact))
    # Per-point chi-squared terms between model and data.
    chi2_terms = [
        (flux[i] - fluxPredicted[i]) ** 2 / (2 * fluxErr[i] ** 2)
        for i in range(len(flux))
    ]
    return -np.sum(chi2_terms)
def check_attrs(tag, required, optional):
    """
    Helper routine to fetch required and optional attributes
    and complain about any additional attributes.

    :param tag (xml.dom.Element): DOM element node
    :param required [str]: list of required attributes
    :param optional [str]: list of optional attributes
    """
    result = reqd_attrs(tag, required)
    result.extend(opt_attrs(tag, optional))
    known = required + optional
    # Anything not declared and not an XML namespace declaration is
    # reported as unexpected.
    unexpected = [
        name for name in tag.attributes.keys()
        if name not in known and not name.startswith("xmlns:")]
    if unexpected:  # pragma: no cover
        warning(
            "%s: unknown attribute(s): %s" % (tag.nodeName, ', '.join(unexpected)))
        if verbosity > 0:
            print_location(filestack)
    return result
import os
def maybe_download(filename, expected_bytes):
    """Download a file if not present, and make sure it's the right size."""
    if not os.path.exists(filename):
        # NOTE(review): `url` is a module-level global defined elsewhere.
        filename, _ = urlretrieve(url + filename, filename)
    actual_size = os.stat(filename).st_size
    if actual_size == expected_bytes:
        print('Found and verified %s' % filename)
    else:
        print(actual_size)
        raise Exception('Failed to verify ' + filename + '. Can you get to it with a browser?')
    return filename
def _interpolation(point_set: list)-> list:
"""
Written by Eric Muzzo, 101184817
Function returns a list which is stored in a variable. The function
takes a list as its parameter.
>>> _interpolation(point_set)
"""
if len(point_set[0]) == 2:
degree = 1
poly = np.polyfit(point_set[1], point_set[2], degree)
return(poly)
elif len(point_set[0]) > 2:
degree = 2
poly = np.polyfit(point_set[1], point_set[2], degree)
return(poly) | da37af093f48d0d2cdb41aaa9cfcc54e709f6a08 | 3,633,807 |
def create_app(label_studio_config=None):
    """ Create application factory, as explained here:
    http://flask.pocoo.org/docs/patterns/appfactories/.

    :param label_studio_config: LabelStudioConfig object to use with input_args params
    :return: configured flask application
    """
    app = flask.Flask(__package__, static_url_path='')
    # NOTE(review): hard-coded secret key — consider loading it from the
    # environment or config instead of source.
    app.secret_key = 'A0Zrdqwf1AQWj12ajkhgFN]dddd/,?RfDWQQT'
    app.config.update(
        SEND_FILE_MAX_AGE_DEFAULT=0,
        WTF_CSRF_ENABLED=False,
    )
    app.url_map.strict_slashes = False
    app.label_studio = label_studio_config or config_from_file()
    # Check that the LabelStudioConfig loaded correctly.
    if app.label_studio is None:
        raise LabelStudioError('LabelStudioConfig is not loaded correctly')
    for bp in (blueprint, data_manager_blueprint, data_import_blueprint):
        app.register_blueprint(bp)
    app.before_request(app_before_request_callback)
    app.after_request(app_after_request_callback)
    return app
import os
import json
def load_json_result(best_result_name, dataset_name):
    """Load json from a path (directory + filename).

    :param best_result_name: file name of the saved result
    :param dataset_name: dataset subdirectory under RESULTS_DIR
    :return: the decoded JSON object
    """
    result_path = os.path.join(RESULTS_DIR, dataset_name, best_result_name)
    # json.load streams directly from the file handle — no need to build
    # a JSONDecoder over the full file contents.
    with open(result_path, 'r') as f:
        return json.load(f)
from collections import defaultdict
from typing import Union
from typing import List
from typing import Dict
def get_included_relationships(
    results: Union[EntryResource, List[EntryResource]],
    ENTRY_COLLECTIONS: Dict[str, EntryCollection],
    include_param: List[str],
) -> List[EntryResource]:
    """Filters the included relationships and makes the appropriate compound request
    to include them in the response.
    Parameters:
        results: list of returned documents.
        ENTRY_COLLECTIONS: dictionary containing collections to query, with key
            based on endpoint type.
        include_param: list of queried related resources that should be included in
            `included`.
    Returns:
        The flat list of unique related resource objects gathered across all
        entry types named in `include_param`. (NOTE: the original annotation
        claimed a Dict, but the final statement flattens the per-endpoint
        dict into a single list.)
    """
    if not isinstance(results, list):
        results = [results]
    # Validate requested relationship types up front; "" is tolerated.
    for entry_type in include_param:
        if entry_type not in ENTRY_COLLECTIONS and entry_type != "":
            raise BadRequest(
                detail=f"'{entry_type}' cannot be identified as a valid relationship type. "
                f"Known relationship types: {sorted(ENTRY_COLLECTIONS.keys())}"
            )
    endpoint_includes = defaultdict(dict)
    for doc in results:
        # convert list of references into dict by ID to only included unique IDs
        if doc is None:
            continue
        relationships = doc.relationships
        if relationships is None:
            continue
        relationships = relationships.dict()
        for entry_type in ENTRY_COLLECTIONS:
            # Skip entry type if it is not in `include_param`
            if entry_type not in include_param:
                continue
            entry_relationship = relationships.get(entry_type, {})
            if entry_relationship is not None:
                refs = entry_relationship.get("data", [])
                for ref in refs:
                    # could check here and raise a warning if any IDs clash
                    endpoint_includes[entry_type][ref["id"]] = ref
    included = {}
    for entry_type in endpoint_includes:
        # One compound OR-filter per endpoint fetches all referenced IDs
        # in a single query.
        compound_filter = " OR ".join(
            ['id="{}"'.format(ref_id) for ref_id in endpoint_includes[entry_type]]
        )
        params = EntryListingQueryParams(
            filter=compound_filter,
            response_format="json",
            response_fields=None,
            sort=None,
            page_limit=0,
            page_offset=0,
        )
        # still need to handle pagination
        ref_results, _, _, _ = ENTRY_COLLECTIONS[entry_type].find(params)
        included[entry_type] = ref_results
    # flatten dict by endpoint to list
    return [obj for endp in included.values() for obj in endp]
import os
def guess_strategy_type(file_name_or_ext):
    """Guess strategy type to use for file by extension.

    Args:
        file_name_or_ext: Either a file name with an extension or just
            an extension
    Returns:
        Strategy: Type corresponding to extension or None if there's no
            corresponding strategy type
    """
    if "." not in file_name_or_ext:
        # A bare extension was supplied (e.g. "csv").
        ext = file_name_or_ext
    else:
        # Full file name: keep only the extension, without the leading dot.
        # (The basename returned by splitext was previously bound to an
        # unused variable.)
        ext = os.path.splitext(file_name_or_ext)[1].lstrip(".")
    file_type_map = get_file_type_map()
    return file_type_map.get(ext, None)
import math
def plot_width(G, ax, width, tips=True, plot=False):
    """ Plot edge width of fault network

    For every node, draws a filled polygon whose outline follows the local
    fault width: each incident edge is offset perpendicular to its
    direction by the node widths, and the offset boundaries of neighboring
    (clockwise-sorted) edges are intersected to form the polygon corners.

    Parameters
    ----------
    G : nx.graph
        Graph
    ax : plt axis
        Axis
    width : np.array
        Width of network edges, indexed by node
    tips : bolean
        Plot triangular tips at degree-1 nodes
    plot : False
        Plot helper functions (unused; kept for interface compatibility)

    Returns
    -------
    """
    # Assertions
    assert isinstance(G, nx.Graph), "G is not a NetworkX graph"
    # Plotting
    pos = nx.get_node_attributes(G, 'pos')
    n_comp = 10000
    sns.color_palette(None, 2*n_comp)
    colors = get_node_colors(G, 'fault')

    def get_points(u):
        # Four corner points of edge u: each endpoint offset by its node
        # width, perpendicular to the edge direction.
        u0 = np.array(pos[u[0]])
        u1 = np.array(pos[u[1]])
        u_vec = u0 - u1
        u_perp = np.array([-u_vec[1], u_vec[0]])
        u_perp = u_perp/np.linalg.norm(u_perp)
        u0a = u0 - u_perp*width[u[0]]
        u0b = u0 + u_perp*width[u[0]]
        u1a = u1 - u_perp*width[u[1]]
        u1b = u1 + u_perp*width[u[1]]
        return u0a, u0b, u1a, u1b

    def get_intersect(a1, a2, b1, b2):
        """
        Returns the point of intersection of the lines passing through a2,a1
        and b2,b1.
        a1: [x, y] a point on the first line
        a2: [x, y] another point on the first line
        b1: [x, y] a point on the second line
        b2: [x, y] another point on the second line
        """
        s = np.vstack([a1, a2, b1, b2])      # s for stacked
        h = np.hstack((s, np.ones((4, 1))))  # h for homogeneous
        l1 = np.cross(h[0], h[1])            # get first line
        l2 = np.cross(h[2], h[3])            # get second line
        x, y, z = np.cross(l1, l2)           # point of intersection
        if z == 0:                           # lines are parallel
            return (float('inf'), float('inf'))
        return np.array([x/z, y/z])

    def clockwiseangle_and_distance(origin, point):
        # Sort key: clockwise angle from the reference vector [0, 1],
        # with distance from the origin as a tie-breaker.
        refvec = [0, 1]
        vector = [point[0]-origin[0], point[1]-origin[1]]
        lenvector = math.hypot(vector[0], vector[1])
        # If length is zero there is no angle
        if lenvector == 0:
            return -math.pi, 0
        normalized = [vector[0]/lenvector, vector[1]/lenvector]
        dotprod = normalized[0]*refvec[0] + \
            normalized[1]*refvec[1]  # x1*x2 + y1*y2
        diffprod = refvec[1]*normalized[0] - \
            refvec[0]*normalized[1]  # x1*y2 - y1*x2
        angle = math.atan2(diffprod, dotprod)
        # Map negative (counter-clockwise) angles onto [0, 2*pi).
        if angle < 0:
            return 2*math.pi+angle, lenvector
        return angle, lenvector

    def get_edges(G, node):
        # Incident edges of `node`, sorted clockwise around it.
        neighbors = list(G.neighbors(node))
        pts = [G.nodes[neighbor]['pos'] for neighbor in neighbors]
        pts, neighbors = zip(
            *sorted(
                zip(pts, neighbors),
                key=lambda x: clockwiseangle_and_distance(
                    G.nodes[node]['pos'], x[0])
            )
        )
        return [(node, neighbor) for neighbor in neighbors]

    def add_polygon(stack, color):
        # Draw one filled polygon on the axis.
        polygon = Polygon(stack, True, facecolor=color, alpha=1)
        p = PatchCollection([polygon], match_original=True)
        ax.add_collection(p)

    for node, color in zip(G, colors):
        degree = G.degree(node)
        if tips is True and degree == 1:
            # Triangular tip closing off a fault end.
            edge = get_edges(G, node)[0]
            node0 = np.array(pos[edge[0]])
            node1 = np.array(pos[edge[1]])
            vec = node0 - node1
            vec_perp = np.array([-vec[1], vec[0]])
            vec_perp = vec_perp/np.linalg.norm(vec_perp)
            vec_pos = node0 + vec_perp*width[edge[0]]
            vec_neg = node0 - vec_perp*width[edge[0]]
            add_polygon(np.vstack((vec_pos, node0 + vec, vec_neg, vec_pos)),
                        color)
        if 2 <= degree <= 5:
            # Generic construction replacing the former copy-pasted
            # branches for degrees 2, 3, 4 and 5 (verified to build the
            # same vertex sequence for each degree): intersect the offset
            # boundaries of consecutive clockwise-sorted edges, then walk
            # around the node alternating edge corners and intersections.
            edges = get_edges(G, node)
            points = [get_points(edge) for edge in edges]
            intersects = []
            for i in range(degree - 1):
                intersects.append(get_intersect(
                    points[i][1], points[i][3],
                    points[i + 1][0], points[i + 1][2]))
            # Close the cycle between the last and the first edge.
            intersects.append(get_intersect(
                points[0][0], points[0][2],
                points[degree - 1][1], points[degree - 1][3]))
            rows = []
            for i in range(degree):
                rows.append(points[i][3])
                rows.append(intersects[i])
                rows.append(points[(i + 1) % degree][2])
            add_polygon(np.vstack(rows), color)
    ax.axis('equal')
    plt.show()
import bisect
def get_mete_rad(S, N, beta=None, beta_dict={}):
    """Use beta to generate SAD predicted by the METE

    Keyword arguments:
    S -- the number of species
    N -- the total number of individuals
    beta -- allows input of beta by user if it has already been calculated
    beta_dict -- mutable default used as a shared cache passed through to
        get_beta (presumably intentional memoization — confirm before
        changing)
    """
    assert S > 1, "S must be greater than 1"
    assert N > 0, "N must be greater than 0"
    assert S/N < 1, "N must be greater than S"
    if beta is None:
        beta = get_beta(S, N, beta_dict=beta_dict)
    p = e ** -beta
    abundance = list(empty([S]))
    # Ranks from S down to 1. BUGFIX: `range(...)` objects have no
    # .reverse() in Python 3 — build an explicit list first.
    rank = list(range(1, int(S)+1))
    rank.reverse()
    # NOTE(review): `bisect` here appears to be a root-finder
    # (scipy.optimize.bisect), not the stdlib bisect module — confirm.
    if p >= 1:
        for i in range(0, int(S)):
            y = lambda x: trunc_logser_cdf(x, p, N) - (rank[i]-0.5) / S
            if y(1) > 0:
                abundance[i] = 1
            else:
                abundance[i] = int(round(bisect(y, 1, N)))
    else:
        for i in range(0, int(S)):
            y = lambda x: logser.cdf(x, p) / logser.cdf(N, p) - (rank[i]-0.5) / S
            abundance[i] = int(round(bisect(y, 0, N)))
    return (abundance, p)
def determine_firmware_versions(build_target):
    """Returns a namedtuple with main and ec firmware versions.

    Args:
        build_target (build_target_lib.BuildTarget): The build target.
    Returns:
        MainEcFirmwareVersions namedtuple with results.
    """
    versions = get_firmware_versions(build_target)
    # Prefer the read-write (RW) value; fall back to the plain one.
    main_version = versions.main_rw or versions.main
    ec_version = versions.ec_rw or versions.ec
    return MainEcFirmwareVersions(main_version, ec_version)
import sqlite3
def get_transcript_lengths(database, build):
    """ Read the transcripts from the database. Then compute the lengths.
        Store in a dictionary keyed by transcript_ID.

    :param database: path to the sqlite database file
    :param build: genome build passed through to the exon-length lookup
    :return: dict mapping transcript_ID -> transcript length
    """
    transcript_lengths = {}
    conn = sqlite3.connect(database)
    conn.row_factory = sqlite3.Row
    try:
        cursor = conn.cursor()
        # Get the exon lengths
        exon_lens = lu.get_all_exon_lengths(cursor, build)
        cursor.execute("SELECT * FROM transcripts")
        for transcript_row in cursor.fetchall():
            transcript_ID = transcript_row['transcript_ID']
            length = lu.get_transcript_length(transcript_row, exon_lens)
            transcript_lengths[transcript_ID] = length
    finally:
        # Close the connection even when a query fails (it previously
        # leaked on exception).
        conn.close()
    return transcript_lengths
def segment_chars(plate_img, fixed_width):
    """
    extract Value channel from the HSV format
    of image and apply adaptive thresholding
    to reveal the characters on the license plate

    :param plate_img: BGR license-plate image (as produced by cv2)
    :param fixed_width: width in pixels the plate is resized to before
        segmentation
    :return: list of BGR character crops ordered by sort_cont, or None if
        no character contours were found
    """
    V = cv2.split(cv2.cvtColor(plate_img, cv2.COLOR_BGR2HSV))[2]
    thresh = cv2.adaptiveThreshold(V, 255,
                                   cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY,
                                   11, 2)
    thresh = cv2.bitwise_not(thresh)
    # resize the license plate region to
    # a canoncial size
    plate_img = imutils.resize(plate_img, width = fixed_width)
    thresh = imutils.resize(thresh, width = fixed_width)
    bgr_thresh = cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR)
    # perform a connected components analysis
    # and initialize the mask to store the locations
    # of the character candidates
    # NOTE(review): `neighbors=8` was removed in newer scikit-image
    # versions (use connectivity=2 there) — confirm the pinned version.
    labels = measure.label(thresh, neighbors = 8, background = 0)
    charCandidates = np.zeros(thresh.shape, dtype ='uint8')
    # loop over the unique components
    characters = []
    for label in np.unique(labels):
        # if this is the background label, ignore it
        if label == 0:
            continue
        # otherwise, construct the label mask to display
        # only connected components for the current label,
        # then find contours in the label mask
        labelMask = np.zeros(thresh.shape, dtype ='uint8')
        labelMask[labels == label] = 255
        cnts = cv2.findContours(labelMask,
                                cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        # NOTE(review): findContours returns 2 values on OpenCV 2/4 and
        # 3 values on OpenCV 3; this indexing assumes OpenCV 2 or 3.
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        # ensure at least one contour was found in the mask
        if len(cnts) > 0:
            # grab the largest contour which corresponds
            # to the component in the mask, then grab the
            # bounding box for the contour
            c = max(cnts, key = cv2.contourArea)
            (boxX, boxY, boxW, boxH) = cv2.boundingRect(c)
            # compute the aspect ratio, solodity, and
            # height ration for the component
            aspectRatio = boxW / float(boxH)
            solidity = cv2.contourArea(c) / float(boxW * boxH)
            heightRatio = boxH / float(plate_img.shape[0])
            # determine if the aspect ratio, solidity,
            # and height of the contour pass the rules
            # tests
            keepAspectRatio = aspectRatio < 1.0
            keepSolidity = solidity > 0.15
            keepHeight = heightRatio > 0.5 and heightRatio < 0.95
            # check to see if the component passes
            # all the tests
            if keepAspectRatio and keepSolidity and keepHeight and boxW > 14:
                # compute the convex hull of the contour
                # and draw it on the character candidates
                # mask
                hull = cv2.convexHull(c)
                cv2.drawContours(charCandidates, [hull], -1, 255, -1)
    # NOTE(review): the 3-value unpack below also assumes an OpenCV 3
    # findContours signature.
    _, contours, hier = cv2.findContours(charCandidates,
                                         cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE)
    if contours:
        contours = sort_cont(contours)
        # value to be added to each dimension
        # of the character
        addPixel = 4
        for c in contours:
            (x, y, w, h) = cv2.boundingRect(c)
            # shift the crop origin up/left by the margin, clamped at 0
            if y > addPixel:
                y = y - addPixel
            else:
                y = 0
            if x > addPixel:
                x = x - addPixel
            else:
                x = 0
            # crop the character with the margin applied on all sides
            temp = bgr_thresh[y:y + h + (addPixel * 2),
                              x:x + w + (addPixel * 2)]
            characters.append(temp)
        return characters
    else:
        return None
from pathlib import Path
def get_filename_addition(orig_path: str, filename_addition: str) -> str:
    """Gets filename with addition. So if item is '/path/name.ext' and the filename_addition is '-add', the new result
    would be '/path/name-add.ext'.

    Args:
        orig_path (str): The original path.
        filename_addition (str): The new addition.

    Returns: The altered path.
    """
    parent_dir, filename, extension = get_path_pieces(orig_path)
    if filename is None:
        # No recognizable filename: just append the addition verbatim.
        return str(Path(orig_path + filename_addition))
    ext = extension if extension is not None else ''
    new_name = '{0}{1}.{2}'.format(filename, filename_addition, ext)
    return str(Path(parent_dir) / Path(new_name))
def negated(input_words, include_nt=True):
    """
    Determine if input contains negation words
    """
    # Any known negation token present?
    if any(word in input_words for word in NEGATE):
        return True
    # Any "n't" contraction (e.g. "isn't", "won't")?
    if include_nt:
        if any("n't" in word for word in input_words):
            return True
    # "least" not preceded by "at" counts as negation.
    if "least" in input_words:
        idx = input_words.index("least")
        if idx > 0 and input_words[idx - 1] != "at":
            return True
    return False
def get_box_value(box):
    """
    Retrieves the value of the provided widget `box` and returns it.
    """
    # Ordered (widget type, extractor) dispatch; order matters because
    # widget classes can share ancestry.
    dispatch = (
        (QW.QAbstractSpinBox, lambda w: w.value()),      # values
        (QW.QAbstractButton, lambda w: w.isChecked()),   # bools
        (QW.QComboBox, lambda w: w.currentText()),       # items
        (QW.QLineEdit, lambda w: w.text()),              # strings
        (BaseBox, lambda w: w.get_box_value()),          # custom boxes
    )
    for widget_type, extract in dispatch:
        if isinstance(box, widget_type):
            return extract(box)
    # If none applies, raise error
    raise NotImplementedError("Custom boxes must be a subclass of BaseBox")
from typing import Optional
from typing import Union
def pipeline(
    task: str,
    model: Optional = None,
    tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None,
    use_cuda: Optional[bool] = True,
):
    """
    Build a Keytotext pipeline for the requested task.

    :param task:
        (:obj:`str`):
        The task defining which pipeline will be returned. Currently accepted tasks are:
        - :obj:`"k2t"`: a :class:`K2TPipeline` based on t5-small
        - :obj:`"k2t-tiny"`: a :class:`K2TPipeline` based on t5-tiny
        - :obj:`"k2t-base"`: a :class:`K2TPipeline` based on t5-base
    :param model:
        (:obj:`str` or `optional`):
        The model used by the pipeline to make predictions. When omitted,
        the default for the :obj:`task` is loaded.
    :param tokenizer:
        (:obj:`str` or `optional`):
        The tokenizer used to encode data for the model — a model
        identifier or an actual :class:`~transformers.PreTrainedTokenizer`.
        When omitted, the default tokenizer for :obj:`model` is loaded
        (if it is a string).
    :param use_cuda:
        (:obj:`bool`, `optional`, defaults to :obj:`True`):
        Whether or not to use a GPU. Default: True
    :return:
        (:class:):
        `K2TPipeline`: A Keytotext pipeline for the task.
    """
    if task not in SUPPORTED_TASKS:
        raise KeyError(
            "Unknown task {}, available tasks are {}".format(
                task, list(SUPPORTED_TASKS.keys())
            )
        )
    task_config = SUPPORTED_TASKS[task]
    pipeline_class = task_config["impl"]
    if model is None:
        model = task_config["default"]["model"]
    if tokenizer is None:
        if not isinstance(model, str):
            # Impossible to guess the right tokenizer from a model object.
            raise Exception(
                "Please provided a PretrainedTokenizer "
                "class or a path/identifier to a pretrained tokenizer."
            )
        tokenizer = model
    if isinstance(tokenizer, (str, tuple)):
        tokenizer = AutoTokenizer.from_pretrained(tokenizer)
    # Instantiate model if needed
    if isinstance(model, str):
        model = AutoModelForSeq2SeqLM.from_pretrained(model)
    return pipeline_class(model=model, tokenizer=tokenizer, use_cuda=use_cuda)
def open_file_externally(path: str) -> None:
    """open_file_externally(path: str) -> None
    (internal)
    Open the provided file in the default external app.
    """
    # Stub: the real implementation is presumably provided natively —
    # this Python-side binding is a no-op.
    return None
def _get_init_fn():
    """Return a function that 'warm-starts' the training.

    Returns:
        An init function.
    """
    exclusions = []
    if FLAGS.checkpoint_exclude_scopes:
        exclusions = [scope.strip()
                      for scope in FLAGS.checkpoint_exclude_scopes.split(',')]
    # Keep every model variable whose op name does not fall under an
    # excluded scope.
    variables_to_restore = [
        var for var in slim.get_model_variables()
        if not any(var.op.name.startswith(exclusion)
                   for exclusion in exclusions)
    ]
    if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
        checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
    else:
        checkpoint_path = FLAGS.checkpoint_path
    tf.logging.info('Fine-tuning from {}'.format(checkpoint_path))
    return slim.assign_from_checkpoint_fn(checkpoint_path, variables_to_restore)
def migration_indices(r_mgeo, p_mgeo):
    """ identify migration indices

    Scans all pairs of reactant/product resonance graphs and returns the
    first truthy migration-index result (or the last computed value when
    none is truthy, preserving the original fall-through behavior).
    """
    reactant_graphs = chain(*map(multibond_opening_resonances,
                                 resonance_graphs(r_mgeo)))
    product_graphs = chain(*map(multibond_opening_resonances,
                                resonance_graphs(p_mgeo)))
    indices = None
    for reactant_graph, product_graph in product(reactant_graphs,
                                                 product_graphs):
        indices = graph_migration_indices(reactant_graph, product_graph)
        if indices:
            break
    return indices
def counter():
    """Create a counter instance.

    Returns a zero-argument function; each call returns the next integer,
    starting at 1. Each counter keeps its own independent count.
    """
    count = 0

    def increment():
        # `nonlocal` replaces the single-element-list cell hack.
        nonlocal count
        count += 1
        return count
    return increment
def replace_null(val):
    """
    Replace given value with 'NULL' if it's an equivalent of NULL.

    val {any}: value to check
    returns {str}: 'NULL' or `val`
    """
    # NaN floats, None, and the strings 'nan'/'None' all map to 'NULL'.
    if isinstance(val, float) and np.isnan(val):
        return 'NULL'
    if val is None:
        return 'NULL'
    if isinstance(val, str) and val in ('nan', 'None'):
        return 'NULL'
    return val
import re
def Element(node, tag, mandatory=False):
    """Get the element text for the provided tag from the provided node.

    Whitespace runs are collapsed to single spaces and the result is
    stripped. Raises SyntaxError when a mandatory tag is absent; returns
    None when an optional tag is absent.
    """
    text = node.findtext(tag)
    if text is not None:
        # Normalize internal whitespace, then trim the ends.
        return re.sub(r'\s+', ' ', text).strip()
    if mandatory:
        raise SyntaxError("Element '{}.{}' is mandatory, but not present!".format(node.tag, tag))
    return None
import base64
def fix_string_attr(tfjs_node):
    """
    Older tfjs models store strings as lists of ints (representing byte values). This function finds and replaces
    those strings, so protobuf can correctly decode the json.
    """
    def _decode(value):
        # Lists of byte values become base64-encoded strings; everything
        # else passes through untouched.
        if isinstance(value, list):
            return base64.encodebytes(bytes(value)).decode()
        return value

    if 'attr' not in tfjs_node:
        return
    for attr in tfjs_node['attr'].values():
        if 's' in attr:
            attr['s'] = _decode(attr['s'])
        if 'list' in attr and 's' in attr['list']:
            str_list = attr['list']['s']
            for idx, item in enumerate(str_list):
                str_list[idx] = _decode(item)
def get_env_space():
    """
    Return obsvervation dimensions, action dimensions and whether or not action space is continuous.
    """
    env = gym.make(ENV)
    # Exact-type check (not isinstance) kept from the original.
    continuous = type(env.action_space) is gym.spaces.box.Box
    if continuous:
        n_actions = env.action_space.shape[0]
    else:
        n_actions = env.action_space.n
    n_observations = env.observation_space.shape[0]
    return n_observations, n_actions, continuous
def is_windows_dark_theme():
    """Detect Windows theme"""
    # From https://successfulsoftware.net/2021/03/31/how-to-add-a-dark-theme-to-your-qt-application/
    registry_key = ("HKEY_CURRENT_USER\\Software\\Microsoft\\Windows"
                    "\\CurrentVersion\\Themes\\Personalize")
    settings = QtCore.QSettings(registry_key, QtCore.QSettings.NativeFormat)
    # 0 means apps should use the dark theme.
    return settings.value("AppsUseLightTheme", 1) == 0
def maximumProduct(nums):
    """
    Maximum product of any three numbers in the list.

    :type nums: List[int]
    :rtype: int
    """
    ordered = sorted(nums)
    # Either the two smallest (possibly large negatives) with the largest,
    # or the three largest values.
    with_two_smallest = ordered[0] * ordered[1] * ordered[-1]
    top_three = ordered[-3] * ordered[-2] * ordered[-1]
    return max(with_two_smallest, top_three)
def calczeta(phi1, phi2, theta1, theta2):
    """
    Calculate the angular separation between position (phi1, theta1) and
    (phi2, theta2)
    """
    # Identical positions: separation is exactly zero.
    if phi1 == phi2 and theta1 == theta2:
        return 0.0
    # Spherical law of cosines; clamp against rounding outside [-1, 1].
    cos_zeta = sin(theta1) * sin(theta2) * cos(phi1 - phi2) + cos(theta1) * cos(theta2)
    if cos_zeta < -1:
        return np.pi
    if cos_zeta > 1:
        return 0.0
    return acos(cos_zeta)
import base64
import json
def handler(event):
    """Parses event payload, extract data from BigQuery table, write to GCS"""
    # Payload arrives base64-encoded JSON.
    decoded = base64.b64decode(event["data"]).decode("utf-8")
    payload = json.loads(decoded)
    if "bq" not in payload:
        raise Exception("Invalid payload: no 'bq' field in payload", payload)
    bq = payload["bq"]
    if "table" in bq:
        bq_extract_table(bq)
        return "ok"
    if "view" in bq:
        raise Exception("Extracts from BigQuery views not implemented.")
    raise Exception("Invalid payload: no 'view' or 'table' field in payload", payload)
from typing import List
from typing import Tuple
def transform_coverage_to_coordinates(
    coverage_list: List[int],
) -> List[Tuple[int, int]]:
    """
    Takes a list of read depths where the list index is equal to the read position + 1 and returns
    a list of (x, y) coordinates.

    The coordinates will be simplified using Visvalingham-Wyatt algorithm if the list exceeds 100
    pairs.

    :param coverage_list: a list of position-indexed depth values
    :return: a list of (x, y) coordinates
    """
    end = len(coverage_list) - 1
    points = [(0, coverage_list[0])]
    # Keep only the points where the depth changes relative to a neighbor.
    points.extend(
        (x, coverage_list[x])
        for x in range(1, end)
        if coverage_list[x] != coverage_list[x - 1]
        or coverage_list[x] != coverage_list[x + 1]
    )
    points.append((end, coverage_list[end]))
    if len(points) > 100:
        return vw.simplify(points, ratio=0.4)
    return points
def complex_wrapper(func):
    """
    Wraps complex valued functions into two-dimensional functions.
    This enables the root-finding routine to handle it as a
    vectorial function.

    Args:
        func (callable): Callable that returns a complex result.

    Return:
        two-dimensional, callable: function handle,
        taking x = (re(x), im(x)) and returning [re(func(x)), im(func(x))].
    """
    def wrapper(x):
        # BUGFIX: np.complex was removed in NumPy 1.20+ (deprecated alias
        # of the builtin); use the builtin complex constructor instead.
        val = func(complex(x[0], x[1]))
        return np.array([np.real(val),
                         np.imag(val)])
    return wrapper
def test_rose_plot_data_using_cpt(data):
    """
    Test supplying a 2D numpy array containing a list of lengths and
    directions.
    Use a cmap to color sectors.
    """
    fig = Figure()
    rose_settings = dict(
        data=data,
        region=[0, 1, 0, 360],
        sector=15,
        diameter="5.5c",
        cmap="batlow",
        frame=["x0.2g0.2", "y30g30", "+gdarkgray"],
        pen="1p",
        norm=True,
        scale=0.4,
    )
    fig.rose(**rose_settings)
    return fig
def mod_list_comparator(bot_entry, new_list, original_list):
    """Summarize the differences between a bot's new and old subreddit lists.

    Builds Markdown lines describing additions and removals; each removal is
    additionally probed to report whether the subreddit went private or was
    banned.
    """
    lines = []
    old_set, new_set = set(original_list), set(new_list)
    delta = list(new_set - old_set) + list(old_set - new_set)
    lines.append("* Changes for u/{}: r/{}".format(bot_entry, ", r/".join(delta)))
    # Record the exact changes, preserving the input ordering.
    added = [sub for sub in new_list if sub not in original_list]
    removed = [sub for sub in original_list if sub not in new_list]
    if added:
        lines.append(" * Additions for u/{}: r/{}".format(bot_entry, ", r/".join(added)))
    if removed:
        lines.append(
            " * Removals for u/{}: r/{}".format(bot_entry, ", r/".join(removed))
        )
    # A removal may be explained by the subreddit going private or being
    # banned; probing an attribute raises the corresponding prawcore error.
    for sub_name in removed:
        subreddit = REDDIT.subreddit(sub_name)
        try:
            subreddit.subreddit_type
        except prawcore.exceptions.Forbidden:
            lines.append(" * Note: r/{} has gone private.".format(sub_name))
        except prawcore.exceptions.NotFound:
            lines.append(" * Note: r/{} has been banned.".format(sub_name))
    return lines
# | 9358025cd42902f938cd62b3930a9485ff2777cb | 3,633,836
import urllib.request
import json
def get_sources_articles(id):
    """
    Fetch the articles published by a news source and return them processed.

    :param id: identifier of the news source, interpolated into ``source_url``
    :return: processed article results, or ``None`` when the response carries
        no articles
    """
    # NOTE: ``import urllib`` alone does not make ``urllib.request``
    # available; the submodule must be imported explicitly.
    get_news_url = source_url.format(id, api_key)
    with urllib.request.urlopen(get_news_url) as url:
        get_news_data = url.read()
    get_news_response = json.loads(get_news_data)
    news_results = None
    if get_news_response['articles']:
        news_results = process_articles(get_news_response['articles'])
    return news_results
# | 4a321bc3a1a16c9dc989600419fe62878cb3b92d | 3,633,837
from typing import Optional
import enum
def cli_domain(name: Optional[str] = None):
    """
    Register a value domain for the CLI displayed with its own name or ``name``
    """
    def register(domain: TP) -> TP:
        # Only enum-based domains can be displayed by the CLI.
        if not issubclass(domain, enum.Enum):
            raise TypeError(f"Cannot register CLI domain: {domain}")
        _register_enum(domain, name)
        return domain
    return register
# | e1dc1e35e4e1921f107b3c5dfdfc9a22cbf4e413 | 3,633,838
def create_predict_function(
        route, predict_service, decorator_list_name):
    """Creates a predict function and registers it to
    the Flask app using the route decorator.

    :param str route:
      Path of the entry point.

    :param expose.interfaces.PredictService predict_service:
      The predict service to be registered to this entry point.

    :param str decorator_list_name:
      The decorator list to be used for this predict service.  It is
      OK if there is no such entry in the active expose config.

    :return:
      A predict service function that will be used to process
      predict requests.
    """
    # Resolve the persister once at registration time; every request served
    # by this endpoint then reuses the same persister instance.
    model_persister = get_config().get('model_persister')
    # ``endpoint=route`` keeps Flask endpoint names unique when several
    # routes are registered through this factory.
    @app.route(route, methods=['GET', 'POST'], endpoint=route)
    @PluggableDecorator(decorator_list_name)
    def predict_func():
        return predict(model_persister, predict_service)
    return predict_func | e72ad4b9877f2069ada79ae264c3c993072dfd30 | 3,633,839
def batch_pix_accuracy(output, target):
    """PixAcc"""
    # inputs are NDarray, output 4D, target 3D
    # the category -1 is ignored class, typically for background / boundary
    # Shift labels by +1 so the ignored class (-1) maps to 0 and can be
    # excluded with a simple "> 0" mask.
    predicted = 1 + np.argmax(output.asnumpy(), axis=1).astype('int64')
    labels = 1 + target.asnumpy().astype('int64')
    valid_mask = labels > 0
    pixel_labeled = np.sum(valid_mask)
    pixel_correct = np.sum(valid_mask * (predicted == labels))
    assert pixel_correct <= pixel_labeled, "Correct area should be smaller than Labeled"
    return pixel_correct, pixel_labeled
# | 030b46fc406f5293be0f1d55b0521878da26c93f | 3,633,840
def puma560() -> np.ndarray: # pragma: no cover
    """Get PUMA560 MDH model."""
    half_pi = np.pi / 2
    # Modified DH rows: (alpha, a, theta, d) per joint.
    rows = [
        (0, 0, 0, 0),
        (-half_pi, 0, 0, 0),
        (0, 612.7, 0, 0),
        (0, 571.6, 0, 163.9),
        (-half_pi, 0, 0, 115.7),
        (half_pi, 0, np.pi, 92.2),
    ]
    return np.array(rows)
# | ff0afbee7423ffa321e396fce7669a536949a8ba | 3,633,841
def get_scaling_factor(window: "Window" = None) -> float:
    """
    Return the ratio between a window's framebuffer size and its window size.

    If no window is supplied the currently active window will be used.

    :param Window window: Handle to window we want to get scaling factor of.
    :return: Scaling factor.  E.g., 2.0 would indicate the framebuffer
        width and height being 2.0 times the window width and height,
        meaning one "window pixel" is a 2 x 2 square in the framebuffer.
    :rtype: float
    """
    target = window if window else get_window()
    return target.get_pixel_ratio()
# | 1a75259d2d7214ad3437ed191cadb6e783121278 | 3,633,842
def fetch_addresses(xml_tree):
    """Pull out address information (addresses + instructions). Final
    notices do not have addresses (as we no longer accept comments)."""
    address_nodes = xml_tree.xpath('//ADD/P')
    addresses = {}
    for p in address_nodes:
        p = cleanup_address_p(p)
        if ':' in p:
            label, content = p.split(':', 1)
            # Instructions is the label
            if label.lower().strip() == 'instructions':
                # NOTE: labeled instruction lines are *prepended*, while
                # unlabeled trailing paragraphs (below) are appended.
                addresses['instructions'] = ([content.strip()] +
                                             addresses.get('instructions', []))
                continue
            # Skip label/content pairs that are really URLs split at the
            # scheme's colon (e.g. "http://...").
            if content.strip() and not (label.endswith('http') or
                                        label.endswith('https')):
                addresses['methods'] = (addresses.get('methods', [])
                                        + [(label.strip(), content.strip())])
                continue
        if not addresses:
            # The first unlabeled paragraph is treated as the intro text.
            addresses['intro'] = p
        else:
            addresses['instructions'] = (addresses.get('instructions', [])
                                         + [p])
    # Implicitly returns None when nothing was extracted.
    if addresses:
        return addresses | 2e17731a81c6b51e9c9352139bc9a25adcf15080 | 3,633,843
import random
def css_tricks(data):
    """Handle data from css-tricks.com"""
    chosen = random.choice(data["results"])
    highlight = chosen["highlight"]
    title = highlight["title"][0]
    description = highlight["content"][0]
    if not description:
        description = "No description found for this article"
    author = "Unknow" # Not in the API results
    site = "https://css-tricks.com/"
    url = chosen["fields"]["permalink.url.raw"]
    return Article(title, description, author, site, url)
# | 188e2f01ff53839781632657018f2a623f4cf7b5 | 3,633,844
import pprint
def choose_(all_service_nodes, service_node,node, accepted_method_verbs=('get', 'describe', 'list', 'search')):
# def choose_(service_node, node, method_verbs=('describe', 'list', 'search')):
    """Choose between method verbs.
    Priorities:
    """
    # NOTE(review): exploratory work-in-progress code.  Several names
    # (extract_node_methods_relations, combinations, compare_two_attributes_list,
    # compare_method_attributes_to_nodes) must come from module scope -- confirm
    # they are imported where this runs.  ``pprint`` is imported as a module,
    # so the bare ``pprint(...)`` calls below presumably rely on a different
    # module-level ``from pprint import pprint`` -- verify.
    # filter the node's methods with accepted method verbs
    accepted_methods = [
        method for method in node.methods
        if method.get_verb() in accepted_method_verbs
    ]
    if not accepted_methods:
        # Node has no accepted methods, failing to choose any method.
        return False
    method_parameter_relations = extract_node_methods_relations(all_service_nodes, service_node, node, accepted_method_verbs)
    # Bare expression: no-op at runtime, likely an interactive-debugging leftover.
    method_parameter_relations
    # TODO: save methods
    # -------------------------------
    # if we got multiple accepted methods we have to select between functions
    if len(accepted_methods) > 1:
        # priority 1: if method has no required parameter
        two_method_combinations = list(combinations(accepted_methods, 2))
        two_method_combinations
        for first_method, second_method in two_method_combinations:
            found_ratio, length_ratio = compare_two_attributes_list(first_method, second_method)
            if 100 > found_ratio > 0:
                print(">>>> BOTH FUNCTIONS NEEDS TO BE CALLED ")
                # print()
                # print(f">>>> First Method Attrs:", first_method.get_attributes())
                # print()
                # print(f">>>> Second Method Attrs:", second_method.get_attributes())
                # print()
                print(f"\t### RATIO #### {first_method.name} :: {second_method.name} \tfound_ratio:{found_ratio} | length_ratio:{length_ratio}")
            if found_ratio == 100:
                non_get_funcs = [m.name for m in (first_method, second_method) if m.get_verb()!='get'] # don't prefer get_ verb
                print(non_get_funcs)
                print(f"\t### RATIO #### found ratio is 100. Will choose the non-get function, or less parameter demanding function.")
    print()
    print()
    # Helper currently unused (see commented-out call below).
    def find_methods_having_least_amount_of_required_parameters(methods_list):
        """
        checks if the first marp has less required parameters than all of the other ones
        returns the first marp and other marps with the same cound of required parameters as the first marp.
        """
        # TODO: check if the values can be found in the node's attrs. If it can be found, it means
        # list or describe needs the resources own ids, so we can choose the other one. Other one probably
        # gets its parameters from one higher up resource.
        # sort methods_and_required_parameters for their required parameters arr. length
        sorted_methods = list(sorted(methods_list, key=lambda method: len(method.get_required_parameters())))
        # select the first method and add it to the list by default
        first_method = sorted_methods[0]
        first_methods_req_params = first_method.get_required_parameters()
        same_req_param_count_methods = [first_method]
        for other_method in sorted_methods[1:]:
            # look for other
            other_method_req_params = other_method.get_required_parameters()
            if len(first_methods_req_params) == len(other_method_req_params):
                same_req_param_count_methods.append(other_method)
        return same_req_param_count_methods
    # possible_methods will always have the same length of required_parameters
    # possible_methods = find_methods_having_least_amount_of_required_parameters(accepted_methods)
    possible_methods = accepted_methods
    has_more_than_one_possible_methods = len(possible_methods) > 1
    does_all_methods_has_no_required_parameters = all([m.get_required_parameters()==[] for m in possible_methods])
    required_param_methods = [m for m in possible_methods if m.get_required_parameters()]
    non_required_param_methods = [m for m in possible_methods if not m.get_required_parameters()]
    non_required_param_methods
    # dealing with multiple methods, len > 1 and all the possible methods will have the same amount of req parameters
    if has_more_than_one_possible_methods:
        pass
        compare_method_attributes_to_nodes(node, possible_methods)
        if does_all_methods_has_no_required_parameters:
            # we got multiple possible methods, and none of them has any required parameters
            # sometimes list and describe doesn't give same data. we should compare method's
            # returns_keys and the node's attributes
            print('\n### CASE: MULTIPLE POSSIBLE METHODS BUT NO REQUIRED PARAMETERS')
            print('--'*20)
            # TODO: Compare returns_keys of each method to node's attributes
            for method in possible_methods:
                print(method)
                pprint(method.get_returns_keys())
            print('\n'*4)
            possible_methods
        if not does_all_methods_has_no_required_parameters:
            # all the possible methods will have the same amount of req parameters
            # we have multiple possible methods, and they have required parameters
            # do the attribute comparision
            # TODO: see if there are any rule breakers
            print('\n### CASE: SELECT BETWEEN METHODS (Multiple Required Parameters):', [(m.name, m.get_required_parameters()) for m in possible_methods])
            print('--'*20)
            for method in possible_methods:
                print("\nmethod: ", node.name, method.name, method.get_required_parameters())
                pprint(method.get_returns_keys())
            print('\n'*4)
    print()
    # try to find which method you will choose
    # we know
    # selected_marp = possible_methods[0]
    # selected_marp
    # is_only_method_and_got_required_param = len(methods_and_required_parameters) == 1 and \
    # methods_and_required_parameters[0].get('required_parameters')
    # if is_only_method_and_got_required_param:
    #     # this is the case when we have to find the required parameters.
    #     methods_and_required_parameters
    return None | 3fab28343ee3059d8b989990368e5955ae9650db | 3,633,845
def rank(x, small_rank_is_high_num=True, rank_from_1=True):
    """
    Rank items in an array. Using the 'first' method, which ranks ties using
    the order of appearance. For rank functionality similar to R, see scipy's
    rankdata function (which is imported from this module for convenience).

    Parameters
    ----------
    x : numpy array or array-like
        The array of values to be sorted.
    small_rank_is_high_num : bool, optional
        Smallest rank value is the highest/largest number.
        The default is True.
    rank_from_1 : bool, optional
        Use 1 as the top rank rather than 0. The default is True.

    Returns
    -------
    rk : numpy array
        An array containing the ranks.
    """
    # A stable sort is required for the documented 'first' tie-breaking
    # behaviour; np.argsort's default (introsort) does not guarantee that
    # equal elements keep their order of appearance.
    order = np.argsort(x, kind="stable")
    # Argsorting the permutation yields each element's rank; no ties remain
    # in ``order``, so the default sort kind is fine here.
    rk = np.argsort(order)
    if small_rank_is_high_num:
        # Invert so the largest value receives the smallest rank number.
        rk = (len(x) - 1) - rk
    if rank_from_1:
        rk += 1
    return rk
# | 365cddbb80bfe7efaebcc89d7256edf29f0fade7 | 3,633,846
def unauthorized():
    """For basic_auth. Return 403 instead of 401 to prevent browsers from displaying the default auth dialog."""
    payload = jsonify({'error': 'Unauthorized access'})
    return make_response(payload, 403)
# | 516105e6feb3dddcfe80a148e0cd1369eea95aa2 | 3,633,847
def cma_ajax_get_table_iso(request):
    """
    Ajax view for fetching ISO images list.
    """
    if request.method != 'GET':
        # Non-GET requests fall through, matching the original implicit
        # ``None`` return.
        return None
    iso = prep_data('admin_cm/iso_image/get_list/', request.session)
    # Render raw byte counts as human-readable megabyte strings.
    for entry in iso:
        entry['size'] = filesizeformatmb(entry['size'])
    return messages_ajax.success(iso)
# | 7d5a4353dc07aa0263ad90818a5136eb4212859 | 3,633,848
import os
def get_config_filepath():
    """Return the filepath of the configuration file.

    Honors ``$XDG_CONFIG_HOME`` and falls back to ``~/.config``.
    """
    default_config_root = os.path.join(os.path.expanduser('~'), '.config')
    config_root = os.getenv('XDG_CONFIG_HOME', default=default_config_root)
    # Join components individually so the separator is correct on every
    # platform (the previous hard-coded 'zoia/config.yaml' embedded a POSIX
    # slash).
    return os.path.join(config_root, 'zoia', 'config.yaml')
# | 53d78749adf56219ca08b3b4556901241608a57d | 3,633,849
def prefs(func: callable):
    """Decorator that converts a function's dictionary result to PREFS format.

    The wrapped function's return value is passed through
    ``PREFS.convert_to_prefs`` so printing it yields the PREFS text
    representation instead of a plain Python dict, e.g.::

        @prefs
        def dictionary():
            return {'keybindings': {"Ctrl+C": "Copy", "Ctrl+V": "Paste"}}

        print(dictionary())
        >>> keybindings=>
                Ctrl+C='Copy'
                Ctrl+C='Paste'

    Notes:
        Only works with dictionaries.
    """
    def wrapper_function(*args, **kwargs):
        raw = func(*args, **kwargs)
        # Serialize the returned dictionary into the PREFS text format.
        return PREFS.convert_to_prefs(raw)
    return wrapper_function
# | f458c896cdcf96b21bcaaf0acfdd796cec04ab39 | 3,633,850
def friction_fnc(normal_force,friction_coefficient):
    """Compute the force of friction from the normal force and the
    friction coefficient (F = mu * N)."""
    return friction_coefficient * normal_force
# | 7c25e651d7ef8990eab049a5b356f5470496af8e | 3,633,851
def parse_line(line):
    """
    Parse a line of assembly code to create machine code byte templates.

    If a line is not identifiably a JUMP_IF_OVERFLOW_FLAG assembly line,
    return an empty list instead.

    Args:
        line (str): Assembly line to be parsed.
    Returns:
        list(dict): List of machine code byte template dictionaries or
        an empty list.
    """
    # Delegate to the shared flag-jump parser with this instruction's
    # register and name.
    templates = jump_if_flag_base.parse_line(line, "SP", _NAME)
    return templates
# | 54eb89b6912cb0a4d69b3fd10009a6e64073bc1c | 3,633,852
def unpack_asn1_general_string(value): # type (Union[bytes, ASN1Value]) -> bytes
    """Unpack an ASN.1 GeneralString value into raw bytes."""
    return extract_asn1_tlv(
        value,
        TagClass.universal,
        TypeTagNumber.general_string,
    )
# | 9d9fda9713a57e4e7c0d7a5162dbf19f37621bf0 | 3,633,853
def opti_loc_poly_traj(data_traj, t, minh, maxh, nb_h):
    """
    Find the optimal bandwidth h for estimating derivatives with local
    polynomial regression, via 10-fold cross-validation over ``nb_h``
    candidate bandwidths evenly spaced in ``[minh, maxh]``.

    :param data_traj: array of observed trajectory points (n x 3 assumed --
        the first three derivative columns are compared to it)
    :param t: array of observation times
    :param minh: smallest candidate bandwidth
    :param maxh: largest candidate bandwidth
    :param nb_h: number of candidate bandwidths to evaluate
    :return: the bandwidth with the smallest cross-validated error
    """
    HH = np.linspace(minh, maxh, nb_h)
    err_h = np.zeros(len(HH))
    kf = KFold(n_splits=10, shuffle=False)
    for j in range(len(HH)):
        err = []
        for train_index, test_index in kf.split(t):
            t_train, t_test = t[train_index], t[test_index]
            data_train, data_test = data_traj[train_index, :], data_traj[test_index, :]
            X_train = Trajectory(data_train, t_train)
            X_train.loc_poly_estimation(t_test, 5, HH[j])
            dX0 = X_train.derivatives[:, 0:3]
            diff = dX0 - data_test
            err.append(np.linalg.norm(diff) ** 2)
        err_h[j] = np.mean(err)
    # np.where(...) always returns a tuple, so the original
    # ``isinstance(np.where(...), int)`` branch was unreachable; np.argmin
    # directly yields the first index of the minimum, matching the live
    # branch's ``HH[np.where(...)][0]`` behaviour.
    return HH[np.argmin(err_h)]
# | bed960693b17928683284d69bd69bff19391c2f2 | 3,633,854
def _trimmed_mean(arr, n=2, axis=None, maskval=0):
"""
Return the trimmed mean of an input array.
Parameters
----------
arr: ndarray
Data to trim
n: integer
Number of points to trim at each end
axis: int
The axis along which to compute the trimmed mean.
If None, compute the trimmed mean on the
flattened array. Default None.
maskval: float
Value to mask in trimmed mean calculation. NaN
and inf values are already masked. Default 0.
Returns
-------
trimmed_mean: ndarray
Returns the trimmed mean
"""
arr_sorted = arr.copy()
shape = arr_sorted.shape
########################################################
# Sort with masked values, NaN, inf at the end,
# Trim the lowest n unmasked entries
########################################################
arr_sorted[np.where(np.logical_not(np.isfinite(arr_sorted)))] = np.inf
if maskval is not None:
arr_sorted[np.where(arr == maskval)] = np.inf
arr_sorted = np.sort(arr_sorted, axis=axis)
if axis > 0:
arr_sorted = np.take(arr_sorted, np.arange(n, shape[axis]), axis=axis)
else:
arr_sorted = arr_sorted[n:]
########################################################
# Move masked values to the beginning,
# trim the largest n unmasked entries
########################################################
arr_sorted[np.where(np.isinf(arr_sorted))] *= -1
if axis > 0:
arr_sorted = np.take(arr_sorted, np.arange(0, shape[axis] - n), axis=axis)
elif n > 0:
arr_sorted = np.sort(arr_sorted, axis=axis)[:-n]
else:
arr_sorted = np.sort(arr_sorted, axis=axis)
########################################################
# Replace mask with zero and return trimmed mean.
# No data --> return zero
########################################################
if maskval is not None:
norm = np.sum(np.isfinite(arr_sorted), axis=axis)
arr_sorted[np.where(np.isinf(arr_sorted))] = 0
return np.sum(arr_sorted, axis=axis) / (norm + 1e-100)
else:
return np.mean(arr_sorted, axis=axis) | 4c4315d6e10bb51af01074fd404f745f54233dae | 3,633,855 |
def validate_kind_name(value):
    """Validate the value of the kind_name."""
    # None is allowed; anything else must be a string.
    if value is None or isinstance(value, str):
        return value
    raise ValidationError('kind_name must be a string')
# | cb01b96bd9d5c75765c1d004f3a6794e50c275b9 | 3,633,856
def setup_directories(env, saving_dir, replay_filename, expert_replay_file_path, agent_replay_file_path, pretrain_model_save_path, create_dirs=True):
    """ Setup directories where information will be saved
    env: Pass in current environment to have access to getting environment variables for recording purposes
    saving_dir: main name for all related files (ex: train_DDPGfD_CubeS)
    expert_replay_file_path: Expert replay buffer file path
    agent_replay_file_path: Agent replay buffer file path
    pretrain_model_save_path: Pre-train policy file path
    """
    # NOTE(review): ``args`` and ``datestr`` are read from module scope --
    # confirm they are defined wherever this is used.
    # Store all directory names where information is saved
    all_saving_dirs = {}
    # Experiment output
    if args.mode == "experiment":
        model_save_path = saving_dir + "/policy/exp_policy"
        tensorboard_dir = saving_dir + "/output/tensorboard/"
        output_dir = saving_dir + "/output"
        heatmap_train_dir = saving_dir + "/output/heatmap/train"
        results_saving_dir = saving_dir + "/output/results"
    elif args.mode == "combined" or args.mode == "naive" or args.mode == "position-dependent":
        # Expert-data generation modes have no policy/tensorboard output.
        output_dir = saving_dir + "/output"
        heatmap_train_dir = output_dir + "/heatmap/expert"
        model_save_path = "None"
        results_saving_dir = "None"
        tensorboard_dir = "None"
    else:
        print("---------- STARTING: ", args.mode, " ---------")
        # Original saving directory locations for model and tensorboard
        model_save_path = "./policies/" + saving_dir + "/{}_{}".format(args.mode, "DDPGfD_kinovaGrip") + datestr + "/"
        tensorboard_dir = "./kinova_gripper_strategy/" + saving_dir + "/{}/".format(args.tensorboardindex)
        output_dir = "./output/" + saving_dir
        heatmap_train_dir = output_dir + "/heatmap" + "/" + args.mode
        results_saving_dir = output_dir + "/results" + "/" + args.mode
    # Create directory paths if they do not exist
    if create_dirs is True:
        create_paths([model_save_path, output_dir, tensorboard_dir, heatmap_train_dir, results_saving_dir])
    all_saving_dirs["saving_dir"] = saving_dir
    all_saving_dirs["model_save_path"] = model_save_path
    all_saving_dirs["output_dir"] = output_dir
    all_saving_dirs["tensorboard_dir"] = tensorboard_dir
    all_saving_dirs["heatmap_train_dir"] = heatmap_train_dir
    all_saving_dirs["results_saving_dir"] = results_saving_dir
    all_saving_dirs["replay_buffer"] = replay_filename
    all_saving_dirs["expert_replay_file_path"] = expert_replay_file_path
    all_saving_dirs["agent_replay_file_path"] = agent_replay_file_path
    all_saving_dirs["pretrain_model_save_path"] = pretrain_model_save_path
    # All three coordinate-file entries currently come from the same
    # ``env.get_coords_filename()`` call.
    all_saving_dirs["train_init_coord_file_path"] = env.get_coords_filename()
    all_saving_dirs["eval_init_coord_file_path"] = env.get_coords_filename()
    all_saving_dirs["controller_init_coord_file_path"] = env.get_coords_filename()
    return all_saving_dirs | e68874de0f61d023306f8f0fcb2a76dc2f83cb7e | 3,633,857
def keyInfoCtxCopyUserPref(dst, src):
    """Copy user preferences from the ``src`` context into ``dst``.

    dst : the destination context object.
    src : the source context object.
    Returns : 0 on success and a negative value if an error occurs
    (delegates to the underlying ``xmlsecmod`` binding).
    """
    result = xmlsecmod.keyInfoCtxCopyUserPref(dst, src)
    return result
# | 709fd4f3a42d7dede8ddc801adc717668b92fa19 | 3,633,858
def kernel_classifier_distance_and_std_from_activations(real_activations,
                                                        generated_activations,
                                                        max_block_size=500,
                                                        dtype=None):
  """Kernel "classifier" distance for evaluating a generative model.
  This methods computes the kernel classifier distance from activations of
  real images and generated images. This can be used independently of the
  kernel_classifier_distance() method, especially in the case of using large
  batches during evaluation where we would like to precompute all of the
  activations before computing the classifier distance, or if we want to
  compute multiple metrics based on the same images. It also returns a rough
  estimate of the standard error of the estimator.
  This technique is described in detail in https://arxiv.org/abs/1801.01401.
  Given two distributions P and Q of activations, this function calculates
      E_{X, X' ~ P}[k(X, X')] + E_{Y, Y' ~ Q}[k(Y, Y')]
        - 2 E_{X ~ P, Y ~ Q}[k(X, Y)]
  where k is the polynomial kernel
      k(x, y) = ( x^T y / dimension + 1 )^3.
  This captures how different the distributions of real and generated images'
  visual features are. Like the Frechet distance (and unlike the Inception
  score), this is a true distance and incorporates information about the
  target images. Unlike the Frechet score, this function computes an
  *unbiased* and asymptotically normal estimator, which makes comparing
  estimates across models much more intuitive.
  The estimator used takes time quadratic in max_block_size. Larger values of
  max_block_size will decrease the variance of the estimator but increase the
  computational cost. This differs slightly from the estimator used by the
  original paper; it is the block estimator of https://arxiv.org/abs/1307.1954.
  The estimate of the standard error will also be more reliable when there are
  more blocks, i.e. when max_block_size is smaller.
  NOTE: the blocking code assumes that real_activations and
  generated_activations are both in random order. If either is sorted in a
  meaningful order, the estimator will behave poorly.
  Args:
    real_activations: 2D Tensor containing activations of real data. Shape is
      [batch_size, activation_size].
    generated_activations: 2D Tensor containing activations of generated data.
      Shape is [batch_size, activation_size].
    max_block_size: integer, default 500. The distance estimator splits samples
      into blocks for computational efficiency. Larger values are more
      computationally expensive but decrease the variance of the distance
      estimate. Having a smaller block size also gives a better estimate of the
      standard error.
    dtype: if not None, coerce activations to this dtype before computations.
  Returns:
   The Kernel Inception Distance. A floating-point scalar of the same type
     as the output of the activations.
   An estimate of the standard error of the distance estimator (a scalar of
     the same type).
  """
  real_activations.shape.assert_has_rank(2)
  generated_activations.shape.assert_has_rank(2)
  real_activations.shape[1].assert_is_compatible_with(
      generated_activations.shape[1])
  if dtype is None:
    dtype = real_activations.dtype
    assert generated_activations.dtype == dtype
  else:
    real_activations = math_ops.cast(real_activations, dtype)
    generated_activations = math_ops.cast(generated_activations, dtype)
  # Figure out how to split the activations into blocks of approximately
  # equal size, with none larger than max_block_size.
  n_r = array_ops.shape(real_activations)[0]
  n_g = array_ops.shape(generated_activations)[0]
  n_bigger = math_ops.maximum(n_r, n_g)
  n_blocks = math_ops.to_int32(math_ops.ceil(n_bigger / max_block_size))
  v_r = n_r // n_blocks
  v_g = n_g // n_blocks
  n_plusone_r = n_r - v_r * n_blocks
  n_plusone_g = n_g - v_g * n_blocks
  # Blocks get size v or v+1 so every sample is used exactly once.
  sizes_r = array_ops.concat([
      array_ops.fill([n_blocks - n_plusone_r], v_r),
      array_ops.fill([n_plusone_r], v_r + 1),
  ], 0)
  sizes_g = array_ops.concat([
      array_ops.fill([n_blocks - n_plusone_g], v_g),
      array_ops.fill([n_plusone_g], v_g + 1),
  ], 0)
  # Cumulative block boundaries; block i spans inds[i]:inds[i + 1].
  zero = array_ops.zeros([1], dtype=dtypes.int32)
  inds_r = array_ops.concat([zero, math_ops.cumsum(sizes_r)], 0)
  inds_g = array_ops.concat([zero, math_ops.cumsum(sizes_g)], 0)
  dim = math_ops.cast(tf.shape(real_activations)[1], dtype)
  def compute_kid_block(i):
    'Compute the ith block of the KID estimate.'
    r_s = inds_r[i]
    r_e = inds_r[i + 1]
    r = real_activations[r_s:r_e]
    m = math_ops.cast(r_e - r_s, dtype)
    g_s = inds_g[i]
    g_e = inds_g[i + 1]
    g = generated_activations[g_s:g_e]
    n = math_ops.cast(g_e - g_s, dtype)
    # Cubic polynomial kernel; diagonal (self-similarity) terms are removed
    # via the trace to keep the estimator unbiased.
    k_rr = (math_ops.matmul(r, r, transpose_b=True) / dim + 1)**3
    k_rg = (math_ops.matmul(r, g, transpose_b=True) / dim + 1)**3
    k_gg = (math_ops.matmul(g, g, transpose_b=True) / dim + 1)**3
    return (-2 * math_ops.reduce_mean(k_rg) +
            (math_ops.reduce_sum(k_rr) - math_ops.trace(k_rr)) / (m * (m - 1)) +
            (math_ops.reduce_sum(k_gg) - math_ops.trace(k_gg)) / (n * (n - 1)))
  ests = functional_ops.map_fn(
      compute_kid_block, math_ops.range(n_blocks), dtype=dtype, back_prop=False)
  mn = math_ops.reduce_mean(ests)
  # nn_impl.moments doesn't use the Bessel correction, which we want here
  n_blocks_ = math_ops.cast(n_blocks, dtype)
  var = control_flow_ops.cond(
      math_ops.less_equal(n_blocks, 1),
      lambda: array_ops.constant(float('nan'), dtype=dtype),
      lambda: math_ops.reduce_sum(math_ops.square(ests - mn)) / (n_blocks_ - 1))
  return mn, math_ops.sqrt(var / n_blocks_) | e3197b9e5952bcb98faba0705115e887aa34e067 | 3,633,859
def yolo_eval(yolo_outputs,
              anchors,
              num_classes,
              image_shape,
              max_boxes=20,
              score_threshold=.6, # max_boxes=20, score_threshold=.6,iou_threshold=.5
              iou_threshold=.5):
    """Evaluate YOLO model on given input and return filtered boxes."""
    num_layers = len(yolo_outputs)
    # Which anchor indices belong to each output scale (3-scale vs 2-scale
    # models).
    anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers == 3 else [[3,4,5], [0,1,2]] # default setting
    # Network stride at the coarsest output is 32, so the input resolution is
    # 32x the first feature map's spatial size.
    input_shape = K.shape(yolo_outputs[0])[1:3] * 32
    boxes = []
    box_scores = []
    for l in range(num_layers):
        _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l],
            anchors[anchor_mask[l]], num_classes, input_shape, image_shape)
        boxes.append(_boxes)
        box_scores.append(_box_scores)
    boxes = K.concatenate(boxes, axis=0)
    box_scores = K.concatenate(box_scores, axis=0)
    # Drop candidates below the confidence threshold before NMS.
    mask = box_scores >= score_threshold
    max_boxes_tensor = K.constant(max_boxes, dtype='int32')
    boxes_ = []
    scores_ = []
    classes_ = []
    # Run non-max suppression independently per class.
    for c in range(num_classes):
        class_boxes = tf.boolean_mask(boxes, mask[:, c])
        class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])
        nms_index = tf.image.non_max_suppression(
            class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold)
        class_boxes = K.gather(class_boxes, nms_index)
        class_box_scores = K.gather(class_box_scores, nms_index)
        classes = K.ones_like(class_box_scores, 'int32') * c
        boxes_.append(class_boxes)
        scores_.append(class_box_scores)
        classes_.append(classes)
    boxes_ = K.concatenate(boxes_, axis=0)
    scores_ = K.concatenate(scores_, axis=0)
    classes_ = K.concatenate(classes_, axis=0)
    return boxes_, scores_, classes_ | ddb8afc6c1ead0cf7dfcfcdab3cc1e6438bdcaa7 | 3,633,860
from typing import List
def tile_images(images: List[np.ndarray]) -> np.ndarray:
    """Arrange a list of equally-sized images into one near-square grid.

    Args:
        images: list of images where each image has dimension
            (height x width x channels)

    Returns:
        tiled image (new_height x width x channels)
    """
    assert len(images) > 0, "empty list of images"
    stacked = np.asarray(images)
    n_images, height, width, n_channels = stacked.shape
    rows = int(np.ceil(np.sqrt(n_images)))
    cols = int(np.ceil(float(n_images) / rows))
    # Pad with black frames so the grid is completely filled.
    padding = [images[0] * 0 for _ in range(n_images, rows * cols)]
    grid = np.array(list(images) + padding)
    # (rows, cols, h, w, c) -> (rows, h, cols, w, c) -> (rows*h, cols*w, c)
    grid = grid.reshape(rows, cols, height, width, n_channels)
    grid = grid.transpose(0, 2, 1, 3, 4)
    return grid.reshape(rows * height, cols * width, n_channels)
# | dfa1bf0f6b778575083c99e6bf88c12614d8b8f2 | 3,633,861
def network(images1, images2, weight_decay):
    """
    Siamese neural network for training person re-identification. Based on:
    https://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Ahmed_An_Improved_Deep_2015_CVPR_paper.pdf
    :param images1, images2: image pairs (positive and negative examples)
    :param weight_decay: (scalar) an additional term in the weight update rule that causes the weights to exponentially
    decay to zero, if no other update is scheduled.
    :return: logits (before softmax)
    """
    with tf.variable_scope('network', reuse=tf.AUTO_REUSE):
        # Tied Convolution
        conv1_1 = tf.layers.conv2d(images1, 20, [5, 5], activation=tf.nn.relu,
            kernel_regularizer=tf.contrib.layers.l2_regularizer(weight_decay), name='conv1_1')
        pool1_1 = tf.layers.max_pooling2d(conv1_1, [2, 2], [2, 2], name='pool1_1')
        conv1_2 = tf.layers.conv2d(pool1_1, 25, [5, 5], activation=tf.nn.relu,
            kernel_regularizer=tf.contrib.layers.l2_regularizer(weight_decay), name='conv1_2')
        pool1_2 = tf.layers.max_pooling2d(conv1_2, [2, 2], [2, 2], name='pool1_2')
        conv2_1 = tf.layers.conv2d(images2, 20, [5, 5], activation=tf.nn.relu,
            kernel_regularizer=tf.contrib.layers.l2_regularizer(weight_decay), name='conv2_1')
        pool2_1 = tf.layers.max_pooling2d(conv2_1, [2, 2], [2, 2], name='pool2_1')
        conv2_2 = tf.layers.conv2d(pool2_1, 25, [5, 5], activation=tf.nn.relu,
            kernel_regularizer=tf.contrib.layers.l2_regularizer(weight_decay), name='conv2_2')
        pool2_2 = tf.layers.max_pooling2d(conv2_2, [2, 2], [2, 2], name='pool2_2')
        # Cross-Input Neighborhood Differences
        trans = tf.transpose(pool1_2, [0, 3, 1, 2])
        shape = trans.get_shape().as_list()
        m1s = tf.ones([shape[0], shape[1], shape[2], shape[3], 5, 5])
        reshape = tf.reshape(trans, [shape[0], shape[1], shape[2], shape[3], 1, 1])
        f = tf.multiply(reshape, m1s)
        trans = tf.transpose(pool2_2, [0, 3, 1, 2])
        reshape = tf.reshape(trans, [1, shape[0], shape[1], shape[2], shape[3]])
        g = []
        pad = tf.pad(reshape, [[0, 0], [0, 0], [0, 0], [2, 2], [2, 2]])
        # BUGFIX: ``xrange`` is Python-2-only and raises NameError on
        # Python 3; ``range`` is the drop-in replacement.
        for i in range(shape[2]):
            for j in range(shape[3]):
                g.append(pad[:, :, :, i:i+5, j:j+5])
        concat = tf.concat(g, axis=0)
        reshape = tf.reshape(concat, [shape[2], shape[3], shape[0], shape[1], 5, 5])
        g = tf.transpose(reshape, [2, 3, 0, 1, 4, 5])
        reshape1 = tf.reshape(tf.subtract(f, g), [shape[0], shape[1], shape[2] * 5, shape[3] * 5])
        reshape2 = tf.reshape(tf.subtract(g, f), [shape[0], shape[1], shape[2] * 5, shape[3] * 5])
        k1 = tf.nn.relu(tf.transpose(reshape1, [0, 2, 3, 1]), name='k1')
        k2 = tf.nn.relu(tf.transpose(reshape2, [0, 2, 3, 1]), name='k2')
        # Patch Summary Features
        l1 = tf.layers.conv2d(k1, 25, [5, 5], (5, 5), activation=tf.nn.relu,
            kernel_regularizer=tf.contrib.layers.l2_regularizer(weight_decay), name='l1')
        l2 = tf.layers.conv2d(k2, 25, [5, 5], (5, 5), activation=tf.nn.relu,
            kernel_regularizer=tf.contrib.layers.l2_regularizer(weight_decay), name='l2')
        # Across-Patch Features
        m1 = tf.layers.conv2d(l1, 25, [3, 3], activation=tf.nn.relu,
            kernel_regularizer=tf.contrib.layers.l2_regularizer(weight_decay), name='m1')
        pool_m1 = tf.layers.max_pooling2d(m1, [2, 2], [2, 2], padding='same', name='pool_m1')
        m2 = tf.layers.conv2d(l2, 25, [3, 3], activation=tf.nn.relu,
            kernel_regularizer=tf.contrib.layers.l2_regularizer(weight_decay), name='m2')
        pool_m2 = tf.layers.max_pooling2d(m2, [2, 2], [2, 2], padding='same', name='pool_m2')
        # Higher-Order Relationships
        concat = tf.concat([pool_m1, pool_m2], axis=3)
        reshape = tf.reshape(concat, [FLAGS.batch_size, -1])
        fc1 = tf.layers.dense(reshape, 500, tf.nn.relu, name='fc1')
        fc2 = tf.layers.dense(fc1, 2, name='fc2')
        return fc2
# | 6a84f19cf529ec7e5828b97b1edc9ea2f1ecbe7f | 3,633,862
def prepQuestAppliedNotification(shipper, questr, questdetails):
    """Prepare the details for the "quest accepted" notification email.

    :param shipper: user object of the shipper who took the quest
        (name/phone/displayname are embedded in the merge vars)
    :param questr: user object of the questr who owns the quest
    :param questdetails: quest object holding pickup/dropoff/reward details
    :return: dict with 'subject', 'template_name' and 'global_merge_vars'
        suitable for the mail-sending layer
    """
    template_name="Quest_Accepted_Notification_Questr"
    subject="Questr - Your shipment has been processed"
    quest_support_email="support@questr.co"
    email_details = {
        'subject' : subject,
        'template_name' : template_name,
        'global_merge_vars': {
            'quest_public_link' : settings.QUESTR_URL+'/quest/'+str(questdetails.id),
            'quest_description' : questdetails.description,
            'questr_first_name' : questr.first_name,
            'shipper_first_name': shipper.first_name,
            'shipper_last_name': shipper.last_name,
            'shipper_user_name': shipper.displayname,
            'shipper_phone': shipper.phone,
            'shipper_profile_link' : settings.QUESTR_URL+'/user/'+shipper.displayname,
            'quest_title' : questdetails.title,
            'quest_size' : questdetails.size,
            # NOTE: 'quest_pickup_name' appeared twice in this literal;
            # the duplicate key has been removed (identical value, so the
            # resulting dict is unchanged).
            'quest_pickup_name' : questdetails.pickup['name'],
            'quest_pickup_phone' : questdetails.pickup['phone'],
            'quest_pickup_address' : questdetails.pickup['address'],
            'quest_pickup_city' : questdetails.pickup['city'],
            'quest_pickup_postalcode' : questdetails.pickup['postalcode'],
            'quest_dropoff_name' : questdetails.dropoff['name'],
            'quest_dropoff_phone' : questdetails.dropoff['phone'],
            'quest_dropoff_address' : questdetails.dropoff['address'],
            'quest_dropoff_city' : questdetails.dropoff['city'],
            'quest_dropoff_postalcode' : questdetails.dropoff['postalcode'],
            'quest_reward' : str(questdetails.reward),
            'quest_distance' : str(questdetails.distance),
            'quest_creation_date' : questdetails.creation_date.strftime('%m-%d-%Y'),
            'quest_support_mail': quest_support_email,
            'quest_shipment_link' : settings.QUESTR_URL+'/user/trades/',
            'company' : "Questr Co"
            },
        }
    return email_details
from typing import Iterable
from typing import Tuple
def commit_ref_db_val_from_raw_val(db_kvs: Iterable[Tuple[bytes, bytes]]) -> DigestAndBytes:
    """Serialize and compress db key/value pairs for commit storage.

    Parameters
    ----------
    db_kvs : Iterable[Tuple[bytes, bytes]]
        Iterable collection of binary encoded db_key/db_val pairs.

    Returns
    -------
    DigestAndBytes
        `raw` holds the serialized and compressed representation;
        `digest` is the digest of the joined db kvs.
    """
    # Join each key/value pair with the kv separator, then digest the
    # joined records before packing them into a single record stream.
    joined = tuple(c.CMT_KV_JOIN_KEY.join(pair) for pair in db_kvs)
    digest = _commit_ref_joined_kv_digest(joined)
    packed = c.CMT_REC_JOIN_KEY.join(joined)
    compressed = blosc.compress(
        packed, typesize=1, clevel=9, shuffle=blosc.SHUFFLE, cname='zlib')
    return DigestAndBytes(digest=digest, raw=compressed)
def measure_text(text, r, ax):
    """Measure the rendered size of a text string on the canvas."""
    # Draw a temporary text object, measure it, then remove it so the
    # probe never appears in the final figure.
    probe = plt.text(0.5, 0.5, text, **font_opts)
    size = measure_text_obj(probe, r, ax)
    probe.remove()
    return size
def add_sto_plants(net: pypsa.Network, topology_type: str = "countries",
                   extendable: bool = False, cyclic_sof: bool = True) -> pypsa.Network:
    """
    Add traditional hydro storage (STO) reservoir units to a Network instance.

    Units are added as PyPSA StorageUnits of type 'sto' that can only
    dispatch (efficiency_store=0); they are refilled through the natural
    `inflow` time series.

    Parameters
    ----------
    net: pypsa.Network
        A Network instance.
    topology_type: str
        Can currently be countries (for one node per country topologies) or ehighway (for topologies based on ehighway)
    extendable: bool (default: False)
        Whether generators are extendable
    cyclic_sof: bool (default: True)
        Whether to set to True the cyclic_state_of_charge for the storage_unit component

    Returns
    -------
    net: pypsa.Network
        Updated network
    """
    check_assertions(net, topology_type)
    # Hydro generators can only be added onshore
    buses_onshore = net.buses.dropna(subset=["onshore_region"], axis=0)
    # Load capacities and inflows
    aggr_level = "countries" if topology_type == "countries" else "NUTS3"
    pow_cap, en_cap = get_sto_capacities(aggr_level)
    inflows = get_sto_inflows(aggr_level, net.snapshots)
    if topology_type == 'countries':
        # Extract only countries for which data is available
        countries_with_capacity = sorted(list(set(buses_onshore.country) & set(pow_cap.index)))
        buses_with_capacity_indexes = net.buses[net.buses.country.isin(countries_with_capacity)].index
        # Re-index the per-country capacity/inflow data onto network buses.
        bus_pow_cap = pow_cap.loc[countries_with_capacity]
        bus_pow_cap.index = buses_with_capacity_indexes
        bus_en_cap = en_cap.loc[countries_with_capacity]
        bus_en_cap.index = buses_with_capacity_indexes
        bus_inflows = inflows[countries_with_capacity]
        bus_inflows.columns = buses_with_capacity_indexes
    else:  # topology_type == 'ehighway'
        bus_pow_cap, bus_en_cap, bus_inflows = \
            sto_inputs_nuts_to_ehighway(buses_onshore.index, pow_cap, en_cap, inflows)
        # e-highway bus names embed the country code after a 2-char prefix.
        countries_with_capacity = set(bus_pow_cap.index.str[2:])
    logger.info(f"Adding {bus_pow_cap.sum():.2f} GW of STO hydro "
                f"with {bus_en_cap.sum() * 1e-3:.2f} TWh of storage in {countries_with_capacity}.")
    bus_inflows = bus_inflows.round(3)
    # Reservoir duration: hours of storage at full power (energy / power).
    max_hours = bus_en_cap / bus_pow_cap
    capital_cost, marginal_cost = get_costs('sto', sum(net.snapshot_weightings['objective']))
    # Get efficiencies
    efficiency_dispatch = get_tech_info('sto', ['efficiency_ds'])["efficiency_ds"]
    net.madd("StorageUnit",
             bus_pow_cap.index,
             suffix=" Storage reservoir",
             bus=bus_pow_cap.index,
             type='sto',
             p_nom=bus_pow_cap,
             p_nom_min=bus_pow_cap,
             p_min_pu=0.,
             p_nom_extendable=extendable,
             capital_cost=capital_cost,
             marginal_cost=marginal_cost,
             # No pumping: reservoirs are only refilled by natural inflow.
             efficiency_store=0.,
             efficiency_dispatch=efficiency_dispatch,
             cyclic_state_of_charge=cyclic_sof,
             max_hours=max_hours,
             inflow=bus_inflows,
             x=buses_onshore.loc[bus_pow_cap.index.values].x,
             y=buses_onshore.loc[bus_pow_cap.index.values].y)
    return net
def flow_to_image(flow):
    """
    Convert an optical flow map into a Middlebury color-coded image.

    :param flow: optical flow map, last axis holds the (u, v) components
    :return: uint8 color image in Middlebury color coding
    """
    print('flow to image shape', flow.shape)
    u = flow[:, :, 0]
    v = flow[:, :, 1]
    # Zero out vectors flagged as unknown before computing statistics.
    unknown = (abs(u) > UNKNOWN_FLOW_THRESH) | (abs(v) > UNKNOWN_FLOW_THRESH)
    u[unknown] = 0
    v[unknown] = 0
    maxu = max(-999., np.max(u))
    minu = min(999., np.min(u))
    maxv = max(-999., np.max(v))
    minv = min(999., np.min(v))
    rad = np.sqrt(u ** 2 + v ** 2)
    maxrad = max(-1, np.max(rad))
    print("max flow: %.4f\nflow range:\nu = %.3f .. %.3f\nv = %.3f .. %.3f" % (maxrad, minu, maxu, minv, maxv))
    # Normalize by the maximum magnitude (eps guards against division by 0).
    scale = maxrad + np.finfo(float).eps
    img = compute_color(u / scale, v / scale)
    # Black out the unknown-flow pixels across all three channels.
    mask = np.repeat(unknown[:, :, np.newaxis], 3, axis=2)
    img[mask] = 0
    return np.uint8(img)
def contract_edges(graph, edge_weight='weight'):
    """
    Given a graph, contract edges into a list of contracted edges. Nodes with degree 2 are collapsed into an edge
    stretching from a dead-end node (degree 1) or intersection (degree >= 3) to another like node.

    Args:
        graph (networkx graph): appears to be a MultiGraph — edge data is
            accessed as ``graph[u][v][0]`` — TODO confirm
        edge_weight (str): edge weight attribute to us for shortest path calculations

    Returns:
        List of tuples representing contracted edges, each of the form
        ``(node_a, node_b, total_weight, [full_node_path])``
    """
    if len([n for n in graph.nodes() if graph.degree(n) > 2]) > 1:
        # Keep only dead-ends (degree 1) and intersections (degree >= 3);
        # degree-2 nodes are interior points of a contracted edge.
        keep_nodes = [n for n in graph.nodes() if graph.degree(n) != 2]
        contracted_edges = []
        for n in keep_nodes:
            for nn in nx.neighbors(graph, n):
                nn_hood = set(nx.neighbors(graph, nn)) - {n}
                path = [n, nn]
                if len(nn_hood) == 1:
                    # Walk along the chain of degree-2 nodes until the next
                    # keep-node (a node with != 1 remaining neighbor).
                    while len(nn_hood) == 1:
                        nnn = list(nn_hood)[0]
                        nn_hood = set(nx.neighbors(graph, nnn)) - {path[-1]}
                        path += [nnn]
                full_edges = list(zip(path[:-1], path[1:]))  # granular edges between keep_nodes
                spl = sum([graph[e[0]][e[1]][0][edge_weight] for e in full_edges])  # distance
                # only keep if path is unique. Parallel/Multi edges allowed, but not those that are completely redundant.
                if (not contracted_edges) | ([set(path)] not in [[set(p[3])] for p in contracted_edges]):
                    contracted_edges.append(tuple(sorted([n, path[-1]])) + (spl,) + (path,))
    else:
        # Degenerate case (at most one intersection): nothing to contract;
        # return every original edge with its weight and a trivial path.
        contracted_edges = []
        for e in graph.edges(data=True):
            edge = (e[0], e[1], e[2][edge_weight], [e[0], e[1]])
            contracted_edges += [edge]
    return contracted_edges
def lagval3d(x, y, z, c):
    """
    Evaluate a 3-D Laguerre series at points (x, y, z).

    This function returns the values:

    .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z)

    The parameters `x`, `y`, and `z` are converted to arrays only if
    they are tuples or a lists, otherwise they are treated as a scalars and
    they must have the same shape after conversion. In either case, either
    `x`, `y`, and `z` or their elements must support multiplication and
    addition both with themselves and with the elements of `c`.

    If `c` has fewer than 3 dimensions, ones are implicitly appended to its
    shape to make it 3-D. The shape of the result will be c.shape[3:] +
    x.shape.

    Parameters
    ----------
    x, y, z : array_like, compatible object
        The three dimensional series is evaluated at the points
        `(x, y, z)`, where `x`, `y`, and `z` must have the same shape.  If
        any of `x`, `y`, or `z` is a list or tuple, it is first converted
        to an ndarray, otherwise it is left unchanged and if it isn't an
        ndarray it is  treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term of
        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
        greater than 3 the remaining indices enumerate multiple sets of
        coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the multidimensional polynomial on points formed with
        triples of corresponding values from `x`, `y`, and `z`.

    See Also
    --------
    lagval, lagval2d, laggrid2d, laggrid3d

    Notes
    -----

    .. versionadded:: 1.7.0

    """
    # Delegate to the shared N-D evaluation helper, applying the 1-D
    # Laguerre evaluator `lagval` along each dimension in turn.
    return pu._valnd(lagval, c, x, y, z)
async def get_file_path(project_id: str, file_id: str) -> str:
    """
    Look up (and remove) the file path registered for a file id.

    Note: ``pop`` removes the entry from ``PROJECT_FILE_DICT``, so a given
    file id can only be resolved once.

    :param project_id: key of the project in PROJECT_FILE_DICT
    :param file_id: key of the file within the project's mapping
    :return: the stored file path
    :raises KeyError: if the project or file id is unknown
    """
    file_path = PROJECT_FILE_DICT[project_id].pop(file_id)
    return file_path
def makeTopRegister(board, jigFrameSize, jigThickness, pcbThickness,
                    outerBorder=fromMm(3), innerBorder=fromMm(1),
                    tolerance=fromMm(0.05)):
    """
    Create a SolidPython representation of the top register.

    Thin wrapper around ``makeRegister`` with ``topSide=True``; all other
    parameters are forwarded unchanged.
    """
    # Removed leftover debug output: the original printed "Top" to stdout
    # on every call, which polluted console output without adding value.
    return makeRegister(board, jigFrameSize, jigThickness, pcbThickness,
                        outerBorder, innerBorder, tolerance, True)
def pvxpv(a, b):
    """ Outer product of two pv-vectors.

    :param a: first pv-vector.
    :type a: array-like of shape (2,3)

    :param b: second pv-vector.
    :type b: array-like of shape (2,3)

    :returns: a x b as a numpy.matrix of shape 2x3.

    .. seealso:: |MANUAL| page 191
    """
    # Pre-allocate the output buffer: the SOFA C routine fills this 2x3
    # matrix in place (C-style out parameter), hence C-ordered zeros.
    axb = _np.asmatrix(_np.zeros(shape=(2,3), dtype=float, order='C'))
    # Inputs are coerced to contiguous (2,3) float arrays before the call.
    _sofa.iauPvxpv(_req_shape_c(a, float, (2,3)),
                                _req_shape_c(b, float, (2,3)), axb)
    return axb
def _encodeImage(image, encoding='JPEG', jpegQuality=95, jpegSubsampling=0,
                 format=(TILE_FORMAT_IMAGE, ), tiffCompression='raw',
                 **kwargs):
    """
    Convert a PIL or numpy image into raw output bytes and a mime type.

    :param image: a PIL image.
    :param encoding: a valid PIL encoding (typically 'PNG' or 'JPEG').  Must
        also be in the TileOutputMimeTypes map.
    :param jpegQuality: the quality to use when encoding a JPEG.
    :param jpegSubsampling: the subsampling level to use when encoding a JPEG.
    :param format: the desired format or a tuple of allowed formats.  Formats
        are members of (TILE_FORMAT_PIL, TILE_FORMAT_NUMPY, TILE_FORMAT_IMAGE).
    :param tiffCompression: the compression format to use when encoding a TIFF.
    :returns:
        imageData: the image data in the specified format and encoding.
        imageFormatOrMimeType: the image mime type if the format is
            TILE_FORMAT_IMAGE, or the format of the image data if it is
            anything else.
    :raises ValueError: if TILE_FORMAT_IMAGE is requested with an encoding
        that is not in TileOutputMimeTypes.
    """
    # Normalize `format` to an iterable of allowed formats.
    if not isinstance(format, (tuple, set, list)):
        format = (format, )
    imageData = image
    imageFormatOrMimeType = TILE_FORMAT_PIL
    # Formats are honored in priority order: numpy, then PIL, then an
    # encoded image byte stream.
    if TILE_FORMAT_NUMPY in format:
        imageData, _ = _imageToNumpy(image)
        imageFormatOrMimeType = TILE_FORMAT_NUMPY
    elif TILE_FORMAT_PIL in format:
        imageData = _imageToPIL(image)
        imageFormatOrMimeType = TILE_FORMAT_PIL
    elif TILE_FORMAT_IMAGE in format:
        if encoding not in TileOutputMimeTypes:
            raise ValueError('Invalid encoding "%s"' % encoding)
        imageFormatOrMimeType = TileOutputMimeTypes[encoding]
        image = _imageToPIL(image)
        if image.width == 0 or image.height == 0:
            # A zero-sized image cannot be encoded; represent it as empty bytes.
            imageData = b''
        else:
            encoding = TileOutputPILFormat.get(encoding, encoding)
            output = BytesIO()
            params = {}
            if encoding == 'JPEG' and image.mode not in ('L', 'RGB'):
                # JPEG only supports grayscale and RGB; collapse other modes
                # (LA becomes L, everything else becomes RGB).
                image = image.convert('RGB' if image.mode != 'LA' else 'L')
            if encoding == 'JPEG':
                params['quality'] = jpegQuality
                params['subsampling'] = jpegSubsampling
            elif encoding == 'TIFF':
                # Map friendly compression names to PIL's TIFF identifiers;
                # unknown names are passed through unchanged.
                params['compression'] = {
                    'none': 'raw',
                    'lzw': 'tiff_lzw',
                    'deflate': 'tiff_adobe_deflate',
                }.get(tiffCompression, tiffCompression)
            image.save(output, encoding, **params)
            imageData = output.getvalue()
    return imageData, imageFormatOrMimeType
from typing import Optional
from typing import Sequence
from typing import Mapping
def get_local_gateway(filters: Optional[Sequence[pulumi.InputType['GetLocalGatewayFilterArgs']]] = None,
                      id: Optional[str] = None,
                      state: Optional[str] = None,
                      tags: Optional[Mapping[str, str]] = None,
                      opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetLocalGatewayResult:
    """
    Provides details about an EC2 Local Gateway.

    ## Example Usage

    Accepting a local gateway id as a configuration variable:

    ```python
    import pulumi
    import pulumi_aws as aws

    config = pulumi.Config()
    local_gateway_id = config.require_object("localGatewayId")
    selected = aws.ec2.get_local_gateway(id=local_gateway_id)
    ```

    :param Sequence[pulumi.InputType['GetLocalGatewayFilterArgs']] filters: Custom filter block as described below.
    :param str id: The id of the specific Local Gateway to retrieve.
    :param str state: The current state of the desired Local Gateway.
           Can be either `"pending"` or `"available"`.
    :param Mapping[str, str] tags: A mapping of tags, each pair of which must exactly match
           a pair on the desired Local Gateway.
    """
    invoke_args = {
        'filters': filters,
        'id': id,
        'state': state,
        'tags': tags,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    result = pulumi.runtime.invoke(
        'aws:ec2/getLocalGateway:getLocalGateway',
        invoke_args, opts=opts, typ=GetLocalGatewayResult).value
    return AwaitableGetLocalGatewayResult(
        filters=result.filters,
        id=result.id,
        outpost_arn=result.outpost_arn,
        owner_id=result.owner_id,
        state=result.state,
        tags=result.tags)
def last_char(text: str, begin: int, end: int, chars: str) -> int:
    """Return the end index of ``text[begin:end]`` after stripping trailing
    characters contained in `chars`.

    Scans backwards from `end`.  The returned index is one past the last
    character of the range that is *not* in `chars`, or `begin` when every
    character in the range is in `chars`.  (The original docstring said
    "non-whitespace", but the stripped set is whatever `chars` contains.)
    """
    while end > begin and text[end - 1] in chars:
        end -= 1
    return end
import argparse
import sys
def _str2bool(value):
    """Interpret a command-line string as a boolean.

    argparse's ``type=bool`` is a well-known pitfall: ``bool('False')`` is
    True because any non-empty string is truthy.  This helper parses the
    usual textual spellings instead.
    """
    return str(value).strip().lower() in ('1', 'true', 't', 'yes', 'y')


def parse_args():
    """
    Parse input arguments for testing a scene graph generation network.

    Prints the help text and exits with status 1 when no arguments are
    supplied.

    :return: argparse.Namespace with the parsed options
    """
    parser = argparse.ArgumentParser(description='Test a scene graph generation network')
    parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use',
                        default=0, type=int)
    parser.add_argument('--weights', dest='model',
                        help='model to test',
                        default=None, type=str)
    parser.add_argument('--cfg', dest='cfg_file',
                        help='optional config file', default=None, type=str)
    # NOTE: was `type=bool`, which evaluated any non-empty string
    # (including "False") as True; _str2bool parses the text properly.
    parser.add_argument('--wait', dest='wait',
                        help='wait until net file exists',
                        default=True, type=_str2bool)
    parser.add_argument('--imdb', dest='imdb',
                        help='dataset to test',
                        default='imdb_1024.h5', type=str)
    parser.add_argument('--roidb', dest='roidb',
                        help='dataset to test',
                        default='VG', type=str)
    parser.add_argument('--rpndb', dest='rpndb',
                        help='dataset to test',
                        default='proposals.h5', type=str)
    parser.add_argument('--network', dest='network_name',
                        help='name of the network',
                        default=None, type=str)
    parser.add_argument('--output', dest='output_dir',
                        default=None, type=str)
    parser.add_argument('--inference_iter', dest='inference_iter',
                        default=3, type=int)
    parser.add_argument('--test_size', dest='test_size',
                        default=1000, type=int)
    parser.add_argument('--test_mode', dest='test_mode',
                        default='fg', type=str)

    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()
    return args
import asyncio
def test_cache_memoize_async(cache):
    """Test that cache.memoize() can decorate async functions."""
    # NOTE(review): asyncio.get_event_loop() is deprecated outside a running
    # loop since Python 3.10, and @asyncio.coroutine was removed in 3.11 --
    # this test only runs on older interpreters; consider migrating to
    # `async def` + asyncio.run().
    loop = asyncio.get_event_loop()
    marker = 1
    @cache.memoize()
    @asyncio.coroutine
    def func(a):
        return (a, marker)
    # The memoizing wrapper must still look like a coroutine function.
    assert asyncio.iscoroutinefunction(func)
    assert len(cache) == 0
    # First call computes and stores the result.
    result = loop.run_until_complete(func("a"))
    assert result == ("a", 1)
    assert len(cache) == 1
    assert list(cache.values())[0] == ("a", 1)
    # Same argument again: served from the cache, so the bumped `marker`
    # is not visible in the returned value.
    marker += 1
    result = loop.run_until_complete(func("a"))
    assert result == ("a", 1)
    assert len(cache) == 1
    # A new argument misses the cache and observes the new marker.
    result = loop.run_until_complete(func("b"))
    assert result == ("b", 2)
    assert len(cache) == 2
def _gen_write_element(e: UxsdElement, parent: str) -> str:
    """Generate partial C++ code for writing out a struct generated from an
    UxsdElement.

    Currently, all values with non-zero default values are emitted.
    Otherwise, we would have to check against the nonzero value, and the
    check would create a case split for all simple types again. (how to
    compare unions? strings? doubles?)
    """
    assert isinstance(e.type, UxsdComplex)
    # Emit a leading newline followed by the complex-element writer output.
    return "\n" + _gen_write_complex_element(e, parent)
import math
def compare_cols(fg_col, fg_cons, fg_size, fg_weights,
                 bg_col, bg_cons, bg_size, bg_weights,
                 aa_freqs, pseudo_size):
    """Compare amino acid frequencies between aligned columns via G-test.

    Returns the chi-squared p-value of the G statistic comparing observed
    foreground counts against expectations derived from the
    pseudocount-smoothed background column.
    """
    # Expected counts: background frequencies rescaled to the foreground size.
    bg_counts = count_col(bg_col, bg_weights, aa_freqs, pseudo_size)
    expected = {aa: fg_size * (bg_counts[aa] / (bg_size + pseudo_size))
                for aa in 'ACDEFGHIKLMNPQRSTVWY'}
    # G statistic: 2 * sum(observed * ln(observed / expected)).
    observed = count_col(fg_col, fg_weights)
    g_statistic = 2 * sum(count * math.log(count / expected[aa])
                          for aa, count in observed.items())
    # p-value from the chi-squared distribution with 19 degrees of freedom
    # (20 amino acids - 1).
    return chisqprob(g_statistic, 19)
import os
def quick_nph(mol, confId=0, step=2000, time_step=None, press=1.0, f_press=None, shake=False, idx=None, tmp_clear=False,
            solver='lammps', solver_path=None, work_dir=None, omp=1, mpi=0, gpu=0, **kwargs):
    """
    MD.quick_nph

    Run a short MD simulation in the NPH ensemble on a copy of the input
    molecule and read back the resulting coordinates and cell.

    Args:
        mol: RDKit Mol object

    Optional args:
        confId: Target conformer ID (int)
        step: Number of MD steps (int)
        time_step: Set timestep of MD (float or None, fs)
        press: Initial pressure (float, atm)
        f_press: Final pressure (float or None, atm); defaults to `press`
        shake: Use SHAKE (boolean)
        solver: lammps (str)
        solver_path: File path of solver (str)
        work_dir: Path of work directory (str)
        barostat: Nose-Hoover, or Berendsen (str, default:Nose-Hoover)
            -- presumably forwarded through **kwargs; TODO confirm

    Returns:
        Tuple of (updated Mol copy, unwrapped coordinates
        (numpy.ndarray, angstrom), cell coordinates (numpy.ndarray,
        angstrom)), or None if the solver terminated with an error.
    """
    mol_copy = utils.deepcopy_mol(mol)
    if solver == 'lammps':
        sol = LAMMPS(work_dir=work_dir, solver_path=solver_path, idx=idx)
    #elif solver == 'gromacs':
    #    sol = Gromacs(work_dir=work_dir, solver_path=solver_path)
    md = MD(idx=idx)
    # Molecules without a cell are simulated without periodic boundaries.
    if not hasattr(mol_copy, 'cell'):
        md.pbc = False
        calc.centering_mol(mol_copy, confId=confId)
    if f_press is None: f_press = press
    md.add_md('nph', step, time_step=time_step, shake=shake, p_start=press, p_stop=f_press, **kwargs)
    sol.make_dat(mol_copy, confId=confId, file_name=md.dat_file)
    sol.make_input(md)
    cp = sol.exec(omp=omp, mpi=mpi, gpu=gpu)
    # Treat a nonzero return code as fatal only when the expected output
    # files were not produced (some solvers exit nonzero after success).
    if cp.returncode != 0 and (
            (md.write_data is not None and not os.path.exists(os.path.join(work_dir, md.write_data)))
            or (md.outstr is not None and not os.path.exists(os.path.join(work_dir, md.outstr)))
        ):
        utils.radon_print('Error termination of %s. Return code = %i' % (sol.get_name, cp.returncode), level=3)
        return None
    uwstr, wstr, cell, vel, _ = sol.read_traj_simple(os.path.join(sol.work_dir, md.outstr))
    # Write the unwrapped coordinates and per-atom velocities back into
    # the RDKit conformer / atom properties.
    for i in range(mol_copy.GetNumAtoms()):
        mol_copy.GetConformer(confId).SetAtomPosition(i, Geom.Point3D(uwstr[i, 0], uwstr[i, 1], uwstr[i, 2]))
        mol_copy.GetAtomWithIdx(i).SetDoubleProp('vx', vel[i, 0])
        mol_copy.GetAtomWithIdx(i).SetDoubleProp('vy', vel[i, 1])
        mol_copy.GetAtomWithIdx(i).SetDoubleProp('vz', vel[i, 2])
    setattr(mol_copy, 'cell', utils.Cell(cell[0, 1], cell[0, 0], cell[1, 1], cell[1, 0], cell[2, 1], cell[2, 0]))
    mol_copy = calc.mol_trans_in_cell(mol_copy, confId=confId)
    if tmp_clear: md.clear(work_dir)
    return mol_copy, uwstr, cell
def parameter_converter(
    possible_types: list,
    default_return: t.Any,
    cache_handler: t.Optional[t.Callable],
):
    """
    parameter_converter is used for converting annotated parameters
    of a function into the annotated types.

    Conversion is attempted in the order that they are annotated
    e.g: t.Union[int, str] will make the converter first attempt
    to convert the parameter into a integer before attempting to
    convert to a string.

    If NoneType appears among the possible types it is removed and the
    default return value becomes None.  The returned converter raises
    ConversionFailure when no type matches and no default was given;
    it is wrapped by `cache_handler` when one is supplied.
    """
    # Iterate over a *copy*: removing items from the list while iterating
    # it would silently skip the element that follows each removed
    # NoneType entry (and possibly leave a NoneType behind).
    for type_ in list(possible_types):
        if isinstance(None, type_):
            possible_types.remove(type_)
            default_return = None
    should_raise = isinstance(default_return, NoDefault)

    def _converter(arg):
        # Try each candidate type in annotation order; first success wins.
        for conv in possible_types:
            try:
                return conv(arg)
            except ValueError:
                continue
        if should_raise:
            raise ConversionFailure(
                f"Cannot convert {arg!r} to any of the types:"
                f" {possible_types!r}"
            )
        return default_return

    if cache_handler is not None:
        return cache_handler(_converter)
    return _converter
import time
def next_tide_state(tide_info, current_time):
    """Return a human-readable description of the next tide.

    :param tide_info: object exposing ``give_next_tide_in_epoch(epoch)``
        returning a dict with 'tide_time', 'tide_type' and optionally 'error'
    :param current_time: epoch timestamp to look up the next tide from
    :return: string like "High tide at 14:32" (local time), or None when
        the lookup reported an error
    """
    next_tide = tide_info.give_next_tide_in_epoch(current_time)
    # Guard clause: bail out early on lookup errors (idiomatic `is not None`
    # replaces the previous `== None` comparison).
    if next_tide.get("error") is not None:
        return None
    tidetime = time.strftime("%H:%M", time.localtime(next_tide.get("tide_time")))
    tidetype = next_tide.get("tide_type")
    return f"{tidetype} tide at {tidetime}"
def dct_2d_reverse(block):
    """
    Apply the 2-D inverse Discrete Cosine Transform to a block.

    The orthonormal 1-D inverse DCT is applied along each of the two
    trailing axes in turn, using ``end_T`` to swap the axis being
    transformed between passes.

    :param block: input block of DCT coefficients
    :return: block transformed back to the spatial domain
    """
    block = end_T(block)
    block = idct(block, norm='ortho')
    block = end_T(block)
    block = idct(block, norm='ortho')
    return block
def convert_idx(text, tokens):
    """
    Calculate the (start, end) character span of each token within text.

    Tokens are located left-to-right starting after the previous token,
    so repeated tokens map to successive occurrences.

    :param text: The text to extract spans from.
    :param tokens: The tokens of that text, in order of appearance.
    :return: A list of (start, end) spans, one per token.
    :raises ValueError: if a token cannot be found in the text.
    """
    current = 0
    spans = []
    for token in tokens:
        current = text.find(token, current)
        if current < 0:
            # Raise an informative exception instead of the previous
            # print + bare `raise Exception()` (ValueError is still
            # caught by callers handling Exception).
            raise ValueError("Token {} cannot be found".format(token))
        spans.append((current, current + len(token)))
        current += len(token)
    return spans
import zipfile
def parse_zip(bufferstr):
"""
parse binary object as zip file
"""
z = zipfile.ZipFile(BytesIO(bufferstr))
filenames = z.namelist()
if not filenames:
print('No names found.')
with open('tmp.badzipfile.zip', 'wb') as f:
f.write(bufferstr)
exit()
if len(filenames) > 1:
print('WARNING: >1 file found, using only the first available.')
data = z.read(filenames[0])
return data | c448a505d21a65bec10799a5d2f6b52802788be8 | 3,633,885 |
def downsample(u_t, Fs, Fs_new, plotit=False):
    """
    The proper way to downsample a signal.
       First low-pass filter the signal
       Interpolate / Decimate the signal down to the new sampling frequency

    :param u_t: input signal, shape (nt,) or (nt, nch)
    :param Fs: original sampling frequency [Hz]
    :param Fs_new: new (lower) sampling frequency [Hz]
    :param plotit: if True, plot the signal, the filter response and the
        filtered result
    :return: resampled signal of shape (nt_new, nch)
    """
    # Anti-aliasing cutoff: 1/tau = Fs_new/2, the new Nyquist frequency.
    tau = 2/Fs_new
    nt = len(u_t)
    tt = _np.arange(0, nt, 1)/Fs
    # tt = tt.reshape(nt, 1)
    # tt = (_np.arange(0, 1/Fs, nt)).reshape(nt, 1)
    # NOTE(review): bare except here swallows any error, not just the
    # 1-D-input IndexError it presumably targets -- consider narrowing.
    try:
        nch = _np.size(u_t, axis=1)
    except:
        nch = 1
        u_t = u_t.reshape(nt, nch)
    # end try
    # ----------- #
    #2nd order LPF gets converted to a 4th order LPF by filtfilt
    lowpass_n, lowpass_d = _dsp.butter(2, 2.0/(Fs*tau), btype='low')
    if plotit:
        # ------- #
        #Calculate the frequency response of the lowpass filter,
        w, h = _dsp.freqz(lowpass_n, lowpass_d, worN=12000) #
        #Convert to frequency vector from rad/sample
        w = (Fs/(2.0*_np.pi))*w
        # ------- #
        _plt.figure(num=3951)
        # _fig.clf()
        _ax1 = _plt.subplot(3, 1, 1)
        _ax1.plot( tt,u_t, 'k')
        _ax1.set_ylabel('Signal', color='k')
        _ax1.set_xlabel('t [s]')
        _ax2 = _plt.subplot(3, 1, 2)
        _ax2.plot(w, 20*_np.log10(abs(h)), 'b')
        _ax2.plot(1.0/tau, 0.5*_np.sqrt(2), 'ko')
        _ax2.set_ylabel('|LPF| [dB]', color='b')
        _ax2.set_xlabel('Frequency [Hz]')
        _ax2.set_title('Digital LPF frequency response (Stage 1)')
        _plt.xscale('log')
        _plt.grid(which='both', axis='both')
        _plt.axvline(1.0/tau, color='k')
        _plt.grid()
        _plt.axis('tight')
    # endif plotit
    # nskip = int(_np.round(Fs/Fs_new))
    # ti = tt[0:nt:nskip]
    # New (coarser) time base at the target sampling frequency.
    ti = _np.arange(0, nt/Fs, 1/Fs_new)
    u_n = _np.zeros((len(ti), nch), dtype=_np.float64)
    for ii in range(nch):
        # (Non-Causal) LPF
        # filtfilt runs forward and backward, giving zero phase shift.
        u_t[:, ii] = _dsp.filtfilt(lowpass_n, lowpass_d, u_t[:, ii])
        # _ut.interp(xi,yi,ei,xo)
        u_n[:, ii] = _ut.interp(tt, u_t[:, ii], ei=None, xo=ti)
#        uinterp = interp1d(tt, u_t[:, ii], kind='cubic', axis=0)
#        u_n[:, ii] = uinterp(ti)
    #endif
    if plotit:
        _ax1.plot(ti, u_n, 'b-')
        _ax3 = _plt.subplot(3, 1, 3, sharex=_ax1)
        _ax3.plot(tt, u_t, 'k')
        _ax3.set_ylabel('Filt. Signal', color='k')
        _ax3.set_xlabel('t [s]')
#        _plt.show(hfig, block=False)
        _plt.draw()
#        _plt.show()
    # endif plotit
    return u_n
def convert_date_hours(times, start):
    """
    This function converts model output time in hours to datetime objects.

    :arg times: array of hours since the start date of a simulation.
                From time_counter in model output.
    :type times: int

    :arg start: string containing the start date of the simulation in
                format '01-Nov-2006'
    :type start: str

    :returns: list of datetime objects representing the time of model outputs.
    """
    # Parse the start date once: it is loop-invariant, so re-parsing it on
    # every iteration (as before) was pure overhead.
    arr_start = arrow.Arrow.strptime(start, '%d-%b-%Y')
    return [arr_start.replace(hours=hours).datetime for hours in times]
def idf_unit_test(app=UT, dut=IDFDUT, chip="ESP32", module="unit-test", execution_time=1,
                  level="unit", erase_nvs=True, **kwargs):
    """
    decorator for testing idf unit tests (with default values for some keyword args).

    :param app: test application class
    :param dut: dut class
    :param chip: chip supported, string or tuple
    :param module: module, string
    :param execution_time: execution time in minutes, int
    :param level: test level, could be used to filter test cases, string
    :param erase_nvs: if need to erase_nvs in DUT.start_app()
    :param kwargs: other keyword args
    :return: test method
    """
    try:
        # try to config the default behavior of erase nvs
        dut.ERASE_NVS = erase_nvs
    except AttributeError:
        # Presumably some DUT classes reject this attribute assignment;
        # in that case their own default behavior is kept -- TODO confirm.
        pass

    original_method = TinyFW.test_method(app=app, dut=dut, chip=chip, module=module,
                                         execution_time=execution_time, level=level, **kwargs)

    def test(func):
        # Wrap with TinyFW's decorator, then tag the case with a
        # chip-qualified ID for reporting/filtering.
        test_func = original_method(func)
        test_func.case_info["ID"] = format_case_id(chip, test_func.case_info["name"])
        return test_func

    return test
import os
def get_materials():
    """Return the materials dictionary read from 'materials.dat'.

    Each non-comment line of the file has the form
    ``name | formula | density``; the result maps the lowercased name to a
    ``(formula_without_spaces, density_as_float)`` tuple.  An empty dict is
    returned when the file does not exist.
    """
    mat = {}
    fname = 'materials.dat'
    if os.path.exists(fname):
        # Context manager guarantees the file handle is closed (the
        # original open/readlines/close leaked the handle on error).
        with open(fname, 'r') as fh:
            lines = fh.readlines()
        for line in lines:
            line = line.strip()
            # Skip blank/short lines and '#' comments.
            if len(line) > 2 and not line.startswith('#'):
                name, f, den = [i.strip() for i in line.split('|')]
                mat[name.lower()] = (f.replace(' ', ''), float(den))
    return mat
import subprocess
import os
def decoratebiom(biom_file, outdir, metadata, core=""):
    """Insert sample (column) and feature (row) metadata into a biom file.

    :param biom_file: path to the input biom file
    :param outdir: directory containing the BiG-MAP coverage tables
    :param metadata: path to the per-sample metadata mapping file
    :param core: pass "core" to use the core coverage table and write to a
        separate '.metacore.biom' output; otherwise the '.meta.biom' file is
        annotated in place
    :return: path of the biom file that received the feature metadata
    """
    base = '.'.join(biom_file.split('.')[0:-1])
    # Stage 1: attach per-sample (column) metadata.
    # NOTE(review): shell=True with interpolated file paths is only safe
    # for trusted inputs; paths containing shell metacharacters will break.
    out_biom = base + '.meta.biom'
    cmd_sample = f"biom add-metadata -i {biom_file} -o {out_biom} -m {metadata} --output-as-json"
    subprocess.check_output(cmd_sample, shell=True)
    # Stage 2: attach per-feature (row) coverage metadata.  The two
    # previous branches only differed in the coverage table and output
    # name, so they are folded into one command here.
    in_biom = base + '.meta.biom'
    if core == "core":
        out_biom = base + '.metacore.biom'
        metadata_f = os.path.join(outdir, 'BiG-MAP.map.core.coverage.txt')
    else:
        out_biom = in_biom  # annotate the meta.biom file in place
        metadata_f = os.path.join(outdir, 'BiG-MAP.map.coverage.txt')
    cmd_feature = f"biom add-metadata --observation-metadata-fp {metadata_f} -i {in_biom} -o {out_biom} --output-as-json"
    subprocess.check_output(cmd_feature, shell=True)
    return (out_biom)
def index(request):
    """The home page for MMS Pair App"""
    # If the client side never ran `manage.py checkdb`, the database may be
    # empty: seed it on first visit, otherwise refresh it to the latest
    # available date.
    if Coin.objects.all().count() == 0:
        load_coin()
    else:
        update_coin()
    pair = Coin.objects.filter(pair='BRLBTC').order_by("-timestamp")
    return render(request, 'mms_pair/index.html', {'pair': pair})
from typing import Iterable
from typing import Tuple
def cbdiag(size: int, blocks: Iterable[Tuple[int, ndarray]]) -> ndarray:
    """
    Build a block matrix with (sub-)diagonal blocks and the given size.

    Each block is given as ``(offset, data)``.  Offsets are counted from
    left to right: 0 is the main diagonal, offset 1 the sub-diagonal to the
    right of it (positions (0, 1), (1, 2), ...), and negative offsets lie
    to the left of the main diagonal.  All blocks must share one shape
    ``(M, K)``; for ``size = N`` the result has shape ``(N*M, N*K)``.

    Examples
    --------
    >>> # 5-diagonal matrix:
    >>> cbdiag (6, [
    >>>     (0, asarray([[1]])), (1, asarray([[2]])), (2, asarray([[3]])),
    >>>     (-1, asarray([[4]])), (-2, asarray([[5]]))
    >>> ]).tolist()
    >>> [[1, 2, 3, 0, 0, 0],
    >>>  [4, 1, 2, 3, 0, 0],
    >>>  [5, 4, 1, 2, 3, 0],
    >>>  [0, 5, 4, 1, 2, 3],
    >>>  [0, 0, 5, 4, 1, 2],
    >>>  [0, 0, 0, 5, 4, 1]]

    >>> # 3-diagonal matrix with 1x2 blocks:
    >>> cbdiag(4, [
    >>>     (0, asarray([[1, 1]])),
    >>>     (1, asarray([[2, 2]])),
    >>>     (-1, asarray([[3, 3]])),
    >>> ]).tolist()
    >>> [[1, 1, 2, 2, 0, 0, 0, 0],
    >>>  [3, 3, 1, 1, 2, 2, 0, 0],
    >>>  [0, 0, 3, 3, 1, 1, 2, 2],
    >>>  [0, 0, 0, 0, 3, 3, 1, 1]]

    Parameters
    ----------
    size : int
        number of block-rows and block-cols
    blocks : sequence of tuples of int and ndarray
        blocks, each specified with an integer offset and a matrix

    Returns
    -------
    mat : ndarray
        a block matrix built from the given blocks.

    Raises
    ------
    MatrixShapeError
        raised if blocks have different shapes
    """
    _validate_cb_blocks(blocks)
    n_rows, n_cols = blocks[0][-1].shape
    mat = zeros((size * n_rows, size * n_cols))
    # Walk block-rows; within each, drop every block whose diagonal offset
    # lands inside the matrix at this row.
    for block_row in range(size):
        top = block_row * n_rows
        for offset, data in blocks:
            block_col = block_row + offset
            if 0 <= block_col < size:
                left = block_col * n_cols
                mat[top:top + n_rows, left:left + n_cols] = data
    return mat
from pathlib import Path
def read_hca_metadata(metadata_file: Path) -> nx.DiGraph:
    """Read an HCA metadata table and build a donor -> specimen graph.

    :param metadata_file: path to a tab-separated metadata table
    :return: directed graph with one node per unique donor/specimen record
    """
    table = pd.read_table(metadata_file)
    # Re-nest each flat, dot-keyed row and freeze it; the "full" nested
    # representation is kept at this stage so donor and specimen metadata
    # can be pulled out below.
    frozen_rows = [
        freeze(dotted_keys_to_nested_dict(dict(zip(row.index, value_adjust(row)))))
        for _, row in table.iterrows()
    ]
    graph = nx.DiGraph()
    for record in frozen_rows:
        # Frozen subtrees are hashable, so they serve directly as NetworkX
        # nodes; NetworkX enforces node uniqueness, de-duplicating records.
        donor_node = record['donor_organism']
        specimen_node = record['specimen_from_organism']
        graph.add_edge(donor_node, specimen_node)
        graph.nodes[donor_node]['type'] = 'donor_organism'
        graph.nodes[specimen_node]['type'] = 'specimen_from_organism'
    return graph
def search_results(rows):
    """Display the search-results table and let the user pick an entry.

    Returns the ``item_view`` of the selected row, or ``False`` when the
    user backs out (empty input, non-numeric input, or no matching id).
    """
    print()
    print(to_table(rows))
    print()
    # Prompt the user for a selection
    choice = menu.get_input(
        message='Select a result # or type any key to go back to the main menu: ')
    if choice:
        try:
            selected_id = int(choice)
        except ValueError:  # Non integer -> fall through to main menu
            selected_id = None
        if selected_id is not None:
            for row in rows:
                if row.id == selected_id:
                    return item_view(row)
    return False
def eval_js(expression_, **args):
    """Execute JavaScript expression in the user's browser and get the value of the expression
    :param str expression_: JavaScript expression. The value of the expression need to be JSON-serializable.
    :param args: Local variables passed to js code. Variables need to be JSON-serializable.
    :return: The value of the expression.
    Note: When using :ref:`coroutine-based session <coroutine_based_session>`, you need to use the ``await eval_js(expression)`` syntax to call the function.
    Example:
    .. exportable-codeblock::
        :name: eval_js
        :summary: `eval_js()` usage
        current_url = eval_js("window.location.href")
        put_text(current_url)  # ..demo-only
        ## ----
        function_res = eval_js('''(function(){
            var a = 1;
            a += b;
            return a;
        })()''', b=100)
        put_text(function_res)  # ..demo-only
    """
    # Wrap the expression in an IIFE so that whatever it evaluates to
    # (or null when it throws / returns a falsy value) is posted back to
    # the server as a "js_yield" event. %r embeds the expression as a
    # quoted JS string literal for eval().
    script = r"""
    (function(WebIO){
        let ____result____ = null;  // to avoid naming conflict
        try{
            ____result____ = eval(%r);
        }catch{};
        WebIO.sendMessage({
            event: "js_yield",
            task_id: WebIOCurrentTaskID,  // local var in run_script command
            data: ____result____ || null
        });
    })(WebIO);""" % expression_
    run_js(script, **args)
    # Suspend this (generator-based) coroutine until the browser posts
    # the result back as the next client event.
    res = yield next_client_event()
    assert res['event'] == 'js_yield', "Internal Error, please report this bug on " \
                                       "https://github.com/wang0618/PyWebIO/issues"
    return res['data']
def quat2rot(q):
    """
    Convert a quaternion (qw, qx, qy, qz) to a 3x3 rotation matrix.
    Source:
    Blanco, Jose-Luis. "A tutorial on se (3) transformation parameterizations
    and on-manifold optimization." University of Malaga, Tech. Rep 3 (2010): 6.
    [Page 18, Equation (2.20)]
    """
    assert len(q) == 4
    w, x, y, z = q
    # Homogeneous form: diagonal entries combine the squared components,
    # off-diagonal entries are twice the pairwise cross terms.
    return np.array([
        [w**2 + x**2 - y**2 - z**2, 2.0 * (x * y - w * z),     2.0 * (x * z + w * y)],
        [2.0 * (x * y + w * z),     w**2 - x**2 + y**2 - z**2, 2.0 * (y * z - w * x)],
        [2.0 * (x * z - w * y),     2.0 * (y * z + w * x),     w**2 - x**2 - y**2 + z**2],
    ])
def insert_stroke(seq, stroke, offset=0):
    """Insert the positions of *stroke* into *seq*, shifted by *offset*.

    Returns the frame number of the last point processed, or *offset*
    when the stroke is empty.
    """
    last_frame = offset
    first_point = True
    for point in stroke:
        last_frame = point["frame"] + offset
        if first_point:
            first_point = False
            # Do not override a keyframe at the start of the insert position.
            if launch_keyframe(seq.name, last_frame) is not None:
                continue
        # XXX Maybe we should also remove any keyframes that already exist in
        # the frame window of the new stroke?
        insert_position(seq, point["value"], last_frame)
    return last_frame
def get_dest_file(src_file):
    """Map a source photo path to its destination path.

    Destination layout:
      * ``<year>/<month name>/<file>``  when EXIF has an Image DateTime
      * ``no-exif-date/<file>``         when EXIF exists but lacks Image DateTime
      * ``no-exif-data/<file>``         when the file carries no EXIF data
      * ``corrupted-exif-data/<file>``  when EXIF parsing raises
    """
    name = basename(src_file)
    with open(src_file, 'rb') as fh:
        try:
            tags = exifread.process_file(fh, details=False,
                                         stop_tag='Image DateTime', strict=True)
        except Exception:
            # exifread can raise on malformed/truncated EXIF blocks.
            return join('corrupted-exif-data', name)
    if not tags:
        return join('no-exif-data', name)
    try:
        taken = arrow.get(str(tags['Image DateTime']), 'YYYY:MM:DD HH:mm:ss')
    except KeyError:
        # EXIF present, but no Image DateTime tag.
        return join('no-exif-date', name)
    month_name = arrow.get(str(taken.month), 'M').format('MMMM')
    return join(str(taken.year), month_name, name)
def Lazy(func=None, *, lazy=True):
    """Decorator that provides a function with a boolean parameter "lazy".

    When set to true, the function will not be executed yet, sort of like a
    coroutine (see examples). This can be nice for testing purposes. Also
    opens the door to some interesting things (maybe sort of allows for
    decorated objects? Not sure of all applications yet). Would have preferred
    a lowercase function name but I also want to keep the parameter name as
    "lazy" while avoiding confusion.

    Examples
    --------
    @Lazy
    def foo(a, b=3):
        return a * b

    >>> foo(2, lazy=False)
    6
    >>> res = foo(2)
    >>> res()
    6

    In the second example, notice we didn't get any output until explicitly
    calling the result. Also note that we can change the default mode by using
    the decorator like (keyword argument, not positional):

    @Lazy(lazy=False)
    """
    # Parenthesized usage (@Lazy(lazy=...)) calls us without func: return a
    # decorator pre-bound with the requested default.
    if func is None: return partial(Lazy, lazy=lazy)
    # "lazy" is injected into the wrapper's signature; a pre-existing
    # parameter of the same name would be silently shadowed.
    if 'lazy' in params(func):
        raise RuntimeError(
            # BUG FIX: the two concatenated literals previously ran the
            # sentences together ('..."lazy".It will...'); add the space.
            f'Decorated function {func} must not have parameter named "lazy". '
            'It will be inserted automatically.'
        )
    @wraps(func)
    def wrapper(*args, lazy=lazy, **kwargs):
        if lazy:
            # Defer execution: the caller invokes the returned thunk later.
            return lambda: func(*args, **kwargs)
        return func(*args, **kwargs)
    return wrapper
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.