| content (string) | sha1 (string) | id (int64) |
|---|---|---|
import numpy as np
def ifft_function(G,Fs,axis=0):
"""
This function gives the IDFT
Arguments
---------------------------
G : double
DFT (complex Fourier coefficients)
Fs : double
sample rate, maximum frequency of G times 2 (=F_nyquist*2)
axis : double
the axis on which the IDFT operates
Returns
---------------------------
t : double
time axis
x : double
time series
Reference:
"""
G=np.atleast_2d(G)
n=np.shape(G)
n_points=n[axis]
    # undo the frequency shift along the chosen axis, then invert the DFT
    x=np.fft.ifft( np.fft.ifftshift(G,axes=axis), axis=axis )/n_points
dt=1/np.double(Fs)
t=np.arange(0,dt*n_points,dt)
return t,x | 901218d6c795d0ee3163496b6889899b9be16342 | 3,638,200 |
def user_tweets_stats_grouped_new(_, group_type):
"""
Args:
_: Http Request (ignored in this function)
group_type: Keyword defining group label (day,month,year)
Returns: Activities grouped by (day or month or year) wrapped on response's object
"""
error_messages = []
success_messages = []
status = HTTP_200_OK
index_per_type = {
'year': 0,
'month': 1,
'day': 2
}
types = ["year", "month", "day"]
success, data, message = queries.user_tweets_stats_grouped(types[:index_per_type[group_type] + 1], accum=False)
if success:
success_messages.append(message)
else:
error_messages.append(message)
status = HTTP_403_FORBIDDEN
return create_response(data=data, error_messages=error_messages, success_messages=success_messages, status=status) | e1fc3dfd96fde3f2e01822b79bd97e508982e3d4 | 3,638,201 |
import itertools
def analyse_editing_percent(pileup_file, out_file, summary_file=None, add_headers=False, summary_only=False, min_editing=0,
max_noise=100, min_reads=1, edit_tag='' ):
"""
analyses pileup file editing sites and summarises it
@param pileup_file: input pileup file name
@param out_file: name of file to write new lines
@param summary_file: File to put the summary_string
@param add_headers: Boolean, whether to add csv like headers to new pileups
@param summary_only: Boolean, whether to only generate summary file and not generate edited pileup files
@param min_editing: minimal editing threshold, percent
@param max_noise: maximal noise threshold, percent
@param min_reads: minimal reads per site to be considered an editing site, int
@param edit_tag: tag to add to sites that are classified as editing sites
    @return: the summary string (also written to summary_file); side effect: creates out_file, a pileup file with tags
    indicating noise, editing percent and whether a given site was classified as an editing site
"""
# with open in read and out write
with open(pileup_file, 'r') as in_fp, \
open(out_file, 'w') as out_fp:
#"r","y","s","w","k","m","b","d","h","v","n"
        # initialize zero counts in summary dict
#summary_dict = {(nuc1, nuc2): 0 for nuc1, nuc2 in itertools.product('acgtACGT', repeat=2) if nuc1.upper() != nuc2.upper()}
#TODO why do we need lower case references?
summary_dict = {(nuc1, nuc2): 0 for nuc1, nuc2 in itertools.product('ACGTN', repeat=2) if nuc1.upper() != nuc2.upper()}
#add RYSWKMBDHV to string if needed
summary_dict['unchanged'] = 0
if add_headers:
out_fp.writelines([get_header_line(edit_tag)])
out_fp.write("\n")
list_headers = ["editing_min_percent", "noise_percent"]
if edit_tag:
list_headers.append("const_tag")
pileup_gen = class_generator(Pileup_line, file=in_fp)
line_num = -1
for line_num, pileup_line in enumerate(pileup_gen):
edit_type, new_pileup_line = is_pileup_line_edited(pileup_line,
read_thresh_hold=min_reads,
editing_min_percent_threshold=float(min_editing),
noise_percent_threshold=float(max_noise),
const_tag=edit_tag
)
summary_dict[edit_type] += 1
# add empty tag if we make it into a csv so the columns will align
if add_headers and edit_type == 'unchanged' and edit_tag != '':
new_pileup_line.tags["const_tag"] = ''
# if not summarise only, print the pileup line
if not summary_only:
#out_fp.write("\n")
if not add_headers:
out_fp.writelines([str(new_pileup_line)])
out_fp.write("\n")
else:
out_fp.writelines([new_pileup_line.line_to_csv_with_short_tags(list_headers)])
out_fp.write("\n")
# dont skip file, create empty summary
#if line_num == -1 :
# open(summary_file, "w").close()
# return #when the file is empty - skip the file!
##out_fp.write("\n")
# make summary counts into percentage
total_line_number = line_num + 1
summary_dict_sub = {key: float(val) / total_line_number if total_line_number != 0 else 0 for key, val in summary_dict.items() }
# printing the summary format to an individual file
with open(summary_file, "w") as file1:
summer_str = individual_summary_format(pileup_file, summary_dict_sub)
file1.write(summer_str)
return summer_str | 2e34c3e32fb8ac9d5b4dabb188d404b5cf052466 | 3,638,202 |
def login_required(func, *args, **kwargs):
"""
This is a decorator that can be applied to a Controller method that needs a logged in user.
The inner method receives the Controller instance and checks if the user is logged in
using the `request.is_authenticated` Boolean on the Controller instance
    :param func: This is the function being decorated.
:return: Either the method that is decorated (if user is logged in) else `unauthenticated` response (HTTP 401).
"""
def inner(controller_obj, *args, **kwargs):
if controller_obj.request.is_authenticated:
return func(controller_obj, *args, **kwargs)
else:
return response.json({
"message": "unauthenticated"
}, status=401)
return inner | 3611bb87544ece2516d4a738e3ab68b58ee154f4 | 3,638,203 |
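A minimal usage sketch for the decorator above. The controller and request objects here are hypothetical stand-ins; `response.json` is the framework helper the original code assumes (Sanic-style) and is only reached on the unauthenticated path, which this sketch does not exercise.
# Hypothetical controller showing how @login_required wraps a handler method.
class FakeRequest:
    def __init__(self, is_authenticated):
        self.is_authenticated = is_authenticated

class ProfileController:
    def __init__(self, request):
        self.request = request

    @login_required
    def show(self):
        return {"message": "profile data"}

print(ProfileController(FakeRequest(True)).show())  # {'message': 'profile data'}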
import cv2
import numpy as np
def preProcessImage(rgbImage):
""" Preprocess the input RGB image
@rgbImage: Input RGB Image
"""
# Color space conversion
img_gray = cv2.cvtColor(rgbImage, cv2.COLOR_BGR2GRAY)
    img_hsv = cv2.cvtColor(rgbImage, cv2.COLOR_BGR2HSV)
ysize, xsize = getShape(img_gray)
#Detecting yellow and white colors
low_yellow = np.array([20, 100, 100])
high_yellow = np.array([30, 255, 255])
mask_yellow = cv2.inRange(img_hsv, low_yellow, high_yellow)
mask_white = cv2.inRange(img_gray, 200, 255)
mask_yw = cv2.bitwise_or(mask_yellow, mask_white)
mask_onimage = cv2.bitwise_and(img_gray, mask_yw)
#Smoothing for removing noise
gray_blur = cv2.GaussianBlur(mask_onimage, (5,5), 0)
return gray_blur, xsize, ysize | ea70956bca99e28a6928867a40a3b579e2c8931b | 3,638,204 |
from typing import List
from typing import Dict
from typing import Any
import time
import json
def consume_messages(consumer: Consumer, num_expected: int, serialize: bool = True) -> List[Dict[str, Any]]:
"""helper function for polling 'everything' off a topic"""
start = time.time()
consumed_messages = []
while (time.time() - start) < POLL_TIMEOUT:
message = consumer.poll(1)
if message is None:
continue
if message.error():
logger.error(message.error())
else:
_msg = message.value().decode("utf-8")
if serialize:
msg = json.loads(_msg)
else:
msg = _msg
consumed_messages.append(msg)
if num_expected == len(consumed_messages):
break
consumer.close()
return consumed_messages | 5bf5db5180222d235e08a65a4e67e6daccf9c4d7 | 3,638,205 |
def get_compressed_size(data, compression, block_size=DEFAULT_BLOCK_SIZE):
"""
Returns the number of bytes required when the given data is
compressed.
Parameters
----------
data : buffer
compression : str
The type of compression to use.
block_size : int, optional
Input data will be split into blocks of this size (in bytes) before the compression.
Returns
-------
bytes : int
"""
compression = validate(compression)
encoder = _get_encoder(compression)
l = 0
for i in range(0, len(data), block_size):
l += len(encoder.compress(data[i:i+block_size]))
if hasattr(encoder, "flush"):
l += len(encoder.flush())
return l | f7c72cf7097ee9f15b9aa0b1b6d46fe060cc0c15 | 3,638,206 |
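A small usage sketch, assuming `validate` and `_get_encoder` accept a name such as "zlib" and return a zlib-style incremental compressor; the set of supported compression names is an assumption, not taken from the row above.
# Hypothetical call: compare block-wise compression of a repetitive payload
# using 1 KiB blocks versus a single block (splitting usually costs a little).
payload = b"sensor-reading,42.0,ok;" * 4096
print(get_compressed_size(payload, "zlib", block_size=1024))
print(get_compressed_size(payload, "zlib", block_size=len(payload)))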
def flat_command(bias=False,
flat_map=False,
return_shortname=False,
dm_num=1):
"""
Creates a DmCommand object for a flat command.
:param bias: Boolean flag for whether to apply a bias.
:param flat_map: Boolean flag for whether to apply a flat_map.
:param return_shortname: Boolean flag that will return a string that describes the object as the second parameter.
:param dm_num: 1 or 2, for DM1 or DM2.
:return: DmCommand object, and optional descriptive string (good for filename).
"""
short_name = "flat"
# Bias.
if flat_map:
short_name += "_flat_map"
if bias:
short_name += "_bias"
num_actuators_pupil = CONFIG_INI.getint(config_name, 'dm_length_actuators')
zero_array = np.zeros((num_actuators_pupil, num_actuators_pupil))
dm_command_object = DmCommand(zero_array, dm_num, flat_map=flat_map, bias=bias)
if return_shortname:
return dm_command_object, short_name
else:
return dm_command_object | 7b375c4b73686f286f07b8a327f2237e3ecb9ad0 | 3,638,207 |
def plot_graph_routes(
G,
routes,
bbox=None,
fig_height=6,
fig_width=None,
margin=0.02,
bgcolor="w",
axis_off=True,
show=True,
save=False,
close=True,
file_format="png",
filename="temp",
dpi=300,
annotate=False,
node_color="#999999",
node_size=15,
node_alpha=1,
node_edgecolor="none",
node_zorder=1,
edge_color="#999999",
edge_linewidth=1,
edge_alpha=1,
use_geom=True,
orig_dest_points=None,
route_color="r",
route_linewidth=4,
route_alpha=0.5,
orig_dest_node_alpha=0.5,
orig_dest_node_size=100,
orig_dest_node_color="r",
orig_dest_point_color="b",
):
"""
Plot several routes along a networkx spatial graph.
Parameters
----------
G : networkx.MultiDiGraph
input graph
routes : list
the routes as a list of lists of nodes
bbox : tuple
bounding box as north,south,east,west - if None will calculate from
spatial extents of data. if passing a bbox, you probably also want to
pass margin=0 to constrain it.
fig_height : int
matplotlib figure height in inches
fig_width : int
matplotlib figure width in inches
margin : float
relative margin around the figure
axis_off : bool
if True turn off the matplotlib axis
bgcolor : string
the background color of the figure and axis
show : bool
if True, show the figure
save : bool
if True, save the figure as an image file to disk
close : bool
close the figure (only if show equals False) to prevent display
file_format : string
the format of the file to save (e.g., 'jpg', 'png', 'svg')
filename : string
the name of the file if saving
dpi : int
the resolution of the image file if saving
annotate : bool
if True, annotate the nodes in the figure
node_color : string
the color of the nodes
node_size : int
the size of the nodes
node_alpha : float
the opacity of the nodes
node_edgecolor : string
the color of the node's marker's border
node_zorder : int
zorder to plot nodes, edges are always 2, so make node_zorder 1 to plot
nodes beneath them or 3 to plot nodes atop them
edge_color : string
the color of the edges' lines
edge_linewidth : float
the width of the edges' lines
edge_alpha : float
the opacity of the edges' lines
use_geom : bool
if True, use the spatial geometry attribute of the edges to draw
geographically accurate edges, rather than just lines straight from node
to node
orig_dest_points : list of tuples
optional, a group of (lat, lng) points to plot instead of the
origins and destinations of each route nodes
route_color : string
the color of the route
route_linewidth : int
the width of the route line
route_alpha : float
the opacity of the route line
orig_dest_node_alpha : float
the opacity of the origin and destination nodes
orig_dest_node_size : int
the size of the origin and destination nodes
orig_dest_node_color : string
the color of the origin and destination nodes
orig_dest_point_color : string
the color of the origin and destination points if being plotted instead
of nodes
Returns
-------
fig, ax : tuple
"""
# plot the graph but not the routes
fig, ax = plot_graph(
G,
bbox=bbox,
fig_height=fig_height,
fig_width=fig_width,
margin=margin,
axis_off=axis_off,
bgcolor=bgcolor,
show=False,
save=False,
close=False,
filename=filename,
dpi=dpi,
annotate=annotate,
node_color=node_color,
node_size=node_size,
node_alpha=node_alpha,
node_edgecolor=node_edgecolor,
node_zorder=node_zorder,
edge_color=edge_color,
edge_linewidth=edge_linewidth,
edge_alpha=edge_alpha,
use_geom=use_geom,
)
# save coordinates of the given reference points
orig_dest_points_lats = []
orig_dest_points_lons = []
if orig_dest_points is None:
# if caller didn't pass points, use the first and last node in each route as
# origin/destination points
for route in routes:
origin_node = route[0]
destination_node = route[-1]
orig_dest_points_lats.append(G.nodes[origin_node]["y"])
orig_dest_points_lats.append(G.nodes[destination_node]["y"])
orig_dest_points_lons.append(G.nodes[origin_node]["x"])
orig_dest_points_lons.append(G.nodes[destination_node]["x"])
else:
# otherwise, use the passed points as origin/destination points
for point in orig_dest_points:
orig_dest_points_lats.append(point[0])
orig_dest_points_lons.append(point[1])
orig_dest_node_color = orig_dest_point_color
# scatter the origin and destination points
ax.scatter(
orig_dest_points_lons,
orig_dest_points_lats,
s=orig_dest_node_size,
c=orig_dest_node_color,
alpha=orig_dest_node_alpha,
edgecolor=node_edgecolor,
zorder=4,
)
# plot the routes lines
lines = []
for route in routes:
lines.extend(_node_list_to_coordinate_lines(G, route, use_geom))
# add the lines to the axis as a linecollection
lc = LineCollection(
lines, colors=route_color, linewidths=route_linewidth, alpha=route_alpha, zorder=3
)
ax.add_collection(lc)
# save and show the figure as specified
fig, ax = _save_and_show(fig, ax, save, show, close, filename, file_format, dpi, axis_off)
return fig, ax | 333479cd0924df968f66ba328735a309a10e41a9 | 3,638,208 |
import re
import lxml.html
def _parse_book_info(html):
"""解析豆瓣图书信息(作者,出版社,出版年,定价)
:param html(string): 图书信息部分的原始html
"""
end_flag = 'END_FLAG'
html = html.replace('<br>', end_flag)
html = html.replace('<br/>', end_flag)
doc = lxml.html.fromstring(html)
text = doc.text_content()
pattern = r'{}[::](.*?){}'
return {
key: re.search(
pattern.format(column, end_flag), text, re.I | re.DOTALL
)
.group(1)
.strip()
for key, column in [
('author', '作者'),
('press', '出版社'),
('publish_date', '出版年'),
('price', '定价'),
]
} | d327d9561a1306f1242f1f78c01517bd2358aa0b | 3,638,209 |
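A worked example against a hand-made snippet shaped like a Douban book-info block; the HTML here is synthetic, real pages are larger but follow the same "field: value<br/>" pattern.
# Synthetic book-info block; each field ends with <br/> so the END_FLAG
# sentinel inserted by _parse_book_info terminates every match.
sample = (
    '<div id="info">'
    '作者: 张三<br/>'
    '出版社: 人民文学出版社<br/>'
    '出版年: 2020-1<br/>'
    '定价: 45.00元<br/>'
    '</div>'
)
print(_parse_book_info(sample))
# {'author': '张三', 'press': '人民文学出版社', 'publish_date': '2020-1', 'price': '45.00元'}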
def offers(request, region_slug, language_code=None):
"""
    Iterates over all offers related to a region and serializes them into a JSON list.
    Returns:
        JsonResponse: the serialized offers of the region
"""
region = Region.objects.get(slug=region_slug)
result = []
for offer in region.offers.all():
result.append(transform_offer(offer))
return JsonResponse(result, safe=False) | d0256abb9a1fda0fd0296dab811f3bef2091c6d3 | 3,638,210 |
import numpy as np
from math import atan2, pi
import plotly.graph_objects as go
def get_ellipse(mu: np.ndarray, cov: np.ndarray, draw_legend: bool = True):
"""
Draw an ellipse centered at given location and according to specified covariance matrix
Parameters
----------
mu : ndarray of shape (2,)
Center of ellipse
cov: ndarray of shape (2,2)
        Covariance of Gaussian
    draw_legend : bool, default True
        Whether the ellipse trace appears in the plot legend
Returns
-------
scatter: A plotly trace object of the ellipse
"""
l1, l2 = tuple(np.linalg.eigvalsh(cov)[::-1])
theta = atan2(l1 - cov[0, 0], cov[0, 1]) if cov[0, 1] != 0 else (
np.pi / 2 if cov[0, 0] < cov[1, 1] else 0)
t = np.linspace(0, 2 * pi, 100)
xs = (l1 * np.cos(theta) * np.cos(t)) - (l2 * np.sin(theta) * np.sin(t))
ys = (l1 * np.sin(theta) * np.cos(t)) + (l2 * np.cos(theta) * np.sin(t))
return go.Scatter(x=mu[0] + xs, y=mu[1] + ys, mode="lines",
marker_color="black", showlegend=draw_legend,
name="covariance") | 639e2161819e76c485efaf22598cfcc601a10122 | 3,638,211 |
from typing import Sequence
from typing import List
import hashlib
from typing import Dict
def load_hashes(
filename: str, hash_algorithm_names: Sequence[str]
) -> HashResult:
"""
Load the size and hash hex digests for the given file.
"""
# See https://github.com/python/typeshed/issues/2928
hashes: List['hashlib._hashlib._HASH'] = [] # type: ignore
for name in hash_algorithm_names:
hashes.append(hashlib.new(name))
size = 0
with open(filename, 'rb') as inp:
        # read in fixed-size chunks so arbitrarily large files can be hashed
        while True:
            data = inp.read(_BUFFER_SIZE)
            if not data:
                break
            for hashf in hashes:
                hashf.update(data)
            size += len(data)
digests: Dict[str, str] = {}
for idx in range(len(hash_algorithm_names)):
hash_name = hash_algorithm_names[idx]
hashf = hashes[idx]
digests[hash_name] = hashf.hexdigest() # type: ignore
return (size, digests) | 6846e39838f2017a46472826ba07bc9974e80c5a | 3,638,212 |
def ucs(st: Pixel, end: Pixel, data: np.ndarray):
"""
    Iterative uniform-cost (Dijkstra) search for a path from the start pixel to the end pixel, if one exists
    :param st: start pixel
    :param end: end pixel
    :param data: image data, used for visualisation during the search
    :return: path
"""
q = PriorityQueue()
startPriorityPixel = PixelPriority(st, 0, 0) # start priority pixel with 0 priority
q.put((0, startPriorityPixel))
lowest = startPriorityPixel
visited = dict()
while lowest.pxl != end:
if q.empty(): # No way to get to end
return [], -1
        thisDistance = lowest.distance
for u in lowest.pxl.getNeighbors():
if u is not None and (u.x, u.y) not in visited:
showImage(data, u.y, u.x)
visited[(u.x, u.y)] = 1
# distance travelled from start pixel to current pixel
dist = sqrt(pow(u.x - lowest.pxl.x, 2) + pow(u.y - lowest.pxl.y, 2) + \
pow(u.elevation - lowest.pxl.elevation, 2))
                newDistance = thisDistance + dist
priority = newDistance
priorityPixel = PixelPriority(u, newDistance, priority)
priorityPixel.predecessor = lowest
q.put((priority, priorityPixel))
lowest = q.get()[1]
path = []
if lowest.distance != 0: # We found the end, but it never got connected.
lst = lowest
while lst is not None:
path.insert(0, lst.pxl)
lst = lst.predecessor
return path | 743dbe230073bde4ba7b95e4520f097d8f7a4443 | 3,638,213 |
def tvdb_refresh_token(token: str) -> str:
"""
Refreshes JWT token.
Online docs: api.thetvdb.com/swagger#!/Authentication/get_refresh_token.
"""
url = "https://api.thetvdb.com/refresh_token"
headers = {"Authorization": f"Bearer {token}"}
status, content = request_json(url, headers=headers, cache=False)
if status == 401:
raise MnamerException("invalid token")
elif status != 200 or not content.get("token"): # pragma: no cover
raise MnamerNetworkException("TVDb down or unavailable?")
return content["token"] | a1974f43ed0e314100c686545bb610be9cc910ed | 3,638,214 |
def get_data():
""" _ _ _ """
df_hospital = download_hospital_admissions()
#sliding_r_df = walkingR(df_hospital, "Hospital_admission")
df_lcps = download_lcps()
df_mob_r = download_mob_r()
df_gemeente_per_dag = download_gemeente_per_dag()
df_reprogetal = download_reproductiegetal()
df_uitgevoerde_testen = download_uitgevoerde_testen()
type_of_join = "outer"
df = pd.merge(df_mob_r, df_hospital, how=type_of_join, left_on = 'date',
right_on="Date_of_statistics")
#df = df_hospital
df.loc[df['date'].isnull(),'date'] = df['Date_of_statistics']
df = pd.merge(df, df_lcps, how=type_of_join, left_on = 'date', right_on="Datum")
df.loc[df['date'].isnull(),'date'] = df['Datum']
#df = pd.merge(df, sliding_r_df, how=type_of_join, left_on = 'date', right_on="date_sR", left_index=True )
df = pd.merge(df, df_gemeente_per_dag, how=type_of_join, left_on = 'date', right_on="Date_of_publication",
left_index=True )
df = pd.merge(df, df_reprogetal, how=type_of_join, left_on = 'date', right_on="Date",
left_index=True )
df = pd.merge(df, df_uitgevoerde_testen, how=type_of_join, left_on = 'date', right_on="Date_of_statistics",
left_index=True )
df = df.sort_values(by=['date'])
df = splitupweekweekend(df)
df, werkdagen, weekend_ = last_manipulations(df, None, None)
    df = df.set_index('date')
return df, werkdagen, weekend_ | 2a9b909dc53b710ce9f1729e336464857a27bb30 | 3,638,215 |
def load_bin_file(bin_file, dtype="float32"):
"""Load data from bin file"""
data = np.fromfile(bin_file, dtype=dtype)
return data | facdabb726efd66ce6e7e462aed9458d8f3dc947 | 3,638,216 |
import numpy as np
def nearest_value(array, value):
"""
Searches array for the closest value to a given target.
Arguments:
array {NumPy Array} -- A NumPy array of numbers.
value {float/int} -- The target value.
Returns:
float/int -- The closest value to the target value found in the array.
"""
return array[np.abs(array - value).argmin()] | e9bf37b02bd55a0bdd9bf6f001aca6bc69895d8c | 3,638,217 |
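A one-line worked example of the helper above (numpy is imported alongside the function):
print(nearest_value(np.array([1.0, 4.0, 7.0]), 5.2))  # 4.0, the closest entry to 5.2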
def interpret_go_point(s, size):
"""Convert a raw SGF Go Point, Move, or Stone value to coordinates.
s -- 8-bit string
size -- board size (int)
Returns a pair (row, col), or None for a pass.
Raises ValueError if the string is malformed or the coordinates are out of
range.
Only supports board sizes up to 26.
The returned coordinates are in the GTP coordinate system (as in the rest
of gomill), where (0, 0) is the lower left.
"""
if s == b"" or (s == b"tt" and size <= 19):
return None
# May propagate ValueError
col_s, row_s = s
col = _bytestring_ord(col_s) - 97 # 97 == ord("a")
row = size - _bytestring_ord(row_s) + 96
if not ((0 <= col < size) and (0 <= row < size)):
raise ValueError
return row, col | 6b15b141e9fe5fc4195133f24925672522cdcb35 | 3,638,218 |
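Two worked examples of the conversion above on a 19x19 board, assuming the module's private `_bytestring_ord` helper (not shown in this row) is available and returns the byte's ordinal value.
# b"ab": column 'a' -> 0, row = 19 - ord('b') + 96 = 17, i.e. (17, 0)
print(interpret_go_point(b"ab", 19))   # (17, 0)
# b"tt" is the conventional SGF pass on boards up to 19x19
print(interpret_go_point(b"tt", 19))   # None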
def get_domain_name_for(host_string):
"""
Replaces namespace:serviceName syntax with serviceName.namespace one,
appending default as namespace if None exists
"""
return ".".join(
reversed(
("%s%s" % (("" if ":" in host_string else "default:"), host_string)).split(
":"
)
)
) | 6084e299f31d9c2eb922783d0488e9672051443f | 3,638,219 |
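Two worked examples of the rewrite above (the service names are made up for illustration):
print(get_domain_name_for("payments:billing-api"))  # "billing-api.payments"
print(get_domain_name_for("billing-api"))           # "billing-api.default"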
def bbox_classify(bboxes, possible_k):
"""bbox: x, y, w, h
return: best kmeans score anchor classes [(w1, h1), (w2, h2), ...]
"""
anchors = [bbox[2:4] for bbox in bboxes]
return anchors_classify(anchors, possible_k) | 5387c1441c94f4af0633b9cf73b0e5e53ce1bc9b | 3,638,220 |
def cleanFAAText(origText):
"""Take FAA text message and trim whitespace from end.
FAA text messages have all sorts of trailing whitespace
issues. We split the message into lines and remove all
right trailing whitespace. We then recombine them into
a uniform version with no trailing whitespace.
The final line will not have a newline character at the
end.
Args:
origText (str): Message text as it comes from the FAA.
Returns:
str: Cleaned up text as described above.
"""
lines = origText.split('\n')
numLines = len(lines)
# Remove empty line at end if present
if lines[-1] == '':
numLines -= 1
for i in range(0, numLines):
lines[i] = lines[i].rstrip()
newText = '\n'.join(lines).rstrip()
return newText | ea9882e24c60acaa35cae97f8e95acb48f5fd2a6 | 3,638,221 |
def LoadModel(gd_file, ckpt_file):
"""Load the model from GraphDef and Checkpoint.
Args: gd_file: GraphDef proto text file. ckpt_file: TensorFlow Checkpoint file.
Returns: TensorFlow session and tensors dict."""
with tf.Graph().as_default():
#class FastGFile: File I/O wrappers without thread locking.
with tf.gfile.FastGFile(gd_file, 'r') as f:
# Py 2: s = f.read().decode()
s = f.read()
# Serialized version of Graph
gd = tf.GraphDef()
# Merges an ASCII representation of a protocol message into a message.
text_format.Merge(s, gd)
tf.logging.info('Recovering Graph %s', gd_file)
t = {}
[t['states_init'], t['lstm/lstm_0/control_dependency'],
t['lstm/lstm_1/control_dependency'], t['softmax_out'], t['class_ids_out'],
t['class_weights_out'], t['log_perplexity_out'], t['inputs_in'],
t['targets_in'], t['target_weights_in'], t['char_inputs_in'],
t['all_embs'], t['softmax_weights'], t['global_step']
] = tf.import_graph_def(gd, {}, ['states_init',
'lstm/lstm_0/control_dependency:0',
'lstm/lstm_1/control_dependency:0',
'softmax_out:0',
'class_ids_out:0',
'class_weights_out:0',
'log_perplexity_out:0',
'inputs_in:0',
'targets_in:0',
'target_weights_in:0',
'char_inputs_in:0',
'all_embs_out:0',
'Reshape_3:0',
'global_step:0'], name='')
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
sess.run('save/restore_all', {'save/Const:0': ckpt_file})
sess.run(t['states_init'])
return sess, t | 08089910da145141df8446c1aab9d697b15a3aa6 | 3,638,222 |
from bs4 import BeautifulSoup
import re
def get_additional_rent(offer_markup):
""" Searches for additional rental costs
:param offer_markup:
:type offer_markup: str
:return: Additional rent
:rtype: int
"""
html_parser = BeautifulSoup(offer_markup, "html.parser")
table = html_parser.find_all(class_="item")
for element in table:
if "Czynsz" in element.text:
return int(("".join(re.findall(r'\d+', element.text))))
return | 8836beda16e21fe214344d647de9260195afa6a7 | 3,638,223 |
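A small sketch against synthetic markup in the shape the scraper expects (elements with class "item", one of which mentions "Czynsz"); real listing pages differ in detail.
# Hypothetical offer snippet: the rent line carries class="item".
offer_html = (
    '<ul>'
    '<li class="item">Powierzchnia: 48 m²</li>'
    '<li class="item">Czynsz (dodatkowo): 450 zł</li>'
    '</ul>'
)
print(get_additional_rent(offer_html))  # 450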
def make_known_disease_variants_filter(sample_ids_list=None):
""" Function for retrieving known disease variants by presence in Clinvar and Cosmic."""
result = {
"$or":
[
{
"$and":
[
{"clinvar.rcv.accession": {"$exists": True}},
{"clinvar.rcv.clinical_significance": {"$nin": ["Benign", "Likely benign"]}}
]
},
{"cosmic.cosmic_id": {"$exists": True}}
]
}
if sample_ids_list is not None:
result = _append_sample_id_constraint_if_needed([result], sample_ids_list)
return result | 288e5a0daa254016f9c1e1ee8e3106ea532008ec | 3,638,224 |
import multiprocessing
import numpy as np
def sharedArray(dtype, dims):
"""Create a shared numpy array."""
mpArray = multiprocessing.Array(dtype, int(np.prod(dims)), lock=False)
return np.frombuffer(mpArray, dtype=dtype).reshape(dims) | e01b20f0f21386dd2ec8e1952547fbc9fc15cb65 | 3,638,225 |
def _read_hyperparameters(idx, hist):
"""Read hyperparameters as a dictionary from the specified history dataset."""
return hist.iloc[idx, 2:].to_dict() | b2a036a739ec3e45c61289655714d9b59b2f5490 | 3,638,226 |
from functools import wraps
def row_annotation(name=None, fn_require=None):
"""
Function decorator for methods in a subclass of BaseMTSchema.
Allows the function to be treated like an row_annotation with annotation name and value.
@row_annotation()
def a(self):
return 'a_val'
@row_annotation(name='b', fn_require=a)
def b_1(self):
return 'b_val'
Will generate a mt with rows of {a: 'a_val', 'b': 'b_val'} if the function is called.
TODO: Consider changing fn_require to be a list of requirements.
When calling the function with annotation already set in the MT, the default behavior is to
skip unless an overwrite=True is passed into the call.
:param name: name in the final MT. If not provided, uses the function name.
:param fn_require: method name strings in class that are dependencies.
:return:
"""
def mt_prop_wrapper(func):
annotation_name = name or func.__name__
# fn_require checking, done when declared, not called.
if fn_require:
if not callable(fn_require):
raise ValueError('Schema: dependency %s is not of type function.' % fn_require)
if not hasattr(fn_require, 'mt_cls_meta'):
raise ValueError('Schema: dependency %s is not a row annotation method.' % fn_require.__name__)
@wraps(func)
def wrapper(self, *args, overwrite=False, **kwargs):
# Called already.
instance_metadata = self.mt_instance_meta['row_annotations'][wrapper.__name__]
if instance_metadata['annotated'] > 0:
return self
# MT already has annotation, so only continue if overwrite requested.
if annotation_name in self.mt.rows()._fields:
logger.warning('MT using schema class %s already has %s annotation.' % (self.__class__, annotation_name))
if not overwrite:
return self
logger.info('Overwriting matrix table annotation %s' % annotation_name)
if fn_require:
getattr(self, fn_require.__name__)()
try:
func_ret = func(self, *args, **kwargs)
# Do not annotate when RowAnnotationOmit raised.
except RowAnnotationOmit:
return self
annotation = {annotation_name: func_ret}
self.mt = self.mt.annotate_rows(**annotation)
instance_metadata['annotated'] += 1
instance_metadata['result'] = func_ret
return self
wrapper.mt_cls_meta = {
'annotated_name': annotation_name
}
return wrapper
return mt_prop_wrapper | 443ce2c3259613352ccb6f1e9d687e89448d37d7 | 3,638,227 |
import os
import os.path as osp
def path_exists_case_insensitive(path, root="/"):
"""
Checks if a `path` exists in given `root` directory, similar to
`os.path.exists` but case-insensitive. If there are multiple
case-insensitive matches, the first one is returned. If there is no match,
an empty string is returned.
:param str path: Relative path of item to find in the `root` directory.
:param str root: Directory where we will look for `path`.
:return: Absolute and case-sensitive path to search result on hard drive.
:rtype: str
"""
if not osp.isdir(root):
raise ValueError("'{0}' is not a directory.".format(root))
if path in ["", "/"]:
return root
path_list = path.lstrip(osp.sep).split(osp.sep)
path_list_lower = [x.lower() for x in path_list]
i = 0
local_paths = []
for root, dirs, files in os.walk(root):
for d in list(dirs):
if not d.lower() == path_list_lower[i]:
dirs.remove(d)
for f in list(files):
if not f.lower() == path_list_lower[i]:
files.remove(f)
local_paths = [osp.join(root, name) for name in dirs + files]
i += 1
if i == len(path_list_lower):
break
if len(local_paths) == 0:
return ''
else:
return local_paths[0] | 0bfbc6fb91220b85e11eeed9acb23bf02fd0cc78 | 3,638,228 |
import pendulum
def parse_time(date_time, time_zone):
"""Returns the seconds between now and the scheduled time."""
now = pendulum.now(time_zone)
update = pendulum.parse(date_time, tz=time_zone)
# If a time zone is not specified, it will be set to local.
# When passing only time information the date will default to today.
# The time will be set to 00:00:00 if it's not specified.
# A future date is needed.
secs = update - now
if secs.seconds < 0:
raise ScheduleError(ScheduleError.pastDateError)
return secs.seconds | 5ca2f5dad85e3492bd9909808990aaef0587343a | 3,638,229 |
import os
def ls(request):
"""
List a directory on the server.
"""
dir = request.GET.get("dir", "")
root = os.path.relpath(os.path.join(
settings.MEDIA_ROOT,
settings.USER_FILES_PATH
))
fulldir = os.path.join(root, dir)
response = HttpResponse(mimetype="application/json")
simplejson.dump(entry_info(fulldir), response)
return response | 6323ad4f23addf7e475744b36bb47e279e1eb2a7 | 3,638,230 |
def upper_bounds_max_ppr_target(adj, alpha, fragile, local_budget, target):
"""
Computes the upper bound for x_target for any teleport vector.
Parameters
----------
adj : sp.spmatrix, shape [n, n]
Sparse adjacency matrix.
alpha : float
(1-alpha) teleport[v] is the probability to teleport to node v.
fragile : np.ndarray, shape [?, 2]
Fragile edges that are under our control.
local_budget : np.ndarray, shape [n]
Maximum number of local flips per node.
target : int
Target node.
Returns
-------
upper_bounds: np.ndarray, shape [n]
Computed upper bounds.
"""
n = adj.shape[0]
z = np.zeros(n)
z[target] = 1
opt_fragile, _ = policy_iteration(adj=adj, alpha=alpha, fragile=fragile, local_budget=local_budget,
reward=z, teleport=z)
adj_flipped = flip_edges(adj, opt_fragile)
# gets one column from the PPR matrix
# corresponds to the PageRank score value of target for any teleport vector (any row)
pre_inv = sp.eye(n) - alpha * sp.diags(1 / adj_flipped.sum(1).A1) @ adj_flipped
ppr = (1 - alpha) * gmres(pre_inv, z)[0]
correction = correction_term(adj, opt_fragile, fragile)
upper_bounds = ppr / correction
return upper_bounds | 5bab951605ad5181e2fb696836219167dd78a30e | 3,638,231 |
import os
def import_layer_data(node, path):
"""Import ngLayerData from JSON file.
Args:
node (str): Name of the mesh. Used to find the JSON file.
path (str): The parent folder where the file is saved.
Returns:
str: The raw ngLayer data (somehow, this is a string!)
"""
nice_name = node.rsplit("|")[-1].rsplit(":")[-1]
weight_file = os.path.join(path, "{0}.json".format(nice_name))
if not os.path.exists(weight_file):
LOG.info("%r not found.", weight_file)
return None
with open(weight_file, "r") as stream:
ng_data = stream.read()
return ng_data | e0421387245fe938ee644fab277d8943aa64129d | 3,638,232 |
import numpy as np
from scipy.stats import chi2_contingency
def cramers_corrected_stat(contingency_table):
"""
    Computes corrected Cramer's V statistic for categorical-categorical association
"""
try:
chi2 = chi2_contingency(contingency_table)[0]
except ValueError:
return np.NaN
n = contingency_table.sum().sum()
phi2 = chi2/n
r, k = contingency_table.shape
r_corrected = r - (((r-1)**2)/(n-1))
k_corrected = k - (((k-1)**2)/(n-1))
phi2_corrected = max(0, phi2 - ((k-1)*(r-1))/(n-1))
return (phi2_corrected / min( (k_corrected-1), (r_corrected-1)))**0.5 | 89581fbcc306afdf34dac8cb30d3e7b316a47f48 | 3,638,233 |
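A worked example, assuming the contingency table is passed as a pandas DataFrame (which supports the `.sum().sum()` and `.shape` usage above).
import pandas as pd

# Toy 2x2 contingency table; chi2_contingency applies Yates' correction here,
# so the bias-corrected Cramer's V comes out around 0.27.
table = pd.DataFrame([[10, 20],
                      [20, 10]])
print(cramers_corrected_stat(table))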
def frame_comps_from_set(frame_set):
"""
A `set` of all component names every defined within any frame class in
this `TransformGraph`.
Broken out of the class so this can be called on a temporary frame set to
validate new additions to the transform graph before actually adding them.
"""
result = set()
for frame_cls in frame_set:
rep_info = frame_cls._frame_specific_representation_info
for mappings in rep_info.values():
for rep_map in mappings:
result.update([rep_map.framename])
return result | 525ea19b78cb2a360165085720d42df58aa72500 | 3,638,234 |
def generate_keyframe_chunks(animated_rotations, animated_locations, animated_scales, num_frames, chunksize):
"""
This function has a very high bug potential...
"""
# These lines create lists of length num_frames with None for frames with no data
rotations = populate_frames(num_frames, animated_rotations)
locations = populate_frames(num_frames, animated_locations)
scales = populate_frames(num_frames, animated_scales)
# The above is done so that the frames can be easily chunked by the following three lines:
rotations = chunk_frames(rotations, chunksize)
locations = chunk_frames(locations, chunksize)
scales = chunk_frames(scales, chunksize)
# And now we can iterate through the chunks and strip out the None values, and save the results
# We also might need to perform some interpolation inside these functions in order to satisfy the requirements of
# the DSCS animation format
# Also need to isolate the final frame in here for the same reasons
rotation_keyframe_chunks_data, rotation_bitvector_data = strip_and_validate_all_bones(rotations, chunksize, slerp)
location_keyframe_chunks_data, location_bitvector_data = strip_and_validate_all_bones(locations, chunksize, lerp)
scale_keyframe_chunks_data, scale_bitvector_data = strip_and_validate_all_bones(scales, chunksize, lerp)
# Now we can bundle all the chunks into a sequential list, ready for turning into KeyframeChunks instances
chunk_data = [[{}, {}, {}] for _ in range((num_frames // chunksize) + 1)]
for bone_idx, rotation_chunks in rotation_keyframe_chunks_data.items():
for i, rotation_data in enumerate(rotation_chunks):
chunk_data[i][0][bone_idx] = rotation_data
for bone_idx, location_chunks in location_keyframe_chunks_data.items():
for i, location_data in enumerate(location_chunks):
chunk_data[i][1][bone_idx] = location_data
for bone_idx, scale_chunks in scale_keyframe_chunks_data.items():
for i, scale_data in enumerate(scale_chunks):
chunk_data[i][2][bone_idx] = scale_data
# We also need the final elements of each animation
final_rotations = {bone_id: [list(data.values())[-1]] for bone_id, data in animated_rotations.items()}
final_locations = {bone_id: [list(data.values())[-1]] for bone_id, data in animated_locations.items()}
final_scales = {bone_id: [list(data.values())[-1]] for bone_id, data in animated_scales.items()}
chunks = []
for chunk_idx, chunk_datum in enumerate(chunk_data[:-1]):
r_bitvecs = [rotation_bitvector_data[bone_id][chunk_idx] for bone_id in rotation_bitvector_data]
l_bitvecs = [location_bitvector_data[bone_id][chunk_idx] for bone_id in location_bitvector_data]
s_bitvecs = [scale_bitvector_data[bone_id][chunk_idx] for bone_id in scale_bitvector_data]
chunks.append(ChunkHolder(*chunk_datum, r_bitvecs, l_bitvecs, s_bitvecs, chunksize))
pen_r_bitvecs = [rotation_bitvector_data[bone_id][-1] for bone_id in rotation_bitvector_data]
pen_l_bitvecs = [location_bitvector_data[bone_id][-1] for bone_id in location_bitvector_data]
pen_s_bitvecs = [scale_bitvector_data[bone_id][-1] for bone_id in scale_bitvector_data]
chunks.append(ChunkHolder.init_penultimate_chunk(*chunk_data[-1],
pen_r_bitvecs, pen_l_bitvecs, pen_s_bitvecs,
len(pen_r_bitvecs[0])))
chunks.append(ChunkHolder(final_rotations, final_locations, final_scales,
['1' for _ in final_rotations], ['1' for _ in final_locations], ['1' for _ in final_scales],
1))
return chunks | e45fc9b440b5ae278067c9c9f7de41e6c13f14ff | 3,638,235 |
def collection_tail(path_string):
"""Walk the path, return the tail collection"""
# pylint: disable=consider-using-enumerate
coll = None
parts = extract_path(path_string)
if parts:
try:
last_i = len(parts) - 1
coll = bpy.data.collections[parts[0]]
for i in range(1, len(parts)):
if i != last_i or \
is_path_terminated(path_string) or \
coll.children.get(parts[i]):
coll = coll.children[parts[i]] # Collection
else:
break # Blender Object
except KeyError:
return None
return coll | 9a9d4e594c654b35f15870d33bc24314f1c48e5c | 3,638,236 |
from pyadlml.dataset.devices import most_prominent_categorical_values
def create_raw(df_dev, most_likely_values=None):
"""
return df:
| time | dev_1 | .... | dev_n |
--------------------------------
| ts1 | 1 | .... | 0 |
"""
df_dev = df_dev.copy()
df = df_dev.pivot(index=TIME, columns=DEVICE, values=VAL)
df = df.reset_index()
dev_dtypes = _infer_types(df)
dev_cat = dev_dtypes['categorical']
dev_bool = dev_dtypes['boolean']
dev_num = dev_dtypes['numerical']
# set the first element for each boolean device to the opposite value of the
# first occurrence
for dev in dev_bool:
fvi = df[dev].first_valid_index()
if fvi != 0:
value = df[dev].iloc[fvi]
df.loc[0, dev] = not value
# set the first element of each categorical device to the most likely value
if len(dev_cat) != 0:
if most_likely_values is None:
tmp = df_dev[df_dev[DEVICE].isin(dev_cat)]
most_likely_values = most_prominent_categorical_values(tmp)
mlv = most_likely_values.set_index(DEVICE)
for dev in dev_cat:
new_val = mlv.loc[dev]['ml_state']
df.loc[0,dev] = new_val
df_num = df[dev_num]
df_cat_bool = df[dev_bool + dev_cat]
    # fill from start to end NaNs with the preceding correct value
df_cat_bool = df_cat_bool.ffill()
df = pd.concat([df[TIME], df_num, df_cat_bool], axis=1)
return df | e6659e70bf91876a3cbfcc98aaa71e4e97837a7f | 3,638,237 |
def p1_marker_loc(p1_input, board_list, player1):
"""Take the location of the marker for Player 1."""
# verify if the input is not in range or in range but in a already taken spot
while p1_input not in range(1, 10) or (
p1_input in range(1, 10) and board_list[p1_input] != " "
):
try:
p1_input = int(
input("Player 1: Where would you like to place the marker (1 - 9)? ")
)
# if a marker is already placed on that board location, display a message
# warning player 1 and ask for their input again
            if p1_input in range(1, 10) and board_list[p1_input] != " ":
print(
"There is already a marker there, please choose another location."
)
input("Press Enter to continue. ")
print()
# input the player for another location for the marker
continue
except ValueError:
print("This is not a number, please try again!")
print()
print(f"Player 1 is placing {player1} in position {p1_input}.")
# return the variable to reassign it locally on the game_logic() function
return p1_input | ea8cfd35e56d7e34efa7319667f1a655b597cf39 | 3,638,238 |
def chord(tones, dur, phrasing="", articulation="", ornamentation="", dynamics="", markup="", markdown="", prefix="", suffix=""):
""" Returns a list containing a single Point that prints as a chord with the specified tones and duration. """
tones = flatten([tonify(tones)])
return [Point(tones, dur, phrasing, articulation, ornamentation, dynamics, markup, markdown, prefix, suffix)] | b6fc7ba5c7e8541eeea540a869b1697c15c5ea47 | 3,638,239 |
def build_gem_graph():
"""Builds a gem graph, F4,1.
Ref: http://mathworld.wolfram.com/GemGraph.html"""
graph = build_5_cycle_graph()
graph.new_edge(1, 3)
graph.new_edge(1, 4)
return graph | 4979ae5643ca44d6fb5eadd4fff18489fd3b5629 | 3,638,240 |
from fastapi import Depends
from sqlmodel import Session, select
async def get_forecasts_by_user_year_epic(
user_id, epic_id, year, month, session: Session = Depends(get_session)
):
"""Get forecast by user, epic, year, month"""
statement = (
select(Forecast.id, Forecast.month, Forecast.year, Forecast.days)
.where(Forecast.user_id == user_id)
.where(Forecast.epic_id == epic_id)
.where(Forecast.year == year)
.where(Forecast.month == month)
)
results = session.exec(statement).all()
return results | 655863588ece0800d220386282d620d7296fc8a2 | 3,638,241 |
def face_xyz_to_uv(face, p):
"""(face, XYZ) to UV
see :cpp:func:`S2::FaceXYZtoUV`
"""
if face < 3:
if p[face] <= 0:
return False, 0, 0
else:
if p[face - 3] >= 0:
return False, 0, 0
u, v = valid_face_xyz_to_uv(face, p)
return True, u, v | 3483f918ed511c8fdf3c43e147c6cc605633754b | 3,638,242 |
import re
def cleanupString(string, replacewith="_", regex="([^A-Za-z0-9])"):
"""Remove all non-numeric or alphanumeric characters"""
# Please don't use the logging system here. The logging system
# needs this method, using the logging system here would
# introduce a circular dependency. Be careful not to call other
# functions that use the logging system.
return re.sub(regex, replacewith, string) | b327879a345a4236b871f824937997f6bd43d55b | 3,638,243 |
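Two worked examples of the sanitiser above:
print(cleanupString("My File (v2).txt"))      # My_File__v2__txt
print(cleanupString("backup:2024/01", "-"))   # backup-2024-01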
import socket
import json
def send_message(data, header_size=8):
"""Send data over socket."""
@_retry()
def _connect(socket_path):
"""Connect socket."""
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(socket_path)
sock.settimeout(SOCKET_TIMEOUT)
return sock
def _check_response(response):
if response is None:
raise RuntimeError(
"No response received when sending message: {}.".format(data)
)
if "type_data" not in response:
raise ValueError(
"Response {} does not contain key 'type_data'.".format(response)
)
return response["type_data"] == "OK"
    sock = None
    try:
sock = _connect(str(COMMUNICATOR_SOCKET))
message = json.dumps(data).encode()
message_length = len(message).to_bytes(header_size, byteorder="big")
sock.sendall(message_length)
sock.sendall(message)
response = _receive_data(sock)
if not _check_response(response):
logger.error("Error in respone to %s: %s.", data, response)
raise RuntimeError("Wrong response received, terminating processing.")
finally:
        if sock is not None:
            sock.close() | bbe9c11e5b29ac2b0f0d2d9dd357f806a682156b | 3,638,244 |
from typing import Optional
from typing import Set
from typing import Literal
from typing import Any
def _get_mapping_keys_in_condition(
condition: Expression, column_name: str
) -> Optional[Set[str]]:
"""
Finds the top level conditions that include filter based on the arrayJoin.
This is meant to be used to find the keys the query is filtering the arrayJoin
on.
We can only apply the arrayFilter optimization to arrayJoin conditions
that are not in OR with other columns. To simplify the problem, we only
consider those conditions that are included in the first level of the query:
[['tagskey' '=' 'a'],['col' '=' 'b'],['col2' '=' 'c']] works
[[['tagskey' '=' 'a'], ['col2' '=' 'b']], ['tagskey' '=' 'c']] does not
If we encounter an OR condition we return None, which means we cannot
safely apply the optimization. Empty set means we did not find any
suitable arrayJoin for optimization in this condition but that does
not disqualify the whole query in the way the OR condition does.
"""
keys_found = set()
conditions = get_first_level_and_conditions(condition)
for c in conditions:
if is_binary_condition(c, BooleanFunctions.OR):
return None
match = FunctionCall(
String(ConditionFunctions.EQ),
(array_join_pattern(column_name), Literal(Param("key", Any(str)))),
).match(c)
if match is not None:
keys_found.add(match.string("key"))
match = is_in_condition_pattern(array_join_pattern(column_name)).match(c)
if match is not None:
function = match.expression("tuple")
assert isinstance(function, FunctionCallExpr)
keys_found |= {
lit.value
for lit in function.parameters
if isinstance(lit, LiteralExpr) and isinstance(lit.value, str)
}
return keys_found | 7d890e4b68aeba9caca30e5a140214072c781a66 | 3,638,245 |
import requests
from bs4 import BeautifulSoup
def make_request(method, url, **kwargs):
"""Make HTTP request, raising an exception if it fails.
"""
request_func = getattr(requests, method)
response = request_func(url, **kwargs)
# raise an exception if request is not successful
if not response.status_code == requests.codes.ok:
response.raise_for_status()
return BeautifulSoup(response.text) | 1f47b178b66efe31fd78a4affc76a87d5be428bc | 3,638,246 |
def hold(source):
"""Place the active call on the source phone on hold"""
print("Holding call on {0}".format(source.Name))
return operation(source,'Hold') | 297cef77a3630bf3b9cab6256547ec43a5ba797c | 3,638,247 |
import math
def make_grid(batch, grid_height=None, zoom=1, old_buffer=None, border_size=1):
"""Creates a grid out an image batch.
Args:
batch: numpy array of shape [batch_size, height, width, n_channels]. The
data can either be float in [0, 1] or int in [0, 255]. If the data has
only 1 channel it will be converted to a grey 3 channel image.
grid_height: optional int, number of rows to have. If not given, it is
set so that the output is a square. If -1, then tiling will only be
vertical.
zoom: optional int, how much to zoom the input. Default is no zoom.
old_buffer: Buffer to write grid into if possible. If not set, or if shape
doesn't match, we create a new buffer.
border_size: int specifying the white spacing between the images.
Returns:
A numpy array corresponding to the full grid, with 3 channels and values
in the [0, 255] range.
Raises:
ValueError: if the n_channels is not one of [1, 3].
"""
batch_size, height, width, n_channels = batch.shape
if grid_height is None:
n = int(math.ceil(math.sqrt(batch_size)))
grid_height = n
grid_width = n
elif grid_height == -1:
grid_height = batch_size
grid_width = 1
else:
grid_width = int(math.ceil(batch_size/grid_height))
if n_channels == 1:
batch = np.tile(batch, (1, 1, 1, 3))
n_channels = 3
if n_channels != 3:
raise ValueError('Image batch must have either 1 or 3 channels, but '
'was {}'.format(n_channels))
# We create the numpy buffer if we don't have an old buffer or if the size has
# changed.
shape = (height * grid_height + border_size * (grid_height - 1),
width * grid_width + border_size * (grid_width - 1),
n_channels)
if old_buffer is not None and old_buffer.shape == shape:
buf = old_buffer
else:
buf = np.full(shape, 255, dtype=np.uint8)
multiplier = 1 if np.issubdtype(batch.dtype, np.integer) else 255
for k in range(batch_size):
i = k // grid_width
j = k % grid_width
arr = batch[k]
x, y = i * (height + border_size), j * (width + border_size)
buf[x:x + height, y:y + width, :] = np.clip(multiplier * arr,
0, 255).astype(np.uint8)
if zoom > 1:
buf = buf.repeat(zoom, axis=0).repeat(zoom, axis=1)
return buf | 72bbcebd121b13bce31d760b9d8890966155b603 | 3,638,248 |
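A short usage sketch: tile a batch of random grey images into one uint8 grid with a 1-pixel white border between cells.
import numpy as np

# 10 grey 28x28 images in [0, 1]; make_grid converts them to 3 channels and
# tiles them into a 4x4 grid (empty cells stay white), then zooms by 2.
batch = np.random.rand(10, 28, 28, 1)
grid = make_grid(batch, zoom=2)
print(grid.shape, grid.dtype)  # (230, 230, 3) uint8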
def _get_patterns_map(resolver, default_args=None):
"""
Cribbed from http://www.djangosnippets.org/snippets/1153/
Recursively generates a map of
(pattern name or path to view function) -> (view function, default args)
"""
patterns_map = {}
if default_args is None:
default_args = {}
for pattern in resolver.url_patterns:
pattern_args = default_args.copy()
if isinstance(pattern, RegexURLResolver):
pattern_args.update(pattern.default_kwargs)
patterns_map.update(_get_patterns_map(pattern, pattern_args))
else:
pattern_args.update(pattern.default_args)
if pattern.name is not None:
patterns_map[pattern.name] = (pattern.callback, pattern_args)
# HACK: Accessing private attribute of RegexURLPattern
callback_str = getattr(pattern, '_callback_str', None)
if callback_str is not None:
patterns_map[pattern._callback_str] = (pattern.callback, pattern_args)
return patterns_map | 21f149773457b075ba984b028d2c44ac41f09a6a | 3,638,249 |
def encoder_package_to_options(encoder_package, post_url=None,
extra_numerics=None,
extra_categoricals=None,
omitted_fields=None):
"""
:param encoder_package: one hot encoder package
:param post_url: url to send form data to on submission
default is ''
for testing purposes, you may use PUBLIC and it will use
"http://httpbin.org/post" which prints the result
this is not secure so don't do that with sensitive data
:return:
"""
extra_numerics, extra_categoricals, omitted_fields = process_extras(extra_numerics,
extra_categoricals,
omitted_fields)
if post_url is None:
post_url = ''
if post_url == 'PUBLIC':
post_url = "http://httpbin.org/post"
fields = {}
numeric_cols = encoder_package['numeric_cols'] + list(extra_numerics.keys())
for field in numeric_cols:
if field in omitted_fields:
continue
fields[field] = {
"size": 20
}
encoder_dicts = encoder_package['one_hot_encoder_dicts']
for field, value_dicts in encoder_dicts.items():
if field in omitted_fields:
continue
values = sorted(value_dicts.items(), key=lambda x: x[1])
levels = [v[0] for v in values]
n_levels = len(levels)
levels = levels + [unknown_level_value]
if n_levels < LEVELS_MAX_FOR_DROP_DOWN:
fields[field] = {
"type": "select",
"optionLabels": levels,
"sort": False}
else:
fields[field] = {"size": 20}
for field, levels in extra_categoricals.items():
fields[field] = {
"type": "select",
"optionLabels": levels,
"sort": False
}
options = {
"form": {
"attributes": {
"action": post_url,
"method": "post"
},
"buttons": {
"submit": {}
}
},
"helper": "Hit submit to update the prediction",
"fields": fields}
return options | 1286aefef87b547d7a09db8fec3b50f7082e64f8 | 3,638,250 |
def get_subset_values(request, pk):
"""Return the numerical values of a subset as a formatted list."""
values = models.NumericalValue.objects.filter(
datapoint__subset__pk=pk).select_related(
'error').select_related('upperbound').order_by(
'qualifier', 'datapoint__pk')
total_len = len(values)
y_len = total_len
# With both x- and y-values, the y-values make up half the list.
if values.last().qualifier == models.NumericalValue.SECONDARY:
y_len = int(y_len/2)
response = []
for i in range(y_len):
response.append({'y': values[i].formatted()})
for i in range(y_len, total_len):
response[i-y_len]['x'] = values[i].formatted()
return JsonResponse(response, safe=False) | 1bc34a534a56a7f75742f455aad5575224ce976f | 3,638,251 |
import time
def timestamp(format_key: str) -> str:
"""
    Format the current local time
    :Args:
    - format_key: which output format to use, str.
:Usage:
timestamp('format_day')
"""
format_time = {
'default':
{
'format_day': '%Y-%m-%d',
'format_now': '%Y-%m-%d-%H_%M_%S',
'unix_now': '%Y-%m-%d %H:%M:%S',
}
}
return time.strftime(format_time['default'][format_key], time.localtime(time.time())) | dab77afb630193d45fbc5b07c08fd82c3dfa3050 | 3,638,252 |
def _save_conn_form(
request: HttpRequest,
form: SQLConnectionForm,
template_name: str,
) -> JsonResponse:
"""Save the connection provided in the form.
:param request: HTTP request
:param form: form object with the collected information
:param template_name: To render the response
:return: AJAX response
"""
# Type of event to record
if form.instance.id:
event_type = Log.SQL_CONNECTION_EDIT
is_add = False
else:
event_type = Log.SQL_CONNECTION_CREATE
is_add = True
# If it is a POST and it is correct
if request.method == 'POST' and form.is_valid():
if not form.has_changed():
return JsonResponse({'html_redirect': None})
conn = form.save()
# Log the event
Log.objects.register(
request.user,
event_type,
None,
{
'name': conn.name,
'description': conn.description_text,
'conn_type': conn.conn_type,
'conn_driver': conn.conn_driver,
'db_user': conn.db_user,
'db_passwd': _('<PROTECTED>') if conn.db_password else '',
'db_host': conn.db_host,
'db_port': conn.db_port,
'db_name': conn.db_name,
'db_table': conn.db_table,
},
)
return JsonResponse({'html_redirect': ''})
# Request is a GET
return JsonResponse({
'html_form': render_to_string(
template_name,
{
'form': form,
'id': form.instance.id,
'add': is_add},
request=request,
),
}) | ee2639e1ab354b6ca722e35167bf6ab7cc57b351 | 3,638,253 |
def client() -> GivEnergyClient:
"""Supply a client with a mocked modbus client."""
# side_effects = [{1: 2, 3: 4}, {5: 6, 7: 8}, {9: 10, 11: 12}, {13: 14, 15: 16}, {17: 18, 19: 20}]
return GivEnergyClient(host='foo') | 9d419927ebcb5a39df27e92e3a378cd5448acf1e | 3,638,254 |
def test_bus(test_system):
"""Create the test system."""
test_system.run_load_flow()
return test_system.buses["bus3"] | fea4880446059171dae5d6fffc24bdc98eede5cd | 3,638,255 |
def mask_target(y_true, bbox_true, mask_true, mask_regress, proposal, assign = cls_assign, sampling_count = 256, positive_ratio = 0.25, mean = [0., 0., 0., 0.], std = [0.1, 0.1, 0.2, 0.2], method = "bilinear"):
"""
y_true = label #(padded_num_true, 1 or num_class)
bbox_true = [[x1, y1, x2, y2], ...] #(padded_num_true, bbox)
mask_true = mask #(padded_num_true, h, w)
mask_regress = mask regress #(num_proposals, h, w, num_class)
proposal = [[x1, y1, x2, y2], ...] #(num_proposals, bbox)
mask_true = targeted mask true #(sampling_count, h, w)
mask_pred = targeted mask regress #(sampling_count, h, w)
"""
if tf.keras.backend.ndim(mask_true) == 3:
mask_true = tf.expand_dims(mask_true, axis = -1)
pred_count = tf.shape(proposal)[0]
valid_true_indices = tf.where(tf.reduce_max(tf.cast(0 < bbox_true, tf.int32), axis = -1))
y_true = tf.gather_nd(y_true, valid_true_indices)
bbox_true = tf.gather_nd(bbox_true, valid_true_indices)
valid_pred_indices = tf.where(tf.reduce_max(tf.cast(0 < proposal, tf.int32), axis = -1))
proposal = tf.gather_nd(proposal, valid_pred_indices)
mask_true = tf.gather_nd(mask_true, valid_true_indices)
mask_regress = tf.gather_nd(mask_regress, valid_pred_indices)
true_indices, positive_indices, negative_indices = assign(bbox_true, proposal)
if isinstance(sampling_count, int) and 0 < sampling_count:
positive_count = tf.cast(sampling_count * positive_ratio, tf.int32)
indices = tf.range(tf.shape(positive_indices)[0])
indices = tf.random.shuffle(indices)[:positive_count]
positive_indices = tf.gather(positive_indices, indices)
true_indices = tf.gather(true_indices, indices)
positive_count = tf.cast(tf.shape(positive_indices)[0], tf.float32)
negative_count = tf.cast(1 / positive_ratio * positive_count - positive_count, tf.int32)
negative_indices = tf.random.shuffle(negative_indices)[:negative_count]
else:
sampling_count = pred_count
pred_indices = tf.concat([positive_indices, negative_indices], axis = 0)
y_true = tf.gather(y_true, true_indices)
proposal = tf.gather(proposal, positive_indices)
mask_true = tf.gather(mask_true, true_indices)
mask_pred = tf.gather(mask_regress, positive_indices)
n_class = tf.shape(y_true)[-1]
if tf.keras.backend.int_shape(true_indices)[0] != 0:
label = tf.cond(tf.equal(n_class, 1), true_fn = lambda: y_true, false_fn = lambda: tf.expand_dims(tf.cast(tf.argmax(y_true, axis = -1), y_true.dtype), axis = -1))
indices = tf.stack([tf.range(tf.shape(label)[0]), tf.cast(label[:, 0], tf.int32)], axis = -1)
if mask_true is not None and mask_regress is not None:
x1, y1, x2, y2 = tf.split(proposal, 4, axis = -1)
mask_bbox = tf.concat([y1, x1, y2, x2], axis = -1)
mask_shape = tf.shape(mask_pred)
mask_true = tf.image.crop_and_resize(image = tf.cast(mask_true, mask_pred.dtype), boxes = mask_bbox, box_indices = tf.range(0, tf.cast(positive_count, tf.int32)), crop_size = mask_shape[1:3], method = method)
mask_true = mask_true[..., 0]
mask_true = tf.clip_by_value(tf.round(mask_true), 0., 1.)
mask_pred = tf.transpose(mask_pred, [0, 3, 1, 2])
mask_pred = tf.gather_nd(mask_pred, indices)
else:
mask_pred = mask_pred[..., 0]
mask_true = tf.zeros_like(mask_pred, dtype = mask_pred.dtype)
negative_count = tf.shape(negative_indices)[0]
pad_count = tf.maximum(sampling_count - tf.shape(pred_indices)[0], 0)
mask_true = tf.pad(mask_true, [[0, negative_count + pad_count], [0, 0], [0, 0]])
mask_pred = tf.pad(mask_pred, [[0, negative_count + pad_count], [0, 0], [0, 0]])
return mask_true, mask_pred | b161178716d890721a7f3cd0bfd61fdcc3efffb4 | 3,638,256 |
from rowgenerators.exceptions import DownloadError
def display_context(doc):
"""Create a Jinja context for display"""
# Make a naive dictionary conversion
context = {s.name.lower(): s.as_dict() for s in doc if s.name.lower() != 'schema'}
mandatory_sections = ['documentation', 'contacts']
# Remove section names
deletes = []
for k, v in context.items():
try:
del v['@value']
except KeyError:
pass # Doesn't have the value
except TypeError:
# Is actually completely empty, and has a scalar value. Delete and re-create
deletes.append(k)
if isinstance(v, str): # Shouldn't ever happen, but who knows ?
deletes.append(k)
for d in deletes:
try:
del context[d]
except KeyError:
# Fails in TravisCI, no idea why.
pass
for ms in mandatory_sections:
if ms not in context:
context[ms] = {}
# Load inline documentation
inline = ''
for d in context.get('documentation', {}).get('documentation', []):
try:
u = parse_app_url(d['url'])
except TypeError:
continue
if u.target_format == 'md': # The README.md file
inline = ''
if u.proto == 'file':
# File really ought to be relative
t = doc.package_url.join_target(u).get_resource().get_target()
else:
try:
t = u.get_resource().get_target()
except DownloadError as e:
raise e
try:
with open(t.fspath) as f:
inline += f.read()
except FileNotFoundError:
pass
del d['title'] # Will cause it to be ignored in next section
# Strip off the leading title, if it exists, because it will be re-applied
# by the templates
lines = inline.strip().splitlines()
if lines and lines[0].startswith('# '):
lines = lines[1:]
context['inline_doc'] = '\n'.join(lines)
# Convert doc section
doc_links = {}
images = {}
for term_name, terms in context['documentation'].items():
if term_name == 'note':
context['notes'] = terms
elif terms:
for i, term in enumerate(terms):
try:
if term_name == 'image':
images[term['title']] = term
else:
doc_links[term['title']] = term
except AttributeError: # A scalar
pass # There should not be any scalars in the documentation section
except KeyError:
pass # ignore entries without titles
except TypeError:
                    pass # Also probably a scalar
context['doc_links'] = doc_links
context['images'] = images
del context['documentation']
#
# Update contacts
origin = None
for term_name, terms in context['contacts'].items():
if isinstance(terms, dict):
            origin = terms # Origin is a scalar in root, must be converted to a sequence here
else:
for t in terms:
try:
t.update(process_contacts_html(t))
except AttributeError:
pass # Probably got a scalar
if origin:
origin.update(process_contacts_html(origin))
context['contacts']['origin'] = [origin]
# For resources and references, convert scalars into lists of dicts, which are the
# default for Datafiles and References.
for section in ('references', 'resources'):
if section not in context:
context[section] = {}
for term_key, term_vals in context[section].items():
if isinstance(term_vals, dict):
if '@value' in term_vals:
term_vals['url'] = term_vals['@value']
del term_vals['@value']
new_term_vals = [term_vals]
elif isinstance(term_vals, list):
new_term_vals = None
else:
new_term_vals = [{'url': term_vals, 'name': term_vals}]
if new_term_vals:
context[section][term_key] = new_term_vals
# Add in other properties to the resources
for term in context.get('resources', {}).get('datafile', []):
r = doc.resource(term['name'])
if r is not None:
term['isgeo'] = r.isgeo
context['distributions'] = {}
for dist in doc.find('Root.Distribution'):
context['distributions'][dist.type] = dist.value
if doc.find('Root.Giturl'):
context['distributions']['source'] = doc.get_value('Root.Giturl')
context['schema'] = {}
if 'Schema' in doc:
for t in doc['Schema'].find('Root.Table'):
context['schema'][t.name] = []
for c in t.find('Table.Column'):
context['schema'][t.name].append(c.as_dict())
return context | 53d455448b37a1236e640a66436525fa9369e575 | 3,638,257 |
import torch
def _get_triplet_mask(labels: torch.Tensor) -> torch.BoolTensor:
"""Return a 3D mask where mask[a, p, n] is True if the triplet (a, p, n) is valid.
A triplet (i, j, k) is valid if:
- i, j, k are distinct
- labels[i] == labels[j] and labels[i] != labels[k]
Args:
labels (torch.Tensor): `Tensor` with shape [batch_size]
Returns:
        torch.BoolTensor: `Tensor` with shape [batch_size, batch_size, batch_size]
"""
# Check that i, j and k are distinct
indices = torch.logical_not(torch.eye(labels.size(0)).bool()).to(labels.device)
i_not_equal_j = indices.unsqueeze(2)
i_not_equal_k = indices.unsqueeze(1)
j_not_equal_k = indices.unsqueeze(0)
distinct_indices = (i_not_equal_j & i_not_equal_k) & j_not_equal_k
label_equal = labels.unsqueeze(0) == labels.unsqueeze(1)
i_equal_j = label_equal.unsqueeze(2)
i_equal_k = label_equal.unsqueeze(1)
valid_labels = ~i_equal_k & i_equal_j
return valid_labels & distinct_indices | 91e4e88507979bacde12c4c2dd9725b4d52e0e90 | 3,638,258 |
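A minimal usage sketch for the triplet mask above; the label values are made up for illustration.
import torch

labels = torch.tensor([0, 0, 1, 1])
mask = _get_triplet_mask(labels)
print(mask.shape)            # torch.Size([4, 4, 4])
print(mask[0, 1, 2].item())  # True: indices 0 and 1 share a label, index 2 does not
print(mask[0, 1, 1].item())  # False: the positive and negative indices coincide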
import re
def analyse_registration_output(output_string):
"""Parse the registration command output and return appropriate error"""
parse_error="ERROR:Unable to parse error message:" + output_string
success=0
fail=1
    status_regex = re.compile(r"Status\s*:\s*(?P<status>[A-Z]+).*")
    try:
        status = status_regex.search(output_string).groupdict()['status']
    except AttributeError:
        return fail, parse_error
if status == "FAILED":
return_exit = fail
        code_regex = re.compile(r"Result Code\s*:\s*CLI_(?P<code>[0-9]).*")
        try:
            code = code_regex.search(output_string).groupdict()['code']
        except AttributeError:
            return fail, parse_error
if code == '0':
message = "CLI_0: Authentication error"
elif code == '1':
message = "CLI_1: Error reading file references from the properties file"
elif code == '2':
message = "CLI_2: Invalid user input"
elif code == '3':
message = "CLI_3: No input files to process"
elif code == '4':
message = "CLI_4: Failed to process collection"
elif code == '5':
message = "CLI_5: Failed to process data file"
else:
message = "Unknown error"
elif status == "COMPLETED":
return_exit = success
message = "Successful registration"
else:
return_exit = fail
message = parse_error
return return_exit, message | e6e90b9a55a8631bcb1c0963c943b08df82f03f4 | 3,638,259 |
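A usage sketch for the parser above; the sample output string is only a guess at the CLI format implied by the regexes, not a captured log.
sample = "Status : FAILED\nResult Code : CLI_3"
exit_code, message = analyse_registration_output(sample)
print(exit_code, message)   # 1 CLI_3: No input files to process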
import random
def randomrandrange(x, y=None):
"""Method randomRandrange.
return a randomly selected element from
range(start, stop). This is equivalent to
choice(range(start, stop)),
but doesnt actually build a range object.
"""
    if y is None:
return random.randrange(x) # nosec
else:
return random.randrange(x, y) | 5c6304f20e6e1ddcfda931278defdc0c8867553f | 3,638,260 |
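A quick usage sketch, relying on the None check above.
print(randomrandrange(10))      # an int drawn from range(0, 10)
print(randomrandrange(5, 15))   # an int drawn from range(5, 15)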
import os
def difficulties(prefix="data"):
    """ Helper function that returns a list of template files. """
    print("Loading difficulties ...")
    difficulties = [ ]
    for dirname, _, names in os.walk(os.path.join(prefix, "templ_difficulties/")):
        processor(difficulties, dirname, names)
    if (len(difficulties) == 0): die("FATAL: No difficulties to use!")
    return difficulties | e1c6744f4c101418972fc0fdf9dfc140c71d337e | 3,638,261
import numpy as np
from numba import prange
from typing import Callable
def int_domains(ecoords: np.ndarray, qpos: np.ndarray,
qweight: np.ndarray, dshpfnc: Callable):
"""
Returns the measure (length, area or volume in 1d, 2d and 3d) of
several domains.
"""
nE = ecoords.shape[0]
res = np.zeros(nE, dtype=ecoords.dtype)
nG = len(qweight)
for iG in prange(nG):
dshp = dshpfnc(qpos[iG])
for i in prange(nE):
jac = ecoords[i].T @ dshp
djac = np.linalg.det(jac)
res[i] += qweight[iG] * djac
return res | 64ebe6dea6b86b4d391064b100a159c9641dcdde | 3,638,262 |
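A small self-contained check of int_domains on two 1D line elements. The shape-function derivative below is a hand-rolled stand-in for whatever dshpfnc a caller would normally supply, and a one-point Gauss rule is assumed; outside of a Numba-compiled context prange behaves like range, so this runs in plain Python.
import numpy as np

def dshp_line2(_):
    # derivatives of the two linear shape functions w.r.t. the natural coordinate
    return np.array([[-0.5], [0.5]])

ecoords = np.array([[[0.0], [2.0]], [[2.0], [5.0]]])    # two line elements, nodal x-coordinates
qpos, qweight = np.array([0.0]), np.array([2.0])        # one-point Gauss rule on [-1, 1]
print(int_domains(ecoords, qpos, qweight, dshp_line2))  # [2. 3.] -> the element lengths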
def pg_conn(postgresql):
"""Runs the sqitch plan and loads seed data before returning db connection.
"""
with postgresql:
# Loads data from blogdb fixture data
with postgresql.cursor() as cur:
cur.execute(
"""
create table users (
userid serial not null primary key,
username varchar(32) not null,
firstname varchar(255) not null,
lastname varchar(255) not null
);"""
)
cur.execute(
"""
create table blogs (
blogid serial not null primary key,
userid integer not null references users(userid),
title varchar(255) not null,
content text not null,
published date not null default CURRENT_DATE
);"""
)
with postgresql.cursor() as cur:
with USERS_DATA_PATH.open() as fp:
cur.copy_from(fp, "users", sep=",", columns=["username", "firstname", "lastname"])
with BLOGS_DATA_PATH.open() as fp:
cur.copy_from(
fp, "blogs", sep=",", columns=["userid", "title", "content", "published"]
)
return postgresql | df3245eecad1c8f0fd1228ff8f3bf8a57701dfef | 3,638,263 |
from functools import partial
def partial_with_hound_context(hound, func, *args, **kwargs):
    """
    Returns a partially bound function
Propagates the currently active hound reason (if any)
Useful for capturing the current contextual hound reason when queueing a background action
"""
if hound is not None:
reason = hound.get_current_reason()
return partial(
call_with_context,
partial(hound.with_reason, reason),
func,
*args,
_context_callable=True,
**kwargs
)
return partial(
func,
*args,
**kwargs
) | e2547f3c59ac4168e0961db7216903c7fdca16af | 3,638,264 |
def rssfeed_edit(request, feed, ret_path):
""" Eigenschaften des RSS-Feeds aendern """
def save_values(feed, old, new):
""" geaenderte Werte des RSS-Feeds speichern """
has_changed = False
key = 'title'
if old[key] != new[key]:
feed.title = encode_html(new[key])
has_changed = True
key = 'text'
if old[key] != new[key]:
feed.description = encode_html(new[key])
has_changed = True
key = 'url_more'
if old[key] != new[key]:
feed.link = new[key]
has_changed = True
key = 'section'
if old[key] != new[key]:
feed.general_mode = new[key]
has_changed = True
if has_changed:
feed.last_modified = get_last_modified()
feed.save()
class DmsItemForm(forms.Form):
""" Elemente des Eingabeformulars """
title = forms.CharField(max_length=240,
widget=forms.TextInput(attrs={'size':60}) )
text = forms.CharField(max_length=180,
widget=forms.TextInput(attrs={'size':60}) )
url_more = forms.CharField(required=False, max_length=200,
widget=forms.TextInput(attrs={'size':60}) )
section = forms.ChoiceField(choices=get_global_choices(),
widget=forms.RadioSelect() )
data_init = {
'title' : decode_html(feed.title),
'text' : remove_link_icons(feed.description),
'url_more' : feed.link,
'section' : feed.general_mode,
}
app_name = 'rssfeed'
if request.method == 'POST' :
data = request.POST.copy()
else :
data = data_init
f = DmsItemForm(data)
my_title = _(u'RSS-Feed ändern')
tabs = [ ('tab_base', [ 'title', 'text', 'url_more', 'section', ]), ]
content = get_tabbed_form(tabs, help_form, app_name, f)
if request.method == 'POST' and not f.errors :
save_values(feed, data_init, f.data)
return HttpResponseRedirect(ret_path)
else:
path = request.path
n_pos = path[:-1].rfind('/')
path = path[:n_pos]
n_pos = path.rfind('/')
path = path[:n_pos+1]
item_container = get_item_container(path, '')
vars = get_item_vars_edit(request, item_container, app_name, my_title, content, f)
return render_to_response ( 'app/base_edit.html', vars ) | 0643c6ca976d448bf3faf5539e90e59ea7d06bd7 | 3,638,265 |
def disassemble_pretty(self, addr=None, insns=1,
arch=None, mode=None):
"""
Wrapper around disassemble to return disassembled instructions as string.
"""
ret = ""
disas = self.disassemble(addr, insns, arch, mode)
for i in disas:
ret += "0x%x:\t%s\t%s\n" % (i.address, i.mnemonic, i.op_str)
return ret | 39bddf246b880decbc84015ef20c5664f88d917e | 3,638,266 |
import cv2
from ctypes import c_int, pointer
def detect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45, debug=False):
"""
Performs the detection
"""
custom_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
custom_image = cv2.resize(custom_image, (lib.network_width(
net), lib.network_height(net)), interpolation=cv2.INTER_LINEAR)
im, arr = array_to_image(custom_image)
num = c_int(0)
pnum = pointer(num)
predict_image(net, im)
dets = get_network_boxes(
net, image.shape[1], image.shape[0], thresh, hier_thresh, None, 0, pnum, 0)
num = pnum[0]
if nms:
do_nms_sort(dets, num, meta.classes, nms)
res = []
if debug:
print("about to range")
for j in range(num):
for i in range(meta.classes):
if dets[j].prob[i] > 0:
b = dets[j].bbox
if altNames is None:
nameTag = meta.names[i]
else:
nameTag = altNames[i]
res.append((nameTag, dets[j].prob[i], (b.x, b.y, b.w, b.h), i))
res = sorted(res, key=lambda x: -x[1])
free_detections(dets, num)
return res | 7209042478457e4219c9d600790b58d5b5d54e2f | 3,638,267 |
import cv2
import torch
def overlay_boxes(image, predictions):
"""
Adds the predicted boxes on top of the image
Arguments:
image (np.ndarray): an image as returned by OpenCV
predictions (BoxList): the result of the computation by the model.
It should contain the field `labels`.
"""
labels = predictions.get_field("labels")
boxes = predictions.bbox
colors = compute_colors_for_labels(labels).tolist()
for box, color in zip(boxes, colors):
box = box.to(torch.int64)
top_left, bottom_right = box[:2].tolist(), box[2:].tolist()
image = cv2.rectangle(
image, tuple(top_left), tuple(bottom_right), tuple(color), 2
)
return image | 99905ae0206d285fa878b0063f227a9152600fad | 3,638,268 |
import numpy as np
def convert_tilt_convention(iconfig, old_convention,
new_convention):
"""
convert the tilt angles from an old convention to a new convention
This should work for both configs with statuses and without
"""
if new_convention == old_convention:
return
def _get_tilt_array(data):
# This works for both a config with statuses, and without
if isinstance(data, dict):
return data.get('value')
return data
def _set_tilt_array(data, val):
# This works for both a config with statuses, and without
if isinstance(data, dict):
data['value'] = val
else:
data.clear()
data.extend(val)
old_axes, old_extrinsic = old_convention
new_axes, new_extrinsic = new_convention
det_keys = iconfig['detectors'].keys()
if old_axes is not None and old_extrinsic is not None:
# First, convert these to the matrix invariants
rme = RotMatEuler(np.zeros(3), old_axes, old_extrinsic)
for key in det_keys:
tilts = iconfig['detectors'][key]['transform']['tilt']
rme.angles = np.array(_get_tilt_array(tilts))
phi, n = angleAxisOfRotMat(rme.rmat)
_set_tilt_array(tilts, (phi * n.flatten()).tolist())
if new_axes is None or new_extrinsic is None:
# We are done
return
# Update to the new mapping
rme = RotMatEuler(np.zeros(3), new_axes, new_extrinsic)
for key in det_keys:
tilts = iconfig['detectors'][key]['transform']['tilt']
tilt = np.array(_get_tilt_array(tilts))
rme.rmat = makeRotMatOfExpMap(tilt)
# Use np.ndarray.tolist() to convert back to native python types
_set_tilt_array(tilts, np.array(rme.angles).tolist()) | a24126a20453cf7a7c42a74e71618643215ad5c9 | 3,638,269 |
def _type_of_plot(orientation, n_var, i, j):
"""internal helper function for determining plot type in a corner plot
Parameters
----------
orientation : str
the orientation
options: 'lower left', 'lower right', 'upper left', 'upper right'
i, j : int
the row, column index
Returns
-------
plot type : str
'remove' : do not show this plot
'same' : the axes are the same
'compare' : compare the two different axes
"""
if orientation == "lower left":
if j > i:
return i, j, "remove"
elif j == i:
return i, j, "same"
else: # j < i
return i, j, "compare"
elif orientation == "lower right":
raise ValueError("not yet supported orientation")
# if i + j < n_var - 1:
# return i, j, 'remove'
# elif i + j == n_var - 1:
# return i, j, 'same'
# else: # j < i
# return i, j, 'compare'
elif orientation == "upper left":
raise ValueError("not yet supported orientation")
# if i + j < n_var - 1:
# return i, j, 'compare'
# elif i + j == n_var - 1:
# return i, j, 'same'
# else: # j < i
# return i, j, 'remove'
elif orientation == "upper right":
raise ValueError("not yet supported orientation")
# if j < i:
# return i, j, 'remove'
# elif j == i:
# return i, j, 'same'
# else: # j < i
# return i, j, 'compare'
else:
raise ValueError("not supported orientation") | 9629af21f1995ccd1b582d4f9a7b1ecf2c621c84 | 3,638,270 |
import numpy as _np
def t2_function(t, M_0, T2, p):
"""Calculate stretched or un-stretched (p=1) exponential T2 curve
.. math::
        f(t) = M_{0} e^{-2 (t/T_{2})^{p}}
Args:
t (array): time series
        M_0 (float): initial amplitude, see equation
        T2 (float): T2 value
p (float): see equation
Returns:
array: T2 curve
"""
return M_0 * _np.exp(-2.0 * (t / T2) ** p) | be4dabf4436832ca3dde9289610070ad41a3632b | 3,638,271 |
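A quick numeric check of the formula above with p=1, i.e. a plain mono-exponential decay.
import numpy as np

t = np.linspace(0.0, 0.05, num=6)
print(t2_function(t, M_0=1.0, T2=0.01, p=1.0))   # decays from 1.0 as exp(-2 * t / 0.01)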
import numpy as np
def ry(phi):
"""Returns the rotational matrix for an angle phi around the y-axis
"""
if type(phi) == np.ndarray:
m11 = np.cos(phi)
m12 = np.full(len(phi), 0)
m13 = np.sin(phi)
m22 = np.full(len(phi), 1)
m1 = np.stack((m11, m12, m13), axis=0)
m2 = np.stack((m12, m22, m12), axis=0)
m3 = np.stack((-m13, m12, m11), axis=0)
y_rot_mat = np.stack((m1, m2, m3), axis=0)
else:
y_rot_mat = np.array(([np.cos(phi), 0, np.sin(phi)],
[0, 1, 0],
[-np.sin(phi), 0, np.cos(phi)]))
return y_rot_mat | 06a22e478a0912ac3aeba53ba5b565690b94e652 | 3,638,272 |
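Two quick checks of the rotation helper above, one with a scalar angle and one with an angle array.
import numpy as np

print(ry(np.pi / 2) @ np.array([0.0, 0.0, 1.0]))   # ~[1. 0. 0.]: +z rotates onto +x
print(ry(np.array([0.0, np.pi])).shape)            # (3, 3, 2): one matrix per angle, stacked on the last axis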
def hashed_embedding_lookup_sparse(params,
sparse_values,
dimension,
combiner="mean",
default_value=None,
name=None):
"""Looks up embeddings of a sparse feature using parameter hashing.
See `tf.contrib.layers.hashed_embedding_lookup` for embedding with hashing.
Args:
params: A `Tensor` or `list` of `Tensors`.
Each tensor must be of rank 1 with fully-defined shape.
sparse_values: A 2-D `SparseTensor` containing the values to be embedded.
Some rows may be empty.
dimension: Embedding dimension
combiner: A string specifying how to combine embedding results for each
entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean"
the default.
default_value: The value to use for an entry with no features.
name: An optional name for this op.
Returns:
Dense tensor with shape [N, dimension] with N the number of rows in
sparse_values.
Raises:
TypeError: If sparse_values is not a SparseTensor.
ValueError: If combiner is not one of {"mean", "sqrtn", "sum"}.
"""
if not isinstance(params, list):
params = [params]
if not isinstance(sparse_values, ops.SparseTensor):
raise TypeError("sparse_values must be SparseTensor")
with ops.op_scope(params + [sparse_values], name,
"hashed_sparse_embedding_lookup") as scope:
# Fill in the empty rows.
if default_value is None:
# Random default values to reduce the risk of collision.
if sparse_values.dtype == dtypes.string:
default_value = "6ZxWzWOHxZ"
else:
default_value = 1288896567
sparse_values, _ = sparse_ops.sparse_fill_empty_rows(
sparse_values, default_value)
segment_ids = sparse_values.indices[:, 0]
if segment_ids.dtype != dtypes.int32:
segment_ids = math_ops.cast(segment_ids, dtypes.int32)
values = sparse_values.values
values, idx = array_ops.unique(values)
embeddings = hashed_embedding_lookup(params, values, dimension)
if combiner == "sum":
embeddings = math_ops.sparse_segment_sum(embeddings, idx, segment_ids,
name=scope)
elif combiner == "mean":
embeddings = math_ops.sparse_segment_mean(embeddings, idx, segment_ids,
name=scope)
elif combiner == "sqrtn":
embeddings = math_ops.sparse_segment_sqrt_n(embeddings, idx, segment_ids,
name=scope)
else:
raise ValueError("Combiner must be one of 'mean', 'sqrtn' or 'sum'.")
return embeddings | e7b4e803d04336e1d0a88d4051473b895a422f08 | 3,638,273 |
def DelfFeaturePostProcessing(boxes, descriptors, use_pca, pca_parameters=None):
"""Extract DELF features from input image.
Args:
boxes: [N, 4] float array which denotes the selected receptive box. N is
the number of final feature points which pass through keypoint selection
and NMS steps.
descriptors: [N, input_dim] float array.
use_pca: Whether to use PCA.
pca_parameters: Only used if `use_pca` is True. Dict containing PCA
parameter tensors, with keys 'mean', 'matrix', 'dim', 'use_whitening',
'variances'.
Returns:
locations: [N, 2] float array which denotes the selected keypoint
locations.
final_descriptors: [N, output_dim] float array with DELF descriptors after
normalization and (possibly) PCA/whitening.
"""
# Get center of descriptor boxes, corresponding to feature locations.
locations = CalculateKeypointCenters(boxes)
final_descriptors = PostProcessDescriptors(descriptors, use_pca,
pca_parameters)
return locations, final_descriptors | dbd55fa19085179fae3f6695c3fb529666c4550d | 3,638,274 |
def render_field(field, **kwargs):
"""Render a field to a Bootstrap layout."""
renderer_cls = get_field_renderer(**kwargs)
return renderer_cls(field, **kwargs).render() | 35a5586991072ba4772df48f5b2b649b1c2d62fd | 3,638,275 |
def bisection(a, b, poly, tolerance):
"""
Assume that poly(a) <= 0 and poly(b) >= 0.
Modify a and b so that abs(b-a) < tolerance and poly(b) >= 0 and poly(a) <= 0.
Return (a+b)/2
:param a: poly(a) <= 0
:param b: poly(b) >= 0
:param poly: polynomial coefficients, low order first
:param tolerance: greater than 0
:return: an approximate root of the polynomial
"""
if evaluate(a, poly) > 0:
raise Exception("poly(a) must be <= 0")
if evaluate(b,poly) < 0:
raise Exception("poly(b) must be >= 0")
mid = (a+b) / 2
if abs(b-a) <= tolerance:
return mid
else:
val = evaluate(mid,poly)
if val <= 0:
return bisection(mid, b, poly, tolerance)
else:
return bisection(a, mid, poly, tolerance) | 9ff1961a95a63af587c9469dd2f987657f1661a9 | 3,638,276 |
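A worked example for the bisection routine above. The evaluate helper it calls is not part of the row, so the stand-in below assumes its contract is plain polynomial evaluation with low-order coefficients first.
def evaluate(x, poly):
    # Horner evaluation, low-order coefficients first (assumed contract of the missing helper)
    result = 0.0
    for c in reversed(poly):
        result = result * x + c
    return result

print(bisection(0.0, 2.0, [-2.0, 0.0, 1.0], 1e-6))   # x**2 - 2 on [0, 2]: returns ~1.41421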
def decrypt_message(key, message):
""" returns the decrypted message """
return translate_message(key, message, 'decrypt') | 74b590d493b21928880e43e5f8ae55acd8265bb2 | 3,638,277 |
def IOU(a_wh, b_wh):
"""
Intersection over Union
Args:
a_wh: (width, height) of box A
b_wh: (width, height) of box B
Returns float.
"""
aw, ah = a_wh
bw, bh = b_wh
I = min(aw, bw) * min(ah, bh)
area_a = aw * ah
area_b = bw * bh
U = area_a + area_b - I
return I / U | 92580147eac219d77e6c8a38875c5ee809783790 | 3,638,278 |
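A quick numeric check of the IOU helper above.
print(IOU((4, 4), (2, 8)))   # intersection 2*4=8, union 16+16-8=24, so 0.333...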
import base64
import cv2
import numpy as np
def decode_image(img_b64):
"""Decode image from base64.
https://jdhao.github.io/2020/03/17/base64_opencv_pil_image_conversion/
"""
img_bytes = base64.b64decode(img_b64)
im_arr = np.frombuffer(img_bytes, dtype=np.uint8)
img = cv2.imdecode(im_arr, flags=cv2.IMREAD_COLOR)
img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
return img | 22547d43fe1a20032ee095f3fe16d5550a4f08c8 | 3,638,279 |
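A round-trip sketch for decode_image: encode a tiny array to PNG, base64 it, and decode it back. The helper returns RGB, so channel order is flipped relative to OpenCV's native BGR.
import base64
import cv2
import numpy as np

ok, buf = cv2.imencode(".png", np.zeros((2, 2, 3), dtype=np.uint8))
img_b64 = base64.b64encode(buf.tobytes())
print(decode_image(img_b64).shape)   # (2, 2, 3)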
from datetime import datetime
def date_from_string(date_str, format_str):
"""
returns a date object by a string
"""
return datetime.strptime(date_str, format_str).date() | 7ba2fa5652264c62e2a6711210a39613cf565e37 | 3,638,280 |
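A one-line usage sketch.
print(date_from_string("2021-03-17", "%Y-%m-%d"))   # 2021-03-17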
import re
def fix_sensor_name(name):
"""Cleanup sensor name, returns str."""
    name = re.sub(r'^(\w+)-(\w+)-(\w+)', r'\1 (\2 \3)', name, flags=re.IGNORECASE)
name = name.title()
name = name.replace('Acpi', 'ACPI')
name = name.replace('ACPItz', 'ACPI TZ')
name = name.replace('Coretemp', 'CoreTemp')
name = name.replace('Cpu', 'CPU')
name = name.replace('Id ', 'ID ')
name = name.replace('Isa ', 'ISA ')
name = name.replace('Pci ', 'PCI ')
name = name.replace('Smc', 'SMC')
    name = re.sub(r'(\D+)(\d+)', r'\1 \2', name, flags=re.IGNORECASE)
    name = re.sub(r'^K (\d+)Temp', r'AMD K\1 Temps', name, flags=re.IGNORECASE)
    name = re.sub(r'T(ccd\s+\d+|ctl|die)', r'CPU (T\1)', name, flags=re.IGNORECASE)
name = re.sub(r'\s+', ' ', name)
return name | 6a346ece5f03c60a2b5d23d5a66c52735aef2939 | 3,638,281 |
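A sample cleanup with a typical lm-sensors chip name.
print(fix_sensor_name("coretemp-isa-0000"))   # CoreTemp (ISA 0000)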
import numpy as np
def get_relevant_coordinates():
"""Returns a numpy ndarray specifying the pixel a lidar ray hits when shot
through the near plane."""
coords_and_angles = np.genfromtxt('coords_and_angles.csv', delimiter=',')
return np.hsplit(coords_and_angles,2) | ad814528122777c99aab13652dfb708282993374 | 3,638,282 |
def _expand_host_port_user(lst):
"""
Input: list containing hostnames, (host, port)-tuples or (host, port, user)-tuples.
Output: list of (host, port, user)-tuples.
"""
def expand(v):
        if isinstance(v, str):
return (v, None, None)
elif len(v) == 1:
return (v[0], None, None)
elif len(v) == 2:
return (v[0], v[1], None)
return v
return [expand(x) for x in lst] | 82cfc80f916ef739fc50d8d79a5e19b4aa4a8fa6 | 3,638,283 |
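A usage sketch with made-up host entries.
print(_expand_host_port_user(["db1", ("db2", 5433), ("db3", 5433, "admin")]))
# [('db1', None, None), ('db2', 5433, None), ('db3', 5433, 'admin')]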
import numpy as np
def noise(line, wl=11):
""" Return the noise after smoothing. """
signal = smooth_and_trim(line, window_len=wl)
noise = np.sqrt((line - signal) ** 2)
return noise | 009f05d1eeabf4d0218d78b6c41ff4877f66a5f5 | 3,638,284 |
from typing import Optional
from typing import Iterator
from typing import Tuple
import itertools
import torch
import torch.distributed as dist
import torchmetrics as metrics
from tqdm import tqdm
def _evaluate(
limit_batches: Optional[int],
train_pipeline: TrainPipelineSparseDist,
iterator: Iterator[Batch],
next_iterator: Iterator[Batch],
stage: str,
) -> Tuple[float, float]:
"""
Evaluates model. Computes and prints metrics including AUROC and Accuracy. Helper
function for train_val_test.
Args:
limit_batches (Optional[int]): number of batches.
train_pipeline (TrainPipelineSparseDist): pipelined model.
iterator (Iterator[Batch]): Iterator used for val/test batches.
next_iterator (Iterator[Batch]): Iterator used for the next phase (either train
if there are more epochs to train on or test if all epochs are complete).
Used to queue up the next TRAIN_PIPELINE_STAGES - 1 batches before
train_val_test switches to the next phase. This is done so that when the
next phase starts, the first output train_pipeline generates an output for
is the 1st batch for that phase.
stage (str): "val" or "test".
Returns:
Tuple[float, float]: auroc and accuracy result
"""
model = train_pipeline._model
model.eval()
device = train_pipeline._device
if limit_batches is not None:
limit_batches -= TRAIN_PIPELINE_STAGES - 1
# Because TrainPipelineSparseDist buffer batches internally, we load in
# TRAIN_PIPELINE_STAGES - 1 batches from the next_iterator into the buffers so that
# when train_val_test switches to the next phase, train_pipeline will start
# producing results for the TRAIN_PIPELINE_STAGES - 1 buffered batches (as opposed
# to the last TRAIN_PIPELINE_STAGES - 1 batches from iterator).
combined_iterator = itertools.chain(
iterator
if limit_batches is None
else itertools.islice(iterator, limit_batches),
itertools.islice(next_iterator, TRAIN_PIPELINE_STAGES - 1),
)
auroc = metrics.AUROC(compute_on_step=False).to(device)
accuracy = metrics.Accuracy(compute_on_step=False).to(device)
# Infinite iterator instead of while-loop to leverage tqdm progress bar.
for _ in tqdm(iter(int, 1), desc=f"Evaluating {stage} set"):
try:
_loss, logits, labels = train_pipeline.progress(combined_iterator)
preds = torch.sigmoid(logits)
auroc(preds, labels)
accuracy(preds, labels)
except StopIteration:
break
auroc_result = auroc.compute().item()
accuracy_result = accuracy.compute().item()
if dist.get_rank() == 0:
print(f"AUROC over {stage} set: {auroc_result}.")
print(f"Accuracy over {stage} set: {accuracy_result}.")
return auroc_result, accuracy_result | f0550b60c3d53192acb9ddd2d5057ade118fa79d | 3,638,285 |
import re
def check_pre_release(tag_name):
"""
Check the given tag to determine if it is a release tag, that is, whether it
is of the form rX.Y.Z. Tags that do not match (e.g., because they are
suffixed with someting like -beta# or -rc#) are considered pre-release tags.
Note that this assumes that the tag name has been validated to ensure that
it starts with something like rX.Y.Z and nothing else.
"""
    release_re = re.compile('^r[0-9]+\\.[0-9]+\\.[0-9]+$')
return False if release_re.match(tag_name) else True | 8e24a0a61bfa6fe84e936f004b4228467d724616 | 3,638,286 |
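Two sample tags, assuming the fully anchored pattern above.
print(check_pre_release("r1.2.3"))       # False: a plain release tag
print(check_pre_release("r1.2.3-rc1"))   # True: the -rc1 suffix marks it as a pre-release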
def _get_target_connection_details(target_connection_string):
"""
Returns a tuple with the raw connection details for the target machine extracted from the connection string provided
in the application arguments. It is a specialized parser of that string.
:param target_connection_string: the connection string provided in the arguments for the application.
:return: A tuple in the form of (user, password, host, port) if a password is present in the connection string or
(user, host, port) if a password is not present
"""
password = None
connection_string_format_error = 'Invalid connection string provided. Expected: user[/password]@host[:port]'
if '@' not in target_connection_string:
raise TypeError(connection_string_format_error)
connection_string_parts = target_connection_string.split('@')
if len(connection_string_parts) != 2:
        raise TypeError(connection_string_format_error)
authentication_part = connection_string_parts[0]
target_part = connection_string_parts[1]
if '/' in authentication_part:
auth_parts = authentication_part.split('/')
if len(auth_parts) != 2:
raise TypeError(connection_string_format_error)
user, password = auth_parts
else:
user = authentication_part
if ':' in target_part:
conn_parts = target_part.split(':')
if len(conn_parts) != 2:
raise TypeError(connection_string_format_error)
host, port = conn_parts
try:
port = int(port)
except ValueError:
raise TypeError(connection_string_format_error)
else:
host = target_part
port = 22
if not len(user) or not len(host):
raise TypeError(connection_string_format_error)
if password:
return user, password, host, int(port)
else:
return user, host, int(port) | 5e6ee870c0e196f54950f26ee6e551476688dce9 | 3,638,287 |
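A usage sketch with made-up connection strings.
print(_get_target_connection_details("deploy/s3cr3t@build-host:2222"))
# ('deploy', 's3cr3t', 'build-host', 2222)
print(_get_target_connection_details("deploy@build-host"))
# ('deploy', 'build-host', 22)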
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Set up an Arlo IP sensor."""
arlo = hass.data.get(DATA_ARLO)
if not arlo:
return False
sensors = []
for sensor_type in config.get(CONF_MONITORED_CONDITIONS):
if sensor_type == 'total_cameras':
sensors.append(ArloSensor(hass,
SENSOR_TYPES[sensor_type][0],
arlo,
sensor_type))
else:
for camera in arlo.cameras:
name = '{0} {1}'.format(SENSOR_TYPES[sensor_type][0],
camera.name)
sensors.append(ArloSensor(hass, name, camera, sensor_type))
async_add_devices(sensors, True)
return True | 875ddac74d1e1d8dd10136214f8487d750094e61 | 3,638,288 |
from .tfr import _compute_tfr
def tfr_array_multitaper(epoch_data, sfreq, freqs, n_cycles=7.0,
zero_mean=True, time_bandwidth=None, use_fft=True,
decim=1, output='complex', n_jobs=1,
verbose=None):
"""Compute Time-Frequency Representation (TFR) using DPSS tapers.
Same computation as `~mne.time_frequency.tfr_multitaper`, but operates on
:class:`NumPy arrays <numpy.ndarray>` instead of `~mne.Epochs` objects.
Parameters
----------
epoch_data : array of shape (n_epochs, n_channels, n_times)
The epochs.
sfreq : float | int
Sampling frequency of the data.
freqs : array-like of float, shape (n_freqs,)
The frequencies.
n_cycles : float | array of float
Number of cycles in the wavelet. Fixed number or one per
frequency. Defaults to 7.0.
zero_mean : bool
If True, make sure the wavelets have a mean of zero. Defaults to True.
time_bandwidth : float
If None, will be set to 4.0 (3 tapers). Time x (Full) Bandwidth
product. The number of good tapers (low-bias) is chosen automatically
based on this to equal floor(time_bandwidth - 1). Defaults to None.
use_fft : bool
Use the FFT for convolutions or not. Defaults to True.
decim : int | slice
To reduce memory usage, decimation factor after time-frequency
decomposition. Defaults to 1.
If `int`, returns tfr[..., ::decim].
If `slice`, returns tfr[..., decim].
.. note::
Decimation may create aliasing artifacts, yet decimation
is done after the convolutions.
output : str, default 'complex'
* 'complex' : single trial complex.
* 'power' : single trial power.
* 'phase' : single trial phase.
* 'avg_power' : average of single trial power.
* 'itc' : inter-trial coherence.
* 'avg_power_itc' : average of single trial power and inter-trial
coherence across trials.
%(n_jobs)s
The number of epochs to process at the same time. The parallelization
is implemented across channels. Defaults to 1.
%(verbose)s
Returns
-------
out : array
Time frequency transform of epoch_data. If output is in ['complex',
'phase', 'power'], then shape of out is (n_epochs, n_chans, n_freqs,
n_times), else it is (n_chans, n_freqs, n_times). If output is
'avg_power_itc', the real values code for 'avg_power' and the
imaginary values code for the 'itc': out = avg_power + i * itc.
See Also
--------
mne.time_frequency.tfr_multitaper
mne.time_frequency.tfr_morlet
mne.time_frequency.tfr_array_morlet
mne.time_frequency.tfr_stockwell
mne.time_frequency.tfr_array_stockwell
Notes
-----
.. versionadded:: 0.14.0
"""
return _compute_tfr(epoch_data, freqs, sfreq=sfreq,
method='multitaper', n_cycles=n_cycles,
zero_mean=zero_mean, time_bandwidth=time_bandwidth,
use_fft=use_fft, decim=decim, output=output,
n_jobs=n_jobs, verbose=verbose) | 28a6f998fdaa9acde77a521b5e0c5c51a4709887 | 3,638,289 |
import logging
def card(id: int):
"""
Show the selected card data (by id).
"""
for card in cards["cards"]:
if card["id"] == id:
logging.info("card")
return card
logging.info("card")
return "Card not found." | 8a26ea6add0d3ebe539b8a3c0c5dcbf0a458e923 | 3,638,290 |
import torch.nn as nn
def build_norm_layer(cfg, num_features, postfix=""):
""" Build normalization layer
Args:
cfg (dict): cfg should contain:
type (str): identify norm layer type.
layer args: args needed to instantiate a norm layer.
requires_grad (bool): [optional] whether stop gradient updates
num_features (int): number of channels from input.
postfix (int, str): appended into norm abbreviation to
create named layer.
Returns:
name (str): abbreviation + postfix
layer (nn.Module): created norm layer
"""
norm_cfg = {
# format: layer_type: (abbreviation, module)
"BN": ("bn", nn.BatchNorm2d),
"BN1d": ("bn1d", nn.BatchNorm1d),
"GN": ("gn", nn.GroupNorm),
}
assert isinstance(cfg, dict) and "type" in cfg
cfg_ = cfg.copy()
layer_type = cfg_.pop("type")
if layer_type not in norm_cfg:
raise KeyError("Unrecognized norm type {}".format(layer_type))
else:
abbr, norm_layer = norm_cfg[layer_type]
if norm_layer is None:
raise NotImplementedError
assert isinstance(postfix, (int, str))
name = abbr + str(postfix)
requires_grad = cfg_.pop("requires_grad", True)
cfg_.setdefault("eps", 1e-5)
if layer_type != "GN":
layer = norm_layer(num_features, **cfg_)
# if layer_type == 'SyncBN':
# layer._specify_ddp_gpu_num(1)
else:
assert "num_groups" in cfg_
layer = norm_layer(num_channels=num_features, **cfg_)
for param in layer.parameters():
param.requires_grad = requires_grad
return name, layer | ef57209bfbd9ead48585ef478a0c74d74127f42f | 3,638,291 |
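A usage sketch for build_norm_layer with a GroupNorm config; the channel count and group count are arbitrary.
name, layer = build_norm_layer({"type": "GN", "num_groups": 4, "requires_grad": False}, 64, postfix=1)
print(name)    # gn1
print(layer)   # GroupNorm(4, 64, eps=1e-05, affine=True)
print(any(p.requires_grad for p in layer.parameters()))   # False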
import ROOT
from ostap.core.core import Ostap, ROOTCWD
from ostap.io.root_file import REOPEN
def _add_response_tree ( tree , *args ) :
"""Specific action to ROOT.TChain
"""
tdir = tree.GetDirectory()
with ROOTCWD () , REOPEN ( tdir ) as tfile :
tdir.cd()
sc = Ostap.TMVA.addResponse ( tree , *args )
if sc.isFailure() : logger.error ( 'Error from Ostap::TMVA::addResponse %s' % sc )
if tfile.IsWritable() :
tfile.Write( "" , ROOT.TFile.kOverwrite )
return sc , tdir.Get ( tree.GetName() )
else : logger.warning ( "Can't write TTree back to the file" )
return sc , tree | 9ad52c4d6962ea3de8beaebc1616887c4c054dd1 | 3,638,292 |
import numpy as np
def scalarProd(v,w):
    """ Product of the component sums of 2 vectors in 2-space.
        Params: A 2 tuple point (V)
                another 2 tuple point (W)
        returns: (V[0]+V[1]) * (W[0]+W[1]) as a numpy value
    """
    a = v[0] + v[1]
    b = w[0] + w[1]
    return np.array(a*b) | 604750efbef53dfb21468fcc7d4f41bd07af502d | 3,638,293
import pandas as pd
from tabulate import tabulate
def sample_summary(df, extra_values=None, params=SummaryParams()):
"""
Returns table showing statistical summary from the sample parameters:
mean, std, mode, hpdi.
Parameters
------------
df : Panda's dataframe
Contains parameter sample values: each column is a parameter.
extra_values : Panda's dataframe
Additional values to be shown for parameters. Indexes are
parameter names, and columns contain additional values to
be shown in summary.
Returns
-------
Panda's dataframe
Panda's dataframe containing the summary for all parameters.
str
text of the summary table
"""
rows = []
for column in df:
values = df[column].to_numpy()
mean = df[column].mean()
std = df[column].std()
mode = get_mode(df[column])
summary_values = [column, mean, std, mode]
for i, probability in enumerate(params.hpdis):
hpdi_value = hpdi(values, probability=probability)
if i == 0:
# For the first interval, calculate upper and
# lower uncertainties
uncert_plus = hpdi_value[1] - mode
uncert_minus = mode - hpdi_value[0]
summary_values.append(uncert_plus)
summary_values.append(uncert_minus)
summary_values.append(hpdi_value[0])
summary_values.append(hpdi_value[1])
if extra_values is not None:
# Add extra columns
summary_values += extra_values.loc[column].values.tolist()
rows.append(summary_values)
headers = ['Name', 'Mean', 'Std', 'Mode', '+', '-']
for hpdi_percent in params.hpdi_percent():
headers.append(f'{hpdi_percent}CI-')
headers.append(f'{hpdi_percent}CI+')
if extra_values is not None:
headers += extra_values.columns.values.tolist()
formats = [".2f"] * len(headers)
if 'N_Eff' in headers:
formats[headers.index('N_Eff')] = ".0f"
table = tabulate(rows, headers=headers, floatfmt=formats, tablefmt="pipe")
df_summary = pd.DataFrame(rows, columns=headers, index=df.columns.values)
df_summary.drop('Name', axis=1, inplace=True)
return df_summary, table | 0fcedfb54f7a72c3811f7cb4c5df559b4d313383 | 3,638,294 |
from .gnat import GNAT
def classFactory(iface): # pylint: disable=invalid-name
"""Load GNAT class from file GNAT.
:param iface: A QGIS interface instance.
:type iface: QgsInterface
"""
#
return GNAT(iface) | 54036f9fa18d901426d45771409a48ab803302ef | 3,638,295 |
from aiida.common.hashing import get_random_string
def get_quicksetup_password(ctx, param, value): # pylint: disable=unused-argument
"""Determine the password to be used as default for the Postgres connection in `verdi quicksetup`
If a value is explicitly passed, that value is returned. If there is no value, the current username in the context
will be scanned for in currently existing profiles. If it does, the corresponding password will be used. If no such
user already exists, a random password will be generated.
:param ctx: click context which should contain the contextual parameters
:return: the password
"""
if value is not None:
return value
username = ctx.params['db_username']
config = get_config()
for available_profile in config.profiles:
if available_profile.storage_config['database_username'] == username:
value = available_profile.storage_config['database_password']
break
else:
value = get_random_string(16)
return value | 6ec0a8548bc632bdf008ba3e8e8b8d2bfdd5244b | 3,638,296 |
from typing import List
def convert(day_input: List[str]) -> List[List[str]]:
"""Breaks down the input into a list of directions for each tile"""
def dirs(line: str) -> List[str]:
dirs, last_c = [], ''
for c in line:
if c in ['e', 'w']:
dirs.append(last_c + c)
last_c = ''
else:
last_c = c
return dirs
return [dirs(line) for line in day_input] | fd1d683e69dbff8411cecdaa184355f2311d3e8a | 3,638,297 |
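A quick check with two hex-tile paths written in the e/se/sw/w/nw/ne notation the parser expects.
print(convert(["esenee", "nwwswee"]))
# [['e', 'se', 'ne', 'e'], ['nw', 'w', 'sw', 'e', 'e']]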
import codecs
def read(filepath):
"""Read file content from provided filepath."""
with codecs.open(filepath, encoding='utf-8') as f:
return f.read() | bff53fbb9b1ebe85c6a1fa690d28d6b6bec71f84 | 3,638,298 |
import numpy as np
def trend_indicator(trend, style):
"""Get the trend indicator and corresponding color."""
if trend == 0.00042 or np.isnan(trend):
return '?', (0, 0, 0, 0)
arrows = ('→', '↗', '↑', '↓', '↘')
trend = min(max(trend, -1), 1) # limit the trend
trend_color = (1, 0, 0, trend * trend) if (trend > 0) != ("_up" in style) else (0, 1, 0, trend * trend)
return arrows[round(trend * 2)], trend_color | 009e95e45c3ba6f4e459f024c09511a7952053e4 | 3,638,299 |
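Two sample calls; the trend values and style names are arbitrary.
print(trend_indicator(0.5, "temperature"))        # ('↗', (1, 0, 0, 0.25))
print(trend_indicator(float("nan"), "score_up"))  # ('?', (0, 0, 0, 0)): unknown trend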