content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def get_boxes_info(
        boxes_thinned,
        retr_mode,
        approx_method=cv2.CHAIN_APPROX_SIMPLE):
    """
    Retrieve contours and return a list of (area, bounding-box) entries.

    Parameters
    ----------
    boxes_thinned : numpy.array
        Array containing photographic information of the lines detected
        in a document
    retr_mode : cv::RetrievalModes
        OpenCV contour retrieval mode (see 'https://docs.opencv.org/3.4/d3/dc0/group__imgproc__shape.html#ga819779b9857cc2f8601e6526a3a5bc71')
    approx_method : cv::ContourApproximationModes, optional
        Chain approximation method for contour detection
        (default cv2.CHAIN_APPROX_SIMPLE)

    Returns
    -------
    box_info : list
        List of (area, [x, y, w, h]) tuples describing the area and bounding
        rectangle of each contour (used for sorting algorithms)
    """
    # BUG FIX: approx_method was previously ignored and
    # cv2.CHAIN_APPROX_SIMPLE was always passed to findContours.
    found = cv2.findContours(boxes_thinned, retr_mode, approx_method)
    # findContours returns (contours, hierarchy) in OpenCV 2.x/4.x but
    # (image, contours, hierarchy) in 3.x; pick the contours robustly.
    contours = found[0] if len(found) == 2 else found[1]
    box_info = []
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        # Area of the axis-aligned bounding rectangle, not the contour itself.
        box_info.append((w * h, [x, y, w, h]))
    return box_info
def getMaxLen(fastaFilePath):
    """
    Return the length of the longest sequence in a fasta file.

    :param fastaFilePath: path to the fasta file, passed through to
        fasta.getSequenceToBpDict
    :return: int, the maximum sequence length, or 0 if the file has no
        sequences (matches the previous behaviour of returning the initial 0)
    """
    # BUG FIX: .itervalues() is Python-2-only and raises AttributeError on
    # Python 3; .values() works on both the dict API and is iterated lazily
    # here via a generator expression.
    return max(
        (int(val) for val in fasta.getSequenceToBpDict(fastaFilePath).values()),
        default=0)
import scipy
def align_face(filepath, output_size=1024, transform_size=4096, enable_padding=True):
    """
    Align and crop a face from an image using dlib's 68-point landmarks
    (FFHQ-style alignment).

    :param filepath: str, path to the input image
    :param output_size: int, side length of the returned square image
    :param transform_size: int -- NOTE(review): overwritten with
        ``output_size`` below, so the passed value is ignored; confirm intent
    :param enable_padding: bool -- NOTE(review): unconditionally reset to
        True below, so the passed value is ignored; confirm intent
    :return: PIL Image
    """
    # Download/verify the landmark model, then locate the 68 face landmarks.
    ensure_checkpoint_exists("models/dlibshape_predictor_68_face_landmarks.dat")
    predictor = dlib.shape_predictor("models/dlibshape_predictor_68_face_landmarks.dat")
    lm = get_landmark(filepath, predictor)
    # Landmark slices follow the standard dlib 68-point layout.
    lm_chin = lm[0: 17]  # left-right
    lm_eyebrow_left = lm[17: 22]  # left-right
    lm_eyebrow_right = lm[22: 27]  # left-right
    lm_nose = lm[27: 31]  # top-down
    lm_nostrils = lm[31: 36]  # top-down
    lm_eye_left = lm[36: 42]  # left-clockwise
    lm_eye_right = lm[42: 48]  # left-clockwise
    lm_mouth_outer = lm[48: 60]  # left-clockwise
    lm_mouth_inner = lm[60: 68]  # left-clockwise
    # Calculate auxiliary vectors (eye centers, mouth center, and spans).
    eye_left = np.mean(lm_eye_left, axis=0)
    eye_right = np.mean(lm_eye_right, axis=0)
    eye_avg = (eye_left + eye_right) * 0.5
    eye_to_eye = eye_right - eye_left
    mouth_left = lm_mouth_outer[0]
    mouth_right = lm_mouth_outer[6]
    mouth_avg = (mouth_left + mouth_right) * 0.5
    eye_to_mouth = mouth_avg - eye_avg
    # Choose oriented crop rectangle: x is the horizontal crop axis scaled by
    # the eye/mouth distances; quad holds the 4 rectangle corners.
    x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
    x /= np.hypot(*x)
    x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
    y = np.flipud(x) * [-1, 1]
    c = eye_avg + eye_to_mouth * 0.1
    quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
    qsize = np.hypot(*x) * 2
    # read image
    img = Image.open(filepath)
    # NOTE(review): these two assignments silently discard the
    # caller-supplied transform_size and enable_padding arguments.
    transform_size = output_size
    enable_padding = True
    # Shrink: pre-downscale large images so later steps work near output_size.
    shrink = int(np.floor(qsize / output_size * 0.5))
    if shrink > 1:
        rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
        img = img.resize(rsize, Image.ANTIALIAS)
        quad /= shrink
        qsize /= shrink
    # Crop to the quad's bounding box plus a safety border.
    border = max(int(np.rint(qsize * 0.1)), 3)
    crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
            int(np.ceil(max(quad[:, 1]))))
    crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]),
            min(crop[3] + border, img.size[1]))
    if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
        img = img.crop(crop)
        quad -= crop[0:2]
    # Pad: if the quad extends past the image, reflect-pad and then blur/fade
    # the padded region so the transform below has plausible data to sample.
    pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
           int(np.ceil(max(quad[:, 1]))))
    pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0),
           max(pad[3] - img.size[1] + border, 0))
    if enable_padding and max(pad) > border - 4:
        pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
        img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
        h, w, _ = img.shape
        y, x, _ = np.ogrid[:h, :w, :1]
        # mask is 1 at the padded edges and fades to 0 toward the interior.
        mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]),
                          1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3]))
        blur = qsize * 0.02
        img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
        img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)
        img = Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB')
        quad += pad[:2]
    # Transform: map the oriented quad onto an axis-aligned square.
    img = img.transform((transform_size, transform_size), Image.QUAD, (quad + 0.5).flatten(), Image.BILINEAR)
    if output_size < transform_size:
        img = img.resize((output_size, output_size), Image.ANTIALIAS)
    # Return aligned image.
    # (the trailing "| ... |" text below is extraction residue from the dump)
    return img | 507c59ba35e62cff4c07a0caf7d75e654d56aca1 | 3,633,002 |
def build_model(lr_source, scaling_factor=3, hr_target=None):
    """
    Build the ESPCN super-resolution model graph (TensorFlow 1.x).

    Parameters
    ----------
    lr_source:
        Source image batch tensor to be super resolved.
    scaling_factor:
        Factor of up-scaling; the depth of the final layer is
        3 * scaling_factor * scaling_factor (sub-pixel layer).
    hr_target:
        Target image batch used as training labels, already in sub-pixel
        convolved shape. If None, a partial (inference-only) model is built
        and no loss/optimizer tensors are created.

    Returns
    -------
    dict with keys 'lr_source' and 'sr_result'; when hr_target is given,
    also 'hr_target', 'step', 'loss', 'optimizer' and 'learning_rate'.
    """
    model = {
        'lr_source': lr_source,
    }
    initializer = tf.truncated_normal_initializer(stddev=0.02)
    # NOTE: arXiv:1609.05158v2, 3.2
    #       for the espcn, we set l = 3, (f1, n1) = (5, 64), (f2, n2) = (3, 32)
    #       and f3 = 3 in our evaluations.
    #
    # NOTE: arXiv:1609.05158v2, 3.2
    #       we choose tanh instead of relu as the activation function for the
    #       final model motivated by our experimental results.
    tensors = tf.layers.conv2d(
        lr_source,
        filters=64,
        kernel_size=5,
        strides=1,
        padding='same',
        activation=tf.nn.tanh,
        kernel_initializer=initializer,
        name='f1')
    tensors = tf.layers.conv2d(
        tensors,
        filters=32,
        kernel_size=3,
        strides=1,
        padding='same',
        activation=tf.nn.tanh,
        kernel_initializer=initializer,
        name='f2')
    # NOTE: arXiv:1609.05158v2, 2.2
    #       note that we do not apply nonlinearity to the outputs of the
    #       convolution at the last layer.
    # NOTE: fit depth to sub-pixel convolution layer
    sr_result = tf.layers.conv2d(
        tensors,
        filters=3 * (scaling_factor ** 2),
        kernel_size=3,
        strides=1,
        padding='same',
        activation=None,
        kernel_initializer=initializer,
        name='f3')
    model['sr_result'] = sr_result
    if hr_target is None:
        # NOTE: we do not want to train this model (freeze it to gain better
        #       performance)
        return model
    model['hr_target'] = hr_target
    # NOTE: arXiv:1609.05158v2, 2.2
    #       and calculate the pixel-wise mean squared error (MSE) of the
    #       reconstruction as an objective function to train the network.
    loss = tf.losses.mean_squared_error(
        sr_result, hr_target, reduction=tf.losses.Reduction.MEAN)
    # NOTE: arXiv:1609.05158v2, 3.2
    #       initial learning rate is set to 0.01 and final learning rate is
    #       set to 0.0001 and updated gradually when the improvement of the
    #       cost function is smaller than a threshold mu.
    # The learning rate is fed at run time so the caller can implement the
    # decay schedule described above.
    learning_rate = tf.placeholder(shape=[], dtype=tf.float32)
    step = tf.train.get_or_create_global_step()
    optimizer = tf.train \
        .AdamOptimizer(learning_rate=learning_rate) \
        .minimize(loss, global_step=step)
    model['step'] = step
    model['loss'] = loss
    model['optimizer'] = optimizer
    model['learning_rate'] = learning_rate
    # (the trailing "| ... |" text below is extraction residue from the dump)
    return model | 8383684eb4ca69c6c8b517fe50914dde902320dd | 3,633,003 |
def sum_up_validation_dataset(dataset, batch_size, repeat=True,
                              number_of_repetitions=0):
    """Configure how the validation dataset behaves during training.

    Applied to the validation dataset just before training starts; controls
    the batch size and whether/how often the data is cycled.

    Args:
        dataset (tensorflow dataset): The dataset to configure.
        batch_size (int): Number of images per validation step.
        repeat (boolean): If False the validation data is consumed once.
            If True it is cycled either endlessly or
            number_of_repetitions times.
        number_of_repetitions (int): How often the validation data is
            cycled; 0 (or less) means endlessly when repeat is True.

    Returns:
        The tensorflow dataset with the behaviour described above applied.
    """
    batched = dataset.batch(batch_size)
    if not repeat:
        return batched
    if number_of_repetitions > 0:
        return batched.repeat(number_of_repetitions)
    return batched.repeat()
def moist_lift_parcel(parcel_pressure, parcel_temperature, to_pressure, step=1):
    """
    Recursively determine the temperature of a parcel lifted from one pressure to
    another, assuming moist pseudoadiabatic processes.

    Arguments:
        parcel_pressure: The starting parcel pressure [hPa].
        parcel_temperature: The starting parcel temperature [K].
        to_pressure: The pressure to lift to [hPa].
        step: Maximum pressure increment [hPa] applied per recursion step
            (default 1). Smaller steps are more accurate but recurse deeper.

    Returns: The temperature of the lifted parcel.

    Note: the arguments are assumed to be xarray-like (they must support
    .where() and .attrs) -- TODO confirm against callers.
    """
    pressure_diff = to_pressure - parcel_pressure
    if np.all(pressure_diff == 0):
        # No pressure differences; nothing to do.
        return parcel_temperature
    # Otherwise, push the parcels by at most 'step' hPa towards the required
    # pressure, preserving the sign of each element's remaining difference.
    pressure_diff = pressure_diff.where(np.abs(pressure_diff) < step,
                                        other=step * np.sign(pressure_diff))
    # Integrate temperature along the moist pseudoadiabat for this increment.
    lapse_rate = moist_lapse_rate(pressure=parcel_pressure,
                                  temperature=parcel_temperature)
    new_temperature = parcel_temperature + (pressure_diff * lapse_rate)
    new_temperature.attrs['units'] = 'K'
    new_pressure = parcel_pressure + pressure_diff
    new_pressure.attrs['units'] = 'hPa'
    # BUG FIX: `step` was previously not forwarded, so any caller-supplied
    # step silently reverted to 1 after the first recursion level.
    return moist_lift_parcel(parcel_pressure=new_pressure,
                             parcel_temperature=new_temperature,
                             to_pressure=to_pressure,
                             step=step)
def has_rule(table, chain, rule_d, ipv6=False):
    """Check whether the given rule exists in an iptables chain.

    Returns True if the encoded rule is present in the chain, else False.
    """
    existing_rules = _iptc_getchain(table, chain, ipv6).rules
    return encode_iptc_rule(rule_d, ipv6) in existing_rules
def get_pings_measurements(ip):
"""
Performs a ping measurement to an IP or returns cached result
:param ip: string ip address
:return: dictionary {src: rtt} where src is the string ip address of the measurement source, and rtt is the
minimum rtt recorded for a ping from that src to the ip address given to the function.
"""
# suppress https warnings
requests.packages.urllib3.disable_warnings()
# if it's private, ignore it
if is_private_ip(ip):
return {}
min_pings = get_existing_pings(ip)
if min_pings is not None:
return min_pings
min_pings = {}
pings_to_perfom = []
vantage_points = get_fwd_nodes()
for vantage_point in vantage_points:
pings_to_perfom.append({'src': vantage_point,
'dst': ip,
'count': configs.pings.PING_COUNT})
pings_request_obj = {'pings': pings_to_perfom}
response = requests.post(configs.pings.PING_URL, headers=configs.pings.KEY,
data=json.dumps(pings_request_obj))
if response.status_code != 200:
#print "Initial Status", response.text, "for:", ip
return min_pings
try:
response_data = response.json()
except:
print "Initial Exception for:",ip
traceback.print_exc()
return min_pings
result_uri = response_data['results']
save_results_uri(ip,result_uri)
tries = 0
sleep_time_min = configs.pings.SLEEP_TIME * 0.5
while tries < configs.pings.MAX_TRIES:
sleep_time_in_iteration = sleep_time_min + random.random() * configs.pings.SLEEP_TIME
time.sleep(sleep_time_in_iteration)
tries += 1
try:
response = requests.get(result_uri, headers=configs.pings.KEY)
if response.status_code != 200:
print "Status", response.status_code, "for:", ip
#print "Status", response.text, "for:", ip, result_uri
# add an extra try for bad code
return
#print response.json()
if 'pings' in response.text:
response_data = response.json()
for ping in response_data['pings']:
src = ip_int_to_string(ping['src'])
dst_int = ping['dst']
#print src,ping['src']
if src=='':
continue
if 'responses' in ping:
pings_responses_from_dst = []
for ping_response in ping['responses']:
if 'from' in ping_response and ping_response['from']==dst_int and 'rtt' in ping_response:
pings_responses_from_dst.append(ping_response['rtt']/1000.00) #rtt is in microsecs
if len(pings_responses_from_dst) > 0:
min_ping_measured = min(pings_responses_from_dst)
min_pings[src] = min_ping_measured
response_data["result_url"] = result_uri
save_pings(ip, min_pings,response_data)
print "Saved:", ip
return min_pings
except:
print "Exception for:",ip
traceback.print_exc()
return min_pings
print "Max tries for:", ip
return min_pings | 2c703be4a6e585690b90a846580ea56ea2d712a0 | 3,633,007 |
def get_timestamp(date_time):
    """Return the Unix timestamp of an ISO8601 date/datetime in seconds.

    A datetime string without an offset is interpreted as UTC.

    :param date_time: the datetime string to return as timestamp
    :type date_time: str
    :returns: the timestamp corresponding to the date_time string in seconds
    :rtype: float
    """
    parsed = isoparse(date_time)
    if not parsed.tzinfo:
        # Naive datetime: pin it to UTC before converting.
        parsed = parsed.replace(tzinfo=UTC)
    return parsed.timestamp()
def ssd_arg_scope(weight_decay=0.0005, data_format='NHWC'):
    """Defines the SSD network arg scope.

    Args:
      weight_decay: The l2 regularization coefficient.
      data_format: Image batch layout string (e.g. 'NHWC'), forwarded to the
        conv/pool layers and the custom layers.
    Returns:
      An arg_scope.
    """
    # Conv/FC layers: ReLU activation, L2 weight decay, Xavier init.
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        weights_initializer=tf.contrib.layers.xavier_initializer(),
                        biases_initializer=tf.zeros_initializer()):
        with slim.arg_scope([slim.conv2d, slim.max_pool2d],
                            padding='SAME',
                            data_format=data_format):
            # Custom layers also need data_format to index channels correctly.
            with slim.arg_scope([custom_layers.pad2d,
                                 custom_layers.l2_normalization,
                                 custom_layers.channel_to_last],
                                data_format=data_format) as sc:
                # (trailing "| ... |" is extraction residue from the dump)
                return sc | 8b466f120f0ac8cb385a8a26d31ab7d3c54c17c3 | 3,633,009 |
import subprocess
def fntDoOneLine(mysLine, mynProc, mynLine):
    """Execute one single-line command.

    Input: single line of command.
    Output: tuple of the (Popen PIPE return code, command return code, list
     of output lines as strings.
    Contributes line(s) to be in log file.
    Input lines and the first line of output blocks have timestamps;
     other lines in output blocks are indented with spaces.
    """
    sTimeBegin = fnsGetTimestamp()
    # Run the command through the shell; stderr is discarded via DEVNULL.
    proc = (subprocess.Popen(mysLine
        , stdout=subprocess.PIPE
        , close_fds=True                # The default anyway, I think.
        , stderr=subprocess.DEVNULL
        , universal_newlines=True
        , shell=True)
        )
    (sProcOut, sProcErr) = proc.communicate()
    proc.stdout.close()
    # NOTE(review): with stderr=DEVNULL, communicate() yields sProcErr=None,
    # so this always rebinds it to "" and the sProcErr append below is a
    # no-op -- confirm whether stderr was meant to be captured.
    if not sProcErr: sProcErr = ""
    sTimeEnd = fnsGetTimestamp()
    # Format lines for output by timestamping or indenting each line.
    sOut = ("-"*len(sTimeBegin) + "\n"
        + sTimeBegin + " " + "$ " + mysLine + "\n")
    lTmpOut1 = sProcOut.rstrip().split("\n")
    # Timestamp only the first output line; indent the rest (fnsStampLine).
    lTmpOut2 = [fnsStampLine(sTimeEnd, sLine, (i==0))
                for i,sLine in enumerate(lTmpOut1)]
    sOut += "\n".join(lTmpOut2)
    sOut += sProcErr.rstrip()
    # Collect and return everything to caller.
    nCmdStat = "n/a - RBL"      # command status is not tracked; placeholder.
    nReturnCode = proc.returncode
    lOut = sOut.split("\n")
    NTRC.ntracef(4, "DO1L", "proc DoOneLine case|%s| line|%s| "
        "sline|%s| lResult|%s|"
        % (mynProc, mynLine, mysLine, lOut))
    # (trailing "| ... |" is extraction residue from the dump)
    return(tLineOut(callstatus=nReturnCode, cmdstatus=nCmdStat
        , linenr=mynLine, casenr=mynProc, ltext=lOut)) | 1e70b01083b0be7cc1ed3ff83ca3f5fcf642f8a4 | 3,633,010 |
def _build_module_page(page_info: parser.ModulePageInfo,
                       table_view: bool) -> str:
    """Constructs a markdown page given a `ModulePageInfo` object.

    Args:
      page_info: A `ModulePageInfo` object containing information that's used to
        create a module page.
        For example, see https://www.tensorflow.org/api_docs/python/tf/data
      table_view: If True, `Args`, `Returns`, `Raises` or `Attributes` will be
        converted to a tabular format while generating markdown. If False, they
        will be converted to a markdown List view.

    Returns:
      The module markdown page.
    """
    parts = [f'# Module: {page_info.full_name}\n\n']
    parts.append('<!-- Insert buttons and diff -->\n')
    parts.append(_top_source_link(page_info.defined_in))
    parts.append('\n\n')
    # First line of the docstring i.e. a brief introduction about the symbol.
    parts.append(page_info.doc.brief + '\n\n')
    parts.append(_build_collapsable_aliases(page_info.aliases))
    # All lines in the docstring, except the brief introduction.
    for item in page_info.doc.docstring_parts:
        parts.append(_format_docstring(item, table_view, table_title_template=None))
    parts.append(_build_compatibility(page_info.doc.compatibility))
    parts.append('\n\n')
    custom_content = doc_controls.get_custom_page_content(page_info.py_object)
    if custom_content is not None:
        # Custom page content replaces the auto-generated member sections.
        parts.append(custom_content)
        # BUG FIX: this return was previously unconditional (outside the if),
        # which made every member section below unreachable.
        return ''.join(parts)
    if page_info.modules:
        parts.append('## Modules\n\n')
        parts.extend(
            _build_module_parts(
                module_parts=page_info.modules,
                template='[`{short_name}`]({url}) module'))
    if page_info.classes:
        parts.append('## Classes\n\n')
        parts.extend(
            _build_module_parts(
                module_parts=page_info.classes,
                template='[`class {short_name}`]({url})'))
    if page_info.functions:
        parts.append('## Functions\n\n')
        parts.extend(
            _build_module_parts(
                module_parts=page_info.functions,
                template='[`{short_name}(...)`]({url})'))
    if page_info.type_alias:
        parts.append('## Type Aliases\n\n')
        parts.extend(
            _build_module_parts(
                module_parts=page_info.type_alias,
                template='[`{short_name}`]({url})'))
    if page_info.other_members:
        # TODO(markdaoust): Document the value of the members, for basic types.
        parts.append('## Other Members\n\n')
        parts.append(_other_members(page_info.other_members))
    return ''.join(parts)
async def async_get_maker_for_service(hass, service):
    """Get coffee maker to be used for specified service.

    Resolves the service call's target device to a maker by MAC address;
    falls back to the first known maker when no device match is found.
    """
    data = service.data
    device_id = data.get('device_id')[0] if 'device_id' in data else None
    _LOGGER.info(f'Found target: {device_id}')
    device = None
    if device_id is not None:
        device = dr.async_get(hass).async_get(device_id)
        _LOGGER.info(f'Found device: {device}')
    maker = None
    makers = hass.data[DOMAIN].makers
    if device is not None:
        # The device registry identifier tuple carries the MAC address.
        mac_address = list(device.identifiers)[0][1]
        _LOGGER.info(f'Found maker mac address: {mac_address}')
        for candidate in makers:
            if candidate.mac_address == mac_address:
                maker = candidate
                _LOGGER.info(f'Found coffee maker: {maker}')
                break
    if maker is None and len(makers) > 0:
        maker = makers[0]
    return maker
def make_deferred_related(factory, fixture, attr):
    """Create a deferred function for a related factory declaration.

    :param factory: Factory class.
    :param fixture: Object fixture name e.g. "book".
    :param attr: Declaration attribute name e.g. "publications".

    :note: The deferred function name results in e.g. "book__publications".
    """
    deferred_name = SEPARATOR.join((fixture, attr))

    def deferred(request):
        # Resolving the fixture triggers creation of the related object.
        request.getfixturevalue(deferred_name)

    deferred.__name__ = deferred_name
    deferred._factory = factory
    deferred._fixture = fixture
    deferred._is_related = True
    return deferred
import webbrowser
def getbrowser():
    """
    Return the controller for the browser currently being used, or None
    if no runnable browser can be located.
    """
    try:
        # BUG FIX: the controller was previously discarded, so the function
        # always returned None regardless of the outcome.
        return webbrowser.get(using=None)
    except webbrowser.Error:
        # BUG FIX: webbrowser.get raises webbrowser.Error (not RuntimeError)
        # when no browser is available.
        return None
def minimize_bias_geodetic(x, gd_mb=None, mb_geodetic=None,
                           h=None, w=None, pf=2.5,
                           absolute_bias=False,
                           ys=np.arange(2000, 2019, 1),
                           oggm_default_mb = False,
                           **kwargs):
    """Calibrate the melt factor (melt_f) by driving the bias to zero.

    Compares the modelled mean specific mass balance over `ys` with the
    observed geodetic mass balance.

    Parameters
    ----------
    x : float
        The value being optimised (the melt factor, or mu_star when
        oggm_default_mb is True).
    gd_mb : class instance
        Instantiated TIModel-like mass-balance model; mutated in place with
        the candidate melt factor (and precipitation factor).
    mb_geodetic : float
        Geodetic mass balance of the glacier over the reference period.
    h : np.array
        Heights of the instantiated glacier.
    w : np.array
        Widths of the instantiated glacier.
    pf : float
        Precipitation scaling factor (default 2.5).
    absolute_bias : bool
        If True, return the absolute value of the bias (needed for Powell
        optimisation); leave False for Brentq. Default False.
    ys : np.array
        Years for which the specific mass balance is computed
        (default 2000--2019, matching W5E5 forcing).
    oggm_default_mb : bool
        If True, calibrate the default OGGM model's mu_star instead.
    **kwargs :
        Forwarded to get_specific_mb, e.g. spinup=True.

    Returns
    -------
    float
        Modelled minus reference (geodetic) mean mass balance; its absolute
        value when absolute_bias is True.
    """
    # Apply the candidate parameter to the mass-balance model in place.
    if oggm_default_mb:
        gd_mb.mu_star = x
    else:
        gd_mb.melt_f = x
        gd_mb.prcp_fac = pf
    modelled_mean = gd_mb.get_specific_mb(heights=h,
                                          widths=w,
                                          year=ys,
                                          **kwargs).mean()
    bias_calib = np.mean(modelled_mean - mb_geodetic)
    if absolute_bias:
        bias_calib = np.abs(bias_calib)
    return bias_calib
def unpack_big_integer(binary_string):
    """
    Convert a byte string into an integer.

    Akin to a base-256 decode, big-endian.

    :param binary_string: bytes-like object to decode (empty yields 0)
    :return: int
    """
    # int.from_bytes is the C-implemented stdlib form of exactly this decode;
    # it handles any length, replacing the struct/brute-force split that the
    # previous helpers implemented by hand.
    return int.from_bytes(binary_string, 'big')
import os
def file_uploader_helper(file) -> tuple:
    """
    Persist an uploaded file to a temporary location and read its contents.

    :param file: uploaded file object exposing .filename and .save(path)
        (e.g. a werkzeug FileStorage)
    :return: tuple of (sanitized file name, file contents as bytes)
    :raises OSError: if the temporary directory cannot be created
    """
    import tempfile

    file_name = secure_filename(file.filename)
    # A private temporary directory avoids the name-collision races and the
    # manual mkdir/unlink bookkeeping of the previous shared ./temp folder,
    # and guarantees cleanup even if reading fails.
    with tempfile.TemporaryDirectory() as temp_folder:
        temp_file_path = os.path.join(temp_folder, file_name)
        file.save(temp_file_path)
        with open(temp_file_path, 'rb') as temp_file_handler:
            file_contents = temp_file_handler.read()
    return file_name, file_contents
def extractLineFromTarget(**kwargs):
    """
    Based on the corridor (buffer) raster and the related points, extract the
    indices of line points from the original point cloud.

    Keyword Args:
        img_buffer (numpy.ndarray): binary raster; nonzero cells mark the
            buffer corridor to search.
        min_xyz: minimum x/y(/z) coordinates of the raster origin (only x and
            y are used below).
        cellsize (float): raster cell size, in the same units as the points.
        pts_target (numpy.ndarray): target point cloud, one point per row
            with x, y, z in columns 0-2 (deep copy of the original data).
        pts_buffer (numpy.ndarray): buffer point cloud, same layout.
        zrange: (zmin, zmax) -- NOTE(review): unpacked but never used; the
            vertical filter actually comes from the per-cell IQR fences.

    Returns:
        list: row indices into pts_target of points classified as line points.
    """
    img_buffer = kwargs['img_buffer']
    min_xyz = kwargs['min_xyz']
    cellsize = kwargs['cellsize']
    pts_target = kwargs['pts_target'] # deep copy of the original data
    pts_buffer = kwargs['pts_buffer']
    zrange = kwargs['zrange']
    zmin,zmax = zrange
    # Raster cells occupied by the buffer, converted back to world x/y of the
    # cell's lower-left corner.
    ly, lx = np.nonzero(img_buffer)
    x_min, y_min = min_xyz[0:2]
    px = cellsize * lx + x_min
    py = cellsize * ly + y_min
    nid_line=[]
    zipped = zip(px,py)
    for i,j in zipped:
        # Buffer points falling inside this cell provide the height sample.
        fcx = np.logical_and(pts_buffer[:,0] >= i, pts_buffer[:,0] < (i + cellsize))
        fcy = np.logical_and(pts_buffer[:,1] >= j, pts_buffer[:,1] < (j + cellsize))
        idx = np.logical_and(fcx, fcy)
        a = pts_buffer[idx,2]
        if len(a) == 0:
            continue
        # IQR-based fences, clamped to the sample's actual min/max heights.
        dic_border = calOutlierByIQR(a)
        lower_inner_fence = dic_border['lower_inner_fence']
        upper_inner_fence = dic_border['upper_inner_fence']
        lower_inner_fence = max(lower_inner_fence, min(a))
        upper_inner_fence = min(upper_inner_fence, max(a))
        # begin to filter target point cloud: same cell footprint, height
        # strictly within the fences.
        fcx = np.logical_and(pts_target[:,0] >= i, pts_target[:,0] < (i + cellsize))
        fcy = np.logical_and(pts_target[:,1] >= j, pts_target[:,1] < (j + cellsize))
        fcz = np.logical_and(pts_target[:,2] > lower_inner_fence, pts_target[:,2] < upper_inner_fence)
        idx = np.logical_and(fcx, fcy)
        idx = np.logical_and(fcz, idx)
        a = np.argwhere(idx==True)
        a = a.ravel()
        nid_line = nid_line + a.tolist()
    # (trailing "| ... |" is extraction residue from the dump)
    return nid_line | 4cabf1a6de49d6014b0c31ce67aa87ff116d162a | 3,633,018 |
def contract_expanded_and_prob_pattern_nodes(
    product: ProductNode
) -> ProductNode:  # P(A and B) = P(A when B) * P(B)
    """Contract expanded And Probability pattern nodes ` P(Y) * P(X when Y) = P(X and Y)` in `product`

    >>> contract_expanded_and_prob_pattern_nodes(1.5 * N(P(A when B)) * N(P(B)))
    (1.5 * N(P(B and A)))
    """
    # Treat SureEvent factors as plain floats, then split the factor list into
    # a scalar coefficient, ordinary nodes and reciprocal (1/x) nodes.
    node_list = _convert_SureEvent_in_node_list_to_float(product.args)
    (float_value, normal_product_nodes,
     reciprocal_nodes) = _split_float_vs_normal_vs_reciprocal_nodes(node_list)
    # Merge each P(Y) * P(X when Y) pair into a single P(X and Y) node.
    normal_product_nodes = simplify_expanded_and_prob_exp(normal_product_nodes)
    # NOTE(review): a SumNode is used as the result container although the
    # annotated return type is ProductNode -- confirm this is intended.
    contracted_product = SumNode()
    # Drop a leading coefficient of exactly 1 to keep the expression minimal.
    contracted_product.args = [
        float_value
    ] + normal_product_nodes + reciprocal_nodes if float_value != 1 else normal_product_nodes + reciprocal_nodes
    # (trailing "| ... |" is extraction residue from the dump)
    return contracted_product | 24eed700f76d3afcf46e3c2035c1157bb7428dfd | 3,633,019 |
import requests
def get_api_data(quote, date_range):
    """Request time-series data from the API with error checks.

    Raises ValueError if any query parameter is invalid; returns False and
    prints the note when the API call limit is reached; otherwise returns the
    time-series payload (or None if no time-series key is present).
    """
    url = API_URL.format(date_range, quote)
    print(url)
    data = requests.get(url).json()
    if "Error Message" in data:
        raise ValueError(data["Error Message"])
    if "Information" in data:
        raise ValueError(data["Information"])
    if "Note" in data:
        # API call limit reached -- surface the note and signal failure.
        print(data["Note"])
        return False
    for key in data:
        if "Time Series" in key:
            return data[key]
from typing import Tuple
from typing import List
from typing import Optional
import importlib
def find_module_path_and_all(module: str, pyversion: Tuple[int, int],
                             no_import: bool,
                             search_path: List[str],
                             interpreter: str) -> Optional[Tuple[str,
                                                                 Optional[List[str]]]]:
    """Find module and determine __all__.

    Return None if the module is a C module. Return (module_path, __all__) if
    Python module. Raise an exception or exit if failed.

    Args:
        module: dotted module name to locate.
        pyversion: target (major, minor) Python version.
        no_import: if True, locate the module on the search path instead of
            importing it (no __all__ is determined in that case).
        search_path: extra directories to search when no_import is True.
        interpreter: Python 2 interpreter to delegate to when pyversion is 2.
    """
    module_path = None  # type: Optional[str]
    if not no_import:
        if pyversion[0] == 2:
            # Python 2 modules are inspected via load_python_module_info with
            # the target interpreter (this process runs Python 3).
            module_path, module_all = load_python_module_info(module, interpreter)
        else:
            # TODO: Support custom interpreters.
            try:
                mod = importlib.import_module(module)
            except Exception:
                raise CantImport(module)
            if is_c_module(mod):
                # C extension modules have no inspectable Python source.
                return None
            module_path = mod.__file__
            module_all = getattr(mod, '__all__', None)
    else:
        # Find module by going through search path.
        module_path = mypy.build.find_module(module, ['.'] + search_path)
        if not module_path:
            raise SystemExit(
                "Can't find module '{}' (consider using --search-path)".format(module))
        module_all = None
    # (trailing "| ... |" is extraction residue from the dump)
    return module_path, module_all | f9537eb435db824bde3ae163531de8fb86f9eee9 | 3,633,021 |
import numpy
def solve_TLS_Ab(A, b):
    """Solve an overdetermined system A x = b by singular value decomposition.

    Singular values below max(W) * 1e-10 are treated as zero (pseudo-inverse
    truncation), which keeps the solve stable for ill-conditioned or
    rank-deficient systems.

    :param A: (m, n) coefficient matrix, m >= n
    :param b: length-m right-hand-side vector
    :return: length-n solution vector x minimizing ||A x - b||
    """
    # numpy.linalg.svd replaces the obsolete
    # linalg.singular_value_decomposition API; full_matrices=False is the
    # economy decomposition the previous full_matrices=0 requested.
    U, W, Vt = numpy.linalg.svd(A, full_matrices=False)
    # Smallness cutoff relative to the largest singular value.
    cutoff = max(W) * 1E-10
    # Invert only the well-conditioned singular values; zero out the rest.
    W_inv = numpy.where(W > cutoff, 1.0 / W, 0.0)
    # x = V * diag(W_inv) * U^T * b  (same algebra as the explicit loop the
    # previous implementation used, without building the diagonal matrix).
    x = numpy.dot(numpy.transpose(Vt), W_inv * numpy.dot(numpy.transpose(U), b))
    return x
import torch
def FixCentralTensorCalculateAuxiliaryTensor(ori_tensor_set, ori_matrix, mpo_input_shape, mpo_output_shape, ranks):
    """
    Recompute the central MPO core so the factorization reproduces ori_matrix
    while the auxiliary (outer) cores stay fixed.

    :param ori_tensor_set: tensor set produced by matrix2MPO; six cores are
        unpacked below (a..f) -- NOTE(review): the original comment said
        "assumes n = 5", which conflicts with the six cores; confirm.
    :param ori_matrix: the (new) full matrix to be represented; numpy array
        or torch tensor.
    :param mpo_input_shape: per-core input dimensions of the MPO.
    :param mpo_output_shape: per-core output dimensions of the MPO.
    :param ranks: MPO bond dimensions; ranks[2] and ranks[3] bound the
        central core.
    :return: new central core of shape
        (ranks[2], mpo_input_shape[2], mpo_output_shape[2], ranks[3]).

    NOTE(review): every tensor is moved to CUDA, so a GPU is required.
    """
    ori_matrix = torch.from_numpy(ori_matrix).cuda() if type(
        ori_matrix) is np.ndarray else ori_matrix.cuda()
    # Accept either numpy arrays or torch tensors for the six cores.
    if type(ori_tensor_set[0]) is np.ndarray:
        a = torch.from_numpy(ori_tensor_set[0]).cuda()
        b = torch.from_numpy(ori_tensor_set[1]).cuda()
        c = torch.from_numpy(ori_tensor_set[2]).cuda()
        d = torch.from_numpy(ori_tensor_set[3]).cuda()
        e = torch.from_numpy(ori_tensor_set[4]).cuda()
        f = torch.from_numpy(ori_tensor_set[5]).cuda()
    else:
        a = ori_tensor_set[0]
        b = ori_tensor_set[1]
        c = ori_tensor_set[2]
        d = ori_tensor_set[3]
        e = ori_tensor_set[4]
        f = ori_tensor_set[5]
    # Contract the outer cores into matrices for the linear solve.
    B = torch.tensordot(a, b, ([3], [0])).reshape(-1,
                                                  ranks[2])  # (i1j1i2j2,r2)
    # NOTE(review): C is computed but never used below.
    C = c.reshape(-1, ranks[3])  # (r3,i3j3,r4), D = (r4,i4j4,r5)
    E = torch.tensordot(e, f, ([3], [0])).reshape(
        ranks[4], -1)  # (r4,i4j4i5j5,r6)
    D = d.reshape(ranks[3], -1)
    res = ori_matrix
    # Permutation interleaving input/output axes to (i1,j1,i2,j2,...) order.
    index_permute = np.transpose(
        np.array(range(len(mpo_input_shape) + len(mpo_output_shape))).reshape((2, -1))).flatten()
    # NOTE(review): torch.inverse requires B, E and D to be square and
    # invertible -- confirm the rank choices guarantee that.
    B_inv = torch.inverse(B)
    E_inv = torch.inverse(E)
    D_inv = torch.inverse(D)
    res = res.reshape(tuple(mpo_input_shape[:]) + tuple(mpo_output_shape[:]))
    # res = np.transpose(res, index_permute).reshape(left_basis_inv.shape[1],-1)
    res = res.permute(tuple(index_permute)).reshape(B_inv.shape[1], -1)
    # Peel off the fixed outer cores one inverse at a time:
    # new_c = B^-1 . M . E^-1 . D^-1, reshaped back to core form.
    new_cdef = torch.matmul(B_inv, res).reshape(-1, E_inv.shape[0])
    new_cd = torch.matmul(new_cdef, E_inv).reshape(-1, ranks[3])
    new_c = torch.matmul(new_cd, D_inv).reshape(
        ranks[2], mpo_input_shape[2], mpo_output_shape[2], ranks[3])
    # (trailing "| ... |" is extraction residue from the dump)
    return new_c | c330725679f64532872f6477189a97753a085ca9 | 3,633,023 |
def AddEntryPoint(ordinal, ea, name, makecode):
    """
    Add entry point

    @param ordinal: entry point number;
                    if the entry point doesn't have an ordinal
                    number, 'ordinal' should be equal to 'ea'
    @param ea: address of the entry point
    @param name: name of the entry point. If null string,
                 the entry point won't be renamed.
    @param makecode: if 1 then this entry point is a start
                     of a function. Otherwise it denotes data bytes.

    @return: 0 - entry point with the specified ordinal already exists
             1 - ok
    """
    # Thin wrapper around the IDA Pro SDK's add_entry.
    # (trailing "| ... |" is extraction residue from the dump)
    return idaapi.add_entry(ordinal, ea, name, makecode) | 3218edc3663c2f575941141b9b063b8adf9d377e | 3,633,024 |
import json
import requests
def goodsEditSkuChannel(skuId,regionId):
    """Update a SKU's channel group via the goods service.

    :param skuId: identifier of the SKU to update
    :param regionId: region identifier, sent as the X-Region-Id header
    :return: decoded JSON response on success, or the host-matching error
        string when no service host is resolved
    """
    reqUrl = req_url('goods', "/sku/updateGroup")
    if not reqUrl:
        # No service host matched for the 'goods' service.
        return "服务host匹配失败"
    headers = {
        'Content-Type': 'application/json',
        'X-Region-Id': regionId,
    }
    payload = {
        "skuId": skuId,
        "groupNum": "40,41,42,43"
    }
    response = requests.post(url=reqUrl, headers=headers, data=json.dumps(payload))
    return json.loads(response.content)
import math
def poly(x, y, order, rej_lo, rej_hi, niter):
    """Linear least-squares polynomial fit with iterative sigma-clipping.

    Args:
        x: list of x data.
        y: list of y data.
        order: polynomial order.
        rej_lo: lower rejection threshold (units = sigma).
        rej_hi: upper rejection threshold (units = sigma).
        niter: maximum number of sigma-clipping iterations.

    Returns:
        (coeffs, iiter): best-fit coefficients and the number of clipping
        iterations performed before convergence.
    """
    npts = []
    iiter = 0
    iterstatus = 1
    # Iterate until no point is rejected or the iteration budget runs out.
    while iiter < niter and iterstatus > 0:
        iterstatus = 0
        npts.append(len(x))
        coeffs = np.polyfit(x, y, order)
        fit = np.polyval(coeffs, x)
        # Sample standard deviation of the fit residuals.
        resid_sq = 0
        for yv, fv in zip(y, fit):
            resid_sq = resid_sq + (yv - fv) ** 2
        sig = math.sqrt(resid_sq / (npts[iiter] - 1))
        # Keep points within the asymmetric sigma bounds; flag any rejection.
        kept_x = []
        kept_y = []
        for xv, yv, fv in zip(x, y, fit):
            if (yv - fv < rej_hi * sig and
                    fv - yv < rej_lo * sig):
                kept_x.append(xv)
                kept_y.append(yv)
            else:
                iterstatus = 1
        x = kept_x
        y = kept_y
        iiter += 1
    # coeffs = best fit coefficients
    # iiter = number of sigma clipping iterations before convergence
    return coeffs, iiter
import os
def check_watched_dir():
    """
    A celery task that moves media files out of WATCHED_DIR via
    blind_media_move, then prunes empty directories.

    Files modified very recently are skipped so that files still being
    written/copied are not moved mid-transfer.

    :return: None
    """
    import time

    media_files = recursive_extract_files(WATCHED_DIR)
    for item in media_files:
        item_path = os.path.join(WATCHED_DIR, item)
        # BUG FIX: the original compared the raw mtime (seconds since the
        # epoch, which is always > 5) instead of the file's age. The 5 is
        # presumably seconds -- TODO confirm the intended grace period.
        if time.time() - os.path.getmtime(item_path) > 5:
            # Attempt to move the file as if it were a known TV show
            blind_media_move(item_path)
    remove_empty_dirs(WATCHED_DIR)
    return None
def on_step_start(func):
    """Decorator that adapts a plain callable into a NeMoCallback whose
    on_step_start event invokes the callable with the training state.
    """

    class _WrappedCallback(NeMoCallback):
        def __init__(self, wrapped):
            self._func = wrapped

        def on_step_start(self, state):
            # Forward the event straight to the wrapped callable.
            self._func(state)

    return _WrappedCallback(func)
import ast
from operator import add
def reindent_docstring(node, indent_level=1, smart=True):
    """
    Reindent the docstring
    :param node: AST node
    :type node: ```ast.AST```
    :param indent_level: docstring indentation level whence: 0=no_tabs, 1=one tab; 2=two tabs
    :type indent_level: ```int```
    :param smart: Smart indent mode, if False `lstrip`s each line of the input and adds the indent
    :type smart: ```bool```
    :return: Node with reindent docstring
    :rtype: ```ast.AST```
    """
    doc_str = ast.get_docstring(node, clean=True)
    if doc_str is not None:
        # One indent unit per requested level (abs() guards negative levels).
        _sep = tab * abs(indent_level)
        # Replace the docstring Expr node.  In "smart" mode, lines that start
        # with a tab and whose leading word is NOT a known doc token (see
        # TOKENS.rest) are re-indented two tabs with their own indent
        # stripped; all other lines pass through untouched.  In non-smart
        # mode every line is lstripped and prefixed with _sep.
        # NOTE(review): the token test slices by the position of the first
        # space in the lstripped line — confirm against TOKENS' contents.
        node.body[0] = ast.Expr(
            set_value(
                "\n{_sep}{s}\n{_sep}".format(
                    _sep=_sep,
                    s="\n".join(
                        map(
                            lambda line: "{sep}{line}".format(
                                sep=tab * 2, line=line.lstrip()
                            )
                            if line.startswith(tab)
                            and len(line) > len(tab)
                            and line[
                                len(tab) : line.lstrip().find(" ") + len(tab)
                            ].rstrip(":s")
                            not in frozenset((False,) + TOKENS.rest)
                            else line,
                            reindent(doc_str).splitlines(),
                        )
                    )
                    if smart
                    else "\n".join(
                        map(partial(add, _sep), map(str.lstrip, doc_str.splitlines()))
                    ),
                )
            )
        )
    return node
import numpy
def xyz(x, y, z):
    """Construct a Toyplot color from CIE XYZ values, using observer = 2 deg and illuminant = D65."""
    # Normalize the tristimulus values, then apply the XYZ -> linear-sRGB matrix.
    xs, ys, zs = x / 100.0, y / 100.0, z / 100.0
    linear = (
        xs * 3.2406 + ys * -1.5372 + zs * -0.4986,
        xs * -0.9689 + ys * 1.8758 + zs * 0.0415,
        xs * 0.0557 + ys * -0.2040 + zs * 1.0570,
    )

    def _gamma_encode(channel):
        # sRGB companding: linear segment below the threshold, power law above.
        if channel > 0.0031308:
            return 1.055 * numpy.power(channel, 1 / 2.4) - 0.055
        return 12.92 * channel

    encoded = [_gamma_encode(channel) for channel in linear]
    return rgb(*numpy.clip(encoded, 0, 1))
def _is_large_prime(num):
"""Inefficient primality test, but we can get away with this simple
implementation because we don't expect users to be running
print_fizzbuzz(n) for Fib(n) > 514229"""
if not num % 2 or not num % 5:
return False
test = 5
while test*test <= num:
if not num % test or not num % (test+2):
return False
test += 6
return True | 090b641872d8d25d55e8f32296e3893f59518308 | 3,633,031 |
def cleanup_code(content: str):
    """Strip markdown code fencing from a snippet.

    A surrounding triple-backtick block is removed together with any
    language tag on the opening fence; otherwise stray backticks and
    whitespace are trimmed from both ends.
    """
    fenced = content.startswith('```') and content.endswith('```')
    if fenced:
        # Drop the opening fence line (e.g. "```py") and the closing fence.
        inner_lines = content.split('\n')[1:-1]
        return '\n'.join(inner_lines)
    return content.strip('` \n')
def name_generator(identifier: str = "") -> str:
    """
    Produce a unique test-resource name.

    :param identifier: optional tag embedded in the name
    :return: a name of the form ``thrifty-builder-test-<identifier><uuid>``
    """
    unique_suffix = uuid4()
    return "thrifty-builder-test-%s%s" % (identifier, unique_suffix)
import os
def merge_fcs(fcs, merged_fc, gdb):
    """Combine feature classes of like geometry into a single feature class.

    :param fcs: iterable of feature-class names relative to ``gdb``
    :param merged_fc: full path of the output feature class
    :param gdb: workspace (geodatabase) containing ``fcs``
    :return: tuple of (output feature class path, number of features inserted)
    """
    # Use the first input as the template for geometry type and spatial
    # reference — assumes all inputs share them (TODO confirm upstream).
    desc = arcpy.Describe(os.path.join(gdb, fcs[0]))
    if arcpy.Exists(merged_fc):
        arcpy.Delete_management(merged_fc)
    ifc = arcpy.CreateFeatureclass_management(out_path=os.path.dirname(merged_fc),
                                              out_name=os.path.basename(merged_fc),
                                              geometry_type=desc.shapeType.upper(),
                                              spatial_reference=desc.spatialReference)[0]
    icur = da.InsertCursor(ifc, ['SHAPE@'])
    count = 0
    for fc in fcs:
        fc = os.path.join(gdb, fc)
        # Copy geometries only; attribute fields are not carried over.
        with da.SearchCursor(fc, ["SHAPE@"]) as rows:
            for row in rows:
                icur.insertRow(row)
                count += 1
            del row
        del rows
        del fc
    # The explicit `del`s release arcpy cursor locks and flush pending edits.
    del icur, desc
    return ifc, count
import signal
def _ssim_for_multi_scale(img1,
                          img2,
                          max_val=255,
                          filter_size=11,
                          filter_sigma=1.5,
                          k1=0.01,
                          k2=0.03):
    """Calculate SSIM (structural similarity) and contrast sensitivity.
    Ref:
    Image quality assessment: From error visibility to structural similarity.
    The results are the same as that of the official released MATLAB code in
    https://ece.uwaterloo.ca/~z70wang/research/ssim/.
    For three-channel images, SSIM is calculated for each channel and then
    averaged.
    This function attempts to match the functionality of ssim_index_new.m by
    Zhou Wang: http://www.cns.nyu.edu/~lcv/ssim/msssim.zip
    Args:
        img1 (ndarray): Images with range [0, 255] and order "NHWC".
        img2 (ndarray): Images with range [0, 255] and order "NHWC".
        max_val (int): the dynamic range of the images (i.e., the difference
            between the maximum the and minimum allowed values).
            Default to 255.
        filter_size (int): Size of blur kernel to use (will be reduced for
            small images). Default to 11.
        filter_sigma (float): Standard deviation for Gaussian blur kernel (will
            be reduced for small images). Default to 1.5.
        k1 (float): Constant used to maintain stability in the SSIM calculation
            (0.01 in the original paper). Default to 0.01.
        k2 (float): Constant used to maintain stability in the SSIM calculation
            (0.03 in the original paper). Default to 0.03.
    Returns:
        tuple: Pair containing the mean SSIM and contrast sensitivity between
        `img1` and `img2`.
    """
    if img1.shape != img2.shape:
        raise RuntimeError(
            'Input images must have the same shape (%s vs. %s).' %
            (img1.shape, img2.shape))
    if img1.ndim != 4:
        raise RuntimeError('Input images must have four dimensions, not %d' %
                           img1.ndim)
    img1 = img1.astype(np.float32)
    img2 = img2.astype(np.float32)
    _, height, width, _ = img1.shape
    # Filter size can't be larger than height or width of images.
    size = min(filter_size, height, width)
    # Scale down sigma if a smaller filter size is used.
    sigma = size * filter_sigma / filter_size if filter_size else 0
    if filter_size:
        # Gaussian window reshaped to NHWC so fftconvolve broadcasts over
        # batch and channel while blurring H and W.
        window = np.reshape(_f_special_gauss(size, sigma), (1, size, size, 1))
        mu1 = signal.fftconvolve(img1, window, mode='valid')
        mu2 = signal.fftconvolve(img2, window, mode='valid')
        sigma11 = signal.fftconvolve(img1 * img1, window, mode='valid')
        sigma22 = signal.fftconvolve(img2 * img2, window, mode='valid')
        sigma12 = signal.fftconvolve(img1 * img2, window, mode='valid')
    else:
        # Empty blur kernel so no need to convolve.
        mu1, mu2 = img1, img2
        sigma11 = img1 * img1
        sigma22 = img2 * img2
        sigma12 = img1 * img2
    mu11 = mu1 * mu1
    mu22 = mu2 * mu2
    mu12 = mu1 * mu2
    # Turn second moments into (co)variances by removing the means.
    sigma11 -= mu11
    sigma22 -= mu22
    sigma12 -= mu12
    # Calculate intermediate values used by both ssim and cs_map.
    c1 = (k1 * max_val)**2
    c2 = (k2 * max_val)**2
    v1 = 2.0 * sigma12 + c2
    v2 = sigma11 + sigma22 + c2
    ssim = np.mean((((2.0 * mu12 + c1) * v1) / ((mu11 + mu22 + c1) * v2)),
                   axis=(1, 2, 3))  # Return for each image individually.
    cs = np.mean(v1 / v2, axis=(1, 2, 3))
    return ssim, cs
def index(request):
    """
    Render the back-office index page.

    WARNING(review): this view logs every visitor in as ``admin`` with a
    hard-coded password ("auto login for test users").  This must not
    reach production — confirm and remove before deploying.

    :param request: incoming HTTP request
    :return: rendered ``back/index.html`` with the local variables as context
    """
    # `panel` is passed to the template via locals().
    panel = True
    # auto login for test users
    user = authenticate(username='admin', password='Aa1234567890')
    login(request, user)
    return render(request, "back/index.html", locals())
def membersof(parser, token):
    """
    Given a collection and a content type, sets the results of :meth:`collection.members.with_model <.CollectionMemberManager.with_model>` as a variable in the context.
    Usage::
    {% membersof <collection> with <app_label>.<model_name> as <var> %}
    """
    params=token.split_contents()
    tag = params[0]
    # Expected form has 6 tokens: tag, collection, "with", app.model, "as", var.
    if len(params) < 6:
        raise template.TemplateSyntaxError('"%s" template tag requires six parameters' % tag)
    if params[2] != 'with':
        raise template.TemplateSyntaxError('"%s" template tag requires the third parameter to be "with"' % tag)
    try:
        # Resolve "app_label.model" to a ContentType at parse time so template
        # rendering fails fast on typos.
        app_label, model = params[3].strip('"').split('.')
        ct = ContentType.objects.get(app_label=app_label, model=model)
    except ValueError:
        raise template.TemplateSyntaxError('"%s" template tag option "with" requires an argument of the form app_label.model (see django.contrib.contenttypes)' % tag)
    except ContentType.DoesNotExist:
        raise template.TemplateSyntaxError('"%s" template tag option "with" requires an argument of the form app_label.model which refers to an installed content type (see django.contrib.contenttypes)' % tag)
    if params[4] != 'as':
        raise template.TemplateSyntaxError('"%s" template tag requires the fifth parameter to be "as"' % tag)
    return MembersofNode(collection=params[1], model=ct.model_class(), as_var=params[5])
def ext_s(variable, value, substitution):
    """Extend *substitution* with the binding ``variable == value``.

    No duplicate or contradiction checking happens here; `unify` performs
    all of the related verification.

    @param variable: A LogicVariable
    @param value: A value that can either be a LogicVariable or literal.
    @param substitution: A set of tuples indicating equality for LogicVariables.
    @return: A new substitution that also equates variable and value.
    """
    binding = (variable, value)
    return substitution.union({binding})
def sc_fermi_sub_wrap( calc, non_native, native, stoich, non_native_limit, native_limit, im_cor, charge ):
    """
    sc_fermi_sub_wrap determines the formation energy of a substitutional defect, and formats a 'ChargeState' object to be fed into sc_fermi
    args: calc = DFT calculation summary of the defective material (vasspy.calculation)
          non_native = The species that is added to form the substitution (string)
          native = The species that is removed to form the substitution (string)
          stoich = DFT calculation summary of the stoichiometric material that the substitution is formed in (vasspy.calculation)
          non_native_limit = chemical potential of the substituting species (float)
          native_limit = chemical potential of the removed species (float)
          im_cor = image charge correction for the relevant charge state (float)
          charge = charge associated with the defect (integer)
    Returns ChargeState class with defect charge, E_F = 0 formation energy, and the spin degeneracy of the defect.
    """
    # NOTE(review): relies on module-level globals E_vbm (VBM energy),
    # potals (potential-alignment corrections keyed by calc.title) and
    # ue (unpaired-electron counts keyed by calc.title) — confirm these
    # are defined before calling.
    # Formation energy = E(defect) - E(host) - mu(added) + mu(removed)
    #                    + q*E_VBM + alignment + image-charge correction.
    formation_energy = ( calc.energy - stoich.energy ) - ( 1 * (non_native.energy/sum(non_native.stoichiometry.values()) + non_native_limit) + -1 * (native.energy/sum(native.stoichiometry.values()) + native_limit)) + (charge * ( E_vbm ) + potals[calc.title]) + im_cor
    # Spin degeneracy taken as 2^(number of unpaired electrons).
    return ChargeState( charge, formation_energy, 2**ue[calc.title])
import re
def _include_matcher(keyword="#include", delim="<>"):
"""Match an include statement and return a (keyword, file, extra)
duple, or a touple of None values if there isn't a match."""
rex = re.compile(r'^(%s)\s*%s(.*)%s(.*)$' % (keyword, delim[0], delim[1]))
def matcher(context, line):
m = rex.match(line)
return m.groups() if m else (None, ) * 3
return matcher | b5f57a8f007870952810a591bb8e15c86af467b1 | 3,633,040 |
import logging
def cod_converter(cod_decimal_string):
    """ Map a decimal Bluetooth Class-of-Device value to human-readable names.

    The major class lives in bits 8-12 of the CoD, the minor class in bits
    2-7.  Minor classes are either a plain enumeration or (for Imaging) a
    bitmask; Peripheral devices additionally carry a 2-bit "feel" field in
    the top of the minor class.

    Bug fix: the bare ``except:`` in the bitwise branch (which would also
    swallow KeyboardInterrupt/SystemExit) was narrowed to
    ``except Exception:``.

    :param cod_decimal_string: numeric string corresponding to the class of device
    :return: list of class name strings, e.g. ['Phone', 'Smartphone'];
             empty list for empty/None input
    """
    if not cod_decimal_string or cod_decimal_string == "":
        return []
    cod_decimal_string = int(cod_decimal_string)
    # Major CoDs
    classes = {0: {'major': 'Miscellaneous',
                   'minor': {}},
               1: {
                   'major': 'Computer',
                   'minor': {
                       'bitwise': False,
                       '0': 'Uncategorized',
                       '1': 'Desktop workstation',
                       '2': 'Server-class computer',
                       '3': 'Laptop',
                       '4': 'Handheld PC/PDA (clamshell)',
                       '5': 'Palm-size PC/PDA',
                       '6': 'Wearable computer (watch size)',
                       '7': 'Tablet'}
               },
               2: {
                   'major': 'Phone',
                   'minor': {
                       'bitwise': False,
                       '0': 'Uncategorized',
                       '1': 'Cellular',
                       '2': 'Cordless',
                       '3': 'Smartphone',
                       '4': 'Wired modem or voice gateway',
                       '5': 'Common ISDN access'
                   }
               },
               3: {
                   'major': 'LAN/Network Access Point',
                   'minor': {
                       'bitwise': False,
                       '0': 'Fully available',
                       '1': '1% to 17% utilized',
                       '2': '17% to 33% utilized',
                       '3': '33% to 50% utilized',
                       '4': '50% to 67% utilized',
                       '5': '67% to 83% utilized',
                       '6': '83% to 99% utilized',
                       '7': 'No service available'
                   }
               },
               4: {
                   'major': 'Audio/Video',
                   'minor': {
                       'bitwise': False,
                       '0': 'Uncategorized',
                       '1': 'Wearable Headset Device',
                       '2': 'Hands-free Device',
                       '3': '(Reserved)',
                       '4': 'Microphone',
                       '5': 'Loudspeaker',
                       '6': 'Headphones',
                       '7': 'Portable Audio',
                       '8': 'Car audio',
                       '9': 'Set-top box',
                       '10': 'HiFi Audio Device',
                       '11': 'VCR',
                       '12': 'Video Camera',
                       '13': 'Camcorder',
                       '14': 'Video Monitor',
                       '15': 'Video Display and Loudspeaker',
                       '16': 'Video Conferencing',
                       '17': '(Reserved)',
                       '18': 'Gaming/Toy'
                   }
               },
               5: {
                   'major': 'Peripheral',
                   'minor': {
                       'bitwise': False,
                       'feel': {
                           '0': 'Not Keyboard / Not Pointing Device',
                           '1': 'Keyboard',
                           '2': 'Pointing device',
                           '3': 'Combo keyboard/pointing device'
                       },
                       '0': 'Uncategorized',
                       '1': 'Joystick',
                       '2': 'Gamepad',
                       '3': 'Remote control',
                       '4': 'Sensing device',
                       '5': 'Digitizer tablet',
                       '6': 'Card Reader',
                       '7': 'Digital Pen',
                       '8': 'Handheld scanner for bar-codes, RFID, etc.',
                       '9': 'Handheld gestural input device'
                   }
               },
               6: {
                   'major': 'Imaging',
                   'minor': {
                       'bitwise': True,
                       '4': 'Display',
                       '8': 'Camera',
                       '16': 'Scanner',
                       '32': 'Printer'
                   }
               },
               7: {
                   'major': 'Wearable',
                   'minor': {
                       'bitwise': False,
                       '0': 'Wristwatch',
                       '1': 'Pager',
                       '2': 'Jacket',
                       '3': 'Helmet',
                       '4': 'Glasses'
                   }
               },
               8: {
                   'major': 'Toy',
                   'minor': {
                       'bitwise': False,
                       '0': 'Robot',
                       '1': 'Vehicle',
                       '2': 'Doll / Action figure',
                       '3': 'Controller',
                       '4': 'Game'
                   }
               },
               9: {
                   'major': 'Health',
                   'minor': {
                       'bitwise': False,
                       '0': 'Undefined',
                       '1': 'Blood Pressure Monitor',
                       '2': 'Thermometer',
                       '3': 'Weighing Scale',
                       '4': 'Glucose Meter',
                       '5': 'Pulse Oximeter',
                       '6': 'Heart/Pulse Rate Monitor',
                       '7': 'Health Data Display',
                       '8': 'Step Counter',
                       '9': 'Body Composition Analyzer',
                       '10': 'Peak Flow Monitor',
                       '11': 'Medication Monitor',
                       '12': 'Knee Prosthesis',
                       '13': 'Ankle Prosthesis',
                       '14': 'Generic Health Manager',
                       '15': 'Personal Mobility Device'
                   }
               }}
    # Extract the major (bits 8-12) and minor (bits 2-7) class fields.
    major_number = (cod_decimal_string >> 8) & 0x1f
    minor_number = (cod_decimal_string >> 2) & 0x3f
    minor_class_name = None
    minor = {'minor': {}}
    if major_number == 31:
        major = {'major': 'Uncategorized'}
    else:
        major = classes.get(major_number, {'major': 'Reserved'})
        minor = classes.get(major_number, minor)
    minor_class = minor.get('minor', {})
    if minor_class.get('bitwise', False):
        # i.e. imaging: minor field is a bitmask, not an enumeration.
        for key, value in minor_class.items():
            try:
                # if key is an integer, it is good to be evaluated
                minor_key = int(key)
            except ValueError:
                # Skip non-numeric bookkeeping keys such as 'bitwise'.
                continue
            except Exception:
                logging.exception("Failed to evaluate minor device class with key %s" % key)
                continue
            if minor_number & minor_key:
                minor_class_name = value
                break
    else:
        minor_class_name = minor_class.get(str(minor_number), 'reserved')
    major_class_name = major.get('major')
    peripheral_classes = [major_class_name, minor_class_name]
    if 'feel' in minor_class:
        # Peripheral only: top 2 bits of the minor field describe the "feel".
        feel_number = minor_number >> 4
        feel_class_name = minor_class['feel'].get(str(feel_number))
        if feel_class_name:
            peripheral_classes.append(feel_class_name)
    return peripheral_classes
def get_params(ntrain, EXP_NAME, order, Nside, architecture="FCN", verbose=True):
    """Parameters for the cgcnn and cnn2d defined in deepsphere/models.py

    Args:
        ntrain: number of training samples (drives batch size / eval frequency).
        EXP_NAME: experiment name used as the model directory.
        order: HEALPix partial-sky order (divides the batch-size scaling).
        Nside: HEALPix resolution parameter of the input maps.
        architecture: "FCN" (fully convolutional) or "CNN" (graph conv
            feature extractor + dense classifier head).
        verbose: if True, print a summary of the derived settings.

    Returns:
        dict of hyperparameters consumed by the model constructor.
    """
    n_classes = 2
    params = dict()
    params['dir_name'] = EXP_NAME
    # Types of layers.
    params['conv'] = 'chebyshev5'  # Graph convolution: chebyshev5 or monomials.
    params['pool'] = 'max'  # Pooling: max or average.
    params['activation'] = 'relu'  # Non-linearity: relu, elu, leaky_relu, softmax, tanh, etc.
    params['statistics'] = 'mean'  # Statistics (for invariance): None, mean, var, meanvar, hist.
    # Architecture.
    params['F'] = [16, 32, 64, 64, 64, n_classes]  # Graph convolutional layers: number of feature maps.
    params['K'] = [5] * 6  # Polynomial orders.
    params['batch_norm'] = [True] * 6  # Batch normalization.
    params['M'] = []  # Fully connected layers: output dimensionalities.
    # Pooling: halve the resolution at each layer (last one unchanged).
    nsides = [Nside, Nside//2, Nside//4, Nside//8, Nside//16, Nside//32, Nside//32]
    params['nsides'] = nsides
    params['indexes'] = utils.nside2indexes(nsides, order)
    #     params['batch_norm_full'] = []
    if architecture == "CNN":
        # Classical convolutional neural network.
        # Replace the last graph convolution and global average pooling by a fully connected layer.
        # That is, change the classifier while keeping the feature extractor.
        params['F'] = params['F'][:-1]
        params['K'] = params['K'][:-1]
        params['batch_norm'] = params['batch_norm'][:-1]
        params['nsides'] = params['nsides'][:-1]
        params['indexes'] = params['indexes'][:-1]
        params['statistics'] = None
        params['M'] = [n_classes]
    elif architecture == "FCN":
        pass
    else:
        raise ValueError('Unknown architecture {}.'.format(architecture))
    # Regularization (to prevent over-fitting).
    params['regularization'] = 0  # Amount of L2 regularization over the weights (will be divided by the number of weights).
    params['dropout'] = 1  # Percentage of neurons to keep.
    # Training.
    params['num_epochs'] = 80  # Number of passes through the training data.
    params['batch_size'] = max(8 * order, 1)  # Constant quantity of information (#pixels) per step (invariant to sample size).
    # Optimization: learning rate schedule and optimizer.
    params['scheduler'] = lambda step: tf.train.exponential_decay(2e-4, step, decay_steps=1, decay_rate=0.999)
    params['optimizer'] = lambda lr: tf.train.AdamOptimizer(lr, beta1=0.9, beta2=0.999, epsilon=1e-8)
    # Number of model evaluations during training (influence training time).
    n_evaluations = 80
    params['eval_frequency'] = int(params['num_epochs'] * ntrain / params['batch_size'] / n_evaluations)
    if verbose:
        print('#sides: {}'.format(nsides))
        # print('#pixels: {}'.format([(nside//order)**2 for nside in nsides]))
        # Number of pixels on the full sphere: 12 * nsides**2.
        print('#samples per batch: {}'.format(params['batch_size']))
        # print('=> #pixels per batch (input): {:,}'.format(params['batch_size']*(Nside//order)**2))
        # print('=> #pixels for training (input): {:,}'.format(params['num_epochs']*ntrain*(Nside//order)**2))
        # Evaluate the schedule at the first and last step to report the
        # learning-rate range (opens a throwaway TF session).
        n_steps = params['num_epochs'] * ntrain // params['batch_size']
        lr = [params['scheduler'](step).eval(session=tf.Session()) for step in [0, n_steps]]
        print('Learning rate will start at {:.1e} and finish at {:.1e}.'.format(*lr))
    return params
def historify(
    X: np.ndarray, history_len: int,
):
    """Generate (num_histories, history_len, *feature_shape) sliding-window
    histories from time-series data.

    Todo:
        * Implement striding
    Warning:
        * This converts back and forth between jnp and np, which is fine for
          CPU, but may cause issues if we need to move off CPU
    Args:
        X (np.ndarray): first axis is time, remainder are feature dimensions
        history_len: length of a history window
    Returns:
        jnp.ndarray: nD array organized as (num_histories, history_len) + feature_shape
    Raises:
        ValueError: if history_len < 1 or X has fewer than history_len rows.

    Notes on fixes:
        * The single-window path previously skipped the jnp conversion, so
          the return type depended on the input size; it now always returns
          a jnp array.
        * A second ValueError check was removed: it compared X.shape[0]
          against num_histories + history_len - 1, which by construction
          always equals X.shape[0], so it could never fire.
    """
    num_histories = X.shape[0] - history_len + 1
    if num_histories < 1 or history_len < 1:
        raise ValueError("Must have positive history_len and at least one window")
    X = np.asarray(X)
    if num_histories == 1:
        # Single window: no striding needed.
        return jnp.asarray(X[None, :])
    # Zero-copy sliding windows via strides; jnp.asarray materializes a copy.
    shape = (num_histories, history_len) + X.shape[1:]
    strides = (X.strides[0],) + X.strides
    return jnp.asarray(np.lib.stride_tricks.as_strided(X, shape=shape, strides=strides))
import operator
def lcs(l1, l2, eq=operator.eq):
    """Finds the longest common subsequence of l1 and l2.
    Returns a list of common parts and a list of differences.
    >>> lcs([1, 2, 3], [2])
    ([2], [1, 3])
    >>> lcs([1, 2, 3, 3, 4], [2, 3, 4, 5])
    ([2, 3, 4], [1, 3, 5])
    >>> lcs('banana', 'baraban')
    (['b', 'a', 'a', 'n'], ['a', 'r', 'b', 'n', 'a'])
    >>> lcs('abraban', 'banana')
    (['b', 'a', 'a', 'n'], ['a', 'r', 'n', 'b', 'a'])
    >>> lcs([1, 2, 3], [4, 5])
    ([], [4, 5, 1, 2, 3])
    >>> lcs([4, 5], [1, 2, 3])
    ([], [1, 2, 3, 4, 5])
    """
    rows, cols = len(l1), len(l2)
    # Standard LCS dynamic-programming table: table[r][c] is the LCS length
    # of l1[:r] and l2[:c].
    table = [[0] * (cols + 1) for _ in range(rows + 1)]
    for r in range(1, rows + 1):
        for c in range(1, cols + 1):
            if eq(l1[r - 1], l2[c - 1]):
                table[r][c] = table[r - 1][c - 1] + 1
            else:
                table[r][c] = max(table[r - 1][c], table[r][c - 1])
    # Backtrack from the bottom-right corner, collecting matches and
    # mismatches in reverse order.
    common = []
    diff = []
    r, c = rows, cols
    while r and c:
        if eq(l1[r - 1], l2[c - 1]):
            r -= 1
            c -= 1
            common.append(l1[r])
        elif table[r - 1][c] >= table[r][c - 1]:
            # Tie-break toward l1, matching the original implementation.
            r -= 1
            diff.append(l1[r])
        else:
            c -= 1
            diff.append(l2[c])
    # Whatever remains of either sequence is pure difference.
    diff.extend(reversed(l1[:r]))
    diff.extend(reversed(l2[:c]))
    common.reverse()
    diff.reverse()
    return common, diff
from mpunet.preprocessing import get_preprocessing_func
def get_data_sequences(project_dir, hparams, logger, args):
    """
    Loads training and validation data as specified in the hyperparameter file.
    Returns a batch sequencer object for each dataset, not the ImagePairLoader
    dataset itself. The preprocessing function may make changes to the hparams
    dictionary.
    Args:
        project_dir: A path to a mpunet project
        hparams: A YAMLHParams object
        logger: A mpunet logging object
        args: argparse arguments
    Returns:
        train: A batch sequencer object for the training data
        val: A batch sequencer object for the validation data,
             or None if --no_val was specified
        hparams: The YAMLHParams object
    """
    # Select the preprocessing function matching the configured model class.
    func = get_preprocessing_func(hparams["build"].get("model_class_name"))
    # Mutate fit-time settings in place before handing hparams to the
    # preprocessing function (which may mutate them further).
    hparams['fit']['flatten_y'] = True
    hparams['fit']['max_loaded'] = args.max_loaded_images
    hparams['fit']['num_access'] = args.num_access
    train, val, hparams = func(hparams=hparams,
                               logger=logger,
                               just_one=args.just_one,
                               no_val=args.no_val,
                               continue_training=args.continue_training,
                               base_path=project_dir)
    return train, val, hparams
def request_factory(environ):
    """Factory function building a Request from a WSGI environ.

    NOTE(review): the original docstring said this adds headers for
    cross-domain (CORS) calls, but no headers are set in the visible code —
    presumably that happens elsewhere (e.g. a response callback); confirm.

    Adapted from:
    http://stackoverflow.com/questions/21107057/pyramid-cors-for-ajax-requests
    Copied from mtholder/pyraphyletic
    """
    request = Request(environ)
    # Log only the first 1000 bytes of the body to keep debug output bounded.
    _LOG.debug('trunctated request body: {b}'.format(b=request.body[:1000]))
    return request
def tick_payload():
    """ Payload for tick """
    # Serialize a full tick event and base64-encode it, mirroring the
    # Pub/Sub push-message envelope shape.
    serialized = TickEvent(tick_type=TickType.FULL).json()
    encoded = b64encode(serialized.encode()).decode()
    context = {
        "eventId": "some-eventId",
        "timestamp": "some-timestamp",
        "eventType": "some-eventType",
        "resource": "some-resource",
    }
    return {"context": context, "data": {"data": encoded}}
def calc_sparsity(optimizer, total_params, total_quant_params):
    """
    Returns the sparsity of the overall network and the sparsity of quantized layers only.
    Parameters:
    -----------
    optimizer:
        An optimizer containing quantized model layers in param_groups[1]['params'] and non-quantized layers,
        such as BatchNorm, Bias, etc., in param_groups[0]['params'].
    total_params:
        Total number of parameters.
    total_quant_params:
        Number of quantized parameters.
    Returns:
    --------
    sparsity_total:
        Sparsity of the overall network.
    sparsity_quant:
        Sparsity of quantized layers of the network.
    """
    # Count non-zero entries per group; boolean masking keeps only the
    # surviving (unpruned) weights.
    quant_nonzero = sum(t[t != 0].numel() for t in optimizer.param_groups[1]['params'])
    other_nonzero = sum(t[t != 0].numel() for t in optimizer.param_groups[0]['params'])
    total_nonzero = other_nonzero + quant_nonzero
    sparsity_total = (total_params - total_nonzero) / total_params
    sparsity_quant = (total_quant_params - quant_nonzero) / total_quant_params
    return sparsity_total, sparsity_quant
def get_pck_normalized_joint_distances(gt_array: np.ndarray, pred_array: np.ndarray, visible_array: np.array, threshold, ref_distances):
    """
    n = number of records
    :param gt_array: (n, num_joints, 2) ground-truth joint coordinates
    :param pred_array: (n, num_joints, 2) predicted joint coordinates
    :param visible_array: (n, num_joints) # 0 if invisible, 1 if visible
    :param threshold: PCK threshold
    :param ref_distances: per-record normalization distances; records with a
        zero entry are dropped (normalization would divide by zero)
    :return: (pck per joint, normalized joint distances)
    """
    # Drop records whose reference distance is zero before normalizing.
    gt_array = gt_array.copy()[ref_distances != 0]
    pred_array = pred_array.copy()[ref_distances != 0]
    visible_array = visible_array.copy()[ref_distances != 0]
    ref_distances = ref_distances[ref_distances != 0]
    normalized_joint_distances = get_normalized_joint_distances(gt_array, pred_array, visible_array, ref_distances)
    # Counts how many skeletons contain a joint (thus 16, 1)
    num_visible_joints_per_joint = np.sum(visible_array, axis=0)
    # Get the PCK for each joint
    pck = get_pck(normalized_joint_distances, threshold, num_visible_joints_per_joint, visible_array)
    # for joint in SKELETON_PEDREC_JOINTS:
    #     print("{}: {:.2f}%".format(joint.name, pck[joint.value]))
    # pck_wo_nans = pck[~np.isnan(pck)]
    # print("Mean (w/o) NAN: {:.2f}%".format(np.sum(pck_wo_nans) / len(pck_wo_nans)))
    return pck, normalized_joint_distances
import warnings
def dict_to_header_arrays(header=None, byteorder='='):
    """
    Returns null hf, hi, hs arrays, optionally filled with values from a
    dictionary.
    No header checking.
    :param header: SAC header dictionary.
    :type header: dict
    :param byteorder: Desired byte order of initialized arrays (little, native,
        big).
    :type byteorder: str {'<', '=', '>'}
    :return: The float, integer, and string header arrays, in that order.
    :rtype: tuple of :class:`numpy.ndarray`
    """
    hf, hi, hs = init_header_arrays(byteorder=byteorder)
    # have to split kevnm into two fields
    # TODO: add .lower() to hdr lookups, for safety
    if header is not None:
        for hdr, value in header.items():
            # Route each key into the float/int/string array according to
            # the HD.* lookup tables; unknown keys are warned and skipped.
            if hdr in HD.FLOATHDRS:
                hf[HD.FLOATHDRS.index(hdr)] = value
            elif hdr in HD.INTHDRS:
                if not isinstance(value, (np.integer, int)):
                    msg = "Non-integers may be truncated: {} = {}"
                    warnings.warn(msg.format(hdr, value))
                hi[HD.INTHDRS.index(hdr)] = value
            elif hdr in HD.STRHDRS:
                if hdr == 'kevnm':
                    # kevnm is 16 characters, stored as two 8-char slots
                    # (hs[1] and hs[2]), each left-justified/padded.
                    # assumes users will not include a 'kevnm2' key
                    # XXX check for empty or null value?
                    kevnm = '{:<8s}'.format(value[0:8])
                    kevnm2 = '{:<8s}'.format(value[8:16])
                    hs[1] = kevnm.encode('ascii', 'strict')
                    hs[2] = kevnm2.encode('ascii', 'strict')
                else:
                    # TODO: why was encoding done?
                    # hs[HD.STRHDRS.index(hdr)] = value.encode('ascii',
                    #                                          'strict')
                    hs[HD.STRHDRS.index(hdr)] = value
            else:
                msg = "Unrecognized header name: {}. Ignored.".format(hdr)
                warnings.warn(msg)
    return hf, hi, hs
def swapKeys(d, keySwapDict):
    """
    Return a copy of ``d`` with keys renamed according to ``keySwapDict``.

    Keys of ``d`` found in ``keySwapDict`` are emitted under their mapped
    name; all other keys are copied unchanged.

    Bug fix: ``dict.iteritems()`` is Python 2 only and raises
    AttributeError under Python 3 (which this codebase targets — it uses
    f-strings elsewhere); replaced with ``items()``.

    :param d: source dictionary
    :param keySwapDict: mapping of old key -> new key
    :return: new dictionary with swapped keys
    """
    dNew = {}
    for key, keyNew in keySwapDict.items():
        if key in d:
            dNew[keyNew] = d[key]
    for key in d:
        if key not in keySwapDict:
            dNew[key] = d[key]
    return dNew
def render_pyramid(pyr, levels):
    """
    Stack the first ``levels`` pyramid levels side by side in one image.

    Note: each used level of ``pyr`` is normalized to [0, 1] in place.

    :param pyr: Gaussian or Laplacian pyramid (list of 2D arrays)
    :param levels: number of levels to present in the result <= max_levels
    :return: single image with the pyramid levels stacked horizontally
    """
    panels = []
    base_rows = None
    for lvl in range(levels):
        # Stretch this level's values to [0, 1] (mutates pyr, as before).
        lo, hi = pyr[lvl].min(), pyr[lvl].max()
        pyr[lvl] = (pyr[lvl] - lo) / (hi - lo)
        if lvl == 0:
            base_rows = pyr[0].shape[0]
            panels.append(pyr[0])
        else:
            # Pad smaller levels with zeros to the base level's row count.
            padded = np.zeros((base_rows, pyr[lvl].shape[1]))
            padded[:pyr[lvl].shape[0]] = pyr[lvl]
            panels.append(padded)
    return np.hstack(panels)
def test_scatter_plot():
    """
    Test plot of predicted electric conductivity as a
    function of the mole fractions.
    Input
    -----
    x_vals : numpy vector x-axis (mole fractions)
    y_vals : numpy vector y-axis (predicted conductivities)
    x_variable : string for labeling the x-axis
    Returns
    ------
    fig : matplotlib figure
    Check:
    1. The x_variable is a string
    2. The x,y vector has the same dimension
    """
    x_variable = 'm'
    # NOTE(review): np.arange(0, 1) yields a single element ([0]); if the
    # intent was the inclusive range 0..1, arange(0, 2) or linspace would
    # be needed — confirm.
    x_vals = np.arange(0,1)
    y_vals = np.arange(0,1)
    assert isinstance(x_variable,str), "x_variable should be a string variable"
    assert len(x_vals)==len(y_vals), "The x and y vector should have the same dimension"
    # Map the short variable code to a human-readable axis label.
    if (x_variable == 'm'):
        x_variable = 'Mole Fraction A'
    elif (x_variable == 'p'):
        x_variable = 'Pressure (kPa)'
    elif (x_variable == 't'):
        x_variable = 'Temperature (K)'
    # FIG_SIZE is a module-level constant (defined outside this block).
    fig = plt.figure(figsize=FIG_SIZE)
    plt.scatter(x_vals, y_vals)
    plt.xlabel(x_variable)
    plt.ylabel('Electrical Conductivity')
    return fig
def networkx2pandas(current_graph, input_type):
    """Converting current graph into a pandas data frame
    :param: current_graph: a python dict containing all paths (each value is
        a list of paths; each path is a list of edge dicts)
    :param: input_type: the semantic type of the input
    :return: a deduplicated DataFrame with one row per path; columns are
        input/input_type, then predN/nodeN columns per hop, ending in
        output_* columns for the final hop
    """
    data = []
    for paths in current_graph.values():
        for path in paths:
            path_data = {}
            for i, edge in enumerate(path):
                # First hop also records the query input itself.
                if i == 0:
                    path_data['input'] = edge['input_name']
                    path_data['input_type'] = input_type
                # Per-hop predicate plus its provenance (source/api/pubmed).
                path_data['pred' + str(i + 1)] = edge['info']['label']
                path_data['pred' + str(i + 1) +
                          '_source'] = retrieve_prop_from_edge(edge['info'],
                                                               'source')
                path_data['pred' + str(i + 1) +
                          '_api'] = retrieve_prop_from_edge(edge['info'],
                                                            'api')
                path_data['pred' + str(i + 1) +
                          '_pubmed'] = retrieve_prop_from_edge(edge['info'],
                                                               'pubmed')
                # Last hop's target is the "output"; intermediates are nodeN.
                node = 'output' if i + 1 == len(path) else 'node' + str(i + 1)
                path_data[node + '_type'] = edge['info']['info']['@type']
                path_data[node + '_name'] = edge['output_name']
                path_data[node + '_id'] = edge['output_id']
            data.append(path_data)
    return pd.DataFrame(data).drop_duplicates()
from typing import Union
from typing import Sequence
def assemble_matrix(form: _fem.FormMetaClass,
                    constraint: Union[MultiPointConstraint,
                                      Sequence[MultiPointConstraint]],
                    bcs: Sequence[_fem.DirichletBCMetaClass] = [],
                    diagval: _PETSc.ScalarType = 1,
                    A: _PETSc.Mat = None) -> _PETSc.Mat:
    """
    Assemble a compiled DOLFINx bilinear form into a PETSc matrix with corresponding multi point constraints
    and Dirichlet boundary conditions.
    Parameters
    ----------
    form
        The compiled bilinear variational form
    constraint
        The multi point constraint (a single one applies to both rows and
        columns; a pair applies per axis)
    bcs
        Sequence of Dirichlet boundary conditions
    diagval
        Value to set on the diagonal of the matrix (Default 1)
    A
        PETSc matrix to assemble into (optional)
    Returns
    -------
    _PETSc.Mat
        The assembled bi-linear form
    """
    # A single constraint is only valid when trial and test spaces match;
    # duplicate it so rows and columns use the same constraint.
    if not isinstance(constraint, Sequence):
        assert(form.function_spaces[0] == form.function_spaces[1])
        constraint = (constraint, constraint)
    # Generate matrix with MPC sparsity pattern
    if A is None:
        A = cpp.mpc.create_matrix(form, constraint[0]._cpp_object,
                                  constraint[1]._cpp_object)
    A.zeroEntries()
    # Assemble matrix in C++
    cpp.mpc.assemble_matrix(A, form, constraint[0]._cpp_object,
                            constraint[1]._cpp_object, bcs, diagval)
    # Add one on diagonal for Dirichlet boundary conditions
    if form.function_spaces[0].id == form.function_spaces[1].id:
        # Flush cached entries before inserting diagonal values so the two
        # insertion modes do not conflict.
        A.assemblyBegin(_PETSc.Mat.AssemblyType.FLUSH)
        A.assemblyEnd(_PETSc.Mat.AssemblyType.FLUSH)
        _cpp.fem.petsc.insert_diagonal(A, form.function_spaces[0], bcs, diagval)
    A.assemble()
    return A
import torch
def getLayers(model):
    """
    Collect every (sub)layer of a model, keyed by a colon-joined path name.

    Leaf layers (no children) get their type appended, e.g. ``"0:0(Linear)"``.
    Top-level children keep their bare name with no type suffix, matching
    the original naming scheme.

    :param model: the model to inspect
    :return: dict mapping layer path names to their modules
    """
    layers = {}

    def walk(parent, prefix):
        """Recurse through named children, recording each under its path name."""
        for child_name, child in parent.named_children():
            kind = str(child).split('(')[0]
            has_children = bool(list(child.named_children()))
            if prefix:
                if has_children:
                    full_name = prefix + ":" + child_name
                else:
                    full_name = prefix + ":" + child_name + '(' + kind + ')'
            else:
                # Top level: bare name, with or without children.
                full_name = child_name
            layers[full_name] = child
            if isinstance(child, torch.nn.Module):
                walk(child, full_name)

    walk(model, '')
    return layers
def redeploy():
    """
    Implements redeploy handle
    Runs docker-compose pull and up commands for specified service
    Service must be preconfigured with yml file in SERVICES_DIR
    Docker URL can be configured with DOCKER_URL option
    """
    service = request.args.get("service", type=str)
    if not service:
        return "service not specified", 400
    # NOTE(review): `service` comes straight from the query string; confirm
    # _load_compose_config restricts it to configs under SERVICES_DIR
    # (path-traversal risk otherwise).
    config = _load_compose_config(service)
    if not config:
        return "can't load service config", 400
    project = Project.from_config(service, config, _make_docker_client())
    project.pull()
    # Recreate containers unconditionally and run detached (equivalent to
    # `docker-compose up -d --force-recreate`).
    project.up(strategy=ConvergenceStrategy.always, detached=True)
    return "ok"
import unittest
def unittests():
    """
    Short tests.

    Runs on CircleCI on every commit; discovers everything under the
    ``tests`` root directory and wraps it for CircleCI parallelism.
    """
    suite = unittest.TestLoader().discover('tests')
    return _circleci_parallelism(suite)
import requests
import json
def get_weekly_forecasts(country_code, zip_code):
    """
    Fetch the weekly forecast from the Weather.gov API for a given
    country and zip code (used by the web app for examining trends).

    Params:
        country_code (str) the requested country, like "US"
        zip_code (str) the requested postal code, like "20057"

    Example:
        result = get_weekly_forecasts(country_code="US", zip_code="20057")

    Returns a dict with the daytime forecast info ("weekly_forecasts")
    and the resolved geography ("city_name"), or None when the postal
    code cannot be geocoded or either API request fails.
    """
    geocoder = Geocoder(country_code)
    geo = geocoder.query_postal_code(zip_code)
    # using a null-checking method from pandas because geo is a pandas Series:
    if isnull(geo.latitude) or isnull(geo.longitude) or isnull(geo.place_name) or isnull(geo.state_code):
        return None
    # unfortunately the weather.gov api makes us do two requests or use a
    # more sophisticated caching strategy (see api docs)
    request_url = f"https://api.weather.gov/points/{geo.latitude},{geo.longitude}"
    response = requests.get(request_url)
    if response.status_code != 200:
        return None
    parsed_response = json.loads(response.text)
    forecast_url = parsed_response["properties"]["forecast"]
    forecast_response = requests.get(forecast_url)
    if forecast_response.status_code != 200:
        return None
    parsed_forecast_response = json.loads(forecast_response.text)
    city_name = f"{geo.place_name}, {geo.state_code}"  #> Washington, DC
    # Keep only the daytime periods (one per day); the API alternates
    # day/night entries, flagged by "isDaytime".
    weekly_forecasts = [
        {
            "day_of_week": period["name"],
            "temp": format_temp(period["temperature"], period["temperatureUnit"]),
            "conditions": period["shortForecast"],
            "image_url": period["icon"],
        }
        for period in parsed_forecast_response["properties"]["periods"]
        if period["isDaytime"]
    ]
    return {"city_name": city_name, "weekly_forecasts": weekly_forecasts}
import re
def parse_nml(string, ignore_comments=False):
    """Parse a Fortran-namelist string.

    Returns a list of ParamNml bundles, each with four attrs:
    name, value, help, group.

    Groups are delimited as ``&name ... /``; continuation lines (lines
    without ``=``) are merged into the preceding parameter line.
    """
    group_re = re.compile(r'&([^&]+)/', re.DOTALL) # allow blocks to span multiple lines
    # NOTE(review): array_re and string_re are compiled but unused here —
    # presumably consumed by _parse_line historically; confirm before removing.
    array_re = re.compile(r'(\w+)\((\d+)\)')
    # string_re = re.compile(r"\'\s*\w[^']*\'")
    string_re = re.compile(r"[\'\"]*[\'\"]")
    # self._complex_re = re.compile(r'^\((\d+.?\d*),(\d+.?\d*)\)$')
    # list of parameters
    params = []
    # groups = odict()
    filtered_lines = []
    for line in string.split('\n'):
        line = line.strip()
        if line == "":
            continue
        # drop full-line comments: they may contain forward-slashes that
        # would confuse the group-terminator regex
        if line.startswith('!'):
            continue
        # with ignore_comments=True, trailing '!' comments are stripped too
        if ignore_comments and '!' in line:
            line = line[:line.index('!')]
        filtered_lines.append(line)
    group_blocks = re.findall(group_re, "\n".join(filtered_lines))
    for i, group_block in enumerate(group_blocks):
        group_lines = group_block.split('\n')
        group_name = group_lines.pop(0).strip()
        # check for comments
        if "!" in group_name:
            # NOTE(review): group_name is truncated *before* group_help is
            # sliced from it, so group_help is always empty — and it is
            # never used afterwards. Also `i` shadows the enumerate index.
            # TODO confirm intended behaviour before fixing.
            i = group_name.index("!")
            group_name = group_name[:i].strip()
            group_help = group_name[i+1:].strip()
        # some lines are continuation of previous lines: filter
        joined_lines = []
        for line in group_lines:
            line = line.strip()
            if '=' in line:
                joined_lines.append(line)
            else:
                # continuation of previous line
                joined_lines[-1] += line
        group_lines = joined_lines
        for line in group_lines:
            name, value, comment = _parse_line(line)
            param = ParamNml(group_name, name, value, help=comment)
            # group[variable_name] = parsed_value
            params.append(param)
        # groups[group_name] = group
    return params
def glo2loc_2D(c,s):
    """
    Build rotation matrix from global to local 2D coordinate system.
    -------
    Inputs:
    c: cosine in radian of the angle from global to local coordinate system
    s: sine in radian of the angle from global to local coordinate system
    -------
    Output:
    R_m: 6x6 rotation matrix from global to local coordinate system
         (block-diagonal: the same 3x3 planar rotation for each of the
         two nodes' [u, v, theta] DOFs)
    """
    # numpy.matlib is deprecated; numpy.asmatrix keeps the numpy.matrix
    # return type of the original implementation without importing it.
    R_m = np.asmatrix(np.zeros((6, 6)))
    R_m[0, 0] = c
    R_m[0, 1] = s
    R_m[1, 0] = -s
    R_m[1, 1] = c
    R_m[2, 2] = 1
    # The lower-right 3x3 block repeats the upper-left one (second node).
    R_m[3:6, 3:6] = R_m[0:3, 0:3]
    return R_m
def model_scatter_2d(c=1500, dc=150, freq=25, dx=5, dt=0.0001, nx=[50, 50],
                     propagator=None, prop_kwargs=None):
    """Create a point scatterer model, and the expected waveform at point,
    and the forward propagated wave.

    A homogeneous model of wavespeed ``c`` gets a single point scatterer
    of amplitude ``dc`` near the bottom; source and receiver sit near the
    surface. Returns ``(expected, actual)``: the analytic scattered
    waveform and the flattened numerically propagated receiver trace.

    NOTE(review): ``nx`` is a mutable default argument; it is not mutated
    here (``np.array(nx)`` copies), but callers should not rely on that.
    """
    nx = np.array(nx)
    model = np.ones(nx, dtype=np.float32) * c
    # simulate long enough for the wave to travel down and back, plus margin
    nt = int((3*nx[0]*dx/c + 0.05)/dt)
    middle = int(nx[1]/2)
    # source/receiver near the surface; scatterer 10 cells above the bottom
    x_s, x_s_idx = _set_coords([[1, middle]], dx)
    x_r, x_r_idx = _set_coords([[1, middle]], dx)
    x_p, x_p_idx = _set_coords([[nx[0]-10, middle]], dx)
    f = ricker(freq, nt, dt, 0.05)
    model[x_p_idx[0, 0], x_p_idx[0, 1]] += dc
    expected = scattered_2d(x_r, x_s, x_p, dx, dt, c, dc, f)
    source, receiver_locations = _make_source_receiver(x_s_idx, x_r_idx, f)
    if propagator is None:
        propagator = Scalar2D
    if prop_kwargs is None:
        prop_kwargs = {}
    prop = propagator(model, dx, dt, source, **prop_kwargs)
    actual, _ = forward_model(prop, receiver_locations)
    return expected, actual.receivers.ravel()
def contours_and_bounding_boxes(bw_image, rgb_image):
    """Extract a labelled-region overlay and per-region bounding boxes.

    Parameters
    ----------
    bw_image: np.uint8
        Input thresholded (binary) image
    rgb_image: np.uint8
        Input rgb image

    Returns
    -------
    image_label_overlay : ndarray
        RGB image with the labelled regions colour-overlaid.
    bounding_boxes : list
        One ``((min_col, min_row), width, height)`` tuple per region
        (matplotlib Rectangle-style).
    """
    # drop regions touching the image border before labelling
    cleared = clear_border(bw_image)
    label_image = label(cleared)
    image_label_overlay = label2rgb(label_image, image=rgb_image)
    bounding_boxes = []
    for region in regionprops(label_image):
        # region.bbox is (min_row, min_col, max_row, max_col)
        minr, minc, maxr, maxc = region.bbox
        box = ((minc, minr), maxc - minc, maxr - minr)
        bounding_boxes.append(box)
    return image_label_overlay, bounding_boxes
def Compute_RHS_and_LHS(functional, testfunc, dofs, do_simplifications = False):
    """ This computes both the RHS vector and the LHS matrix

    Keyword arguments:
    functional -- The functional to derivate
    testfunc -- The test functions
    dofs -- The dofs vectors
    do_simplifications -- If apply simplifications

    Returns the pair (rhs, lhs); the LHS is derived from the RHS.
    """
    rhs = Compute_RHS(functional, testfunc, do_simplifications)
    return rhs, Compute_LHS(rhs, testfunc, dofs, do_simplifications)
import os
from datetime import datetime
import logging
def init_logging_yaml(config_file):
    """Initialize logging configuration via a dict loaded from a YAML file.

    Side effects: sets the module globals ``log_root``, ``log_file`` and
    ``log_dir``, creates the log directory if needed, optionally
    timestamps the log file name, and applies the config with
    ``logging.config.dictConfig``.

    Returns the module logger (``get_mod_logger()``), or None when the
    configuration dict is rejected.
    """
    # https://docs.python.org/3/library/logging.config.html#logging-config-api
    global log_root, log_file, log_dir
    yaml=YAML(typ='safe') # default, if not specfied, is 'rt' (round-trip)
    with open(config_file) as fh:
        text = fh.read()
    cfg = yaml.load(text)
    if 'log_root' in cfg:
        log_root = cfg['log_root']
    if 'log_dir' in cfg:
        d = cfg['log_dir']
        log_dir = os.path.join(os.getcwd(), d)
        # Create 'log' directory if not already there
        if not os.path.exists(log_dir):
            print(f"create: {log_dir}" )
            os.mkdir(log_dir)
    #print(f"root={log_root}")
    # default log_root to the first configured logger when not set
    for l in cfg['loggers'].keys():
        #print(f"root={l}")
        if not log_root:
            # default: first one
            log_root = l
        if l == log_root:
            #print("matches root")
            pass
    if not log_root in cfg['loggers']:
        raise Exception(f"invalid log root {log_root}")
    l = cfg['loggers'][log_root]
    #print(f"log level={l['level']}")
    log_file = cfg['handlers']['file']['filename']
    # NOTE(review): `t` is assigned but never used — TODO confirm/remove.
    t = cfg.get('timestamped', 0)
    if cfg.get('timestamped', 0):
        #ts = time.time()
        d = datetime.now()
        print(f"now={d} ")
        ts = d.strftime(timestamp_format)
        #log_file = re.sub('\.', ts + '.', log_file)
        # insert the timestamp before the extension (at the first '.')
        log_file = log_file.replace('.', ts + '.', 1)
        print(f"replace {cfg['handlers']['file']['filename']} ")
    if log_dir:
        log_file = os.path.join(log_dir, log_file)
        print(f"set log file={log_file}" )
    cfg['handlers']['file']['filename'] = log_file
    print(f"log file={log_file} ")
    #print(f"config={config_file} ")
    try:
        logging.config.dictConfig(cfg)
    except Exception as err:
        #log.critical(f"bad config file {config_file} - {err}")
        print(f"ERROR bad config file {config_file} - {err}")
        #exit(1)
        return
    return get_mod_logger()
def buscaVizinhos(matrizCapacidades):
    """Build the neighbour lists for every vertex of a capacity matrix.

    Any arc u -> v with positive capacity makes each endpoint a
    neighbour of the other (both directions are recorded).
    """
    vizinhos = {vertice: [] for vertice in range(len(matrizCapacidades))}
    for origem, linha in enumerate(matrizCapacidades):
        for destino, capacidade in enumerate(linha):
            if capacidade > 0:
                vizinhos[origem].append(destino)
                vizinhos[destino].append(origem)
    return vizinhos
import logging
def get_server_numeric_version(ami_env, is_local_run=False):
    """
    Resolve the numeric server version for the given AMI environment.

    Falls back to a permissive default ('99.99.98') whenever the version
    cannot be determined: local runs, a missing env-results file, or an
    ambiguous (zero or multiple) AMI name set.

    Arguments:
        ami_env: (str)
            AMI version name.
        is_local_run: (bool)
            when running locally, assume latest version.
    Returns:
        (str) Server numeric version
    """
    default_version = '99.99.98'
    if is_local_run:
        logging.info(f'Local run, assuming server version is {default_version}')
        return default_version
    env_entries = load_env_results_json()
    if not env_entries:
        logging.warning(f'Did not find {ENV_RESULTS_PATH} file, assuming server version is {default_version}.')
        return default_version
    # AMI names of every instance whose role matches the requested env.
    ami_names = {entry.get('AmiName') for entry in env_entries if ami_env in entry.get('Role', '')}
    if len(ami_names) != 1:
        logging.warning(f'Did not get one AMI Name, got {ami_names}.'
                        f' Assuming server version is {default_version}')
        return default_version
    return extract_server_numeric_version(next(iter(ami_names)), default_version)
def get_all_images():
    """
    Return every document in the collection as a list, stripping
    MongoDB's internal ``_id`` field from each.
    """
    projection = {'_id': False}
    return [document for document in cursor.find({}, projection)]
import typing
def historical_market_capitalization(
    apikey: str, symbol: str, limit: int = DEFAULT_LIMIT
) -> typing.List[typing.Dict]:
    """
    Query FMP /historical-market-capitalization/ API.

    :param apikey: Your API key.
    :param symbol: Company ticker.
    :param limit: Number of rows to return.
    :return: A list of dictionaries.
    """
    return __return_json(
        path=f"historical-market-capitalization/{symbol}",
        query_vars={"apikey": apikey, "limit": limit},
    )
def get_search_url(query=None, start=None, end=None, page=None):
    # type: (str, int, int, int) -> str
    """Build a search URL; any omitted parameter becomes the wildcard '+'.

    The archive and colour filters come from module configuration; a
    value of "All" is treated as the wildcard. Page defaults to 1.
    """
    wildcard = "+"
    if page is None:
        page = 1
    archive = wildcard if SEARCH_ARCHIVE == "All" else SEARCH_ARCHIVE
    colour = wildcard if SEARCH_COLOUR == "All" else SEARCH_COLOUR
    query, start, end = (wildcard if value is None else value
                         for value in (query, start, end))
    return BP_SEARCH_TEMPLATE.format(query, start, end, archive, colour,
                                     SEARCH_MAX_RESULTS, page)
from pathlib import Path
import hashlib
def hash_file(path: PathType, algo: str, enc: str = "utf-8", bsize: int = 65536) -> str:
    """
    Hash the name and text contents of a file.

    The file's base name is fed into the digest first, then the contents
    are streamed in chunks of *bsize* characters, each re-encoded with
    *enc* before hashing.

    Parameters
    ----------
    path : PathType
        file to hash.
    algo : str
        hash algorithm name supported by the hashlib Python module.
    enc : str, optional
        string encoding. The default is "utf-8".
    bsize : int, optional
        buffer/block size (characters) to read. The default is 65536.

    Returns
    -------
    str
        hex digest of name + contents.
    """
    path = Path(path)
    digest = getattr(hashlib, algo)()
    digest.update(path.name.encode(enc))
    with open(path, encoding=enc) as stream:
        while True:
            chunk = stream.read(bsize)
            if not chunk:
                break
            digest.update(chunk.encode(enc))
    return digest.hexdigest()
import json
import os
import shutil
def _check_json(out_file, status, hold_file):
"""Function: _check_json
Description: Private function for file_check function.
Arguments:
(input) out_file -> Path and file name of output file.
(input) status -> Status of check.
(input) hold_file -> Name of file if file check fail.
(output) status -> True|False - Status of check.
"""
try:
_ = json.load(open(out_file))
except ValueError:
status = False
print("\t\tError: %s is not in JSON format" % (out_file))
if not os.path.isfile(hold_file):
shutil.copy2(out_file, hold_file)
return status | 616ef16200e07975cdaedbebfa3fcf21746c367b | 3,633,072 |
import numpy
def _guess_z_grid_shape(x, y):
    """Guess the shape of a grid from (x, y) coordinates.

    The grid might contain more elements than x and y,
    as the last line might be partly filled.

    :param numpy.ndarray x:
    :param numpy.ndarray y:
    :returns: (order, (height, width)) of the regular grid,
        or None if could not guess one.
        'order' is 'row' if X (i.e., column) is the fast dimension,
        else 'column'.
    :rtype: Union[List(str,int),None]
    """
    n_cols = _get_z_line_length(x)
    if n_cols != 0:
        # x repeats every n_cols samples -> row-major grid
        return 'row', (int(numpy.ceil(len(x) / n_cols)), n_cols)
    n_rows = _get_z_line_length(y)
    if n_rows != 0:
        # y repeats every n_rows samples -> column-major grid
        return 'column', (n_rows, int(numpy.ceil(len(y) / n_rows)))
    return None
def get_hosts_with_state(state):
    """Helper function to check the maintenance status and return all hosts
    listed as being in a current state

    :param state: State we are interested in ('down_machines' or 'draining_machines')
    :returns: A list of hostnames in the specified state or an empty list if no machines
    """
    try:
        status = get_maintenance_status().json()
        status = status['get_maintenance_status']['status']
    except HTTPError as err:
        # Chain the original error so the failing request is not lost.
        raise HTTPError("Error getting maintenance status.") from err
    # Guard against a missing state key AND an empty machine list: the
    # original indexed [0] and raised IndexError when the list was empty.
    machines = status.get(state) if status else None
    if not machines:
        return []
    # Entries either nest the hostname under 'id' or carry it directly.
    if 'id' in machines[0]:
        return [machine['id']['hostname'] for machine in machines]
    return [machine['hostname'] for machine in machines]
def load_category_index(label_map_path, num_classes):
    """
    Load the category index from the label map at the given path.

    For example, a category index looks like::

        {1: {'id': 1, 'name': 'Green'},
         2: {'id': 2, 'name': 'Red'},
         3: {'id': 3, 'name': 'Yellow'}}
    """
    label_map = label_map_util.load_labelmap(label_map_path)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=num_classes, use_display_name=True)
    return label_map_util.create_category_index(categories)
def create_mask(M, N, path, radius):
    """
    Build the sparse occupancy mask used by FastDTW-style refinement.

    Each cell [i, j] of a warping path computed one resolution level up
    maps to the 2x2 block starting at [2*i, 2*j] at this level; a margin
    of `radius` cells is added around that block.

    Parameters
    ----------
    M: int
        Number of points in the first trajectory
    N: int
        Number of points in the second trajectory
    path: list of [i, j]
        A warping path one level up
    radius: int
        Half the width of the box to place around [2*i, 2*j]
        for each [i, j] one level up

    Returns
    -------
    An MxN sparse array which has a 1 in every cell
    that needs to be checked and a 0 elsewhere
    """
    Occ = sparse.lil_matrix((M, N))
    for i, j in path:
        # Box covering the upsampled 2x2 block plus the radius margin,
        # clipped to the matrix bounds (half-open upper limits).
        row_lo = max(0, 2 * i - radius)
        row_hi = min(M, 2 * i + radius + 2)
        col_lo = max(0, 2 * j - radius)
        col_hi = min(N, 2 * j + radius + 2)
        Occ[row_lo:row_hi, col_lo:col_hi] = 1
    return Occ
def format_hostname(domain_parts, uid):
    """Formats hostname for a docker based on domain parts and uid.
    NOTE: Hostnames are also used as docker names!
    domain_parts - a single or a list of consecutive domain parts that constitute a unique name
                   within environment e.g.: ['worker1', 'prov1'], ['ccm1', 'prov1'], 'client1'
    uid - timestamp
    """
    # `str` covers text strings on Python 3; the original checked
    # `(str, unicode)`, which raises NameError under Python 3.
    if isinstance(domain_parts, str):
        domain_parts = [domain_parts]
    domain_parts.extend([uid, env_domain_name()])
    # Throw away any '@' signs - they were used some time ago in node names
    # and might still occur by mistake.
    return '.'.join(domain_parts).replace('@', '')
def get_aspect_ratio(width_first=True):
    """
    Returns the aspect ratio of the game window.

    If width_first is True (default), returns width / height;
    otherwise returns height / width.
    :param width_first: Bool - Whether to divide the width by the height.
    :return: Float - The aspect ratio of the window
    """
    if width_first:
        return render.getWindowWidth() / render.getWindowHeight()
    else:
        return render.getWindowHeight() / render.getWindowWidth()
import os
from datetime import datetime
def upload_to_blob(file_path, client, container):
    """
    Upload a file to Azure Blob storage.

    Args:
        file_path (str): Path of the file to upload.
        client (`azure.storage.blob.BlockBlobService`): Blob service
        container (str): Name for the container

    Returns:
        (`azure.batch.models.ResourceFile`): A resource file object containing
        the location of the file uploaded.
    """
    # Get name of file
    file_name = os.path.basename(file_path)
    # Create blob from the file, using it's file name
    client.create_blob_from_path(container, file_name, file_path)
    # Create a shared access signature (SAS) token to let nodes in the pool
    # access the data in blob storage (read-only, valid for two hours).
    # NOTE(review): `datetime.datetime.utcnow()` requires the *module*
    # import (`import datetime`), not `from datetime import datetime` —
    # confirm the file-level import matches.
    sas_token = client.generate_blob_shared_access_signature(
        container,
        file_name,
        permission=azureblob.BlobPermissions.READ,
        expiry=datetime.datetime.utcnow() + datetime.timedelta(hours=2)
    )
    # Create a uri for the blob.
    sas_url = client.make_blob_url(
        container,
        file_name,
        sas_token=sas_token
    )
    return batchmodels.ResourceFile(
        http_url=sas_url,
        file_path=file_name
    )
def _environ_cols_wrapper():  # pragma: no cover
    """
    Return a function which returns console width.
    Supported: linux, osx, windows, cygwin.

    Deprecated: kept only for backwards compatibility; use
    ``_screen_shape_wrapper()(file)[0]`` instead.
    """
    warn("Use `_screen_shape_wrapper()(file)[0]` instead of"
         " `_environ_cols_wrapper()(file)`", DeprecationWarning, stacklevel=2)
    shape = _screen_shape_wrapper()
    if not shape:
        return None
    @wraps(shape)
    def inner(fp):
        # width is the first element of the (columns, rows) shape
        return shape(fp)[0]
    return inner
from typing import Optional
def encode(content, encoding: Optional[str] = "json") -> Response:
    """Encode content in given encoding.

    Warning: Not all encodings supports all types of content.

    :param content: Content to encode
    :param encoding:
        - `json` (default)
        - `bin`: nD array/scalars in bytes
        - `csv`: nD arrays in downloadable csv files
        - `npy`: nD arrays in downloadable npy files
        - `tiff`: 2D arrays in downloadable TIFF files
    :returns: A Response object containing content and headers
    :raises ValueError: If encoding is not among the ones above.
    """
    # `None` is an alias for the default JSON encoding.
    if encoding is None:
        encoding = "json"
    # Encoder callable and response headers per supported encoding.
    dispatch = {
        "json": (orjson_encode, {"Content-Type": "application/json"}),
        "bin": (bin_encode, {"Content-Type": "application/octet-stream"}),
        "csv": (csv_encode, {
            "Content-Type": "text/csv",
            "Content-Disposition": 'attachment; filename="data.csv"',
        }),
        "npy": (npy_encode, {
            "Content-Type": "application/octet-stream",
            "Content-Disposition": 'attachment; filename="data.npy"',
        }),
        "tiff": (tiff_encode, {
            "Content-Type": "image/tiff",
            "Content-Disposition": 'attachment; filename="data.tiff"',
        }),
    }
    if encoding not in dispatch:
        raise ValueError(f"Unsupported encoding {encoding}")
    encoder, headers = dispatch[encoding]
    return Response(encoder(content), headers=headers)
def yesNoDialog(parent, msg, title):
    """
    Convenience function to display a modal Yes/No dialog

    Args:
        parent: parent QWidget (also supplies the dialog font)
        msg (str): message text shown in the dialog body
        title (str): window title

    Returns:
        bool: return True if yes button press. No otherwise
    """
    m = QMessageBox(parent)
    m.setText(msg)
    m.setIcon(QMessageBox.Question)
    # Text.txt0082 is presumably the localized "Yes" label — confirm.
    yesButton = m.addButton(_(Text.txt0082), QMessageBox.ButtonRole.YesRole)
    noButton = m.addButton(" No ", QMessageBox.ButtonRole.NoRole)
    # default to No so Enter does not accidentally confirm
    m.setDefaultButton(noButton)
    m.setFont(parent.font())
    m.setWindowTitle(title)
    m.exec_()
    if m.clickedButton() == yesButton:
        return True
    return False
def import_all_references(except_namespaces = []):
    """Import (merge) the contents of all loaded reference files into the
    current scene, skipping references whose namespace is listed in
    ``except_namespaces``.

    NOTE(review): ``except_namespaces`` is a mutable default argument
    (not mutated here, but fragile), and the function returns True on
    every path — confirm callers only test truthiness.
    """
    done = False
    # Keep iterating while references remain; importing one reference can
    # expose nested references on the next pass.
    while (done == False or (len(pm.listReferences()) != 0)):
        refs = pm.listReferences()
        #get rel refs
        pro = []
        if except_namespaces:
            for ref in refs:
                if ref.namespace not in except_namespaces:
                    #refs.remove(ref)
                    pro.append(ref)
        if pro:
            refs = pro
        sn = len(refs)
        en = 0
        for ref in refs:
            if ref.isLoaded():
                done = False
                ref.importContents()
            else:
                # unloaded references cannot be imported; count them so the
                # loop can terminate once only those remain
                en += 1
                done = True
        if sn == en:
            return True
    return True
from labels import get_annotation
def get_annotations(element):
"""
returns a dictionary of all the annotation features of an element,
e.g. tiger.pos = ART or coref.type = anaphoric.
"""
annotations = {}
for label in element.getchildren():
if get_xsi_type(label) == 'saltCore:SAnnotation':
annotations.update([get_annotation(label)])
return annotations | 7189e3f7b3d671af40b689c2586d373d344ca10c | 3,633,084 |
def _prefAdj(coupling, leg):
    """Prefactor for the creation of an adjoint

    Only implemented for regular three-legged tensors with an (in, in, out)
    flow and their adjoints at the moment.

    Returns a dict mapping symmetry sector name ('fermionic', 'SU(2)') to
    a function that computes the ±1. prefactor for a given block key.
    """
    if len(coupling) != 1:
        raise NotImplementedError("Only for three-legged tensors")
    flow = tuple(c[1] for c in coupling[0])
    if flow != (True, True, False) and flow != (True, False, False):
        raise NotImplementedError("Only (in, in, out) and its adjoint allowed")
    if flow == (True, False, False):
        # adjoint flow: read the key in reversed leg order
        def loop(x):
            return x[::-1]
    else:
        def loop(x):
            return x
    # index of the requested leg within the (possibly reversed) coupling,
    # or None when no leg was given
    fid = [c[0] for c in loop(coupling[0])].index(leg) if leg else None
    # fermionic sector: sign from the parity of selected quantum numbers
    fpref = {
        0: lambda key: 1. if loop(key[0])[1] % 2 == 0 else -1.,
        1: lambda key: 1. if loop(key[0])[0] % 2 == 0 else -1.,
        2: lambda key: 1.,
        None: lambda key: 1. if loop(key[0])[2] % 2 == 0 else -1.,
    }[fid]
    def su2pref(key):
        # SU(2) sector: sign from the signed sum of the three key entries
        sign = (1, 1, -1)
        return 1. if sum(s * k for s, k in
                         zip(sign, loop(key[0]))) % 4 == 0 else -1.
    return {'fermionic': fpref, 'SU(2)': su2pref}
def recon_traj_with_preds(dataset, preds, seq_id=0, **kwargs):
    """
    Reconstruct a trajectory by integrating predicted global velocities.

    The cumulative sum of the predicted 3D velocities (times the mean
    sample interval) is anchored at the sequence's first ground-truth
    position and interpolated back onto the full timestamp grid.
    """
    ts = dataset.ts[seq_id]
    ind = np.array([entry[1] for entry in dataset.index_map if entry[0] == seq_id], dtype=int)
    # mean time step between consecutive prediction samples
    dts = np.mean(ts[ind[1:]] - ts[ind[:-1]])
    n_preds = preds.shape[0]
    pos = np.zeros([n_preds + 2, 3])
    pos[0] = dataset.gt_pos[seq_id][0, :3]
    pos[1:-1] = pos[0] + np.cumsum(preds[:, :3] * dts, axis=0)
    # duplicate the last point so interpolation covers the final timestamp
    pos[-1] = pos[-2]
    ts_ext = np.concatenate([[ts[0] - 1e-06], ts[ind], [ts[-1] + 1e-06]], axis=0)
    return interp1d(ts_ext, pos, axis=0)(ts)
def fitfunPowerLaw(fitparamStart, fixedparam, fitInfo, x, y):
    """
    Power law fit function
    y = A * x**B + C
    ========== ===============================================================
    Input      Meaning
    ---------- ---------------------------------------------------------------
    fitparamStart List with starting values for the fit parameters:
                    order: [A, B, C]
                    E.g. if only A and B are fitted, this becomes a two
                    element vector [1e-4, 0.2]
    fixedparam List with values for the fixed parameters:
                    order: [A, B, C]
                    same principle as fitparamStart
    fitInfo    np.array boolean vector with always 3 elements
                    1 for a fitted parameter, 0 for a fixed parameter
    x          Vector with x values
    y          Vector with experimental y values
    ========== ===============================================================
    Output     Meaning
    ---------- ---------------------------------------------------------------
    res        Residuals
    ========== ===============================================================
    """
    # merge fitted and fixed parameters back into the [A, B, C] order
    fitparam = np.float64(np.zeros(len(fitInfo)))
    fitparam[fitInfo==1] = fitparamStart
    fitparam[fitInfo==0] = fixedparam
    # get parameters
    A = fitparam[0]
    B = fitparam[1]
    C = fitparam[2]
    # calculate theoretical power law function
    ytheo = A * x**B + C
    # calculate residuals
    res = y - ytheo
    return res
def make_2d_histogram(x, y, n_bins, xlabel, ylabel, cbar_label,
                      figsize=(12, 4)):
    """
    Generate a rainbow-colored 2D histogram.

    :param x: X-axis values, i.e. barcode group indices
    :param y: Y-axis values corresponding to x
    :param n_bins: (x,y) bin sizes; x should usually be 1
    :param xlabel: X-axis label
    :param ylabel: Y-axis label
    :param cbar_label: Color bar label
    :param figsize: figure size in inches, passed to plt.figure
    :returns: tuple of (matplotlib figure, axes)
    """
    cmap = plt.cm.get_cmap("Spectral_r")
    # empty bins (count 0) render light grey instead of the colormap floor
    cmap.set_under(color=(0.875, 0.875, 0.875))  # pylint: disable=no-member
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(111)
    ax.patch.set_facecolor((0.875, 0.875, 0.875))
    ax.grid(color="white", linewidth=0.5, linestyle='-')
    counts, xedges, yedges, im = ax.hist2d(x, y, cmap=cmap, vmin=1,
                                           bins=n_bins)
    # x-axis padding scales with the number of x bins
    x_margin = 5
    if n_bins[0] < 20:
        x_margin = 1
    elif n_bins[0] < 50:
        x_margin = 2
    ymax = max(y) if len(y) > 0 else 1
    ax.set_xlim(-x_margin, n_bins[0] + x_margin)
    ax.set_ylim(-(int(ymax * 0.05)), ymax + int(ymax * (0.05)))
    ax.set_ylabel(ylabel)
    ax.set_xlabel(xlabel)
    for spine in ["left", "right", "top", "bottom"]:
        ax.spines[spine].set_visible(False)
    cbar = fig.colorbar(im, ax=ax, fraction=0.05, pad=0.01)
    cbar.ax.set_ylabel(cbar_label)
    fig.tight_layout()
    return fig, ax
def ingest_cop(years, month):
    """
    Load monthly total-ozone-column grids from local Copernicus netCDF files.

    Args:
        years: list of year strings, e.g. ['2018', '2019']
        month: str, zero-padded month, e.g. '01'

    Returns: list of per-year 2D arrays of total ozone column values
    """
    cop_data = []
    # hoisted out of the loop: same directory for every year
    directory = 'data\\raw\\copernicus\\'
    for year in years:
        wrf_file_name = year + month + '-C3S-L4_OZONE-O3_PRODUCTS-MSR-ASSIM-ALG-MONTHLY-v0021.nc'
        nc = netcdf.netcdf_file(directory + wrf_file_name, 'r')
        try:
            # copy, as netcdf_file gives a direct (mmap) view into the file
            data = nc.variables['total_ozone_column'][0][:].copy()
        finally:
            # close the dataset; the original leaked one handle per year
            nc.close()
        cop_data.append(data)
    return cop_data
import os
import glob
def utils_files_count(directory):
    """Return the number of regular files under *directory*, searched
    recursively.

    Returns 0 for a missing directory. (The original glob-based version
    missed files directly under *directory* and counted subdirectory
    entries that were themselves directories.)
    """
    if not os.path.exists(directory):
        return 0
    return sum(len(files) for _, _, files in os.walk(directory))
from scipy.io.wavfile import read as readwav
def readwav(filename):
    """Read a WAV file and return the data and sample rate
    ::

        from spectrum.io import readwav
        signal, samplerate = readwav(filename)

    :param filename: path to the WAV file
    :return: tuple ``(signal, samplerate)``
    """
    # Import locally under a distinct name: the module-level alias
    # `read as readwav` was shadowed by this function, so the original
    # body called itself and recursed forever.
    from scipy.io import wavfile
    samplerate, signal = wavfile.read(filename)
    return signal, samplerate
def get_connections_from_file(parent, filename):
    """Load CSW connections from an XML connection file.

    Shows a message box (parented to *parent*) and returns None when the
    file cannot be opened/parsed or is not a qgsCSWConnections document;
    otherwise returns the parsed XML root element.
    """
    error = 0
    try:
        doc = etree.parse(filename).getroot()
        if doc.tag != 'qgsCSWConnections':
            error = 1
            msg = parent.tr('Invalid CSW connections XML.')
    except etree.ParseError as err:
        error = 1
        msg = parent.tr('Cannot parse XML file: {0}').format(err)
    except IOError as err:
        error = 1
        msg = parent.tr('Cannot open file: {0}').format(err)
    if error == 1:
        QMessageBox.information(parent, parent.tr('Loading Connections'), msg)
        # implicit None return signals failure to the caller
        return
    return doc
from typing import List
def extract_provenance_chain(credential: Credential) -> List[Credential]:
    """
    Extract the provenance chain into an ordered list of credentials.

    The root credential is at the start of the returned list and the
    given credential is last.
    """
    # Iterative walk: avoids the original recursion (unbounded depth on
    # long chains) and its shadowing of the `next` builtin.
    chain = [credential]
    current = credential
    while current.provenance:
        current = current.provenance
        chain.append(current)
    chain.reverse()
    return chain
def make_induces(x, y):
    """return [ [(0,0), (0,1),...,(0,y-1)], [(1,0),...], [(x-1, 0), (x-1, 1), ..., (x-1, y-1)]

    i.e. an (x, y, 2) tensor where entry [i, j] holds the pair (i, j).
    """
    row_ids = tf.tile(tf.expand_dims(tf.range(0, x), 1), [1, y])
    col_ids = tf.tile(tf.expand_dims(tf.range(0, y), 0), [x, 1])
    return tf.stack([row_ids, col_ids], axis=2)
def unused_argument(editor, item):
    """ Pylint unused-argument method

    Logs the offending source line and returns its (line, column) pair;
    the column is always 0 for this message type.
    """
    line_no = item.line_no
    LOGGER.info("unused argument: {0}".format(editor.lines[line_no]))
    return (line_no, 0)
def fitjordan(f, B, losses, Bo, fo):
    """fit coeffs of
    losses(f,B)=(ch*(f/fo)**alpha + cw*(f/fo)**beta)*(B/Bo)**gamma
    returns (ch, alpha, cw, beta, gamma)
    """
    pfe = np.asarray(losses).T
    # flatten into (frequency, induction, loss) triples, skipping
    # zero/missing entries; B may be shared across frequencies (flat
    # float list) or given per frequency (nested lists)
    z = []
    for i, fx in enumerate(f):
        if fx:
            if isinstance(B[0], float):
                z += [(fx, bx, y)
                      for bx, y in zip(B, pfe[i])
                      if y]
            else:
                z += [(fx, bx, y)
                      for bx, y in zip(B[i], pfe[i])
                      if y]
    fbx = np.array(z).T[0:2]
    y = np.array(z).T[2]
    # least-squares fit of the Jordan loss model around the (fo, Bo)
    # reference point; starting guess (1, 1, 1, 2, 1)
    fitp, cov = so.curve_fit(lambda x, ch, alpha, cw, beta, gamma: pfe_jordan(
        x[0], x[1], ch, alpha, cw, beta, gamma, fo, Bo),
        fbx, y, (1.0, 1.0, 1.0, 2.0, 1.0))
    return fitp
import itertools
def count_temporal_motif(G, sequence, delta, get_count_dict=False):
    """Count all temporal motifs.

    Parameters
    ----------
    G : the graph to count temporal motif from. This function only supports ImpulseDiGraph
    sequence: a sequence of edges specifying the order of the motif. For example ((1,2), (2,3), (2,1)) means
       1 -> 2 then 2 -> 3 then 2 -> 1. Note: The motif has to be connected.
    delta: time window that specifies the maximum time limit that all edges in a motif must occur within.
    get_count_dict: if True, return the motif count dictionary, which provides greater detail about which
       motifs appear in a certain type of motif. If False, only returns the total count of all motifs of that type.

    Returns
    -------
    count dictionary or total motif count

    Examples
    --------
    >>> G = dnx.ImpulseDiGraph()
    >>> G.add_edge(1, 2, 30)
    >>> G.add_edge(3, 2, 30)
    >>> G.add_edge(4, 2, 30)
    >>> G.add_edge(2, 5, 32)
    >>> G.add_edge(2, 5, 33)
    >>> dnx.count_temporal_motif(G, ((1, 2), (2, 3), (2, 3)), 3)
    3
    >>> dnx.count_temporal_motif(G, ((1, 2), (2, 3), (2, 3)), 3, get_count_dict=True)
    {(1, 2, 2, 5, 2, 5): 1, (4, 2, 2, 5, 2, 5): 1, (3, 2, 2, 5, 2, 5): 1}
    """
    if not isinstance(G, dnx.ImpulseDiGraph):
        raise TypeError('This function only supports ImpulseDiGraph')
    total_counts = dict()
    # this is used later for checking matching sequences
    node_sequence = tuple(node for edge in sequence for node in edge)
    g = Graph(G.to_networkx_graph())
    static_motif = Graph()
    static_motif.add_edges_from(sequence)
    # enumerate every connected static subgraph of the motif's node count,
    # then count matching temporal sequences inside each
    for sub in __enumerate_subgraphs(g, size_k=len(static_motif.nodes())):
        # A way to check if nodes in sub may contain motif will help speed up. Using nx.is_isomorphic() will
        # create error by dropping a lot of potential subgraphs.
        counts = dict()
        edges = list()
        # collect every timestamped edge between nodes of the subgraph
        for u, v in itertools.combinations(sub.nodes(), 2):
            edges.extend(G.edges(u, v))
            edges.extend(G.edges(v, u))
        # Motifs with self-loops won't be duplicated when iterating through subgraphs
        for u in sub.nodes():
            edges.extend(G.edges(u, u))
        edges = sorted(edges, key=lambda x: x[2])
        # Count all possible sequences from edges of the static subgraph
        # using a sliding time window of width delta over the sorted edges
        start = 0
        end = 0
        while end < len(edges):
            while edges[start][2] + delta < edges[end][2]:
                # combine all edges having the same timestamps to decrement counts
                tmp_time = edges[start][2]
                same_time_edges = list()
                while edges[start][2] == tmp_time:
                    same_time_edges.append(edges[start][0:2])
                    start += 1
                    if start >= len(edges):
                        break
                __decrement_counts(same_time_edges, len(sequence), counts)
            # combine all edges having the same timestamps to increment counts
            tmp_time = edges[end][2]
            same_time_edges = list()
            while edges[end][2] == tmp_time:
                same_time_edges.append(edges[end][0:2])
                end += 1
                if end >= len(edges):
                    break
            __increment_counts(same_time_edges, len(sequence), counts)
        # Extract out count for sequences that are isomorphic to the temporal motifs
        for keys in sorted(counts.keys()):
            if len(keys) / 2 == len(sequence):
                if counts[keys] == 0:
                    continue
                node_map = dict()
                isomorphic = True
                # check matching sequences (node sequence vs key): build a
                # consistent one-to-one node mapping or reject the key
                for n in range(len(node_sequence)):
                    if node_map.get(node_sequence[n]):
                        if node_map[node_sequence[n]] == keys[n]:
                            continue
                        else:
                            isomorphic = False
                            break
                    else:
                        if not keys[n] in node_map.values():
                            node_map[node_sequence[n]] = keys[n]
                        else:
                            isomorphic = False
                            break
                if isomorphic:
                    total_counts[keys] = counts[keys]
    if get_count_dict:
        return total_counts
    else:
        return sum(total_counts.values())
from datetime import datetime
def calc_expiry_time(minutes_valid):
    """Return specific time an auth_hash will expire.

    One extra minute is added, then seconds/microseconds are zeroed so
    the expiry lands on a whole-minute boundary.
    """
    expiry = timezone.now() + datetime.timedelta(minutes=minutes_valid + 1)
    return expiry.replace(second=0, microsecond=0)
import re
def parse(s):
    """
    Parse an XML tree from the given string, removing all
    of the included namespace strings from element tags.
    """
    root = etree.fromstring(s)
    strip_ns = re.compile(r'^{.*?}')
    for node in root.iter():
        node.tag = strip_ns.sub('', node.tag)
    return root
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.