| content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
|---|---|---|
def isready() -> bool:
"""Is the embedded R ready for use."""
INITIALIZED = RPY_R_Status.INITIALIZED
return bool(
rpy2_embeddedR_isinitialized == INITIALIZED.value
)
|
ce9bc69c897004f135297331c33101e30e71dca7
| 3,649,600
|
def yolo2_loss(args, anchors, num_classes, label_smoothing=0, use_crossentropy_loss=False, use_crossentropy_obj_loss=False, rescore_confidence=False):
"""YOLOv2 loss function.
Parameters
----------
yolo_output : tensor
Final convolutional layer features.
true_boxes : tensor
Ground truth boxes tensor with shape [batch, num_true_boxes, 5]
containing box x_center, y_center, width, height, and class.
y_true : array
output of preprocess_true_boxes, with shape [conv_height, conv_width, num_anchors, 6]
anchors : tensor
Anchor boxes for model.
num_classes : int
Number of object classes.
rescore_confidence : bool, default=False
If true then set confidence target to IOU of best predicted box with
the closest matching ground truth box.
Returns
-------
total_loss : float
total mean YOLOv2 loss across minibatch
"""
(yolo_output, true_boxes, y_true) = args
num_anchors = len(anchors)
yolo_output_shape = K.shape(yolo_output)
input_shape = yolo_output_shape[1:3] * 32
object_scale = 5
no_object_scale = 1
class_scale = 1
coordinates_scale = 1
object_mask = y_true[..., 4:5]
pred_xy, pred_wh, pred_confidence, pred_class_prob = yolo2_head(
yolo_output, anchors, num_classes, input_shape)
# Unadjusted box predictions for loss.
# TODO: Remove extra computation shared with yolo2_head.
batch_size = yolo_output_shape[0] # batch size, tensor
batch_size_f = K.cast(batch_size, K.dtype(yolo_output))
feats = K.reshape(yolo_output, [
-1, yolo_output_shape[1], yolo_output_shape[2], num_anchors,
num_classes + 5
])
pred_boxes = K.concatenate(
(K.sigmoid(feats[..., 0:2]), feats[..., 2:4]), axis=-1)
# TODO: Adjust predictions by image width/height for non-square images?
# IOUs may be off due to different aspect ratio.
# Expand pred x,y,w,h to allow comparison with ground truth.
# batch, conv_height, conv_width, num_anchors, num_true_boxes, box_params
pred_xy = K.expand_dims(pred_xy, 4)
pred_wh = K.expand_dims(pred_wh, 4)
pred_wh_half = pred_wh / 2.
pred_mins = pred_xy - pred_wh_half
pred_maxes = pred_xy + pred_wh_half
true_boxes_shape = K.shape(true_boxes)
# batch, conv_height, conv_width, num_anchors, num_true_boxes, box_params
true_boxes = K.reshape(true_boxes, [
true_boxes_shape[0], 1, 1, 1, true_boxes_shape[1], true_boxes_shape[2]
])
true_xy = true_boxes[..., 0:2]
true_wh = true_boxes[..., 2:4]
# Find IOU of each predicted box with each ground truth box.
true_wh_half = true_wh / 2.
true_mins = true_xy - true_wh_half
true_maxes = true_xy + true_wh_half
intersect_mins = K.maximum(pred_mins, true_mins)
intersect_maxes = K.minimum(pred_maxes, true_maxes)
intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]
pred_areas = pred_wh[..., 0] * pred_wh[..., 1]
true_areas = true_wh[..., 0] * true_wh[..., 1]
union_areas = pred_areas + true_areas - intersect_areas
iou_scores = intersect_areas / union_areas
# Best IOUs for each location.
best_ious = K.max(iou_scores, axis=4) # Best IOU scores.
best_ious = K.expand_dims(best_ious)
# A detector has found an object if IOU > thresh for some true box.
object_detections = K.cast(best_ious > 0.6, K.dtype(best_ious))
# TODO: Darknet region training includes extra coordinate loss for early
# training steps to encourage predictions to match anchor priors.
# Determine confidence weights from object and no_object weights.
# NOTE: YOLOv2 does not use binary cross-entropy. Here we try it.
no_object_weights = (no_object_scale * (1 - object_detections) *
(1 - object_mask))
if use_crossentropy_obj_loss:
no_objects_loss = no_object_weights * K.binary_crossentropy(K.zeros(K.shape(pred_confidence)), pred_confidence, from_logits=False)
if rescore_confidence:
objects_loss = (object_scale * object_mask *
K.binary_crossentropy(best_ious, pred_confidence, from_logits=False))
else:
objects_loss = (object_scale * object_mask *
K.binary_crossentropy(K.ones(K.shape(pred_confidence)), pred_confidence, from_logits=False))
else:
no_objects_loss = no_object_weights * K.square(-pred_confidence)
if rescore_confidence:
objects_loss = (object_scale * object_mask *
K.square(best_ious - pred_confidence))
else:
objects_loss = (object_scale * object_mask *
K.square(1 - pred_confidence))
confidence_loss = objects_loss + no_objects_loss
# Classification loss for matching detections.
# NOTE: YOLOv2 does not use categorical cross-entropy loss.
# Here we try it.
matching_classes = K.cast(y_true[..., 5], 'int32')
matching_classes = K.one_hot(matching_classes, num_classes)
if label_smoothing:
matching_classes = _smooth_labels(matching_classes, label_smoothing)
if use_crossentropy_loss:
classification_loss = (class_scale * object_mask *
K.expand_dims(K.categorical_crossentropy(matching_classes, pred_class_prob, from_logits=False), axis=-1))
else:
classification_loss = (class_scale * object_mask *
K.square(matching_classes - pred_class_prob))
# Coordinate loss for matching detection boxes.
matching_boxes = y_true[..., 0:4]
coordinates_loss = (coordinates_scale * object_mask *
K.square(matching_boxes - pred_boxes))
confidence_loss_sum = K.sum(confidence_loss) / batch_size_f
classification_loss_sum = K.sum(classification_loss) / batch_size_f
coordinates_loss_sum = K.sum(coordinates_loss) / batch_size_f
total_loss = 0.5 * (
confidence_loss_sum + classification_loss_sum + coordinates_loss_sum)
# Fit for tf 2.0.0 loss shape
total_loss = K.expand_dims(total_loss, axis=-1)
return total_loss, coordinates_loss_sum, confidence_loss_sum, classification_loss_sum
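# A minimal sketch (not part of the source) of wiring this loss into a Keras
# training graph with a Lambda layer. `yolo_body_output`, `true_boxes_input`,
# `y_true_input`, `anchors` and `num_classes` are assumed placeholders defined
# elsewhere; treat this as illustrative only.
from tensorflow.keras.layers import Lambda

loss_tensors = Lambda(
    yolo2_loss,
    name='yolo2_loss',
    arguments={'anchors': anchors, 'num_classes': num_classes},
)([yolo_body_output, true_boxes_input, y_true_input])
total_loss = loss_tensors[0]  # first output is the combined scalar loss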
|
bd0c123872e564beee45c0a9084bb043eb03b778
| 3,649,601
|
from typing import Optional
from typing import Dict
from typing import Any
from typing import Tuple
import types
def create_compressed_model(model: tf.keras.Model,
config: NNCFConfig,
compression_state: Optional[Dict[str, Any]] = None) \
-> Tuple[CompressionAlgorithmController, tf.keras.Model]:
"""
The main function used to produce a model ready for compression fine-tuning
from an original TensorFlow Keras model and a configuration object.
:param model: The original model. Should have its parameters already loaded
from a checkpoint or another source.
:param config: A configuration object used to determine the exact compression
modifications to be applied to the model.
:param compression_state: compression state to unambiguously restore the compressed model.
Includes builder and controller states. If it is specified, trainable parameter initialization will be skipped
during building.
:return: A tuple (compression_ctrl, compressed_model) where
- compression_ctrl: The controller of the compression algorithm.
- compressed_model: The model with additional modifications
necessary to enable algorithm-specific compression during fine-tuning.
"""
model = get_built_model(model, config)
original_model_accuracy = None
if is_accuracy_aware_training(config, compression_config_passed=True):
if config.has_extra_struct(ModelEvaluationArgs):
evaluation_args = config.get_extra_struct(ModelEvaluationArgs)
original_model_accuracy = evaluation_args.eval_fn(model)
builder = create_compression_algorithm_builder(config, should_init=not compression_state)
if compression_state:
builder.load_state(compression_state[BaseController.BUILDER_STATE])
compressed_model = builder.apply_to(model)
compression_ctrl = builder.build_controller(compressed_model)
compressed_model.original_model_accuracy = original_model_accuracy
if isinstance(compressed_model, tf.keras.Model):
compressed_model.accuracy_aware_fit = types.MethodType(accuracy_aware_fit, compressed_model)
return compression_ctrl, compressed_model
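# A usage sketch (assumes the nncf and tensorflow packages are installed; the
# config keys follow NNCF's documented JSON schema, and the algorithm name
# below is only an example).
import tensorflow as tf
from nncf import NNCFConfig

nncf_config = NNCFConfig.from_dict({
    "input_info": {"sample_size": [1, 224, 224, 3]},
    "compression": {"algorithm": "magnitude_sparsity"},
})
base_model = tf.keras.applications.MobileNetV2(weights=None)
compression_ctrl, compressed_model = create_compressed_model(base_model, nncf_config)
# compressed_model is then fine-tuned as usual; compression_ctrl exposes
# statistics and export helpers.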
|
42ffc9c9426ce8b95db05e042fa2d51098fc544f
| 3,649,602
|
def load_misc_config():
"""Load misc configuration.
Returns: Misc object for misc config.
"""
return Misc(config.load_config('misc.yaml'))
|
b1eb2e8cc3e836b846d292c03bd28c4449d80805
| 3,649,603
|
def filter_activations_remove_neurons(X, neurons_to_remove):
"""
Filter activations so that they do not contain specific neurons.
    .. note::
        The returned value is a copy (NumPy advanced indexing), so modifying it
        will not affect the original matrix.
Parameters
----------
X : numpy.ndarray
Numpy Matrix of size [``NUM_TOKENS`` x ``NUM_NEURONS``]. Usually the
output of ``interpretation.utils.create_tensors``
neurons_to_remove : list or numpy.ndarray
List of neurons to remove
Returns
-------
    filtered_X : numpy.ndarray
Numpy Matrix of size [``NUM_TOKENS`` x ``NUM_NEURONS - len(neurons_to_remove)``]
"""
neurons_to_keep = np.arange(X.shape[1])
neurons_to_keep[neurons_to_remove] = -1
neurons_to_keep = np.where(neurons_to_keep != -1)[0]
return X[:, neurons_to_keep]
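# A quick illustration (requires only numpy): drop neuron columns 1 and 3 from
# a 4-token x 5-neuron activation matrix.
import numpy as np

X = np.arange(20).reshape(4, 5)
filtered = filter_activations_remove_neurons(X, [1, 3])
print(filtered.shape)  # (4, 3) -- columns 0, 2 and 4 remain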
|
711a858f8d28e5d0909991d85538a24bf063c523
| 3,649,604
|
def adaptive_threshold(im, block_size, constant, mode=cv2.THRESH_BINARY):
"""
Performs an adaptive threshold on an image
Uses cv2.ADAPTIVE_THRESH_GAUSSIAN_C:
threshold value is the weighted sum of neighbourhood values where
weights are a gaussian window.
Uses cv2.THRESH_BINARY:
Pixels below the threshold set to black
Pixels above the threshold set to white
Parameters
----------
    im: numpy array containing a grayscale image
    block_size: the size of the neighbourhood area (must be odd and >= 3)
    constant: subtracted from the weighted sum
    mode: thresholding type passed to cv2.adaptiveThreshold (default cv2.THRESH_BINARY)
"""
out = cv2.adaptiveThreshold(
im,
255,
cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
mode,
block_size,
constant
)
return out
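# A minimal usage sketch (assumes opencv-python and numpy); the random array
# just stands in for a real grayscale frame.
import cv2
import numpy as np

gray = (np.random.rand(64, 64) * 255).astype(np.uint8)
binary = adaptive_threshold(gray, block_size=11, constant=2)
print(binary.dtype, binary.shape)  # uint8 (64, 64), values are 0 or 255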
|
c237a0bb05dc8a43495f60ef9d8157c4b9c4bf1f
| 3,649,605
|
def get_loss(stochastic, variance_regularizer):
"""Get appropriate loss function for training.
Parameters
----------
stochastic : bool
determines if policy to be learned is deterministic or stochastic
variance_regularizer : float
regularization hyperparameter to penalize high variance policies
Returns
-------
Keras loss function to use for imitation learning.
"""
if stochastic:
return negative_log_likelihood_loss(variance_regularizer)
else:
return tf.keras.losses.mean_squared_error
|
e78d47c31a7762bcb091ea1a314348c27f2174b7
| 3,649,606
|
import copy
def simul_growth_ho_amir(nbstart, run_time, params, name):
"""Simulate the Ho and Amir model (Front. in Microbiol. 2015) with inter-initiation per origin adder and
timer from initiation to division
Parameters
----------
nbstart : int
number of cells to simulate
run_time: int
number of iterations
params: dict
experimental parameters
name: str
name of runs
Returns
-------
cells : list of dict
Each element of the list is a cell cycle defined by a
dictionary of features (Lb, Ld etc.)
"""
#initialize birth length and growth rate
L0 = np.exp(np.random.normal(params['Lb_logn_mu'],params['Lb_logn_sigma'],size=nbstart))
tau = np.exp(np.random.normal(params['tau_logn_mu'], params['tau_logn_sigma'], size=nbstart))
#standard value of growth rate. Used to scale the noise appropriately
normval = np.exp(params['tau_logn_mu'])
#initialize the inter-initiation adder (exact procedure doesn't really matter here)
#as all cells start with n_ori = 1, there's no initiation to division adder running
DLi = np.random.normal(params['DLi_mu'], params['DLi_sigma'], size=nbstart)
#time from initiation to division
tid_mu = 90
tid_var = 5
Tid = np.random.normal(tid_mu, tid_var, size=nbstart)
#initialize cell infos as a list of dictionaries. All cells start with n_ori = 1
cells = {}
for x in range(nbstart):
dict1 = {'Lb': L0[x],'L':L0[x], 'gen': str(x), 'tau':tau[x], 'Lt': [[0,L0[x],1]], 'finish': False,
'born':0, 'DLi': [[0,DLi[x]]],'DLdLi': [],'Li':[],'Ti':[],
'numori':1,'Ld':np.nan, 'numori_born':1,'name': name,'mLi':np.nan,
'mLd':np.nan, 'rfact':0.5, 'Tid': [[0,Tid[x]]]}
cells[str(x)] = dict1
for t in range(run_time):
divide_cell = []
for x in cells:
if cells[x]['finish']==False:
#update cell size
cells[x]['L'] = cells[x]['L']*(2**(1/cells[x]['tau']))
cells[x]['Lt'].append([t,cells[x]['L'],cells[x]['numori']])
#increment the most recent inter-initiation adder
cells[x]['DLi'][-1][0] = cells[x]['DLi'][-1][0]+(cells[x]['Lt'][-1][1]-cells[x]['Lt'][-2][1])
#if at least one volume counter since RI is running, increment all of them
if len(cells[x]['DLdLi'])>0:
cells[x]['DLdLi'] = [[k[0]+(cells[x]['Lt'][-1][1]-cells[x]['Lt'][-2][1]),k[1]] for k in cells[x]['DLdLi']]
cells[x]['Tid'] = [[k[0]+1,k[1]] for k in cells[x]['Tid']]
#if a volume counter has reached its limit divide
if len(cells[x]['DLdLi'])>0:
if (cells[x]['numori']>1) and (cells[x]['Tid'][0][0]>cells[x]['Tid'][0][1]):
cells[x]['finish'] = True#tag cell as finished
cells[x]['Ld'] = cells[x]['L']
cells[x]['Td'] = len(cells[x]['Lt'])
cells[x]['Td_abs'] = t
cells[x]['d_Ld_Lb'] = cells[x]['L']-cells[x]['Lb']
#assign the correct adders (the oldest ones) to the cell that just divided
cells[x]['final_DLdLi'] = cells[x]['DLdLi'][0][0]
cells[x]['final_DLi'] = cells[x]['DLi'][0][1]
cells[x]['final_Li'] = cells[x]['Li'][0]
cells[x]['final_Tid'] = cells[x]['Tid'][0][1]
#for each accumulated variable suppress the oldest one
if len(cells[x]['DLdLi'])==1:
cells[x]['DLdLi'] = []
else:
cells[x]['DLdLi'].pop(0)
if len(cells[x]['Tid'])==1:
cells[x]['Tid'] = []
else:
cells[x]['Tid'].pop(0)
if len(cells[x]['DLi'])==1:
cells[x]['DLi'] = []
else:
cells[x]['DLi'].pop(0)
if len(cells[x]['Li'])==1:
cells[x]['Li'] = []
else:
cells[x]['Li'].pop(0)
divide_cell.append(x)
#if the added volume has reached its limit make new RI
if cells[x]['DLi'][-1][0]>cells[x]['DLi'][-1][1]:
#duplicate origin
cells[x]['numori'] = cells[x]['numori']*2
#Version where adder is noisy itself
newdli = cells[x]['numori']*np.random.normal(params['DLi_mu'], params['DLi_sigma'])
cells[x]['DLi'].append([0,newdli])
cells[x]['Li'].append(cells[x]['L'])
                    #temporarily store TL_S as absolute time
cells[x]['Ti'].append(t)
#Version where adder itself is noisy
new_dv = cells[x]['numori']*np.exp(np.random.normal(params['DLdLi_logn_mu'], params['DLdLi_logn_sigma']))
cells[x]['DLdLi'].append([0,new_dv])
cells[x]['Tid'].append([0,np.random.normal(tid_mu, tid_var, size=1)])
for x in divide_cell:
#Draw division ratio
rfact = 1/(1+np.random.normal(1,params['div_ratio']))
#Create new cell using mother information
new_tau = np.exp(correlated_normal(np.log(cells[x]['tau']), params['tau_logn_mu'], params['tau_logn_sigma'], params['tau_corr']))
new_Lb = copy.deepcopy(rfact*cells[x]['L'])
new_L = copy.deepcopy(rfact*cells[x]['L'])
new_Lt = [[t,copy.deepcopy(rfact*cells[x]['L']),copy.deepcopy(cells[x]['numori'])/2]]
new_DLi = copy.deepcopy([[rfact*y[0],rfact*y[1]] for y in cells[x]['DLi']])
new_DLdLi = copy.deepcopy([[rfact*y[0],rfact*y[1]] for y in cells[x]['DLdLi']])
new_Tid = copy.deepcopy(cells[x]['Tid'])
new_Li = copy.deepcopy([rfact*y for y in cells[x]['Li']])
new_numori = copy.deepcopy(cells[x]['numori'])/2
mother_initL = copy.deepcopy(cells[x]['final_Li'])/2
mother_Ld = copy.deepcopy(cells[x]['Ld'])
dict1 = {'Lb': new_Lb,'L': new_L, 'gen': str(x)+'B', 'tau': new_tau,'Lt': new_Lt, 'finish': False,
'born':t, 'DLi': new_DLi,'DLdLi': new_DLdLi,'Tid': new_Tid, 'Li':new_Li,'Ti':[], 'numori':new_numori,
'numori_born':copy.deepcopy(new_numori),'Ld':np.nan, 'name': name,'mLi': mother_initL, 'mLd':mother_Ld,
'rfact':rfact}
cells[x+'B'] = copy.deepcopy(dict1)
        #keep oldest timer as final timer and give daughter remaining ones. Calculate initiation time based on cell birth.
TL_S_val = copy.deepcopy(cells[x]['Ti'].pop(0))
cells[x+'B']['Ti'] = copy.deepcopy(cells[x]['Ti'])
cells[x]['Ti'] = TL_S_val-copy.deepcopy(cells[x]['born'])
for x in cells:
if len(cells[x]['Li'])>0:
cells[x]['Li'] = np.nan
return cells
|
fa4d35cfd26dbcb08217b3ffee6cf4e3e7431a08
| 3,649,607
|
def variable_id(variable):
"""Return variable identification for .dot file"""
if isinstance(variable, FileAccess):
return "a_{}".format(variable.id)
act_id = variable.activation_id
act_id = "global" if act_id == -1 else act_id
return "v_{}_{}".format(act_id, variable.id)
|
b68fd9d6b08a537768dc82b7925f0cb6f383428e
| 3,649,608
|
def node_set_power_state(request, node_id, state, soft=False):
"""Set power state for a given node.
:param request: HTTP request.
:param node_id: The UUID or name of the node.
:param state: the power state to set ['on', 'off', 'reboot'].
:param soft: flag for graceful power 'off' or reboot
:return: node.
http://docs.openstack.org/developer/python-ironicclient/api/ironicclient.v1.node.html#ironicclient.v1.node.NodeManager.set_power_state
"""
return ironicclient(request).node.set_power_state(node_id,
state,
soft)
|
e94a13f4a797d31bd0eae24803a782b049ea44dc
| 3,649,609
|
import sympy
def __sympyToC_Grad(exprs: list, doOpts: bool = False) -> str:
""" creates C code from a list of sympy functions (somewhat optimized).
source: https://stackoverflow.com/questions/22665990/optimize-code-generated-by-sympy
and modified """
tmpsyms = sympy.numbered_symbols("tmp")
if doOpts:
symbols, simple = sympy.cse(exprs, symbols=tmpsyms, optimizations="basic", order='none')
else:
symbols, simple = sympy.cse(exprs, symbols=tmpsyms)
c_code = ""
for s in symbols:
c_code += " double " +sympy.ccode(s[0]) + " = " + sympy.ccode(s[1]) + ";\n"
for i,s in enumerate(simple):
c_code += f" out({i}) = " + sympy.ccode(s) + ";\n"
return c_code
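# A small demonstration (sympy only) of the generated C snippet for two
# expressions that share a common subexpression.
x, y = sympy.symbols('x y')
exprs = [sympy.sin(x * y) + x * y, sympy.cos(x * y)]
print(__sympyToC_Grad(exprs))
# Prints something like:
#   double tmp0 = x*y;
#   out(0) = tmp0 + sin(tmp0);
#   out(1) = cos(tmp0);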
|
33a95d99b19458ac7b8dd8d8e4272485b0f5f206
| 3,649,610
|
import os
import logging
import sys
def startServer(mock=True, mockS3=False):
"""
Test cases that communicate with the server should call this
function in their setUpModule() function.
"""
# If the server starts, a database will exist and we can remove it later
dbName = cherrypy.config['database']['uri'].split('/')[-1]
usedDBs[dbName] = True
# By default, this passes "[]" to "plugins", disabling any installed plugins
server = setupServer(mode=ServerMode.TESTING, plugins=enabledPlugins)
if mock:
cherrypy.server.unsubscribe()
cherrypy.engine.start()
# Make server quiet (won't announce start/stop or requests)
cherrypy.config.update({'environment': 'embedded'})
# Log all requests if we asked to do so
if 'cherrypy' in os.environ.get('EXTRADEBUG', '').split():
cherrypy.config.update({'log.screen': True})
logHandler = logging.StreamHandler(sys.stdout)
logHandler.setLevel(logging.DEBUG)
cherrypy.log.error_log.addHandler(logHandler)
# Tell CherryPy to throw exceptions in request handling code
cherrypy.config.update({'request.throw_errors': True})
mockSmtp.start()
if mockS3:
global mockS3Server
mockS3Server = mock_s3.startMockS3Server()
return server
|
02bd95f96adda8a9af194952bcbe82c71235d07e
| 3,649,611
|
def index():
"""User friendly index page at the root of the server
    guides the user to the reports
"""
return render_template('index.html')
|
0e810716e0bbfae98736bc13f458636eb33dc87d
| 3,649,612
|
def read_lookup(infile):
"""
-----------------------------------------------------------------------------
Read data from a lookup database.
Inputs:
infile [string] Input file containing the lookup data base.
Outputs:
[tuple] each element of the tuple is a numpy array. The elements in order are
x-coordinates, y-coordinates, data value at those coordiantes. The
data values are real or complex depending on whether the lookup table
has an 'imag_value' column
-----------------------------------------------------------------------------
"""
if not isinstance(infile, str):
raise TypeError('Input parameter infile must be of string data type')
try:
cols = ascii.read(infile, data_start=1, comment='#')
except IOError:
raise IOError('Could not read the specified file: '+infile)
if 'imag_value' in cols.colnames:
return cols['x'].data, cols['y'].data, cols['real_value'].data+1j*cols['imag_value'].data
else:
return cols['x'].data, cols['y'].data, cols['real_value'].data
|
a86a2e8da2580e66656f8328488941c402383c60
| 3,649,613
|
import traceback
import sys
def sum_function(context, nodeset, string):
"""
The dyn:sum function calculates the sum for the nodes passed as the first
argument, where the value of each node is calculated dynamically using an
XPath expression passed as a string as the second argument.
http://www.exslt.org/dyn/functions/sum/index.html
"""
nodeset = nodeset.evaluate_as_nodeset(context)
string = string.evaluate_as_string(context)
try:
expr = parse_xpath(string)
except XPathError:
lines = traceback.format_exception(*sys.exc_info())
lines[:1] = [("Syntax error in XPath expression '%(expr)s', "
"lower-level traceback:\n") % {'expr': string}]
context.processor.warning(''.join(lines))
return datatypes.nodeset()
return sum(map(datatypes.number, _map(context, nodeset, expr)))
|
dac1abae26522db33826f0a0e635e9afb4b3efc1
| 3,649,614
|
import json
def event_detail(request, id):
""" Return a JSON dict mapping for event given id
"""
event = get_object_or_404(Event, pk=id)
event_dict = {
"success": 1,
"result": [{
"id": event.id,
"title": event.title,
"description": event.description,
"created_date": event.created_date.strftime('%Y/%m/%d'),
"location": event.location
}]
}
return HttpResponse(json.dumps(event_dict),
content_type="application/json")
|
4b4083a81d5de90e9156f05d9f7b0375981a42d0
| 3,649,615
|
import logging
def prepare_state(qubits: list[cirq.Qid], x: int) -> list[cirq.Gate]:
"""Prepare qubits into an initial state.
Args:
qubits: The qubits to prepare.
x: The initial state of the qubits. Must be non-negative.
Returns:
A list of gates to prepare the qubits.
Raises:
ValueError: If `x` is negative.
"""
gates = list()
if size_in_bits(x) > len(qubits):
logging.warning(f"prepare_state: `x` ({x}) cannot fit into {len(qubits)} qubits; some bits will be dropped.")
for q in qubits:
if x % 2:
gates.append(cirq.X(q))
x >>= 1
return gates
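# Usage sketch: prepare |101> (x = 5, least-significant bit on qubits[0]).
# Assumes cirq is installed; `size_in_bits` is referenced above but not shown
# in this snippet, so a plausible stand-in is defined here.
import cirq

def size_in_bits(x: int) -> int:  # assumed helper, not from the source
    return x.bit_length()

qubits = cirq.LineQubit.range(3)
circuit = cirq.Circuit(prepare_state(qubits, 5))  # X on qubits[0] and qubits[2]
print(circuit)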
|
f11a4ddd83a6e2d1d7348c8ef3b5693a26e3e26d
| 3,649,616
|
def manage(id):
"""Manage room request."""
room_request = RoomRequest.query.get(id)
if room_request is None:
return abort(404)
return render_template('room_request/manage.html', room_request=room_request)
|
5a565342adbe53a647cb622e4688d1c26d88078d
| 3,649,617
|
def ger(self, y):
"""Computer an outer product between two vectors"""
assert self.dim() == 1 and y.dim() == 1, "Outer product must be on 1D tensors"
return self.view((-1, 1)).matmul(y.view((1, -1)))
|
003dda3dd678fdcf35f63f80c064586320c97d23
| 3,649,618
|
def load_data(database_filepath):
"""
Input:
1. database_filepath: the path of cleaned datasets
Output:
1. X: all messages
2. y: category columns generated by cleaning process
3. category_names: category columns' names
Process:
        1. Read in the dataframe
2. Select required datasets
3. Generate category columns' names
"""
# 1. Read-in dataframe
engine = create_engine('sqlite:///{}'.format(database_filepath))
df = pd.read_sql_table(database_filepath, engine)
# 2. Select required datasets
X = df['message']
y = df.iloc[:, 4:]
# 3. Generate category columns' names
category_names = y.columns
return X, y, category_names
|
15ec78cfac2dfde9294061432514001b21967b93
| 3,649,619
|
def lh_fus(temp):
"""latent heat of fusion
Args:
temp (float or array): temperature [K]
Returns:
float or array: latent heat of fusion
"""
return 3.336e5 + 1.6667e2 * (FREEZE - temp)
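# Worked example. FREEZE is not defined in this snippet; it is assumed here to
# be the freezing point of water in kelvin (273.16 K).
FREEZE = 273.16  # assumed module-level constant [K]

print(lh_fus(263.15))  # ~3.353e5 J/kg at -10 degC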
|
8127970612b031d2aaf7598379f41b549a3268e1
| 3,649,620
|
def to_eaf(file_path, eaf_obj, pretty=True):
"""
modified function from https://github.com/dopefishh/pympi/blob/master/pympi/Elan.py
Write an Eaf object to file.
:param str file_path: Filepath to write to, - for stdout.
:param pympi.Elan.Eaf eaf_obj: Object to write.
:param bool pretty: Flag to set pretty printing.
"""
def rm_none(x):
try: # Ugly hack to test if s is a string in py3 and py2
basestring
def isstr(s):
return isinstance(s, basestring)
except NameError:
def isstr(s):
return isinstance(s, str)
return {k: v if isstr(v) else str(v) for k, v in x.items()
if v is not None}
# Annotation Document
ADOCUMENT = etree.Element('ANNOTATION_DOCUMENT', eaf_obj.adocument)
# Licence
for m in eaf_obj.licenses:
n = etree.SubElement(ADOCUMENT, 'LICENSE', {'LICENSE_URL': m[1]})
n.text = m[0]
# Header
HEADER = etree.SubElement(ADOCUMENT, 'HEADER', eaf_obj.header)
# Media descriptiors
for m in eaf_obj.media_descriptors:
etree.SubElement(HEADER, 'MEDIA_DESCRIPTOR', rm_none(m))
# Linked file descriptors
for m in eaf_obj.linked_file_descriptors:
etree.SubElement(HEADER, 'LINKED_FILE_DESCRIPTOR', rm_none(m))
# Properties
for k, v in eaf_obj.properties:
etree.SubElement(HEADER, 'PROPERTY', {'NAME': k}).text = str(v)
# Time order
TIME_ORDER = etree.SubElement(ADOCUMENT, 'TIME_ORDER')
for t in sorted(eaf_obj.timeslots.items(), key=lambda x: int(x[0][2:])):
etree.SubElement(TIME_ORDER, 'TIME_SLOT', rm_none(
{'TIME_SLOT_ID': t[0], 'TIME_VALUE': t[1]}))
# Tiers
for t in sorted(eaf_obj.tiers.items(), key=lambda x: x[1][3]):
tier = etree.SubElement(ADOCUMENT, 'TIER', rm_none(t[1][2]))
for a in t[1][0].items():
ann = etree.SubElement(tier, 'ANNOTATION')
alan = etree.SubElement(ann, 'ALIGNABLE_ANNOTATION', rm_none(
{'ANNOTATION_ID': a[0], 'TIME_SLOT_REF1': a[1][0],
'TIME_SLOT_REF2': a[1][1], 'SVG_REF': a[1][3]}))
etree.SubElement(alan, 'ANNOTATION_VALUE').text = a[1][2]
for a in t[1][1].items():
ann = etree.SubElement(tier, 'ANNOTATION')
rean = etree.SubElement(ann, 'REF_ANNOTATION', rm_none(
{'ANNOTATION_ID': a[0], 'ANNOTATION_REF': a[1][0],
'PREVIOUS_ANNOTATION': a[1][2], 'SVG_REF': a[1][3]}))
etree.SubElement(rean, 'ANNOTATION_VALUE').text = a[1][1]
# Linguistic types
for l in eaf_obj.linguistic_types.values():
etree.SubElement(ADOCUMENT, 'LINGUISTIC_TYPE', rm_none(l))
# Locales
for lc, (cc, vr) in eaf_obj.locales.items():
etree.SubElement(ADOCUMENT, 'LOCALE', rm_none(
{'LANGUAGE_CODE': lc, 'COUNTRY_CODE': cc, 'VARIANT': vr}))
# Languages
for lid, (ldef, label) in eaf_obj.languages.items():
etree.SubElement(ADOCUMENT, 'LANGUAGE', rm_none(
{'LANG_ID': lid, 'LANG_DEF': ldef, 'LANG_LABEL': label}))
# Constraints
for l in eaf_obj.constraints.items():
etree.SubElement(ADOCUMENT, 'CONSTRAINT', rm_none(
{'STEREOTYPE': l[0], 'DESCRIPTION': l[1]}))
# Controlled vocabularies
for cvid, (descriptions, cv_entries, ext_ref) in\
eaf_obj.controlled_vocabularies.items():
cv = etree.SubElement(ADOCUMENT, 'CONTROLLED_VOCABULARY',
rm_none({'CV_ID': cvid, 'EXT_REF': ext_ref}))
for lang_ref, description in descriptions:
des = etree.SubElement(cv, 'DESCRIPTION', {'LANG_REF': lang_ref})
if description:
des.text = description
for cveid, (values, ext_ref) in cv_entries.items():
cem = etree.SubElement(cv, 'CV_ENTRY_ML', rm_none({
'CVE_ID': cveid, 'EXT_REF': ext_ref}))
for value, lang_ref, description in values:
val = etree.SubElement(cem, 'CVE_VALUE', rm_none({
'LANG_REF': lang_ref, 'DESCRIPTION': description}))
val.text = value
# Lexicon refs
for l in eaf_obj.lexicon_refs.values():
etree.SubElement(ADOCUMENT, 'LEXICON_REF', rm_none(l))
    # External refs
for eid, (etype, value) in eaf_obj.external_refs.items():
etree.SubElement(ADOCUMENT, 'EXTERNAL_REF', rm_none(
{'EXT_REF_ID': eid, 'TYPE': etype, 'VALUE': value}))
# https://github.com/dopefishh/pympi/blob/master/pympi/Elan.py
return '<?xml version="1.0" encoding="UTF-8"?>'+etree.tostring(ADOCUMENT, encoding='utf-8').decode("utf-8")
|
605e7f711f34661daae6869419d6f8bebb05a2c4
| 3,649,621
|
def delete_station(station_id):
"""Delete station from stations
:param station_id:
:return: string
"""
logger.debug(f"Call delete_stations: {station_id}")
# Load old data into structure
stations = load_stations()
# Find index in list of stations
target_index = find_index_in_list_of_dict(
lst=stations,
key='StationID',
value=station_id
)
# remove from list by index
stations.remove(stations[target_index])
# save changes
save_stations(stations)
return {"status": "success"}
|
d377f2b029cb206ec78acf220a83bf88df8fd758
| 3,649,622
|
def query_for_build_status(service, branch, target, starting_build_id):
"""Query Android Build Service for the status of the 4 builds in the target
branch whose build IDs are >= to the provided build ID"""
try:
print ('Querying Android Build APIs for builds of {} on {} starting at'
' buildID {}').format(target, branch, starting_build_id)
return service.build().list(buildType='submitted',
branch=branch, target=target, maxResults='4',
startBuildId=starting_build_id).execute()
except errors.HttpError as error:
print 'HTTP Error while attempting to query the build status.'
print error
return None
|
4e1e04dae1ce13217374207a1b57d7380552dfc5
| 3,649,623
|
def create_pool(
dsn=None,
*,
min_size=10,
max_size=10,
max_queries=50000,
max_inactive_connection_lifetime=300.0,
setup=None,
init=None,
loop=None,
authenticator=None,
**connect_kwargs,
):
"""Create an Asyncpg connection pool through Approzium authentication.
Takes same arguments as ``asyncpg.create_pool`` in addition to the
`authenticator` argument
:return: An instance of :class:`~approzium.asyncpg.pool._ApproziumPool`.
Example:
.. code-block:: python
>>> import approzium
>>> from approzium.asyncpg import create_pool
>>> auth = approzium.AuthClient("myauthenticator.com:6001", disable_tls=True)
>>> pool = await create_pool(user='postgres', authenticator=auth)
>>> con = await pool.acquire()
>>> try:
... await con.fetch('SELECT 1')
... finally:
... await pool.release(con)
"""
return _ApproziumPool(
dsn,
connection_class=Connection,
min_size=min_size,
max_size=max_size,
max_queries=max_queries,
loop=loop,
setup=setup,
init=init,
max_inactive_connection_lifetime=max_inactive_connection_lifetime,
authenticator=authenticator,
**connect_kwargs,
)
|
0b50a4cba07fb4797e04cc384dd46d1e21deed12
| 3,649,624
|
import logging
def _get_all_schedule_profile_entries_v1(profile_name, **kwargs):
"""
Perform a GET call to get all entries of a QoS schedule profile
:param profile_name: Alphanumeric name of the schedule profile
:param kwargs:
keyword s: requests.session object with loaded cookie jar
keyword url: URL in main() function
:return: Dictionary containing schedule profile entry URIs
"""
target_url = kwargs["url"] + "system/qos/%s/queues" % profile_name
response = kwargs["s"].get(target_url, verify=False)
if not common_ops._response_ok(response, "GET"):
logging.warning("FAIL: Getting dictionary of URIs of entries in QoS schedule profile '%s' failed with status code %d: %s"
% (profile_name, response.status_code, response.text))
else:
logging.info("SUCCESS: Getting dictionary of URIs of entries in QoS schedule profile '%s' succeeded" % profile_name)
schedule_profile_entries = response.json()
# for some reason, this API returns a list when empty, and a dictionary when there is data
# make this function always return a dictionary
if not schedule_profile_entries:
return {}
else:
return schedule_profile_entries
|
32d6278ce6704feb5831012c2d0050b226fc7dfa
| 3,649,625
|
def loadSource(path):
"""Loads a list of transportReactions. Format:
R("Macgamb_Transp")
R("Madnb_Transp")
R("MalaDb_Transp")..."""
    with open(path, 'r') as file:
        sources = [line.strip() for line in file]
    return sources
|
244e9e5619a5039822ef14dfbb3d99b55cb6cc74
| 3,649,626
|
from typing import Optional
import struct
def frombin(
__data: Bitcode,
__dtype: SupportedDataType | bytes,
num: int = 1,
*,
encoding: Optional[str] = None,
signed: bool = True,
) -> ValidDataset:
"""converts a string of 0 and 1 back into the original data
Args:
data (BinaryCode): a string of 0 and 1
dtype (Union[int, float, str]): the desired data type to convert to
Raises:
TypeError: if the desired datatype is not of the integer, floats or strings data type
Returns:
Union[int, float, str]: converted data
"""
if __dtype is int:
stop = len(__data)
step = stop // num
if signed:
decoded_data = [None] * num
for index, i in enumerate(range(0, stop, step)):
bindata = __data[i : i + step]
decoded_data[index] = int("-%s" % (bindata) if bindata[0] == "1" else bindata, 2)
else:
decoded_data = [int(__data[i : i + step], 2) for i in range(0, stop, step)]
return decoded_data if num != 1 else decoded_data[0]
bytedata = int(__data, 2).to_bytes((len(__data) + 7) // 8, config.ENDIAN)
if __dtype in ("s", str):
return "".join(bytes.decode(bytedata, encoding or config.DEFAULT_STR_FORMAT))
else:
try:
decoded_data = list(
struct.unpack("%s%s%s" % (">" if config.ENDIAN == "big" else "<", num, __dtype), bytedata)
)
return decoded_data if num != 1 else decoded_data[0]
except struct.error:
raise TypeError(f"cannot convert byte data to '{__dtype}'")
|
6fa7219ea8622071c7bb3277c8b59717543e9286
| 3,649,627
|
def check_size():
"""Assumes the problem size has been set by set_size before some operation.
This checks if the size was changed
Size is defined as (PIs, POs, ANDS, FF, max_bmc)
Returns TRUE is size is the same"""
global npi, npo, nands, nff, nmd
#print n_pis(),n_pos(),n_ands(),n_latches()
result = ((npi == n_pis()) and (npo == n_pos()) and (nands == n_ands()) and (nff == n_latches()) )
return result
|
361edb3b4f20a3ae4920c784ad2d1c56fe35e2d6
| 3,649,628
|
def vrms2dbm(vp):
"""
    Converts a scalar or a numpy array from volts RMS to dBm assuming an impedance of 50 Ohm
    Arguments:
        - vp: scalar or numpy array containing values in volts RMS to be converted to dBm
Returns:
- scalar or numpy array containing the result
"""
return 10. * np.log10(20. * (vp) ** 2.)
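# Quick check (numpy only): 1 V RMS into 50 Ohm dissipates 20 mW, i.e. ~13.01 dBm.
import numpy as np

print(vrms2dbm(1.0))                   # ~13.0103
print(vrms2dbm(np.array([0.1, 1.0])))  # ~[-6.9897  13.0103]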
|
7d0f76ab74cf82d2d56f97840153f1b9bc3cb8a8
| 3,649,629
|
def aa_i2c_read (aardvark, slave_addr, flags, data_in):
"""usage: (int return, u08[] data_in) = aa_i2c_read(Aardvark aardvark, u16 slave_addr, AardvarkI2cFlags flags, u08[] data_in)
All arrays can be passed into the API as an ArrayType object or as
a tuple (array, length), where array is an ArrayType object and
length is an integer. The user-specified length would then serve
    as the length argument to the API function (please refer to the
product datasheet). If only the array is provided, the array's
intrinsic length is used as the argument to the underlying API
function.
Additionally, for arrays that are filled by the API function, an
integer can be passed in place of the array argument and the API
will automatically create an array of that length. All output
arrays, whether passed in or generated, are passed back in the
returned tuple."""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# data_in pre-processing
__data_in = isinstance(data_in, int)
if __data_in:
(data_in, num_bytes) = (array_u08(data_in), data_in)
else:
(data_in, num_bytes) = isinstance(data_in, ArrayType) and (data_in, len(data_in)) or (data_in[0], min(len(data_in[0]), int(data_in[1])))
if data_in.typecode != 'B':
raise TypeError("type for 'data_in' must be array('B')")
# Call API function
(_ret_) = api.py_aa_i2c_read(aardvark, slave_addr, flags, num_bytes, data_in)
# data_in post-processing
if __data_in: del data_in[max(0, min(_ret_, len(data_in))):]
return (_ret_, data_in)
|
59cca99e3ae811e957f9dd053205f3639c1451a4
| 3,649,630
|
import os
def get_log_dir(env=None):
"""
Get directory to use for writing log files. There are multiple
possible locations for this. The ROS_LOG_DIR environment variable
has priority. If that is not set, then ROS_HOME/log is used. If
ROS_HOME is not set, $HOME/.ros/log is used.
@param env: override os.environ dictionary
@type env: dict
@return: path to use use for log file directory
@rtype: str
"""
if env is None:
env = os.environ
if ROS_LOG_DIR in env:
return env[ROS_LOG_DIR]
else:
return os.path.join(get_ros_home(env), 'log')
|
9ede24a3afdf9869171c49f7f238b5b0a608830b
| 3,649,631
|
def urbandictionary_search(search):
"""
Searches urbandictionary's API for a given search term.
:param search: The search term str to search for.
:return: definition str or None on no match or error.
"""
if str(search).strip():
urban_api_url = 'http://api.urbandictionary.com/v0/define?term=%s' % search
response = util.web.http_get(url=urban_api_url, json=True)
if response['json'] is not None:
try:
definition = response['json']['list'][0]['definition']
return definition.encode('ascii', 'ignore')
except (KeyError, IndexError):
return None
else:
return None
|
3cd63486adc11f3ca20d4cd6216006d3f2d2239f
| 3,649,632
|
def Performance(ALGORITHM_CONFIG, CELLULAR_MODEL_CONFIG, alog_name):
"""
Performance testing
"""
# Server profile: num_ues=200, APs=16, Scale=200.0, explore_radius=1
loadbalanceRL = interface.Rainman2(SETTINGS)
loadbalanceRL.algorithm_config = ALGORITHM_CONFIG
loadbalanceRL.environment_config = CELLULAR_MODEL_CONFIG
if alog_name=='linear':
result_linear = loadbalanceRL.run_experiment(
'Cellular', 'Qlearning', 'LinearRegression')
return result_linear
if alog_name=='Naive':
result_Naive = loadbalanceRL.run_experiment(
'Cellular', 'Qlearning', 'Naive')
return result_Naive
if alog_name=='NN':
result_NN = loadbalanceRL.run_experiment(
'Cellular', 'Qlearning', 'NN')
return result_NN
if alog_name=='DQN':
result_DQN = loadbalanceRL.run_experiment(
'Cellular', 'Qlearning', 'DQN')
return result_DQN
|
87e5d6b0c400af0262b6a2c746e855b9b71a5c35
| 3,649,633
|
def launch(sid):
"""
Launch a scan
Launch the scan specified by the sid.
"""
data = connect('POST', '/scans/{0}/launch'.format(sid))
return data['scan_uuid']
|
fa99e7a50e9e2ddb30ba131ebd61c998c2cdabaa
| 3,649,634
|
import ast
def transpose_dict(data, data_key):
"""Function: transpose_dict
Description: Transpose specified keys in a list of dictionaries
to specified data types or None.
Arguments:
(input) data -> Initial list of dictionaries.
(input) data_key -> Dictionary of keys and data types.
(output) mod_data -> Modified list of dictionaries.
"""
data = list(data)
data_key = dict(data_key)
mod_data = list()
literal_list = ["bool", "list"]
for list_item in data:
list_item = dict(list_item)
for item in set(list_item.keys()) & set(data_key.keys()):
if not list_item[item] or list_item[item] == "None":
list_item[item] = None
elif data_key[item] == "int":
list_item[item] = int(list_item[item])
elif data_key[item] in literal_list:
list_item[item] = ast.literal_eval(list_item[item])
mod_data.append(list_item)
return mod_data
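# Example (standard library only): string values are cast per data_key; empty
# strings and the literal "None" become None.
data = [{"count": "3", "active": "True", "tags": "['a', 'b']", "note": ""}]
data_key = {"count": "int", "active": "bool", "tags": "list", "note": "int"}
print(transpose_dict(data, data_key))
# [{'count': 3, 'active': True, 'tags': ['a', 'b'], 'note': None}]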
|
7675ea2f80e9e85993dc99a2a31df04abfeba2c8
| 3,649,635
|
def aligner_to_symbol(calls):
"""
Assign symbols to different aligners in the input file
Set the attribute of the class instances
return a list of indices for which each aligner is found uniquely and all aligners
sorted by aligners
"""
symbols = ['o', '+', 'x', 'v', '*', 'D', 's', 'p', '8', 'X']
aligners = sorted(set([c.aligner for c in calls]), reverse=True)
aligner_to_symbol_dict = {a: s for a, s in zip(aligners, symbols)}
for c in calls:
c.shape = aligner_to_symbol_dict[c.aligner]
index_and_aligners = zip([[c.aligner for c in calls].index(i) for i in aligners], aligners)
return zip(*sorted(index_and_aligners, key=lambda x: x[1]))
|
b9cef3ae33b6ce84daf78a8bc8ce528f97d7a8a6
| 3,649,636
|
import sys
def timestamped_filename(line):
"""Given a line like '.... filename <timestamp>', return filename."""
m = re_timestamped_line.search(line)
if m:
return m.group("filename")
else:
print >> sys.stderr, "Error: could not find filename in:", line
return None
|
5c63f976b1b56f347ab5926bd4247dad342b44e6
| 3,649,637
|
def nfvi_create_subnet(network_uuid, subnet_name, ip_version, subnet_ip,
subnet_prefix, gateway_ip, dhcp_enabled, callback):
"""
Create a subnet
"""
cmd_id = _network_plugin.invoke_plugin('create_subnet', network_uuid,
subnet_name, ip_version, subnet_ip,
subnet_prefix, gateway_ip,
dhcp_enabled, callback=callback)
return cmd_id
|
383a0ffeb6e364f761c8d4038bf8e53f367021c1
| 3,649,638
|
def convertCRS(powerplants, substations, towers, crs, grid):
"""
    :param powerplants: power plant geometries (returned unchanged; their reprojection is commented out below)
    :param substations: substation geometries, reprojected to `crs`
    :param towers: tower geometries, reprojected to `crs`
    :param crs: target coordinate reference system
    :param grid: grid data, returned unchanged
    :return: (substations, powerplants, towers, grid)
"""
    substations = substations.to_crs(crs)
# powerplants = powerplants.set_crs(crs)
# powerplants = powerplants.to_crs(crs)
# print(powerplants.crs)
towers = towers.to_crs(crs)
return(substations, powerplants, towers, grid)
|
9fcb8c51323c00935ba2c882502a273f2bf532ff
| 3,649,639
|
def get_pathway(page_name, end_pg, max_len, trail, paths):
"""
Finds a list of all paths from a starting wikipedia page to an end page
Assumes page_name is a valid wikipedia article title and end_pg is a valid
Wikipedia Page Object
Args:
page_name: (Str) The name of the current article
end_pg: (Wikipedia Page) The page the path should end at
max_len: (Int) The number of maximum steps between the start page and
the end page
trail: (List) The current path being searched
Paths: (List) A set of all the paths between
the starting page and the end page
Returns nothing but appends a given list of paths
"""
trail.append(page_name) # add the current page to the current trail
# Check if the page has the the end page as a link and
    # add it to the list of paths
if h.has_end(page_name, end_pg):
# if the page contains a link to the end page
# add the end page to the trail, and add the trail to the paths list
trail.append(end_pg.title)
paths.append(trail)
print(f"**Pathway {len(paths)}**: {h.plot_path(trail)}")
return None
# if the trail is above the maximum length return none
elif max_len <= 1:
print(f"Not a path: {trail}")
return None
else:
# Check each of the links in the page
# Continue branching looking for the end
for link in h.get_links(page_name):
if link not in trail:
if h.is_page(link):
get_pathway(link, end_pg, max_len - 1, trail[:], paths)
|
3b8effcb1f5295a854d32cc6438093f5ba7c1fa4
| 3,649,640
|
def clip_to_ndc(point_clip_space, name="clip_to_ndc"):
"""Transforms points from clip to normalized device coordinates (ndc).
Note:
In the following, A1 to An are optional batch dimensions.
Args:
point_clip_space: A tensor of shape `[A1, ..., An, 4]`, where the last
dimension represents points in clip space.
name: A name for this op. Defaults to "clip_to_ndc".
Raises:
ValueError: If `point_clip_space` is not of size 4 in its last dimension.
Returns:
A tensor of shape `[A1, ..., An, 3]`, containing `point_clip_space` in
normalized device coordinates.
"""
with tf.name_scope(name):
point_clip_space = tf.convert_to_tensor(value=point_clip_space)
shape.check_static(
tensor=point_clip_space,
tensor_name="point_clip_space",
has_dim_equals=(-1, 4))
w = point_clip_space[..., -1:]
return point_clip_space[..., :3] / w
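# Worked example (assumes tensorflow and tensorflow_graphics, which provides
# the `shape` helper used above): the perspective divide by w maps the
# clip-space point [4, -2, 6, 2] to NDC [2, -1, 3].
import tensorflow as tf

point_clip = tf.constant([4.0, -2.0, 6.0, 2.0])
print(clip_to_ndc(point_clip))  # tf.Tensor([ 2. -1.  3.], shape=(3,), ...)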
|
ee49d891da941b6da48797035c5b976f5d10762d
| 3,649,641
|
def korrektur(wordfile, datei):
"""Patch aus korrigierten Einträgen"""
if not datei:
datei = 'korrektur.todo'
teste_datei(datei)
korrekturen = {}
for line in open(datei, 'r'):
if line.startswith('#'):
continue
        # decode, strip the line ending
line = line.decode('utf8').strip()
if not line:
continue
        # complete the entry if necessary
if u';' not in line:
line = u'%s;%s' % (join_word(line), line)
entry = WordEntry(line)
key = entry[0]
        entry.regelaenderungen() # test for things like s-t/-st
korrekturen[key] = entry
wortliste = list(wordfile)
    wortliste_neu = [] # corrected list
for entry in wortliste:
key = entry[0]
if key in korrekturen:
entry = korrekturen.pop(key)
wortliste_neu.append(entry)
if korrekturen:
        print korrekturen # remaining entries
return (wortliste, wortliste_neu)
|
31b37d0787738d3424d8daacc4af945e883aeb9d
| 3,649,642
|
def read_number(dtype, prompt='', floor=None, ceil=None, repeat=False):
""" Reads a number within specified bounds. """
while True:
try:
result = dtype(input(prompt))
if floor is not None and result < floor:
raise ValueError(f'Number must be no less than {floor}.')
if ceil is not None and result > ceil:
raise ValueError(f'Number must be no greater than {ceil}.')
except ValueError as e:
print(e)
result = None
if result is not None or not repeat:
return result
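# Usage sketch: keep prompting until the user enters an integer from 1 to 10
# (invalid input prints the error and asks again because repeat=True).
value = read_number(int, prompt='Pick a number (1-10): ', floor=1, ceil=10, repeat=True)
print(f'You picked {value}')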
|
a528b1f5912ba4bab0b87c87004311778eaa8187
| 3,649,643
|
from typing import Optional
def dem_adjust(
da_elevtn: xr.DataArray,
da_flwdir: xr.DataArray,
da_rivmsk: Optional[xr.DataArray] = None,
flwdir: Optional[pyflwdir.FlwdirRaster] = None,
connectivity: int = 4,
river_d8: bool = False,
logger=logger,
) -> xr.DataArray:
"""Returns hydrologically conditioned elevation.
The elevation is conditioned to D4 (`connectivity=4`) or D8 (`connectivity=8`)
flow directions based on the algorithm described in Yamazaki et al. [1]_
The method assumes the original flow directions are in D8. Therefore, if
`connectivity=4`, an intermediate D4 conditioned elevation raster is derived
first, based on which new D4 flow directions are obtained used to condition the
original elevation.
Parameters
----------
da_elevtn, da_flwdir, da_rivmsk : xr.DataArray
elevation [m+REF]
D8 flow directions [-]
binary river mask [-], optional
flwdir : pyflwdir.FlwdirRaster, optional
D8 flow direction raster object. If None it is derived on the fly from `da_flwdir`.
connectivity: {4, 8}
D4 or D8 flow connectivity.
river_d8 : bool
If True and `connectivity==4`, additionally condition river cells to D8.
Requires `da_rivmsk`.
Returns
-------
xr.Dataset
Dataset with hydrologically adjusted elevation ('elevtn') [m+REF]
References
----------
.. [1] Yamazaki et al. (2012). Adjustment of a spaceborne DEM for use in floodplain hydrodynamic modeling. Journal of Hydrology, 436-437, 81–91. https://doi.org/10.1016/j.jhydrol.2012.02.045
See Also
--------
pyflwdir.FlwdirRaster.dem_adjust
pyflwdir.FlwdirRaster.dem_dig_d4
"""
# get flow directions for entire domain and for rivers
if flwdir is None:
flwdir = flwdir_from_da(da_flwdir, mask=False)
if connectivity == 4 and river_d8 and da_rivmsk is None:
raise ValueError('Provide "da_rivmsk" in combination with "river_d8"')
elevtn = da_elevtn.values
nodata = da_elevtn.raster.nodata
logger.info(f"Condition elevation to D{connectivity} flow directions.")
# get D8 conditioned elevation
elevtn = flwdir.dem_adjust(elevtn)
# get D4 conditioned elevation (based on D8 conditioned!)
if connectivity == 4:
rivmsk = da_rivmsk.values == 1 if da_rivmsk is not None else None
# derive D4 flow directions with forced pits at original locations
d4 = pyflwdir.dem.fill_depressions(
elevtn=flwdir.dem_dig_d4(elevtn, rivmsk=rivmsk, nodata=nodata),
nodata=nodata,
connectivity=connectivity,
idxs_pit=flwdir.idxs_pit,
)[1]
# condition the DEM to the new D4 flow dirs
flwdir_d4 = pyflwdir.from_array(
d4, ftype="d8", transform=flwdir.transform, latlon=flwdir.latlon
)
elevtn = flwdir_d4.dem_adjust(elevtn)
# condition river cells to D8
if river_d8:
flwdir_river = flwdir_from_da(da_flwdir, mask=rivmsk)
elevtn = flwdir_river.dem_adjust(elevtn)
# assert np.all((elv2 - flwdir_d4.downstream(elv2))>=0)
# save to dataarray
da_out = xr.DataArray(
data=elevtn,
coords=da_elevtn.raster.coords,
dims=da_elevtn.raster.dims,
)
da_out.raster.set_nodata(nodata)
da_out.raster.set_crs(da_elevtn.raster.crs)
return da_out
|
d59f5bae1df44cc84c4eb98d8dd14ca923dc4809
| 3,649,644
|
from copy import copy
from numpy import zeros, unique
from itertools import product
def trainModel(label,bestModel,obs,trainSet,testSet,modelgrid,cv,optMetric='auc'):
""" Train a message classification model """
pred = zeros(len(obs))
fullpred = zeros((len(obs),len(unique(obs))))
model = copy(bestModel.model)
#find the best model via tuning grid
for tune in [dict(list(zip(modelgrid, v))) for v in product(*list(modelgrid.values()))]:
for k in list(tune.keys()):
setattr(model,k,tune[k])
i = 0
for tr, vl in cv:
model.fit(trainSet.ix[tr].values,obs[tr])
pred[vl] = model.predict_proba(trainSet.ix[vl].values)[:,1]
fullpred[vl,:] = model.predict_proba(trainSet.ix[vl].values)
i += 1
bestModel.updateModel(pred,fullpred,obs,model,trainSet.columns.values,tune,optMetric=optMetric)
#re-train with all training data
bestModel.model.fit(trainSet.values,obs)
print(bestModel)
return {label: {'pred': pred, 'test_pred':bestModel.model.predict_proba(testSet)[:,1]}}
|
fdf60d23894bfd997cdf7fa82cb59257ad7b2954
| 3,649,645
|
def vm_deploy(vm, force_stop=False):
"""
Internal API call used for finishing VM deploy;
Actually cleaning the json and starting the VM.
"""
if force_stop: # VM is running without OS -> stop
cmd = 'vmadm stop %s -F >/dev/null 2>/dev/null; vmadm get %s 2>/dev/null' % (vm.uuid, vm.uuid)
else: # VM is stopped and deployed -> start
cmd = 'vmadm start %s >/dev/null 2>/dev/null; vmadm get %s 2>/dev/null' % (vm.uuid, vm.uuid)
msg = 'Deploy server'
lock = 'vmadm deploy ' + vm.uuid
meta = {
'output': {
'returncode': 'returncode',
'stderr': 'message',
'stdout': 'json'
},
'replace_stderr': ((vm.uuid, vm.hostname),),
'msg': msg, 'vm_uuid': vm.uuid
}
callback = ('api.vm.base.tasks.vm_deploy_cb', {'vm_uuid': vm.uuid})
return execute(ERIGONES_TASK_USER, None, cmd, meta=meta, lock=lock, callback=callback,
queue=vm.node.fast_queue, nolog=True, ping_worker=False, check_user_tasks=False)
|
324dffa2a181d4b796a8f263eeb57d1452826c78
| 3,649,646
|
import sys
def get_cpuinfo():
"""Returns the flags of the processor."""
if sys.platform == 'darwin':
return platforms.osx.get_cpuinfo()
if sys.platform == 'win32':
return platforms.win.get_cpuinfo()
if sys.platform == 'linux2':
return platforms.linux.get_cpuinfo()
return {}
|
2ac223337d54426d36c9fda8d88f3545c6d4c30a
| 3,649,647
|
from datetime import datetime, timedelta
def previous_analytics(request, package, id):
"""
Return a list of previous analytics for the given package.
Only shows analytics which the user can access.
Also limits to the last 100 of them!
"""
context = []
profile = request.user.get_profile()
#TODO: this code block needs to go into a separate method
# together with the cut-off logic in _appdetails_get_objects_fast()
if profile.is_subscribed():
if (profile.get_subscription_plan_name() == 'Beaker'):
# show everything
cut_off = datetime.now()
else:
# show everything older than one week
cut_off = datetime.now() - timedelta(days=7)
else:
# show everything older than one month
cut_off = datetime.now() - timedelta(days=30)
#TODO: this query can be very slow if there are
# large number of previous analytics available
for adv in Advisory.objects.filter(
status=STATUS_LIVE,
old__package=id,
new__released_on__lte=cut_off,
).order_by(
'-new__released_on',
'-old__released_on'
)[:100]:
context.append(
{
'name' : adv.__unicode__(),
'url' : adv.get_full_path(),
}
)
return render(
request,
'previous_analytics.html',
{
'context' : context
}
)
|
9722cd424de89cfe8e189b425fe2db64cb1e129b
| 3,649,648
|
def get_monitor_value(image, monitor_key):
"""Return the monitor value from an image using an header key.
:param fabio.fabioimage.FabioImage image: Image containing the header
:param str monitor_key: Key containing the monitor
:return: returns the monitor else returns 1.0
:rtype: float
"""
if monitor_key is None or monitor_key == "":
return 1.0
try:
monitor = header_utils.get_monitor_value(image, monitor_key)
return monitor
except header_utils.MonitorNotFound:
logger.warning("Monitor %s not found. No normalization applied.", monitor_key)
return 1.0
except Exception as e:
logger.warning("Fail to load monitor. No normalization applied. %s", str(e))
return 1.0
|
cf74ab608837b6f5732a70d997afa1fe424b2ee1
| 3,649,649
|
import os
def resources(request):
"""
Page for accessing RMG resources, including papers and presentations
"""
folder = os.path.join(settings.STATIC_ROOT, 'presentations')
files = []
if os.path.isdir(folder):
files = os.listdir(folder)
toRemove = []
for f in files:
if not os.path.isfile(os.path.join(folder, f)):
# Remove any directories
toRemove.append(f)
elif f[0] == '.':
# Remove any hidden files
toRemove.append(f)
for item in toRemove:
files.remove(item)
# Parse file names for information to display on webpage
presentations = []
if files:
files.sort()
for f in files:
name = os.path.splitext(f)[0]
parts = name.split('_')
date = parts[0]
date = date[0:4] + '-' + date[4:6] + '-' + date[6:]
title = ' '.join(parts[1:])
title = title.replace('+', ' and ')
presentations.append((title, date, f))
return render(request, 'resources.html', {'presentations': presentations})
|
857d4a89571da2270ca072965c64840f0a022268
| 3,649,650
|
def default_thread_index (value, threads):
"""
    Find the index of `value` in the `threads` list.
    :param value: element to locate
    :param threads: list to search
    :return: index of `value` in `threads`
"""
value_index = threads.index(value)
return value_index
|
7be2efb6579f2880f53dac11705ba6a068c2d92d
| 3,649,651
|
import requests
def new_things(url):
"""Attempts to register new things on the directory
Takes 1 argument:
url - URL containing thing descriptions to register
"""
response = requests.post('{}/things/register_url'.format(settings.THING_DIRECTORY_HOST), headers={
'Authorization': settings.THING_DIRECTORY_KEY,
}, json={'url':url})
response.raise_for_status()
return response.json()['uuids']
|
0336d094e9581f3382dd33ac8a9bf8fd43754d82
| 3,649,652
|
def isID(value):
"""Checks if value looks like a Ulysses ID; i.e. is 22 char long.
Not an exact science; but good enougth to prevent most mistakes.
"""
return len(value) == 22
|
527db9446adc2b88c2117bd35c74474c3e7bad24
| 3,649,653
|
def tool_on_path(tool: str) -> str:
"""
Helper function to determine if a given tool is on the user's PATH variable. Wraps around
runspv.tool_on_path().
:param tool: the tool's filename to look for.
:return: the path of the tool, else ToolNotOnPathError if the tool isn't on the PATH.
"""
return runspv.tool_on_path(tool)
|
52963a818bcea59eaaec1d20000d3a4a1296ee26
| 3,649,654
|
def DefineDecode(i, n, invert=False):
"""
Decode the n-bit number i.
@return: 1 if the n-bit input equals i
"""
class _Decode(Circuit):
name = 'Decode_{}_{}'.format(i, n)
IO = ['I', In(Bits[ n ]), 'O', Out(Bit)]
@classmethod
def definition(io):
if n <= 8:
j = 1 << i
if invert:
m = 1 << n
mask = (1 << m) - 1
j = mask & (~j)
decode = ROMN(j, n)
else:
nluts = (n + 3) // 4
data = nluts * [0]
for j in range(nluts):
data[j] = (i >> 4*j) & 0xf # 4-bit pieces
decode = FlatHalfCascade(n, 4, data, ZERO, 1)
wire(io.I, decode.I)
wire(decode.O, io.O)
return _Decode
|
9be19b191a1048dffd8a6fe82caabdcb1dd33f42
| 3,649,655
|
def absent(name, database, **client_args):
"""
Ensure that given continuous query is absent.
name
Name of the continuous query to remove.
database
Name of the database that the continuous query was defined on.
"""
ret = {
"name": name,
"changes": {},
"result": True,
"comment": "continuous query {0} is not present".format(name),
}
if __salt__["influxdb.continuous_query_exists"](database, name, **client_args):
if __opts__["test"]:
ret["result"] = None
ret["comment"] = (
"continuous query {0} is present and needs to be removed"
).format(name)
return ret
if __salt__["influxdb.drop_continuous_query"](database, name, **client_args):
ret["comment"] = "continuous query {0} has been removed".format(name)
ret["changes"][name] = "Absent"
return ret
else:
ret["comment"] = "Failed to remove continuous query {0}".format(name)
ret["result"] = False
return ret
return ret
|
f280dad71275cd576edbefac9376463a2ab91fc7
| 3,649,656
|
def get_ads(client, customer_id, new_ad_resource_names):
"""Retrieves a google.ads.google_ads.v4.types.AdGroupAd instance.
Args:
        client: A google.ads.google_ads.client.GoogleAdsClient instance.
customer_id: (str) Customer ID associated with the account.
new_ad_resource_names: (str) Resource name associated with the Ad group.
Returns:
An instance of the google.ads.google_ads.v4.types.AdGroupAd message
class of the newly created ad group ad.
"""
def formatter(given_string):
"""This helper function is used to assign ' ' to names of resources
so that this formatted string can be used within an IN clause.
Args:
given_string: (str) The string to be formatted.
"""
results = []
for i in given_string:
results.append(repr(i))
return ','.join(results)
resouce_names = formatter(new_ad_resource_names)
ga_service = client.get_service('GoogleAdsService', version='v4')
query = ('SELECT ad_group_ad.ad.id, '
'ad_group_ad.ad.expanded_text_ad.headline_part1, '
'ad_group_ad.ad.expanded_text_ad.headline_part2, '
'ad_group_ad.status, ad_group_ad.ad.final_urls, '
'ad_group_ad.resource_name '
'FROM ad_group_ad '
'WHERE ad_group_ad.resource_name in ({}) '.
format(resouce_names))
response = ga_service.search(customer_id, query, PAGE_SIZE)
    response = iter(response)
ads = []
while response:
try:
current_row = next(response)
ads.append(current_row.ad_group_ad)
except StopIteration:
break
return ads
|
3e1bc99901490c53c66418a63238cf76de282896
| 3,649,657
|
def corrfact_vapor_rosolem(h, h_ref=None, const=0.0054):
"""Correction factor for vapor correction from absolute humidity (g/m3).
The equation was suggested by Rosolem et al. (2013).
If no reference value for absolute humidity ``h_ref`` is provided,
the average value will be used.
Parameters
----------
h : float or array of floats
Absolute humidity (g / m3)
h_ref : float
Reference value for absolute humidity
const : float
Empirical constant, defaults to 0.0054
Returns
-------
output : float or array of floats
Correction factor for water vapor effect (dimensionless)
"""
if h_ref is None:
h_ref = np.mean(h)
return 1 + const * (h - h_ref)
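# Worked example (numpy assumed): 5 g/m3 above the reference humidity raises
# the correction factor by 0.0054 * 5 = 2.7 %.
import numpy as np

h = np.array([10.0, 15.0, 20.0])
print(corrfact_vapor_rosolem(h, h_ref=10.0))  # [1.    1.027 1.054]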
|
6add20bf118e85e77f245776101169efb9ba4eac
| 3,649,658
|
def sine_ease_out(p):
"""Modeled after quarter-cycle of sine wave (different phase)"""
return sin(p * tau)
|
58a78ad44e04df42f0533b6a94e51d04398407a9
| 3,649,659
|
def _extract_codes_from_element_text(dataset, parent_el_xpath, condition=None): # pylint: disable=invalid-name
"""Extract codes for checking from a Dataset. The codes are being extracted from element text.
Args:
dataset (iati.data.Dataset): The Dataset to check Codelist values within.
parent_el_xpath (str): An XPath to locate the element(s) with the attribute of interest.
condition (str): An optional XPath expression to limit the scope of what is extracted.
Returns:
list of tuple: A tuple in the format: `(str, int)` - The `str` is a matching code from within the Dataset; The `int` is the sourceline at which the parent element is located.
"""
# include the condition
if condition:
parent_el_xpath = parent_el_xpath + '[' + condition + ']'
parents_to_check = dataset.xml_tree.xpath(parent_el_xpath)
located_codes = list()
for parent in parents_to_check:
located_codes.append((parent.text, parent.sourceline))
return located_codes
|
45e4ec2a61dc38066ad9a71d41e63a48c6ccde23
| 3,649,660
|
def rotate_im(img, angle, interpolation=cv2.INTER_LINEAR, border_mode=cv2.BORDER_REFLECT_101, value=None):
"""Rotate the image.
Rotate the image such that the rotated image is enclosed inside the tightest
    rectangle. The area not occupied by pixels of the original image is filled
    according to ``border_mode``.
    Parameters
    ----------
    img : numpy.ndarray
        numpy image
    angle : float
        angle by which the image is to be rotated
    interpolation : int
        OpenCV interpolation flag, defaults to cv2.INTER_LINEAR
    border_mode : int
        OpenCV border mode used for the exposed area, defaults to cv2.BORDER_REFLECT_101
    value : scalar or sequence, optional
        border value used when ``border_mode`` is cv2.BORDER_CONSTANT
Returns
-------
numpy.ndarray
Rotated Image
"""
# grab the dimensions of the image and then determine the
# centre
(h, w) = img.shape[:2]
(cX, cY) = (w // 2, h // 2)
# grab the rotation matrix (applying the negative of the
# angle to rotate clockwise), then grab the sine and cosine
# (i.e., the rotation components of the matrix)
M = cv2.getRotationMatrix2D((cX, cY), angle, 1.0)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
# compute the new bounding dimensions of the image
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
# adjust the rotation matrix to take into account translation
M[0, 2] += (nW / 2) - cX
M[1, 2] += (nH / 2) - cY
warp_fn = _maybe_process_in_chunks(
cv2.warpAffine, M=M, dsize=(nW, nH), flags=interpolation, borderMode=border_mode, borderValue=value
)
return warp_fn(img)
|
40ab5d9761bdb2044fe99af4d5a51187edd34327
| 3,649,661
|
def list_modules(curdir=CURDIR, pattern=MOD_FILENAME_RE):
"""List names from {ok,ng}*.py.
"""
return sorted(
m.name.replace('.py', '')
for m in curdir.glob('*.py') if pattern.match(m.name)
)
|
249b276ec5f42534a4ad162c02110bcf1f9cadf0
| 3,649,662
|
def encode_set_validator_config_and_reconfigure_script(
validator_account: AccountAddress,
consensus_pubkey: bytes,
validator_network_addresses: bytes,
fullnode_network_addresses: bytes,
) -> Script:
"""# Summary
Updates a validator's configuration, and triggers a reconfiguration of the system to update the
validator set with this new validator configuration.
Can only be successfully sent by a
Validator Operator account that is already registered with a validator.
# Technical Description
This updates the fields with corresponding names held in the `ValidatorConfig::ValidatorConfig`
config resource held under `validator_account`. It then emits a `DiemConfig::NewEpochEvent` to
trigger a reconfiguration of the system. This reconfiguration will update the validator set
on-chain with the updated `ValidatorConfig::ValidatorConfig`.
# Parameters
| Name | Type | Description |
| ------ | ------ | ------------- |
| `validator_operator_account` | `&signer` | Signer reference of the sending account. Must be the registered validator operator for the validator at `validator_address`. |
| `validator_account` | `address` | The address of the validator's `ValidatorConfig::ValidatorConfig` resource being updated. |
| `consensus_pubkey` | `vector<u8>` | New Ed25519 public key to be used in the updated `ValidatorConfig::ValidatorConfig`. |
| `validator_network_addresses` | `vector<u8>` | New set of `validator_network_addresses` to be used in the updated `ValidatorConfig::ValidatorConfig`. |
| `fullnode_network_addresses` | `vector<u8>` | New set of `fullnode_network_addresses` to be used in the updated `ValidatorConfig::ValidatorConfig`. |
# Common Abort Conditions
| Error Category | Error Reason | Description |
| ---------------- | -------------- | ------------- |
| `Errors::NOT_PUBLISHED` | `ValidatorConfig::EVALIDATOR_CONFIG` | `validator_address` does not have a `ValidatorConfig::ValidatorConfig` resource published under it. |
| `Errors::REQUIRES_ROLE` | `Roles::EVALIDATOR_OPERATOR` | `validator_operator_account` does not have a Validator Operator role. |
| `Errors::INVALID_ARGUMENT` | `ValidatorConfig::EINVALID_TRANSACTION_SENDER` | `validator_operator_account` is not the registered operator for the validator at `validator_address`. |
| `Errors::INVALID_ARGUMENT` | `ValidatorConfig::EINVALID_CONSENSUS_KEY` | `consensus_pubkey` is not a valid ed25519 public key. |
| `Errors::INVALID_STATE` | `DiemConfig::EINVALID_BLOCK_TIME` | An invalid time value was encountered in reconfiguration. Unlikely to occur. |
# Related Scripts
* `Script::create_validator_account`
* `Script::create_validator_operator_account`
* `Script::add_validator_and_reconfigure`
* `Script::remove_validator_and_reconfigure`
* `Script::set_validator_operator`
* `Script::set_validator_operator_with_nonce_admin`
* `Script::register_validator_config`
"""
return Script(
code=SET_VALIDATOR_CONFIG_AND_RECONFIGURE_CODE,
ty_args=[],
args=[
TransactionArgument__Address(value=validator_account),
TransactionArgument__U8Vector(value=consensus_pubkey),
TransactionArgument__U8Vector(value=validator_network_addresses),
TransactionArgument__U8Vector(value=fullnode_network_addresses),
],
)
|
8b5e5d259750eecf3cea78e9abba82300baa2626
| 3,649,663
|
def _do_ecf_reference_data_import(
import_method,
widget,
logwidget=None,
specification_items=None,
ecfdate=None,
datecontrol=None,
):
"""Import a new ECF club file.
widget - the manager object for the ecf data import tab
"""
ecffile = widget.datagrid.get_data_source().dbhome
# The commented code fails if tkinter is compiled without --enable-threads
# as in OpenBSD 5.7 i386 packages. The standard build from FreeBSD ports
# until early 2015 at least, when this change was introduced, is compiled
# with --enable-threads so the commented code worked. Not sure if the
# change in compiler on FreeBSD from gcc to clang made a difference. The
# Microsoft Windows' Pythons seem to be compiled with --enable-threads
# because the commented code works in that environment. The situation on
# OS X, and any GNU-Linux distribution, is not known.
# Comparison with the validate_and_copy_ecf_ogd_players_post_2006_rules()
# method in the sibling module sqlite3ecfogddataimport, which worked on
# OpenBSD 5.7 as it stood, highlighted the changes needed.
# ecfdate = widget.get_ecf_date()
if not ecffile:
return False
if not ecfdate:
return False
results = widget.get_appsys().get_results_database()
if not results:
return False
results.do_database_task(
import_method,
logwidget=logwidget,
taskmethodargs=dict(
ecffile=ecffile,
ecfdate=ecfdate,
parent=widget.get_widget(),
# datecontrol=widget.ecfdatecontrol.get(),
datecontrol=datecontrol, # See --enable-threads comment just above.
),
use_specification_items=specification_items,
)
return True
|
593b1ac77688c92c9fcd3ea8fafb3f5089849293
| 3,649,664
|
import ast
import inspect
def ast_operators(node):
"""Return a set of all operators and calls in the given AST, or return an error if any are invalid."""
if isinstance(node, (ast.Name, ast.Constant)):
return set()
elif isinstance(node, ast.BinOp):
return {type(node.op)} | ast_operators(node.left) | ast_operators(node.right)
elif isinstance(node, ast.UnaryOp):
return {type(node.op)} | ast_operators(node.operand)
elif isinstance(node, ast.Call):
if node.func.id not in METRIC_OPS:
raise ValueError(f"Unknown fn `{node.func.id}` in metric equation.")
# Make sure the number of args matches the fn signature
fn_argspec = inspect.getfullargspec(METRIC_OPS[node.func.id])
if (not node.args or
(fn_argspec.varargs is None and fn_argspec.varkw is None
and len(node.args) != len(fn_argspec.args))):
raise ValueError(f"Unexpected number of args to {node.func.id}")
return {node.func.id}.union(*(ast_operators(arg) for arg in node.args))
else:
raise TypeError(node)
|
ce5c69e228fbab682cd41330a058b6f16b8d5d1a
| 3,649,665
|
def calibrate_clock(out, tolerance=0.002, dcor=False):
"""\
currently for F2xx only:
recalculate the clock calibration values and write them to the flash.
"""
device = get_msp430_type() >> 8
variables = {}
if device == 0xf2:
        # First read the segment from the device, so that only the calibration values
        # are updated. Any other data in SegmentA is not changed.
segment_a = memory.Memory()
segment_a.append(memory.Segment(0x10c0, jtag._parjtag.memread(0x10c0, 64)))
# get the settings for all the frequencies
for frequency in calibvalues_memory_map:
measured_frequency, dco, bcs1 = clock.setDCO(
frequency * (1 - tolerance),
frequency * (1 + tolerance),
maxrsel=15,
dcor=dcor
)
variables['f%dMHz_dcoctl' % (frequency / 1e6)] = TYPE_8BIT, dco
variables['f%dMHz_bcsctl1' % (frequency / 1e6)] = TYPE_8BIT, bcs1
out.write('BCS settings for %s: DCOCTL=0x%02x BCSCTL1=0x%02x\n' % (
nice_frequency(measured_frequency), dco, bcs1)
)
segment_a.setMem(calibvalues_memory_map[frequency]['DCO'], chr(dco))
segment_a.setMem(calibvalues_memory_map[frequency]['BCS1'], chr(bcs1))
# erase segment and write new values
jtag._parjtag.memerase(jtag.ERASE_SEGMENT, segment_a[0].startaddress)
jtag._parjtag.memwrite(segment_a[0].startaddress, segment_a[0].data)
else:
raise NotImplementedError("--calibrate is not supported on %Xxx" % device)
return variables
|
6ad9940a0b43aff54317ff0b054a5a8e84fa5f73
| 3,649,666
|
def get_rejection_listings(username):
"""
Get Rejection Listings for a user
Args:
username (str): username for user
"""
activities = models.ListingActivity.objects.for_user(username).filter(
action=models.ListingActivity.REJECTED)
return activities
|
47f7078f193de651f282d1823900cd876bf9fd93
| 3,649,667
|
def quadratic_weighted_kappa(y_true, y_pred):
"""
QWK (Quadratic Weighted Kappa) Score
Args:
y_true:
target array.
y_pred:
predict array. must be a discrete format.
Returns:
QWK score
"""
return cohen_kappa_score(y_true, y_pred, weights='quadratic')
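# Usage sketch, assuming scikit-learn supplies cohen_kappa_score at module level:
from sklearn.metrics import cohen_kappa_score
import numpy as np
y_true = np.array([0, 1, 2, 3, 2])
y_pred = np.array([0, 1, 1, 3, 2])
print(quadratic_weighted_kappa(y_true, y_pred))  # a score in [-1, 1]; 1.0 is perfect agreement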
|
fe3208d58cfbed7fdc51ee6069bb4d72584ea6d7
| 3,649,668
|
def statistika():
"""Posodobi podatke in preusmeri na statistika.html"""
check_user_id()
data_manager.load_data_from_file()
data_manager.data_for_stats()
return bottle.template("statistika.html", data_manager=data_manager)
|
afc72610e4ca245089b131d06dfb5ed8a172615c
| 3,649,669
|
def decrement(x):
"""Given a number x, returns x - 1 unless that would be less than
zero, in which case returns 0."""
x -= 1
if x < 0:
return 0
else:
return x
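# Quick check of the clamping behaviour:
assert decrement(5) == 4
assert decrement(0) == 0  # never drops below zero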
|
56b95324c147a163d3bdd0e9f65782095b0a4def
| 3,649,670
|
def get_dagmaf(maf: msa.Maf) -> DAGMaf.DAGMaf:
"""Converts MAF to DagMaf.
Args:
maf: MAF to be converted.
Returns:
DagMaf built from the MAF.
"""
sorted_blocks = sort_mafblocks(maf.filecontent)
dagmafnodes = [
DAGMaf.DAGMafNode(block_id=b.id,
alignment=b.alignment,
orient=b.orient,
order=b.order(),
out_edges=b.out_edges)
for b in sorted_blocks
]
return DAGMaf.DAGMaf(dagmafnodes)
|
40fd06a9429874f1ca7188f2ff185c4dd8b64e01
| 3,649,671
|
def optdat10(area,lpdva,ndvab,nglb):
"""Fornece dados para a otimizacao"""
    # Objective function type: tpobj==1 --- weight
    #                          tpobj==2 --- energy
    #                          tpobj==3 --- maximum stress
    #                          tpobj==4 --- maximum displacement
    #
tpobj = 1
#
    # Constraint function type: tpres==1 --- weight
    #                           tpres==2 --- stress
    #                           tpres==3 --- stress and displacement
    #                           tpres==4 --- displacement
    #                           tpres==5 --- energy
tpres = 2
#
    # Enter the limit values of the design variables
    # vlb --- lower bounds
    # vub --- upper bounds
    # x0 --- initial value
#
xpdva = np.zeros(ndvab)
for idvab in range(ndvab):
iel = lpdva[idvab]
xpdva[idvab] = area[iel]
x0 = xpdva
    vlb = 0.1 * np.ones(ndvab)
vub = 10 * np.ones(ndvab)
#
    # Enter the limit values of the constraints
    # clb --- lower bounds
    # cub --- upper bounds
    cones = np.ones(len(area))  # related to the number of elements
    cones2 = np.ones(nglb)  # related to the number of degrees of freedom
clb1 = -250 * cones
cub1 = 250 * cones
# clb1 = -20*cones
# cub1 = 20*cones
# dlb1 = -0.4*cones2
# dub1 = 0.4*cones2
clbv = 1.5e+06 - 2.2204e-16 # 0
cubv = 1.5e+06
clbd = -1 * (10 ** -3) * cones2
cubd = 1 * (10 ** -3) * cones2
elbv = 2e-2
eubv = 2e-2
if tpres == 1:
# VOLUME
cub = cubv
clb = clbv
elif tpres == 2:
        # STRESSES
clb = clb1
cub = cub1
elif tpres == 3:
        # STRESSES AND DISPLACEMENTS
clb = [clb1, clbd]
cub = [cub1, cubd]
elif tpres == 4:
        # DISPLACEMENTS
clb = clbd
cub = cubd
else:
        # ENERGY
clb = elbv
cub = eubv
dadosoptdat10= [tpobj,tpres,vlb,vub,x0,clb,cub]
return dadosoptdat10
|
064813cb2e66adfed6cb5e694614b88343a7613c
| 3,649,672
|
def rotvec2quat(vec):
"""
A rotation vector is a 3 dimensional vector which is
co-directional to the axis of rotation and whose
norm gives the angle of rotation (in radians).
Args:
vec (list or np.ndarray): a rotational vector. Its norm
represents the angle of rotation.
Returns:
np.ndarray: quaternion [x,y,z,w] (shape: :math:`[4,]`).
"""
r = R.from_rotvec(vec)
return r.as_quat()
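# Usage sketch, assuming R is scipy.spatial.transform.Rotation as the body implies:
from scipy.spatial.transform import Rotation as R
import numpy as np
quat = rotvec2quat([0.0, 0.0, np.pi / 2])  # 90 degree rotation about z
print(quat)                                # approx [0, 0, 0.7071, 0.7071]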
|
a19b7b67e9cd5877cc5045887d071e069892e0a6
| 3,649,673
|
def generate_pop(pop_size, length):
"""
初始化种群
:param pop_size: 种群容量
:param length: 编码长度
:return bin_population: 二进制编码种群
"""
decim_population = np.random.randint(0, 2**length-1, pop_size)
print(decim_population)
bin_population = [('{:0%sb}'%length).format(x) for x in decim_population]
return bin_population
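# Usage sketch, assuming numpy is imported as np at module level:
import numpy as np
pop = generate_pop(pop_size=4, length=8)
print(pop)  # e.g. ['01101100', '00010111', ...] -- four 8-bit chromosomes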
|
d1248fe59161d2a75eaf08ffe2b180537c2d1af5
| 3,649,674
|
import argparse
import sys
def prepare_argument_parser():
"""
Set up the argument parser for the different commands.
Return:
Configured ArgumentParser object.
"""
argument_parser = argparse.ArgumentParser(
description='Build source code libraries from modules.')
argument_parser.add_argument(
'-r',
'--repository',
metavar="REPO",
dest='repositories',
action='append',
default=[],
help="Repository file(s) which should be available for the current library. "
"The loading of repository files from a VCS is only supported through "
"the library configuration file.")
argument_parser.add_argument(
'-c',
'--config',
dest='config',
default='project.xml',
help="Project configuration file. "
"Specifies the required repositories, modules and options "
"(default: '%(default)s').")
argument_parser.add_argument(
'-C',
'--cwd',
dest='cwd',
default=None,
help="Current working directory (default: '.').")
argument_parser.add_argument(
'-p',
'--path',
dest='path',
default=None,
help="Path in which the library will be generated (default: CWD).")
argument_parser.add_argument(
'-D',
'--option',
metavar='OPTION',
dest='options',
action='append',
type=str,
default=[],
help="Additional options. Options given here will be merged with options "
"from the configuration file and will overwrite the configuration "
"file definitions.")
argument_parser.add_argument(
'--collect',
metavar='COLLECTOR',
dest='collectors',
action='append',
type=str,
default=[],
help="Additional collectors. Values given here will be merged with collectors "
"from the configuration file.")
argument_parser.add_argument(
'-v',
'--verbose',
action='count',
default=0,
dest='verbose')
argument_parser.add_argument(
"--plain",
dest="plain",
action="store_true",
default=(not sys.stdout.isatty() or not sys.stderr.isatty()),
help="Disable styled output, only output plain ASCII.")
argument_parser.add_argument(
'--version',
action='version',
version='%(prog)s {}'.format(__version__),
help="Print the lbuild version number and exit.")
subparsers = argument_parser.add_subparsers(
title="Actions",
dest="action")
actions = [
DiscoverAction(),
DiscoverOptionsAction(),
SearchAction(),
ValidateAction(),
BuildAction(),
CleanAction(),
InitAction(),
UpdateAction(),
DependenciesAction(),
]
for action in actions:
action.register(subparsers)
return argument_parser
|
e57c3609ff54139dbb42ce95795f43acba9b3d25
| 3,649,675
|
def CountClusterSizes(clusterLabels):
""" This function takes the labels produced by spectral clustering (or
other clustering algorithm) and counts the members in each cluster.
This is primarily to see the distribution of cluster sizes over all
    windows, particularly to see if there are singleton clusters or a significant
number of clusters with a small number of members.
Parameters
---------
clusterLabels: numpy array of int (clustered customers) - the cluster
label of each customer
Returns
-------
clusterCounts: numpy array of int (0,k) - the number of customers
in each cluster
"""
currentK = len(np.unique(clusterLabels))
clusterCounts = np.zeros((1,currentK),dtype=int)
for clustCtr in range(0,currentK):
indices = np.where(clusterLabels==clustCtr)[0]
clusterCounts[0,clustCtr] = len(indices)
return clusterCounts
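# Usage sketch, assuming numpy is imported as np:
import numpy as np
labels = np.array([0, 0, 1, 2, 2, 2])
print(CountClusterSizes(labels))  # [[2 1 3]] -- members per cluster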
|
25bf78a83e55b72c7a33546450655efe7ee84874
| 3,649,676
|
def solver_problem1(digits_list):
"""input digits and return numbers that 1, 4, 7, 8 occurs"""
cnt = 0
for digits in digits_list:
for d in digits:
if len(d) in [2, 3, 4, 7]:
cnt += 1
return cnt
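# Usage sketch: each entry holds the output-digit segment strings of one display;
# digits 1, 7, 4 and 8 use 2, 3, 4 and 7 segments respectively.
sample = [['ab', 'abcdefg', 'abcd', 'abcde'],  # 1, 8, 4 -> 3 hits
          ['abc', 'abcdef']]                   # 7       -> 1 hit
print(solver_problem1(sample))                 # 4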
|
d1946d00d368ad498c9bb0a8562ec0ea76d26449
| 3,649,677
|
def spam_dotprods(rhoVecs, povms):
"""SPAM dot products (concatenates POVMS)"""
nEVecs = sum(len(povm) for povm in povms)
ret = _np.empty((len(rhoVecs), nEVecs), 'd')
for i, rhoVec in enumerate(rhoVecs):
j = 0
for povm in povms:
for EVec in povm.values():
ret[i, j] = _np.vdot(EVec.todense(), rhoVec.todense()); j += 1
# todense() gives a 1D array, so no need to transpose EVec
return ret
|
95adc6ea8e1d33899a7dc96ba99589ef9bffb7fe
| 3,649,678
|
import sys
def is_just_monitoring_error(unique_message):
"""
Return True if the unique_message is an intentional error just for
    monitoring (meaning that it contains one of the
JUST_MONITORING_ERROR_MARKERS somewhere in the exc_text)
"""
    if sys.version_info[0] == 2:
exc_text = unicode(unique_message.exc_text)
message = unicode(unique_message.message)
else:
exc_text = str(unique_message.exc_text)
message = str(unique_message.message)
return any([(marker in exc_text or marker in message)
for marker
in setting('MONITORING_ERROR_MARKERS')])
|
d566174d8f7f46aad588594aded7e78ef3a91957
| 3,649,679
|
def get_chi_atom_indices():
"""Returns atom indices needed to compute chi angles for all residue types.
Returns:
        A nested list of shape [residue_types=21, chis=4, atoms=4]. The residue types are
in the order specified in rc.restypes + unknown residue type
at the end. For chi angles which are not defined on the residue, the
positions indices are by default set to 0.
"""
chi_atom_indices = []
for residue_name in rc.restypes:
residue_name = rc.restype_1to3[residue_name]
residue_chi_angles = rc.chi_angles_atoms[residue_name]
atom_indices = []
for chi_angle in residue_chi_angles:
atom_indices.append([rc.atom_order[atom] for atom in chi_angle])
for _ in range(4 - len(atom_indices)):
atom_indices.append(
[0, 0, 0, 0]
) # For chi angles not defined on the AA.
chi_atom_indices.append(atom_indices)
chi_atom_indices.append([[0, 0, 0, 0]] * 4) # For UNKNOWN residue.
return chi_atom_indices
|
5ac6f2208e2819b8e0d04329cbfb94cb5dcd26ba
| 3,649,680
|
def get_all_device_stats():
"""Obtain and return statistics for all attached devices."""
devices = get_devices()
stats = {}
for serial in devices:
model, device_stats = get_device_stats(serial)
if not stats.get(model):
stats[model] = {}
stats[model][serial] = device_stats
return stats
|
9f2a50c4f6008120bc9527260f501f7e261dd19f
| 3,649,681
|
def plot_coefs(coefficients, nclasses):
"""
Plot the coefficients for each label
coefficients: output from clf.coef_
nclasses: total number of possible classes
"""
scale = np.max(np.abs(coefficients))
p = plt.figure(figsize=(25, 5))
for i in range(nclasses):
p = plt.subplot(1, nclasses, i + 1)
p = plt.imshow(coefficients[i].reshape(28, 28),
cmap=plt.cm.RdBu, vmin=-scale, vmax=scale)
p = plt.axis('off')
p = plt.title('Class %i' % i)
return None
|
356c6c4bb96b08a370b8c492275e638b059594e2
| 3,649,682
|
import json
def infect():
"""Return a function that calls the infect endpoint on app."""
def inner(users, qs):
app.debug = True
with app.test_client() as client:
headers = {'Content-Type': 'application/json'}
data = json.dumps(users)
rv = client.post('/infect?{0}'.format(qs),
data=data, headers=headers)
return json.loads(rv.data.decode())
return inner
|
3c6798b39b8545425d671c6ece8d0220c2630b5c
| 3,649,683
|
import datetime
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc['data'] = True
desc['description'] = """This plot shows the number of days with a high
temperature at or above a given threshold. You can optionally generate
this plot for the year to date period.
"""
today = datetime.date.today()
desc['arguments'] = [
dict(type='station', name='station', default='IA2203',
label='Select Station:', network='IACLIMATE'),
dict(type="year", name="year", default=today.year,
label="Year to Compare:"),
dict(type='select', options=PDICT, default='full',
label='Day Period Limit:', name='limit'),
]
return desc
|
479d98e9ab19dcc03332c1a95ccc0624cdcfe24d
| 3,649,684
|
def calc_cost_of_buying(count, price):
"""株を買うのに必要なコストと手数料を計算
"""
subtotal = int(count * price)
fee = calc_fee(subtotal)
return subtotal + fee, fee
|
391909bbff35c6eb7d68c965e3f36317e4164b1a
| 3,649,685
|
def add_units_to_query(df, udict=None):
"""
"""
for k, u in udict.items():
if k not in df.colnames:
continue
try:
df[k].unit
except Exception as e:
print(e)
setattr(df[k], 'unit', u)
else:
df[k] *= u / df[k].unit # TODO in-place
return df
|
984113ed8306d7734ac5351de347f331982c4251
| 3,649,686
|
import numpy as np
from scipy import signal
def update_lr(it_lr, alg, test_losses, lr_info=None):
"""Update learning rate according to an algorithm."""
if lr_info is None:
lr_info = {}
if alg == 'seung':
threshold = 10
if 'change' not in lr_info.keys():
lr_info['change'] = 0
if lr_info['change'] >= 4:
return it_lr, lr_info
# Smooth test_losses then check to see if they are still decreasing
if len(test_losses) > threshold:
smooth_test = signal.savgol_filter(np.asarray(test_losses), 3, 2)
check_test = np.all(np.diff(smooth_test)[-threshold:] < 0)
if check_test:
it_lr = it_lr / 2.
lr_info['change'] += 1
return it_lr, lr_info
elif alg is None or alg == '' or alg == 'none':
return it_lr, lr_info
else:
raise NotImplementedError('No routine for: %s' % alg)
|
de6ef7d700a9c4b549b6d500f6737c84dc032c95
| 3,649,687
|
def ocr_page_image(
doc_path,
page_num,
lang,
**kwargs
):
"""
image = jpg, jpeg, png
On success returns ``mglib.path.PagePath`` instance.
"""
logger.debug("OCR image (jpeg, jpg, png) document")
page_path = PagePath(
document_path=doc_path,
page_num=page_num,
step=Step(1),
# jpeg, jpg, png are 1 page documents
page_count=1
)
notify_pre_page_ocr(
page_path,
page_num=page_num,
lang=lang,
file_name=doc_path.file_name,
**kwargs
)
# resize and eventually convert (png -> jpg)
resize_img(
page_path,
media_root=settings.MEDIA_ROOT
)
extract_txt(
page_path,
lang=lang,
media_root=settings.MEDIA_ROOT
)
notify_txt_ready(
page_path,
page_num=page_num,
lang=lang,
file_name=doc_path.file_name,
**kwargs
)
# First quickly generate preview images
for step in Steps():
page_path.step = step
resize_img(
page_path,
media_root=settings.MEDIA_ROOT
)
# reset page's step
page_path.step = Step(1)
# Now OCR each image
for step in Steps():
if not step.is_thumbnail:
extract_hocr(
page_path,
lang=lang,
media_root=settings.MEDIA_ROOT
)
notify_hocr_ready(
page_path,
page_num=page_num,
lang=lang,
# step as integer number
step=step.current,
file_name=doc_path.file_name,
**kwargs
)
return page_path
|
d1b87d4bdad967e40971eeb9e4b1e881781b87ad
| 3,649,688
|
from functools import reduce
from math import sqrt
def factors(n):
"""
return set of divisors of a number
"""
step = 2 if n%2 else 1
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(sqrt(n))+1, step) if n % i == 0)))
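# Quick checks of the divisor set:
print(factors(28))  # {1, 2, 4, 7, 14, 28}
print(factors(27))  # {1, 3, 9, 27}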
|
687608f5397181892aa338c96ee299f91d7b5431
| 3,649,689
|
import decimal
def round_decimal(x, digits=0):
"""This function returns the round up float.
Parameters
----------
x : a float
digits : decimal point
Returns
----------
Rounded up float
"""
x = decimal.Decimal(str(x))
if digits == 0:
return int(x.quantize(decimal.Decimal("1"), rounding='ROUND_HALF_UP'))
    string = '1e' + str(-1 * digits)
return float(x.quantize(decimal.Decimal(string), rounding='ROUND_HALF_UP'))
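# Usage sketch: converting through str() and quantizing half-up avoids the usual
# binary-float surprise where round(2.675, 2) gives 2.67.
print(round_decimal(2.675, 2))  # 2.68
print(round_decimal(2.5))       # 3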
|
8670fa1e9063376e012ebbc71df0a19c6205ea9c
| 3,649,690
|
def basic_gn_stem(model, data, **kwargs):
"""Add a basic ResNet stem (using GN)"""
dim = 64
p = model.ConvGN(
data, 'conv1', 3, dim, 7, group_gn=get_group_gn(dim), pad=3, stride=2
)
p = model.Relu(p, p)
p = model.MaxPool(p, 'pool1', kernel=3, pad=1, stride=2)
return p, dim
|
7cd1c1e0ff58431fc89acdec0f6c1d5f6fa9daa8
| 3,649,691
|
def log_scale(start,end,num):
"""Simple wrapper to generate list of numbers equally spaced in logspace
Parameters
----------
start: floar
Inital number
end: Float
Final number
num: Float
Number of number in the list
Returns
-------
list: 1d array
List of number spanning start to end, equally space in log space
"""
return np.logspace(np.log10(start), np.log10(end), num = num)
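# Usage sketch, assuming numpy is imported as np:
import numpy as np
print(log_scale(1, 1000, 4))  # [   1.   10.  100. 1000.]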
|
32d3976cb9cbcceb4cef9af15da373ea84e4d0c7
| 3,649,692
|
def measure_xtran_params(neutral_point, transformation):
"""
Description: Assume that the transformation from robot coord to camera coord is: RotX -> RotY -> RotZ -> Tranl
In this case: RotX = 180, RotY = 0; RotZ = -90; Tranl: unknown
But we know coords of a determined neutral point in 2 coord systems,
hence we can measure Transl from robot centroid to camera centroid.(Step 2)
:param neutral_point : Dict, list of 2 coords of neutral_point in 2 coord systems
:param transformation : Dict, list of 3 rotating transformations
    :return: rotx, roty, rotz : Matrices 4x4 floats, the individual rotation transformations
    :return: tranl : Matrix 4x4 floats, translation from robot coord to camera coord
    # The combined r2c_xtran / c2r_xtran transformations are left commented out below.
"""
# 1: Load coords of the neutral point
neutral_robot = mm2m(coords=np.array(neutral_point['robot_coord'])) # neutral point coord in robot coord system
neutral_camera = mm2m(coords=np.array(neutral_point['camera_coord'])) # neutral point coord in camera coord system
rotx = create_rotx_matrix(theta=-transformation['rotx']) # load transformation matrix of rotation around x
roty = create_roty_matrix(theta=-transformation['roty']) # load transformation matrix of rotation around y
rotz = create_rotz_matrix(theta=-transformation['rotz']) # load transformation matrix of rotation around z
# 2: Find transformation between robot coord centroid and camera coord centroid
rotxyz = np.dot(np.dot(rotz, roty), rotx) # determine transformation matrix after rotate sequently around x, y, z
neutral_robot3 = np.dot(rotxyz, np.append(neutral_robot, 1))[:3] # find coord of neutral point after RotXYZ
Oc_in_3 = neutral_robot3 - neutral_camera # find coord of robot centroid in camera coord system
tranl = create_tranl_matrix(vector=-Oc_in_3)
# 3: Find transformation matrix from robot to camera
# r2c_xtran = np.dot(np.dot(np.dot(tranl, rotz), roty), rotx)
# c2r_xtran = np.linalg.inv(r2c_xtran)
return rotx, roty, rotz, tranl
|
c2758158d545dbc6c2591f7f64f1df159a0c82db
| 3,649,693
|
def getPrefix(routetbl, peer_logical):
""" FUNCTION TO GET THE PREFIX """
for route in routetbl:
if route.via == peer_logical:
return route.name
else:
pass
|
2ca32a1fd63d6fcefbcc9ac23e8636c73e88455b
| 3,649,694
|
def Logger_log(level, msg):
"""
Logger.log(level, msg)
logs a message to the log.
:param int level: the level to log at.
:param str msg: the message to log.
"""
return _roadrunner.Logger_log(level, msg)
|
af552b17aaeebef9713efffedcabd75946c961f1
| 3,649,695
|
import typing
def obj_test(**field_tests: typing.Callable[[typing.Any], bool]) -> typing.Callable[[typing.Any], bool]:
"""Return a lambda that tests for dict with string keys and a particular type for each key"""
def test(dat: typing.Any) -> bool:
type_test(dict)(dat)
dom_test = type_test(str)
for dom, rng in dat.items():
dom_test(dom)
if dom not in field_tests:
continue
rng_test = field_tests[dom]
rng_test(rng)
missing = set(field_tests.keys()) - set(dat.keys())
if missing:
raise Exception(f"{dat!r} lacks fields {missing}")
return True
return test
|
0439821b634807e178539b0444b69305c15e2e4e
| 3,649,696
|
def hist2D(x, y, xbins, ybins, **kwargs):
""" Create a 2 dimensional pdf vias numpy histogram2d"""
H, xedg, yedg = np.histogram2d(x=x, y=y, bins=[xbins,ybins], density=True, **kwargs)
xcen = (xedg[:-1] + xedg[1:]) / 2
ycen = (yedg[:-1] + yedg[1:]) / 2
return xcen, ycen, H
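# Usage sketch, assuming numpy is imported as np: estimate the joint pdf of two correlated samples.
import numpy as np
rng = np.random.default_rng(0)
x = rng.normal(size=1000)
y = 0.5 * x + rng.normal(scale=0.5, size=1000)
xc, yc, H = hist2D(x, y, xbins=20, ybins=20)
print(xc.shape, yc.shape, H.shape)  # (20,) (20,) (20, 20)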
|
7f192f4db38e954aad96abc66fa4dc9c190acd82
| 3,649,697
|
from collections import defaultdict
def generate_ngram_dict(filename, tuple_length):
"""Generate a dict with ngrams as key following words as value
:param filename: Filename to read from.
:param tuple_length: The length of the ngram keys
:return: Dict of the form {ngram: [next_words], ... }
"""
def file_words(file_pointer):
"""Generator for words in a file"""
for line in file_pointer:
for word in line.split():
yield word
ngrams = defaultdict(lambda: set())
with open(filename, 'r') as fp:
word_list = []
for word in file_words(fp):
if len(word_list) < tuple_length:
word_list.append(word)
continue
ngrams[tuple(word_list)].add(word)
word_list = word_list[1:] + [word]
return {key: tuple(val) for key, val in ngrams.items()}
|
45f7eccae852e61f20044448955cade00174998c
| 3,649,698
|
def get_end_point(centerline, offset=0):
"""
Get last point(s) of the centerline(s)
Args:
centerline (vtkPolyData): Centerline(s)
offset (int): Number of points from the end point to be selected
Returns:
centerline_end_point (vtkPoint): Point corresponding to end of centerline.
"""
centerline_end_points = []
for i in range(centerline.GetNumberOfLines()):
line = extract_single_line(centerline, i)
centerline_end_points.append(line.GetPoint(line.GetNumberOfPoints() - 1 - offset))
return centerline_end_points
|
f476e93b55bb046cfb6afb61a2e3ae37a172def3
| 3,649,699
|