content
stringlengths 22
815k
| id
int64 0
4.91M
|
|---|---|
def sort_results(boxes):
    """Return the DenseCap result boxes sorted by descending score.

    Parameters
    ----------
    boxes : list of dict
        Box dictionaries (output from load_output_json), each containing
        a 'score' key.

    Returns
    -------
    list of dict
        The boxes ordered from highest to lowest score.
    """
    # Bug fix: the original body referenced undefined names `results` and
    # `k` instead of the `boxes` parameter, raising NameError on every call.
    return sorted(boxes, key=lambda box: box['score'], reverse=True)
| 25,300
|
def get_date(d : str) -> datetime.datetime:
    """Convert a ModDB string representation of time into a datetime.

    The input precision ranges from a full date-time with UTC offset down
    to just a year and a month.

    Parameters
    -----------
    d : str
        String representation of a datetime

    Returns
    -------
    datetime.datetime
        The datetime object for the given string
    """
    # Most precise form first: strip the colon inside the UTC offset
    # (e.g. "+05:00" -> "+0500") so that %z can parse it.
    attempts = (
        (d[:-3] + d[-2:], '%Y-%m-%dT%H:%M:%S%z'),
        (d, '%Y-%m-%d'),
    )
    for candidate, fmt in attempts:
        try:
            return datetime.datetime.strptime(candidate, fmt)
        except ValueError:
            continue
    # Least precise form: year and month only.
    return datetime.datetime.strptime(d, '%Y-%m')
| 25,301
|
def validate_selected_audioTrackUID(audioTrackUID):
    """Check that an audioTrackUID has references required for this implementation.

    Raises:
        AdmError: if the track index, the audioTrackFormat reference, the
            audioPackFormat reference, or the stream's audioChannelFormat
            reference is missing.
    """
    if audioTrackUID.trackIndex is None:
        raise AdmError("audioTrackUID {atu.id} does not have a track index, "
                       "which should be specified in the CHNA chunk".format(
                           atu=audioTrackUID,
                       ))
    if audioTrackUID.audioTrackFormat is None:
        # Consistency fix: use the same 'atu' placeholder name as the other
        # messages here (was 'self'); the rendered text is unchanged.
        raise AdmError("audioTrackUID {atu.id} is not linked "
                       "to an audioTrackFormat".format(
                           atu=audioTrackUID,
                       ))
    if audioTrackUID.audioPackFormat is None:
        raise AdmError("audioTrackUID {atu.id} does not have an audioPackFormat "
                       "reference. This may be used in coded formats, which are not "
                       "currently supported.".format(
                           atu=audioTrackUID,
                       ))
    audioStreamFormat = audioTrackUID.audioTrackFormat.audioStreamFormat
    if audioStreamFormat.audioChannelFormat is None:
        raise AdmError("audioStreamFormat {asf.id} does not have an audioChannelFormat "
                       "reference. This may be used in coded formats, which are not "
                       "currently supported.".format(
                           asf=audioStreamFormat,
                       ))
| 25,302
|
def var_policer(*args):
    """Build and return a variable policer object from the given args."""
    policer_args = args
    return VarPolicer(policer_args)
| 25,303
|
def winner(board):
    """
    Return the winning symbol on the board, or None if nobody has won.
    """
    size = len(board)
    # Horizontal: the middle cell of each row equals both its neighbours.
    for row in range(size):
        for col in range(1, size, 2):
            if board[row][col - 1] == board[row][col] == board[row][col + 1]:
                return board[row][col]
    # Vertical: the middle cell of each column equals both its neighbours.
    for row in range(1, size, 2):
        for col in range(size):
            if board[row - 1][col] == board[row][col] == board[row + 1][col]:
                return board[row][col]
    # Diagonals through the centre cell.
    centre = board[1][1]
    if centre == board[0][0] and centre == board[2][2]:
        return centre
    if centre == board[0][2] and centre == board[2][0]:
        return centre
    return None
| 25,304
|
def freqz_resp_list(b, a=np.array([1]), mode='dB', fs=1.0, n_pts=1024, fsize=(6, 4)):
    """
    Display digital filter frequency response magnitude, phase, or group
    delay for a list of filters. A plot is produced using matplotlib.

    Parameters
    ----------
    b : list of ndarrays of numerator coefficients, one per filter
    a : list of ndarrays of denominator coefficients, one per filter
        NOTE(review): the default np.array([1]) is indexed as a[n] below,
        which only works for a single filter - confirm callers pass a list
        whenever len(b) > 1.
    mode : display mode: 'dB' magnitude, 'phase' in radians, or
        'groupdelay_s' in samples and 'groupdelay_t' in sec,
        all versus frequency in Hz
    fs : sampling frequency in Hz; default is 1.0
    n_pts : number of points to plot; default is 1024
    fsize : figure size; default is (6,4) inches

    Mark Wickert, January 2015
    """
    if type(b) == list:
        # We have a list of filters
        N_filt = len(b)
    else:
        return None
    # Normalized frequency grid on [0, 0.5).
    f = np.arange(0, n_pts) / (2.0 * n_pts)
    for n in range(N_filt):
        w, H = signal.freqz(b[n], a[n], 2 * np.pi * f)
        if n == 0:
            plt.figure(figsize=fsize)
        if mode.lower() == 'db':
            plt.plot(f * fs, 20 * np.log10(np.abs(H)))
            if n == N_filt - 1:
                plt.xlabel('Frequency (Hz)')
                plt.ylabel('Gain (dB)')
                plt.title('Frequency Response - Magnitude')
        elif mode.lower() == 'phase':
            plt.plot(f * fs, np.angle(H))
            if n == N_filt - 1:
                plt.xlabel('Frequency (Hz)')
                plt.ylabel('Phase (rad)')
                plt.title('Frequency Response - Phase')
        elif (mode.lower() == 'groupdelay_s') or (mode.lower() == 'groupdelay_t'):
            # Since this calculation involves finding the derivative of the
            # phase response, care must be taken at phase wrapping points
            # and when the phase jumps by +/-pi, which occurs when the
            # amplitude response changes sign. Since the amplitude response
            # is zero when the sign changes, the jumps do not alter the group
            # delay results.
            theta = np.unwrap(np.angle(H))
            # Since theta for an FIR filter is likely to have many pi phase
            # jumps too, we unwrap a second time 2*theta and divide by 2
            theta2 = np.unwrap(2 * theta) / 2.
            Tg = -np.diff(theta2) / np.diff(w)
            # For gain almost zero set groupdelay = 0.
            # Bug fix: pylab.find() was removed from matplotlib, and the
            # original took log10 of the *complex* H, whose comparison with
            # -400 raises TypeError. Use the magnitude and np.nonzero.
            idx = np.nonzero(20 * np.log10(np.abs(H[:-1])) < -400)[0]
            Tg[idx] = np.zeros(len(idx))
            max_Tg = np.max(Tg)
            if mode.lower() == 'groupdelay_t':
                max_Tg /= fs
                plt.plot(f[:-1] * fs, Tg / fs)
                plt.ylim([0, 1.2 * max_Tg])
            else:
                plt.plot(f[:-1] * fs, Tg)
                plt.ylim([0, 1.2 * max_Tg])
            if n == N_filt - 1:
                plt.xlabel('Frequency (Hz)')
                if mode.lower() == 'groupdelay_t':
                    plt.ylabel('Group Delay (s)')
                else:
                    plt.ylabel('Group Delay (samples)')
                plt.title('Frequency Response - Group Delay')
        else:
            s1 = 'Error, mode must be "dB", "phase, '
            s2 = '"groupdelay_s", or "groupdelay_t"'
            log.info(s1 + s2)
| 25,305
|
def _nslookup(ipv4):
"""Lookup the hostname of an IPv4 address.
Args:
ipv4: IPv4 address
Returns:
hostname: Name of host
"""
# Initialize key variables
hostname = None
# Return result
try:
ip_results = socket.gethostbyaddr(ipv4)
if len(ip_results) > 1:
hostname = ip_results[0]
except:
hostname = None
return (ipv4, hostname)
| 25,306
|
def get_service_node(service):
    """Return the name of the node providing the given service, or '' if none.

    Args:
        service: Name of the ROS service to look up.

    Returns:
        str: Node name, or the empty string when no node provides it.
    """
    node = rosservice_get_service_node(service)
    # Idiom fix: compare to None with `is`, not `==`.
    if node is None:
        node = ""
    return node
| 25,307
|
def set_up_boxes(path, device):
    """
    Entrypoint for the segmentation file. Walks every video in the given
    path and generates face and body bounding-box CSVs for it.

    :param path: path to the videos
    :type path: str
    :param device: device to run models on
    :type device: str
    """
    for filename in tqdm(os.listdir(path)):
        # Skip anything that is not an .mp4 video.
        if not filename.endswith('.mp4'):
            continue
        print("\nGenerating boxes for " + filename)
        stem = filename[:-4]
        for kind in ('face', 'body'):
            # Re-open the capture for each pass so it starts at frame 0.
            video = cv2.VideoCapture(path + filename)
            find_boxes(video, kind, 12, track=True,
                       csv=path + stem + '_' + kind + '_boxes.csv',
                       device=device)
| 25,308
|
def _download_nasdaq_symbols(timeout):
    """Download the NASDAQ ticker list over FTP and return it as a DataFrame.

    @param timeout: the time to wait for the FTP connection

    Raises:
        RemoteDataError: on connection/download failure or a malformed
            listing (missing footer line).
    """
    try:
        ftp_session = FTP(_NASDAQ_FTP_SERVER, timeout=timeout)
        ftp_session.login()
    except all_errors as err:
        # Bug fix: '$s' was a typo for the '%s' format specifier, so the
        # underlying FTP error was never included in the message.
        raise RemoteDataError('Error connecting to %r: %s' %
                              (_NASDAQ_FTP_SERVER, err))
    lines = []
    try:
        ftp_session.retrlines('RETR ' + _NASDAQ_TICKER_LOC, lines.append)
    except all_errors as err:
        # Same '$s' -> '%s' fix as above.
        raise RemoteDataError('Error downloading from %r: %s' %
                              (_NASDAQ_FTP_SERVER, err))
    finally:
        ftp_session.close()
    # Sanity Checking
    if not lines[-1].startswith('File Creation Time:'):
        raise RemoteDataError('Missing expected footer. Found %r' % lines[-1])
    # Convert Y/N to True/False.
    converter_map = dict((col, _bool_converter) for col, t in _TICKER_DTYPE
                         if t is bool)
    # For pandas >= 0.20.0, the Python parser issues a warning if
    # both a converter and dtype are specified for the same column.
    # However, this measure is probably temporary until the read_csv
    # behavior is better formalized.
    with warnings.catch_warnings(record=True):
        # Pass the delimiter by keyword: positional `sep` is deprecated and
        # removed in modern pandas.
        data = read_csv(StringIO('\n'.join(lines[:-1])), sep='|',
                        dtype=_TICKER_DTYPE, converters=converter_map,
                        index_col=1)
    # Properly cast enumerations
    for cat in _CATEGORICAL:
        data[cat] = data[cat].astype('category')
    return data
| 25,309
|
def sessions(request):
    """
    Cookies preprocessor; currently contributes an empty template context.
    """
    return {}
| 25,310
|
def dynamax_mnn(src: nb.typed.Dict, trg: nb.typed.Dict,
                src_emb: np.ndarray, trg_emb: np.ndarray,
                src_k: np.ndarray, trg_k: np.ndarray) -> np.ndarray:
    """
    Run DynaMax-Jaccard in both directions and keep only the pairs that
    are mutual nearest neighbors.

    :param src nb.typed.Dict: src_id2pointers dictionary
    :param trg nb.typed.Dict: trg_id2pointers dictionary
    :param src_emb np.ndarray: unnormalized word embeddings matrix for src lang
    :param trg_emb np.ndarray: unnormalized word embeddings matrix for trg lang
    :param src_k np.ndarray: preranked target candidates for source language
    :param trg_k np.ndarray: preranked source candidates for target language
    """
    logging.info('DynaMax: commencing first loop')
    forward = dynamax_loop(src, trg, src_emb, trg_emb, src_k)
    logging.info('DynaMax: commencing second loop')
    backward = dynamax_loop(trg, src, trg_emb, src_emb, trg_k)
    logging.info('DynaMax: inferring mutual nearest neighbors')
    return mutual_nn(forward, backward)
| 25,311
|
def resnet152(pretrained=False, progress=True, **kwargs):
    """Constructs a ResNet-152 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Block counts for the four ResNet-152 stages.
    stage_blocks = [3, 8, 36, 3]
    return _resnet('resnet152', DistillerBottleneck, stage_blocks,
                   pretrained, progress, **kwargs)
| 25,312
|
def initialize_weights(model: nn.Module, args: Namespace):
    """
    Initializes the weights of a model in place.

    :param model: An nn.Module whose parameters are (re)initialized.
    :param args: Namespace whose ``uniform_init`` flag selects Xavier
        uniform (True) or Xavier normal (False) initialization.
    """
    for param in model.parameters():
        # 1-D parameters (biases etc.) are zeroed; weight matrices get
        # Xavier initialization.
        if param.dim() == 1:
            nn.init.constant_(param, 0)
        elif args.uniform_init:
            # for relu, to match chembl paper
            nn.init.xavier_uniform_(param, gain=nn.init.calculate_gain('relu'))
        else:
            nn.init.xavier_normal_(param)
| 25,313
|
def _load_model(featurizer_path):
    """Load the featurization model.

    Parameters
    ----------
    featurizer_path: str
        Path to the saved model file

    Returns
    -------
    The loaded PyTorch model, with its segmentation head and final
    activation stripped, in eval mode.
    """
    checkpoint = torch.load(featurizer_path)
    model = UNet(**checkpoint['model_args'])
    model.load_state_dict(checkpoint['model_state'])
    # Replace the last layer and activation with identities so the network
    # emits features instead of segmentation predictions.
    model.segment = layers.Identity()
    model.activate = layers.Identity()
    model.eval()
    return model
| 25,314
|
def grid0_baseline(num_runs, render=True):
    """Run script for the grid0 baseline.

    Parameters
    ----------
    num_runs : int
        number of rollouts the performance of the environment is evaluated
        over
    render : bool, optional
        specifies whether to use the gui during execution

    Returns
    -------
    float
        mean cumulative return over ``num_runs`` rollouts; note the code
        returns ``np.mean(results['returns'])``, not an Experiment object
    """
    # Pull the experiment configuration pieces out of the module-level
    # flow_params dictionary.
    exp_tag = flow_params['exp_tag']
    sim_params = flow_params['sim']
    vehicles = flow_params['veh']
    env_params = flow_params['env']
    net_params = flow_params['net']
    initial_config = flow_params.get('initial', InitialConfig())
    # define the traffic light logic
    tl_logic = TrafficLightParams(baseline=False)
    phases = [{"duration": "31", "minDur": "8", "maxDur": "45",
               "state": "GrGr"},
              {"duration": "6", "minDur": "3", "maxDur": "6",
               "state": "yryr"},
              {"duration": "31", "minDur": "8", "maxDur": "45",
               "state": "rGrG"},
              {"duration": "6", "minDur": "3", "maxDur": "6",
               "state": "ryry"}]
    # One actuated traffic light program per intersection in the grid.
    for i in range(N_ROWS * N_COLUMNS):
        tl_logic.add('center'+str(i), tls_type='actuated', phases=phases,
                     programID=1)
    # modify the rendering to match what is requested
    sim_params.render = render
    # set the evaluation flag to True
    env_params.evaluate = True
    # import the network class
    module = __import__('flow.networks', fromlist=[flow_params['network']])
    network_class = getattr(module, flow_params['network'])
    # create the network object
    network = network_class(
        name=exp_tag,
        vehicles=vehicles,
        net_params=net_params,
        initial_config=initial_config,
        traffic_lights=tl_logic
    )
    # import the environment class
    module = __import__('flow.envs', fromlist=[flow_params['env_name']])
    env_class = getattr(module, flow_params['env_name'])
    # create the environment object
    env = env_class(env_params, sim_params, network)
    exp = Experiment(env)
    results = exp.run(num_runs, env_params.horizon)
    total_delay = np.mean(results['returns'])
    return total_delay
| 25,315
|
def read_aims(filename):
    """Method to read FHI-aims geometry files in phonopy context.

    Parameters
    ----------
    filename : str
        Path to a geometry.in-style FHI-aims file.

    Returns
    -------
    Atoms
        Cell, symbols and Cartesian positions parsed from the file;
        magnetic moments are attached when every atom has an
        initial_moment line.
    """
    # Bug fix: use a context manager so the file handle is closed
    # deterministically (the original leaked it via open(...).readlines()).
    with open(filename, 'r') as f:
        lines = f.readlines()
    cell = []
    is_frac = []
    positions = []
    symbols = []
    magmoms = []
    for line in lines:
        fields = line.split()
        if not len(fields):
            continue
        if fields[0] == "lattice_vector":
            vec = lmap(float, fields[1:4])
            cell.append(vec)
        elif fields[0][0:4] == "atom":
            if fields[0] == "atom":
                frac = False
            elif fields[0] == "atom_frac":
                frac = True
            pos = lmap(float, fields[1:4])
            sym = fields[4]
            is_frac.append(frac)
            positions.append(pos)
            symbols.append(sym)
            magmoms.append(None)
        # implicitly assuming that initial_moments lines adhere to the
        # FHI-aims geometry.in specification, i.e. two subsequent
        # initial_moments lines do not occur; if they do, the value
        # specified in the last line is taken here - without any warning
        elif fields[0] == "initial_moment":
            magmoms[-1] = float(fields[1])
    # Convert fractional coordinates to Cartesian via the cell vectors.
    for (n, frac) in enumerate(is_frac):
        if frac:
            pos = [sum([positions[n][l] * cell[l][i] for l in range(3)])
                   for i in range(3)]
            positions[n] = pos
    if None in magmoms:
        atoms = Atoms(cell=cell, symbols=symbols, positions=positions)
    else:
        atoms = Atoms(cell=cell, symbols=symbols, positions=positions,
                      magmoms=magmoms)
    return atoms
| 25,316
|
def test(model, issue_batches):
    """
    Compute classification accuracy of the model over the given batches.

    Returns the fraction of samples whose argmax prediction matches the
    argmax of the one-hot labels.
    """
    session = tf.get_default_session()
    correct = 0
    total = 0
    for epoch, step, eigens, labels in issue_batches:
        guess = session.run(model['guess'],
                            feed_dict={model['eigens']: eigens})
        total += guess.shape[0]
        correct += np.sum(np.argmax(labels, axis=1) == np.argmax(guess, axis=1))
    return float(correct) / float(total)
| 25,317
|
def evaluate_sample(ResNet50_model, X_train, Y_train, X_val_b, Y_val_b, X_data, Y_data, checkpoint_path):
    """
    Train the disease-classification model on the given labeled split and
    report its accuracy on the provided evaluation data.

    Returns a tuple (acc, model).
    """
    # Shuffle the training set so samples are not ordered by class/source.
    order = np.random.permutation(X_train.shape[0])
    X_train = X_train[order]
    Y_train = Y_train[order]
    # One-hot encode the validation labels.
    Y_validation = to_categorical(Y_val_b)
    # Train and evaluate the model.
    model = train_disease_classification_model(
        ResNet50_model, X_train, Y_train, X_val_b, Y_validation,
        checkpoint_path)
    acc = model.evaluate(X_data, Y_data, verbose=0)
    return acc, model
| 25,318
|
def groupbys(df1, df2, by=None, left_by=None, right_by=None, allow_right_empty=False):
    """
    Iterate matched groups of two DataFrames.

    df1: Left pandas.DataFrame
    df2: Right pandas.DataFrame
    by: "by" of groupby. Use intersection of each columns, if it is None.
    left_by: This or "by" is used as "by" of df1.groupby.
    right_by: This or "by" is used as "by" of df2.groupby.
    allow_right_empty: Output right, if it is empty.

    Yields (key, left_group, right_group) for each left group; groups whose
    right side is empty are skipped unless allow_right_empty is set.
    """
    if by is None:
        by = df1.columns.intersection(df2.columns).tolist()
    left_groups = df1.groupby(left_by if left_by else by)
    # Only the row indices of the right groups are needed for the lookup.
    right_indices = df2.groupby(right_by if right_by else by).indices
    for key, left in left_groups:
        right = df2.iloc[right_indices.get(key, [])]
        if allow_right_empty or len(right):
            yield key, left, right
| 25,319
|
def plotThreePlanes(potentialOp, gridFun, limits, dimensions,
                    colorRange=None, transformation='real', evalOps=None):
    """
    Plot the potential generated by applying a potential operator to a grid
    function on the xy, xz and yz planes.

    *Parameters:*
       - potentialOp (PotentialOperator)
            A potential operator.
       - gridFun (GridFunction)
            A grid function.
       - limits (tuple)
            Tuple (min, max) or (xmin, xmax, ymin, ymax, zmin, zmax)
            specifying the extent of each plane on which the potential
            will be plotted.
       - dimensions (tuple)
            Scalar or tuple (xdim, ydim, zdim) specifying the number of samples
            in each direction.
       - colorRange (tuple)
            Tuple (min, max) determining the range of data to be plotted.
            If set to None, the data range is determined automatically.
       - transformation ('real', 'imag', 'abs' or a callable object)
            Determines how the potential is transformed before plotting.
       - evalOps (EvaluationOptions)
            Options controlling the evaluation of the potential.
    """
    # Normalize `dimensions` to a 3-tuple (xdim, ydim, zdim).
    if np.isscalar(dimensions):
        dims = (dimensions, dimensions, dimensions)
    else:
        if len(dimensions) == 3:
            dims = dimensions
        else:
            raise ValueError("dimensions must be a scalar or a tuple with 3 elements")
    # Normalize `limits` to (xmin, xmax, ymin, ymax, zmin, zmax).
    if len(limits) == 2:
        lims = (limits[0], limits[1], limits[0], limits[1], limits[0], limits[1])
    elif len(limits) == 6:
        lims = limits
    else:
        raise ValueError("limits must be a tuple with 2 or 6 elements")
    # Each plane passes through the midpoint of the plotted volume.
    origin = ((lims[0] + lims[1]) / 2.,
              (lims[2] + lims[3]) / 2.,
              (lims[4] + lims[5]) / 2.)
    # Sample the potential on the xy, xz and yz planes; the limit/dimension
    # slices select the two axes spanning each plane.
    (points1,vals1) = py_ext.evaluatePotentialOnPlane(
        potentialOp,gridFun,lims[:4],dims[:2],plane="xy",origin=origin,
        evalOps=evalOps)
    (points2,vals2) = py_ext.evaluatePotentialOnPlane(
        potentialOp,gridFun,lims[:2]+lims[4:],dims[:1]+dims[2:],plane="xz",
        origin=origin,evalOps=evalOps)
    (points3,vals3) = py_ext.evaluatePotentialOnPlane(
        potentialOp,gridFun,lims[2:],dims[1:],plane="yz",origin=origin,
        evalOps=evalOps)
    # Map the (generally complex) potential values to real numbers for
    # display, either via a named transformation or a user callable.
    if not hasattr(transformation, '__call__'):
        if transformation=='real':
            data_transform = lambda point,val:np.real(val)
        elif transformation=='imag':
            data_transform = lambda point,val:np.imag(val)
        elif transformation=='abs':
            data_transform = lambda point,val:np.abs(val)
        else:
            raise ValueError("Unknown value for 'transformation'. It needs to be 'real', 'imag', 'abs' or a Python Callable!")
    else:
        data_transform = transformation
    vals1 = data_transform(points1,vals1)
    vals2 = data_transform(points2,vals2)
    vals3 = data_transform(points3,vals3)
    # Share one color scale across all three planes.
    if colorRange is None:
        minVal = np.min([vals1,vals2,vals3])
        maxVal = np.max([vals1,vals2,vals3])
        colorRange = (minVal,maxVal)
    g1 = tvtk.StructuredGrid(dimensions=(dims[0],dims[1],1),points=points1)
    g2 = tvtk.StructuredGrid(dimensions=(dims[0],dims[2],1),points=points2)
    g3 = tvtk.StructuredGrid(dimensions=(dims[1],dims[2],1),points=points3)
    # Add data
    g1.point_data.scalars = vals1
    g2.point_data.scalars = vals2
    g3.point_data.scalars = vals3
    # Create actors
    # NOTE(review): the 'input=' constructor keyword is the pre-VTK6
    # pipeline API - confirm the installed tvtk version still supports it.
    mapper1 = tvtk.DataSetMapper(input=g1)
    mapper1.scalar_range = colorRange
    actor1 = tvtk.Actor(mapper=mapper1)
    mapper2 = tvtk.DataSetMapper(input=g2)
    mapper2.scalar_range = colorRange
    actor2 = tvtk.Actor(mapper=mapper2)
    mapper3 = tvtk.DataSetMapper(input=g3)
    mapper3.scalar_range = colorRange
    actor3 = tvtk.Actor(mapper=mapper3)
    # Overlay the grid itself and a color legend.
    gActor = gridActor(gridFun.grid())
    legend = legendActor(actor1)
    plotTvtkActors([actor1,actor2,actor3,gActor,legend])
| 25,320
|
def Base64WSEncode(s):
    """
    Return Base64 web safe encoding of s. Suppress padding characters (=).
    Uses URL-safe alphabet: - replaces +, _ replaces /. Will convert s of type
    unicode to string type first.

    @param s: string to encode as Base64
    @type s: string

    @return: Base64 representation of s.
    @rtype: string

    NOTE: Taken from keyczar (Apache 2.0 license)
    """
    # Text input is encoded to UTF-8 bytes first; byte input passes through.
    data = s.encode("utf-8") if isinstance(s, six.text_type) else s
    return base64.urlsafe_b64encode(data).decode("utf-8").replace("=", "")
| 25,321
|
def test_tf2zpk():
    """test the tf2zpk function"""
    zeros, poles, gain = tf2zpk(1, 2)
    assert zeros.size == 0
    assert poles.size == 0
    assert gain == 0.5
| 25,322
|
def onlyWikipediaURLS(urls):
    """Some example HTML page data is from wikipedia. This function converts
    relative wikipedia links to full wikipedia URLs"""
    # Filter to relative /wiki/ links and prefix the domain in one pass.
    return ["https://en.wikipedia.org" + url
            for url in urls if url.startswith('/wiki/')]
| 25,323
|
def run():
    """Parses the docopt command line and dispatches the Route53 commands.

    Supported commands: list-zones, list-records (optionally filtered with
    --zone), create-record and delete-record.
    """
    args = docopt(__doc__)
    configure_logging(args["--debug"])
    disco_route53 = DiscoRoute53()
    if args['list-zones']:
        for hosted_zone in disco_route53.list_zones():
            is_private_zone = hosted_zone.config['PrivateZone']
            print("{0:<20} {1:10} {2:5}".format(hosted_zone.name, hosted_zone.id, is_private_zone))
    elif args['list-records']:
        for hosted_zone in disco_route53.list_zones():
            # the Hosted Zone name is the domain name with a period appended to it
            # allow searching by either with or without the period
            if not args['--zone'] or hosted_zone.name in (args['--zone'], args['--zone'] + '.'):
                for record in disco_route53.list_records(hosted_zone.name):
                    values = ','.join(record.resource_records)
                    print("{0:<5} {1:20} {2:50}".format(record.type, record.name, values))
    elif args['create-record']:
        disco_route53.create_record(args['<zone-name>'],
                                    args['<record-name>'],
                                    args['<type>'],
                                    args['<value>'])
    elif args['delete-record']:
        record_name = args['<record-name>']
        # AWS appends a . to the end of the record name.
        # Add it here as a convenience if the argument is missing it
        if not record_name.endswith('.'):
            record_name += '.'
        disco_route53.delete_record(args['<zone-name>'], record_name, args['<type>'])
| 25,324
|
def get_image_filename_index():
    """
    Obtain a mapping of filename -> filepath for images, cached on disk.

    :return: dict mapping image filename to its full path
    """
    index_path = osp.join(SEG_ROOT, 'privacy_filters', 'cache', 'fname_index.pkl')
    if osp.exists(index_path):
        # print(...) with a single argument works identically on py2 and py3.
        print('Found cached index. Loading it...')
        # Bug fix: close the file handle deterministically (the original
        # leaked it via pickle.load(open(...))).
        with open(index_path, 'rb') as f:
            return pickle.load(f)
    print('Creating filename index ...')
    fname_index = dict()
    images_dir = osp.join(SEG_ROOT, 'images')
    for fold in os.listdir(images_dir):
        for img_filename in os.listdir(osp.join(images_dir, fold)):
            fname_index[img_filename] = osp.join(images_dir, fold, img_filename)
    with open(index_path, 'wb') as f:
        pickle.dump(fname_index, f)
    return fname_index
| 25,325
|
def _on_post_syncdb(app, **kwargs):
    """Handler to install baselines after syncdb has completed.

    This will install baselines for any new apps, once syncdb has completed
    for the app, and will notify the user if any evolutions are required.

    Args:
        app (module):
            The app whose models were migrated.

        **kwargs (dict):
            Keyword arguments passed to the signal handler.
    """
    # The syncdb signal carries the database alias under 'db'.
    database = kwargs.get('db', DEFAULT_DB_ALIAS)
    _on_app_models_updated(app=app, using=database, **kwargs)
| 25,326
|
def test_find_matches_no_dupes(small_ktree):
    """Test find matches with small ktree."""
    matches = find_matches(small_ktree, 3)
    assert len(matches) == 1
| 25,327
|
def merge_coordinates(coordinates, capture_size):
    """Merge overlapping coordinates for MIP targets.

    Parameters
    ----------
    coordinates: python dictionary
        Coordinates to be merged in the form {target-name: {chrom: chrx,
        begin: start-coordinate, end: end-coordinate}, ..}
    capture_size: int
        Anticipated MIP capture size. If two regions are as close as 2 times
        this value, they will be merged.

    Returns
    -------
    target_coordinates: python dictionary
        merged coordinates dictionary
    target_names: python dictionary
        names of included targets in each merged region.
    """
    # Group [begin, end] intervals by chromosome.
    chroms = {}
    for name in coordinates:
        coord = coordinates[name]
        chroms.setdefault(coord["chrom"], []).append(
            [coord["begin"], coord["end"]])
    # Merge intervals that are closer than twice the capture size.
    merged_chroms = {chrom: merge_overlap(intervals, 2 * capture_size)
                     for chrom, intervals in chroms.items()}
    # Build one named target region per merged interval; each region is
    # named after the first target it contains.
    target_coordinates = {}
    target_names = {}
    for chrom, regions in merged_chroms.items():
        for reg in regions:
            targets_in_region = [
                name for name in coordinates
                if (coordinates[name]["chrom"] == chrom
                    and reg[0] <= coordinates[name]["begin"]
                    <= coordinates[name]["end"] <= reg[1])]
            region_name = targets_in_region[0]
            target_names[region_name] = targets_in_region
            target_coordinates[region_name] = [chrom, reg[0], reg[1]]
    return target_coordinates, target_names
| 25,328
|
def update_one_record(dns, zones, record_name, record_type, data):
    """
    Update a DNS record to point at a specific value, creating it if necessary,
    deleting any other records of other types pointing at the same DNS name if
    necessary.

    @param dns: The DNS provider.
    @param zones: The list of zone objects from the DNS provider.
    @param record_name: The fully qualified domain name to update.
    @param record_type: The type of the record ("A" or "AAAA" probably).
    @param data: The data to update the record with (the string formated IP
    address).

    Raises:
        ValueError: If no zone in `zones` matches the record's top-level
            domain (previously this crashed with an opaque NameError).
    """
    # Raw string for the regex; splits "www.example.com" into
    # ("www", "example.com"); a bare "example.com" yields (None, ...).
    sub_domain, top_level_domain = (
        re.match(r"(?:(.*)\.|)(.*\..*)", record_name).groups()
    )
    # Keep the last matching zone, as the original loop (no break) did.
    target_zone = None
    for zone in zones:
        if zone.domain == top_level_domain:
            target_zone = zone
    if target_zone is None:
        # Bug fix: guard against an unmatched domain instead of hitting an
        # unbound-variable NameError below.
        raise ValueError("No zone found for domain %r" % top_level_domain)
    for record in dns.iterate_records(target_zone):
        if record.name == sub_domain:
            if record.type != record_type:
                # Same name, wrong type: remove it and keep scanning.
                dns.delete_record(record=record)
            else:
                dns.update_record(record=record, name=sub_domain,
                                  type=record_type, data=data,
                                  extra=dict(ttl=300))
                break
    else:
        # No record of the right type was found/updated: create it.
        target_zone.create_record(name=sub_domain, type=record_type, data=data,
                                  extra=dict(ttl=300))
| 25,329
|
def log_in_directly(context):
    """
    This differs to the `log_in` function above by logging in directly to a page where the user login form is presented

    :param context: behave context; must carry a ``persona`` whose values
        are substituted for ``$name`` and ``$password`` in the steps below
    :return: None
    """
    assert context.persona
    # The step text is interpreted by behave; $name/$password come from the
    # active persona.
    context.execute_steps(u"""
        When I fill in "login" with "$name"
        And I fill in "password" with "$password"
        And I press the element with xpath "//button[contains(string(), 'Login')]"
        Then I should see an element with xpath "//a[@title='Log out']"
    """)
| 25,330
|
def induce_and_write_rules():
    """Induce and write set of rules.

    Reads training examples from FLAGS.input, assembles an InductionConfig
    from the command-line flags, induces QCFG rules from the examples, and
    writes the resulting rules to FLAGS.output.
    """
    examples = tsv_utils.read_tsv(FLAGS.input)
    # All induction hyperparameters are taken directly from flags.
    config = induction_utils.InductionConfig(
        sample_size=FLAGS.sample_size,
        max_iterations=FLAGS.max_iterations,
        min_delta=FLAGS.min_delta,
        terminal_codelength=FLAGS.terminal_codelength,
        non_terminal_codelength=FLAGS.non_terminal_codelength,
        parse_sample=FLAGS.parse_sample,
        allow_repeated_target_nts=FLAGS.allow_repeated_target_nts,
        seed_exact_match=FLAGS.seed_exact_match,
        balance_parens=FLAGS.balance_parens,
    )
    induced_rules = induction_utils.induce_rules(examples, config)
    qcfg_file.write_rules(induced_rules, FLAGS.output)
| 25,331
|
def Emojify_V2(input_shape, word_to_vec_map, word_to_index):
    """
    Function creating the Emojify-v2 model's graph.

    Arguments:
    input_shape -- shape of the input, usually (max_len,)
    word_to_vec_map -- dictionary mapping every word in a vocabulary into its
    50-dimensional vector representation
    word_to_index -- dictionary mapping from words to their indices in the
    vocabulary (400,001 words)

    Returns:
    model -- a model instance in Keras
    """
    # Input of the graph: sentence word indices, dtype int32.
    sentence_indices = Input(shape=input_shape, dtype=np.int32)
    # Embedding layer pretrained with GloVe vectors.
    embedding_layer = pretrained_embedding_layer(word_to_vec_map, word_to_index)
    embeddings = embedding_layer(sentence_indices)
    # First LSTM returns the full sequence so the second LSTM can consume it.
    X = LSTM(128, return_sequences=True)(embeddings)
    X = Dropout(0.5)(X)
    # Second LSTM returns only the final hidden state.
    X = LSTM(128)(X)
    X = Dropout(0.5)(X)
    # Bug fix: the original applied softmax twice (Dense(activation='softmax')
    # followed by Activation('softmax')), which distorts the output
    # distribution. Project to 5 classes, then apply a single softmax.
    X = Dense(5)(X)
    X = Activation('softmax')(X)
    # Create Model instance which converts sentence_indices into X.
    model = Model(sentence_indices, X)
    return model
| 25,332
|
def SetFdBlocking(fd, is_blocking):
    """Set a file descriptor blocking or nonblocking.

    Please note that this may affect more than expected, for example it may
    affect sys.stderr when called for sys.stdout.

    Args:
        fd: File descriptor (int) or an object with a fileno() method.
        is_blocking: True to make the descriptor blocking, False for
            nonblocking.

    Returns:
        The old blocking value (True or False).
    """
    if hasattr(fd, 'fileno'):
        fd = fd.fileno()
    old = fcntl.fcntl(fd, fcntl.F_GETFL)
    if is_blocking:
        value = old & ~os.O_NONBLOCK
    else:
        value = old | os.O_NONBLOCK
    # Only touch the flags when they actually change.
    if old != value:
        fcntl.fcntl(fd, fcntl.F_SETFL, value)
    # Bug fix: the original returned bool(old & os.O_NONBLOCK), i.e. the old
    # NONblocking state, contradicting the documented contract of returning
    # the old *blocking* value.
    return not (old & os.O_NONBLOCK)
| 25,333
|
def de_parser(lines):
    """return a dict of {OfficalName: str, Synonyms: str, Fragment: bool,
    Contains: [itemdict,], Includes: [itemdict,]} from DE lines

    The DE (DEscription) lines contain general descriptive information about
    the sequence stored. This information is generally sufficient to identify
    the protein precisely.

    The description always starts with the proposed official name of the
    protein. Synonyms are indicated between brackets. Examples below

    If a protein is known to be cleaved into multiple functional components,
    the description starts with the name of the precursor protein, followed by
    a section delimited by '[Contains: ...]'. All the individual components are
    listed in that section and are separated by semi-colons (';'). Synonyms are
    allowed at the level of the precursor and for each individual component.

    If a protein is known to include multiple functional domains each of which
    is described by a different name, the description starts with the name of
    the overall protein, followed by a section delimited by '[Includes: ]'. All
    the domains are listed in that section and are separated by semi-colons
    (';'). Synonyms are allowed at the level of the protein and for each
    individual domain.

    In rare cases, the functional domains of an enzyme are cleaved, but the
    catalytic activity can only be observed, when the individual chains
    reorganize in a complex. Such proteins are described in the DE line by a
    combination of both '[Includes:...]' and '[Contains:...]', in the order
    given in the following example:

    If the complete sequence is not determined, the last information given on
    the DE lines is '(Fragment)' or '(Fragments)'. Example:
    DE   Dihydrodipicolinate reductase (EC 1.3.1.26) (DHPR) (Fragment).
    DE   Arginine biosynthesis bifunctional protein argJ [Includes: Glutamate
    DE   N-acetyltransferase (EC 2.3.1.35) (Ornithine acetyltransferase)
    DE   (Ornithine transacetylase) (OATase); Amino-acid acetyltransferase
    DE   (EC 2.3.1.1) (N-acetylglutamate synthase) (AGS)] [Contains: Arginine
    DE   biosynthesis bifunctional protein argJ alpha chain; Arginine
    DE   biosynthesis bifunctional protein argJ beta chain] (Fragment).

    Trouble maker:
    DE   Amiloride-sensitive amine oxidase [copper-containing] precursor(EC
    DE   1.4.3.6) (Diamine oxidase) (DAO).
    """
    # Strip the "DE" labels and join the lines into one description string.
    labeloff_lines = labeloff(lines)
    joined = join_parser(labeloff_lines, chars_to_strip="). ")
    keys = ["Includes", "Contains", "Fragment"]
    fragment_label = "(Fragment"
    contains_label = "[Contains:"
    includes_label = "[Includes:"
    # NOTE: order matters below - the trailing '(Fragment' marker is removed
    # first, then the [Contains: ...] section, then [Includes: ...]; whatever
    # remains is the primary protein description.
    # Process Fragment
    fragment = False
    if joined.endswith(fragment_label):
        fragment = True
        joined = joined.rsplit("(", 1)[0]
    # Process Contains
    contains = []
    if contains_label in joined:
        joined, contains_str = joined.split(contains_label)
        contains_str = contains_str.strip(" ]")
        # Each component is parsed into an item dict by de_itemparser.
        contains = list(map(de_itemparser, contains_str.split("; ")))
    # Process Includes
    includes = []
    if includes_label in joined:
        joined, includes_str = joined.split(includes_label)
        includes_str = includes_str.strip(" ]")
        includes = list(map(de_itemparser, includes_str.split("; ")))
    # Process Primary
    primary = de_itemparser(joined)
    result = dict(list(zip(keys, (includes, contains, fragment))))
    result.update(primary)
    return result
| 25,334
|
def send_mail(address, passwd):
    """
    Send verification emails based on below template.

    Args:
        address: Recipient email address.
        passwd: Generated password inserted into the message body.

    Note:
        Sends via Gmail SMTP over SSL (port 465), authenticating with
        EMAIL_USER/EMAIL_PASS from the Flask app config.
    """
    message = (
        "Hey there!\n\nYour HOBY Feedback account is ready to be logged "
        + "into. If you have any problems logging in, please contact the "
        + "operations staff who created your account."
        + "\n\nURL: https://feedback.hobynye.org\nPassword: {}\n\n"
        + "Please change this password once you have logged in.\n\n"
        + "Thanks,\nHOBY NYE\n"
        + "\n\nThis message was automatically generated by HOBY Feedback"
    )
    message = message.format(passwd)
    email = MIMEText(message)
    email["To"] = address
    email["From"] = "HOBY Feedback <{}>".format(app.config["EMAIL_USER"])
    email["Subject"] = "Your HOBY Feedback Account"
    email["Date"] = formatdate()
    # Open, authenticate, send and close the SMTP session per call.
    server = smtplib.SMTP_SSL("smtp.gmail.com", 465)
    server.login(app.config["EMAIL_USER"], app.config["EMAIL_PASS"])
    server.send_message(email)
    server.quit()
| 25,335
|
def roundPrecision(number, precision=4):
    """Round *number* to `precision` significant digits (via scientific
    notation formatting), returning a float suitable for output."""
    fmt = "{{:.{}E}}".format(precision)
    return float(fmt.format(number))
| 25,336
|
def get_profile(host, cluster = False):
    """Download a profile for *host* into a temporary file and return the
    open tempfile handle (profile is gone when the handle is closed)."""
    # Build the command as [GETPROF, (-D), (-C), host] — same argument
    # order the previous insert()-based construction produced.
    cmd = [GETPROF]
    if debug:
        cmd.append("-D")
    if cluster:
        cmd.append("-C")
    cmd.append(host)
    if debug:
        sys.stderr.write("%s: launching '%s'\n" % (CALL, " ".join(cmd)))
    tempfh = tempfile.NamedTemporaryFile(prefix="tmp.%s." % CALL)
    proc = Popen(cmd, stdout=tempfh)
    status = proc.wait()
    if status != 0:
        raise RuntimeError("'%s' returned exit status %d" % \
                           (" ".join(cmd), status))
    return tempfh
| 25,337
|
def stringToNumbers(string, separators=[","], commentSymbol="#"):
""" Return a list of splitted string and numbers from string "string". Numbers will be converted into floats. Text after "#" will be skipped.
--- string: the string to be converted.
--- separators: a list of additional separators other than whitespace to be used.
--- commentSymbol: text after which will be ignored.
"""
if "#" in string: string = string[:string.index("#")].strip(); # take everything before "#" symbol, then strip
splitted = [string];
for separator in flatten(separators): splitted = FLI([x.split(separator) for x in splitted]);
splitted = FLI([x.split() for x in splitted]); # clean up empty strings
if splitted == []: return [];
lineData = [];
for piece in splitted:
if isFloat(piece):
lineData.append(float(piece));
else:
lineData.append(piece);
return lineData;
| 25,338
|
def friend_invitation_by_facebook_send_view(request):  # friendInvitationByFacebookSend
    """
    Forward a batch of Facebook friend invitations to the API layer and
    return its outcome as JSON.
    :param request:
    :return: HttpResponse with a JSON body
    """
    # We standardize how we take in the voter_device_id
    voter_device_id = get_voter_device_id(request)
    recipients_facebook_id_array = request.GET.getlist('recipients_facebook_id_array[]', "")
    recipients_facebook_name_array = request.GET.getlist('recipients_facebook_name_array[]', "")
    facebook_request_id = request.GET.get('facebook_request_id', "")
    results = friend_invitation_by_facebook_send_for_api(
        voter_device_id,
        recipients_facebook_id_array,
        recipients_facebook_name_array,
        facebook_request_id)
    payload = dict()
    payload['status'] = results['status']
    payload['success'] = results['success']
    payload['voter_device_id'] = voter_device_id
    payload['all_friends_facebook_link_created_results'] = \
        results['all_friends_facebook_link_created_results']
    return HttpResponse(json.dumps(payload), content_type='application/json')
| 25,339
|
def test_multichoiceanswer_init3():
    """A plain string is not a valid ``image`` argument: expect TypeError."""
    bad_image = "text"
    with pytest.raises(TypeError):
        exam.MultiChoiceAnswer(image=bad_image)
| 25,340
|
def inv_send_received(r, **attr):
    """
    Confirm a Shipment has been Received
    - called via POST from inv_send_rheader
    - called via JSON method to reduce request overheads

    Marks the shipment and its track items as received/arrived and, when
    shipments are linked to inventory requests, recalculates the fulfilment
    quantities and status of the linked request(s).
    """
    # State-changing operation: only POST is accepted.
    if r.http != "POST":
        r.error(405, current.ERROR.BAD_METHOD,
                next = URL(),
                )
    T = current.T
    send_id = r.id
    if not send_id:
        r.error(405, "Can only confirm a single shipment.")
    auth = current.auth
    s3db = current.s3db
    stable = s3db.inv_send
    # Caller must be permitted to update this specific send record.
    if not auth.s3_has_permission("update", stable,
                                  record_id = send_id,
                                  ):
        r.unauthorised()
    db = current.db
    tracktable = s3db.inv_track_item
    # Flag the shipment and all of its tracked items at once.
    db(stable.id == send_id).update(status = SHIP_STATUS_RECEIVED)
    db(tracktable.send_id == send_id).update(status = TRACK_STATUS_ARRIVED)
    if current.deployment_settings.get_inv_send_req():
        # Shipments may be linked to inventory requests: update the
        # fulfilment bookkeeping of every linked request.
        rtable = s3db.inv_req
        srtable = s3db.inv_send_req
        reqs = db(srtable.send_id == send_id).select(srtable.req_id)
        if reqs:
            req_ids = [row.req_id for row in reqs]
            # Get the full list of items in the request(s)
            ritable = s3db.inv_req_item
            for req_id in req_ids:
                query = (ritable.req_id == req_id)
                ritems = db(query).select(ritable.id,
                                          ritable.item_pack_id,
                                          ritable.quantity,
                                          # Virtual Field
                                          #ritable.pack_quantity,
                                          )
                # Get all Received Shipments in-system for this request
                query = (stable.status == SHIP_STATUS_RECEIVED) & \
                        (tracktable.send_id == send_id) & \
                        (stable.id == srtable.send_id) & \
                        (srtable.req_id == req_id)
                sitems = db(query).select(tracktable.item_pack_id,
                                          tracktable.quantity,
                                          # Virtual Field
                                          #tracktable.pack_quantity,
                                          )
                # Sum received quantity per pack type; pack_quantity() is a
                # virtual field on the row (see commented selects above).
                fulfil_qty = {}
                for item in sitems:
                    item_pack_id = item.item_pack_id
                    if item_pack_id in fulfil_qty:
                        fulfil_qty[item_pack_id] += (item.quantity * item.pack_quantity())
                    else:
                        fulfil_qty[item_pack_id] = (item.quantity * item.pack_quantity())
                complete = False
                # NOTE(review): 'complete' keeps only the result of the last
                # matching item, so it reflects the final item checked — 
                # confirm this is the intended aggregate semantics.
                for item in ritems:
                    if item.item_pack_id in fulfil_qty:
                        quantity_fulfil = fulfil_qty[item.item_pack_id]
                        db(ritable.id == item.id).update(quantity_fulfil = quantity_fulfil)
                        req_quantity = item.quantity * item.pack_quantity()
                        complete = quantity_fulfil >= req_quantity
                # Update overall Request Status
                if complete:
                    # REQ_STATUS_COMPLETE
                    db(rtable.id == req_id).update(fulfil_status = 2)
                else:
                    # REQ_STATUS_PARTIAL
                    db(rtable.id == req_id).update(fulfil_status = 1)
    message = T("Shipment received")
    current.session.confirmation = message
    # Respond with JSON so the client can update the rheader/tree in place.
    current.response.headers["Content-Type"] = "application/json"
    return json.dumps({"message": s3_str(message),
                       "tree": URL(args = [send_id, "track_item"]),
                       }, separators=SEPARATORS)
| 25,341
|
def gradcheck(trnodes,cost,datasrc,maxtime=2,rtol=1e-5,atol=1e-4):
    """
    Checks the computation of grad() against numerical gradient.
    If any component of the numerical gradient does not match, an
    AssertionError is raised with information about the problem.

    NOTE: this is Python 2 code (print statement, dict.iteritems, cmp=).
    maxtime bounds the wall-clock seconds spent per trainable node;
    rtol/atol are the tolerances passed to numpy.allclose.
    """
    # Collect each trainable node in the dependency graph
    if len(trnodes) == 0:
        return # No trainable nodes, means zero-dimensional gradient
    # trnodes[0].P[:] = 0
    print "gradient check (dtype=%s)..." % str(trnodes[0].P.dtype)
    # Disable train-mode side effects (e.g. dropout) so cost is deterministic.
    globals.flags.push("train_mode",False)
    # Update the dP value of each trainable node, so that it contains
    # the symbolic gradient for that trainable node's parameters
    cost.bprop_trainable(trnodes,datasrc)
    # Compute components numeric gradient by starting from the symbolic gradient
    # and then checking several weight components by central-difference
    failures = {}
    data = datasrc.data()
    # Loop over each
    for trnode in trnodes:
        # Get the parameter vector P, and also the symbolic gradient that was computed at the beginning
        P  = trnode.P.ravel()            # Keep as sarray
        dP = trnode.dP.asnumpy().ravel() # Download
        # Decide on step size and what order to perturb the parameters of this trainable node
        # (larger step for float32 to stay above round-off noise).
        step = 1e-8 if P.dtype == np.float64 else 1e-4
        #order = npr.permutation(len(P))  # Use same permutation every time for consistency in output
        order = np.arange(len(P))
        # For the time allotted, perturb each parameter and evaluate the cost
        starttime = time.time()
        numcheck,mincheck = 0,min(len(order),50)
        for i in order:
            # Temporarily perturb weight i by 'step' and evaluate the new cost at each position
            x = float(P[i])
            P[i] = x-step; c0 = np.sum(cost.eval(**data)["cost"].asnumpy());
            P[i] = x+step; c1 = np.sum(cost.eval(**data)["cost"].asnumpy());
            P[i] = x   # restore the original weight before moving on
            # Compute the numeric gradient for paraneter i, and check its closeness to symbolic gradient
            dc_num = float(c1-c0)/float(2*step)
            dc_sym = dP[i]
            # Check closeness both ways since np.allclose is asymmetric in its arguments.
            if not np.allclose(dc_num,dc_sym,rtol=rtol,atol=atol) or not np.allclose(dc_sym,dc_num,rtol=rtol,atol=atol):
                if trnode not in failures:
                    failures[trnode] = []
                failures[trnode].append((i,dc_num,dc_sym))
            # Quit early if we ran out of time for this particular node
            # (but always check at least 'mincheck' components).
            numcheck += 1
            if time.time()-starttime >= maxtime and numcheck >= mincheck:
                break
    globals.flags.pop("train_mode")
    # No errors? Then simply return
    if len(failures) == 0:
        logging.info("gradient check PASSED")
        return
    # Otherwise build a readable report of the mismatching components.
    msg = "...gradient check FAILED\n"
    for trnode,items in failures.iteritems():
        msg += "  %s FAILED at weights:\n" % (trnode.Z.dsts[0].origin().path)
        # Sort by index
        items.sort(cmp=lambda x,y: int(x[0]-y[0]))
        count = 0
        for index,dc_num,dc_sym in items:
            msg += "\t\t[%d] num: %.8f  sym: %.8f\n" % (index,dc_num,dc_sym)
            count += 1
            if count > 8:
                # Cap the per-node listing to keep the report short.
                msg += "\t\t...\n"
                break
    for trnode in trnodes:
        if trnode not in failures:
            msg += "  %s SUCCEEDED\n" % (trnode.Z.dsts[0].origin().path)
    logging.info("gradient check FAILED")
    logging.info(msg)
    raise AssertionError(msg)
| 25,342
|
def filter_table(table, filter_series, ignore=None):
    """
    Filter a table based on a set of restrictions given in
    Series of column name / filter parameter pairs. The column
    names can have suffixes `_min` and `_max` to indicate
    "less than" and "greater than" constraints.
    Parameters
    ----------
    table : pandas.DataFrame
        Table to filter.
    filter_series : pandas.Series
        Series of column name / value pairs of filter constraints.
        Columns that ends with '_max' will be used to create
        a "less than" filters, columns that end with '_min' will be
        used to create "greater than or equal to" filters.
        A column with no suffix will be used to make an 'equal to' filter.
    ignore : sequence of str, optional
        List of column names that should not be used for filtering.
    Returns
    -------
    filtered : pandas.DataFrame
    """
    with log_start_finish('filter table', logger):
        excluded = ignore if ignore else set()
        filters = []
        for name, val in filter_series.iteritems():
            # Skip ignored columns and NaN-valued constraints.
            if name in excluded:
                continue
            if isinstance(val, numbers.Number) and np.isnan(val):
                continue
            filters.append(_filterize(name, val))
        return apply_filter_query(table, filters)
| 25,343
|
def analogy_computation_2d(f_first_enc,
                           f_first_frame,
                           f_current_enc,
                           first_depth):
  """Implements the deep analogy computation.

  Every conv layer shares the same hyper-parameters
  (depth = first_depth * 4, 3x3 kernel, stride 1, ReLU, 'same' padding),
  so they are factored into a local helper.
  """
  def _conv(inputs):
    # One standard conv block used throughout the analogy network.
    return tf.layers.conv2d(
        inputs,
        first_depth * 4,
        3,
        padding='same',
        activation=tf.nn.relu,
        strides=1)

  with tf.variable_scope('analogy_computation'):
    # Difference between the first frame and its encoding.
    frame_enc_diff = f_first_frame - f_first_enc
    analogy = tf.concat([_conv(frame_enc_diff), _conv(f_current_enc)], 3)
    analogy = _conv(analogy)
    analogy = tf.contrib.layers.layer_norm(analogy)
    analogy = _conv(analogy)
    return _conv(analogy)
| 25,344
|
def format_to_TeX(elements):
    """Render a sequence of BeautifulSoup nodes as a single LaTeX string.

    Text nodes are escaped; element nodes are delegated to format_el.
    """
    return "".join(
        escape_LaTeX(node.string) if isinstance(node, NavigableString)
        else format_el(node)
        for node in elements
    )
| 25,345
|
def plot_light_curve(time, magnitude, xunit="s", filename=None):
    """
    Plot light curve caused by the doppler beaming in a binary system.
    Parameters
    ----------
    time : 1D numpy.array(dtype=float)
        Array represents time.
    magnitude : 1D numpy.array(dtype=float)
        Array represents magnitude of the binary system.
    xunit : str
        String which is x's label on the image.
        Default set in seconds.
    filename : str
        The name of a file where the image will be saved to.
        It should have the .eps extenstion. If None the image
        will be only displayed on a screen.
    """
    # Build the figure, then either save it or show it on screen.
    _display_or_save_figure(_light_curve(time, magnitude, xunit), filename)
| 25,346
|
def namedlist(typename, field_names, verbose=False, rename=False):
    """Returns a new subclass of list with named fields.
    >>> Point = namedlist('Point', ['x', 'y'])
    >>> Point.__doc__                   # docstring for the new class
    'Point(x, y)'
    >>> p = Point(11, y=22)             # instantiate with pos args or keywords
    >>> p[0] + p[1]                     # indexable like a plain list
    33
    >>> x, y = p                        # unpack like a regular list
    >>> x, y
    (11, 22)
    >>> p.x + p.y                       # fields also accessible by name
    33
    >>> d = p._asdict()                 # convert to a dictionary
    >>> d['x']
    11
    >>> Point(**d)                      # convert from a dictionary
    Point(x=11, y=22)
    >>> p._replace(x=100)               # _replace() is like str.replace() but targets named fields
    Point(x=100, y=22)
    """
    # Validate the field names.  At the user's option, either generate an error
    # message or automatically replace the field name with a valid name.
    # NOTE: uses Python 2 'basestring'; the class body is generated from a
    # module-level template (_namedlist_tmpl) and exec'd below.
    if isinstance(field_names, basestring):
        # Accept a single comma/space separated string of names.
        field_names = field_names.replace(',', ' ').split()
    field_names = [str(x) for x in field_names]
    if rename:
        # Replace every invalid or duplicate name with a positional '_%d' name.
        seen = set()
        for index, name in enumerate(field_names):
            if (not all(c.isalnum() or c == '_' for c in name)
                or _iskeyword(name)
                or not name
                or name[0].isdigit()
                or name.startswith('_')
                or name in seen):
                field_names[index] = '_%d' % index
            seen.add(name)
    # Reject any remaining invalid type/field names.
    for name in [typename] + field_names:
        if not all(c.isalnum() or c == '_' for c in name):
            raise ValueError('Type names and field names can only contain '
                             'alphanumeric characters and underscores: %r'
                             % name)
        if _iskeyword(name):
            raise ValueError('Type names and field names cannot be a '
                             'keyword: %r' % name)
        if name[0].isdigit():
            raise ValueError('Type names and field names cannot start with '
                             'a number: %r' % name)
    seen = set()
    for name in field_names:
        if name.startswith('_') and not rename:
            raise ValueError('Field names cannot start with an underscore: '
                             '%r' % name)
        if name in seen:
            raise ValueError('Encountered duplicate field name: %r' % name)
        seen.add(name)
    # Fill-in the class template
    fmt_kw = {'typename': typename}
    fmt_kw['field_names'] = tuple(field_names)
    fmt_kw['num_fields'] = len(field_names)
    # e.g. "(x, y)" without quotes, for the generated __init__ signature.
    fmt_kw['arg_list'] = repr(tuple(field_names)).replace("'", "")[1:-1]
    fmt_kw['repr_fmt'] = ', '.join(_repr_tmpl.format(name=name)
                                   for name in field_names)
    fmt_kw['field_defs'] = '\n'.join(_m_field_tmpl.format(index=index, name=name)
                                     for index, name in enumerate(field_names))
    class_definition = _namedlist_tmpl.format(**fmt_kw)
    if verbose:
        print(class_definition)
    def _itemsetter(key):
        # Property setter factory: writes through to the list slot at 'key'.
        def _itemsetter(obj, value):
            obj[key] = value
        return _itemsetter
    # Execute the template string in a temporary namespace and support
    # tracing utilities by setting a value for frame.f_globals['__name__']
    namespace = dict(_itemgetter=_itemgetter,
                     _itemsetter=_itemsetter,
                     __name__='namedlist_%s' % typename,
                     OrderedDict=OrderedDict,
                     _property=property,
                     _list=list)
    try:
        exec_(class_definition, namespace)
    except SyntaxError as e:
        # Include the generated source to make template bugs debuggable.
        raise SyntaxError(e.message + ':\n' + class_definition)
    result = namespace[typename]
    # For pickling to work, the __module__ variable needs to be set to
    # the frame where the named list is created.  Bypass this step in
    # environments where sys._getframe is not defined (Jython for
    # example) or sys._getframe is not defined for arguments greater
    # than 0 (IronPython).
    try:
        frame = _sys._getframe(1)
        result.__module__ = frame.f_globals.get('__name__', '__main__')
    except (AttributeError, ValueError):
        pass
    return result
| 25,347
|
def messages_to_corpus(messages, filename='corpus.txt', append=True):
    """
    Converts midi messages to corpus of encoding to feed into the network
    :param
        messages: array of instrument messages
        filename: Name of the file in which to save the converted text
        append: Overwrites if false
    """
    # TODO: implement the message-to-text encoding; currently a stub.
    pass
| 25,348
|
def preprocess_things(data_dir: str = "./data", file_identifier: str = "*-5k.json"):
    """Preprocess things (essentially proper nouns) from WikiData with files named "*-5k.json":
    - Filters things whose label merely repeats the entity id
    - Combines thingLabel and thingAltLabel into a sorted `labels` field
    """
    for fp in glob(os.path.join(data_dir, file_identifier)):
        print(f"Processing {fp}")
        with open(fp, "r") as f:
            data = json.load(f)
        kept = []
        for thing in tqdm(data):
            # Skip entries whose label is just the trailing id of the URI.
            if thing["thing"].split("/")[-1] == thing["thingLabel"]:
                continue
            # Clean the primary label: drop any parenthesised qualifier.
            primary = re.sub(r"\([^)]*\)", "", thing.pop("thingLabel")).strip()
            labels = {primary}
            alt = thing.pop("thingAltLabel", None)
            if alt:
                alt = re.sub(r"\([^)]*\)", "", alt).strip()
                labels.update(alt.split(", "))
            thing["labels"] = sorted(labels)
            kept.append(thing)
        # Write alongside the input as "<name>-preprocessed.json".
        base_fp, ext = os.path.splitext(fp)
        out_fp = base_fp + "-preprocessed" + ext
        with open(out_fp, "w") as f:
            json.dump(kept, f, indent=4)
        print(f"Saved to {out_fp}\n")
| 25,349
|
def has_good_frames(frames: List["MonitoredFrame"]) -> bool:
    """
    Return True if any frame has a score strictly greater than 3.

    Frames whose ``score`` is falsy (None or 0) never qualify.
    """
    # Generator expression: short-circuits on the first good frame instead
    # of materializing the whole list as the previous version did.
    return any(frame.score and frame.score > 3 for frame in frames)
| 25,350
|
def updateHistory(conn, author, message_id, backer):
    """
    Record one vote-history row for (author, message, backer).
    Returns True when a row was actually inserted.
    """
    query = prepareQuery(
        "INSERT INTO votes_history (user_id, message_id, backer) VALUES (?,?,?)")
    params = (int(author), int(message_id), int(backer))
    cursor = conn.cursor()
    cursor.execute(query, params)
    conn.commit()
    return cursor.rowcount > 0
| 25,351
|
def pyc_loads(data):
    """
    Load a .pyc file from a bytestring.
    Arguments:
        data(bytes): The content of the .pyc file.
    Returns:
        PycFile: The parsed representation of the .pyc file.
    """
    # Wrap the bytes in an in-memory stream and reuse the file-based loader.
    stream = six.BytesIO(data)
    return pyc_load(stream)
| 25,352
|
def get_master_tag_list(context, ecosystem, use_token):
    """Call API endpoint master tag list, optionally with an auth header."""
    url = master_tag_list_url(context, ecosystem)
    # requests.get(..., headers=None) is identical to omitting headers,
    # so a single call covers both the authorized and anonymous cases.
    headers = authorization(context) if use_token else None
    context.response = requests.get(url, headers=headers)
| 25,353
|
def main() -> None:
    """Entry point: read and print the settings file."""
    settings = read_all('../../settings.cfg')
    print(settings)
| 25,354
|
def time_series_figure(time_series, polynomial, drift, snr):
    """ Return a matplotlib figure containing the time series and its
        polynomial model.
    """
    figure = plt.figure()
    axes = figure.add_subplot(111)
    axes.grid()
    plt.title("Drift: {0: .1f}% - SNR: {1: .1f}dB".format(
        drift * 100, 10 * numpy.log10(snr)))
    # Volume numbering starts at 2 (first volumes are skipped upstream —
    # presumably; confirm against the caller).
    volume_numbers = numpy.arange(2, 2 + len(time_series))
    fitted = numpy.polyval(polynomial, volume_numbers)
    axes.plot(volume_numbers, time_series, "k-")
    axes.plot(volume_numbers, fitted, "k-")
    axes.axes.set_xlabel("Volume number")
    axes.axes.set_ylabel("Intensity")
    return figure
| 25,355
|
def _create_standard_configuration_models_py_(code, geometry_type, absolute_path, schema=None,
                                              primary_key_is_string=False):
    """
    The simplest way to get a python file containing a database definition in sqlalchemy orm way. It will
    contain all necessary definitions to produce an extract as the specification defines for the new topic.
    Args:
        code (str): The unique Code for the new model (see oereb specification for more details)
        geometry_type (str): A valid geometry type.
        absolute_path (str): The absolute Path where the genderated python file will be placed. It
            must bewriteable by the user running this command.
        schema (str): The schema name. If not specified, "name" will be used.
        primary_key_is_string (bool): The type of the primary key. You can use this to switch between STRING
            type or INTEGER type. Standard is to INTEGER => False
    """
    # Pick the template matching the desired primary-key type.
    if primary_key_is_string:
        template = Template(
            filename=AssetResolver('pyramid_oereb').resolve(
                'standard/templates/plr_string_primary_keys.py.mako'
            ).abspath()
        )
    else:
        template = Template(
            filename=AssetResolver('pyramid_oereb').resolve(
                'standard/templates/plr_integer_primary_keys.py.mako'
            ).abspath()
        )
    name = convert_camel_case_to_snake_case(code)
    content = template.render(**{
        'topic': convert_camel_case_to_text_form(code),
        'schema_name': schema or name,
        'geometry_type': geometry_type
    })
    models_path = '{path}/{name}.py'.format(
        path=absolute_path,
        name=name
    )
    models_path = os.path.abspath(models_path)
    if os.path.exists(models_path):
        os.remove(models_path)
    # Context manager guarantees the handle is closed even if the write
    # fails (the previous open/close pair leaked on error).
    with open(models_path, 'w') as models_file:
        models_file.write(content)
| 25,356
|
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass.

    This is the classic ``six``-style compatibility shim: the returned
    temporary class is constructed with an intermediate metaclass which,
    when the *real* subclass is defined, replaces it by calling ``meta``
    directly with the caller-supplied ``bases``.
    """
    class metaclass(type):
        def __new__(cls, name, this_bases, d):
            # Ignore this_bases (the temporary class); build the real class
            # from the requested bases under the requested metaclass.
            return meta(name, bases, d)
    return type.__new__(metaclass, 'temporary_class', (), {})
| 25,357
|
def debug_config(
    config_path: Union[str, Path],
    overrides: Union[dict, None] = None,
    code_path: Union[str, Path] = None,
    show_funcs: bool = False,
    show_vars: bool = False
):
    """Debug a config file and show validation errors.
    The function will create all objects in the tree and validate them. Note that
    some config validation errors are blocking and will prevent the rest of the
    config from being resolved. This means that you may not see all validation errors
    at once and some issues are only shown once previous errors have been fixed.
    As with the 'train' command, you can override settings from the config by passing
    arguments in the `overrides` dict.
    DOCS: https://spacy.io/api/cli#debug-config
    Args:
        config_path (Union[str, Path]): Path to the configuration file.
        overrides (dict): A dictionary of config overrides (default: none).
        code_path (Union[str, Path]): Path to Python file with additional code (registered functions)
            to be imported.
        show_funcs (bool): Show an overview of all registered functions used in the config and where they
            come from (modules, files etc.).
        show_vars (bool): Show an overview of all variables referenced in the config and their values.
            This will also reflect variables overwritten in the function call.
    """
    # Bug fix: avoid the shared mutable default argument ({}); create a
    # fresh dict per call instead.
    if overrides is None:
        overrides = {}
    if isinstance(config_path, str):
        config_path = Path(config_path)
    if isinstance(code_path, str):
        code_path = Path(code_path)
    import_code(code_path)
    spacy_debug_config(
        config_path, overrides=overrides, show_funcs=show_funcs, show_vars=show_vars
    )
| 25,358
|
def log(cm_uuid: UUID):
    """
    :GET: returns the most recent logs for the specified control module. accepts the following url parameters
        - limit: the number of logs that should be returned
        - offset: offset the number of logs that should be returned (in pages of `limit`)
        - log_type: the type of log that should be returned (SQL LIKE pattern; defaults to all)
    :POST: inserts log with data into the database
    """
    if request.method == 'GET':
        limit = 20
        offset = 0
        log_type = "%"
        # Bug fix: query-string values arrive as strings — cast to int so
        # that .limit()/.offset() and the offset*limit arithmetic work.
        if limit_arg := request.args.get('limit'):
            limit = int(limit_arg)
        if offset_arg := request.args.get('offset'):
            offset = int(offset_arg)
        if log_type_arg := request.args.get('log_type'):
            log_type = log_type_arg
        # Bug fix: filter_by() performs an equality check, so the "%"
        # default would match nothing; use LIKE so the wildcard works.
        logs = CMLog.query.filter_by(cm_uuid=cm_uuid)\
            .filter(CMLog.log_type.like(log_type))\
            .order_by(CMLog.timestamp.desc())\
            .limit(limit)\
            .offset(offset*limit)\
            .all()
        returnval = dict()
        # Bug fix: guard against an empty result set (previously raised
        # IndexError on logs[0]); fall back to the path parameter.
        returnval['cm_uuid'] = logs[0].cm_uuid if logs else str(cm_uuid)
        returnval['status'] = 'success'
        returnval['data'] = []
        for current_log in logs:
            log_data = {
                'id': current_log.id,
                'log_type': current_log.log_type,
                'timestamp': current_log.timestamp,
                'data': current_log.data
            }
            returnval['data'].append(log_data)
        return jsonify(returnval), 200
    if request.method == 'POST':
        if not request.is_json:
            return jsonify({
                "status": "error",
                "message": "missing json"
            }), 415
        if not CMMeta.query.filter_by(uuid=cm_uuid).first():
            return jsonify({
                'status': 'error',
                'message': 'invalid control module uuid'
            }), 404
        log_type = request.json.get('log_type')
        data = request.json.get('data')
        # NOTE: if both fields are absent only the last one checked is
        # reported, matching the previous behavior.
        error = False
        missing = None
        if not log_type:
            error = True
            missing = "log_type"
        if not data:
            error = True
            missing = "data"
        if error:
            return jsonify({
                "status": "error",
                "message": "missing " + missing
            }), 422
        # Auto-register previously unseen log types for this module.
        if not CMLogTypes.query.filter_by(cm_uuid=cm_uuid, log_type=log_type).first():
            CMLogTypes.create(cm_uuid, log_type)
        return jsonify(CMLog.create(cm_uuid, log_type, request.json.get("data"))), 201
| 25,359
|
def RetrieveResiduesNumbers(ResiduesInfo):
    """Retrieve residue numbers, sorted numerically across all residues."""
    # Map each residue number to its residue name (numbers are unique keys).
    number_to_name = {}
    for residue_name in ResiduesInfo["ResNames"]:
        for residue_number in ResiduesInfo["ResNum"][residue_name]:
            number_to_name[residue_number] = residue_name
    if not number_to_name:
        return []
    # Sort the number strings by their integer value.
    return sorted(number_to_name, key = int)
| 25,360
|
def get_data_with_station(station_id):
    """
    *** Returns Pandas DataFrame ***
    Please Input Station ID: (String)

    Downloads <station_id>.dly from the NOAA GHCN daily FTP archive into
    the working directory, parses it and returns the DataFrame.
    """
    print("\nGETTING DATA FOR STATION: ", station_id)
    outfile = station_id + ".dly"
    ftp = FTP('ftp.ncdc.noaa.gov')
    try:
        ftp.login()
        ftp.cwd('pub/data/ghcn/daily/all')
        # Bug fix: the previous code leaked the handle returned by open();
        # a context manager closes it deterministically.
        with open(outfile, 'wb') as fh:
            ftp.retrbinary('RETR ' + station_id + '.dly', fh.write)
    finally:
        # Bug fix: always terminate the FTP session, even on failure.
        ftp.quit()
    dt = read_ghcn_data_file(filename=outfile)
    dt = dt.rename_axis("DATE", axis="columns")
    print('{} STATION DATA IS TAKEN'.format(station_id))
    return dt
| 25,361
|
def _canonicalize(path):
    """Makes all paths start at top left, and go clockwise first."""
    # Work on numeric (float) arguments throughout.
    numeric_path = [[cmd[0]] + [float(arg) for arg in cmd[1:]] for cmd in path]
    # Canonicalize each subpath separately: rotate it so it starts at its
    # leftmost point, and remember that point for ordering.
    substructures = []
    for subpath in _separate_substructures(numeric_path):
        leftmost_point, leftmost_idx = _get_leftmost_point(subpath)
        rotated = ([['M', leftmost_point[0], leftmost_point[1]]]
                   + subpath[leftmost_idx + 1:]
                   + subpath[1:leftmost_idx + 1])
        substructures.append((rotated, leftmost_point))
    # Order subpaths top-to-bottom, then left-to-right.
    ordered = sorted(substructures, key=lambda item: (item[1][1], item[1][0]))
    new_path = []
    should_flip_cardinality = False
    first_seen = False
    for subpath, _ in ordered:
        if not first_seen:
            # The first substructure decides whether the whole icon's
            # winding direction gets flipped.
            should_flip_cardinality = not _is_clockwise(subpath)
            first_seen = True
        if should_flip_cardinality:
            subpath = _make_clockwise(subpath)
        new_path.extend(subpath)
    # Convert args back to strings.
    return [[cmd[0]] + [str(arg) for arg in cmd[1:]] for cmd in new_path]
| 25,362
|
def cleanup_all_built_pipelines():
    """Tear down every pipeline that has been built so far."""
    for config_path in find_all_built_pipelines():
        cleanup_pipeline(config_path)
| 25,363
|
def AddAdvancedOptions(parser, required=False):
  """Adds the cloud armor advanced options arguments to the argparse."""

  def _to_upper(value):
    # Normalize user input so lowercase spellings are accepted by `choices`.
    return value.upper()

  parser.add_argument(
      '--json-parsing',
      choices=['DISABLED', 'STANDARD'],
      type=_to_upper,
      required=required,
      help=('The JSON parsing behavior for this rule. '
            'Must be one of the following values: [DISABLED, STANDARD].'))
  parser.add_argument(
      '--log-level',
      choices=['NORMAL', 'VERBOSE'],
      type=_to_upper,
      required=required,
      help='The level of detail to display for WAF logging.')
| 25,364
|
def extract_meta(src: bytes) -> Tuple[int, int]:
    """
    Return a 2-tuple:
    - the length of the decoded block
    - the number of bytes that the length header occupied.
    """
    length, header_size = uvarint(src)
    # A non-positive header size or a length beyond 32 bits means the
    # varint header itself is corrupt.
    if header_size <= 0 or length > 0xFFFFFFFF:
        raise CorruptError
    # Lengths above 2**31-1 are valid varints but unsupported here.
    if length > 0x7FFFFFFF:
        raise TooLargeError
    return length, header_size
| 25,365
|
def metric_source_configuration_table(data_model, metric_key, source_key) -> str:
    """Return the metric source combination's configuration as Markdown table."""
    configurations = data_model["sources"][source_key].get("configuration", {}).values()
    relevant = [config for config in configurations if metric_key in config["metrics"]]
    if not relevant:
        return ""
    # Assemble the table row by row, then join once at the end.
    rows = [markdown_table_header("Configuration", "Value")]
    for configuration in sorted(relevant, key=lambda config: str(config["name"])):
        values = ", ".join(sorted(configuration["value"], key=str.lower))
        rows.append(markdown_table_row(configuration["name"], values))
    rows.append("\n")
    return "".join(rows)
| 25,366
|
def search(keyword, limit=20):
    """
    Search is the iTunes podcast directory for the given keywords.
    Parameter:
        keyword = A string containing the keyword to search.
        limit: the maximum results to return,
            The default is 20 results.
    returns:
        A JSON object.
    """
    query = keyword.replace(' ', '+')  # Replace white space with +.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'
    }
    # ITunes podcast search URL.
    url = 'https://itunes.apple.com/search?term=%s&country=us&limit=%d&entity=podcast' % (query, limit)
    response = requests.get(url, headers=headers)
    return json.loads(response.text)
| 25,367
|
def job_checks(name: str):
    """
    Check if the job has parameters and ask the user to insert them,
    showing each parameter's default value.  Exits the process if the
    job has no parameters and the user declines to proceed.
    """
    params = job_parameters(name)
    collected = {}
    if not params:
        proceed = Confirm.ask(
            f"Job [bold green] {name} [/bold green] has no parameters, do you want to proceed?", default=True
        )
        if not proceed:
            exit(0)
        return collected
    wants_input = Confirm.ask(
        f"Job [bold green] {name} [/bold green] has parameters, do you want to insert them?", default=True
    )
    if wants_input:
        for key, default in params.items():
            collected[key] = Prompt.ask(f"{key}", default=f"{default}")
    return collected
| 25,368
|
def post():
    """Post new message.

    Inserts the message into POST with the chosen privacy, then emails
    every user mentioned by zid (pattern z#######) in the message body.
    Always redirects back to the index, passing any validation error.
    """
    error = None
    # NOTE(review): on non-POST requests request.form['message'] is accessed
    # in the elif — presumably such requests never reach this view; confirm
    # the route is POST-only.
    if request.method == 'POST'\
            and request.form['message'] != '' and request.form['message'] is not None:
        user_zid = session['logged_in']
        post_message = request.form['message']
        post_privacy = request.form['post_privacy']
        # print('post_privacy: "{}"'.format(post_privacy))
        cur_time_txt = time_date2txt()
        db = get_db()
        db.execute('INSERT INTO POST (zid, time, message, privacy) values (?, ?, ?, ?)',
                   [user_zid, cur_time_txt, post_message, post_privacy])
        db.commit()
        # Notify each distinct mentioned user (zids look like z1234567).
        for m_zid in set(re.findall(r'z[0-9]{7}', post_message)):
            m_user = get_user(zid=m_zid)
            if m_user and m_user['email']:
                email_subj = '{} Mentioned you in his post!!'.format(g.user['full_name'])
                path = url_for('search', _external=True)+'?suggestion={}'.format(m_zid)
                print(path)
                email_body = 'Check the link to check the post: <a href="{0}">{0}</a>'.format(path)
                send_email(m_user['email'], email_subj, email_body)
    elif request.form['message'] == '' or request.form['message'] is None:
        error = "Post cannot be empty"
    # Redirect in every case; the error (if any) is surfaced via query arg.
    return redirect(url_for('index', new_post_error=error))
| 25,369
|
def read_article_feed():
    """ Get articles from RSS feed and notify about the ones not yet seen. """
    for entry in feedparser.parse(FEED)['entries']:
        title, published = entry['title'], entry['published']
        if article_is_not_db(title, published):
            send_notification(title, entry['link'])
            add_article_to_db(title, published)
| 25,370
|
def data_context_connectivity_context_connectivity_serviceuuid_end_pointlocal_id_connection_end_pointtopology_uuidnode_uuidnode_edge_point_uuidconnection_end_point_uuid_get(uuid, local_id, topology_uuid, node_uuid, node_edge_point_uuid, connection_end_point_uuid):  # noqa: E501
    """data_context_connectivity_context_connectivity_serviceuuid_end_pointlocal_id_connection_end_pointtopology_uuidnode_uuidnode_edge_point_uuidconnection_end_point_uuid_get

    returns tapi.connectivity.ConnectionEndPointRef  # noqa: E501
    :param uuid: Id of connectivity-service
    :type uuid: str
    :param local_id: Id of end-point
    :type local_id: str
    :param topology_uuid: Id of connection-end-point
    :type topology_uuid: str
    :param node_uuid: Id of connection-end-point
    :type node_uuid: str
    :param node_edge_point_uuid: Id of connection-end-point
    :type node_edge_point_uuid: str
    :param connection_end_point_uuid: Id of connection-end-point
    :type connection_end_point_uuid: str
    :rtype: TapiConnectivityConnectionEndPointRef
    """
    # Scaffolding placeholder — the real lookup is not implemented yet.
    return 'do some magic!'
| 25,371
|
def git_clone(url: str, path: Union[Path, str]) -> None:
    """Clone the git repository at *url* into *path*."""
    destination = str(path)
    Repo.clone_from(url, destination)
| 25,372
|
def add_member_to_crypto_key_policy(
        project_id, location_id, key_ring_id, crypto_key_id, member, role):
    """Adds a member with a given role to the Identity and Access Management
    (IAM) policy for a given CryptoKey associated with a KeyRing."""
    # API client for the KMS service.
    client = kms_v1.KeyManagementServiceClient()
    # Resolve the fully-qualified resource name of the CryptoKey.
    resource = client.crypto_key_path_path(project_id, location_id,
                                           key_ring_id, crypto_key_id)
    # Fetch the current policy, append the binding, then write it back.
    policy = client.get_iam_policy(resource)
    policy.bindings.add(
        role=role,
        members=[member])
    client.set_iam_policy(resource, policy)
    # Report the result.
    print('Member {} added with role {} to policy for CryptoKey {} \
          in KeyRing {}'.format(member, role, crypto_key_id, key_ring_id))
| 25,373
|
def softmax(logits):
    """Take the softmax over a set of logit scores.
    Args:
        logits (np.array): a 1D numpy array
    Returns:
        a 1D numpy array of probabilities, of the same shape.
    """
    scores = np.asarray(logits)          # no-op when already an ndarray
    shifted = scores - np.max(scores)    # re-center for numerical stability
    exp_scores = np.exp(shifted)
    return exp_scores / np.sum(exp_scores)
| 25,374
|
def _aggregate(df, variable, components=None, method=np.sum):
    """Internal implementation of the `aggregate` function"""
    # Passing a list of variables together with an explicit component list
    # is ambiguous, so it is rejected up front.
    if islistable(variable) and components is not None:
        raise ValueError(
            "Aggregating by list of variables does not support `components`!"
        )

    msg = "Cannot aggregate variable '{}' because it has no components!"
    mapping = {}

    if isstr(variable):
        # Default components to all variables one level below `variable`.
        components = components or df._variable_components(variable)
        if not len(components):
            logger.info(msg.format(variable))
            return
        mapping.update({c: variable for c in components})
    else:
        # Use all variables one level below each `variable` as components.
        for v in variable if islistable(variable) else [variable]:
            _components = df._variable_components(v)
            if not len(_components):
                logger.info(msg.format(v))
                continue
            mapping.update({c: v for c in _components})

    # Rename every component to its target variable, then aggregate.
    _df = df._data[df._apply_filters(variable=mapping.keys())]
    _df.index = replace_index_values(_df, "variable", mapping)
    return _group_and_agg(_df, [], method)
| 25,375
|
def get_attributes(klass):
    """Return the names of all non-routine, non-dunder attributes of *klass*."""
    members = inspect.getmembers(klass, lambda m: not inspect.isroutine(m))
    return [
        name for name, _ in members
        if not (name.startswith("__") and name.endswith("__"))
    ]
| 25,376
|
def render(html):
    """Convert HTML to a PDF.

    Renders *html* with WeasyPrint onto a cairo PDF surface. Every
    ``<img src="*.pdf">`` element splits the document into a section; the
    referenced PDF's pages are drawn first and the section's rendered HTML
    pages are painted over them ("blank.pdf" means no underlay).

    :param html: the HTML document as a string.
    :return: the finished PDF bytes (a memoryview over the output buffer).
    """
    output = io.BytesIO()
    # Initial surface size is A4 in points (595 x 842); resized per section.
    surface = cairo.PDFSurface(output, 595, 842)
    ctx = cairo.Context(surface)
    # HACK: extract the raw cairo_t* out of the pycairo context object's
    # memory and wrap it in a cairocffi context, so WeasyPrint (cairocffi)
    # and Poppler (pycairo) can draw on the same surface.
    cffictx = cairocffi.Context._from_pointer(cairocffi.ffi.cast('cairo_t **', id(ctx) + object.__basicsize__)[0], incref=True)
    html = etree.parse(io.StringIO(html), etree.HTMLParser())
    # Iterate over the PDF placeholders; each marks the start of a section.
    for pdf in html.xpath("//img[substring(@src, string-length(@src) - 3)=\'.pdf\']"):
        # Drop everything before this placeholder from the working tree.
        for prev in pdf.xpath("preceding-sibling::*"):
            pdf.getparent().remove(prev)
        pdfsrc = pdf.get("src")
        pdf.getparent().remove(pdf)
        section = deepcopy(html)
        # In the copied section, drop everything from the next placeholder on.
        for nextpdf in section.xpath("//img[substring(@src, string-length(@src) - 3)=\'.pdf\']"):
            for nextel in nextpdf.xpath("following-sibling::*"):
                nextpdf.getparent().remove(nextel)
            nextpdf.getparent().remove(nextpdf)
        html_pages = weasyprint.HTML(tree=section).render().pages
        # Match the surface to the section's page size (96dpi CSS px -> 72dpi pt).
        surface.set_size(html_pages[0].width * 72 / 96.0, html_pages[0].height * 72 / 96.0)
        if pdfsrc != "blank.pdf":
            # Load the referenced PDF via Poppler from an in-memory stream.
            with weasyprint.default_url_fetcher(str(pdfsrc))['file_obj'] as fetch:
                pdf_pages = Poppler.Document.new_from_stream(Gio.MemoryInputStream.new_from_bytes(GLib.Bytes.new_take(fetch.read())), -1, None, None)
        else:
            pdf_pages = None
        # Emit as many pages as the longer of (pdf pages, html pages),
        # overlaying the HTML rendering on top of each PDF page.
        for pageno in range(max(pdf_pages.get_n_pages() if pdf_pages else 0, len(html_pages))):
            if pdf_pages and pageno < pdf_pages.get_n_pages():
                pdf_pages.get_page(pageno).render_for_printing(ctx)
            if pageno < len(html_pages):
                html_pages[pageno].paint(cffictx, scale=72 / 96.0)
            ctx.show_page()
    surface.finish()
    return output.getbuffer()
| 25,377
|
def computeFlowImage(u,v,logscale=True,scaledown=6,output=False):
    """
    Color-code an optical-flow field (u, v) as an RGB image.

    topleft is zero, u is horiz, v is vertical
    red is 3 o'clock, yellow is 6, light blue is 9, blue/purple is 12

    :param u: horizontal flow component (2D array).
    :param v: vertical flow component (2D array, same shape as u -- assumed,
        not checked).
    :param logscale: if True, compress magnitudes with log(r + 1).
    :param scaledown: divisor applied to the (possibly log-scaled) magnitude.
    :param output: if True, print diagnostic magnitude maxima.
    :return: uint8 image of shape u.shape + (n_channels,), where n_channels
        comes from the color wheel (typically 3).
    """
    colorwheel = makecolorwheel()
    ncols = colorwheel.shape[0]
    # Per-pixel flow magnitude.
    radius = np.sqrt(u**2 + v**2)
    if output:
        print("Maximum flow magnitude: %04f" % np.max(radius))
    if logscale:
        radius = np.log(radius + 1)
        if output:
            print("Maximum flow magnitude (after log): %0.4f" % np.max(radius))
    radius = radius / scaledown
    if output:
        print("Maximum flow magnitude (after scaledown): %0.4f" % np.max(radius))
    # Flow direction as a fraction of pi, in (-1, 1].
    rot = np.arctan2(-v, -u) / np.pi
    fk = (rot+1)/2 * (ncols-1) # -1~1 maped to 0~ncols
    k0 = fk.astype(np.uint8) # 0, 1, 2, ..., ncols-1
    k1 = k0+1
    k1[k1 == ncols] = 0  # wrap around the color wheel
    f = fk - k0  # fractional position between adjacent wheel entries
    ncolors = colorwheel.shape[1]
    img = np.zeros(u.shape+(ncolors,))
    for i in range(ncolors):
        tmp = colorwheel[:,i]
        col0 = tmp[k0]
        col1 = tmp[k1]
        # Linear interpolation between the two nearest wheel colors.
        col = (1-f)*col0 + f*col1
        idx = radius <= 1
        # increase saturation with radius
        col[idx] = 1 - radius[idx]*(1-col[idx])
        # out of range
        col[~idx] *= 0.75
        img[:,:,i] = np.floor(255*col).astype(np.uint8)
    return img.astype(np.uint8)
| 25,378
|
def day_display(year, month, all_month_events, day):
    """
    Return the events that occur on the given day.

    Builds the month's occurrence data, narrows it down to the events
    occurring on ``day``, and orders them by start date then starting hour.
    """
    month_count = CountHandler(year, month, all_month_events).get_count()
    # Primary keys of the events occurring on the requested day.
    event_pks = [entry[1] for entry in month_count[day]]
    # Materialize as a list so we can sort in Python below.
    # See the comments in EventMonthView in views.py for more info
    events = list(
        Event.objects.filter(pk__in=event_pks)
        .order_by('start_date')
        .prefetch_related('cancellations')
    )
    events.sort(key=lambda ev: ev.l_start_date.hour)
    return events
| 25,379
|
def decimal_to_octal(num):
    """Convert a decimal (base-10) integer to its octal representation.

    Args:
        num: a non-negative integer.

    Returns:
        The octal digits of ``num`` as a string (e.g. ``8 -> "10"``).
    """
    # Build the result with pure integer arithmetic. The previous version
    # accumulated digits via math.pow (floats) and formatted with "%g",
    # which loses precision for large inputs and emits scientific notation
    # once the octal value reaches 7 digits (262144 -> "1e+06").
    octal = 0
    place = 1  # value of the current octal digit position (1, 10, 100, ...)
    while num > 0:
        num, remainder = divmod(num, 8)
        octal += remainder * place
        place *= 10
    return str(octal)
| 25,380
|
def get_formatted_dates(date_ranges):
    """Return the dates covered by *date_ranges*, formatted for the Swiftly API.

    Each element of *date_ranges* is a dict describing a span of dates.
    Sample dict for Tue/Wed/Thu in Sep/Oct:

        {
            "start_date": "09-01-2019",
            "end_date": "10-31-2019",
            "include_days": [0, 1, 1, 1, 0, 0, 0]
        }
    """
    formatted = []
    for span in date_ranges:
        # Custom business-day frequency honoring the per-weekday mask.
        timestamps = pd.bdate_range(
            start=span["start_date"],
            end=span["end_date"],
            weekmask=span["include_days"],
            freq="C",
        )
        formatted.extend(ts.strftime("%m-%d-%Y") for ts in timestamps)
    return formatted
| 25,381
|
def _get_config_path(config_path):
"""Find path to yaml config file
Args:
config_path: (str) Path to config.yaml file
Returns:
Path to config.yaml if specified else default config.yaml
Raises:
ValueError: If the config_path is not None but doesn't exist
"""
if config_path is None:
dirname = os.path.dirname(cli.__file__)
config_path = os.path.join(dirname, 'config/config.yaml')
if not os.path.exists(config_path):
raise ValueError("Config path {} does not exist!".format(config_path))
return config_path
| 25,382
|
def read_img(img: str, no_data: float, mask: str = None, classif: str = None, segm: str = None) ->\
        xr.Dataset:
    """
    Read image and mask, and return the corresponding xarray.DataSet

    :param img: Path to the image
    :type img: string
    :param no_data: no_data value in the image
    :type no_data: float
    :param mask: Path to the mask (optional): 0 value for valid pixels, !=0 value for invalid pixels
    :type mask: string
    :param classif: Path to the classif (optional)
    :type classif: string
    :param segm: Path to the segmentation (optional)
    :type segm: string
    :return: xarray.DataSet containing the variables :
            - im : 2D (row, col) xarray.DataArray float32
            - msk : 2D (row, col) xarray.DataArray int16, with the convention defined in the configuration file
    :rtype: xarray.DataSet
    """
    img_ds = rasterio_open(img)
    data = img_ds.read(1)
    # Locate no-data pixels; NaN needs its own test because NaN != NaN.
    if np.isnan(no_data):
        no_data_pixels = np.where(np.isnan(data))
    else:
        no_data_pixels = np.where(data == no_data)
    # We accept nan values as no data on input image but to not disturb cost volume processing as stereo computation
    # step,nan as no_data must be converted. We choose -9999 (can be another value). No_data position aren't erased
    # because stored in 'msk'
    if no_data_pixels[0].size != 0 and np.isnan(no_data):
        data[no_data_pixels] = -9999
        no_data = -9999
    dataset = xr.Dataset({'im': (['row', 'col'], data.astype(np.float32))},
                         coords={'row': np.arange(data.shape[0]),
                                 'col': np.arange(data.shape[1])})
    # Add image conf to the image dataset
    dataset.attrs = {'no_data_img': no_data,
                     'valid_pixels': 0,  # arbitrary default value
                     'no_data_mask': 1}  # arbitrary default value
    if classif is not None:
        # Attach the classification band as an int16 layer.
        input_classif = rasterio_open(classif).read(1)
        dataset['classif'] = xr.DataArray(np.full((data.shape[0], data.shape[1]), 0).astype(np.int16),
                                          dims=['row', 'col'])
        dataset['classif'].data = input_classif
    if segm is not None:
        # Attach the segmentation band as an int16 layer.
        input_segm = rasterio_open(segm).read(1)
        dataset['segm'] = xr.DataArray(np.full((data.shape[0], data.shape[1]), 0).astype(np.int16),
                                       dims=['row', 'col'])
        dataset['segm'].data = input_segm
    # If there is no mask, and no data in the images, do not create the mask to minimize calculation time
    if mask is None and no_data_pixels[0].size == 0:
        return dataset
    # Allocate the internal mask (!= input_mask)
    # Mask convention:
    # value : meaning
    # dataset.attrs['valid_pixels'] : a valid pixel
    # dataset.attrs['no_data_mask'] : a no_data_pixel
    # other value : an invalid_pixel
    dataset['msk'] = xr.DataArray(np.full((data.shape[0], data.shape[1]),
                                          dataset.attrs['valid_pixels']).astype(np.int16), dims=['row', 'col'])
    # Mask invalid pixels if needed
    # convention: input_mask contains information to identify valid / invalid pixels.
    # Value == 0 on input_mask represents a valid pixel
    # Value != 0 on input_mask represents an invalid pixel
    if mask is not None:
        input_mask = rasterio_open(mask).read(1)
        # Masks invalid pixels
        # All pixels that are not valid_pixels, on the input mask, are considered as invalid pixels
        dataset['msk'].data[np.where(input_mask > 0)] = dataset.attrs['valid_pixels'] + \
            dataset.attrs['no_data_mask'] + 1
    # Masks no_data pixels
    # If a pixel is invalid due to the input mask, and it is also no_data, then the value of this pixel in the
    # generated mask will be = no_data
    dataset['msk'].data[no_data_pixels] = int(dataset.attrs['no_data_mask'])
    return dataset
| 25,383
|
def any_user(password=None, permissions=(), groups=(), **kwargs):
    """
    Shortcut for creating Users.

    :param password: if given, hashed and set via ``set_password``.
    :param permissions: iterable of "<app_label>.<codename>" permission names.
    :param groups: iterable of group names the user is added to.
    :param kwargs: extra model fields; ``is_active``, ``is_superuser`` and
        ``is_staff`` default to an active, non-superuser, non-staff user.
    :return: the saved ``User`` instance.
    """
    # NOTE: the previous defaults `permissions=[]` / `groups=[]` were mutable
    # default arguments; tuples avoid the shared-state pitfall.
    # Pop the flags so they are not passed to any_model twice via **kwargs.
    is_active = kwargs.pop('is_active', True)
    is_superuser = kwargs.pop('is_superuser', False)
    is_staff = kwargs.pop('is_staff', False)
    user = any_model(User, is_active=is_active, is_superuser=is_superuser,
                     is_staff=is_staff, **kwargs)

    for group_name in groups:
        group = Group.objects.get(name=group_name)
        user.groups.add(group)

    for permission_name in permissions:
        # Permission names are qualified as "<app_label>.<codename>".
        app_label, codename = permission_name.split('.')
        permission = Permission.objects.get(
            content_type__app_label=app_label,
            codename=codename)
        user.user_permissions.add(permission)

    if password:
        user.set_password(password)
    user.save()
    return user
| 25,384
|
def devices_to_use():
    """Returns the device objects for the accel. we are the most likely to use.

    Returns:
        List of logical devices of the accelerators we will use.
    """
    # Prefer TPU, then GPU, falling back to CPU.
    for device_type in ("TPU", "GPU"):
        devices = tf.config.list_logical_devices(device_type)
        if devices:
            break
    else:
        devices = tf.config.list_logical_devices("CPU")
    devices.sort()
    return devices
| 25,385
|
def barcode_density(bars, length):
    """
    calculates the barcode density (normalized average cycle lifetime)
    of a barcode
    """
    n_bars = len(bars)
    densities = np.zeros(n_bars)
    # Number of intervals per bar.
    nums = np.array([len(bar[1]) for bar in bars])
    num_infs = np.zeros(n_bars)
    for i, bar in enumerate(bars):
        total = 0
        for intr in bar[1]:
            if np.isinf(intr[1]):
                # Infinite death: lifetime runs to the end of the filtration.
                num_infs[i] += 1
                total += (length - intr[0]) / (length - 1)
            else:
                total += (intr[1] - intr[0]) / (length - 1)
        densities[i] = total
    normed_density = densities / nums
    normed_density[np.isnan(normed_density)] = 0
    return np.stack([densities, nums, normed_density, num_infs])
| 25,386
|
def create_parser():
    """Build the default command-line argument parser.

    Returns
    -------
    parser : ArgumentParser
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--version', action='version', version=__version__)
    arg_parser.add_argument('--config-file')
    arg_parser.add_argument('--update-config', action='store_true')
    update_parser_arguments(arg_parser)
    arg_parser.add_argument(
        'command',
        nargs='?',
        default='paste',
        choices=('init', 'paste', 'clipboard', 'screenshot'),
    )
    return arg_parser
| 25,387
|
def test_prediction_gradient():
    """Test computation of prediction gradients."""
    # Binary classification
    n_classes = 1
    mlp = MLPClassifier(n_epochs=100, random_state=42, hidden_units=(5,))
    # One informative feature, the rest are noise (shuffle=False keeps the
    # informative feature in column 0).
    X, y = make_classification(
        n_samples=1000, n_features=20, n_informative=n_classes, n_redundant=0,
        n_classes=n_classes, n_clusters_per_class=1, shuffle=False)
    mlp.fit(X, y)
    grad = mlp.prediction_gradient(X)
    grad_means = grad.mean(axis=0)
    assert grad.shape == X.shape
    # Check that only the informative feature has a large gradient.
    # The values of 1 and 0.5 here are somewhat arbitrary but should serve as
    # a regression test if nothing else.
    assert np.abs(grad_means[0]) > 1.
    for m in grad_means[1:]:
        assert np.abs(m) < 0.5
    # Multiclass classification: here, we'll just check that it runs and that
    # the output is the right shape.
    n_classes = 5
    X, y = make_classification(
        n_samples=1000, n_features=20, n_informative=n_classes,
        n_redundant=0, n_classes=n_classes, n_clusters_per_class=1,
        shuffle=False)
    mlp.fit(X, y)
    grad = mlp.prediction_gradient(X)
    # One gradient matrix per class.
    assert grad.shape == (X.shape[0], n_classes, X.shape[1])
    # Multilabel binary classification.
    X, y = make_multilabel_classification(
        n_samples=1000, random_state=42, n_classes=n_classes)
    mlp.fit(X, y)
    grad = mlp.prediction_gradient(X)
    assert grad.shape == (X.shape[0], n_classes, X.shape[1])
    # Raise an exception for sparse inputs, which are not yet supported.
    X_sp = sp.csr_matrix(X)
    mlp.fit(X_sp, y)
    with pytest.raises(NotImplementedError):
        mlp.prediction_gradient(X_sp)
| 25,388
|
def preprocess_data(datadir, verbose=False):
    """
    Restructure the raw data and the FSL results into a convenient layout.

    Copies each rat's day-0/day-4 images and masks from the "raw" and
    "FSL_results" directories into per-subject folders under "preprocessed".
    """
    dataDir = os.path.abspath(datadir)
    if verbose:
        print(f"Performing data preprocessing in directory:\n{dataDir:s}")
        time.sleep(1)

    # Current data locations.
    rawDir = os.path.join(dataDir, "raw")
    fslDir = os.path.join(dataDir, "FSL_results")

    # Day-0 / day-4 images and masks as produced by the pipeline.
    day0_images = glob(os.path.join(rawDir, "rat*_dwib0_1_bet.nii.gz"))
    day4_images = glob(os.path.join(fslDir, "rat*", "bet_flirt.nii.gz"))
    day0_masks = glob(os.path.join(rawDir, "rat*_adc1f_lesionmask.nii.gz"))
    day4_masks = glob(os.path.join(fslDir, "rat*", "mask_flirt.nii.gz"))

    # Target location.
    newDir = os.path.join(dataDir, "preprocessed")

    def _subject_from_file(path):
        # Subject id ("ratNN") is the first 5 chars of the raw filename.
        return os.path.split(path)[-1][:5]

    def _subject_from_dir(path):
        # Subject id is the name of the per-rat FSL results directory.
        return os.path.split(os.path.dirname(path))[-1]

    day0_images_new = [os.path.join(newDir, _subject_from_file(p), "day0_img.nii.gz") for p in day0_images]
    day4_images_new = [os.path.join(newDir, _subject_from_dir(p), "day4_img.nii.gz") for p in day4_images]
    day0_masks_new = [os.path.join(newDir, _subject_from_file(p), "day0_mask.nii.gz") for p in day0_masks]
    day4_masks_new = [os.path.join(newDir, _subject_from_dir(p), "day4_mask.nii.gz") for p in day4_masks]

    # Create the output directory and each subject directory if needed.
    if not os.path.isdir(newDir):
        os.mkdir(newDir)
    for new_img in day0_images_new:
        subjectDir = os.path.dirname(new_img)
        if not os.path.isdir(subjectDir):
            os.mkdir(subjectDir)

    # Copy every file into its new subject directory.
    indices = range(len(day0_images_new))
    for i in (tqdm(indices, ascii=True) if verbose else indices):
        copyfile(day0_images[i], day0_images_new[i])
        copyfile(day4_images[i], day4_images_new[i])
        copyfile(day0_masks[i], day0_masks_new[i])
        copyfile(day4_masks[i], day4_masks_new[i])
    return
| 25,389
|
def genRankSurvey(readername, candidates, binsize, shareWith=None):
    """
    Create, publish and activate a Qualtrics drag-and-drop ranking survey
    and return its public link.

    readername (str)
    candidates (iterable)
    binsize (int)
    shareWith (str) optional
    """
    # connect and craete survey
    c = cornellQualtrics()
    surveyname = "Ranking Survey for {}".format(readername)
    surveyId = c.createSurvey(surveyname)
    desc = (
        u"This survey is for: {0}.\n\n"
        u"Rank students into the top 50%-ile bins. "
        u"Put exactly {1} students in each bin. "
        u"All uncategorized students will automatically "
        u"be placed in the bottom 50%-ile. Ordering within a bin "
        u"does not matter.".format(readername, binsize)
    )
    # One choice per candidate, keyed by 1-based index as a string.
    choices = {}
    for j, choice in enumerate(candidates):
        choices[str(j + 1)] = {"Display": choice}
    choiceOrder = list(range(1, len(choices) + 1))
    # Qualtrics "Pick, Group and Rank" (PGR) question payload.
    questionDef = {
        "QuestionText": desc,
        "DefaultChoices": False,
        "DataExportTag": "Q1",
        "QuestionID": "QID1",
        "QuestionType": "PGR",
        "Selector": "DragAndDrop",
        "SubSelector": "Columns",
        "Configuration": {
            "QuestionDescriptionOption": "UseText",
            "Stack": False,
            "StackItemsInGroups": False,
        },
        "QuestionDescription": desc,
        "Choices": choices,
        "ChoiceOrder": choiceOrder,
        # Force exactly `binsize` students into each of the five bins.
        "Validation": {
            "Settings": {
                "ForceResponse": "ON",
                "Type": "GroupChoiceRange",
                "MinChoices": "{}".format(binsize),
                "MaxChoices": "{}".format(binsize),
            }
        },
        "GradingData": [],
        "Language": [],
        "NextChoiceId": len(choices) + 1,
        "NextAnswerId": 6,
        "Groups": ["Top 10%", "Top 20%", "Top 30%", "Top 40%", "Top 50%"],
        "NumberOfGroups": 5,
        "QuestionText_Unsafe": desc,
    }
    c.addSurveyQuestion(surveyId, questionDef)
    # Optionally share with a collaborator, then make the survey live.
    if shareWith:
        c.shareSurvey(surveyId, shareWith)
    c.publishSurvey(surveyId)
    c.activateSurvey(surveyId)
    link = "https://cornell.qualtrics.com/jfe/form/%s" % surveyId
    return link
| 25,390
|
def _get_badge_status(
        self_compat_res: dict,
        google_compat_res: dict,
        dependency_res: dict) -> BadgeStatus:
    """Get the badge status.

    The badge status will determine the right hand text and the color of
    the badge.

    Args:
        self_compat_res: a dict containing a package's self compatibility
            status for py2 and py3. See _get_self_compatibility_dict().
        google_compat_res: a dict containing a package's pair compatibility
            status for py2 and py3. See _get_pair_compatibility_dict().
        dependency_res: a dict containing a package's dependency status.
            See _get_dependency_dict().

    Returns:
        The cumulative badge status.
    """
    # Collect the per-version statuses, then the dependency status.
    statuses = [
        result[pyver]['status']
        for pyver in ('py2', 'py3')
        for result in (self_compat_res, google_compat_res)
    ]
    statuses.append(dependency_res['status'])
    return BadgeStatus.get_highest_status(statuses)
| 25,391
|
def _get_yaml_as_string_from_mark(marker):
    """Render the single yaml argument of a marker as an indented text block.

    Args:
        marker: a pytest mark whose single positional argument is a yaml
            string.

    Returns:
        The parsed-and-redumped yaml wrapped in document markers and
        indented for embedding.

    Raises:
        TypeError: if the marker does not carry exactly one argument.
    """
    testids_mark_arg_no = len(marker.args)
    # Previously only `> 1` was rejected, so a marker with *zero* arguments
    # crashed with an IndexError below instead of this explicit error.
    if testids_mark_arg_no != 1:
        raise TypeError(
            'Incorrect number of arguments passed to'
            ' @pytest.mark.test_yaml, expected 1 and '
            'received {}'.format(testids_mark_arg_no))
    # safe_load: plain yaml.load without an explicit Loader is unsafe on
    # untrusted input and deprecated since PyYAML 5.1.
    yaml_object = yaml.safe_load(marker.args[0])
    yaml_text_block = '\n---\n' \
        + yaml.dump(yaml_object, default_flow_style=False) \
        + '...'
    indented_yaml_text_block = '\n '.join(yaml_text_block.split('\n'))
    return indented_yaml_text_block
| 25,392
|
def createsuperuser(name, username, email):
    """Create the superuser account to access the admin panel.

    Missing arguments are prompted for interactively; the password is
    prompted until it matches its confirmation.
    """
    from ..contrib.auth.models import User, Group
    name:str = name or Console.input.String("enter admin name: ")
    email:str = email or Console.input.String("enter admin email: ")
    username:str = username or Console.input.String("enter admin username: ") or email
    # Prompt until the password and its confirmation match.
    while True:
        password:str = Console.input.Password("enter admin password: ")
        confirm_password:str = Console.input.Password("enter admin confirm password: ")
        if not password == confirm_password:
            Console.log.Warning('confirm password not matched.\nPlease enter again.')
            continue
        else:
            break
    # Split the full name into first/last names. The previous code indexed
    # name.rsplit(" ")[1], which raised IndexError for single-word names and
    # dropped everything after the second word for longer names.
    first_name, _, last_name = name.partition(" ")
    if Console.input.Boolean( "Are you sure, you want to create superuser using inserted data"):
        with app.app_context():
            new_admin = User(first_name=first_name, last_name=last_name, email=email,
                             username=username, password=password)
            group = Group.query.filter_by(name='super_admin').first()
            new_admin.groups.append(group)
            new_admin.save()
            Console.log.Success("superuser created successfully.")
    else:
        Console.log.Error("superuser creation canceled!")
| 25,393
|
def combine_dicts(w_dict1, w_dict2, params, model):
    """
    Combine two dictionaries:

    Concatenates the two inputs (assumes they support `+`, i.e. list-like
    containers -- TODO confirm) and removes duplicates via
    md.remove_duplicates_w_dict.

    NOTE(review): params[0] is read into `eps` but `eps` is never used, and
    params[0] is then zeroed *in place* (visible to the caller) without being
    restored afterwards -- confirm this side effect is intended.
    """
    w_dict = w_dict1 + w_dict2
    eps = params[0]
    params[0] = 0
    P_w = []
    w_dict = md.remove_duplicates_w_dict(P_w,w_dict,params,model)
    return w_dict
| 25,394
|
def geocode_mapping(row, aian_ranges, aian_areas, redefine_counties, strong_mcd_states):
    """
    Map an RDD row to a tuple (state, AIAN_bool, county, place/MCD, tract,
    block, das_aian_area_code), where place/MCD is the five digit MCD in
    MCD-strong states and the 5 digit place otherwise, and AIAN_bool is '1'
    iff the block lies inside an AIAN area.

    :param row: tuple (state, AIANNHCE, county, place, MCD, tract, block)
    :param aian_ranges: dict mapping AIAN type -> (first, last) AIANNHCE
        codes for that category
    :param aian_areas: AIANNHCE code groups that define AIAN areas
    :param redefine_counties: where counties inside AIAN areas are redefined:
        "in_strong_MCDs", "everywhere", or "nowhere"
    :param strong_mcd_states: tuple of state geoids that are strong MCD states
    :return: tuple (state, AIAN_bool, county, place/MCD, tract, block,
        das_aian_area_code)
    """
    state, aiannhce, county, place, cousub, tract, block = row
    county = '10' + county
    is_strong_mcd = state in strong_mcd_states

    # Codes 4990-4999 are outside the universe of possible AIANNHCE codes.
    assert aiannhce not in [str(x) for x in range(4990, 5000)], "AIANNHCE codes cannot be between 4990 and 4999"

    if aiannhce == '9999':
        # Not inside any AIAN area category.
        aian = '0'
    else:
        # Find the category containing this code and check whether the user
        # included it in their AIAN-areas definition.
        for aian_definition, (first_code, last_code) in aian_ranges.items():
            if first_code <= aiannhce <= last_code:
                aian = '1' if aian_definition in aian_areas else '0'
                # Optionally bypass the county geounit for individual AIAN areas.
                redefine = (
                    redefine_counties == 'everywhere'
                    or (redefine_counties == 'in_strong_MCDs' and is_strong_mcd)
                )
                if aian_definition in aian_areas and redefine:
                    county = '0' + aiannhce
                break

    # An alternative would be to drop the aian condition here to increase
    # accuracy in MCDs:
    mcd_or_place = cousub if (is_strong_mcd and aian == '0') else place
    das_aian_area_code = aiannhce if aian == '1' else '9999'
    return state, aian, county, mcd_or_place, tract, block, das_aian_area_code
| 25,395
|
def get_stan_input(
    scores: pd.DataFrame,
    priors: Dict,
    likelihood: bool,
) -> Dict:
    """Get an input to cmdstanpy.CmdStanModel.sample.

    :param scores: a pandas DataFrame whose rows represent measurements,
        with columns "name" and "score"
    :param priors: dict of prior parameters merged into the output
    :param likelihood: whether the model should evaluate the likelihood
    """
    skater_codes = one_encode(scores["name"]).values
    # Scores are shifted by +6 so they index Stan's 1-based grade categories.
    score_values = scores["score"].astype(int).add(6).values
    stan_input = dict(priors)
    stan_input.update({
        "N": len(scores),
        "N_skater": scores["name"].nunique(),
        "N_grade": N_GRADE,
        "skater": skater_codes,
        "y": score_values,
        # The test set is the training set itself.
        "N_test": len(scores),
        "skater_test": skater_codes,
        "y_test": score_values,
        "likelihood": int(likelihood),
    })
    return stan_input
| 25,396
|
def get_mph(velocity):
    """
    Returns
    -------
    convert m/s to miles per hour [mph].

    NOTE(review): the factor 3600/1852 converts m/s to *knots* (nautical
    miles per hour; 1 nautical mile = 1852 m), not statute mph
    (1 mile = 1609.344 m). Confirm which unit is actually intended before
    relying on the docstring or the function name.
    """
    velocity = velocity * 3600 /1852
    return velocity
| 25,397
|
def _load_jsonl(input_path) -> list:
"""
Read list of objects from a JSON lines file.
"""
data = []
with open(input_path, 'r', encoding='utf-8') as f:
for line in f:
data.append(json.loads(line.rstrip('\n|\r')))
print('[LoadJsonl] Loaded {} records from {}'.format(len(data), input_path))
return data
| 25,398
|
def run(config):
    """Run the nowcast system worker launch scheduler.

    * Prepare the schedule as specified in the configuration file.
    * Loop forever, periodically checking to see if it is time to launch the
      scheduled workers.

    This function never returns; it blocks in an infinite polling loop.

    :param config: Nowcast system configuration.
    :type config: :py:class:`nemo_nowcast.config.Config`
    """
    # _prep_schedule registers the jobs and returns the polling interval.
    sleep_seconds = _prep_schedule(config)
    while True:
        schedule.run_pending()
        time.sleep(sleep_seconds)
| 25,399
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.