content
stringlengths 22
815k
| id
int64 0
4.91M
|
|---|---|
def detect_ascii_slice(lines):
    # type: (List[str]) -> slice
    """
    Return the most likely slice of byte-value columns for the given lines.

    Only the first line is inspected: if it begins with a 4-digit byte
    offset followed by ":" (as produced by libsnmp command output using
    the "-d" switch), the returned slice skips that offset prefix;
    otherwise the columns are assumed to start at position 0. For empty
    input, everything up to the last character is assumed to be columns.
    """
    for line in lines:
        # If the line *starts with* a "NNNN:" offset marker, the byte
        # columns begin after it. Previously any ":" anywhere in the
        # line triggered the offset slice, which misclassified lines
        # that merely contain a colon; testing the anchored match fixes
        # that.
        match = re.match(r"^\d{4}:", line)
        if match:
            return slice(6, 56)
        else:
            return slice(0, 50)
    return slice(0, -1)
| 5,335,200
|
def inception_model_pytorch():
    """The InceptionBlocks model the WebGME folks provided as a test case for deepforge.

    Builds the network with a fixed RNG seed (so the initial weights are
    reproducible), switches it to eval mode, and returns it.
    """
    class InceptionBlocks(nn.Module):
        # Inception-style CNN over 5-channel image input plus an extra
        # "ebv" feature vector concatenated before the dense head.
        def __init__(self):
            super().__init__()
            # Pads 1 pixel on the right and bottom only, so that the
            # stride-1 AvgPool2d(2) branches preserve spatial size.
            self.asymmetric_pad = nn.ZeroPad2d((0, 1, 0, 1))
            self.conv2d = nn.Conv2d(
                in_channels=5, out_channels=64, kernel_size=(5, 5), padding=2, bias=True
            )
            self.prelu = nn.PReLU(init=0.0)
            self.averagepooling2d = nn.AvgPool2d((2, 2), stride=2, padding=0)
            self.conv2d2 = nn.Conv2d(
                in_channels=64,
                out_channels=48,
                kernel_size=(1, 1),
                padding=0,
                bias=True,
            )
            self.prelu2 = nn.PReLU(init=0.0)
            self.conv2d3 = nn.Conv2d(
                in_channels=48,
                out_channels=64,
                kernel_size=(3, 3),
                padding=1,
                bias=True,
            )
            self.prelu3 = nn.PReLU(init=0.0)
            self.conv2d4 = nn.Conv2d(
                in_channels=64,
                out_channels=48,
                kernel_size=(1, 1),
                padding=0,
                bias=True,
            )
            self.prelu4 = nn.PReLU(init=0.0)
            self.averagepooling2d2 = nn.AvgPool2d((2, 2), stride=1)
            self.conv2d5 = nn.Conv2d(
                in_channels=64,
                out_channels=64,
                kernel_size=(1, 1),
                padding=0,
                bias=True,
            )
            self.prelu5 = nn.PReLU(init=0.0)
            self.conv2d6 = nn.Conv2d(
                in_channels=64,
                out_channels=48,
                kernel_size=(1, 1),
                padding=0,
                bias=True,
            )
            self.prelu6 = nn.PReLU(init=0.0)
            self.conv2d7 = nn.Conv2d(
                in_channels=48,
                out_channels=64,
                kernel_size=(1, 1),
                padding=0,
                bias=True,
            )
            self.prelu7 = nn.PReLU(init=0.0)
            # 240 = 64 + 64 + 64 + 48, the channel count after the first
            # four-branch concatenation in forward().
            self.conv2d8 = nn.Conv2d(
                in_channels=240,
                out_channels=64,
                kernel_size=(1, 1),
                padding=0,
                bias=True,
            )
            self.conv2d9 = nn.Conv2d(
                in_channels=240,
                out_channels=92,
                kernel_size=(1, 1),
                padding=0,
                bias=True,
            )
            self.conv2d10 = nn.Conv2d(
                in_channels=240,
                out_channels=64,
                kernel_size=(1, 1),
                padding=0,
                bias=True,
            )
            self.prelu8 = nn.PReLU(init=0.0)
            self.conv2d11 = nn.Conv2d(
                in_channels=64,
                out_channels=92,
                kernel_size=(5, 5),
                padding=2,
                bias=True,
            )
            self.prelu9 = nn.PReLU(init=0.0)
            self.prelu10 = nn.PReLU(init=0.0)
            self.averagepooling2d3 = nn.AvgPool2d((2, 2), stride=1, padding=0)
            self.conv2d12 = nn.Conv2d(
                in_channels=240,
                out_channels=64,
                kernel_size=(1, 1),
                padding=0,
                bias=True,
            )
            self.prelu11 = nn.PReLU(init=0.0)
            self.conv2d13 = nn.Conv2d(
                in_channels=64,
                out_channels=92,
                kernel_size=(3, 3),
                padding=1,
                bias=True,
            )
            self.prelu12 = nn.PReLU(init=0.0)
            self.prelu13 = nn.PReLU(init=0.0)
            self.averagepooling2d4 = nn.AvgPool2d((2, 2), stride=2, padding=0)
            # 340 = 92 + 92 + 92 + 64, channels after the second concat.
            self.conv2d14 = nn.Conv2d(
                in_channels=340,
                out_channels=92,
                kernel_size=(1, 1),
                padding=0,
                bias=True,
            )
            self.prelu14 = nn.PReLU(init=0.0)
            self.conv2d15 = nn.Conv2d(
                in_channels=92,
                out_channels=128,
                kernel_size=(5, 5),
                padding=2,
                bias=True,
            )
            self.prelu15 = nn.PReLU(init=0.0)
            self.conv2d16 = nn.Conv2d(
                in_channels=340,
                out_channels=128,
                kernel_size=(1, 1),
                padding=0,
                bias=True,
            )
            self.prelu16 = nn.PReLU(init=0.0)
            self.conv2d17 = nn.Conv2d(
                in_channels=340,
                out_channels=92,
                kernel_size=(1, 1),
                padding=0,
                bias=True,
            )
            self.prelu17 = nn.PReLU(init=0.0)
            self.averagepooling2d5 = nn.AvgPool2d((2, 2), stride=1, padding=0)
            self.conv2d18 = nn.Conv2d(
                in_channels=340,
                out_channels=92,
                kernel_size=(1, 1),
                padding=0,
                bias=True,
            )
            self.prelu18 = nn.PReLU(init=0.0)
            self.conv2d19 = nn.Conv2d(
                in_channels=92,
                out_channels=128,
                kernel_size=(3, 3),
                padding=1,
                bias=True,
            )
            self.prelu19 = nn.PReLU(init=0.0)
            # 476 = 128 + 128 + 128 + 92, channels after the third concat.
            self.conv2d20 = nn.Conv2d(
                in_channels=476,
                out_channels=92,
                kernel_size=(1, 1),
                padding=0,
                bias=True,
            )
            self.prelu20 = nn.PReLU(init=0.0)
            self.conv2d21 = nn.Conv2d(
                in_channels=92,
                out_channels=128,
                kernel_size=(3, 3),
                padding=1,
                bias=True,
            )
            self.prelu21 = nn.PReLU(init=0.0)
            self.conv2d22 = nn.Conv2d(
                in_channels=476,
                out_channels=92,
                kernel_size=(1, 1),
                padding=0,
                bias=True,
            )
            self.prelu22 = nn.PReLU(init=0.0)
            self.averagepooling2d6 = nn.AvgPool2d((2, 2), stride=1, padding=0)
            self.conv2d23 = nn.Conv2d(
                in_channels=476,
                out_channels=92,
                kernel_size=(1, 1),
                padding=0,
                bias=True,
            )
            self.prelu23 = nn.PReLU(init=0.0)
            self.conv2d24 = nn.Conv2d(
                in_channels=92,
                out_channels=128,
                kernel_size=(5, 5),
                padding=2,
                bias=True,
            )
            self.prelu24 = nn.PReLU(init=0.0)
            self.conv2d25 = nn.Conv2d(
                in_channels=476,
                out_channels=128,
                kernel_size=(1, 1),
                padding=0,
                bias=True,
            )
            self.prelu25 = nn.PReLU(init=0.0)
            self.averagepooling2d7 = nn.AvgPool2d((2, 2), stride=2, padding=0)
            self.conv2d26 = nn.Conv2d(
                in_channels=476,
                out_channels=92,
                kernel_size=(1, 1),
                padding=0,
                bias=True,
            )
            self.prelu26 = nn.PReLU(init=0.0)
            self.averagepooling2d8 = nn.AvgPool2d((2, 2), stride=1, padding=0)
            self.conv2d27 = nn.Conv2d(
                in_channels=476,
                out_channels=92,
                kernel_size=(1, 1),
                padding=0,
                bias=True,
            )
            self.prelu27 = nn.PReLU(init=0.0)
            self.conv2d28 = nn.Conv2d(
                in_channels=92,
                out_channels=128,
                kernel_size=(3, 3),
                padding=1,
                bias=True,
            )
            self.prelu28 = nn.PReLU(init=0.0)
            self.conv2d29 = nn.Conv2d(
                in_channels=476,
                out_channels=128,
                kernel_size=(1, 1),
                padding=0,
                bias=True,
            )
            self.prelu29 = nn.PReLU(init=0.0)
            # Dense head. NOTE(review): in_features=22273 presumably equals
            # flattened conv features + len(ebv_output) -- this fixes both
            # the expected input image resolution and the ebv vector
            # length; confirm against the callers.
            self.dense = nn.Linear(22273, 1096, bias=True)
            self.prelu30 = nn.PReLU(init=0.0)
            self.dense2 = nn.Linear(1096, 1096, bias=True)
            self.prelu31 = nn.PReLU(init=0.0)
            self.dense3 = nn.Linear(1096, 180, bias=True)
        def forward(self, galaxy_images_output, ebv_output):
            # Stem: conv -> PReLU -> stride-2 average pool.
            conv2d_output = self.conv2d(galaxy_images_output)
            prelu_output = self.prelu(conv2d_output)
            averagepooling2d_output = self.averagepooling2d(prelu_output)
            # Inception block 1: four parallel branches over the pooled
            # stem output, concatenated on the channel axis (dim=1).
            conv2d_output2 = self.conv2d2(averagepooling2d_output)
            prelu_output2 = self.prelu2(conv2d_output2)
            conv2d_output3 = self.conv2d3(prelu_output2)
            prelu_output3 = self.prelu3(conv2d_output3)
            conv2d_output4 = self.conv2d4(averagepooling2d_output)
            prelu_output4 = self.prelu4(conv2d_output4)
            # Asymmetric pad keeps spatial size through the stride-1 pool.
            prelu_output4 = self.asymmetric_pad(prelu_output4)
            averagepooling2d_output2 = self.averagepooling2d2(prelu_output4)
            conv2d_output5 = self.conv2d5(averagepooling2d_output)
            prelu_output5 = self.prelu5(conv2d_output5)
            conv2d_output6 = self.conv2d6(averagepooling2d_output)
            prelu_output6 = self.prelu6(conv2d_output6)
            conv2d_output7 = self.conv2d7(prelu_output6)
            prelu_output7 = self.prelu7(conv2d_output7)
            concatenate_output = torch.cat(
                (prelu_output5, prelu_output3, prelu_output7, averagepooling2d_output2),
                dim=1,
            )
            # Inception block 2 (input: 240 channels).
            conv2d_output8 = self.conv2d8(concatenate_output)
            conv2d_output9 = self.conv2d9(concatenate_output)
            conv2d_output10 = self.conv2d10(concatenate_output)
            prelu_output8 = self.prelu8(conv2d_output10)
            conv2d_output11 = self.conv2d11(prelu_output8)
            prelu_output9 = self.prelu9(conv2d_output11)
            prelu_output10 = self.prelu10(conv2d_output8)
            prelu_output10 = self.asymmetric_pad(prelu_output10)
            averagepooling2d_output3 = self.averagepooling2d3(prelu_output10)
            conv2d_output12 = self.conv2d12(concatenate_output)
            prelu_output11 = self.prelu11(conv2d_output12)
            conv2d_output13 = self.conv2d13(prelu_output11)
            prelu_output12 = self.prelu12(conv2d_output13)
            prelu_output13 = self.prelu13(conv2d_output9)
            concatenate_output2 = torch.cat(
                (
                    prelu_output13,
                    prelu_output12,
                    prelu_output9,
                    averagepooling2d_output3,
                ),
                dim=1,
            )
            # Downsample, then inception block 3 (input: 340 channels).
            averagepooling2d_output4 = self.averagepooling2d4(concatenate_output2)
            conv2d_output14 = self.conv2d14(averagepooling2d_output4)
            prelu_output14 = self.prelu14(conv2d_output14)
            conv2d_output15 = self.conv2d15(prelu_output14)
            prelu_output15 = self.prelu15(conv2d_output15)
            conv2d_output16 = self.conv2d16(averagepooling2d_output4)
            prelu_output16 = self.prelu16(conv2d_output16)
            conv2d_output17 = self.conv2d17(averagepooling2d_output4)
            prelu_output17 = self.prelu17(conv2d_output17)
            prelu_output17 = self.asymmetric_pad(prelu_output17)
            averagepooling2d_output5 = self.averagepooling2d5(prelu_output17)
            conv2d_output18 = self.conv2d18(averagepooling2d_output4)
            prelu_output18 = self.prelu18(conv2d_output18)
            conv2d_output19 = self.conv2d19(prelu_output18)
            prelu_output19 = self.prelu19(conv2d_output19)
            concatenate_output3 = torch.cat(
                (
                    prelu_output16,
                    prelu_output19,
                    prelu_output15,
                    averagepooling2d_output5,
                ),
                dim=1,
            )
            # Inception block 4 (input: 476 channels).
            conv2d_output20 = self.conv2d20(concatenate_output3)
            prelu_output20 = self.prelu20(conv2d_output20)
            conv2d_output21 = self.conv2d21(prelu_output20)
            prelu_output21 = self.prelu21(conv2d_output21)
            conv2d_output22 = self.conv2d22(concatenate_output3)
            prelu_output22 = self.prelu22(conv2d_output22)
            prelu_output22 = self.asymmetric_pad(prelu_output22)
            averagepooling2d_output6 = self.averagepooling2d6(prelu_output22)
            conv2d_output23 = self.conv2d23(concatenate_output3)
            prelu_output23 = self.prelu23(conv2d_output23)
            conv2d_output24 = self.conv2d24(prelu_output23)
            prelu_output24 = self.prelu24(conv2d_output24)
            conv2d_output25 = self.conv2d25(concatenate_output3)
            prelu_output25 = self.prelu25(conv2d_output25)
            concatenate_output4 = torch.cat(
                (
                    prelu_output25,
                    prelu_output21,
                    prelu_output24,
                    averagepooling2d_output6,
                ),
                dim=1,
            )
            # Downsample, then a final three-branch block.
            averagepooling2d_output7 = self.averagepooling2d7(concatenate_output4)
            conv2d_output26 = self.conv2d26(averagepooling2d_output7)
            prelu_output26 = self.prelu26(conv2d_output26)
            prelu_output26 = self.asymmetric_pad(prelu_output26)
            averagepooling2d_output8 = self.averagepooling2d8(prelu_output26)
            conv2d_output27 = self.conv2d27(averagepooling2d_output7)
            prelu_output27 = self.prelu27(conv2d_output27)
            conv2d_output28 = self.conv2d28(prelu_output27)
            prelu_output28 = self.prelu28(conv2d_output28)
            conv2d_output29 = self.conv2d29(averagepooling2d_output7)
            prelu_output29 = self.prelu29(conv2d_output29)
            concatenate_output5 = torch.cat(
                (prelu_output29, prelu_output28, averagepooling2d_output8), dim=1
            )
            # NOTE(review): torch.flatten without start_dim=1 folds the
            # batch dimension in as well, and the flat vector is then
            # concatenated with ebv_output on dim=0 -- so this forward()
            # appears to expect a single (unbatched or batch-of-one)
            # sample; confirm with callers.
            flatten_output = torch.flatten(concatenate_output5)
            concatenate_output6 = torch.cat((flatten_output, ebv_output), dim=0)
            dense_output = self.dense(concatenate_output6)
            prelu_output30 = self.prelu30(dense_output)
            dense_output2 = self.dense2(prelu_output30)
            prelu_output31 = self.prelu31(dense_output2)
            dense_output3 = self.dense3(prelu_output31)
            return dense_output3
    # Fixed seed so that the randomly-initialized weights are the same on
    # every call (this factory is used as a reproducible test fixture).
    torch.manual_seed(0)
    model = InceptionBlocks()
    model.eval()
    return model
| 5,335,201
|
def check_api_errors(response):
    """
    Check that we have enough api points to call to Spoonacular.
    :param response: A response object
    Raises exception if there are not enough points, otherwise does nothing.
    """
    # Every rate-limit counter Spoonacular reports; if any one of them
    # has dropped to the threshold, the key is considered exhausted.
    quota_headers = (
        "X-RateLimit-requests-Remaining",
        "X-RateLimit-results-Remaining",
        "X-RateLimit-tinyrequests-Remaining",
    )
    if any(int(response.headers[header]) <= api_out_of_points_threshold
           for header in quota_headers):
        raise ApikeyOutOfPoints("This API Key is out of points for the day")
| 5,335,202
|
def get_trained_coefficients(X_train, y_train):
    """
    Create and train a model based on the training_data_file data.

    :param X_train: feature matrix for the 'X_columns' variables
    :param y_train: target values
    :return: tuple of (fitted model, list of coefficients for the
        'X_columns' variables in the regression)
    """
    # Adapted from
    # https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html
    # NOTE: the deprecated `normalize` argument was dropped -- it was
    # removed from LinearRegression in scikit-learn 1.2 and passing it
    # raises a TypeError on current versions (it was ignored anyway when
    # fit_intercept=False). The remaining keywords spell out defaults.
    model = LinearRegression(fit_intercept=False, copy_X=True,
                             n_jobs=None, positive=False)
    model = model.fit(X_train, y_train, sample_weight=None)
    coefficients = model.coef_
    return model, coefficients
| 5,335,203
|
def register():
    """Handles the creation of a new user"""
    form = dds_web.forms.RegistrationForm()
    # Guard clause: when the form was not submitted (or failed the
    # validators declared on the form class), just render the page.
    if not form.validate_on_submit():
        return flask.render_template("user/register.html", form=form)
    # Create the new user row by loading the validated form data into
    # the schema.
    try:
        new_user = user_schemas.NewUserSchema().load(form.data)
    except marshmallow.ValidationError as valerr:
        flask.current_app.logger.warning(valerr)
        raise
    except (sqlalchemy.exc.SQLAlchemyError, sqlalchemy.exc.IntegrityError) as sqlerr:
        raise ddserr.DatabaseError from sqlerr
    # Log the user in and continue to two-factor authentication setup.
    # TODO: Change this after email is introduced
    flask_login.login_user(new_user)
    return flask.redirect(flask.url_for("auth_blueprint.two_factor_setup"))
| 5,335,204
|
def project_in_2D(K, camera_pose, mesh, resolution_px):
    """
    Project all 3D triangle vertices in the mesh into
    the 2D image of given resolution
    Parameters
    ----------
    K: ndarray
    Camera intrinsics matrix, 3x3
    camera_pose: ndarray
    Camera pose (inverse of extrinsics), 4x4
    mesh: ndarray
    Triangles to be projected in 2d, (Nx3x3)
    resolution_px: tuple
    Resolution of image in pixel
    Returns
    -------
    coords_projected_2D: ndarray
    Triangle vertices projected in 2D and clipped to
    image resolution
    """
    resolution_x_px, resolution_y_px = resolution_px  # image resolution in pixels
    # Decompose camera pose into rotation and translation
    RT = camera_pose[:-1, :]  # remove homogeneous row
    R = RT[:, :-1]  # rotation matrix 3x3
    T = RT[:, -1:]  # translation vector 3x1
    # Invert the camera pose matrix to get the camera extrinsics
    # Due to the particular matrix geometry we can avoid raw inversion:
    # for a rigid transform, inverse rotation = R^T and inverse
    # translation = -R^T * T.
    # NOTE(review): tf.matrix_transpose is the TF1.x API name
    # (tf.linalg.matrix_transpose in TF2) -- this module presumably
    # targets TF1; confirm before upgrading.
    Rc = tf.matrix_transpose(R)
    Tc = tf.matmul(-Rc, T)
    RT = tf.concat([Rc, Tc], axis=-1)  # camera extrinsics
    # Correct reference system of extrinsics matrix
    # y is down: (to align to the actual pixel coordinates used in digital images)
    # right-handed: positive z look-at direction
    correction_factor = tf.constant(value=np.array([[1., 0., 0.],
                                                    [0., -1., 0.],
                                                    [0., 0., -1.]]), dtype=tf.float32)
    RT = tf.matmul(correction_factor, RT)
    # Compose whole camera projection matrix (3x4)
    P = tf.matmul(K, RT)
    # Flatten the (N, 3, 3) triangle tensor into a list of 3D vertices.
    mesh_flat = tf.reshape(mesh, shape=(-1, 3))
    len_mesh_flat = tf.shape(mesh_flat)[0]
    # Create constant tensor to store 3D model coordinates
    # (homogeneous: append a column of ones).
    coords_3d_h = tf.concat([mesh_flat, tf.ones(shape=(len_mesh_flat, 1), dtype=tf.float32)], axis=-1)  # n_triangles, 4
    coords_3d_h = tf.transpose(coords_3d_h, perm=[1, 0])  # 4, n_triangles
    # Project 3D vertices into 2D
    coords_projected_2D_h = tf.transpose(tf.matmul(P, coords_3d_h), perm=[1, 0])  # n_triangles, 3
    # Perspective divide; the 1e-8 epsilon avoids division by zero for
    # points at (or numerically near) the camera plane.
    coords_projected_2D = coords_projected_2D_h[:, :2] / (coords_projected_2D_h[:, 2:3] + 1e-8)
    # Clip indexes in image range (-1 marks off-image on the low side).
    coords_projected_2D_x_clip = tf.clip_by_value(coords_projected_2D[:, 0:0 + 1],
                                                  clip_value_min=-1, clip_value_max=resolution_x_px)
    coords_projected_2D_y_clip = tf.clip_by_value(coords_projected_2D[:, 1:1 + 1],
                                                  clip_value_min=-1, clip_value_max=resolution_y_px)
    return tf.concat([coords_projected_2D_x_clip, coords_projected_2D_y_clip], axis=-1)
| 5,335,205
|
def forbidden(description: Any) -> APIGatewayProxyResult:
    """Return a response with FORBIDDEN status code.

    Wraps *description* in a ForbiddenError payload and builds the
    API Gateway proxy response with HTTP 403.
    """
    return _build_response(ForbiddenError(description), HTTPStatus.FORBIDDEN)
| 5,335,206
|
def str_2_datetime(p_str, fmt="%Y-%m-%d %H:%M:%S"):
    """Convert a string into a datetime object.

    :param p_str: the raw time string; a datetime instance is returned
        unchanged
    :param fmt: the time format used for parsing
    :rtype: datetime.datetime
    :raises TypeError: if ``p_str`` is neither str nor datetime
    """
    if isinstance(p_str, str):
        return datetime.datetime.strptime(p_str, fmt)
    if isinstance(p_str, datetime.datetime):
        # Already a datetime -- no transformation needed.
        return p_str
    raise TypeError("params `p_str` must be type of str")
| 5,335,207
|
def traitement(l):
    """Remove the unused blank cards from the end of the list.

    Trailing records whose second field is the string 'nan' are deleted
    in place.

    :param l: list of card records (each a sequence with at least two
        elements); mutated in place
    :return: the same list, with trailing 'nan' entries removed
    """
    # The `l and` guard fixes the IndexError the old version raised when
    # the list was empty, or became empty after all cards were removed.
    while l and l[-1][1] == 'nan':
        del l[-1]
    return l
| 5,335,208
|
def create_ivr_database(dbname):
    """Create sqlite database schema from sql script file.

    Reads ``db.sql`` from the parent directory of this module's
    directory and executes it against the (created if missing) database
    ``dbname``.

    :param dbname: path of the sqlite database file
    """
    parent_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    sql_script_path = os.path.join(parent_directory, 'db.sql')
    # `with` guarantees the script file is closed even if reading fails
    # (the old open/read/close sequence leaked the handle on error).
    with open(sql_script_path, 'r') as sql_script_file:
        sql = sql_script_file.read()
    conn = sqlite3.connect(dbname)
    try:
        # executescript commits any pending transaction and runs the
        # whole schema script in one call.
        conn.cursor().executescript(sql)
    finally:
        # Close the connection even when the script contains errors.
        conn.close()
| 5,335,209
|
def train(model, data_iterator, optimizer, scheduler, params):
    """Train the model on `steps` batches.

    :param model: the (BERT-style) token-classification model; called with
        ``(batch_data, batch_token_starts)`` plus mask/labels kwargs
    :param data_iterator: iterator yielding
        (batch_data, batch_token_starts, batch_tags) tuples
    :param optimizer: optimizer (apex-style when args.fp16 is set)
    :param scheduler: learning-rate scheduler, stepped once per batch
    :param params: config with train_steps, n_gpu and clip_grad

    NOTE(review): this function also reads the module-level ``args``
    (multi_gpu / fp16 flags) -- it is not one of its parameters.
    """
    # set model to training mode
    model.train()
    # scheduler.step()
    # a running average object for loss
    loss_avg = utils.RunningAverage()
    # Use tqdm for progress bar
    one_epoch = trange(params.train_steps)
    for batch in one_epoch:
        # fetch the next training batch
        batch_data, batch_token_starts, batch_tags = next(data_iterator)
        batch_masks = batch_data.gt(0) # get padding mask (non-zero token ids)
        # compute model output and loss (loss is the first element returned)
        loss = model((batch_data, batch_token_starts), token_type_ids=None, attention_mask=batch_masks, labels=batch_tags)[0]
        if params.n_gpu > 1 and args.multi_gpu:
            loss = loss.mean() # mean() to average on multi-gpu
        # clear previous gradients, compute gradients of all variables wrt loss
        model.zero_grad()
        if args.fp16:
            # apex-style optimizer performs the scaled backward itself
            optimizer.backward(loss)
        else:
            loss.backward()
        # gradient clipping
        nn.utils.clip_grad_norm_(parameters=model.parameters(), max_norm=params.clip_grad)
        # performs updates using calculated gradients
        # NOTE(review): scheduler.step() runs before optimizer.step();
        # PyTorch >= 1.1 expects the opposite order -- confirm intended.
        scheduler.step()
        optimizer.step()
        # update the average loss
        loss_avg.update(loss.item())
        one_epoch.set_postfix(loss='{:05.3f}'.format(loss_avg()))
| 5,335,210
|
def save_audio(text: str, filename: str, dir: str):
    """
    Converts text to audio and saves it as an mp3 file.

    Notes
    -----
    If the .mp3 file extension is missing in the filename, it will be added
    If a file with the same name exists, it will not save, only notify the user

    Returns
    _______
    Path : str
    """
    # Make the path to the folder
    path = '{0}/{1}'.format(dir, filename)
    if not filename.endswith('.mp3'):
        path += '.mp3'
    # Only saves when file does not exist
    if os.path.isfile(path):
        # Fixed message typo ("safe" -> "save"). Speech synthesis is now
        # skipped entirely in this branch -- the old code performed the
        # (slow, network-bound) gTTS call even when nothing was written.
        print("File named {0} already exist, will not save".format(path))
    else:
        # Generate and save the audio file
        tts = gTTS(text=text, lang='en')
        tts.save(path)
    return path
| 5,335,211
|
def build_report(test_controller):
    """Report on the test results."""
    options = test_controller.options
    citest_log_dir = os.path.join(options.log_dir, 'citest_logs')
    # Without citest logs there is nothing to report on.
    if not os.path.exists(citest_log_dir):
        logging.warning('%s does not exist -- no citest logs.', citest_log_dir)
        return
    # Build the HTML index from all journal files in the log directory.
    report_command = (
        'cd {log_dir}'
        '; python -m citest.reporting.generate_html_report --index *.journal'
        .format(log_dir=citest_log_dir))
    response = run_quick(report_command)
    if response.returncode != 0:
        logging.error('Error building report: %s', response.stdout)
    logging.info('Logging information is in %s', options.log_dir)
    return test_controller.build_summary()
| 5,335,212
|
def group_frames_by_track_date(frames):
    """Classify frames by track and date.

    :param frames: iterable of elasticsearch-style hit dicts with keys
        '_id' and 'fields' (whose 'partial'[0] carries urls, location
        and metadata)
    :return: dict with
        hits:       SLC id -> "<product url>/<archive filename>"
        grouped:    track number -> {acquisition day -> sorted SLC ids}
        dates:      SLC id -> [start datetime, end datetime]
        footprints: SLC id -> frame location
        metadata:   SLC id -> raw metadata dict
    :raises RuntimeError: when an id does not match SLC_RE
    """
    hits = {}
    grouped = {}
    dates = {}
    footprints = {}
    metadata = {}
    for h in frames:
        # Skip duplicate ids -- first occurrence wins.
        if h['_id'] in hits: continue
        fields = h['fields']['partial'][0]
        #print("h['_id'] : %s" %h['_id'])
        # get product url; prefer S3
        prod_url = fields['urls'][0]
        if len(fields['urls']) > 1:
            for u in fields['urls']:
                if u.startswith('s3://'):
                    prod_url = u
                    break
        #print("prod_url : %s" %prod_url)
        hits[h['_id']] = "%s/%s" % (prod_url, fields['metadata']['archive_filename'])
        # Parse acquisition timestamps out of the SLC id itself.
        match = SLC_RE.search(h['_id'])
        #print("match : %s" %match)
        if not match:
            raise RuntimeError("Failed to recognize SLC ID %s." % h['_id'])
        # Midnight of the start day is the grouping key.
        day_dt = datetime(int(match.group('start_year')),
                          int(match.group('start_month')),
                          int(match.group('start_day')),
                          0, 0, 0)
        #print("day_dt : %s " %day_dt)
        # insort keeps each day's id list sorted as ids are inserted.
        bisect.insort(grouped.setdefault(fields['metadata']['trackNumber'], {}) \
                             .setdefault(day_dt, []), h['_id'])
        slc_start_dt = datetime(int(match.group('start_year')),
                                int(match.group('start_month')),
                                int(match.group('start_day')),
                                int(match.group('start_hour')),
                                int(match.group('start_min')),
                                int(match.group('start_sec')))
        #print("slc_start_dt : %s" %slc_start_dt)
        slc_end_dt = datetime(int(match.group('end_year')),
                              int(match.group('end_month')),
                              int(match.group('end_day')),
                              int(match.group('end_hour')),
                              int(match.group('end_min')),
                              int(match.group('end_sec')))
        #print("slc_end_dt : %s" %slc_end_dt)
        dates[h['_id']] = [ slc_start_dt, slc_end_dt ]
        footprints[h['_id']] = fields['location']
        metadata[h['_id']] = fields['metadata']
        #break
    #print("grouped : %s" %grouped)
    logger.info("grouped keys : %s" %grouped.keys())
    return {
        "hits": hits,
        "grouped": grouped,
        "dates": dates,
        "footprints": footprints,
        "metadata": metadata,
    }
| 5,335,213
|
def generate_violin_figure(dataframe, columns, ytitle, legend_title=None):
    """ Plot 2 columns of data as violin plot, grouped by block.
    :param dataframe: Variance of projections.
    :type dataframe: pandas.DataFrame
    :param columns: 2 columns for the negative and the positive side of the violins.
    :type columns: list
    :param ytitle: Title of Y-axis. What is being plotted? What are the units of the data?
    :type ytitle: str
    :param legend_title: What's the common denominator of the columns?
    :type legend_title: str
    :return: Figure object of graph.
    :rtype: plotly.graph_objs.Figure
    """
    legend = go.layout.Legend(
        xanchor='right',
        yanchor='top',
        orientation='v',
        title=legend_title,
    )
    fig = go.Figure()
    fig.layout.update(xaxis_title='Task',
                      yaxis_title=ytitle,
                      legend=legend,
                      margin=theme['graph_margins'])
    # An empty frame still yields a (blank) figure with the axes set up.
    if dataframe.empty:
        return fig
    # Make sure we plot only 2 columns, left and right.
    columns = columns[:2]
    # First column goes on the negative (left) half of each violin, the
    # second on the positive (right) half.
    sides = ('negative', 'positive')
    grouped = dataframe.groupby('task')
    for name, group_df in grouped:
        for i, col in enumerate(columns):
            # NOTE(review): task_order and theme are module-level
            # lookups (task -> x position, color/margin config).
            fig.add_trace(go.Violin(x=group_df['task'].map(task_order),
                                    y=group_df[col],
                                    legendgroup=col, scalegroup=col, name=col,
                                    side=sides[i],
                                    pointpos=i - 0.5,
                                    line_color=theme[col],
                                    text=[f"{col}<br />participant: {j['user']}<br />"
                                          f"block: {j['block']}<br />condition: {j['condition']}"
                                          for _, j in group_df.iterrows()],
                                    hoverinfo='y+text',
                                    spanmode='hard',
                                    showlegend=bool(name == dataframe['task'].unique()[0]),  # Only 1 legend.
                                    )
                          )
    # update characteristics shared by all traces
    fig.update_traces(meanline={'visible': True, 'color': 'dimgray'},
                      box={'visible': True, 'width': 0.5, 'line_color': 'dimgray'},
                      points='all',  # Show all points.
                      jitter=0.1,  # Add some jitter on points for better visibility.
                      scalemode='count')  # Scale violin plot area with total count.
    fig.update_layout(violingap=0, violingroupgap=0, violinmode='overlay', hovermode='closest')
    fig.update_xaxes(tickvals=task_order[dataframe['task'].unique()],
                     ticktext=task_order[dataframe['task'].unique()].index)
    fig.update_yaxes(zeroline=True, zerolinewidth=2, zerolinecolor='LightPink')
    return fig
| 5,335,214
|
def measure_single(state, bit):
    """
    Measure a single qubit once.

    :param state: full state tensor with one axis per qubit
    :param bit: index of the qubit to measure
    :return: tuple of (measured value 0 or 1, collapsed state)
    """
    n_axes = len(state.shape)
    # The measured qubit lives on axis (n_axes - 1 - bit); sum the
    # probabilities over every other axis to get its marginal.
    reduce_axes = tuple(a for a in range(n_axes) if a != n_axes - 1 - bit)
    probs = np.sum(np.abs(state) ** 2, axis=reduce_axes)
    # Sample the measurement outcome from the marginal distribution.
    out = 0 if np.random.rand() < probs[0] else 1
    prob = probs[out]
    # Projector onto the observed outcome, renormalized by 1/sqrt(prob).
    if out == 0:
        matrix = np.array([[1.0 / np.sqrt(prob), 0.0],
                           [0.0, 0.0]], complex)
    else:
        matrix = np.array([[0.0, 0.0],
                           [0.0, 1.0 / np.sqrt(prob)]], complex)
    # Collapse the measured qubit by applying the projector.
    state = transfer_state(state, matrix, [bit])
    return out, state
| 5,335,215
|
def anchor_to_offset(anchors, ground_truth):
    """Encodes the anchor regression predictions with the
    ground truth.
    Args:
        anchors: A numpy array of shape (N, 6) representing
            the generated anchors.
        ground_truth: A numpy array of shape (6,) containing
            the label boxes in the anchor format.
    Returns:
        anchor_offsets: A numpy array of shape (N, 6)
            encoded/normalized with the ground-truth, representing the
            offsets.
    """
    fc.check_anchor_format(anchors)
    anchors = np.asarray(anchors).reshape(-1, 6)
    ground_truth = np.reshape(ground_truth, (6,))
    centers = anchors[:, :3]
    dims = anchors[:, 3:]
    # Center offsets, normalized by the anchor dimensions:
    # t_c_gt = (c_gt - c_anch) / dim_c_anch for c in (x, y, z).
    center_offsets = (ground_truth[:3] - centers) / dims
    # Dimension offsets as log ratios: t_d_gt = log(dim_gt / dim_anch).
    dim_offsets = np.log(ground_truth[3:] / dims)
    # Column order matches the anchor format: x, y, z, dx, dy, dz.
    return np.hstack((center_offsets, dim_offsets))
| 5,335,216
|
def horizontal_flip(img_array):
    """Return the image mirrored left-to-right."""
    # flipCode=1 flips around the vertical (y) axis.
    return cv2.flip(img_array, 1)
| 5,335,217
|
async def repeat(interval, func, *args, **kwargs):
    """Run func every interval seconds.
    source: https://stackoverflow.com/a/55505152/13989012
    If func has not finished before *interval*, will run again
    immediately when the previous iteration finished.
    *args and **kwargs are passed as the arguments to func.
    """
    return_exceptions = kwargs.pop('return_exceptions', False)
    while True:
        async with context_exception_handler():
            # Run the work and the interval timer concurrently: each
            # cycle lasts at least `interval` seconds, or as long as the
            # call itself when it overruns the interval.
            work = func(*args, **kwargs)
            pause = asyncio.sleep(interval)
            await asyncio.gather(work, pause,
                                 return_exceptions=return_exceptions)
| 5,335,218
|
def chunker(file_path):
    """
    Read all non-empty lines from a file.

    Each line is stripped of surrounding whitespace and blank lines are
    skipped. (Note: despite the name, the whole file is read, not a
    single block -- the old docstring was inaccurate.)

    :param file_path: path of the text file to read
    :return: list of the stripped, non-empty lines
    """
    with open(file_path, 'r') as file_object:
        # Comprehension form of the old strip-and-append loop.
        return [stripped
                for stripped in (line.strip() for line in file_object)
                if stripped]
| 5,335,219
|
def c2c_dist(commande, octree_lvl=0):
    """
    Commande CC cloud2cloud distance.

    Appends the CloudCompare C2C-distance options to `commande` and runs
    the resulting command; with a non-zero `octree_lvl`, the comparison
    is restricted to that octree level.
    """
    flags = " -C2C_DIST -split_xyz"
    if octree_lvl != 0:
        flags += " -octree_level " + str(octree_lvl)
    subprocess.call(commande + flags + " -save_clouds")
    return True
| 5,335,220
|
def main(argv=None):
    """script main.
    parses command line options in sys.argv, unless *argv* is given.

    Interleaves two fastq files (-a/-b or two positional arguments) into
    a single FASTA-style stream on stdout, checking that reads are
    properly paired.
    """
    if not argv:
        argv = sys.argv
    # setup command line parser
    parser = E.ArgumentParser(description=__doc__)
    parser.add_argument("--version", action='version', version="1.0")
    parser.add_argument(
        "-a", "--first-fastq-file", dest="fastq1", type=str,
        help="supply read1 fastq file")
    parser.add_argument(
        "-b", "--second-fastq-file", dest="fastq2", type=str,
        help="supply read2 fastq file")
    # add common options (-h/--help, ...) and parse command line
    (args, unknown) = E.start(parser,
                              argv=argv,
                              unknowns=True)
    # Two bare positional arguments override the -a/-b options.
    if unknown and len(unknown) == 2:
        args.fastq1, args.fastq2 = unknown
    fastq1 = iotools.open_file(args.fastq1)
    fastq2 = iotools.open_file(args.fastq2)
    E.info("iterating over fastq files")
    f1_count = 0
    # zip_longest yields None for the shorter file, which is how a
    # length mismatch is detected below.
    for f1, f2 in zip_longest(Fastq.iterate(fastq1),
                              Fastq.iterate(fastq2)):
        # NOTE(review): the second clause `(not f2 and f1)` is already
        # covered by `not (f1 and f2)` and therefore redundant.
        if not (f1 and f2) or (not f2 and f1):
            # Raise-then-rewrap preserves the traceback of the original
            # PairedReadError.
            try:
                raise PairedReadError(
                    "unpaired reads detected. Are files sorted? are "
                    "files of equal length?")
            except PairedReadError as e:
                raise PairedReadError(e).with_traceback(sys.exc_info()[2])
        else:
            assert f1.identifier.endswith("/1") and \
                f2.identifier.endswith("/2"), \
                "Reads in file 1 must end with /1 and reads in file 2 with /2"
            args.stdout.write(
                ">%s\n%s\n>%s\n%s\n" %
                (f1.identifier, f1.seq, f2.identifier, f2.seq))
            f1_count += 1
    E.info("output: %i pairs" % f1_count)
    # write footer and output benchmark information.
    E.stop()
| 5,335,221
|
def legalize_names(varnames):
    """returns a dictionary for conversion of variable names to legal
    parameter names.
    """
    translated = {}
    for original in varnames:
        # Escape existing underscores first, then replace the illegal
        # characters "$" and "." with single underscores.
        legal = original.replace("_", "__").replace("$", "_").replace(".", "_")
        assert legal not in translated
        translated[original] = legal
    return translated
| 5,335,222
|
def _IsUidUsed(uid):
    """Check if there is any process in the system running with the given user-id

    @type uid: integer
    @param uid: the user-id to be checked.

    """
    result = utils.RunCmd([constants.PGREP, "-u", uid])
    # pgrep exits 0 when at least one process matched and 1 when none
    # did; anything else means pgrep itself failed to run.
    if result.exit_code not in (0, 1):
        raise errors.CommandError("Running pgrep failed. exit code: %s"
                                  % result.exit_code)
    return result.exit_code == 0
| 5,335,223
|
def _pyint_to_mpz(n, a):
    """
    Set `a` from `n`.
    :type n: int,long
    :type a: mpz_t
    """
    # Use the fastest native setter that can hold the value; only values
    # outside both C long ranges go through the slower string path.
    if -sys.maxsize - 1 <= n <= sys.maxsize:
        # Fits in a signed C long.
        gmp.mpz_set_si(a, n)
    elif sys.maxsize < n <= MAX_UI:
        # Positive value that still fits in an unsigned C long.
        gmp.mpz_set_ui(a, n)
    else:
        # Arbitrary precision: go through a hex string (base auto-detected
        # from the "0x" prefix by passing base 0).
        gmp.mpz_set_str(a, hex(n).rstrip('L').encode('UTF-8'), 0)
| 5,335,224
|
def clean_migrations(c):
    """Removes all migration jobs
    Usage: inv pod.clean-migrations
    """
    # Delete every pod labelled as belonging to the "migrate" job in the
    # configured namespace.
    command = f"kubectl delete pods -n {c.config.namespace} -ljob-name=migrate"
    c.run(command)
| 5,335,225
|
def main_L8CCA():
    """ Demo L8CCA data loading. """
    base_path = Path(
        "datasets/clouds/"
        + "Landsat-Cloud-Cover-Assessment-Validation-Data-Partial"
    )
    img_paths = [base_path / "Barren" / "LC81390292014135LGN00",
                 base_path / "Forest" / "LC80160502014041LGN00"]
    # (split name, (start fraction, end fraction)) pairs.
    split_specs = (
        ("train", (0., 0.8)),
        ("validation", (0.8, 0.95)),
        ("test", (0.95, 1.)),
    )
    for name, data_part in split_specs:
        generator = DG_L8CCA(img_paths=img_paths, batch_size=16,
                             data_part=data_part, with_gt=True)
        # Take the third batch and keep the first mask channel only.
        batch_x, batch_y = generator[2]
        batch_y = batch_y[:, :, :, 0]
        # Show one sample image and its ground-truth mask side by side.
        plt.figure()
        plt.subplot(1, 3, 1)
        plt.imshow(strip_nir(batch_x[0]))
        plt.title(f"Split: { name }\n sample image")
        plt.subplot(1, 3, 2)
        plt.imshow(batch_y[0])
        plt.title(f"Split: { name }\n sample gt mask")
        plt.show()
| 5,335,226
|
def mobilenetv3_large(data_channel):
    """
    Constructs a MobileNetV3-Large model

    :param data_channel: number of input channels for the network
    :return: the MobileNetV3 backbone, named BackboneName.MobileNetv3_large
    """
    # Per-row block config. Columns:
    #   k  = kernel size, t = expansion size, c = output channels,
    #   SE = squeeze-and-excite flag, NL = nonlinearity selector
    #   (presumably 0=ReLU / 1=h-swish, per the MobileNetV3 paper --
    #   confirm against the MobileNetV3 implementation), s = stride.
    cfgs = [
        # k, t, c, SE, NL, s
        [3, 16, 16, 0, 0, 1],
        [3, 64, 24, 0, 0, 2],
        [3, 72, 24, 0, 0, 1],
        [5, 72, 40, 1, 0, 2],
        [5, 120, 40, 1, 0, 1],
        [5, 120, 40, 1, 0, 1],
        [3, 240, 80, 0, 1, 2],
        [3, 200, 80, 0, 1, 1],
        [3, 184, 80, 0, 1, 1],
        [3, 184, 80, 0, 1, 1],
        [3, 480, 112, 1, 1, 1],
        [3, 672, 112, 1, 1, 1],
        [5, 672, 160, 1, 1, 1],
        [5, 672, 160, 1, 1, 2],
        [5, 960, 160, 1, 1, 1]
    ]
    model = MobileNetV3(cfgs, mode='large', data_channel=data_channel)
    model.set_name(BackboneName.MobileNetv3_large)
    return model
| 5,335,227
|
def ravel_lom_dims(tensor, name='ravel_lom_dims'):
    """Assumes LOM is in the last 3 dims.

    Flattens the trailing three (LOM) dimensions into a single axis,
    leaving all leading dimensions intact.
    """
    # NOTE(review): `shape_as_list()` is not a plain tf.Tensor method
    # (that would be `shape.as_list()`), so `tensor` is presumably a
    # project wrapper type that provides it -- confirm at the call sites.
    return tf.reshape(tensor, tensor.shape_as_list()[:-3] + [-1], name=name)
| 5,335,228
|
def run_cv(cfg, df, horiz, freq, cv_start, cv_stride=1, dc_dict=None,
           metric="smape"):
    """Run a sliding-window temporal cross-validation (aka backtest) using a
    given forecasting function (`func`).

    :param cfg: tuple of (params string "model_type|...", forecasting
        function func(y, horizon, freq, dc=...))
    :param df: dataframe with a "demand" column and a datetime index
    :param horiz: forecast horizon length
    :param freq: pandas frequency string for the time index
    :param cv_start: index at which backtest windows begin
    :param cv_stride: step between successive backtest windows
    :param dc_dict: mapping from history length i to the dc= argument
        passed to func. NOTE(review): despite the None default, every
        call path indexes dc_dict -- it appears to be required; confirm.
    :param metric: error metric name passed to calc_metrics
    :return: single-row results dataframe with metrics, backtest arrays
        and the final forecast
    """
    y = df["demand"].values
    # allow only 1D time-series arrays
    assert(y.ndim == 1)
    params, func = cfg
    # A single observation cannot support a backtest window; pad one
    # dummy value on the left so the sliding windows below are valid.
    if len(y) == 1:
        y = np.pad(y, [1, 0], constant_values=1)
    # the cross-val horizon length may shrink depending on the length of
    # historical data; shrink the horizon if it is >= the timeseries
    if horiz >= len(y):
        cv_horiz = len(y) - 1
    else:
        cv_horiz = horiz
    # Rebuild a timestamp array matching y (the padding above may have
    # made y one element longer than df).
    if len(df) == len(y):
        ts = df.index
    else:
        assert len(y) > len(df)
        diff = len(y) - len(df)
        ts = np.append(
            pd.date_range(end=df.index[0], freq=freq, periods=diff+1), df.index)
    # sliding window horizon actuals
    Y = sliding_window_view(y[cv_start:], cv_horiz)[::cv_stride,:]
    Ycv = []
    # | y | horiz |..............|
    #   | y | horiz |.............|
    #     | y | horiz |............|
    #     ::
    #     ::
    #             | y | horiz |
    # For each window: fit/forecast on the history y[:i] and predict the
    # next cv_horiz steps.
    for i in range(cv_start, len(y)-cv_horiz+1, cv_stride):
        yp = func(y[:i], cv_horiz, freq, dc=dc_dict[i])
        Ycv.append(yp)
    # keep the backtest forecasts at each cv_stride
    Ycv = np.vstack(Ycv)
    # keep the backtest forecast time indices
    Yts = sliding_window_view(ts[cv_start:], cv_horiz)[::cv_stride,:]
    assert Yts.shape == Y.shape
    assert Yts.shape == Ycv.shape
    assert not np.any(np.isnan(Ycv))
    assert Ycv.shape == Y.shape
    # calc. error metrics
    df_results = calc_metrics(Y, Ycv, metric)
    # params is "model_type|..." -- the prefix identifies the model.
    df_results.insert(0, "model_type", params.split("|")[0])
    df_results.insert(1, "params", params)
    # store the final backtest window actuals and predictions
    df_results["y_cv"] = [Y]
    df_results["yp_cv"] = [Ycv]
    df_results["ts_cv"] = [Yts]
    # generate the final forecast (1-dim) using the full history
    df_results["yhat"] = [func(y, horiz, freq, dc=dc_dict[len(y)-1])]
    return df_results
| 5,335,229
|
def ST_Area(geos):
    """
    Calculate the 2D Cartesian (planar) area of geometry.
    :type geos: Series(dtype: object)
    :param geos: Geometries in WKB form.
    :rtype: Series(dtype: float64)
    :return: The value that represents the area of geometry.
    :example:
      >>> import pandas
      >>> import arctern
      >>> data = ["POLYGON((0 0,1 0,1 1,0 1,0 0))", "POLYGON((0 0,0 8,8 8,8 0,0 0))"]
      >>> data = pandas.Series(data)
      >>> rst = arctern.ST_Area(arctern.ST_GeomFromText(data))
      >>> print(rst)
          0     1.0
          1     64.0
          dtype: float64
    """
    # Local import keeps pyarrow optional until this function is used.
    import pyarrow as pa
    arr_geos = pa.array(geos, type='binary')
    return arctern_caller(arctern_core_.ST_Area, arr_geos)
| 5,335,230
|
def check_conda_packages(edit_mode=False, packages=None):
    """Check conda installed packages information, filtering for packages.

    It is Python/Conda environment dependent.

    Args:
        edit_mode (bool): If True, also include packages installed in
            development ("editable") mode via ``conda list --develop``.
        packages: Optional iterable of package names; when given, the result
            is narrowed to those packages via ``_filter``.

    Returns:
        dict(str): Dictionary filled with respective information.
    """
    info = {'CONDA PACKAGES': {}}
    all_packages = ''
    try:
        if not edit_mode:
            all_packages = _run_subprocess_split(['conda', 'list', '--no-pip', '--export'])
        else:
            all_packages = _run_subprocess_split(['conda', 'list', '--no-pip',
                                                  '--export', '--develop'])
    except (subprocess.CalledProcessError, FileNotFoundError2and3):
        info['CONDA PACKAGES']['Status'] = 'Conda not available!'
    else:
        # Split lines and drop the three "# ..." header lines conda emits.
        line_packages = all_packages.split("\n")[3:]
        # Each remaining line looks like "name=version=build"; keep name/version.
        for line in line_packages:
            cleaned = ' '.join(line.split('=')).split()
            # Skip blank or malformed lines (e.g. a trailing newline) which
            # previously raised IndexError on cleaned[0].
            if len(cleaned) < 2:
                continue
            info['CONDA PACKAGES'][cleaned[0]] = cleaned[1]
        if packages:
            info['CONDA PACKAGES'] = _filter(info['CONDA PACKAGES'], packages)
    return info
| 5,335,231
|
def prove(domain, account):
    """Create domain based self-verification proof.

    Prompts for a domain if none was supplied, resolves the on-chain address
    for the given local account index, builds the proof and prints it.
    Optionally appends VERIFICATION_DOMAIN to the iscc_registry env file.

    :param domain: Domain to prove (e.g. https://example.com/); prompted if falsy.
    :param account: Index into the web3 client's local account list.
    """
    if not domain:
        domain = click.prompt("Domain to prove (example: https://example.com/)")
    w3 = w3_client()
    address = w3.eth.accounts[account]
    proof = create_proof(domain, address)
    # Persist the domain for future runs, or remind the user to set it manually.
    if click.confirm(f"Save {domain} to {iscc_registry.ENV_PATH}?"):
        with open(iscc_registry.ENV_PATH, "a+") as outf:
            outf.write(f"VERIFICATION_DOMAIN={domain}\n")
        click.echo("Self-verification domain saved.")
    else:
        click.echo(f"Remember to set VERIFICATION_DOMAIN env variable to {domain}")
    # The proof must be published at this well-known URL for verification.
    url = domain + "/iscc-proof.json"
    click.echo(
        f"Publish the following data at {url} to validate your account {address}:"
    )
    click.echo(proof)
| 5,335,232
|
async def test_get_bluetooth(aresponses: ResponsesMockServer) -> None:
    """Test getting bluetooth information."""
    # Stub the device endpoint with a canned JSON fixture so no real
    # LaMetric hardware is contacted.
    aresponses.add(
        "127.0.0.2:4343",
        "/api/v2/device/bluetooth",
        "GET",
        aresponses.Response(
            status=200,
            headers={"Content-Type": "application/json"},
            text=load_fixture("bluetooth.json"),
        ),
    )
    async with aiohttp.ClientSession() as session:
        demetriek = LaMetricDevice(host="127.0.0.2", api_key="abc", session=session)
        bluetooth = await demetriek.bluetooth()

        # Expected values mirror the bluetooth.json fixture contents.
        assert bluetooth
        assert bluetooth.active is True
        assert bluetooth.address == "AA:BB:CC:DD:EE:FF"
        assert bluetooth.available is True
        assert bluetooth.discoverable is True
        assert bluetooth.name == "LM1234"
        assert bluetooth.pairable is True
| 5,335,233
|
def P_from_K_R_t(K, R, t):
    """Return the 3x4 projection matrix P = K [R | t].

    All inputs are promoted to float64 before multiplication: K is the
    intrinsic matrix, R the rotation and t the translation column.
    """
    intrinsics = K.astype(np.float64)
    extrinsics = np.column_stack((R.astype(np.float64), t.astype(np.float64)))
    return matmul(intrinsics, extrinsics)
| 5,335,234
|
def multi_value_precondition(parameter_selector: List[Union[int, str]], predicate: Callable[..., bool],
                             exception_factory: Union[Type[BaseException], Callable[[OrderedDict], BaseException]]
                             =PreconditionViolatedError) -> Any:
    """
    This is a factory that will create a decorator for a method based on a parameter selector and a predicate. The
    decorator will cause the method to raise an Exception (PreConditionViolatedError) if the selected parameters do not
    satisfy the predicate.
    :param parameter_selector: a selector that indicates which parameters of the method should be checked. This may
    be ints for positional parameters or strings for keyword parameters. The parameter_selector will indicate some
    parameters, these will be passed (positionally in the listed order) to the predicate.
    :param predicate: a predicate that evaluates parameters (function that returns True or False)
    :param exception_factory: Either an Exception class or a Callable that can create the desired Exception (defaults
    to PreconditionViolatedError)
    :return: a decorator based on the passed parameter selector and predicate
    """
    def decorator(decorated_function):
        """
        This decorator adds a check to this function that one of its parameters matches a predicate
        :param decorated_function: The function to be decorated
        :return: The decorated function
        """
        # Validate the selector/factory against the signature once, at
        # decoration time, so misuse fails early rather than per call.
        _signature = signature(decorated_function)
        _verify_decorator_correctness(_signature, parameter_selector, exception_factory)
        @wraps(decorated_function)
        def function_with_condition(*args, **kwargs):
            """
            a decorated function that checks parameter values of the original match a given predicate.
            If the parameters do not match, the original function is never called.
            :param args: The positional arguments for the original function
            :param kwargs: The keyword arguments for the original function
            :return: The result of the function if the parameters matched the predicate
            :raises: PreConditionViolatedError if the parameters of the function do not match the predicate
            """
            # Bind the actual call arguments, then pull out just the selected ones.
            arguments = _get_bound_arguments(_signature, *args, **kwargs)
            selected_parameters = _get_key_value_pairs(arguments, parameter_selector)
            if not predicate(*selected_parameters.values()):
                if isinstance(exception_factory, type) and issubclass(exception_factory, BaseException):
                    # Exception class supplied: compose a descriptive message ourselves.
                    parameter_descriptions = map(lambda key_value: _parameter_description(*key_value),
                                                 selected_parameters.items())
                    descriptions = ', '.join(parameter_descriptions).capitalize()
                    message = f"{descriptions} failed to pass precondition {predicate.__name__}"
                    raise exception_factory(message)
                elif isinstance(exception_factory, FunctionType):
                    # Factory function supplied: it builds the exception from the params.
                    # NOTE(review): FunctionType matches plain functions/lambdas but
                    # rejects other callables (e.g. functools.partial) — confirm intended.
                    raise exception_factory(selected_parameters)
                else:
                    raise MalformedDecoratorError(f'Incorrect type for exception_factory: {type(exception_factory)}')
            return decorated_function(*args, **kwargs)
        return function_with_condition
    return decorator
| 5,335,235
|
def activity_list_retrieve_view(request):  # activityListRetrieve
    """
    Retrieve activity so we can populate the news page.

    Builds a single reverse-chronological list from three sources:
    1) NOTICE_FRIEND_ENDORSEMENTS_SEED entries for this voter,
    2) ActivityPost entries visible to this voter, and
    3) one level of comments (plus their child comments) per entry.

    :param request: HttpRequest; the voter is identified via voter_device_id,
        and results may be limited with 'activity_tidbit_we_vote_id_list[]'.
    :return: JSON HttpResponse with 'status', 'success' and 'activity_list'.
    """
    status = ''
    activity_list = []
    activity_manager = ActivityManager()
    activity_notice_seed_list = []
    activity_post_list = []
    voter_device_id = get_voter_device_id(request)  # We standardize how we take in the voter_device_id
    voter_friend_we_vote_id_list = []
    voter_we_vote_id = ''
    activity_tidbit_we_vote_id_list = request.GET.getlist('activity_tidbit_we_vote_id_list[]')
    activity_tidbit_we_vote_id_list = list(filter(None, activity_tidbit_we_vote_id_list))
    if positive_value_exists(voter_device_id):
        voter_we_vote_id = fetch_voter_we_vote_id_from_voter_device_link(voter_device_id)
    if not positive_value_exists(voter_we_vote_id):
        # Without a voter we cannot scope the activity; bail out early.
        status += "RETRIEVE_ACTIVITY_LIST_MISSING_VOTER_WE_VOTE_ID "
        json_data = {
            'status': status,
            'success': False,
            'activity_list': activity_list,
        }
        return HttpResponse(json.dumps(json_data), content_type='application/json')

    # Retrieve the NOTICE_FRIEND_ENDORSEMENTS_SEED and the ActivityPost entries below
    results = activity_manager.retrieve_activity_notice_seed_list_for_recipient(
        recipient_voter_we_vote_id=voter_we_vote_id,
        kind_of_seed_list=[NOTICE_FRIEND_ENDORSEMENTS_SEED],
        limit_to_activity_tidbit_we_vote_id_list=activity_tidbit_we_vote_id_list)
    if results['success']:
        activity_notice_seed_list = results['activity_notice_seed_list']
        voter_friend_we_vote_id_list = results['voter_friend_we_vote_id_list']
    else:
        status += results['status']
        status += "RETRIEVE_ACTIVITY_LIST_FAILED "

    for activity_notice_seed in activity_notice_seed_list:
        new_positions_entered_count = 0
        position_name_list = []
        position_we_vote_id_list = []
        # In this scenario we want to return both friends and public values
        # Position names
        if positive_value_exists(activity_notice_seed.position_names_for_friends_serialized):
            position_name_list += json.loads(activity_notice_seed.position_names_for_friends_serialized)
        if positive_value_exists(activity_notice_seed.position_names_for_public_serialized):
            position_name_list += json.loads(activity_notice_seed.position_names_for_public_serialized)
        # Position we_vote_ids
        if positive_value_exists(activity_notice_seed.position_we_vote_ids_for_friends_serialized):
            position_we_vote_id_list += json.loads(activity_notice_seed.position_we_vote_ids_for_friends_serialized)
        if positive_value_exists(activity_notice_seed.position_we_vote_ids_for_public_serialized):
            position_we_vote_id_list += json.loads(activity_notice_seed.position_we_vote_ids_for_public_serialized)
        new_positions_entered_count += len(position_we_vote_id_list)
        if not positive_value_exists(activity_notice_seed.we_vote_id):
            # Saving assigns a we_vote_id, which the client needs for activityTidbitKey.
            try:
                activity_notice_seed.save()
            except Exception as e:
                status += "COULD_NOT_UPDATE_SEED_WE_VOTE_ID: " + str(e) + ' '
        activity_notice_seed_dict = {
            'date_created':             activity_notice_seed.date_of_notice.strftime('%Y-%m-%d %H:%M:%S'),
            'date_last_changed':        activity_notice_seed.date_last_changed.strftime('%Y-%m-%d %H:%M:%S'),
            'date_of_notice':           activity_notice_seed.date_of_notice.strftime('%Y-%m-%d %H:%M:%S'),
            'id':                       activity_notice_seed.id,  # We normalize to generate activityTidbitKey
            'activity_notice_seed_id':  activity_notice_seed.id,
            'kind_of_activity':         "ACTIVITY_NOTICE_SEED",
            'kind_of_seed':             activity_notice_seed.kind_of_seed,
            'new_positions_entered_count': new_positions_entered_count,
            'position_name_list':       position_name_list,
            'position_we_vote_id_list': position_we_vote_id_list,
            'speaker_name':             activity_notice_seed.speaker_name,
            'speaker_organization_we_vote_id': activity_notice_seed.speaker_organization_we_vote_id,
            'speaker_voter_we_vote_id': activity_notice_seed.speaker_voter_we_vote_id,
            'speaker_profile_image_url_medium': activity_notice_seed.speaker_profile_image_url_medium,
            'speaker_profile_image_url_tiny': activity_notice_seed.speaker_profile_image_url_tiny,
            'speaker_twitter_handle':   activity_notice_seed.speaker_twitter_handle,
            'speaker_twitter_followers_count': activity_notice_seed.speaker_twitter_followers_count,
            'we_vote_id':               activity_notice_seed.we_vote_id,
        }
        activity_list.append(activity_notice_seed_dict)

    # ####################################################
    # Retrieve entries directly in the ActivityPost table
    results = activity_manager.retrieve_activity_post_list_for_recipient(
        recipient_voter_we_vote_id=voter_we_vote_id,
        limit_to_activity_tidbit_we_vote_id_list=activity_tidbit_we_vote_id_list,
        voter_friend_we_vote_id_list=voter_friend_we_vote_id_list)
    if results['success']:
        activity_post_list = results['activity_post_list']
    else:
        status += results['status']
        status += "RETRIEVE_ACTIVITY_POST_LIST_FAILED "

    for activity_post in activity_post_list:
        date_created_string = ''
        if activity_post.date_created:
            date_created_string = activity_post.date_created.strftime('%Y-%m-%d %H:%M:%S')
        if not positive_value_exists(activity_post.we_vote_id):
            # Saving assigns a we_vote_id, which the client needs for activityTidbitKey.
            try:
                activity_post.save()
            except Exception as e:
                status += "COULD_NOT_UPDATE_POST_WE_VOTE_ID: " + str(e) + ' '
        activity_post_dict = {
            'date_created':             date_created_string,
            'date_last_changed':        activity_post.date_last_changed.strftime('%Y-%m-%d %H:%M:%S'),
            'date_of_notice':           date_created_string,
            'id':                       activity_post.id,  # We normalize to generate activityTidbitKey
            'activity_post_id':         activity_post.id,
            'kind_of_activity':         'ACTIVITY_POST',
            'kind_of_seed':             '',
            'new_positions_entered_count': 0,
            'position_name_list':       [],
            'position_we_vote_id_list': [],
            'speaker_name':             activity_post.speaker_name,
            'speaker_organization_we_vote_id': activity_post.speaker_organization_we_vote_id,
            'speaker_voter_we_vote_id': activity_post.speaker_voter_we_vote_id,
            'speaker_profile_image_url_medium': activity_post.speaker_profile_image_url_medium,
            'speaker_profile_image_url_tiny': activity_post.speaker_profile_image_url_tiny,
            'speaker_twitter_handle':   activity_post.speaker_twitter_handle,
            'speaker_twitter_followers_count': activity_post.speaker_twitter_followers_count,
            'statement_text':           activity_post.statement_text,
            'visibility_is_public':     activity_post.visibility_is_public,
            'we_vote_id':               activity_post.we_vote_id,
        }
        activity_list.append(activity_post_dict)

    # Now cycle through these activities and retrieve all related comments
    activity_list_with_comments = []
    for activity_tidbit_dict in activity_list:
        results = activity_manager.retrieve_activity_comment_list(
            parent_we_vote_id=activity_tidbit_dict['we_vote_id'])
        activity_comment_list = []
        if results['success']:
            activity_comment_object_list = results['activity_comment_list']
            for activity_comment in activity_comment_object_list:
                # Retrieve the Child comments
                child_results = activity_manager.retrieve_activity_comment_list(
                    parent_comment_we_vote_id=activity_comment.we_vote_id)
                child_comment_list = []
                # BUGFIX: previously checked results['success'] (the parent
                # retrieval) instead of the child retrieval just performed.
                if child_results['success']:
                    child_comment_object_list = child_results['activity_comment_list']
                    for child_comment in child_comment_object_list:
                        child_comment_dict = {
                            'date_created': child_comment.date_created.strftime('%Y-%m-%d %H:%M:%S'),
                            'date_last_changed': child_comment.date_last_changed.strftime('%Y-%m-%d %H:%M:%S'),
                            'commenter_name': child_comment.commenter_name,
                            'commenter_organization_we_vote_id': child_comment.commenter_organization_we_vote_id,
                            'commenter_voter_we_vote_id': child_comment.commenter_voter_we_vote_id,
                            'commenter_profile_image_url_medium': child_comment.commenter_profile_image_url_medium,
                            'commenter_profile_image_url_tiny': child_comment.commenter_profile_image_url_tiny,
                            'commenter_twitter_handle': child_comment.commenter_twitter_handle,
                            'commenter_twitter_followers_count': child_comment.commenter_twitter_followers_count,
                            'parent_we_vote_id': child_comment.parent_we_vote_id,
                            'parent_comment_we_vote_id': child_comment.parent_comment_we_vote_id,
                            'statement_text': child_comment.statement_text,
                            'visibility_is_public': child_comment.visibility_is_public,
                            'we_vote_id': child_comment.we_vote_id,
                        }
                        child_comment_list.append(child_comment_dict)
                activity_comment_dict = {
                    'comment_list': child_comment_list,
                    'date_created': activity_comment.date_created.strftime('%Y-%m-%d %H:%M:%S'),
                    'date_last_changed': activity_comment.date_last_changed.strftime('%Y-%m-%d %H:%M:%S'),
                    'commenter_name': activity_comment.commenter_name,
                    'commenter_organization_we_vote_id': activity_comment.commenter_organization_we_vote_id,
                    'commenter_voter_we_vote_id': activity_comment.commenter_voter_we_vote_id,
                    'commenter_profile_image_url_medium': activity_comment.commenter_profile_image_url_medium,
                    'commenter_profile_image_url_tiny': activity_comment.commenter_profile_image_url_tiny,
                    'commenter_twitter_handle': activity_comment.commenter_twitter_handle,
                    'commenter_twitter_followers_count': activity_comment.commenter_twitter_followers_count,
                    'parent_we_vote_id': activity_comment.parent_we_vote_id,
                    'parent_comment_we_vote_id': activity_comment.parent_comment_we_vote_id,
                    'statement_text': activity_comment.statement_text,
                    'visibility_is_public': activity_comment.visibility_is_public,
                    'we_vote_id': activity_comment.we_vote_id,
                }
                activity_comment_list.append(activity_comment_dict)
        activity_tidbit_dict['activity_comment_list'] = activity_comment_list
        activity_list_with_comments.append(activity_tidbit_dict)

    # Order entries in the activity_list by "date_created"
    from operator import itemgetter
    activity_list_ordered = sorted(activity_list_with_comments, key=itemgetter('date_created'), reverse=True)

    json_data = {
        'status': status,
        'success': True,
        'activity_list': activity_list_ordered,
    }
    return HttpResponse(json.dumps(json_data), content_type='application/json')
| 5,335,236
|
def arg_parser() -> argparse.Namespace:
    """Read command line arguments.

    :returns: Values of accepted command line arguments.
    """
    description = dedent(
        """Find all recipes in a directory,
        build them and push all their images to an sregistry.
        Recipes are identified by the suffix ".recipe".
        The image name will be taken from the recipe name
        using everything from the first character till the first "." occurrence.
        The version will be taken from the recipe name
        using everything from the first "." till the suffix ".recipe".
        The collection name will be taken from the recipes parent folder.
        """
    )
    parser = argparse.ArgumentParser(description=description)
    # Mandatory: where to look for *.recipe files.
    parser.add_argument(
        '--path', '-p',
        type=str,
        required=True,
        help="Base path to search recipes.",
    )
    parser.add_argument(
        '--image_type', '-i',
        type=str,
        help="The type of image to be build."
    )
    parser.add_argument(
        '--build_log_dir', '-b',
        type=str,
        help="The directory, that should contain the build logs. Will be created if not existent."
    )
    return parser.parse_args()
| 5,335,237
|
def create(
    issue: str,
    mine: bool,
    assignee: str,
    body: str,
    type: str,
    label: str,
    parent: str,
    web: bool,
    story_points: str,
) -> None:
    """
    Create an issue.

    ISSUE is the title to be used for the new work item.

    All remaining options are forwarded to cmd_create_issue together with the
    shared CLI options; when ``web`` is set, the newly created work item is
    opened afterwards via cmd_open_issue.
    """
    work_item_id = cmd_create_issue(
        title=issue,
        mine=mine,
        assignee=assignee,
        label=label,
        body=body,
        type=type,
        parent=parent,
        story_points=story_points,
        **get_common_options(),
    )
    # Open the freshly created work item in the browser when requested.
    if web:
        cmd_open_issue(work_item_id)
| 5,335,238
|
def edit_action(
    request: http.HttpRequest,
    pk: int,
    workflow: Optional[models.Workflow] = None,
    action: Optional[models.Action] = None,
) -> http.HttpResponse:
    """Invoke the specific edit view.

    :param request: Request object
    :param pk: Action PK (already resolved into ``action`` by the decorator)
    :param workflow: Workflow being processed,
    :param action: Action being edited (set by the decorator)
    :return: HTML response
    """
    # pk was only needed by the decorator to look up ``action``.
    del pk
    return services.ACTION_PROCESS_FACTORY.process_edit_request(
        request,
        workflow,
        action)
| 5,335,239
|
def to_set(data: Any) -> Set[Any]:
    """Convert data to a set. A single None value will be converted to the empty set.

    ```python
    x = fe.util.to_set(None)  # set()
    x = fe.util.to_set([None])  # {None}
    x = fe.util.to_set(7)  # {7}
    x = fe.util.to_set([7, 8])  # {7,8}
    x = fe.util.to_set({7})  # {7}
    x = fe.util.to_set((7))  # {7}
    ```

    Args:
        data: Input data, within or without a python container. The `data` must be hashable.

    Returns:
        The input `data` but inside a set instead of whatever other container type used to hold it.
    """
    if data is None:
        return set()
    if isinstance(data, set):
        return data
    return set(data) if isinstance(data, (tuple, list, KeysView)) else {data}
| 5,335,240
|
def Pei92(wavelength, Av, z, Rv=-99.0, ext_law="smc", Xcut=False):
    """
    Extinction laws from Pei 1992 article

    Parameters
    ----------
    wavelength: `array` or `float`
        wavelength in angstroms (observer frame)
    Av: `float`
        amount of extinction in the V band
    z: `float`
        redshift
    Rv: `float`, optional, default: -99.
        selective attenuation Rv = Av / E(B-V)
        if -99. set values by default from article
        if a float is given, use this value instead
    ext_law: `str`
        type of extinction law to use.
        Choices: mw, lmc, smc
    Xcut: `boolean`, optional, default: False
        Whether to set attenuation to 0 for wavelength below 700 angstrom
        Useful when coupling with X-ray data

    Returns
    -------
    [Alambda_over_Av, Trans_dust]
    Alambda_over_Av : `array`
        attenuation as a function of wavelength normalised by Av
        (attenuation in V band)
    Trans_dust: `array`
        transmission through dust as a function of wavelength

    Raises
    ------
    ValueError
        If `ext_law` is not one of 'mw', 'lmc' or 'smc'.
    """
    # Rest-frame wavelength in microns. atleast_1d makes scalar input work,
    # as the docstring promises (len() would otherwise fail on a float).
    wvl = np.atleast_1d(wavelength) * 1e-4 / (1 + z)
    law = ext_law.lower()
    # Six-term fit coefficients (a_i, lambda_i, b_i, n_i) from Pei (1992);
    # default Rv per galaxy type is the value quoted in the article.
    if law == "smc":
        if Rv == -99.:
            Rv = 2.93
        a = [185, 27, 0.005, 0.010, 0.012, 0.03]
        wvl_ = [0.042, 0.08, 0.22, 9.7, 18, 25]
        b = [90, 5.50, -1.95, -1.95, -1.80, 0.0]
        n = [2.0, 4.0, 2.0, 2.0, 2.0, 2.0]
    elif law == "lmc":
        if Rv == -99.:
            Rv = 3.16
        a = [175, 19, 0.023, 0.005, 0.006, 0.02]
        wvl_ = [0.046, 0.08, 0.22, 9.7, 18, 25]
        b = [90, 5.5, -1.95, -1.95, -1.8, 0.0]
        n = [2.0, 4.5, 2.0, 2.0, 2.0, 2.0]
    elif law == "mw":
        if Rv == -99.:
            Rv = 3.08
        a = [165, 14, 0.045, 0.002, 0.002, 0.012]
        wvl_ = [0.046, 0.08, 0.22, 9.7, 18, 25]
        b = [90, 4.0, -1.95, -1.95, -1.8, 0.0]
        n = [2.0, 6.5, 2.0, 2.0, 2.0, 2.0]
    else:
        # Previously an unknown law fell through to a NameError on `a`.
        raise ValueError(
            "Unknown ext_law '%s'; expected one of 'mw', 'lmc', 'smc'" % ext_law)
    sums = np.zeros(len(wvl))
    for i in range(len(a)):
        sums += a[i] / ((wvl / wvl_[i]) ** n[i] + (wvl_[i] / wvl) ** n[i] + b[i])
    # Need to check whether extrapolation is needed
    # outside the range defined in Pei92
    # convert Alambda_over_Ab to Alambda_over_Av
    Alambda_over_Av = (1.0 / Rv + 1.0) * sums
    # Applied a cut for wavelength below 700 angstrom
    # Useful when coupling with Xray data
    if Xcut:
        w = np.where(wvl < 0.07)
        Alambda_over_Av[w] = 0
    # Return optical depth due to dust reddening in funtion of wavelength;
    # transmission is clipped to the physical range [0, 1].
    Tau_dust = Av * Alambda_over_Av / 1.086
    Trans_dust = np.exp(-Tau_dust)
    Trans_dust[Trans_dust < 0] = 0
    Trans_dust[Trans_dust > 1] = 1
    return [Alambda_over_Av, Trans_dust]
| 5,335,241
|
def is_empty(context: KedroContext, branch: str = "") -> None:
    """Empty pipelines should not swap filepaths on init.

    filepaths are only swapped after_catalog_created if they exist, so no
    dataset of an empty context should be swapped or exist yet.

    Args:
        context: The Kedro context under test.
        branch: Expected branch name on the context (defaults to "").
    """
    assert context.branch == branch
    for dataset in context.catalog.list():
        try:
            d = getattr(context.catalog.datasets, dataset)
        except AttributeError:
            return
        # BUGFIX: the original used bitwise `~` on booleans (~True == -2,
        # ~False == -1, both truthy), which made these asserts vacuous.
        assert not hasattr(d, "_filepath_swapped")
        assert not d.exists()
| 5,335,242
|
def plot_profile_avg_with_bounds(
    data,
    ax=None,
    confint_alpha=0.05,
    label=None,
    xs=None,
    axis=0,
    bounds: str = "ci",
    **kwargs,
):
    """Plot the nan-mean profile of `data` with a shaded uncertainty band.

    Parameters
    ----------
    data : array-like or xr.DataArray
        2D collection of profiles to average.
    ax : matplotlib Axes, optional
        Target axes; defaults to the current axes.
    confint_alpha : float
        Alpha level for the "ci" bounds (t-based confidence interval).
    label : str, optional
        Legend label for the mean line.
    xs : array-like, optional
        X coordinates; defaults to ``data.position`` (xarray) or an index range.
    axis : int
        Axis along which the plotted mean is taken.
        NOTE(review): the bounds below are computed along axis 0 regardless of
        this parameter — confirm whether non-zero `axis` is ever used.
    bounds : {"ci", "sem"}
        Type of band: confidence interval or +/- one standard error.
    kwargs
        Forwarded to both ``plot`` and ``fill_between`` (line-style keys are
        stripped before ``fill_between``).

    Returns
    -------
    matplotlib Axes
        The axes the profile was drawn on.
    """
    with np.errstate(invalid="ignore"):
        mean = np.nanmean(data, axis=0)
        # NOTE(review): stats.sem does not ignore NaNs, unlike nanmean —
        # NaN-containing columns will yield NaN bounds; confirm intended.
        sem = stats.sem(data)
    bounds_map = {
        "ci": DescrStatsW(data).tconfint_mean(alpha=confint_alpha),
        "sem": (mean - sem, mean + sem),
    }
    if ax is None:
        ax = plt.gca()

    if xs is None:
        try:
            # if the data is an xr.DataArray
            xs = data.position
        except (AttributeError, ValueError):
            # BUGFIX: a numpy array raises AttributeError here (no
            # ``.position`` attribute); ValueError alone never caught it.
            xs = np.arange(len(data))
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        ax.plot(xs, np.nanmean(data, axis=axis), label=label, **kwargs)
        lower, upper = bounds_map[bounds]
        # fill_between rejects line-only style kwargs; drop them.
        kwargs.pop("linestyle", None)
        kwargs.pop("linewidth", None)
        kwargs.pop("lw", None)
        ax.fill_between(xs, lower, upper, alpha=0.3, lw=0, **kwargs)
    return ax
| 5,335,243
|
def get_mimetype(path):
    """
    Get (guess) the mimetype of a file.

    Returns None when the type cannot be guessed from the path's extension.
    """
    guessed_type, _encoding = mimetypes.guess_type(path)
    return guessed_type
| 5,335,244
|
async def read_clients_epics(
    client_id: int = None, session: Session = Depends(get_session)
):
    """Get epics from a client_id.

    Returns rows of (Client.id, Client.name, Epic.name) for the client
    matching ``client_id``. Uses an inner join, so clients without epics
    produce no rows.

    :param client_id: Primary key of the client to look up.
    :param session: Database session injected by FastAPI's dependency system.
    """
    statement = (
        select(Client.id, Client.name, Epic.name)
        .select_from(Client)
        .join(Epic)
        .where(Client.id == client_id)
    )
    results = session.exec(statement).all()
    return results
| 5,335,245
|
def id_number_checksum(gd):
    """
    Calculates a Swedish ID number checksum, using the Luhn algorithm

    Expects a mapping with string entries 'year', 'month', 'day' and
    'serial'; letters (interim personal numbers) are treated as the digit 1.
    """
    digits = gd['year'] + gd['month'] + gd['day'] + gd['serial']
    total = 0
    for position, ch in enumerate(digits):
        # Interim personal numbers substitute a letter, counted as 1.
        value = 1 if ch.isalpha() else int(ch)
        # Luhn: double every other digit (even positions here), then
        # reduce two-digit products to their digit sum (== product - 9).
        product = value * (2 if position % 2 == 0 else 1)
        total += product - 9 if product > 9 else product
    remainder = total % 10
    return 0 if remainder == 0 else 10 - remainder
| 5,335,246
|
def date_loss_l1(pred,
                 target_min,
                 target_max,
                 mask):
    """L1 loss function for dates.

    Penalises predictions outside the interval [target_min, target_max];
    in-range predictions incur zero loss. The per-element loss is zeroed
    wherever ``mask`` is falsy. ``pred`` carries a leading singleton axis
    that is squeezed away first.
    """
    pred = jnp.squeeze(pred, 0)
    below = jnp.less(pred, target_min).astype(pred.dtype)
    above = jnp.greater(pred, target_max).astype(pred.dtype)
    loss = jnp.abs(pred - target_min) * below + jnp.abs(pred - target_max) * above
    return jnp.multiply(loss, mask.astype(loss.dtype))
| 5,335,247
|
def new_single_genres(genres, val):
    """Takes the genres list and returns only one genre back if multiple genres are present

    Also has the parameter val with values "high" and "low"
    High picks the genres belonging to the existing genres with the highest examples count
    Low picks the genres belonging to the existing genres with the least examples count

    Reads "fma_metadata/genres.csv" relative to the working directory;
    column 0 is used as the genre id and column 1 as the per-genre count
    (assumed from usage — confirm against the FMA metadata layout).
    """
    genres_file = "fma_metadata/genres.csv"
    reference_genres = pd.read_csv(genres_file)
    # Column 1: per-genre counts; column 0: genre ids.
    reference_tracks = reference_genres.iloc[:, 1]
    reference_genres = reference_genres.iloc[:, 0]
    for index, genre in genres.items():
        split = genre.split(",")
        if len(split) == 1:
            # Single genre: strip the surrounding brackets and keep it.
            new_genre = split[0]
            new_genre = new_genre.strip("[]")
            genres[index] = int(new_genre)
        elif len(split) > 1:
            # Multiple genres: tally each candidate's reference count...
            new_genre = [int(item.strip(" [ ] ")) for item in split]
            count = {}
            for indices, value in reference_genres.items():
                if value in new_genre:
                    count[value] = reference_tracks[indices]
            # ...sort by count ascending and keep one end of the ranking.
            counts = {k: v for k, v in sorted(count.items(), key=lambda item: item[1])}
            if val == "high":
                genres[index] = int(list(counts.keys())[-1])
            elif val == "low":
                genres[index] = int(list(counts.keys())[0])
    print("The shape of genres after single is:{}".format(genres.shape))
    genres = genres.astype('int')
    return genres
| 5,335,248
|
def run(funcs_to_test=None, tests_to_run=None, verbosity=2):
    """
    run testing routine

    args:
        - funcs_to_test: dict: {lang_name <str>: lang_func <callable>}
          of language processing modules to test
          if None `corpus.functions` passed
        - tests_to_run: list[<str>]: list of tests to run
          if None `TESTS_TO_RUN` passed
        - verbosity: int: verbosity mode

    return: List[str]: list of failing langs
    """
    failing_langs = []
    # Fall back to the module-level defaults when nothing is supplied.
    funcs_to_test = FUNCS_TO_TEST if funcs_to_test is None else funcs_to_test
    tests_to_run = TESTS_TO_RUN if tests_to_run is None else tests_to_run
    stream = sys.stderr
    log_header = '\n%s\nTESTING: %s_corpus.py\n%s\n\n'
    runner = unittest.TextTestRunner(stream, verbosity=verbosity)
    for lang, func in funcs_to_test.items():
        stream.write(log_header % ('*' * 20, lang, '*' * 20))
        lang_fails = []
        suite = unittest.TestSuite()
        routine = TestLangFunc
        # NOTE(review): set_env is invoked on the class with the class itself
        # passed as `self`, so state is shared class-wide — confirm intended.
        routine.set_env(routine, func, lang)
        # Run the data-fetching smoke test first; only run the full suite if
        # corpus data was actually fetched.
        c_test_res = runner.run(routine('test_fetch_data'))
        lang_fails.extend(c_test_res.failures + c_test_res.errors)
        if routine.fetch_data is not None:
            for test in tests_to_run:
                suite.addTest(routine(test))
            c_test_res = runner.run(suite)
            lang_fails.extend(c_test_res.failures + c_test_res.errors)
        if lang_fails:
            failing_langs.append(lang)
    return failing_langs
| 5,335,249
|
def roparameter(cosphi, hist, s_cosphi=0.25):
    """
    Contrast parameter between "parallel" and "perpendicular" histogram bins.

    Parameters
    ----------
    cosphi : array_like
        Cosine of the angle associated with each histogram bin.
    hist : array_like
        Histogram counts per bin.
    s_cosphi : float, optional
        Half-width in |cos(phi)| selecting the parallel (< s_cosphi) and
        perpendicular (> 1 - s_cosphi) bins.

    Returns
    -------
    float
        xi = (N_para - N_perp) / (N_para + N_perp).
    """
    abs_cos = np.abs(cosphi)
    n_perp = np.sum(hist[(abs_cos > 1. - s_cosphi).nonzero()])
    n_para = np.sum(hist[(abs_cos < s_cosphi).nonzero()])
    return (n_para - n_perp) / float(n_para + n_perp)
| 5,335,250
|
def check_gc_referrers(typename: Any, w_obj: Callable, name: str) -> None:
    """
    Check if any variable is getting out of control.
    Great for checking and tracing memory leaks.

    Spawns a background thread that waits two seconds, forces a garbage
    collection, then inspects gc.get_referrers() of the object produced by
    calling ``w_obj`` (presumably a weak reference — confirm with callers).
    The diagnostic prints are intentionally left commented out.
    """
    import threading
    import time

    def checkfn() -> None:
        import gc
        time.sleep(2)
        gc.collect()
        obj = w_obj()
        if not obj:
            # Already collected: nothing is leaking.
            return
        # TODO: If you see the message below, it means that "something" kept
        # ..... a reference to a form (or similar) that prevents it from being
        # ..... destroyed when it is no longer used. That also keeps its
        # ..... connects alive, so they end up invoked against the old code
        # ..... and failing.
        # print("HINT: Objetos referenciando %r::%r (%r) :" % (typename, obj, name))
        for ref in gc.get_referrers(obj):
            if isinstance(ref, dict):
                x: List[str] = []
                for k, v in ref.items():
                    # Tag the dict key that actually points at the leaked object.
                    if v is obj:
                        k = "(**)" + k
                    x.insert(0, k)
                # print(" - dict:", repr(x), gc.get_referrers(ref))
            else:
                # Stack frames always reference locals; not a leak indicator.
                if "<frame" in str(repr(ref)):
                    continue
                # print(" - obj:", repr(ref), [x for x in dir(ref) if getattr(ref, x) is obj])

    threading.Thread(target=checkfn).start()
| 5,335,251
|
def execute(model_fn, input_fn, **params):
    """Execute train or eval and/or inference graph writing.

    Args:
      model_fn: An estimator compatible function taking parameters
        (features, labels, mode, params) that returns a EstimatorSpec.
      input_fn: An estimator compatible function taking 'params' that returns a
        dataset
      **params: Dict of additional params to pass to both model_fn and input_fn.
    """
    # 'is_graph' tells model_fn/input_fn whether they are being built for
    # inference-graph export (True) or regular train/eval (False).
    params['is_graph'] = False
    if params['write_inference_graph']:
        inference_graph.write(model_fn, input_fn, params, params['model_dir'])
    params['is_graph'] = True

    # TODO: needs improvement (original note: "youxiugai")
    def estimator_model_fn(features, labels, mode, params):
        # Thin pass-through wrapper so tf.estimator owns the call signature.
        spec = model_fn(features, labels, mode, params)
        return spec

    def train_input_fn():
        # Training input: training data source and batch size, with optional
        # shuffling controlled by 'randomize_training'.
        train_params = params.copy()
        train_params['input_data'] = params['input_data_train']
        train_params['batch_size'] = params['train_batch_size']
        if params['randomize_training']:
            train_params['randomize_order'] = True
        return input_fn(train_params)

    def eval_input_fn():
        # Evaluation input: eval data source and eval batch size.
        eval_params = params.copy()
        eval_params['input_data'] = params['input_data_eval']
        eval_params['batch_size'] = params['eval_batch_size']
        return input_fn(eval_params)

    train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn,
                                        max_steps=params['train_steps'])
    # Number of eval batches needed to cover 'eval_examples'.
    eval_steps = int(round(params['eval_examples'] / params['eval_batch_size']))
    eval_spec = tf.estimator.EvalSpec(
        name=params['eval_suffix'], input_fn=eval_input_fn, steps=eval_steps,
        throttle_secs=params.get('eval_throttle_secs', 600))
    run_config = tf.estimator.RunConfig(
        model_dir=params['model_dir'],
        save_summary_steps=params['save_summary_steps'],
        save_checkpoints_secs=params['save_checkpoints_secs'],
        keep_checkpoint_every_n_hours=params['keep_checkpoint_every_n_hours'])
    estimator = tf.estimator.Estimator(
        model_fn=estimator_model_fn,
        params=params,
        config=run_config)
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
| 5,335,252
|
def polyfill_bbox(
    min_lng, max_lng, min_lat, max_lat, min_resolution=0, max_resolution=30
):
    """Polyfill a planar bounding box with compact s2 cells between resolution levels"""
    check_valid_polyfill_resolution(min_resolution, max_resolution)

    # Configure the coverer to emit cells only within the requested levels.
    coverer = s2sphere.RegionCoverer()
    coverer.min_level = min_resolution
    coverer.max_level = max_resolution

    rect = s2sphere.LatLngRect(
        s2sphere.LatLng(radians(min_lat), radians(min_lng)),
        s2sphere.LatLng(radians(max_lat), radians(max_lng)),
    )

    # Cell ids are reinterpreted as signed 64-bit ints and serialized as a
    # JSON-style array string.
    signed_ids = (int(uint64_to_int64(cell.id())) for cell in coverer.get_covering(rect))
    return '[' + ','.join(str(cell_id) for cell_id in signed_ids) + ']'
| 5,335,253
|
def whisper(caller, recipients, message):
    """Display a message with a specific format.

    Delivers ``message`` as a whisper from ``caller`` to the named
    ``recipients`` in the caller's location, tells other non-deaf characters
    in the room that a whisper occurred, and echoes the whisper back to the
    caller. Muted callers are rejected up front.
    """
    if 'mute' in caller.character.flags:
        commands.pemit(caller, caller, "\c(red)!!! Unable to speak, you are muted.")
        return
    # If you are flagged 'dark' (invisible), everyone hears you as a disembodied voice.
    sender = '%N' if 'dark' not in caller.character.flags else db.Object.get(2).properties['odark']
    # Build a list of non-deaf recipients and send the message to them.
    recipients = search(caller, recipients, within=caller.character.location, kind=db.Character, flags=['!deaf'])
    commands.pemit(caller, recipients, '%s whispers, "%s"' % (sender, message))
    # Find everyone else in the room and tell them that you've been whispering.
    others = search(caller, within=caller.character.location, not_within=[caller.character]+recipients.all(), kind=db.Character, flags=['!deaf'])
    commands.pemit(caller, others, "%s whispers something." % (sender, ))
    # Finally, echo your whisper.
    commands.pemit(caller, caller, 'You whisper, "%s"' % (message, ))
| 5,335,254
|
def qa_skysub(param, frame, skymodel, quick_look=False):
    """Calculate QA on SkySubtraction

    Note: Pixels rejected in generating the SkyModel (as above), are
    not rejected in the stats calculated here. Would need to carry
    along current_ivar to do so.

    Args:
        param : dict of QA parameters : see qa_frame.init_skysub for example
        frame : lvmspec.Frame object; Should have been flat fielded
        skymodel : lvmspec.SkyModel object
        quick_look : bool, optional
            If True, do QuickLook specific QA (or avoid some)

    Returns:
        qadict: dict of QA outputs
            Need to record simple Python objects for yaml (str, float, int)
    """
    from lvmspec.qa import qalib
    import copy
    #- QAs
    #- first subtract sky to get the sky subtracted frame. This is only for QA. Pipeline does it separately.
    tempframe=copy.deepcopy(frame) #- make a copy so as to propagate frame unaffected so that downstream pipeline uses it.
    subtract_sky(tempframe,skymodel) #- Note: sky subtract is done to get residuals. As part of pipeline it is done in fluxcalib stage
    # Sky residuals first
    qadict = qalib.sky_resid(param, tempframe, skymodel, quick_look=quick_look)
    # Sky continuum
    if not quick_look: # Sky continuum is measured after flat fielding in QuickLook
        # First char of CAMERA selects the per-channel continuum wavelength
        # range from the QA params (key like 'B_CONT').
        channel = frame.meta['CAMERA'][0]
        wrange1, wrange2 = param[channel.upper()+'_CONT']
        skyfiber, contfiberlow, contfiberhigh, meancontfiber, skycont = qalib.sky_continuum(frame,wrange1,wrange2)
        qadict["SKYFIBERID"] = skyfiber.tolist()
        qadict["SKYCONT"] = skycont
        qadict["SKYCONT_FIBER"] = meancontfiber
    if quick_look: # The following can be a *large* dict
        qadict_snr = qalib.SignalVsNoise(tempframe,param)
        qadict.update(qadict_snr)
    return qadict
| 5,335,255
|
def _code_to_symbol(code):
    """
    Build the exchange-prefixed symbol for a stock code.
    """
    # Index labels map straight to their predefined symbols.
    if code in ct.INDEX_LABELS:
        return ct.INDEX_LIST[code]
    # Anything other than a 6-character code is rejected.
    if len(code) != 6:
        return ''
    # Codes starting with 5/6/9 get the Shanghai prefix, the rest Shenzhen.
    return 'sh%s' % code if code[:1] in ['5', '6', '9'] else 'sz%s' % code
| 5,335,256
|
def guaranteeFolderExists(path_name):
    """ Make sure the given path exists after this call """
    # exist_ok makes the call idempotent; parents creates intermediate dirs.
    Path(path_name).absolute().mkdir(parents=True, exist_ok=True)
| 5,335,257
|
def classic_rk(solution, deck, dgsolver, limiter, coeffs, alphas, betas):
    """Integrate in time using the classic RK4 scheme.

    Advances ``solution`` in place to ``deck.finaltime`` using an explicit
    Runge-Kutta scheme defined by the tableau arrays, writing ``deck.nout``
    snapshots along the way via ``solution.printer``.

    Args:
        solution: state object with ``u``, ``t``, ``n``; advanced in place.
        deck: run configuration providing ``finaltime``, ``nout``, ``cfl``.
        dgsolver: provides ``residual(u)``, the RHS evaluation.
        limiter: provides ``limit(u)``, applied after each stage and step.
        coeffs: RK stage weights (one per stage).
        alphas: stage time fractions.
        betas: 2-D stage-coupling matrix (lower-triangular part is used).
    """
    # Initialize storage variables
    K = [np.zeros(solution.u.shape) for _ in range(len(coeffs))]
    us = solution.copy()
    uk = solution.copy()
    # Output time array (ignore the start time)
    nout = 0
    tout_array = iter(np.linspace(solution.t, deck.finaltime, deck.nout)[1:])
    # Flags
    done = False
    # Write the initial condition to file
    solution.printer(0, 0.0)
    nout += 1
    tout = next(tout_array)
    # main RK loop
    while (not done):
        # Get the next time step
        dt, output, done = get_next_time_step(
            solution, tout, deck.cfl, deck.finaltime)
        # Store the solution at the previous step: us = u
        us.copy_data_only(solution)
        # RK inner loop
        for k, (c, alpha) in enumerate(zip(coeffs, alphas)):
            # Get the solution at this sub-time step:
            # u_k = u_0 + \Delta t \sum_{k=0}^{n-1} \beta_{k,j} f(t_j,u_j)
            # t_k = t_0 + \alpha_k \Delta t
            uk.copy_data_only(us)
            for j, beta in enumerate(betas[k, :k]):
                uk.smart_axpy(beta, K[j])
            uk.t += alpha * dt
            # Limit solution if necessary (first stage is skipped: uk == us there)
            if k > 0:
                limiter.limit(uk)
            # Evaluate and store the solution increment: K_k = \Delta t f(t_k,
            # u_k)
            K[k] = dt * dgsolver.residual(uk)
            # Weighted sum of the residuals
            solution.smart_axpy(c, K[k])
        # Update the current time and make sure the boundary elements
        # are correct
        solution.t += dt
        solution.n += 1
        solution.apply_bc()
        # Limit solution if necessary
        limiter.limit(solution)
        # Output the solution if necessary
        if output:
            solution.printer(nout, dt)
            if not done:
                nout += 1
                tout = next(tout_array)
| 5,335,258
|
def gesv(a, b):
    """Solve a linear matrix equation using cusolverDn<t>getr[fs]().

    Computes the solution to a system of linear equation ``ax = b`` via an
    in-place LU factorization (getrf) followed by triangular solves (getrs).

    Args:
        a (cupy.ndarray): The matrix with dimension ``(M, M)``.
        b (cupy.ndarray): The matrix with dimension ``(M)`` or ``(M, K)``.

    Returns:
        cupy.ndarray:
            The matrix with dimension ``(M)`` or ``(M, K)``.

    Raises:
        ValueError: on non-square ``a``, mismatched shapes, bad ndim, or an
            unsupported dtype.
    """
    if a.ndim != 2:
        raise ValueError('a.ndim must be 2 (actual: {})'.format(a.ndim))
    if b.ndim not in (1, 2):
        raise ValueError('b.ndim must be 1 or 2 (actual: {})'.format(b.ndim))
    if a.shape[0] != a.shape[1]:
        raise ValueError('a must be a square matrix.')
    if a.shape[0] != b.shape[0]:
        raise ValueError('shape mismatch (a: {}, b: {}).'.
                         format(a.shape, b.shape))
    # Promote to at least float32; map dtype to the cuSOLVER routine prefix
    # (s/d for single/double real, c/z for single/double complex).
    dtype = _numpy.promote_types(a.dtype.char, 'f')
    if dtype == 'f':
        t = 's'
    elif dtype == 'd':
        t = 'd'
    elif dtype == 'F':
        t = 'c'
    elif dtype == 'D':
        t = 'z'
    else:
        raise ValueError('unsupported dtype (actual:{})'.format(a.dtype))
    helper = getattr(_cusolver, t + 'getrf_bufferSize')
    getrf = getattr(_cusolver, t + 'getrf')
    getrs = getattr(_cusolver, t + 'getrs')
    n = b.shape[0]
    nrhs = b.shape[1] if b.ndim == 2 else 1
    a_data_ptr = a.data.ptr
    b_data_ptr = b.data.ptr
    a = _cupy.asfortranarray(a, dtype=dtype)
    b = _cupy.asfortranarray(b, dtype=dtype)
    # asfortranarray may return the input unchanged; copy in that case so the
    # in-place getrf/getrs calls below do not clobber the caller's arrays.
    if a.data.ptr == a_data_ptr:
        a = a.copy()
    if b.data.ptr == b_data_ptr:
        b = b.copy()
    handle = _device.get_cusolver_handle()
    dipiv = _cupy.empty(n, dtype=_numpy.int32)  # pivot indices
    dinfo = _cupy.empty(1, dtype=_numpy.int32)  # device status flag
    lwork = helper(handle, n, n, a.data.ptr, n)
    dwork = _cupy.empty(lwork, dtype=a.dtype)  # scratch workspace
    # LU factrization (A = L * U)
    getrf(handle, n, n, a.data.ptr, n, dwork.data.ptr, dipiv.data.ptr,
          dinfo.data.ptr)
    _cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
        getrf, dinfo)
    # Solves Ax = b
    getrs(handle, _cublas.CUBLAS_OP_N, n, nrhs, a.data.ptr, n,
          dipiv.data.ptr, b.data.ptr, n, dinfo.data.ptr)
    _cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
        getrs, dinfo)
    return b
| 5,335,259
|
def del_none(d):
    """Delete dict keys with None values, and empty lists, recursively.

    The dictionary is modified in place and also returned for convenience.

    Args:
        d (dict): Dictionary to prune; nested dicts are pruned recursively.

    Returns:
        dict: The same dictionary object, pruned.
    """
    # Iterate over a snapshot of the items: deleting from ``d`` while
    # iterating ``d.items()`` directly raises RuntimeError on Python 3.
    for key, value in list(d.items()):
        if value is None or (isinstance(value, list) and len(value) == 0):
            del d[key]
        elif isinstance(value, dict):
            del_none(value)
    return d
| 5,335,260
|
def aes_block(ciphertext, key):
    """Decrypt a single 16-byte block using AES in ECB mode.

    Keyword arguments:
    ciphertext -- the 16-byte string to be decrypted
    key -- the byte string key
    """
    if len(ciphertext) != 16:
        raise ValueError("The ciphertext can only be one block (16 bytes).")
    cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=default_backend())
    decryptor = cipher.decryptor()
    plaintext = decryptor.update(ciphertext)
    return plaintext + decryptor.finalize()
| 5,335,261
|
def getLogger(*args, **kwargs):
    """
    Wrapper around ``logging.getLogger`` that respects `overrideLogLevel <#setOverrideLogLevel>`_.
    """
    result = logging.getLogger(*args, **kwargs)
    if _overrideLogLevel is None:
        return result
    # Clear the per-logger level so the override takes effect.
    result.setLevel(logging.NOTSET)
    return result
| 5,335,262
|
def split_metadata_string(text, chunk_length=None):
    """Split string by length.

    Split text to chunks by entered length; the final chunk may be shorter.

    Example:
        ```python
        text = "ABCDEFGHIJKLM"
        result = split_metadata_string(text, 3)
        print(result)
        >>> ['ABC', 'DEF', 'GHI', 'JKL', 'M']
        ```

    Args:
        text (str): Text that will be split into chunks.
        chunk_length (int): Single chunk size. Default chunk_length is
            set to global variable `TVPAINT_CHUNK_LENGTH`.

    Returns:
        list: List of strings with at least one item.
    """
    if chunk_length is None:
        chunk_length = TVPAINT_CHUNK_LENGTH
    # ``or ['']`` preserves the original behavior of returning [''] for an
    # empty input string (the docstring promises at least one item).
    return [
        text[start:start + chunk_length]
        for start in range(0, len(text), chunk_length)
    ] or ['']
| 5,335,263
|
def load_kimmel_data(root_data_path,
                     flag_size_factor=True, total_ct_per_cell=1e4,
                     flag_log1p=True):
    """Load normalized data from Kimmel et al, GR, 2019.

    1. Size factor normalization to ``total_ct_per_cell`` counts per cell
    2. log(x+1) transform

    Args:
        root_data_path (str): Data root; must contain ``Kimmel_GR_2019_data``
            with ``kidney.h5ad``, ``lung.h5ad`` and ``spleen.h5ad``.
        flag_size_factor (bool): Apply per-cell count normalization when True.
        total_ct_per_cell (float): Target counts per cell after normalization.
        flag_log1p (bool): Apply log(x+1) transform when True.

    Returns:
        adata (AnnData): Combined data for kidney, lung, and spleen, annotated
        with ``tissue``, ``sex``, ``age``, ``age_old`` and ``age_num``.
    """
    # Load filtered data
    file_path=root_data_path+'/Kimmel_GR_2019_data'
    adata_kidney = read_h5ad(file_path + '/kidney.h5ad')
    adata_lung = read_h5ad(file_path + '/lung.h5ad')
    adata_spleen = read_h5ad(file_path + '/spleen.h5ad')
    # Size factor normalization
    if flag_size_factor == True:
        sc.pp.normalize_per_cell(adata_kidney, counts_per_cell_after=total_ct_per_cell)
        sc.pp.normalize_per_cell(adata_lung, counts_per_cell_after=total_ct_per_cell)
        sc.pp.normalize_per_cell(adata_spleen, counts_per_cell_after=total_ct_per_cell)
    # log(x+1) transform
    if flag_log1p == True:
        sc.pp.log1p(adata_kidney)
        sc.pp.log1p(adata_lung)
        sc.pp.log1p(adata_spleen)
    # Combine data; 'inner' join keeps only genes common to all tissues
    adata = adata_kidney.concatenate(adata_lung, adata_spleen,
                                     batch_key='batch_combine', join='inner')
    # Map the concatenation batch index back to a tissue label
    adata.obs['tissue'] = ''
    adata.obs.loc[adata.obs['batch_combine']=='0', 'tissue'] = 'Kidney'
    adata.obs.loc[adata.obs['batch_combine']=='1', 'tissue'] = 'Lung'
    adata.obs.loc[adata.obs['batch_combine']=='2', 'tissue'] = 'Spleen'
    # All animals in this dataset are male; recode young/old to months
    adata.obs['sex'] = 'male'
    adata.obs['age_old'] = adata.obs['age'].values.copy()
    adata.obs['age'] = ['7m' if x=='young' else '22m' for x in adata.obs['age_old']]
    adata.obs['age_num'] = [7 if x=='young' else 22 for x in adata.obs['age_old']]
    return adata
| 5,335,264
|
def get_coupon_page() -> bytes:
    """
    Gets the coupon page HTML.

    Returns:
        bytes: Raw response body, or ``None`` when the request fails.
    """
    try:
        response = requests.get(COUPONESE_DOMINOS_URL)
        return response.content
    except RequestException as e:
        # ``e.response`` is None for connection-level failures (DNS, timeout,
        # refused), so logging ``e.response.content`` unconditionally would
        # raise AttributeError inside the handler.
        if e.response is not None:
            bot.logger.error(e.response.content)
        else:
            bot.logger.error(e)
        return None
| 5,335,265
|
def get_program_similarity(fingerprint_a, fingerprint_b):
    """Find similarity between fingerprints of two programs.

    A fingerprint is a list of ``(hash, location)`` pairs derived from the
    program's k-gram hashes; only the hash values take part in the
    comparison, via the Jaccard index of the two hash multisets.

    Args:
        fingerprint_a: list((int, int)). Fingerprint of first program.
        fingerprint_b: list((int, int)). Fingerprint of second program.

    Returns:
        float. Similarity between first and second program.
    """
    hashes_a = [hash_value for hash_value, _ in fingerprint_a]
    hashes_b = [hash_value for hash_value, _ in fingerprint_b]
    return calc_jaccard_index(hashes_a, hashes_b)
| 5,335,266
|
def load_regions_with_bounding_boxes():
    """Load the regional bounding-box geometries as shapely objects.

    Returns:
        list: list of shapely objects containing regional geometries
    """
    print(
        "loading region bounding boxes for computing carbon emissions region, this may take a moment..."
    )
    dir_path = os.path.dirname(os.path.realpath(__file__))
    geometry_records = read_terrible_json(
        os.path.join(dir_path, "data/zonegeometries.json")
    )
    # Convert each raw GeoJSON-style mapping into a shapely geometry in place.
    for record in geometry_records:
        record["geometry"] = shape(record["geometry"])
    print("Done!")
    return geometry_records
| 5,335,267
|
def check_operating_system_supported() -> None:
    """Exit the process when the current operating system is not supported."""
    for supported in PLATFORMS_SUPPORTED:
        if not sys.platform.startswith(supported):
            continue
        # Current platform is supported; warn if it is development-only.
        if supported in PLATFORMS_DEVELOPMENT:
            debug_message(f"You are currently running this app on platform {supported} "
                          "which is not fully supported!")
        return
    # No supported platform matched: report and exit with an error code.
    debug_message("Oops... You are running app on the platform "
                  f"{sys.platform} which is not supported! Sorry for that!")
    sys.exit(1)
| 5,335,268
|
def create_variables(name, shape, initializer=tf.contrib.layers.xavier_initializer(), is_fc_layer=False):
    """
    Create (or fetch via variable scope) a TF variable with L2 weight decay.

    :param name: A string. The name of the new variable
    :param shape: A list of dimensions
    :param initializer: User Xavier as default.
    :param is_fc_layer: Want to create fc layer variable? May use different weight_decay for fc
    layers.
    :return: The created variable
    """
    # TODO: to allow different weight decay to fully connected layer and conv layer
    # NOTE(review): both branches are currently identical; the flag has no
    # effect until the TODO above is implemented.
    if is_fc_layer is True:
        regularizer = tf.contrib.layers.l2_regularizer(scale=FLAGS.weight_decay)
    else:
        regularizer = tf.contrib.layers.l2_regularizer(scale=FLAGS.weight_decay)
    new_variables = tf.get_variable(name, shape=shape, initializer=initializer, regularizer=regularizer)
    return new_variables
| 5,335,269
|
def linear_transformation(x, y_min, y_max):
    """Linearly map the values of *x* onto the interval [y_min, y_max].

    Computes y = f(x), f(x) = m * x + b, where m and b are chosen so that
    min(x) maps to y_min and max(x) maps to y_max.

    Parameters
    ----------
    x : array-like
        The range to be transformed.
    y_min, y_max : float
        Lower and upper boundaries for the range into which x is transformed.

    Returns
    -------
    numpy.ndarray
        The transformed values; a constant array of ``y_min`` when all
        elements of ``x`` are equal.
    """
    x_min = np.min(x)
    x_max = np.max(x)
    if x_min == x_max:
        # Degenerate range: every element maps to y_min.  (The previous
        # ``x_max = x_min * 1.0001`` trick produced NaNs when x_min == 0.)
        return np.full_like(np.asarray(x, dtype=float), y_min)
    return y_min + (y_max - y_min) / (x_max - x_min) * (x - x_min)
| 5,335,270
|
def resize_image(image, desired_width=768, desired_height=384, random_pad=False):
    """Resizes an image keeping the aspect ratio mostly unchanged.

    The image is scaled to fit within (desired_width, desired_height) and
    then padded out to exactly that size.

    Args:
        image: PIL-style image exposing ``.size`` as (width, height).
        desired_width (int): Output width in pixels.
        desired_height (int): Output height in pixels.
        random_pad (bool): When True the padding split is randomized
            (data augmentation); otherwise it is centered.

    Returns:
        image: the resized image
        window: (x1, y1, x2, y2). If max_dim is provided, padding might
            be inserted in the returned image. If so, this window is the
            coordinates of the image part of the full image (excluding
            the padding). The x2, y2 pixels are not included.
        scale: The scale factor used to resize the image
        padding: Padding added to the image [left, top, right, bottom]
    """
    # Default window (x1, y1, x2, y2) and default scale == 1.
    w, h = image.size
    # Pick the smaller scale so the resized image fits in both dimensions.
    width_scale = desired_width / w
    height_scale = desired_height / h
    scale = min(width_scale, height_scale)
    # Resize image using bilinear interpolation
    if scale != 1:
        image = functional.resize(image, (round(h * scale), round(w * scale)))
        w, h = image.size
    # Split the leftover space between the two sides of each axis.
    y_pad = desired_height - h
    x_pad = desired_width - w
    top_pad = random.randint(0, y_pad) if random_pad else y_pad // 2
    left_pad = random.randint(0, x_pad) if random_pad else x_pad // 2
    padding = (left_pad, top_pad, x_pad - left_pad, y_pad - top_pad)
    assert all([x >= 0 for x in padding])
    image = functional.pad(image, padding)
    window = [left_pad, top_pad, w + left_pad, h + top_pad]
    return image, window, scale, padding
| 5,335,271
|
def get_state(*names):
    """
    Return a list of the values of the given state keys.

    Parameters
    ----------
    *names : *str
        Names of the state values to retrieve.

    Returns
    -------
    [any, ...]
        Values matching the requested state property names, in order.
    """
    application = get_app_instance()
    return [application.get(state_name) for state_name in names]
| 5,335,272
|
def ver_datos_basicos(request, anexo_id):
    """
    Display the basic data ("datos básicos") page for an anexo.
    """
    anexo = __get_anexo(request, anexo_id)
    # NOTE(review): 'parts' is never used below — confirm whether the call
    # has a required side effect or can be removed.
    parts = anexo.get_cue_parts()
    return my_render(request, 'registro/anexo/ver_datos.html', {
        'template': 'registro/anexo/ver_datos_basicos.html',
        'anexo': anexo,
        'page_title': 'Datos básicos',
        'actual_page': 'datos_basicos',
        'configuracion_solapas': ConfiguracionSolapasAnexo.get_instance(),
        'datos_verificados': anexo.get_verificacion_datos().get_datos_verificados()
    })
| 5,335,273
|
def fill_sections(source, sections):
    """Fill user-code section markers in *source* with content from *sections*.

    >>> fill_sections(\
    ' /* Begin User Code Section: foobar *//* End User Code Section: foobar */', {'foobar': 'barbaz'})
    ' /* Begin User Code Section: foobar */\\n    barbaz\\n    /* End User Code Section: foobar */'
    """
    def repl(matches):
        # Group 1 is presumably the marker's leading whitespace (its length
        # drives re-indentation) and group 2 the section name — defined by
        # the module-level ``fill_section`` pattern; confirm there.
        indent_amt = len(matches[1])
        secname = matches[2]
        # Unknown section names fall back to an empty body.
        return indent(create_section(secname, sections.get(secname, '') + '\n'), indent_amt)
    return fill_section.sub(repl, source)
| 5,335,274
|
def CNOT(n):
    """CNOT gate on 2-Qubit system with control qubit = 0 and target qubit = 1"""
    gate = np.copy(I4)
    # Swap rows 2 and 3 of the identity: |10> <-> |11>, i.e. flip the
    # target qubit whenever the control qubit is 1.
    gate[[2, 3]] = gate[[3, 2]]
    return gate.dot(n)
| 5,335,275
|
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings (no-op if already configured).
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend_Mp.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the management command named on the command line.
    execute_from_command_line(sys.argv)
| 5,335,276
|
def command_stop_submission_over_quota(syn, args):
    """
    Sets an annotation on Synapse Docker submissions such that it will
    be terminated by the orchestrator. Usually applies to submissions
    that have been running for longer than the alloted time.

    Args:
        syn: Synapse client connection.
        args: Parsed CLI namespace providing ``submission_viewid`` and
            ``quota``.

    >>> challengeutils stop-submission-over-quota submission_viewid quota
    """
    submission.stop_submission_over_quota(syn, args.submission_viewid, quota=args.quota)
| 5,335,277
|
def parse(s):
    """Parse a single string. This is just a convenience function."""
    token_stream = tokenize(s)
    parsed = parseSingleExpression(token_stream, identity_cont)
    return pogo(parsed)
| 5,335,278
|
def set_default_rio_config(aws=None, cloud_defaults=False, **kwargs):
    """ Setup default configuration for rasterio/GDAL.

    Doesn't actually activate one, just stores configuration for future
    use from IO threads.

    :param aws: Dictionary of options for rasterio.session.AWSSession
                OR 'auto' -- session = rasterio.session.AWSSession()
    :param cloud_defaults: When True inject settings for reading COGs
    :param **kwargs: Passed on to rasterio.Env(..) constructor
    """
    global _CFG  # pylint: disable=global-statement
    # Swap the config atomically under the lock; bumping ``epoch`` lets IO
    # threads detect that their cached environment is stale.
    with _CFG_LOCK:
        _CFG = SimpleNamespace(aws=aws,
                               cloud_defaults=cloud_defaults,
                               kwargs=kwargs,
                               epoch=_CFG.epoch + 1)
| 5,335,279
|
def link_match_check(row):
    """
    Indicating that link is already in database.

    Args:
        row: parsed markup node expected to contain an ``<a href=...>`` tag.

    Returns:
        bool: True when any stored Post has the same link, False otherwise
        (including rows without an anchor tag).
    """
    try:
        row_link = row.a["href"]
    except TypeError:
        # ``row.a`` is None when the row has no anchor tag.
        return False
    # The previous loop returned after comparing only the FIRST Post, so a
    # duplicate anywhere past position 0 was never detected.
    return any(post.link == row_link for post in Post.objects.all())
| 5,335,280
|
def update_safety_check(first_dict, second_dict, compat=operator.eq):
    """Check the safety of updating one dictionary with another.

    Raises ValueError if dictionaries have non-compatible values for any key,
    where compatibility is determined by identity (they are the same item) or
    the `compat` function.

    Parameters
    ----------
    first_dict, second_dict : dict-like
        All items in the second dictionary are checked for conflicts
        against items in the first dictionary.
    compat : function, optional
        Binary operator to determine if two values are compatible. By default,
        checks for equality.
    """
    # ``iteritems`` was removed in Python 3; ``items`` is the equivalent.
    for k, v in second_dict.items():
        if (k in first_dict and
                not (v is first_dict[k] or compat(v, first_dict[k]))):
            raise ValueError('unsafe to merge dictionaries without '
                             'overriding values; conflicting key %r' % k)
| 5,335,281
|
def merge_s2_threshold(log_area, gap_thresholds):
    """Return gap threshold for log_area of the merged S2
    with linear interpolation given the points in gap_thresholds

    :param log_area: Log 10 area of the merged S2
    :param gap_thresholds: tuple (n, 2) of fix points for interpolation
    """
    previous = None
    for area_point, gap_point in gap_thresholds:
        if log_area < area_point:
            if previous is None:
                # Below the first fix point: clamp to its threshold.
                return gap_point
            prev_area, prev_gap = previous
            # Linear interpolation between the bracketing fix points.
            slope = (gap_point - prev_gap) / (area_point - prev_area)
            return (log_area - prev_area) * slope + prev_gap
        previous = (area_point, gap_point)
    # At or above the last fix point: clamp to the final threshold.
    return gap_thresholds[-1][1]
| 5,335,282
|
def rename_indatabet_cols(df_orig):
    """Return a deep copy of *df_orig* with Indatabet odds columns renamed
    to their camelCase equivalents; the input frame is left untouched.
    """
    column_map = {
        'odds_awin_pinn': 'awinOddsPinnIndatabet',
        'odds_draw_pinn': 'drawOddsPinnIndatabet',
        'odds_hwin_pinn': 'hwinOddsPinnIndatabet',
        'odds_awin_bet365': 'awinOddsBet365Indatabet',
        'odds_draw_bet365': 'drawOddsBet365Indatabet',
        'odds_hwin_bet365': 'hwinOddsBet365Indatabet',
        'odds_ftgoalso2.5_bet365': 'ftGoalsO2.5OddsBet365Indatabet',
        'odds_ftgoalsu2.5_bet365': 'ftGoalsU2.5OddsBet365Indatabet',
        'odds_ftgoalso2.5_pinn': 'ftGoalsO2.5OddsPinnIndatabet',
        'odds_ftgoalsu2.5_pinn': 'ftGoalsU2.5OddsPinnIndatabet',
    }
    return df_orig.copy(deep=True).rename(columns=column_map)
| 5,335,283
|
def get_market_updates(symbols, special_tags):
    """
    Get current yahoo quote.

    'special_tags' is a list of tags. More info about tags can be found at
    http://www.gummy-stuff.org/Yahoo-data.htm
    Returns a DataFrame (indexed by symbol), or None if the download fails.

    NOTE(review): this legacy Yahoo CSV quote endpoint has reportedly been
    retired — confirm the URL still responds before relying on this.

    Args:
        symbols: a single symbol string, or a sequence/Series of symbols.
        special_tags: list of single/double-letter Yahoo tag codes; 's'
            (symbol) is forced in because it becomes the result index.
    """
    if isinstance(symbols, str):
        sym_list = symbols
    elif not isinstance(symbols, pd.Series):
        symbols = pd.Series(symbols)
        sym_list = str.join('+', symbols)
    else:
        sym_list = str.join('+', symbols)
    # Symbol must be in the special_tags for now
    if not 's' in special_tags:
        special_tags.insert(0, 's')
    request = ''.join(special_tags)  # code request string
    special_tag_names = [settings.YAHOO_SYMBOL_TAGS[x] for x in special_tags]
    header = special_tag_names
    # One empty column list per requested tag, keyed by human-readable name.
    data = dict(list(zip(
        list(special_tag_names), [[] for i in range(len(special_tags))]
    )))
    urlStr = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (
        sym_list, request)
    try:
        lines = urllib.request.urlopen(urlStr).readlines()
    except Exception as e:
        s = "Failed to download:\n{0}".format(e)
        print(s)
        return None
    for line in lines:
        fields = line.decode('utf-8').strip().split(',')
        for i, field in enumerate(fields):
            # '"12.3%"' -> percentage value; '"text"' -> quoted string;
            # otherwise try a plain float, falling back to NaN.
            if field[-2:] == '%"':
                data[header[i]].append(float(field.strip('"%')))
            elif field[0] == '"':
                data[header[i]].append(field.strip('"'))
            else:
                try:
                    data[header[i]].append(float(field))
                except ValueError:
                    data[header[i]].append(np.nan)
    # The symbol column becomes the index rather than a data column.
    idx = data.pop('Symbol')
    return pd.DataFrame(data, index=idx)
| 5,335,284
|
def HLRBRep_SurfaceTool_Torus(*args):
    """
    SWIG-generated wrapper forwarding to the native OCC implementation.

    :param S:
    :type S: Standard_Address
    :rtype: gp_Torus
    """
    return _HLRBRep.HLRBRep_SurfaceTool_Torus(*args)
| 5,335,285
|
def load_augmentation_class():
    """
    Loads the user augmentation class.

    Similar in spirit to django.contrib.auth.load_backend.

    Returns:
        The class object named by the ``AUTH.USER_AUGMENTOR`` setting.

    Raises:
        ImproperlyConfigured: when the configured class cannot be resolved.
    """
    # Pre-bind so the except clause can reference it even if .get() raises
    # (previously this produced a NameError inside the handler).
    class_name = None
    try:
        class_name = AUTH.USER_AUGMENTOR.get()
        i = class_name.rfind('.')
        module, attr = class_name[:i], class_name[i + 1:]
        mod = import_module(module)
        klass = getattr(mod, attr)
        LOG.info("Augmenting users with class: %s" % (klass,))
        return klass
    except Exception:
        # Narrowed from a bare ``except:`` which also swallowed
        # SystemExit/KeyboardInterrupt.
        LOG.exception('failed to augment class')
        raise ImproperlyConfigured("Could not find user_augmentation_class: %s" % (class_name,))
| 5,335,286
|
def FieldTypeFor(descriptor, field_desc, nullable):
    """Returns the Javascript type for a given field descriptor.

    Args:
        descriptor: The descriptor module from the protobuf package, e.g.
            google.protobuf.descriptor.
        field_desc: A field descriptor for a particular field in a message.
        nullable: Whether or not the value may be null.

    Returns:
        The Javascript type for the given field descriptor.
    """
    fd = descriptor.FieldDescriptor
    scalar_types = {
        fd.TYPE_DOUBLE: 'number',
        fd.TYPE_INT32: 'number',
        fd.TYPE_BOOL: 'boolean',
        fd.TYPE_STRING: 'string',
    }
    if field_desc.type in scalar_types:
        element_type = scalar_types[field_desc.type]
    elif field_desc.type == fd.TYPE_ENUM:
        element_type = field_desc.enum_type.full_name
    elif field_desc.type == fd.TYPE_MESSAGE:
        element_type = field_desc.message_type.full_name
    else:
        # Preserve the KeyError the original dict lookup raised.
        raise KeyError(field_desc.type)
    # However, if the field is actually a reference to a tagspec name (string),
    # make it a number instead as we'll be replacing this with the tagspec id.
    if field_desc.full_name in TAG_NAME_REFERENCE_FIELD:
        element_type = 'number'
    if field_desc.label == fd.LABEL_REPEATED:
        return 'Array<!%s>' % element_type if nullable else '!Array<!%s>' % element_type
    return '?%s' % element_type if nullable else '%s' % element_type
| 5,335,287
|
def unadmin(bot, input):
    """Removes person from admins list, owner only"""
    if not input.owner:
        return False
    target = input.group(2)
    bot.config.set_del('admins', target.lower())
    bot.reply("Unadmin'd {0}".format(target))
| 5,335,288
|
def arrange_and_plot(wholedict, ptitle):
    """
    Organize and plot data.

    Parameters
    ----------
    wholedict : OrderedDict
        dictionary of information to be plotted, ordered by input file
        required keys are 'ftitle' 'titleMols' 'rmsds'
    ptitle : string
        Title on plot
    """
    # fill in dictionary for ref file for list comprehension later
    # (entry 0 is the reference file and carries no data of its own)
    wholedict[0]['titleMols'] = np.full(len(wholedict[1]['titleMols']), np.nan)
    wholedict[0]['rmsds'] = np.full(len(wholedict[1]['rmsds']), np.nan)
    # extract part of dictionary using list comprehension
    # (skips entry 0, the reference; transposed so rows = keys, cols = files)
    subset = np.array([[(wholedict[fd][key]) for key in\
        ('ftitle','titleMols','rmsds')] for fd in list(wholedict.keys())[1:]], dtype=object).T
    # build plot list: one [ftitle, molecule title, rmsd] row per
    # molecule/file combination
    plotlist = []
    for m in range(len(wholedict[1]['titleMols'])):
        for f in range(len(wholedict) - 1):
            temp = []
            #temp.append(subset[0][f].split('/')[-1].split('.')[0])
            temp.append(subset[0][f])
            temp.append(subset[1][f][m])
            temp.append(subset[2][f][m])
            plotlist.append(temp)
    plotlist = np.array(plotlist)
    # generate plot and save it alongside showing it interactively
    fig = plt.figure()
    ax = fig.add_subplot(111)
    barplot(ax, plotlist, ptitle)
    plt.savefig('barchart.png', bbox_inches='tight')
    plt.show()
| 5,335,289
|
def addMySymbol(command, sortedSymbols, mySymbols):
    """addMySymbol handles add symbol command.

    The second word of *command* is either a 1-based index into
    ``sortedSymbols`` or a symbol name.  On success the resolved symbol is
    looked up via ``getSymbol`` and stored in ``mySymbols``.
    """
    expression = command.strip().lower()
    words = expression.split()
    selection = words[1]
    if selection.isdigit():
        index = int(selection)
        # 1-based selection.  The old bound (``<= len - 1``) made the last
        # symbol unselectable and let "0" wrap around to the last element.
        if 1 <= index <= len(sortedSymbols):
            symbol = sortedSymbols[index - 1].upper()
            mySymbols[symbol] = getSymbol(symbol)
    elif selection.isalpha() and selection.upper() in sortedSymbols:
        symbol = selection.upper()
        mySymbols[symbol] = getSymbol(symbol)
    # Special handling when there are no pre-loaded symbols
    elif selection.isalpha() and len(sortedSymbols) == 0:
        symbol = selection.upper()
        mySymbols[symbol] = getSymbol(symbol)
| 5,335,290
|
def sim_beta_ratio(table, threshold, prior_strength, hyperparam, N,
                   return_bayes=False):
    """
    Calculates simulated ratios of match probabilites using a beta
    distribution and returns corresponding means and 95% credible
    intervals, posterior parameters, Bayes factor.

    Parameters
    ------------
    table : 2x2 numpy array
        corresponds to contingency table,
        for example,
                    False    True
            GroupA     5       4
            GroupB     3       4
        contains frequency counts: [[5, 4], [3, 4]]
    threshold : float
        value to split continuous variable on
    prior_strength : string from {'weak', 'strong', 'uniform'}
        prior distribution to be 'informative'/'noninformative'/'uniform';
        any other value is treated as user-specified hyperparameters
    hyperparam : tuple of 4 floats
        (alpha1, beta1, alpha2, beta2) used when prior_strength is not one
        of the three named options
    N : int
        number of posterior samples to draw for each simulation
    return_bayes : bool, optional
        when True, append a Bayes factor for practical equivalence
        (ratio within [0.8, 1.25]) to the returned list

    Returns
    ------------
    list : means and 95% credible intervals, posterior parameters, Bayes factor

    Note: draws use ``np.random`` without a fixed seed, so results vary
    between calls.
    """
    n_sim = N
    # store array of total counts in table by category
    category_counts = table.sum(axis=1, dtype=float)
    # store array of number of matches by categories
    match_counts = table[:, 1]
    # set hyperparameters according to threshold and sample size
    if prior_strength == 'weak':
        # weakly informative prior, has standard deviation
        # of 0.1 at alpha / (alpha + beta) = 0.5
        # coefficient 24 is empirically derived for best smoothing at small N
        alpha1, beta1 = (1 - threshold) * 24., threshold * 24.
        alpha2, beta2 = (1 - threshold) * 24., threshold * 24.
    elif prior_strength == 'strong':
        # observing 'idealized' dataset of size n
        alpha1 = round((1 - threshold) * category_counts[0])
        beta1 = round(threshold * category_counts[0])
        alpha2 = round((1 - threshold) * category_counts[1])
        beta2 = round(threshold * category_counts[1])
    elif prior_strength == 'uniform':
        # uniform prior
        alpha1, beta1 = 1, 1
        alpha2, beta2 = 1, 1
    else:
        # user specified, defaults to uniform
        alpha1, beta1, alpha2, beta2 = hyperparam
    # draw posterior sample of matching probabilities
    # (beta-binomial conjugate update: prior + observed counts)
    post_alpha1 = alpha1 + match_counts[0]
    post_beta1 = beta1 + category_counts[0] - match_counts[0]
    post_alpha2 = alpha2 + match_counts[1]
    post_beta2 = beta2 + category_counts[1] - match_counts[1]
    p1 = np.random.beta(post_alpha1, post_beta1, n_sim)
    p2 = np.random.beta(post_alpha2, post_beta2, n_sim)
    # posterior draw of ratios
    p1p2 = p1 / p2
    p2p1 = p2 / p1
    sim_beta_ratio_metrics = [np.mean(p1p2), np.mean(p2p1),
                              np.std(p1p2), np.std(p2p1),
                              np.percentile(p1p2, 2.5),
                              np.percentile(p2p1, 2.5),
                              np.percentile(p1p2, 97.5),
                              np.percentile(p2p1, 97.5),
                              (post_alpha1, post_beta1),
                              (post_alpha2, post_beta2)]
    if return_bayes:
        # Return bayes factor for % of posterior ratios in range [.8, 1.25]
        post_prob_null = np.sum((p1p2 >= 0.8) & (p1p2 <= 1.25)) / float(n_sim)
        bayes_factor = post_prob_null / (1 - post_prob_null)
        sim_beta_ratio_metrics.append(bayes_factor)
    return sim_beta_ratio_metrics
| 5,335,291
|
def force_remove(*paths):
    """Silently delete each file in *paths*, like ``rm -f``.

    Missing files are ignored; directories are NOT removed.
    """
    for file_path in paths:
        try:
            os.remove(file_path)
        except OSError:
            # Missing file or permission problem: deliberately best-effort.
            pass
| 5,335,292
|
def write_mm_atom_properties_txt(cgmodel, list_of_atoms_to_add):
    """
    Given a cgmodel and a 'list_of_atoms_to_add', this function adds the atoms to 'mm_atom_properties.txt'.

    Parameters
    ----------

    cgmodel: CGModel() class object

    list_of_atoms_to_add: List of atom types to write to 'mm_atom_properties.txt'
    List([ strings ])
    """
    mm_atom_type_sets_directory = str(str(pyrosetta_database_path)+"mm_atom_type_sets/coarse_grain")
    if not os.path.exists(mm_atom_type_sets_directory): os.mkdir(mm_atom_type_sets_directory)
    # BUG FIX: the file path was built from the undefined name
    # 'atom_type_sets_directory'.
    atom_properties_file = str(str(mm_atom_type_sets_directory)+"/mm_atom_properties.txt")
    if os.path.exists(atom_properties_file):
        existing_mm_atom_types = get_existing_mm_atom_types(atom_properties_file)
        file_obj = open(atom_properties_file,'a')
        for atom_type in list_of_atoms_to_add:
            # BUG FIX: compared the undefined 'residue_type' against the
            # undefined 'existing_residue_types'.
            if atom_type in existing_mm_atom_types:
                print("WARNING: found an existing atom type with the same name in:\n")
                print(str(str(atom_properties_file)+"\n"))
                print("Removing the existing atom type definition from 'atom_properties.txt'")
                # BUG FIX: was called with the undefined 'file_name'/'residue_type'.
                remove_existing_residue_types(atom_properties_file,[atom_type])
    else:
        file_obj = open(atom_properties_file,'w')
        file_obj.write("NAME ATOM LJ_RADIUS LJ_WDEPTH LK_DGFREE LK_LAMBDA LK_VOLUME\n")
        file_obj.write("## Coarse grained residue types to follow\n")
    # BUG FIX: the loop iterated over the undefined 'atom_type_list'.
    for atom_type in list_of_atoms_to_add:
        if len(atom_type) > 4:
            print("ERROR: an atom type with a name longer than 4 characters has been defined for this model.\n")
            print("PyRosetta syntax requires that all atom types have names with four characters or less.")
            exit()
        particle_type = get_particle_type(cgmodel,particle_index=-1,particle_name=atom_type)
        sigma = get_sigma(cgmodel,particle_index=-1,particle_type=particle_type)
        epsilon = get_epsilon(cgmodel,particle_index=-1,particle_type=particle_type)
        # Lazaridis-Karplus solvation terms: placeholder defaults for
        # coarse-grained particles.
        lk_dgfree = 0.0000
        lk_lambda = 3.5000
        lk_volume = 0.0000
        symbol = 'X'
        comments = ""
        line_list = [atom_type,symbol,sigma,epsilon,lk_dgfree,lk_lambda,lk_volume,comments]
        # BUG FIX: '%'-formatting requires a tuple; the previous generator
        # over 'len(line_list)' raised TypeError.  A newline is also needed
        # so records are not concatenated onto one line.
        line = '%4s %1s %9s %9s %9s %6s %9s %s\n' % tuple(line_list)
        file_obj.write(line)
    file_obj.close()
    return
| 5,335,293
|
def dump_yaml_and_check_difference(obj, filename, sort_keys=False):
    """Dump object to a yaml file, and check if the file content is different
    from the original.

    Args:
        obj (any): The python object to be dumped.
        filename (str): YAML filename to dump the object to.
        sort_keys (str); Sort key by dictionary order.

    Returns:
        Bool: If the target YAML file is different from the original.
    """
    new_text = mmcv.dump(obj, None, file_format='yaml', sort_keys=sort_keys)
    old_text = None
    if osp.isfile(filename):
        with open(filename, 'r', encoding='utf-8') as fh:
            old_text = fh.read()
    # Skip the write entirely when the on-disk content already matches.
    if old_text == new_text:
        return False
    with open(filename, 'w', encoding='utf-8') as fh:
        fh.write(new_text)
    return True
| 5,335,294
|
def get_data(
    db: Redis[bytes],
    store: StorageEngine,
    source: Artefact[T],
    carry_error: Optional[hash_t] = None,
    do_resolve_link: bool = True,
) -> Result[T]:
    """Retrieve and decode the data stored for an artefact."""
    stream = get_stream(db, store, source.hash, carry_error, do_resolve_link)
    if isinstance(stream, Error):
        # Propagate the stream-level failure unchanged.
        return stream
    raw_bytes = stream.read()
    stream.close()
    return _serdes.decode(source.kind, raw_bytes, carry_error=carry_error)
| 5,335,295
|
def record_edit(request, pk):
    """Edit an existing visit record (拜访记录修改).

    Only records owned by the session user and flagged valid are editable.
    GET renders the bound form; a valid POST saves and redirects.
    """
    user = request.session.get('user_id')
    record = get_object_or_404(Record, pk=pk, user=user, is_valid=True)
    if request.method == 'POST':
        form = RecordForm(data=request.POST, instance=record)
        if form.is_valid():
            form.save()
            return redirect('record')
        else:
            # ``as_json`` is a method; without parentheses this printed the
            # bound-method repr instead of the validation errors.
            print(form.errors.as_json())
    else:
        form = RecordForm(instance=record)
    return render(request, 'record_edit.html', {
        'form': form,
        'pk': pk
    })
| 5,335,296
|
def remove_body_footer(raw):
    """
    Remove a specific body footer starting with the delimiter : -=-=-=-=-=-=-=-=-=-=-=-
    """
    import re  # local import: keeps this fix self-contained
    body = raw[MELUSINE_COLS[0]]
    # BUG FIX: ``str.replace`` treats its first argument literally, so the
    # regex pattern never matched anything and the footer was kept.
    # ``re.sub`` with DOTALL strips everything from the delimiter onward.
    return re.sub(r'-=-=-=-=.*$', '', body, flags=re.DOTALL)
| 5,335,297
|
def get_strategy_name():
    """Return the name identifying this strategy module."""
    return 'store_type'
| 5,335,298
|
def pyccel_to_sympy(expr, symbol_map, used_names):
    """
    Convert a pyccel expression to a sympy expression saving any pyccel objects
    converted to sympy symbols in a dictionary to allow the reverse conversion
    to be carried out later

    Parameters
    ----------
    expr : PyccelAstNode
        The pyccel node to be translated
    symbol_map : dict
        Dictionary containing any pyccel objects converted to sympy symbols;
        updated in place whenever a new symbol is created here
    used_names : Set
        A set of all the names which already exist and therefore cannot
        be used to create new symbols

    Returns
    -------
    expr : sympy Object

    Raises
    ------
    TypeError
        If no translation is known for the type of ``expr``
    """
    # Literal constants: map directly onto sympy's number types.
    if isinstance(expr, LiteralInteger):
        return sp.Integer(expr.p)
    elif isinstance(expr, LiteralFloat):
        return sp.Float(expr)
    # Arithmetic operators: translate operands recursively, then rebuild
    # the operation with the sympy equivalent.
    elif isinstance(expr, PyccelDiv):
        args = [pyccel_to_sympy(e, symbol_map, used_names) for e in expr.args]
        return args[0] / args[1]
    elif isinstance(expr, PyccelMul):
        args = [pyccel_to_sympy(e, symbol_map, used_names) for e in expr.args]
        return sp.Mul(*args)
    elif isinstance(expr, PyccelMinus):
        args = [pyccel_to_sympy(e, symbol_map, used_names) for e in expr.args]
        return args[0] - args[1]
    elif isinstance(expr, PyccelUnarySub):
        arg = pyccel_to_sympy(expr.args[0], symbol_map, used_names)
        return -arg
    elif isinstance(expr, PyccelAdd):
        args = [pyccel_to_sympy(e, symbol_map, used_names) for e in expr.args]
        return sp.Add(*args)
    elif isinstance(expr, PyccelPow):
        args = [pyccel_to_sympy(e, symbol_map, used_names) for e in expr.args]
        return sp.Pow(*args)
    elif isinstance(expr, PyccelAssociativeParenthesis):
        # Parentheses carry no meaning in sympy's tree; unwrap them.
        return pyccel_to_sympy(expr.args[0], symbol_map, used_names)
    elif isinstance(expr, MathCeil):
        return sp.ceiling(pyccel_to_sympy(expr.args[0], symbol_map, used_names))
    # Already-translated objects: if this pyccel object was previously mapped
    # to a symbol, reuse that symbol. NOTE: this check must come before the
    # Variable branch below, or a mapped Variable would get a duplicate symbol.
    elif expr in symbol_map.values():
        return list(symbol_map.keys())[list(symbol_map.values()).index(expr)]
    elif isinstance(expr, Variable):
        # New variable: create a symbol with the same name and remember the
        # association so the conversion can be reversed later.
        sym = sp.Symbol(expr.name)
        symbol_map[sym] = expr
        return sym
    elif isinstance(expr, PyccelArraySize):
        # Array sizes have no sympy counterpart: stand in a fresh temporary
        # symbol whose name is guaranteed not to clash with used_names.
        sym_name,_ = create_incremented_string(used_names, prefix = 'tmp_size')
        sym = sp.Symbol(sym_name)
        symbol_map[sym] = expr
        return sym
    # Statement-level nodes: translated children are wrapped back into the
    # corresponding pyccel container classes (not sympy objects).
    elif isinstance(expr, CodeBlock):
        body = (pyccel_to_sympy(b, symbol_map, used_names) for b in expr.body)
        return CodeBlock(body)
    elif isinstance(expr, (Comment)):
        # Comment text is irrelevant to symbolic analysis; keep a placeholder.
        return Comment('')
    elif isinstance(expr, For):
        target = pyccel_to_sympy(expr.target, symbol_map, used_names)
        iter_obj = pyccel_to_sympy(expr.iterable, symbol_map, used_names)
        body = pyccel_to_sympy(expr.body, symbol_map, used_names)
        return For(target, iter_obj, body)
    elif isinstance(expr, PythonRange):
        start = pyccel_to_sympy(expr.start, symbol_map, used_names)
        stop = pyccel_to_sympy(expr.stop , symbol_map, used_names)
        step = pyccel_to_sympy(expr.step , symbol_map, used_names)
        return sp.Range(start, stop, step)
    elif isinstance(expr, Assign):
        lhs = pyccel_to_sympy(expr.lhs, symbol_map, used_names)
        rhs = pyccel_to_sympy(expr.rhs, symbol_map, used_names)
        return Assign(lhs, rhs)
    elif isinstance(expr, (sp.core.basic.Atom, sp.core.operations.AssocOp, sp.Set)):
        # Already a sympy object — nothing to translate.
        return expr
    else:
        raise TypeError(str(type(expr)))
| 5,335,299
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.