content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def add_campaign_data(campaign_data):
    """Persist a CampaignData object to the datastore.

    Args:
        campaign_data: The CampaignData object to convert and store as a
            CampaignData entity.

    Returns:
        None
    """
    # Convert first, then write -- both steps are delegated to helpers.
    entity = convert_campaign_to_entity(campaign_data)
    put_entity_in_datastore(entity)
def deploy(cluster_name, hub_name, config_path):
    """
    Deploy one or more hubs in a given cluster.

    Args:
        cluster_name: Name of the cluster whose config is deployed.
        hub_name: Name of a single hub to deploy; if falsy, every hub in
            the cluster is deployed.
        config_path: Path to the encrypted file holding the auth0
            credentials and the proxy secret key.

    Raises:
        ValueError: If ``hub_name`` is given but no hub with that name
            exists in the cluster configuration.
    """
    validate_cluster_config(cluster_name)
    validate_hub_config(cluster_name, hub_name)
    assert_single_auth_method_enabled(cluster_name, hub_name)

    with get_decrypted_file(config_path) as decrypted_file_path:
        with open(decrypted_file_path) as f:
            # NOTE(review): no Loader/safe_load here -- presumably `yaml` is a
            # ruamel.yaml.YAML instance (where .load() is the API); confirm
            # before changing to yaml.safe_load.
            config = yaml.load(f)

    # Most of our hubs use Auth0 for Authentication. This lets us programmatically
    # determine what auth provider each hub uses - GitHub, Google, etc. Without
    # this, we'd have to manually generate credentials for each hub - and we
    # don't want to do that. Auth0 domains are tied to a account, and
    # this is our auth0 domain for the paid account that 2i2c has.
    auth0 = config["auth0"]
    k = KeyProvider(auth0["domain"], auth0["client_id"], auth0["client_secret"])

    # Each hub needs a unique proxy.secretToken. However, we don't want
    # to manually generate & save it. We also don't want it to change with
    # each deploy - that causes a pod restart with downtime. So instead,
    # we generate it based on a single secret key (`PROXY_SECRET_KEY`)
    # combined with the name of each hub. This way, we get unique,
    # cryptographically secure proxy.secretTokens without having to
    # keep much state. We can rotate them by changing `PROXY_SECRET_KEY`.
    # However, if `PROXY_SECRET_KEY` leaks, that means all the hub's
    # proxy.secretTokens have leaked. So let's be careful with that!
    SECRET_KEY = bytes.fromhex(config["secret_key"])

    config_file_path = find_absolute_path_to_cluster_file(cluster_name)
    with open(config_file_path) as f:
        cluster = Cluster(yaml.load(f), config_file_path.parent)

    with cluster.auth():
        hubs = cluster.hubs
        if hub_name:
            hub = next((hub for hub in hubs if hub.spec["name"] == hub_name), None)
            # Bug fix: previously a missing hub fell straight through to
            # `hub.spec[...]`, crashing with an opaque AttributeError on None.
            if hub is None:
                raise ValueError(
                    f"Hub {hub_name} not found in cluster {cluster_name}"
                )
            print_colour(f"Deploying hub {hub.spec['name']}...")
            hub.deploy(k, SECRET_KEY)
        else:
            for i, hub in enumerate(hubs):
                print_colour(
                    f"{i+1} / {len(hubs)}: Deploying hub {hub.spec['name']}..."
                )
                hub.deploy(k, SECRET_KEY)
def split_by_rank(faf, ranks, outdir, verbose=False):
    """
    Split a fasta file into one output file per taxonomic rank.

    :param faf: fasta file path
    :param ranks: dict mapping taxid (string) -> rank name
    :param outdir: output directory for the per-rank fasta files
    :param verbose: more output
    :return: None
    """
    # Raw string: '\d' in a non-raw literal is an invalid escape sequence
    # (DeprecationWarning in modern Python).
    taxid_pattern = re.compile(r'TaxID=(\d+)')
    # Bug fix: this previously tested the global `args.v` instead of the
    # `verbose` parameter, making the parameter dead and coupling the
    # function to a CLI global.
    if verbose:
        sys.stderr.write(f"{colours.GREEN}Splitting {faf}{colours.ENDC}\n")
    fhs = {}
    try:
        for seqid, seq in stream_fasta(faf):
            m = taxid_pattern.search(seqid)
            # Sequences with no recognizable/known taxid fall back to "root".
            rnk = "root"
            if m:
                tid = m.groups()[0]
                if tid in ranks:
                    rnk = ranks[tid]
            else:
                sys.stderr.write(f"{colours.RED}ERROR: No taxonomy in {seqid}{colours.ENDC}\n")
            # Lazily open one output handle per rank.
            if rnk not in fhs:
                fhs[rnk] = open(os.path.join(outdir, rnk + ".fasta"), 'w')
            fhs[rnk].write(f">{seqid}\n{seq}\n")
    finally:
        # Close every opened per-rank file even if streaming fails midway.
        for fh in fhs.values():
            fh.close()
def auth_optional(request):
    """
    view method for path '/sso/auth_optional'
    Return
        200 response: authenticated and authorized
        204 response: not authenticated
        403 response: authenticated, but not authorized
    """
    res = _auth(request)
    if not res:
        # not authenticated
        return AUTH_NOT_REQUIRED_RESPONSE
    # authenticated, but can be authorized or not authorized
    return res
def main():
    """Poll the home timeline and auto-reply to new, replyable statuses.

    Flow: configure encoding, load channel config, authenticate, load the
    de-duplication record, reply to eligible statuses, and persist the
    record. Python 2 only (print statements, `reload(sys)`,
    `except IOError, e`, `unicode`).
    """
    #set system default encoding to utf-8 to avoid encoding problems
    reload(sys)
    sys.setdefaultencoding( "utf-8" )
    #load channel configurations
    channels = json.load(open('conf/channel.json'))
    #find one account
    rr = SNSPocket()
    for c in channels:
        rr.add_channel(c)
    # NOTE(review): rr is constructed above, so this branch can never
    # trigger -- the check looks like it predates the SNSPocket refactor.
    if rr is None:
        print "cannot find one renren platform in channel.json"
        return
    else:
        rr.load_config()
        rr.auth()
    #load record to avoid repeated reply
    try:
        sIDs = json.load(open('statusID.json'))
    except IOError, e:
        if e.errno == 2: #no such file
            sIDs = {}
        else:
            raise e
    status_list = rr.home_timeline(NEWS_QUERY_COUNT)
    for s in status_list:
        s.show()
        # Fingerprint the status so a reply is only ever sent once.
        msg_string = "".join( unicode(x) for x in \
            [s.parsed.time, s.ID, s.parsed.username, \
            s.parsed.userid, s.parsed.text])
        sig = hashlib.sha1(msg_string.encode('utf-8')).hexdigest()
        if not sig in sIDs and can_reply(s):
            print '[reply it]'
            REPLY_STRING = translate(get_word(s.parsed.text))
            ret = rr.reply(s.ID, REPLY_STRING.decode('utf-8'))
            print "[ret: %s]" % ret
            # Rate-limit between replies to avoid hammering the platform.
            print "[wait for %d seconds]" % REPLY_GAP
            time.sleep(REPLY_GAP)
            # Only record the signature when the reply actually succeeded.
            if ret:
                sIDs[sig] = msg_string
        else:
            print '[no reply]'
    #save reply record
    json.dump(sIDs, open('statusID.json', 'w'))
def is_pip_main_available():
    """Return whether the main pip function has been resolved.

    Call get_pip_main before calling this function.
    """
    # A value of None indicates the pip entry point was not found.
    return not (PIP_MAIN_FUNC is None)
def energy_target(flattened_bbox_targets, pos_bbox_targets,
                  pos_indices, r, max_energy):
    """Calculate energy targets based on the deep watershed paper.

    The per-pixel energy is
    ``E_max * (1 - sqrt(((l-r)/2)^2 + ((t-b)/2)^2) / r)``, clamped at 0
    and floored to an integer level. No argmax over classes is needed
    here because positive pixels were already selected upstream.

    Args:
        flattened_bbox_targets (torch.Tensor): The flattened bbox targets;
            only its first dimension (number of locations) is used.
        pos_bbox_targets (torch.Tensor): lrtb values for the locations
            inside a bounding box (passed in to avoid recomputation).
        pos_indices (torch.Tensor): Indices into flattened_bbox_targets of
            the locations inside a bounding box.
        r (float): Hyperparameter scaling the distance-to-center term.
        max_energy (int): Max energy level possible.

    Returns:
        tuple: ``(pos_energies_targets, energies_targets)`` -- both
        flattened; the first contains only in-box locations.
    """
    # Horizontal / vertical distance to the box center: (l-r)/2 and (t-b)/2.
    horizontal = torch.div(pos_bbox_targets[:, 0] - pos_bbox_targets[:, 2], 2)
    vertical = torch.div(pos_bbox_targets[:, 1] - pos_bbox_targets[:, 3], 2)
    # x * x is used instead of x.pow(2) since it's faster by about 30%.
    center_dist = torch.sqrt(horizontal * horizontal + vertical * vertical)

    tensor_opts = {'dtype': center_dist.dtype, 'device': center_dist.device}
    one = torch.tensor([1], **tensor_opts)
    zero = torch.tensor([0], **tensor_opts)

    pos_energies = (one - torch.div(center_dist, r)) * max_energy
    # Clamp negatives to zero, then quantize to whole energy levels.
    pos_energies = torch.max(pos_energies, zero).floor()

    # Scatter the positive energies back into a full-size zero tensor.
    energies_targets = torch.zeros(flattened_bbox_targets.shape[0],
                                   **tensor_opts)
    energies_targets[pos_indices] = pos_energies
    return pos_energies, energies_targets
def dashtable(df):
    """
    Convert df to the format expected by a dash datatable.

    PARAMETERS
    ----------
    df: pd.DataFrame

    OUTPUT
    ----------
    dash_cols: list containing column specs for the dashtable
    df: dataframe for the dashtable (returned unchanged)
    drop_dict: dict containing the dropdown options for the dashtable
    """
    # Build one column spec per dataframe column; columns listed in the
    # module-level `dropdown_cols` are rendered as dropdowns.
    dash_cols = []
    for col in df.columns:
        col_spec = {'name': col, 'id': col}
        if col in dropdown_cols:
            col_spec['presentation'] = 'dropdown'
        dash_cols.append(col_spec)

    # Build the dropdown option lists from the parallel `drop_options` data.
    drop_dict = {}
    for idx, col in enumerate(dropdown_cols):
        options = [{'label': item, 'value': item} for item in drop_options[idx]]
        drop_dict[col] = {'options': options, 'clearable': False}

    return dash_cols, df, drop_dict
def validate_gradient():
    """
    Validate the analytic gradient computation against a numerical gradient.

    Should be used together with gradient_check.py. This is a useful thing
    to do when you implement your own gradient calculation methods. It is
    not required for this assignment.
    """
    from gradient_check import eval_numerical_gradient, rel_error

    # Small random two-layer network: 4-d inputs, 10 hidden units, 4 classes.
    dim = 4
    num_classes = 4
    num_inputs = 5
    std = 0.001
    params = {
        'W1': std * np.random.randn(dim, 10),
        'b1': np.zeros(10),
        'W2': std * np.random.randn(10, num_classes),
        'b2': np.zeros(num_classes),
    }

    X = np.random.randn(num_inputs, dim)
    y = np.array([0, 1, 2, 2, 1])

    loss, grads = compute_neural_net_loss(params, X, y, reg=0.1)
    # These should all be less than 1e-8 or so.
    for param_name in params:
        # eval_numerical_gradient perturbs params[param_name] in place, so
        # the closure just re-evaluates the loss with the current params.
        f = lambda W: compute_neural_net_loss(params, X, y, reg=0.1)[0]
        param_grad_num = eval_numerical_gradient(f, params[param_name], verbose=False)
        print('%s max relative error: %e' % (param_name, rel_error(param_grad_num, grads[param_name])))
def other_identifiers_to_metax(identifiers_list):
    """Convert other identifiers to comply with Metax schema.

    Arguments:
        identifiers_list (list): List of other identifiers from frontend.

    Returns:
        list: List of other identifiers that comply to Metax schema.
    """
    # Each identifier is wrapped in a {"notation": ...} object.
    return [{"notation": identifier} for identifier in identifiers_list]
def print_formatted_table(delimiter):
    """Read tabular data from standard input and print a table."""
    # Strip trailing whitespace/newlines before splitting each row.
    rows = [line.rstrip().split(delimiter) for line in sys.stdin]
    print(format_table(rows))
def enforce_excel_cell_string_limit(long_string, limit):
    """
    Trim a long string to fit a character limit, ending it with '...'.

    This addresses a limitation of CSV files, where very long strings which
    exceed the char cell limit of Excel cause weird artifacts when saving
    to CSV. Strings within the limit are returned unchanged.
    """
    # A limit below 4 cannot hold even one character plus the ellipsis,
    # so clamp it.
    if limit <= 3:
        limit = 4
    if len(long_string) <= limit:
        return long_string
    return long_string[:limit - 3] + '...'
def gaussian_blur(image: np.ndarray, sigma_min: float, sigma_max: float) -> np.ndarray:
    """
    Blur an image with a Gaussian kernel of randomly drawn strength.

    Args:
        image: Input image array.
        sigma_min: Lower bound of Gaussian kernel standard deviation range.
        sigma_max: Upper bound of Gaussian kernel standard deviation range.

    Returns:
        Blurred image array.
    """
    # Draw a random sigma; a (0, 0) kernel size tells OpenCV to derive the
    # kernel size from sigma.
    sigma = np.random.uniform(sigma_min, sigma_max)
    return cv2.GaussianBlur(image, (0, 0), sigma)
def get_number_of_pcs_in_pool(pool):
    """
    Return the number of Computer records assigned to the given pool.
    """
    return Computer.objects.filter(pool=pool).count()
def save_images(scene_list, video_manager, num_images=3, frame_margin=1,
                image_extension='jpg', encoder_param=95,
                image_name_template='$VIDEO_NAME-Scene-$SCENE_NUMBER-$IMAGE_NUMBER',
                output_dir=None, downscale_factor=1, show_progress=False,
                scale=None, height=None, width=None):
    # type: (List[Tuple[FrameTimecode, FrameTimecode]], VideoManager,
    #        Optional[int], Optional[int], Optional[str], Optional[int],
    #        Optional[str], Optional[str], Optional[int], Optional[bool],
    #        Optional[float], Optional[int], Optional[int])
    #        -> Dict[List[str]]
    """ Saves a set number of images from each scene, given a list of scenes
    and the associated video/frame source.
    Arguments:
        scene_list: A list of scenes (pairs of FrameTimecode objects) returned
            from calling a SceneManager's detect_scenes() method.
        video_manager: A VideoManager object corresponding to the scene list.
            Note that the video will be closed/re-opened and seeked through.
        num_images: Number of images to generate for each scene.  Minimum is 1.
        frame_margin: Number of frames to pad each scene around the beginning
            and end (e.g. moves the first/last image into the scene by N frames).
            Can set to 0, but will result in some video files failing to extract
            the very last frame.
        image_extension: Type of image to save (must be one of 'jpg', 'png', or 'webp').
        encoder_param: Quality/compression efficiency, based on type of image:
            'jpg' / 'webp':  Quality 0-100, higher is better quality.  100 is lossless for webp.
            'png': Compression from 1-9, where 9 achieves best filesize but is slower to encode.
        image_name_template: Template to use when creating the images on disk. Can
            use the macros $VIDEO_NAME, $SCENE_NUMBER, and $IMAGE_NUMBER. The image
            extension is applied automatically as per the argument image_extension.
        output_dir: Directory to output the images into.  If not set, the output
            is created in the working directory.
        downscale_factor: Integer factor to downscale images by.  No filtering
            is currently done, only downsampling (thus requiring an integer).
        show_progress: If True, shows a progress bar if tqdm is installed.
        scale: Optional factor by which to rescale saved images.A scaling factor of 1 would
            not result in rescaling. A value <1 results in a smaller saved image, while a
            value >1 results in an image larger than the original. This value is ignored if
            either the height or width values are specified.
        height: Optional value for the height of the saved images. Specifying both the height
            and width will resize images to an exact size, regardless of aspect ratio.
            Specifying only height will rescale the image to that number of pixels in height
            while preserving the aspect ratio.
        width: Optional value for the width of the saved images. Specifying both the width
            and height will resize images to an exact size, regardless of aspect ratio.
            Specifying only width will rescale the image to that number of pixels wide
            while preserving the aspect ratio.
    Returns:
        Dict[List[str]]: Dictionary of the format { scene_num : [image_paths] },
        where scene_num is the number of the scene in scene_list (starting from 1),
        and image_paths is a list of the paths to the newly saved/created images.
    Raises:
        ValueError: Raised if any arguments are invalid or out of range (e.g.
        if num_images is negative).
    """
    if not scene_list:
        return {}
    if num_images <= 0 or frame_margin < 0:
        raise ValueError()

    # TODO: Validate that encoder_param is within the proper range.
    # Should be between 0 and 100 (inclusive) for jpg/webp, and 1-9 for png.
    imwrite_param = [get_cv2_imwrite_params()[image_extension],
                     encoder_param] if encoder_param is not None else []

    video_name = video_manager.get_video_name()

    # Reset video manager and downscale factor.
    video_manager.release()
    video_manager.reset()
    video_manager.set_downscale_factor(downscale_factor)
    video_manager.start()

    # Setup flags and init progress bar if available.
    completed = True
    logger.info('Generating output images (%d per scene)...', num_images)
    progress_bar = None
    if show_progress and tqdm:
        progress_bar = tqdm(
            total=len(scene_list) * num_images,
            unit='images',
            dynamic_ncols=True)

    filename_template = Template(image_name_template)

    # Zero-pad scene/image numbers so filenames sort lexicographically.
    scene_num_format = '%0'
    scene_num_format += str(max(3, math.floor(math.log(len(scene_list), 10)) + 1)) + 'd'
    image_num_format = '%0'
    image_num_format += str(math.floor(math.log(num_images, 10)) + 2) + 'd'

    framerate = scene_list[0][0].framerate

    # For each scene: split its frame range into num_images chunks, then pick
    # the chunk midpoints, except the first/last chunk which are pulled
    # inwards by frame_margin (clamped to stay inside the chunk).
    timecode_list = [
        [
            FrameTimecode(int(f), fps=framerate) for f in [
                # middle frames
                a[len(a)//2] if (0 < j < num_images-1) or num_images == 1
                # first frame
                else min(a[0] + frame_margin, a[-1]) if j == 0
                # last frame
                else max(a[-1] - frame_margin, a[0])
                # for each evenly-split array of frames in the scene list
                for j, a in enumerate(np.array_split(r, num_images))
            ]
        ]
        for i, r in enumerate([
            # pad ranges to number of images
            r
            if 1+r[-1]-r[0] >= num_images
            else list(r) + [r[-1]] * (num_images - len(r))
            # create range of frames in scene
            for r in (
                range(start.get_frames(), end.get_frames())
                # for each scene in scene list
                for start, end in scene_list
            )
        ])
    ]

    image_filenames = {i: [] for i in range(len(timecode_list))}
    # A near-1.0 pixel aspect ratio needs no correction; None disables it.
    aspect_ratio = get_aspect_ratio(video_manager)
    if abs(aspect_ratio - 1.0) < 0.01:
        aspect_ratio = None

    for i, scene_timecodes in enumerate(timecode_list):
        for j, image_timecode in enumerate(scene_timecodes):
            video_manager.seek(image_timecode)
            ret_val, frame_im = video_manager.read()
            if ret_val:
                file_path = '%s.%s' % (
                    filename_template.safe_substitute(
                        VIDEO_NAME=video_name,
                        SCENE_NUMBER=scene_num_format % (i + 1),
                        IMAGE_NUMBER=image_num_format % (j + 1),
                        FRAME_NUMBER=image_timecode.get_frames()),
                    image_extension)
                image_filenames[i].append(file_path)
                # Correct for non-square pixels before any user resizing.
                if aspect_ratio is not None:
                    frame_im = cv2.resize(
                        frame_im, (0, 0), fx=aspect_ratio, fy=1.0,
                        interpolation=cv2.INTER_CUBIC)
                # Get frame dimensions prior to resizing or scaling
                frame_height = frame_im.shape[0]
                frame_width = frame_im.shape[1]
                # Figure out what kind of resizing needs to be done
                if height and width:
                    frame_im = cv2.resize(
                        frame_im, (width, height), interpolation=cv2.INTER_CUBIC)
                elif height and not width:
                    # NOTE(review): this assigns the `width` parameter itself,
                    # so the value computed for the first frame is reused for
                    # all later frames (ditto `height` below) -- confirm this
                    # is intended.
                    factor = height / float(frame_height)
                    width = int(factor * frame_width)
                    frame_im = cv2.resize(
                        frame_im, (width, height), interpolation=cv2.INTER_CUBIC)
                elif width and not height:
                    factor = width / float(frame_width)
                    height = int(factor * frame_height)
                    frame_im = cv2.resize(
                        frame_im, (width, height), interpolation=cv2.INTER_CUBIC)
                elif scale:
                    frame_im = cv2.resize(
                        frame_im, (0, 0), fx=scale, fy=scale,
                        interpolation=cv2.INTER_CUBIC)
                cv2.imwrite(
                    get_and_create_path(file_path, output_dir),
                    frame_im, imwrite_param)
            else:
                # A failed read aborts this scene and flags the run as partial.
                completed = False
                break
            if progress_bar:
                progress_bar.update(1)

    if not completed:
        logger.error('Could not generate all output images.')

    return image_filenames
def get_used_http_ports() -> List[int]:
    """Returns list of ports, used by http servers in existing configs."""
    ports = []
    for run_config in get_run_configs().values():
        ports.append(run_config.http_port)
    return ports
def test_show_environment(session):
    """Session.show_environment() returns dict."""
    environment = session.show_environment()
    assert isinstance(environment, dict)
def get_img_from_fig(fig, dpi=180, color_cvt_flag=cv2.COLOR_BGR2RGB) -> np.ndarray:
    """Render a matplotlib figure into a numpy image array.

    Parameters
    ----------
    fig : plt.Figure
        Matplotlib figure, usually the result of plt.imshow()
    dpi : int, optional
        Dots per inches of the image to save. Note, that default matplotlib
        figsize is given in inches. Example: px, py = w * dpi, h * dpi pixels
        6.4 inches * 100 dpi = 640 pixels, by default 180
    color_cvt_flag : int, optional
        OpenCV cvtColor flag. to get grayscale image,
        use `cv2.COLOR_BGR2GRAY`, by default `cv2.COLOR_BGR2RGB`.

    Returns
    -------
    np.ndarray[np.uint8]
        Image array
    """
    # Render the figure to an in-memory PNG, then decode it with OpenCV.
    with io.BytesIO() as png_buffer:
        fig.savefig(png_buffer, format="png", dpi=dpi)
        raw_bytes = np.frombuffer(png_buffer.getvalue(), dtype=np.uint8)
    decoded = cv2.imdecode(raw_bytes, 1)
    return cv2.cvtColor(decoded, color_cvt_flag)
def split_data_by_target(data, target, num_data_per_target):
    """Select at most num_data_per_target rows per one-hot target class.

    Args:
        data: np.array [num_data, *data_dims]
        target: np.array [num_data, num_targets]; target[i] is a one hot
        num_data_per_target: int

    Returns:
        result_data: np.array [num_data_per_target * num_targets, *data_dims]
        result_target: np.array [num_data_per_target * num_targets, num_targets]
    """
    num_unique_targets = len(np.unique(target, axis=0))
    # Collapse one-hot rows into integer class labels.
    labels = np.dot(target, np.arange(num_unique_targets))
    data_parts, target_parts = [], []
    for class_id in range(num_unique_targets):
        mask = labels == class_id
        data_parts.append(data[mask][:num_data_per_target])
        target_parts.append(target[mask][:num_data_per_target])
    return np.concatenate(data_parts), np.concatenate(target_parts)
def test_sa_new_list_odd(test_sa):
    """
    function to ensure empty list to populate is available
    """
    # Exercise insert_shift_array with a non-integer value (2.5) on the
    # fixture's prepared list/number.
    test_sa.insert_shift_array(test_sa.test_list, test_sa.test_num, 2.5)
    # Assumes the fixture records results on itself: `zero` and `new_list`
    # are populated by insert_shift_array -- TODO confirm against fixture.
    assert test_sa.zero == 6
    assert test_sa.new_list == [3, 4, 5, 6, 7, 8]
def top_mutations(mutated_scores, initial_score, top_results=10):
    """Generate a list of up to top_results mutations that improve
    localization probability.

    Subtracts initial_score from the predicted score of every mutation and
    collects the mutations with the largest positive increase, sorted from
    larger to smaller increase. Mutations that do not improve the
    probability are never returned, so the result may contain fewer than
    top_results rows -- and is an empty pd.DataFrame when every mutation is
    detrimental.

    Args:
        mutated_scores: a pd.DataFrame with the probability predicted by the
            model for each mutation (rows) at each position (columns).
        initial_score: a float representing the probability predicted by the
            model for the initial sequence.
        top_results: an integer indicating the maximum number of mutations
            to return.

    Returns:
        top_res: a pd.DataFrame with columns Position, Mutation,
            Prob_increase and Target_probability.

    Raises:
        TypeError: if top_results is not an integer.
    """
    if not isinstance(top_results, int):
        raise TypeError("top results should be an integer")
    # Increase (or decrease) in probability relative to the original sequence.
    prob_change = mutated_scores - initial_score
    result_columns = ["Position", "Mutation", "Prob_increase", "Target_probability"]
    rows = []
    while len(rows) < top_results:
        # Stop cleanly if every candidate cell has already been consumed.
        if prob_change.isna().all().all():
            break
        # Column (position) holding the current global maximum increase...
        position_mut = prob_change.max().idxmax()
        # ...and the row (mutation) where that maximum occurs.
        mutation = prob_change.idxmax()[position_mut]
        pred_increase = prob_change.loc[mutation, position_mut]
        # Bug fix: the previous implementation appended the first
        # non-improving mutation before re-checking the loop condition,
        # contradicting the documented contract. Stop *before* adding it.
        if pred_increase <= 0:
            break
        rows.append(
            {
                "Position": position_mut,
                "Mutation": mutation,
                "Prob_increase": pred_increase,
                "Target_probability": mutated_scores.loc[mutation, position_mut],
            }
        )
        # Blank the consumed cell so the next iteration finds the runner-up.
        prob_change.loc[mutation, position_mut] = np.nan
    # Built via list + constructor: DataFrame.append was removed in pandas 2.x.
    return pd.DataFrame(rows, columns=result_columns)
def npmat4_to_pdmat4(npmat4):
    """
    # updated from cvtMat4
    convert numpy.2darray to LMatrix4 defined in Panda3d
    note: entries are passed in npmat4[column, row] order -- i.e. transposed
    relative to the numpy layout -- with (0, 0, 0, 1) as the last column
    :param npmat4: a 4x4 numpy ndarray homogeneous transform (rotation in
        [:3, :3], translation in [:3, 3])
    :return: a LMatrix4f object, see panda3d
    author: weiwei
    date: 20170322
    """
    return Mat4(npmat4[0, 0], npmat4[1, 0], npmat4[2, 0], 0, \
                npmat4[0, 1], npmat4[1, 1], npmat4[2, 1], 0, \
                npmat4[0, 2], npmat4[1, 2], npmat4[2, 2], 0, \
                npmat4[0, 3], npmat4[1, 3], npmat4[2, 3], 1)
def is_at_NWRC(url):
    """
    Checks that were on the NWRC network
    """
    # Any failure to reach the URL is treated as "not on the network".
    try:
        status = requests.get(url).status_code
    except Exception:
        status = 404
    return status == 200
def _key_press(event, params):
"""Handle key presses for the animation."""
if event.key == 'left':
params['pause'] = True
params['frame'] = max(params['frame'] - 1, 0)
elif event.key == 'right':
params['pause'] = True
params['frame'] = min(params['frame'] + 1, len(params['frames']) - 1) | 26,723 |
def lmo(x, radius):
    """Returns v with norm(v, self.p) <= r minimizing v*x"""
    def _fill_columnwise(mat, out):
        # For each column, place -radius * sign(entry) at the row holding
        # the largest absolute value; all other entries stay zero.
        max_rows = torch.argmax(torch.abs(mat), 0)
        for col in range(mat.shape[1]):
            row = max_rows[col]
            out[row, col] = -radius * torch.sign(mat[row, col])

    v = torch.zeros_like(x)
    ndim = len(x.shape)
    if ndim == 4:
        for i in range(x.shape[0]):
            for j in range(x.shape[1]):
                _fill_columnwise(x[i][j], v[i][j])
    elif ndim == 3:
        for i in range(x.shape[0]):
            _fill_columnwise(x[i], v[i])
    elif ndim == 2:
        _fill_columnwise(x, v)
    else:
        # Any other rank: a single vertex over the flattened tensor.
        flat_idx = torch.argmax(torch.abs(x))
        v.view(-1)[flat_idx] = -radius * torch.sign(x.view(-1)[flat_idx])
    return v
def test_user_class():
    """
    User supplied meta class.

    Verifies that metamodel_from_str builds a generic dynamic class when no
    user classes are given, and instantiates the supplied First/Second
    classes (preserving extra attributes and default values) when they are
    passed via `classes=`.
    """
    class First(object):
        "User class."
        def __init__(self, seconds, a, b, c):
            "Constructor must be without parameters."
            # Testing that additional attributes
            # are preserved.
            self.some_attr = 1
            self.seconds = seconds
            self.a = a
            self.b = b
            self.c = c
            for second in self.seconds:
                # Make sure seconds have already been instantiated
                assert hasattr(second, 'sec') and isinstance(second.sec, int)

    class Second(object):
        "User class"
        def __init__(self, parent, sec):
            self.parent = parent
            self.sec = sec

    modelstr = """
    first 34 45 65 "sdf" 45
    """

    # Without user classes the metamodel synthesizes its own First class.
    mm = metamodel_from_str(grammar)
    model = mm.model_from_str(modelstr)
    # Test that generic First class is created
    assert type(model).__name__ == "First"
    assert type(model) is not First

    mm = metamodel_from_str(grammar, classes=[First, Second])
    model = mm.model_from_str(modelstr)
    # Test that user class is instantiated
    assert type(model).__name__ == "First"
    assert type(model) is First
    # Check default attributes
    assert type(model.a) is list
    assert model.a == []
    assert type(model.b) is bool
    assert model.b is False
    # Check additional attributes
    assert model.some_attr == 1
def _transform_index(index, func):
"""
Apply function to all values found in index.
This includes transforming multiindex entries separately.
"""
if isinstance(index, MultiIndex):
items = [tuple(func(y) for y in x) for x in index]
return MultiIndex.from_tuples(items, names=index.names)
else:
items = [func(x) for x in index]
return Index(items, name=index.name) | 26,726 |
def add_to_cmake(list_file: Path, comp_path: Path):
    """Adds new component to CMakeLists.txt.

    Inserts an add_fprime_subdirectory() call either just before the first
    topology inclusion (a line containing "/Top/") or, when none exists, at
    the end of the file. Asks the user for confirmation before writing.

    Args:
        list_file: path to the CMakeLists.txt to modify
        comp_path: component path, relative to list_file's directory
    """
    print("[INFO] Found CMakeLists.txt at '{}'".format(list_file))
    with open(list_file, "r") as file_handle:
        lines = file_handle.readlines()
    topology_lines = [(line, text) for line, text in enumerate(lines) if "/Top/" in text]
    # Bug fix: the fallback index was len(topology_lines), which is always 0
    # here, so the line was inserted at the *top* of the file while the
    # prompt claimed "at end of file". Use len(lines) for a true append.
    line = len(lines)
    if topology_lines:
        line, text = topology_lines[0]
        print("[INFO] Topology inclusion '{}' found on line {}.".format(text.strip(), line + 1))
    if not confirm("Add component {} to {} {}?".format(comp_path, list_file, "at end of file" if not topology_lines else " before topology inclusion")):
        return
    addition = 'add_fprime_subdirectory("${{CMAKE_CURRENT_LIST_DIR}}/{}/")\n'.format(comp_path)
    lines.insert(line, addition)
    with open(list_file, "w") as file_handle:
        file_handle.write("".join(lines))
def extractQualiTeaTranslations(item):
    """
    # 'QualiTeaTranslations'
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip previews and titles with no usable chapter/volume/fragment info.
    if 'preview' in item['title'].lower() or not (chp or vol or frag):
        return None
    tags = item['tags']
    if 'Harry Potter and the Rise of the Ordinary Person' in tags:
        return None
    if 'Romance of Dragons and Snakes' in tags:
        return buildReleaseMessageWithType(item, 'Romance of Dragons and Snakes', vol, chp, frag=frag, postfix=postfix)
    return False
def feature_norm_ldc(df):
    """
    Process the features to obtain the standard metrics in LDC mode.

    Adds the HNAP, TCC, ICC and DCC columns in place and returns the same
    dataframe.
    """
    # Percentage of HNAC relative to the absolute ICC count.
    df['HNAP'] = df['HNAC'] / df['ICC_abs'] * 100
    # Volume-normalized cell counts.
    df['TCC'] = (df['ICC_abs'] + df['DCC_abs']) / df['VOL']
    df['ICC'] = df['ICC_abs'] / df['VOL']
    df['DCC'] = df['DCC_abs'] / df['VOL']
    return df
def configure():
    """Configure HIL."""
    # Apply the test-suite configuration, then load the extensions
    # registered with the config module.
    config_testsuite()
    config.load_extensions()
def dice_jaccard(y_true, y_pred, y_scores, shape, smooth=1, thr=None):
    """
    Compute Dice and Jaccard coefficients from bounding boxes.

    Args:
        y_true (ndarray): (N,4)-shaped array of groundtruth bounding boxes
            coordinates in xyxy format
        y_pred (ndarray): (N,4)-shaped array of predicted bounding boxes
            coordinates in xyxy format
        y_scores (ndarray): (N,)-shaped array of prediction scores
        shape (tuple): shape of the map, i.e. (h, w)
        smooth (int, optional): Smoothing factor to avoid ZeroDivisionError.
            Defaults to 1.
        thr (float, optional): Threshold to binarize predictions; if None,
            the soft version of the coefficients are computed. Defaults to None.

    Returns:
        tuple: The Dice and Jaccard coefficients.
    """
    # Rasterize the groundtruth boxes as a binary mask.
    gt_map = np.zeros(shape, dtype=np.float32)
    for x0, y0, x1, y1 in y_true.astype(int):
        gt_map[y0:y1 + 1, x0:x1 + 1] = 1.

    # Optionally drop low-confidence predictions.
    if thr is not None:
        keep = y_scores >= thr
        y_pred = y_pred[keep]
        y_scores = y_scores[keep]

    # Rasterize predictions, keeping the max score on overlaps.
    pred_map = np.zeros_like(gt_map)
    for (x0, y0, x1, y1), score in zip(y_pred.astype(int), y_scores):
        region = pred_map[y0:y1 + 1, x0:x1 + 1]
        pred_map[y0:y1 + 1, x0:x1 + 1] = np.maximum(region, score)

    intersection = np.sum(gt_map * pred_map)
    total = np.sum(gt_map) + np.sum(pred_map)
    union = total - intersection

    jaccard = (intersection + smooth) / (union + smooth)
    dice = 2. * (intersection + smooth) / (total + smooth)
    return dice.mean(), jaccard.mean()
def forward_pass(output_node, sorted_nodes):
    """
    Performs a forward pass through a list of sorted nodes.

    Arguments:
        `output_node`: A node in the graph, should be the output node
            (have no outgoing edges).
        `sorted_nodes`: A topologically sorted list of nodes.

    Returns the output Node's value.
    """
    # Topological order guarantees each node's inputs were computed first.
    for node in sorted_nodes:
        node.forward()
    return output_node.value
def qr_decomposition(q, r, iter, n):
    """
    Return Q and R matrices for iter number of iterations.
    """
    # Build an n x n transform that applies the hh_reflection of the
    # current column's subvector to the trailing submatrix only.
    v = column_convertor(r[iter:, iter])
    reflector = np.identity(n)
    reflector[iter:, iter:] = hh_reflection(v)
    # Apply on the left of R and accumulate on the right of Q.
    new_r = np.matmul(reflector, r)
    new_q = np.matmul(q, reflector)
    return new_q, new_r
def main(args=None):
    """Main entry point for `donatello`'s command-line interface.

    Parses options, then either factors a target DWORD into bad-char-free
    operands (`factor`) or encodes a full payload (`encode`). All domain
    errors are mapped to exit code 1; unexpected exceptions are re-raised.

    Args:
        args (List[str]): Custom arguments if you wish to override sys.argv.

    Returns:
        int: The exit code of the program.
    """
    try:
        init_colorama()
        opts = get_parsed_args(args)
        # Bad characters default to none; they are tracked both as bytes
        # and as a tuple of ints for the factoring routine.
        if opts.bad_chars is not None:
            bad_chars = _parse_bytes(opts.bad_chars, check_dups=True)
        else:
            bad_chars = b''
        bad_chars_as_ints = tuple(int(bc) for bc in bad_chars)
        if opts.max_factors is not None:
            max_factors = _parse_max_factors(opts.max_factors)
        else:
            max_factors = 2
        if opts.ops is not None:
            ops = _parse_ops(opts.ops)
        else:
            ops = IMPLEMENTED_OPS
        if opts.command not in ('factor', 'encode',):
            raise DonatelloConfigurationError(
                'must specify either `factor` or `encode`; `' + opts.command +
                '` is invalid')
        if opts.target == '-':
            # TODO: https://docs.python.org/3/library/fileinput.html
            # NOTE(review): stdin input is unimplemented -- `target` stays
            # unbound on this path and the later use would raise NameError.
            pass
        else:
            target = opts.target
        if opts.command == 'factor':
            value = _parse_target_hex(target)
            print_i('Attempting to factor target value ', format_dword(value))
            # Try the smallest factor counts first; stop at the first hit.
            for num_factors in range(2, max_factors+1):
                factors = factor_by_byte(
                    value, bad_chars_as_ints, usable_ops=ops,
                    num_factors=num_factors)
                if factors is not None:
                    print_i('Found factorization!')
                    res = [' 0x00000000']
                    for f in factors:
                        res.append('{0: <3}'.format(f.operator) + ' ' +
                                   format_dword(f.operand))
                    print('\n'.join(res))
                    break
            else:
                # for/else: no factor count succeeded.
                print_e('Unable to find any factors')
        elif opts.command == 'encode':
            payload = _parse_bytes(target)
            print_i('Attempting to encode payload...')
            asm = encode_x86_32(payload, bad_chars, max_factors=max_factors)
            print_i('Successfully encoded payload!')
            print(asm)
        return 0
    except (DonatelloCannotEncodeError, DonatelloNoPossibleNopsError) as e:
        print_e('Failed to factor/encode the specified target: ', e)
        return 1
    except DonatelloConfigurationError as e:
        print_e('Configuration error: ', e)
        return 1
    except DonatelloNoPresentBadCharactersError:
        print_e('No bad characters present in the specified payload; ',
                'use the -f/--force flag to bypass this check')
        return 1
    except DonatelloError as e:
        print_e('This should not be reached! See below for error.')
        print_e(e)
        return 1
    except Exception as e:
        print_e('Received unexpected exception; re-raising it.')
        raise e
def build_detection_train_loader(cfg, mapper=None):
    """
    Build the training data loader for detection models.

    A data loader is created by the following steps:

    1. Use the dataset names in config to query :class:`DatasetCatalog`, and
       obtain a list of dicts.
    2. Coordinate a random shuffle order shared among all processes (all GPUs).
    3. Each process spawns a few workers to process the dicts. Each worker will:
       * Map each metadata dict into another format to be consumed by the model.
       * Batch them by simply putting dicts into a list.

    The batched ``list[mapped_dict]`` is what this dataloader will yield.

    Args:
        cfg (CfgNode): the config
        mapper (callable): a callable which takes a sample (dict) from dataset and
            returns the format to be consumed by the model.
            By default it will be ``PlusDatasetMapper(cfg, dataset, True)``.

    Returns:
        an infinite iterator of training data

    Raises:
        AssertionError: if SOLVER.IMS_PER_BATCH is not a positive multiple of
            the distributed world size.
        ValueError: if DATALOADER.SAMPLER_TRAIN names an unknown sampler.
    """
    # One "worker" here means one distributed training process (one per GPU),
    # not a DataLoader worker thread.
    num_workers = get_world_size()
    images_per_batch = cfg.SOLVER.IMS_PER_BATCH
    assert (
        images_per_batch % num_workers == 0
    ), "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of workers ({}).".format(
        images_per_batch, num_workers
    )
    assert (
        images_per_batch >= num_workers
    ), "SOLVER.IMS_PER_BATCH ({}) must be larger than the number of workers ({}).".format(
        images_per_batch, num_workers
    )
    # Per-process batch size: the global batch is split evenly across processes.
    images_per_worker = images_per_batch // num_workers
    dataset_dicts = get_detection_dataset_dicts(
        cfg.DATASETS.TRAIN,
        filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
        min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
        if cfg.MODEL.KEYPOINT_ON
        else 0,
        proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
    )
    dataset = DatasetFromList(dataset_dicts, copy=False)
    if mapper is None:
        mapper = PlusDatasetMapper(cfg, dataset, True)
    dataset = MapDataset(dataset, mapper)
    sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
    logger = logging.getLogger(__name__)
    logger.info("Using training sampler {}".format(sampler_name))
    if sampler_name == "TrainingSampler":
        sampler = samplers.TrainingSampler(len(dataset))
    elif sampler_name == "RepeatFactorTrainingSampler":
        # Repeat-factor sampling oversamples images whose categories are rare,
        # controlled by DATALOADER.REPEAT_THRESHOLD.
        sampler = samplers.RepeatFactorTrainingSampler(
            dataset_dicts, cfg.DATALOADER.REPEAT_THRESHOLD
        )
    else:
        raise ValueError("Unknown training sampler: {}".format(sampler_name))
    if cfg.DATALOADER.ASPECT_RATIO_GROUPING:
        # The DataLoader yields individual (unbatched) samples; batching into
        # groups of similar aspect ratio is done by AspectRatioGroupedDataset.
        data_loader = torch.utils.data.DataLoader(
            dataset,
            sampler=sampler,
            num_workers=cfg.DATALOADER.NUM_WORKERS,
            batch_sampler=None,
            collate_fn=operator.itemgetter(0),  # don't batch, but yield individual elements
            worker_init_fn=worker_init_reset_seed,
        )  # yield individual mapped dict
        data_loader = AspectRatioGroupedDataset(data_loader, images_per_worker)
    else:
        batch_sampler = torch.utils.data.sampler.BatchSampler(
            sampler, images_per_worker, drop_last=True
        )
        # drop_last so the batch always have the same size
        data_loader = torch.utils.data.DataLoader(
            dataset,
            num_workers=cfg.DATALOADER.NUM_WORKERS,
            batch_sampler=batch_sampler,
            collate_fn=trivial_batch_collator,
            worker_init_fn=worker_init_reset_seed,
        )
    return data_loader
def cond_scatter(ary, indexes, values, mask):
    """
    scatter(ary, indexes, values, mask)

    Scatter 'values' into 'ary' selected by 'indexes' where 'mask' is true.
    The values of 'indexes' are absolute indexes into a flattened 'ary'.
    The shapes of 'indexes', 'values', and 'mask' must be equal.

    Parameters
    ----------
    ary : array_like
        The target array to write the values to (modified in place).
    indexes : array_like, interpreted as integers
        Array or list of indexes that will be written to in 'ary'
    values : array_like
        Values to write into 'ary'
    mask : array_like, interpreted as booleans
        A mask that specifies which indexes and values to include and exclude
    """
    from . import _bh
    indexes = array_manipulation.flatten(array_create.array(indexes, dtype=numpy.uint64), always_copy=False)
    values = array_manipulation.flatten(array_create.array(values, dtype=ary.dtype), always_copy=False)
    # Bug fix: `numpy.bool` was a deprecated alias of the builtin `bool` and
    # was removed in NumPy 1.24; use the `numpy.bool_` scalar type instead.
    mask = array_manipulation.flatten(array_create.array(mask, dtype=numpy.bool_), always_copy=False)
    assert (indexes.shape == values.shape and values.shape == mask.shape)
    # Nothing to scatter: empty target or no indexes.
    if ary.size == 0 or indexes.size == 0:
        return
    # In order to ensure a contiguous array, we do the scatter on a flatten copy
    flat = array_manipulation.flatten(ary, always_copy=True)
    _bh.ufunc(_info.op['cond_scatter']['id'], (flat, values, indexes, mask))
    ary[...] = flat.reshape(ary.shape)
def discRect(radius,w,l,pos,gap,layerRect,layerCircle,layer):
    """
    Draw a disc recessed into a rectangle.

    The disc sits inside the rectangle separated from it by *gap* along its
    perimeter, with part of the disc hanging past the rectangle edge so it
    can couple to a bus waveguide. The subtraction of the gap-enlarged disc
    from the rectangle is delegated to subCircle(...).

    radius: radius of the disc
    w: width of the rectangle (vertical)
    l: length of the rectangle (horizontal)
    pos: (x, y) extra offset added to the default disc placement (which
        recesses the disc by *gap* and allows a shift along the length)
    gap: gap surrounding the disc
    layerRect: layer the rectangle is written on (the returned disc is
        drawn on this layer as well)
    layerCircle: scratch layer used temporarily for the boolean subtraction
    layer: forwarded to subCircle(...)

    Returns the subtracted rectangle and the disc.
    """
    grown_radius = radius + gap
    offset_x, offset_y = pos
    # Default placement: tangent to the rectangle's top edge, pushed out by the gap.
    pos = (offset_x, w/2 - radius + offset_y + gap)
    print('pos: '+str(pos))
    rect_with_hole = subCircle(grown_radius, w, l, pos, layerRect, layerCircle, layer)
    # Draw the actual disc on the rectangle's layer.
    disc = gdspy.Round(pos, radius, number_of_points=199, **layerRect)
    return rect_with_hole, disc
def sub_factory():
    """Subscript text: <pre>H[sub]2[/sub]O</pre><br />
    Example:<br />
    H[sub]2[/sub]O
    """
    # NOTE: the docstring above is rendered as user-facing bbcode help text,
    # so its wording is kept verbatim.
    formatter = make_simple_formatter("sub", "<sub>%(value)s</sub>")
    return formatter, {}
def train_test_split(
    structures: list, targets: list, train_frac: float = 0.8
) -> Tuple[Tuple[list, list], Tuple[list, list]]:
    """Partition parallel lists of structures and targets into train/test sets.

    The first ``floor(len(structures) * train_frac)`` entries of each list form
    the training subset; the remainder form the testing subset.
    """
    split_at = floor(len(structures) * train_frac)
    train = (structures[:split_at], targets[:split_at])
    test = (structures[split_at:], targets[split_at:])
    return train, test
def reset_password(token):
    """
    Handles the reset password process.

    Renders the reset-password form, verifies the submitted token for the
    user matching the submitted e-mail address and, when the token is valid
    and unexpired, stores the new password and redirects to the login page.
    """
    if not current_user.is_anonymous():
        return redirect(url_for("forum.index"))
    form = ResetPasswordForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        # Bug fix: guard against an unknown e-mail address. The original code
        # called verify_reset_token() on None, raising AttributeError.
        if user is None:
            flash(("Your password token is invalid."), "danger")
            return redirect(url_for("auth.forgot_password"))
        expired, invalid, data = user.verify_reset_token(form.token.data)
        if invalid:
            flash(("Your password token is invalid."), "danger")
            return redirect(url_for("auth.forgot_password"))
        if expired:
            flash(("Your password is expired."), "danger")
            return redirect(url_for("auth.forgot_password"))
        if data:
            user.password = form.password.data
            user.save()
            flash(("Your password has been updated."), "success")
            return redirect(url_for("auth.login"))
    form.token.data = token
    return render_template("auth/reset_password.html", form=form)
def writePlist(path, data):
    """Write *data* to a property-list file, merging with existing content.

    If *path* already exists its current entries are loaded first and then
    updated with *data*, so keys not present in *data* survive.
    """
    if Path(path).exists():
        merged = readPlist(path)
        merged.update(data)
    else:
        merged = data
    # DEPRECATED (module-level flag) selects between the modern
    # plistlib.dump() API and the legacy plistlib.writePlist() API —
    # presumably set from the installed plistlib version; confirm upstream.
    if DEPRECATED:
        with open(path, 'wb') as stream:
            plistlib.dump(merged, stream)
    else:
        plistlib.writePlist(merged, path)
def parse():
    """
    Format account information into a table and print it.

    Pulls liquidatable Compound accounts from the API, skips accounts whose
    estimated liquidation profit is below the configured threshold, prints a
    summary table and, when an account's single supply/borrow token pair
    matches the configured pair, fires a liquidation transaction.
    """
    x = prettytable.PrettyTable()
    x.field_names = ["Address", "Health", "B. ETH", "B.Tokens", "Supply", "Estimated profit", "On Chain Liquidity"]
    req = requests.get("https://min-api.cryptocompare.com/data/price?fsym=ETH&tsyms=USD").json()
    account_list = api()
    eth_price = req
    usd_eth = float(eth_price["USD"])
    for account in account_list["accounts"]:
        balance2 = ''
        balance3 = ''
        borrow_tokens = {}
        supply_tokens = {}
        address = account["address"]
        onchainliquidity = getAccountLiquidity(address)
        try:
            health = float(account["health"]["value"])
        except Exception:
            # Accounts without a parsable health entry are treated as health 0.
            health = 0
        beth = float(account["total_borrow_value_in_eth"]["value"])
        beth_format = "{:.8f} ETH".format(round(beth, 8)) + "\n" + colored(
            "{:.3f}".format(round(usd_eth * beth, 3)) + "$",
            'green')
        # Estimated profit = half the borrow value at the 5% incentive;
        # skip accounts below the configured profit threshold.
        if (usd_eth * beth) / 2 * 0.05 < keys['profit']:
            continue
        estimated_p = "{:.3f}".format((usd_eth * beth) / 2 * 0.05) + "$"
        tokens = account["tokens"]
        for token in tokens:
            balance_borrow = token["borrow_balance_underlying"]["value"]
            balance_supply = token["supply_balance_underlying"]["value"]
            token_symbol = token['symbol']
            if float(balance_supply) > 0:
                bresult_supply = "{:.8f} ".format(round(float(balance_supply), 8)) + token_symbol
                balance3 += bresult_supply + "\n"
                supply_tokens[token_symbol] = round(float(balance_supply), 8)
            if float(balance_borrow) > 0:
                bresult_borrow = "{:.8f} ".format(round(float(balance_borrow), 8)) + token_symbol
                balance2 += bresult_borrow + "\n"
                borrow_tokens[token_symbol] = round(float(balance_borrow), 8)
        # Bug fix: dict.keys()/.values() are views in Python 3 and do not
        # support indexing; the original `supply_tokens.keys()[0]` raised
        # TypeError. Materialize the symbols as lists first.
        supply_syms = list(supply_tokens.keys())
        borrow_syms = list(borrow_tokens.keys())
        if len(supply_syms) == 1 and supply_syms[0] == keys['supply_token'] and \
                len(borrow_syms) == 1 and borrow_syms[0] == keys['borrow_token']:
            if borrow_syms[0] == "cETH":
                tx_hash = CEtherliquidateBorrow(address, tokenToAddress(supply_syms[0]))
            else:
                tx_hash = CErc20liquidateBorrow(tokenToAddress(borrow_syms[0]), address, borrow_tokens[borrow_syms[0]], tokenToAddress(supply_syms[0]))
            print("交易ID: %s" % tx_hash)
        x.add_row(
            [address, round(health, 3), beth_format, balance2, balance3, colored(estimated_p, 'green', attrs=['bold']),
             onchainliquidity])
    print(x)
    time.sleep(20)
async def config_doc(ctx, items):
    """Print documentation for the given configuration *items*.

    The configuration object is taken from ``ctx.obj`` and its ``doc``
    method is called with the items unpacked as positional arguments.
    """
    ctx.obj.doc(*items)
def delete(vol_path):
    """Remove the kv store object for the volume identified by *vol_path*.

    Returns True if the deletion succeeded, False otherwise.
    """
    return kvESX.delete(vol_path)
def auto_adjust_xlsx_column_width(df, writer, sheet_name, margin=3, length_factor=1.0, decimals=3, index=True):
    """
    Auto adjust column width to fit content in a XLSX exported from a pandas DataFrame.

    Works with both the "openpyxl" and the "xlsxwriter" writer engines.

    How to use:
    ```
    with pd.ExcelWriter(filename) as writer:
        df.to_excel(writer, sheet_name="MySheet")
        auto_adjust_xlsx_column_width(df, writer, sheet_name="MySheet", margin=3)
    ```
    :param DataFrame df: The DataFrame used to export the XLSX.
    :param pd.ExcelWriter writer: The pandas exporter (engine "xlsxwriter" or "openpyxl")
    :param str sheet_name: The name of the sheet
    :param int margin: How much extra space (beyond the maximum size of the string)
    :param int length_factor: The factor to apply to the character length to obtain the column width.
    :param int decimals: The number of decimal places to assume for floats: Should be the same as the number of decimals displayed in the XLSX
    :param bool index: Whether the DataFrame's index is inserted as a separate column (if index=False in df.to_xlsx() set index=False here!)
    :raises ValueError: if the writer's engine is neither openpyxl nor xlsxwriter
    """
    # Identify the backend from the workbook object's module name.
    writer_type = type(writer.book).__module__  # e.g. 'xlsxwriter.workbook' or 'openpyxl.workbook.workbook'
    is_openpyxl = writer_type.startswith("openpyxl")
    is_xlsxwriter = writer_type.startswith("xlsxwriter")
    _to_str = functools.partial(_to_str_for_length, decimals=decimals)
    """
    Like str() but rounds decimals to predefined length
    """
    if not is_openpyxl and not is_xlsxwriter:
        raise ValueError("Only openpyxl and xlsxwriter are supported as backends, not " + writer_type)
    sheet = writer.sheets[sheet_name]
    # Compute & set column width for each column
    for column_name in df.columns:
        # Width candidate = max over cell contents and the header itself.
        column_length = max(df[column_name].apply(_to_str).map(text_length).max(), text_length(column_name))
        # Get index of column in XLSX
        # Column index is +1 if we also export the index column
        col_idx = df.columns.get_loc(column_name)
        if index:
            col_idx += 1
        # Set width of column to (column_length + margin)
        if is_openpyxl:
            # openpyxl addresses columns by letter and is 1-based.
            sheet.column_dimensions[openpyxl.utils.cell.get_column_letter(col_idx + 1)].width = column_length * length_factor + margin
        else:  # is_xlsxwriter
            sheet.set_column(col_idx, col_idx, column_length * length_factor + margin)
    # Compute column width of index column (if enabled)
    if index:  # If the index column is being exported
        index_length = max(df.index.map(_to_str).map(text_length).max(), text_length(df.index.name))
        if is_openpyxl:
            sheet.column_dimensions["A"].width = index_length * length_factor + margin
        else:  # is_xlsxwriter
            sheet.set_column(0, 0, index_length * length_factor + margin)
def insert_event(events: List[str], service: Resource, calendar_id: str) -> None:
    """Insert *events* into the calendar identified by *calendar_id*.

    All inserts are collected into a single batched HTTP request and then
    executed at once.
    """
    batch = service.new_batch_http_request()
    for event in events:
        request = service.events().insert(calendarId=calendar_id, body=event)
        batch.add(request)
    batch.execute()
def get_tablespace_data(tablespace_path, db_owner):
    """Build the request payload for creating a test tablespace.

    The tablespace gets a unique random name derived from a UUID, is located
    at *tablespace_path*, is owned by *db_owner*, and grants *db_owner* the
    CREATE privilege on it.
    """
    unique_name = "test_%s" % str(uuid.uuid4())[1:8]
    create_privilege = {
        "privilege_type": "C",
        "privilege": True,
        "with_grant": False
    }
    acl_entry = {
        "grantee": db_owner,
        "grantor": db_owner,
        "privileges": [create_privilege]
    }
    return {
        "name": unique_name,
        "seclabels": [],
        "spcacl": [acl_entry],
        "spclocation": tablespace_path,
        "spcoptions": [],
        "spcuser": db_owner
    }
def join_chunks(chunks):
    """Flatten one level of nesting in *chunks*.

    Empties all sub-lists back into a single flat list so the result can be
    split apart again by split_chunks(); chunks looks like
    [[t,t,t],[t,t],[f,f,f],[t]].
    """
    flattened = []
    for sublist in chunks:
        flattened.extend(sublist)
    return flattened
def urls_equal(url1, url2):
    """
    Compare two URLObjects, without regard to the order of their query strings.
    """
    # Compare the non-query portions first, preserving the original
    # short-circuit behavior; only equal bases get their queries compared.
    if url1.without_query() != url2.without_query():
        return False
    return url1.query_dict == url2.query_dict
def bytes_to_ints(bs):
    """
    Combine consecutive big-endian byte pairs into 16-bit integers.

    >>> bytes_to_ints([1, 0, 2, 1])
    [256, 513]
    >>> bytes_to_ints([1, 0, 1])
    Traceback (most recent call last):
    ...
    ValueError: Odd number of bytes.
    >>> bytes_to_ints([])
    []
    """
    if len(bs) % 2:
        raise ValueError("Odd number of bytes.")
    high_bytes = bs[::2]
    low_bytes = bs[1::2]
    return [(hi << 8) + lo for hi, lo in zip(high_bytes, low_bytes)]
def validate_cesion_and_dte_montos(cesion_value: int, dte_value: int) -> None:
    """
    Validate amounts of the "cesión" and its associated DTE.

    :raises ValueError: if the "cesión" amount exceeds the DTE amount.
    """
    if cesion_value > dte_value:
        raise ValueError('Value of "cesión" must be <= value of DTE.', cesion_value, dte_value)
def create_default_yaml(config_file):
    """Create and save the default configuration file.

    Creates the configuration directory (Config.IMGDB_CONFIG_HOME) when it is
    missing, writes Config.DEFAULT_CONFIG to *config_file* as YAML, and
    returns the result of parse_config_yaml() on the default config.
    Returns 0 when the directory or the file could not be created.
    """
    config_file_path = config_file
    imgdb_config_dir = Config.IMGDB_CONFIG_HOME
    if not imgdb_config_dir.is_dir():
        try:
            imgdb_config_dir.mkdir(parents=True, exist_ok=True)
        except Exception as e:
            # Deliberately broad: any failure here (permissions, read-only
            # filesystem, ...) is reported to the user and aborts with 0.
            logging.critical(
                "Something went wrong while trying to create"
                " the configuration directory!"
            )
            logging.debug("Error: %s" % e)
            click.echo(
                Tcolors.FAIL + "Something went wrong while trying to create"
                " the configuration directory!" + Tcolors.ENDC
            )
            return 0
    try:
        with open(config_file_path, "w", encoding="utf-8") as config:
            yaml.safe_dump(
                Config.DEFAULT_CONFIG,
                config,
                encoding="utf-8",
                allow_unicode=True,
                default_flow_style=False,
            )
        click.echo(
            Tcolors.OK_GREEN
            + "➜ The configuration file: %s \nhas been created successfully!"
            % config_file_path
            + Tcolors.ENDC
        )
    except Exception as e:
        logging.critical(
            "Something went wrong while trying to save"
            " the program's configuration file!"
        )
        logging.debug("Error: %s" % e)
        click.echo(
            Tcolors.FAIL + "Something went wrong while trying to save"
            " the program's configuration file!" + Tcolors.ENDC
        )
        return 0
    return parse_config_yaml(Config.DEFAULT_CONFIG, first_run=True)
def canRun(page):
    """ Returns True if the given check page is still set to "Run";
    otherwise, returns false. Accepts one required argument, "page",
    the title of the on-wiki check page.
    """
    print("Checking checkpage.")
    checkpage = site.Pages[page]
    if checkpage.text() == "Run":
        print("We're good!")
        return True
    return False
def parse_configs_for_multis(conf_list):
    """
    Scan condor config files for multi-line ("@=end ... @end") configurations.

    Args:
        conf_list: iterable of condor config file paths, e.g. the output of
            `condor_config_val -config`, one path per entry

    Returns:
        dict: maps the first line of each multi-line config (its name) to the
        rest of the block with its original formatting preserved. Example —
        this paragraph in a condor configuration::

            JOB_ROUTER_CREATE_IDTOKEN_atlas @=end
                lifetime = 900
                scope = "ADVERTISE_STARTD, ADVERTISE_MASTER, READ"
                dir = "$(LOCAL_DIR)/jrtokens"
                filename = "ce_atlas.idtoken"
                owner = "atlas"
            @end

        generates an entry whose key is "JOB_ROUTER_CREATE_IDTOKEN_atlas" and
        whose value keeps the lines and line returns intact, so they can be
        rendered into frontend.condor_config properly (unlike the output of
        condor_config_val --dump).

    KNOWN PROBLEM: if a condor config has two multi-line configs with the
    same name and different lines, the generated config file may be
    incorrect. The condor config is probably incorrect as well :)
    """
    multi = {}
    for conf_path in conf_list:
        conf_path = conf_path.strip()
        if not os.path.exists(conf_path):
            continue
        with open(conf_path) as conf_fd:
            conf_lines = conf_fd.readlines()
        multi.update(find_multilines(conf_lines))
    return multi
def _bgp_predict_wrapper(model, *args, **kwargs):
"""
Just to ensure that the outgoing shapes are right (i.e. 2D).
"""
mean, cov = model.predict_y(*args, **kwargs)
if len(mean.shape) == 1:
mean = mean[:, None]
if len(cov.shape) == 1:
cov = cov[:, None]
return mean, cov | 26,755 |
def create_waninterface(config_waninterface, waninterfaces_n2id, site_id):
    """
    Create a WAN Interface
    :param config_waninterface: WAN Interface config dict
    :param waninterfaces_n2id: WAN Interface Name to ID dict (updated in
        place with the newly created interface)
    :param site_id: Site ID to use
    :return: New WAN Interface ID
    """
    # make a copy of waninterface to modify
    waninterface_template = copy.deepcopy(config_waninterface)
    # perform name -> ID lookups
    # NOTE(review): wannetworks_n2id / waninterfacelabels_n2id are
    # module-level name-to-ID caches populated elsewhere in this file.
    name_lookup_in_template(waninterface_template, 'network_id', wannetworks_n2id)
    name_lookup_in_template(waninterface_template, 'label_id', waninterfacelabels_n2id)
    local_debug("WANINTERFACE TEMPLATE: " + str(json.dumps(waninterface_template, indent=4)))
    # create waninterface via the API; throw_error aborts the run on failure.
    waninterface_resp = sdk.post.waninterfaces(site_id, waninterface_template)
    if not waninterface_resp.cgx_status:
        throw_error("Waninterface creation failed: ", waninterface_resp)
    waninterface_name = waninterface_resp.cgx_content.get('name')
    waninterface_id = waninterface_resp.cgx_content.get('id')
    # Both attributes are required to update the cache below.
    if not waninterface_name or not waninterface_id:
        throw_error("Unable to determine waninterface attributes (Name: {0}, ID {1})..".format(waninterface_name,
                                                                                               waninterface_id))
    output_message("   Created waninterface {0}.".format(waninterface_name))
    # update caches
    waninterfaces_n2id[waninterface_name] = waninterface_id
    return waninterface_id
def max_pool(images, imgshp, maxpoolshp):
    """
    Implements a max pooling layer

    Takes as input a 2D tensor of shape batch_size x img_size and performs
    max pooling. Max pooling downsamples by taking the max value in a given
    area, here defined by maxpoolshp. Outputs a 2D tensor of shape
    batch_size x output_size.

    Parameters
    ----------
    images : 2D tensor
        Tensor containing images on which to apply convolution. Assumed to be \
        of shape `batch_size x img_size`
    imgshp : tuple
        Tuple containing image dimensions: either (height, width) or
        (nfeatures, height, width)
    maxpoolshp : tuple
        Tuple containing shape of area to max pool over

    Returns
    -------
    out1 : 2D symbolic tensor
        Max-pooled output of shape `batch_size x output_size`
    out2 : tuple
        Logical shape of the (per-feature) output
    """
    N = numpy
    poolsize = N.int64(N.prod(maxpoolshp))
    # imgshp contains either 2 entries (height,width) or 3 (nfeatures,h,w)
    # in the first case, default nfeatures to 1
    if N.size(imgshp)==2:
        imgshp = (1,)+imgshp
    # construct indices and index pointers for sparse matrix, which, when multiplied
    # with input images will generate a stack of image patches
    indices, indptr, spmat_shape, sptype, outshp = \
            convolution_indices.conv_eval(imgshp, maxpoolshp, maxpoolshp, mode='valid')
    logger.info('XXXXXXXXXXXXXXXX MAX POOLING LAYER XXXXXXXXXXXXXXXXXXXX')
    logger.info('imgshp = {0}'.format(imgshp))
    logger.info('maxpoolshp = {0}'.format(maxpoolshp))
    logger.info('outshp = {0}'.format(outshp))
    # build sparse matrix, then generate stack of image patches
    csc = theano.sparse.CSM(sptype)(N.ones(indices.size), indices, indptr, spmat_shape)
    patches = sparse.structured_dot(csc, images.T).T
    # reshape the patch matrix to (batch*npatches, nfeatures, poolsize) so the
    # max over the pooling window can be taken along the last axis
    pshape = tensor.stack(images.shape[0]*\
                            tensor.as_tensor(N.prod(outshp)),
                          tensor.as_tensor(imgshp[0]),
                          tensor.as_tensor(poolsize))
    patch_stack = tensor.reshape(patches, pshape, ndim=3);
    out1 = tensor.max(patch_stack, axis=2)
    # regroup the pooled values per image, then move the feature axis last
    # before flattening back to (batch_size, output_size)
    pshape = tensor.stack(images.shape[0],
                          tensor.as_tensor(N.prod(outshp)),
                          tensor.as_tensor(imgshp[0]))
    out2 = tensor.reshape(out1, pshape, ndim=3);
    out3 = tensor.DimShuffle((False,)*3, (0,2,1))(out2)
    return tensor.flatten(out3,2), outshp
def rmean(A):
    """ Removes time-mean of llc_4320 3d fields; axis=2 is time"""
    ix, jx, kx = A.shape  # unpack to enforce a 3-D input
    # Broadcast the time-mean back along the time axis and subtract it.
    time_mean = A.mean(axis=2).reshape(ix, jx, 1)
    return A - time_mean
def default_pruning_settings():
    """
    :return: the default pruning settings for optimizing a model
    """
    # TODO: update mask type based on quantization
    # TODO: dynamically choose sparsity level
    return PruningSettings(
        "unstructured",  # mask_type
        0.85,            # sparsity
        1.0,             # balance_perf_loss
        0.4,             # filter_min_sparsity
        0.75,            # filter_min_perf_gain
        -1.0,            # filter_min_recovery
    )
def claim_node(vars=None):
    """
    Claim the node connected to the given serial port
    (Get cloud credentials)

    :param vars: dict of CLI arguments with keys `port` (serial port), `mac`,
                 `addr` (flash address), `platform` and `parser`;
                 defaults to `None`
    :type vars: dict | None
    :return: None on Success (HTTP/claim errors are logged, not raised)
    :rtype: None
    """
    try:
        # No selector given at all: print CLI help and bail out.
        if not vars['port'] and not vars['mac'] and not vars['addr'] and not vars['platform']:
            sys.exit(vars['parser'].print_help())
        if vars['addr'] and not vars['port'] and not vars['platform']:
            sys.exit('Invalid. <port> or --platform argument is needed.')
        if vars['port']:
            # Port alone (no mac/platform overrides): claim directly.
            if not vars['mac'] and not vars['platform']:
                claim(port=vars['port'], node_platform=vars['platform'], mac_addr=vars['mac'], flash_address=vars['addr'])
                return
            # When overriding, mac and platform must be given together.
            if (vars['mac'] and not vars['platform']):
                sys.exit("Invalid. --platform argument needed.")
            if (not vars['mac'] and vars['platform']):
                sys.exit("Invalid. --mac argument needed.")
        if vars['mac']:
            # Bug fix: re.match() only anchors at the start, so the previous
            # pattern r'([0-9A-F]:?){12}' accepted trailing garbage and
            # over-long strings while rejecting lowercase hex. Validate the
            # whole string: six hex pairs, optionally colon-separated,
            # case-insensitive.
            if not re.fullmatch(r'(?:[0-9A-Fa-f]{2}:?){5}[0-9A-Fa-f]{2}', vars['mac']):
                sys.exit('Invalid MAC address.')
            claim(port=vars['port'], node_platform=vars['platform'], mac_addr=vars['mac'], flash_address=vars['addr'])
    except Exception as claim_err:
        log.error(claim_err)
        return
def build_request_url(base_url, sub_url, query_type, api_key, value):
    """
    Function that creates the url and parameters
    :param base_url: The base URL from the app.config
    :param sub_url: The sub URL from the app.config file. If not defined it will be: "v1/pay-as-you-go/"
    :param query_type: The query type of the request
    :param api_key: The api key from the app.config
    :param value: The artifact value
    :raises ValueError: if query_type is not a supported APIVoid request type
    :return: The fully assembled request URL (key and params appended)
    :rtype: str
    """
    # Setup the mapping dict for APIVoid API call types and the url and params for the requests call.
    url_map = {
        "IP Reputation": {
            "url": "iprep",
            "params": {
                "ip": value
            }
        },
        "Domain Reputation": {
            "url": "domainbl",
            "params": {
                "host": value
            }
        },
        "DNS Lookup": {
            "url": "dnslookup",
            "params": {
                "action": "dns-a",
                "host": value
            }
        },
        "Email Verify": {
            "url": "emailverify",
            "params": {
                "email": value
            }
        },
        "ThreatLog": {
            "url": "threatlog",
            "params": {
                "host": value
            }
        },
        "SSL Info": {
            "url": "sslinfo",
            "params": {
                "host": value
            }
        },
        "URL Reputation": {
            "url": "urlrep",
            "params": {
                "url": url_encode(value.encode('utf8')) if isinstance(value, str) else value
            }
        },
        "selftest": {
            "url": "sitetrust",
            "params": {
                "stats": value
            }
        },
    }
    request_type = url_map.get(query_type)
    if request_type is None:
        # Bug fix: dict.get() returns None for unknown keys, so the previous
        # `except KeyError` handler could never fire (the failure surfaced as
        # an AttributeError on None instead). Also interpolate the message
        # properly: ValueError does not apply %-formatting to its arguments.
        raise ValueError("%s is an Invalid IP Void request type or it's not supported" % query_type)
    request_url = request_type["url"]
    request_params = request_type["params"]
    # Join the base url, the request type and the sub url
    the_url = "/".join((base_url, request_url, sub_url))
    # Append the api key
    the_url = u"{0}?key={1}".format(the_url, api_key)
    # Append the params
    for (k, v) in request_params.items():
        the_url = u"{0}&{1}={2}".format(the_url, k, v)
    LOG.info("Using URL: %s", the_url)
    return the_url
def snake_to_camel(action_str):
    """
    Convert a space-separated action phrase to its CamelCase action name,
    re-appending the magnitude suffix for look/rotate/move actions
    (e.g. "move ahead" -> "MoveAhead_25").
    """
    special_cases = {
        "toggle object on": "ToggleObjectOn",
        "toggle object off": "ToggleObjectOff",
    }
    if action_str in special_cases:
        return special_cases[action_str]

    def _join_words(match):
        # Capitalize the word before the space and the letter right after it.
        word, next_letter = match.group(1), match.group(2)
        return word[0].upper() + word[1:] + next_letter.upper()

    action_str = re.sub(r'(.*?) ([a-zA-Z])', _join_words, action_str)
    # Re-attach the fixed magnitude suffixes used by the environment.
    for prefix, suffix in (("Look", "_15"), ("Rotate", "_90"), ("Move", "_25")):
        if action_str.startswith(prefix):
            action_str += suffix
    return action_str[0].upper() + action_str[1:]
def todayDate() -> datetime.date:
    """Return the current local date, e.g. datetime.date(2020, 6, 28)."""
    return datetime.date.today()
def lookup_facade(name, version):
    """
    Return the facade *name* from the newest client<version>.py module that
    provides it, searching downward from `version` to 1.

    :raises ImportError: when no client version defines the facade.
    """
    for candidate_version in range(int(version), 0, -1):
        try:
            return getattr(CLIENTS[str(candidate_version)], name)
        except (KeyError, AttributeError):
            # Either this client version does not exist or it lacks the
            # facade; fall back to the next older version.
            continue
    raise ImportError("No supported version for facade: "
                      "{}".format(name))
def create_service(netUrl, gwUrl, attributes, token):
    """
    Create NFN Service in MOP Environment.
    :param netUrl: REST Url endpoint for network
    :param gwUrl: REST Url endpoint for gateway
    :param attributes: service parameters dict: `type` ("host" or "network"),
        optional `name`, `gateway`, and either `ip`/`port` (host) or
        `netIp`/`netCidr` (network)
    :param token: session token for NF Console
    :return serviceUrl, serviceName: created service details
    """
    url = netUrl+'/services'
    # NOTE(review): assumes the gateway id is the 9th path segment of
    # gwUrl — confirm against the URL layout produced by the gateway API.
    gwId = gwUrl.split('/')[8]
    if attributes['type'] == 'host':
        # checking if service name is provided
        if attributes['name']:
            serviceName = attributes['name']
        else:
            serviceName = attributes['gateway']+'--'+str(attributes['ip'])+'--'+str(attributes['port'])
        # Host service: a single IP/port intercepted and forwarded.
        data = {
            "serviceClass": "CS",
            "name": serviceName,
            "serviceInterceptType": "IP",
            "serviceType": "ALL",
            "endpointId": gwId,
            "pbrType": "WAN",
            "dataInterleaving": "NO",
            "transparency": "NO",
            "networkIp": attributes['ip'],
            "networkFirstPort": attributes['port'],
            "networkLastPort": attributes['port'],
            "interceptIp": attributes['ip'],
            "interceptFirstPort": attributes['port'],
            "interceptLastPort": attributes['port']
        }
    if attributes['type'] == 'network':
        # checking if service name is provided
        if attributes['name']:
            serviceName = attributes['name']
        else:
            serviceName = attributes['gateway']+'--'+str(attributes['netIp'])+'--'+str(attributes['netCidr'])
        # Network (gateway) service: a whole CIDR block routed via the gateway.
        data = {
            "serviceClass": "GW",
            "name": serviceName,
            "serviceInterceptType": "IP",
            "serviceType": "ALL",
            "endpointId": gwId,
            "lowLatency": "NO",
            "dataInterleaving": "NO",
            "transparency": "NO",
            "multicast": "OFF",
            "dnsOptions": "NONE",
            "icmpTunnel": "YES",
            "cryptoLevel": "STRONG",
            "permanentConnection": "YES",
            "collectionLocation": "BOTH",
            "pbrType": "WAN",
            "rateSmoothing": "NO",
            "gatewayClusterId": None,
            "interceptIp": attributes['netIp'],
            "gatewayIp": attributes['netIp'],
            "gatewayCidrBlock": attributes['netCidr'],
            "localNetworkGateway": "YES"
        }
    returnData = nfreq((url, data), "post", token)
    serviceUrl = returnData['_links']['self']['href']
    # Brief pause to let the console settle before subsequent calls.
    time.sleep(1)
    return serviceUrl, serviceName
def _drawBlandAltman(mean, diff, md, sd, percentage, limitOfAgreement, confidenceIntervals, detrend, title, ax, figureSize, dpi, savePath, figureFormat, meanColour, loaColour, pointColour):
    """
    Sub function to draw the Bland-Altman plot.

    Draws confidence-interval bands (when provided), the mean-difference
    line, the upper/lower limits of agreement, the per-cohort scatter
    points and their annotations, then either saves the figure, shows it,
    or returns the axis, depending on whether *ax* and *savePath* were
    supplied.

    NOTE(review): the scatter calls below hard-code cohort slice boundaries
    (0:20, 20:39, 39:59, 59:77) for the India / Sierra Leone male / female
    groups — confirm these match the ordering of the input arrays.
    """
    if ax is None:
        fig, ax = plt.subplots(1,1, figsize=figureSize, dpi=dpi)
        plt.rcParams.update({'font.size': 15,'xtick.labelsize':15,
                     'ytick.labelsize':15})
        ax.tick_params(axis='x', labelsize=15)
        ax.tick_params(axis='y', labelsize=15)
        # ax.rcParams.update({'font.size': 15})
        # ax=ax[0,0]
        draw = True
    else:
        # Caller supplied an axis: draw into it and return it, do not show/save.
        draw = False
    ##
    # Plot CIs if calculated
    ##
    if 'mean' in confidenceIntervals.keys():
        ax.axhspan(confidenceIntervals['mean'][0],
                   confidenceIntervals['mean'][1],
                   facecolor='lightblue', alpha=0.2)
    if 'upperLoA' in confidenceIntervals.keys():
        ax.axhspan(confidenceIntervals['upperLoA'][0],
                   confidenceIntervals['upperLoA'][1],
                   facecolor='wheat', alpha=0.2)
    if 'lowerLoA' in confidenceIntervals.keys():
        ax.axhspan(confidenceIntervals['lowerLoA'][0],
                   confidenceIntervals['lowerLoA'][1],
                   facecolor='wheat', alpha=0.2)
    ##
    # Plot the mean diff and LoA
    ##
    ax.axhline(md, color=meanColour, linestyle='--')
    ax.axhline(md + limitOfAgreement*sd, color=loaColour, linestyle='--')
    ax.axhline(md - limitOfAgreement*sd, color=loaColour, linestyle='--')
    ##
    # Plot the data points
    ##
    # ax.scatter(mean[0:22], diff[0:22], alpha=0.8, c='orange', marker='.', s=100, label='India Male')
    # ax.scatter(mean[22:44], diff[22:44], alpha=0.8, c='blue', marker='.', s=100, label='India Female')
    # ax.scatter(mean[44:66], diff[44:66], alpha=0.8, c='red', marker='.', s=100, label='Sierra Leone Male')
    # ax.scatter(mean[66:88], diff[66:88], alpha=0.8, c='purple', marker='.', s=100, label='Sierra Leone Female')
    ax.scatter(mean[0:20], diff[0:20], alpha=0.8, c='orange', marker='.', s=100, label='India Male')
    ax.scatter(mean[20:39], diff[20:39], alpha=0.8, c='blue', marker='.', s=100, label='India Female')
    ax.scatter(mean[39:59], diff[39:59], alpha=0.8, c='red', marker='.', s=100, label='Sierra Leone Male')
    ax.scatter(mean[59:77], diff[59:77], alpha=0.8, c='purple', marker='.', s=100, label='Sierra Leone Female')
    ax.set_ylim(-50, 70)
    ax.legend(loc='upper right', fontsize=12)
    # Blended transform: x in axes coordinates (0..1), y in data coordinates,
    # so the annotations hug the right edge at the correct data heights.
    trans = transforms.blended_transform_factory(
        ax.transAxes, ax.transData)
    limitOfAgreementRange = (md + (limitOfAgreement * sd)) - (md - limitOfAgreement*sd)
    offset = (limitOfAgreementRange / 100.0) * 1.5
    ax.text(0.98, md + offset, 'Mean', ha="right", va="bottom", transform=trans)
    ax.text(0.98, md - offset, f'{md:.2f}', ha="right", va="top", transform=trans)
    ax.text(0.98, md + (limitOfAgreement * sd) + offset, f'+{limitOfAgreement:.2f} SD', ha="right", va="bottom", transform=trans)
    ax.text(0.98, md + (limitOfAgreement * sd) - offset, f'{md + limitOfAgreement*sd:.2f}', ha="right", va="top", transform=trans)
    ax.text(0.98, md - (limitOfAgreement * sd) - offset, f'-{limitOfAgreement:.2f} SD', ha="right", va="top", transform=trans)
    ax.text(0.98, md - (limitOfAgreement * sd) + offset, f'{md - limitOfAgreement*sd:.2f}', ha="right", va="bottom", transform=trans)
    # Only draw spine between extent of the data
    # ax.spines['left'].set_bounds(min(diff), max(diff))
    # ax.spines['bottom'].set_bounds(min(mean), max(mean))
    # Hide the right and top spines
    # ax.spines['right'].set_visible(False)
    # ax.spines['top'].set_visible(False)
    if percentage:
        ax.set_ylabel('Percentage difference between methods', fontsize=20)
    else:
        ax.set_ylabel('Difference between methods', fontsize=20)
    ax.set_xlabel('Mean of methods', fontsize=20)
    # tickLocs = ax.xaxis.get_ticklocs()
    # cadenceX = tickLocs[2] - tickLocs[1]
    # tickLocs = rangeFrameLocator(tickLocs, (min(mean), max(mean)))
    # ax.xaxis.set_major_locator(ticker.FixedLocator(tickLocs))
    # tickLocs = ax.yaxis.get_ticklocs()
    # cadenceY = tickLocs[2] - tickLocs[1]
    # tickLocs = rangeFrameLocator(tickLocs, (min(diff), max(diff)))
    # ax.yaxis.set_major_locator(ticker.FixedLocator(tickLocs))
    # plt.draw() # Force drawing to populate tick labels
    # labels = rangeFrameLabler(ax.xaxis.get_ticklocs(), [item.get_text() for item in ax.get_xticklabels()], cadenceX)
    # ax.set_xticklabels(labels)
    # labels = rangeFrameLabler(ax.yaxis.get_ticklocs(), [item.get_text() for item in ax.get_yticklabels()], cadenceY)
    # ax.set_yticklabels(labels)
    # ax.patch.set_alpha(0)
    if detrend[0] is None:
        pass
    else:
        plt.text(1, -0.1, f'{detrend[0]} slope correction factor: {detrend[1]:.2f} ± {detrend[2]:.2f}', ha='right', transform=ax.transAxes)
    if title:
        ax.set_title(title)
    ##
    # Save or draw
    ##
    plt.tight_layout()
    if (savePath is not None) & draw:
        fig.savefig(savePath, format=figureFormat, dpi=dpi)
        plt.close()
    elif draw:
        plt.show()
    else:
        return ax
def reader(args):
    """
    Realign BAM hits to miRBase for improved accuracy and annotation.

    Attaches reference data (database name, precursor sequences, mature
    coordinates) onto ``args`` and then streams every input file through
    the appropriate low-memory reader, writing one GFF-style output file
    per sample.
    """
    samples = []
    args.database = mapper.guess_database(args)
    args.precursors = fasta.read_precursor(args.hairpin, args.sps)
    args.matures = mapper.read_gtf_to_precursor(args.gtf)
    # TODO check numbers of miRNA and precursors read
    # TODO print message if numbers mismatch
    if args.keep_name and len(args.files) > 1:
        logger.warning("--keep-name when running multiple samples\n"
                       "can generate wrong results if the\n"
                       "name read is different across sample\n"
                       "for the same sequence.")
    for file_name in args.files:
        file_name = op.normpath(file_name)
        if args.format != "gff":
            # Derive the sample id from the file name and open its output
            # file with a fresh header.
            sample_id = op.splitext(op.basename(file_name))[0]
            samples.append(sample_id)
            out_path = op.join(args.out, "%s.%s" % (sample_id, args.out_format))
            banner = header.create([sample_id], args.database, "")
            out_handle = open(out_path, 'w')
            print(banner, file=out_handle)
        if args.format == "BAM":
            if args.genomic:
                low_memory_genomic_bam(file_name, sample_id, out_handle, args)
            else:
                low_memory_bam(file_name, sample_id, out_handle, args)
        elif args.format == "seqbuster":
            seqbuster.read_file_low_memory(file_name, sample_id, args, out_handle)
        else:
            raise ValueError("%s not supported for low memory" % args.format)
        out_handle.close()
def rename_columns(df):
    """This function renames certain columns of the DataFrame

    Unit-bearing columns are mapped to ``"<name> [<unit>]"``; a handful of
    descriptive columns get human-readable names; the efficiency columns
    keep their names unchanged.

    :param df: DataFrame
    :type df: pandas DataFrame
    :return: DataFrame
    :rtype: pandas DataFrame
    """
    # Columns whose new name is simply the old name plus a unit suffix,
    # grouped by unit.
    unit_groups = {
        'W': ('P_PV2AC_in', 'P_PV2AC_out',
              'P_AC2BAT_in', 'P_BAT2AC_out',
              'P_PV2BAT_in', 'P_BAT2PV_out',
              'P_PV2BAT_out', 'P_BAT2AC_in',
              'P_SYS_SOC1_AC', 'P_SYS_SOC1_DC',
              'P_SYS_SOC0_AC', 'P_SYS_SOC0_DC',
              'P_PVINV_AC', 'P_PERI_AC',
              'P_PV2BAT_DEV_IMPORT', 'P_PV2BAT_DEV_EXPORT',
              'P_BAT2AC_DEV_IMPORT', 'P_BAT2AC_DEV_EXPORT'),
        'V': ('U_PV_min', 'U_PV_nom', 'U_PV_max',
              'U_MPP_min', 'U_MPP_max',
              'U_BAT_min', 'U_BAT_nom', 'U_BAT_max'),
        'kWh': ('E_BAT_100', 'E_BAT_50', 'E_BAT_25', 'E_BAT_usable'),
        's': ('t_DEAD', 't_SETTLING'),
    }
    # Descriptive columns with fully hand-written replacement names.
    column_map = {
        "Man1": "Manufacturer (PE)",
        "Pro1": "Model (PE)",
        "Man2": "Manufacturer (BAT)",
        "Pro2": "Model (BAT)",
        "Top": "Type [-coupled]",
    }
    for unit, names in unit_groups.items():
        column_map.update({name: f"{name} [{unit}]" for name in names})
    # Efficiency columns are intentionally left as-is (identity mapping,
    # kept for parity with the documented column set).
    column_map.update({name: name for name in
                       ('eta_BAT_100', 'eta_BAT_50', 'eta_BAT_25', 'eta_BAT')})
    return df.rename(columns=column_map)
def retseq(seq_fh):
    """
    Parse a FASTA file and yield only the records with a non-empty sequence.
    :seq_fh: File handle of the input sequence
    :return: generator of non-empty SeqIO records
    """
    for rec in SeqIO.parse(seq_fh, 'fasta'):
        # Skip records whose sequence is empty.
        if len(rec.seq) > 0:
            yield rec
def get_github_emoji():  # pragma: no cover
    """Get Github's usable emoji.

    Fetches the name -> image-URL mapping from the GitHub emoji API.

    Returns:
        dict | None: the emoji mapping, or ``None`` if the request fails,
        the server returns an HTTP error status, or the body is not valid
        JSON.  (The original only guarded the request itself, so an error
        page made ``json.loads`` raise outside the ``try``.)
    """
    try:
        resp = requests.get(
            'https://api.github.com/emojis',
            timeout=30
        )
        # Treat HTTP error statuses (e.g. rate limiting) as failures too.
        resp.raise_for_status()
        return resp.json()
    except Exception:
        return None
def transport_stable(p, q, C, lambda1, lambda2, epsilon, scaling_iter, g):
"""
Compute the optimal transport with stabilized numerics.
Args:
p: uniform distribution on input cells
q: uniform distribution on output cells
C: cost matrix to transport cell i to cell j
lambda1: regularization parameter for marginal constraint for p.
lambda2: regularization parameter for marginal constraint for q.
epsilon: entropy parameter
scaling_iter: number of scaling iterations
g: growth value for input cells
"""
u = np.zeros(len(p))
v = np.zeros(len(q))
b = np.ones(len(q))
p = p * g
q = q * np.average(g)
K0 = np.exp(-C / epsilon)
K = np.copy(K0)
alpha1 = lambda1 / (lambda1 + epsilon)
alpha2 = lambda2 / (lambda2 + epsilon)
for i in range(scaling_iter):
# scaling iteration
a = (p / (K.dot(b))) ** alpha1 * np.exp(-u / (lambda1 + epsilon))
b = (q / (K.T.dot(a))) ** alpha2 * np.exp(-v / (lambda2 + epsilon))
# stabilization
if (max(max(abs(a)), max(abs(b))) > 1e100):
u = u + epsilon * np.log(a)
v = v + epsilon * np.log(b) # absorb
K = (K0.T * np.exp(u / epsilon)).T * np.exp(v / epsilon)
a = np.ones(len(p))
b = np.ones(len(q))
return (K.T * a).T * b | 26,771 |
def clean(files, nite, wave, refSrc, strSrc, badColumns=None, field=None,
          skyscale=False, skyfile=None, angOff=0.0, fixDAR=True,
          raw_dir=None, clean_dir=None,
          instrument=instruments.default_inst, check_ref_loc=True,
          cent_box=12):
    """
    Clean near infrared NIRC2 or OSIRIS images.
    This program should be run from the reduce/ directory.
    Example directory structure is:
    calib/
        flats/
        flat_kp.fits
        flat.fits (optional)
        masks/
        supermask.fits
    kp/
        sci_nite1/
        sky_nite1/
        sky.fits
    All output files will be put into clean_dir (if specified, otherwise
    ../clean/) in the following structure:
    kp/
        c*.fits
        distort/
        cd*.fits
        weight/
        wgt*.fits
    The clean directory may be optionally modified to be named
    <field_><wave> instead of just <wave>. So for instance, for Arches
    field #1 data reduction, you might call clean with: field='arch_f1'.
    Parameters
    ----------
    files : list of int
        Integer list of the files. Does not require padded zeros.
    nite : str
        Name for night of observation (e.g.: "nite1"), used as suffix
        inside the reduce sub-directories.
    wave : str
        Name for the observation passband (e.g.: "kp"), used as
        a wavelength suffix
    field : str, default=None
        Optional prefix for clean directory and final
        combining. All clean files will be put into <field_><wave>. You
        should also pass the same into combine(). If set to None (default)
        then only wavelength is used.
    skyscale : bool, default=False
        Whether or not to scale the sky files to the common median.
        Turn on for scaling skies before subtraction.
    skyfile : str, default=''
        An optional file containing image/sky matches.
    angOff : float, default = 0
        An optional absolute offset in the rotator
        mirror angle for cases (wave='lp') when sky subtraction is done with
        skies taken at matching rotator mirror angles.
    cent_box : int, default = 12
        The box to use for better centroiding the reference star.
        (Previously documented but missing from the signature, which made
        the call to clean_makecoo below raise NameError.)
    badColumns : int array, default = None
        An array specifying the bad columns (zero-based).
        Assumes a repeating pattern every 8 columns.
    raw_dir : str, optional
        Directory where raw files are stored. By default,
        assumes that raw files are stored in '../raw'
    clean_dir : str, optional
        Directory where clean files will be stored. By default,
        assumes that clean files will be stored in '../clean'
    instrument : instruments object, optional
        Instrument of data. Default is `instruments.default_inst`
    """
    # Make sure directory for current passband exists and switch into it
    util.mkdir(wave)
    os.chdir(wave)
    # Determine directory locatons
    waveDir = os.getcwd() + '/'
    redDir = util.trimdir(os.path.abspath(waveDir + '../') + '/')
    rootDir = util.trimdir(os.path.abspath(redDir + '../') + '/')
    sciDir = waveDir + '/sci_' + nite + '/'
    util.mkdir(sciDir)
    ir.cd(sciDir)
    # Set location of raw data
    rawDir = rootDir + 'raw/'
    # Check if user has specified a specific raw directory
    if raw_dir is not None:
        rawDir = util.trimdir(os.path.abspath(raw_dir) + '/')
    # Setup the clean directory
    cleanRoot = rootDir + 'clean/'
    # Check if user has specified a specific clean directory
    if clean_dir is not None:
        cleanRoot = util.trimdir(os.path.abspath(clean_dir) + '/')
    if field is not None:
        clean = cleanRoot + field + '_' + wave + '/'
    else:
        clean = cleanRoot + wave + '/'
    distort = clean + 'distort/'
    weight = clean + 'weight/'
    masks = clean + 'masks/'
    util.mkdir(cleanRoot)
    util.mkdir(clean)
    util.mkdir(distort)
    util.mkdir(weight)
    util.mkdir(masks)
    # Open a text file to document sources of data files
    data_sources_file = open(clean + 'data_sources.txt', 'w')
    try:
        # Setup flat. Try wavelength specific, but if it doesn't
        # exist, then use a global one.
        flatDir = redDir + 'calib/flats/'
        flat = flatDir + 'flat_' + wave + '.fits'
        if not os.access(flat, os.F_OK):
            flat = flatDir + 'flat.fits'
        # Bad pixel mask
        _supermask = redDir + 'calib/masks/' + supermaskName
        # Determine the reference coordinates for the first image.
        # This is the image for which refSrc is relevant.
        firstFile = instrument.make_filenames([files[0]], rootDir=rawDir)[0]
        hdr1 = fits.getheader(firstFile, ignore_missing_end=True)
        radecRef = [float(hdr1['RA']), float(hdr1['DEC'])]
        aotsxyRef = nirc2_util.getAotsxy(hdr1)
        # Setup a Sky object that will figure out the sky subtraction
        skyDir = waveDir + 'sky_' + nite + '/'
        skyObj = Sky(sciDir, skyDir, wave, scale=skyscale,
                     skyfile=skyfile, angleOffset=angOff,
                     instrument=instrument)
        # Prep drizzle stuff
        # Get image size from header - this is just in case the image
        # isn't 1024x1024 (e.g., NIRC2 sub-arrays). Also, if it's
        # rectangular, choose the larger dimension and make it square
        imgsizeX = float(hdr1['NAXIS1'])
        imgsizeY = float(hdr1['NAXIS2'])
        distXgeoim, distYgeoim = instrument.get_distortion_maps(hdr1)
        if (imgsizeX >= imgsizeY):
            imgsize = imgsizeX
        else:
            imgsize = imgsizeY
        setup_drizzle(imgsize)
        ##########
        # Loop through the list of images
        ##########
        for f in files:
            # Define filenames
            _raw = instrument.make_filenames([f], rootDir=rawDir)[0]
            _cp = instrument.make_filenames([f])[0]
            _ss = instrument.make_filenames([f], prefix='ss')[0]
            _ff = instrument.make_filenames([f], prefix='ff')[0]
            _ff_f = _ff.replace('.fits', '_f.fits')
            _ff_s = _ff.replace('.fits', '_s.fits')
            _bp = instrument.make_filenames([f], prefix='bp')[0]
            _cd = instrument.make_filenames([f], prefix='cd')[0]
            _ce = instrument.make_filenames([f], prefix='ce')[0]
            _cc = instrument.make_filenames([f], prefix='c')[0]
            _wgt = instrument.make_filenames([f], prefix='wgt')[0]
            _statmask = instrument.make_filenames([f], prefix='stat_mask')[0]
            _crmask = instrument.make_filenames([f], prefix='crmask')[0]
            _mask = instrument.make_filenames([f], prefix='mask')[0]
            _pers = instrument.make_filenames([f], prefix='pers')[0]
            _max = _cc.replace('.fits', '.max')
            _coo = _cc.replace('.fits', '.coo')
            _rcoo = _cc.replace('.fits', '.rcoo')
            _dlog_tmp = instrument.make_filenames([f], prefix='driz')[0]
            _dlog = _dlog_tmp.replace('.fits', '.log')
            out_line = '{0} from {1} ({2})\n'.format(_cc, _raw,
                                                     datetime.now())
            data_sources_file.write(out_line)
            # Clean up if these files previously existed
            util.rmall([_cp, _ss, _ff, _ff_f, _ff_s, _bp, _cd, _ce, _cc,
                        _wgt, _statmask, _crmask, _mask, _pers, _max, _coo, _rcoo, _dlog])
            ### Copy the raw file to local directory ###
            ir.imcopy(_raw, _cp, verbose='no')
            ### Make persistance mask ###
            # - Checked images, this doesn't appear to be a large effect.
            #clean_persistance(_cp, _pers, instrument=instrument)
            ### Sky subtract ###
            # Get the proper sky for this science frame.
            # It might be scaled or there might be a specific one for L'.
            sky = skyObj.getSky(_cp)
            ir.imarith(_cp, '-', sky, _ss)
            ### Flat field ###
            ir.imarith(_ss, '/', flat, _ff)
            ### Make a static bad pixel mask ###
            # _statmask = supermask + bad columns
            clean_get_supermask(_statmask, _supermask, badColumns)
            ### Fix bad pixels ###
            # Produces _ff_f file
            bfixpix.bfixpix(_ff, _statmask)
            util.rmall([_ff_s])
            ### Fix cosmic rays and make cosmic ray mask. ###
            clean_cosmicrays(_ff_f, _crmask, wave)
            ### Combine static and cosmic ray mask ###
            # This will be used in combine later on.
            # Results are stored in _mask, _mask_static is deleted.
            clean_makemask(_mask, _crmask, _statmask, wave, instrument=instrument)
            ### Background Subtraction ###
            bkg = clean_bkgsubtract(_ff_f, _bp)
            ### Drizzle individual file ###
            clean_drizzle(distXgeoim, distYgeoim, _bp, _ce, _wgt, _dlog, fixDAR=fixDAR, instrument=instrument)
            ### Make .max file ###
            # Determine the non-linearity level. Raw data level of
            # non-linearity is 12,000 but we subtracted
            # off a sky which changed this level. The sky is
            # scaled, so the level will be slightly different
            # for every frame.
            nonlinSky = skyObj.getNonlinearCorrection(sky)
            coadds = fits.getval(_ss, instrument.hdr_keys['coadds'])
            satLevel = (coadds*instrument.get_saturation_level()) - nonlinSky - bkg
            # BUG FIX: the original used the Python 2 `file()` builtin,
            # which is a NameError on Python 3 and leaked the handle.
            with open(_max, 'w') as max_file:
                max_file.write(str(satLevel))
            ### Rename and clean up files ###
            ir.imrename(_bp, _cd)
            # util.rmall([_cp, _ss, _ff, _ff_f])
            ### Make the *.coo file and update headers ###
            # First check if PA is not zero
            hdr = fits.getheader(_raw, ignore_missing_end=True)
            phi = instrument.get_position_angle(hdr)
            clean_makecoo(_ce, _cc, refSrc, strSrc, aotsxyRef, radecRef,
                          instrument=instrument, check_loc=check_ref_loc,
                          cent_box=cent_box)
            ### Move to the clean directory ###
            util.rmall([clean + _cc, clean + _coo, clean + _rcoo,
                        distort + _cd, weight + _wgt,
                        clean + _ce, clean + _max,
                        masks + _mask, _ce])
            os.rename(_cc, clean + _cc)
            os.rename(_cd, distort + _cd)
            os.rename(_wgt, weight + _wgt)
            os.rename(_mask, masks + _mask)
            os.rename(_max, clean + _max)
            os.rename(_coo, clean + _coo)
            os.rename(_rcoo, clean + _rcoo)
        # This just closes out any sky logging files.
        #skyObj.close()
    finally:
        # Close the data-sources log even if an image failed partway
        # (the original only closed it on success).
        data_sources_file.close()
        # Move back up to the original directory
        #skyObj.close()
        ir.cd('../')
        # Change back to original directory
        os.chdir('../')
def add(A: Coord, B: Coord, s: float = 1.0, t: float = 1.0) -> Coord:
    """Return the linear combination s*A + t*B of the two points."""
    x = s * A[0] + t * B[0]
    y = s * A[1] + t * B[1]
    return (x, y)
def from_binary(bin_data: str, delimiter: str = " ") -> bytes:
    """Converts binary string into bytes object.

    With an empty delimiter the input is chopped into fixed 8-bit groups;
    otherwise the string is split on the delimiter and each token is
    parsed as one base-2 byte value.
    """
    if delimiter:
        chunks = bin_data.split(delimiter)
    else:
        chunks = [bin_data[pos:pos + 8] for pos in range(0, len(bin_data), 8)]
    return bytes(int(chunk, 2) for chunk in chunks)
def test_list_date_time_length_2_nistxml_sv_iv_list_date_time_length_3_2(mode, save_output, output_format):
    """
    Type list/dateTime is restricted by facet length with value 7.
    """
    # Schema and instance live side by side under the same NIST data dir.
    base = "nistData/list/dateTime/Schema+Instance/"
    assert_bindings(
        schema=base + "NISTSchema-SV-IV-list-dateTime-length-3.xsd",
        instance=base + "NISTXML-SV-IV-list-dateTime-length-3-2.xml",
        class_name="NistschemaSvIvListDateTimeLength3",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def send_raw(task, raw_bytes):
    """Send raw bytes to the BMC. Bytes should be a string of bytes.
    :param task: a TaskManager instance.
    :param raw_bytes: a string of raw bytes to send, e.g. '0x00 0x01'
    :returns: a tuple with stdout and stderr.
    :raises: IPMIFailure on an error from ipmitool.
    :raises: MissingParameterValue if a required parameter is missing.
    :raises: InvalidParameterValue when an invalid value is specified.
    """
    node_uuid = task.node.uuid
    LOG.debug('Sending node %(node)s raw bytes %(bytes)s',
              {'bytes': raw_bytes, 'node': node_uuid})
    cmd = 'raw %s' % raw_bytes
    driver_info = _parse_driver_info(task.node)
    try:
        stdout, stderr = _exec_ipmitool(driver_info, cmd)
    except (exception.PasswordFileFailedToCreate,
            processutils.ProcessExecutionError) as exc:
        LOG.exception('IPMI "raw bytes" failed for node %(node_id)s '
                      'with error: %(error)s.',
                      {'node_id': node_uuid, 'error': exc})
        raise exception.IPMIFailure(cmd=cmd)
    LOG.debug('send raw bytes returned stdout: %(stdout)s, stderr:'
              ' %(stderr)s', {'stdout': stdout, 'stderr': stderr})
    return stdout, stderr
def one_on_f_weight(f, normalize=True):
    """ Literally 1/f weight. Useful for fitting linspace data in logspace.
    Parameters
    ----------
    f: array
        Frequency
    normalize: boolean, optional
        Normalized the weight to [0, 1].
        Defaults to True.
    Returns
    -------
    weight: array
        The 1/f weight.
    """
    w = 1 / f
    # Optionally rescale so the largest weight is exactly 1.
    if normalize:
        w = w / max(w)
    return w
def PCSPRE1M2SOC(p0, meas_pcs, meas_pre, x_pcs, y_pcs, z_pcs,
                 x_pre, y_pre, z_pre, wt_pcs=1.0, wt_pre=1.0,
                 tol_pcs=None, tol_pre=None):
    """
    Optimize two X-tensors and two PRE centres to two common sites
    @param p0: List containing initial guesses for (17 unknowns):
        metal site 1 <x,y,z> , Xaxial and Xrhomic at site 1, Euler angles
        <A,B,G> at site 1 AND metal site 2 <x,y,z> , Xaxial and Xrhomic at
        site 2, Euler angles <A,B,G> at site 2 and the PRE constant c
    @param meas_pcs: The numpy array of measused PCS
    @param meas_pre: The numpy array of measured PRE
    @param x: The numpy array of x coordinates of associated exp vals
    @param y: The numpy array of y coordinates of associated exp vals
    @param z: The numpy array of z coordinates of associated exp vals
    @param wt_pcs: [OPTIONAL] The weight of the PCS terms in optimization
    @param wt_pre: [OPTIONAL] The weight of the PRE terms in optimization
    @param tol_pcs: [OPTIONAL] Per-point PCS tolerances; residuals within
        tolerance are zeroed (both tol_pcs and tol_pre must be given)
    @param tol_pre: [OPTIONAL] Per-point PRE tolerances
    @return: the concatenated, weighted PCS and PRE residual vector
    """
    xm1,ym1,zm1, ax1,rh1, a1,b1,g1,xm2,ym2,zm2, ax2,rh2, a2,b2,g2, c = p0
    # Distances from each PRE nucleus to the two metal centres.
    r_v1 = sqrt((x_pre-xm1)**2 +(y_pre-ym1)**2 + (z_pre-zm1)**2)
    r_v2 = sqrt((x_pre-xm2)**2 +(y_pre-ym2)**2 + (z_pre-zm2)**2)
    # Rotation matrices (ZYZ Euler convention) into each tensor frame.
    zyz_rot1 = ZYZRot(a1, b1, g1)
    zyz_rot2 = ZYZRot(a2, b2, g2)
    # Nucleus coordinates relative to each metal centre.
    X1, X2 = (x_pcs - xm1), (x_pcs - xm2)
    Y1, Y2 = (y_pcs - ym1), (y_pcs - ym2)
    Z1, Z2 = (z_pcs - zm1), (z_pcs - zm2)
    # Rotate into the two tensor frames.
    x_t1 = zyz_rot1[0][0]*X1 + zyz_rot1[0][1]*Y1 + zyz_rot1[0][2]*Z1
    y_t1 = zyz_rot1[1][0]*X1 + zyz_rot1[1][1]*Y1 + zyz_rot1[1][2]*Z1
    z_t1 = zyz_rot1[2][0]*X1 + zyz_rot1[2][1]*Y1 + zyz_rot1[2][2]*Z1
    x_t2 = zyz_rot2[0][0]*X2 + zyz_rot2[0][1]*Y2 + zyz_rot2[0][2]*Z2
    y_t2 = zyz_rot2[1][0]*X2 + zyz_rot2[1][1]*Y2 + zyz_rot2[1][2]*Z2
    z_t2 = zyz_rot2[2][0]*X2 + zyz_rot2[2][1]*Y2 + zyz_rot2[2][2]*Z2
    r2_1 = (x_t1*x_t1)+(y_t1*y_t1)+(z_t1*z_t1)
    r2_2 = (x_t2*x_t2)+(y_t2*y_t2)+(z_t2*z_t2)
    r5_1 = (r2_1*r2_1) * sqrt(r2_1)
    r5_2 = (r2_2*r2_2) * sqrt(r2_2)
    tmp_1 = 1.0/r5_1
    tmp_2 = 1.0/r5_2
    # PCS contribution from each tensor (axial + rhombic terms).
    PCS_1 = (tmp_1*(ax1*(3.0*z_t1*z_t1-r2_1)+rh1*1.5*(x_t1*x_t1-y_t1*y_t1)))
    PCS_2 = (tmp_2*(ax2*(3.0*z_t2*z_t2-r2_2)+rh2*1.5*(x_t2*x_t2-y_t2*y_t2)))
    err_pcs = meas_pcs - (PCS_1 + PCS_2)
    # PRE decays as 1/r^6 from each centre with a shared constant c.
    err_pre = meas_pre - (c/r_v1**6 + c/r_v2**6)
    # Idiom fix: compare against None with `is not None`, not `!=`.
    if tol_pcs is not None and tol_pre is not None:
        # Zero out residuals that fall within the experimental tolerance.
        for i in range(len(err_pcs)):
            if abs(err_pcs[i]) <= tol_pcs[i]:
                err_pcs[i] = 0.0
        for i in range(len(err_pre)):
            if abs(err_pre[i]) <= tol_pre[i]:
                err_pre[i] = 0.0
    #TODO: Check if this should be squared (below)
    err_pcs = err_pcs*wt_pcs
    err_pre = err_pre*wt_pre
    err = append(err_pcs, err_pre)
    return err
def copy_asset_file(source, destination, context=None, renderer=None):
    # type: (unicode, unicode, Dict, BaseRenderer) -> None
    """Copy an asset file to destination.
    On copying, it expands the template variables if context argument is given and
    the asset is a template file.
    :param source: The path to source file
    :param destination: The path to destination file or directory
    :param context: The template variables. If not given, template files are simply copied
    :param renderer: The template engine. If not given, SphinxRenderer is used by default
    """
    if not os.path.exists(source):
        return
    if os.path.isdir(destination):
        # Destination is a directory: keep the source's basename.
        destination = os.path.join(destination, os.path.basename(source))
    if context is None or not source.lower().endswith('_t'):
        # Not a template (or no context given): plain byte-for-byte copy.
        copyfile(source, destination)
        return
    # Template asset: render it through the template engine.
    if renderer is None:
        from sphinx.util.template import SphinxRenderer
        renderer = SphinxRenderer()
    with open(source, encoding='utf-8') as fsrc:  # type: ignore
        if destination.lower().endswith('_t'):
            # Drop the template suffix from the output name.
            destination = destination[:-2]
        with open(destination, 'w', encoding='utf-8') as fdst:  # type: ignore
            fdst.write(renderer.render_string(fsrc.read(), context))
def load_randomdata(dataset_str, iter):
    """Load data.

    Loads a citation-network dataset in the Planetoid pickle format
    (ind.<dataset>.{x,y,tx,ty,allx,ally,graph}) and builds a split where:
    the 500 validation indices are read from a per-iteration file under
    source/<dataset>/, the test split is the dataset's fixed test index
    range, and the training nodes are sampled uniformly at random from the
    remaining labelled pool.

    :param dataset_str: dataset name ('cora', 'citeseer', or another
        Planetoid dataset — presumably 'pubmed'; TODO confirm the else
        branch constants)
    :param iter: split number, selects source/<dataset>/val_idx<iter>.txt
    :return: (adj, features, y_train, y_val, y_test,
              train_mask, val_mask, test_mask)
    """
    names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
    objects = []
    for i in range(len(names)):
        with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
            # Pickles were written under Python 2; latin1 keeps the raw
            # bytes intact when unpickling under Python 3.
            if sys.version_info > (3, 0):
                objects.append(pkl.load(f, encoding='latin1'))
            else:
                objects.append(pkl.load(f))
    x, y, tx, ty, allx, ally, graph = tuple(objects)
    test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
    test_idx_range = np.sort(test_idx_reorder)
    if dataset_str == 'citeseer':
        # Fix citeseer dataset (there are some isolated nodes in the graph)
        # Find isolated nodes, add them as zero-vecs into the right position
        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range-min(test_idx_range), :] = tx
        tx = tx_extended
        ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
        ty_extended[test_idx_range-min(test_idx_range), :] = ty
        ty = ty_extended
        # NL: size of the labelled pool the train split is drawn from;
        # NC: number of training nodes.  Values are hard-coded per dataset
        # — NOTE(review): confirm against the published Planetoid splits.
        NL = 2312
        NC = 120
    elif dataset_str == 'cora':
        NL = 1708
        NC = 140
    else:
        NL = 18717
        NC = 60
    features = sp.vstack((allx, tx)).tolil()
    # Undo the shuffled on-disk ordering of the test rows.
    features[test_idx_reorder, :] = features[test_idx_range, :]
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
    labels = np.vstack((ally, ty))
    labels[test_idx_reorder, :] = labels[test_idx_range, :]
    #fixed 500 for validation read from file, choose random 140 from the others for train
    '''
    idx_test = test_idx_range.tolist()
    idx_train = range(len(y))
    idx_val = range(len(y), len(y)+500)
    '''
    idx_val=[int(item) for item in open("source/"+dataset_str+"/val_idx"+str(iter)+".txt").readlines()]
    idx_test = test_idx_range.tolist()
    # Random train split: NC nodes sampled from the labelled pool minus the
    # fixed validation indices.
    idx_train = random.sample(list(set(range(0,NL))-set(idx_val)),NC);
    train_mask = sample_mask(idx_train, labels.shape[0])
    val_mask = sample_mask(idx_val, labels.shape[0])
    test_mask = sample_mask(idx_test, labels.shape[0])
    # One-hot label matrices, zero everywhere outside each split's mask.
    y_train = np.zeros(labels.shape)
    y_val = np.zeros(labels.shape)
    y_test = np.zeros(labels.shape)
    y_train[train_mask, :] = labels[train_mask, :]
    y_val[val_mask, :] = labels[val_mask, :]
    y_test[test_mask, :] = labels[test_mask, :]
    return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask
def plot(figsize=None, formats=None, limit=100, titlelen=10, **kwargs):
    """Display an image [in a Jupyter Notebook] from a Quilt fragment path.
    Intended for use with `%matplotlib inline`.
    Convenience method that loops over subplots that call
    `plt.imshow(image.imread(FRAG_PATH))`.
    Keyword arguments
    * figsize=None # None means auto, else provide (HEIGHT_INCHES, WIDTH_INCHES)
    * formats=None # List of extensions as strings ['jpg', 'png', ...]
    * limit=100 # maximum number of images to display
    * titlelen=10 # max number of characters in subplot title
    * **kwargs - all remaining kwargs are passed to plt.subplots;
      see https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots.html

    Returns a closure `_plot(node, paths)` that does the actual rendering;
    the closure is what gets attached to Quilt nodes.
    """
    # pylint: disable=protected-access
    def _plot(node, paths):
        # Normalize requested extensions for case-insensitive comparison.
        lower_formats = set((x.lower() for x in formats)) if formats is not None else None
        def node_filter(frag, meta):
            filepath = meta.get('_system', {}).get('filepath', None)
            # don't try to read DataFrames as images
            if isinstance(frag, string_types) and filepath:
                _, ext = splitext_no_dot(filepath)
                if lower_formats is None or ext.lower() in lower_formats:
                    return True
            return False
        # assume DataNode has one path; doesn't work with multi-fragment images
        display = [('', paths[0], node._meta)]
        # for GroupNodes, display all DataNode children
        if isinstance(node, GroupNode):
            datanodes = [(x, y) for (x, y) in node._items() if isinstance(y, DataNode)]
            display = [(x, y._data(), y._meta) for (x, y) in datanodes]
        # sort by name so iteration is reproducible (and unit tests pass)
        display = sorted(display, key=lambda rec: rec[0])
        display = [x for x in display if node_filter(x[1], x[2])]
        if len(display) > limit:
            print('Displaying {} of {} images{}'.format(limit, len(display), ELLIPSIS))
            display = display[:limit]
        # display can be empty e.g. if no DataNode children
        if not display:
            print('No images to display.')
            return
        # cast to int to avoid downstream complaints of
        # 'float' object cannot be interpreted as an index
        floatlen = float(len(display))  # prevent integer division in 2.7
        # Grid layout: near-square, at most 8 columns wide.
        cols = min(int(floor(sqrt(floatlen))), 8)
        rows = int(ceil(floatlen/cols))
        # NOTE(review): tight_layout() is called before the subplots are
        # created, so it acts on the previous/current figure — confirm
        # whether it was meant to run after plotting instead.
        plt.tight_layout()
        plt.subplots(
            rows,
            cols,
            figsize=(cols*2, rows*2) if not figsize else figsize,
            **kwargs)
        for i in range(rows*cols):
            axes = plt.subplot(rows, cols, i + 1)  # subplots start at 1, not 0
            axes.axis('off')
            if i < len(display):
                (name, frag, meta) = display[i]
                # Truncate long names so titles fit in small subplots.
                plt.title(name[:titlelen] + ELLIPSIS if len(name) > titlelen else name)
                filepath = meta.get('_system', {}).get('filepath', None)
                _, ext = splitext_no_dot(filepath)
                try:
                    bits = mpimg.imread(frag, format=ext)
                    plt.imshow(bits)
                # Mac throws OSError, Linux IOError if file not recognizable
                except (IOError, OSError) as err:
                    print('{}: {}'.format(name, str(err)))
                    continue
    return _plot
async def ws_adapter(in_q: curio.Queue,
                     out_q: curio.Queue,
                     client: curio.io.Socket, _):
    """A queue-based WebSocket bridge. Sits between a
    `curio.tcp_server` and a user-defined handler; the handler
    should accept an ingoing and outgoing queue which will be loaded
    by the adapter.
    Args:
        in_q (curio.Queue): Queue for incoming messages.
        out_q (curio.Queue): Queue for outgoing messages.
        client (curio.Socket): The client socket.
        _: dummy
    Returns:
    """
    # Disable Nagle's algorithm so small WebSocket frames go out immediately.
    client.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
    wsconn = WSConnection(ConnectionType.SERVER)
    closed = False
    # need to accept the request first
    try:
        auth = await client.recv(65535)
    except ConnectionResetError:
        # connection dropped
        print('Connection to {0} reset.'.format(client), file=sys.stderr)
        return
    # Feed the HTTP upgrade request into wsproto; process_incoming builds
    # the handshake response bytes (and may already flag the connection
    # closed or push messages onto in_q).
    wsconn.receive_data(auth)
    closed, rsp = await process_incoming(wsconn, in_q, client)
    await client.sendall(rsp)
    while not closed:
        try:
            # Race a socket read against the next outgoing message; the
            # first task to finish wins and the loser is cancelled.
            wstask = await spawn(client.recv, 65535)
            outqtask = await spawn(out_q.get)
            async with TaskGroup([wstask, outqtask]) as g:
                task = await g.next_done()
                result = await task.join()
                await g.cancel_remaining()
        except ConnectionResetError:
            print('Connection to {0} reset.'.format(client), file=sys.stderr)
            return
        if task is wstask:
            # Bytes arrived from the client: parse them and send any reply
            # frames produced by the protocol machine.
            wsconn.receive_data(result)
            closed, rsp = await process_incoming(wsconn, in_q, client)
            await client.sendall(rsp)
        else:
            # We got something from the out queue.
            if result is None:
                # Terminate the connection.
                print('Closing WebSocket connection from server:', client.getpeername(),
                      file=sys.stderr)
                wsconn.close()
                closed = True
            else:
                payload = wsconn.send(Message(data=result))
                await client.sendall(payload)
            # Mark the queue item handled in both branches (None sentinel
            # included).
            await out_q.task_done()
def tresize(tombfile, keyfile, passphrase, newsize):
    """
    Resize a tomb.
    Keyfile, passphrase and new size are needed.
    """
    # Build the `tomb resize` command line; the passphrase is passed
    # through sanitize_passphrase before reaching the shell command.
    cmd = [
        'tomb', 'resize', tombfile,
        '-k', keyfile,
        '--unsafe',
        '--tomb-pwd', sanitize_passphrase(passphrase),
        '-s', str(newsize),
        '--no-color',
    ]
    return execute(cmd)
def auto_z_levels(fid, x, y, variable, t_idx, n_cont, n_dec):
    """
    list(float) = auto_z_levels(fid, variable, t_idx, n_cont, n_dec)

    Collects the automatically chosen contour levels of `variable` over the
    given time indices on a throwaway figure, then returns the sorted,
    de-duplicated union (rounded to `n_dec` decimals).
    """
    # Work on a scratch figure; it is closed before returning.
    fig, ax = plt.subplots()
    levels = np.ndarray(0)
    for t in t_idx:
        field = fid.variables[variable][t]
        contours = ax.contour(x, y, field, n_cont)
        levels = np.concatenate((levels, contours.levels), axis=0)
    result = np.sort(np.unique(np.around(levels, n_dec)))
    plt.close(fig)
    return result
def test_Get_Histogram_key():
    """
    Standard use test: qubits acted on by non-identity Paulis (Z1, Z2, X5)
    should appear comma-separated in the histogram key.
    """
    PauliWord = 'I0 Z1 Z2 I3 I4 X5'
    Histogram_key = Get_Histogram_key(PauliWord)
    expected = '1,2,5'
    # BUG FIX: the original `assert expected and Histogram_key` only checked
    # that both strings were truthy, so the test could never fail.
    assert Histogram_key == expected
def test_get_source_files():
    """
    Should probably be removed or altered, but this does help test that pytest is
    importing correctly.
    """
    # Files that must be found regardless of the notebook flag.
    always_expected = [
        os.path.join("drivers", "experiment1.py"),
        os.path.join("src", "utils.py"),
        os.path.join("drivers", "initialize.py"),
    ]
    notebook = os.path.join("notebooks", "EDA.ipynb")
    with_notebooks = utils.get_source_files()
    for path in always_expected:
        assert path in with_notebooks
    assert notebook in with_notebooks
    # Passing False must exclude notebooks but keep everything else.
    without_notebooks = utils.get_source_files(False)
    assert notebook not in without_notebooks
    for path in always_expected:
        assert path in without_notebooks
def spider_next(url, lev):
    """
    # spider_next
    @Description:
        core function of spider
        with recursive structure to traverse all the nodes in it
        (province -> city -> county -> town -> village); writes one
        indented line per region to the global output file `wFile`
    ---------
    @Param:
        url: str
        lev: recursive level (2 = city ... 5 = village)
    -------
    @Returns:
        recursion, void type
    -------
    """
    # choose spider_class in order to select specific table elements in specific page
    if lev == 2:
        spider_class = "city"
    elif lev == 3:
        spider_class = "county"
    elif lev == 4:
        spider_class = "town"
    else:
        spider_class = "village"
    # indent is used to format the output
    global indent
    indent += "\t"
    # Some cities (e.g. Dongguan) skip an administrative level; this flag
    # records whether the current level produced any rows at all.
    has_cur_lev = False
    for item in get_html(url).select("tr." + spider_class + "tr"):  # select the assigned table row data
        item_td = item.select("td")
        item_td_code = item_td[0].select_one("a")
        item_td_name = item_td[1].select_one("a")
        if item_td_code is None:  # some td has no link with it
            item_href = None
            item_code = item_td[0].get_text()  # it can get the text even it has an enter symbol following it
            item_name = item_td[1].get_text()
            if lev == 5:
                item_name = item_td[2].get_text() + item_td[1].get_text()
                # the most childist ones has different output format with a identification code
        else:
            item_href = item_td_code.get("href")
            item_code = item_td_code.get_text()
            item_name = item_td_name.get_text()
        content2 = indent
        content2 += item_code + item_name
        has_cur_lev = True
        print(content2, file=wFile)
        tcs = datetime.now()  # time count
        if lev == 2 or lev == 3:
            print("["+format_time(tcs)+"] " + '*' *
                  (lev-1) + item_name + "开始爬取...")
        if item_href is not None:  # recursion
            spider_next(get_prefix(url) + item_href, lev + 1)
        tce = datetime.now()
        if lev == 2 or lev == 3:
            print("["+format_time(tce)+"] " + '*' * (lev-1) +
                  item_name + "爬取完成,用时" + format_time((tce-tcs), False))
        if lev == 2:
            print("--------------------------------------------------------")
    indent = indent[:-1]
    # BUG FIX: the original condition was `has_cur_lev is not True`, which
    # compared the int 1 to the bool singleton True with `is` and was
    # therefore ALWAYS true — every page was re-crawled one level deeper.
    # Only recurse in place when this level genuinely produced no rows
    # (the missing-level case), and never past the deepest level.
    if not has_cur_lev and lev != 5:
        spider_next(url, lev + 1)
def canonicalize(curie: str):
    """Return the best CURIE."""
    # TODO maybe normalize the curie first?
    norm_prefix, norm_identifier = normalize_curie(curie)
    if norm_prefix is None or norm_identifier is None:
        # Could not be normalized at all.
        return jsonify(query=curie, normalizable=False)
    norm_curie = f'{norm_prefix}:{norm_identifier}'
    rv = dict(query=curie)
    if norm_curie != curie:
        # Record the normalized form only when it differs from the input.
        rv['norm_curie'] = norm_curie
    if norm_curie not in canonicalizer.graph:
        rv['found'] = False
        return jsonify(rv)
    result_curie = canonicalizer.canonicalize(norm_curie)
    rv['found'] = True
    rv['result'] = result_curie
    rv['mappings'] = url_for(
        f'.{all_mappings.__name__}',
        source_curie=norm_curie,
        target_curie=result_curie,
    )
    return jsonify(rv)
def _DefaultValueConstructorForField(field):
  """Returns a function which returns a default value for a field.
  Args:
    field: FieldDescriptor object for this field.
  The returned function has one argument:
    message: Message instance containing this field, or a weakref proxy
      of same.
  That function in turn returns a default value for this field.  The default
  value may refer back to |message| via a weak reference.
  """
  # Map fields get a dedicated map-container initializer.
  if _IsMapField(field):
    return _GetInitializeDefaultForMap(field)
  if field.label == _FieldDescriptor.LABEL_REPEATED:
    if field.has_default_value and field.default_value != []:
      raise ValueError('Repeated field default value not empty list: %s' % (
          field.default_value))
    if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
      # (Removed a dead `message_type = field.message_type` local here —
      # the closure below reads `field.message_type` directly.)
      def MakeRepeatedMessageDefault(message):
        return containers.RepeatedCompositeFieldContainer(
            message._listener_for_children, field.message_type)
      return MakeRepeatedMessageDefault
    else:
      type_checker = type_checkers.GetTypeChecker(field)
      def MakeRepeatedScalarDefault(message):
        return containers.RepeatedScalarFieldContainer(
            message._listener_for_children, type_checker)
      return MakeRepeatedScalarDefault
  if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
    # Bind message_type once; the closure asserts the concrete class has
    # been materialized before instantiating it lazily.
    message_type = field.message_type
    def MakeSubMessageDefault(message):
      assert getattr(message_type, '_concrete_class', None), (
          'Uninitialized concrete class found for field %r (message type %r)'
          % (field.full_name, message_type.full_name))
      result = message_type._concrete_class()
      # Oneof members get a listener that clears their siblings on set.
      result._SetListener(
          _OneofListener(message, field)
          if field.containing_oneof is not None
          else message._listener_for_children)
      return result
    return MakeSubMessageDefault
  # Scalar singular field: the descriptor's default value suffices.
  def MakeScalarDefault(message):
    return field.default_value
  return MakeScalarDefault
async def test_http_errors(hass, mock_setup):
    """Test HTTP Errors."""
    # Each case: the exception raised by the Flipr client and the error key
    # the config flow is expected to report back on the form.
    error_cases = [
        (Timeout(), "cannot_connect"),
        (Exception("Bad request Boy :) --"), "unknown"),
    ]
    for side_effect, expected_error in error_cases:
        with patch(
            "flipr_api.FliprAPIRestClient.search_flipr_ids",
            side_effect=side_effect,
        ):
            result = await hass.config_entries.flow.async_init(
                DOMAIN,
                context={"source": config_entries.SOURCE_USER},
                data={
                    CONF_EMAIL: "nada",
                    CONF_PASSWORD: "nada",
                    CONF_FLIPR_ID: "",
                },
            )
        assert result["type"] == "form"
        assert result["errors"] == {"base": expected_error}
def create_work_database(target_work_database_path, country_vector_path):
    """Create a runtime status database if it doesn't exist.

    Builds a fresh SQLite database containing two tables:
      * ``work_status`` -- one row per 1-degree grid cell between 60S and
        60N that intersects at least one country, with a ``processed``
        flag initialized to 0.
      * ``detected_dams`` -- created empty; holds dam detections keyed by
        bounding box, probability, country list, and image URI.

    Any database already present at ``target_work_database_path`` is
    deleted and recreated.

    Parameters:
        target_work_database_path (str): path to database to create.
        country_vector_path (str): path to a country polygon vector; passed
            to ``get_country_intersection_list`` to decide which grid cells
            to keep.

    Returns:
        None.
    """
    LOGGER.debug('launching create_work_database')
    # processed quads table
    # annotations
    create_database_sql = (
        """
        CREATE TABLE work_status (
            grid_id INTEGER NOT NULL PRIMARY KEY,
            lng_min REAL NOT NULL,
            lat_min REAL NOT NULL,
            lng_max REAL NOT NULL,
            lat_max REAL NOT NULL,
            country_list TEXT NOT NULL,
            processed INT NOT NULL);
        CREATE INDEX lng_min_work_status_index ON work_status (lng_min);
        CREATE INDEX lat_min_work_status_index ON work_status (lat_min);
        CREATE INDEX lng_max_work_status_index ON work_status (lng_max);
        CREATE INDEX lat_max_work_status_index ON work_status (lat_max);

        CREATE TABLE detected_dams (
            dam_id INTEGER NOT NULL PRIMARY KEY,
            lng_min REAL NOT NULL,
            lat_min REAL NOT NULL,
            lng_max REAL NOT NULL,
            lat_max REAL NOT NULL,
            probability REAL NOT NULL,
            country_list TEXT NOT NULL,
            image_uri TEXT NOT NULL);
        CREATE INDEX lng_min_detected_dams_index ON detected_dams (lng_min);
        CREATE INDEX lat_min_detected_dams_index ON detected_dams (lat_min);
        CREATE INDEX lng_max_detected_dams_index ON detected_dams (lng_max);
        CREATE INDEX lat_max_detected_dams_index ON detected_dams (lat_max);
        CREATE INDEX image_uri_detected_dams_index
        ON detected_dams (image_uri);
        """)
    # Rebuild from scratch: remove any stale database first.
    if os.path.exists(target_work_database_path):
        os.remove(target_work_database_path)
    connection = sqlite3.connect(target_work_database_path)
    connection.executescript(create_database_sql)
    connection.commit()
    connection.close()

    # Enumerate every 1-degree cell between 60S and 60N; keep only cells
    # that intersect at least one country polygon.
    grid_insert_args = []
    grid_id = 0
    for lat_max in range(-60, 60):
        LOGGER.debug(lat_max)
        for lng_min in range(-180, 180):
            grid_box = shapely.geometry.box(
                lng_min, lat_max-1, lng_min+1, lat_max)
            intersecting_country_list = \
                get_country_intersection_list(
                    grid_box, country_vector_path)
            if intersecting_country_list:
                grid_insert_args.append((
                    grid_id, lng_min, lat_max-1, lng_min+1, lat_max,
                    ','.join(intersecting_country_list), 0))
                # NOTE(review): original indentation was lost in extraction;
                # grid_id is assumed to advance only for inserted cells --
                # ids stay unique either way since it is monotonically
                # increasing. TODO confirm against upstream source.
                grid_id += 1
    # Bulk-insert every retained grid row in a single executemany call.
    _execute_sqlite(
        """
        INSERT INTO
            work_status
            (grid_id, lng_min, lat_min, lng_max, lat_max, country_list,
             processed)
        VALUES (?, ?, ?, ?, ?, ?, ?)
        """, target_work_database_path,
        argument_list=grid_insert_args, mode='modify', execute='many')
def clean_text(dirty_text):
    r"""
    Given a string, tokenize it into unique lowercase word tokens.

    Tokens are maximal runs matching the regex ``[\w']+`` (word characters
    and apostrophes).  The result is sorted so the output is deterministic:
    the previous ``list(set(...))`` implementation returned tokens in
    arbitrary order, contradicting the documented example below.

    :param dirty_text: string to tokenize
    :return: sorted list of unique lowercase tokens

    >>> clean_text("American artist accomplishments american")
    ['accomplishments', 'american', 'artist']
    """
    # Local import keeps this stdlib replacement of nltk.RegexpTokenizer
    # self-contained; re.findall with the same pattern yields identical
    # tokens to RegexpTokenizer(r"[\w']+").tokenize(...).
    import re
    tokens = re.findall(r"[\w']+", dirty_text.lower())
    return sorted(set(tokens))
def write_json(data, file_out):
    """
    Write JSON to a file.

    Opening in ``"w"`` mode already truncates the file, so the previous
    explicit ``seek(0)``/``truncate()`` calls (a leftover from an ``r+``
    read-modify-write pattern) were no-ops and have been removed.
    ``json.dump`` streams directly to the file handle instead of building
    the whole document in memory with ``json.dumps``.

    :param data: In-memory JSON-serializable object.
    :param file_out: The file path to output the JSON to.
    """
    with open(file_out, "w") as jf:
        json.dump(data, jf, sort_keys=False, indent=4)
def test_create_file_data_json():
    """Check attachment-to-file-data conversion for email replies.

    Given: the raw response of an attachment in an email reply.
    When: create_file_data_json processes that attachment.
    Then: the produced file data matches the expected JSON format.
    """
    from SendEmailReply import create_file_data_json

    raw_attachment = util_load_json('test_data/attachment_example.json')
    produced = create_file_data_json(raw_attachment)
    assert produced == util_open_file('test_data/file_data.txt')
def GetCurrentUserController(AuthJSONController):
    """ Return the CurrentUserController in the proper scope """
    class CurrentUserController(AuthJSONController):
        """ Controller to return the currently signed in user """

        def __init__(self, toJson):
            """ Initialize with the Json Converter """
            self.toJson = toJson
            AuthJSONController.__init__(self)

        def performWithJSON(self, json=None, user=None):
            """ Convert the existing Word Lists to JSON """
            # Guard clause: no signed-in user yields the NO_USER error.
            if not user:
                return Errors.NO_USER.toJSON()
            return {'user': self.toJson(user)}

    return CurrentUserController
def configure_blueprints(app, blueprints):
    """Register every blueprint on *app*, attaching the module-level
    ``before_request`` hook to each one first.
    """
    for bp in blueprints:
        bp.before_request(before_request)
        app.register_blueprint(bp)
def fft_in_range(audiomatrix, startindex, endindex, channel):
    """
    Do an FFT in the specified range of indices.

    The audiomatrix should have the first index as its time domain and
    second index as the channel number. The startindex and endindex
    select the time range to use, and the channel parameter selects
    which channel to do the FFT on.

    Returns a vector of the first n//2 FFT coefficients (the
    non-negative-frequency half for real input), normalized by the
    window length n.
    """
    n = endindex - startindex
    indat = audiomatrix[startindex:endindex, channel]
    # Slice the half-spectrum directly; equivalent to the original
    # fancy-index form fft(...)[range(n//2)] but avoids materializing an
    # index list and copies less eagerly.
    outdat = numpy.fft.fft(indat)[:n // 2] / n
    return outdat
def drop_arrays_by_name(gt_names, used_classes):
    """Drop irrelevant ground truths by name.

    Args:
        gt_names (list[str]): Names of ground truths.
        used_classes (list[str]): Classes of interest.

    Returns:
        np.ndarray: Indices of ground truths that will be dropped.
    """
    drop_indices = []
    for idx, name in enumerate(gt_names):
        # Anything not in the classes of interest gets dropped.
        if name not in used_classes:
            drop_indices.append(idx)
    return np.array(drop_indices, dtype=np.int64)
def test_SimplePulsar_atnf():
    """Test functions against ATNF pulsar catalog values"""
    atnf = load_atnf_sample()
    pulsar = SimplePulsar(
        P=Quantity(atnf['P0'], 's'),
        P_dot=Quantity(atnf['P1'], ''),
    )
    # Derived quantity -> ATNF catalog column it must reproduce (1%).
    checks = {
        'AGE': pulsar.tau.to('yr'),
        'EDOT': pulsar.luminosity_spindown.to('erg s^-1'),
        'BSURF': pulsar.magnetic_field.to('gauss'),
    }
    for column, computed in checks.items():
        assert_allclose(computed, atnf[column], rtol=0.01)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.