content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def set(isamAppliance, dsc, check_mode=False, force=False):
    """
    Update the DSC tracing levels on the appliance.

    The PUT request is issued only when the current configuration differs
    from ``dsc`` (or when ``force`` is set). In check mode no request is
    made; a changed=True result is returned instead.
    """
    check_value, warnings = _check(isamAppliance, dsc)
    if force is not True and check_value is not False:
        # Already in the desired state - report no change.
        return isamAppliance.create_return_object(warnings=warnings)
    if check_mode is True:
        # Dry run: signal that a change would have been made.
        return isamAppliance.create_return_object(changed=True, warnings=warnings)
    return isamAppliance.invoke_put(
        "Updating the tracing levels",
        "/isam/cluster/tracing/v1",
        {
            'dsc': dsc
        }, requires_model=requires_model)
def calibrateCameraCharuco(charucoCorners, charucoIds, board, imageSize, cameraMatrix, distCoeffs, rvecs=None, tvecs=None, flags=None, criteria=None):
    """ calibrateCameraCharuco(charucoCorners, charucoIds, board, imageSize, cameraMatrix, distCoeffs[, rvecs[, tvecs[, flags[, criteria]]]]) -> retval, cameraMatrix, distCoeffs, rvecs, tvecs

    Auto-generated stub for OpenCV's ``cv2.aruco.calibrateCameraCharuco``;
    the real implementation lives in the native extension module. This
    Python body is never executed - it exists only so IDEs and documentation
    tools can see the signature.
    """
    pass
def submit_annotations(ann_srv_url, annotations, send_zip=False):
    """
    Call the Annotation Storage Service to save annotations.

    :param ann_srv_url: URL of the annotation service where the annotations
        will be stored.
    :param annotations: Annotations to append to the annotations Document.
    :param send_zip: indicates if the annotations should be sent in a zip file
    :type annotations: list
    :returns: the ``requests.Response`` of the successful POST.
    :raises InvalidAnnotationFormat: if *annotations* is not a list.
    :raises UploadError: if the upload fails, or times out on every attempt.
    """
    logger = logging.getLogger(__name__)
    logger.info("Submitting annotations to target %s", ann_srv_url)
    if not isinstance(annotations, list):
        raise InvalidAnnotationFormat("Annotations should be an object of type"
                                      " list")
    cur_try = 1
    max_tries = 5
    result = None
    payload = json.dumps({'common': {},
                          'data': annotations})
    logger.debug("Upload URL is %s", ann_srv_url)
    logger.debug("Submitted data is %s", payload)
    opened_zipped_file = None
    temp_text_file = None
    temp_zip_file = None
    if send_zip:
        headers = {'accept': 'application/json'}
        # Write the JSON payload to annotations.txt and wrap it in a zip.
        temp_dir = tempfile.gettempdir()
        text_file_name = "annotations.txt"
        zip_file_name = "annotations.zip"
        temp_text_file = os.path.join(temp_dir, text_file_name)
        temp_zip_file = os.path.join(temp_dir, zip_file_name)
        with open(temp_text_file, 'w') as file_to_zip:
            file_to_zip.write(str(payload))
        # The context manager closes the archive; the previous explicit
        # zippy.close() inside the `with` block was redundant.
        with zipfile.ZipFile(temp_zip_file, "w",
                             compression=zipfile.ZIP_DEFLATED) as zippy:
            zippy.write(temp_text_file, text_file_name)
        # Opened for transport (HTTP POST via form-data, as bytes)
        opened_zipped_file = open(temp_zip_file, "rb")
    else:
        headers = {'content-type': 'application/json',
                   'accept': 'application/json'}
    try:
        while cur_try <= max_tries and not result:
            logger.debug("Trying HTTP POST request %s/%s", cur_try, max_tries)
            try:
                if opened_zipped_file is None:
                    result = requests.post(ann_srv_url,
                                           data=payload,
                                           timeout=TIMEOUT,
                                           headers=headers)
                else:
                    # Rewind so a retry re-sends the whole archive instead
                    # of an exhausted (EOF) stream.
                    opened_zipped_file.seek(0)
                    result = requests.post(ann_srv_url,
                                           files={"file": opened_zipped_file},
                                           timeout=TIMEOUT,
                                           headers=headers)
                if result.status_code not in [200, 201, 204]:
                    logger.error("Got following code : %s", result.status_code)
                    result.raise_for_status()
            except requests.exceptions.Timeout as error:
                # Handle timeout error separately: timeouts are retried,
                # any other request error is fatal.
                if cur_try < max_tries:
                    cur_try += 1
                    logger.debug("Current try : %s", cur_try)
                    logger.warning("Timeout occurred while uploading document to "
                                   "%s. Retry (%s/%s)",
                                   ann_srv_url, cur_try, max_tries)
                else:
                    logger.error("Could not upload document to %s", ann_srv_url)
                    raise UploadError(error)
            except requests.exceptions.RequestException as error:
                logger.error("Could not upload document to %s", ann_srv_url)
                raise UploadError(error)
    finally:
        # Clean up exactly once, after all attempts. The previous
        # implementation deleted the temp files inside the retry loop
        # (breaking any retry of a zipped upload) and never closed the
        # zip file handle it opened.
        if opened_zipped_file is not None:
            opened_zipped_file.close()
        for artifact in (temp_text_file, temp_zip_file):
            if artifact and os.path.isfile(artifact):
                os.remove(artifact)
    return result
def parse_logfile(logfile):
    """
    Read iotime log entries from ``logfile``.

    Returns an astropy Table with columns
    function duration readwrite filename timestamp datetime.
    Lines that ``parse`` cannot interpret (returns None) are skipped.
    """
    from astropy.table import Table
    from astropy.time import Time
    with open(logfile) as stream:
        entries = [entry for entry in map(parse, stream) if entry is not None]
    timing = Table(rows=entries)
    # Derive a native datetime column from the ISO timestamp strings.
    timing['datetime'] = Time(timing['timestamp']).datetime
    return timing
def histogram2d(x,y,n=10,range=None,density=False,keep_outliers=False,out=None):
    """2D histogram with uniform bins. Accelerated by numba
    x, y: array_like
        x and y coordinates of each point. x and y will be flattened
    n : scalar or (nx, ny)
        number of bins in x and y
    range : None or ((xmin,xmax),(ymin,ymax))
        range of bins. If any is None, the min/max is computed
    density : optional, bool
        if True, compute bin_count / (sample_count * bin_area)
    keep_outliers : optional, bool
        if True, add rows and columns to each edge of the histogram to count the outliers
    out : array_like, optional, shape = (nx, ny)
        Array to store output. Note that for compatibility with numpy's histogram2d, out
        is indexed out[x,y]. If keep_outliers is True, out must have shape (nx+2,ny+2)
    """
    # NOTE: the keyword `range` shadows the builtin; it cannot be renamed
    # without breaking callers that pass it by keyword.
    x = np.asarray(x)
    y = np.asarray(y)
    if x.shape != y.shape:
        raise RuntimeError("x and y must be same shape")
    # Flatten so the numba kernel only deals with 1-D coordinate arrays.
    x = x.reshape(-1)
    y = y.reshape(-1)
    if range is None:
        xmin,xmax = None,None
        ymin,ymax = None,None
    else:
        xmin,xmax = range[0]
        ymin,ymax = range[1]
    # Fill in any missing bounds from the data (single fused min/max pass).
    if xmin is None or xmax is None:
        xmm = aminmax(x)
        if xmin is None: xmin = xmm[0]
        if xmax is None: xmax = xmm[1]
    if ymin is None or ymax is None:
        ymm = aminmax(y)
        if ymin is None: ymin = ymm[0]
        if ymax is None: ymax = ymm[1]
    if np.isscalar(n):
        nx,ny = n,n
    else:
        nx,ny = n
    # Outlier mode adds one guard row/column on every edge.
    if keep_outliers:
        out_shape = (nx+2,ny+2)
    else:
        out_shape = (nx,ny)
    if density:
        # 1/ (sample_count * bin_area); folded into a single scale factor
        # that the kernel applies per count, so the output must be float.
        d = (nx*ny)/(len(x)*(xmax-xmin)*(ymax-ymin))
        if out is None:
            out = np.empty(out_shape,np.float64)
    else:
        # Raw counts: scale of 1, integral output dtype.
        d = 1
        if out is None:
            out = np.empty(out_shape,np.uint64)
    # The numba kernel does the binning in-place into `out`.
    _histogram2d(out, x,y,nx,ny,xmin,xmax,ymin,ymax,d,keep_outliers)
    return out
def createSplash(app):
    """Create and show a splash screen while the main window loads.

    Args:
        app: The QApplication instance (used to pump pending events).

    Return:
        SplashScreen object.
    """
    imagesDir = os.path.join(os.path.dirname(inspect.getfile(KrakenUI)), 'images')
    pixmap = QtGui.QPixmap()
    pixmap.load(os.path.join(imagesDir, 'KrakenUI_Splash.png'))
    splash = QtGui.QSplashScreen(pixmap, QtCore.Qt.WindowStaysOnTopHint)
    # Clip the splash window to the image's alpha mask.
    splash.setMask(pixmap.mask())
    splash.showMessage("Loading Extensions...",
                       QtCore.Qt.AlignBottom | QtCore.Qt.AlignLeft,
                       QtCore.Qt.white)
    splash.show()
    # Force a paint so the splash is visible before loading continues.
    app.processEvents()
    return splash
def _anatomical_swaps(pd):
    """Return swap and flip arrays for data transform to anatomical space.

    Parameters
    ----------
    pd : dict
        Procpar-like dict; the 'orient' key selects the slice orientation.

    Returns
    -------
    (list, list)
        Axis permutation (`swap`) and the axes to flip (`flip`).

    Raises
    ------
    Exception
        If the rotation is not a 90-degree one, or 'orient' is unknown
        (the previous code fell through to an UnboundLocalError in that
        case).
    """
    use_hardcoded = True  # no-brain implementation for 90deg rots
    if use_hardcoded:
        if _check90deg(pd) != True:
            raise(Exception('Not implemented'))
        # (swap, flip) per orientation, hardcoded for 90-degree rotations.
        transforms = {
            'trans':   ([0, 2, 1], [1, 2]),
            'trans90': ([2, 0, 1], [0]),
            'sag':     ([2, 1, 0], [1, 2]),
            'sag90':   ([1, 2, 0], [0]),
            'cor':     ([0, 1, 2], [1]),
            'cor90':   ([1, 0, 2], []),
        }
        ori = pd['orient']
        try:
            swap, flip = transforms[ori]
        except KeyError:
            # Explicit error instead of the old silent fall-through.
            raise Exception('Unknown orientation: {}'.format(ori))
        return swap, flip
    # General path: derive the permutation from the qform rotation matrix.
    else:
        rot_matrix = vj.core.niftitools._qform_rot_matrix(pd)
        inv = np.linalg.inv(rot_matrix).astype(int)
        swap = inv.dot(np.array([1, 2, 3], dtype=int))
        # Negative entries mark axes whose direction must be reversed.
        flipaxes = [num for num, i in enumerate(swap) if i < 0]
        swapaxes = (np.abs(swap) - 1).astype(int)
        return swapaxes, flipaxes
def block_variants_and_samples(variant_df: DataFrame, sample_ids: List[str],
                               variants_per_block: int,
                               sample_block_count: int) -> (DataFrame, Dict[str, List[str]]):
    """
    Creates a blocked GT matrix and index mapping from sample blocks to a list of corresponding sample IDs. Uses the
    same sample-blocking logic as the blocked GT matrix transformer.

    Requires that:
    - Each variant row has the same number of values
    - The number of values per row matches the number of sample IDs

    Args:
        variant_df : The variant DataFrame
        sample_ids : The list of sample ID strings
        variants_per_block : The number of variants per block
        sample_block_count : The number of sample blocks

    Returns:
        tuple of (blocked GT matrix, index mapping)
    """
    # typeguard validates the annotated arguments of the calling frame.
    assert check_argument_types()
    # Cheap sanity check: inspect only the first row's `values` array length.
    first_row = variant_df.selectExpr("size(values) as numValues").take(1)
    if not first_row:
        raise Exception("DataFrame has no values.")
    num_values = first_row[0].numValues
    if num_values != len(sample_ids):
        raise Exception(
            f"Number of values does not match between DataFrame ({num_values}) and sample ID list ({len(sample_ids)})."
        )
    __validate_sample_ids(sample_ids)
    # Delegate the actual matrix blocking to the Glow transformer.
    blocked_gt = glow.transform("block_variants_and_samples",
                                variant_df,
                                variants_per_block=variants_per_block,
                                sample_block_count=sample_block_count)
    # Build the sample-block -> sample-ID mapping with the same blocking.
    index_map = __get_index_map(sample_ids, sample_block_count, variant_df.sql_ctx)
    output = blocked_gt, index_map
    assert check_return_type(output)
    return output
def sparql_service_update(service, update_query):
    """
    Helper function to update (DELETE DATA, INSERT DATA, DELETE/INSERT) data.

    :param service: URL of the SPARQL update endpoint.
    :param update_query: The update query string to execute.
    :return: The literal string 'Done' on success.
    """
    wrapper = SPARQLWrapper(service)
    wrapper.setMethod(POST)
    wrapper.setRequestMethod(POSTDIRECTLY)
    wrapper.setQuery(update_query)
    # SPARQLWrapper is going to throw an exception if
    # result.response.status != 200, so reaching the return implies success.
    wrapper.query()
    return 'Done'
def filtered_events(request):
    """Render the filtered stocking-events page.

    Passes the stocking-events API endpoint URL and the configured maximum
    number of events into the ``found_events`` template. (The previous
    docstring described the annual_events view and did not match this code.)
    """
    # Resolve the API endpoint the template will query for event data.
    dataUrl = reverse("api:api-get-stocking-events")
    maxEvents = settings.MAX_FILTERED_EVENT_COUNT
    return render(
        request,
        "stocking/found_events.html",
        context={"dataUrl": dataUrl, "maxEvents": maxEvents},
    )
def track_user_session(user=None, request=None):
    """Creates, filters and updates UserSessions on the core and sends UserSessions to the hub on next login.

    On a LOGIN request ('/api-token-auth/'): if a previous UserSession
    exists it is POSTed to the hub (where the FeedbackActivity
    hours_used_release field is calculated) and a fresh UserSession is
    created. On any other request (e.g. verify, refresh) the current
    session's end time is refreshed.

    Args:
        user: The user object from people.models (must expose ``email``)
        request: The request object from django_rest_framework

    Returns:
        None on success, or an error response if the hub is unreachable
    """
    user_session = UserSession.objects.filter(user_email=user.email).order_by('session_start').last()
    request_url = request.META.get('PATH_INFO', '')
    if '/api-token-auth/' in request_url:  # User login, new session start
        if user_session:
            # serialize, then transform to JSON format (.data); done lazily
            # here since only the login path uses it
            user_session_serial = UserSessionSerializer(user_session).data
            hub_token = ServiceToServiceAuth().generate_hub_token()
            request_params = {
                'url': settings.HUB_URL + 'feedback/user-sessions/',
                'json': user_session_serial,
                'headers': {"Authorization": "STS-JWT {0}".format(hub_token)}
            }
            try:
                requests.post(**request_params)
                # NOTE(review): as before, a new session is only created when
                # a previous one exists, so a user's very first login is not
                # tracked - confirm whether that is intended.
                UserSession.objects.create(user_email=user.email, session_start=datetime.now(),
                                           session_end=datetime.now(),
                                           tag=settings.VERSION)
            except requests.ConnectionError:
                return create_error_response('Failed to connect to hub service: {url}'.format(**request_params))
    elif user_session is not None:  # Any other request e.g. refresh, verify
        # Guard against AttributeError: the previous code dereferenced
        # user_session unconditionally and crashed when no session existed.
        user_session.session_end = datetime.now()
        user_session.save()
def score(string, goal):
    """Return the number of positions at which ``string`` matches ``goal``.

    Args:
        string: Candidate string to evaluate.
        goal: Target string to compare against.

    Returns:
        int: Count of positions where the two strings have equal characters.
    """
    # The previous implementation discarded the ``string`` argument and
    # regenerated a random candidate from an undefined module-level
    # ``values`` global, which made the parameter meaningless and the
    # result non-deterministic. zip() also makes the comparison safe when
    # the lengths differ (extra characters simply don't count).
    return sum(1 for candidate_ch, goal_ch in zip(string, goal)
               if candidate_ch == goal_ch)
def validate_parent():
    """
    This api validates a parent in the DB.

    Reads ``parent_id`` and ``decision`` (defaults to 0) from the JSON body,
    stores the decision together with the approving user (taken from the
    JWT identity) and the approval time.

    NOTE(review): the user-not-found branch also returns HTTP 201; 404
    would be more conventional - confirm before changing, clients may
    rely on the current behavior.
    """
    parent_id = request.json.get('parent_id', None)
    decision = request.json.get('decision', 0)
    parent = query_existing_user(parent_id)
    if parent:
        parent.validated = decision
        # Record who approved and when, from the caller's JWT.
        parent.approver_id = get_jwt_identity().get('id')
        parent.approve_time = datetime.utcnow()
        db.session.add(parent)
        db.session.commit()
        return jsonify(message="Parent validation updated"), 201
    else:
        return jsonify(message='User does not exist'), 201
def parse_pdu(data, **kwargs):
    """Parse a binary PDU into a PDU object.

    Returns None when no command can be extracted from ``data``;
    ``kwargs`` are forwarded to the PDU constructor.
    """
    command = pdu.extract_command(data)
    if command is None:
        return None
    parsed = make_pdu(command, **kwargs)
    parsed.parse(data)
    return parsed
def alchemy_nodes(mol):
    """Featurization for all atoms in a molecule. The atom indices
    will be preserved.

    Parameters
    ----------
    mol : rdkit.Chem.rdchem.Mol
        RDKit molecule object

    Returns
    -------
    atom_feats_dict : dict
        Dictionary for atom features with keys:
        * ``'n_feat'`` - per-atom feature vectors stacked into one tensor
        * ``'node_type'`` - per-atom atomic numbers (int64 tensor)
    """
    atom_feats_dict = defaultdict(list)
    is_donor = defaultdict(int)
    is_acceptor = defaultdict(int)
    # Use RDKit's pharmacophore feature factory to flag H-bond
    # donor/acceptor atoms.
    fdef_name = osp.join(RDConfig.RDDataDir, 'BaseFeatures.fdef')
    mol_featurizer = ChemicalFeatures.BuildFeatureFactory(fdef_name)
    mol_feats = mol_featurizer.GetFeaturesForMol(mol)
    mol_conformers = mol.GetConformers()
    # Exactly one conformer is expected for this dataset.
    assert len(mol_conformers) == 1
    for i in range(len(mol_feats)):
        if mol_feats[i].GetFamily() == 'Donor':
            node_list = mol_feats[i].GetAtomIds()
            for u in node_list:
                is_donor[u] = 1
        elif mol_feats[i].GetFamily() == 'Acceptor':
            node_list = mol_feats[i].GetAtomIds()
            for u in node_list:
                is_acceptor[u] = 1
    num_atoms = mol.GetNumAtoms()
    for u in range(num_atoms):
        atom = mol.GetAtomWithIdx(u)
        atom_type = atom.GetAtomicNum()
        num_h = atom.GetTotalNumHs()
        atom_feats_dict['node_type'].append(atom_type)
        # Feature vector layout: element one-hot, atomic number,
        # acceptor flag, donor flag, aromaticity, hybridization one-hot,
        # total hydrogen count.
        h_u = []
        h_u += atom_type_one_hot(atom, ['H', 'C', 'N', 'O', 'F', 'S', 'Cl'])
        h_u.append(atom_type)
        h_u.append(is_acceptor[u])
        h_u.append(is_donor[u])
        h_u += atom_is_aromatic(atom)
        h_u += atom_hybridization_one_hot(atom, [Chem.rdchem.HybridizationType.SP,
                                                 Chem.rdchem.HybridizationType.SP2,
                                                 Chem.rdchem.HybridizationType.SP3])
        h_u.append(num_h)
        atom_feats_dict['n_feat'].append(F.tensor(np.array(h_u).astype(np.float32)))
    # Stack the per-atom vectors into a (num_atoms, feat_dim) tensor and
    # cast node types to int64 for the DGL backend.
    atom_feats_dict['n_feat'] = F.stack(atom_feats_dict['n_feat'], dim=0)
    atom_feats_dict['node_type'] = F.tensor(np.array(
        atom_feats_dict['node_type']).astype(np.int64))
    return atom_feats_dict
def generate(*, artifacts: artifacts_types.ModelArtifacts, name: str) -> str:
    """
    Generate the model class source from the schema artifacts.

    Args:
        artifacts: The artifacts of the model (the previous docstring
            documented a nonexistent ``schema`` parameter).
        name: The name of the model.

    Returns:
        The source code for the model class.
    """
    model_artifacts = models_file_artifacts.calculate(artifacts=artifacts, name=name)
    return _source.generate(artifacts=model_artifacts)
def main():
    """Main function that trains and/or evaluates a model."""
    params = interpret_args()
    # Prepare the dataset into the proper form.
    data = atis_data.ATISDataset(params)
    # Pick the model class and optional anonymizer, then build the model.
    if params.interaction_level:
        model_cls = InteractionATISModel
    else:
        model_cls = ATISModel
    anonymizer = None
    if params.anonymize and params.anonymization_scoring:
        anonymizer = data.anonymizer
    model = model_cls(
        params,
        data.input_vocabulary,
        data.output_vocabulary,
        anonymizer)
    last_save_file = ""
    if params.train:
        last_save_file = train(model, data, params)
    if params.evaluate:
        evaluate(model, data, params, last_save_file)
    if params.interactive:
        interact(model, params, data.anonymizer, last_save_file)
    if params.attention:
        evaluate_attention(model, data, params, params.save_file)
def get_rot_mat_kabsch(p_matrix, q_matrix):
    """
    Get the optimal rotation matrix with the Kabsch algorithm. Notation is
    from https://en.wikipedia.org/wiki/Kabsch_algorithm

    Arguments:
        p_matrix: (np.ndarray) N x 3 source coordinates
        q_matrix: (np.ndarray) N x 3 target coordinates

    Returns:
        (np.ndarray) 3 x 3 proper rotation matrix (det = +1)
    """
    covariance = p_matrix.T @ q_matrix
    u_mat, _, vh_mat = np.linalg.svd(covariance)
    # Correction term guards against an improper rotation (reflection):
    # flips the last singular direction when det(V U^T) is negative.
    correction = np.identity(3)
    correction[2, 2] = np.linalg.det(vh_mat.T @ u_mat.T)
    return vh_mat.T @ correction @ u_mat.T
def test_tfenv_uninstall_arg_takes_precedence(cd_tmp_path: Path) -> None:
    """Test ``runway tfenv uninstall`` arg takes precedence over file."""
    version = "1.0.0"
    target_dir = cd_tmp_path / version
    target_dir.mkdir()
    # The version file names a *different* version; the CLI argument must win.
    (cd_tmp_path / TF_VERSION_FILENAME).write_text("0.12.0")
    result = CliRunner().invoke(cli, ["tfenv", "uninstall", version])
    assert result.exit_code == 0
    assert not target_dir.exists()
def patch_tex(texfile):
    """Fix errors in the given texfile, acting on the whole text.

    :param texfile: pathlib.Path of the .tex file; it is read and rewritten
        in place as UTF-8.
    """
    tex = texfile.read_text(encoding='utf8')
    # Collapse Sphinx's equation*/split/align nesting into a plain align*.
    tex = tex.replace(
        r'\begin{equation*}' + "\n" + r'\begin{split}\begin{align}',
        r'\begin{align*}',
    )
    tex = tex.replace(
        r'\end{align}\end{split}' + "\n" + r'\end{equation*}', r'\end{align*}'
    )
    # Drop the auto-generated "Indices and tables" chapter entirely.
    tex = tex.replace(
        _multiline_str(
            r'\chapter{Indices and tables}',
            r'\label{\detokenize{index:indices-and-tables}}\begin{itemize}',
            r'\item {} ',
            r'\DUrole{xref,std,std-ref}{genindex}',
            r'',
            r'\item {} ',
            r'\DUrole{xref,std,std-ref}{modindex}',
            r'',
            r'\end{itemize}',
        ),
        '',
    )
    texfile.write_text(tex, encoding='utf8')
def test_full_with_weeks():
    """Round-trip an ISO 8601 duration that includes a weeks component."""
    original = 'P3Y2M1WT12H11M10S'
    roundtrip = timedelta_to_iso8601(iso8601_to_timedelta(original))
    assert roundtrip == original
def _msrest_next(iterator):
    """Advance ``iterator``, translating StopIteration into _MsrestStopIteration.

    To avoid:
    TypeError: StopIteration interacts badly with generators and cannot be raised into a Future
    (PEP 479 forbids StopIteration escaping into a coroutine/generator, so
    it is re-raised here as a custom exception type instead.)
    """
    try:
        return next(iterator)
    except StopIteration:
        raise _MsrestStopIteration()
def record_export(record=None, export_format=None, pid_value=None, permissions=None):
    """Export marc21 record page view.

    Aborts with 404 when no exporter is configured for ``export_format``.
    """
    exporters = current_app.config.get("INVENIO_MARC21_RECORD_EXPORTERS", {})
    exporter = exporters.get(export_format)
    if exporter is None:
        abort(404)
    default_options = {
        "indent": 2,
        "sort_keys": True,
    }
    options = current_app.config.get(
        "INVENIO_MARC21_RECORD_EXPORTER_OPTIONS", default_options
    )
    # Resolve the configured serializer (class or dotted import path).
    serializer_cls = obj_or_import_string(exporter["serializer"])
    exported_record = serializer_cls(options=options).serialize_object(record.to_dict())
    return render_template(
        "invenio_records_marc21/records/export.html",
        pid_value=pid_value,
        export_format=exporter.get("name", export_format),
        exported_record=exported_record,
        record=Marc21UIJSONSerializer().dump_one(record.to_dict()),
        permissions=permissions,
    )
def body_range(
        operators: List[str],
        font_changes: List[Tuple]) -> Tuple[Optional[int], Optional[int]]:
    """given some assumptions about how headers and footers are formatted,
    find the operations describing the body text of of a page

    Returns (start_idx, end_idx) into ``operators``; either may be None
    when no body span can be identified.

    NOTE(review): an empty ``font_changes`` list raises IndexError at the
    first access below - confirm callers guarantee at least one entry.
    """
    # font_changes: (idx, weight, size)
    thresh = 20.0
    if font_changes[0][2] > thresh:
        # if the first font is big, this is a chapter heading page
        # we want everything after the next font change
        # find the first Td after this point
        if len(font_changes) < 2:
            start_idx = None
        else:
            start_idx = font_changes[1][0]
        # And last three operations (for the page number) can be discarded.
        end_idx = len(operators) - 3
    elif font_changes[0][1] == "regular":
        # otherwise, we are looking for a (regular bold regular) pattern
        if len(font_changes) < 3:
            start_idx = None
        else:
            start_idx = font_changes[2][0] + 1
        # discard the final operation
        end_idx = len(operators) - 1
    elif font_changes[0][1] == "bold":
        # or (bold regular) pattern
        if len(font_changes) < 2:
            start_idx = None
        else:
            start_idx = font_changes[1][0] + 1 + 2  # (to skip over page number)
        # discard the final operation
        end_idx = len(operators) - 1
    else:
        start_idx = None
        end_idx = None
    if start_idx is not None and start_idx < len(operators):
        # Snap the start to the next Td (text-positioning) operator.
        # NOTE(review): list.index raises ValueError if b"Td" is absent
        # after start_idx - confirm the input always contains one.
        start_idx = operators[start_idx:].index(b"Td") + start_idx
    if end_idx is not None and end_idx > len(operators):
        # NOTE(review): end_idx is always <= len(operators) - 1 at this
        # point, so this guard can never fire; possibly `end_idx <=
        # start_idx` was intended - confirm before changing.
        end_idx = None
    return start_idx, end_idx
def sine(value, default=_SENTINEL):
    """Filter and function to get sine of the value.

    Non-numeric input returns ``default`` when given, otherwise the value
    itself (after emitting a no-default warning).
    """
    try:
        result = math.sin(float(value))
    except (ValueError, TypeError):
        if default is _SENTINEL:
            warn_no_default("sin", value, value)
            return value
        return default
    return result
def wmt_affine_base_1e4():
    """Set of hyperparameters: wmt_affine_base with 1e-4 KL regularization."""
    hparams = wmt_affine_base()
    overrides = {
        "kl_reg": 1e-4,
        "learning_rate_constant": 2.0,
        "learning_rate_warmup_steps": 8000,
    }
    for attr, value in overrides.items():
        setattr(hparams, attr, value)
    return hparams
def random_crop_with_constraints(bbox, size, height, width, min_scale=0.3, max_scale=1,
                                 max_aspect_ratio=2, constraints=None,
                                 max_trial=1000):
    """Crop an image randomly with bounding box constraints.

    This data augmentation is used in training of
    Single Shot Multibox Detector [#]_. More details can be found in
    data augmentation section of the original paper.
    .. [#] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy,
       Scott Reed, Cheng-Yang Fu, Alexander C. Berg.
       SSD: Single Shot MultiBox Detector. ECCV 2016.

    Parameters
    ----------
    bbox : numpy.ndarray
        Numpy.ndarray with shape (N, 4+) where N is the number of bounding boxes.
        The second axis represents attributes of the bounding box.
        Specifically, these are :math:`(x_{min}, y_{min}, x_{max}, y_{max})`,
        we allow additional attributes other than coordinates, which stay intact
        during bounding box transformations.
    size : tuple
        Tuple of length 2 of image shape as (width, height).
    height : int
        Height used (together with ``scale``) to size candidate crops.
        NOTE(review): crop *dimensions* come from height/width while crop
        *offsets* are drawn within ``size`` - confirm callers pass
        consistent values, otherwise randrange below can raise.
    width : int
        Width used (together with ``scale``) to size candidate crops.
    min_scale : float
        The minimum ratio between a cropped region and the original image.
        The default value is :obj:`0.3`.
    max_scale : float
        The maximum ratio between a cropped region and the original image.
        The default value is :obj:`1`.
    max_aspect_ratio : float
        The maximum aspect ratio of cropped region.
        The default value is :obj:`2`.
    constraints : iterable of tuples
        An iterable of constraints.
        Each constraint should be :obj:`(min_iou, max_iou)` format.
        If means no constraint if set :obj:`min_iou` or :obj:`max_iou` to :obj:`None`.
        If this argument defaults to :obj:`None`, :obj:`((0.1, None), (0.3, None),
        (0.5, None), (0.7, None), (0.9, None), (None, 1))` will be used.
    max_trial : int
        Maximum number of trials for each constraint before exit no matter what.

    Returns
    -------
    numpy.ndarray
        Cropped bounding boxes with shape :obj:`(M, 4+)` where M <= N.
    tuple
        Tuple of length 4 as (x_offset, y_offset, new_width, new_height).
    """
    # default params in paper
    if constraints is None:
        constraints = (
            (0.1, None),
            (0.3, None),
            (0.5, None),
            (0.7, None),
            (0.9, None),
            (None, 1),
        )
    # No boxes: skip the candidate search entirely.
    if len(bbox) == 0:
        constraints = []
    w, h = size
    candidates = []
    for min_iou, max_iou in constraints:
        # Open-ended bounds become +/- infinity.
        min_iou = -np.inf if min_iou is None else min_iou
        max_iou = np.inf if max_iou is None else max_iou
        for _ in range(max_trial):
            scale = random.uniform(min_scale, max_scale)
            # Aspect ratio range is clamped so crop area stays within scale^2.
            aspect_ratio = random.uniform(
                max(1 / max_aspect_ratio, scale * scale),
                min(max_aspect_ratio, 1 / (scale * scale)))
            crop_h = int(height * scale / np.sqrt(aspect_ratio))
            crop_w = int(width * scale * np.sqrt(aspect_ratio))
            # NOTE(review): randrange raises ValueError when crop_h >= h or
            # crop_w >= w - depends on height/width vs size consistency.
            crop_t = random.randrange(h - crop_h)
            crop_l = random.randrange(w - crop_w)
            crop_bb = np.array((crop_l, crop_t, crop_l + crop_w, crop_t + crop_h))
            # Keep the candidate only if every box satisfies the IoU window.
            iou = bbox_iou(bbox, crop_bb[np.newaxis])
            if min_iou <= iou.min() and iou.max() <= max_iou:
                top, bottom = crop_t, crop_t + crop_h
                left, right = crop_l, crop_l + crop_w
                candidates.append((left, top, right-left, bottom-top))
                break
    # random select one
    while candidates:
        crop = candidates.pop(np.random.randint(0, len(candidates)))
        new_bbox = bbox_crop(bbox, crop, allow_outside_center=False)
        if new_bbox.size < 1:
            continue
        new_crop = (crop[0], crop[1], crop[2], crop[3])
        return new_bbox, new_crop
    # Fallback: retry with a near-full-image crop and fewer trials.
    # NOTE(review): this recursion has no depth bound; if no candidate ever
    # survives it can recurse indefinitely - confirm termination assumption.
    return random_crop_with_constraints(bbox, (w, h), height, width,min_scale=0.9,max_scale=1,max_trial=50)
def unravelContent(originalData):
    """
    This is the primary function responsible for creating an alternate data stream of unraveled data.

    Repeatedly applies de-obfuscation passes (string reversal, stream
    decompression, Base64 decoding, SecureString decryption) until a full
    pass makes no modification.

    Args:
        originalData: Script content
    Returns:
        contentData: Unraveled additional content
    """
    contentData = normalize(originalData)
    loopCount = 0
    while True:
        # Each pass sets modificationFlag when it changed contentData;
        # the loop stops after a pass with no changes.
        modificationFlag = None
        # Reversed Strings - Changes STATE
        # Looks only in originalData, can be problematic flipping unraveled content back and forth.
        reverseString = ["noitcnuf", "marap", "nruter", "elbairav", "tcejbo-wen", "ecalper",]
        if any(entry in originalData.lower() for entry in reverseString):
            contentData, modificationFlag = reverseStrings(originalData, contentData, modificationFlag)
        # Decompress Streams - Changes STATE
        if all(entry in contentData.lower() for entry in ["streamreader", "frombase64string"]) or \
                all(entry in contentData.lower() for entry in ["deflatestream", "decompress"]) or \
                all(entry in contentData.lower() for entry in ["memorystream", "frombase64string"]):
            contentData, modificationFlag = decompressContent(contentData, modificationFlag)
        # Base64 Decodes - Changes STATE
        # Heuristic: any 30+ char run of Base64 alphabet characters.
        if re.search("[A-Za-z0-9+/=]{30,}", contentData):
            contentData, modificationFlag = decodeBase64(contentData, modificationFlag)
        # Decrypts SecureStrings - Changes STATE
        # Requires a 16+ element byte list (the key) and a long Base64 blob.
        if "convertto-securestring" in contentData.lower() and \
                re.search("(?:[0-9]{1,3},){15,}[0-9]{1,3}", contentData.replace(" ", "")) and \
                re.search("[A-Za-z0-9+=/]{255,}", contentData):
            contentData, modificationFlag = decryptStrings(contentData, modificationFlag)
        # Normalize / De-Obfuscate the new contents before proceeding.
        contentData = normalize(contentData)
        if modificationFlag == None:
            break
        loopCount += 1
    return contentData
def fcwrapper(pyenv='python2', instruction=None, data=None, reprint_output=False):
    """Wrapper to isolate FreeCAD Python 2.7 calls from the Python 3 code base.

    :param str pyenv: Python interpreter, defaults to 'python2'.
    :param str instruction: A registered instruction for the QMT FreeCAD module.
    :param data: Any data type serialisable through pickle.
    :param bool reprint_output: Reprint suppressed output of wrapped call.
    :return: Any data type serialisable through pickle.
    """
    run_script = os.path.join(os.path.dirname(qmt.__file__),
                              'geometry', 'freecad', 'run.py')
    proc = subprocess.Popen([pyenv, run_script, instruction],
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # Protocol 2 keeps the pickle readable by the Python 2.7 side.
    stdout_data, stderr_data = proc.communicate(pickle.dumps(data, protocol=2))
    # stderr is not checked here because it is used for mere warnings too often.
    if proc.returncode != 0:
        print(stderr_data.decode())
        print(os.linesep + ' --- END OF FC WRAPPED STDERR ---' + os.linesep)
        raise ValueError('pywrapper returned ' + str(proc.returncode))
    # The returned serialised byte stream is demarcated by the separator
    # string "MAGICQMTRANSFERBYTES". Everything preceding the separator is
    # FreeCAD notification noise and gets discarded.
    pipe_data = stdout_data.decode().split('MAGICQMTRANSFERBYTES')
    if reprint_output is True:
        print(os.linesep + ' --- FC WRAPPED STDOUT ---' + os.linesep)
        print(str(*pipe_data[0:-1]))
        print(os.linesep + ' --- FC WRAPPED STDERR ---' + os.linesep)
        print(stderr_data.decode())
        print(os.linesep + ' --- END OF FC WRAPPED STDERR ---' + os.linesep)
    return pickle.loads(pipe_data[-1].encode())
def quote_verify(data, validation, aik, pcrvalues):
    """Verify that a generated quote came from a trusted TPM and matches the
    previously obtained PCR values

    :param data: The TPM_QUOTE_INFO structure provided by the TPM
    :param validation: The validation information provided by the TPM
    :param aik: The object representing the Attestation Identity Key
    :param pcrvalues: A dictionary containing the PCRs read from the TPM
    :returns: True if the quote can be verified, False otherwise
    """
    select = 0
    maxpcr = 0
    # Verify that the validation blob was generated by a trusted TPM
    pubkey = aik.get_pubkey()
    n = m2.bin_to_bn(pubkey)
    n = m2.bn_to_mpi(n)
    e = m2.hex_to_bn("010001")  # standard RSA public exponent 65537
    e = m2.bn_to_mpi(e)
    rsa = M2Crypto.RSA.new_pub_key((e, n))
    m = hashlib.sha1()
    m.update(data)
    md = m.digest()
    try:
        ret = rsa.verify(md, str(validation), algo='sha1')
    except M2Crypto.RSA.RSAError:
        return False
    if not ret:
        # Previously the return value was ignored; treat a falsy
        # (non-raising) verify result as a verification failure too.
        return False
    # And then verify that the validation blob corresponds to the PCR
    # values we have
    values = bytearray()
    for pcr in sorted(pcrvalues):
        values += pcrvalues[pcr]
        select |= (1 << pcr)
        maxpcr = pcr
    # Build the TPM_PCR_COMPOSITE header. The select mask is packed with
    # native byte order ('@') while the sizes use network order ('!').
    # NOTE(review): confirm the native-order packing is correct on
    # big-endian hosts; it mirrors how the TPM emitted the structure.
    if maxpcr < 16:
        header = struct.pack('!H', 2)
        header += struct.pack('@H', select)
        header += struct.pack('!I', len(values))
    else:
        header = struct.pack('!H', 4)
        header += struct.pack('@I', select)
        header += struct.pack('!I', len(values))
    pcr_blob = header + values
    m = hashlib.sha1()
    m.update(pcr_blob)
    pcr_hash = m.digest()
    # Bytes 8:28 of TPM_QUOTE_INFO hold the composite digest.
    if pcr_hash == data[8:28]:
        return True
    else:
        return False
def _rectify_base(base):
    """
    transforms base shorthand into the full list representation

    Example:
        >>> assert _rectify_base(NoParam) is DEFAULT_ALPHABET
        >>> assert _rectify_base('hex') is _ALPHABET_16
        >>> assert _rectify_base('abc') is _ALPHABET_26
        >>> assert _rectify_base(10) is _ALPHABET_10
        >>> assert _rectify_base(['1', '2']) == ['1', '2']
        >>> import pytest
        >>> assert pytest.raises(TypeError, _rectify_base, 'uselist')
    """
    # Flat guard-clause chain instead of nested if/elif/else.
    if base is NoParam or base == 'default':
        return DEFAULT_ALPHABET
    if base in [26, 'abc', 'alpha']:
        return _ALPHABET_26
    if base in [16, 'hex']:
        return _ALPHABET_16
    if base in [10, 'dec']:
        return _ALPHABET_10
    if isinstance(base, (list, tuple)):
        # Already an explicit alphabet - pass it through unchanged.
        return base
    raise TypeError(
        'Argument `base` must be a key, list, or tuple; not {}'.format(
            type(base)))
def loadjson(filename):
    """Load a python object saved with savejson.

    Transparently handles gzip-compressed files (``.gz`` suffix).
    """
    if filename.endswith('.gz'):
        # Compressed: read bytes and decode before parsing.
        with gzip.open(filename, "rb") as stream:
            return json.loads(stream.read().decode("ascii"))
    with open(filename, 'rt') as stream:
        return json.load(stream)
def _write_ffxml(xml_compiler, filename=None):
    """Generate an ffxml file from a compiler object.

    Parameters
    ----------
    xml_compiler : _TitratableForceFieldCompiler
        The object that contains all the ffxml template data
    filename : str, optional
        Location and name of the file to save. If not supplied, returns the
        ffxml template as a string.

    Returns
    -------
    str or None
    """
    # Serialize the tree, then decode so we work with a text string.
    xmlstring = etree.tostring(
        xml_compiler.ffxml, encoding="utf-8", pretty_print=True, xml_declaration=False
    ).decode("utf-8")
    if filename is None:
        return xmlstring
    with open(filename, "w") as fstream:
        fstream.write(xmlstring)
def get_utctime(
    md_keys: List[str], md: Union[pyexiv2.metadata.ImageMetadata, None]
) -> Union[datetime, None]:
    """Extract the datetime (to the nearest millisecond).

    Returns a timezone-aware (UTC) datetime, or None when metadata is
    missing. NOTE(review): the EXIF DateTime is assumed to be UTC -
    confirm against the capture pipeline.
    """
    utctime = None
    dt_key = "Exif.Image.DateTime"
    if md is not None:
        if dt_key in md_keys:
            utctime = datetime.strptime(md[dt_key].raw_value, "%Y:%m:%d %H:%M:%S")
            # utctime can also be obtained with DateTimeOriginal:
            # utctime = datetime.strptime(
            #     md["Exif.Photo.DateTimeOriginal"].raw_value, "%Y:%m:%d %H:%M:%S"
            # )
            # extract the millisecond from the EXIF metadata:
            # NOTE(review): SubSecTime is accessed without a membership
            # check - a KeyError is possible if the tag is absent; confirm
            # the camera always writes it.
            subsec = int(md["Exif.Photo.SubSecTime"].raw_value)
            sign = -1.0 if subsec < 0 else 1.0
            millisec = sign * 1e3 * float("0.{}".format(abs(subsec)))
            utctime += timedelta(milliseconds=millisec)
            timezone = pytz.timezone("UTC")
            utctime = timezone.localize(utctime)
    return utctime
def find_file_recursively(file_name, start_dir=None, stop_dir=None):
    """
    This method will walk trough the directory tree upwards
    starting at the given directory searching for a file with
    the given name.

    :param file_name: The name of the file of interest. Make sure
        it does not contain any path information.
    :type file_name: str
    :param start_dir: The directory where the search should start.
        If it is omitted, the cwd *at call time* is used. (The previous
        default ``start_dir=getcwd()`` was evaluated once at import time,
        silently ignoring any later chdir() calls.)
    :type start_dir: str
    :param stop_dir: The directory where the search should stop. If
        this is omitted, it will stop at the root directory.
    :type stop_dir: str
    :rtype: str
    :return: The file path where the file was first found, or None.
    """
    if start_dir is None:
        # Resolve the cwd lazily so the default tracks os.chdir().
        start_dir = getcwd()
    cur_dir = abspath(start_dir) if not isabs(start_dir) else start_dir
    while True:
        candidate = join(cur_dir, file_name)
        if exists(candidate):
            # The file of interest exists in the current directory.
            return candidate
        # The file was not found yet so try in the parent directory.
        parent_dir = dirname(cur_dir)
        if parent_dir == cur_dir or parent_dir == stop_dir:
            # We are either at the root directory or reached the stop
            # directory.
            return None
        cur_dir = parent_dir
def text_cleaning(any_text, nlp):
    """
    Filter stop words out of ``any_text`` and return the remaining tokens,
    lemmatized and joined back into a single lowercase string.
    """
    doc = nlp(any_text.lower())
    lemmas = [token.lemma_ for token in doc
              if token.text not in nlp.Defaults.stop_words]
    return " ".join(lemmas)
def test_answer_2():
    """Parse name/age records with a TTP template.

    Regression test for the approach described at
    https://stackoverflow.com/questions/63499479/extract-value-from-text-string-using-format-string-in-python
    """
    # Raw input: one "name=..., age=..." record per line. The leading/trailing
    # blank lines are tolerated by the TTP parser.
    data = """
name=username1, age=1001
name=username2, age=1002
name=username3, age=1003
"""
    # TTP template: each {{ ... }} placeholder captures the variable part.
    template = "name={{ name }}, age={{ age }}"
    parser = ttp(data, template)
    parser.parse()
    # Flatten TTP's per-template/per-input nesting into one list of dicts.
    res = parser.result(structure="flat_list")
    # pprint.pprint(res)
    # Note: captured values are strings, not ints.
    assert res == [
        {"age": "1001", "name": "username1"},
        {"age": "1002", "name": "username2"},
        {"age": "1003", "name": "username3"},
    ]
def shave(q, options, undef=MISSING, has_undef=1, nbits=12):
    """
    Shave variable. On input, nbits is the number of mantissa bits to keep
    out of maximum of 24.

    :param q: rank-2 (yx) or rank-3 (zyx) array to shave
    :param options: options object; only ``options.zlib`` is consulted --
        when compression is off, shaving is skipped entirely
    :param undef: value marking undefined/missing data
    :param has_undef: non-zero if *q* may contain *undef* values
    :param nbits: number of mantissa bits to keep (out of 24)
    :return: shaved array with the same shape as *q* (or *q* unchanged)
    :raises ValueError: if *q* is not rank 2 or 3, or if shave32 fails
    """
    # No compression, no shave
    # ------------------------
    if not options.zlib:
        return q
    # Determine shaving parameters
    # ----------------------------
    xbits = 24 - nbits
    shp = q.shape
    rank = len(shp)
    if rank == 2:    # yx
        chunksize = shp[0] * shp[1]
    elif rank == 3:  # zyx: chunk over a single horizontal slice
        chunksize = shp[1] * shp[2]
    else:
        # BUG FIX: the original used Python-2 "raise ValueError, msg"
        # syntax, which is a SyntaxError under Python 3.
        raise ValueError("invalid rank=%d" % rank)
    # Shave it
    # --------
    qs, rc = shave32(q.ravel(), xbits, has_undef, undef, chunksize)
    if rc:
        raise ValueError("error on return from shave32, rc=%d" % rc)
    return qs.reshape(shp)
def any_none_nan(values: Union[List, np.ndarray, pd.Series, pd.DataFrame, object]) -> bool:
    """Can be used with a single value or a collection of values. Returns `True` if any item in
    `values` is `None`, `np.nan`, `pd.NA`, `pd.NaT` or if the length of `values` is `0`.

    Args:
        values:
            A single value or a collection of values to check.

    Returns:
        bool - True if any item in `values` is None/np.nan/pd.NA/pd.NaT, or `values` is empty.
    """
    # pylint: disable=too-many-return-statements
    # Scalar sentinels. `np.nan` is a singleton so an identity check catches
    # the common literal; other NaN floats are caught by np.isnan() below.
    # BUG FIX: `np.NaN` (the alias used before) was removed in NumPy 2.0.
    if values is None or values is np.nan or values is pd.NA or values is pd.NaT:  # pylint: disable=nan-comparison
        return True
    # Empty sized containers ("", excluded; [], {}, empty arrays) count as True.
    if isinstance(values, Sized) and not isinstance(values, str) and len(values) == 0:
        return True
    if isinstance(values, pd.Series):
        return values.isnull().any() or values.isna().any()
    if isinstance(values, pd.DataFrame):
        return values.isnull().any().any() or values.isna().any().any()
    if isinstance(values, Iterable) and not isinstance(values, str):
        # BUG FIX: the old code called len() here, which raises TypeError for
        # unsized iterables such as generators; emptiness of Sized inputs is
        # already handled above, so recurse directly over the elements.
        return any(any_none_nan(x) for x in values)
    try:
        # Containers implementing __contains__ but not Iterable.
        if not isinstance(values, str) and None in values:
            return True
    except Exception:  # pylint: disable=broad-except  # noqa
        pass
    try:
        # Scalar (or array-like) NaN check; np.isnan raises TypeError for
        # non-numeric input, which we treat as "not NaN".
        if np.isnan(values).any():
            return True
    except TypeError:
        return False
    return False
def _assert_gat_exists(genomes, gname, aname=None, tname=None, allow_incomplete=False):
    """
    Make sure the genome/asset:tag combination exists in the provided mapping and has any seek keys defined.
    Seek keys are required for the asset completeness.
    :param Mapping[str, Mapping[str, Mapping[str, object]]] genomes: nested
        collection of key-value pairs, keyed at top level on genome ID, then by
        asset name, then by asset attribute
    :param str gname: top level key to query -- genome ID, e.g. mm10
    :param str aname: second-level key to query -- asset name, e.g. fasta
    :param str tname: third-level key to query -- tag name, e.g. default
    :param bool allow_incomplete: accept assets that lack seek keys
    :raise MissingGenomeError: if the given key-value pair collection does not
        contain as a top-level key the given genome ID
    :raise MissingAssetError: if the given key-value pair collection does
        contain the given genome ID, but that key's mapping doesn't contain
        the given asset name as a key
    :raise MissingTagError: if the asset exists but the requested tag does not
    :raise MissingSeekKeyError: if the tag exists but defines no seek keys and
        allow_incomplete is False
    :raise GenomeConfigFormatError: if it's discovered during the query that
        the structure of the given genomes mapping suggests that it was
        parsed from an improperly formatted/structured genome config file.
    """
    _LOGGER.debug("checking existence of: {}/{}:{}".format(gname, aname, tname))
    try:
        genome = genomes[gname]
    except KeyError:
        raise MissingGenomeError("Your genomes do not include '{}'".format(gname))
    if aname is not None:
        try:
            asset_data = genome[CFG_ASSETS_KEY][aname]
        except KeyError:
            raise MissingAssetError("Genome '{}' exists, but asset '{}' is missing".format(gname, aname))
        except TypeError:
            # BUG FIX: the original handler referenced `asset_data`, which is
            # unbound when this very lookup raised TypeError (NameError at
            # runtime); report the genome section that failed to behave like
            # a mapping instead.
            _raise_not_mapping(genome, "Asset section ")
        if tname is not None:
            try:
                tag_data = asset_data[CFG_ASSET_TAGS_KEY][tname]
            except KeyError:
                raise MissingTagError(
                    "genome/asset bundle '{}/{}' exists, but tag '{}' is missing".format(gname, aname, tname))
            except TypeError:
                _raise_not_mapping(asset_data, "Asset section ")
            try:
                tag_data[CFG_SEEK_KEYS_KEY]
            except KeyError:
                if not allow_incomplete:
                    raise MissingSeekKeyError("Asset incomplete. No seek keys are defined for '{}/{}:{}'. "
                                              "Build or pull the asset again.".format(gname, aname, tname))
def get_or_create_package(name, epoch, version, release, arch, p_type):
    """ Get or create a Package object. Returns the object. Returns None if the
    package is the pseudo package gpg-pubkey, or if it cannot create it.

    Normalizes the package name to lower case and the epoch to '' for
    None/0/'0' before looking it up. Race conditions on concurrent inserts
    are handled by catching IntegrityError and re-fetching the row.
    """
    package = None
    name = name.lower()
    # gpg-pubkey is RPM's pseudo-package for imported GPG keys, not real software.
    if name == 'gpg-pubkey':
        return
    # Normalize "no epoch" spellings to the empty string.
    if epoch in [None, 0, '0']:
        epoch = ''
    try:
        with transaction.atomic():
            package_names = PackageName.objects.all()
            p_name, c = package_names.get_or_create(name=name)
    except IntegrityError as e:
        # Lost a create race: another process inserted the name first; fetch it.
        error_message.send(sender=None, text=e)
        p_name = package_names.get(name=name)
    except DatabaseError as e:
        # NOTE(review): on DatabaseError, p_name stays unbound and the
        # get_or_create below would raise NameError -- confirm intended.
        error_message.send(sender=None, text=e)
    package_arches = PackageArchitecture.objects.all()
    with transaction.atomic():
        p_arch, c = package_arches.get_or_create(name=arch)
    try:
        with transaction.atomic():
            packages = Package.objects.all()
            package, c = packages.get_or_create(name=p_name,
                                                arch=p_arch,
                                                epoch=epoch,
                                                version=version,
                                                release=release,
                                                packagetype=p_type)
    except IntegrityError as e:
        # Same create-race handling as above, for the package row itself.
        error_message.send(sender=None, text=e)
        package = packages.get(name=p_name,
                               arch=p_arch,
                               epoch=epoch,
                               version=version,
                               release=release,
                               packagetype=p_type)
    except DatabaseError as e:
        # Best-effort: log and return whatever we have (possibly None).
        error_message.send(sender=None, text=e)
    return package
def sort_dict(value, case_sensitive=False, by='key', reverse=False, index=0):
    """
    Sort a dictionary's items.

    :param value: the mapping to sort
    :param case_sensitive: whether string comparison is case sensitive
    :param by: what to sort on, either ``'key'`` or ``'value'``
    :param reverse: sort order (descending when True, ascending when False)
    :param index: when the sort target is itself indexable (e.g. a list
        value), sort by this element of it; 0 leaves the target untouched
    :return: list of ``(key, value)`` tuples in sorted order
    :raises FilterArgumentError: if *by* is neither ``'key'`` nor ``'value'``
    """
    if by == 'key':
        pos = 0
    elif by == 'value':
        pos = 1
    else:
        raise FilterArgumentError('You can only sort by either "key" or "value"')

    def sort_func(item):
        # Renamed from `value` to avoid shadowing the outer parameter.
        sort_key = item[pos]
        if index:
            try:
                sort_key = sort_key[index]
            except (TypeError, IndexError, KeyError):
                # Non-indexable target or index out of range: fall back to
                # sorting by the whole item. (The old bare `except` also
                # swallowed unrelated errors such as KeyboardInterrupt.)
                pass
        if not case_sensitive and isinstance(sort_key, string_types):
            # Normalize strings for case-insensitive comparison.
            sort_key = sort_key.lower()
        return sort_key

    return sorted(value.items(), key=sort_func, reverse=reverse)
async def test_create_user_uses_request_helper_method_with_correct_values(mock_request, jupyterhub_api_environ):
    """
    Does create_user method use the helper method and pass the correct values?
    """
    sut = JupyterHubAPI()
    await sut.create_user('new_user')
    assert mock_request.called
    # Removed the unused `body_usernames` dict that suggested a JSON body was
    # expected; the API targets a per-user endpoint with an empty body.
    mock_request.assert_called_with('users/new_user', method='POST', body='')
def initialize_system(name=None):
    """Initializes a distributed NPU system for use with TensorFlow.

    Args:
        name: Optional name for the created ops.

    Returns:
        The NPU init ops which will open the NPU system using `Session.run`.
    """
    init_ops = NPUInit(name)
    return init_ops
def zmap_1perm_2samp(X, cat1, cat2=None, rand_seed=-1, fstat=None, name=None):
    """Compute a (possibly permuted) two-sample statistic map.

    :param X: array of shape (D, N, P): D points, N subjects, P dims
    :param cat1: boolean mask selecting the first group of subjects
    :param cat2: boolean mask for the second group; defaults to ``~cat1``
    :param rand_seed: if negative, use the groups as given; otherwise seed
        NumPy's RNG with it, pool both groups, shuffle, and re-split
    :param fstat: two-sample statistic function; defaults to
        ``hotelling_2samples``
    :param name: if given, print the min/max of the result under this label
    :return: Y (D,) statistic value at each point
    """
    if fstat is None:
        fstat = hotelling_2samples
    if cat2 is None:
        cat2 = np.logical_not(cat1)
    if rand_seed < 0:
        # No permutation: index groups directly with their boolean masks.
        sel1, sel2 = cat1, cat2
    else:
        # Permutation: pool both groups, shuffle deterministically, then
        # re-split at the original size of group 1.
        np.random.seed(rand_seed)
        pooled = np.arange(X.shape[1])[cat1 | cat2]
        shuffled = pooled[np.random.permutation(pooled.size)]
        n1 = cat1.sum()
        sel1, sel2 = shuffled[:n1], shuffled[n1:]
    Y = fstat(X[:, sel1, :], X[:, sel2, :])
    if name is not None:
        print(name + " {0}, {1}\n".format(Y.min(), Y.max()))
    return Y
def create_graph(checkpoint):
    """Creates a graph from saved GraphDef file and returns a saver.

    Args:
        checkpoint: Path to a serialized GraphDef protobuf (graph_def.pb).
    """
    # Read the serialized graph definition from disk.
    with tf.gfile.FastGFile(checkpoint, 'rb') as graph_file:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(graph_file.read())
    # Import it into the default TF graph with no name-scope prefix.
    _ = tf.import_graph_def(graph_def, name='')
def plotDriftmap(ks_directory, sample_rate = 30000, time_range = [0, np.inf], exclude_noise=True, subselection = 50, fig=None, output_path=None):
    """
    Plots a "driftmap" of spike depths over time.
    This is a useful way to assess overall data quality for an experiment, as it makes probe
    motion very easy to see.
    This implementation is based on Matlab code from github.com/cortex-lab/spikes
    Inputs:
    ------
    ks_directory : str
        Path to Kilosort outputs
    sample_rate : float
        Sample rate of original data (Hz)
    time_range : [float, float]
        Min/max of time range for plot
    exclude_noise : bool
        True if noise units should be ignored, False otherwise
    subselection : int
        Number of spikes to skip (helpful for large datasets)
    fig : matplotlib.pyplot.figure
        Figure handle to use for plotting
    output_path : str
        Path for saving the image
    Outputs:
    --------
    Saves image to output_path (optional)

    Note: `time_range=[0, np.inf]` is a mutable default argument; safe only
    as long as callers never mutate it in place.
    """
    # Load the spike-sorting outputs (times, cluster/template assignments,
    # templates, channel map, quality labels, and PC features).
    spike_times, spike_clusters, spike_templates, amplitudes, templates, channel_map, clusterIDs, cluster_quality, pc_features, pc_feature_ind = \
        load_kilosort_data(ks_directory,
                           sample_rate,
                           use_master_clock = False,
                           include_pcs = True)
    # Per-spike depth along the probe and per-spike amplitude.
    spike_depths = get_spike_depths(spike_clusters, pc_features, pc_feature_ind)
    spike_amplitudes = get_spike_amplitudes(spike_templates, templates, amplitudes)
    # Optionally drop units labeled 'noise' by the quality metrics.
    if exclude_noise:
        good_units = clusterIDs[cluster_quality != 'noise']
    else:
        good_units = clusterIDs
    # Restrict to spikes inside the requested time window AND from good units.
    spikes_in_time_range = np.where((spike_times > time_range[0]) * (spike_times < time_range[1]))[0]
    spikes_from_good_units = np.where(np.in1d(spike_clusters, good_units))[0]
    spikes_to_use = np.intersect1d(spikes_in_time_range, spikes_from_good_units)
    if fig is None:
        fig = plt.figure(figsize=(16,6))
    ax = plt.subplot(111)
    # Keep only every `subselection`-th spike to keep the scatter tractable.
    selection = np.arange(0, spikes_to_use.size, subselection)
    # Depth vs. time, colored by amplitude -- probe drift shows as slanted bands.
    ax.scatter(spike_times[spikes_to_use[selection]] / (60*60),
               spike_depths[spikes_to_use[selection]],
               c = spike_amplitudes[spikes_to_use[selection]],
               s = np.ones(selection.shape),
               vmin=0,
               vmax=3000,
               alpha=0.25,
               cmap='Greys')
    # 3840 um = full length of a Neuropixels probe -- TODO confirm for other probes.
    ax.set_ylim([0,3840])
    ax.set_xlabel('Time (hours)')
    ax.set_ylabel('Distance from tip (um)')
    if output_path is not None:
        plt.savefig(output_path)
        plt.close('all')
async def team(ctx, game=None, *, team_message=None):
    """
    Creates a private text channel between you and your opponent(s).

    Workflow: validate the `game` and member-list arguments, create a
    dedicated role, assign it to every listed member found in the guild,
    then create a text channel visible only to that role and the execs.
    """
    # exit if no game
    if game is None:
        message = "You didn't pick a game! `val` or `lol`"
        await ctx.send(embed=await embeds.missing_param_error(message))
        return
    # exit if no teams
    if team_message is None:
        message = "You didn't list any members!"
        await ctx.send(embed=await embeds.missing_param_error(message))
        return
    # guild and category objects
    guild = bot.get_guild(guild_id)
    category = None
    # Pick the channel category matching the requested game.
    if game == 'val':
        category = guild.get_channel(val_category_id)
    elif game == 'lol':
        category = guild.get_channel(lol_category_id)
    else:
        message = "Not a valid game! `val` or `lol`"
        await ctx.send(embed=await embeds.missing_param_error(message))
        return
    # create team: member names are separated by the '=+=' delimiter
    team_entry = team_message.split('=+=')
    # game coordinator role
    exec_role = guild.get_role(exec_role_id)
    # create the role that gates access to the private channel
    name = f"{game.upper()} TEAM: {'-'.join(team_entry)}"
    team_role = await guild.create_role(name=name)
    # loop through members and server members
    for team_member in team_entry:
        found = False
        for member in guild.members:
            # check if their names match (case-insensitive, exact)
            if team_member.lower() == member.name.lower():
                # add role if they match
                await member.add_roles(team_role)
                # let loop know they were found and break
                found = True
                break
        # if not found, tell the invoker rather than failing silently
        if not found:
            await ctx.send(f"Couldn't find user: `{team_member}`")
    # overwrites for the match channel: team + execs can talk, everyone else
    # cannot even see it
    overwrites = {
        team_role: discord.PermissionOverwrite(add_reactions=True,
                                               read_messages=True,
                                               send_messages=True,
                                               external_emojis=True,
                                               attach_files=True,
                                               embed_links=True),
        exec_role: discord.PermissionOverwrite(add_reactions=True,
                                               read_messages=True,
                                               send_messages=True,
                                               external_emojis=True,
                                               attach_files=True,
                                               embed_links=True),
        guild.default_role: discord.PermissionOverwrite(read_messages=False)
    }
    # create channel; name drops the "VAL "/"LOL " prefix of the role name
    name = team_role.name[4:]
    topic = f"{team_role.name[:10]} {' | '.join(team_entry)}"
    team_channel = await guild.create_text_channel(name=name,
                                                   category=category,
                                                   topic=topic,
                                                   overwrites=overwrites)
    # send message linking to channel
    await ctx.send(embed=await embeds.match_started(team_channel))
    # send instructions into the channel
    await team_channel.send(f"{team_role.mention}\n\n{init_message}")
def grids_have_same_coords(grid0, grid1):
    """Whether two `ESMF.Grid` instances have identical coordinates.

    :Parameters:
        grid0, grid1: `ESMF.Grid`, `ESMF.Grid`
            The `ESMF` Grid instances to be compared
    :Returns:
        `bool`
            Whether or not the Grids have identical coordinates.
    """
    coords0, coords1 = grid0.coords, grid1.coords
    # Different number of coordinate groups -> not identical.
    if len(coords0) != len(coords1):
        return False
    for group0, group1 in zip(coords0, coords1):
        if len(group0) != len(group1):
            return False
        # Every corresponding coordinate array must match element-wise.
        if not all(np.array_equal(a, b) for a, b in zip(group0, group1)):
            return False
    return True
def _CreateTargetProfDataFileFromProfRawFiles(target, profraw_file_paths):
    """Returns a relative path to target profdata file by merging target
    profraw files.
    Args:
        target: Name of the target; used to name the merged .profdata file.
        profraw_file_paths: A list of relative paths to the profdata data files
            that are to be merged.
    Returns:
        A relative path to the merged coverage profdata file.
    Raises:
        CalledProcessError: An error occurred merging profdata files.
    """
    logging.info('Creating target profile data file.')
    logging.debug('Merging target profraw files to create target profdata file.')
    profdata_file_path = os.path.join(OUTPUT_DIR, '%s.profdata' % target)
    try:
        # -sparse=true keeps the output small by omitting zero counters.
        subprocess_cmd = [
            LLVM_PROFDATA_PATH, 'merge', '-o', profdata_file_path, '-sparse=true'
        ]
        subprocess_cmd.extend(profraw_file_paths)
        output = subprocess.check_output(subprocess_cmd)
        logging.debug('Merge output: %s', output)
    except subprocess.CalledProcessError as error:
        # Log the failure for diagnosis, then propagate to the caller.
        logging.error(
            'Failed to merge target profraw files to create target profdata.')
        raise error
    logging.debug('Finished merging target profraw files.')
    logging.info('Target "%s" profile data is created as: "%s".', target,
                 profdata_file_path)
    return profdata_file_path
def create_prediction_file(save_dir, identifiers, predictions):
    """
    Create the prediction file ('submission.json', one JSON record per item).

    Args:
        save_dir: Directory the prediction file is written into (created if
            it does not exist)
        identifiers: The data record id for each prediction (parallel to
            `predictions`)
        predictions: The predicted scores/labels
    """
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    preds_file = os.path.abspath(os.path.join(save_dir, 'submission.json'))
    # Group predictions by identifier, preserving first-seen id order.
    # (setdefault replaces the old if/else append dance; zip also avoids
    # indexing identifiers by position.)
    grouped = {}
    for identifier, predicted_label in zip(identifiers, predictions):
        grouped.setdefault(identifier, []).append(predicted_label)
    with open(preds_file, 'w') as fout:
        for item_id, label_list in grouped.items():
            data_record = {
                'item_id': item_id,
                'label_list': label_list,
            }
            fout.write(json.dumps(data_record, ensure_ascii=False) + '\n')
def run_image_registrator(colmap_binary_path: str,
                          colmap_db_path: str,
                          input_path: str,
                          output_path: str,
                          image_registrator_options: List[str]) -> None:
    """
    run colmap image_registrator:
    Register new images in the database against an existing model.

    :param colmap_binary_path: path to colmap executable
    :type colmap_binary_path: str
    :param colmap_db_path: value for --database_path
    :type colmap_db_path: str
    :param input_path: value for --input_path
    :type input_path: str
    :param output_path: value for --output_path
    :type output_path: str
    :param image_registrator_options: list of additional parameters to add to the command
    :type image_registrator_options: List[str]
    """
    args = [
        "image_registrator",
        "--database_path", colmap_db_path,
        "--input_path", input_path,
        "--output_path", output_path,
    ]
    # Append any caller-supplied extra flags (None or [] means no extras).
    if image_registrator_options:
        args.extend(image_registrator_options)
    run_colmap_command(colmap_binary_path, args)
def get_mwa_eor_spec(nu_obs=150.0, nu_emit=1420.40575, bw=8.0, tint=1000.0,
                     area_eff=21.5, n_stations=50, bmax=100.0):
    """Return the MWA EoR instrument specification as a tuple.

    Parameters
    ----------
    nu_obs : float or array-like, optional
        observed frequency [MHz]
    nu_emit : float or array-like, optional
        rest frequency [MHz]
    bw : float or array-like, optional
        frequency bandwidth [MHz]
    tint : float or array-like, optional
        integration time [hour]
    area_eff : float or array-like, optional
        effective area per station [m ** 2]
    n_stations : int or array-like, optional
        number of stations
    bmax : float or array-like, optional
        maximum baseline [wavelength]

    Returns
    -------
    tuple
        (nu_obs, nu_emit, bw, tint, area_eff, n_stations, bmax)
    """
    spec = (nu_obs, nu_emit, bw, tint, area_eff, n_stations, bmax)
    return spec
def element_add_inplace(x : torch.Tensor, y : torch.Tensor) -> None:
    """
    x += y

    In-place elementwise addition via a custom CUDA kernel. Both tensors
    must be contiguous half-precision CUDA tensors on the same device.
    """
    # Preconditions the kernel relies on: CUDA, contiguous memory, fp16.
    assert x.is_cuda and x.is_contiguous() and x.dtype == torch.half
    assert y.is_cuda and y.is_contiguous() and y.dtype == torch.half
    assert x.device == y.device
    # Passing x.data_ptr() as both an input and the output buffer makes the
    # addition in-place. NOTE(review): argument order presumably is
    # (size, stride, a, b, out, stream) -- confirm against the arith kernel.
    arith.arith_element_add(
        x.size(0),
        x.stride(0),
        x.data_ptr(),
        y.data_ptr(),
        x.data_ptr(),
        torch.cuda.current_stream().cuda_stream
    )
def baseModel(data):
    """
    Fit the baseline logistic-regression model.

    (Original docstring, Chinese: "original/existing model".)
    Regresses label_code on education_num, capital_gain, capital_loss and
    hours_per_week and returns the fitted statsmodels result.
    """
    formula = ("label_code ~ education_num + capital_gain"
               " + capital_loss + hours_per_week")
    model = sm.Logit.from_formula(formula, data=data)
    return model.fit()
def load_screen(options: list) -> int:
    """Callback for loading a screen.

    Delegates to get_selection and returns whatever int it produces for
    the given *options* (presumably the chosen option's index -- confirm
    against get_selection).
    """
    return get_selection(options)
def retrieve_url(url):
    """
    Retrieve the URL and parse the response for
    success/failure and a structured data output.

    :param url: The fully qualified URL which you want to query.
        Example: https://www.google.com.au
    :type url: string
    :return resp_ok: A True/False boolean to indicate whether a
        valid response was retrieved from the URL request. False when the
        request itself failed (no response at all).
    :type resp_ok: boolean
    :return output: The decoded output from the request:
        - When URL is successful, the JSON-decoded body is returned
        - When URL is not successful, the raw text body is returned
        - When the request fails entirely, the error message is returned
    """
    try:
        # Get the URL
        resp = requests.get(url)
    except HTTPError as http_err:
        # BUG FIX: the original only printed here and then fell through to
        # `resp.ok` with `resp` unbound, raising NameError; now the failure
        # is reported to the caller instead.
        print(f"HTTP error occurred: {http_err}")
        return False, str(http_err)
    except Exception as err:
        print(f"Other error occurred: {err}")
        return False, str(err)
    # Assign response OK to a variable
    resp_ok = resp.ok
    if resp_ok:
        # JSON-decode successful responses.
        output = resp.json()
    else:
        # Keep the raw text for error responses (may not be valid JSON).
        output = resp.text
    return resp_ok, output
def translate_compile_cli() -> None:
    """Compile all languages.

    Scans the translations directory; as soon as one language has a message
    catalog (.po file), pybabel is invoked once (it compiles every catalog
    in a single run) and the scan stops. If no catalog is found at all,
    a message is printed instead.
    """
    tdir: Path = current_app.config["TRANSLATIONS_DIR"]
    if tdir.is_dir():
        # CONSISTENCY FIX: iterate `tdir` instead of re-reading the same
        # config entry a second time.
        for lang in tdir.iterdir():
            catalog = lang / _lc_dir() / _po_file()
            if catalog.is_file():
                _pybabel_compile()
                break
        else:
            # for/else: runs only when the loop found no catalog.
            print("No message catalogs to compile")
def test_model_invalid_task(caplog):
    """Unit test of model with invalid task.

    Verifies that EmmentalModel.add_task rejects duplicate task names and
    non-EmmentalTask arguments, and that remove_task tolerates unknown names.
    """
    caplog.set_level(logging.INFO)
    dirpath = "temp_test_model_with_invalid_task"
    # Fresh Emmental state for an isolated test run.
    Meta.reset()
    init(
        dirpath,
        config={
            "meta_config": {"verbose": 0},
        },
    )
    task_name = "task1"
    # A minimal identity-module task used as the valid baseline.
    task = EmmentalTask(
        name=task_name,
        module_pool=nn.ModuleDict(
            {
                "input_module0": IdentityModule(),
                f"{task_name}_pred_head": IdentityModule(),
            }
        ),
        task_flow=[
            {
                "name": "input1",
                "module": "input_module0",
                "inputs": [("_input_", "data")],
            },
            {
                "name": f"{task_name}_pred_head",
                "module": f"{task_name}_pred_head",
                "inputs": [("input1", 0)],
            },
        ],
        module_device={"input_module0": -1},
        loss_func=None,
        output_func=None,
        action_outputs=None,
        scorer=None,
        require_prob_for_eval=False,
        require_pred_for_eval=True,
    )
    # A second task with the SAME name, used to trigger the duplicate check.
    task1 = EmmentalTask(
        name=task_name,
        module_pool=nn.ModuleDict(
            {
                "input_module0": IdentityModule(),
                f"{task_name}_pred_head": IdentityModule(),
            }
        ),
        task_flow=[
            {
                "name": "input1",
                "module": "input_module0",
                "inputs": [("_input_", "data")],
            },
            {
                "name": f"{task_name}_pred_head",
                "module": f"{task_name}_pred_head",
                "inputs": [("input1", 0)],
            },
        ],
        module_device={"input_module0": -1},
        loss_func=None,
        output_func=None,
        action_outputs=None,
        scorer=None,
        require_prob_for_eval=False,
        require_pred_for_eval=True,
    )
    model = EmmentalModel(name="test")
    model.add_task(task)
    model.remove_task(task_name)
    assert model.task_names == set([])
    # Removing a task that was never added must be a no-op, not an error.
    model.remove_task("task_2")
    assert model.task_names == set([])
    model.add_task(task)
    # Duplicate task name must be rejected.
    with pytest.raises(ValueError):
        model.add_task(task1)
    # Passing a plain string instead of an EmmentalTask must be rejected.
    with pytest.raises(ValueError):
        model.add_task(task_name)
    # Clean up the temporary Emmental workspace.
    shutil.rmtree(dirpath)
def count_cells(notebook):
    """Return the number of cells in the given notebook.

    Args:
        notebook (Notebook): python object representing the notebook

    Returns:
        int: the number of cells in the notebook

    A way you might use me is:
        cells_count = count_cells(nb)
    """
    return len(notebook.nb_dict["cells"])
def assignments():
    """
    This is called for the assignments tab on the instructor interface.
    When an assignment is selected get_assignment is called to gather the
    details for that assignment.

    Returns the template context: the course's assignments (ordered by due
    date), all question tags, the base course's chapter labels, and the
    table of contents with readings and questions.
    """
    response.title = "Assignments"
    # All assignments for the instructor's course, soonest due date first.
    cur_assignments = db(db.assignments.course == auth.user.course_id).select(
        orderby=db.assignments.duedate
    )
    # Preserve the due-date ordering in the id -> name mapping.
    assigndict = OrderedDict()
    for row in cur_assignments:
        assigndict[row.id] = row.name
    # All question tags (used for filtering in the UI).
    tags = []
    tag_query = db(db.tags).select()
    for tag in tag_query:
        tags.append(tag.tag_name)
    course = get_course_row(db.courses.ALL)
    base_course = course.base_course
    # Chapters belong to the base course, not the derived course.
    chapter_labels = []
    chapters_query = db(db.chapters.course_id == base_course).select(
        db.chapters.chapter_label
    )
    for row in chapters_query:
        chapter_labels.append(row.chapter_label)
    # See `models/db_ebook.py` for course_attributes table
    set_latex_preamble(course.base_course)
    return dict(
        coursename=auth.user.course_name,
        confirm=False,
        course_id=auth.user.course_name,
        assignments=assigndict,
        tags=tags,
        chapters=chapter_labels,
        toc=_get_toc_and_questions(),  # <-- This Gets the readings and questions
        course=course,
    )
def handle(name, userdata, cloud, log, args):
    """Cloud-init processing function.

    Reads this module's section of the user-data (keyed by MODULE_NAME),
    and when enabled: injects SSH keys and passwords, resolves network
    configuration, writes onboarding artifacts, and runs the onboard /
    post-onboard steps with optional phone-home reporting.
    """
    tag = MODULE_NAME
    enabled = False
    if tag in userdata:
        try:
            enabled = bool(userdata[tag]['enabled'])
        except Exception:
            # Malformed section: log and bail out rather than guessing.
            LOG.error('%s missing enabled attribute', tag)
            return
    if enabled:
        # Inject any cloud-config-supplied SSH public keys.
        keys = []
        if "ssh_authorized_keys" in userdata:
            cfgkeys = userdata["ssh_authorized_keys"]
            keys.extend(cfgkeys)
        if keys:
            bigiq_onboard_utils.inject_public_ssh_keys(keys)
        # --- Network-related options, each with a safe default. ---
        rd_enabled = True
        if 'rd_enabled' in userdata[tag]:
            rd_enabled = bool(userdata[tag]['rd_enabled'])
        default_route_interface = None
        if 'default_route_interface' in userdata[tag]:
            default_route_interface = str(
                userdata[tag]['default_route_interface'])
        device_discovery_interface = None
        if 'device_discovery_interface' in userdata[tag]:
            device_discovery_interface = str(
                userdata[tag]['device_discovery_interface'])
        inject_routes = True
        dhcp_timeout = 120
        if 'dhcp_timeout' in userdata[tag]:
            dhcp_timeout = int(userdata[tag]['dhcp_timeout'])
        if 'inject_routes' in userdata[tag]:
            inject_routes = bool(userdata[tag]['inject_routes'])
        # --- Licensing / node-role options. ---
        license_key = None
        if 'license_key' in userdata[tag]:
            license_key = userdata[tag]['license_key']
        node_type = 'cm'
        if 'node_type' in userdata[tag]:
            node_type = userdata[tag]['node_type']
        post_onboard_enabled = False
        if 'post_onboard_enabled' in userdata[tag]:
            post_onboard_enabled = bool(userdata[tag]['post_onboard_enabled'])
        post_onboard_commands = []
        if 'post_onboard_commands' in userdata[tag]:
            post_onboard_commands = userdata[tag]['post_onboard_commands']
        # Apply chpasswd-style "user:password" entries, one per line.
        if 'chpasswd' in userdata and 'list' in userdata['chpasswd']:
            for user in userdata['chpasswd']['list'].splitlines():
                parts = user.split(':')
                if len(parts) > 1:
                    bigiq_onboard_utils.set_password(parts[0].strip(),
                                                     parts[1].strip())
        # Resolve the effective network config and write onboarding scripts.
        resources = resolve_resources(rd_enabled, default_route_interface,
                                      device_discovery_interface,
                                      inject_routes, dhcp_timeout)
        LOG.debug('resolved config resources: %s', resources)
        create_onboard_artifacts(resources, license_key, node_type,
                                 post_onboard_commands)
        create_onboard()
        create_post_onboard()
        # --- Optional phone-home reporting after onboarding. ---
        phone_home_url = None
        if 'phone_home_url' in userdata[tag]:
            phone_home_url = userdata[tag]['phone_home_url']
        phone_home_url_verify_tls = True
        if 'phone_home_url_verify_tls' in userdata[tag]:
            phone_home_url_verify_tls = userdata[tag][
                'phone_home_url_verify_tls']
        phone_home_url_metadata = {}
        if 'phone_home_url_metadata' in userdata[tag]:
            phone_home_url_metadata = userdata[tag]['phone_home_url_metadata']
        phone_home_cli = None
        if 'phone_home_cli' in userdata[tag]:
            phone_home_cli = userdata[tag]['phone_home_cli']
        try:
            onboard(post_onboard_enabled, phone_home_url,
                    phone_home_url_verify_tls, phone_home_url_metadata,
                    phone_home_cli)
        except Exception as err:
            # Onboarding is best-effort from cloud-init's perspective:
            # log the failure and attempt cleanup instead of crashing boot.
            LOG.error("onboard exception - %s", err)
            try:
                bigiq_onboard_utils.clean()
            except Exception as err:
                LOG.error('onboard cleanup exception - %s', err)
def get_sentences(data: Iterable[JSON_Object],
                  match_by: str) -> Dict[Hash, JSON_Object]:
    """
    Collect sentence objects w.r.t. matching criteria.

    :param data: Iterable of sentence objects
    :param match_by: Matching criteria / method
    :return: Dict of hash: sentence objects (later duplicates overwrite
        earlier ones, as with a dict comprehension)
    """
    collected: Dict[Hash, JSON_Object] = {}
    for sentence in data:
        collected[hash_sentence(sentence, match_by)] = sentence
    return collected
def create_infrastructure(aws_key, aws_secret):
    """Create Redshift infrastructure for this project and set ARN.

    Creates the IAM role and Redshift cluster, blocks until the cluster is
    available, then opens its VPC and prints the cluster properties.
    """
    ec2_client, s3_client, iam_client, redshift_client = create_clients(
        aws_key, aws_secret
    )
    role_arn = create_iam_role(iam_client)
    create_redshift_cluster(redshift_client, role_arn)
    # Loop until the cluster status becomes "Available"
    # NOTE(review): polls every 30 s with no timeout -- a cluster stuck in a
    # failed state would make this loop forever.
    status = ""
    while status.lower() != "available":
        cluster_properties = get_cluster_properties(redshift_client)
        status = cluster_properties['ClusterStatus']
        print('Cluster status is %s' % status)
        time.sleep(30)
    # Open network access on the cluster's VPC so clients can connect.
    set_vpc_properties(ec2_client, cluster_properties['VpcId'])
    print_cluster_properties(redshift_client)
def getInterfaceText(caller_text, callee_method):
    """
    This method parses method text that we grabbed from getMethodSignature for
    potential interface text that is present. It makes a sort of 'best guess'
    as to which lines of the caller method text invoke the callee.
    This current implementation uses only the 'in' string method to detect interfaces
    and that is why it is a 'best guess' it is by no means perfect.

    :param caller_text: full source text of the calling method
    :param callee_method: callee signature, optionally 'Class::name' and
        optionally suffixed with '#...' (the dict link, stripped below)
    :return: list of strings, each one a (possibly multi-line) call site
    """
    # pass in the caller method text and then search it for the callee method call
    callee_method = callee_method.split('#')[0].strip()  # get the dict_link out
    interface_text = []
    caller_text = commentRemover(caller_text)  # don't want to trigger interface text on a comment
    caller_text = caller_text.split('\n')
    i = 0
    end_header_regex = re.compile(r'^.+?\)')
    # Strip the caller's own method header so its parameter list cannot be
    # mistaken for a call site.
    while i < len(caller_text):  # remove method header from the interface text as to not accidentally trigger regex later
        to_remove = end_header_regex.search(caller_text[i])  # regex that searches to see the end parentheses where the header ends
        if not to_remove:  # for when the method head # TODO replace this regex with regex for '{' not parentheses (?)
            del caller_text[i]  # remove every line of the method header leading up to the end line
            i -= 1
        else:  # we are at the end line of the header
            caller_text[i] = caller_text[i].replace(to_remove.group(), '')  # replace this line with empty string
            break
        i += 1
    # after this loop ^ we should have isolated just the caller method text for searching of interface
    if '::' in callee_method:  # class defined
        tmp = ''
        done = True
        # Scan for 'Class::method' occurrences; a call site runs from the
        # first matching line until a ';' is seen.
        for line in caller_text:
            line = line.strip()
            if not done:  # if we haven't reached the end of the interface, i.e. it extends over many lines
                tmp = tmp + '\n' + line  # capture the lines
            if callee_method in line:  # found start of interface
                done = False
                tmp = tmp + line  # capture it
            if ';' in line:  # if the line has a semicolon we know that we have reached the end
                done = True
            if done and tmp != '':  # we have the end of this interface, add it to the list and then reset tmp
                interface_text.append(tmp)
                tmp = ''
        if len(interface_text) == 0:  # didn't find it with the class:: notation (usually because in method in same class)
            split = callee_method.split("::")  # here we will search for just the method name
            # NOTE(review): cclass is computed but never used below.
            cclass = split[0].strip()
            cname = split[1].strip()
            # `done` is True and `tmp` is '' here, left over from the loop above.
            for line in caller_text:
                line = line.strip()
                if not done:  # same process as previous loop but doesn't include ClassName:: in search
                    tmp = tmp + '\n' + line  # this happens in C++ when the namespace is implicit
                if cname in line:  # found start
                    done = False
                    tmp = tmp + line
                if ';' in line:
                    done = True
                if done and tmp != '':
                    interface_text.append(tmp)
                    tmp = ''
    else:  # there is no class defined (ex. memset, or other built in functions, or C style methods with no class)
        tmp = ''
        done = True
        for line in caller_text:  # same method as above, searches method text and appends results to a list
            line = line.strip()
            if not done:
                tmp = tmp + '\n' + line
            if callee_method in line:  # found start
                done = False
                tmp = tmp + line
            if ';' in line:
                done = True
            if done and tmp != '':
                interface_text.append(tmp)
                tmp = ''
    return interface_text
def str2bool(value):
    """
    Convert a text flag to its boolean value.

    Args:
        value - text to be converted to boolean
        True values: y, yes, true, t, on, 1 (case-insensitive)
        False values: anything else (n, no, false, off, 0, non-strings)
    """
    # BUG FIX: 'on' was documented as a true value but missing from the
    # accepted set; matching is now also case-insensitive. Non-string input
    # still yields False, as before.
    return isinstance(value, str) and value.lower() in ('y', 'yes', 'true', 't', 'on', '1')
def numeric_float(max_abs: float = 1e3) -> st.SearchStrategy:
    """Search strategy for numeric (non-inf, non-NaN) floats with bounded absolute value.

    :param max_abs: bound on |x| for generated floats (inclusive)
    :return: hypothesis strategy yielding floats in [-max_abs, max_abs]
    """
    return st.floats(min_value=-max_abs, max_value=max_abs, allow_nan=False, allow_infinity=False)
def add2dict(dict, parent_list, key, value):
    """ Add a key/value pair to a nested dictionary.

    The pair is stored following the hierarchy of 'parents' given by
    parent_list. For example with parent_list=['5', '1'], key='k',
    value='v', the returned dictionary satisfies:
        dict['5']['1']['k'] == 'v'
    Missing intermediate dictionaries are created on the way down.
    """
    # NOTE: the first parameter shadows the builtin `dict`; the name is kept
    # because it is part of the public signature.
    node = dict
    for parent in parent_list:
        # Descend one level, creating an empty dict where none exists yet.
        node = node.setdefault(parent, {})
    node[key] = value
    return dict
def transform(x):
    """
    Map 2-D points into an 8-dimensional nonlinear feature space:

        x1 x2 ---> 1 x1 x2 x1**2 x2**2 x1*x2 |x1 - x2| |x1 + x2|
    """
    x1, x2 = x[:, 0], x[:, 1]
    features = [
        np.ones(len(x)),   # bias term
        x1,
        x2,
        x1 ** 2,
        x2 ** 2,
        x1 * x2,
        np.abs(x1 - x2),
        np.abs(x1 + x2),
    ]
    # Stack the feature columns into an (n_points, 8) array.
    return np.stack(features, axis=1)
async def get_cities(session: ClientSession) -> AsyncGenerator[City, None]:
    """
    Get cities.

    Streams rows from the Station/GetCities endpoint and yields one City
    per row.

    :param session: aiohttp session
    """
    url = urljoin(BASE_URL, "Station/GetCities")
    async with session.get(url, raise_for_status=True) as response:
        async for row in _iter_rows(response):
            # First two fields are the city id and name; any trailing
            # fields are ignored via the starred target.
            id_str, name, *_ = row
            yield City(int(id_str), name)
def gen_new_contact_json(csv_data):
    """
    Generate the JSON file describing subnets and their contacts.

    :param csv_data: entry data (the subnets payload)
    :return: summary string with the number of subnets written
    """
    payload = {"subnets": csv_data}
    # Write the payload pretty-printed and with stable key order.
    with open(f'{PATH}{CONTACTS_SUFIX}', 'w') as out_file:
        out_file.write(json.dumps(payload, indent=2, sort_keys=True))
    count = len(payload["subnets"])
    return f'Reloaded {count} subnets and their contacts. '
def resize(a, shape):
    """
    If array a is larger than shape, crop a; if a is smaller than shape,
    pad a with zeros.

    Args:
        a (numpy array): 2D array to resize
        shape: desired shape of the return
    Returns:
        numpy array: array a resized according to shape
    """
    rows, cols = a.shape
    # Zero-pad at the bottom/right wherever the target is larger.
    pad_rows = max(shape[0] - rows, 0)
    pad_cols = max(shape[1] - cols, 0)
    if pad_rows or pad_cols:
        a = np.pad(a, ((0, pad_rows), (0, pad_cols)), mode="constant")
    # Crop from the top-left wherever the target is smaller (slicing past
    # the end is a no-op, so this is safe in both cases).
    return a[:shape[0], :shape[1]]
def _Run(args, holder, url_map_arg, release_track):
  """Issues requests necessary to import URL maps.

  Reads a URL map definition from args.source (or stdin), validates it
  against the release track's schema, then inserts it if no map with that
  name exists, or — after user confirmation — patches the existing one.

  Args:
    args: argparse namespace holding the command-line flags; ``args.source``
      is the input file path ('-'/None means stdin).
    holder: API client holder providing ``client`` and ``resources``.
    url_map_arg: resource argument used to resolve the URL map reference.
    release_track: release track; selects the import schema.

  Returns:
    The insert/patch response, or None when the imported map is identical
    to the existing one (nothing to do).
  """
  client = holder.client
  url_map_ref = url_map_arg.ResolveAsResource(
      args,
      holder.resources,
      default_scope=compute_scope.ScopeEnum.GLOBAL,
      scope_lister=compute_flags.GetDefaultScopeLister(client))
  data = console_io.ReadFromFileOrStdin(args.source or '-', binary=False)
  try:
    url_map = export_util.Import(
        message_type=client.messages.UrlMap,
        stream=data,
        schema_path=_GetSchemaPath(release_track))
  except yaml_validator.ValidationError as e:
    raise exceptions.ToolException(e.message)
  # Get existing URL map.
  try:
    url_map_old = url_maps_utils.SendGetRequest(client, url_map_ref)
  except apitools_exceptions.HttpError as error:
    if error.status_code != 404:
      raise error
    # Url Map does not exist, create a new one.
    return _SendInsertRequest(client, url_map_ref, url_map)
  # No change, do not send requests to server.
  if url_map_old == url_map:
    return
  console_io.PromptContinue(
      message=('Url Map [{0}] will be overwritten.').format(url_map_ref.Name()),
      cancel_on_no=True)
  # Populate id and fingerprint fields. These two fields are manually
  # removed from the schema files.
  url_map.id = url_map_old.id
  url_map.fingerprint = url_map_old.fingerprint
  return _SendPatchRequest(client, url_map_ref, url_map)
def resolve_function(module, function):
  """Look up a named function inside a Python module.

  :param module: A Python module
  :type module: ``types.ModuleType``
  :param function: Name of Python function
  :type ``str``
  :return: Function or None if not found.
  """
  resolved = getattr(module, function) if function_exists(module, function) else None
  if not resolved:
    # Keep the original falsy check: a found-but-falsy attribute is also
    # reported as missing.
    nuoca_log(logging.ERROR, "Cannot find Python function %s in module %s" % (
      function, module
    ))
  return resolved
def from_table(table, engine, limit=None):
    """
    Select data from a database table and load it into a prettytable.

    Create a :class:`prettytable.PrettyTable` from :class:`sqlalchemy.Table`.

    (Original Chinese note: put the table's rows into a prettytable.)
    """
    query = select([table])
    if limit is not None:
        query = query.limit(limit)
    cursor = engine.execute(query).cursor
    return from_db_cursor(cursor)
def test_idxgz_load_and_save(tmpdir):
    """Create idxgz.load() and idxgz.save()"""
    archive = tmpdir.join("idx.gz")
    # Small fixture array; uint8 matches the IDX storage type.
    original = np.array([1, 2, 3]).astype('uint8')
    print("Before packing:")
    print("data:", original)
    print("type(data):", type(original))
    print("data.dtype:", original.dtype)
    print("data.shape:", original.shape)
    # Round-trip through the archive.
    idxgz.save(archive, original)
    restored = idxgz.load(archive)
    print("After unpacking:")
    print("data2:", restored)
    print("type(data2):", type(restored))
    print("data2.dtype:", restored.dtype)
    print("data2.shape:", restored.shape)
    # Values, type, dtype and shape must all survive the round trip.
    assert_array_equal(original, restored)
    assert type(original) == type(restored)
    assert original.dtype == restored.dtype
    assert original.shape == restored.shape
def futures_sgx_daily(trade_date: str = "2020/03/06", recent_day: str = "3") -> pd.DataFrame:
    """
    Futures daily data from sgx
    P.S. it will be slowly if you do not use VPN
    :param trade_date: it means the specific trade day you want to fetch
    :type trade_date: str e.g., "2020/03/06"
    :param recent_day: the data range near the specific trade day
    :type recent_day: str e.g. "3" means 3 day before specific trade day
    :return: data contains from (trade_date - recent_day) to trade_day
    :rtype: pandas.DataFrame
    """
    index_df = get_country_index(country="新加坡", index_name="FTSE Singapore", start_date="2020/01/01", end_date=trade_date)
    index_df.sort_index(inplace=True)
    index_df.reset_index(inplace=True)
    index_df.reset_index(inplace=True)
    # Map trading days onto SGX report ids; 5840 is presumably the report id
    # corresponding to 2020/01/01 — TODO confirm the offset stays valid.
    index_df.index = index_df["index"] + 5840
    date_start = index_df.index[-1] + 1 - int(recent_day)
    date_end = index_df.index[-1] + 1
    frames = []
    for page in tqdm(range(date_start, date_end)):
        url = f"https://links.sgx.com/1.0.0/derivatives-daily/{page}/FUTURE.zip"
        r = requests.get(url)
        with zipfile.ZipFile(BytesIO(r.content)) as file:
            with file.open(file.namelist()[0]) as my_file:
                data = my_file.read().decode()
                # Daily reports come either as TSV (.txt) or CSV.
                if file.namelist()[0].endswith("txt"):
                    data_df = pd.read_table(StringIO(data))
                else:
                    data_df = pd.read_csv(StringIO(data))
        frames.append(data_df)
    # BUG FIX: DataFrame.append was removed in pandas 2.0 and was quadratic;
    # collect the frames and concatenate once instead.
    big_df = pd.concat(frames) if frames else pd.DataFrame()
    return big_df
def draw_spectra(md, ds):
    """ Generate best-fit spectra for all the test objects

    Parameters
    ----------
    md: model
        The Cannon spectral model
    ds: Dataset
        Dataset object

    Returns
    -------
    best_fluxes: ndarray
        The best-fit test fluxes
    best_ivars:
        The best-fit test inverse variances
    """
    # BUG FIX: the body previously referenced the globals ``model`` and
    # ``dataset`` instead of the parameters ``md`` and ``ds``, raising
    # NameError unless identically named globals happened to exist.
    coeffs_all, covs, scatters, red_chisqs, pivots, label_vector = md.model
    nstars = len(ds.test_SNR)
    cannon_flux = np.zeros(ds.test_flux.shape)
    cannon_ivar = np.zeros(ds.test_ivar.shape)
    for i in range(nstars):
        x = label_vector[:, i, :]
        # Best-fit flux is the label vector contracted with the coefficients.
        spec_fit = np.einsum('ij, ij->i', x, coeffs_all)
        cannon_flux[i, :] = spec_fit
        # Pixels whose ivar equals SMALL**2 are masked; leave them at zero.
        bad = ds.test_ivar[i, :] == SMALL ** 2
        cannon_ivar[i, :][~bad] = 1. / scatters[~bad] ** 2
    return cannon_flux, cannon_ivar
def _pivot(p: SimplexMethod.Problem) -> None:
    """
    Perform the pivot operation (basis change) on the given problem in place.

    Row ``p.leaving_idx`` is normalized by the pivot element, the leaving
    basic variable is replaced by ``p.entering_idx``, and the pivot column
    is then eliminated from every other row and from the objective row.
    (Original docstring was in Chinese.)
    """
    main_element = p.a[p.leaving_idx][p.entering_idx]
    # Normalize the pivot row so the pivot element becomes 1.
    p.a[p.leaving_idx] /= main_element
    p.b[p.leaving_idx] /= main_element
    p.base_idx[p.leaving_idx] = p.entering_idx
    for i in range(len(p.b)):
        if i != p.leaving_idx and p.a[i][p.entering_idx] != 0:
            # NOTE: b must be updated before row i of a, because the
            # multiplier p.a[i][p.entering_idx] is zeroed by the row update.
            p.b[i] -= p.a[i][p.entering_idx] * p.b[p.leaving_idx]
            p.a[i] -= p.a[i][p.entering_idx] * p.a[p.leaving_idx]
    # Eliminate the entering variable from the objective coefficients.
    p.c -= p.c[p.entering_idx] * p.a[p.leaving_idx]
def deploy_zebra(suffix=None, usergroup='', subnets=None, security_groups=None, env=None):
    """deploy tibanna zebra to AWS cloud (zebra is for CGAP only)"""
    api = API()
    api.deploy_zebra(suffix=suffix, usergroup=usergroup, subnets=subnets,
                     security_groups=security_groups, env=env)
def get_response(
    schema,  # type: GraphQLSchema
    params,  # type: RequestParams
    catch_exc,  # type: Type[BaseException]
    allow_only_query=False,  # type: bool
    **kwargs  # type: Any
):
    # type: (...) -> Optional[ExecutionResult]
    """Get an individual execution result as response, with option to catch errors.

    Behaves like execute_graphql_request(), except that any exception of the
    class(es) in ``catch_exc`` raised during execution is swallowed and None
    is returned instead.
    """
    # Pick the promise-based executor when the caller asked for promises.
    if kwargs.get("return_promise", False):
        execute = execute_graphql_request_as_promise
    else:
        execute = execute_graphql_request
    try:
        return execute(schema, params, allow_only_query, **kwargs)
    except catch_exc:
        return None
def _get_status_arrays():
    """ Get status for all arrays.

    Returns:
        list: array status for a site; empty list when none is available.

    Raises:
        Exception: re-raised (with the original as ``__cause__``) after
        logging when the underlying status call fails.
    """
    try:
        result = get_status_arrays()
        # Normalize a None result to an empty list for callers.
        return result if result is not None else []
    except Exception as err:
        message = str(err)
        current_app.logger.info(message)
        # FIX: chain the original exception so the traceback is preserved.
        raise Exception(message) from err
def get_cycle_amplitude(data, cycle, metric_to_use, hourly_period_to_exclude):
    """
    given data (eg results[opposite_pair]
    [substratification][
    substratification_level]
    ['take_simple_means_by_group_no_individual_mean'])
    and a cycle and a metric to use (max_minus_min or average_absolute_difference_from_mean)
    computes the cycle amplitude.

    data[cycle] is expected to be indexed by position in the cycle and to
    provide a 'mean' column; the asserts below pin the exact index expected
    for each cycle type.  hourly_period_to_exclude is either None or an
    inclusive (start, end) pair of hours removed from the 'local_hour'
    cycle before the amplitude is computed.
    """
    # Work on a copy so the caller's nested structure is never mutated.
    data = deepcopy(data)
    assert metric_to_use in ['max_minus_min' ,'average_absolute_difference_from_mean']
    assert cycle in ['date_relative_to_period', 'local_hour', 'weekday', 'month', 'week_of_year']
    # Restrict period-relative data to two weeks either side of day 0.
    if cycle == 'date_relative_to_period':
        data[cycle] = data[cycle].loc[data[cycle].index.map(lambda x:np.abs(x) <= 14)]
        assert list(data[cycle].index) == list(range(-14, 15))
    if cycle == 'local_hour':
        if hourly_period_to_exclude is None:
            assert list(data[cycle].index) == list(range(24))
        else:
            # Drop hours inside the excluded window (bounds inclusive).
            assert len(hourly_period_to_exclude) == 2
            assert hourly_period_to_exclude[0] < hourly_period_to_exclude[1]
            data[cycle] = data[cycle].loc[data[cycle].index.map(lambda x:(x < hourly_period_to_exclude[0]) or (x > hourly_period_to_exclude[1]))]
            assert list(data[cycle].index) == [a for a in list(range(24)) if a < hourly_period_to_exclude[0] or a > hourly_period_to_exclude[1]]
    if cycle == 'weekday':
        assert list(data[cycle].index) == list(['Friday', 'Monday', 'Saturday', 'Sunday', 'Thursday', 'Tuesday',
            'Wednesday'])
    if cycle == 'month':
        assert list(data[cycle].index) == list(range(1, 13))
    if cycle == 'week_of_year':
        assert list(data[cycle].index) == list(range(52))
    y = np.array(data[cycle]['mean'])
    y_mu = y.mean()
    # Two amplitude notions: mean absolute deviation, and full range.
    average_absolute_difference_from_mean = np.mean(np.abs(y - y_mu))
    largest_difference = y.max() - y.min()
    if metric_to_use == 'max_minus_min':
        metric_val = largest_difference
    else:
        metric_val = average_absolute_difference_from_mean
    return metric_val
def submit_job(scheduler_args, command):
    """Submit a job to the scheduler, returning the supplied job ID.

    Args:
        scheduler_args: extra qsub arguments (list of str).
        command: the command to run, as a list of str.

    Returns:
        The job id parsed out of qsub's output.
    """
    # -cwd: run in current directory; -b y: command is a binary, not a
    # script; -j y: merge stdout and stderr.
    cl = ["qsub", "-cwd", "-b", "y", "-j", "y"] + scheduler_args + command
    status = subprocess.check_output(cl)
    match = _jobid_pat.search(status)
    # NOTE(review): ``groups("jobid")`` passes "jobid" as the *default* value
    # for non-participating groups and returns a tuple — this effectively
    # returns group 1.  If ``_jobid_pat`` defines a named group this probably
    # meant ``match.group("jobid")``; confirm against the pattern.
    return match.groups("jobid")[0]
def _download(url: str, dst: str) -> int:
    """Download ``url`` to the data path ``dst``, showing a progress bar.

    @param url: url to download file
    @param dst: place to put the file (relative data path)
    @return: reported file size in bytes, or -1 when the server does not
        send a Content-Length header
    """
    r = requests.get(url, stream=True)
    # FIX: read the size from the streaming response instead of issuing a
    # second request via urlopen() (which was also never closed).
    file_size = int(r.headers.get("Content-Length", -1))
    with open(get_full_data_path(dst), "wb") as f:
        # tqdm accepts None for an unknown total.
        pbar = tqdm(total=file_size if file_size >= 0 else None)
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
                pbar.update(len(chunk))
        pbar.close()
    return file_size
def target2line(target, img_size, k, eval=False):
    """
    Render grid-encoded line targets onto a grayscale image.

    target: line representetitve in grid [L, grid_h, grid_w]
    img_size: (width, height): Input image size, PIL Image size
    eval=False : Default. For inference. Line width not big.
    eval=True : For iou. Line width is bigger.
    return line_img
    """
    # Grayscale canvas; the color names below are converted to single
    # luminance values in mode "L" — presumably faint grid, bright lines.
    line_img = Image.new("L", img_size)
    draw = ImageDraw.Draw(line_img)
    # Cell size in pixels: feature level k halves 32 px per step.
    resolution = 32 / pow(2, k)
    grid_h = int(img_size[1] // resolution)
    grid_w = int(img_size[0] // resolution)
    line_width = 4
    if not eval:
        line_width = 2
    # Horizontal grid lines.
    # NOTE(review): this loop uses range(1, grid_h) while the vertical loop
    # below uses range(grid_w) — the asymmetry looks unintentional; confirm.
    for i in range(1, grid_h):
        grid = []
        grid.append(((0, i * img_size[1]/grid_h)) )
        grid.append(((img_size[0], i * img_size[1]/grid_h)) )
        draw.line(grid, fill='blue', width=0)
    # Vertical grid lines.
    for i in range(grid_w):
        grid = []
        grid.append(((i * img_size[0]/grid_w, 0)) )
        grid.append(((i * img_size[0]/grid_w, img_size[1])) )
        draw.line(grid, fill='blue', width=0)
    # [L, grid_h, grid_w] -> [grid_h, grid_w, L] -> [grid_h, grid_w, L/4, 4]:
    # each group of 4 channels is one (x1, y1, x2, y2) endpoint pair
    # expressed relative to its grid cell.
    targets = np.transpose(target, [1, 2, 0])
    targets = targets.reshape(grid_h, grid_w, -1, 4)
    # Per-cell integer offsets used to convert cell-relative coordinates
    # into grid-absolute coordinates.
    offset_x = np.linspace(0, grid_w - 1, grid_w)
    offset_y = np.linspace(0, grid_h - 1, grid_h)
    off_w, off_h = np.meshgrid(offset_x, offset_y)
    # Cells whose 4-vector sums to something positive contain a line.
    indexes = np.argwhere( np.sum( targets.reshape(-1, 4) , axis=1, keepdims=False ) > 0 )
    targets = np.transpose(targets, (3, 2, 0, 1))
    targets[0,:] += off_w
    targets[1,:] += off_h
    targets[2,:] += off_w
    targets[3,:] += off_h
    # Scale grid coordinates back to pixels.
    targets = (targets * resolution)
    targets = np.transpose(targets, ( 2, 3, 1, 0)).reshape(-1, 4)
    detected = targets[indexes[..., 0]]
    # print( 'detected lines shape: ', detected.shape)
    [draw.line([(x1, y1), (x2,y2)], fill='white', width=line_width) for (x1, y1, x2, y2) in detected ]
    return line_img
def unmarshal(raw, signature):
    """Unmarshal objects.

    The elements of the returned tuple will be of types according
    to the column *Python OUT* in the :ref:`types summary <ref-types-table>`.

    :param RawData raw: raw message data
    :param signature: see :class:`~dcar.signature.Signature`
    :return: tuple of unmarshalled data
    :rtype: tuple
    :raises ~dcar.MessageError: if the data could not be unmarshalled
    """
    # One unmarshalled value per (type code, sub-signature) pair.
    return tuple(
        types[code].unmarshal(raw, sub) for code, sub in _signature(signature)
    )
def remove_keys(d, to_remove):
    """Return a copy of ``d`` without the keys listed in ``to_remove``.

    N.B., "not in" is used to match the keys.

    Args:
        d (dict): a dictionary
        to_remove (list): a list of keys to remove from d

    Returns:
        dict: a copy of d, excluding keys in to_remove
    """
    return {key: val for key, val in d.items() if key not in to_remove}
def parse_line(line_str):
    """
    Parse a line from sha1sum output into a tuple of hash, directory path
    and file name.

    Eg. line '3af30443352a5760cb0f88e619819cee1b1599e0  foo/bar/baz' would
    be parsed into tuple
    ('3af30443352a5760cb0f88e619819cee1b1599e0', 'foo/bar', 'baz').
    """
    line_str = line_str.rstrip()
    # FIX: sha1sum separates the hash and the path with *two* spaces;
    # splitting on any whitespace run (instead of a single ' ') avoids
    # leaving a leading space on the path.
    hash_str, path_str = line_str.split(maxsplit=1)
    directory, filename = os.path.split(path_str)
    return hash_str, directory, filename
def get_crypto_quotes(**kwargs):
    """
    Top-level function for obtaining all available cryptocurrency quotes
    """
    reader = CryptoReader(**kwargs)
    return reader.fetch()
def mutual_information(y_true, y_pred):
    """Mutual information score.

    The score is assumed to have been computed upstream and is passed in
    as ``y_pred``; ``y_true`` is ignored.
    """
    return y_pred
def zipf(a, size=None):  # real signature unknown; restored from __doc__
    # FIX: the docstring is now a raw string — the LaTeX below contains
    # ``\f`` (a form-feed escape) and ``\z`` (an invalid escape that warns
    # on modern CPython) when used in a normal string literal.
    r"""
    zipf(a, size=None)
    Draw samples from a Zipf distribution.
    Samples are drawn from a Zipf distribution with specified parameter
    `a` > 1.
    The Zipf distribution (also known as the zeta distribution) is a
    continuous probability distribution that satisfies Zipf's law: the
    frequency of an item is inversely proportional to its rank in a
    frequency table.
    Parameters
    ----------
    a : float or array_like of floats
        Distribution parameter. Should be greater than 1.
    size : int or tuple of ints, optional
        Output shape.  If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn.  If size is ``None`` (default),
        a single value is returned if ``a`` is a scalar. Otherwise,
        ``np.array(a).size`` samples are drawn.
    Returns
    -------
    out : ndarray or scalar
        Drawn samples from the parameterized Zipf distribution.
    See Also
    --------
    scipy.stats.zipf : probability density function, distribution, or
        cumulative density function, etc.
    Notes
    -----
    The probability density for the Zipf distribution is
    .. math:: p(x) = \frac{x^{-a}}{\zeta(a)},
    where :math:`\zeta` is the Riemann Zeta function.
    It is named for the American linguist George Kingsley Zipf, who noted
    that the frequency of any word in a sample of a language is inversely
    proportional to its rank in the frequency table.
    References
    ----------
    .. [1] Zipf, G. K., "Selected Studies of the Principle of Relative
           Frequency in Language," Cambridge, MA: Harvard Univ. Press,
           1932.
    Examples
    --------
    Draw samples from the distribution:
    >>> a = 2. # parameter
    >>> s = np.random.zipf(a, 1000)
    Display the histogram of the samples, along with
    the probability density function:
    >>> import matplotlib.pyplot as plt
    >>> from scipy import special
    Truncate s values at 50 so plot is interesting:
    >>> count, bins, ignored = plt.hist(s[s<50], 50, density=True)
    >>> x = np.arange(1., 50.)
    >>> y = x**(-a) / special.zetac(a)
    >>> plt.plot(x, y/max(y), linewidth=2, color='r')
    >>> plt.show()
    """
    pass
def AvgPool(window_shape: Sequence[int],
            strides: Optional[Sequence[int]] = None,
            padding: str = Padding.VALID.name,
            normalize_edges: bool = False,
            batch_axis: int = 0,
            channel_axis: int = -1) -> InternalLayerMasked:
  """Construct an average pooling layer.

  Based on `jax.example_libraries.stax.AvgPool`.

  Args:
    window_shape: pixels over which pooling is performed.
    strides: stride of the pooling window; `None` means `(1, 1)`.
    padding: one of `VALID`, `SAME`, or `CIRCULAR` (periodic boundary
      conditions on the image).
    normalize_edges: `True` normalizes the output by the effective receptive
      field, `False` by the window size. Only matters at the edges under
      `SAME` padding; set to `True` to match `ostax.AvgPool`.
    batch_axis: the batch dimension (default `0`, leading axis).
    channel_axis: the channel / feature dimension (default `-1`, trailing
      axis). For `kernel_fn`, channel size is considered to be infinite.

  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  # Delegate to the shared pooling factory with the AVG reduction.
  return _Pool(_Pooling.AVG, window_shape, strides, padding,
               normalize_edges, batch_axis, channel_axis)
def main(args, special=False):
    """Program main function (Główna Funkcja Programu)."""
    verbose = False
    print("PAiP Web Build System " + version)
    pwbs_main(args, verbose, special)
    sys.exit()
def test_workflow_patch(api_request, mocker):
    """PATCH a Workflow by its ID.

    First verifies that patching group_refs is rejected (400) while the
    approver-group validation is unmocked, then patches again with the
    validation mocked out and checks the updated fields, and that the
    permission class was consulted on both requests.
    """
    has_permission = mocker.spy(WorkflowPermission, "has_permission")
    workflow = WorkflowFactory()
    template = TemplateFactory()
    group_refs = [{"name": "group1", "uuid": "uuid1"}]
    args = (
        "patch",
        "approval:workflow-detail",
        workflow.id,
        {"name": "update", "group_refs": group_refs, "template": template.id},
    )
    # Unmocked group validation rejects the unknown group -> 400.
    assert api_request(*args).status_code == 400
    mocker.patch(
        "pinakes.main.approval.validations.validate_approver_groups",
        return_value=group_refs,
    )
    response = api_request(*args)
    assert response.status_code == 200
    content = response.data
    assert content["name"] == "update"
    assert content["group_refs"] == group_refs
    assert content["template"] == template.id
    # One permission check per PATCH request above.
    assert has_permission.call_count == 2
def pairwise_distances(x, y):
    """Computes pairwise squared l2 distances between tensors x and y.

    Args:
      x: Tensor of shape [n, feature_dim].
      y: Tensor of shape [m, feature_dim].

    Returns:
      Float32 distances tensor of shape [n, m].
    """
    # Expand ||x_i - y_j||^2 = ||x_i||^2 + ||y_j||^2 - 2 <x_i, y_j>.
    row_norms = tf.reduce_sum(x * x, axis=1)
    col_norms = tf.reduce_sum(y * y, axis=1)
    cross_terms = tf.matmul(x, y, transpose_b=True)
    return row_norms[:, tf.newaxis] + col_norms[tf.newaxis, :] - 2 * cross_terms
def timerange(rstring):
    """
    range from string specifier

    | 2010-M08 -> range of August 2010
    | 2009-Q1 -> range of first quarter, 2009
    | 2001-S1 -> range of first "semi" 2001
    | 2008 -> range of year 2008

    :param rstring: range string
    :rtype: timerange dictionary (None when nothing matches)
    """
    # Try the most specific specifiers first; the bare-year pattern would
    # otherwise also match the month/quarter/semi forms.
    dispatch = (
        (r'(\d{4})-M(\d{2})', month_range),
        (r'(\d{4})-Q(\d{1})', quarter_range),
        (r'(\d{4})-S(\d{1})', semi_range),
    )
    for pattern, factory in dispatch:
        match = re.search(pattern, rstring)
        if match:
            return factory(int(match.group(1)), int(match.group(2)))
    match = re.search(r'(\d{4})', rstring)
    if match:
        return year_range(int(match.group(1)))
def comp_height_wire(self):
    """Return the height of the bar.

    Parameters
    ----------
    self : CondType21
        A CondType21 object

    Returns
    -------
    H : float
        Height of the bar [m]
    """
    return self.Hbar
def get_update_seconds(str_time: str) -> int:
    """Calculate the seconds between the current time and a scheduled HH:MM.

    Args:
        str_time (str): Time of scheduled event taken from user input as a
            string, e.g. "18:30".

    Returns:
        int: Seconds until the scheduled event; if the time has already
        passed today, the event is taken to be tomorrow.
    """
    # strptime with only %H:%M yields 1900-01-01 HH:MM; subtracting that
    # epoch turns the parsed clock time into a time-of-day timedelta.
    update_time = datetime.strptime(str_time, '%H:%M') - datetime(1900, 1, 1)
    now = datetime.now()
    current_timedelta = timedelta(hours=now.hour, minutes=now.minute,
                                  seconds=now.second)
    # If the target time already passed today, schedule it for tomorrow
    # (replaces the redundant second `if` re-check with an else-style guard).
    if update_time < current_timedelta:
        update_time += timedelta(hours=24)
    update_interval = update_time - current_timedelta
    # Lazy %-formatting: the string is only built if INFO is enabled.
    logging.info('UPDATE INTERVAL: %s', update_interval.seconds)
    return update_interval.seconds
def full_name(decl, with_defaults=True):
    """
    Returns declaration full qualified name.

    If `decl` belongs to anonymous namespace or class, the function will return
    C++ illegal qualified name.

    :param decl: :class:`declaration_t`
    :type decl: :class:`declaration_t`
    :rtype: full name of declarations.
    """
    if decl is None:
        raise RuntimeError("Unable to generate full name for None object!")
    if with_defaults:
        if not decl.cache.full_name:
            path = declaration_path(decl)
            # Nameless declarations (e.g. anonymous class or struct
            # instances) are allowed; they get an empty full name.
            decl.cache.full_name = (
                "" if path == [""] else full_name_from_declaration_path(path))
        return decl.cache.full_name
    if not decl.cache.full_partial_name:
        path = partial_declaration_path(decl)
        decl.cache.full_partial_name = (
            "" if path == [""] else full_name_from_declaration_path(path))
    return decl.cache.full_partial_name
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.