| content | id |
|---|---|
def decode_hint(hint: int) -> str:
"""Decodes integer hint as a string.
The format is:
⬜ (GRAY) -> .
🟨 (YELLOW) -> ?
🟩 (GREEN) -> *
Args:
hint: An integer representing the hint.
Returns:
A string representing the hint.
"""
hint_str = []
for _ in range(_WORD_LENGTH):
hint_chr = hint % 3
hint //= 3
if hint_chr == 0:
hint_str.append(_HINT_NOT_IN_ANY_SPOT)
elif hint_chr == 1:
hint_str.append(_HINT_WRONG_SPOT)
else:
hint_str.append(_HINT_CORRECT_SPOT)
return ''.join(hint_str[::-1])
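# A quick usage sketch for decode_hint. The module constants are not shown in this
# snippet, so the values below are assumptions consistent with the docstring mapping.
_WORD_LENGTH = 5
_HINT_NOT_IN_ANY_SPOT = '.'   # GRAY
_HINT_WRONG_SPOT = '?'        # YELLOW
_HINT_CORRECT_SPOT = '*'      # GREEN

assert decode_hint(192) == '*?.?.'  # 192 = 2*3^4 + 1*3^3 + 0*3^2 + 1*3^1 + 0*3^0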
| 13,200
|
def validate_notebook(nb_path, timeout=60):
""" Executes the notebook via nbconvert and collects the output
Args:
nb_path (string): path to the notebook of interest
timeout (int): max allowed time (in seconds)
Returns:
(parsed nbformat.NotebookNode object, list of execution errors)
"""
dirname, __ = os.path.split(nb_path)
os.chdir(dirname)
kname = find_kernel(nb_path)
if kname is None:
raise OSError("No kernel found")
# Set delete=False as workaround for Windows OS
with tempfile.NamedTemporaryFile(suffix=".ipynb", delete=False) as tf:
args = [
"jupyter",
"nbconvert",
"--to",
"notebook",
"--execute",
f"--ExecutePreprocessor.timeout={timeout}",
f"--ExecutePreprocessor.kernel_name={kname}",
"--ExecutePreprocessor.allow_errors=True",
"--output",
tf.name,
nb_path,
]
subprocess.check_call(args)
tf.seek(0)
nb = nbformat.read(tf, nbformat.current_nbformat)
errors = list_errors(nb)
# broken urls are currently counted as errors; consider including as
# warnings
broken_urls = find_broken_urls(nb)
if any(broken_urls):
broken_urls = ["broken url: " + u for u in broken_urls]
errors += broken_urls
return nb, errors
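# Usage sketch for validate_notebook; the notebook path is hypothetical. Note that the
# function changes the working directory to the notebook's folder as a side effect.
nb, errors = validate_notebook("/path/to/analysis.ipynb", timeout=120)
if errors:
    print("Execution problems found:", errors)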
| 13,201
|
def is_symmetric(arr, i_sym=True, j_sym=True):
"""
Takes in an array of shape (n, m) and checks whether it is symmetric
Parameters
----------
arr : 1D or 2D array
i_sym : bool
check symmetry with respect to the 1st axis
j_sym : bool
check symmetry with respect to the 2nd axis
Returns
-------
a binary array with the symmetry condition for the corresponding quadrants.
Notes
-----
If both **i_sym** = ``True`` and **j_sym** = ``True``, the input array is
checked for polar symmetry.
See `issue #34 comment
<https://github.com/PyAbel/PyAbel/issues/34#issuecomment-160344809>`_
for the definition of the center of the image.
"""
Q0, Q1, Q2, Q3 = tools.symmetry.get_image_quadrants(
arr, reorient=False)
if i_sym and not j_sym:
valid_flag = [np.allclose(np.fliplr(Q1), Q0),
np.allclose(np.fliplr(Q2), Q3)]
elif not i_sym and j_sym:
valid_flag = [np.allclose(np.flipud(Q1), Q2),
np.allclose(np.flipud(Q0), Q3)]
elif i_sym and j_sym:
valid_flag = [np.allclose(np.flipud(np.fliplr(Q1)), Q3),
np.allclose(np.flipud(np.fliplr(Q0)), Q2)]
else:
raise ValueError('Checking for symmetry with both i_sym=False \
and j_sym=False does not make sense!')
return np.array(valid_flag)
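# Usage sketch (assumes PyAbel's tools.symmetry module is importable, as in the
# function above). An all-ones image is trivially symmetric about both axes.
import numpy as np
image = np.ones((5, 5))
print(is_symmetric(image))               # polar symmetry check -> [ True  True]
print(is_symmetric(image, j_sym=False))  # symmetry about the 1st axis only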
| 13,202
|
def linear_powspec(k, a):
"""linear power spectrum P(k) - linear_powspec(k in h/Mpc, scale factor)"""
return _cosmocalc.linear_powspec(k, a)
| 13,203
|
def main(request, response):
"""
Simple handler that sets a response header based on which client hint
request headers were received.
"""
response.headers.append(b"Access-Control-Allow-Origin", b"*")
response.headers.append(b"Access-Control-Allow-Headers", b"*")
response.headers.append(b"Access-Control-Expose-Headers", b"*")
if b"device-memory" in request.headers:
response.headers.set(b"device-memory-received", request.headers.get(b"device-memory"))
if b"dpr" in request.headers:
response.headers.set(b"dpr-received", request.headers.get(b"dpr"))
if b"viewport-width" in request.headers:
response.headers.set(b"viewport-width-received", request.headers.get(b"viewport-width"))
if b"sec-ch-viewport-height" in request.headers:
response.headers.set(b"viewport-height-received", request.headers.get(b"sec-ch-viewport-height"))
if b"rtt" in request.headers:
response.headers.set(b"rtt-received", request.headers.get(b"rtt"))
if b"downlink" in request.headers:
response.headers.set(b"downlink-received", request.headers.get(b"downlink"))
if b"ect" in request.headers:
response.headers.set(b"ect-received", request.headers.get(b"ect"))
if b"sec-ch-ua-mobile" in request.headers:
response.headers.set(b"mobile-received", request.headers.get(b"sec-ch-ua-mobile"))
if b"sec-ch-prefers-color-scheme" in request.headers:
response.headers.set(b"prefers-color-scheme-received", request.headers.get(b"sec-ch-prefers-color-scheme"))
| 13,204
|
def light_eff(Pmax, Iz, I0, Ik):
"""
Photosynthetic efficiency based on the light conditions. By definition, the
efficiency has a value between 0 and 1.
Parameters
----------
Pmax : numeric
Maximum photosynthetic rate [-].
Iz : numeric
Coral biomass-averaged light-intensity [mol photons m^-2 s^-1].
I0 : numeric
Light-intensity at the surface water (but within the water column)
[mol photons m^-2 s^-1].
Ik : numeric
Saturation light-intensity [mol photons m^-2 s^-1].
Returns
-------
PI : numeric
Photo-efficiency [-].
"""
# # calculations
try:
if Ik > 0:
PI = Pmax * (np.tanh(Iz / Ik) - np.tanh(.01 * I0 / Ik))
else:
PI = 0.
except ValueError:
PI = np.zeros(len(Ik))
PI[Ik > 0] = Pmax[Ik > 0] * (np.tanh(Iz[Ik > 0] / Ik[Ik > 0]) -
np.tanh(.01 * I0 / Ik[Ik > 0]))
# # Output
return PI
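# Worked example with illustrative (not calibrated) light intensities. The scalar
# branch runs the tanh formula directly; the array branch handles Ik == 0 entries.
import numpy as np
print(light_eff(Pmax=1.0, Iz=3e-4, I0=6e-4, Ik=2e-4))
Ik = np.array([0.0, 2e-4])
print(light_eff(Pmax=np.array([1.0, 1.0]), Iz=np.array([3e-4, 3e-4]), I0=6e-4, Ik=Ik))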
| 13,205
|
def radial_kernel_evaluate(rmax, kernel, pos, wts, log=null_log, sort_data=False,
many_ngb_approx=None):
"""
Perform evaluation of radial kernel over neighbours.
Note you must set-up the linear-interpolation kernel before calling this
function.
rmax - radius to evaluate within
kernel - kernel table
pos - (N,3) array of positions
wts - (N,) array of weights
[many_ngb_approx - guess for number of neighbours. If this is included and
large, i.e. >140, we will combine the kernels due to
particles in non-adjacent cells (=monopole approximation
for the 1/r^2 force)]
returns pairs, f
where
pairs - the number of pairs within rmax
f - An (N,3) array s.t.
f_i = Sum_j wts_j (pos_j - pos_i) * kernel(|pos_j - pos_i|)
"""
pos_arr = array(pos)
num_pts = len(pos)
if len(pos) != len(wts):
raise Exception('Number of weights ({:,}) must be the same as number of points ({:,})'.format(len(wts),num_pts))
stencil = None
# Choose a stencil based on number of neighbours
if many_ngb_approx is not None:
guess_ngb = int(many_ngb_approx)
if guess_ngb>400:
# 7x7x7 stencil (only inner 3x3x3 direct)
stencil = 7
ngrid = int(3.0/rmax)
elif guess_ngb>140:
# 5x5x5 stencil (inner 3x3x3 direct)
stencil = 5
ngrid = int(2.0/rmax)
else:
# 3x3x3, all direct
ngrid = int(1.0/rmax)
else:
ngrid = int(1.0/rmax) # 3x3x3 by direct summation
# Avoid nasty hashing problems, make sure ngrid&3 == 3
if ngrid&3!=3 and ngrid >=3:
ngrid = (ngrid//4)*4 -1
print('Using hash grid of size {:,}^3 bins, binning particles.'.format(ngrid), file=log)
cells = get_cells(pos_arr, ngrid, log)
sort_idx, cellbin_data = _bin_id_data(cells, log)
pos = pos_arr[sort_idx].copy()
wts= array(wts)[sort_idx].copy()
print(MU.OKBLUE+'Kernel evaluations at {:,} positions'.format(num_pts)+MU.ENDC,
file=log)
t0 = time()
lattice_setup_kernel(rmax, kernel, log)
pairs, accel = lattice_kernel(pos, cellbin_data, ngrid, masses=wts, stencil=stencil)
t1 = time()
dt = t1-t0
if stencil is None:
mean_num_ngb = pairs * 2.0 / num_pts
print('Within r=%.4f, mean number of neighbours was'%rmax,
MU.OKBLUE+'%.2f'%(mean_num_ngb)+MU.ENDC, file=log)
print('{:,} pairs in'.format(pairs), '%.2f seconds'%dt,
'i.e. {:,} positions/sec, {:,} kernels/sec'.format(int(num_pts/dt), int(2*pairs/dt)), file=log)
else:
print('%dx%dx%d monopole approximation, so no exact count for neighbours\n'%((stencil,)*3),
'but {:,} force-pairs in'.format(pairs), '%.2f seconds'%dt,
'i.e. {:,} positions/sec, {:,} kernels/sec'.format(int(num_pts/dt), int(2*pairs/dt)), file=log)
if sort_data:
# return the sort index along with sorted positions and masses, and corresponding accelerations.
# If you want to unsort you need to do it yourself
return pairs, sort_idx, pos, wts, accel
# indices for 'un'-sorting
unsort = empty_like(sort_idx)
unsort[sort_idx] = arange(num_pts)
return pairs, accel[unsort]
| 13,206
|
def get_referents(source, exclude=None):
"""
:return: dict storing lists of objects referring to source keyed by type.
"""
res = {}
for obj_cls, ref_cls in [
(models.Language, models.LanguageSource),
(models.ValueSet, models.ValueSetReference),
(models.Sentence, models.SentenceReference),
(models.Contribution, models.ContributionReference),
]:
if obj_cls.mapper_name().lower() in (exclude or []):
continue
q = DBSession.query(obj_cls).join(ref_cls).filter(ref_cls.source_pk == source.pk)
if obj_cls == models.ValueSet:
q = q.options(
joinedload_all(models.ValueSet.parameter),
joinedload_all(models.ValueSet.language))
res[obj_cls.mapper_name().lower()] = q.all()
return res
| 13,207
|
def test_smplify():
"""Test adaptive batch size."""
smplify_config = dict(mmcv.Config.fromfile('configs/smplify/smplify.py'))
device = torch.device(
'cuda') if torch.cuda.is_available() else torch.device('cpu')
smplify_config['body_model'] = dict(
type='SMPL',
gender='neutral',
num_betas=10,
keypoint_src='smpl_45',
keypoint_dst='smpl_45',
model_path='data/body_models/smpl',
batch_size=1  # batch size need not be known at init
)
smplify_config['num_epochs'] = 1
smplify_config['use_one_betas_per_video'] = True
smplify = build_registrant(smplify_config)
# Generate keypoints
smpl = build_body_model(
dict(
type='SMPL',
gender='neutral',
num_betas=10,
keypoint_src='smpl_45',
keypoint_dst='smpl_45',
model_path='data/body_models/smpl',
batch_size=batch_size) # keypoints shape: (2, 45, 3)
)
keypoints3d = smpl()['joints'].detach().to(device=device)
keypoints3d_conf = torch.ones(*keypoints3d.shape[:2], device=device)
# Run SMPLify
smplify_output = smplify(
keypoints3d=keypoints3d, keypoints3d_conf=keypoints3d_conf)
for k, v in smplify_output.items():
if isinstance(v, torch.Tensor):
assert not np.any(np.isnan(
v.detach().cpu().numpy())), f'{k} fails.'
# Run SMPLify with init parameters
smplify_output = smplify(
keypoints3d=keypoints3d,
keypoints3d_conf=keypoints3d_conf,
init_global_orient=torch.rand([1, 3]).to(device),
init_body_pose=torch.rand([1, 69]).to(device),
init_betas=torch.rand([1, 10]).to(device),
init_transl=torch.rand([1, 3]).to(device),
)
for k, v in smplify_output.items():
if isinstance(v, torch.Tensor):
assert not np.any(np.isnan(
v.detach().cpu().numpy())), f'{k} fails.'
| 13,208
|
def asarray(buffer=None, itemsize=None, shape=None, byteoffset=0,
bytestride=None, padc=" ", kind=CharArray):
"""massages a sequence into a chararray.
If buffer is *already* a chararray of the appropriate kind, it is
returned unaltered.
"""
if isinstance(buffer, kind) and buffer.__class__ is kind:
return buffer
else:
return array(buffer, itemsize, shape, byteoffset, bytestride,
padc, kind)
| 13,209
|
def _flake():
"""Test flake8"""
orig_dir = os.getcwd()
import_dir, dev = _get_import_dir()
os.chdir(op.join(import_dir, '..'))
if dev:
sys.argv[1:] = ['vispy', 'examples', 'make']
else:
sys.argv[1:] = [op.basename(import_dir)]
sys.argv.append('--ignore=E226,E241,E265,E266,W291,W293,W503,F999,E305,'
'F405')
sys.argv.append('--exclude=six.py,glfw.py,'
'_proxy.py,_es2.py,_gl2.py,_pyopengl2.py,'
'_constants.py,png.py,decorator.py,ipy_inputhook.py,'
'experimental,wiki,_old,mplexporter.py,cubehelix.py,'
'cassowary')
try:
try:
from flake8.main import main
except ImportError:
from flake8.main.cli import main
except ImportError:
print('Skipping flake8 test, flake8 not installed')
else:
print('Running flake8... ') # if end='', first error gets ugly
sys.stdout.flush()
try:
main()
except SystemExit as ex:
if ex.code in (None, 0):
pass # do not exit yet, we want to print a success msg
else:
raise RuntimeError('flake8 failed')
finally:
os.chdir(orig_dir)
| 13,210
|
def _extract_symlink(zipinfo: zipfile.ZipInfo,
pathto: str,
zipfile: zipfile.ZipFile,
nofixlinks: bool=False) -> str:
"""
Extract: read the link path string, and make a new symlink.
'zipinfo' is the link file's ZipInfo object stored in zipfile.
'pathto' is the extract's destination folder (relative or absolute)
'zipfile' is the ZipFile object, which reads and parses the zip file.
"""
assert zipinfo.external_attr >> 28 == SYMLINK_TYPE
zippath = zipinfo.filename
linkpath = zipfile.read(zippath)
linkpath = linkpath.decode('utf8')
# drop Win drive + unc, leading slashes, '.' and '..'
zippath = os.path.splitdrive(zippath)[1]
zippath = zippath.lstrip(os.sep)
allparts = zippath.split(os.sep)
okparts = [p for p in allparts if p not in ('.', '..')]
zippath = os.sep.join(okparts)
# where to store link now
destpath = os.path.join(pathto, zippath)
destpath = os.path.normpath(destpath)
# make leading dirs if needed
upperdirs = os.path.dirname(destpath)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
# adjust link separators for the local platform
if not nofixlinks:
linkpath = linkpath.replace('/', os.sep).replace('\\', os.sep)
# test+remove link, not target
if os.path.lexists(destpath):
os.remove(destpath)
# windows dir-link arg
isdir = zipinfo.external_attr & SYMLINK_ISDIR
if (isdir and
sys.platform.startswith('win') and
int(sys.version[0]) >= 3):
dirarg = dict(target_is_directory=True)
else:
dirarg = {}
# make the link in dest (mtime: caller)
os.symlink(linkpath, destpath, **dirarg)
return destpath
| 13,211
|
def read_file(fname, ObsClass, verbose=False):
"""This method is used to read the file.
"""
if verbose:
print('reading menyanthes file {}'.format(fname))
if ObsClass == observation.GroundwaterObs:
_rename_dic = {'xcoord': 'x',
'ycoord': 'y',
'upfiltlev': 'bovenkant_filter',
'lowfiltlev': 'onderkant_filter',
'surflev': 'maaiveld',
'filtnr': 'filternr',
'meetpunt': 'measpointlev'
}
_keys_o = ['name', 'x', 'y', 'locatie', 'filternr',
'metadata_available', 'maaiveld', 'meetpunt',
'bovenkant_filter', 'onderkant_filter']
elif ObsClass == observation.WaterlvlObs:
_rename_dic = {'xcoord': 'x',
'ycoord': 'y',
'meetpunt': 'measpointlev'
}
_keys_o = ['name', 'x', 'y', 'locatie']
# Check if file is present
if not (os.path.isfile(fname)):
print('Could not find file ', fname)
mat = loadmat(fname, struct_as_record=False, squeeze_me=True,
chars_as_strings=True)
d_h = read_oseries(mat)
locations = d_h.keys()
obs_list = []
for location in locations:
if verbose:
print('reading location -> {}'.format(location))
metadata = d_h[location]
metadata['projection'] = 'epsg:28992'
metadata['metadata_available'] = True
s = metadata.pop('values')
df = DataFrame(s, columns=['stand_m_tov_nap'])
for key in _rename_dic.keys():
if key in metadata.keys():
metadata[_rename_dic[key]] = metadata.pop(key)
meta_o = {k: metadata[k] for k in _keys_o if k in metadata}
o = ObsClass(df, meta=metadata, **meta_o)
obs_list.append(o)
return obs_list
| 13,212
|
def cumulative_gain_curve(df: pd.DataFrame,
treatment: str,
outcome: str,
prediction: str,
min_rows: int = 30,
steps: int = 100,
effect_fn: EffectFnType = linear_effect) -> np.ndarray:
"""
Orders the dataset by prediction and computes the cumulative gain (effect * proportional sample size) curve
according to that ordering.
Parameters
----------
df : Pandas' DataFrame
A Pandas' DataFrame with target and prediction scores.
treatment : Strings
The name of the treatment column in `df`.
outcome : Strings
The name of the outcome column in `df`.
prediction : Strings
The name of the prediction column in `df`.
min_rows : Integer
Minimum number of observations needed to have a valid result.
steps : Integer
The number of cumulative steps to iterate when accumulating the effect
effect_fn : function (df: pandas.DataFrame, treatment: str, outcome: str) -> int or Array of int
A function that computes the treatment effect given a dataframe, the name of the treatment column and the name
of the outcome column.
Returns
----------
cumulative gain curve: np.ndarray
The cumulative gain according to the predictions ordering.
"""
size = df.shape[0]
n_rows = list(range(min_rows, size, size // steps)) + [size]
cum_effect = cumulative_effect_curve(df=df, treatment=treatment, outcome=outcome, prediction=prediction,
min_rows=min_rows, steps=steps, effect_fn=effect_fn)
return np.array([effect * (rows / size) for rows, effect in zip(n_rows, cum_effect)])
| 13,213
|
def last(*args):
"""Return last value from any object type - list,tuple,int,string"""
if len(args) == 1:
return int(''.join(map(str,args))) if isinstance(args[0],int) else args[0][-1]
return args[-1]
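# Usage sketch showing the different input types handled above.
print(last([1, 2, 3]))   # 3   -> last element of a list
print(last("hello"))     # o   -> last character of a string
print(last(427))         # 427 -> a single int is returned whole
print(last(1, 2, 3))     # 3   -> last positional argument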
| 13,214
|
def load_ann_kwargs():
"""emboss text"""
from matplotlib.patheffects import withStroke
myeffect = withStroke(foreground="w", linewidth=3)
ann_kwargs = dict(path_effects=[myeffect])
return ann_kwargs
| 13,215
|
def color_conversion(img_name, color_type="bgr2rgb"):
"""
Color space conversion.
Parameters
----------
img_name : numpy.ndarray
Input image.
color_type : str
Conversion type; one of
bgr2rgb, bgr2hsv, bgr2gray, rgb2bgr,
rgb2hsv, rgb2gray, hsv2bgr, hsv2rgb
Returns
-------
conversion_img : numpy.ndarray
Converted image.
"""
if color_type == "bgr2rgb":
conversion_img = cv2.cvtColor(img_name, cv2.COLOR_BGR2RGB)
elif color_type == "bgr2hsv":
conversion_img = cv2.cvtColor(img_name, cv2.COLOR_BGR2HSV)
elif color_type == "bgr2gray":
conversion_img = cv2.cvtColor(img_name, cv2.COLOR_BGR2GRAY)
elif color_type == "rgb2bgr":
conversion_img = cv2.cvtColor(img_name, cv2.COLOR_RGB2BGR)
elif color_type == "rgb2hsv":
conversion_img = cv2.cvtColor(img_name, cv2.COLOR_RGB2HSV)
elif color_type == "rgb2gray":
conversion_img = cv2.cvtColor(img_name, cv2.COLOR_RGB2GRAY)
elif color_type == "hsv2bgr":
conversion_img = cv2.cvtColor(img_name, cv2.COLOR_HSV2BGR)
elif color_type == "hsv2rgb":
conversion_img = cv2.cvtColor(img_name, cv2.COLOR_HSV2RGB)
else:
sys.exit(1)
return conversion_img
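# Usage sketch (requires OpenCV). A solid red image in BGR order is converted to RGB
# and to grayscale.
import numpy as np
bgr = np.zeros((4, 4, 3), dtype=np.uint8)
bgr[..., 2] = 255                       # red channel in BGR layout
rgb = color_conversion(bgr, "bgr2rgb")
gray = color_conversion(bgr, "bgr2gray")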
| 13,216
|
def predict(self, celldata):
"""
Performs prediction on the given cell data using the loaded model.
:return: the classification result
"""
ai_model = load_model_parameter()
ret = predict_unseen_data(ai_model, celldata)
print("celldata: ", celldata)
print("Classification: ", ret)
return ret
| 13,217
|
def statements_to_str(statements: List[ASTNode], indent: int) -> str:
"""Takes a list of statements and returns a string with their C representation"""
stmt_str_list = list()
for stmt in statements:
stmt_str = stmt.to_str(indent + 1)
if not is_compound_statement(stmt) and not isinstance(stmt, Label):
stmt_str += ";" + NEW_LINE
stmt_str_list.append(stmt_str)
return "".join(stmt_str_list)
| 13,218
|
def _make_filter(class_name: str, title: str):
"""https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-enumwindows"""
def enum_windows(handle: int, h_list: list):
if not (class_name or title):
h_list.append(handle)
if class_name and class_name not in win32gui.GetClassName(handle):
return True # continue enumeration
if title and title not in win32gui.GetWindowText(handle):
return True # continue enumeration
h_list.append(handle)
return enum_windows
| 13,219
|
def is_float(s):
"""
Determine whether a string can be converted to a floating point number.
"""
try:
float(s)
except (TypeError, ValueError):
return False
return True
| 13,220
|
def whitelist_sharing_job(h_producer, operator_config, conn, logger):
"""Whitelist distribution job method.
This method listens for database notification events that are generated when records in the
historic_whitelist table are inserted or updated. It then transmits those changes to the operators.
"""
try:
with conn.cursor() as cursor:
cursor.execute('LISTEN distributor_updates')
time_passed = 0
while 1:
conn.commit()
if select.select([conn], [], [], 5) == ([], [], []):
time_passed += 5
logger.debug('Listening to notification still after {0} seconds...'.format(time_passed))
else:
time_passed = 0
imei_adds = []
imei_updates = []
imei_deletes = []
update_msg = {
'type': 'whitelist_update',
'content': {
'adds': imei_adds,
'updates': imei_updates,
'deletes': imei_deletes
}
}
conn.poll()
conn.commit()
while conn.notifies:
notification = conn.notifies.pop()
logger.debug('Notification: {0}, {1}, {2}'
.format(notification.pid, notification.channel, notification.payload))
payload = json.loads(notification.payload)
imei_norm = payload.get('imei_norm')
imei_adds.append(imei_norm) if payload.get('end_date') is None \
else imei_deletes.append(imei_norm)
logger.debug('Dispatching whitelist to each operator update...')
logger.debug(update_msg)
for op in operator_config:
h_producer.send(op.topic, update_msg)
except Exception as e:
logger.info('DIRBS encountered an exception during whitelist distribution job. See below for details')
logger.error(str(e))
sys.exit(1)
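# A minimal sketch of how a notification consumed by the loop above could be raised
# from Python for testing (assumes psycopg2 and a reachable DIRBS database; the DSN and
# IMEI are hypothetical). The payload fields mirror those read inside the job.
import json
import psycopg2

test_conn = psycopg2.connect("dbname=dirbs")
with test_conn.cursor() as cur:
    payload = json.dumps({"imei_norm": "01234567890123", "end_date": None})
    cur.execute("SELECT pg_notify('distributor_updates', %s)", (payload,))
test_conn.commit()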
| 13,221
|
def import_from_scale(
dataset, labels_dir_or_json, label_prefix=None, scale_id_field="scale_id",
):
"""Imports the Scale AI labels into the FiftyOne dataset.
This method supports importing annotations from the following Scale API
endpoints:
- `General Image Annotation <https://docs.scale.com/reference#general-image-annotation>`_
- `Semantic Segmentation Annotation <https://docs.scale.com/reference#semantic-segmentation-annotation>`_
- `General Video Annotation <https://docs.scale.com/reference#general-video-annotation>`_
- `Video Playback <https://docs.scale.com/reference#video-playback>`_
The ``scale_id_field`` of the FiftyOne samples are used to associate
samples with their corresponding Scale task IDs.
The provided ``labels_dir_or_json`` can either be the path to a JSON
export in the following format::
[
{
"task_id": <scale-task-id1>,
"response": {...},
...
},
{
"task_id": <scale-task-id2>,
"response": {...},
...
},
...
]
or a directory of per-task JSON files, which can either (a) directly
contain the elements of the list above, or (b) contain task labels
organized in the following format::
labels_dir/
<scale-task-id1>.json
<scale-task-id2>.json
...
where each JSON file contains only the contents of the ``response`` field
for the task.
The contents of the ``response`` field should be as follows:
- `General Image Annotation <https://docs.scale.com/reference#general-image-annotation>`_::
{
"annotations": [...]
"global_attributes": {...}
}
- `Semantic Segmentation Annotation <https://docs.scale.com/reference#semantic-segmentation-annotation>`_::
{
"annotations": {
...
"combined": {
...
"indexedImage": <url-or-filepath>
}
},
"labelMapping": {...}
}
where the ``indexedImage`` field (which is the only version of the
segmentation used by this method) can contain either a URL, in which
case the mask is downloaded from the web, or the path to the mask on
disk.
- `General Video Annotation <https://docs.scale.com/reference#general-video-annotation>`_::
{
"annotations": {
"url": <url-or-filepath>
},
"events": {
"url": <url-or-filepath>
}
}
where the ``url`` fields can contain either a URL, in which case the
file is downloaded from the web, or the path to JSON file on disk.
The annotations file should contain per-frame annotations in the
following format::
[
{
"annotations": [...],
"global_attributes": {...}
},
{
"annotations": [...],
"global_attributes": {...}
},
...
]
where the n-th element in the list contains the labels for the n-th
frame that was labeled.
Note that, if parameters such as ``duration_time``, ``frame_rate``, and
``start_time`` were used to specify which frames of the video to
annotate, then you must ensure that the ``labels_dir_or_json`` JSON
that you provide to this method contains the ``task`` fields for each
Scale task so that the correct frame numbers can be determined from
these values.
The optional events file should contain a list of events in the video::
{
"events": [...]
}
- `Video Playback <https://docs.scale.com/reference#video-playback>`_::
{
"annotations": {
"url": <url-or-filepath>
},
"events": {
"url": <url-or-filepath>
}
}
where the ``url`` fields can contain either a URL, in which case the
file is downloaded from the web, or the path to JSON files on disk.
The annotations file should contain a dictionary of object
trajectories::
{
"annotations": {...}
}
The optional events file should contain a list of events in the video::
{
"events": [...]
}
Args:
dataset: a :class:`fiftyone.core.dataset.Dataset`
labels_dir_or_json: the path to a Scale AI JSON export or a directory
of JSON exports as per the formats described above
label_prefix (None): a prefix to prepend to the sample label field(s)
that are created, separated by an underscore
scale_id_field ("scale_id"): the sample field to use to associate Scale
task IDs with FiftyOne samples
"""
# Load labels
if labels_dir_or_json.endswith(".json"):
labels = _load_labels(labels_dir_or_json)
else:
labels = _load_labels_dir(labels_dir_or_json)
id_map = {k: v for k, v in zip(*dataset.values([scale_id_field, "id"]))}
if label_prefix:
label_key = lambda k: label_prefix + "_" + k
else:
label_key = lambda k: k
is_video = dataset.media_type == fomm.VIDEO
with fou.ProgressBar(total=len(labels)) as pb:
for task_id, task_labels in pb(labels.items()):
if task_id not in id_map:
logger.info(
"Skipping labels for unknown Scale ID '%s'", task_id
)
continue
sample = dataset[id_map[task_id]]
if sample.metadata is None:
if is_video:
sample.metadata = fom.VideoMetadata.build_for(
sample.filepath
)
else:
sample.metadata = fom.ImageMetadata.build_for(
sample.filepath
)
if is_video:
frames = _parse_video_labels(task_labels, sample.metadata)
sample.frames.merge(
{
frame_number: {
label_key(fname): flabel
for fname, flabel in frame_dict.items()
}
for frame_number, frame_dict in frames.items()
}
)
else:
frame_size = (sample.metadata.width, sample.metadata.height)
anno_dict = task_labels["response"]
labels_dict = _parse_image_labels(anno_dict, frame_size)
sample.update_fields(
{label_key(k): v for k, v in labels_dict.items()}
)
sample.save()
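# Usage sketch (the dataset name and export path are hypothetical). The dataset's
# samples are expected to carry their Scale task IDs in the `scale_id` field, as
# described in the docstring above.
import fiftyone as fo

dataset = fo.load_dataset("my-dataset")
import_from_scale(dataset, "/path/to/scale_export.json", label_prefix="scale")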
| 13,222
|
def build_lm_model(config):
"""
"""
if config["model"] == "transformer":
model = build_transformer_lm_model(config)
elif config["model"] == "rnn":
model = build_rnn_lm_model(config)
else:
raise ValueError("model not correct!")
return model
| 13,223
|
def citizenship_fst(
test_file: str, fst_file: str, fuzzy_match: bool = True, verbose: bool = False
):
"""Evaluate citizenship finite state transducer and return report to stdout
Arguments:
test_file: test file path
fst_file: fst file path
fuzzy_match: accept/reject fuzzy match
verbose: report verbosity
**Usage:**
```shell
patentcity eval citizenship-fst data/gold_cit_uspatent01.csv lib/fst_cit.json
```
"""
fst = json.loads(open(fst_file, "r").read())
test_df = pd.read_csv(test_file, sep=";")
test_df = test_df.replace({np.nan: None})
res = []
for i, row in test_df.iterrows():
text = row["text"]
pred = get_cit_code(text, fst, fuzzy_match)
res += [
[row["publication_number"], text, row["gold"], pred, row["gold"] == pred]
]
res = pd.DataFrame(
res, columns=["publication_number", "text", "gold", "pred", "res"]
)
errors = res.query("res==False")
filename = os.path.basename(test_file)
acc = 1 - len(errors) / len(res)
typer.secho(f"## {filename}\n", fg=typer.colors.BLUE)
typer.echo(f"Accuracy (fuzzy-match {fuzzy_match}): {acc * 100:.2f}%\n")
if verbose:
typer.echo(f"### Errors\n{errors.to_markdown()}")
| 13,224
|
def __virtual__():
"""
Only load if boto3 libraries exist.
"""
has_boto_reqs = salt.utils.versions.check_boto_reqs()
if has_boto_reqs is True:
__utils__["boto3.assign_funcs"](__name__, "cloudfront")
return has_boto_reqs
| 13,225
|
def extract_binaries(pbitmap, psamples):
"""
Extract sample binaries from subdirectories according to dataset defined in bitmap.
"""
bins = glob.glob(psamples+'/**/*.bin', recursive=True)
bitmap = pd.read_csv(pbitmap) if '.tsv' not in pbitmap else pd.read_csv(pbitmap, sep='\t')
hashes = bitmap['sha1sum'].tolist()
if not os.path.exists('bins'):
os.makedirs('bins')
missed = []
for hash in hashes:
found = False
for bin in bins:
if hash in bin:
cmd = 'cp %s bins/%s.bin' % (bin, hash)
os.system(cmd)
found = True
break
if not found:
missed += [hash]
print('Sample not found: %s' % hash)
res = os.listdir('bins')
print('Total found =', len(res))
return res
| 13,226
|
def test_get_start_offset(request, fixture, result):
"""
Test the function that returns the offset for the RLM parser
"""
text = request.getfixturevalue(fixture)
assert result == _get_start_offset(text.splitlines())
| 13,227
|
def opening_github():
""" This function opens the github. """
webbrowser.open('https://github.com/RIDERIUS/Image-Viewer')
| 13,228
|
def search_range(nums, target):
"""
Find first and last position of target in given array by binary search
:param nums: given array
:type nums : list[int]
:param target: target number
:type target: int
:return: first and last position of target
:rtype: list[int]
"""
result = [-1, -1]
left, right = 0, len(nums) - 1
while left <= right:
mid = (left + right) // 2
# note that we move right pointer when nums[mid] == target
# to find the first occurrence of target
if nums[mid] >= target:
right = mid - 1
else:
left = mid + 1
if 0 <= left < len(nums) and nums[left] == target:
result[0] = left
left, right = 0, len(nums) - 1
while left <= right:
mid = (left + right) // 2
# note that we move left pointer when nums[mid] == target
# to find the last occurrence of target
if nums[mid] > target:
right = mid - 1
else:
left = mid + 1
if 0 <= right < len(nums) and nums[right] == target:
result[1] = right
return result
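# Worked examples for the two binary searches above.
print(search_range([5, 7, 7, 8, 8, 10], 8))  # [3, 4]
print(search_range([5, 7, 7, 8, 8, 10], 6))  # [-1, -1]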
| 13,229
|
def process_mean_results(data, capacity, constellation, scenario, parameters):
"""
Process results.
"""
output = []
adoption_rate = scenario[1]
overbooking_factor = parameters[constellation.lower()]['overbooking_factor']
constellation_capacity = capacity[constellation]
max_capacity = constellation_capacity['capacity_kmsq']
number_of_satellites = constellation_capacity['number_of_satellites']
satellite_coverage_area = constellation_capacity['satellite_coverage_area']
for idx, item in data.iterrows():
users_per_km2 = item['pop_density_km2'] * (adoption_rate / 100)
active_users_km2 = users_per_km2 / overbooking_factor
if active_users_km2 > 0:
per_user_capacity = max_capacity / active_users_km2
else:
per_user_capacity = 0
output.append({
'scenario': scenario[0],
'constellation': constellation,
'number_of_satellites': number_of_satellites,
'satellite_coverage_area': satellite_coverage_area,
'iso3': item['iso3'],
'GID_id': item['regions'],
'population': item['population'],
'area_m': item['area_m'],
'pop_density_km2': item['pop_density_km2'],
'adoption_rate': adoption_rate,
'users_per_km2': users_per_km2,
'active_users_km2': active_users_km2,
'per_user_capacity': per_user_capacity,
})
return output
| 13,230
|
def identify_ossim_kwl(ossim_kwl_file):
"""
parse geom file to identify if it is an ossim model
:param ossim_kwl_file : ossim keyword list file
:type ossim_kwl_file : str
:return ossim kwl info : ossimmodel or None if not an ossim kwl file
:rtype str
"""
try:
with open(ossim_kwl_file, encoding="utf-8") as ossim_file:
content = ossim_file.readlines()
geom_dict = {}
for line in content:
(key, val) = line.split(": ")
geom_dict[key] = val.rstrip()
if "type" in geom_dict:
if geom_dict["type"].strip().startswith("ossim"):
return geom_dict["type"].strip()
return None
except Exception: # pylint: disable=broad-except
return None
| 13,231
|
def conv_cond_concat(x, y):
""" Concatenate conditioning vector on feature map axis.
# Arguments
x: 4D-Tensor
y: 4D-Tensor
# Return
4D-Tensor
"""
x_shapes = x.get_shape()
y_shapes = y.get_shape()
return tf.concat(3, [x, y * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])])
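# The call above uses the pre-1.0 tf.concat(axis, values) signature. A minimal
# TF 2.x sketch of the same operation, for reference:
import tensorflow as tf

def conv_cond_concat_v2(x, y):
    """Tile y over x's spatial dims and concatenate on the channel axis (TF 2.x)."""
    shape = tf.stack([tf.shape(x)[0], tf.shape(x)[1], tf.shape(x)[2], tf.shape(y)[3]])
    return tf.concat([x, y * tf.ones(shape, dtype=y.dtype)], axis=3)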
| 13,232
|
def test_apply_same_period(client):
"""attempt to apply to the same period with the previous application
1. test: error is returned
target_url: /lotteries/<id> [POST]
"""
idx = 1
token = login(client, test_user['secret_id'],
test_user['g-recaptcha-response'])['token']
with client.application.app_context():
target_lottery = Lottery.query.get(idx)
index = target_lottery.index
booking_lottery = Lottery.query.filter_by(
index=index).filter(Lottery.id != idx).first()
user = User.query.filter_by(secret_id=test_user['secret_id']).first()
application = Application(lottery=booking_lottery, user_id=user.id)
db.session.add(application)
db.session.commit()
with mock.patch('api.routes.api.get_time_index',
return_value=index):
resp = client.post(f'/lotteries/{idx}',
headers={'Authorization': f'Bearer {token}'},
json={'group_members': []})
message = resp.get_json()['message']
assert resp.status_code == 400
assert 'already applying to a lottery in this period' in message
| 13,233
|
def get(args, syn):
"""TODO_Sphinx."""
entity = syn.get(args.id)
## TODO: Is this part even necessary?
## (Other than the print statements)
if 'files' in entity:
for file in entity['files']:
src = os.path.join(entity['cacheDir'], file)
dst = os.path.join('.', file.replace(".R_OBJECTS/",""))
print('creating %s' % dst)
if not os.path.exists(os.path.dirname(dst)):
os.makedirs(os.path.dirname(dst))
shutil.copyfile(src, dst)
else:
sys.stderr.write('WARNING: No files associated with entity %s\n' % (args.id,))
syn.printEntity(entity)
return entity
| 13,234
|
def swath_pyresample_gdaltrans(file: str, var: str, subarea: dict, epsilon: float, src_tif: str, dst_tif: str):
"""Reprojects swath data using pyresample and translates the image to EE ready tif using gdal
Parameters
----------
file: str
file to be resampled and uploaded to GC -> EE
var: str
input variable name
subarea: dict
string name of the projection to resample the data onto (pyproj supported)
epsilon: float
The distance to a found value is guaranteed to be no further than (1 + eps)
times the distance to the correct neighbour. Allowing for uncertainty decreases execution time.
src_tif: str
temporary target geotif file
dst_tif: str
final geotif output, GDAL processed
Returns
-------
dict:
global and var attributes
"""
# -----------
# get dataset
# -----------
resample_dst = create_dataset(file=file, key=var, subarea=subarea)
resample_dst['epsilon'] = epsilon
# ---------------
# resample swaths
# ---------------
if var in ('l2_flags', 'QA_flag'):
meta = flags_band(dataset=resample_dst,
key=var,
src_tif=src_tif,
dst_tif=dst_tif)
else:
attrs = resample_dst.pop(var)
glob_attrs = resample_dst.pop('glob_attrs')
proj = resample_dst.pop('proj')
fill_value = attrs['_FillValue']
result = swath_resample(swath=resample_dst, trg_proj=proj)
np.ma.set_fill_value(result, fill_value=fill_value)
# ---------------------
# write out the g-tif-f
# ---------------------
meta = write_tif(file=src_tif,
dataset=result,
data_type='Float32',
metadata={var: attrs, 'glob_attrs': glob_attrs},
area_def=proj)
gdal_translate(src_tif=src_tif,
dst_tif=dst_tif,
ot='Float32',
nodata=fill_value)
return meta
| 13,235
|
def run_drc(cell_name, gds_name, sp_name=None, extract=True, final_verification=False):
"""Run DRC check on a cell which is implemented in gds_name."""
global num_drc_runs
num_drc_runs += 1
write_drc_script(cell_name, gds_name, extract, final_verification, OPTS.openram_temp, sp_name=sp_name)
(outfile, errfile, resultsfile) = run_script(cell_name, "drc")
# Check the result for these lines in the summary:
# Total DRC errors found: 0
# The count is shown in this format:
# Cell replica_cell_6t has 3 error tiles.
# Cell tri_gate_array has 8 error tiles.
# etc.
try:
f = open(resultsfile, "r")
except FileNotFoundError:
debug.error("Unable to load DRC results file from {}. Is klayout set up?".format(resultsfile), 1)
results = f.readlines()
f.close()
errors=len([x for x in results if "<visited>" in x])
# always display this summary
result_str = "DRC Errors {0}\t{1}".format(cell_name, errors)
if errors > 0:
debug.warning(result_str)
else:
debug.info(1, result_str)
return errors
| 13,236
|
def getKeyPairPrivateKey(keyPair):
"""Extracts the private key from a key pair.
@type keyPair: string
@param keyPair: public/private key pair
@rtype: base string
@return private key PEM text
"""
return crypto.dump_privatekey(crypto.FILETYPE_PEM, keyPair)
| 13,237
|
def playbook_input(request, playbook_id, config_file=None, template=None):
"""Playbook input view."""
# Get playbook
playbook = Playbook.objects.get(pk=playbook_id)
# Get username
user = str(request.user)
# Check user permissions
if user not in playbook.permissions.users:
return playbooks(request)
# Get asset name if provided
asset_name = request.POST.get('asset_name', None)
# Get Assets
if playbook.asset_filter != '*':
inventory = netspot.NetSPOT()
assets = inventory.search(playbook.asset_filter, key='asset')
else:
assets = None
# Get config if config_file is provided
config = None
if config_file:
with open(config_file, 'r') as file_handle:
config = file_handle.read().strip()
variables = PlaybookVariable.objects.filter(playbook=playbook)
return render(
request,
'playbook.htm',
context={'playbook': playbook.name,
'playbook_id': playbook.id,
'assets': assets,
'asset_name': asset_name,
'asset_filter': playbook.asset_filter,
'user_auth': playbook.user_auth,
'inputs': variables,
'config_file': config_file,
'config': config,
'template': template,
'description': playbook.description},
)
| 13,238
|
def _filter_gtf_df(GTF_df, col, selection, keep_columns, silent=False):
"""
Filter a GTF on a specific feature type (e.g., genes)
Parameters:
-----------
GTF_df
pandas DataFrame of a GTF
type: pd.DataFrame
col
colname on which df.loc will be performed
type: str
selection
value in df[col]
type: str, int, float, etc. (most likely str)
keep_columns
A list of strings of colnames to keep. If False (default behavior), all cols are kept.
type: bool
default: False
silent
default: False
type: bool
Returns:
--------
GTF_filtered
type: pandas.DataFrame
"""
msg = _Messages(silent)
msg.filtering(col, selection)
return GTF_df.loc[GTF_df[col] == selection][keep_columns]
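# Usage sketch with a toy GTF-like frame (assumes the module's _Messages helper is
# available, as it is for the function above).
import pandas as pd
toy_gtf = pd.DataFrame({
    "seqname": ["chr1", "chr1", "chr2"],
    "feature": ["gene", "transcript", "gene"],
    "gene_name": ["A", "A", "B"],
})
genes = _filter_gtf_df(toy_gtf, col="feature", selection="gene",
                       keep_columns=["seqname", "gene_name"])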
| 13,239
|
def validation_generator_for_dir(data_dir, model_dict):
"""Create a Keras generator suitable for validation
No data augmentation is performed.
:param data_dir: folder with subfolders for the classes and images therein
:param model_dict: dict as returned by `create_custom_model`
:returns: a generator for batches suitable for validating the model
:rtype: ??
"""
return _generator_for_dir(test_datagen, data_dir, model_dict)
| 13,240
|
def convert_cbaois_to_kpsois(cbaois):
"""Convert coordinate-based augmentables to KeypointsOnImage instances.
Parameters
----------
cbaois : list of imgaug.augmentables.bbs.BoundingBoxesOnImage or list of imgaug.augmentables.bbs.PolygonsOnImage or list of imgaug.augmentables.bbs.LineStringsOnImage or imgaug.augmentables.bbs.BoundingBoxesOnImage or imgaug.augmentables.bbs.PolygonsOnImage or imgaug.augmentables.bbs.LineStringsOnImage
Coordinate-based augmentables to convert, e.g. bounding boxes.
Returns
-------
list of imgaug.augmentables.kps.KeypointsOnImage or imgaug.augmentables.kps.KeypointsOnImage
``KeypointsOnImage`` instances containing the coordinates of input
`cbaois`.
"""
if not isinstance(cbaois, list):
return cbaois.to_keypoints_on_image()
kpsois = []
for cbaoi in cbaois:
kpsois.append(cbaoi.to_keypoints_on_image())
return kpsois
| 13,241
|
def gen_stream_from_zip(zip_path, file_extension='wav', label_files=None, label_names=None, utt2spk=None,
corpus_name=None, is_speech_corpus=True, is_rir=False, get_duration=False):
""" Generate speech stream from zip file and utt2spk. The zip file contains wavfiles.
Parameters
-----------
zip_path: path of the zip file that contains the waveforms.
label_files: list of label files. Each line of label_files contains label for one utterance and have following
format:
utt_id_1 label_1
utt_id_2 label_2
...
where utt_id_1 and utt_id_2 are utterance IDs of the sentences and can be any string as far as each utterance
has an unique ID. The utt_ids must be compatible with the file_names (excluding extension) in the zip file.
file_extension: define the extension of the files in the zip file. Used to filter out non-waveform files.
label_names: list of strings specifying the name of the label_files, e.g. "frame_label', 'word_label', etc.
utt2spk: a dictionary mapping from utterance ID to speaker ID. If not provided, corpus_name must be provided.
is_speech_corpus: bool, whether the zip contains speech.
is_rir: bool, whether the zip contains RIR. If True, expect a config file in the zip that contains the meta data
info about the RIRs.
get_duration: bool, whether to get duration of the waveforms
Returns:
An object of type SpeechDataStream, RIRDataStream, or DataStream.
"""
wav_reader = reader.ZipWaveIO(precision="float32")
zip_file = zipfile.ZipFile(zip_path)
all_list = zip_file.namelist()
wav_list = [i for i in all_list if os.path.splitext(i)[1][1:].lower() == file_extension]
utt_id_wav = wavlist2uttlist(wav_list)
# sort wav_list by utterance ID
tmp = sorted(zip(utt_id_wav, wav_list))
utt_id_wav = [i[0] for i in tmp]
wav_list = [i[1] for i in tmp]
def get_label(label_lines, selected_utt_id):
selected_label_list = []
for line in label_lines:
tmp = line.split(" ")
utt_id = tmp[0]
if utt_id in selected_utt_id:
tmp_label = np.asarray([int(j) for j in tmp[1:] if len(j)>0])[np.newaxis,:]
selected_label_list.append(tmp_label)
return selected_label_list
if label_files is not None:
# Find the intersection of the utterance IDs
selected_utt_id = set(utt_id_wav)
utt_id_label = []
label_file_lines = []
for i in range(len(label_files)):
lines = my_cat(label_files[i])
lines.sort() # each lines start with utterance ID, hence effectively sort the labels with utterance ID.
curr_utt_id_label = [i.split(" ")[0] for i in lines]
selected_utt_id = set(curr_utt_id_label) & selected_utt_id
utt_id_label.append(curr_utt_id_label)
label_file_lines.append(lines)
# Build DataStream for each label types
label_streams = dict()
if label_names is None:
label_names = ['label_'+str(i) for i in range(len(label_files))]
for i in range(len(label_files)):
selected_label_list = get_label(label_file_lines[i], selected_utt_id) # selected_label_list is sorted, as label_file_lines[i] is sorted.
label_streams[label_names[i]] = DataStream(selected_label_list, is_file=False, reader=None)
selected_wav_list = [wav_list[i] for i in range(len(wav_list)) if utt_id_wav[i] in selected_utt_id]
selected_utt_id = list(selected_utt_id)
selected_utt_id.sort()
# note that selected_wav_list, selected_label_list, and selected_utt_id are all sorted by utterance ID. So they
# are guaranteed to have one-to-one correspondence if the utterance IDs are unique.
else:
label_streams = None
selected_utt_id = utt_id_wav
selected_wav_list = wav_list
root_dir = zip_path + '@/'
if is_speech_corpus:
assert utt2spk is not None or corpus_name is not None
data_stream = DataStream(selected_wav_list, is_file=True, reader=wav_reader, root=root_dir)
if corpus_name == 'LibriSpeech':
corpus_stream = LibriDataStream(selected_utt_id, data_stream, label_streams=label_streams)
elif corpus_name == 'WSJ':
corpus_stream = WSJDataStream(selected_utt_id, data_stream, label_streams=label_streams)
elif corpus_name == 'TIMIT':
corpus_stream = TimitDataStream(selected_utt_id, data_stream, label_streams=label_streams)
else: # for unknown corpus, you need to provide the utt2spk mapping.
corpus_stream = SpeechDataStream(selected_utt_id, data_stream, utt2spk=utt2spk, label_streams=label_streams)
elif is_rir:
for i in all_list:
if os.path.splitext(i)[1][1:] == 'pkl':
config_file = i
break
byte_chunk = zip_file.read(config_file)
byte_stream = io.BytesIO(byte_chunk)
config = pickle.load(byte_stream)
zip_base = os.path.splitext(os.path.basename(zip_path))[0]
wav_list = [zip_base+'/'+i['file'] for i in config]
data_stream = RIRStream(wav_list, config=config, is_file=True, reader=wav_reader, root=root_dir)
corpus_stream = data_stream
else:
data_stream = DataStream(selected_wav_list, is_file=True, reader=wav_reader, root=root_dir)
corpus_stream = data_stream
if get_duration:
if is_speech_corpus:
corpus_stream.data_stream.set_data_len()
corpus_stream.data_stream.reader = reader.ZipWaveIO(precision="float32")
else:
corpus_stream.set_data_len()
corpus_stream.reader = reader.ZipWaveIO(precision="float32")
return corpus_stream
| 13,242
|
def is_partinioned_beurocracy(dsc):
""" Partitioned beurocracy
Args:
dsc
Vars:
standardised_training_programmes (bool) :
relative_theoretical_cohesion (bool) :
focus_on_analytical_work (bool) :
training_programmes (list) :
Returns:
Theory:
standardised training programmes, relative theoretical cohesion and a focus on
analytical work (very little control of empirical phenomena). Anglo- Saxon neoclassical
economics, which operates like a
Examples:
Anglo- Saxon neoclassical economics
"""
example_disciplines = ['Anglo- Saxon neoclassical economics']
return
| 13,243
|
def get_query_dsl(
query_string, global_filters=None, facets_query_size=20, default_operator='and'):
"""
returns an elasticsearch query dsl for a query string
param: query_string : an expression of the form
type: person title:foo AND description:bar
where type corresponds to an elastic search document type
which gets added as a filter
param: global_filters : a dictionary of the form
{user_id: 1234}. This gets added as a filter to the query
so that the query can be narrowed down to fewer documents.
It is translated into an elastic search term filter.
"""
global FACETS_QUERY_SIZE, DEFAULT_OPERATOR
FACETS_QUERY_SIZE = facets_query_size
DEFAULT_OPERATOR = default_operator
global_filters = global_filters if global_filters else {}
expression = tokenizer.tokenize(query_string)
bool_lists = expression['query']['filtered']['filter']['bool']
[bool_lists['should'].append({"term": orele}) for orele in global_filters.get('or', [])]
[bool_lists['must'].append({"term": andele}) for andele in global_filters.get('and', [])]
[bool_lists['must_not'].append({"term": notele}) for notele in global_filters.get('not', [])]
if 'sort' in global_filters:
expression['sort'] = global_filters.get('sort')
return expression
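# Usage sketch built from the docstring's examples (relies on the module-level
# tokenizer used above; the filter and sort values are illustrative).
dsl = get_query_dsl(
    "type: person title:foo AND description:bar",
    global_filters={
        "and": [{"user_id": 1234}],
        "or": [],
        "not": [],
        "sort": [{"created_at": "desc"}],
    },
)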
| 13,244
|
def prune_visualization_dict(visualization_dict):
"""
Get rid of empty entries in visualization dict
:param visualization_dict:
:return:
"""
new_visualization_dict = {}
# when the form is left blank the entries of visualization_dict have
# COLUMN_NAME key that points to an empty list
for vis_key, vis_dict in visualization_dict.items():
if vis_dict.get(COLUMN_NAME):
new_visualization_dict[vis_key] = vis_dict
return new_visualization_dict
| 13,245
|
def get_file(level, lesson, file_type):
"""Wrap method to download file
"""
msg = 'level: %s; lesson: %s; file type: %s' % (level, lesson, file_type)
print('Processing ' + msg)
lesson_url = get_lesson_url(level, lesson)
if lesson_url is None:
print('Lesson URL is None')
return
if file_type in ['mp3', 'pdf']:
file_url = get_file_url(lesson_url, level, lesson, file_type)
try:
download2(file_url, level, lesson, file_type)
except Exception as e:
print('Exception ' + str(e) + ' occurs for ' + msg)
else:
print('File type %s is not recognized.' % file_type)
| 13,246
|
def _LocationListToGoTo( request_data, positions ):
"""Convert a LSP list of locations to a ycmd GoTo response."""
try:
if len( positions ) > 1:
return [
responses.BuildGoToResponseFromLocation(
*_PositionToLocationAndDescription( request_data, position ) )
for position in positions
]
return responses.BuildGoToResponseFromLocation(
*_PositionToLocationAndDescription( request_data, positions[ 0 ] ) )
except ( IndexError, KeyError ):
raise RuntimeError( 'Cannot jump to location' )
| 13,247
|
def calculate_average_grades_and_deviation(course):
"""Determines the final average grade and deviation for a course."""
avg_generic_likert = []
avg_contribution_likert = []
dev_generic_likert = []
dev_contribution_likert = []
avg_generic_grade = []
avg_contribution_grade = []
dev_generic_grade = []
dev_contribution_grade = []
for __, contributor, __, results, __ in calculate_results(course):
average_likert = avg([result.average for result in results if result.question.is_likert_question])
deviation_likert = avg([result.deviation for result in results if result.question.is_likert_question])
average_grade = avg([result.average for result in results if result.question.is_grade_question])
deviation_grade = avg([result.deviation for result in results if result.question.is_grade_question])
(avg_contribution_likert if contributor else avg_generic_likert).append(average_likert)
(dev_contribution_likert if contributor else dev_generic_likert).append(deviation_likert)
(avg_contribution_grade if contributor else avg_generic_grade).append(average_grade)
(dev_contribution_grade if contributor else dev_generic_grade).append(deviation_grade)
# the final total grade will be calculated by the following formula (GP = GRADE_PERCENTAGE, CP = CONTRIBUTION_PERCENTAGE):
# final_likert = CP * likert_answers_about_persons + (1-CP) * likert_answers_about_courses
# final_grade = CP * grade_answers_about_persons + (1-CP) * grade_answers_about_courses
# final = GP * final_grade + (1-GP) * final_likert
final_likert_avg = mix(avg(avg_contribution_likert), avg(avg_generic_likert), settings.CONTRIBUTION_PERCENTAGE)
final_likert_dev = mix(avg(dev_contribution_likert), avg(dev_generic_likert), settings.CONTRIBUTION_PERCENTAGE)
final_grade_avg = mix(avg(avg_contribution_grade), avg(avg_generic_grade), settings.CONTRIBUTION_PERCENTAGE)
final_grade_dev = mix(avg(dev_contribution_grade), avg(dev_generic_grade), settings.CONTRIBUTION_PERCENTAGE)
final_avg = mix(final_grade_avg, final_likert_avg, settings.GRADE_PERCENTAGE)
final_dev = mix(final_grade_dev, final_likert_dev, settings.GRADE_PERCENTAGE)
return final_avg, final_dev
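# The avg() and mix() helpers referenced above are not shown in this snippet. One
# plausible reading of mix(), matching the formula in the comment (an alpha-weighted
# combination that tolerates a missing side), is:
def mix(a, b, alpha):
    if a is None:
        return b
    if b is None:
        return a
    return alpha * a + (1 - alpha) * b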
| 13,248
|
def get_slurm_params(n,runtime=None,mem=None,n_jobs=None):
"""Get remaining parameters to submit SLURM jobs based on specified parameters and number of files to process.
Parameters
----------
n : int
Number of files to process.
runtime : str, None
Time per run, string formatted 'hours:minutes:seconds".
mem : str, None
Memory, string formatted for SLURM e.g. '1G', '500MB'.
n_jobs : int, None
Number of SLURM jobs to launch.
Returns
-------
str
Time per job.
str
Memory per job.
int
Number of jobs.
"""
#TIME ~5s per subject (ADHD200 and fmri dev dataset)
#MEM 1G overall (cleans up after each subject, takes about peak around ~500)
#Tested w/ MIST64 and MIST444
if mem == None:
mem = '1G'
if runtime==None:
if n_jobs==None:
if n < 1000:
n_per_job = 50
elif n < 10000:
n_per_job = 200
else:
n_per_job = 500
n_jobs = int(n/n_per_job)
else:
n_per_job = int(n/n_jobs) #round down (add one later to calc for time)
if n_per_job == 0:
n_per_job = 1
sec = 2*n_per_job*5 #(seconds)
if sec < 300:
sec = 300
runtime = str(datetime.timedelta(seconds=sec))
else:
if len(runtime.split(':')) == 3:
sec = int(runtime.split(':')[0])*3600 + int(runtime.split(':')[1])*60 + int(runtime.split(':')[2])
elif len(runtime.split(':')) == 2:
sec = int(runtime.split(':')[0])*60 + int(runtime.split(':')[1])
if n_jobs == None:
n_jobs = int((10*n)/sec)
if n_jobs == 0:
n_jobs = 1
return runtime,mem,n_jobs
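# Worked example: 2,500 files with everything left to the defaults gives 200 files
# per job, 12 jobs, 2000 s (33 min 20 s) per job and 1G of memory.
print(get_slurm_params(2500))  # ('0:33:20', '1G', 12)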
| 13,249
|
def get_db_comment_text(file_name) -> DataFrame:
"""
Extract text from a db_comment Excel file and return it as a DataFrame.
:param file_name: input file name (str)
:return: text extracted from the input file
"""
# :return: DataFrame of nouns extracted from the input file's text by a morphological analyzer
start_time = time.time()
print('\r\nget_db_comment_text: %s' % file_name)
excel_app = win32com.client.Dispatch('Excel.Application')
full_path_file_name = os.path.abspath(file_name)
excel_file = excel_app.Workbooks.Open(full_path_file_name, True)
# region Table comment
table_comment_sheet = excel_file.Worksheets(1)
last_row = table_comment_sheet.Range("A1").End(-4121).Row # -4121: xlDown
table_comment_range = 'A2:D%s' % (str(last_row))
print('table_comment_range : %s (%d rows)' % (table_comment_range, last_row - 1))
table_comments = table_comment_sheet.Range(table_comment_range).Value2
df_table = pd.DataFrame(list(table_comments),
columns=['DB', 'Schema', 'Table', 'Text'])
df_table['FileName'] = full_path_file_name
df_table['FileType'] = 'table'
df_table['Page'] = 0
df_table = df_table[df_table.Text.notnull()]  # drop rows with no Text value
df_table['Source'] = df_table['DB'] + '.' + df_table['Schema'] + '.' + df_table['Table'] \
+ '(' + df_table['Text'].astype(str) + ')'
# print(df_table)
# endregion
# region Column comment
column_comment_sheet = excel_file.Worksheets(2)
last_row = column_comment_sheet.Range("A1").End(-4121).Row # -4121: xlDown
column_comment_range = 'A2:E%s' % (str(last_row))
print('column_comment_range : %s (%d rows)' % (column_comment_range, last_row - 1))
column_comments = column_comment_sheet.Range(column_comment_range).Value2
df_column = pd.DataFrame(list(column_comments),
columns=['DB', 'Schema', 'Table', 'Column', 'Text'])
df_column['FileName'] = full_path_file_name
df_column['FileType'] = 'column'
df_column['Page'] = 0
df_column = df_column[df_column.Text.notnull()]  # drop rows with no Text value
df_column['Source'] = df_column['DB'] + '.' + df_column['Schema'] + '.' + df_column['Table'] \
+ '.' + df_column['Column'] + '(' + df_column['Text'].astype(str) + ')'
# print(df_column)
# endregion
excel_file.Close()
df_text = df_column.append(df_table, ignore_index=True)
# print(df_text)
end_time = time.time()
# elapsed_time = end_time - start_time
elapsed_time = str(datetime.timedelta(seconds=end_time - start_time))
print('[pid:%d] get_db_comment_text elapsed time: %s' % (os.getpid(), elapsed_time))
print('text count: %s' % str(df_text.shape[0]))
# return get_word_list(df_text)
return df_text
| 13,250
|
def test_migration_trans_sync_err(mock_trans):
"""
Tests the device returning an error when the migration state is written to.
"""
global ctx, sock
data = VFIO_DEVICE_STATE_V1_SAVING.to_bytes(c.sizeof(c.c_int), 'little')
write_region(ctx, sock, VFU_PCI_DEV_MIGR_REGION_IDX, offset=0,
count=len(data), data=data, expect=errno.EPERM)
ret = vfu_run_ctx(ctx)
assert ret == 0
| 13,251
|
def source_open() -> bool:
"""Open a source MS Excel spreadsheet file.
Returns
-------
boolean
Flag about successful processing.
"""
try:
Source.wbook = openpyxl.load_workbook(cmdline.workbook)
except Exception:
logger.error(
'Cannot open the MS Excel workbook %s',
cmdline.workbook
)
return False
return True
| 13,252
|
def cli():
"""
Just pong it
"""
click.echo('ping')
| 13,253
|
def make_prompt(token: str, config: Path, model: str = ''):
"""Make a summary using the Studio21 API
Args:
token (str): Your api token to use.
config (Path): The path to the config file.
model (str, optional): Which model to use. If empty
then read the model from the config file. Defaults to ''.
Returns:
bool: Whether or not to continue calling the api.
"""
header = {'Authorization': f'Bearer {token}'}
with open(config) as f:
cfg = yaml.safe_load(f)
if not model:
model = cfg['model']
logger.debug(f'Using model {model} for generation.')
url = f'https://api.ai21.com/studio/v1/j1-{model}/complete'
cfg_name = os.path.basename(config)
prompt, extra, output_dir = generate_summary_prompt('studio21', config=cfg_name)
# If the prompt is over 1900 tokens we will most likely get
# An API error. The model can only take 2048 tokens.
prompt_tokens = len(prompt.split())
if prompt_tokens > 1800:
logger.warning(f'Our prompt was too long. Had {prompt_tokens} tokens.')
return True
else:
logger.debug(f'Our prompt had {prompt_tokens} tokens.')
data = {'prompt': prompt, **cfg['apiParams']}
result = requests.post(url, headers=header, json=data)
if result.status_code >= 400:
logger.critical(f'API request error!!! {result.status_code}: {result.text} {result.reason}')
# A 429 status code means we have reached our quota. So we return false.
# Any other code we ignore and continue.
return result.status_code != 429
else:
text = result.json()['completions'][0]['data']['text']
json.dump(result.json(), open(output_dir+'/output.json', 'w'), indent=4)
with open(f'{output_dir}/{cfg["summaryType"]}.txt', 'w') as f:
f.write(text+'\n'+extra)
return True
| 13,254
|
def get_license_match_error(lic, lic_file_path):
"""Returns an Error of the type 'warning' if the FreeRTOS license is present in the
input file. Otherwise an empty list is returned.
"""
# Get the words in the license template
with open('license.templ', 'r') as file:
template_lic = file.read()
template_lic_words = list(filter(None, re.split('[^0-9a-zA-Z]+', template_lic)))
# Split on non-alphanumeric characters
# re.split() will match the empty string.
lic_words = list(filter(None, re.split('[^0-9a-zA-Z]+', lic)))
i = 0
same = False
for i, word in enumerate(lic_words):
if word == template_lic_words[0]:
# Element wise comparison of the two arrays.
if lic_words[i:i+len(template_lic_words)] == template_lic_words:
same = True
break
if same:
return [Error(type='warning', info='FreeRTOS license is in file: ' + lic_file_path)]
return []
| 13,255
|
def test_craysdbproc_from_cache():
"""
Initialize CraySdb from decompressed cache
"""
# Create an uncompressed cache file
tokiotest.TEMP_FILE.close()
tokiotest.gunzip(tokiotest.SAMPLE_XTDB2PROC_FILE, tokiotest.TEMP_FILE.name)
print("Decompressed %s to %s" % (tokiotest.SAMPLE_XTDB2PROC_FILE, tokiotest.TEMP_FILE.name))
# Read from a cache file
craysdbproc = tokio.connectors.craysdb.CraySdbProc(tokiotest.TEMP_FILE.name)
verify_craysdbproc(craysdbproc)
| 13,256
|
def start_workers(size, delete=False, migrate=False):
"""Starts FluxxWorkers.
:returns: Pair of queues.
"""
streams = (queue.Queue(), queue.Queue(maxsize=size))
for _ in range(THREAD_COUNT):
worker = FluxxWorker(streams, delete, migrate)
worker.daemon = True
worker.start()
return streams
| 13,257
|
def harmonizationApply(data, covars, model):
"""
Applies harmonization model with neuroCombat functions to new data.
Arguments
---------
data : a numpy array
data to harmonize with ComBat, dimensions are N_samples x N_features
covars : a pandas DataFrame
contains covariates to control for during harmonization
all covariates must be encoded numerically (no categorical variables)
must contain a single column "SITE" with site labels for ComBat
dimensions are N_samples x (N_covariates + 1)
model : a dictionary of model parameters
the output of a call to harmonizationLearn()
Returns
-------
bayes_data : a numpy array
harmonized data, dimensions are N_samples x N_features
"""
# transpose data as per ComBat convention
data = data.T
# prep covariate data
batch_col = covars.columns.get_loc('SITE')
cat_cols = []
num_cols = [covars.columns.get_loc(c) for c in covars.columns if c!='SITE']
covars = np.array(covars, dtype='object')
# load the smoothing model
smooth_model = model['smooth_model']
smooth_cols = smooth_model['smooth_cols']
### additional setup code from neuroCombat implementation:
# convert batch col to integer
covars[:,batch_col] = np.unique(covars[:,batch_col],return_inverse=True)[-1]
# create dictionary that stores batch info
(batch_levels, sample_per_batch) = np.unique(covars[:,batch_col],return_counts=True)
info_dict = {
'batch_levels': batch_levels.astype('int'),
'n_batch': len(batch_levels),
'n_sample': int(covars.shape[0]),
'sample_per_batch': sample_per_batch.astype('int'),
'batch_info': [list(np.where(covars[:,batch_col]==idx)[0]) for idx in batch_levels]
}
###
# check sites are identical in training dataset
check_sites = info_dict['n_batch']==model['info_dict']['n_batch']
if not check_sites:
raise ValueError('Number of sites in holdout data not identical to training data.')
# apply ComBat without re-learning model parameters
design = make_design_matrix(covars, batch_col, cat_cols, num_cols)
### additional setup if smoothing is performed
if smooth_model['perform_smoothing']:
# create cubic spline basis for smooth terms
X_spline = covars[:, smooth_cols].astype(float)
bs_basis = smooth_model['bsplines_constructor'].transform(X_spline)
# construct formula and dataframe required for gam
formula = 'y ~ '
df_gam = {}
for b in batch_levels:
formula = formula + 'x' + str(b) + ' + '
df_gam['x' + str(b)] = design[:, b]
for c in num_cols:
if c not in smooth_cols:
formula = formula + 'c' + str(c) + ' + '
df_gam['c' + str(c)] = covars[:, c].astype(float)
formula = formula[:-2] + '- 1'
df_gam = pd.DataFrame(df_gam)
# check formulas are identical in training dataset
check_formula = formula==smooth_model['formula']
if not check_formula:
raise ValueError('GAM formula for holdout data not identical to training data.')
# for matrix operations, a modified design matrix is required
design = np.concatenate((df_gam, bs_basis), axis=1)
###
s_data, stand_mean, var_pooled = ApplyStandardizationAcrossFeatures(data, design, info_dict, model)
bayes_data = adjust_data_final(s_data, design, model['gamma_star'], model['delta_star'],
stand_mean, var_pooled, info_dict)
# transpose data to return to original shape
bayes_data = bayes_data.T
return bayes_data
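# Hedged usage sketch. harmonizationLearn() is only referenced in the docstring
# above and is defined elsewhere in this package, so the call below is
# illustrative and the shapes are assumptions (100 samples, 50 features, two sites):
#
#   import numpy as np
#   import pandas as pd
#   train_data = np.random.rand(100, 50)                      # N_samples x N_features
#   covars = pd.DataFrame({'SITE': ['A'] * 50 + ['B'] * 50})  # extra numeric covariates may be added as columns
#   model, _ = harmonizationLearn(train_data, covars)
#   harmonized = harmonizationApply(holdout_data, holdout_covars, model)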
| 13,258
|
def create_back_links(env, option):
"""
Create back-links in all found needs.
But do this only once, as all needs are already collected and this sorting is for all
needs and not only for the ones of the current document.
    :param env: sphinx environment
    :param option: name of the link option for which back-links are created
:return: None
"""
option_back = f"{option}_back"
if env.needs_workflow[f"backlink_creation_{option}"]:
return
needs = env.needs_all_needs
for key, need in needs.items():
for link in need[option]:
link_main = link.split(".")[0]
try:
link_part = link.split(".")[1]
except IndexError:
link_part = None
if link_main in needs:
if key not in needs[link_main][option_back]:
needs[link_main][option_back].append(key)
# Handling of links to need_parts inside a need
if link_part and link_part in needs[link_main]["parts"]:
if option_back not in needs[link_main]["parts"][link_part].keys():
needs[link_main]["parts"][link_part][option_back] = []
needs[link_main]["parts"][link_part][option_back].append(key)
env.needs_workflow[f"backlink_creation_{option}"] = True
| 13,259
|
def plot_single_hand_2d(keypoints, ax, occlusion=None, color_fixed=None, linewidth='1'):
""" Plots a hand stick figure into a matplotlib figure. """
for connection, color in hand_bones:
coord1 = keypoints[connection[0], :]
coord2 = keypoints[connection[1], :]
coords = np.stack([coord1, coord2])
if (coords[:, 0] <= 1).any():
continue
if (coords[:, 1] <= 1).any():
continue
if occlusion is not None:
if not occlusion[0] or not occlusion[1]:
continue
if color_fixed is None:
ax.plot(coords[:, 0], coords[:, 1], color=color, linewidth=linewidth)
else:
ax.plot(coords[:, 0], coords[:, 1], color_fixed, linewidth=linewidth)
| 13,260
|
def from_rkm(code):
"""Convert an RKM code string to a string with a decimal point.
Parameters
----------
code : str
RKM code string.
Returns
-------
str
String with a decimal point and an R value.
Examples
--------
>>> from pyaedt.circuit import from_rkm
>>> from_rkm('R47')
'0.47'
>>> from_rkm('4R7')
'4.7'
>>> from_rkm('470R')
'470'
>>> from_rkm('4K7')
'4.7k'
>>> from_rkm('47K')
'47k'
>>> from_rkm('47K3')
'47.3k'
>>> from_rkm('470K')
'470k'
>>> from_rkm('4M7')
'4.7M'
"""
# Matches RKM codes that start with a digit.
# fd_pattern = r'([0-9]+)([LREkKMGTFmuµUnNpP]+)([0-9]*)'
fd_pattern = r'([0-9]+)([{}]+)([0-9]*)'.format(''.join(RKM_MAPS.keys()), )
# matches rkm codes that end with a digit
# ld_pattern = r'([0-9]*)([LREkKMGTFmuµUnNpP]+)([0-9]+)'
ld_pattern = r'([0-9]*)([{}]+)([0-9]+)'.format(''.join(RKM_MAPS.keys()))
fd_regex = re.compile(fd_pattern, re.I)
ld_regex = re.compile(ld_pattern, re.I)
for regex in [fd_regex, ld_regex]:
m = regex.match(code)
if m:
fd, base, ld = m.groups()
ps = RKM_MAPS[base]
if ld:
return_str = ''.join([fd, '.', ld, ps])
else:
return_str = ''.join([fd, ps])
return return_str
return code
| 13,261
|
def update_cache(makefile_dirs: List[str]) -> None:
"""Given a list of directories containing Makefiles, update caches."""
import multiprocessing
cpus = multiprocessing.cpu_count()
fnames1: List[str] = []
fnames2: List[str] = []
for path in makefile_dirs:
cdp = f'cd {path} && ' if path else ''
# First, make sure all cache files are built.
mfpath = os.path.join(path, 'Makefile')
print(f'Building efrocache targets for {Clr.SBLU}{mfpath}{Clr.RST}...')
subprocess.run(f'{cdp}make -j{cpus} efrocache-build',
shell=True,
check=True)
rawpaths = subprocess.run(f'{cdp}make efrocache-list',
shell=True,
check=True,
capture_output=True).stdout.decode().split()
# Make sure the paths they gave were relative.
for rawpath in rawpaths:
if rawpath.startswith('/'):
raise RuntimeError(f'Invalid path returned for caching '
f'(absolute paths not allowed): {rawpath}')
# Break these into 2 lists, one of which will be included in the
# starter-cache.
for rawpath in rawpaths:
fullpath = os.path.join(path, rawpath)
# The main reason for this cache is to reduce round trips to
# the staging server for tiny files, so let's include small files
            # only here. For larger stuff it's ok to have a request per file.
if os.path.getsize(fullpath) < 100000:
fnames1.append(fullpath)
else:
fnames2.append(fullpath)
# Ok, we've got 2 lists of filenames that we need to cache in the cloud.
# First, however, let's look up modtimes for everything and if everything
# is exactly the same as last time we can skip this step.
hashes = _gen_hashes(fnames1 + fnames2)
if os.path.isfile(UPLOAD_STATE_CACHE_FILE):
with open(UPLOAD_STATE_CACHE_FILE) as infile:
hashes_existing = infile.read()
else:
hashes_existing = ''
if hashes == hashes_existing:
print(
f'{Clr.SBLU}Efrocache state unchanged;'
f' skipping cache push.{Clr.RST}',
flush=True)
else:
_upload_cache(fnames1, fnames2, hashes, hashes_existing)
print(f'{Clr.SBLU}Efrocache update successful!{Clr.RST}')
# Write the cache state so we can skip the next run if nothing changes.
os.makedirs(os.path.dirname(UPLOAD_STATE_CACHE_FILE), exist_ok=True)
with open(UPLOAD_STATE_CACHE_FILE, 'w') as outfile:
outfile.write(hashes)
| 13,262
|
def register_refinement(name, refinementof, cython_cimport=None, cython_cyimport=None,
cython_pyimport=None, cython_c2py=None, cython_py2c=None):
"""This function will add a refinement to the type system so that it may be used
normally with the rest of the type system.
"""
refined_types[name] = refinementof
cyci = _ensure_importable(cython_cimport)
_cython_cimport_base_types[name] = _cython_cimport_template_types[name] = cyci
cycyi = _ensure_importable(cython_cyimport)
_cython_cyimport_base_types[name] = _cython_cyimport_template_types[name] = cycyi
cypyi = _ensure_importable(cython_pyimport)
_cython_pyimport_base_types[name] = _cython_pyimport_template_types[name] = cypyi
if isinstance(cython_c2py, basestring):
cython_c2py = (cython_c2py,)
cython_c2py = None if cython_c2py is None else tuple(cython_c2py)
if cython_c2py is not None:
_cython_c2py_conv[name] = cython_c2py
if isinstance(cython_py2c, basestring):
cython_py2c = (cython_py2c, False)
if cython_py2c is not None:
_cython_py2c_conv[name] = cython_py2c
| 13,263
|
def print_err(msg, error=None, fatal=False):
"""Affiche un message d'erreur
Le pendant de print_ok. On met un failed en rouge. Si une erreur
est passé en paramètre, on affiche son message. Si fatal=True, on
fait un sys.exit(1) à la fin.
"""
print("[" + colors['FAIL'] + "FAILED" + colors['ENDC'] + "]")
print(colors['FAIL'] + "<<<<<<<<<<<<<< ERROR <<<<<<<<<<<<<" + colors['ENDC'])
print(msg)
if error is not None:
print(colors['WARNING'] + "%s" % error + colors['ENDC'])
if fatal:
print(colors['FAIL'] + "This Error is fatal !")
print(colors['FAIL'] + ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>" + colors['ENDC'])
sys.stdout.flush()
if fatal:
sys.exit(1)
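# Illustrative usage (relies on this module's colors dict and sys import):
#
#   try:
#       config = open('/etc/myapp.conf')
#   except OSError as exc:
#       print_err('Could not open the configuration file', error=exc)
#   print_err('Unrecoverable state, aborting', fatal=True)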
| 13,264
|
def get_arguments(method, rpc_version):
"""
Get arguments for method in specified Transmission RPC version.
"""
if method in ('torrent-add', 'torrent-get', 'torrent-set'):
args = constants.TORRENT_ARGS[method[-3:]]
elif method in ('session-get', 'session-set'):
args = constants.SESSION_ARGS[method[-3:]]
else:
        raise ValueError('Method "%s" not supported' % (method))
accessible = []
for argument, info in args.iteritems():
valid_version = True
if rpc_version < info[1]:
valid_version = False
if info[2] and info[2] <= rpc_version:
valid_version = False
if valid_version:
accessible.append(argument)
return accessible
| 13,265
|
def compose_local_noises(*functions: NoiseModel) -> NoiseModel:
"""Helper to compose multiple NoiseModel.
Args:
*functions: a list of functions
Returns:
The mathematical composition of *functions. The last element is applied
first. If *functions is [f, g, h], it returns f∘g∘h.
"""
return functools.reduce(
lambda f, g: lambda x: f(g(x)), functions, lambda x: x
)
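# Minimal sketch of the composition order, using plain callables in place of
# real NoiseModel instances (an illustrative assumption; the _demo_* helper is
# not part of the original module):
def _demo_compose_local_noises():
    add_one = lambda x: x + 1
    double = lambda x: x * 2
    composed = compose_local_noises(add_one, double)
    # The last function is applied first: add_one(double(3)) == 7.
    assert composed(3) == 7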
| 13,266
|
def test_request_password_reset_unverified_email(live_server, mailoutbox):
"""
    If the user provides an email address that exists but has not been
    verified, no password reset email should be sent.
"""
user = get_user_model().objects.create_user(username="Test User")
email = models.EmailAddress.objects.create(
address="test@example.com", is_verified=False, user=user
)
data = {"email": email.address}
url = f"{live_server}/rest/password-reset-requests/"
response = requests.post(url, data)
assert response.status_code == 201
assert response.json() == data
assert len(mailoutbox) == 0
| 13,267
|
def sls_build(
repository, tag="latest", base="opensuse/python", mods=None, dryrun=False, **kwargs
):
"""
.. versionchanged:: 2018.3.0
The repository and tag must now be passed separately using the
``repository`` and ``tag`` arguments, rather than together in the (now
deprecated) ``image`` argument.
Build a Docker image using the specified SLS modules on top of base image
.. versionadded:: 2016.11.0
The base image does not need to have Salt installed, but Python is required.
repository
Repository name for the image to be built
.. versionadded:: 2018.3.0
tag : latest
Tag name for the image to be built
.. versionadded:: 2018.3.0
name
.. deprecated:: 2018.3.0
Use both ``repository`` and ``tag`` instead
base : opensuse/python
Name or ID of the base image
mods
A string containing comma-separated list of SLS with defined states to
apply to the base image.
saltenv : base
Specify the environment from which to retrieve the SLS indicated by the
`mods` parameter.
pillarenv
Specify a Pillar environment to be used when applying states. This
can also be set in the minion config file using the
:conf_minion:`pillarenv` option. When neither the
:conf_minion:`pillarenv` minion config option nor this CLI argument is
used, all Pillar environments will be merged together.
.. versionadded:: 2018.3.0
pillar
Custom Pillar values, passed as a dictionary of key-value pairs
.. note::
Values passed this way will override Pillar values set via
``pillar_roots`` or an external Pillar source.
.. versionadded:: 2018.3.0
dryrun: False
when set to True the container will not be committed at the end of
the build. The dryrun succeed also when the state contains errors.
**RETURN DATA**
A dictionary with the ID of the new container. In case of a dryrun,
the state result is returned and the container gets removed.
CLI Example:
.. code-block:: bash
salt myminion docker.sls_build imgname base=mybase mods=rails,web
"""
create_kwargs = __utils__["args.clean_kwargs"](**copy.deepcopy(kwargs))
for key in ("image", "name", "cmd", "interactive", "tty", "extra_filerefs"):
try:
del create_kwargs[key]
except KeyError:
pass
# start a new container
ret = create(
image=base, cmd="sleep infinity", interactive=True, tty=True, **create_kwargs
)
id_ = ret["Id"]
try:
start_(id_)
# Now execute the state into the container
ret = sls(id_, mods, **kwargs)
# fail if the state was not successful
if not dryrun and not __utils__["state.check_result"](ret):
raise CommandExecutionError(ret)
if dryrun is False:
ret = commit(id_, repository, tag=tag)
finally:
stop(id_)
rm_(id_)
return ret
| 13,268
|
def RightDragDrop(x1: int, y1: int, x2: int, y2: int, moveSpeed: float = 1, waitTime: float = OPERATION_WAIT_TIME) -> None:
"""
Simulate mouse right button drag from point x1, y1 drop to point x2, y2.
x1: int.
y1: int.
x2: int.
y2: int.
moveSpeed: float, 1 normal speed, < 1 move slower, > 1 move faster.
waitTime: float.
"""
RightPressMouse(x1, y1, 0.05)
MoveTo(x2, y2, moveSpeed, 0.05)
RightReleaseMouse(waitTime)
| 13,269
|
def parse_date(date):
"""
Parses a date string and returns number of seconds from the EPOCH.
"""
# yyyy-mm-dd [hh:mm:ss[.s][ [+-]hh[:][mm]]]
p = re.compile( r'''(?P<year>\d{1,4}) # yyyy
- #
(?P<month>\d{1,2}) # mm or m
- #
(?P<day>\d{1,2}) # dd or d
#
(?: # [optional time and timezone]
(?:\s|T) #
(?P<hour>\d{1,2}) # hh or h
:? #
(?P<min>\d{1,2})? # mm or m
(?: # [optional seconds]
: #
(?P<sec>\d{1,2}) # ss or s
#
(?: # [optional decisecond]
\. # .
(?P<dsec>\d) # s
)? #
)? #
(?: # [optional timezone]
\s? #
((?: #
(?P<ho>[+-]? # [+ or -]
\d{1,2}) # hh or h
:? # [:]
(?P<mo>\d{2})? # [mm]
) #
| # or
(?:UTC)|(?:Z)) # UTC | Z
)? #
)? #
$ # EOL
''', re.VERBOSE)
m = p.match(date)
if m:
c = m.groupdict(0)
for k, v in c.items():
c[k] = int(v)
# get timezone offset in seconds
tz_offset = c['ho']*HOUR + c['mo']*MINUTE
# Some datasets use the date "0000-01-01 00:00:00" as an origin, even though
# the year zero does not exist in the Gregorian/Julian calendars.
if c['year'] == 0:
c['year'] = 1
year_offset = LEAP_YEAR
else:
year_offset = 0
origin = datetime(c['year'], c['month'], c['day'], c['hour'], c['min'], c['sec'], c['dsec'] * 100000)
dt = origin - EPOCH
return dt.days*DAY + dt.seconds + dt.microseconds*MICROSECOND - year_offset - tz_offset
raise ParserError('Invalid date: %s' % date)
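# Illustrative check of the relative arithmetic; the absolute value depends on
# this module's EPOCH constant, so only the one-day difference is asserted
# (the _demo_* helper is not part of the original module):
def _demo_parse_date():
    start = parse_date('1980-01-01 00:00:00')
    next_day = parse_date('1980-01-02 00:00:00')
    assert next_day - start == DAY  # DAY is this module's seconds-per-day constant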
| 13,270
|
def promote_parameter_to_node(scriptargs: dict): # pylint: disable=too-many-locals
"""Promote a parameter to a target node.
:param scriptargs: kwargs dict from PARMmenu entry.
:return:
"""
# Get the parms to act on.
parms = scriptargs["parms"]
# The start node for the node chooser prompt
start_node = None
parm_tuple: hou.ParmTuple = None
parm_tuple_map: Dict[hou.ParmTuple, List[hou.Parm]] = {}
parm_tuple_nodes = []
# Process all the selected parms, partitioning by parm tuple.
for parm in parms:
parm_tuple = parm.tuple()
# Get or create a list of parms for this tuple.
parms_for_tuple = parm_tuple_map.setdefault(parm_tuple, [])
parms_for_tuple.append(parm)
node = parm_tuple.node()
parm_tuple_nodes.append(node)
# Update the start node to be the parent of this tuple's node.
start_node = node.parent()
# The number of parms in the tuple.
num_components = len(parm_tuple)
# Determine how many components of the tuple we will set.
num_components_to_set = max([len(value) for value in list(parm_tuple_map.values())])
# Prompt for a target node. Start at the parent (the most logical choice?)
result = hou.ui.selectNode(initial_node=start_node)
    # Try to find the selected node.
target_node = hou.node(result)
if target_node is not None:
# Can't promote to a selected node.
if target_node in parm_tuple_nodes:
raise hou.OperationFailed("Cannot promote to a source node.")
    # Whether the target parm should be set to the source value.
set_value = True
# The target node already has a parm tuple with the desired name so we
# should prompt to use it.
if target_node.parmTuple(parm_tuple.name()) is not None:
choice = hou.ui.displayMessage(
"Parameter already exists on {}. Link to existing parameter?".format(
target_node.path()
),
buttons=(
"Yes and keep current value",
"Yes and update value",
"Cancel",
),
severity=hou.severityType.ImportantMessage,
)
# Use parm but keep value, so don't set.
if choice == 0:
set_value = False
# Use parm and update value.
elif choice == 1:
set_value = True
# Bail out since we're cancelling.
else:
return
# No existing parameter so we'll have to create one.
else:
# Get the target node's parm interface.
target_ptg = target_node.parmTemplateGroup()
# The parameter definition for the parm we are trying to link.
parm_template = parm_tuple.parmTemplate()
# If we are trying to link a single parm inside a tuple then modify
# the parm definition to represent that single parm.
if num_components_to_set != num_components:
parm_template.setNumComponents(1)
# Since we're just setting a single component the parms should all
# have the same name so just grab the first.
parm_template.setName(parms[0].name())
# Add the parameter definition to the parm list.
target_ptg.addParmTemplate(parm_template)
# Update the interface with the new definition.
target_node.setParmTemplateGroup(target_ptg)
# Process each parm to set.
for parm in parms:
# Get the target parm.
target_parm = target_node.parm(parm.name())
# Set the target parm to the current value if required.
if set_value:
target_parm.set(parm.eval())
# Create the channel reference.
parm.set(target_parm)
| 13,271
|
def check_free_memory(free_mb):
"""
Check *free_mb* of memory is available, otherwise do pytest.skip
"""
import pytest
try:
mem_free = _parse_size(os.environ['SCIPY_AVAILABLE_MEM'])
msg = '{0} MB memory required, but environment SCIPY_AVAILABLE_MEM={1}'.format(
free_mb, os.environ['SCIPY_AVAILABLE_MEM'])
except KeyError:
mem_free = _get_mem_available()
if mem_free is None:
pytest.skip("Could not determine available memory; set SCIPY_AVAILABLE_MEM "
"variable to free memory in MB to run the test.")
msg = '{0} MB memory required, but {1} MB available'.format(
free_mb, mem_free/1e6)
if mem_free < free_mb * 1e6:
pytest.skip(msg)
| 13,272
|
def CorrectOrWrong(Input, word):
    """Check if Input is inside word"""
    return Input in word
| 13,273
|
def get_fair_metrics(dataset, pred, pred_is_dataset=False):
"""
Measure fairness metrics.
Parameters:
dataset (pandas dataframe): Dataset
pred (array): Model predictions
pred_is_dataset, optional (bool): True if prediction is already part of the dataset, column name 'labels'.
Returns:
fair_metrics: Fairness metrics.
"""
if pred_is_dataset:
dataset_pred = pred
else:
dataset_pred = dataset.copy()
dataset_pred.labels = pred
cols = ['statistical_parity_difference', 'equal_opportunity_difference', 'average_abs_odds_difference', 'disparate_impact', 'theil_index']
obj_fairness = [[0,0,0,1,0]]
fair_metrics = pd.DataFrame(data=obj_fairness, index=['objective'], columns=cols)
for attr in dataset_pred.protected_attribute_names:
idx = dataset_pred.protected_attribute_names.index(attr)
privileged_groups = [{attr:dataset_pred.privileged_protected_attributes[idx][0]}]
unprivileged_groups = [{attr:dataset_pred.unprivileged_protected_attributes[idx][0]}]
classified_metric = ClassificationMetric(dataset,
dataset_pred,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
metric_pred = BinaryLabelDatasetMetric(dataset_pred,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
acc = classified_metric.accuracy()
row = pd.DataFrame([[metric_pred.mean_difference(),
classified_metric.equal_opportunity_difference(),
classified_metric.average_abs_odds_difference(),
metric_pred.disparate_impact(),
classified_metric.theil_index()]],
columns = cols,
index = [attr]
)
fair_metrics = fair_metrics.append(row)
fair_metrics = fair_metrics.replace([-np.inf, np.inf], 2)
return fair_metrics
| 13,274
|
def _queue_into_buffer(transfersession):
"""
Takes a chunk of data from the store to be put into the buffer to be sent to another morango instance.
ALGORITHM: We do Filter Specific Instance Counter arithmetic to get our newest data compared to the server's older data.
We use raw sql queries to place data in the buffer and the record max counter buffer, which matches the conditions of the FSIC,
as well as the partition for the data we are syncing.
"""
filter_prefixes = Filter(transfersession.filter)
server_fsic = json.loads(transfersession.server_fsic)
client_fsic = json.loads(transfersession.client_fsic)
if transfersession.push:
fsics = _fsic_queuing_calc(client_fsic, server_fsic)
else:
fsics = _fsic_queuing_calc(server_fsic, client_fsic)
# if fsics are identical or receiving end has newer data, then there is nothing to queue
if not fsics:
return
profile_condition = ["profile = '{}'".format(transfersession.sync_session.profile)]
partition_conditions = []
# create condition for filtering by partitions
for prefix in filter_prefixes:
partition_conditions += ["partition LIKE '{}%'".format(prefix)]
if filter_prefixes:
partition_conditions = [_join_with_logical_operator(partition_conditions, "OR")]
chunk_size = 200
fsics = list(fsics.items())
fsics_len = len(fsics)
fsics_limit = chunk_size * SQL_UNION_MAX
if fsics_len >= fsics_limit:
raise MorangoLimitExceeded(
"Limit of {limit} instance counters exceeded with {actual}".format(
limit=fsics_limit,
actual=fsics_len,
)
)
# chunk fsics creating multiple SQL selects which will be unioned before insert
i = 0
chunk = fsics[:chunk_size]
select_buffers = []
select_rmc_buffers = []
while chunk:
# create condition for all push FSICs where instance_ids are equal, but internal counters are higher than
# FSICs counters
last_saved_by_conditions = [
"(last_saved_instance = '{0}' AND last_saved_counter > {1})".format(
instance, counter
)
for instance, counter in chunk
]
if last_saved_by_conditions:
last_saved_by_conditions = [
_join_with_logical_operator(last_saved_by_conditions, "OR")
]
# combine conditions and filter by profile
where_condition = _join_with_logical_operator(
profile_condition + last_saved_by_conditions + partition_conditions,
"AND",
)
# execute raw sql to take all records that match condition, to be put into buffer for transfer
select_buffers.append(
"""SELECT
id, serialized, deleted, last_saved_instance, last_saved_counter, hard_deleted, model_name, profile,
partition, source_id, conflicting_serialized_data,
CAST ('{transfer_session_id}' AS {transfer_session_id_type}), _self_ref_fk
FROM {store} WHERE {condition}
""".format(
transfer_session_id=transfersession.id,
transfer_session_id_type=TransferSession._meta.pk.rel_db_type(
connection
),
condition=where_condition,
store=Store._meta.db_table,
)
)
# take all record max counters that are foreign keyed onto store models, which were queued into the buffer
select_rmc_buffers.append(
"""SELECT instance_id, counter, CAST ('{transfer_session_id}' AS {transfer_session_id_type}), store_model_id
FROM {record_max_counter} AS rmc
INNER JOIN {outgoing_buffer} AS buffer ON rmc.store_model_id = buffer.model_uuid
WHERE buffer.transfer_session_id = '{transfer_session_id}'
""".format(
transfer_session_id=transfersession.id,
transfer_session_id_type=TransferSession._meta.pk.rel_db_type(
connection
),
record_max_counter=RecordMaxCounter._meta.db_table,
outgoing_buffer=Buffer._meta.db_table,
)
)
i += chunk_size
chunk = fsics[i : i + chunk_size]
with connection.cursor() as cursor:
cursor.execute(
"""INSERT INTO {outgoing_buffer}
(model_uuid, serialized, deleted, last_saved_instance, last_saved_counter,
hard_deleted, model_name, profile, partition, source_id, conflicting_serialized_data,
transfer_session_id, _self_ref_fk)
{select}
""".format(
outgoing_buffer=Buffer._meta.db_table,
select=" UNION ".join(select_buffers),
)
)
cursor.execute(
"""INSERT INTO {outgoing_rmcb}
(instance_id, counter, transfer_session_id, model_uuid)
{select}
""".format(
outgoing_rmcb=RecordMaxCounterBuffer._meta.db_table,
select=" UNION ".join(select_rmc_buffers),
)
)
| 13,275
|
def union_poi_bus_station(data: DataFrame, label_poi: Optional[Text] = TYPE_POI):
"""
Performs the union between the different bus station categories
for Points of Interest in a single category named 'bus_station'.
Parameters
----------
data : DataFrame
Input points of interest data
label_poi : str, optional
Label referring to the Point of Interest category, by default TYPE_POI
"""
print('union bus station categories to one category')
filter_bus_station = data[label_poi].isin(
['transit_station', 'pontos_de_onibus']
)
data.at[data[filter_bus_station].index, label_poi] = 'bus_station'
| 13,276
|
def test_cli_note_import_from_stdin(mocker, mock_nncli):
"""test cli_note_import"""
mocker.patch('sys.stdin',
new=StringIO('{"content": "test"}'))
nn_obj = nncli.nncli.Nncli(False)
mocker.patch.object(nn_obj.ndb, 'import_note')
mocker.patch.object(nn_obj.ndb, 'sync_now')
nn_obj.cli_note_import(True)
nn_obj.ndb.import_note.assert_called_once_with({'content': 'test'})
nn_obj.ndb.sync_now.assert_called_once()
| 13,277
|
def make_params(
key_parts: Sequence[str],
variable_parts: VariablePartsType) -> Dict[str, Union[str, Tuple[str]]]:
"""
    Map keys to variables. This maps\
    URL-pattern variables to\
    the related URL parts
:param key_parts: A list of URL parts
:param variable_parts: A linked-list\
(ala nested tuples) of URL parts
:return: The param dict with the values\
assigned to the keys
:private:
"""
# The unwrapped variable parts are in reverse order.
# Instead of reversing those we reverse the key parts
# and avoid the O(n) space required for reversing the vars
return dict(zip(reversed(key_parts), _unwrap(variable_parts)))
| 13,278
|
def load_sentiments(file_name=DATA_PATH + "sentiments.csv"):
"""Read the sentiment file and return a dictionary containing the sentiment
score of each word, a value from -1 to +1.
"""
sentiments = {}
for line in open(file_name):
word, score = line.split(',')
sentiments[word] = float(score.strip())
return sentiments
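# Runnable sketch of the expected "word,score" file format, using a temporary
# file so the example does not depend on DATA_PATH (the _demo_* helper is not
# part of the original module):
def _demo_load_sentiments():
    import os
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as tmp:
        tmp.write('good,0.7\nbad,-0.6\n')
    try:
        scores = load_sentiments(tmp.name)
        assert scores == {'good': 0.7, 'bad': -0.6}
    finally:
        os.remove(tmp.name)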
| 13,279
|
def fun2():
"""
Use the dictionary for input instead of the list to improve the performance of updating values
by lookup
:return:
"""
try:
while True:
array_range=int(input().strip())
input_list=list(map(int,input().strip().split()))
print(input_list)
input_dict={} ########
for i in range(array_range):
input_dict[i]=input_list[i] ########
# print(input_dict)
opr_num=int(input().strip())
# print(opr_num)
for i in range(opr_num):
cmd_line=input().strip().split()
cmd=cmd_line[0]
cmd_list=list(map(int,cmd_line[1:]))
# print("inter_list",cmd_list)
if(cmd=='U'):
inter_list = sorted(cmd_list[:-1])
# print("inter_list", inter_list)
for j in range(inter_list[0],inter_list[1]+1):
input_dict[j]= int(math.pow(input_dict[j],cmd_list[2])) % G
# print(input_dict[j])
elif(cmd=='C'):
print(sum(list(input_dict.values())[cmd_list[0]:cmd_list[1]+1]))
except:
return
| 13,280
|
def run_example(
device_id: str,
server_host: str = "localhost",
server_port: int = 8004,
plot: bool = True,
scope_length: int = 8192,
historylength: int = 1,
):
"""run the example."""
apilevel_example = 6 # The API level supported by this example.
# Call a zhinst utility function that returns:
# - an API session `daq` in order to communicate with devices via the data server.
# - the device ID string that specifies the device branch in the server's node hierarchy.
# - the device's discovery properties.
# This example can't run with HF2 Instruments or instruments without the DIG option.
(daq, device, props) = zhinst.utils.create_api_session(
device_id, apilevel_example, server_host=server_host, server_port=server_port
)
zhinst.utils.api_server_version_check(daq)
# Enable the API's log.
daq.setDebugLevel(3)
# Create a base configuration: Disable all available outputs, awgs, demods, scopes,...
zhinst.utils.disable_everything(daq, device)
# Now configure the instrument for this experiment. The following channels
# and indices work on all device configurations. The values below may be
# changed if the instrument has multiple input/output channels and/or either
# the Multifrequency or Multidemodulator options installed.
# Signal output mixer amplitude [V].
amplitude = 0.500
out_channel = 0
# Get the value of the instrument's default Signal Output mixer channel.
out_mixer_channel = zhinst.utils.default_output_mixer_channel(props)
in_channel = 0
osc_index = 0
scope_in_channel = 0 # scope input channel
if props["devicetype"].startswith("UHF"):
frequency = 1.0e6
else:
frequency = 100e3
exp_setting = [
# The output signal.
["/%s/sigouts/%d/on" % (device, out_channel), 1],
["/%s/sigouts/%d/enables/%d" % (device, out_channel, out_mixer_channel), 1],
["/%s/sigouts/%d/range" % (device, out_channel), 1],
[
"/%s/sigouts/%d/amplitudes/%d" % (device, out_channel, out_mixer_channel),
amplitude,
],
["/%s/sigins/%d/imp50" % (device, in_channel), 1],
["/%s/sigins/%d/ac" % (device, in_channel), 0],
["/%s/sigins/%d/range" % (device, in_channel), 2 * amplitude],
["/%s/oscs/%d/freq" % (device, osc_index), frequency],
]
node_branches = daq.listNodes(f"/{device}/", 0)
if "DEMODS" in node_branches:
# NOTE we don't need any demodulator data for this example, but we need
# to configure the frequency of the output signal on out_mixer_c.
exp_setting.append(
["/%s/demods/%d/oscselect" % (device, out_mixer_channel), osc_index]
)
daq.set(exp_setting)
# Perform a global synchronisation between the device and the data server:
# Ensure that the signal input and output configuration has taken effect
# before calculating the signal input autorange.
daq.sync()
# Perform an automatic adjustment of the signal inputs range based on the
# measured input signal's amplitude measured over approximately 100 ms.
# This is important to obtain the best bit resolution on the signal inputs
# of the measured signal in the scope.
zhinst.utils.sigin_autorange(daq, device, in_channel)
# Configure the instrument's scope via the /dev..../scopes/n/ node tree branch.
# 'length' : the length of each scope record
daq.setInt("/%s/scopes/0/length" % device, scope_length)
# 'channel' : select the scope channel(s) to enable.
# Bit-encoded as following:
# 1 - enable scope channel 0
# 2 - enable scope channel 1
# 3 - enable both scope channels (requires DIG option)
# NOTE we are only interested in one scope channel: scope_in_channel and leave
# the other channel unconfigured
daq.setInt("/%s/scopes/0/channel" % device, 1 << in_channel)
# 'channels/0/bwlimit' : bandwidth limit the scope data. Enabling bandwidth
# limiting avoids antialiasing effects due to subsampling when the scope
# sample rate is less than the input channel's sample rate.
# Bool:
# 0 - do not bandwidth limit
# 1 - bandwidth limit
daq.setInt("/%s/scopes/0/channels/%d/bwlimit" % (device, scope_in_channel), 1)
# 'channels/0/inputselect' : the input channel for the scope:
# 0 - signal input 1
# 1 - signal input 2
# 2, 3 - trigger 1, 2 (front)
# 8-9 - auxiliary inputs 1-2
# The following inputs are additionally available with the DIG option:
# 10-11 - oscillator phase from demodulator 3-7
# 16-23 - demodulator 0-7 x value
# 32-39 - demodulator 0-7 y value
# 48-55 - demodulator 0-7 R value
# 64-71 - demodulator 0-7 Phi value
# 80-83 - pid 0-3 out value
# 96-97 - boxcar 0-1
# 112-113 - cartesian arithmetic unit 0-1
# 128-129 - polar arithmetic unit 0-1
# 144-147 - pid 0-3 shift value
daq.setInt(
"/%s/scopes/0/channels/%d/inputselect" % (device, scope_in_channel), in_channel
)
# 'time' : timescale of the wave, sets the sampling rate to 1.8GHz/2**time.
# 0 - sets the sampling rate to 1.8 GHz
# 1 - sets the sampling rate to 900 MHz
# ...
    # 16 - sets the sampling rate to 27.5 kHz
daq.setInt("/%s/scopes/0/time" % device, 0)
# 'single' : only get a single scope record.
# 0 - acquire continuous records
# 1 - acquire a single record
# Note: configured below in main loop.
# daq.setInt('/%s/scopes/0/single' % device, 1)
# Configure the scope's trigger to get aligned data
# 'trigenable' : enable the scope's trigger (boolean).
# 0 - acquire continuous records
# 1 - only acquire a record when a trigger arrives
daq.setInt("/%s/scopes/0/trigenable" % device, 1)
# Specify the trigger channel, we choose the same as the scope input
daq.setInt("/%s/scopes/0/trigchannel" % device, in_channel)
# Trigger on rising edge?
daq.setInt("/%s/scopes/0/trigrising" % device, 1)
# Trigger on falling edge?
daq.setInt("/%s/scopes/0/trigfalling" % device, 0)
# Set the trigger threshold level.
daq.setDouble("/%s/scopes/0/triglevel" % device, 0.00)
# Set hysteresis triggering threshold to avoid triggering on noise
# 'trighysteresis/mode' :
# 0 - absolute, use an absolute value ('scopes/0/trighysteresis/absolute')
    # 1 - relative, use a relative value ('scopes/0/trighysteresis/relative') of the trigchannel's
# input range
# (0.1=10%).
daq.setDouble("/%s/scopes/0/trighysteresis/mode" % device, 1)
daq.setDouble("/%s/scopes/0/trighysteresis/relative" % device, 0.05)
# Set the trigger hold-off mode of the scope. After recording a trigger event, this specifies
# when the scope should become re-armed and ready to trigger, 'trigholdoffmode':
# 0 - specify a hold-off time between triggers in seconds ('scopes/0/trigholdoff'),
# 1 - specify a number of trigger events before re-arming the scope ready to trigger
# ('scopes/0/trigholdcount').
daq.setInt("/%s/scopes/0/trigholdoffmode" % device, 0)
daq.setDouble("/%s/scopes/0/trigholdoff" % device, 50e-6)
# Set trigdelay to 0.: Start recording from when the trigger is activated.
daq.setDouble("/%s/scopes/0/trigdelay" % device, 0.0)
# The trigger reference position relative within the wave, a value of 0.5 corresponds to the
# center of the wave.
daq.setDouble("/%s/scopes/0/trigreference" % device, 0.25)
# Disable trigger gating.
daq.setInt("/%s/scopes/0/triggate/enable" % device, 0)
# Enable segmented data transfer from the device.
daq.setInt("/%s/scopes/0/segments/enable" % device, 1)
# The number of segments to transfer in one shot.
# NOTE: We will set 'segments/count' on a per-record basis below.
# daq.setInt("/%s/scopes/0/segments/count" % device, 10)
# Perform a global synchronisation between the device and the data server: Ensure that the
# settings have taken
# effect on the device before continuing. This also clears the API's data buffers to remove any
# old data.
daq.sync()
# Check the scope_length parameter that was set:
scope_length_set = daq.getInt("/%s/scopes/0/length" % device)
print(
f"Actual scope length set on the device: {scope_length_set} (requested {scope_length})"
)
# Initialize and configure the Scope Module.
scopeModule = daq.scopeModule()
# 'mode' : Scope data processing mode.
# 0 - Pass through scope segments assembled, returned unprocessed, non-interleaved.
# 1 - Moving average, scope recording assembled, scaling applied, averaged, if averaging is
# enabled.
# 2 - Not yet supported.
# 3 - As for mode 1, except an FFT is applied to every segment of the scope recording.
scopeModule.set("mode", 1)
# 'averager/weight' : Average the scope shots using an exponentially weighted moving average of
# the previous 'weight' shots.
scopeModule.set("averager/weight", 1)
# 'historylength' : The number of scope records to keep in the Scope Module's memory, when more
# records arrive in the Module from the device the oldest records are overwritten.
scopeModule.set("historylength", historylength)
# Subscribe to the scope's data in the module.
wave_nodepath = f"/{device}/scopes/0/wave"
scopeModule.subscribe(wave_nodepath)
# Loop over the desired number of measurements. For each measurement we will get a scope record
# consisting of of the specified number of segments.
#
data = {}
data[wave_nodepath] = []
num_measurements = 5
segment_counts = [1, 5, 10, 15, 20]
for index, amplitude in enumerate(np.linspace(0.2, 1.0, num_measurements)):
# Use different signal output amplitudes simply to distinguish between
# different segments in the plot.
daq.setDouble(
"/%s/sigouts/%d/amplitudes/%d" % (device, out_channel, out_mixer_channel),
amplitude,
)
daq.sync()
# Perform an automatic adjustment of the signal inputs range based on
# the measured input signal's amplitude measured over approximately 100
# ms. This is important to obtain the best bit resolution on the signal
# inputs of the measured signal in the scope.
zhinst.utils.sigin_autorange(daq, device, in_channel)
# Note: We should disable the scope whilst modifying settings.
daq.setInt(f"/{device}/scopes/0/enable", 0)
# Set the desired number of segments.
daq.setInt(f"/{device}/scopes/0/segments/count", segment_counts[index])
daq.sync() # Ensure the setting has taken effect on the device before continuing.
segment_count_set = daq.getInt(f"/{device}/scopes/0/segments/count")
print(
f"Segment count set on the device: {segment_count_set}\
(requested {segment_counts[index]})."
)
if historylength == 1:
# Set the scope to operate in 'single' mode: Once one scope record consisting of the
# specified number of segments (>= 1) has been recorded the scope will automatically
# stop. Note: The device node scopes/0/single will be set back to 0 by the device after
# recording one record.
daq.setInt("/%s/scopes/0/single" % device, 1)
scopeModule.set("clearhistory", 1)
scope_records = get_scope_records(device, daq, scopeModule, historylength)
# Check the dictionary returned by read contains the expected data. The data returned is a
# dictionary with keys corresponding to the recorded data's path in the node hierarchy.
if wave_nodepath not in data:
print(
f"[error]: The subscribed data `{wave_nodepath}` for measurement {index} \
({amplitude}) was not returned."
)
else:
num_records = len(scope_records[wave_nodepath])
dt = scope_records[wave_nodepath][0][0]["dt"]
totalsamples = scope_records[wave_nodepath][0][0]["totalsamples"]
segment_duration = dt * totalsamples / segment_counts[index]
print(f"Scope data contains {num_records} record(s).")
print(f"Duration of each segment: {segment_duration} s.")
check_scope_record_flags(scope_records[wave_nodepath])
data[wave_nodepath].append(scope_records[wave_nodepath])
print("")
if plot and data[wave_nodepath]:
_, axis = plt.subplots()
axis.grid(True)
clockbase = daq.getInt("/%s/clockbase" % device)
total_segments = sum(segment_counts)
colors = cm.rainbow(np.linspace(0, 1, total_segments))
segment_index = 0
for index, records in enumerate(data[wave_nodepath]):
# We only plot the first record for each measurement. To plot all records for each
# measurement additionally loop over `records'.
wave = records[0][0]["wave"][scope_in_channel]
# Reshape the array to recover the individual segments (this is only necessary in
# segmented mode).
segments = wave.reshape(segment_counts[index], scope_length)
# Create a time array relative to the trigger time.
dt = records[0][0]["dt"]
# The timestamp is the timestamp of the last sample in the scope segment.
timestamp = records[0][0]["timestamp"]
triggertimestamp = records[0][0]["triggertimestamp"]
t_segment = np.arange(-scope_length, 0) * dt + (
timestamp - triggertimestamp
) / float(clockbase)
for segment in segments:
axis.plot(1e3 * t_segment, segment, color=colors[segment_index])
segment_index += 1
axis.set_title(
f"{num_measurements} Scope Records (consisting of different segment counts)"
)
axis.set_ylabel("Amplitude [V]")
axis.set_xlabel("Time, relative to trigger [ms]")
axis.axvline(0.0, linewidth=2, linestyle="--", color="k", label="Trigger time")
axis.autoscale(enable=True, axis="x", tight=True)
plt.show()
return data
| 13,281
|
def perform_cegs_gwas(kinship_type='ibd', phen_type='medians'):
"""
Perform a simple MLM GWAS for the 8 traits
"""
import hdf5_data
import kinship
import linear_models as lm
import time
import scipy as sp
from matplotlib import pyplot as plt
import analyze_gwas_results as agr
phen_dict = hdf5_data.parse_cegs_drosophila_phenotypes()
phenotypes = ['Protein', 'Sugar', 'Triglyceride', 'weight']
envs = ['mated', 'virgin']
for phenotype in phenotypes:
for env in envs:
print phenotype, env
s1 = time.time()
d = hdf5_data.coordinate_cegs_genotype_phenotype(
phen_dict, phenotype, env)
print 'Calculating kinship'
if kinship_type == 'ibs':
K = kinship.calc_ibs_kinship(d['snps'])
elif kinship_type == 'ibd':
K = kinship.calc_ibd_kinship(d['snps'])
else:
raise NotImplementedError
if phen_type == 'means':
lmm = lm.LinearMixedModel(d['Y_means'])
elif phen_type == 'medians':
lmm = lm.LinearMixedModel(d['Y_medians'])
else:
raise NotImplementedError
lmm.add_random_effect(K)
print "Running EMMAX"
res = lmm.emmax_f_test(d['snps'], emma_num=1000)
print 'Mean p-value:', sp.mean(res['ps'])
secs = time.time() - s1
if secs > 60:
mins = int(secs) / 60
secs = secs - mins * 60
print 'Took %d mins and %f seconds.' % (mins, secs)
else:
print 'Took %f seconds.' % (secs)
# Now generating QQ-plots
label_str = '%s_%s_%s_%s' % (
kinship_type, phenotype, env, phen_type)
agr.plot_simple_qqplots_pvals('/Users/bjarnivilhjalmsson/data/tmp/cegs_qq_%s' % (label_str),
[res['ps']], result_labels=[
label_str], line_colors=['green'],
num_dots=1000, title=None, max_neg_log_val=6)
# Perform multiple loci mixed model GWAS
chromosomes = d['positions'][:, 0]
positions = sp.array(d['positions'][:, 1], 'int32')
x_positions = []
y_log_pvals = []
colors = []
x_shift = 0
for i, chrom in enumerate(sp.unique(chromosomes)):
if chrom in ['2L', '2LHet', '3L', '3LHet', '4', 'X', 'XHet']:
colors.append('c')
else: # chrom in ['2R', '2RHet', '3R', '3RHet', 'U', 'Uextra']
# Toss U and Hets
colors.append('m')
chrom_filter = sp.in1d(chromosomes, chrom)
positions_slice = positions[chrom_filter]
x_positions.append(positions_slice + x_shift)
x_shift += positions_slice.max()
log_ps_slice = -sp.log10(res['ps'][chrom_filter])
y_log_pvals.append(log_ps_slice)
m = len(positions)
log_bonf = -sp.log10(1 / (20.0 * m))
print m, log_bonf
# Plot manhattan plots?
plt.figure(figsize=(12, 4))
plt.axes([0.03, 0.1, 0.95, 0.8])
for i, chrom in enumerate(sp.unique(chromosomes)):
plt.plot(x_positions[i], y_log_pvals[i],
c=colors[i], ls='', marker='.')
xmin, xmax = plt.xlim()
plt.hlines(log_bonf, xmin, xmax, colors='k',
linestyles='--', alpha=0.5)
plt.title('%s, %s' % (phenotype, env))
plt.savefig('/Users/bjarnivilhjalmsson/data/tmp/cegs_gwas_%s_%s_%s_%s.png' %
(kinship_type, phenotype, env, phen_type))
| 13,282
|
def get_draft_url(url):
"""
Return the given URL with a draft mode HMAC in its querystring.
"""
if verify_draft_url(url):
# Nothing to do. Already a valid draft URL.
return url
# Parse querystring and add draft mode HMAC.
url = urlparse.urlparse(url)
salt = get_random_string(5)
# QueryDict requires a bytestring as its first argument
query = QueryDict(force_bytes(url.query), mutable=True)
query['edit'] = '%s:%s' % (salt, get_draft_hmac(salt, url.path))
# Reconstruct URL.
parts = list(url)
parts[4] = query.urlencode(safe=':')
return urlparse.urlunparse(parts)
| 13,283
|
def make_datum(source: str, img_id: str, sent_id: int, sent: str):
"""
Create a datum from the provided infos.
:param source: the dataset of the particular sentence.
:param img_id: id of the image
:param sent_id: id of the sentence (of the image)
:param sent: the sentence
:return: a dict of datum
"""
uid = make_uid(img_id, source, sent_id)
img_path = get_img_path(source, img_id)
return {
'uid': uid,
'img_id': img_id,
'img_path': img_path,
'sent': sent,
}
| 13,284
|
def list_for_consumer(req):
"""List allocations associated with a consumer."""
context = req.environ['placement.context']
context.can(policies.ALLOC_LIST)
consumer_id = util.wsgi_path_item(req.environ, 'consumer_uuid')
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
# NOTE(cdent): There is no way for a 404 to be returned here,
# only an empty result. We do not have a way to validate a
# consumer id.
allocations = alloc_obj.get_all_by_consumer_id(context, consumer_id)
output = _serialize_allocations_for_consumer(
context, allocations, want_version)
last_modified = _last_modified_from_allocations(allocations, want_version)
allocations_json = jsonutils.dumps(output)
response = req.response
response.status = 200
response.body = encodeutils.to_utf8(allocations_json)
response.content_type = 'application/json'
if want_version.matches((1, 15)):
response.last_modified = last_modified
response.cache_control = 'no-cache'
return response
| 13,285
|
def get_supermean(name, season, data_dir, obs_flag=None):
"""Calculated supermeans from retrieved data, which are pickled Iris cubes.
:param name: Cube name. Should be CF-standard name. If no CF-standard name
exists the STASH code in msi format (for example m01s30i403)
is used as name.
:param season: Supermean for a season (including annual).
['ann', 'djf', 'mam', 'jja', 'son']
:param data_dir: Directory containing cubes of model output data for
supermeans.
:returns: Supermeaned cube.
:rtype Cube:
The monthly and seasonal supermeans are periodic averages, for example
the seasonal supermean consists of the averaged season, where each
season is averaged over several years.
The annual supermean is a continuous mean over multiple years.
    Supermeans are only applied to full climate years (starting Dec 1st).
"""
name_constraint = iris.Constraint(name=name)
if not obs_flag:
cubes_path = os.path.join(data_dir, 'cubeList.nc')
else:
cubes_path = os.path.join(data_dir, obs_flag + '_cubeList.nc')
cubes = iris.load(cubes_path)
# use STASH if no standard name
for cube in cubes:
if cube.name() == 'unknown':
cube.rename(str(cube.attributes['STASH']))
cube = cubes.extract_strict(name_constraint)
if season in ['djf', 'mam', 'jja', 'son']:
supermeans_cube = periodic_mean(cube, period='season')
return supermeans_cube.extract(iris.Constraint(season=season))
elif season == 'ann':
return periodic_mean(cube)
else:
raise ValueError(
"Argument 'season' must be one of "
"['ann', 'djf', 'mam', 'jja', 'son']. "
"It is: " + str(season))
| 13,286
|
def get_zcl_attribute_size(code):
"""
Determine the number of bytes a given ZCL attribute takes up.
Args:
code (int): The attribute size code included in the packet.
Returns:
int: size of the attribute data in bytes, or -1 for error/no size.
"""
opts = (0x00, 0,
0x08, 1,
0x09, 2,
0x0a, 3,
0x0b, 4,
0x0c, 5,
0x0d, 6,
0x0e, 7,
0x0f, 8,
0x10, 1,
0x18, 1,
0x19, 2,
0x1a, 3,
0x1b, 4,
0x1c, 5,
0x1d, 6,
0x1e, 7,
0x1f, 8,
0x20, 1,
0x21, 2,
0x22, 3,
0x23, 4,
0x24, 5,
0x25, 6,
0x26, 7,
0x27, 8,
0x28, 1,
            0x29, 2,  # signed 16-bit integer
0x2a, 3,
0x2b, 4,
0x2c, 5,
0x2d, 6,
0x2e, 7,
0x2f, 8,
0x30, 1,
0x31, 2,
            0x38, 2,  # semi-precision float
            0x39, 4,  # single-precision float
            0x3a, 8,  # double-precision float
0x41, -1,
0x42, -1,
0x43, -1,
0x44, -1,
0x48, -1,
0x4c, -1,
0x50, -1,
0x51, -1,
0xe0, 4,
0xe1, 4,
0xe2, 4,
0xe8, 2,
0xe9, 2,
0xea, 4,
0xf0, 8,
0xf1, 16,
0xff, 0)
for i in range(0, len(opts), 2):
if code == opts[i]: return opts[i + 1]
return -1
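# Quick sanity checks taken directly from the table above (the _demo_* helper
# is not part of the original module):
def _demo_get_zcl_attribute_size():
    assert get_zcl_attribute_size(0x21) == 2    # 2-byte attribute per the table
    assert get_zcl_attribute_size(0x42) == -1   # variable-length / no fixed size
    assert get_zcl_attribute_size(0x99) == -1   # codes missing from the table fall through to -1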
| 13,287
|
def get_mappings():
"""We process the mappings for two separate cases. (1) Variables that vary by year,
and (2) variables where there are multiple realizations each year.
"""
# Set up grid for survey years. Note that from 1996 we can only expect information every other
# year. We start with 1978 as information about 1978 employment histories is collected with
# the initial interview.
years = range(1978, 2013)
# time-constant variables
dct_full = dict()
dct_full.update(process_time_constant(years))
dct_full.update(process_school_enrollment_monthly())
dct_full.update(process_highest_degree_received())
dct_full.update(process_multiple_each_year())
dct_full.update(process_single_each_year())
# Finishing
return years, dct_full
| 13,288
|
def _get_function_name_and_args(str_to_split):
"""
Split a string of into a meta-function name and list of arguments.
@param IN str_to_split String to split
@return Function name and list of arguments, as a pair
"""
parts = [s.strip() for s in str_to_split.split(" | ")]
if len(parts) < 2:
raise Exception("Invalid meta function string: %s" % str_to_split)
func_name = parts[0]
func_args = parts[1:]
return func_name, func_args
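# Example: the separator is the literal " | " with surrounding spaces
# (the _demo_* helper is not part of the original module):
def _demo_get_function_name_and_args():
    name, args = _get_function_name_and_args('lookup | host.example.com | 8080')
    assert name == 'lookup'
    assert args == ['host.example.com', '8080']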
| 13,289
|
def runAndWatch(container, cgroup, watchCgroup, notify=None, wallClockLimit=None,
cpuClockLimit=None, pollInterval=1, notifyInterval=10):
"""
Run a container and watch it for time limits. Returns a dictionary with
container statistics.
"""
inspection = inspectContainer(container)
command = ["container", "start", "--runtime", RUNTIME, container]
if CGROUP_WORKAROUND:
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
cgroup.addProcess(pid)
invokePodmanCommand(command)
os._exit(0)
else:
invokePodmanCommand(command)
timeout = False
ticks = 0
maxMemoryUsage = 0
while True:
time.sleep(pollInterval)
ticks += 1
if ticks % notifyInterval == 0 and notify is not None:
notify()
inspection = inspectContainer(container)
if containerStatus(inspection) != "running":
break
wTime = containerRunTime(inspection)
maxMemoryUsage = max(maxMemoryUsage, watchCgroup.currentMemoryUsage())
cTime = watchCgroup.cpuStats()["usage_usec"]
if wTime >= wallClockLimit * 1000000 or cTime >= cpuClockLimit * 1000000:
stopContainer(container, timeout=1)
timeout = True
inspection = inspectContainer(container)
stats = {
"cpuStat": watchCgroup.cpuStats(),
"memStat": watchCgroup.memoryStats(),
"maxMemory": maxMemoryUsage,
"wallTime": containerRunTime(inspection),
"exitCode": containerExitCode(inspection),
"outOfMemory": containerOomKilled(inspection),
"timeout": timeout,
"output": containerLogs(container)
}
return stats
| 13,290
|
def replicas_on_delete():
"""
This is a route for ALL NODES.
A (previous) neighbor node sends POST requests to this route,
so that a key-value pair replica is deleted in the current NODE.
"""
# The hash ID of the node-owner of the primary replica
start_id = request.form['id']
key = request.form['key']
k = int(request.form['k'])
if (key in node.storage):
# Delete the key-value replica from our database
del node.storage[key]
if (k == 1 or node.next_id == start_id):
return "Replicas have been deleted!", 200
data_to_next = {
'id': start_id,
'key': key,
'k': k-1
}
url_next = "http://" + node.next_ip + ":" + \
str(node.next_port) + "/delete/replicas"
print("Informing the next neighbor to delete their replica.")
r = requests.post(url_next, data_to_next)
if r.status_code != 200:
print("Something went wrong with deleting the replica \
in the next node.")
return r.text, r.status_code
| 13,291
|
def load(train_dir=train_dir, test_dir=test_dir):
"""
Load the dataset into memory.
This uses a cache-file which is reloaded if it already exists,
otherwise the dataset is created and saved to
the cache-file. The reason for using a cache-file is that it
ensure the files are ordered consistently each time the dataset
is loaded. This is important when the dataset is used in
combination with Transfer Learning.
:return:
A DataSet-object.
"""
# Path for the cache-file.
cache_path = os.path.abspath("signatures.pkl")
# If the DataSet-object already exists in a cache-file
# then load it, otherwise create a new object and save
# it to the cache-file so it can be loaded the next time.
dataset = load_cached(cache_path=cache_path,
train_dir=train_dir,
test_dir=test_dir)
return dataset
| 13,292
|
def pdf2(sigma_matrix, grid):
"""Calculate PDF of the bivariate Gaussian distribution.
Args:
sigma_matrix (ndarray): with the shape (2, 2)
grid (ndarray): generated by :func:`mesh_grid`,
with the shape (K, K, 2), K is the kernel size.
Returns:
        kernel (ndarray): un-normalized kernel.
"""
inverse_sigma = np.linalg.inv(sigma_matrix)
kernel = np.exp(-0.5 * np.sum(np.dot(grid, inverse_sigma) * grid, 2))
return kernel
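# Runnable sketch that builds the (K, K, 2) grid with plain numpy in place of
# the mesh_grid helper referenced in the docstring (the _demo_* helper is not
# part of the original module):
def _demo_pdf2():
    import numpy as np
    kernel_size = 5
    axis = np.arange(-(kernel_size // 2), kernel_size // 2 + 1)
    xx, yy = np.meshgrid(axis, axis)
    grid = np.stack([xx, yy], axis=-1).astype(np.float64)  # shape (K, K, 2)
    kernel = pdf2(np.eye(2), grid)
    assert kernel.shape == (kernel_size, kernel_size)
    assert kernel.max() == kernel[kernel_size // 2, kernel_size // 2]  # peak at the centre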
| 13,293
|
def spell_sql(*args,**kwargs):
"""
list=[]
"""
if len(args[0])<=0:
return None
sql="SELECT * from `emotion_data` WHERE id ={}".format(args[0][0])
for index in args[0][1:]:
sql +=" or id ={}".format(index)
return sql
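# Example of the generated statement; interpolating ids like this is only safe
# for trusted integer input, so a parameterised query would be preferable
# (the _demo_* helper is not part of the original module):
def _demo_spell_sql():
    assert spell_sql([3, 7, 9]) == (
        "SELECT * from `emotion_data` WHERE id =3 or id =7 or id =9"
    )
    assert spell_sql([]) is None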
| 13,294
|
def is_within_bounds(bounds, point):
""" Returns true if point is within bounds. point is a d-array and bounds is a
dx2 array. bounds is expected to be an np.array object.
"""
point = np.array(point)
if point.shape != (bounds.shape[0],):
return False
above_lb = np.all((point - bounds[:, 0] >= 0))
below_ub = np.all((bounds[:, 1] - point >= 0))
return above_lb * below_ub
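# Example with a 2-D box [0, 1] x [0, 2] (the _demo_* helper is not part of the
# original module):
def _demo_is_within_bounds():
    import numpy as np
    bounds = np.array([[0.0, 1.0], [0.0, 2.0]])
    assert is_within_bounds(bounds, [0.5, 1.5])
    assert not is_within_bounds(bounds, [1.5, 1.5])  # outside the first dimension
    assert not is_within_bounds(bounds, [0.5])       # wrong dimensionality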
| 13,295
|
def get_xyz(filename, information):
""" Returns xyz-files from data file"""
#Take out variables that will be constant throughout loops
natoms=information.num_of_atoms
charge=information.atom_charge
name=information.type
atom_counter = 1
time_step_counter = 0
T = time.time()
#Loop over all time units in given file
with open(filename, 'r') as file:
for line in file:
#Format string to desired format
line = line.split(" ")
line = "{0} {1} {2}".format(*line)
if atom_counter==1:
#Increment filename over time unit
filepath = str(time_step_counter).zfill(5)
#Create file and insert common information, observe, this needs a folder called 'qm'
xyzfile = open('qm/%s.xyz'% filepath, 'w')
xyzfile.write("%d \n"%(natoms))
xyzfile.write("charge = %s \n" % charge)
time_step_counter += 1
xyzfile.write(" %s %s" % (name, line))
atom_counter += 1
if atom_counter == natoms + 1:
atom_counter = 1
xyzfile.close()
print("End of file, counter: %d \n time: %f, %f "%(time_step_counter, time.time() - T, time.clock()))
| 13,296
|
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Avarage factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
# if weight is specified, apply element-wise weight
if weight is not None:
loss = loss * weight.mean(dim=-1)
# if avg_factor is not specified, just reduce the loss
if avg_factor is None:
loss = reduce_loss(loss, reduction)
else:
# if reduction is mean, then average the loss by avg_factor
if reduction == 'mean':
loss = loss.sum() / avg_factor
# if reduction is 'none', then do nothing, otherwise raise an error
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
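# Small torch example exercising the avg_factor branch; neither weight nor the
# reduce_loss helper from this module is needed on this path (the _demo_* helper
# is not part of the original module):
def _demo_weight_reduce_loss():
    import torch
    loss = torch.tensor([1.0, 2.0, 3.0, 4.0])
    out = weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=2.0)
    assert torch.isclose(out, torch.tensor(5.0))  # (1 + 2 + 3 + 4) / 2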
| 13,297
|
def list_of_paths():
"""
It lists all the folders which not contain PET images
"""
return ['.DS_Store', 'localizer', 'Space_3D_T2_FLAIR_sag_p2', 'AXIAL_FLAIR', 'MPRAGE_ADNI_confirmed_REPEATX2', 'Axial_PD-T2_TSE',
'Axial_PD-T2_TSE_repeat', 'MPRAGE_SAG_ISO_p2_ND', 'Axial_PD-T2_TSE_confirmed', 'MPRAGESAGISOp2ND', 'MPRAGE_ADNI_confirmed',
'MPRAGE_ADNI_confirmed_repeat', 'MPRAGE_SAG_ISO_p2', 'MPRAGE', 'MPRAGE_ADNI_confirmed_REPEAT', 'Axial_PD-T2_TSE_confirmed_repeat',
'MPRAGE_ADNI_conf_REPEAT', 'Space_3D_T2_FLAIR_sag_p2_REPEAT', 'MPRAGE_ADNI_confirmed_RPT', 'Brain_256_1.6_zoom_4_x_4_iter',
'Space_3D_T2_FLAIR_sag_REPEAT', 'Axial_PD-T2_TSE_RPTconfirmed', 'Axial_PD-T2_TSE_RPT_confirmed', 'Axial_PD-T2_TSE_confirmed_REPEAT',
'flair_t2_spc_irprep_ns_sag_p2_1mm_iso', 'localiser']
| 13,298
|
def convert_onnx_to_ell(path, step_interval_msec=None, lag_threshold_msec=None):
"""
convert the importer model into a ELL model, optionally a steppable model if step_interval_msec
and lag_threshold_msec are provided.
"""
_logger = logger.get()
_logger.info("Pre-processing... ")
converter = convert.OnnxConverter()
importer_model = converter.load_model(path)
_logger.info("\n Done pre-processing.")
try:
importer_engine = common.importer.ImporterEngine(step_interval_msec=step_interval_msec,
lag_threshold_msec=lag_threshold_msec)
ell_map = importer_engine.convert_nodes(importer_model)
ordered_importer_nodes, node_mapping = importer_engine.get_importer_node_to_ell_mapping()
except Exception as e:
_logger.error("Error occurred while attempting to convert the model: " + str(e))
raise
return ell_map, ordered_importer_nodes
| 13,299
|