content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def load_ipython_extension(ipython):
    """Entry point allowing IPython to load this module as an extension.

    Any module that exposes a function named `load_ipython_extension`
    can be loaded via `%load_ext module.path`, or configured to be
    autoloaded by IPython at startup time.
    """
    # Registering the class itself (not an instance) is fine: IPython
    # instantiates it through the default constructor.
    ipython.register_magics(GPU)
def hw_uint(value):
    """Return the Hamming weight (count of set bits) of a 16-bit unsigned integer."""
    return bin(value).count("1")
def run_tunedmode():
    """Run the TunedMode daemon on the session bus until SIGTERM or SIGINT."""
    dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
    bus = dbus.SessionBus()
    name = dbus.service.BusName(TUNEDMODE_BUS_NAME, bus=bus)
    with TunedMode(name, TUNEDMODE_BUS_PATH):
        main_loop = GLib.MainLoop()
        # Quit the GLib main loop cleanly on either termination signal.
        for sig in (signal.SIGTERM, signal.SIGINT):
            signal.signal(sig, lambda signum, frame: main_loop.quit())
        main_loop.run()
def step_impl(context, display_name):
    """Store on the context the single monitor matching *display_name*.

    Args:
        context (behave.runner.Context): The test context.
        display_name (str): The display name that identifies the monitor of interest.
    """
    matches = list(context.project.monitors().find_by_display_name(display_name))
    assert_that(
        len(matches), equal_to(1),
        f'Expected single monitor with {display_name}. Found {len(matches)}.')
    context.monitor = matches[0]
def clip(x, xmin, xmax):
    """Return a copy of *x* with values clipped to the range [xmin, xmax].

    Values below xmin become xmin, values above xmax become xmax; the input
    array is left unmodified.

    Uses np.clip instead of the previous hand-rolled np.where loop, which
    only worked for 1-D arrays; this version handles arrays of any shape.
    """
    return np.clip(x, xmin, xmax)
def emit(plugin):
    """Emit a fixed "Hello world" string notification on the "custom" topic."""
    topic, payload = "custom", "Hello world"
    plugin.notify(topic, payload)
def profile_from_creds(creds, keychain, cache):
    """Create a profile from an AWS credentials file.

    Reads the access/secret key pair from *creds*, stores it securely via
    the keychain, and builds a profile from the resulting ARN.
    """
    keys = get_keys_from_file(creds)
    stored_arn = security_store(keys[0], keys[1], keychain, cache)
    return profile_from_arn(stored_arn)
async def lb_d(_ctx: Context):
    """
    Send the embedded daily leaderboard: the top 10 members with the
    highest study time in the day.
    :param _ctx:
        The command context, unused but required by the command framework.
    """
    # daily_leaderboard() supplies the data; send_leaderboard formats and sends it.
    await send_leaderboard('daily', daily_leaderboard())
def get_files(pp: Paths, glob: str=DEFAULT_GLOB, sort: bool=True) -> Tuple[Path, ...]:
    """
    Resolve a path spec (a Path, a string, or a collection of either) to a
    tuple of concrete files.

    Directories are expanded with *glob*; strings containing '*' are treated
    as glob patterns themselves. A tuple is returned because it is hashable,
    which is friendlier for caching.
    """
    # TODO FIXME mm, some wrapper to assert iterator isn't empty?
    sources: List[Path]
    if isinstance(pp, Path):
        sources = [pp]
    elif isinstance(pp, str):
        if pp == '':
            # special case -- makes sense for optional data sources, etc
            return () # early return to prevent warnings etc
        sources = [Path(pp)]
    else:
        sources = [Path(p) for p in pp]
    def caller() -> str:
        import traceback
        # -3 because the stack is [<this helper>, get_files(), <actual caller>]
        return traceback.extract_stack()[-3].filename
    paths: List[Path] = []
    for src in sources:
        if src.parts[0] == '~':
            src = src.expanduser()
        if src.is_dir():
            gp: Iterable[Path] = src.glob(glob)
            paths.extend(gp)
        else:
            ss = str(src)
            if '*' in ss:
                if glob != DEFAULT_GLOB:
                    warnings.warn(f"{caller()}: treating {ss} as glob path. Explicit glob={glob} argument is ignored!")
                paths.extend(map(Path, do_glob(ss)))
            else:
                if not src.is_file():
                    raise RuntimeError(f"Expected '{src}' to exist")
                # todo assert matches glob??
                paths.append(src)
    if sort:
        paths = sorted(paths)
    if len(paths) == 0:
        # todo make it conditionally defensive based on some global settings
        # TODO not sure about using warnings module for this
        import traceback
        # BUG FIX: the warning previously interpolated `paths`, which is always
        # empty at this point; report the original input spec instead.
        warnings.warn(f'{caller()}: no paths were matched against {pp}. This might result in missing data.')
        traceback.print_stack()
    return tuple(paths)
def test(model, test_loader, dynamics, fast_init):
    """
    Evaluate prediction accuracy of an energy-based model on a given test set.
    Args:
        model: EnergyBasedModel
        test_loader: Dataloader containing the test dataset
        dynamics: Dictionary containing the keyword arguments
            for the relaxation dynamics on u
        fast_init: Boolean to specify if fast feedforward initialization
            is used for the prediction
    Returns:
        Test accuracy
        Mean energy of the model per batch
    """
    test_E, correct, total = 0.0, 0.0, 0.0
    for x_batch, y_batch in test_loader:
        # Prepare the new batch
        x_batch, y_batch = x_batch.to(config.device), y_batch.to(config.device)
        # Extract prediction as the output unit with the strongest activity
        output = predict_batch(model, x_batch, dynamics, fast_init)
        prediction = torch.argmax(output, 1)
        with torch.no_grad():
            # Compute test batch accuracy, energy and store number of seen batches
            correct += float(torch.sum(prediction == y_batch.argmax(dim=1)))
            test_E += float(torch.sum(model.E))
            total += x_batch.size(0)
    # NOTE(review): `total` counts samples, not batches, so test_E/total is
    # energy per sample despite the docstring saying "per batch" -- confirm.
    return correct / total, test_E / total
def generate_data(input_path, label_path):
    """Generate the dataset for s11 parameter prediction and save it to disk.

    Loads the raw input/label arrays, normalizes the input, log-compresses
    and rescales the labels, and writes the processed arrays plus the
    normalization/scaling config to the configured output paths.
    """
    data_input = np.load(input_path)
    if os.path.exists(DATA_CONFIG_PATH):
        data_config = np.load(DATA_CONFIG_PATH)
        mean = data_config["mean"]
        std = data_config["std"]
    # NOTE(review): the mean/std loaded above are immediately overwritten by
    # custom_normalize below, so the branch has no effect -- confirm whether
    # the saved statistics were meant to be reused instead of recomputed.
    data_input, mean, std = custom_normalize(data_input)
    data_label = np.load(label_path)
    print(data_input.shape)
    print(data_label.shape)
    # Move the last axis to position 1 (channels-first layout) -- assumes the
    # input is 5-D with channels last; TODO confirm against the data producer.
    data_input = data_input.transpose((0, 4, 1, 2, 3))
    # Log-compress the labels, then divide by half the max magnitude so the
    # rescaled values lie in [-2, 2].
    data_label[:, :] = np.log10(-data_label[:, :] + 1.0)
    scale_s11 = 0.5 * np.max(np.abs(data_label[:, :]))
    data_label[:, :] = data_label[:, :] / scale_s11
    np.savez(DATA_CONFIG_PATH, scale_s11=scale_s11, mean=mean, std=std)
    np.save(os.path.join(SAVE_DATA_PATH, 'data_input.npy'), data_input)
    np.save(os.path.join(SAVE_DATA_PATH, 'data_label.npy'), data_label)
    print("data saved in target path")
def main(args):
    """Parse arguments and run test environment setup.
    This installs and/or upgrades any skills needed for the tests and
    collects the feature and step files for the skills.
    """
    # An optional config file may override/extend the CLI arguments in place.
    if args.config:
        apply_config(args.config, args)
    msm = create_skills_manager(args.platform, args.skills_dir,
                                args.repo_url, args.branch)
    # Pad the explicitly requested skills with randomly selected extras.
    random_skills = get_random_skills(msm, args.random_skills)
    all_skills = args.test_skills + args.extra_skills + random_skills
    install_or_upgrade_skills(msm, all_skills)
    # Only the explicitly requested test skills contribute test cases.
    collect_test_cases(msm, args.test_skills)
    print_install_report(msm.platform, args.test_skills,
                        args.extra_skills + random_skills)
def get_cluster_activite(cluster_path_csv, test, train=None):
    """Get cluster activite csv from path cluster_path_csv and merge it
    with the frames on station_id.

    Parameters
    ----------
    cluster_path_csv : str
        Path to the exported df_labels DataFrame.
    test : pandas.DataFrame
    train : pandas.DataFrame, optional

    Returns
    -------
    (train, test) when a non-empty train frame is given, otherwise test only.
    """
    cluster_activite = read_cluster_activite(cluster_path_csv=cluster_path_csv)
    test = test.merge(cluster_activite, left_on='station_id', right_on='id_station', how='left')
    test.drop('id_station', axis=1, inplace=True)
    # BUG FIX: `len(train)` raised TypeError when train was None (the default);
    # guard on None first, keeping the original non-empty check otherwise.
    if train is not None and len(train) > 0:
        train = train.merge(cluster_activite, left_on='station_id', right_on='id_station', how='left')
        train.drop('id_station', axis=1, inplace=True)
        return train, test
    return test
def get_ssl_policy(name: Optional[str] = None,
                   project: Optional[str] = None,
                   opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSSLPolicyResult:
    """
    Gets an SSL Policy within GCE from its name, for use with Target HTTPS and Target SSL Proxies.
    For more information see [the official documentation](https://cloud.google.com/compute/docs/load-balancing/ssl-policies).
    ## Example Usage
    ```python
    import pulumi
    import pulumi_gcp as gcp
    my_ssl_policy = gcp.compute.get_ssl_policy(name="production-ssl-policy")
    ```
    :param str name: The name of the SSL Policy.
    :param str project: The ID of the project in which the resource belongs. If it
           is not provided, the provider project is used.
    """
    # Build the invoke payload for the provider call.
    __args__ = dict()
    __args__['name'] = name
    __args__['project'] = project
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this provider package's version when the caller did not pin one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('gcp:compute/getSSLPolicy:getSSLPolicy', __args__, opts=opts, typ=GetSSLPolicyResult).value
    # Re-wrap the typed result so it can be awaited by callers.
    return AwaitableGetSSLPolicyResult(
        creation_timestamp=__ret__.creation_timestamp,
        custom_features=__ret__.custom_features,
        description=__ret__.description,
        enabled_features=__ret__.enabled_features,
        fingerprint=__ret__.fingerprint,
        id=__ret__.id,
        min_tls_version=__ret__.min_tls_version,
        name=__ret__.name,
        profile=__ret__.profile,
        project=__ret__.project,
        self_link=__ret__.self_link)
def getitem(self, item):
    """Select elements at the specific index.
    Parameters
    ----------
    item : Union[slice, int, dragon.Tensor]
        The index.
    Returns
    -------
    dragon.Tensor
        The output tensor.
    """
    gather_args = []
    if isinstance(item, Tensor):
        # A tensor index dispatches to BooleanMask (bool/uint8 masks)
        # or Gather (int64 indices along axis 0).
        if item.dtype == 'bool' or item.dtype == 'uint8':
            if context.executing_eagerly():
                return OpLib.execute('BooleanMask', [self, item])
            return OpLib.add('BooleanMask', [self, item])
        elif item.dtype == 'int64':
            gather_args.append((0, item))
        else:
            raise TypeError('Unsupported index type: ' + item.dtype)
    if isinstance(item, tuple):
        # Mixed tuple indices: collect (axis, index-tensor) pairs; only
        # int64 tensor elements are supported here.
        for i, elem in enumerate(item):
            if isinstance(elem, Tensor):
                if elem.dtype == 'int64':
                    gather_args.append((i, elem))
                else:
                    raise TypeError('Unsupported index type: ' + elem.dtype)
    if len(gather_args) == 1:
        axis, index = gather_args[0]
        if context.executing_eagerly():
            return OpLib.execute(
                'Gather', [self, index], axis=axis, end_axis=None)
        return OpLib.add('Gather', [self, index], axis=axis)
    elif len(gather_args) > 1:
        # Gathering along multiple axes at once is not implemented.
        raise NotImplementedError
    # No tensor indices: fall back to a plain Slice from ints/slices.
    starts, sizes = _process_index(item)
    if context.executing_eagerly():
        return OpLib.execute(
            'Slice', [self], ndim=len(starts), starts=starts, sizes=sizes)
    return OpLib.add('Slice', [self], starts=starts, sizes=sizes)
def test_multi_range_potential_form():
    """Tests definition of multiple ranges for potential-form definitions"""
    # No explicit range prefix: the default start is ('>', 0.0).
    k = u"A"
    v = u"potential 1.0 2.0 3.0"
    parser = ConfigParser(io.StringIO())
    actual = parser._parse_multi_range(k, v)
    assert actual.species == k
    assert actual.potential_form_instance.potential_form == u"potential"
    assert actual.potential_form_instance.parameters==[1.0, 2.0, 3.0]
    assert actual.potential_form_instance.next is None
    assert actual.potential_form_instance.start == (u'>', 0.0)
    # Explicit '>=0' prefix is parsed into a ('>=', 0.0) start.
    k = u"A"
    v = u">=0 potential 1.0 2.0 3.0"
    actual = parser._parse_multi_range(k, v)
    assert actual.species == k
    assert actual.potential_form_instance.potential_form == u"potential"
    assert actual.potential_form_instance.parameters==[1.0, 2.0, 3.0]
    assert actual.potential_form_instance.next is None
    assert actual.potential_form_instance.start == (u'>=', 0.0)
    # Two ranges chained: parameters attach to the last form; forms link via .next.
    k = u"A"
    v = u">=0 potential >10 potentialb 1.0 2.0 3.0"
    actual = parser._parse_multi_range(k, v)
    assert actual.species == k
    assert actual.potential_form_instance.potential_form == u"potential"
    assert actual.potential_form_instance.parameters==[]
    assert actual.potential_form_instance.start == (u'>=', 0.0)
    assert actual.potential_form_instance.next.potential_form == u"potentialb"
    assert actual.potential_form_instance.next.start == (u">", 10.0)
    assert actual.potential_form_instance.next.parameters == [1.0,2.0,3.0]
    # Whitespace between the operator and the number is tolerated.
    v = u">= 0.0 potential 1.0 2.0 3.0"
    actual = parser._parse_multi_range(k, v)
    assert actual.potential_form_instance.start == (u'>=', 0.0)
    # Three chained ranges, each with its own parameter list.
    k = u"A"
    v = u"potential 1.0 2.0 3.0 >1e1 potentialb 5.0 6.0 7.0 >=2.0E1 zero"
    actual = parser._parse_multi_range(k, v)
    assert actual.species == k
    assert actual.potential_form_instance.potential_form == u"potential"
    assert actual.potential_form_instance.parameters==[1.0, 2.0, 3.0]
    assert not actual.potential_form_instance.next is None
    assert actual.potential_form_instance.start == (u'>', 0.0)
    actual = actual.potential_form_instance.next
    assert actual.potential_form == u"potentialb"
    assert actual.parameters == [5.0, 6.0, 7.0]
    assert not actual.next is None
    assert actual.start == (u'>', 10.0)
    actual = actual.next
    assert actual.potential_form == u"zero"
    assert actual.parameters == []
    assert actual.next is None
    assert actual.start == (u'>=', 20.0)
    # Decimal and scientific notation all parse to the same start value.
    k = u"A"
    v = u">0.01 potential 1.0"
    actual = parser._parse_multi_range(k, v)
    assert actual.potential_form_instance.start == (u'>', 0.01)
    v = u">1e-2 potential 1.0"
    actual = parser._parse_multi_range(k, v)
    assert actual.potential_form_instance.start == (u'>', 0.01)
    v = u">1.0E-2 potential 1.0"
    actual = parser._parse_multi_range(k, v)
    assert actual.potential_form_instance.start == (u'>', 0.01)
    # Leading-dot exponent form (".1E-1") is not supported yet -- kept disabled.
    # v = ">.1E-1 potential 1.0"
    # actual = parser._parse_multi_range(k, v)
    # assert actual.potential_form_instance.start == ('>', 0.01)
def zdot_batch(x1, x2):
    """Batched complex inner product: sum(conj(x1) * x2) per batch element.

    Args:
        x1 (Tensor): The first multidimensional Tensor (batch dim first).
        x2 (Tensor): The second multidimensional Tensor, same shape as x1.

    Returns:
        Tensor of shape (batch,) holding one dot product per batch element.
    """
    n_batch = x1.shape[0]
    products = torch.conj(x1) * x2
    return products.reshape(n_batch, -1).sum(dim=1)
def open_browser(url):
    """Open *url* in the configured browser object, or report failure to stderr."""
    browser = CONFIG['browser_obj'] if 'browser_obj' in CONFIG else None
    if browser:
        browser.open(utils.add_scheme(url))
    else:
        sys.stderr.write('Failed to open browser.\n')
def show_progress(iteration, total, prefix='', suffix='', decimals=0,
                  length=50, fill='=', printEnd="\r"):
    """
    Render a one-line terminal progress bar; call once per loop iteration.
    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent complete (Int)
        length      - Optional  : character length of bar (Int)
        fill        - Optional  : bar fill character (Str)
        printEnd    - Optional  : end character (e.g. "\r", "\r\n") (Str)
    """
    done = iteration + 1
    percent = f"{100 * (done / float(total)):.{decimals}f}"
    filled = int(length * done / total)
    bar = fill * filled + '-' * (length - filled)
    print(f'\r{prefix} |{bar}| {percent}% {suffix}', end=printEnd)
    if done == total:
        # Move to a fresh line once the bar is complete.
        print()
def test_post_document_annotation():
    """Create an Annotation via API and verify it can be listed and deleted."""
    document_id = TEST_DOCUMENT
    start_offset = 86
    end_offset = 88
    accuracy = 0.0001
    label_id = 867  # Refers to Label Austellungsdatum
    # create a revised annotation, so we can verify its existence via get_document_annotations
    response = post_document_annotation(
        document_id=document_id,
        start_offset=start_offset,
        end_offset=end_offset,
        accuracy=accuracy,
        label_id=label_id,
        revised=True,
    )
    annotation = json.loads(response.text)
    # The new annotation id must appear in the document's annotation list.
    annotation_ids = [
        annot['id'] for annot in get_document_annotations(document_id, include_extractions=True)
    ]
    assert annotation['id'] in annotation_ids
    # Clean up: deletion must succeed (truthy response).
    assert delete_document_annotation(document_id, annotation['id'])
def test_atomic_g_day_min_inclusive_2_nistxml_sv_iv_atomic_g_day_min_inclusive_3_5(mode, save_output, output_format):
    """
    Type atomic/gDay is restricted by facet minInclusive with value ---24.
    """
    # NIST conformance case: generate bindings for the schema/instance pair and
    # check them for the parametrized mode and output format.
    assert_bindings(
        schema="nistData/atomic/gDay/Schema+Instance/NISTSchema-SV-IV-atomic-gDay-minInclusive-3.xsd",
        instance="nistData/atomic/gDay/Schema+Instance/NISTXML-SV-IV-atomic-gDay-minInclusive-3-5.xml",
        class_name="NistschemaSvIvAtomicGDayMinInclusive3",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def add(request):
    """
    Handle UPDATE REQUEST '/server/add/': create a new Server entry.

    Responds only to POST (plus OPTIONS/HEAD probes); GET and anything
    else raises Http404.
    """
    request_type = request.method
    logger.debug(request_type)
    if request_type == 'GET':
        raise Http404
    elif request_type in ('OPTIONS', 'HEAD'):
        # BUG FIX: was 'OPTION' (missing the trailing S), which never matches
        # the real HTTP OPTIONS method, so OPTIONS requests fell through to 404.
        return HttpResponse("OK")
    elif request_type == 'POST':
        server = Server()
        server.name = request.POST['servername']
        server.comment = request.POST['comment']
        server.save()
        # Redirect back with a cache-busting query parameter so the browser
        # does not show a stale cached page.
        return HttpResponseRedirect(
            "/server/%s/?update=%d" % (server.uuid, datetime.datetime.now().microsecond))
    else:
        raise Http404
def gradient_check_numpy_expr(func, x, output_gradient, h=1e-5):
    """
    Numerically estimate the gradient of `func` at `x` via central differences.

    :param func: callable taking `x` and returning an array (or memoryview).
    :param x: numpy array; perturbed element-wise during the check but restored.
    :param output_gradient: upstream gradient to contract the Jacobian with.
    :param h: finite-difference step size.
    :return: float32 array of the same shape as `x` with the estimated gradient.
    """
    grad = np.zeros_like(x).astype(np.float32)
    # Renamed from `iter` to `it` to avoid shadowing the builtin.
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        old_value = x[idx]
        # calculate positive value
        x[idx] = old_value + h
        pos = func(x).copy()
        # calculate negative value
        x[idx] = old_value - h
        neg = func(x).copy()
        # restore
        x[idx] = old_value
        # Type of pos and neg will be memoryview if we are testing Cython
        # functions, so wrap in np.array before subtracting.
        grad[idx] = np.sum((np.array(pos) - np.array(neg)) * output_gradient) / (2 * h)
        it.iternext()
    return grad
def serve_values(name, func, args, kwargs, serving_values, fallback_func, backend_name=None, implemented_funcs=None, supported_kwargs=None):
    """Determines the parameter value to serve for the given parameter
    name and kwargs. First checks for unsupported funcs or kwargs, then
    uses the following algorithm:
    1. if name in serving_values, use serving_values[name], else
    2. if guess in kwargs, use the guess, else
    3. call fallback_func(name, func, *args, **kwargs)."""
    # Local import keeps the public interface and module top-level untouched.
    from collections.abc import Mapping

    # validate arguments
    if implemented_funcs is not None:
        assert backend_name is not None, "serve_values expects a backend_name argument when doing func validation"
        if func not in implemented_funcs:
            raise ValueError("the {} backend does not implement the {} function".format(backend_name, func))
    if supported_kwargs is not None:
        assert backend_name is not None, "serve_values expects a backend_name argument when doing kwargs validation"
        unsupported_kwargs = set(kwargs) - set(supported_kwargs)
        if unsupported_kwargs:
            raise ValueError("the {} backend does not support {} option(s)".format(backend_name, unsupported_kwargs))
    # determine value -- rewritten from compiled-Coconut pattern matching
    # (which depended on the private _coconut runtime) into plain Python;
    # behavior is unchanged: a Mapping lookup with a missing key falls
    # through to the next case.
    if isinstance(serving_values, Mapping) and name in serving_values:
        return serving_values[name]
    if isinstance(kwargs, Mapping) and "guess" in kwargs:
        return kwargs["guess"]
    return fallback_func(name, func, *args, **kwargs)
def find_peaks(sig):
    """
    Find hard peaks and soft peaks in a signal, defined as follows:
    - Hard peak: a peak that is either /\ or \/.
    - Soft peak: a peak that is either /-*\ or \-*/.
    In this case we define the middle as the peak.
    Parameters
    ----------
    sig : np array
        The 1d signal array.
    Returns
    -------
    hard_peaks : ndarray
        Array containing the indices of the hard peaks.
    soft_peaks : ndarray
        Array containing the indices of the soft peaks.
    """
    if len(sig) == 0:
        return np.empty([0]), np.empty([0])
    # tmp[i] = sign(sig[i] - sig[i+1]): +1 where the signal drops after i,
    # -1 where it rises, 0 where it is flat (last element compared to itself).
    tmp = sig[1:]
    tmp = np.append(tmp, [sig[-1]])
    tmp = sig - tmp
    tmp[np.where(tmp>0)] = 1
    tmp[np.where(tmp==0)] = 0
    tmp[np.where(tmp<0)] = -1
    # Difference of consecutive slope signs: a value of +/-2 marks a strict
    # direction change (/\ or \/), i.e. a hard peak.
    tmp2 = tmp[1:]
    tmp2 = np.append(tmp2, [0])
    tmp = tmp-tmp2
    hard_peaks = np.where(np.logical_or(tmp==-2, tmp==+2))[0] + 1
    soft_peaks = []
    # A value of +/-1 marks the start of a plateau; scan forward to see how
    # it ends.
    for iv in np.where(np.logical_or(tmp==-1,tmp==+1))[0]:
        t = tmp[iv]
        i = iv+1
        while True:
            # Plateau ends at the signal end, an opposite-slope marker, or a
            # hard peak -- then it is not a soft peak.
            if i==len(tmp) or tmp[i] == -t or tmp[i] == -2 or tmp[i] == 2:
                break
            if tmp[i] == t:
                # Same-direction marker resumes: the plateau middle is a soft peak.
                soft_peaks.append(int(iv + (i - iv)/2))
                break
            i += 1
    soft_peaks = np.array(soft_peaks, dtype='int') + 1
    return hard_peaks, soft_peaks
def evenly_divides(x, y):
    """Return True if ``x`` evenly divides ``y``.

    Uses the modulo operator, which is exact for integers of arbitrary size;
    the previous ``int(y / x) == y / x`` relied on float division and lost
    precision for large integers.
    """
    return y % x == 0
def proxmap_sort(arr: list, key: Function = lambda x: x, reverse: bool = False) -> list:
    """Proxmap sort partitions an array of keys into a number of "subarrays"
    (buckets) by computing a "proximity map": for each key K, the start index
    of the subarray where K will live in the final sorted order. Keys are then
    placed into each subarray using insertion sort."""
    # Time complexity:
    # Worst: O(n^2)
    # Average: Theta(n)
    # Best: Omega(n)
    # Stable, Not in place
    _check_key_arr(arr, key, IntFloatList)
    if not arr:
        return []
    _min = key(min(arr, key=key))
    _max = key(max(arr, key=key))
    # One bucket per integer key value in [int(_min), int(_max)].
    hit_counts = [0 for _ in range(int(_min), int(_max + 1))]
    for item in arr:
        hit_counts[int(key(item)) - int(_min)] += 1
    # proxmaps[i] is the start index of bucket i in the output (None if empty).
    proxmaps = []
    last_hit_count = 0
    for hc in hit_counts:
        if hc == 0:
            proxmaps.append(None)
        else:
            proxmaps.append(last_hit_count)
            last_hit_count += hc
    locations = []
    for item in arr:
        locations.append(proxmaps[int(key(item)) - int(_min)])
    final = [None for _ in range(len(locations))]
    for idx, item in enumerate(arr):
        loc = locations[idx]
        if final[loc] is None:
            final[loc] = item
        else:
            # Bucket start already occupied: slot the item in with a short
            # insertion pass over the filled portion of the bucket.
            none_ptr = loc
            while final[none_ptr] is not None:
                none_ptr += 1
            for ptr in range(none_ptr - 1, loc - 1, -1):
                # BUG FIX: compare via key(...) rather than the raw items so
                # a custom `key` argument is honored during insertion (with
                # the default identity key, behavior is unchanged).
                if key(final[ptr]) > key(item):
                    final[ptr], final[ptr + 1] = final[ptr + 1], final[ptr]
                else:
                    final[ptr + 1] = item
                    break
            else:
                final[loc] = item
    if reverse:
        final = final[::-1]
    return final
async def test_browse_media(
    hass, hass_ws_client, mock_plex_server, requests_mock, library_movies_filtertypes
):
    """Test browsing Plex media through the media_player websocket API."""
    websocket_client = await hass_ws_client(hass)
    media_players = hass.states.async_entity_ids("media_player")
    msg_id = 1
    # Browse base of non-existent Plex server
    await websocket_client.send_json(
        {
            "id": msg_id,
            "type": "media_player/browse_media",
            "entity_id": media_players[0],
            ATTR_MEDIA_CONTENT_TYPE: "server",
            ATTR_MEDIA_CONTENT_ID: "this server does not exist",
        }
    )
    msg = await websocket_client.receive_json()
    assert msg["id"] == msg_id
    assert msg["type"] == TYPE_RESULT
    assert not msg["success"]
    assert msg["error"]["code"] == ERR_UNKNOWN_ERROR
    # Browse base of Plex server
    msg_id += 1
    await websocket_client.send_json(
        {
            "id": msg_id,
            "type": "media_player/browse_media",
            "entity_id": media_players[0],
        }
    )
    msg = await websocket_client.receive_json()
    assert msg["id"] == msg_id
    assert msg["type"] == TYPE_RESULT
    assert msg["success"]
    result = msg["result"]
    assert result[ATTR_MEDIA_CONTENT_TYPE] == "server"
    assert result[ATTR_MEDIA_CONTENT_ID] == DEFAULT_DATA[CONF_SERVER_IDENTIFIER]
    # Library Sections + Special Sections + Playlists
    assert (
        len(result["children"])
        == len(mock_plex_server.library.sections()) + len(SPECIAL_METHODS) + 1
    )
    tvshows = next(iter(x for x in result["children"] if x["title"] == "TV Shows"))
    playlists = next(iter(x for x in result["children"] if x["title"] == "Playlists"))
    special_keys = list(SPECIAL_METHODS.keys())
    # Browse into a special folder (server)
    msg_id += 1
    await websocket_client.send_json(
        {
            "id": msg_id,
            "type": "media_player/browse_media",
            "entity_id": media_players[0],
            ATTR_MEDIA_CONTENT_TYPE: "server",
            ATTR_MEDIA_CONTENT_ID: f"{DEFAULT_DATA[CONF_SERVER_IDENTIFIER]}:{special_keys[0]}",
        }
    )
    msg = await websocket_client.receive_json()
    assert msg["id"] == msg_id
    assert msg["type"] == TYPE_RESULT
    assert msg["success"]
    result = msg["result"]
    assert result[ATTR_MEDIA_CONTENT_TYPE] == "server"
    assert (
        result[ATTR_MEDIA_CONTENT_ID]
        == f"{DEFAULT_DATA[CONF_SERVER_IDENTIFIER]}:{special_keys[0]}"
    )
    assert len(result["children"]) == len(mock_plex_server.library.onDeck())
    # Browse into a special folder (library)
    requests_mock.get(
        f"{mock_plex_server.url_in_use}/library/sections/1/all?includeMeta=1",
        text=library_movies_filtertypes,
    )
    msg_id += 1
    library_section_id = next(iter(mock_plex_server.library.sections())).key
    await websocket_client.send_json(
        {
            "id": msg_id,
            "type": "media_player/browse_media",
            "entity_id": media_players[0],
            ATTR_MEDIA_CONTENT_TYPE: "library",
            ATTR_MEDIA_CONTENT_ID: f"{library_section_id}:{special_keys[1]}",
        }
    )
    msg = await websocket_client.receive_json()
    assert msg["id"] == msg_id
    assert msg["type"] == TYPE_RESULT
    assert msg["success"]
    result = msg["result"]
    assert result[ATTR_MEDIA_CONTENT_TYPE] == "library"
    assert result[ATTR_MEDIA_CONTENT_ID] == f"{library_section_id}:{special_keys[1]}"
    assert len(result["children"]) == len(
        mock_plex_server.library.sectionByID(library_section_id).recentlyAdded()
    )
    # Browse into a Plex TV show library
    msg_id += 1
    await websocket_client.send_json(
        {
            "id": msg_id,
            "type": "media_player/browse_media",
            "entity_id": media_players[0],
            ATTR_MEDIA_CONTENT_TYPE: tvshows[ATTR_MEDIA_CONTENT_TYPE],
            ATTR_MEDIA_CONTENT_ID: str(tvshows[ATTR_MEDIA_CONTENT_ID]),
        }
    )
    msg = await websocket_client.receive_json()
    assert msg["id"] == msg_id
    assert msg["type"] == TYPE_RESULT
    assert msg["success"]
    result = msg["result"]
    assert result[ATTR_MEDIA_CONTENT_TYPE] == "library"
    result_id = int(result[ATTR_MEDIA_CONTENT_ID])
    assert len(result["children"]) == len(
        mock_plex_server.library.sectionByID(result_id).all()
    ) + len(SPECIAL_METHODS)
    # Browse into a Plex TV show
    msg_id += 1
    await websocket_client.send_json(
        {
            "id": msg_id,
            "type": "media_player/browse_media",
            "entity_id": media_players[0],
            ATTR_MEDIA_CONTENT_TYPE: result["children"][-1][ATTR_MEDIA_CONTENT_TYPE],
            ATTR_MEDIA_CONTENT_ID: str(result["children"][-1][ATTR_MEDIA_CONTENT_ID]),
        }
    )
    msg = await websocket_client.receive_json()
    assert msg["id"] == msg_id
    assert msg["type"] == TYPE_RESULT
    assert msg["success"]
    result = msg["result"]
    assert result[ATTR_MEDIA_CONTENT_TYPE] == "show"
    result_id = int(result[ATTR_MEDIA_CONTENT_ID])
    assert result["title"] == mock_plex_server.fetch_item(result_id).title
    # Browse into a non-existent TV season
    unknown_key = 99999999999999
    requests_mock.get(
        f"{mock_plex_server.url_in_use}/library/metadata/{unknown_key}", status_code=404
    )
    msg_id += 1
    await websocket_client.send_json(
        {
            "id": msg_id,
            "type": "media_player/browse_media",
            "entity_id": media_players[0],
            ATTR_MEDIA_CONTENT_TYPE: result["children"][0][ATTR_MEDIA_CONTENT_TYPE],
            ATTR_MEDIA_CONTENT_ID: str(unknown_key),
        }
    )
    msg = await websocket_client.receive_json()
    assert msg["id"] == msg_id
    assert msg["type"] == TYPE_RESULT
    assert not msg["success"]
    assert msg["error"]["code"] == ERR_UNKNOWN_ERROR
    # Browse Plex playlists
    msg_id += 1
    await websocket_client.send_json(
        {
            "id": msg_id,
            "type": "media_player/browse_media",
            "entity_id": media_players[0],
            ATTR_MEDIA_CONTENT_TYPE: playlists[ATTR_MEDIA_CONTENT_TYPE],
            ATTR_MEDIA_CONTENT_ID: str(playlists[ATTR_MEDIA_CONTENT_ID]),
        }
    )
    msg = await websocket_client.receive_json()
    assert msg["id"] == msg_id
    assert msg["type"] == TYPE_RESULT
    assert msg["success"]
    result = msg["result"]
    assert result[ATTR_MEDIA_CONTENT_TYPE] == "playlists"
    result_id = result[ATTR_MEDIA_CONTENT_ID]
def load_footings_file(file: str):
    """Load footings generated file.

    :param str file: The path to the file.
    :return: A dict representing the respective file type.
    :rtype: dict
    .. seealso::
        :obj:`footings.testing.load_footings_json_file`
        :obj:`footings.testing.load_footings_xlsx_file`
    """
    # Dispatch on the file extension (e.g. ".json" or ".xlsx").
    extension = pathlib.Path(file).suffix
    return _load_footings_file(file_ext=extension, file=file)
def coupler(*, coupling: float = 0.5) -> SDict:
    """A simple 2x2 coupler model.

    *coupling* is the power fraction sent to the cross port; the field
    amplitudes are its square roots.
    """
    cross = coupling ** 0.5          # cross-port field amplitude
    through = (1 - coupling) ** 0.5  # through-port field amplitude
    smatrix = {
        ("in0", "out0"): through,
        ("in0", "out1"): 1j * cross,
        ("in1", "out0"): 1j * cross,
        ("in1", "out1"): through,
    }
    return reciprocal(smatrix)
def test_gen_weight_table_lis_no_intersect():
    """
    Checks generating weight table for LIS grid with no intersect
    """
    print("TEST 8: TEST GENERATE WEIGHT TABLE FOR LIS GRIDS WITH NO INTERSECT")
    generated_weight_table_file = os.path.join(OUTPUT_DATA_PATH,
                                               "weight_lis_no_intersect.csv")
    #rapid_connect
    rapid_connect_file = os.path.join(GIS_INPUT_DATA_PATH, "uk-no_intersect",
                                      "rapid_connect_45390.csv")
    lsm_grid = os.path.join(LSM_INPUT_DATA_PATH, "lis", "LIS_HIST_201101210000.d01.nc")
    # Generate the weight table from the LIS grid and catchment shapefile.
    CreateWeightTableLDAS(in_ldas_nc=lsm_grid,
                          in_nc_lon_var="lon",
                          in_nc_lat_var="lat",
                          in_catchment_shapefile=os.path.join(GIS_INPUT_DATA_PATH, 'uk-no_intersect', 'Catchment_thames_drainID45390.shp'),
                          river_id="DrainLnID",
                          in_connectivity_file=rapid_connect_file,
                          out_weight_table=generated_weight_table_file)
    # Compare the generated table against the stored reference solution.
    generated_weight_table_file_solution = os.path.join(COMPARE_DATA_PATH, "uk-no_intersect",
                                                        "weight_lis_no_intersect.csv")
    assert (compare_csv_decimal_files(generated_weight_table_file,
                                      generated_weight_table_file_solution))
    # Clean up the generated artifact.
    remove_files(generated_weight_table_file)
def new_client(request):
    """
    Register a new client account from the request payload.

    :param request: Who has made the request.
    :return: Response 200 with state and message on success; Response 400
        when parameters are missing or registration fails.
    """
    required_fields = ("email", "first_name", "last_name", "password")
    if any(field not in request.data for field in required_fields):
        return Response({"state": "Error", "message": "Missing parameters"}, status=HTTP_400_BAD_REQUEST)
    ok, message, username = queries.add_client(request.data)
    state, status = ("Success", HTTP_200_OK) if ok else ("Error", HTTP_400_BAD_REQUEST)
    return Response({"state": state, "message": message}, status=status)
def display_stats(stats):
    """Prints the stats of a pokemon to the user.
    If const.SHOW_IMAGES is set to True, displays the image of the pokemon to
    the user.
    Args:
        stats: a tuple of:
            -pokemon name (str)
            -species_id (int)
            -height (float)
            -weight (float)
            -type_1 (str)
            -type_2 (str)
            -url_image (str)
            -generation_id (int)
            -evolves_from_species_id (str)
    Returns:
        None
    """
    # This function has already been implemented for you. You don't need to do
    # anything with it except call it in the appropriate location!
    # Note: stats[6] (the image filename) is deliberately skipped in the text.
    text = (
        f'Pokemon name: {stats[0]}\n'
        f'Pokemon number: {stats[1]}\n'
        f'Height (in m): {stats[2]}\n'
        f'Weight (in kg): {stats[3]}\n'
        f'Type 1: {stats[4]}\n'
        f'Type 2: {stats[5]}\n'
        f'Generation: {stats[7]}\n'
        f'Evolves from: {stats[8]}\n'
    )
    print(text, end='')
    if const.SHOW_IMAGES:
        img_filename = stats[6]
        if not img_filename.endswith('.png'):
            print('No image for this Pokemon available')
        else:
            image = mpimg.imread(const.IMAGES_DIR + img_filename)
            plt.clf()
            plt.imshow(image)
            plt.show()
def test_build_Results():
    """Check that build_Results returns a properly labelled, filtered dataframe.

    The Twitter search is retried once after a short back-off because the
    first call can fail transiently (e.g. rate limiting / network hiccups).
    """
    try:
        search_term_df = ts().search_term("court")
    except Exception:  # was a bare `except:` — narrowed so SystemExit etc. escape
        time.sleep(10)
        search_term_df = ts().search_term("court")
    assert not search_term_df.empty
    preparedata = prep.PrepareData()
    built_df = preparedata.build_Results(search_term_df)
    col_names = built_df.columns.values.tolist()
    # The built frame must expose the canonical column set.
    assert "date" in col_names
    assert "text" in col_names
    assert "author" in col_names
    # No block-listed token may appear in either the author or text fields.
    for ind in built_df.index:
        for element in block_list:
            assert element not in built_df["author"][ind]
            assert element not in built_df["text"][ind]
def setup_info(nKPTs,nkpt_per_direction,KPTs,KPT_lengths,nkpoints,nbands,points,gaps,scissor):
"""
Print information about the system
"""
# BZ info
print("=== PATH ===")
print("directions: %d"%(nKPTs-1))
if np.array_equal(KPTs[-1],KPTs[0]): print("(Closed path)")
else: print("(Open path)")
print("ratios: ")
print(KPT_lengths)
# Bands info
print("=== BANDS ===")
print("nkpoints: %d"%nkpoints)
print("kpoint density per direction: %d"%nkpt_per_direction)
print("nbands: %d"%nbands)
if scissor is None: print("scissor shift: No")
else: print("scissor shift: Yes")
if gaps[0]>1.e-6:
print("direct band gap: %f eV"%gaps[0])
print("indirect band gap: %f eV"%gaps[1])
else:
print("direct band gap: %f eV"%gaps[0])
print("This is a metal.")
# Symmetry points info
print("=== PLOT ===")
print("Internal high-symmetry points at: ")
print(points) | 32,334 |
def parse_time_interval_seconds(time_str):
    """Convert a human-readable interval (e.g. '5m') to a number of seconds.

    :param time_str: the string to parse
    :returns: the number of seconds in the interval
    :raises ValueError: if the string could not be parsed
    """
    calendar = parsedatetime.Calendar()
    parsed = calendar.parseDT(time_str, sourceTime=datetime.min)
    if parsed[1] == 0:
        # A zero status flag means parsedatetime found no date/time in the input.
        raise ValueError("Could not understand time {time}".format(time=time_str))
    delta = parsed[0] - datetime.min
    return delta.total_seconds()
def initialize_seed(seed=0):
    """
    Make experiments comparable by seeding Python's, NumPy's and
    TensorFlow's random number generators with the same value.
    """
    random.seed(a=seed)
    numpy.random.seed(seed)
    # TensorFlow moved its seeding function between releases; probe the
    # known locations in order (TF1 spelling first, then the TF2 ones).
    # The elif chain keeps attribute access lazy, exactly as before.
    if hasattr(tf, 'set_random_seed'):
        tf_seeder = tf.set_random_seed
    elif hasattr(tf.random, 'set_random_seed'):
        tf_seeder = tf.random.set_random_seed
    elif hasattr(tf.random, 'set_seed'):
        tf_seeder = tf.random.set_seed
    else:
        raise AttributeError("Could not set seed for TensorFlow")
    tf_seeder(seed)
def _map_class_names_to_probabilities(probabilities: List[float]) -> Dict[str, float]:
    """Map each readable class name to its corresponding probability.

    Args:
        probabilities (List[float]): Probabilities for the best predicted classes.

    Returns:
        Dict[str, float]: Readable class names keyed to their probabilities.
    """
    # zip() stops at the shorter sequence, same as the original comprehension.
    return dict(zip(load_classes(), probabilities))
def unobscured_all_rebars_on_view(view, visible=True, solid=None):
    """Override the visibility of every rebar element on a view.

    :param view: The view whose rebar visibility is overridden
    :type view: DB.View
    :param visible: Whether the rebars should be visible
    :type visible: bool
    :param solid: Show rebars as solids (applies to 3D views only)
    :type solid: bool
    """
    rebars = get_all_rebar_on_view(view.Id)
    # Solid display only makes sense on a 3D view, and only when requested.
    apply_solid = view.ViewType == DB.ViewType.ThreeD and solid is not None
    count = 0
    for rebar in rebars:
        unobscured_rebar_on_view(rebar, view, visible=visible)
        if apply_solid:
            solid_rebar_on_view(rebar, view, solid=solid)
        count += 1
    logging.info('У {} арм. установлена видимость <{}> на виде "{}" #{}'.format(
        count, visible, view.Name, view.Id))
def orca_printbas(fname, at):
    """
    Appends the basis-set block for one element to the input file input.com.

    Parameters:
    fname (char): Basis set name (a resource file inside the `basis` package)
    at (char): Symbol of the element
    """
    #bfile = pkg_resources.open_text(templates, 'GTBAS1')
    #bfile_r = pkg_resources.read_text(templates, 'GTBAS1')
    # Copy the packaged basis-set text into a scratch file so it can be
    # re-read below (both line-by-line and via linecache).
    bfile_r = pkg_resources.read_text(basis, fname)
    with open("temp_bas","w") as nbfile:
        nbfile.write(bfile_r)
    #basisSet_fpath = fname
    # Look for the exact "NewGTO <element>" header line for this element.
    start_phrase = "NewGTO "+ at
    #print(start_phrase)
    num_lines_bas = sum(1 for line_tmp1 in open("temp_bas","r"))
    for temp_num, temp_l in enumerate(open("temp_bas","r")):
        if start_phrase in temp_l.strip():
            if start_phrase == temp_l.strip():
                # 1-based line number of the header (linecache is 1-based),
                # so the copy below starts at the "NewGTO" line itself.
                bas_start_lno = temp_num+1
                break
    # NOTE(review): if the element is absent from the basis file,
    # bas_start_lno is never bound and the loop below raises NameError.
    with open("input.com", "a") as new_f:
        linecache.clearcache()
        # Copy lines from the header up to (but excluding) the "end" marker.
        for l1 in range(bas_start_lno,num_lines_bas):
            req_line_1 = linecache.getline("temp_bas", l1)
            if "end" in req_line_1.strip():
                break
            else:
                new_f.write(req_line_1)
        new_f.write(" end\n")
    # NOTE(review): shell-based cleanup; os.remove("temp_bas") would avoid
    # the subshell and be portable.
    os.system("rm -f temp_bas")
def order_items(records):
    """Return *records* as an OrderedDict ordered by ascending key (SHA256)."""
    ordered_keys = sorted(records)
    return collections.OrderedDict((key, records[key]) for key in ordered_keys)
def G2DListMutatorRealGaussianGradient(genome, **args):
    """ A gaussian gradient mutator for G2DList of Real

    Accepts the *rangemin* and *rangemax* genome parameters, both optional.

    Each mutated gene is multiplied by abs(gauss(mu, sigma)) (roughly
    gauss(1.0, 0.0333)), producing a smooth multiplicative drift about the
    current value, clamped to [rangemin, rangemax].

    Returns the number of mutations applied (int).
    """
    if args["pmut"] <= 0.0:
        return 0
    height, width = genome.getSize()
    elements = height * width
    # Expected number of mutations for this genome at probability pmut.
    mutations = args["pmut"] * elements
    mu = constants.CDefGaussianGradientMU
    sigma = constants.CDefGaussianGradientSIGMA
    if mutations < 1.0:
        # Fewer than one expected mutation: flip a biased coin per gene
        # instead, counting how many actually fire.
        mutations = 0
        for i in xrange(genome.getHeight()):
            for j in xrange(genome.getWidth()):
                if utils.randomFlipCoin(args["pmut"]):
                    final_value = genome[i][j] * abs(prng.normal(mu, sigma))
                    # Clamp the drifted value into the allowed range.
                    final_value = min(final_value, genome.getParam("rangemax", constants.CDefRangeMax))
                    final_value = max(final_value, genome.getParam("rangemin", constants.CDefRangeMin))
                    genome.setItem(i, j, final_value)
                    mutations += 1
    else:
        # One or more expected mutations: mutate that many randomly chosen
        # cells (cells may be picked more than once).
        for it in xrange(int(round(mutations))):
            which_x = prng.randint(0, genome.getWidth())
            which_y = prng.randint(0, genome.getHeight())
            final_value = genome[which_y][which_x] * abs(prng.normal(mu, sigma))
            final_value = min(final_value, genome.getParam("rangemax", constants.CDefRangeMax))
            final_value = max(final_value, genome.getParam("rangemin", constants.CDefRangeMin))
            genome.setItem(which_y, which_x, final_value)
    return int(mutations)
def convert_group_by(response, field):
    """
    Convert a search response into a list of {key, doc_count} buckets,
    sorted by descending count.
    """
    hits = response.hits.hits
    if not hits:
        return []
    source = hits[0]._source.to_dict()
    stats = source.get(field)
    buckets = [{"key": name, "doc_count": value} for name, value in stats.items()]
    buckets.sort(key=lambda bucket: bucket["doc_count"], reverse=True)
    return buckets
def get_different_columns(
        meta_subset1: pd.DataFrame,
        meta_subset2: pd.DataFrame,
        common_cols: list) -> list:
    """Find which metadata columns have the
    same name but their content differ.

    Parameters
    ----------
    meta_subset1 : pd.DataFrame
        A metadata table
    meta_subset2 : pd.DataFrame
        Another metadata table
    common_cols : list
        Metadata columns that are in common
        between the two metadata tables

    Returns
    -------
    diff_cols : list
        Metadata columns that are
        different in contents.
    """
    diff_cols = []
    for col in common_cols:
        try:
            values1 = meta_subset1[col].tolist()
            values2 = meta_subset2[col].tolist()
        except Exception:
            # Narrowed from a bare `except:` (which also swallowed
            # SystemExit/KeyboardInterrupt); dump the offending column so
            # the failure is diagnosable before aborting.
            print(meta_subset1[col])
            sys.exit(1)
        if values1 != values2:
            diff_cols.append(col)
    return diff_cols
def _parse_single(argv, args_array, opt_def_dict, opt_val):
    """Function: _parse_single

    Description: Processes a single-value argument from the command line,
        adding the option (and its value, when present) to args_array.

        NOTE: Used by the arg_parse2() to reduce the complexity rating.

    Arguments:
        (input) argv -> Arguments from the command line.
        (input) args_array -> Array of command line options and values.
        (input) opt_def_dict -> Dict with options and default values.
        (input) opt_val -> List of options allow None or 1 value for option.
        (output) argv -> Arguments from the command line.
        (output) args_array -> Array of command line options and values.
    """
    argv = list(argv)
    args_array = dict(args_array)
    opt_def_dict = dict(opt_def_dict)
    opt_val = list(opt_val)
    option = argv[0]
    # The following token is this option's value unless it looks like
    # another option (leading "-") and is not a negative integer.
    has_value = len(argv) >= 2 and (argv[1][0] != "-" or gen_libs.chk_int(argv[1]))
    if has_value:
        args_array[option] = argv[1]
        argv = argv[1:]
    elif option in opt_val:
        # Option explicitly allows a bare/None value.
        args_array[option] = None
    else:
        # Fall back to the option's default value, if one is defined.
        args_array = arg_default(option, args_array, opt_def_dict)
    return argv, args_array
def sort_sentence(sentence):
    """Split *sentence* into words and return them sorted.

    Fixes two syntax errors in the original: the missing colon on the
    `def` line and the `reutrn` typo.
    """
    words = break_words(sentence)
    return sort_words(words)
def print_first_and_last(sentence):
    """Print the first and last words of *sentence*."""
    # The original carried three near-identical docstring strings; only the
    # first is a docstring, the rest were no-op statements and are removed.
    words = break_words(sentence)
    print_first_word(words)
    print_last_word(words)
def compOverValueTwoSets(setA={1, 2, 3, 4}, setB={3, 4, 5, 6}):
    """
    task 0.5.9
    Comprehension whose value is the intersection of setA and setB,
    computed without using the '&' operator.
    """
    # Members of setA that also appear in setB == the intersection.
    return {element for element in setA if element in setB}
def d_beta():
    """Constant 'D BETA' from the Vensim model (original eqn: 0.05).

    Units: dimensionless; Limits: (None, None); Type: constant.
    """
    value = 0.05
    return value
def _get_data(filename):
    """Read a two-column CSV of (eccentricity, x) pairs.

    :param filename: name of a comma-separated data file with two columns:
        eccentricity and some other quantity x
    :return: (eccentricities, x) as two numpy float arrays
    """
    eccentricities = []
    x = []
    # `csv_file` was named `file`, which shadowed the builtin.
    with open(filename) as csv_file:
        for row in csv.reader(csv_file):
            eccentricities.append(float(row[0]))
            x.append(float(row[1]))
    return np.array(eccentricities), np.array(x)
def longestCommonPrefix(strs):
    """Return the longest common prefix of the strings in *strs*.

    :type strs: List[str]
    :rtype: str — '' when strs is empty or the strings share no prefix
    """
    if not strs:  # was `len(strs) > 0` with an else branch
        return ''
    prefix = strs[0]
    for s in strs[1:]:  # loop variable was `str`, shadowing the builtin
        while not s.startswith(prefix):
            prefix = prefix[:-1]
        if not prefix:
            # No common prefix is possible any more; stop early.
            return ''
    return prefix
def analytics_dashboard(request):
    """Render the landing page for analytics-related views."""
    return render(request, 'analytics/analyzer/dashboard.html')
def insert(shape, axis=-1):
    """Return *shape* (a tuple) with a new length-1 axis spliced in at *axis*.

    NOTE(review): with a negative axis the new axis lands *before* position
    `axis` (e.g. the default -1 yields (..., 1, last)), which differs from
    numpy.expand_dims — confirm this is the intended convention.
    """
    head = shape[:axis]
    tail = shape[axis:]
    return head + (1,) + tail
def simplify_text(text):
    """Strip HTML tags and all non-alphabetic characters, then lowercase.

    :param text: any value; it is coerced with str() first
    :return: lowercase letters-only string
    """
    without_tags = re.sub('<[^<]+?>', '', str(text))
    letters_only = re.sub(r"[^a-zA-Z]+", "", str(without_tags))
    return letters_only.lower()
def sym2img_check(session, scan, data_manager):
    """Check sym2img conversion at four increasing levels of label masking."""
    # Each step zeroes one more label channel than the previous one.
    label_kwargs = [
        {"wall_color": 0},
        {"wall_color": 0, "floor_color": 0},
        {"wall_color": 0, "floor_color": 0, "obj_color": 0},
        {"wall_color": 0, "floor_color": 0, "obj_color": 0, "obj_id": 0},
    ]
    for index, kwargs in enumerate(label_kwargs):
        labels = data_manager.get_labels(**kwargs)
        sym2img_check_sub(session, scan, labels, "sym2img{}.png".format(index))
def is_numpy_convertable(v):
    """
    Return whether a value is meaningfully convertable to a numpy array
    via 'numpy.array', i.e. whether it exposes the array protocol.
    """
    array_protocol_attrs = ("__array__", "__array_interface__")
    return any(hasattr(v, attr) for attr in array_protocol_attrs)
def grower(array):
    """Grow masked (truthy) regions of a 2-D array by one pixel.

    Uses a 4-connected (cross-shaped) kernel, so each nonzero cell also
    turns on its vertical and horizontal neighbours.

    :param array: 2-D numeric/boolean mask
    :return: 2-D boolean mask with regions dilated by one pixel
    """
    # The kernel was previously named `grower`, shadowing this function.
    kernel = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    dilated = convolve2d(array, kernel, mode="same")
    return dilated != 0
def SMAPELossFlat(*args, axis=-1, floatify=True, **kwargs):
    """Same as `smape`, but flattens input and target.

    NOTE(review): flagged by the original author as not working yet.
    `is_2d=False` asks BaseLoss to flatten before applying `smape`.
    """
    return BaseLoss(smape, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs)
def get_fake_datetime(now: datetime):
    """Build a monkey-patch stand-in for `datetime.datetime`.

    The returned class's now() and utcnow() always return the given value.
    """
    class FakeDatetime:
        """Fake datetime.datetime pinned to a single fixed instant."""

        @classmethod
        def now(cls):
            """Return the pinned value."""
            return now

        @classmethod
        def utcnow(cls):
            """Return the pinned value."""
            return now

    return FakeDatetime
def generate_order_by(fields: List[str], sort_orders: List[str], table_pre: str = '') -> str:
    """Build a SQL ORDER BY clause.

    Args:
        fields: column names to sort by
        sort_orders: matching list of 'asc'/'desc' values (one per field)
        table_pre: optional table prefix applied to each column

    Returns:
        The ORDER BY clause, or '' when no fields are given.

    Raises:
        PGsqlOrderByExcept: when a sort order is not ASC/DESC.
    """
    if not fields:
        return ''
    clauses = []
    for index, field in enumerate(fields):
        sort_order = sort_orders[index]
        direction = sort_order.upper()
        if direction not in ['ASC', 'DESC']:
            raise PGsqlOrderByExcept(f'sort_order value should consist of ASC or DESC but he {sort_order}')
        if table_pre:
            clauses.append(f"{table_pre}.{field} {direction}")
        else:
            clauses.append(f"{field} {direction}")
    return "ORDER BY " + ", ".join(clauses)
def toUnicode(glyph, isZapfDingbats=False):
    """Convert a glyph name to Unicode, e.g. 'longs_t.oldstyle' --> u'ſt'.

    If isZapfDingbats is True, the implementation recognizes additional
    glyph names (as required by the AGL specification).
    """
    # AGL mapping (https://github.com/adobe-type-tools/agl-specification#2-the-mapping):
    # 1. drop everything from the first '.' (FULL STOP) onwards;
    # 2. split the remainder on '_' (LOW LINE);
    # 3. map each component and concatenate the results.
    base_name = glyph.split(".", 1)[0]
    return "".join(
        _glyphComponentToUnicode(component, isZapfDingbats)
        for component in base_name.split("_")
    )
def list_watchlist_items_command(client, args):
    """
    Get a specific watchlist item, or the list of a watchlist's items.

    :param client: (AzureSentinelClient) The Azure Sentinel client to work with.
    :param args: (dict) arguments for this command.
    """
    alias = args.get('watchlist_alias', '')
    item_id = args.get('watchlist_item_id')
    # A concrete item id narrows the request down to that single item.
    url_suffix = f'watchlists/{alias}/watchlistItems'
    if item_id:
        url_suffix = f'{url_suffix}/{item_id}'
    result = client.http_request('GET', url_suffix)
    raw_items = [result] if item_id else result.get('value')
    items = [{'WatchlistAlias': alias, **watchlist_item_data_to_xsoar_format(item)} for item in raw_items]
    readable_output = tableToMarkdown('Watchlist items results', items,
                                      headers=['ID', 'ItemsKeyValue'],
                                      headerTransform=pascalToSpace,
                                      removeNull=True)
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix='AzureSentinel.WatchlistItem',
        outputs=items,
        outputs_key_field='ID',
        raw_response=result
    )
def blit_array(surface, array):
    """
    Generates image pixels from a JNumeric array.
    Arguments include destination Surface and array of integer colors.
    JNumeric required as specified in numeric module.
    """
    if not _initialized:
        _init()
    if len(array.shape) == 2:
        # 2-D array: values are already packed integer colors; transpose to
        # match the surface's row-major pixel order, then flatten.
        data = numeric.transpose(array, (1,0))
        data = numeric.ravel(data)
    else:
        # 3-D array: pack the R, G, B channels into single 0xRRGGBB ints.
        data = array[:,:,0]*0x10000 | array[:,:,1]*0x100 | array[:,:,2]
        data = numeric.transpose(data, (1,0))
        data = numeric.ravel(data)
    if not surface.getColorModel().hasAlpha():
        surface.setRGB(0, 0, surface.width, surface.height, data, 0, surface.width)
    else:
        # NOTE(review): for alpha surfaces the pixels are written to an
        # intermediate RGB surface and drawn on top — presumably to avoid
        # setRGB clobbering the alpha channel; confirm against callers.
        surf = Surface((surface.width,surface.height), BufferedImage.TYPE_INT_RGB)
        surf.setRGB(0, 0, surface.width, surface.height, data, 0, surface.width)
        g2d = surface.createGraphics()
        g2d.drawImage(surf, 0, 0, None)
        g2d.dispose()
    return None
def soerp_numeric(slc, sqc, scp, var_moments, func0, title=None, debug=False,
                  silent=False):
    """
    This performs the same moment calculations, but expects that all input
    derivatives and moments have been put in standardized form. It can also
    describe the variance contributions and print out any output distribution
    information, both raw and central moments.

    Parameters
    ----------
    slc : array
        1st-order standardized derivatives (i.e., multiplied by the standard
        deviation of the related input)
    sqc : array
        2nd-order derivatives (i.e., multiplied by the standard
        deviation squared, or variance, of the related input)
    scp : 2d-array
        2nd-order cross-derivatives (i.e., multiplied by the two standard
        deviations of the related inputs)
    var_moments : 2-d array
        Standardized moments where row[i] contains the first 9 moments of
        variable x[i]. FYI: the first 3 values should always be [1, 0, 1]
    func0 : scalar
        System mean (i.e. value of the system evaluated at the means of all
        the input variables)

    Optional
    --------
    title : str
        Identifier for results that get printed to the screen
    debug : bool, false by default
        If true, all intermediate calculation results get printed to the screen
    silent : bool, false by default
        If true, nothing gets printed to the screen (overrides debug).

    Returns
    -------
    moments : list
        The first four standard moments (mean, variance, skewness and kurtosis
        coefficients)

    Example
    -------
    Example taken from the original SOERP user guide by N. D. Cox:
        >>> norm_moments = [1, 0, 1, 0, 3, 0, 15, 0, 105]
        >>> lc = [-802.65, -430.5]
        >>> qc = [205.54, 78.66]
        >>> cp = np.array([[0, -216.5], [-216.5, 0]])
        >>> vm = np.array([norm_moments, norm_moments])
        >>> f0 = 4152
        >>> soerp_numeric(lc, qc, cp, vm, f0,
        ...     title='EXAMPLE FROM ORIGINAL SOERP USER GUIDE')
        ********************************************************************************
        **************** SOERP: EXAMPLE FROM ORIGINAL SOERP USER GUIDE *****************
        ********************************************************************************
        Variance Contribution of lc[x0]: 66.19083%
        Variance Contribution of lc[x1]: 19.04109%
        Variance Contribution of qc[x0]: 8.68097%
        Variance Contribution of qc[x1]: 1.27140%
        Variance Contribution of cp[x0, x1]: 4.81572%
        ********************************************************************************
        MEAN-INTERCEPT (EDEL1).................... 2.8420000E+02
        MEAN...................................... 4.4362000E+03
        SECOND MOMENT (EDEL2)..................... 1.0540873E+06
        VARIANCE (VARDL).......................... 9.7331770E+05
        STANDARD DEVIATION (RTVAR)................ 9.8656865E+02
        THIRD MOMENT (EDEL3)...................... 1.4392148E+09
        THIRD CENTRAL MOMENT (MU3DL).............. 5.8640938E+08
        COEFFICIENT OF SKEWNESS SQUARED (BETA1)... 3.7293913E-01
        COEFFICIENT OF SKEWNESS (RTBT1)........... 6.1068742E-01
        FOURTH MOMENT (EDEL4)..................... 5.0404781E+12
        FOURTH CENTRAL MOMENT (MU4DL)............. 3.8956371E+12
        COEFFICIENT OF KURTOSIS (BETA2)........... 4.1121529E+00
        ********************************************************************************
    """
    if not silent:
        print('\n', '*'*80)
        if title:
            print('{:*^80}'.format(' SOERP: ' + title + ' '))
    ############################
    # Raw moments 0..4 of the output distribution.
    vy = np.empty(5)
    if debug and not silent:
        print('*'*80)
    for k in range(5):
        vy[k] = rawmoment(slc, sqc, scp, var_moments, k)
        if debug and not silent:
            print('Raw Moment {}: {}'.format(k, vy[k]))
    ############################
    # Central moments derived from the raw moments.
    vz = np.empty(5)
    if debug and not silent:
        print('*'*80)
    for k in range(5):
        vz[k] = centralmoment(vy, k)
        if debug and not silent:
            print('Central Moment {}: {}'.format(k, vz[k]))
    sysmean = float(vy[1] + func0)
    ############################
    # Calculate variance contributions
    vc_lc, vc_qc, vc_cp = variance_components(slc, sqc, scp, var_moments, vz)
    vlc, vqc, vcp = variance_contrib(vc_lc, vc_qc, vc_cp, vz)
    n = len(slc)
    if not silent:
        print('*'*80)
        for i in range(n):
            print('Variance Contribution of lc[x{:d}]: {:7.5%}'.format(i, vlc[i]))
        for i in range(n):
            print('Variance Contribution of qc[x{:d}]: {:7.5%}'.format(i, vqc[i]))
        for i in range(n - 1):
            for j in range(i + 1, n):
                print('Variance Contribution of cp[x{:d}, x{:d}]: {:7.5%}'.format(i, j, vcp[i, j]))
    ############################
    stdev = vz[2]**(0.5)
    # Guard against a zero-variance (degenerate) distribution, where the
    # skewness/kurtosis ratios would divide by zero.
    if stdev:
        rtbt1 = vz[3]/vz[2]**(1.5)
        beta2 = vz[4]/vz[2]**2
    else:
        rtbt1 = 0.0
        beta2 = 0.0
    beta1 = rtbt1**2
    if not silent:
        print('*'*80)
        print('MEAN-INTERCEPT (EDEL1)....................','{: 8.7E}'.format(vy[1]))
        print('MEAN......................................','{: 8.7E}'.format(sysmean))
        print('SECOND MOMENT (EDEL2).....................','{: 8.7E}'.format(vy[2]))
        print('VARIANCE (VARDL)..........................','{: 8.7E}'.format(vz[2]))
        print('STANDARD DEVIATION (RTVAR)................','{: 8.7E}'.format(stdev))
        print('THIRD MOMENT (EDEL3)......................','{: 8.7E}'.format(vy[3]))
        print('THIRD CENTRAL MOMENT (MU3DL)..............','{: 8.7E}'.format(vz[3]))
        print('COEFFICIENT OF SKEWNESS SQUARED (BETA1)...','{: 8.7E}'.format(beta1))
        print('COEFFICIENT OF SKEWNESS (RTBT1)...........','{: 8.7E}'.format(rtbt1))
        print('FOURTH MOMENT (EDEL4).....................','{: 8.7E}'.format(vy[4]))
        print('FOURTH CENTRAL MOMENT (MU4DL).............','{: 8.7E}'.format(vz[4]))
        print('COEFFICIENT OF KURTOSIS (BETA2)...........','{: 8.7E}'.format(beta2))
        print('*'*80)
    return [sysmean, vz[2], rtbt1, beta2]
def generate_IO_examples(program, N, L, V):
    """Randomly generate N input-output examples for *program*.

    :param program: object with .ins (input types), .bounds (per-argument
        (min, max) value ranges), .fun (the program itself), and .out
        (output type)
    :param N: number of IO pairs to generate
    :param L: length used for list-typed inputs
    :param V: bound that output values must respect
    :return: list of (input_value, output_value) pairs
    """
    input_types = program.ins
    input_nargs = len(input_types)
    # Generate N input-output pairs
    IO = []
    for _ in range(N):
        input_value = [None] * input_nargs
        for a in range(input_nargs):
            minv, maxv = program.bounds[a]
            if input_types[a] == int:
                input_value[a] = np.random.randint(minv, maxv)
            elif input_types[a] == [int]:
                input_value[a] = list(np.random.randint(minv, maxv, size=L))
            else:
                # BUG FIX: the original concatenated a type object onto a
                # str, which raised TypeError instead of the intended message.
                raise Exception("Unsupported input type " + str(input_types[a]) +
                                " for random input generation")
        output_value = program.fun(input_value)
        IO.append((input_value, output_value))
    # NOTE(review): as in the original, only the final example's output is
    # bound-checked here — confirm whether all examples should be checked.
    assert (program.out == int and output_value <= V) or (program.out == [int] and len(output_value) == 0) or (program.out == [int] and max(output_value) <= V)
    return IO
def populate_runtime_info(query, impala, converted_args, timeout_secs=maxint):
  """Runs the given query by itself repeatedly until the minimum memory is determined
  with and without spilling. Potentially all fields in the Query class (except
  'sql') will be populated by this method. 'required_mem_mb_without_spilling' and
  the corresponding runtime field may still be None if the query could not be run
  without spilling.
  converted_args.samples and converted_args.max_conflicting_samples control the
  reliability of the collected information. The problem is that memory spilling or usage
  may differ (by a large amount) from run to run due to races during execution. The
  parameters provide a way to express "X out of Y runs must have resulted in the same
  outcome". Increasing the number of samples and decreasing the tolerance (max conflicts)
  increases confidence but also increases the time to collect the data.
  """
  LOG.info("Collecting runtime info for query %s: \n%s", query.name, query.sql)
  samples = converted_args.samples
  max_conflicting_samples = converted_args.max_conflicting_samples
  results_dir = converted_args.results_dir
  mem_limit_eq_threshold_mb = converted_args.mem_limit_eq_threshold_mb
  mem_limit_eq_threshold_percent = converted_args.mem_limit_eq_threshold_percent
  runner = QueryRunner(impalad=impala.impalads[0], results_dir=results_dir,
                       common_query_options=converted_args.common_query_options,
                       test_admission_control=converted_args.test_admission_control,
                       use_kerberos=converted_args.use_kerberos, check_if_mem_was_spilled=True)
  runner.connect()
  # Binary-search state: bounds on the mem limit discovered so far.
  limit_exceeded_mem = 0
  non_spill_mem = None
  spill_mem = None
  report = None
  mem_limit = None
  old_required_mem_mb_without_spilling = query.required_mem_mb_without_spilling
  old_required_mem_mb_with_spilling = query.required_mem_mb_with_spilling
  profile_error_prefix = query.logical_query_id + "_binsearch_error"
  # TODO: This method is complicated enough now that breaking it out into a class may be
  # helpful to understand the structure.
  def update_runtime_info():
    # Record the best (lowest) mem limit seen so far for the current
    # spilled / non-spilled outcome of `report`.
    required_mem = min(mem_limit, impala.min_impalad_mem_mb)
    if report.mem_was_spilled:
      if (
          query.required_mem_mb_with_spilling is None or
          required_mem < query.required_mem_mb_with_spilling
      ):
        query.required_mem_mb_with_spilling = required_mem
        query.solo_runtime_secs_with_spilling = report.runtime_secs
        query.solo_runtime_profile_with_spilling = report.profile
    elif (
        query.required_mem_mb_without_spilling is None or
        required_mem < query.required_mem_mb_without_spilling
    ):
      query.required_mem_mb_without_spilling = required_mem
      query.solo_runtime_secs_without_spilling = report.runtime_secs
      assert report.runtime_secs is not None, report
      query.solo_runtime_profile_without_spilling = report.profile
  def get_report(desired_outcome=None):
    # Run the query up to `samples` times and return the median-runtime
    # report of the dominant outcome, or None when the dominant (or
    # desired) outcome can no longer reach the required majority.
    reports_by_outcome = defaultdict(list)
    leading_outcome = None
    for remaining_samples in xrange(samples - 1, -1, -1):
      report = runner.run_query(query, mem_limit, run_set_up=True,
                                timeout_secs=timeout_secs, retain_profile=True)
      if report.timed_out:
        report.write_query_profile(
            os.path.join(results_dir, PROFILES_DIR), profile_error_prefix)
        raise QueryTimeout(
            "query {0} timed out during binary search".format(query.logical_query_id))
      if report.other_error:
        report.write_query_profile(
            os.path.join(results_dir, PROFILES_DIR), profile_error_prefix)
        raise Exception(
            "query {0} errored during binary search: {1}".format(
                query.logical_query_id, str(report.other_error)))
      LOG.debug("Spilled: %s" % report.mem_was_spilled)
      if not report.has_query_error():
        if query.result_hash is None:
          query.result_hash = report.result_hash
        elif query.result_hash != report.result_hash:
          report.write_query_profile(
              os.path.join(results_dir, PROFILES_DIR), profile_error_prefix)
          raise Exception(
              "Result hash mismatch for query %s; expected %s, got %s" %
              (query.logical_query_id, query.result_hash, report.result_hash))
      if report.not_enough_memory:
        outcome = "EXCEEDED"
      elif report.mem_was_spilled:
        outcome = "SPILLED"
      else:
        outcome = "NOT_SPILLED"
      reports_by_outcome[outcome].append(report)
      if not leading_outcome:
        leading_outcome = outcome
        continue
      if len(reports_by_outcome[outcome]) > len(reports_by_outcome[leading_outcome]):
        leading_outcome = outcome
      if len(reports_by_outcome[leading_outcome]) + max_conflicting_samples == samples:
        break
      if (
          len(reports_by_outcome[leading_outcome]) + remaining_samples <
          samples - max_conflicting_samples
      ):
        return
      if desired_outcome \
          and len(reports_by_outcome[desired_outcome]) + remaining_samples \
              < samples - max_conflicting_samples:
        return
    reports = reports_by_outcome[leading_outcome]
    reports.sort(key=lambda r: r.runtime_secs)
    # Python 2 integer division picks the median report.
    return reports[len(reports) / 2]
  # Phase 1: find any mem limit at which the query runs at all, doubling
  # from the estimate until it fits (skipped when prior values exist).
  if not any((old_required_mem_mb_with_spilling, old_required_mem_mb_without_spilling)):
    mem_estimate = estimate_query_mem_mb_usage(query, runner.impalad_conn)
    LOG.info("Finding a starting point for binary search")
    mem_limit = min(mem_estimate, impala.min_impalad_mem_mb) or impala.min_impalad_mem_mb
    while True:
      LOG.info("Next mem_limit: {0}".format(mem_limit))
      report = get_report()
      if not report or report.not_enough_memory:
        if report and report.not_enough_memory:
          limit_exceeded_mem = mem_limit
        if mem_limit == impala.min_impalad_mem_mb:
          LOG.warn(
              "Query couldn't be run even when using all available memory\n%s", query.sql)
          return
        mem_limit = min(2 * mem_limit, impala.min_impalad_mem_mb)
        continue
      update_runtime_info()
      if report.mem_was_spilled:
        spill_mem = mem_limit
      else:
        non_spill_mem = mem_limit
      break
  # Phase 2: binary search for the minimum memory that avoids spilling.
  LOG.info("Finding minimum memory required to avoid spilling")
  lower_bound = max(limit_exceeded_mem, spill_mem)
  upper_bound = min(non_spill_mem or maxint, impala.min_impalad_mem_mb)
  while True:
    if old_required_mem_mb_without_spilling:
      mem_limit = old_required_mem_mb_without_spilling
      old_required_mem_mb_without_spilling = None
    else:
      mem_limit = (lower_bound + upper_bound) / 2
    LOG.info("Next mem_limit: {0}".format(mem_limit))
    should_break = mem_limit / float(upper_bound) > 1 - mem_limit_eq_threshold_percent \
        or upper_bound - mem_limit < mem_limit_eq_threshold_mb
    report = get_report(desired_outcome=("NOT_SPILLED" if spill_mem else None))
    if not report:
      lower_bound = mem_limit
    elif report.not_enough_memory:
      lower_bound = mem_limit
      limit_exceeded_mem = mem_limit
    else:
      update_runtime_info()
      if report.mem_was_spilled:
        lower_bound = mem_limit
        spill_mem = min(spill_mem, mem_limit)
      else:
        upper_bound = mem_limit
        non_spill_mem = mem_limit
    if mem_limit == impala.min_impalad_mem_mb:
      break
    if should_break:
      if non_spill_mem:
        break
      lower_bound = upper_bound = impala.min_impalad_mem_mb
  # This value may be updated during the search for the absolute minimum.
  LOG.info(
      "Minimum memory to avoid spilling: %s MB" % query.required_mem_mb_without_spilling)
  # Phase 3: binary search for the absolute minimum memory (with spilling).
  LOG.info("Finding absolute minimum memory required")
  lower_bound = limit_exceeded_mem
  upper_bound = min(
      spill_mem or maxint, non_spill_mem or maxint, impala.min_impalad_mem_mb)
  while True:
    if old_required_mem_mb_with_spilling:
      mem_limit = old_required_mem_mb_with_spilling
      old_required_mem_mb_with_spilling = None
    else:
      mem_limit = (lower_bound + upper_bound) / 2
    LOG.info("Next mem_limit: {0}".format(mem_limit))
    should_break = mem_limit / float(upper_bound) > 1 - mem_limit_eq_threshold_percent \
        or upper_bound - mem_limit < mem_limit_eq_threshold_mb
    report = get_report(desired_outcome="SPILLED")
    if not report or report.not_enough_memory:
      lower_bound = mem_limit
    else:
      update_runtime_info()
      upper_bound = mem_limit
    if should_break:
      if not query.required_mem_mb_with_spilling:
        if upper_bound - mem_limit < mem_limit_eq_threshold_mb:
          # IMPALA-6604: A fair amount of queries go down this path.
          LOG.info(
              "Unable to find a memory limit with spilling within the threshold of {0} "
              "MB. Using the same memory limit for both.".format(
                  mem_limit_eq_threshold_mb))
        query.required_mem_mb_with_spilling = query.required_mem_mb_without_spilling
        query.solo_runtime_secs_with_spilling = query.solo_runtime_secs_without_spilling
        query.solo_runtime_profile_with_spilling = \
            query.solo_runtime_profile_without_spilling
      break
  LOG.info("Minimum memory is %s MB" % query.required_mem_mb_with_spilling)
  # NOTE(review): the first two conditions below are identical; one was
  # probably meant to check required_mem_mb_with_spilling — confirm.
  if (
      query.required_mem_mb_without_spilling is not None and
      query.required_mem_mb_without_spilling is not None and
      query.required_mem_mb_without_spilling < query.required_mem_mb_with_spilling
  ):
    # Query execution is not deterministic and sometimes a query will run without spilling
    # at a lower mem limit than it did with spilling. In that case, just use the lower
    # value.
    LOG.info(
        "A lower memory limit to avoid spilling was found while searching for"
        " the absolute minimum memory.")
    query.required_mem_mb_with_spilling = query.required_mem_mb_without_spilling
    query.solo_runtime_secs_with_spilling = query.solo_runtime_secs_without_spilling
    query.solo_runtime_profile_with_spilling = query.solo_runtime_profile_without_spilling
  LOG.debug("Query after populating runtime info: %s", query)
def secrecy_capacity(dist, rvs=None, crvs=None, rv_mode=None, niter=None, bound_u=None):
    """
    The rate at which X and Y can agree upon a key with Z eavesdropping,
    and no public communication.

    Parameters
    ----------
    dist : Distribution
        The distribution of interest.
    rvs : iterable of iterables, len(rvs) == 2
        The indices of the random variables agreeing upon a secret key.
    crvs : iterable
        The indices of the eavesdropper.
    rv_mode : str, None
        Specifies how to interpret `rvs` and `crvs`. Valid options are:
        {'indices', 'names'}. If equal to 'indices', then the elements of
        `crvs` and `rvs` are interpreted as random variable indices. If
        equal to 'names', the the elements are interpreted as random
        variable names. If `None`, then the value of `dist._rv_mode` is
        consulted, which defaults to 'indices'.
    niter : int, None
        The number of hops to perform during optimization.
    bound_u : int, None
        The bound to use on the size of the variable U. If none, use the
        theoretical bound of |X|.

    Returns
    -------
    sc : float
        The secrecy capacity.
    """
    # The undirected capacity is the better of the two directed capacities.
    forward = secrecy_capacity_directed(dist, rvs[0], rvs[1], crvs,
                                        rv_mode=rv_mode, niter=niter, bound_u=bound_u)
    backward = secrecy_capacity_directed(dist, rvs[1], rvs[0], crvs,
                                         rv_mode=rv_mode, niter=niter, bound_u=bound_u)
    return max(forward, backward)
def cli(app, environment, branch, open_deploy):
    """
    Deploy an application to an environment.
    """
    config = load_config()
    try:
        client = VMFarmsAPIClient.from_config(config)
        applications = client.get('applications')['results']
        # StopIteration from next() is caught below and reported as a bad app name.
        selected = next(entry for entry in applications if entry['name'] == app)
        assert environment in selected['environments'], 'Invalid environment specified.'
        payload = {
            'environment': environment,
            'branch': branch,
        }
        response = client.post('applications/{application_id}/builds'.format(**selected), data=payload)
        deploy_url = client.url_for('builds', 'deploys', response['id'])
    except AssertionError as exc:
        output.die(str(exc))
    except StopIteration:
        output.die('Invalid app specified.')
    except VMFarmsAPIError as error:
        output.die(error.message, error.description)
    else:
        output.success('Triggered deploy! Monitor it at <{}>.'.format(deploy_url))
        if open_deploy:
            click.launch(deploy_url)
def encrypt_message(partner, message):
    """
    Encrypt a message with the partner's key matrix.

    The message is padded with spaces to a whole number of blocks of size
    ``rank`` (the rank of the encryption matrix), encoded to numbers, and
    each block is multiplied by the matrix.

    :param partner: Name of partner
    :param message: Message as string
    :return: Encrypted message as a numpy array of integers
    """
    matrix = get_encryption_matrix(get_key(get_private_filename(partner)))
    rank = np.linalg.matrix_rank(matrix)
    num_blocks = int(np.ceil(1.0 * len(message) / rank))
    # str.ljust replaces the original character-by-character concatenation
    # loop, which was quadratic in the amount of padding.
    padded_message = message.ljust(rank * num_blocks)
    encoded_message = string_to_numbers(padded_message)
    encrypted_numbers = np.empty(rank * num_blocks, dtype=int)
    for b in range(num_blocks):
        # Multiply one block at a time; slice assignment avoids the inner
        # element-copy loops of the original implementation.
        block = np.asarray(encoded_message[b * rank:(b + 1) * rank], dtype=int)
        encrypted_numbers[b * rank:(b + 1) * rank] = np.dot(matrix, block)
    return encrypted_numbers
def test_get_keys_default(client):
    """Tests if search and admin keys have been generated and can be retrieved."""
    keys = client.get_keys()
    assert isinstance(keys, dict)
    results = keys['results']
    assert len(results) == 2
    first, second = results
    assert 'actions' in first
    assert 'indexes' in first
    assert first['key'] is not None
    assert second['key'] is not None
def create_event(title, start, end, capacity, location, coach, private):
    """Create event and submit to database"""
    # A new class starts with all its capacity free.
    new_class = Class(
        title=title,
        start=start,
        end=end,
        capacity=capacity,
        location=location,
        coach=coach,
        free=capacity,
        private=private,
    )
    db.session.add(new_class)
    db.session.commit()
    return new_class
def PlotPregLengths(live, firsts, others):
    """Plots sampling distribution of difference in means.

    live, firsts, others: DataFrames
    """
    print('prglngth example')
    delta = firsts.prglngth.mean() - others.prglngth.mean()
    print(delta)
    # Under the null hypothesis both groups are samples from the pooled data.
    dist_firsts = SamplingDistMean(live.prglngth, len(firsts))
    dist_others = SamplingDistMean(live.prglngth, len(others))
    null_dist = dist_firsts - dist_others
    print('null hypothesis', null_dist)
    print(null_dist.Prob(-delta), 1 - null_dist.Prob(delta))
    thinkplot.Plot(null_dist, label='null hypothesis')
    thinkplot.Save(root='normal3',
                   xlabel='difference in means (weeks)',
                   ylabel='CDF')
def regret_obs(m_list, inputs, true_ymin=0):
    """Immediate regret using past observations.

    Parameters
    ----------
    m_list : list
        A list of GPy models generated by `OptimalDesign`.
    inputs : instance of `Inputs`
        The input space (unused here; kept for interface symmetry).
    true_ymin : float, optional
        The minimum value of the objective function.

    Returns
    -------
    res : ndarray
        For each model, the best observation so far minus the true minimum:
        r(n) = min y_i - y_true.
    """
    return np.array([model.Y.min() - true_ymin for model in m_list], dtype=float)
def pmat2cam_center(P):
    """
    Return the center of the camera with 3x4 projection matrix ``P``.

    The homogeneous camera center (X, Y, Z, T) is the right null vector of
    P, recovered from signed 3x3 determinants of column triples.
    See Hartley & Zisserman (2003) p. 163.

    :param P: (3, 4) camera projection matrix.
    :return: (3, 1) column vector with the inhomogeneous camera center.
    """
    assert P.shape == (3, 4)
    determinant = numpy.linalg.det
    # camera center
    X = determinant([P[:, 1], P[:, 2], P[:, 3]])
    Y = -determinant([P[:, 0], P[:, 2], P[:, 3]])
    Z = determinant([P[:, 0], P[:, 1], P[:, 3]])
    T = -determinant([P[:, 0], P[:, 1], P[:, 2]])
    # Dehomogenize.  Use numpy consistently instead of the legacy `nx`
    # (Numeric) alias the original mixed in.
    C_ = numpy.transpose(numpy.array([[X / T, Y / T, Z / T]]))
    return C_
def main():
    """Main entry point for script"""
    started_at = time.time()
    ten_thousand_first_prime()
    # Report how long the computation took.
    timeutils.elapsed_time(time.time() - started_at)
def _http_req(mocker):
    """Fixture providing HTTP Request mock.

    Uses ``spec=Request`` so the mock only allows attributes that exist on
    the real ``Request`` class; anything else raises ``AttributeError``.
    """
    return mocker.Mock(spec=Request)
def transform_data(df, steps_per_floor_):
    """Transform original dataset.

    :param df: Input DataFrame.
    :param steps_per_floor_: The number of steps per-floor at 43 Tanner
        Street.
    :return: Transformed DataFrame.
    """
    # Build the derived columns up front, then project.
    full_name = concat_ws(' ', col('first_name'), col('second_name')).alias('name')
    steps_to_desk = (col('floor') * lit(steps_per_floor_)).alias('steps_to_desk')
    return df.select(col('id'), full_name, steps_to_desk)
def get_client(bucket):
    """Get the Storage Client appropriate for the bucket.

    Args:
        bucket (str): Bucket including its protocol prefix
            (``s3://name`` or ``gs://name``).

    Returns:
        ~Storage: Client for interacting with the cloud.

    Raises:
        ValueError: If the bucket has no protocol prefix or an
            unsupported protocol.
    """
    try:
        protocol, bucket_name = str(bucket).lower().split('://', 1)
    except ValueError:
        raise ValueError('Invalid storage bucket name: {}'.format(bucket))
    logger = logging.getLogger('storage.get_client')
    if protocol == 's3':
        return S3Storage(bucket_name)
    if protocol == 'gs':
        return GoogleStorage(bucket_name)
    errmsg = 'Unknown STORAGE_BUCKET protocol: %s'
    logger.error(errmsg, protocol)
    raise ValueError(errmsg % protocol)
async def update_ltos(trade_list, data_dict, strategy_period_mapping, df_balance):
    """
    Update live trade objects (LTOs) in place: status, result and exit sections.

    Args:
        trade_list (list): live trades; will be updated (status, result, exit sections)
        data_dict (dict): used for getting the candle to see if trade status needs to change
        strategy_period_mapping (dict): maps a strategy name to the smallest
            candle period it trades on
        df_balance (pd.DataFrame): When a lto go from STAT_OPEN_EXIT to STAT_CLOSED or STAT_OPEN_ENTER to STAT_OPEN_EXIT
            it needs to be updated in terms of 'free' and 'locked'

    Returns:
        None: entries of `trade_list` are mutated in place
    """
    # NOTE: Only get the related LTOs and ONLY update the related LTOs. Doing the same thing here is pointless.
    for i in range(len(trade_list)):
        pair = trade_list[i].pair
        # 1.2.1: Check trades and update status
        strategy_min_scale = strategy_period_mapping[trade_list[i].strategy]
        # Last closed candle for this pair at the strategy's smallest scale.
        last_kline = data_dict[pair][strategy_min_scale].tail(1)
        last_closed_candle_open_time = bson.Int64(last_kline.index.values[0])
        if trade_list[i].status == EState.OPEN_ENTER:
            # NOTE: There is 2 method to enter: TYPE_LIMIT and TYPE_MARKET. Since market executed directly, it is not expected to have market at this stage
            if type(trade_list[i].enter) == Limit:
                # Check if the open enter trade is filled else if the trade is expired
                if float(last_kline['low']) < trade_list[i].enter.price:
                    # NOTE: Since this is testing, no dust created, perfect conversion
                    # TODO: If the enter is successful then the exit order should be placed. This is only required in DEPLOY
                    # TODO: REFACTORING: Why the enter moudle has no fee
                    trade_list[i].set_result_enter(last_closed_candle_open_time, fee_rate=StrategyBase.fee)
                    base_cur = pair.replace(config['broker']['quote_currency'],'')
                    if not balance_manager.buy(df_balance, config['broker']['quote_currency'], base_cur, trade_list[i].result.enter):
                        logger.error(f"Function failed: balance_manager.buy().")
                        # TODO: Fix the logic. The balance manager should be called prior
                elif int(trade_list[i].enter.expire) <= last_closed_candle_open_time:
                    # Report the expiration to algorithm
                    trade_list[i].status = EState.ENTER_EXP
                    # NOTE: No update on command because it is, only placed by the strategies
            else:
                # TODO: Internal Error
                pass
        elif trade_list[i].status == EState.OPEN_EXIT:
            if type(trade_list[i].exit) == Limit:
                # Check if the open sell trade is filled or stoploss is taken
                if float(last_kline['high']) > trade_list[i].exit.price:
                    trade_list[i].set_result_exit(last_closed_candle_open_time, fee_rate=StrategyBase.fee)
                    base_cur = pair.replace(config['broker']['quote_currency'],'')
                    if not balance_manager.sell(df_balance, config['broker']['quote_currency'], base_cur, trade_list[i].result.exit):
                        logger.error(f"Function failed: balance_manager.sell().")
                        # TODO: Fix the logic. The balance manager should be called prior
                elif int(trade_list[i].exit.expire) <= last_closed_candle_open_time:
                    trade_list[i].status = EState.EXIT_EXP
            elif type(trade_list[i].exit) == OCO:
                # NOTE: Think about the worst case and check the stop loss first.
                if float(last_kline['low']) < trade_list[i].exit.stopPrice:
                    # Stop Loss takens
                    trade_list[i].set_result_exit(last_closed_candle_open_time,
                        cause=ECause.CLOSED_STOP_LOSS,
                        price=trade_list[i].exit.stopLimitPrice,
                        fee_rate=StrategyBase.fee)
                    base_cur = pair.replace(config['broker']['quote_currency'],'')
                    balance_manager.sell(df_balance, config['broker']['quote_currency'], base_cur, trade_list[i].result.exit)
                elif float(last_kline['high']) > trade_list[i].exit.price:
                    # Limit taken
                    trade_list[i].set_result_exit(last_closed_candle_open_time,
                        fee_rate=StrategyBase.fee)
                    base_cur = pair.replace(config['broker']['quote_currency'],'')
                    balance_manager.sell(df_balance, config['broker']['quote_currency'], base_cur, trade_list[i].result.exit)
                elif int(trade_list[i].exit.expire) <= last_closed_candle_open_time:
                    trade_list[i].status = EState.EXIT_EXP
                else:
                    pass
            else:
                # TODO: Internal Error
                pass
        else:
            # TODO: Internal Error
            pass
def extract_text(xml_string):
    """Get text from the body of the given NLM XML string.

    Parameters
    ----------
    xml_string : str
        String containing valid NLM XML.

    Returns
    -------
    str
        Extracted plaintext with a trailing newline, or None when no
        paragraphs were found.
    """
    paragraphs = extract_paragraphs(xml_string)
    if not paragraphs:
        return None
    return '\n'.join(paragraphs) + '\n'
def get_processing_info(data_path, actual_names, labels):
    """
    Iterates over the downloaded data and checks which one is in our database

    Builds a first-occurrence name->label index up front so each video
    lookup is O(1) instead of an O(n) list scan per file.

    Args:
        data_path: Root directory; one sub-directory per image type
            (B-lines, cardiac, ...) containing video files.
        actual_names: Video file names present in the database.
        labels: Labels aligned index-for-index with ``actual_names``.

    Returns:
        files_to_process: List of file paths to videos
        labs_to_process: list of same length with corresponding labels
    """
    # First occurrence wins, matching list.index() semantics on duplicates.
    label_by_name = {}
    for idx, name in enumerate(actual_names):
        if name not in label_by_name:
            label_by_name[name] = labels[idx]
    files_to_process = []
    labs_to_process = []
    for img_type in os.listdir(data_path):
        # Skip hidden entries such as .DS_Store.
        if img_type.startswith("."):
            continue
        # img_type is B-lines, cardiac etc
        for vid in os.listdir(os.path.join(data_path, img_type)):
            if vid in label_by_name:
                files_to_process.append(os.path.join(data_path, img_type, vid))
                labs_to_process.append(label_by_name[vid])
    return files_to_process, labs_to_process
def search_organizations(search_term: str = None, limit: str = None):
    """
    Looks up organizations by name & location.

    :param search_term: e.g. "College of Nursing" or "Chicago, IL".
    :param limit: The maximum number of matches you'd like returned - defaults to 10, maximum is 50.
    :returns: String containing xml or an lxml element.
    """
    # Delegate to the anonymous (unauthenticated) API endpoint.
    return get_anonymous('searchOrganizations', search_term=search_term, limit=limit)
def test_fetch_emd_history_fail(config=CONFIG):
    """Failure-path tests for `fetch_market_history_emd`.

    (The original docstring said "happypath", but both cases below assert
    that an exception is raised.)
    """
    # Unreachable endpoint address -> the underlying HTTP error propagates.
    with pytest.raises(requests.exceptions.HTTPError):
        data = forecast_utils.fetch_market_history_emd(
            region_id=config.get('TEST', 'region_id'),
            type_id=config.get('TEST', 'type_id'),
            data_range=config.get('TEST', 'history_count'),
            config=config,
            endpoint_addr='http://www.eveprosper.com/noendpoint'
        )
    # A type id with no market data -> package-specific NoDataReturned.
    with pytest.raises(exceptions.NoDataReturned):
        data = forecast_utils.fetch_market_history_emd(
            region_id=config.get('TEST', 'region_id'),
            type_id=config.get('TEST', 'bad_typeid'),
            data_range=config.get('TEST', 'history_count'),
            config=config
        )
def call_math_operator(value1, value2, op, default):
    """Return the result of the math operation on the given values."""
    def _coerce(value):
        # Falsey inputs (None, '', 0, ...) fall back to the default, then a
        # best-effort float conversion is attempted for non-numeric values.
        if not value:
            value = default
        if not pyd.is_number(value):
            try:
                value = float(value)
            except Exception:
                pass
        return value

    return op(_coerce(value1), _coerce(value2))
def AddGnuWinToPath():
    """Download some GNU win tools and add them to PATH.

    No-op on non-Windows platforms.  Downloads/unpacks only when the local
    stamp file does not match GNUWIN_VERSION, then prepends the tools
    directory to the process PATH.
    """
    if sys.platform != 'win32':
        return
    gnuwin_dir = os.path.join(LLVM_BUILD_TOOLS_DIR, 'gnuwin')
    GNUWIN_VERSION = '9'
    GNUWIN_STAMP = os.path.join(gnuwin_dir, 'stamp')
    # The stamp file records which version was last unpacked.
    if ReadStampFile(GNUWIN_STAMP) == GNUWIN_VERSION:
        print('GNU Win tools already up to date.')
    else:
        zip_name = 'gnuwin-%s.zip' % GNUWIN_VERSION
        DownloadAndUnpack(CDS_URL + '/tools/' + zip_name, LLVM_BUILD_TOOLS_DIR)
        WriteStampFile(GNUWIN_VERSION, GNUWIN_STAMP)
    os.environ['PATH'] = gnuwin_dir + os.pathsep + os.environ.get('PATH', '')
    # find.exe, mv.exe and rm.exe are from MSYS (see crrev.com/389632). MSYS uses
    # Cygwin under the hood, and initializing Cygwin has a race-condition when
    # getting group and user data from the Active Directory is slow. To work
    # around this, use a horrible hack telling it not to do that.
    # See https://crbug.com/905289
    etc = os.path.join(gnuwin_dir, '..', '..', 'etc')
    EnsureDirExists(etc)
    with open(os.path.join(etc, 'nsswitch.conf'), 'w') as f:
        f.write('passwd: files\n')
        f.write('group: files\n')
async def _default_error_callback(ex: Exception) -> None:
    """
    Provides a default way to handle async errors if the user
    does not provide one.

    Logs the exception (with traceback via ``exc_info``) and swallows it so
    the event loop keeps running.
    """
    _logger.error('nats: encountered error', exc_info=ex)
def addGems(ID, nbGems):
    """
    Add ``nbGems`` gems to the account identified by ``ID``.

    Pass a negative ``nbGems`` to withdraw gems.  If the account does not
    hold enough gems, no update is written and the (strictly negative)
    would-be balance is still returned.

    :return: the resulting balance as a string.
    """
    current_balance = int(valueAt(ID, "gems", GF.dbGems))
    updated_balance = current_balance + nbGems
    if updated_balance >= 0:
        updateField(ID, "gems", updated_balance, GF.dbGems)
        print("DB >> Le compte de " + str(ID) + " est maintenant de: " + str(updated_balance))
    else:
        print("DB >> Il n'y a pas assez sur ce compte !")
    return str(updated_balance)
def start(
    release,
    fqdn,
    rabbit_pass,
    rabbit_ips_list,
    sql_ip,
    sql_password,
    https,
    port,
    secret,
):
    """ Start the arcus api

    Launches the breqwatr/arcus-api:<release> container.  Normally runs
    detached with host networking and restart=always; when DEV_MODE is set
    the local source tree (from $ARCUS_API_DIR) is mounted and gunicorn
    runs interactively with --reload instead.
    """
    image = f"breqwatr/arcus-api:{release}"
    rabbit_ips_csv = ",".join(rabbit_ips_list)
    # Service configuration is passed to the container via environment vars.
    env_vars = {
        "OPENSTACK_VIP": fqdn,
        "PUBLIC_ENDPOINT": "true",
        "HTTPS_OPENSTACK_APIS": str(https).lower(),
        "RABBITMQ_USERNAME": "openstack",
        "RABBITMQ_PASSWORD": rabbit_pass,
        "RABBIT_IPS_CSV": rabbit_ips_csv,
        "SQL_USERNAME": "arcus",
        "SQL_PASSWORD": sql_password,
        "SQL_IP": sql_ip,
        "ARCUS_INTEGRATION_SECRET": secret,
    }
    env_str = env_string(env_vars)
    daemon = "-d --restart=always"
    run = ""
    dev_mount = ""
    ceph_mount = ""
    network = "--network host"
    log_mount = "-v /var/log/arcus-api:/var/log/arcusweb"
    hosts_mount = "-v /etc/hosts:/etc/hosts"
    # Dev mode: mount the local checkout and run gunicorn in the foreground.
    if DEV_MODE:
        log_mount = ""
        hosts_mount = ""
        if "ARCUS_API_DIR" not in os.environ:
            error("ERROR: must set $ARCUS_API_DIR when $VOITHOS_DEV==true", exit=True)
        api_dir = os.environ["ARCUS_API_DIR"]
        assert_path_exists(api_dir)
        daemon = "-it --rm"
        dev_mount = volume_opt(api_dir, "/app")
        network = f"-p 0.0.0.0:{port}:{port}"
        run = (
            'bash -c "'
            "/env_config.py && "
            "pip install -e . && "
            "gunicorn --workers 4 --error-logfile=- --access-logfile '-' "
            "--reload "
            f"--bind 0.0.0.0:{port}"
            ' arcusapi.wsgi:app" '
        )
    name = "arcus_api"
    # Remove any stale container with the same name before starting.
    shell(f"docker rm -f {name} 2>/dev/null || true")
    cmd = (
        f"docker run --name {name} {daemon} {network} "
        f"{hosts_mount} {log_mount} "
        f"{env_str} {ceph_mount} {dev_mount} {image} {run}"
    )
    shell(cmd)
def BertzCT(mol, cutoff=100, dMat=None, forceDMat=1):
    """ A topological index meant to quantify "complexity" of molecules.

    Consists of a sum of two terms, one representing the complexity
    of the bonding, the other representing the complexity of the
    distribution of heteroatoms.

    From S. H. Bertz, J. Am. Chem. Soc., vol 103, 3599-3601 (1981)

    "cutoff" is an integer value used to limit the computational
    expense.  A cutoff value tells the program to consider vertices
    topologically identical if their distance vectors (sets of
    distances to all other vertices) are equal out to the "cutoff"th
    nearest-neighbor.

    **NOTE**  The original implementation had the following comment:
    > this implementation treats aromatic rings as the
    > corresponding Kekule structure with alternating bonds,
    > for purposes of counting "connections".
    Upon further thought, this is the WRONG thing to do.  It
    results in the possibility of a molecule giving two different
    CT values depending on the kekulization.  For example, in the
    old implementation, these two SMILES:
      CC2=CN=C1C3=C(C(C)=C(C=N3)C)C=CC1=C2C
      CC3=CN=C2C1=NC=C(C)C(C)=C1C=CC2=C3C
    which correspond to differentk kekule forms, yield different
    values.
    The new implementation uses consistent (aromatic) bond orders
    for aromatic bonds.
    THIS MEANS THAT THIS IMPLEMENTATION IS NOT BACKWARDS COMPATIBLE.
    Any molecule containing aromatic rings will yield different
    values with this implementation.  The new behavior is the correct
    one, so we're going to live with the breakage.

    **NOTE** this barfs if the molecule contains a second (or
    nth) fragment that is one atom.

    Arguments:
      mol: RDKit molecule to score
      cutoff: distance-vector truncation used for symmetry classing (above)
      dMat: optional precomputed topological distance matrix
      forceDMat: if true, recompute the distance matrix and cache it on mol
    """
    atomTypeDict = {}
    connectionDict = {}
    numAtoms = mol.GetNumAtoms()
    if forceDMat or dMat is None:
        if forceDMat:
            # nope, gotta calculate one
            dMat = Chem.GetDistanceMatrix(mol, useBO=0, useAtomWts=0, force=1)
            mol._adjMat = dMat
        else:
            try:
                dMat = mol._adjMat
            except AttributeError:
                dMat = Chem.GetDistanceMatrix(mol, useBO=0, useAtomWts=0, force=1)
                mol._adjMat = dMat
    if numAtoms < 2:
        return 0
    bondDict, neighborList, vdList = _CreateBondDictEtc(mol, numAtoms)
    symmetryClasses = _AssignSymmetryClasses(mol, vdList, dMat, forceDMat, numAtoms, cutoff)
    # print('Symmm Classes:',symmetryClasses)
    for atomIdx in range(numAtoms):
        hingeAtomNumber = mol.GetAtomWithIdx(atomIdx).GetAtomicNum()
        atomTypeDict[hingeAtomNumber] = atomTypeDict.get(hingeAtomNumber, 0) + 1
        hingeAtomClass = symmetryClasses[atomIdx]
        numNeighbors = vdList[atomIdx]
        for i in range(numNeighbors):
            neighbor_iIdx = neighborList[atomIdx][i]
            NiClass = symmetryClasses[neighbor_iIdx]
            bond_i_order = _LookUpBondOrder(atomIdx, neighbor_iIdx, bondDict)
            # print('\t',atomIdx,i,hingeAtomClass,NiClass,bond_i_order)
            if (bond_i_order > 1) and (neighbor_iIdx > atomIdx):
                numConnections = bond_i_order * (bond_i_order - 1) / 2
                connectionKey = (min(hingeAtomClass, NiClass), max(hingeAtomClass, NiClass))
                connectionDict[connectionKey] = connectionDict.get(connectionKey, 0) + numConnections
            for j in range(i + 1, numNeighbors):
                neighbor_jIdx = neighborList[atomIdx][j]
                NjClass = symmetryClasses[neighbor_jIdx]
                bond_j_order = _LookUpBondOrder(atomIdx, neighbor_jIdx, bondDict)
                numConnections = bond_i_order * bond_j_order
                connectionKey = (min(NiClass, NjClass), hingeAtomClass, max(NiClass, NjClass))
                connectionDict[connectionKey] = connectionDict.get(connectionKey, 0) + numConnections
    # Degenerate case (no counted connections): use a dummy single entry so
    # the entropy calculation below has something to work with.
    if not connectionDict:
        connectionDict = {'a': 1}
    return _CalculateEntropies(connectionDict, atomTypeDict, numAtoms)
def conjugate(*args, **kwargs):
    """
    the conjugate part of x

    This function has been overriden from pymel.util.mathutils.conjugate to work element-wise on iterables
    """
    # NOTE(review): stub body — the docstring suggests the real implementation
    # is supplied/overridden elsewhere at runtime; confirm before relying on it.
    pass
def BOPTools_AlgoTools_CorrectRange(*args):
    """
    * Correct shrunk range <aSR> taking into account 3D-curve resolution and corresp. tolerances' values of <aE1>, <aE2>

    :param aE1:
    :type aE1: TopoDS_Edge &
    :param aE2:
    :type aE2: TopoDS_Edge &
    :param aSR:
    :type aSR: IntTools_Range &
    :param aNewSR:
    :type aNewSR: IntTools_Range &
    :rtype: void

    * Correct shrunk range <aSR> taking into account 3D-curve resolution and corresp. tolerances' values of <aE>, <aF>

    :param aE:
    :type aE: TopoDS_Edge &
    :param aF:
    :type aF: TopoDS_Face &
    :param aSR:
    :type aSR: IntTools_Range &
    :param aNewSR:
    :type aNewSR: IntTools_Range &
    :rtype: void

    Thin generated wrapper: the two overloads above are resolved inside the
    compiled ``_BOPTools`` extension module.
    """
    return _BOPTools.BOPTools_AlgoTools_CorrectRange(*args)
def OldValue(lval, mem, exec_opts):
  # type: (lvalue_t, Mem, optview.Exec) -> value_t
  """
  Look up the current value of an lvalue, e.g. for s+='x' and (( i += 1 )).

  TODO: We need a stricter and less ambiguous version for Oil.

  Problem:
  - why does lvalue have Indexed and Keyed, while sh_lhs_expr only has
    IndexedName?
    - should I have lvalue.Named and lvalue.Indexed only?
    - and Indexed uses the index_t type?
      - well that might be Str or Int
  """
  assert isinstance(lval, lvalue_t), lval

  # TODO: refactor lvalue_t to make this simpler
  UP_lval = lval
  # First pass: extract the variable name for each lvalue variant.
  with tagswitch(lval) as case:
    if case(lvalue_e.Named):  # (( i++ ))
      lval = cast(lvalue__Named, UP_lval)
      var_name = lval.name
    elif case(lvalue_e.Indexed):  # (( a[i]++ ))
      lval = cast(lvalue__Indexed, UP_lval)
      var_name = lval.name
    elif case(lvalue_e.Keyed):  # (( A['K']++ )) ? I think this works
      lval = cast(lvalue__Keyed, UP_lval)
      var_name = lval.name
    else:
      raise AssertionError()

  val = _LookupVar(var_name, mem, exec_opts)

  # Second pass: for Indexed/Keyed, narrow to the element value.
  UP_val = val
  with tagswitch(lval) as case:
    if case(lvalue_e.Named):
      return val

    elif case(lvalue_e.Indexed):
      lval = cast(lvalue__Indexed, UP_lval)

      array_val = None  # type: value__MaybeStrArray
      with tagswitch(val) as case2:
        if case2(value_e.Undef):
          array_val = value.MaybeStrArray([])
        elif case2(value_e.MaybeStrArray):
          tmp = cast(value__MaybeStrArray, UP_val)
          # mycpp rewrite: add tmp.  cast() creates a new var in inner scope
          array_val = tmp
        else:
          e_die("Can't use [] on value of type %s", ui.ValType(val))

      s = word_eval.GetArrayItem(array_val.strs, lval.index)

      if s is None:
        val = value.Str('')  # NOTE: Other logic is value.Undef()?  0?
      else:
        assert isinstance(s, str), s
        val = value.Str(s)

    elif case(lvalue_e.Keyed):
      lval = cast(lvalue__Keyed, UP_lval)

      assoc_val = None  # type: value__AssocArray
      with tagswitch(val) as case2:
        if case2(value_e.Undef):
          # This never happens, because undef[x]+= is assumed to
          raise AssertionError()
        elif case2(value_e.AssocArray):
          tmp2 = cast(value__AssocArray, UP_val)
          # mycpp rewrite: add tmp.  cast() creates a new var in inner scope
          assoc_val = tmp2
        else:
          e_die("Can't use [] on value of type %s", ui.ValType(val))

      s = assoc_val.d.get(lval.key)
      if s is None:
        val = value.Str('')
      else:
        val = value.Str(s)

    else:
      raise AssertionError()

  return val
def approx_min_k(operand: Array,
                 k: int,
                 reduction_dimension: int = -1,
                 recall_target: float = 0.95,
                 reduction_input_size_override: int = -1,
                 aggregate_to_topk: bool = True) -> Tuple[Array, Array]:
  """Returns min ``k`` values and their indices of the ``operand``.

  Args:
    operand : Array to search for min-k.
    k : Specifies the number of min-k.
    reduction_dimension: Integer dimension along which to search. Default: -1.
    recall_target: Recall target for the approximation.
    reduction_input_size_override : When set to a positive value, it overrides
      the size determined by operands[reduction_dim] for evaluating the recall.
      This option is useful when the given operand is only a subset of the
      overall computation in SPMD or distributed pipelines, where the true input
      size cannot be deferred by the operand shape.
    aggregate_to_topk: When true, aggregates approximate results to top-k. When
      false, returns the approximate results.

  Returns:
    Tuple[Array, Array] : Least k values and their indices of the inputs.
  """
  # Old xla_client versions force aggregation — presumably they cannot
  # return unaggregated approximate results; confirm against jaxlib history.
  if xc._version < 45:
    aggregate_to_topk = True
  return approx_top_k_p.bind(
      operand,
      k=k,
      reduction_dimension=reduction_dimension,
      recall_target=recall_target,
      is_max_k=False,
      reduction_input_size_override=reduction_input_size_override,
      aggregate_to_topk=aggregate_to_topk)
def set_run(environment: str, run_result_id: int, description: str):
    """ creates all the necessary directories and sets up the Simpyl object for a run
    """
    run_dir = run_path(environment, run_result_id)
    create_dir_if_needed(run_dir)
    # Persist the run description as a text file alongside the run artefacts.
    description_path = os.path.join(run_dir, s.DESCRIPTION_FORMAT.format(run_result_id))
    with open(description_path, 'w') as f:
        f.write(description)
def sitemap_host_xml():
    """Supplementary Sitemap XML for Host Pages"""
    database_connection.reconnect()
    all_hosts = ww_host.info.retrieve_all(database_connection)
    xml_body = render_template("sitemaps/hosts.xml", hosts=all_hosts)
    return Response(xml_body, mimetype="text/xml")
def WTC(df, N):
    """Within Topic Coherence Measure.

    [Note]
    It ignores a word which does not have trained word vector.

    Parameters
    ----------
    df : Word-Topic distribution K by V
        where K is number of topics and V is number of words
    N : Number of top N words

    Returns
    -------
    total : WTC value of each topic (1 * K)
    """
    top_words = df.iloc[:N, :]
    total = []
    for topic in top_words.columns:
        cos_val = 0
        words = top_words[topic].tolist()
        for w1, w2 in combinations(words, 2):
            # Sum cosine *similarities* (1 - cosine distance) over all
            # top-word pairs of this topic.
            try:
                cos_val += 1 - cosine(word2vec_model.get_vector(w1),
                                      word2vec_model.get_vector(w2))
            except KeyError:
                # Word has no trained vector - skip the pair.  Narrowed from a
                # bare `except:` so real errors are no longer swallowed.
                pass
        print(topic, cos_val)
        total.append(cos_val)
    return total
def launch_top_runs(top_paths, bp, command, auto_pupdate=False,
        partition_name='debug', time_limit='04-00:00:00', memory_limit=2048):
    """
    Launch the top runs.

    @param top_paths: The full path to the base directory containing the top
    results.

    @param bp: The new base directory.

    @param command: The base command to execute in the runner. Two additional
    arguments will be passed - the base directory and the fold index.

    @param auto_pupdate: If True the permanence increment and decrement amounts
    will automatically be computed by the runner. If False, the ones specified
    in the config file will be used.

    @param partition_name: The partition name to use.

    @param time_limit: The maximum time limit.

    @param memory_limit: The maximum memory requirements in MB.
    """
    for top_path in top_paths:
        # Path where the run should occur
        job_name = os.path.basename(top_path)
        run_dir = os.path.join(bp, job_name)
        try:
            os.makedirs(run_dir)
        except OSError:
            pass  # Overwrite the files

        # Create the runner script, then submit it.
        runner_path = os.path.join(run_dir, 'runner.sh')
        full_command = '{0} "{1}" "{2}" {3}'.format(command, top_path, run_dir,
                                                    int(auto_pupdate))
        create_runner(command=full_command, runner_path=runner_path,
                      job_name=job_name, partition_name=partition_name,
                      stdio_path=os.path.join(run_dir, 'stdio.txt'),
                      stderr_path=os.path.join(run_dir, 'stderr.txt'),
                      time_limit=time_limit, memory_limit=memory_limit)
        execute_runner(runner_path)
def extract_static_override_features(
    static_overrides):
  """Extract static feature override values.

  Args:
    static_overrides: A dataframe that contains the value for static overrides
      to be passed to the GAM Encoders.

  Returns:
    A mapping from feature name to location and then to the override value.
    This is a two-level dictionary of the format: {feature: {location: value}}
  """
  overrides_by_feature = dict()
  for feature_name in set(static_overrides[constants.FEATURE_NAME_COLUMN]):
    feature_rows = static_overrides.loc[
        static_overrides[constants.FEATURE_NAME_COLUMN] == feature_name]
    per_location = dict()
    for location in set(feature_rows[constants.GEO_ID_COLUMN]):
      location_rows = feature_rows.loc[
          feature_rows[constants.GEO_ID_COLUMN] == location]
      # Overrides are expected to be unique per (feature, location);
      # take the first (only) value.
      per_location[location] = location_rows[
          constants.FEATURE_MODIFIER_COLUMN].to_numpy()[0]
    overrides_by_feature[feature_name] = per_location
  return overrides_by_feature
def job_list_View(request):
    """Render a paginated list of all jobs (10 per page)."""
    paginator = Paginator(Job.objects.filter(), 10)
    page_obj = paginator.get_page(request.GET.get('page'))
    return render(request, 'jobapp/job-list.html', {'page_obj': page_obj})
def set_degree_as_weight(g):
    """Set degree of connected nodes as weight.

    For metabolite graphs it is often desirable to see the routes with
    less connected metabolites
    """
    centrality = nx.degree_centrality(g)
    # Each edge is weighted by the centrality of its target node.
    for source, target in g.edges():
        g[source][target]['weight'] = centrality[target]
def find_tiledirs(channeldir: pathlib.Path,
                  tiles: Union[int, str, List[int], None] = None,
                  conditions: Union[str, List[str], None] = None) -> TileGenerator:
    """ Find all the tiles under the channel dir

    :param Path channeldir:
        The channel directory to search
    :param list tiles:
        A list of tile numbers to look for (None for any)
    :param list conditions:
        A list of condition suffixes to look for (None for any)
    :returns:
        An iterator of (tile, tiledir)
    """
    # Normalize the filters: lower-cased condition substrings, int tile ids.
    if conditions is not None:
        if isinstance(conditions, str):
            conditions = [conditions]
        conditions = [c.lower() for c in conditions]
    if tiles is not None:
        if isinstance(tiles, (str, int)):
            tiles = [tiles]
        tiles = [int(t) for t in tiles]
    channel_dir = pathlib.Path(channeldir)
    for tiledir in sorted(channel_dir.iterdir()):
        if not tiledir.is_dir():
            continue
        data = parse_tile_name(tiledir.name)
        if data is None:
            continue
        if tiles is not None and data['tile'] not in tiles:
            continue
        # Generator expression (not a temporary list) lets any() short-circuit.
        if conditions is None or any(c in data['condition'].lower() for c in conditions):
            yield data['tile'], tiledir
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.