content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def main(reactor, args, base_path, top_level):
    """
    Start a node, install the flocker CLI on it and run the client tests.

    NOTE(review): this body uses ``yield`` and Python 2 ``print`` syntax; it
    is presumably decorated with ``@inlineCallbacks`` above this chunk --
    confirm before reuse.

    :param reactor: Reactor to use.
    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the flocker repository.
    """
    options = RunOptions(top_level=top_level)
    add_destination(eliot_output)
    try:
        options.parseOptions(args)
    except UsageError as e:
        # Invalid command line usage: report to stderr and exit non-zero.
        sys.stderr.write("%s: %s\n" % (base_path.basename(), e))
        raise SystemExit(1)
    runner = options.runner
    from flocker.common.script import eliot_logging_service
    # Log to a file named after the executable; writer is stopped on shutdown.
    log_file = open("%s.log" % base_path.basename(), "a")
    log_writer = eliot_logging_service(
        log_file=log_file,
        reactor=reactor,
        capture_stdout=False)
    log_writer.startService()
    reactor.addSystemEventTrigger(
        'before', 'shutdown', log_writer.stopService)
    try:
        nodes = yield runner.start_nodes(reactor, node_count=1)
        yield perform(
            make_dispatcher(reactor),
            install_cli(runner.package_source, nodes[0]))
        result = yield run_client_tests(reactor=reactor, node=nodes[0])
    except:
        # Record failure as a non-zero exit code, then re-raise so the
        # traceback still propagates.
        result = 1
        raise
    finally:
        # Unless the tests failed, and the user asked to keep the nodes, we
        # delete them.
        if not (result != 0 and options['keep']):
            runner.stop_nodes(reactor)
        elif options['keep']:
            print "--keep specified, not destroying nodes."
    raise SystemExit(result)
def _format_mojang_uuid(uuid):
"""
Formats a non-hyphenated UUID into a whitelist-compatible UUID
:param str uuid: uuid to format
:return str: formatted uuid
Example:
>>> _format_mojang_uuid('1449a8a244d940ebacf551b88ae95dee')
'1449a8a2-44d9-40eb-acf5-51b88ae95dee'
Must have 32 characters:
>>> _format_mojang_uuid('1')
Traceback (most recent call last):
...
ValueError: Expected UUID to have 32 characters
"""
if len(uuid) != 32:
raise ValueError('Expected UUID to have 32 characters')
return uuid[:8] + '-' + uuid[8:12] + '-' + uuid[12:16] + '-' + uuid[16:20] + '-' + uuid[20:] | 5,327,401 |
def generate_uri(graph_base, username):
    """Return the graph URI formed by appending *username* to *graph_base*.

    Args:
        graph_base (str): base URI prefix.
        username (str): name appended verbatim to the base.

    Returns:
        str: the concatenated URI.
    """
    return f"{graph_base}{username}"
def _combine_model_kwargs_and_state(
    generator_run: GeneratorRun,
    model_class: Type[Model],
    model_kwargs: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """Produces a combined dict of model kwargs and model state after gen,
    extracted from generator run. If model kwargs are not specified,
    model kwargs from the generator run will be used.
    """
    base_kwargs = model_kwargs or generator_run._model_kwargs or {}
    state = generator_run._model_state_after_gen
    if state is None:
        return base_kwargs
    # Merge into a fresh dict so the kwargs stored on the `GenerationStep`
    # are never mutated.
    deserialized = model_class.deserialize_state(not_none(state))
    return {**base_kwargs, **deserialized}
def quick_boxcar(s, M=4, centered=True):
    """Return a boxcar (moving-average) filtered version of the input signal.

    The signal is padded at both ends with its edge values, so the output
    has the same length as the input.

    Parameters
    ----------
    s : ndarray
        Input signal; filtering is applied along axis 0.
    M : int, optional
        Number of averaged samples (default 4).
    centered : bool, optional
        Recenter the filtered signal to reduce lag (default True).
        (Fixed: the previous docstring wrongly stated the default was False.)

    Raises
    ------
    ValueError
        If the signal is not longer than ``2*M`` samples.
    """
    # Sanity check on signal and filter window
    length = s.shape[0]
    if length <= 2*M:
        raise ValueError('signal too short for specified filter window')
    # Set up staggered arrays for vectorized average
    z = np.empty((M, length+M-1), 'd')
    for i in range(M):
        # Each row is the signal shifted by i, padded with its edge values.
        z[i] = np.r_[np.zeros(i)+s[0], s, np.zeros(M-i-1)+s[-1]]
    # Center the average if specified
    start_ix = 0
    end_ix = length
    if centered:
        start_ix += int(M/2)
        end_ix += int(M/2)
    return z.mean(axis=0)[start_ix:end_ix]
def _cache_stats(cache):
    """Summarize one of the node's cache objects into a stats dict."""
    # NOTE: the "utililization_per" typo is kept as-is -- it is part of the
    # response schema that existing clients consume.
    return {
        "count": len(cache),
        "dirty_count": cache.dirtyCount,
        "utililization_per": cache.cacheUtilizationPercent,
        "mem_used": cache.memUsed,
        "mem_target": cache.memTarget,
    }


async def info(request):
    """HTTP method returning node state and host resource usage to caller.

    The JSON answer contains node identity, CPU, disk I/O, network I/O,
    memory, disk usage, request/log counters and (on DN nodes) cache stats.
    """
    log.debug("info request")
    app = request.app
    answer = {}
    # copy relevant entries from state dictionary to response
    node = {}
    node['id'] = request.app['id']
    node['type'] = request.app['node_type']
    node['start_time'] = app["start_time"]  # unixTimeToUTC(app['start_time'])
    node['state'] = app['node_state']
    node['node_number'] = app['node_number']
    node['node_count'] = app['node_count']
    answer["node"] = node
    # psutil info
    # see: http://pythonhosted.org/psutil/ for description of different fields
    cpu = {}
    cpu["percent"] = psutil.cpu_percent()
    cpu["cores"] = psutil.cpu_count()
    answer["cpu"] = cpu
    diskio = psutil.disk_io_counters()
    answer["diskio"] = {
        "read_count": diskio.read_count,
        "read_time": diskio.read_time,
        "read_bytes": diskio.read_bytes,
        "write_count": diskio.write_count,
        "write_time": diskio.write_time,
        "write_bytes": diskio.write_bytes,
    }
    netio = psutil.net_io_counters()
    answer["netio"] = {
        "bytes_sent": netio.bytes_sent,
        # BUG FIX: this previously overwrote "bytes_sent" with the received
        # byte count, so "bytes_recv" was missing from the response.
        "bytes_recv": netio.bytes_recv,
        "packets_sent": netio.packets_sent,
        "packets_recv": netio.packets_recv,
        "errin": netio.errin,
        "errout": netio.errout,
        "dropin": netio.dropin,
        "dropout": netio.dropout,
    }
    mem_stats = {}
    svmem = psutil.virtual_memory()
    mem_stats["phys_total"] = svmem.total
    mem_stats["phys_available"] = svmem.available
    sswap = psutil.swap_memory()
    mem_stats["swap_total"] = sswap.total
    mem_stats["swap_used"] = sswap.used
    mem_stats["swap_free"] = sswap.free
    mem_stats["percent"] = sswap.percent
    answer["memory"] = mem_stats
    sdiskusage = psutil.disk_usage('/')
    answer["disk"] = {
        "total": sdiskusage.total,
        "used": sdiskusage.used,
        "free": sdiskusage.free,
        "percent": sdiskusage.percent,
    }
    answer["log_stats"] = app["log_count"]
    answer["req_count"] = app["req_count"]
    answer["s3_stats"] = app["s3_stats"]
    # Cache stats are only populated on DN nodes; the keys are always
    # present (empty dict otherwise) to preserve the response schema.
    answer["meta_cache_stats"] = (
        _cache_stats(app["meta_cache"]) if "meta_cache" in app else {})
    answer["chunk_cache_stats"] = (
        _cache_stats(app["chunk_cache"]) if "chunk_cache" in app else {})
    answer["domain_cache_stats"] = (
        _cache_stats(app["domain_cache"]) if "domain_cache" in app else {})
    resp = await jsonResponse(request, answer)
    log.response(request, resp=resp)
    return resp
def read_file(file, assume_complete=False):
    """read_file(filename, assume_complete=False) -> Contest

    Read in a text file describing a contest, and construct a Contest
    object. This adds the ballots (by calling addballot()), but it doesn't
    do any further computation.

    If assume_complete is True, any entries missing from a ballot are
    assumed to be tied for last.
    """
    contents = None
    ballots = []
    while True:
        line = file.readline()
        if not line:
            break
        line = line.strip()
        # Skip blank lines and '#' comments.
        if not line or line.startswith('#'):
            continue
        if line.startswith('*'):
            if contents is not None:
                raise Exception('More than one line in the input file begins with *.')
            contents = line
        else:
            ballots.append(line)
    if contents is None:
        raise Exception('No line in the input file begins with *.')
    entries = contents[1:].split()
    if not entries:
        raise Exception('The * line has no contents.')
    if len(set(entries)) != len(entries):
        raise Exception('Duplicate entry in * line.')
    contest = Contest(entries)
    for ballot_line in ballots:
        # Each whitespace-separated chunk is a '/'-joined tie group.
        groups = [chunk.split('/') for chunk in ballot_line.split()]
        seen = set()
        for group in groups:
            for key in group:
                if not contest.iskey(key):
                    raise Exception('Unknown key in ballot: ' + key)
                if key in seen:
                    raise Exception('Repeated key in ballot: ' + key)
                seen.add(key)
        if assume_complete:
            # Entries absent from the ballot are tied for last place.
            missing = [entry for entry in contest.entries if entry not in seen]
            if missing:
                groups.append(missing)
        contest.addballot(groups)
    return contest
def launch_server(port: int, target: str) -> None:
    """Launch a RPC server.

    Blocks in ``s.start()`` until the server stops; any exception raised by
    the server is logged rather than propagated.

    Parameters
    ----------
    port: int
        The port for launching the server.
    target: str
        The target string for this server.
    """
    # allow_public_attrs / allow_pickle relax the RPC layer's default
    # attribute restrictions (presumably rpyc's ThreadedServer -- confirm).
    s = ThreadedServer(
        RPCService(target),
        port=port,
        protocol_config={"allow_public_attrs": True, "allow_pickle": True},
    )
    log.info("Launching RPC server at port %d", port)
    try:
        s.start()
    except Exception as err:  # pylint: disable=broad-except
        log.info("RPC server at port %d throws exceptions: %s", port, str(err))
    log.info("RPC server at port %d is shutdown", port)
def main() -> None:
    """
    Add user to database with email argument as the user ID.

    Parses a single positional ``email`` argument; if it looks like an
    email address it is passed to ``add_user``, otherwise an error is
    logged.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'email',
        type=str,
        help='E-mail address of the user to be added',
    )
    args = parser.parse_args()
    # `args.email` must in fact look like an email address, otherwise the
    # HTTP request to Bouncer will fail.  (This note was previously a stray
    # string expression -- a no-op statement at runtime -- now a comment.)
    email = args.email
    if re.match(r'[^@]+@[^@]+\.[^@]+', email):
        add_user(email)
    else:
        log.error('Provided uid `%s` does not appear to be an email address', email)
def merge_dict(a, b):
    """
    Recursively merge dict ``b`` into a copy of dict ``a`` and return it.

    For keys present in both:
      * dict values are merged recursively,
      * list values are combined (set union) and returned sorted,
      * any other value from ``b`` replaces the value from ``a``.
    If ``b`` is not a dict, it is returned as-is.  Neither input is mutated.

    :param a: dictionary object
    :param b: dictionary object
    :return: merged dictionary object
    """
    if not isinstance(b, dict):
        return b
    result = deepcopy(a)
    # Python 3 `dict.items` replaces the former `six.iteritems` call and
    # removes the dependency on the third-party `six` shim.
    for key, val in b.items():
        if key in result and isinstance(result[key], dict):
            result[key] = merge_dict(result[key], val)
        elif key in result and isinstance(result[key], list):
            result[key] = sorted(set(val) | set(result[key]))
        else:
            result[key] = deepcopy(val)
    return result
def send_message(oc_user, params):
    """Leave a message for another user.

    Validates the target uid and content, then records the message in both
    participants' UserCompeteMessage hashes.

    :param oc_user: sender; only ``oc_user.uid`` is read.
    :param params: dict with "to_uid" and "content" keys.
    :return: ``(code, payload)`` tuple -- code 0 on success, non-zero with
        an error message otherwise.
    """
    to_uid = params.get("to_uid")
    content = params.get("content", '')
    if not to_uid:
        return 1, {"msg": "please choose user"}
    if not content:
        return 2, {"msg": "please input content"}
    if len(content) > 40:
        return 3, {"msg": "content too long"}
    # Store the message under both (sender, recipient) and
    # (recipient, sender) so each side sees the conversation.
    compete_message_obj = UserCompeteMessage.hget(oc_user.uid, to_uid)
    compete_message_obj.set_message(oc_user.uid, to_uid, content)
    compete_message_obj = UserCompeteMessage.hget(to_uid, oc_user.uid)
    compete_message_obj.set_message(oc_user.uid, to_uid, content)
    return 0, {}
def test_bytestring_segment():
    """Bytestring segment can be cast to unicode and bytestring.

    Smoke test: both conversions must simply not raise; the return values
    are intentionally not inspected.
    """
    segment = HL7Segment(b"FOO|Baababamm")
    str(segment)
    six.text_type(segment)
def init_db():
    """
    Initialize the database subsystem.

    Creates all tables registered on ``Base.metadata`` using the
    module-level ``engine`` (SQLAlchemy's ``create_all`` skips tables that
    already exist by default).
    """
    # import models here so they'll properly register the metadata;
    # otherwise, import them first before calling init_db()
    from app import models
    Base.metadata.create_all(bind=engine)
def BDD100K(path: str) -> Dataset:
    """`BDD100K <https://bdd-data.berkeley.edu>`_ dataset.
    The file structure should be like::
        <path>
            bdd100k_images_100k/
                images/
                    100k/
                        test
                        train
                        val
                labels/
                    det_20/
                        det_train.json
                        det_val.json
                    lane/
                        polygons/
                            lane_train.json
                            lane_val.json
                    drivable/
                        polygons/
                            drivable_train.json
                            drivable_val.json
    Arguments:
        path: The root directory of the dataset.
    Returns:
        Loaded :class:`~tensorbay.dataset.dataset.Dataset` instance.
    """
    # Delegate to the shared loader with the "100k" split layout.
    return _BDD100K_loader(path, "100k")
def is_utf8(string):
    """Check if argument encodes to UTF8 without error.

    Args:
        string (str): value to test; non-string inputs simply yield False.

    Returns:
        True if string can be successfully encoded.
    """
    try:
        string.encode('utf-8')
        return True
    except (UnicodeEncodeError, UnicodeDecodeError, AttributeError):
        # Unencodable text, byte decoding trouble, or no .encode() at all.
        return False
def fixedwidth_bins(delta, xmin, xmax):
    """Return bins of width `delta` that cover `xmin`, `xmax` (or a larger range).

    The bin parameters are computed such that the bin size `delta` is
    guaranteed. In order to achieve this, the range `[xmin, xmax]` can be
    increased.

    Bins can be calculated for 1D data (then all parameters are simple
    floats) or nD data (then parameters are supplied as arrays, with each
    entry corresponding to one dimension).

    Parameters
    ----------
    delta : float or array_like
        desired spacing of the bins
    xmin : float or array_like
        lower bound (left boundary of first bin)
    xmax : float or array_like
        upper bound (right boundary of last bin)

    Returns
    -------
    dict
        The dict contains 'Nbins', 'delta', 'min', and 'max'; these are
        either floats or arrays, depending on the input.

    Raises
    ------
    ValueError
        If ``xmin < xmax`` does not hold element-wise.

    Example
    -------
    Use with :func:`numpy.histogram`::
        B = fixedwidth_bins(delta, xmin, xmax)
        h, e = np.histogram(data, bins=B['Nbins'], range=(B['min'], B['max']))
    """
    if not np.all(xmin < xmax):
        raise ValueError('Boundaries are not sane: should be xmin < xmax.')
    # np.float_ / legacy scalar aliases were removed in NumPy 2.0; use the
    # explicit dtypes instead.
    _delta = np.asarray(delta, dtype=np.float64)
    _xmin = np.asarray(xmin, dtype=np.float64)
    _xmax = np.asarray(xmax, dtype=np.float64)
    _length = _xmax - _xmin
    N = np.ceil(_length / _delta).astype(np.int64)  # number of bins
    dx = 0.5 * (N * _delta - _length)  # add half of the excess to each end
    return {'Nbins': N, 'delta': _delta, 'min': _xmin - dx, 'max': _xmax + dx}
def get_word(path):
    """Extract the word name (the part before the first '.') from a json path."""
    word, _, _ = path.partition('.')
    return word
def produce_edge_image(thresh, img):
    """
    Threshold the image and return its Canny edges.

    :param thresh: binarization threshold for the inverse-binary threshold.
    :param img: image array -- presumably single-channel grayscale, as
        expected by cv.threshold with THRESH_BINARY_INV; TODO confirm.
    :return: tuple of (Canny edge image, thresholded alpha image).
    """
    (thresh, alpha_img) = cv.threshold(img, thresh, 255, cv.THRESH_BINARY_INV)
    # Smooth speckle noise, then morphologically open to drop small blobs.
    blur_img = cv.medianBlur(alpha_img, 9)
    blur_img = cv.morphologyEx(blur_img, cv.MORPH_OPEN, (5,5))
    # find the edges
    return cv.Canny(blur_img, 30, 200), alpha_img
def get(endpoint: str,
        encoding: str = "utf-8",
        **params) -> t.Union[list, dict]:
    """
    Return requested data decoded from JSON (empty list on fallback).

    Checks that the request has the correct schema before issuing it.

    :param endpoint: endpoint for request.
    :param encoding: encoding for received data.
    :param params: requested params.
    """
    if not is_valid_schema(endpoint, **params):
        return []
    url = make_url(endpoint, **params)
    try:
        # Context manager ensures the HTTP response is closed (the previous
        # code leaked the connection on the success path).
        with request.urlopen(url) as response:
            return json.loads(response.read().decode(encoding))
    except (error.HTTPError, error.URLError) as err:
        logging.debug("Can't get '%s'.\n%s", url, err)
        return []
def aggregate_dicts(dicts: t.Sequence[dict], agg: str = "mean") -> dict:
    """
    Aggregates a list of dictionaries into a single dictionary. All
    dictionaries in ``dicts`` should have the same keys. All values for a
    given key are aggregated into a single value using ``agg``. Returns a
    single dictionary with the aggregated values.

    Parameters
    ----------
    dicts : sequence of dicts
        The dictionaries to aggregate.
    agg : {'mean', 'stdev', 'sum', 'median', 'min', 'max'}
        Name of the method to use to aggregate the values of `dicts` with.
    """
    agg_fns: t.Dict[str, t.Callable] = {
        "mean": np.mean,
        "stdev": np.std,
        "sum": np.sum,
        "median": np.median,
        "min": np.min,
        "max": np.max,
    }
    assert len(dicts) > 0
    aggregated = {}
    for key in dicts[0].keys():
        column = [entry[key] for entry in dicts]
        if isinstance(column[0], dict):
            # Nested dicts are aggregated recursively with the same method.
            aggregated[key] = aggregate_dicts(column, agg)
        else:
            aggregated[key] = agg_fns[agg](column, axis=0)
    return aggregated
def aggregation_most_frequent(logits):
    """This aggregation mechanism takes the softmax/logit output of several
    models resulting from inference on identical inputs and computes the
    most frequent label. It is deterministic (no noise injection like
    noisy_max() above).

    :param logits: logits or probabilities for each sample
    :return: int32 array of majority-vote labels, one per sample
    """
    # Compute labels from logits/probs and reshape array properly
    labels = labels_from_probs(logits)
    labels_shape = np.shape(labels)
    labels = labels.reshape((labels_shape[0], labels_shape[1]))
    # Initialize array to hold final labels
    result = np.zeros(int(labels_shape[1]))
    # Parse each sample (Python 3 `range`; the original `xrange` is a
    # Python 2 builtin and raises NameError on Python 3)
    for i in range(int(labels_shape[1])):
        # Count number of votes assigned to each class
        label_counts = np.bincount(labels[:, i], minlength=10)
        label_counts = np.asarray(label_counts, dtype=np.int32)
        # Result is the most frequent label
        result[i] = np.argmax(label_counts)
    return np.asarray(result, dtype=np.int32)
def sanitize_path(path):
    """
    Ensure the local filesystem path we're supposed to write to is legit.

    Changes the process working directory to ``path`` as a side effect and
    returns the resulting working directory.

    :param str path: destination path; must be absolute (start with "/").
    :raises ValueError: if ``path`` is not fully qualified.  (ValueError is
        a subclass of Exception, so callers catching the previous generic
        Exception still work.)
    :raises OSError: if ``path`` does not exist or is not a directory.
    """
    if not path.startswith("/"):
        raise ValueError("Path must be fully qualified.")
    os.chdir(path)
    return os.getcwd()
def handle_led(req):
    """Service callback switching a GPIO pin according to ``req.state``.

    :param req: Led service request with ``pin`` and ``state`` fields.
    :return: LedResponse echoing the pin that was switched.
    """
    # switch GPIO to HIGH, if '1' was sent
    if (req.state == 1):
        if hostname == 'minibot':
            GPIO.output(req.pin, GPIO.HIGH)
    else:
        # for all other values we set it to LOW
        # (LEDs are low active!)
        if hostname == 'minibot':
            GPIO.output(req.pin, GPIO.LOW)
    # debug
    # NOTE(review): the third format argument repeats req.pin, so "Result"
    # just re-logs the pin number -- confirm what it should report.
    rospy.loginfo("GPIO %s switched to %s. Result: %s", req.pin, req.state, req.pin)
    # The name of the 'xyzResponse' comes directly from the Xyz.srv filename!
    return LedResponse(req.pin)
def evaluate_submission_with_proto(
    submission: Submission,
    ground_truth: Submission,
) -> Dict[str, float]:
    """Calculates various motion prediction metrics given
    the submission and ground truth protobuf messages.
    Args:
        submission (Submission): Proto message with predicted trajectories.
        ground_truth (Submission): Proto message with ground truth trajectories.
    Raises:
        ValueError:
            Number of objects in submission is not equal to number of objects in ground truth.
        ValueError:
            Objects order in submission violates objects order in ground truth.
    Returns:
        Dict[str, float]: Mapping from metric name to its aggregated value.
        NOTE(review): the values are actually per-sample lists collected in
        a defaultdict, not aggregated floats -- confirm intended contract.
    """
    _check_submission_and_ground_truth(submission, ground_truth)
    metrics = defaultdict(list)
    # Index ground truth by (scene, track) so predictions may arrive in any order.
    gt_map = {
        (prediction.scene_id, prediction.track_id): prediction
        for prediction in ground_truth.predictions
    }
    for i in range(len(submission.predictions)):
        pred = submission.predictions[i]
        # NOTE(review): a missing key raises KeyError here, so the two
        # ValueError checks below can never actually fire -- confirm.
        gt = gt_map[(pred.scene_id, pred.track_id)]
        if pred.scene_id != gt.scene_id:
            raise ValueError(f'Check scenes order: {pred.scene_id} != {gt.scene_id}')
        if pred.track_id != gt.track_id:
            raise ValueError(f'Check objects order: {pred.track_id} != {gt.track_id}')
        # Keep only the MAX_NUM_MODES highest-weighted predicted modes.
        pred_trajectories, weights = get_trajectories_weights_arrays(pred.weighted_trajectories)
        pred_trajectories = pred_trajectories[np.argsort(weights)][-MAX_NUM_MODES:]
        weights = weights[np.argsort(weights)][-MAX_NUM_MODES:]
        gt_trajectory, _ = get_trajectories_weights_arrays(gt.weighted_trajectories)
        gt_trajectory = gt_trajectory[0]  # Reduce modes dim
        metrics['avg_ade'].append(avg_ade(gt_trajectory, pred_trajectories))
        metrics['avg_fde'].append(avg_fde(gt_trajectory, pred_trajectories))
        metrics['min_ade'].append(min_ade(gt_trajectory, pred_trajectories))
        metrics['min_fde'].append(min_fde(gt_trajectory, pred_trajectories))
        metrics['top1_ade'].append(top1_ade(gt_trajectory, pred_trajectories, weights))
        metrics['top1_fde'].append(top1_fde(gt_trajectory, pred_trajectories, weights))
        metrics['weighted_ade'].append(weighted_ade(gt_trajectory, pred_trajectories, weights))
        metrics['weighted_fde'].append(weighted_fde(gt_trajectory, pred_trajectories, weights))
        metrics['log_likelihood'].append(log_likelihood(gt_trajectory, pred_trajectories, weights))
        metrics['corrected_nll'].append(
            corrected_negative_log_likelihood(gt_trajectory, pred_trajectories, weights))
        metrics['is_ood'].append(gt.is_ood)
    return metrics
def build_optimizer(args, model):
    """
    Build an optimizer based on the arguments given.

    :param args: dict with 'optim' (case-insensitive: 'sgd', 'adadelta' or
        'adamw'), 'learning_rate' and 'weight_decay' keys.
    :param model: torch module whose parameters are optimized.
    :raises ValueError: for an unknown optimizer name.
    """
    if args['optim'].lower() == 'sgd':
        optimizer = optim.SGD(model.parameters(), lr=args['learning_rate'], momentum=0.9, weight_decay=args['weight_decay'])
    elif args['optim'].lower() == 'adadelta':
        optimizer = optim.Adadelta(model.parameters(), lr=args['learning_rate'], weight_decay=args['weight_decay'])
    elif args['optim'].lower() == 'adamw':
        optimizer = optim.AdamW(model.parameters(), lr=args['learning_rate'], weight_decay=args['weight_decay'])
    else:
        # BUG FIX: `args` is a dict, so the previous `args.optim` raised
        # AttributeError instead of the intended ValueError.
        raise ValueError("Unknown optimizer: %s" % args['optim'])
    return optimizer
def decorator_matcher(func_names, keyword, fcreate=None):
    """Search pattern @[namespace]<func_name>("<skey>").

    Parameters
    ----------
    func_names : list
        List of macro names to match.
    keyword : str
        Definition keyword (e.g. "def") used to pick up the skey from the
        line following a bare decorator.
    fcreate : Function (skey, path, range, func_name) -> result.

    Returns
    -------
    Callable matcher(path, source, begin_line=0, end_line=None) -> list.
    """
    # Raw strings throughout: the previous non-raw literals contained
    # invalid escape sequences (\( \s \Z), which are a DeprecationWarning
    # and will become a SyntaxError in future Python versions.
    decorator = r"@?(?P<decorator>([a-zA-Z_]?[a-zA-Z_0-9.]*.)?("
    decorator += "|".join(re.escape(x) for x in func_names)
    decorator += r"))((\(\"(?P<skey>[^\"]+)\")|(\s*\Z))"
    nextline = keyword + r"\s+(?P<skey>[a-zA-Z_0-9]+)\("
    decorator = re.compile(decorator)
    nextline = re.compile(nextline)
    def _matcher(path, source, begin_line=0, end_line=None):
        source = source.split("\n") if isinstance(source, str) else source
        results = []
        end_line = min(end_line, len(source)) if end_line else len(source)
        for line in range(begin_line, end_line):
            content = source[line]
            match = decorator.match(content)
            if match:
                skey = match.group("skey")
                if skey:
                    start, end = match.span("skey")
                    lineno = line
                if not skey and line + 1 < len(source):
                    # Bare decorator: look for the skey on the next line,
                    # e.g. `def <skey>(`.
                    match_name = nextline.match(source[line + 1])
                    if match_name:
                        skey = match_name.group("skey")
                        start, end = match_name.span("skey")
                        lineno = line + 1
                if skey:
                    start_pos = Position(lineno, start)
                    end_pos = Position(lineno, end)
                    item = fcreate(skey, path,
                                   Range(start_pos, end_pos),
                                   match.group("decorator"))
                    if item:
                        results.append(item)
        return results
    return _matcher
def log_binomial(n, k, tol=0.):
    """
    Computes log binomial coefficient.

    When ``tol >= 0.02`` this uses a shifted Stirling's approximation to the
    log Beta function via :func:`log_beta`.

    :param torch.Tensor n: A nonnegative integer tensor.
    :param torch.Tensor k: An integer tensor ranging in ``[0, n]``.
    :rtype: torch.Tensor
    """
    assert isinstance(tol, (float, int)) and tol >= 0
    np1 = n + 1
    if tol >= 0.02:
        # Cheap approximate path through the shifted-Stirling log Beta.
        return -np1.log() - log_beta(k + 1, np1 - k, tol=tol)
    # At small tolerance it is cheaper to defer to torch.lgamma().
    return np1.lgamma() - (k + 1).lgamma() - (np1 - k).lgamma()
def error_500(request, *args, **kwargs):
    """
    Throws a JSON response for INTERNAL errors.

    :param request: the request (unused; signature matches Django's
        handler500 contract)
    :return: JsonResponse with status 500
    """
    # Typo fix in the user-facing message: "ocurred" -> "occurred".
    message = "An internal server error occurred"
    response = JsonResponse(data={"message": message, "status_code": 500})
    response.status_code = 500
    return response
def datetimes_to_durations(start_times, end_times, fill_date=None, freq="D", dayfirst=False, na_values=None):
    """
    This is a very flexible function for transforming arrays of start_times
    and end_times to the proper format for lifelines: duration and event
    observation arrays.

    Parameters
    ----------
    start_times: an array, Series or DataFrame
        iterable representing start times. These can be strings, or datetime objects.
    end_times: an array, Series or DataFrame
        iterable representing end times. These can be strings, or datetimes. These values can be None, or an empty string, which corresponds to censorship.
    fill_date: datetime, optional (default=None)
        the date to use if end_times is a None or empty string. This corresponds
        to last date of observation. Anything after this date is also censored.
        Defaults to "today" evaluated at call time (the previous default,
        ``datetime.today()``, was frozen once at import time and went stale
        in long-running processes).
    freq: string, optional (default='D')
        the units of time to use. See Pandas 'freq'. Default 'D' for days.
    dayfirst: bool, optional (default=False)
        convert assuming European-style dates, i.e. day/month/year.
    na_values : list, optional
        list of values to recognize as NA/NaN. Ex: ['', 'NaT']

    Returns
    -------
    T: numpy array
        array of floats representing the durations with time units given by freq.
    C: numpy array
        boolean array of event observations: 1 if death observed, 0 else.

    Examples
    --------
    .. code:: python
        from lifelines.utils import datetimes_to_durations
        start_dates = ['2015-01-01', '2015-04-01', '2014-04-05']
        end_dates = ['2016-02-02', None, '2014-05-06']
        T, E = datetimes_to_durations(start_dates, end_dates, freq="D")
        T  # array([ 397., 1414., 31.])
        E  # array([ True, False, True])
    """
    if fill_date is None:
        # Evaluate per call so the censoring cutoff tracks the current day.
        fill_date = datetime.today()
    fill_date = pd.to_datetime(fill_date)
    freq_string = "timedelta64[%s]" % freq
    start_times = pd.Series(start_times).copy()
    end_times = pd.Series(end_times).copy()
    # C marks observed events; missing/NA end times are censored.
    C = ~(pd.isnull(end_times).values | end_times.isin(na_values or [""]))
    end_times[~C] = fill_date
    start_times_ = pd.to_datetime(start_times, dayfirst=dayfirst)
    end_times_ = pd.to_datetime(end_times, dayfirst=dayfirst, errors="coerce")
    # Events past the observation cutoff are also censored.
    deaths_after_cutoff = end_times_ > fill_date
    C[deaths_after_cutoff] = False
    T = (end_times_ - start_times_).values.astype(freq_string).astype(float)
    if (T < 0).sum():
        warnings.warn("Warning: some values of start_times are after end_times.\n", UserWarning)
    return T, C.values
def copy_image(filenames = '*.png', method = 'color', color = 'white',
               boundary_width = BOUNDARY_WIDTH):
    """
    Create image copies with square dimensions by adding additional space to
    the original image. Image copies may fill background with either a solid
    color (default: white) or a computed average based on the edge of the
    original image.

    :param filenames: glob pattern selecting the input images.
    :param method: background strategy: 'color', 'average' or 'blend'.
    :param color: fill color used when method == 'color'.
    :param boundary_width: only used in the output filename suffix here --
        presumably consumed by the background helpers; confirm.
    """
    input_files = glob.glob(filenames)
    if len(input_files) == 0:
        sys.exit('The expression \'%s\' does not match any filenames!'
                 % (filenames,))
    index = 1
    for input_file in input_files:
        print('Working on %s (%i of %i)' % (input_file, index, len(input_files)))
        # Create directory for image copies; potentially spanning multiple
        # directories
        dirname, filename = os.path.split(os.path.abspath(input_file))
        if dirname[-1] != '/':
            dirname = dirname + '/'
        new_dirname = dirname + 'changerose/'
        try:
            os.makedirs(new_dirname)
        except OSError as e:
            # Ignore "already exists"; re-raise anything else.
            if e.errno != errno.EEXIST:
                raise
        # Retain original image
        im = Image.open(input_file)
        im_copy = im.copy()
        im.close()
        # Create background image layer
        if method == 'color':
            final_im = color_background(im_copy, color = color)
        elif method == 'average':
            final_im = average_background(im_copy)
        elif method == 'blend':
            final_im = blend_background(im_copy)
        else:
            sys.exit('Invalid color method specified!')
        final_im.paste(im_copy, get_paste_position(im_copy))
        im_copy.close()
        # Save final image with informative filename
        root, ext = os.path.splitext(filename)
        final_filename = ''
        if method == 'color':
            final_filename = new_dirname + root + '_changerose_' \
                + method + '_' + str(color) + '_b' + str(boundary_width) + ext
        else:
            final_filename = new_dirname + root + '_changerose_' \
                + method + '_b' + str(boundary_width) + ext
        final_im.save(final_filename)
        final_im.close()
        print("Saved %s" % final_filename)
        index += 1
def offset_from_date(v, offset, gran='D', exact=False):
    """
    Given a date string and some numeric offset, as well as a unit, compute
    the offset from that value by ``offset`` gran's. Gran defaults to D.
    If ``exact`` is set to true, then the exact date is figured out,
    otherwise the level of granularity given by gran is used.
    Returns a date string.

    :param v: reference timestamp string, e.g. '20200301' or '20200301T1230'.
    :param offset: signed number of units to move by.
    :param gran: unit of the offset (converted via units_to_gran).
    :param exact: emit the fully-resolved date instead of a truncated one.
    """
    gran = string_conversions.units_to_gran(gran)
    # check for valid refdate
    if len(v) > 0:
        # Extract date components into a datetime object for manipulation
        y = int(v[:4])
        m = int(v[4:6])
        if len(v) >= 8:
            d = int(v[6:8])
            really_d = True
        else:
            really_d = False
            d = 1
        if len(v) >= 11:
            h = int(v[9:11])
        else:
            h = None
        dt = datetime.datetime(y, m, d)
        if len(v) >= 13:
            min = int(v[11:13])
        else:
            min = None
        if h is not None:
            dt = datetime.datetime(y, m, d, h)
            if len(v) >= 15:
                s = int(v[13:15])
                dt = datetime.datetime(y, m, d, h, min, s)
            else:
                s = None
                if min is not None:
                    dt = datetime.datetime(y, m, d, h, min)
    elif offset >= 1:
        return 'FUTURE_REF'
    elif offset <= -1:
        return 'PAST_REF'
    else:
        return v
    # Do manipulations
    if gran == 'TM':
        # minutes
        dt += datetime.timedelta(minutes=offset)
        return dt.strftime('%Y%m%dT%H%M')
    elif gran == 'TH':
        # hours
        dt += datetime.timedelta(hours=offset)
        if exact:
            return dt.strftime('%Y%m%dT%H%M')
        else:
            return dt.strftime('%Y%m%dT%H')
    elif gran == 'D':
        # days
        dt += datetime.timedelta(days=offset)
        if exact and min is not None:
            return dt.strftime('%Y%m%dT%H%M')
        elif exact and h is not None:
            return dt.strftime('%Y%m%dT%H')
        else:
            return dt.strftime('%Y%m%d')
    elif gran == 'W' or gran == 'F':
        # weeks/fortnights
        if gran == 'F':
            offset *= 2
        dt += datetime.timedelta(weeks=offset)
        if exact:
            return dt.strftime('%Y%m%d')
        else:
            return dt.strftime('%YW%W')
    elif gran == 'M':
        # months - timedelta rather annoyingly doesn't support months, so we
        # need to do a bit more work here
        m += offset
        # BUG FIX: normalize (y, m) with floor arithmetic so m lands in
        # 1..12.  The previous int(m / 12) truncation mishandled negative
        # offsets that were not exact multiples of 12 (e.g. March - 10
        # months stayed in the same year), and a positive overflow to an
        # exact multiple of 12 produced month 0 and crashed in datetime().
        y += (m - 1) // 12
        m = (m - 1) % 12 + 1
        # avoid bad days
        dt = None
        while dt is None and d > 0:
            try:
                dt = datetime.datetime(y, m, d)
            except ValueError:
                d -= 1
        if exact:
            return dt.strftime('%Y%m%d')
        else:
            return dt.strftime('%Y%m')
    elif gran == 'Y' or gran == 'E' or gran == 'C':
        # years/decades/centuries - again, need to do a bit more work
        if gran == 'C':
            offset *= 100
        if gran == 'E':
            offset *= 10
        y += offset
        # Python doesn't allow datetime objects to be created representing
        # years before 1970, so do this the old fashioned way
        if not exact:
            if gran == 'C':
                return ("{0:04d}".format(y))[:2]
            elif gran == 'E':
                return ("{0:04d}".format(y))[:3]
            else:
                return "%04d" % y
        else:
            if d == 29 and m == 2 and not calendar.isleap(y):
                # eugh, mucking about with a date that's not going to be in
                # the target year - fall back
                d = 28
            if really_d:
                return "%04d%02d%02d" % (y, m, d)
            else:
                return "%04d%02d" % (y, m)
    elif offset >= 1:
        return 'FUTURE_REF'
    elif offset <= -1:
        return 'PAST_REF'
    else:
        return v
def _strip(x):
"""remvoe tensor-hood from the input structure"""
if isinstance(x, Tensor):
x = x.item()
elif isinstance(x, dict):
x = {k: _strip(v) for k, v in x.items()}
return x | 5,327,431 |
def rank_array(a, descending=True):
    """Rank array values counting from 1 (rank 1 = largest when descending)."""
    order = np.argsort(a)
    if descending:
        order = order[::-1]
    ranks = np.empty_like(order)
    # Position i in sorted order receives rank i+1.
    ranks[order] = np.arange(1, len(a) + 1)
    return ranks
def create_board_game_mcts(observation_spec,
                           action_spec,
                           dirichlet_alpha: float,
                           pb_c_init=1.25,
                           num_simulations=800,
                           debug_summaries=False):
    """Helper function for creating MCTSAlgorithm for board games.

    :param observation_spec: spec of the observations fed to the algorithm.
    :param action_spec: spec of the actions.
    :param dirichlet_alpha: alpha of the root Dirichlet exploration noise.
    :param pb_c_init: PUCT exploration constant (default 1.25).
    :param num_simulations: simulations per move (default 800).
    :param debug_summaries: unused in this body -- presumably kept for
        signature compatibility with sibling factories; confirm.
    """
    def visit_softmax_temperature(num_moves):
        # Temperature 1 for the opening, near-greedy afterwards.
        t = torch.ones_like(num_moves, dtype=torch.float32)
        # paper pseudocode uses 0.0
        # Current code does not support 0.0, so use a small value, which should
        # not make any difference since a difference of 1e-3 for visit probability
        # translates to about exp(1e-3*1e10) probability ratio.
        t[num_moves >= 30] = 1e-10
        return t
    return MCTSAlgorithm(
        observation_spec=observation_spec,
        action_spec=action_spec,
        discount=1.0,
        root_dirichlet_alpha=dirichlet_alpha,
        root_exploration_fraction=0.25,
        num_simulations=num_simulations,
        pb_c_init=pb_c_init,
        pb_c_base=19652,
        visit_softmax_temperature_fn=visit_softmax_temperature,
        known_value_bounds=(-1, 1),
        is_two_player_game=True)
def normalize(params, axis=0):
    """
    Normalize ``params`` so that entries along ``axis`` sum to one.

    :param params: array of parameters of shape [axis0, axis1, ..., axisp]
    :param axis: axis along which to normalize (default 0)
    :return: array of the same shape with unit sums along ``axis``
    """
    totals = np.sum(params, axis=axis, keepdims=True)
    return params / totals
def issue(context):
    """
    Submit an issue to JIRA built from the form data in ``context['issue']``.

    On success the created issue key is written back to ``context['key']``.

    :param context: dict with an 'issue' list of form items.
    :raises Exception: if the JIRA API call fails or returns no issue key.
    """
    form_data = context['issue']
    jira_fields = {}
    # transform form_data into the payload submitted to the JIRA API
    for form_item in form_data:
        if form_item['name'] == '主题':
            jira_fields['summary'] = form_item['value']
        elif form_item['name'] == '版本':
            # BUG FIX: the key used to be the builtin `id` function, which
            # is not JSON-serializable; JIRA expects the string "id".
            jira_fields['version'] = {
                'id': form_item['value']
            }
        elif form_item['name'] == '描述':
            jira_fields['description'] = form_item['value']
            if form_item['extraMsg']:
                for add_des in form_item['extraMsg']:
                    jira_fields['description'] += '\n'
                    jira_fields['description'] += '------------------------------\n'
                    jira_fields['description'] += add_des['message']
    url = 'http://www.example.com/jira/rest/api/2/issue'
    header = {
        'Content-Type': 'application/json;charset=utf-8'
    }
    resp = requests.post(url, auth=('YOUR JIRA_USER_NAME', 'YOUR_JIRA_PASSWD'), json={"fields": jira_fields}, headers=header)
    if resp.status_code >= 200 and resp.status_code < 300:
        # BUG FIX: the response object is `resp`; the previous code used the
        # undefined name `response` and raised NameError on every call.
        body = json.loads(resp.text)
        if body.get('key'):
            context['key'] = body['key']
        else:
            raise Exception(f'Submit failed {resp.text}')
    else:
        raise Exception(f'Create issue failed with code {resp.status_code}\n{resp.text}')
def product_soft_update(uuid):
    """
    Product soft update route
    :return Endpoint with RESTful pattern
    # pylint: disable=line-too-long
    See https://madeiramadeira.atlassian.net/wiki/spaces/CAR/pages/2244149708/WIP+-+Guidelines+-+RESTful+e+HATEOS
    :rtype flask.Response
    ---
    patch:
        summary: Soft Product Update
        parameters:
            - in: path
              name: uuid
              description: "Product Id"
              required: true
              schema:
                type: string
                format: uuid
                example: 4bcad46b-6978-488f-8153-1c49f8a45244
        requestBody:
            description: 'Product field to be updated'
            required: true
            content:
                application/json:
                    schema: ProductSoftUpdateRequestSchema
        responses:
            200:
                description: Success response
                content:
                    application/json:
                        schema: ProductUpdateResponseSchema
            4xx:
                description: Error response
                content:
                    application/json:
                        schema: ProductUpdateErrorResponseSchema
            5xx:
                description: Service fail response
                content:
                    application/json:
                        schema: ProductUpdateErrorResponseSchema
    """
    # Parse the raw Flask request into the project's ApiRequest wrapper.
    request = ApiRequest().parse_request(APP)
    LOGGER.info(f'request: {request}')
    status_code = 200
    response = ApiResponse(request)
    # HATEOAS link generation is disabled for this endpoint.
    response.set_hateos(False)
    manager = ProductManager(logger=LOGGER, product_service=ProductServiceV1(logger=LOGGER))
    manager.debug(DEBUG)
    try:
        response.set_data(manager.soft_update(request.to_dict(), uuid))
        # response.set_total(manager.count(request))
    except CustomException as error:
        LOGGER.error(error)
        # Validation errors keep their own message; anything else is
        # reported to the client as a generic update error.
        if not isinstance(error, ValidationException):
            error = ApiException(MessagesEnum.UPDATE_ERROR)
        status_code = 400
        # A manager-level exception takes precedence over the caught one.
        if manager.exception:
            error = manager.exception
        response.set_exception(error)
    return response.get_response(status_code) | 5,327,436 |
def kernel_test(slabs, data, backend):
    """
    Test the reflectivity kernels for genx.
    Parameters
    ----------
    slabs: np.ndarray
        Slab representation of the system; rows are
        (thickness, real SLD, imaginary SLD, roughness sigma)
    data: np.ndarray
        Q, R arrays; an optional 4th column is the Q resolution dQ
    backend: {parratt, matrix}
        function for reflectance calculation
    """
    Q = data[:, 0]
    layers = []
    for thickness, rsld, isld, sigma in slabs:
        layers.append(
            model.Layer(
                b=(rsld - 1j * isld), dens=0.1, d=thickness, sigma=sigma
            )
        )
    # genx stacks are ordered substrate-first, so reverse the slab order.
    layers.reverse()
    stack = model.Stack(Layers=list(layers[1:-1]), Repetitions=1)
    sample = model.Sample(
        Stacks=[stack], Ambient=layers[-1], Substrate=layers[0]
    )
    # print(sample)
    inst = model.Instrument(
        probe=backend,
        wavelength=1.54,
        coords="q",
        I0=1,
        res=0,
        restype="no conv",
        respoints=5,
        resintrange=2,
        beamw=0.1,
        footype="no corr",
        samplelen=10,
        pol="uu",
    )
    # A 4th data column means a per-point Q resolution must be convolved.
    if data.shape[1] == 4:
        dQ = data[:, 3]
        inst.restype = "full conv and varying res."
        inst.res = dQ
        if backend == "neutron pol spin flip":
            # memory issues in matrix formalism if too many data points
            inst.respoints = 101
        else:
            inst.respoints = (
                10001  # try to use same convolution as ref1d when generating
            )
        inst.resintrange = 3.5
    # print(inst)
    R = sample.SimSpecular(Q, inst)
    assert R.shape == data[:, 1].shape
    if data.shape[1] == 4:
        # validation accuracy is reduced for resolution runs, as strongly
        # depends on numerical convolution scheme
        if backend == "neutron pol spin flip":
            np.testing.assert_allclose(R, data[:, 1], rtol=0.005)
        else:
            np.testing.assert_allclose(R, data[:, 1], rtol=0.001)
    else:
        np.testing.assert_allclose(R, data[:, 1], rtol=0.001) | 5,327,437 |
def estimate_cv_regression(
    results: pd.DataFrame, critical_values: Sequence[float]
) -> Dict[float, List[float]]:
    """
    Fit response-surface regressions for selected critical values.

    Parameters
    ----------
    results : DataFrame
        A dataframe with rows containing the quantiles and columns containing
        the number of observations
    critical_values : Sequence[float]
        The critical values (in percent, e.g. 1, 5, 10) to use

    Returns
    -------
    dict
        Maps each critical value to the rounded regression coefficients of a
        constant plus powers of 1/T, with insignificant terms zeroed out.
    """
    # For percentiles 1, 5 and 10, regress on a constant, and powers of 1/T
    out = {}
    quantiles = np.asarray(results.index)
    tau = np.array(results.columns).reshape((1, -1)).T
    # Regressors: [1, 1/T, 1/T^2, 1/T^3]
    rhs = (1.0 / tau) ** np.arange(4)
    for cv in critical_values:
        # Row of `results` whose quantile is closest to cv percent.
        loc = np.argmin(np.abs(100 * quantiles - cv))
        lhs = np.squeeze(np.asarray(results.iloc[loc]))
        res = OLS(lhs, rhs).fit()
        params = res.params.copy()
        # Drop terms that are not significant at the 5% level.
        params[res.pvalues > 0.05] = 0.0
        out[cv] = [round(val, 5) for val in params]
    return out | 5,327,438 |
def parse_field(field_info, op):
    """Attach the field(s) described by ``field_info`` to the operation ``op``.

    field_info is a dictionary containing at least the keys 'fields' and
    'constraints'; op is an elasticObject.
    Returns the same elasticObject, updated in place.
    """
    fields = field_info["fields"]
    constraints = field_info["constraints"]
    _logger.debug("fields %s", fields)
    # A single unconstrained field uses the simple setter; anything else
    # goes through the multi-field builder.
    if len(fields) == 1 and not constraints:
        op.set_field(field=fields[0])
    else:
        op.multiple_fields_string(fields=fields, constraints=constraints)
    return op
def resize(image, size):
    """Resize a multiband image to (h, w), choosing a backend by band count."""
    # OpenCV handles few-channel images; images with 4+ channels go through
    # skimage, which supports arbitrary band counts.
    if image.shape[2] < 4:
        return cv2.resize(image, size, interpolation=cv2.INTER_AREA)
    return skimage.transform.resize(
        image, size, mode="constant", preserve_range=True
    )
def get_configs(conf_file=None):
    """Load configuration values (like db_url) from a YAML file.

    :param conf_file: path to the YAML file; when falsy, defaults to
        ``config.yml`` in the parent directory of this module.
    :return: the parsed configuration as returned by ``yaml.safe_load``
    """
    if not conf_file:
        parent_dir = os.path.join(os.path.dirname(__file__), os.pardir)
        conf_file = os.path.join(parent_dir, 'config.yml')
    with open(conf_file, 'r') as fp:
        return yaml.safe_load(fp)
def version():
    """Report this CLI tool's version string (module-level ``__version__``)."""
    return __version__
def load_file_dangerous(file_path):
    """Load a single-line Python literal from a file.

    SECURITY FIX: the previous implementation used bare ``eval``, which
    executes arbitrary code found in the file.  ``ast.literal_eval`` accepts
    the same literal syntax (numbers, strings, tuples, lists, dicts, sets,
    booleans, None) but cannot execute code, removing the injection risk
    flagged in the old TODO.

    :param file_path: path of the file whose stripped content is parsed
    :return: the parsed Python literal
    :raises ValueError: if the file content is not a valid Python literal
    """
    import ast
    with open(file_path, "r") as f:
        content = ast.literal_eval(f.read().strip())
    return content
def ho2cu(ho):
    """
    Homochoric vector to cubochoric vector.

    Works on arrays of vectors; the last axis holds the 3 components.
    ``beta``, ``R1`` and ``sc`` are module-level constants of the Roşca
    mapping (defined elsewhere in this file).

    References
    ----------
    D. Roşca et al., Modelling and Simulation in Materials Science and Engineering 22:075013, 2014
    https://doi.org/10.1088/0965-0393/22/7/075013
    """
    # Vector magnitudes; kept as a trailing singleton axis for broadcasting.
    rs = np.linalg.norm(ho,axis=-1,keepdims=True)
    # Reorder components into the canonical pyramid (sextant) ordering.
    xyz3 = np.take_along_axis(ho,Rotation._get_pyramid_order(ho,'forward'),-1)
    with np.errstate(invalid='ignore',divide='ignore'):
        # inverse M_3
        xyz2 = xyz3[...,0:2] * np.sqrt( 2.0*rs/(rs+np.abs(xyz3[...,2:3])) )
        qxy = np.sum(xyz2**2,axis=-1,keepdims=True)
        q2 = qxy + np.max(np.abs(xyz2),axis=-1,keepdims=True)**2
        sq2 = np.sqrt(q2)
        q = (beta/np.sqrt(2.0)/R1) * np.sqrt(q2*qxy/(q2-np.max(np.abs(xyz2),axis=-1,keepdims=True)*sq2))
        tt = np.clip((np.min(np.abs(xyz2),axis=-1,keepdims=True)**2\
            +np.max(np.abs(xyz2),axis=-1,keepdims=True)*sq2)/np.sqrt(2.0)/qxy,-1.0,1.0)
        # Inverse of the T map: branch on which component dominates.
        T_inv = np.where(np.abs(xyz2[...,1:2]) <= np.abs(xyz2[...,0:1]),
                         np.block([np.ones_like(tt),np.arccos(tt)/np.pi*12.0]),
                         np.block([np.arccos(tt)/np.pi*12.0,np.ones_like(tt)]))*q
        # Restore component signs lost by the abs() operations above.
        T_inv[xyz2<0.0] *= -1.0
        # Points on the pyramid axis (qxy ~ 0) map to the cube axis.
        T_inv[np.broadcast_to(np.isclose(qxy,0.0,rtol=0.0,atol=1.0e-12),T_inv.shape)] = 0.0
        cu = np.block([T_inv, np.where(xyz3[...,2:3]<0.0,-np.ones_like(xyz3[...,2:3]),np.ones_like(xyz3[...,2:3])) \
                      * rs/np.sqrt(6.0/np.pi),
                      ])/ sc
    # The zero vector maps exactly to the cube center.
    cu[np.isclose(np.sum(np.abs(ho),axis=-1),0.0,rtol=0.0,atol=1.0e-16)] = 0.0
    cu = np.take_along_axis(cu,Rotation._get_pyramid_order(ho,'backward'),-1)
    return cu | 5,327,444 |
def test__atom_neighbor_keys():
    """ test graph.atom_neighbor_keys

    Checks the neighbor-key mapping for the C8H13O connectivity-graph
    fixture (``C8H13O_CGR`` is defined elsewhere in this module).
    """
    assert (graph.atom_neighbor_keys(C8H13O_CGR) ==
            {0: (2,), 1: (3,), 2: (0, 4), 3: (1, 5), 4: (2, 6), 5: (3, 7),
             6: (4, 7), 7: (5, 6, 8), 8: (7,)}) | 5,327,445 |
def apparent_resistivity(
    dc_survey, survey_type='dipole-dipole',
    space_type='half-space', dobs=None,
    eps=1e-10
):
    """
    Calculate apparent resistivity. Assuming that data are normalized
    voltages - Vmn/I (Potential difference [V] divided by injection
    current [A]). For fwd modelled data an injection current of 1A is
    assumed in SimPEG.
    Input:
    :param SimPEG.EM.Static.DC.SurveyDC.Survey dc_survey: DC survey object
    :param numpy.ndarray dobs: normalized voltage measurements [V/A]
    :param str survey_type: Either 'dipole-dipole' | 'pole-dipole' |
        'dipole-pole' | 'pole-pole'
    :param float eps: Regularizer in case of a null geometric factor
    Output:
    :return rhoApp: apparent resistivity
    """
    # Use dobs stored on the survey when none is passed explicitly.
    if dobs is None:
        if dc_survey.dobs is None:
            # BUG FIX: the bare ``raise Exception()`` carried no message,
            # leaving the caller with no clue about what went wrong.
            raise Exception(
                "No observed data: pass `dobs` or set `dc_survey.dobs`."
            )
        dobs = dc_survey.dobs
    # Calculate Geometric Factor
    G = geometric_factor(
        dc_survey, survey_type=survey_type, space_type=space_type
    )
    # Calculate apparent resistivity.
    # absolute value is required because of the regularizer `eps`, which
    # can flip the sign where G is near zero.
    rhoApp = np.abs(dobs*(1./(G+eps)))
    return rhoApp
def test_apiusers_unauthenticated_requests_fail(api_client,
                                                apiuser_with_custom_defaults):
    """
    Requesting an apiuser list or detail view without providing any
    authentication credentials should result in a 403 error.
    """
    # Create one api user so the detail endpoint has a real target.
    test_cls = apiuser_with_custom_defaults()
    api_user = test_cls.objects.create_user('test', 'secret', password='pw',
                                            email='test@test.com',
                                            first_name='F', last_name='Last')
    # Neither request sends credentials; both must be rejected.
    list_resp = api_client.get('{}apiusers/'.format(API_ROOT))
    detail_resp = api_client.get('{}apiusers/test'.format(API_ROOT))
    assert list_resp.status_code == 403
    assert detail_resp.status_code == 403 | 5,327,447 |
def readJsonFile(filePath):
    """Read and deserialize JSON data from a file.

    Args:
        filePath (str): location of the json file
    Returns:
        The deserialized content of the file.
    """
    with open(filePath, 'r') as myfile:
        return json.load(myfile)
def test_update_youtube_statuses_api_quota_exceeded(
    mocker, youtube_video_files_processing
):
    """
    Test that the update_youtube_statuses task stops without raising an error if the API quota is exceeded.
    """
    # Make every video_status call raise an HTTP 403 quota error.
    mock_video_status = mocker.patch(
        "videos.tasks.YouTubeApi.video_status",
        side_effect=HttpError(
            MockHttpErrorResponse(403), str.encode(API_QUOTA_ERROR_MSG, "utf-8")
        ),
    )
    update_youtube_statuses()
    # The task must bail out after the first quota error, not keep polling.
    mock_video_status.assert_called_once() | 5,327,449 |
def queue_async_indicators():
    """
    Fetches AsyncIndicators that
    1. were not queued till now or were last queued more than 4 hours ago
    2. have failed less than ASYNC_INDICATOR_MAX_RETRIES times
    This task quits after it has run for more than
    ASYNC_INDICATOR_QUEUE_TIME - 30 seconds i.e 4 minutes 30 seconds.
    While it runs, it clubs fetched AsyncIndicators by domain and doc type and queue them for processing.
    """
    start = datetime.utcnow()
    # Hard deadline for this run; checked after each batch below.
    cutoff = start + ASYNC_INDICATOR_QUEUE_TIME - timedelta(seconds=30)
    retry_threshold = start - timedelta(hours=4)
    # don't requeue anything that has been retried more than ASYNC_INDICATOR_MAX_RETRIES times
    indicators = AsyncIndicator.objects.filter(unsuccessful_attempts__lt=ASYNC_INDICATOR_MAX_RETRIES)[:settings.ASYNC_INDICATORS_TO_QUEUE]
    indicators_by_domain_doc_type = defaultdict(list)
    # page so that envs can have arbitarily large settings.ASYNC_INDICATORS_TO_QUEUE
    for indicator in paginated_queryset(indicators, 1000):
        # only requeue things that are not in queue or were last queued earlier than the threshold
        if not indicator.date_queued or indicator.date_queued < retry_threshold:
            indicators_by_domain_doc_type[(indicator.domain, indicator.doc_type)].append(indicator)
    for k, indicators in indicators_by_domain_doc_type.items():
        _queue_indicators(indicators)
        # Stop mid-way if the time budget is spent; remaining groups are
        # picked up by the next scheduled run.
        if datetime.utcnow() > cutoff:
            break | 5,327,450 |
def _sort_torch(tensor):
    """Sort ``tensor`` with torch, returning only the sorted values (not indices)."""
    values, _indices = _i("torch").sort(tensor)
    return values
def check_scaleoperatorobject_statefulsets_state(stateful_name):
    """
    Wait until the named statefulset exists and all its replicas are ready.

    Polls the Kubernetes API up to 30 times with a 10 second pause
    (about 5 minutes in total) and returns as soon as
    ready_replicas == replicas.

    Args:
        param1: stateful_name - statefulset name to check
    Returns:
        None
    Raises:
        AssertionError: if the statefulset does not become ready (or cannot
        be read at all) within the retry budget.
    """
    read_statefulsets_api_instance = client.AppsV1Api()
    for _attempt in range(30):
        try:
            read_statefulsets_api_response = read_statefulsets_api_instance.read_namespaced_stateful_set(
                name=stateful_name, namespace=namespace_value, pretty=True)
            LOGGER.debug(read_statefulsets_api_response)
            ready_replicas = read_statefulsets_api_response.status.ready_replicas
            replicas = read_statefulsets_api_response.status.replicas
            if ready_replicas == replicas:
                LOGGER.info(f"CSI driver statefulset {stateful_name} is up")
                return
        except ApiException:
            # Statefulset not found yet (or transient API failure); retry.
            pass
        time.sleep(10)
    # BUG FIX: the old message claimed the statefulset "does not exist" even
    # when it existed but simply never became ready.
    LOGGER.info(f"CSI driver statefulset {stateful_name} is not ready or does not exist")
    assert False
def lambda_handler(event, context):
    """
    Route the incoming request based on type (LaunchRequest, IntentRequest, etc).
    The JSON body of the request is provided in the event parameter.
    """
    print("event.session.application.applicationId=" +
          event['session']['application']['applicationId'])
    # Uncomment this if statement and populate with your skill's application
    # ID to prevent someone else from configuring a skill that sends
    # requests to this function.
    # if (event['session']['application']['applicationId'] !=
    #         "amzn1.echo-sdk-ams.app.[unique-value-here]"):
    #     raise ValueError("Invalid Application ID")
    session = event['session']
    request = event['request']
    # Fresh sessions get the session-start hook before routing.
    if session['new']:
        on_session_started({'requestId': request['requestId']}, session)
    request_type = request['type']
    if request_type == "LaunchRequest":
        return on_launch(request, session)
    if request_type == "IntentRequest":
        return on_intent(request, session)
    if request_type == "SessionEndedRequest":
        return on_session_ended(request, session)
def get_authorized_client(config):
    """Get an OAuth-authorized client.

    Walks the user through the authorization-code flow: prints the
    authorization URL, asks for the pasted redirect URL, then exchanges
    the verifier code for an access token.

    Following
    http://requests-oauthlib.readthedocs.org/en/latest/examples/google.html
    """
    oauth_session = requests_oauthlib.OAuth2Session(
        client_id=config['client']['id'],
        scope=SCOPE,
        redirect_uri=config['client']['redirect_uri'])
    # redirect user for authorization
    authorization_url, state = oauth_session.authorization_url(
        url=AUTHORIZATION_BASE_URL,
        access_type='offline',       # offline for refresh token
        approval_prompt='force')     # force to always make user click authorize
    print('Please go here and authorize,', authorization_url)
    # get the authorization verifier code from the callback url
    redirect_response = input('Paste the full redirect URL here: ')
    # fetch the access token
    oauth_session.fetch_token(
        token_url=TOKEN_URL,
        client_secret=config['client']['secret'],
        authorization_response=redirect_response)
    return oauth_session
def eval_preparation(save_path):
    """Saves the current repository code and generates a SLURM script to evaluate a trained model more easily.
    Args:
        save_path: directory the trained model is stored in; the repository
            snapshot and the generated ``run_model_eval.sh`` are placed here.
    """
    # Repository root is two levels above the entry script...
    src_path = Path(sys.argv[0]).parent.parent
    # This check is necessary for ModelTrainerScripts in own packages, e.g. Mesh_2D or TransferLearning
    if "ModelTrainerScripts" in str(src_path):
        src_path = Path(sys.argv[0]).parent.parent.parent
    calling_script = Path(sys.argv[0]).parts[-1]
    # Snapshot the source tree next to the model, skipping VCS/IDE clutter.
    shutil.copytree(src_path, save_path / "rtm-predictions",
                    ignore=shutil.ignore_patterns('.git*', 'env*', '.idea*', '.vscode*', '__pycache__*',
                                                  'Docs/*', 'Debugging/*', 'Legacy/*'))
    docker_img = 'docker://nvcr.io/isse/pytorch_extended:20.02'
    # NOTE(review): the generated script embeds SINGULARITY_DOCKER_PASSWORD
    # from the current environment in plain text — confirm this is intended.
    slurm_txt = f"""#!/bin/sh
#SBATCH --gres=gpu:8
#SBATCH --job-name eval_rtm_predictions
#SBATCH --ntasks=1
#SBATCH -p gpu
#SBATCH --mem=495000
#SBATCH --cpus-per-task=75
PROJECT_ROOT="{save_path}"
export SINGULARITY_DOCKER_USERNAME=\\$oauthtoken
export SINGULARITY_DOCKER_PASSWORD={os.getenv('SINGULARITY_DOCKER_PASSWORD')}
export PYTHONPATH="${{PYTHONPATH}}:${{PROJECT_ROOT}}/rtm-predictions"
""" \
                f'singularity exec --nv -B /cfs:/cfs {docker_img} ' \
                f'python3 -u ${{PROJECT_ROOT}}/rtm-predictions/ModelTrainerScripts/{calling_script} ' \
                f'--eval ${{PROJECT_ROOT}} ' \
                f'--checkpoint_path ${{PROJECT_ROOT}}/checkpoint.pth'
    with open(save_path / Path("run_model_eval.sh"), "w") as slurm_script:
        slurm_script.write(slurm_txt) | 5,327,455 |
def enforce_api_limit(entity_type: str, count: int) -> None:
    """Check if the values given are over the limit for that key.
    This is generally used for limiting the size of certain API requests
    that eventually get stored in the database.

    :param entity_type: one of the keys in API_LIMITS
    :param count: the number of items the request carries
    :raises ValueError: if entity_type is not a known API limit
    :raises exception.KeystoneConnectionFailed: if keystone is unreachable
    :raises exception.OverQuota (or a mapped subclass): if over the limit
    """
    # Unified limits are opt-in; without them there is nothing to enforce.
    if not nova_limit_utils.use_unified_limits():
        return
    if entity_type not in API_LIMITS:
        fmt = "%s is not a valid API limit: %s"
        raise ValueError(fmt % (entity_type, API_LIMITS))
    try:
        enforcer = limit.Enforcer(always_zero_usage)
    except limit_exceptions.SessionInitError as e:
        msg = ("Failed to connect to keystone while enforcing %s quota limit."
               % entity_type)
        LOG.error(msg + " Error: " + str(e))
        raise exception.KeystoneConnectionFailed(msg)
    try:
        enforcer.enforce(None, {entity_type: count})
    except limit_exceptions.ProjectOverLimit as e:
        # Copy the exception message to a OverQuota to propagate to the
        # API layer.
        raise EXCEPTIONS.get(entity_type, exception.OverQuota)(str(e)) | 5,327,456 |
def query_obs_4h(session, station_name: str, start: datetime, end: datetime) -> pd.DataFrame:
    """
    Read QC'd observation rows for one station from SQLite and parse them.

    :param session: SQLAlchemy session bound to the observations database
    :param station_name: station to filter on
    :param start: inclusive start of the time window (truncated to the hour)
    :param end: inclusive end of the time window (truncated to the hour)
    :return: DataFrame of the selected columns, with the ±999 sentinel
        values replaced by NaN
    """
    # Timestamps are stored as hour-resolution strings in the DB.
    time_format = "%Y-%m-%d %H:00:00"
    resp = session.query(
        ObsDataQcLinear.time,
        ObsDataQcLinear.watertemp,
        ObsDataQcLinear.pH,
        ObsDataQcLinear.DO,
        ObsDataQcLinear.conductivity,
        ObsDataQcLinear.turbidity,
        ObsDataQcLinear.codmn,
        ObsDataQcLinear.nh3n,
        ObsDataQcLinear.tp,
        ObsDataQcLinear.tn) \
        .filter_by(name=station_name) \
        .filter(between(ObsDataQcLinear.time, start.strftime(time_format), end.strftime(time_format))) \
        .all()
    data = pd.DataFrame(resp)
    # -999/999 are missing-value sentinels in the source data.
    return data.replace([-999.0, 999.0], [np.nan, np.nan]) | 5,327,457 |
def get_valid_user_input(*, prompt='', strict=False):
    """Prompt repeatedly until the user enters a valid fraction; return it.

    :param prompt: text shown by ``input``
    :param strict: when True, parse with ``parse_fraction_strict`` instead
        of the permissive ``Fraction`` constructor
    :return: the parsed value as a Fraction
    """
    parse = parse_fraction_strict if strict else Fraction
    while True:
        raw = input(prompt)
        try:
            value = parse(raw)
        except (ValueError, ZeroDivisionError):
            print('Format error, please try again')
        else:
            return value
def main(github_token: str, fetch_schema: bool, to_csv: bool) -> None:
    """
    Prints results to STDOUT

    :param github_token: path to a file containing the GitHub token
    :param fetch_schema: fetch the GraphQL schema from the transport
    :param to_csv: emit CSV instead of JSON
    """
    token_fp = Path(github_token)
    assert token_fp.exists(), "Token filepath doesnt exist"
    gh_token = token_fp.read_text().strip()
    client = construct_client(
        github_token=gh_token, fetch_schema_from_transport=fetch_schema
    )
    # Output format is chosen once; both result sets go through it together.
    printer = csv_printer if to_csv else json_printer
    owned_repos: JsonList = query_results(client=client, gh_query=query_owned)
    contributed_repos: JsonList = query_results(
        client=client, gh_query=query_contributed_to
    )
    printer(list(chain(owned_repos, contributed_repos))) | 5,327,459 |
def get_rsync_pid_file_path(image_id):
    """Build the filesystem path of the rsync pid file for ``image_id``."""
    return os.path.join(CONF.baremetal.rsync_pid_path, image_id)
def binary_irrev(t, kf, prod, major, minor, backend=None):
    """Analytic product transient of a irreversible 2-to-1 reaction.
    Product concentration vs time from second order irreversible kinetics.
    Parameters
    ----------
    t : float, Symbol or array_like
    kf : number or Symbol
        Forward (bimolecular) rate constant.
    prod : number or Symbol
        Initial concentration of the complex.
    major : number or Symbol
        Initial concentration of the more abundant reactant.
    minor : number or Symbol
        Initial concentration of the less abundant reactant.
    backend : module or str
        Default is 'numpy', can also be e.g. ``sympy``.
    """
    be = get_backend(backend)
    # The same exponential decay term appears in numerator and denominator.
    decay = be.exp(-kf * (major - minor) * t)
    return prod + major * (1 - decay) / (major / minor - decay)
def adapted_rand_error(seg, gt, all_stats=False):
    r"""Compute Adapted Rand error as defined by the SNEMI3D contest [1]
    Formula is given as 1 - the maximal F-score of the Rand index
    (excluding the zero component of the original labels). Adapted
    from the SNEMI3D MATLAB script, hence the strange style.
    Parameters
    ----------
    seg : np.ndarray
        the segmentation to score, where each value is the label at that point
    gt : np.ndarray, same shape as seg
        the groundtruth to score against, where each value is a label
    all_stats : boolean, optional
        whether to also return precision and recall as a 3-tuple with rand_error
    Returns
    -------
    are : float
        The adapted Rand error; equal to :math:`1 - \frac{2pr}{p + r}`,
        where :math:`p` and :math:`r` are the precision and recall described below.
    prec : float, optional
        The adapted Rand precision. (Only returned when `all_stats` is ``True``.)
    rec : float, optional
        The adapted Rand recall. (Only returned when `all_stats` is ``True``.)
    References
    ----------
    [1]: http://brainiac2.mit.edu/SNEMI3D/evaluation
    """
    # segA is query, segB is truth
    segA = seg
    segB = gt
    n = segA.size
    # This is the contingency table obtained from segA and segB, we obtain
    # the marginal probabilities from the table.
    p_ij = contingency_table(segA, segB, norm=False)
    # Sum of the joint distribution squared
    sum_p_ij = p_ij.data @ p_ij.data
    # These are the axis-wise sums (np.sum along an axis)
    a_i = p_ij.sum(axis=0).A.ravel()
    b_i = p_ij.sum(axis=1).A.ravel()
    # Sum of the segment labeled 'A'
    sum_a = a_i @ a_i
    # Sum of the segment labeled 'B'
    sum_b = b_i @ b_i
    # This is the new code, wherein 'n' is subtracted from the numerator
    # and the denominator.
    precision = (sum_p_ij - n)/ (sum_a - n)
    recall = (sum_p_ij - n)/ (sum_b - n)
    fscore = 2. * precision * recall / (precision + recall)
    are = 1. - fscore
    if all_stats:
        return (are, precision, recall)
    else:
        return are | 5,327,462 |
def get_knp_span(type_: str, span: Span) -> List[Span]:
    """Get knp tag or bunsetsu list

    Walks the KNP elements stored on the sentence and maps each onto a
    character-aligned spaCy Span, attaching the KNP element to the span's
    user data.

    :param type_: KNP element kind (must not be MORPH)
    :param span: spaCy span whose sentence carries the KNP annotations
    :return: one Span per KNP element, in document order
    """
    assert type_ != MORPH
    knp_list = span.sent._.get(getattr(KNP_USER_KEYS, type_).list_)
    if not knp_list:
        return []
    res = []
    # Cursor over character offsets; each KNP element's surface form
    # (`midasi`) is assumed to tile the sentence contiguously.
    i = span.start_char
    doc = span.doc
    for b in knp_list:
        j = i + len(b.midasi)
        bspan = doc.char_span(i, j)
        bspan._.set(getattr(KNP_USER_KEYS, type_).element, b)
        res.append(bspan)
        i = j
    return res | 5,327,463 |
def test_close_single(single_time_browser):
    """Check that single browser window is closed"""
    single_time_browser.close_window()
    # Any further driver access must fail once the only window is gone.
    with pytest.raises(WebDriverException):
        single_time_browser.element("html").get_actual() | 5,327,464 |
def set_g_clim(vmin, vmax):
    """Set min/max value of the gnomview part of a mollzoom.

    :param vmin: lower color-scale limit
    :param vmax: upper color-scale limit
    :raises TypeError: if the current figure has no attached zoomtool
    """
    import pylab
    f = pylab.gcf()
    if not hasattr(f, "zoomtool"):
        raise TypeError("The current figure has no zoomtool")
    # Store the limits on the zoomtool and redraw the gnomonic view.
    f.zoomtool.save_min = vmin
    f.zoomtool.save_max = vmax
    f.zoomtool._range_status = 2
    f.zoomtool.draw_gnom() | 5,327,465 |
def get_namespace_leaf(namespace):
    """
    Return the leaf (last dot-separated component) of a namespace.

    >>> get_namespace_leaf('foo.bar')
    'bar'
    >>> get_namespace_leaf('foo')
    'foo'

    :param namespace: dot-separated namespace string
    :return: the leaf component
    """
    return namespace.rpartition(".")[2]
def test_postprocessing():
    """ Test the post-processing method on label files.
    Runs post_process_txt_labels over the fixture folder and compares the
    merged tsv output against a hand-built expected DataFrame.
    Returns:
        Should output Fixed 3 problems
    """
    folder = osp.join(absolute_dir_path, "material", "post_processing")
    checked_folder = osp.join(absolute_dir_path, "generated", "post_processing")
    out_tsv = osp.join(absolute_dir_path, "generated", "post.tsv")
    post_process_txt_labels(folder, output_folder=checked_folder, output_tsv=out_tsv)
    df = pd.read_csv(out_tsv, sep="\t")
    print(df.to_dict())
    # Expected events after post-processing (onset/offset in seconds).
    valid_df = pd.DataFrame(
        {
            "filename": {
                0: "5.wav",
                1: "5.wav",
                2: "7.wav",
                3: "7.wav",
                4: "7.wav",
                5: "7.wav",
                6: "7.wav",
                7: "7.wav",
            },
            "onset": {
                0: 0.008,
                1: 4.969,
                2: 2.183,
                3: 2.406,
                4: 3.099,
                5: 3.406,
                6: 3.684,
                7: 6.406,
            },
            "offset": {
                0: 5.546,
                1: 9.609,
                2: 2.488,
                3: 5.2,
                4: 3.36,
                5: 6.2,
                6: 5.624,
                7: 10.0,
            },
            "event_label": {
                0: "Cat",
                1: "Speech",
                2: "Dishes",
                3: "Speech",
                4: "Dishes",
                5: "Cat",
                6: "Dishes",
                7: "Frying",
            },
        }
    )
    # Compare order-independently: round, sort by onset, reset the index.
    check = df.round(3).sort_values("onset").reset_index(
        drop=True
    ) == valid_df.sort_values("onset").reset_index(drop=True)
    assert check.all(axis=None), "Problem with post_processing_txt_annotations" | 5,327,467 |
def fft_real_dB(sig: np.ndarray,
                sample_interval_s: float) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """
    FFT, real frequencies only, magnitude in dB
    :param sig: array with input signal
    :param sample_interval_s: sample interval in seconds
    :return: four numpy ndarrays with fft_frequency_pos, fft_sig_pos,
        fft_spectral_power_pos_dB, fft_spectral_phase_radians
    """
    n_points = len(sig)
    # Normalize by the number of points so RMS power comes out correctly
    # (sqrt(2) -> 1).
    spectrum = np.fft.rfft(sig) / n_points
    freqs = np.fft.rfftfreq(n_points, d=sample_interval_s)
    # EPSILON (module constant) guards the log against zero magnitude.
    power_db = 10.*np.log10(2.*(np.abs(spectrum))**2. + EPSILON)
    phase = np.angle(spectrum)
    return freqs, spectrum, power_db, phase
def test_fd_contextmanager(mocker, obj):
    """Ensure close() is called when used as a context manager"""
    obj._fd = -1  # invalid but unlikley to cause issues
    # if real close is called
    # Patch the low-level close so no real file descriptor is touched.
    f = mocker.patch('butter.utils._close')
    with obj:
        pass
    f.assert_called() | 5,327,469 |
def get_ipv4_gateway_mac_address_over_ssh(connected_ssh_client: SSHClient,
                                          target_os: str = 'MacOS',
                                          gateway_ipv4_address: str = '192.168.0.254') -> Union[None, str]:
    """
    Get MAC address of IPv4 gateway in target host over SSH
    :param connected_ssh_client: Already connected SSH client
    :param target_os: MacOS, Linux or Windows (Installation of OpenSSH For Windows: https://docs.microsoft.com/en-us/windows-server/administration/openssh/openssh_install_firstuse)
    :param gateway_ipv4_address: IPv4 address of gateway
    :return: None if error or MAC address string

    NOTE(review): only the Windows and Linux branches return a parsed MAC;
    when target_os is 'MacOS' (the default) the function falls through and
    returns None — confirm whether a MacOS parsing branch is missing.
    """
    gateway_mac_address: Union[None, str] = None
    try:
        # Windows `arp -a` output needs filtering; *nix `arp -an` is queried
        # for a single address directly.
        if target_os == 'Windows':
            arp_table_command: str = 'arp -a ' + gateway_ipv4_address + ' | findstr ' + gateway_ipv4_address
        else:
            arp_table_command: str = 'arp -an ' + gateway_ipv4_address
        stdin, stdout, stderr = connected_ssh_client.exec_command(arp_table_command)
        arp_table: bytes = stdout.read()
        arp_table: str = arp_table.decode('utf-8')
        assert 'No route to host' not in arp_table, \
            'No route to host' + base.error_text(args.target_ip)
        assert arp_table != '', \
            'Not found host: ' + base.error_text(gateway_ipv4_address) + \
            ' in ARP table in host: ' + base.error_text(args.target_ip)
        if target_os == 'Windows':
            assert base.windows_mac_address_regex.search(arp_table), \
                'Not found host: ' + base.error_text(gateway_ipv4_address) + \
                ' in ARP table in host: ' + base.error_text(args.target_ip)
            mac_address = base.windows_mac_address_regex.search(arp_table)
            # Windows prints dash-separated uppercase MACs; normalize them.
            return mac_address.group(1).replace('-', ':').lower()
        else:
            target_arp_table: List[str] = arp_table.split(' ')
            if target_os == 'Linux':
                # On Linux `arp -an` the MAC is the 4th whitespace field.
                assert base.mac_address_validation(target_arp_table[3]), \
                    'Invalid MAC address: ' + base.error_text(target_arp_table[3])
                return target_arp_table[3]
    except AssertionError as Error:
        base.print_error(Error.args[0])
        return gateway_mac_address
    except IndexError:
        return gateway_mac_address | 5,327,470 |
def LengthComparator(current, compare):
    """Fail unless ``len(current)`` equals the expected value ``compare``."""
    if len(current) == compare:
        return
    msg = '{0} ({1}) is not of the expected length {2} ({3})'
    raise JsonPatchTestFailed(msg.format(current, type(current), compare, type(compare)))
def print(*args, sep=' ', end='', file=None, flush=False):
    """Display the given arguments in a Tk dialog.

    Deliberately shadows the builtin ``print`` with a compatible signature;
    ``file`` and ``flush`` are accepted but unused.  Blocks until the user
    clicks OK or presses Return.
    """
    root = Tk()
    root.title('print')
    # Join the arguments with `sep`, mirroring builtin print formatting.
    str_args = ''
    for arg in args:
        str_args = str_args + sep + str(arg)
    # Slice off the leading separator added by the first iteration.
    label = Label(root, text=str_args[len(sep):] + end)
    label.pack(anchor=W)
    button = Button(root, text="OK", command=root.quit)
    button.pack(side=BOTTOM)
    root.bind('<Return>', (lambda e: root.quit()))
    root.mainloop()
    root.destroy() | 5,327,472 |
def test_list_hex_binary_enumeration_nistxml_sv_iv_list_hex_binary_enumeration_1_4(mode, save_output, output_format):
    """
    Type list/hexBinary is restricted by facet enumeration.

    Generated NIST conformance test: binds the schema/instance pair and
    verifies round-tripping through the generated dataclasses.
    """
    assert_bindings(
        schema="nistData/list/hexBinary/Schema+Instance/NISTSchema-SV-IV-list-hexBinary-enumeration-1.xsd",
        instance="nistData/list/hexBinary/Schema+Instance/NISTXML-SV-IV-list-hexBinary-enumeration-1-4.xml",
        class_name="NistschemaSvIvListHexBinaryEnumeration1",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    ) | 5,327,473 |
def sort(list_):
    """
    Selection sort: order ``list_`` in ascending order, in place.

    :param list_: a list of mutually comparable items (mutated in place)
    :return: the same list, ordered ascending
    """
    length = len(list_)
    for slot in range(length):
        # Find the index of the smallest remaining element...
        smallest = slot
        for c in range(slot + 1, length):
            if list_[c] < list_[smallest]:
                smallest = c
        # ...and move it into position with one idiomatic tuple swap
        # (the old code swapped through a temporary on every comparison hit,
        # doing up to O(n) swaps per pass instead of one).
        list_[slot], list_[smallest] = list_[smallest], list_[slot]
    return list_
def stock_em_xgsglb(market: str = "沪市A股") -> pd.DataFrame:
    """
    新股申购与中签查询
    http://data.eastmoney.com/xg/xg/default_2.html
    :param market: choice of {"全部股票", "沪市A股", "科创板", "深市A股", "创业板"}
    :type market: str
    :return: 新股申购与中签数据
    :rtype: pandas.DataFrame
    """
    market_map = {
        "全部股票": """(APPLY_DATE>'2010-01-01')""",
        "沪市A股": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE in ("058001001","058001008"))(TRADE_MARKET_CODE in ("069001001001","069001001003","069001001006"))""",
        "科创板": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE in ("058001001","058001008"))(TRADE_MARKET_CODE="069001001006")""",
        "深市A股": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE="058001001")(TRADE_MARKET_CODE in ("069001002001","069001002002","069001002003","069001002005"))""",
        "创业板": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE="058001001")(TRADE_MARKET_CODE="069001002002")""",
    }
    url = "http://datacenter-web.eastmoney.com/api/data/v1/get"
    params = {
        'sortColumns': 'APPLY_DATE,SECURITY_CODE',
        'sortTypes': '-1,-1',
        'pageSize': '5000',
        'pageNumber': '1',
        'reportName': 'RPTA_APP_IPOAPPLY',
        'columns': 'SECURITY_CODE,SECURITY_NAME,TRADE_MARKET_CODE,APPLY_CODE,TRADE_MARKET,MARKET_TYPE,ORG_TYPE,ISSUE_NUM,ONLINE_ISSUE_NUM,OFFLINE_PLACING_NUM,TOP_APPLY_MARKETCAP,PREDICT_ONFUND_UPPER,ONLINE_APPLY_UPPER,PREDICT_ONAPPLY_UPPER,ISSUE_PRICE,LATELY_PRICE,CLOSE_PRICE,APPLY_DATE,BALLOT_NUM_DATE,BALLOT_PAY_DATE,LISTING_DATE,AFTER_ISSUE_PE,ONLINE_ISSUE_LWR,INITIAL_MULTIPLE,INDUSTRY_PE_NEW,OFFLINE_EP_OBJECT,CONTINUOUS_1WORD_NUM,TOTAL_CHANGE,PROFIT,LIMIT_UP_PRICE,INFO_CODE,OPEN_PRICE,LD_OPEN_PREMIUM,LD_CLOSE_CHANGE,TURNOVERRATE,LD_HIGH_CHANG,LD_AVERAGE_PRICE,OPEN_DATE,OPEN_AVERAGE_PRICE,PREDICT_PE,PREDICT_ISSUE_PRICE2,PREDICT_ISSUE_PRICE,PREDICT_ISSUE_PRICE1,PREDICT_ISSUE_PE,PREDICT_PE_THREE,ONLINE_APPLY_PRICE,MAIN_BUSINESS',
        'filter': market_map[market],
        'source': 'WEB',
        'client': 'WEB',
    }
    # First request only determines the page count; its rows are re-fetched.
    r = requests.get(url, params=params)
    data_json = r.json()
    total_page = data_json['result']['pages']
    page_frames = []
    for page in tqdm(range(1, total_page+1), leave=False):
        params.update({"pageNumber": page})
        r = requests.get(url, params=params)
        data_json = r.json()
        page_frames.append(pd.DataFrame(data_json['result']['data']))
    # BUG FIX: DataFrame.append was deprecated and removed in pandas 2.x;
    # accumulate page frames and concatenate once instead.
    big_df = pd.concat(page_frames, ignore_index=True)
    big_df.columns = [
        "股票代码",
        "股票简称",
        "_",
        "申购代码",
        "_",
        "_",
        "_",
        "发行总数",
        "网上发行",
        "_",
        "顶格申购需配市值",
        "_",
        "申购上限",
        "_",
        "发行价格",
        "最新价",
        "首日收盘价",
        "申购日期",
        "中签号公布日",
        "中签缴款日期",
        "上市日期",
        "发行市盈率",
        "中签率",
        "询价累计报价倍数",
        "_",
        "配售对象报价家数",
        "连续一字板数量",
        "涨幅",
        "每中一签获利",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "_",
        "行业市盈率",
        "_",
        "_",
        "_",
    ]
    big_df = big_df[
        [
            "股票代码",
            "股票简称",
            "申购代码",
            "发行总数",
            "网上发行",
            "顶格申购需配市值",
            "申购上限",
            "发行价格",
            "最新价",
            "首日收盘价",
            "申购日期",
            "中签号公布日",
            "中签缴款日期",
            "上市日期",
            "发行市盈率",
            "行业市盈率",
            "中签率",
            "询价累计报价倍数",
            "配售对象报价家数",
            "连续一字板数量",
            "涨幅",
            "每中一签获利",
        ]
    ]
    # Normalize dtypes: date columns to datetime.date, numeric columns
    # from strings to numbers (one loop instead of 17 repeated statements).
    for col in ["申购日期", "中签号公布日", "中签缴款日期"]:
        big_df[col] = pd.to_datetime(big_df[col]).dt.date
    numeric_cols = [
        "发行总数", "网上发行", "顶格申购需配市值", "申购上限", "发行价格",
        "最新价", "首日收盘价", "发行市盈率", "行业市盈率", "中签率",
        "询价累计报价倍数", "配售对象报价家数", "涨幅", "每中一签获利",
    ]
    for col in numeric_cols:
        big_df[col] = pd.to_numeric(big_df[col])
    return big_df
def geomfill_Mults(*args):
    """
    SWIG-generated wrapper around the native GeomFill::Mults routine.

    :param TypeConv:
    :type TypeConv: Convert_ParameterisationType
    :param TMults:
    :type TMults: TColStd_Array1OfInteger &
    :rtype: void
    """
    # Thin pass-through to the C++ binding; all validation happens natively.
    return _GeomFill.geomfill_Mults(*args) | 5,327,476 |
def test_app_index(pre_pop_transaction, app):
    """Test that index view returns OK response through app."""
    response = app.get('/')
    assert response.status_code == 200 | 5,327,477 |
def test_helpful_error_message_received_on_connection_reset_error():
    """Verify a helpful message is logged when the server resets the
    connection during the initial websocket connect.
    """
    ws_client, _, _ = default_ws_client_setup("wss://this-url-wont-be-used:1")

    async def failing_connect(*args, **kwargs):
        raise ConnectionResetError("foo")

    error_logger = MagicMock()
    with patch("websockets.connect", failing_connect):
        with patch.object(client.LOGGER, "error", error_logger):
            with pytest.raises(SystemExit):
                ws_client.run_synchronously(
                    MagicMock(), MagicMock(), MagicMock())
    error_logger.assert_called_once()
    logged_message = error_logger.call_args[0][0]
    assert (
        "Caught ConnectionResetError when attempting to"
        " connect to server"
        in logged_message
    )
def test_add_meal_without_data(client):
    """Posting to the meals endpoint with no payload must yield 400."""
    response = client.post('/api/v1/meals/')
    assert 400 == response.status_code
def main():
    """Program entry point.

    Parses arguments, configures logging verbosity, builds the symbol
    database for the requested library and writes it out as JSON next to
    the library (``<name>.so.json``).
    """
    args = parse_args()
    os.chdir(THIS_DIR)
    verbose_map = (logging.WARNING, logging.INFO, logging.DEBUG)
    # Clamp repeated -v flags to the most verbose level available.
    verbosity = min(args.verbose, len(verbose_map) - 1)
    logging.basicConfig(level=verbose_map[verbosity])
    symbol_db = build_symbol_db(args.library_name)
    with open(args.library_name + '.so.json', 'w') as db_file:
        json.dump(symbol_db, db_file, indent=4, separators=(',', ': '),
                  sort_keys=True)
def arcminutes(degrees=0, radians=0, arcseconds=0):  # pylint: disable=W0621
    """Convert the sum of the given angle components to arcminutes."""
    total_degrees = degrees
    if radians:
        total_degrees += math.degrees(radians)
    if arcseconds:
        total_degrees += arcseconds / arcsec(degrees=1.)
    return total_degrees * 60.
def test_throws_error_if_unspecified_and_env_vars_fail(test_env):
    """Expect a RuntimeError when no directory is specified and the
    directories named by the environment variables are invalid.
    """
    empty_store = fakes.FakeFilestore(dict())
    with pytest.raises(RuntimeError):
        config_dir.ConfigDir(empty_store, test_env)
def PCO_GetRecordingStruct(handle):
    """
    Query the camera for the complete set of recording-function settings.

    Returns a populated PCO_Recording structure; raises through
    PCO_manage_error when the SDK call reports a failure.
    """
    recording = PCO_Recording()
    sdk_call = pixelfly_dll.PCO_GetRecordingStruct
    sdk_call.restype = ctypes.c_int
    sdk_call.argtypes = (ctypes.wintypes.HANDLE, ctypes.POINTER(PCO_Recording))
    ret_code = sdk_call(handle, ctypes.byref(recording))
    PCO_manage_error(ret_code)
    return recording
def __alpha_configuration():
    """Install the default configuration for alphaconf.

    Defines two logging presets ('default' and 'none') under
    base.logging and selects between them via an OmegaConf interpolation.
    """
    # Standard logging.dictConfig schema: colorized console output.
    logging_default = {
        'version': 1,
        'formatters': {
            'simple': {
                'format': '%(asctime)s %(levelname)s %(name)s: %(message)s',
                'datefmt': '%H:%M:%S',
            },
            'default': {
                'format': '%(asctime)s %(levelname)s'
                ' %(name)s [%(process)s,%(threadName)s]: %(message)s',
            },
            # Reuses the 'default' format string via relative interpolation.
            'color': {
                'class': 'alphaconf.logging_util.ColorFormatter',
                'format': '${..default.format}',
            },
            'json': {
                'class': 'alphaconf.logging_util.JSONFormatter',
            },
        },
        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',
                'formatter': 'color',
                'stream': 'ext://sys.stdout',
            },
        },
        'root': {
            'handlers': ['console'],
            'level': 'INFO',
        },
        # change the default to keep module-level logging by default
        'disable_existing_loggers': False,
    }
    # Preset with no handlers: effectively disables alphaconf's logging setup.
    logging_none = {
        'version': 1,
        'formatters': {
            'default': {
                'format': '%(asctime)s %(levelname)s'
                ' %(name)s [%(process)s,%(threadName)s]: %(message)s',
            },
        },
        'handlers': {},
        'root': {
            'handlers': [],
            'level': 'INFO',
        },
    }
    # 'logging' resolves to base.logging.default unless overridden.
    conf = {
        'logging': '${oc.select:base.logging.default}',
        'base': {'logging': {'default': logging_default, 'none': logging_none}},
    }
    setup_configuration(conf) | 5,327,484 |
def auxiliar2(Letra, tabuleiro):
    """
    Helper for the computer's move; implements one part of the strategy.

    Receives the computer's symbol ``Letra`` and the board ``tabuleiro``
    (1-indexed positions) and returns the board position to play, or
    ``None`` implicitly when no handled pattern matches.
    """
    # Letra2 is the opponent's symbol.
    if Letra == "X":
        Letra2 = "O"
    else:
        Letra2 = "X"
    # NOTE(review): this first branch compares against the *computer's*
    # symbol (Letra) in the centre and inner tests, while the three
    # branches below use the opponent's (Letra2) — presumably a deliberate
    # part of the strategy, but confirm it is not an inverted condition.
    if tabuleiro[1] == Letra2 and tabuleiro[5] == Letra:
        if tabuleiro[3] == Letra:
            return 7
        elif tabuleiro[2] == Letra:
            return 8
        elif tabuleiro[4] == Letra:
            return 6
        elif tabuleiro[6] == Letra:
            return 4
        elif tabuleiro[7] == Letra:
            return 3
        elif tabuleiro[8] == Letra:
            return 2
        elif tabuleiro[9] == Letra:
            jogada = random.choice([3, 7])
            return jogada
    # Opponent holds the centre and corner 3 is the computer's.
    elif tabuleiro[3] == Letra and tabuleiro[5] == Letra2:
        if tabuleiro[1] == Letra2:
            return 9
        elif tabuleiro[2] == Letra2:
            return 8
        elif tabuleiro[4] == Letra2:
            return 6
        elif tabuleiro[6] == Letra2:
            return 4
        elif tabuleiro[8] == Letra2:
            return 2
        elif tabuleiro[9] == Letra2:
            return 1
        elif tabuleiro[7] == Letra2:
            jogada = random.choice([9, 1])
            return jogada
    # Opponent holds the centre and corner 7 is the computer's.
    elif tabuleiro[7] == Letra and tabuleiro[5] == Letra2:
        if tabuleiro[1] == Letra2:
            return 9
        elif tabuleiro[2] == Letra2:
            return 8
        elif tabuleiro[4] == Letra2:
            return 6
        elif tabuleiro[6] == Letra2:
            return 4
        elif tabuleiro[8] == Letra2:
            return 2
        elif tabuleiro[9] == Letra2:
            return 1
        elif tabuleiro[3] == Letra2:
            jogada = random.choice([9, 1])
            return jogada
    # Opponent holds the centre and corner 9 is the computer's.
    elif tabuleiro[9] == Letra and tabuleiro[5] == Letra2:
        if tabuleiro[3] == Letra2:
            return 7
        elif tabuleiro[2] == Letra2:
            return 8
        elif tabuleiro[4] == Letra2:
            return 6
        elif tabuleiro[6] == Letra2:
            return 4
        elif tabuleiro[7] == Letra2:
            return 3
        elif tabuleiro[8] == Letra2:
            return 2
        elif tabuleiro[1] == Letra2:
            jogada = random.choice([3, 7])
            return jogada | 5,327,485 |
def create_task():
    """
    Create a crawl task from the given crawl parameters and crawl-node
    service name, then ask that node to start the data crawl.
    :return: JSON response from the node on success, a 4xx abort on
        failure, or ``{"success": False, "code": -1}`` when unreachable.
    """
    payload = request.get_json()
    # Look up the crawl node's registration info by name.
    node = db.nodes.find_one({"name": payload["node"]})
    # Return 404 when the crawl node is unknown.
    if node is None:
        return abort(404)
    # Persist the task record; the task label combines node name and timestamp.
    payload["task"] = "%s@%s" % (payload["node"], strftime("%Y%m%d%H%M%S", localtime()))
    payload["status"] = 0
    payload["done"] = 0
    db.tasks.insert(payload)
    # Call the node's crawl-task endpoint to start the crawl.
    # NOTE(review): payload["_id"] below relies on insert() injecting the
    # generated id into the dict (pymongo behaviour) — confirm the driver.
    try:
        resp = post("http://%s:%d/tasks" % (node["addr"], node["port"]), json={
            "task": str(payload["_id"]),
            "type": payload["type"],
            "keyword": payload["keyword"],
            "start": payload["start"],
            "end": payload["end"]
        })
        # Relay the node's JSON result on success; otherwise propagate the
        # node's HTTP status code.
        if resp.status_code == codes.ok:
            payload["status"] = 1
            db.tasks.save(payload)
            return jsonify(resp.json())
        else:
            abort(resp.status_code)
    except ConnectionError:
        # If the node service is unreachable, delete the task record
        # created above.
        db.tasks.delete_one({"_id": payload["_id"]})
        return jsonify({"success": False, "code": -1}) | 5,327,486 |
def eco_hist_calcs(mass,bins,dlogM):
"""
Returns dictionaries with the counts for the upper
and lower density portions; calculates the
three different percentile cuts for each mass
array given
Parameters
----------
mass: array-like
A 1D array with log stellar mass values, assumed
to be an order which corresponds to the ascending
densities; (necessary, as the index cuts are based
on this)
bins: array-like
A 1D array with the values which will be used as the bin edges
dlogM: float-like
The log difference between bin edges
Returns
-------
hist_dict_low: dictionary-like
A dictionary with three keys (the frac vals), with arrays
as values. The values for the lower density cut
hist_dict_high: dictionary like
A dictionary with three keys (the frac vals), with arrays
as values. The values for the higher density cut
"""
hist_dict_low = {}
hist_dict_high = {}
bin_cens_low = {}
bin_cens_high = {}
frac_val = np.array([2,4,10])
frac_dict = {2:0,4:1,10:2}
edges = bins
bin_centers = 0.5 * (edges[:-1]+edges[1:])
low_err = [[] for xx in xrange(len(frac_val))]
high_err = [[] for xx in xrange(len(frac_val))]
for ii in frac_val:
# hist_dict_low[ii] = {}
# hist_dict_high[ii] = {}
frac_data = int(len(mass)/ii)
frac_mass = mass[0:frac_data]
counts, edges = np.histogram(frac_mass,bins)
low_counts = (counts/float(len(frac_mass))/dlogM)
non_zero = (low_counts!=0)
low_counts_1 = low_counts[non_zero]
hist_dict_low[ii] = low_counts_1
bin_cens_low[ii] = bin_centers[non_zero]
##So... I don't actually know if I need to be calculating error
##on the mocks. I thought I didn't, but then, I swear someone
##*ahem (Victor)* said to. So I am. Guess I'm not sure they're
##useful. But I'll have them if necessary. And ECO at least
##needs them.
low_err = np.sqrt(counts)/len(frac_mass)/dlogM
low_err_1 = low_err[non_zero]
err_key = 'err_{0}'.format(ii)
hist_dict_low[err_key] = low_err_1
frac_mass_2 = mass[-frac_data:]
counts_2, edges_2 = np.histogram(frac_mass_2,bins)
high_counts = (counts_2/float(len(frac_mass_2))/dlogM)
non_zero = (high_counts!=0)
high_counts_1 = high_counts[non_zero]
hist_dict_high[ii] = high_counts_1
bin_cens_high[ii] = bin_centers[non_zero]
high_err = np.sqrt(counts_2)/len(frac_mass_2)/dlogM
high_err_1 = high_err[non_zero]
hist_dict_high[err_key] = high_err_1
return hist_dict_low, hist_dict_high, bin_cens_low, bin_cens_high | 5,327,487 |
def take(lines, n, header):
    """
    Read and yield lines from C{lines} in groups of C{n}, ignoring any header
    lines.

    @param lines: An iterable of C{str} lines (any iterable, not just an
        iterator — the previous implementation called C{next()} directly
        and therefore failed on plain lists).
    @param n: An C{int} number of lines per group.
    @param header: A C{str} header line to ignore (may be C{None}).
    @return: A generator that produces C{list}s containing C{n} lines, with
        the final list containing fewer depending on len(lines) % n.
    """
    group = []
    for line in lines:
        # Skip every line equal to the header, wherever it appears.
        if line == header:
            continue
        group.append(line)
        if len(group) == n:
            yield group
            group = []
    # Flush the final, possibly short, group.
    if group:
        yield group
def avgSentenceLength(text):
    """Return the average number of tokens per sentence in *text*.

    Returns 0 when the text contains no sentences, instead of raising
    ZeroDivisionError as the previous implementation did.
    """
    sentences = sentenceCount(text)
    if not sentences:
        return 0
    tokens = langtools.tokenize(text)
    return len(tokens) / sentences
def retrieve_context_topology_link_available_capacity_total_size_total_size(uuid, link_uuid): # noqa: E501
"""Retrieve total-size
Retrieve operation of resource: total-size # noqa: E501
:param uuid: ID of uuid
:type uuid: str
:param link_uuid: ID of link_uuid
:type link_uuid: str
:rtype: CapacityValue
"""
return 'do some magic!' | 5,327,490 |
def harmonize_geonames_id(uri):
    """Normalize a geonames URL to its canonical RDF (sws) form.

    Non-geonames URIs are returned unchanged.
    """
    if 'geonames' not in uri:
        return uri
    # Strip everything but the digits to recover the geonames id.
    geo_id = re.sub(r'\D', '', uri)
    return "http://sws.geonames.org/{}/".format(geo_id)
def shut_down(sock, how=socket.SHUT_RDWR, ignored=NOT_CONNECTED):
    """Shut down the given socket, suppressing the errnos in *ignored*."""
    suppressed = ignored or ()
    with ignored_errno(*suppressed):
        sock.shutdown(how)
def test_downloads(tmpdir):
    """Exercise dataset URL handling by fetching the fake dataset."""
    dest = datasets._fake.data_path(path=str(tmpdir), update_path=False)
    assert op.isfile(op.join(dest, 'bar'))
    assert datasets._fake.get_version() is None
def general_split(s, first_index=0):
    """
    Yield each word in a string that starts with a capital letter or that
    has any whitespace immediately before it.

    Underscores also act as separators and are dropped from the output.
    """
    # Append a capital-letter sentinel so the final word is flushed by the
    # "split at capital letter" branch below.
    s = s.strip() + "A"
    for i, letter in enumerate(s):
        if i > first_index:
            # Split at capital letter
            if letter.isupper():
                yield s[first_index:i]
                first_index = i
            # Split at underscore
            elif letter == "_":
                yield s[first_index:i]
                first_index = i + 1
            # Split at whitespace
            # (only at the last whitespace char of a run: the lookahead
            # requires the next char to be non-whitespace).
            # NOTE(review): str_whitespace is defined elsewhere —
            # presumably string.whitespace; confirm.
            elif letter in str_whitespace \
                    and (i+1 < len(s) and not s[i + 1] in str_whitespace):
                yield s[first_index:i]
                first_index = i + 1 | 5,327,494 |
def get_args():
    """Parse and return the command-line arguments."""
    arg_parser = argparse.ArgumentParser(
        description='Gashlycrumb',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument('letter',
                            metavar='letter',
                            nargs='+',
                            help='Letter(s)')
    arg_parser.add_argument('-f',
                            '--file',
                            metavar='FILE',
                            default='gashlycrumb.txt',
                            type=argparse.FileType('rt'),
                            help='Input file')
    return arg_parser.parse_args()
def get_apikey(api):
    """Return the API key (or, for twitter, the credential bundle) for the
    named service; None for an unknown service."""
    # Lambdas keep the config attribute access lazy, exactly like the
    # original if/elif chain: only the selected service's keys are read.
    providers = {
        "greynoise": lambda: config.greynoise_key,
        "hybrid-analysis": lambda: config.hybrid_analysis_apikey,
        "malshare": lambda: config.malshare_apikey,
        "pulsedive": lambda: config.pulsedive_apikey,
        "twitter": lambda: {
            "access_token": config.twitter_access_token,
            "access_token_secret": config.twitter_access_token_secret,
            "consumer_key": config.twitter_consumer_key,
            "consumer_secret": config.twitter_consumer_secret
        },
    }
    fetch = providers.get(api)
    if fetch is not None:
        return fetch()
def create_admin_account():
    """
    Create a new admin account.

    Returns a dict with the plaintext ``api_key`` (only its hash is
    stored) and the ``secret_key``.

    Raises RqlRuntimeError or RqlDriverError on database failures.
    """
    try:
        original_api_key = generate_key()
        secret_key = generate_key()
        # Store only the hash; the plaintext key is returned once here.
        hashed_api_key = generate_password_hash(original_api_key)
        Interactions.insert(DEFAULT_ACCOUNTS_TABLE,
                            **{'username': 'admin',
                               'endpoint': '',
                               'is_admin': True,
                               'api_key': hashed_api_key,
                               'secret_key': secret_key})
        return {'api_key': original_api_key, 'secret_key': secret_key}
    except (RqlRuntimeError, RqlDriverError):
        # Bare raise preserves the original traceback ("raise err" would
        # truncate it on Python 2).
        raise
def _arg_wrap(func):
""" Decorator to decorate decorators to support optional arguments. """
@wraps(func)
def new_decorator(*args, **kwargs):
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
return func(args[0])
else:
return lambda realf: func(realf, *args, **kwargs)
return new_decorator | 5,327,498 |
def _point_as_tuple(input_string: str) -> _Tuple[float]:
    """
    Parse *input_string* as a comma-separated point in space.

    Attempts to parse the string as a tuple of floats and checks that the
    number of elements matches the configured DIMENSIONS. The purpose of
    this function more than anything else is to validate correct syntax of
    a CLI argument that is supposed to be a point in space.

    Raises TypeError (now with a descriptive message) on a wrong
    coordinate count; float() raises ValueError for non-numeric parts.
    """
    point = tuple(float(coordinate) for coordinate in input_string.split(','))
    if len(point) != DIMENSIONS:
        raise TypeError(
            'expected {} comma-separated coordinates, got {}'.format(
                DIMENSIONS, len(point)))
    return point
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.