content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def select_questions(db: Database) -> List[Row]:
    """
    Select 20 questions from the database using a spaced-repetition algorithm.

    The questions are drawn from 3 quizzes in a fixed 10/7/3 ratio; quizzes
    and questions are chosen based on memorization strength and the time
    since they were last asked.
    """
    quizzes = select_quizzes(db)
    selected: List[Row] = []
    # Fixed per-quiz question counts: 10 from the first, 7 second, 3 third.
    for index, count in enumerate((10, 7, 3)):
        selected.extend(select_questions_from_quizzes(db, quizzes[index], count))
    return selected
def process_ona_webhook(instance_data: dict):
    """Create or update an Instance from webhook payload data.

    Returns True when an Instance was created or updated, False when
    process_instance could not produce one.
    """
    return process_instance(instance_data) is not None
def test_quantized_conv2d_nonfunctional():
    """Basic test of the PyTorch quantized conv2d Node with external quantized
    input on Glow."""

    def model(inp):
        quantize = torch.nn.quantized.Quantize(1 / 16, 0, torch.quint8)
        dequantize = torch.nn.quantized.DeQuantize()
        conv = torch.nn.quantized.Conv2d(1, 1, [2, 2])
        return dequantize(conv(quantize(inp)))

    inputs = torch.tensor([[[[5.0, 6.0], [7.0, 8.0]]]])
    expected_ops = {
        "aten::quantize_per_tensor",
        "glow::unpacked_quantized_conv2d",
        "aten::dequantize",
    }
    jitVsGlow(model, inputs, expected_fused_ops=expected_ops)
def distance(v, w):
    """Euclidean distance between two vectors."""
    squared = squared_distance(v, w)
    return math.sqrt(squared)
def configuration(monkeypatch):
    """In-memory configuration.

    Args:
        monkeypatch: Fixture helper.
    """
    config = configparser.ConfigParser()
    config.read_dict({
        'api': {
            'project': 'config_project',
            'host': 'config_host',
            'key': 'config_key',
        },
    })
    # Swap the module-level configuration for the in-memory one.
    monkeypatch.setattr(orchestrate.config, 'configuration', config)
def client_login_fixture(client):
    """Define a fixture for patching the aioridwell coroutine to get a client."""
    target = "homeassistant.components.ridwell.config_flow.async_get_client"
    with patch(target) as mock_client:
        mock_client.side_effect = client
        yield mock_client
def createLayerOnSimFrameDepend(job, layer, onjob, onlayer, onframe):
    """Creates a dependency from every frame of a layer onto a single (sim) frame.
    @type job: string
    @param job: the name of the dependant job
    @type layer: string
    @param layer: the name of the dependant layer
    @type onjob: string
    @param onjob: the name of the job to depend on
    @type onlayer: string
    @param onlayer: the name of the layer to depend on
    @type onframe: int
    @param onframe: the number of the frame to depend on
    @rtype: list<Depend>
    @return: the created dependencies, one per frame of the dependant layer"""
    # Validate every argument up front; __is_valid raises with a specific error.
    __is_valid(job, ERR_INVALID_ER_JOB)
    __is_valid(layer, ERR_INVALID_ER_LAYER)
    __is_valid(onjob, ERR_INVALID_ON_JOB)
    __is_valid(onlayer, ERR_INVALID_ON_LAYER)
    __is_valid(onframe, ERR_INVALID_ON_FRAME)
    logger.debug(
        "creating los depend from %s/%s to %s/%s-%04d", job, layer, onjob, onlayer, onframe)
    depend_er_layer = opencue.api.findLayer(job,layer)
    depend_on_frame = opencue.api.findFrame(onjob, onlayer, onframe)
    # Every frame in the dependant layer depends on the single target frame.
    depends = []
    for depend_er_frame in depend_er_layer.getFrames():
        depends.append(depend_er_frame.createDependencyOnFrame(depend_on_frame))
    return depends
def QA_SU_save_stock_min_5(file_dir, client=DATABASE):
    """Save stock_min5 data from local TDX files into MongoDB.

    Arguments:
        file_dir -- directory containing the TDX data files
    Keyword Arguments:
        client -- database handle (default: {DATABASE})
    Returns:
        the result of the underlying TDX-to-Mongo import
    """
    result = tdx_file.QA_save_tdx_to_mongo(file_dir, client)
    return result
def get_course_by_name(name):
    """ Return a course dict for the given name, or None.

    Returns a dict with keys: id, name, title, owner, active, type,
    practice_visibility, assess_visibility.  Empty practice/assess
    visibility values default to "all".

    NOTE(review): despite the parameter name, the SQL matches on the course
    *title* column (case-insensitive LIKE), not a name column.  Also the
    key mapping looks shifted: 'name' receives the title column and 'title'
    the description column -- confirm against the schema.
    """
    ret = run_sql(
        """SELECT course, title, description, owner, active, type,
                  practice_visibility, assess_visibility
           FROM courses
           WHERE lower(title) LIKE lower(%s);""", [name, ])
    course = None
    if ret:
        # Only the first matching row is used.
        row = ret[0]
        course = {
            'id': int(row[0]),
            'name': row[1],
            'title': row[2],
            'owner': row[3],
            'active': row[4],
            'type': row[5],
            'practice_visibility': row[6],
            'assess_visibility': row[7]
        }
        # Treat NULL/empty visibility settings as "visible to all".
        if not course['practice_visibility']:
            course['practice_visibility'] = "all"
        if not course['assess_visibility']:
            course['assess_visibility'] = "all"
    return course
def make_nailgun_transport(nailgun_server, nailgun_port=None, cwd=None):
    """
    Creates and returns a socket connection to the nailgun server.

    :param nailgun_server: either "local:<path>" (named pipe on Windows,
        unix domain socket elsewhere) or a hostname/address for TCP.
    :param nailgun_port: TCP port; only used for the non-"local:" form.
    :param cwd: directory to chdir into while connecting a unix socket so a
        relative socket path resolves against it; the previous cwd is restored.
    :raises NailgunException: if the socket cannot be created or connected.
    """
    transport = None
    if nailgun_server.startswith("local:"):
        if platform.system() == "Windows":
            # "local:" on Windows addresses a named pipe.
            pipe_addr = nailgun_server[6:]
            transport = WindowsNamedPipeTransport(pipe_addr)
        else:
            try:
                s = socket.socket(socket.AF_UNIX)
            except socket.error as msg:
                re_raise(
                    NailgunException(
                        "Could not create local socket connection to server: {0}".format(
                            msg
                        ),
                        NailgunException.SOCKET_FAILED,
                    )
                )
            # Strip the "local:" prefix to get the socket path.
            socket_addr = nailgun_server[6:]
            prev_cwd = os.getcwd()
            try:
                if cwd is not None:
                    # chdir so a relative socket path resolves against cwd.
                    os.chdir(cwd)
                s.connect(socket_addr)
                transport = UnixTransport(s)
            except socket.error as msg:
                re_raise(
                    NailgunException(
                        "Could not connect to local server at {0}: {1}".format(
                            socket_addr, msg
                        ),
                        NailgunException.CONNECT_FAILED,
                    )
                )
            finally:
                # Always restore the original working directory.
                if cwd is not None:
                    os.chdir(prev_cwd)
    else:
        # TCP: try every address getaddrinfo resolves until one connects.
        socket_addr = nailgun_server
        socket_family = socket.AF_UNSPEC
        for (af, socktype, proto, _, sa) in socket.getaddrinfo(
            nailgun_server, nailgun_port, socket.AF_UNSPEC, socket.SOCK_STREAM
        ):
            try:
                s = socket.socket(af, socktype, proto)
            except socket.error as msg:
                s = None
                continue
            try:
                s.connect(sa)
                # NOTE(review): the TCP path also wraps the socket in
                # UnixTransport -- presumably a generic stream transport
                # despite its name; confirm.
                transport = UnixTransport(s)
            except socket.error as msg:
                s.close()
                s = None
                continue
            break
        if transport is None:
            raise NailgunException(
                "Could not connect to server {0}:{1}".format(nailgun_server, nailgun_port),
                NailgunException.CONNECT_FAILED,
            )
    return transport
def delete_chap_credentials(TargetARN=None, InitiatorName=None):
    """
    Deletes Challenge-Handshake Authentication Protocol (CHAP) credentials for a specified iSCSI target and initiator pair.
    See also: AWS API Documentation
    Examples
    Deletes Challenge-Handshake Authentication Protocol (CHAP) credentials for a specified iSCSI target and initiator pair.
    Expected Output:
    :example: response = client.delete_chap_credentials(
        TargetARN='string',
        InitiatorName='string'
    )
    :type TargetARN: string
    :param TargetARN: [REQUIRED]
    The Amazon Resource Name (ARN) of the iSCSI volume target. Use the DescribeStorediSCSIVolumes operation to return to retrieve the TargetARN for specified VolumeARN.
    :type InitiatorName: string
    :param InitiatorName: [REQUIRED]
    The iSCSI initiator that connects to the target.
    :rtype: dict
    :return: {
        'TargetARN': 'string',
        'InitiatorName': 'string'
    }
    """
    # NOTE(review): auto-generated documentation stub -- the body does nothing
    # and returns None; presumably the real call is dispatched by a generated
    # client elsewhere.  Confirm before relying on this function.
    pass
def timedelta(string):
    """
    Parse :param string: into :class:`datetime.timedelta`, you can use any
    (logical) combination of Nw, Nd, Nh and Nm, e.g. `1h30m` for 1 hour, 30
    minutes or `3w` for 3 weeks.
    Raises a ValueError if the input is invalid/unparseable.
    >>> print(timedelta("3w"))
    21 days, 0:00:00
    >>> print(timedelta("3w 12h 57m"))
    21 days, 12:57:00
    >>> print(timedelta("1h30m37s"))
    1:30:37
    >>> print(timedelta("1asdf3w"))
    Traceback (most recent call last):
    ...
    ValueError: invalid human-readable timedelta
    """
    keys = ["weeks", "days", "hours", "minutes", "seconds"]
    # Builds e.g. ((?P<weeks>\d+)w ?)?((?P<days>\d+)d ?)?... -- every unit is
    # optional and may be followed by a single space.
    regex = "".join("((?P<%s>\\d+)%s ?)?" % (k, k[0]) for k in keys)
    kwargs = {k: int(v)
              for k, v in re.match(regex, string).groupdict(default="0").items()}
    rv = datetime.timedelta(**kwargs)
    if rv == datetime.timedelta():
        # Nothing parsed (or everything zero): reject as unparseable.
        raise ValueError("invalid human-readable timedelta")
    # Fix: return the already-computed value instead of rebuilding it.
    return rv
def englishTextNull(englishInputNull):
    """
    Return True if the input string is empty, False otherwise.

    Fix: the original returned None (falsy, but not False) for non-empty
    input; an explicit bool is now always returned.
    """
    return englishInputNull == ''
def test_restrict_inputs():
    """Test a basic use of the restrict(inputs=(...)) method."""
    block = Block('x = a + b\ny = b - c\nz = c**2')
    assert_equal(block.inputs, {'a', 'b', 'c'})
    assert_equal(block.outputs, {'x', 'y', 'z'})
    # Restricting to input 'a' should keep only the statements reachable
    # from it.
    restricted = block.restrict(inputs=('a', ))
    namespace = dict(a=100, b=200)
    restricted.execute(namespace)
    assert_equal(sorted(namespace), ['a', 'b', 'x'])
    assert_equal(namespace['a'], 100)
    assert_equal(namespace['b'], 200)
    assert_equal(namespace['x'], 300)
def http_set(directive, values, config=None):
    """Set a directive in http context.
    If directive exists, the value will be replace in place.
    If directive not exists, new directive will be created at the beginning of http context.
    Parameter values can be a list or a string.
    If values is set to empty list or None or empty string, then the directive will be deleted.
    """
    # Reload the parsed nginx config when none was supplied or the cached one
    # has pending on-disk changes.
    if not config or config['_isdirty']:
        config = loadconfig(NGINXCONF, True)
    hcontext = _context_gethttp(config)
    # Normalise `values` to a list; empty/None means "delete the directive".
    if not values:
        values = []
    elif isinstance(values, str):
        values = [values]
    # Render each value as a complete "directive value;" config line.
    values = ['%s %s;' % (directive, v) for v in values]
    if directive in hcontext:
        # update or delete value
        dvalues = hcontext[directive]
        lines = [(config['_files'][dvalue['file']], dvalue['line'][0], dvalue['line'][1]) for dvalue in dvalues]
        _replace(lines, values)
    else:
        # add directive to the beginning of http context
        # some directive like proxy_cache_path should be declare before use the resource,
        # so we should insert it at the beginning
        begin = hcontext['_range']['begin']
        _insert(config['_files'][begin['file']], begin['line'][0]+begin['line'][1], values)
    # Mark the in-memory config stale so the next caller reloads from disk.
    config['_isdirty'] = True
def authority_b(request, configuration, authority_a):
    """
    Intermediate authority_b, issued by authority_a.
    Chain: valid_authority -> authority_a -> authority_b
    """
    authority = configuration.manager.get_or_create_ca("authority_b",
                                                       hosts=["*.com"],
                                                       certificate_authority=authority_a)
    # Clean up the generated certificate files when the test finishes.
    request.addfinalizer(authority.delete_files)
    return authority
def filtered_data_time_stats(df):
"""Displays statistics on the most frequent times of travel for Filtered Data."""
print('-'*40)
print('\nCalculating The Most Frequent Times of Travel for Filtered Data...\n')
print('-'*40)
start_time = time.time()
# display month name
print("Month is: ",df['Month'].iloc[0])
print('-'*40)
# display the most common day of week
print("Day of week is: ",df['Day'].iloc[0])
print('-'*40)
# display the most common start hour
df['Hour'] = pd.DatetimeIndex(df['Start Time']).hour
common_hour = pd.DataFrame(df['Hour'].value_counts())
common_hour = common_hour.reset_index(drop = False)
cols = common_hour.columns.values
cols[0] = "Hour"
cols[1] = "Day_Counts"
common_hour.column = cols
common_hour = common_hour.head(1)
print("Most common Start Hour: ",common_hour.Hour.iloc[0])
print('-'*40)
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40) | 28,516 |
def derive(pattern):
    """
    Return the first derivative of *pattern*.

    The input is smoothed first, so noisy patterns shouldn't be much of a
    problem.
    """
    smoothed = smooth_pattern(pattern)
    return np.gradient(smoothed)
def gaussian(nm, a, x0, sigma):
    """
    Evaluate a Gaussian with amplitude *a*, centre *x0* and width *sigma*
    at the point(s) *nm*.
    """
    exponent = -((nm - x0) ** 2.0) / (2 * (sigma ** 2.0))
    return a * np.exp(exponent)
def test_dual_conditions_ge_and_le_dates(df, right):
    """Test output for interval conditions."""
    assume(not df.empty)
    assume(not right.empty)
    middle, left_on, right_on = ("E", "Dates", "Dates_Right")
    # Expected: cross join, then keep rows where left_on <= middle <= right_on.
    cross = df.assign(t=1).merge(right.assign(t=1), on="t")
    expected = (
        cross.query(f"{left_on} <= {middle} <= {right_on}")
        .reset_index(drop=True)
        .filter([left_on, middle, right_on])
    )
    actual = df.conditional_join(
        right,
        (middle, left_on, ">="),
        (middle, right_on, "<="),
        how="inner",
        sort_by_appearance=True,
    ).filter([left_on, middle, right_on])
    assert_frame_equal(expected, actual)
def list_to_filename(filelist):
    """Return *filelist* unchanged when it holds more than one entry,
    otherwise return its first element.
    """
    return filelist if len(filelist) > 1 else filelist[0]
def _get_expiration_seconds(expiration):
    """Convert 'expiration' to a number of seconds in the future.
    :type expiration: int, long, datetime.datetime, datetime.timedelta
    :param expiration: When the signed URL should expire.
    :rtype: int
    :returns: a timestamp as an absolute number of seconds.
    """
    # A timedelta is taken relative to "now" in UTC...
    if isinstance(expiration, datetime.timedelta):
        expiration = _NOW().replace(tzinfo=UTC) + expiration
    # ...and a datetime collapses to an integer POSIX timestamp.
    if isinstance(expiration, datetime.datetime):
        expiration = _microseconds_from_datetime(expiration) // 10 ** 6
    if not isinstance(expiration, six.integer_types):
        raise TypeError('Expected an integer timestamp, datetime, or '
                        'timedelta. Got %s' % type(expiration))
    return expiration
def push_data_to_elastic_search(data):
    """
    Post `data` to our ElasticSearch instance at `index_name`.
    This script was adapted from gitlab-ci/.
    The index name is hardcoded to a test index for the time being.
    The ingest goes through the "add_timestamp" pipeline which was created like
    so:
    > http PUT elasticsearch.dfinity.systems:9200/_ingest/pipeline/add_timestamp description="the pipeline" processors:='[{"set": {"field": "ingest_timestamp", "value": "{{_ingest.timestamp}}" } }]'
    in the future, the pipeline should be terraformed.
    Args:
    ----
    data: The JSON data to export.
    """
    index_name = "outsource-test"
    pipeline_name = "add_timestamp"
    url = "http://elasticsearch.dfinity.systems:9200/%s/_doc?pipeline=%s" % (
        index_name,
        pipeline_name,
    )
    # Keys are always sorted so the payload is byte-stable for tests.
    payload = json.dumps(data, sort_keys=True).encode()
    req = urllib.request.Request(
        url,
        data=payload,
        headers={"content-type": "application/json"},
    )
    try:
        urllib.request.urlopen(req, timeout=5)
    except (HTTPError, URLError) as error:
        print(f"[outsource]: WARNING: could not upload metrics: {error}")
    except timeout:
        print("[outsource]: WARNING: could not upload metrics: timed out")
def parse_encoding(format_, track, supplied_encoding, prompt_encoding):
    """Get the encoding from the FLAC files, otherwise require the user to specify it."""
    if format_ == "FLAC":
        # Bit depth maps directly onto the encoding label for lossless files.
        encoding = {16: "Lossless", 24: "24bit Lossless"}.get(track["precision"])
        if encoding is not None:
            return encoding, False
    if supplied_encoding and list(supplied_encoding) != [None, None]:
        return supplied_encoding
    if prompt_encoding:
        return _prompt_encoding()
    click.secho(
        "An encoding must be specified if the files are not lossless.", fg="red"
    )
    raise click.Abort
def move_logfile_to_standard_location(base_file, input_log_file, yaml_outdir=None, log_type='catalog_seed'):
    """Copy a log file into a ``mirage_logs`` subdirectory under the
    simulation data output directory, renaming it to match the input yaml
    file name with a `log` suffix.

    Parameters
    ----------
    base_file : str
        Name of the file on which the name of the log file will be based.
        For a ``log_type`` of "catalog_seed" or "fits_seed" this should be
        one of the yaml input files used by e.g. catalog_seed_image.  For a
        ``log_type`` of "yaml_generator" this should be the xml file from
        APT describing the proposal.
    input_log_file : str
        Name of the log file to be copied.
    yaml_outdir : str
        Output directory containing the simulated data (the Output:directory
        entry of the yaml file).  Optional, to save re-reading the yaml file.
    log_type : str
        One of 'catalog_seed', 'fits_seed', 'yaml_generator' -- selects the
        naming convention for the copied log.
    """
    # Derive the standardized log name from the base file.
    final_logfile_name = create_standard_logfile_name(os.path.basename(base_file), log_type=log_type)
    # Resolve the output directory if the caller did not supply it.
    if yaml_outdir is None:
        yaml_outdir = get_output_dir(base_file)
    log_dir = os.path.join(yaml_outdir, 'mirage_logs')
    os.makedirs(log_dir, exist_ok=True)
    # Copy (preserving metadata) into the standard location.
    shutil.copy2(input_log_file, os.path.join(log_dir, final_logfile_name))
def html_converter():
    """Convert the mercury rss_text ``.txt`` files to HTML with pandoc, then
    move the generated ``.html`` files into the ``html_version`` directory.

    pandoc is invoked as an external command because that is simpler than a
    library binding.
    """
    folder = "/usr/share/nginx/html/feeds/support_files/rss_text/Mercury-SB4_comparison/mercury"
    txt_files = [f for f in os.listdir(folder) if f.endswith(".txt")]
    for filename in txt_files:
        # Fix: the original stripped only 3 characters ("txt"), producing
        # "name..html"; splitext swaps the extension cleanly.
        html_name = os.path.splitext(filename)[0] + ".html"
        logger.info(html_name)
        # Fix: the original concatenated "-o" directly onto the file name and
        # used shell=True; a list argv avoids quoting issues entirely.
        cmd = ["pandoc", "--highlight-style=zenburn", "-s", filename, "-o", html_name]
        logger.info(cmd)
        subprocess.run(cmd, cwd=folder)
    for filename in os.listdir(folder):
        if filename.endswith(".html"):
            # Destination stays relative to the current working directory,
            # matching the original behaviour.
            subprocess.run(["mv", os.path.join(folder, filename), "html_version"])
def check_core_dump_setting():
    """
    checking os core dump setting is right
    """
    errors = []
    # Core files must not be size-limited.
    ret, out = run_shell('ulimit -c')
    limit = out.strip('\n').strip()
    if limit != 'unlimited':
        errors.append(f'core dump file setting limit="{limit}" is incorrect, should set to unlimited')
    # The core pattern must point to an existing, world-writable directory.
    with open('/proc/sys/kernel/core_pattern', 'r') as core_pattern_file:
        core_pattern = core_pattern_file.read().strip()
    core_path = os.path.dirname(core_pattern)
    if not os.path.isabs(core_path):
        errors.append(f'core dump pattern path {core_pattern} is not a absolute path')
    elif not os.path.exists(core_path):
        errors.append(f'core dump pattern path {core_path} is not exists')
    else:
        permission = stat.S_IMODE(os.stat(core_path).st_mode)
        if permission != 0o777:
            errors.append(f'core dump pattern path {core_path}\'s permission mask {permission} is not right')
    return errors
def test_init():
    """Test return objects"""
    freq_data = pf.FrequencyData([1, .5], [100, 200])
    # The constructor must hand back an interpolation object...
    interp = InterpolateSpectrum(
        freq_data, "complex", ("linear", "linear", "linear"))
    assert isinstance(interp, InterpolateSpectrum)
    # ...and calling it must produce a Signal.
    result = interp(8, 44100)
    assert isinstance(result, pf.Signal)
def disaggregate(model, mains, model_name, num_seq_per_batch, seq_len,
                 appliance, target_scale, stride=1):
    """
    Disaggregation function to predict all results for whole time series mains.
    :param model: tf model object
    :param mains: numpy.ndarray, shape(-1,)
    :param model_name: name of the used model (unused here; kept for API compatibility)
    :param num_seq_per_batch: int, number of sequences to have in the batch
    :param seq_len: int, length of the sequence
    :param appliance: str, name of the appliance
    :param target_scale: int, scaling factor of predicted value; overrides the
        per-appliance default maximum when truthy
    :param stride: int, stride of moving window
    :raises ValueError: for an unrecognised appliance name (the original
        if/elif chain fell through and crashed later with a NameError)
    :return: rectangles -- the output of pred_to_rectangles (NOTE: the
        original docstring described a "(p, metrics)" return that did not
        match the code)
    """
    # Default target maxima per appliance, keyed by normalised name.  This
    # replaces a copy-pasted if/elif chain that repeated the shared input
    # scaling constants for every appliance.
    default_target_max = {
        'fridge': 313,
        'refrigerator': 313,
        'washing_machine': 3999,
        'dishwasher': 500,
        'electric_vehicle': 6000,
        'dryer': 2500,
    }
    key = appliance.lower().replace(' ', '_')
    if key not in default_target_max:
        raise ValueError("unknown appliance: %s" % appliance)
    target_max = target_scale if target_scale else default_target_max[key]
    # Input scaling constants are shared by all appliances.
    input_max = 7879
    input_min = 80
    # Convert the mains series into padded batches for prediction.
    mains = mains.reshape(-1,)
    agg_batches = mains_to_batches(mains, num_seq_per_batch, seq_len, stride=stride, pad=True)
    # Run the network over every batch.
    y_net = []
    for batch in agg_batches:
        X_pred = np.copy(batch.reshape(-1, seq_len, 1))
        # Normalise to the training input range, then apply the fixed gain of 10.
        X_pred /= (input_max - input_min)
        X_pred = X_pred * 10
        y_net.append(model.predict(X_pred))
    # Convert the raw predictions to rectangles.
    rectangles = pred_to_rectangles(y_net, num_seq_per_batch, seq_len, stride)
    return rectangles
def set_path_to_file(categoria: str) -> pathlib.PosixPath:
    """
    Given a legislation category name, ensure that ``./data/<categoria>``
    exists (creating parents as needed) and return it as a path object.
    """
    target = Path("./data") / categoria
    target.mkdir(parents=True, exist_ok=True)
    return target
def transpose_nested_dictionary(nested_dict):
    """
    Swap the outer and inner keys of a two-level mapping: a dict of
    k1 -> k2 -> value becomes k2 -> k1 -> value.
    """
    transposed = defaultdict(dict)
    for outer_key, inner_mapping in nested_dict.items():
        for inner_key, value in inner_mapping.items():
            transposed[inner_key][outer_key] = value
    return transposed
def auth_code():
    """
    Handler for two-factor authentication.

    :return: the code entered by the user, and True (the flag expected by the
        2FA API -- presumably "remember this device"; confirm against the caller)
    :rtype: tuple(str, bool)
    """
    tmp = input('Введи код: ')
    return tmp, True
def graph_file_read_mtx(Ne: int, Nv: int, Ncol: int, directed: int, filename: str,\
         RemapFlag:int=1, DegreeSortFlag:int=0, RCMFlag:int=0, WriteFlag:int=0) -> Graph:
    """
    This function is used for creating a graph from a mtx graph file.
    compared with the graph_file_read function, it will skip the mtx head part
    Ne : the total number of edges of the graph
    Nv : the total number of vertices of the graph
    Ncol: how many column of the file. Ncol=2 means just edges (so no weight and weighted=0)
          and Ncol=3 means there is weight for each edge (so weighted=1).
    directed: 0 means undirected graph and 1 means directed graph
    filename: the file that has the edge list
    RemapFlag: if the vertex ID is larger than the total number of vertices, we will relabel the vertex ID
    DegreeSortFlag: we will let small vertex ID be the vertex whose degree is small
    RCMFlag: we will remap the vertex ID based on the RCM algorithm
    WriteFlag: we will output the final edge list src->dst array as a new input file.
    Returns
    -------
    Graph
        The Graph class to represent the data
    See Also
    --------
    Notes
    -----
    Raises
    ------
    RuntimeError
    """
    cmd = "segmentedGraphFileMtx"
    # The server expects the arguments as one space-separated string, in this
    # exact positional order.
    args = "{} {} {} {} {} {} {} {} {}".format(Ne, Nv, Ncol, directed, filename, \
                 RemapFlag, DegreeSortFlag, RCMFlag, WriteFlag)
    # NOTE(review): debug print of the raw argument string -- consider removing
    # or routing through a logger.
    print(args)
    repMsg = generic_msg(cmd=cmd, args=args)
    # The reply encodes the Graph constructor arguments joined by '+'.
    return Graph(*(cast(str, repMsg).split('+')))
def _get_uri(tag, branch, sha1):
"""
Set the uri -- common code used by both install and debian upgrade
"""
uri = None
if tag:
uri = 'ref/' + tag
elif branch:
uri = 'ref/' + branch
elif sha1:
uri = 'sha1/' + sha1
else:
# FIXME: Should master be the default?
log.debug("defaulting to master branch")
uri = 'ref/master'
return uri | 28,533 |
def compute_inliers(BIH, corners):
    """
    Function: compute_inliers
    -------------------------
    Given a board-image homography and a set of all corners, return the
    number of corners classified as inliers.
    """
    # Step 1: project every vertex of the 9x9 board grid into image coords.
    board_points = [(i, j) for i in range(9) for j in range(9)]
    projected = [board_to_image_coords(BIH, bp) for bp in board_points]
    # Step 2: count the corners that is_BIH_inlier accepts.
    return sum(is_BIH_inlier(projected, corner) for corner in corners)
def plotPayloadStates(full_state, posq, tf_sim):
    """This function plots the states of the payload.

    Parameters
    ----------
    full_state : array, columns laid out as [xl(0:3), vl(3:6), p(6:9), wl(9:12)]
        Payload position, linear velocity, cable unit vector and angular velocity.
    posq : array
        Quadrotor positions, same row count as full_state.
    tf_sim : number
        Final simulation time in milliseconds (converted to seconds for the axis).

    Returns
    -------
    fig8..fig12 : the five created figures (positions, linear velocities,
        angular velocities, cable unit vector, quad/payload distance norm).
    """
    # PL_states = [xl, vl, p, wl]
    fig8, ax11 = plt.subplots(3, 1, sharex=True ,sharey=True)
    fig8.tight_layout()
    fig9, ax12 = plt.subplots(3, 1, sharex=True, sharey=True)
    fig9.tight_layout()
    fig10, ax13 = plt.subplots(3, 1, sharex=True ,sharey=True)
    fig10.tight_layout()
    fig11, ax14 = plt.subplots(3, 1, sharex=True ,sharey=True)
    fig11.tight_layout()
    fig12, ax15 = plt.subplots(1, 1, sharex=True ,sharey=True)
    fig12.tight_layout()
    # Time axis in seconds (tf_sim is in milliseconds).
    time = np.linspace(0, tf_sim*1e-3, num=len(full_state))
    # Slice the packed state into its components.
    pos = full_state[:,0:3]
    linVel = full_state[:,3:6]
    angVel = full_state[:,9:12]
    p = full_state[:,6:9]
    ts = 'time [s]'
    ###############################################################################################
    # Payload positions (x, y, z).
    ax11[0].plot(time, pos[:,0], c='k', lw=0.75, label='Actual'), ax11[1].plot(time, pos[:,1], lw=0.75, c='k'), ax11[2].plot(time, pos[:,2], lw=0.75, c='k')
    ax11[0].set_ylabel('x [m]',), ax11[1].set_ylabel('y [m]'), ax11[2].set_ylabel('z [m]')
    ax11[0].legend()
    fig8.supxlabel(ts,fontsize='small')
    grid = plt.GridSpec(3,1)
    create_subtitle(fig8, grid[0, ::], 'Actual Payload Positions')
    ###############################################################################################
    # Payload linear velocities.
    ax12[0].plot(time, linVel[:,0],lw=0.75, c='k', label='Actual'), ax12[1].plot(time, linVel[:,1],lw=0.75, c='k'), ax12[2].plot(time, linVel[:,2],lw=0.75, c='k')
    ax12[0].set_ylabel('vx [m/s]'), ax12[1].set_ylabel('vy [m/s]'), ax12[2].set_ylabel('vz [m/s]')
    ax12[0].legend()
    fig9.supxlabel(ts,fontsize='small')
    grid = plt.GridSpec(3,1)
    create_subtitle(fig9, grid[0, ::], 'Actual Payload Linear Velocities')
    ###############################################################################################
    # Payload angular velocities.
    ax13[0].plot(time, angVel[:,0],c='k',lw=1, label='Actual'), ax13[1].plot(time, angVel[:,1],c='k',lw=1), ax13[2].plot(time, angVel[:,2],c='k',lw=1)
    ax13[0].set_ylabel('wx [deg/s]',labelpad=-5), ax13[1].set_ylabel('wy [deg/s]',labelpad=-5), ax13[2].set_ylabel('wz [deg/s]',labelpad=-5)
    fig10.supxlabel(ts,fontsize='small')
    grid = plt.GridSpec(3,1)
    create_subtitle(fig10, grid[0, ::], ' Actual Payload Angular Velocities')
    ###############################################################################################
    # Cable directional unit vector components.
    ax14[0].plot(time, p[:,0],c='k',lw=1, label='Actual'), ax14[1].plot(time, p[:,1],c='k',lw=1), ax14[2].plot(time, p[:,2],c='k',lw=1)
    ax14[0].set_ylabel('px',labelpad=-5), ax14[1].set_ylabel('py',labelpad=-5), ax14[2].set_ylabel('pz',labelpad=-5)
    fig11.supxlabel(ts,fontsize='small')
    grid = plt.GridSpec(3,1)
    create_subtitle(fig11, grid[0, ::], 'Cable Directional Unit Vector')
    ###############################################################################################
    # Norm of the quadrotor-to-payload position difference over time.
    norm_x = np.zeros((len(full_state),))
    for i in range(0, len(norm_x)):
        norm_x[i] = np.linalg.norm(pos[i,:] - posq[i,:])
    ax15.plot(time, norm_x,c='k',lw=1, label='Norm')
    ax15.set_ylabel('||xq - xp||',labelpad=-2)
    fig12.supxlabel(ts,fontsize='small')
    grid = plt.GridSpec(3,1)
    create_subtitle(fig12, grid[0, ::], 'Diff between Quadrotor and Payload Positions (Norm)')
    return fig8, fig9, fig10, fig11, fig12
def draw(p):
    """
    Sample a random index according to the probability weights *p*.
    """
    cumulative = np.cumsum(p)
    u = np.random.random()
    return np.searchsorted(cumulative, u, side='right')
def test_register_collection_archive_added_to_routes():
    """If your registered collection has an archive, add the archive to
    site.routes"""
    class TestSite(Site):
        routes = []
    t = TestSite()
    assert not t.routes # Verify that the site routes are empty on creation
    # Registering a collection with has_archive=True should add its archive
    # route to the site.
    @t.register_collection
    class TestCollection(Collection):
        content_items = []
        has_archive = True
    # The archive of TestCollection should have been added to t.routes.
    assert t.routes[0].title == 'TestCollection'
def _autoinit(func):
"""Decorator to ensure that global variables have been initialized before
running the decorated function.
Args:
func (callable): decorated function
"""
@functools.wraps(func)
def _wrapped(*args, **kwargs):
init()
return func(*args, **kwargs)
return _wrapped | 28,538 |
def export_testing_time_result():
    """
    Build the testing-time Excel report for the requested target and return
    it as an HTTP attachment response.

    Adapted from the answer at:
    https://stackoverflow.com/questions/42957871/return-a-created-excel-file-with-flask
    :return: A HTTP response which is office excel binary data.
    """
    target_info = request.form["target"]
    workbook = ResultReport.general_report(target_info=target_info)
    # Date-only ISO stamp (YYYY-MM-DD) used in the download file name.
    general_report_datetime = datetime.now().isoformat(timespec='seconds').split("T")[0]
    headers = {
        'Content-Disposition': f'attachment; filename=D-Link_Wi-Fi_Testing_Time_Report_{general_report_datetime}.xlsx',
        'Content-type': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
    }
    return Response(save_virtual_workbook(workbook=workbook), headers=headers)
def get_db():
    """Yield a DB session and close it when the caller is finished.

    Generator-style dependency: the session is yielded to the caller and the
    finally block guarantees it is closed afterwards, even on error.
    """
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
def profile_view(user_request: 'Queryset') -> 'Queryset':
    """
    Process a user's profile data and select the vacancies from the DB that
    match that particular user.

    Returns a tuple ``(vacancies_list, recommended_vacancies)``;
    ``recommended_vacancies`` is None when the recommender yields no ids.
    """
    # Unpack the user's saved search preferences.
    user_id = user_request[0]['id']
    area = user_request[0]['area']
    experience = user_request[0]['experience']
    salary = user_request[0]['salary']
    without_salary = user_request[0]['without_salary']
    if without_salary is False:
        # Salary filter on: the desired salary must fall inside the vacancy's
        # advertised [salary_from, salary_to] range.
        vacancies_list = Vacancies.objects.filter(area=area,
                                                  experience=experience,
                                                  salary_from__lte=salary,
                                                  salary_to__gte=salary,
                                                  ).exclude(banned_by_users=user_id,
                                                  ).values('name',
                                                           'url',
                                                           'id',
                                                  ).order_by('-published')
    else:
        # Salary filter off: match on area and experience only.
        vacancies_list = Vacancies.objects.filter(area=area,
                                                  experience=experience,
                                                  ).exclude(banned_by_users=user_id,
                                                  ).values('name',
                                                           'url',
                                                           'id',
                                                  ).order_by('-published')
    # Record which vacancies this user has now seen.
    update_shown_vacancy_to_user(user_id, vacancies_list)
    recommended_vacancies_id = recommendations(user_request)
    if recommended_vacancies_id:
        recommended_vacancies = Vacancies.objects.filter(id__in=recommended_vacancies_id,
                                                         ).values('name', 'url')
    else:
        recommended_vacancies = None
    return vacancies_list, recommended_vacancies
def ground_generator(literal, context, D, D_index=None):
    """
    This function is a generator, which generates entity and the context.
    Args:
        literal (a literal instance):
        context (a dictionary): store the mapping of variables and constants
        D (a dictionary object): a global object storing all facts.
        D_index (a dictionary object, optional): per-predicate index of facts
            keyed by "pos@constant" strings, used to narrow the candidate set.
    Yields:
        (entity, context) pairs -- a grounded entity tuple and the extra
        variable bindings introduced while matching it.
    """
    predicate = literal.get_predicate()
    # Deep copy so in-place substitution below cannot mutate the literal.
    entity = copy.deepcopy(literal.get_entity())
    if predicate not in D:
        return
    if len(entity) == 1 and entity[0].name == "nan":
        # Propositional (argument-less) literal: ground trivially.
        yield entity, dict()
    elif not contain_variable(entity):
        # Fully constant entity: just check membership in the fact base.
        if predicate in D and entity in D[predicate]:
            yield entity, dict()
    elif not contain_variable_after_replace(entity, context):
        # Every variable is already bound in the context -- substitute the
        # bindings in place and check the resulting constant entity.
        replaced_entity = []
        for term in entity:
            if term.type == "variable":
                term.type = "constant"
                term.name = context[term.name]
                replaced_entity.append(term)
            else:
                replaced_entity.append(term)
        if predicate in D and tuple(replaced_entity) in D[predicate]:
            yield tuple(replaced_entity), dict()
    else:
        # Some variables remain unbound: enumerate candidate facts and unify.
        if D_index is not None:
            # Build the "pos@constant||..." key for every already-fixed slot
            # so the index can prune the candidate facts.
            index_str = []
            for i, term in enumerate(entity):
                if term.type == "constant":
                    index_str.append(str(i) + "@" + term.name)
                else:
                    if term.name in context:
                        index_str.append(str(i)+"@"+context[term.name])
            index_str = "||".join(index_str)
            if len(index_str) == 0:
                # No fixed slot: scan all facts for the predicate.
                for constant_entity in D[predicate]:
                    tmp_context = dict()
                    # for/else: runs the else only if no term mismatched.
                    for term1, term2 in zip(entity, constant_entity):
                        if term1.type == "constant" and term1.name != term2.name:
                            break
                        elif term1.type == "variable" and term1.name in context and context[term1.name] != term2.name:
                            break
                        elif term1.type == "variable" and term1.name in tmp_context and tmp_context[term1.name] != term2.name:
                            break
                        else:
                            if term1.type == "variable":
                                tmp_context[term1.name] = term2.name
                    else:
                        yield constant_entity, tmp_context
            else:
                if index_str in D_index[predicate]:
                    # Only facts matching the fixed slots need unification.
                    for constant_entity in D_index[predicate][index_str]:
                        tmp_context = dict()
                        for term1, term2 in zip(entity, constant_entity):
                            if term1.type == "constant" and term1.name != term2.name:
                                break
                            elif term1.type == "variable" and term1.name in context and context[term1.name] != term2.name:
                                break
                            elif term1.type == "variable" and term1.name in tmp_context and tmp_context[term1.name] != term2.name:
                                break
                            else:
                                if term1.type == "variable":
                                    tmp_context[term1.name] = term2.name
                        else:
                            yield constant_entity, tmp_context
        else:
            # No index available: linear scan over all facts for the predicate.
            for tmp_entity in D[predicate]:
                tmp_context = dict()
                for term1, term2 in zip(entity, tmp_entity):
                    if term1.type == "constant" and term1.name != term2.name:
                        break
                    elif term1.type == "variable" and term1.name in context and context[term1.name] != term2.name:
                        break
                    elif term1.type == "variable" and term1.name in tmp_context and tmp_context[term1.name] != term2.name:
                        break
                    else:
                        if term1.type == "variable":
                            tmp_context[term1.name] = term2.name
                else:
                    yield tmp_entity, tmp_context
def config_vrf(dut, **kwargs):
    """
    Configure (create or delete) one or more VRFs on the DUT.

    Sonic cmd: config vrf <add | delete> <VRF-name>
    eg: config_vrf(dut = dut1, vrf_name = 'Vrf-test', config = 'yes')
    eg: config_vrf(dut = dut1, vrf_name = 'Vrf-test', config = 'no')

    :param dut: device under test handle
    :param kwargs: vrf_name (str or list, mandatory), config ('yes' to add,
                   anything else to delete; default 'yes'), skip_error (bool,
                   default False), cli_type (optional UI-type override)
    :return: True on success, False on failure
    """
    st.log('Config VRF API')
    config = kwargs.get('config', 'yes')
    if 'vrf_name' not in kwargs:
        # Bug fix: previously this only logged and fell through, causing a
        # NameError on the undefined vrf_name below. A missing mandatory
        # parameter is a failure.
        st.log("Mandatory parameter vrfname is not found")
        return False
    if not isinstance(kwargs['vrf_name'], list):
        vrf_name = [kwargs['vrf_name']]
    else:
        vrf_name = kwargs['vrf_name']
    skip_error = kwargs.get('skip_error', False)
    cli_type = kwargs.pop('cli_type', st.get_ui_type(dut))
    if cli_type == 'click':
        # Build one multi-line command covering every requested VRF.
        action = 'add' if config.lower() == 'yes' else 'del'
        my_cmd = ''
        for vrf in vrf_name:
            my_cmd += 'sudo config vrf {} {}\n'.format(action, vrf)
        if skip_error:
            try:
                st.config(dut, my_cmd)
                return True
            except Exception:
                st.log("Error handled..by API")
                return False
        else:
            st.config(dut, my_cmd)
            return True
    elif cli_type == 'klish':
        command = ''
        if config.lower() == 'yes':
            for vrf in vrf_name:
                command = command + "\n" + "ip vrf {}".format(vrf)
        else:
            for vrf in vrf_name:
                command = command + "\n" + "no ip vrf {}".format(vrf)
        output = st.config(dut, command, skip_error_check=skip_error, type="klish", conf=True)
        if "Could not connect to Management REST Server" in output:
            st.error("klish mode not working.")
            return False
        return True
    elif cli_type in ['rest-patch', 'rest-put']:
        http_method = kwargs.pop('http_method', cli_type)
        rest_urls = st.get_datastore(dut, 'rest_urls')
        if config.lower() == 'yes':
            for vrf in vrf_name:
                rest_url = rest_urls['vrf_config'].format(vrf)
                ocdata = {"openconfig-network-instance:network-instance": [{"name": vrf, "config": {"name": vrf, "enabled": True}}]}
                response = config_rest(dut, http_method=http_method, rest_url=rest_url, json_data=ocdata)
                if not response:
                    st.log(response)
                    return False
        elif config.lower() == 'no':
            for vrf in vrf_name:
                rest_url = rest_urls['vrf_config'].format(vrf)
                response = delete_rest(dut, rest_url=rest_url)
                if not response:
                    st.log(response)
                    return False
        return True
    else:
        # Bug fix: previously fell off the end and returned None.
        st.log("Unsupported cli")
        return False
def _worker_command_line(thing, arguments):
"""
Create a worker command line suitable for Popen with only the
options the worker process requires
"""
def a(name):
"options with values"
return [name, arguments[name]] * (arguments[name] is not None)
def b(name):
"boolean options"
return [name] * bool(arguments[name])
return (
['ckanapi', 'dump', thing, '--worker']
+ a('--config')
+ a('--ckan-user')
+ a('--remote')
+ a('--apikey')
+ b('--get-request')
+ ['value-here-to-make-docopt-happy']
) | 28,544 |
def get_endpoint(arn: Optional[str] = None,
                 opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEndpointResult:
    """
    Resource Type Definition for AWS::S3Outposts::Endpoint
    :param str arn: The Amazon Resource Name (ARN) of the endpoint.
    """
    # Assemble the invoke arguments and normalize the invoke options.
    invoke_args = {'arn': arn}
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Perform the provider invoke and unwrap the raw result.
    result = pulumi.runtime.invoke('aws-native:s3outposts:getEndpoint', invoke_args, opts=opts, typ=GetEndpointResult).value
    return AwaitableGetEndpointResult(
        arn=result.arn,
        cidr_block=result.cidr_block,
        creation_time=result.creation_time,
        id=result.id,
        network_interfaces=result.network_interfaces,
        status=result.status)
def solveConsLaborIntMarg(
    solution_next,
    PermShkDstn,
    TranShkDstn,
    LivPrb,
    DiscFac,
    CRRA,
    Rfree,
    PermGroFac,
    BoroCnstArt,
    aXtraGrid,
    TranShkGrid,
    vFuncBool,
    CubicBool,
    WageRte,
    LbrCost,
):
    """
    Solves one period of the consumption-saving model with endogenous labor supply
    on the intensive margin by using the endogenous grid method to invert the first
    order conditions for optimal composite consumption and between consumption and
    leisure, obviating any search for optimal controls.
    Parameters
    ----------
    solution_next : ConsumerLaborSolution
        The solution to the next period's problem; must have the attributes
        vPfunc and bNrmMinFunc representing marginal value of bank balances and
        minimum (normalized) bank balances as a function of the transitory shock.
    PermShkDstn: [np.array]
        Discrete distribution of permanent productivity shocks.
    TranShkDstn: [np.array]
        Discrete distribution of transitory productivity shocks.
    LivPrb : float
        Survival probability; likelihood of being alive at the beginning of
        the succeeding period.
    DiscFac : float
        Intertemporal discount factor.
    CRRA : float
        Coefficient of relative risk aversion over the composite good.
    Rfree : float
        Risk free interest rate on assets retained at the end of the period.
    PermGroFac : float
        Expected permanent income growth factor for next period.
    BoroCnstArt: float or None
        Borrowing constraint for the minimum allowable assets to end the
        period with. Currently not handled, must be None.
    aXtraGrid: np.array
        Array of "extra" end-of-period asset values-- assets above the
        absolute minimum acceptable level.
    TranShkGrid: np.array
        Grid of transitory shock values to use as a state grid for interpolation.
    vFuncBool: boolean
        An indicator for whether the value function should be computed and
        included in the reported solution. Not yet handled, must be False.
    CubicBool: boolean
        An indicator for whether the solver should use cubic or linear interpolation.
        Cubic interpolation is not yet handled, must be False.
    WageRte: float
        Wage rate per unit of labor supplied.
    LbrCost: float
        Cost parameter for supplying labor: u_t = U(x_t), x_t = c_t*z_t^LbrCost,
        where z_t is leisure = 1 - Lbr_t.
    Returns
    -------
    solution_now : ConsumerLaborSolution
        The solution to this period's problem, including a consumption function
        cFunc, a labor supply function LbrFunc, and a marginal value function vPfunc;
        each are defined over normalized bank balances and transitory prod shock.
        Also includes bNrmMinNow, the minimum permissible bank balances as a function
        of the transitory productivity shock.
    """
    # Make sure the inputs for this period are valid: CRRA > LbrCost/(1+LbrCost)
    # and CubicBool = False. CRRA condition is met automatically when CRRA >= 1.
    frac = 1.0 / (1.0 + LbrCost)  # composite-good exponent 1/(1+alpha), reused throughout
    if CRRA <= frac * LbrCost:
        print(
            "Error: make sure CRRA coefficient is strictly greater than alpha/(1+alpha)."
        )
        sys.exit()
    if BoroCnstArt is not None:
        print("Error: Model cannot handle artificial borrowing constraint yet. ")
        sys.exit()
    if vFuncBool or CubicBool is True:
        print("Error: Model cannot handle cubic interpolation yet.")
        sys.exit()
    # Unpack next period's solution and the productivity shock distribution, and define the inverse (marginal) utilty function
    # (the distribution objects expose probabilities via .pmf and support points via .X)
    vPfunc_next = solution_next.vPfunc
    TranShkPrbs = TranShkDstn.pmf
    TranShkVals = TranShkDstn.X
    PermShkPrbs = PermShkDstn.pmf
    PermShkVals = PermShkDstn.X
    TranShkCount = TranShkPrbs.size
    PermShkCount = PermShkPrbs.size
    uPinv = lambda X: CRRAutilityP_inv(X, gam=CRRA)
    # Make tiled versions of the grid of a_t values and the components of the shock distribution
    aXtraCount = aXtraGrid.size
    bNrmGrid = aXtraGrid  # Next period's bank balances before labor income
    # Replicated axtraGrid of b_t values (bNowGrid) for each transitory (productivity) shock
    bNrmGrid_rep = np.tile(np.reshape(bNrmGrid, (aXtraCount, 1)), (1, TranShkCount))
    # Replicated transitory shock values for each a_t state
    TranShkVals_rep = np.tile(np.reshape(TranShkVals, (1, TranShkCount)), (aXtraCount, 1))
    # Replicated transitory shock probabilities for each a_t state
    TranShkPrbs_rep = np.tile(np.reshape(TranShkPrbs, (1, TranShkCount)), (aXtraCount, 1))
    # Construct a function that gives marginal value of next period's bank balances *just before* the transitory shock arrives
    # Next period's marginal value at every transitory shock and every bank balances gridpoint
    vPNext = vPfunc_next(bNrmGrid_rep, TranShkVals_rep)
    # Integrate out the transitory shocks (in TranShkVals direction) to get expected vP just before the transitory shock
    vPbarNext = np.sum(vPNext * TranShkPrbs_rep, axis=1)
    # Transformed marginal value through the inverse marginal utility function to "decurve" it
    vPbarNvrsNext = uPinv(vPbarNext)
    # Linear interpolation over b_{t+1}, adding a point at minimal value of b = 0.
    vPbarNvrsFuncNext = LinearInterp(np.insert(bNrmGrid, 0, 0.0), np.insert(vPbarNvrsNext, 0, 0.0))
    # "Recurve" the intermediate marginal value function through the marginal utility function
    vPbarFuncNext = MargValueFunc(vPbarNvrsFuncNext, CRRA)
    # Get next period's bank balances at each permanent shock from each end-of-period asset values
    # Replicated grid of a_t values for each permanent (productivity) shock
    aNrmGrid_rep = np.tile(np.reshape(aXtraGrid, (aXtraCount, 1)), (1, PermShkCount))
    # Replicated permanent shock values for each a_t value
    PermShkVals_rep = np.tile(np.reshape(PermShkVals, (1, PermShkCount)), (aXtraCount, 1))
    # Replicated permanent shock probabilities for each a_t value
    PermShkPrbs_rep = np.tile(np.reshape(PermShkPrbs, (1, PermShkCount)), (aXtraCount, 1))
    bNrmNext = (Rfree / (PermGroFac * PermShkVals_rep)) * aNrmGrid_rep
    # Calculate marginal value of end-of-period assets at each a_t gridpoint
    # Get marginal value of bank balances next period at each shock
    vPbarNext = (PermGroFac * PermShkVals_rep) ** (-CRRA) * vPbarFuncNext(bNrmNext)
    # Take expectation across permanent income shocks
    EndOfPrdvP = (DiscFac * Rfree * LivPrb * np.sum(vPbarNext * PermShkPrbs_rep,
                                                    axis=1, keepdims=True))
    # Compute scaling factor for each transitory shock
    TranShkScaleFac_temp = (frac * (WageRte * TranShkGrid) ** (LbrCost * frac)
                            * (LbrCost ** (-LbrCost * frac) + LbrCost ** frac ))
    # Flip it to be a row vector
    TranShkScaleFac = np.reshape(TranShkScaleFac_temp, (1, TranShkGrid.size))
    # Use the first order condition to compute an array of "composite good" x_t values corresponding to (a_t,theta_t) values
    xNow = (np.dot(EndOfPrdvP, TranShkScaleFac)) ** (-1.0 / (CRRA - LbrCost * frac))
    # Transform the composite good x_t values into consumption c_t and leisure z_t values
    TranShkGrid_rep = np.tile(np.reshape(TranShkGrid, (1, TranShkGrid.size)), (aXtraCount, 1))
    xNowPow = xNow ** frac  # Will use this object multiple times in math below
    # Find optimal consumption from optimal composite good
    cNrmNow = (((WageRte * TranShkGrid_rep) / LbrCost) ** (LbrCost * frac)) * xNowPow
    # Find optimal leisure from optimal composite good
    LsrNow = (LbrCost / (WageRte * TranShkGrid_rep)) ** frac * xNowPow
    # The zero-th transitory shock is TranShk=0, and the solution is to not work: Lsr = 1, Lbr = 0.
    cNrmNow[:, 0] = uPinv(EndOfPrdvP.flatten())
    LsrNow[:, 0] = 1.0
    # Agent cannot choose to work a negative amount of time. When this occurs, set
    # leisure to one and recompute consumption using simplified first order condition.
    # Find where labor would be negative if unconstrained
    violates_labor_constraint = (LsrNow > 1.0)
    EndOfPrdvP_temp = np.tile(np.reshape(EndOfPrdvP, (aXtraCount, 1)), (1, TranShkCount))
    cNrmNow[violates_labor_constraint] = uPinv(EndOfPrdvP_temp[violates_labor_constraint])
    LsrNow[violates_labor_constraint] = 1.0  # Set up z=1, upper limit
    # Calculate the endogenous bNrm states by inverting the within-period transition
    aNrmNow_rep = np.tile(np.reshape(aXtraGrid, (aXtraCount, 1)), (1, TranShkGrid.size))
    bNrmNow = aNrmNow_rep - WageRte * TranShkGrid_rep + cNrmNow + WageRte * TranShkGrid_rep * LsrNow
    # Add an extra gridpoint at the absolute minimal valid value for b_t for each TranShk;
    # this corresponds to working 100% of the time and consuming nothing.
    bNowArray = np.concatenate(
        (np.reshape(-WageRte * TranShkGrid, (1, TranShkGrid.size)), bNrmNow), axis=0
    )
    # Consume nothing
    cNowArray = np.concatenate((np.zeros((1, TranShkGrid.size)), cNrmNow), axis=0)
    # And no leisure!
    LsrNowArray = np.concatenate((np.zeros((1, TranShkGrid.size)), LsrNow), axis=0)
    LsrNowArray[0, 0] = 1.0  # Don't work at all if TranShk=0, even if bNrm=0
    LbrNowArray = 1.0 - LsrNowArray  # Labor is the complement of leisure
    # Get (pseudo-inverse) marginal value of bank balances using end of period
    # marginal value of assets (envelope condition), adding a column of zeros
    # zeros on the left edge, representing the limit at the minimum value of b_t.
    vPnvrsNowArray = np.concatenate((np.zeros((1, TranShkGrid.size)), uPinv(EndOfPrdvP_temp)))
    # Construct consumption and marginal value functions for this period
    bNrmMinNow = LinearInterp(TranShkGrid, bNowArray[0, :])
    # Loop over each transitory shock and make a linear interpolation to get lists
    # of optimal consumption, labor and (pseudo-inverse) marginal value by TranShk
    cFuncNow_list = []
    LbrFuncNow_list = []
    vPnvrsFuncNow_list = []
    for j in range(TranShkGrid.size):
        # Adjust bNrmNow for this transitory shock, so bNrmNow_temp[0] = 0
        bNrmNow_temp = (bNowArray[:, j] - bNowArray[0, j])
        # Make consumption function for this transitory shock
        cFuncNow_list.append(LinearInterp(bNrmNow_temp, cNowArray[:, j]))
        # Make labor function for this transitory shock
        LbrFuncNow_list.append(LinearInterp(bNrmNow_temp, LbrNowArray[:, j]))
        # Make pseudo-inverse marginal value function for this transitory shock
        vPnvrsFuncNow_list.append(LinearInterp(bNrmNow_temp, vPnvrsNowArray[:, j]))
    # Make linear interpolation by combining the lists of consumption, labor and marginal value functions
    cFuncNowBase = LinearInterpOnInterp1D(cFuncNow_list, TranShkGrid)
    LbrFuncNowBase = LinearInterpOnInterp1D(LbrFuncNow_list, TranShkGrid)
    vPnvrsFuncNowBase = LinearInterpOnInterp1D(vPnvrsFuncNow_list, TranShkGrid)
    # Construct consumption, labor, pseudo-inverse marginal value functions with
    # bNrmMinNow as the lower bound. This removes the adjustment in the loop above.
    cFuncNow = VariableLowerBoundFunc2D(cFuncNowBase, bNrmMinNow)
    LbrFuncNow = VariableLowerBoundFunc2D(LbrFuncNowBase, bNrmMinNow)
    vPnvrsFuncNow = VariableLowerBoundFunc2D(vPnvrsFuncNowBase, bNrmMinNow)
    # Construct the marginal value function by "recurving" its pseudo-inverse
    vPfuncNow = MargValueFunc2D(vPnvrsFuncNow, CRRA)
    # Make a solution object for this period and return it
    solution = ConsumerLaborSolution(cFunc=cFuncNow,
                                     LbrFunc=LbrFuncNow,
                                     vPfunc=vPfuncNow,
                                     bNrmMin=bNrmMinNow
                                     )
    return solution
def append_child(node, child):
    """Appends *child* to *node*'s children
    Returns:
        int: 1 on success, 0 on failure
    """
    # Thin wrapper: delegate directly to the C library binding.
    result = _cmark.node_append_child(node, child)
    return result
def set_memory(instance):
    """Set the amount of RAM, in MB, that the virtual machine
    should allocate for itself from the host.
    :param instance: nova.objects.instance.Instance
    """
    # Refuse the allocation when the host cannot satisfy the request.
    available = get_host_info()[constants.HOST_MEMORY_AVAILABLE]
    if instance.memory_mb > available:
        raise nova_exception.InsufficientFreeMemory(uuid=instance.uuid)
    manage.VBoxManage.modify_vm(instance, constants.FIELD_MEMORY,
                                instance.memory_mb)
def test_spearman_regression(spearman_evaluator, targets, preds, uncs, spearman_exp):
    """
    Tests the result of the spearman rank correlation UncertaintyEvaluator.
    """
    result = spearman_evaluator.evaluate(targets, preds, uncs)
    np.testing.assert_array_almost_equal(result, spearman_exp)
def run_chain(init_part, chaintype, length, ideal_population, id, tag):
    """Runs a Recom chain, and saves the seats won histogram to a file and
    returns the most Gerrymandered plans for both PartyA and PartyB
    Args:
        init_part (Gerrychain Partition): initial partition of chain
        chaintype (String): indicates which proposal to be used to generate
            spanning tree during Recom. Must be either "tree" or "uniform_tree"
        length (int): total steps of chain
        ideal_population (number): target district population passed to the
            Recom proposal
        id (String): id of experiment, used when printing progress
        tag (String): tag added to filename to identify run
    Raises:
        RuntimeError: If chaintype is not "tree" nor 'uniform_tree"
    Returns:
        list of partitions generated by chain
    """
    graph = init_part.graph
    # Initialize per-edge counters used to record how often each edge is cut.
    for edge in graph.edges():
        graph.edges[edge]['cut_times'] = 0
        graph.edges[edge]['sibling_cuts'] = 0
        if 'siblings' not in graph.edges[edge]:
            graph.edges[edge]['siblings'] = tuple([edge])
    popbound = within_percent_of_ideal_population(init_part, config['EPSILON'])
    # Determine proposal for generating spanning tree based upon parameter
    if chaintype == "tree":
        tree_proposal = partial(recom, pop_col=config["POP_COL"], pop_target=ideal_population,
                                epsilon=config['EPSILON'], node_repeats=config['NODE_REPEATS'],
                                method=facefinder.my_mst_bipartition_tree_random)
    elif chaintype == "uniform_tree":
        tree_proposal = partial(recom, pop_col=config["POP_COL"], pop_target=ideal_population,
                                epsilon=config['EPSILON'], node_repeats=config['NODE_REPEATS'],
                                method=facefinder.my_uu_bipartition_tree_random)
    else:
        print("Chaintype used: ", chaintype)
        raise RuntimeError("Chaintype not recognized. Use 'tree' or 'uniform_tree' instead")
    # Chain to be run
    chain = MarkovChain(tree_proposal, Validator([popbound]), accept=accept.always_accept, initial_state=init_part,
                        total_steps=length)
    # Map statistic names to extractors over a partition's election results.
    # NOTE(review): 'seats' and 'won' map to the same quantity -- confirm intentional.
    electionDict = {
        'seats' : (lambda x: x[config['ELECTION_NAME']].seats('PartyA')),
        'won' : (lambda x: x[config['ELECTION_NAME']].seats('PartyA')),
        'efficiency_gap' : (lambda x: x[config['ELECTION_NAME']].efficiency_gap()),
        'mean_median' : (lambda x: x[config['ELECTION_NAME']].mean_median()),
        'mean_thirdian' : (lambda x: x[config['ELECTION_NAME']].mean_thirdian()),
        'partisan_bias' : (lambda x: x[config['ELECTION_NAME']].partisan_bias()),
        'partisan_gini' : (lambda x: x[config['ELECTION_NAME']].partisan_gini())
    }
    # Run chain, save each desired statistic, and keep track of cuts. Save most
    # left gerrymandered partition
    statistics = {statistic : [] for statistic in config['ELECTION_STATISTICS']}
    # Value of a partition is determined by each of the Gerry Statistics.
    # Lexicographical ordering is used, such that if two partitions have the same
    # value under the first Gerry Statistic, then the second is used as a tie
    # breaker, and so on.
    leftManderVal = [float('inf')] * len(config['GERRY_STATISTICS'])
    leftMander = None
    for i, partition in enumerate(chain):
        for edge in partition["cut_edges"]:
            graph.edges[edge]['cut_times'] += 1
            for sibling in graph.edges[edge]['siblings']:
                graph.edges[sibling]['sibling_cuts'] += 1
        # Save statistics of partition
        for statistic in config['ELECTION_STATISTICS']:
            statistics[statistic].append(electionDict[statistic](partition))
        # Update left mander if applicable
        curPartVal = [electionDict[statistic](partition)
                      for statistic in config['GERRY_STATISTICS']]
        if curPartVal < leftManderVal:
            leftManderVal = curPartVal
            leftMander = partition
        if i % 500 == 0:
            # Periodic progress report for long chains.
            print('{}: {}'.format(id, i))
    saveRunStatistics(statistics, tag)
    return leftMander
def cliquenet_s2(**kwargs):
    """CliqueNet-S2"""
    # Fixed S2 configuration; extra kwargs are accepted but unused.
    return cliquenet(input_channels=64, list_channels=[36, 80, 150, 120], list_layer_num=[5, 5, 6, 6])
def decConvert(dec):
    """
    This is a number-word converter, but for decimals.

    Parameters
    -----
    dec:str
        The two decimal digits, e.g. "25" for .25; dec[0] is the first
        decimal place, dec[1] the second.

    Returns
    -----
    :str
        ' and <words>' for the decimal part. When the second decimal place
        is zero the tens word is used (e.g. "50" -> ' and fifty'); otherwise
        both digits are spelled out (e.g. "25" -> ' and two five').
    """
    numEngA = {
        0: 'zero', 1: 'one', 2: 'two', 3: 'three', 4: 'four',
        5: 'five', 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine',
    }
    numEngB = {
        1: 'ten', 2: 'twenty', 3: 'thirty', 4: 'fourty',
        5: 'fifty', 6: 'sixty', 7: 'seventy', 8: 'eighty', 9: 'ninety',
    }
    frstDP = int(dec[0])
    scndDP = int(dec[1])
    # Bug fix: the original conditional expression had its branches swapped --
    # it spelled out both digits when the second place was zero, and silently
    # dropped the second digit (returning only the tens word) when it was not.
    if not scndDP:
        if frstDP:
            # Trailing zero: use the tens word, e.g. ".50" -> "fifty".
            return ' and ' + numEngB[frstDP]
        # ".00" has no tens word; spell the digits.
        return ' and ' + numEngA[frstDP] + ' ' + numEngA[scndDP]
    return ' and ' + numEngA[frstDP] + ' ' + numEngA[scndDP]
def train_model(base_model, training_dataset, validation_dataset, output_dir,
                loss=None, num_epochs=100, patience=20,
                learning_rate=1e-4):
    """
    Train a model with the given data.

    Parameters
    ----------
    base_model : keras model
        The model to train; it is compiled here with the Adam optimizer.
    training_dataset
        Training data consumable by ``model.fit``.
    validation_dataset
        Validation data consumable by ``model.fit``.
    output_dir : str
        Directory for per-epoch checkpoints and the history CSV
        (created if it does not exist).
    loss
        Loss function/identifier passed to ``model.compile``.
    num_epochs : int
        Maximum number of training epochs.
    patience : int
        Early-stopping patience in epochs (monitored on val_loss).
    learning_rate : float
        Adam learning rate.

    Returns
    -------
    history
        The history object returned by ``model.fit``.
    """
    os.makedirs(output_dir, exist_ok=True)
    # Set up callbacks
    cb = []
    # Checkpoint every epoch, embedding epoch number and val_loss in the name.
    # Bug fix: the path was previously built with a hard-coded '\\' separator,
    # which only works on Windows; use os.path.join for portability.
    checkpoint_path = os.path.join(output_dir, '{epoch:02d}-{val_loss:.2f}_model_best.h5')
    cb.append(tf.keras.callbacks.ModelCheckpoint(checkpoint_path, verbose=1,
                                                 save_weights_only=True,
                                                 save_best_only=False,
                                                 monitor='val_loss'))  # val_loss
    cb.append(tf.keras.callbacks.EarlyStopping(monitor='val_loss', verbose=1,
                                               patience=patience))
    history_csv_file = os.path.join(output_dir, 'history.csv')
    cb.append(tf.keras.callbacks.CSVLogger(history_csv_file, append=True,
                                           separator=','))
    # model = ModelMGPU(base_model, gpus=2) FIXME: multi-GPU wrapper disabled
    model = base_model
    model.compile(Adam(learning_rate=learning_rate), loss=loss)
    history = model.fit(training_dataset, validation_data=validation_dataset,
                        # steps_per_epoch=846,
                        epochs=num_epochs, callbacks=cb, verbose=1, shuffle=False, use_multiprocessing=True,
                        workers=8)
    return history
def fit(data, weights, model_id, initial_parameters, tolerance=None, max_number_iterations=None, \
        parameters_to_fit=None, estimator_id=None, user_info=None):
    """
    Calls the C interface fit function in the library.
    (see also http://gpufit.readthedocs.io/en/latest/bindings.html#python)
    All 2D NumPy arrays must be in row-major order (standard in NumPy), i.e. array.flags.C_CONTIGUOUS must be True
    (see also https://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html#internal-memory-layout-of-an-ndarray)
    :param data: The data - 2D NumPy array of dimension [number_fits, number_points] and data type np.float32
    :param weights: The weights - 2D NumPy array of the same dimension and data type as parameter data or None (no weights available)
    :param model_id: The model ID
    :param initial_parameters: Initial values for parameters - NumPy array of dimension [number_fits, number_parameters] and data type np.float32
    :param tolerance: The fit tolerance or None (will use default value)
    :param max_number_iterations: The maximal number of iterations or None (will use default value)
    :param parameters_to_fit: Which parameters to fit - NumPy array of length number_parameters and type np.int32 or None (will fit all parameters)
    :param estimator_id: The Estimator ID or None (will use default values)
    :param user_info: User info - NumPy array of type np.char or None (no user info available)
    :return: parameters, states, chi_squares, number_iterations, execution_time
    """
    # check all 2D NumPy arrays for row-major memory layout (otherwise interpretation of order of dimensions fails)
    if not data.flags.c_contiguous:
        raise RuntimeError('Memory layout of data array mismatch.')
    if weights is not None and not weights.flags.c_contiguous:
        raise RuntimeError('Memory layout of weights array mismatch.')
    if not initial_parameters.flags.c_contiguous:
        raise RuntimeError('Memory layout of initial_parameters array mismatch.')
    # size check: data is 2D and read number of points and fits
    if data.ndim != 2:
        raise RuntimeError('data is not two-dimensional')
    number_points = data.shape[1]
    number_fits = data.shape[0]
    # size check: consistency with weights (if given)
    if weights is not None and data.shape != weights.shape:
        raise RuntimeError('dimension mismatch between data and weights')
    # the unequal operator checks, type, length and content (https://docs.python.org/3.7/reference/expressions.html#value-comparisons)
    # size check: initial parameters is 2D and read number of parameters
    if initial_parameters.ndim != 2:
        raise RuntimeError('initial_parameters is not two-dimensional')
    number_parameters = initial_parameters.shape[1]
    if initial_parameters.shape[0] != number_fits:
        raise RuntimeError('dimension mismatch in number of fits between data and initial_parameters')
    # size check: consistency with parameters_to_fit (if given)
    if parameters_to_fit is not None and parameters_to_fit.shape[0] != number_parameters:
        raise RuntimeError(
            'dimension mismatch in number of parameters between initial_parameters and parameters_to_fit')
    # default value: tolerance
    if not tolerance:
        tolerance = 1e-4
    # default value: max_number_iterations
    if not max_number_iterations:
        max_number_iterations = 25
    # default value: estimator ID
    if not estimator_id:
        estimator_id = EstimatorID.LSE
    # default value: parameters_to_fit (fit every parameter)
    if parameters_to_fit is None:
        parameters_to_fit = np.ones(number_parameters, dtype=np.int32)
    # now only weights and user_info could be not given
    # type check: data, weights (if given), initial_parameters are all np.float32
    if data.dtype != np.float32:
        raise RuntimeError('type of data is not np.float32')
    if weights is not None and weights.dtype != np.float32:
        raise RuntimeError('type of weights is not np.float32')
    if initial_parameters.dtype != np.float32:
        raise RuntimeError('type of initial_parameters is not np.float32')
    # type check: parameters_to_fit is np.int32
    if parameters_to_fit.dtype != np.int32:
        raise RuntimeError('type of parameters_to_fit is not np.int32')
    # type check: valid model and estimator id
    if not _valid_id(ModelID, model_id):
        raise RuntimeError('Invalid model ID, use an attribute of ModelID')
    if not _valid_id(EstimatorID, estimator_id):
        raise RuntimeError('Invalid estimator ID, use an attribute of EstimatorID')
    # we don't check type of user_info, but we extract the size in bytes of it
    if user_info is not None:
        user_info_size = user_info.nbytes
    else:
        user_info_size = 0
    # pre-allocate output variables (filled in place by the C library)
    parameters = np.zeros((number_fits, number_parameters), dtype=np.float32)
    states = np.zeros(number_fits, dtype=np.int32)
    chi_squares = np.zeros(number_fits, dtype=np.float32)
    number_iterations = np.zeros(number_fits, dtype=np.int32)
    # conversion to ctypes types for optional C interface parameters using NULL pointer (None) as default argument
    if weights is not None:
        weights_p = weights.ctypes.data_as(gpufit_func.argtypes[3])
    else:
        weights_p = None
    if user_info is not None:
        user_info_p = user_info.ctypes.data_as(gpufit_func.argtypes[11])
    else:
        user_info_p = None
    # call into the library (measure time)
    # gpufit_func.argtypes[i] is the ctypes type expected at positional index i;
    # each argument is wrapped/cast to that exact type before the call.
    t0 = time.perf_counter()
    status = gpufit_func(
        gpufit_func.argtypes[0](number_fits), \
        gpufit_func.argtypes[1](number_points), \
        data.ctypes.data_as(gpufit_func.argtypes[2]), \
        weights_p, \
        gpufit_func.argtypes[4](model_id), \
        initial_parameters.ctypes.data_as(gpufit_func.argtypes[5]), \
        gpufit_func.argtypes[6](tolerance), \
        gpufit_func.argtypes[7](max_number_iterations), \
        parameters_to_fit.ctypes.data_as(gpufit_func.argtypes[8]), \
        gpufit_func.argtypes[9](estimator_id), \
        gpufit_func.argtypes[10](user_info_size), \
        user_info_p, \
        parameters.ctypes.data_as(gpufit_func.argtypes[12]), \
        states.ctypes.data_as(gpufit_func.argtypes[13]), \
        chi_squares.ctypes.data_as(gpufit_func.argtypes[14]), \
        number_iterations.ctypes.data_as(gpufit_func.argtypes[15]))
    t1 = time.perf_counter()
    # check status
    if status != Status.Ok:
        # get error from last error and raise runtime error
        error_message = error_func()
        raise RuntimeError('status = {}, message = {}'.format(status, error_message))
    # return output values
    return parameters, states, chi_squares, number_iterations, t1 - t0
def json_get(cid, item):
    """Get *item* from the per-user JSON settings file ``data/<cid>.json``.

    :param cid: chat/user identifier used to locate the settings file
    :param item: key to look up in the loaded settings dict
    :return: the stored value
    :raises FileNotFoundError: if the settings file does not exist
    :raises KeyError: if *item* is not present in the file
    """
    # f-string instead of old-style %-formatting; explicit encoding so the
    # read does not depend on the platform's locale default.
    with open(f'data/{cid}.json', encoding='utf-8') as f:
        user = json.load(f)
    return user[item]
def debug_info():
    """ This function varies version-by-version, designed to help the authors of this package when there's an issue.
    Returns:
        A dictionary that contains debug info across the interpret package.
    """
    from . import __version__, status_show_server
    # Collect the package version, server status, and both system-info reports.
    return {
        "interpret.__version__": __version__,
        "interpret.status_show_server": status_show_server(),
        "interpret.static_system_info": static_system_info(),
        "interpret.dynamic_system_info": dynamic_system_info(),
    }
def adaptive_monte_carlo(func, z_min, z_max, epsilon):
    """
    Perform adaptive Monte Carlo algorithm to a specific function. Uniform random variable is used in this case.
    The calculation starts from 10 division of the original function range. Each step, it will divide the region which has the largest variance.
    Input:
        func: the function of integrand
        z_min: lower limit of the integration
        z_max: upper limit of the integration
        epsilon: desired relative accuracy of the result
    Returns:
        new_I: numerical integral with required relative accuracy
        err: error of estimation of the integral
        evaluations: count of function evaluations"""
    # However, we can speed up this small sampling process inside each
    # sub-interval
    @jit(nopython=True)
    def loop(upper, lower, func, sampling_size):
        # Draw sampling_size uniform points in [lower, upper] and evaluate func.
        elements = []
        for _ in range(sampling_size):
            z = random.uniform(lower, upper)
            elements.append(func(z))
        return elements
    def monte_carlo():  # Monte Carlo integration in each of the sub-interval
        var_array = []
        I_array = []
        for i in range(len(intervals) - 1):
            # random sampling in each of the interval
            elements = loop(
                intervals[i], intervals[i + 1], func, sampling_size)
            # integral of segment of integration
            average = sum(elements) / sampling_size
            # weight of integral is correspond to the width of the sub-interval
            weight = intervals[i + 1] - intervals[i]
            I_array.append(weight * average)  # add up the integral value
            # calculate the variance of this segment of integration
            var = sum((elements[i] - average)**2 for i in range(sampling_size))
            var_array.append(var)  # add variance to the array
        # return the integral value and variance of each sub-interval in an
        # array
        return I_array, var_array
    evaluation = 0
    n = 10  # number of divisions
    sampling_size = 100  # 1000 sampling points in each division
    # Initial trail
    intervals = np.linspace(z_min, z_max, n)
    I_array, var_array = monte_carlo()
    evaluation += (len(intervals) - 1) * sampling_size
    new_I = sum(I_array)
    relative_accuracy = 1  # assign a non-zero value of initial relative accuracy
    # NOTE(review): if old_I happens to be exactly 0 the relative-accuracy
    # division below raises ZeroDivisionError -- confirm inputs avoid this.
    while relative_accuracy >= epsilon and relative_accuracy != 0:
        old_I = new_I
        # adaption
        # find the index of the largest variance
        largest_var_index = var_array.index(max(var_array))
        # removing the result of section with largest variance
        I_array = np.delete(I_array, largest_var_index)
        var_array = np.delete(var_array, largest_var_index)
        # divide sub-interval with the largest variance into 10 more
        # sub-intervals
        intervals = np.insert(intervals,
                              largest_var_index + 1,
                              np.linspace(intervals[largest_var_index],
                                          intervals[largest_var_index + 1],
                                          n,
                                          endpoint=False))
        intervals = np.delete(intervals, largest_var_index)
        # run Monte Carlo in the new intervals
        I_array, var_array = monte_carlo()
        new_I = sum(I_array)
        # calculate relative accuracy
        relative_accuracy = abs((new_I - old_I) / old_I)
        # amount of evaluations increases by the number of intervals * random
        # points in each interval
        evaluation += (len(intervals) - 1) * sampling_size
        # print((len(intervals)-1)*sampling_size,new_I,relative_accuracy) #
        # show realtime evaluations
    err = 0
    for i in range(len(intervals) - 1):
        # sum up the variance of each interval
        err += ((intervals[i + 1] - intervals[i]) /
                (z_max - z_min))**2 * var_array[i]
    # divide the standard deviation by sqrt of n to get standard error (error
    # of estimation)
    err = np.sqrt(err / (len(intervals) * sampling_size))
    return new_I, err, evaluation
def compute_statistics(measured_values, estimated_values):
    """Calculates a collection of common statistics comporaring the measured
    and estimated values.
    Parameters
    ----------
    measured_values: numpy.ndarray
        The experimentally measured values with shape=(number of data points)
    estimated_values: numpy.ndarray
        The computationally estimated values with shape=(number of data points)
    Returns
    -------
    numpy.ndarray
        An array of the summarised statistics, containing the
        Slope, Intercept, R, R^2, p, RMSE, MSE, MUE, Tau
    list of str
        Human readable labels for each of the statistics.
    """
    import scipy.stats
    statistics_labels = [
        Statistics.Slope,
        Statistics.Intercept,
        Statistics.R,
        Statistics.R2,
        Statistics.P,
        Statistics.RMSE,
        Statistics.MSE,
        Statistics.MUE,
        Statistics.Tau
    ]
    summary_statistics = np.zeros(len(statistics_labels))
    (
        summary_statistics[0],  # slope of the linear regression
        summary_statistics[1],  # intercept
        summary_statistics[2],  # Pearson correlation coefficient R
        summary_statistics[4],  # two-sided p-value
        _  # standard error of the slope (unused)
    ) = scipy.stats.linregress(measured_values, estimated_values)
    summary_statistics[3] = summary_statistics[2] ** 2  # R^2
    summary_statistics[5] = np.sqrt(np.mean((estimated_values - measured_values) ** 2))  # RMSE
    # NOTE(review): labelled Statistics.MSE but computed as the *mean signed
    # error*, not the mean squared error -- confirm the label's intent.
    summary_statistics[6] = np.mean(estimated_values - measured_values)
    summary_statistics[7] = np.mean(np.absolute(estimated_values - measured_values))  # mean unsigned error
    summary_statistics[8], _ = scipy.stats.kendalltau(measured_values, estimated_values)  # Kendall tau
    return summary_statistics, statistics_labels
def num_encode(n):
    """Convert an integer to a base62 encoded string."""
    # Negative numbers: emit the sign character, then encode the magnitude.
    if n < 0:
        return SIGN_CHARACTER + num_encode(-n)
    encoded = []
    while True:
        n, digit = divmod(n, BASE)
        encoded.append(ALPHABET[digit])
        if n == 0:
            break
    # Digits were produced least-significant first.
    encoded.reverse()
    return u''.join(encoded)
def hard_example_mining(dist_mat, labels, return_inds=False):
    """For each anchor, find the hardest positive and negative sample.

    Args:
      dist_mat: pytorch Variable, pair wise distance between samples, shape [N, N]
      labels: pytorch LongTensor, with shape [N]
      return_inds: whether to return the indices. Save time if `False`(?)
    Returns:
      dist_ap: pytorch Variable, distance(anchor, positive); shape [N]
      dist_an: pytorch Variable, distance(anchor, negative); shape [N]
      p_inds: pytorch LongTensor, with shape [N];
        indices of selected hard positive samples; 0 <= p_inds[i] <= N - 1
      n_inds: pytorch LongTensor, with shape [N];
        indices of selected hard negative samples; 0 <= n_inds[i] <= N - 1
    NOTE: Only consider the case in which all labels have same num of samples,
      thus we can cope with all anchors in parallel.
    """
    assert len(dist_mat.size()) == 2
    assert dist_mat.size(0) == dist_mat.size(1)
    N = dist_mat.size(0)
    # (Removed a leftover debug print of N.)

    # shape [N, N]; True where the pair shares (resp. does not share) a label.
    is_pos = labels.expand(N, N).eq(labels.expand(N, N).t())
    is_neg = labels.expand(N, N).ne(labels.expand(N, N).t())

    # `dist_ap` means distance(anchor, positive);
    # both `dist_ap` and `relative_p_inds` with shape [N, 1].
    dist_ap, relative_p_inds = torch.max(
        dist_mat[is_pos].contiguous().view(N, -1), 1, keepdim=True)
    # `dist_an` means distance(anchor, negative);
    # both `dist_an` and `relative_n_inds` with shape [N, 1].
    dist_an, relative_n_inds = torch.min(
        dist_mat[is_neg].contiguous().view(N, -1), 1, keepdim=True)
    # shape [N]
    dist_ap = dist_ap.squeeze(1)
    dist_an = dist_an.squeeze(1)

    if return_inds:
        # Recover absolute sample indices from the per-anchor relative ones.
        # shape [N, N]
        ind = (labels.new().resize_as_(labels)
               .copy_(torch.arange(0, N).long())
               .unsqueeze(0).expand(N, N))
        # shape [N, 1]
        p_inds = torch.gather(
            ind[is_pos].contiguous().view(N, -1), 1, relative_p_inds.data)
        n_inds = torch.gather(
            ind[is_neg].contiguous().view(N, -1), 1, relative_n_inds.data)
        # shape [N]
        p_inds = p_inds.squeeze(1)
        n_inds = n_inds.squeeze(1)
        return dist_ap, dist_an, p_inds, n_inds

    return dist_ap, dist_an
def failure_comment(request, comment_id):
    """Reject a review, recording the reason for the rejection.

    (Docstring translated from Russian: "Rejects the review with an
    indication of the reason".)
    """
    user = request.user
    # Only comments addressed to the current user that are not yet rejected.
    comment = Comment.objects.get(recipient_user=user.id, id=comment_id, failure=False)
    form = AcceptForm(request.POST or None, initial=model_to_dict(comment), instance=comment)
    if form.is_valid():
        new_failure = form.save()
        user = User.objects.get(id=request.user.id)
        user.useraccept.failure = True
        user.useraccept.save()
        # for/else: the else block runs only when the loop did NOT break,
        # i.e. every recipient has its "failure" flag set.
        for user in comment.recipient_user.all():
            users = User.objects.get(id=user.id)
            if not users.useraccept.failure:
                break
        else:
            new_failure.failure = True
            recipient_users = construct_message(comment)
            data_comment = f'''"
Status - failure, user - {comment.user}, recipient_users - {recipient_users}, comment - {comment.comment_for_rating},
reason - {new_failure.failure_text}"'''
            # NOTE(review): shelling out to curl with user-derived content
            # interpolated into the command line is vulnerable to shell
            # injection — confirm these inputs are trusted.
            command = r'''curl -H "Content-type:application/json" --data '{"data":''' + data_comment + r'''}' http://localhost:3001/mineBlock'''
            os.system(command)
            new_failure.save()
            return HttpResponseRedirect(reverse_lazy('comment_list'))
        return HttpResponseRedirect(reverse_lazy('comment_list'))
    return render(request, 'failure_form.html',
                  {'form': form, 'comment': comment})
def CodedVideoGrain(src_id_or_meta=None,
                    flow_id_or_data=None,
                    origin_timestamp=None,
                    creation_timestamp=None,
                    sync_timestamp=None,
                    rate=Fraction(25, 1),
                    duration=Fraction(1, 25),
                    cog_frame_format=CogFrameFormat.UNKNOWN,
                    origin_width=1920,
                    origin_height=1080,
                    coded_width=None,
                    coded_height=None,
                    is_key_frame=False,
                    temporal_offset=0,
                    length=None,
                    cog_frame_layout=CogFrameLayout.UNKNOWN,
                    unit_offsets=None,
                    src_id=None,
                    source_id=None,
                    format=None,
                    layout=None,
                    flow_id=None,
                    data=None):
    """\
    Function called to construct a coded video grain either from existing data or with new data.

    First method of calling:

        CodedVideoGrain(meta, data)

    where meta is a dictionary containing the grain metadata, and data is a bytes-like
    object which contains the grain's payload.

    Optionally the data element can be replaced with an Awaitable that will return a
    data element when awaited. This is useful for grains that are backed with some
    sort of asynchronous IO system.

    A properly formated metadata dictionary for a Video Grain should look like:

        {
            "@_ns": "urn:x-ipstudio:ns:0.1",
            "grain": {
                "grain_type": "audio",
                "source_id": src_id, # str or uuid.UUID
                "flow_id": flow_id, # str or uuid.UUID
                "origin_timestamp": origin_timestamp, # str or mediatimestamps.Timestamp
                "sync_timestamp": sync_timestamp, # str or mediatimestamps.Timestamp
                "creation_timestamp": creation_timestamp, # str or mediatimestamps.Timestamp
                "rate": {
                    "numerator": 0, # int
                    "denominator": 1, # int
                },
                "duration": {
                    "numerator": 0, # int
                    "denominator": 1, # int
                },
                "cog_coded_frame": {
                    "format": cog_frame_format, # int or CogFrameFormat
                    "origin_width": origin_width, # int
                    "origin_height": origin_height, # int
                    "coded_width": coded_width, # int
                    "coded_height": coded_height, # int
                    "layout": cog_frame_layout, # int or CogFrameLayout
                    "is_key_frame": False, # bool
                    "temporal_offset": temporal_offset, # int
                    "unit_offsets": [0, 16, 27] # list of int (optional)
                }
            }
        }

    Alternatively it may be called as:

        CodedVideoGrain(src_id, flow_id,
                        origin_timestamp=None,
                        sync_timestamp=None,
                        rate=Fraction(25, 1),
                        duration=Fraction(1, 25),
                        cog_frame_format=CogFrameFormat.UNKNOWN,
                        origin_width=1920,
                        origin_height=1080,
                        is_key_frame=False,
                        coded_width=None,
                        coded_height=None,
                        temporal_offset=0,
                        length=None,
                        cog_frame_layout=CogFrameLayout.UNKNOWN,
                        unit_offsets=None,
                        data=None):

    in which case a new grain will be constructed with type "coded_video" and the
    specified metadata. If the data argument is None and the length argument is not
    then a new bytearray object will be constructed with size equal to length.

    In either case the value returned by this function will be an instance of the
    class mediagrains.grain.CODEDVIDEOGRAIN

    (the parameters "source_id" and "src_id" are aliases for each other. source_id is probably prefered,
    but src_id is kept avaialble for backwards compatibility)
    """
    meta: Optional[CodedVideoGrainMetadataDict] = None

    # Resolve keyword aliases: format/layout/source_id are alternate names
    # kept for backwards compatibility.
    if cog_frame_format is None:
        cog_frame_format = format
    if source_id is not None:
        src_id = source_id
    if cog_frame_layout is None:
        cog_frame_layout = layout

    if isinstance(src_id_or_meta, dict):
        # First calling convention: CodedVideoGrain(meta, data).
        meta = cast(CodedVideoGrainMetadataDict, src_id_or_meta)
        if data is None and not isinstance(flow_id_or_data, UUID):
            data = flow_id_or_data
    else:
        # Second calling convention: CodedVideoGrain(src_id, flow_id, ...).
        if src_id is None and isinstance(src_id_or_meta, UUID):
            src_id = src_id_or_meta
        if flow_id is None and isinstance(flow_id_or_data, UUID):
            flow_id = flow_id_or_data

    # Coded dimensions default to the origin dimensions.
    if coded_width is None:
        coded_width = origin_width
    if coded_height is None:
        coded_height = origin_height

    if length is None:
        if data is not None and hasattr(data, "__len__"):
            length = len(cast(Sized, data))
        else:
            length = 0

    if meta is None:
        if src_id is None or flow_id is None:
            raise AttributeError("Must include either metadata, or src_id, and flow_id")

        # Default the timestamps: creation -> origin -> sync cascade.
        cts = creation_timestamp
        if cts is None:
            cts = Timestamp.get_time()
        if origin_timestamp is None:
            origin_timestamp = cts
        if sync_timestamp is None:
            sync_timestamp = origin_timestamp
        # NOTE(review): the docstring example shows grain_type "audio", but
        # the constructed metadata uses "coded_video" — the docstring example
        # looks copy-pasted; the code below is authoritative.
        meta = {
            "@_ns": "urn:x-ipstudio:ns:0.1",
            "grain": {
                "grain_type": "coded_video",
                "source_id": str(src_id),
                "flow_id": str(flow_id),
                "origin_timestamp": str(mediatimestamp(origin_timestamp)),
                "sync_timestamp": str(mediatimestamp(sync_timestamp)),
                "creation_timestamp": str(mediatimestamp(cts)),
                "rate": {
                    "numerator": Fraction(rate).numerator,
                    "denominator": Fraction(rate).denominator,
                },
                "duration": {
                    "numerator": Fraction(duration).numerator,
                    "denominator": Fraction(duration).denominator,
                },
                "cog_coded_frame": {
                    "format": cog_frame_format,
                    "origin_width": origin_width,
                    "origin_height": origin_height,
                    "coded_width": coded_width,
                    "coded_height": coded_height,
                    "layout": cog_frame_layout,
                    "is_key_frame": is_key_frame,
                    "temporal_offset": temporal_offset
                }
            },
        }

    if data is None:
        # Allocate an empty payload of the requested length.
        data = bytearray(length)

    # unit_offsets is optional and only attached when the metadata has the
    # cog_coded_frame section to hold it.
    if "grain" in meta and "cog_coded_frame" in meta['grain'] and unit_offsets is not None:
        meta['grain']['cog_coded_frame']['unit_offsets'] = unit_offsets

    return CODEDVIDEOGRAIN(meta, data)
def test_log_likelihood(model, X_test, y_test):
    """ Marginal log likelihood for GPy model on test data"""
    # Run the model's inference method on the held-out data; the second
    # element of the returned triple is the marginal log likelihood.
    # NOTE(review): assumes the model exposes kern.rbf_1 and
    # likelihood.Gaussian_noise_1 sub-components — confirm against the
    # model-construction code.
    _, test_log_likelihood, _ = model.inference_method.inference(
        model.kern.rbf_1, X_test, model.likelihood.Gaussian_noise_1, y_test,
        model.mean_function, model.Y_metadata)
    return test_log_likelihood
def test_object_buffered_base_io():
    """Tests airfs._core.io_buffered.ObjectBufferedIOBase

    Builds dummy raw/buffered IO subclasses backed by in-memory bytearrays,
    then exercises buffered reads (sequential, by parts, after seeks) and
    buffered/raw writes (auto flush, manual flush, close, part flush).
    """
    from airfs._core.io_base_raw import ObjectRawIOBase
    from airfs._core.io_base_buffered import ObjectBufferedIOBase
    from airfs._core.io_random_write import (
        ObjectRawIORandomWriteBase,
        ObjectBufferedIORandomWriteBase,
    )

    # Mock sub class
    name = "name"
    size = 10000
    flushed = bytearray()
    raw_flushed = bytearray()
    buffer_size = 100
    flush_sleep = 0

    def flush(data):
        """Dummy flush"""
        flushed.extend(data)
        time.sleep(flush_sleep)

    class DummySystem:
        """Dummy system"""

        client = None

        def __init__(self, **_):
            """Do nothing"""

        @staticmethod
        def getsize(*_, **__):
            """Returns fake result"""
            return size

        @staticmethod
        def head(*_, **__):
            """Returns fake result"""
            return {}

        @staticmethod
        def relpath(path):
            """Returns fake result"""
            return path

        @staticmethod
        def get_client_kwargs(*_, **__):
            """Returns fake result"""
            return {}

    class DummyRawIO(ObjectRawIOBase):
        """Dummy IO"""

        _SYSTEM_CLASS = DummySystem

        def _flush(self, buffer):
            """Do nothing"""
            raw_flushed.extend(buffer)

        def _read_range(self, start, end=0):
            """Read fake bytes"""
            return ((size if end > size else end) - start) * b"0"

    class DummyBufferedIO(ObjectBufferedIOBase):
        """Dummy buffered IO"""

        _RAW_CLASS = DummyRawIO
        DEFAULT_BUFFER_SIZE = buffer_size
        MINIMUM_BUFFER_SIZE = 10
        MAXIMUM_BUFFER_SIZE = 10000

        def ensure_ready(self):
            """Ensure flush is complete"""
            while any(1 for future in self._write_futures if not future.done()):
                time.sleep(0.01)

        # NOTE(review): "__init" looks like a typo for "__init__"; as written
        # this method is never invoked, so close_called is never set.
        def __init(self, *arg, **kwargs):
            ObjectBufferedIOBase.__init__(self, *arg, **kwargs)
            self.close_called = False

        def _close_writable(self):
            """Checks called"""
            self.close_called = True
            self.ensure_ready()

        def _flush(self):
            """Flush"""
            self._write_futures.append(
                self._workers.submit(flush, self._write_buffer[: self._buffer_seek])
            )

    class DummyRawIOPartFlush(DummyRawIO, ObjectRawIORandomWriteBase):
        """Dummy IO with part flush support"""

        _size = 20

        def _flush(self, buffer, start, *_):
            """Do nothing"""
            if start == 50:
                # Simulate buffer that need to wait previous one
                time.sleep(0.1)
            raw_flushed.extend(buffer)

    class DummyBufferedIOPartFlush(ObjectBufferedIORandomWriteBase):
        """Dummy buffered IO with part flush support"""

        _RAW_CLASS = DummyRawIOPartFlush

    # Tests: Read until end
    object_io = DummyBufferedIO(name)
    assert object_io.read() == size * b"0"

    # Tests: Read when already at end
    assert object_io.read() == b""

    # Tests: Read, max buffer
    object_io = DummyBufferedIO(name)
    assert object_io._max_buffers == size // buffer_size
    object_io = DummyBufferedIO(name, max_buffers=5)
    assert object_io.read(100) == 100 * b"0"

    # Tests: Read by parts
    assert sorted(object_io._read_queue) == list(
        range(100, 100 + buffer_size * 5, buffer_size)
    )
    assert object_io._seek == 100
    assert object_io.read(150) == 150 * b"0"
    assert sorted(object_io._read_queue) == list(
        range(200, 200 + buffer_size * 5, buffer_size)
    )
    assert object_io._seek == 250
    assert object_io.read(50) == 50 * b"0"
    assert sorted(object_io._read_queue) == list(
        range(300, 300 + buffer_size * 5, buffer_size)
    )
    assert object_io._seek == 300
    assert object_io.read() == (size - 300) * b"0"
    assert not object_io._read_queue

    # Tests: Read small parts
    part = buffer_size // 10
    object_io.seek(0)
    for index in range(1, 15):
        assert object_io.read(part) == part * b"0"
        assert object_io._seek == part * index

    # Tests: Read, change seek
    object_io.seek(450)
    assert sorted(object_io._read_queue) == list(
        range(450, 450 + buffer_size * 5, buffer_size)
    )
    object_io.seek(700)
    assert sorted(object_io._read_queue) == list(
        range(700, 700 + buffer_size * 5, buffer_size)
    )

    # Tests: Read buffer size (No copy mode)
    object_io.seek(0)
    assert object_io.read(buffer_size) == buffer_size * b"0"
    object_io.seek(size - buffer_size // 2)
    assert object_io.read(buffer_size) == b"0" * (buffer_size // 2)
    object_io._seek = size

    # Tests: Read, EOF before theoretical EOF
    def read_range(*_, **__):
        """Returns empty bytes"""
        return b""

    object_io = DummyBufferedIO(name, max_buffers=5)
    object_io._read_range = read_range
    assert object_io.read() == b""

    # Tests write (with auto flush)
    assert bytes(flushed) == b""
    object_io = DummyBufferedIO(name, mode="w")
    assert object_io.write(250 * b"0") == 250
    object_io.ensure_ready()
    assert object_io._buffer_seek == 50
    assert bytes(object_io._write_buffer) == 50 * b"0" + 50 * b"\0"
    assert object_io._get_buffer().tobytes() == 50 * b"0"
    assert object_io._seek == 2
    assert len(flushed) == 200
    assert bytes(flushed) == 200 * b"0"

    # Tests manual flush
    object_io.flush()
    object_io.ensure_ready()
    assert object_io._seek == 3
    assert bytes(flushed) == 250 * b"0"
    assert object_io._buffer_seek == 0

    # Tests write, only buffered should flush
    flushed = bytearray()
    raw_flushed = bytearray()
    assert bytes(flushed) == b""
    assert bytes(raw_flushed) == b""
    with DummyBufferedIO(name, mode="w") as object_io:
        assert object_io.write(150 * b"0") == 150
        object_io.ensure_ready()
        assert len(flushed) == 100
        assert object_io._buffer_seek == 50
        assert len(object_io._get_buffer()) == 50
        object_io.raw._write_buffer = object_io._get_buffer()
        assert len(object_io.raw._get_buffer()) == 50
    assert len(flushed) == 150
    assert not len(raw_flushed)

    # Tests write small data flushed by raw
    object_io = DummyBufferedIO(name, mode="w")
    assert object_io.write(10 * b"0") == 10
    object_io.close()
    assert bytes(raw_flushed) == 10 * b"0"

    # Test max buffer
    object_io = DummyBufferedIO(name, mode="w", max_buffers=2)
    flush_sleep = object_io._FLUSH_WAIT
    assert object_io.write(1000 * b"0") == 1000
    flush_sleep = 0

    # Test default implementation with part flush support
    raw_flushed[:] = b""
    content = os.urandom(100)
    with DummyBufferedIOPartFlush(name, mode="w", buffer_size=10) as object_io:
        object_io.write(content)
    assert raw_flushed == content
def fov_geometry(release='sva1',size=[530,454]):
    """
    Return positions of each CCD in PNG image for
    a given data release.

    Parameters:
        release : Data release name (currently ['sva1','y1a1']
        size    : Image dimensions in pixels [width,height]
    Returns:
        list    : A list of [id, xmin, ymin, xmax, ymax] for each CCD
    """
    width, height = size[0], size[1]

    # CCD ids belonging to each focal-plane row.
    rows = [
        [3, 2, 1],
        [7, 6, 5, 4],
        [12, 11, 10, 9, 8],
        [18, 17, 16, 15, 14, 13],
        [24, 23, 22, 21, 20, 19],
        [31, 30, 29, 28, 27, 26, 25],
        [38, 37, 36, 35, 34, 33, 32],
        [44, 43, 42, 41, 40, 39],
        [50, 49, 48, 47, 46, 45],
        [55, 54, 53, 52, 51],
        [59, 58, 57, 56],
        [62, 61, 60],
    ]

    if release.lower() == 'sva1':
        # These are the old SV pngs, not the ones made for Y2A1:
        # no border padding, and the whole layout is mirrored.
        pad = [0, 0]
        rows = [row[::-1] for row in rows[::-1]]
    else:
        # 2% border padding in x and y (assumed symmetric).
        pad = [0.02 * width, 0.02 * height]

    n_rows = len(rows)
    widest = max(len(row) for row in rows)
    # CCD dimensions, assumed to span the padded image.
    ccd_w = float(width - 2 * pad[0]) / widest
    ccd_h = float(height - 2 * pad[1]) / n_rows

    bounds = []
    for i, row in enumerate(rows):
        # Centre each row horizontally within the image.
        xpad = (size[0] - len(row) * ccd_w) / 2.
        for j, ccd in enumerate(row):
            xmin = xpad + j * ccd_w
            ymin = pad[1] + i * ccd_h
            # Output values are truncated to ints.
            bounds.append([int(ccd), int(xmin), int(ymin),
                           int(xmin + ccd_w), int(ymin + ccd_h)])
    return sorted(bounds)
def parse_descriptor(desc: str) -> 'Descriptor':
    """
    Parse a descriptor string into a :class:`Descriptor`.
    Validates the checksum if one is provided in the string

    :param desc: The descriptor string
    :return: The parsed :class:`Descriptor`
    :raises: ValueError: if the descriptor string is malformed
    """
    hash_pos = desc.find("#")
    if hash_pos != -1:
        # Split off the "#checksum" suffix and validate it.
        desc, checksum = desc[:hash_pos], desc[hash_pos + 1:]
        computed = DescriptorChecksum(desc)
        if computed != checksum:
            raise ValueError("The checksum does not match; Got {}, expected {}".format(checksum, computed))
    return _parse_descriptor(desc, _ParseDescriptorContext.TOP)
def HHMMSS_to_seconds(string):
    """Converts a colon-separated time string (HH:MM:SS) to seconds since
    midnight"""
    hours, minutes, seconds = [int(part) for part in string.split(':')]
    return (hours * 60 + minutes) * 60 + seconds
def create_check_warnings_reference(warnings_file):
    """Read warnings_file and compare it with the warnings the compiler
    actually reported. If the reference file is missing we just check that
    there are any warnings at all."""
    if not os.path.isfile(warnings_file):
        return check_missing_warnings
    # Use a context manager so the file handle is closed deterministically
    # (the previous open(...).read() leaked the handle).
    with open(warnings_file, "rb") as f:
        reference = f.read()
    return partial(_help_check_warnings_reference, reference=reference)
def gis_location_onaccept(form):
    """
        On Accept for GIS Locations (after DB I/O)
    """
    if session.rcvars and hasattr(name_dummy_element, "onaccept"):
        # HTML UI, not XML import
        name_dummy_element.onaccept(db, session.rcvars.gis_location, request)
    else:
        location_id = form.vars.id
        table = db.gis_location_name
        names = db(table.location_id == location_id).select(table.id)
        if names:
            # Store the ids of the related gis_location_name records on the
            # location row, pipe-separated.
            ids = [str(name.id) for name in names]
            #name_dummy = "|%s|" % "|".join(ids)
            name_dummy = "|".join(ids) # That's not how it should be
            table = db.gis_location
            db(table.id == location_id).update(name_dummy=name_dummy)
    # Update the Path
    gis.update_location_tree(form.vars.id, form.vars.parent)
    return
def fetch_user_profile(user_id):
    """
    This function lookup a dictionary given an user ID. In production, this should be replaced
    by querying external database.

    user_id: User ID using which external Database will be queried to retrieve user profile.
    return: Returns an user profile corresponding to the user ID, if not found returns a default profile type.
    """
    # dict.get performs a single lookup (instead of "in" plus indexing) and
    # supplies the default "free" profile for unknown users.
    return USER_PROFILES.get(user_id, {"profile": "free"})
def _replace_token_range(tokens, start, end, replacement):
"""For a range indicated from start to end, replace with replacement."""
tokens = tokens[:start] + replacement + tokens[end:]
return tokens | 28,571 |
def softmax_loss_vectorized(W, X, y, reg):
    """
    Softmax loss function, vectorized version.

    Inputs:
    - W: (D, C) array of weights.
    - X: (N, D) array of data.
    - y: (N,) array of integer labels in [0, C).
    - reg: L2 regularization strength.

    Returns:
    - loss: scalar softmax loss plus L2 regularization.
    - dW: gradient of the loss with respect to W, shape (D, C).
    """
    num_train = X.shape[0]

    # Shift scores by the per-row max for numerical stability before
    # exponentiating (see http://cs231n.github.io/neural-networks-case-study/#loss).
    scores = X.dot(W)                                   # (N, C)
    scores -= np.amax(scores, axis=1, keepdims=True)
    exp_scores = np.exp(scores)
    probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)

    # Average cross-entropy loss plus L2 regularization.
    loss = -np.log(probs[np.arange(num_train), y]).sum() / num_train
    loss += 0.5 * reg * np.sum(W * W)

    # Gradient (see http://cs231n.github.io/neural-networks-case-study/#grad):
    # subtract 1 from the probability of the correct class, average, backprop.
    dscores = probs                                     # (N, C)
    dscores[np.arange(num_train), y] -= 1
    dscores /= num_train
    dW = X.T.dot(dscores) + reg * W
    return loss, dW
def test_var_length_array():
    """Check that length of dict <= length of columns in an array when passed an array."""
    plots = make_qqplot(data_array)
    column_count = data_array.shape[1]
    # One plot at most per column of the input array.
    assert len(plots) <= column_count
def test_calling_start_on_connection_observer_returns_itself(do_nothing_connection_observer__for_major_base_class,
                                                             connection_to_remote):
    """
    connection_observer.start() is asynchronous run. Return itself to allow for acting on it.
    Simplest (stupid) case: connection_observer.start().await_done()
    """
    connection_observer = do_nothing_connection_observer__for_major_base_class
    # Attach a live connection before starting the observer.
    connection_observer.connection = connection_to_remote.moler_connection
    # start() must return the observer itself (fluent API).
    assert connection_observer == connection_observer.start()
    # Clean up the still-running observer.
    connection_observer.cancel()
def norm(a):
    """Rescale the input array linearly onto the range [0, 1].

    Args:
        a: numpy array

    Returns:
        normalized numpy array
    """
    lo = np.amin(a)
    hi = np.amax(a)
    return (a - lo) / (hi - lo)
def input_apply_for_lock(driver):
    """Enter the door-lock service application page.

    (Docstring translated from Chinese.) Depending on which screen is
    currently shown, either taps the "apply for lock service" entry on the
    intro web view, or the pass button on the lock management page.
    The Chinese string literals below are runtime UI locators / assertion
    messages and are intentionally left untranslated.
    """
    if driver.hasElement(driver.ele.WebView_TitleBar_Title):
        # Web-view title bar present: we should be on the intro web view.
        driver.assertEqual(driver.ele.MS_XZWebViewActivit, driver.driver.current_activity, '进入门锁介绍页失败')
        driver.find_element_desc_click_wait('申请门锁服务')
    else:
        # Otherwise we should be on the lock management activity.
        driver.assertEqual(driver.ele.MS_Lock_ManageActivity, driver.driver.current_activity, '进入门锁管理页面失败')
        driver.find_element_id_and_click_wait(driver.ele.MS_lock_manager_passBtn)
def collect_path(rf, method="quick", verbose=True):
    """
    Collect paths from RandomForest objects. This function is the most time-consuming part.

    Output:
        A list of outputs from get_path_to_max_prediction_terminal_node.
    """
    n_tree = len(rf)
    result = []
    if method == "quick":
        for i in range(n_tree):
            if verbose:
                # Progress report every 100 trees.
                if (i+1) % 100 == 0:
                    print("Construct the %s tree graph out of %s trees" %(i+1, n_tree))
            # Export the tree to graphviz dot text and walk it as a graph.
            dot_data = tree.export_graphviz(rf.estimators_[i], out_file = None, rounded = True, special_characters = True)
            G = Graph(dot_data)
            result.append(G.get_path_to_max_prediction_terminal_node())
    else:
        # NOTE(review): `i` is only bound by the loop in the "quick" branch;
        # as written this branch references an unbound (or stale) loop
        # variable — confirm whether the slow path should loop over trees too.
        result.append(return_node_path_to_max_prediction(rf.estimators_[i], verbose=False))
    return result
def calculate_variance(beta):
    """
    This function calculates variance of curve beta

    :param beta: numpy ndarray of shape (2,M) of M samples

    :rtype: numpy ndarray
    :return variance: variance
    """
    n, T = beta.shape
    # Derivative of the curve with respect to the parameter (spacing 1/(T-1));
    # gradient returns per-axis derivatives, element [1] is along the samples.
    betadot = gradient(beta, 1. / (T - 1))
    betadot = betadot[1]
    normbetadot = zeros(T)
    centroid = calculatecentroid(beta)
    integrand = zeros((n, n, T))
    t = linspace(0, 1, T)
    for i in range(0, T):
        # Speed at sample i, used as the arc-length weight.
        normbetadot[i] = norm(betadot[:, i])
        # Outer product of the centred point, weighted by speed.
        a1 = (beta[:, i] - centroid)
        a1 = a1.reshape((n, 1))
        integrand[:, :, i] = a1.dot(a1.T) * normbetadot[i]
    # Curve length, used to normalise the integral.
    l = trapz(normbetadot, t)
    variance = trapz(integrand, t, axis=2)
    variance /= l
    return (variance)
def get_datastore_mo(client, soap_stub,
                     datacenter_name, datastore_name):
    """
    Return datastore managed object with specific datacenter and datastore name
    """
    datastore = get_datastore(client, datacenter_name, datastore_name)
    if datastore:
        # Wrap the datastore identifier in a managed-object stub.
        return vim.Datastore(datastore, soap_stub)
    return None
def _merge_dictionaries(dict1: dict, dict2: dict) -> dict:
"""
Recursive merge dictionaries.
:param dict1: Base dictionary to merge.
:param dict2: Dictionary to merge on top of base dictionary.
:return: Merged dictionary
"""
for key, val in dict1.items():
if isinstance(val, dict):
dict2_node = dict2.setdefault(key, {})
_merge_dictionaries(val, dict2_node)
else:
if key not in dict2:
dict2[key] = val
return dict2 | 28,580 |
def integrate_profile(rho0, s0, r_s, r_1, rmax_fac=1.2, rmin_fac=0.01,
                      r_min=None, r_max=None):
    """
    Solves the ODE to obtain the density profile.

    :param rho0: central density normalisation
    :param s0: velocity-scale parameter used to set the length scale
    :param r_s: scale radius (not referenced in this routine)
    :param r_1: matching radius used for the default integration bounds (kpc)
    :param rmax_fac: multiple of r_1 giving the default outer bound
    :param rmin_fac: multiple of r_1 giving the default inner bound
    :param r_min: explicit inner bound in kpc (overrides rmin_fac)
    :param r_max: explicit outer bound in kpc (overrides rmax_fac)
    :returns: the integration domain in kpc and the solution to the density
        profile in M_sun / kpc^3
    """
    G = 4.3e-6  # units kpc and solar mass
    # Characteristic length scale of the dimensionless ODE.
    length_scale = np.sqrt(s0 ** 2 * (4 * np.pi * G * rho0) ** -1)
    # Integration bounds in dimensionless units x = r / length_scale.
    if r_max is None:
        x_max = rmax_fac * r_1 / length_scale
    else:
        x_max = r_max/length_scale
    if r_min is None:
        x_min = r_1 * rmin_fac / length_scale
    else:
        x_min = r_min/length_scale
    # solve the ODE with initial conditions
    phi_0, phi_prime_0 = 0, 0
    N = 600
    xvalues = np.linspace(x_min, x_max, N)
    res = solve_ivp(ode_system, (x_min, x_max),
                    [phi_0, phi_prime_0], t_eval=xvalues)
    # Convert back to physical radius; density is rho0 * exp(phi).
    return res['t'] * length_scale, rho0 * np.exp(res.y[0])
def main():
    """Disable fsync (workaround for saving issues)."""
    # Disable fsync to fix saving issues
    util.disable_fsync()
def create_eval_dataset(
        task,
        batch_size,
        subset):
    """Create datasets for evaluation.

    Args:
        task: TFDS dataset name to evaluate on.
        batch_size: global batch size; must divide evenly across devices.
        subset: split name (e.g. "test") to read.

    Returns:
        A tuple of (dataset builder info, batched evaluation dataset).

    Raises:
        ValueError: if batch_size is not divisible by the device count.
    """
    # The global batch is sharded evenly across all JAX devices.
    if batch_size % jax.device_count() != 0:
        raise ValueError(f"Batch size ({batch_size}) must be divisible by "
                         f"the number of devices ({jax.device_count()}).")
    per_device_batch_size = batch_size // jax.device_count()
    dataset_builder = tfds.builder(task)
    # Restrict this host to its own deterministic shard of the split.
    eval_split = deterministic_data.get_read_instruction_for_host(
        subset, dataset_builder.info.splits[subset].num_examples)
    eval_ds = deterministic_data.create_dataset(
        dataset_builder,
        split=eval_split,
        num_epochs=1,
        shuffle=False,
        # Leading batch dims: [local devices, per-device batch].
        batch_dims=[jax.local_device_count(), per_device_batch_size],
        preprocess_fn=_preprocess_cifar10)
    return dataset_builder.info, eval_ds
def rad2deg(angle):
    """
    Converts radians to degrees

    Parameters
    ----------
    angle : float, int
        Angle in radians

    Returns
    -------
    ad : float
        Angle in degrees

    Examples
    --------
    >>> rad2deg(pi)
    180.000000000000
    >>> rad2deg(pi/2)
    90.0000000000000
    >>> rad2deg(2*pi)
    360.000000000000
    """
    # .evalf() forces numeric evaluation of the symbolic expression
    # (pi here is presumably sympy.pi — the doctest output is sympy-style;
    # confirm against the module imports).
    ad = ( (angle)*(180/pi) ).evalf()
    return ad
def center_crop(im, size, is_color=True):
    """
    Crop the center of image with size.

    Example usage:

    .. code-block:: python

        im = center_crop(im, 224)

    :param im: the input image with HWC layout.
    :type im: ndarray
    :param size: the cropping size.
    :type size: int
    :param is_color: whether the image is color or not.
    :type is_color: bool
    """
    h, w = im.shape[:2]
    # Integer division is required: in Python 3 "/" yields floats, which are
    # invalid as slice indices (TypeError).
    h_start = (h - size) // 2
    w_start = (w - size) // 2
    h_end, w_end = h_start + size, w_start + size
    if is_color:
        return im[h_start:h_end, w_start:w_end, :]
    return im[h_start:h_end, w_start:w_end]
def read_json(file_path: str) -> Jelm:
    """reads from a json file path"""
    # Read the whole file, then delegate parsing to reads_json.
    with open(file_path) as json_file:
        serialized = json_file.read()
    return reads_json(serialized)
def fillCells(cell_bounds, rdx, rdy, rdbathy, dlim=0.0, drymin=0.0,
              drymax=0.99, pland=None, rotated=False,
              median_depth=False, smc=False, setadj=False):
    """Returns a list of depth and land-sea data to correspond
    with cell bounds list.

    :param cell_bounds: (ncells, 4) array of [xsw, ysw, xne, yne] per cell
    :param rdx, rdy: source grid coordinates; 1D for a regular grid,
        2D for a rotated-pole grid
    :param rdbathy: source bathymetry (depth-negative convention)
    :param dlim: dry depth limit (sign-flipped if passed positive)
    :param drymin, drymax: dry-fraction thresholds for tiering/rejection
    :param pland: optional wet-cell land-percentage field matching rdbathy
    :param rotated: unused here; kept for interface compatibility
    :param median_depth: use median instead of mean of wet depths
    :param smc: SMC grid (keeps negative tier markers)
    :param setadj: additionally mark cells adjacent to dry cells for tiering
    :returns: (ncells, 3) array of [depth, dry fraction, cell type]
    """
    print('[INFO] Calculating cell depths')
    ncells = np.shape(cell_bounds)[0]
    # cell depths array as depth, proportion of dry cells and cell type
    cell_depths = np.zeros([ncells, 3])
    cell_depths[:, 2] = 1  # set to default ww3 wet cell value
    if dlim > 0.0:
        print('[WARN] Dry depth limit is set greater than zero, changing sign for depth negative convention')
        dlim = dlim * -1.0

    chkx = rdx
    chky = rdy
    for lp in range(ncells):
        if np.mod(lp, 2500) == 0:
            print('[INFO] ... done %d points out of %d' % tuple([lp, ncells]))
        xsw = cell_bounds[lp, 0]
        ysw = cell_bounds[lp, 1]
        xne = cell_bounds[lp, 2]
        yne = cell_bounds[lp, 3]
        if len(np.shape(rdx)) == 1:
            # regular bathy
            indsx = np.where((chkx >= xsw) & (chkx < xne))
            indsy = np.where((chky >= ysw) & (chky < yne))
            ndepths = np.size(indsx) * np.size(indsy)
        else:
            # rotated pole bathy; integer division keeps ndepths an int
            # (np.where returns a pair of index arrays here).
            inds = np.where(((chkx >= xsw) & (chkx < xne) &
                             (chky >= ysw) & (chky < yne)))
            ndepths = np.size(inds) // 2
        if ndepths > 0:
            if len(np.shape(rdx)) == 1:
                # regular bathy
                bathytmp = rdbathy[np.min(indsy):np.max(indsy)+1,
                                   np.min(indsx):np.max(indsx)+1].flatten()
            else:
                # rotated pole bathy
                bathytmp = rdbathy[inds]
            # only use wet depths in calculations
            if np.size(bathytmp[bathytmp < dlim]) > 0:
                if median_depth:
                    depth = np.median(bathytmp[bathytmp < dlim])
                else:
                    depth = np.mean(bathytmp[bathytmp < dlim])
            else:
                depth = 99.99
            # use all depths for dry percentage calculation
            pcdry = np.size(np.where(bathytmp >= dlim)[0])
            # add wet cell land percentages if this info has been loaded in
            if pland is not None:
                if len(np.shape(rdx)) == 1:
                    # regular bathy
                    plandtmp = pland[np.min(indsy):np.max(indsy)+1,
                                     np.min(indsx):np.max(indsx)+1].flatten()
                else:
                    # rotated pole bathy
                    plandtmp = pland[inds]
                if np.size(bathytmp[bathytmp < dlim]) > 0:
                    plandsum = np.sum(plandtmp[bathytmp < dlim])
                    # builtin float: np.float was removed in NumPy >= 1.24
                    pcdry = float(pcdry) + plandsum
            pcdry = float(pcdry) / float(ndepths)
            cell_depths[lp, 0] = depth
            cell_depths[lp, 1] = pcdry
            # mark cells for removal/tiering based on percentage dry
            if pcdry >= drymax:
                # reject dry cells
                cell_depths[lp, 2] = 0
            elif pcdry > drymin:
                # set partially dry points for tiering
                cell_depths[lp, 2] = -1
        else:
            print('[WARNING] No source data found in cell, returning zero value')

    # second pass through cells to switch cells adjacent to coast to type -2
    # sets required additional tiering in next step
    if smc and setadj:
        print('[INFO] Checking for points adjacent to dry cells')
        inds = np.where(cell_depths[:, 2] == 0)
        adjdry = []
        for cnt, lp in enumerate(inds[0]):
            if np.mod(cnt, 2500) == 0:
                print('[INFO] ... done %d points out of %d' % tuple([cnt, np.size(inds)]))
            intersects = chkAdj(lp, cell_bounds, altbounds=None)
            switch_drytype = False
            if np.any(intersects is not None):
                for chkcell in intersects:
                    if chkcell is not None:
                        if cell_depths[chkcell, 2] != 0:
                            cell_depths[chkcell, 2] = -1
                            switch_drytype = True
            if switch_drytype:
                adjdry.append(lp)
        if np.size(np.array(adjdry)) > 0:
            cell_depths[adjdry, 2] = -2

    # for non-smc grids set cells marked for tiering back to wet cells
    if not smc:
        print('[INFO] Not SMC grid - switching tier values to wet cells')
        cell_depths[cell_depths[:, 2] == -2, 2] = -1
        cell_depths[:, 2] = np.abs(cell_depths[:, 2])
    # (Removed a leftover debug print of the stale loop variable `lp`.)
    return cell_depths
def main(path):
    """
    Catalogue the files under *path* into the database.

    Expected schema (kept from the original docstring):

    from dal import DAL, Field
    db=DAL('sqlite://catalog.sqlite')
    db.define_table('catalog_folder',
                    Field('root_id','reference catalog_folder'),
                    Field('path'),
                    Field('title'),
                    Field('header','text'),
                    Field('footer','text'),
                    Field('pattern_ignore'),
                    Field('pattern_group'))
    db.define_table('tag',
                    Field('name'),
                    Field('root_id','reference catalog_folder'))
    db.define_table('catalog_file',
                    Field('root_id','reference catalog_folder'),
                    Field('filename'),
                    Field('md5'),
                    Field('pattern'),
                    Field('extension'),
                    Field('size','decimal(20,0)'),
                    Field('mtime','datetime'))
    db.define_table('attribute',
                    Field('catalog',db.catalog_file),
                    Field('name'),
                    Field('value','double'))
    """
    # Fail fast on a bad path. The original `raise "Invalid path"` (a string
    # exception) is a TypeError in Python 3.
    if not os.path.exists(path):
        raise ValueError("Invalid path")
    if os.path.isdir(path):
        walk_folders(path, db, folder_walker, '')
    else:
        walk_folders(path, db, ls_walker, '/projects/qcd/')
    # Report every catalogued file (print() replaces the Python 2 print
    # statement; output stays space-separated).
    for record in db(db.catalog_file).select():
        print(record.root_id, record.filename, record.pattern, record.extension, record.size)
def convert_format(input_path, output_path):
    """Convert format."""
    # Parse the input file as a newick tree, then serialise it back out.
    newick_text = Path(input_path).read_text()
    parsed_tree = read_newick(newick_text)
    Path(output_path).write_text(write_newick(parsed_tree))
def submitter(commander):
    """Submit a command line to the shell and wait for it to finish.

    :param commander: full command string, executed via the shell.

    NOTE(review): ``shell=True`` interprets *commander* through the shell;
    callers must not pass untrusted input.
    """
    # subprocess.run blocks until completion, replacing Popen + wait().
    subprocess.run(commander, shell=True)
def mark_item_complete(todo: "TodoAndCompleted", idx: int) -> "TodoAndCompleted":
    """
    Pop todo['todo'][idx] and append it to todo['complete'].

    Parameters
    ----------
    todo : TodoAndCompleted
        A dict containing a to-do list and a list of completed tasks.
    idx : int
        Index of an item to move from todo['todo'] to todo['complete'].

    Returns
    -------
    TodoAndCompleted
        Copy of `todo` with `todo['todo'][idx]` moved to `todo['complete']`.

    Raises
    ------
    IndexError
        If `idx` is out of range for `todo['todo']`.
    """
    # Annotations are quoted so the signature does not require the project
    # TodoAndCompleted alias to be importable at definition time.
    _todo = todo.copy()
    # Copy the lists too, so the caller's input is never mutated.
    _todo["todo"] = list(todo["todo"])
    _todo["complete"] = list(todo["complete"])
    _todo["complete"].append(_todo["todo"].pop(idx))
    return _todo
def draw_legs():
    """Draw the left and right legs, mirrored about the figure's center.

    Reads the module-level ``constants`` dict (``radius``, ``legs``,
    ``center``) and draws with the module-level ``peon`` turtle-like object.
    """
    radius = constants["radius"]
    legs = constants["legs"]
    # Each leg is an explicit stroke sequence; the right leg mirrors the
    # left by negating the arc extent and the turn directions.
    # leftmost
    peon.seth(270)
    peon.pencolor("dark gray")
    peon.pu()
    # Walk along the body circle to the leg's attachment point (pen up).
    peon.circle(radius, legs["principal"])
    peon.rt(90)
    peon.pd()
    peon.forward(50)
    peon.lt(20)
    peon.forward(30)
    peon.rt(60)
    peon.forward(60)
    peon.seth(0)
    peon.forward(120)
    peon.lt(110)
    peon.forward(20)
    peon.pu()
    # rightmost
    # NOTE(review): ``tp`` is not a standard turtle method — presumably a
    # project teleport helper taking (position, heading); confirm.
    peon.tp(constants["center"] + Vector2(radius, 0), 90)
    peon.circle(radius, -legs["principal"])
    peon.rt(90)
    peon.pd()
    peon.forward(50)
    peon.rt(20)
    peon.forward(30)
    peon.lt(60)
    peon.forward(60)
    peon.seth(180)
    peon.forward(120)
    peon.rt(110)
    peon.forward(20)
    peon.pu() | 28,592 |
def spatial_pyramid_pooling_2d(x, pyramid_height, pooling_class=None,
                               pooling=None):
    """Spatial pyramid pooling function.
    It outputs a fixed-length vector regardless of input feature map size.
    It performs pooling operation to the input 4D-array ``x`` with different
    kernel sizes and padding sizes, and then flattens all dimensions except
    first dimension of all pooling results, and finally concatenates them along
    second dimension.
    At :math:`i`-th pyramid level, the kernel size
    :math:`(k_h^{(i)}, k_w^{(i)})` and padding size
    :math:`(p_h^{(i)}, p_w^{(i)})` of pooling operation are calculated as
    below:
    .. math::
        k_h^{(i)} &= \\lceil b_h / 2^i \\rceil, \\\\
        k_w^{(i)} &= \\lceil b_w / 2^i \\rceil, \\\\
        p_h^{(i)} &= (2^i k_h^{(i)} - b_h) / 2, \\\\
        p_w^{(i)} &= (2^i k_w^{(i)} - b_w) / 2,
    where :math:`\\lceil \\cdot \\rceil` denotes the ceiling function, and
    :math:`b_h, b_w` are height and width of input variable ``x``,
    respectively. Note that index of pyramid level :math:`i` is zero-based.
    See detail in paper: `Spatial Pyramid Pooling in Deep Convolutional \
    Networks for Visual Recognition \
    <https://arxiv.org/abs/1406.4729>`_.
    Args:
        x (~chainer.Variable): Input variable. The shape of ``x`` should be
            ``(batchsize, # of channels, height, width)``.
        pyramid_height (int): Number of pyramid levels
        pooling_class (MaxPooling2D):
            *(deprecated since v4.0.0)* Only MaxPooling2D is supported.
            Please use the ``pooling`` argument instead since this argument is
            deprecated.
        pooling (str):
            Currently, only ``max`` is supported, which performs a 2d max
            pooling operation. Replaces the ``pooling_class`` argument.
    Returns:
        ~chainer.Variable: Output variable. The shape of the output variable
            will be :math:`(batchsize, c \\sum_{h=0}^{H-1} 2^{2h}, 1, 1)`,
            where :math:`c` is the number of channels of input variable ``x``
            and :math:`H` is the number of pyramid levels.
    .. note::
        This function uses some pooling classes as components to perform
        spatial pyramid pooling. Currently, it only supports
        :class:`~functions.MaxPooling2D` as elemental pooling operator so far.
    """
    # Validate the pooling arguments once, up front. The original code ran
    # these checks inside the per-level loop, repeating the deprecation
    # warning at every pyramid level and skipping validation entirely when
    # pyramid_height == 0.
    if pooling_class is not None:
        warnings.warn('pooling_class argument is deprecated. Please use '
                      'the pooling argument.', DeprecationWarning)
    if (pooling_class is None) == (pooling is None):
        raise ValueError('Specify the pooling operation either using the '
                         'pooling_class or the pooling argument.')
    if not (pooling_class is chainer.functions.MaxPooling2D or
            pooling == 'max'):
        raise ValueError('Unsupported pooling operation: ',
                         pooling if pooling is not None else pooling_class)
    _, bottom_h, bottom_w = x.shape[1:]
    ys = []
    # Create a pooling function per pyramid level and apply it. Level i
    # splits the map into a 2^i x 2^i grid of bins.
    for pyramid_level in six.moves.range(pyramid_height):
        num_bins = int(2 ** pyramid_level)
        # Ceil-divide so the kernels cover the whole map; pad to center the
        # remainder (see the formulas in the docstring).
        ksize_h = int(math.ceil(bottom_h / (float(num_bins))))
        remainder_h = ksize_h * num_bins - bottom_h
        pad_h = remainder_h // 2
        ksize_w = int(math.ceil(bottom_w / (float(num_bins))))
        remainder_w = ksize_w * num_bins - bottom_w
        pad_w = remainder_w // 2
        ksize = (ksize_h, ksize_w)
        pad = (pad_h, pad_w)
        pooler = chainer.functions.MaxPooling2D(
            ksize=ksize, stride=None, pad=pad, cover_all=True)
        y_var = pooler.apply((x,))[0]
        n, c, h, w = y_var.shape
        # Flatten each level's result to (n, c*h*w, 1, 1) before concat.
        ys.append(y_var.reshape((n, c * h * w, 1, 1)))
    return chainer.functions.concat(ys)
def test_preloadTestDbs():
    """
    Test preloadTestDbs: stage the test databases, preload them, then verify
    agents, things, drops (inboxes), offers, and anonymous messages, and
    finally clean up the temporary database directory.
    """
    print("Testing staging dbs")
    # Set up the staging environment and grab the global DB env so its path
    # can be removed at the end.
    priming.setupTest()
    dbEnv = dbing.gDbEnv
    dbing.preloadTestDbs()
    # Preloaded fixture is expected to contain exactly these five agent DIDs.
    agents = dbing.getAgents()
    assert agents == ['did:igo:3syVH2woCpOvPF0SD9Z0bu_OxNe2ZgxKjTQ961LlMnA=',
                      'did:igo:QBRKvLW1CnVDIgznfet3rpad-wZBL4qGASVpGRsE2uU=',
                      'did:igo:Qt27fThWoNZsa88VrTkep6H-4HA8tr54sHON1vWl6FE=',
                      'did:igo:Xq5YqaL6L48pf0fu7IUhL0JRaU2_RxFP0AL43wYn148=',
                      'did:igo:dZ74MLZXD-1QHoa73w9pQ9GroAvxqFi2RTZWlkC0raY=']
    things = dbing.getThings()
    assert things == ['did:igo:4JCM8dJWw_O57vM4kAtTt0yWqSgBuwiHpVgd55BioCM=']
    # The HID key should resolve back to the (only) preloaded thing.
    did = dbing.getHid(key="hid:dns:localhost#02")
    assert did == things[0]
    #test get inbox for Ivy
    messages = dbing.getDrops("did:igo:dZ74MLZXD-1QHoa73w9pQ9GroAvxqFi2RTZWlkC0raY=")
    assert len(messages) == 2
    assert messages[0]['from'] == "did:igo:Qt27fThWoNZsa88VrTkep6H-4HA8tr54sHON1vWl6FE="
    #test get inbox for Ann
    messages = dbing.getDrops("did:igo:Qt27fThWoNZsa88VrTkep6H-4HA8tr54sHON1vWl6FE=")
    assert len(messages) == 1
    assert messages[0]['from'] == "did:igo:dZ74MLZXD-1QHoa73w9pQ9GroAvxqFi2RTZWlkC0raY="
    # Offer expirations for the preloaded thing; lastOnly=False returns all.
    entries = dbing.getOfferExpires('did:igo:4JCM8dJWw_O57vM4kAtTt0yWqSgBuwiHpVgd55BioCM=',
                                    lastOnly=False)
    assert len(entries) == 2
    # The stored offer must round-trip through the signed-data store.
    dat, ser, sig = dbing.getSigned(entries[0]["offer"])
    assert dat["uid"] == 'o_00035d2976e6a000_26ace93'
    # Anonymous message UIDs and their message counts.
    auids = dbing.getAllAnonUids()
    assert auids == ['AQIDBAoLDA0=', 'BBIDBAoLCCC=']
    anons = dbing.getAnonMsgs(key=auids[0])
    assert len(anons) == 3
    anons = dbing.getAnonMsgs(key=auids[1])
    assert len(anons) == 1
    # Remove the temporary database directory created by setupTest.
    cleanupTmpBaseDir(dbEnv.path())
    print("Done Test") | 28,594 |
async def retrieve_seasons_and_teams(client, url):  # noqa: E999
    """
    Retrieves seasons and teams for a single player.
    """
    doc = await get_document(client, url)
    table_xpath = (
        "//table[@id='stats_basic_nhl' or @id='stats_basic_plus_nhl']")
    raw_teams = doc.xpath(table_xpath + "/tbody/tr/td[2]/a/text()")
    raw_seasons = doc.xpath(
        table_xpath + "/tbody/tr/th[@data-stat='season']/text()")
    # De-duplicate team names while keeping first-seen order.
    unique_teams = list(OrderedDict.fromkeys(raw_teams).keys())
    # Seasons are "YYYY-YY" strings; keep [first start year, last start year + 1].
    first_year = int(raw_seasons[0].split("-")[0])
    last_year = int(raw_seasons[-1].split("-")[0]) + 1
    return unique_teams, [first_year, last_year]
def plot_cartpole_policy(policy, env, deterministic, plot=True, figname="cartpole_actor.pdf", save_figure=True) -> None:
    """
    Plot a policy for a cartpole environment
    :param policy: the policy to be plotted
    :param env: the evaluation environment
    :param deterministic: whether the deterministic version of the policy should be plotted
    :param plot: whether the plot should be interactive
    :param figname: the name of the file to save the figure
    :param save_figure: whether the figure should be saved
    :return: nothing
    """
    # The portrait sweeps observation dimensions 0 and 2 only, so at least
    # a 3-dimensional (in practice 4-dimensional) observation space is needed.
    if env.observation_space.shape[0] <= 2:
        raise (ValueError("Observation space dimension {}, should be > 2".format(env.observation_space.shape[0])))
    definition = 100  # grid resolution per axis
    portrait = np.zeros((definition, definition))
    state_min = env.observation_space.low
    state_max = env.observation_space.high
    for index_x, x in enumerate(np.linspace(state_min[0], state_max[0], num=definition)):
        for index_y, y in enumerate(np.linspace(state_min[2], state_max[2], num=definition)):
            # Build a 4D observation [x, z1, y, z2]: dims 0 and 2 come from
            # the grid; dims 1 and 3 are random noise in [-0.5, 0.5).
            # NOTE(review): presumably [position, velocity, angle,
            # angular velocity] as in the classic cartpole — confirm.
            obs = np.array([x])
            z1 = random.random() - 0.5
            z2 = random.random() - 0.5
            obs = np.append(obs, z1)
            obs = np.append(obs, y)
            obs = np.append(obs, z2)
            action, _ = policy.predict(obs, deterministic=deterministic)
            # Flip the y index so larger values appear at the top of the image.
            portrait[definition - (1 + index_y), index_x] = action
    plt.figure(figsize=(10, 10))
    plt.imshow(portrait, cmap="inferno", extent=[state_min[0], state_max[0], state_min[2], state_max[2]], aspect="auto")
    plt.colorbar(label="action")
    # Add a point at the center
    plt.scatter([0], [0])
    # Use the environment's axis names when available, else generic labels.
    x_label, y_label = getattr(env.observation_space, "names", ["x", "y"])
    final_show(save_figure, plot, figname, x_label, y_label, figname, "/plots/") | 28,596 |
def array_match_difference_1d(a, b):
    """Return the summed absolute difference between the elements in a and b.

    Parameters
    ----------
    a, b : array_like
        One-dimensional sequences of equal, non-zero length.

    Returns
    -------
    numpy scalar
        ``sum(|a[i] - b[i]|)`` over all elements.

    Raises
    ------
    ValueError
        If the lengths differ or the inputs are empty.
    """
    if len(a) != len(b):
        raise ValueError('Both arrays must have the same length')
    if len(a) == 0:
        raise ValueError('Arrays must be filled')
    # np.asarray is a no-op for ndarrays and accepts any array_like,
    # replacing the fragile exact-type checks (`type(a) is not np.ndarray`)
    # that would needlessly re-wrap ndarray subclasses.
    return np.sum(np.abs(np.asarray(a) - np.asarray(b)))
def finder(ll, lats, longs, APIs, pad, max_dist=0.05):
    """
    Determine which lats and longs are within the criteria
    distance of ll to be included in the same pad
    The function is designed for use in a recursive program
    :input ll: latitude and longitude of a point to be considered
    :input lats: a sorted array of latitudes
    :input longs: an array of longitudes associated with lats
    :input APIs: an array of API values of the wells
    :input pad: a list of wells grouped into pads.
    :input max_dist: maximum distance to the nearest well to be considered
        a pad (km)
    :return: none

    Side effects: appends ``ll`` (and, recursively, nearby wells) to ``pad``
    and removes matched wells from ``lats``/``longs``/``APIs`` in place.
    """
    pad.append(ll)
    if len(lats) < 1:
        return
    minind = binary_find_min(ll[0], lats)
    # preliminary fast culling of distant wells
    if crude_lat_dist(ll[0], lats[minind]) > max_dist * 10:
        return
    # Expand outward from the nearest latitude in both directions while the
    # crude latitude distance stays within the coarse threshold.
    cond = []
    temp = minind
    while temp >= 0 and \
            crude_lat_dist(ll[0], lats[temp]) <= max_dist * 10:
        cond.append(temp)
        temp -= 1
    temp = minind + 1
    while temp < len(lats) and \
            crude_lat_dist(ll[0], lats[temp]) <= max_dist * 10:
        cond.append(temp)
        temp += 1
    if not cond:
        # Nothing survived the coarse cull; avoid calling distance() on an
        # empty array.
        return
    # Descending order so the deletions below never shift a yet-to-be-used
    # index.
    cond.sort(reverse=True)
    # final selection of nearby wells to be included in the pad
    la = [lats[i] for i in cond]
    lo = [longs[i] for i in cond]
    distances = distance(np.array(list(zip(la, lo))), ll)
    # BUG FIX: the empty check previously tested len(cond) instead of
    # len(cond2), so an empty selection still fell through.
    cond2 = np.where(distances < max_dist)[0]
    if len(cond2) == 0:
        return
    winners = []
    for c2 in cond2:
        winners.append([lats[cond[c2]], longs[cond[c2]],
                        APIs[cond[c2]]])
        del (lats[cond[c2]])
        del (longs[cond[c2]])
        del (APIs[cond[c2]])
    for w in winners:
        # BUG FIX: propagate max_dist into the recursion; it was previously
        # dropped, silently resetting the threshold to the default 0.05.
        finder(w, lats, longs, APIs, pad, max_dist)
def data_len(system: System) -> int:
    """Compute number of entries required to serialize all entities in a system."""
    # Each entity contributes its state entries plus its control entries.
    total = 0
    for entity in system.entities:
        total += entity.state_size() + entity.control_size()
    return total
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.