content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def GetTSAWaitTimes(airportCode):
    """
    Return data from the TSA Wait Times API for a particular airport shortcode.

    :param airportCode: 3-letter shortcode of airport (e.g. "DTW")
    :return: Full parsed JSON data from the TSA Wait Times API
    :raises SystemExit: with exit code 1 when the request or JSON parse fails
    """
    base_url = "http://apps.tsa.dhs.gov/MyTSAWebService/GetTSOWaitTimes.ashx"
    params_tsa_d = {'ap': airportCode, 'output': 'json'}
    try:
        ## Uncomment this line if you want to get with caching for testing purposes
        #tsa_result_diction = json.loads(get_with_caching(base_url, params_tsa_d, saved_cache, cache_fname))
        ## Comment out these two lines if you want to enable caching
        results_tsa = requests.get(base_url, params=params_tsa_d)
        tsa_result_diction = json.loads(results_tsa.text)
        return tsa_result_diction
    except Exception:
        print("Error: Unable to load TSA wait times. Please try again.")
        # quit() is only meant for interactive sessions (it is injected by the
        # site module); raise SystemExit directly with a non-zero exit code.
        raise SystemExit(1)
def False(context):
    """Function: <boolean> false()"""
    # XPath core function library entry: always returns the shared boolean
    # false object, ignoring the evaluation context argument.
    # NOTE(review): ``False`` is a reserved keyword in Python 3, so this
    # definition only parses under Python 2 -- confirm the target runtime.
    return boolean.false
def namedtuple_to_dict(model_params):
    """Convert a model-specification named tuple into a nested dictionary.

    The returned dict groups the attributes of ``model_params`` into the
    sections used by the model init file (GENERAL, CONSTANTS, ...).
    """
    spec = model_params
    return {
        "GENERAL": {
            "num_periods": spec.num_periods,
            "num_choices": spec.num_choices,
        },
        "CONSTANTS": {
            "delta": spec.delta,
            "mu": spec.mu,
            "benefits": spec.benefits,
        },
        "INITIAL_CONDITIONS": {
            "educ_max": spec.educ_max,
            "educ_min": spec.educ_min,
        },
        "SIMULATION": {
            "seed_sim": spec.seed_sim,
            "num_agents_sim": spec.num_agents_sim,
        },
        "SOLUTION": {
            "seed_emax": spec.seed_emax,
            "num_draws_emax": spec.num_draws_emax,
        },
        "PARAMETERS": {
            "optim_paras": spec.optim_paras,
        },
        "DERIVED_ATTR": {
            "educ_range": spec.educ_range,
            "shocks_cov": spec.shocks_cov,
        },
    }
def roberts(stream: Stream, *args, **kwargs) -> FilterableStream:
    """Apply the ffmpeg ``roberts`` cross edge-detection filter.

    https://ffmpeg.org/ffmpeg-filters.html#roberts
    """
    filter_name = roberts.__name__
    return filter(stream, filter_name, *args, **kwargs)
def options(opt):
    """
    Register the Qt-related command-line options on ``opt``.
    """
    opt.add_option('--want-rpath', action='store_true', default=False,
                   dest='want_rpath', help='enable the rpath for qt libraries')
    opt.add_option('--header-ext', type='string', default='',
                   help='header extension for moc files', dest='qt_header_ext')
    # One plain string option per Qt path override.
    for name in ('qtdir', 'qtbin', 'qtlibs'):
        opt.add_option('--' + name, type='string', default='', dest=name)
    opt.add_option('--translate', action="store_true",
                   help="collect translation strings", dest="trans_qt5",
                   default=False)
def run_code():
    """
    Forward a codec test-run request to the codec node and validate the result.

    codec api response
    {
        "error": {
            "decode:": "error message"
        },
        "output": {
            "status_code": 0,
            "result": {
                "data_type": "event",
                "data": {
                    "humidity": {
                        "time": 1547660823,
                        "value": 34
                    },
                    "temperature": {
                        "time": 1547660823,
                        "value": -3.7
                    }
                }
            }
        }
    }
    """
    request_json = CodeRunSchema.validate_request()
    analog_type = request_json.get('analogType')
    # Query the product's protocol only to confirm the product exists.
    protocol = db.session.query(Product.cloudProtocol) \
        .filter(Product.productID == request_json.get('productID')) \
        .scalar()
    if protocol is None:
        raise DataNotFound(field='productID')
    request_url = f"http://{current_app.config['CODEC_NODE']}/api/v1/codec"
    # Synchronously POST the request body to the codec node.
    with SyncHttp() as sync_http:
        response = sync_http.post(request_url, json=request_json)
    if response.responseCode != 200:
        try:
            errors = json.loads(response.responseContent)
        except Exception:
            # Response body was not JSON; wrap the raw content instead.
            errors = {
                'codec': response.responseContent
            }
        raise APIException(errors=errors)
    response_json = json.loads(response.responseContent)
    # return response if it has error
    if 'error' in response_json:
        return jsonify(response_json)
    output_data = response_json.get('output')
    status_code = output_data.get('status_code')
    # If status code is 1(ERROR)
    # or analog type is 2(encode)
    # return response without validate
    if status_code == 1 or analog_type == 2:
        return jsonify(response_json)
    result = output_data.get('result')
    error_dict = {}
    # Marshmallow 2-style load() returns (data, errors); keep only the first
    # message per field and drop its trailing punctuation character.
    validate_data, validate_error = DecodeSchema().load(result)
    for key, value in validate_error.items():
        error_dict[key] = value[0][:-1]
    # The decoded stream must belong to this tenant, product and topic.
    data_stream = DataStream.query \
        .filter(DataStream.productID == request_json.get('productID'),
                DataStream.tenantID == g.tenant_uid, DataStream.topic == request_json.get('topic'),
                DataStream.streamID == validate_data.get('stream_id')) \
        .first()
    if not data_stream:
        raise DataNotFound(field='data_stream')
    # Domain-level validation against the stream definition; merge any
    # per-field errors with the schema errors collected above.
    error, passed_data = validate_decode_response(data_stream, validate_data)
    error_dict.update(error)
    record = {
        'output': {
            'status_code': status_code,
            'result': passed_data
        }
    }
    if error_dict:
        record['error'] = error_dict
    return jsonify(record)
def _strip_paths(notebook_json: Mapping, project_root: Path):
"""Strip user paths from given notebook."""
project_root_string = str(project_root) + os.sep
mutated = False
for cell in notebook_json["cells"]:
if cell["cell_type"] == "code":
for output in cell["outputs"]:
for line_number, line in enumerate(output.get("text", [])):
if project_root_string in line:
output["text"][line_number] = line.replace(
project_root_string, ""
)
mutated = True
return notebook_json, mutated | 28,006 |
def build_dense_conf_block(x, filter_size=32, dropout_rate=None):
    """Build one dense-block layer following https://arxiv.org/pdf/1608.06993.pdf

    :param x: input tensor
    :param filter_size: number of filters of the final 3x3 convolution
    :param dropout_rate: optional dropout rate applied after the convolutions
    :return: output tensor of the block
    """
    out = BatchNormalization(axis=-1, epsilon=1.1e-5)(x)
    out = Activation('relu')(out)
    # 1x1 bottleneck with 4*k filters, then the 3x3 convolution with k filters.
    out = Conv2D(filter_size * 4, (1, 1), padding='same')(out)
    out = Conv2D(filter_size, (3, 3), padding='same')(out)
    if dropout_rate:
        out = Dropout(dropout_rate)(out)
    return out
def _viz_flow(u, v, logscale=True, scaledown=6):
    """
    Copied from @jswulff:
    https://github.com/jswulff/pcaflow/blob/master/pcaflow/utils/viz_flow.py
    top_left is zero, u is horizon, v is vertical
    red is 3 o'clock, yellow is 6, light blue is 9, blue/purple is 12

    Color-code a dense optical-flow field: direction selects the hue from a
    color wheel, magnitude controls saturation.

    :param u: horizontal flow component (2-D array)
    :param v: vertical flow component (2-D array, same shape as u)
    :param logscale: compress the magnitude with log(r + 1) before scaling
    :param scaledown: divisor applied to the magnitude; radii > 1 are dimmed
    :return: uint8 image of shape ``u.shape + (n_colors,)``
    """
    color_wheel = _color_wheel()
    n_cols = color_wheel.shape[0]
    radius = np.sqrt(u ** 2 + v ** 2)
    if logscale:
        radius = np.log(radius + 1)
    radius = radius / scaledown
    # Angle in [-pi, pi] mapped to [-1, 1].
    rot = np.arctan2(-v, -u) / np.pi
    fk = (rot + 1) / 2 * (n_cols - 1)  # -1~1 mapped to 0~n_cols
    # NOTE(review): uint8 truncation assumes the wheel has <= 256 entries --
    # confirm against _color_wheel().
    k0 = fk.astype(np.uint8)  # 0, 1, 2, ..., n_cols
    k1 = k0 + 1
    k1[k1 == n_cols] = 0  # wrap around the wheel
    f = fk - k0  # fractional part, blends the two neighboring wheel colors
    n_colors = color_wheel.shape[1]
    img = np.zeros(u.shape + (n_colors,))
    for i in range(n_colors):
        tmp = color_wheel[:, i]
        col0 = tmp[k0]
        col1 = tmp[k1]
        # Linear interpolation between the two nearest wheel entries.
        col = (1 - f) * col0 + f * col1
        idx = radius <= 1
        # increase saturation with radius
        col[idx] = 1 - radius[idx] * (1 - col[idx])
        # out of range
        col[~idx] *= 0.75
        img[:, :, i] = np.floor(255 * col).astype(np.uint8)
    return img.astype(np.uint8)
def submission_history(request, course_id, learner_identifier, location):
    """Render an HTML fragment (meant for inclusion elsewhere) that renders a
    history of all state changes made by this user for this problem location.
    Right now this only works for problems because that's all
    StudentModuleHistory records.

    Args:
        request: Django request; request.user must be the viewer.
        course_id (str): course key string.
        learner_identifier (str): identifier resolvable to a username.
        location (str): usage key string of the problem.
    """
    found_user_name = get_learner_username(learner_identifier)
    if not found_user_name:
        return HttpResponse(escape(_('User does not exist.')))
    course_key = CourseKey.from_string(course_id)
    try:
        usage_key = UsageKey.from_string(location).map_into_course(course_key)
    except (InvalidKeyError, AssertionError):
        return HttpResponse(escape(_('Invalid location.')))
    course = get_course_overview_with_access(request.user, 'load', course_key)
    staff_access = bool(has_access(request.user, 'staff', course))
    # Permission Denied if they don't have staff access and are trying to see
    # somebody else's submission history.
    if (found_user_name != request.user.username) and (not staff_access):
        raise PermissionDenied
    user_state_client = DjangoXBlockUserStateClient()
    try:
        history_entries = list(user_state_client.get_history(found_user_name, usage_key))
    except DjangoXBlockUserStateClient.DoesNotExist:
        return HttpResponse(escape(_('User {username} has never accessed problem {location}').format(
            username=found_user_name,
            location=location
        )))
    # This is ugly, but until we have a proper submissions API that we can use to provide
    # the scores instead, it will have to do.
    csm = StudentModule.objects.filter(
        module_state_key=usage_key,
        student__username=found_user_name,
        course_id=course_key)
    scores = BaseStudentModuleHistory.get_history(csm)
    if len(scores) != len(history_entries):
        log.warning(
            "Mismatch when fetching scores for student "
            "history for course %s, user %s, xblock %s. "
            "%d scores were found, and %d history entries were found. "
            "Matching scores to history entries by date for display.",
            course_id,
            found_user_name,
            location,
            len(scores),
            len(history_entries),
        )
        # Re-align scores to history entries by their creation timestamp.
        # NOTE(review): this raises KeyError when a history entry has no score
        # with an exactly matching timestamp -- confirm upstream guarantees
        # that every history.updated has a corresponding score.created.
        scores_by_date = {
            score.created: score
            for score in scores
        }
        scores = [
            scores_by_date[history.updated]
            for history in history_entries
        ]
    context = {
        'history_entries': history_entries,
        'scores': scores,
        'username': found_user_name,
        'location': location,
        'course_id': str(course_key)
    }
    return render_to_response('courseware/submission_history.html', context)
def tou(month, weekday, hour):
    """Return the time-of-use price tier for a given month, weekday and hour.

    Weekdays 0 and 6 are billed off-peak regardless of the time; otherwise
    the tier depends on the season (months 5-10 vs. the rest) and the hour.
    """
    weekend_days = (0, 6)
    summer_months = (5, 6, 7, 8, 9, 10)
    midday_hours = (11, 12, 13, 14, 15, 16)
    shoulder_hours = (7, 8, 9, 10, 17, 18, 19, 20)
    if weekday in weekend_days:
        return OFFPEAK
    if month in summer_months:
        # Summer season: midday is the expensive window.
        if hour in midday_hours:
            return ONPEAK
        if hour in shoulder_hours:
            return MIDPEAK
        return OFFPEAK
    # Winter season: morning/evening shoulders are the expensive windows.
    if hour in midday_hours:
        return MIDPEAK
    if hour in shoulder_hours:
        return ONPEAK
    return OFFPEAK
def build_doctree(tree, prefix, parent):
    """Recursively populate ``tree`` with format:
    dict key = full class/type name (e.g, "confluent_kafka.Message.timestamp")
    value = list containing the attribute object

    ``tree`` must support ``tree[key].append`` (e.g. a defaultdict(list)).
    """
    for attr_name in dir(parent):
        # Skip internals and the C module (it is automatically imported
        # to other names in __init__.py).
        if attr_name.startswith('__') or attr_name == 'cimpl':
            continue
        obj = parent.__dict__.get(attr_name)
        qualified = prefix + attr_name
        tree[qualified].append(obj)
        if not hasattr(obj, '__dict__'):
            continue
        # Recurse into classes and into our own (sub)modules, but not into
        # unrelated third-party modules.
        from_ck = obj.__dict__.get('__module__', '').startswith('confluent_kafka.')
        from_cimpl = obj.__dict__.get('__module__', '').startswith('cimpl.')
        if from_ck or from_cimpl or not isinstance(obj, ModuleType):
            build_doctree(tree, qualified + '.', obj)
def get_job(request):
    """ Retrieve a specific Job
    URL: /admin/Jobs/GetOne

    :param request: Django request; expects an ``id`` GET parameter
    :return: HttpResponse whose body is a JSON object with ``status``,
        ``status_message`` and ``job`` (a list of field dicts, empty when
        no job matches)
    """
    job_id = request.GET.dict().get("id")
    response = {
        'status': 1,
        'status_message': 'Success',
        # .values() yields plain dicts; a raw QuerySet is not JSON
        # serializable and would make json.dumps() raise TypeError.
        'job': list(job.objects.filter(id=job_id).values())
    }
    return HttpResponse(json.dumps(response))
def cpt_lvq_merid_deriv(temp, sphum):
    """Meridional derivative of c_p*T + L_v*q on pressure coordinates."""
    moist_energy = cpt_lvq(temp, sphum)
    return LatCenDeriv(moist_energy, LAT_STR).deriv()
def ZonalStats(fhs, dates, output_dir, quantity, unit, location, color = '#6bb8cc'):
    """
    Calculate and plot some statistics of a timeseries of maps.

    Parameters
    ----------
    fhs : ndarray
        Filehandles pointing to maps.
    dates : ndarray
        Datetime.date object corresponding to fhs.
    output_dir : str
        Folder to save the graphs.
    quantity : str
        Quantity of the maps.
    unit : str
        Unit of the maps.
    location : str
        Location name of the maps.
    color : str, optional
        Color in which the graphs will be plotted, default is '#6bb8cc'.

    Returns
    -------
    monthly_max : float
        Maximum of the monthly spatial averages.
    monthly_average : float
        Monthly spatial average.
    yearly_average : float
        Yearly spatial average (only years with all 12 months present).

    Examples
    --------
    >>> ZonalStats(p_fhs, p_dates, output_dir, 'Precipitation', 'mm/month', 'North-Vietnam')
    >>> ZonalStats(et_fhs, et_dates, output_dir, 'Evapotranspiration', 'mm/month', 'South-Vietnam')
    """
    ts = np.array([])
    # Running sums and counts of the per-map spatial means, per month-of-year
    # and per calendar year.
    data_monthly_ts = dict()
    data_monthly_counter = dict()
    months = np.unique([date.month for date in dates])
    for month in months:
        data_monthly_ts[month] = 0
        data_monthly_counter[month] = 0
    data_yearly_ts = dict()
    data_yearly_counter = dict()
    years = np.unique([date.year for date in dates])
    for year in years:
        data_yearly_ts[year] = 0
        data_yearly_counter[year] = 0
    for date in dates:
        DATA = OpenAsArray(fhs[dates == date][0], nan_values = True)
        data = np.nanmean(DATA)
        ts = np.append(ts, data)
        data_monthly_ts[date.month] += data
        data_monthly_counter[date.month] += 1
        data_yearly_ts[date.year] += data
        data_yearly_counter[date.year] += 1
    # dict views must be materialized with list() before handing them to
    # numpy: under Python 3, np.array(d.values()) builds a useless 0-d object
    # array (the original Python 2 code relied on .values() returning lists).
    monthly_ts = np.array(list(data_monthly_ts.values())) / np.array(list(data_monthly_counter.values()))
    months = np.array(list(data_monthly_ts.keys()))
    # Only keep years for which all 12 months are present.
    yearly_mask = np.array(list(data_yearly_counter.values())) == 12
    yearly_ts = np.array(list(data_yearly_ts.values()))[yearly_mask] / np.array(list(data_yearly_counter.values()))[yearly_mask]
    years = np.array(list(data_yearly_ts.keys()))[yearly_mask]
    idx = np.argsort(dates)
    # --- time-series plot ---
    fig = plt.figure(figsize = (10,5))
    plt.clf()
    plt.grid(b=True, which='Major', color='0.65',linestyle='--', zorder = 0)
    ax = plt.subplot(111)
    ax.plot(dates[idx], ts[idx], '-k')
    ax.fill_between(dates[idx], ts[idx], color = color)
    ax.set_xlabel('Time')
    ax.set_ylabel(quantity + ' ' + unit)
    ax.set_title(quantity + ', ' + location)
    fig.autofmt_xdate()
    # dict/view .itervalues() does not exist in Python 3; .values() works.
    [i.set_zorder(10) for i in ax.spines.values()]
    plt.savefig(os.path.join(output_dir, quantity + '_' + location + '_ts.png'))
    plt.close(fig)
    # --- monthly-average bar chart ---
    fig = plt.figure(figsize = (10,5))
    plt.clf()
    plt.grid(b=True, which='Major', color='0.65',linestyle='--', zorder = 0)
    ax = plt.subplot(111)
    ax.bar(months - 0.4, monthly_ts, 0.8, color = color)
    ax.set_xlabel('Time [month]')
    ax.set_xlim([0, max(months)+1])
    ax.set_xticks(months)
    ax.set_ylabel(quantity + ' ' + unit)
    ax.set_title('Monthly average ' + quantity + ', ' + location)
    [i.set_zorder(10) for i in ax.spines.values()]
    plt.savefig(os.path.join(output_dir, quantity + '_' + location + '_monthly.png'))
    plt.close(fig)
    # --- yearly-average bar chart ---
    fig = plt.figure(figsize = (10,5))
    plt.clf()
    plt.grid(b=True, which='Major', color='0.65',linestyle='--', zorder = 0)
    ax = plt.subplot(111)
    ax.bar(years - 0.4, yearly_ts, 0.8, color = color)
    ax.set_xlabel('Time [year]')
    ax.set_xlim([min(years) - 1, max(years)+1])
    ax.set_ylabel(quantity + ' ' + unit)
    ax.set_title('Yearly average ' + quantity + ', ' + location)
    [i.set_zorder(10) for i in ax.spines.values()]
    plt.savefig(os.path.join(output_dir, quantity + '_' + location + '_yearly.png'))
    plt.close(fig)
    monthly_max = np.nanmax(monthly_ts)
    monthly_average = np.nanmean(monthly_ts)
    yearly_average = np.nanmean(yearly_ts)
    return monthly_max, monthly_average, yearly_average
def test_zaehlpunkte(client):
    """Verify that the client reports at least one Zaehlpunkt entry."""
    result = client.zaehlpunkte()
    # Both the outer list and the first account's inner list must be non-empty.
    assert len(result) > 0
    assert len(result[0]["zaehlpunkte"]) > 0
def callback(event):
    """ Reacts to a user click on the window.

    Drives the state machine kept in the module-level ``status`` flag:
    init -> (ignored), ready -> race or distances, race <-> paused,
    distances -> ready, finished -> exit.

    :param event: GUI event object delivered by the binding (unused).
    """
    # switch on the status
    global status
    # if still initializing, ignore the click and don't do anything
    if status == Status.init:
        return
    # if ready, start the race or show the distance map
    elif status == Status.ready:
        if cars:
            status = Status.race
            # Run the race on a worker thread so the GUI stays responsive.
            t = threading.Thread(target = start_race)
            t.start()
        else:
            status = Status.distances
            draw_map()
    # if the race is running, pause the race
    elif status == Status.race:
        status = Status.paused
    # if the race is paused, continue running
    elif status == Status.paused:
        status = Status.race
    # if the distance map is shown, show the normal map
    elif status == Status.distances:
        status = Status.ready
        draw_map()
    # if the race is finished, exit the program
    elif status == Status.finished:
        sys.exit(0)
def hubert_pretrain_large(
    encoder_projection_dropout: float = 0.0,
    encoder_attention_dropout: float = 0.0,
    encoder_ff_interm_dropout: float = 0.0,
    encoder_dropout: float = 0.0,
    encoder_layer_drop: float = 0.0,
) -> HuBERTPretrainModel:
    # Overriding the signature so that the return type is correct on Sphinx
    """hubert_pretrain_large(encoder_projection_dropout: float = 0.0, encoder_attention_dropout: float = 0.0, encoder_ff_interm_dropout: float = 0.0, encoder_dropout: float = 0.0, encoder_layer_drop: float = 0.0) -> torchaudio.models.HuBERTPretrainModel
    Build HuBERTPretrainModel model for pre-training with "large" architecture from *HuBERT* [:footcite:`hsu2021hubert`]
    Args:
        encoder_projection_dropout (float):
            See :py:func:`hubert_pretrain_model`.
        encoder_attention_dropout (float):
            See :py:func:`hubert_pretrain_model`.
        encoder_ff_interm_dropout (float):
            See :py:func:`hubert_pretrain_model`.
        encoder_dropout (float):
            See :py:func:`hubert_pretrain_model`.
        encoder_layer_drop (float):
            See :py:func:`hubert_pretrain_model`.
    Returns:
        HuBERTPretrainModel:
            The resulting model.
    """  # noqa: E501
    # Architecture hyper-parameters fixed by the "large" configuration;
    # only the dropout-style knobs above are caller-adjustable.
    large_config = dict(
        extractor_mode="layer_norm",
        extractor_conv_layer_config=None,
        extractor_conv_bias=False,
        encoder_embed_dim=1024,
        encoder_pos_conv_kernel=128,
        encoder_pos_conv_groups=16,
        encoder_num_layers=24,
        encoder_num_heads=16,
        encoder_ff_interm_features=4096,
        encoder_layer_norm_first=True,
        mask_prob=0.80,
        mask_selection="static",
        mask_other=0.0,
        mask_length=10,
        no_mask_overlap=False,
        mask_min_space=1,
        mask_channel_prob=0.0,
        mask_channel_selection="static",
        mask_channel_other=0.0,
        mask_channel_length=10,
        no_mask_channel_overlap=False,
        mask_channel_min_space=1,
        skip_masked=False,
        skip_nomask=False,
        num_classes=500,
        final_dim=768,
    )
    return hubert_pretrain_model(
        encoder_projection_dropout=encoder_projection_dropout,
        encoder_attention_dropout=encoder_attention_dropout,
        encoder_ff_interm_dropout=encoder_ff_interm_dropout,
        encoder_dropout=encoder_dropout,
        encoder_layer_drop=encoder_layer_drop,
        **large_config,
    )
def random_scaled_rotation(ralpha=(-0.2, 0.2), rscale=((0.8, 1.2), (0.8, 1.2))):
    """Compute a random 2x2 transformation matrix for a scaled rotation.

    :param ralpha: (lo, hi) range of rotation angles, or None for no rotation
    :param rscale: ((x0, x1), (y0, y1)) ranges of x/y scales, or None for no scaling
    :returns: random 2x2 transformation matrix (rotation applied after scaling)
    """
    transform = np.eye(2)
    if rscale is not None:
        (sx_lo, sx_hi), (sy_lo, sy_hi) = rscale
        transform = np.diag([npr.uniform(sx_lo, sx_hi), npr.uniform(sy_lo, sy_hi)])
    if ralpha is not None:
        lo, hi = ralpha
        angle = npr.uniform(lo, hi)
        rotation = np.array(
            [[cos(angle), -sin(angle)], [sin(angle), cos(angle)]], 'f'
        )
        transform = np.dot(rotation, transform)
    return transform
def time_as_int() -> int:
    """Return the current Unix timestamp truncated to a whole second."""
    now = time.time()
    return int(now)
def test_end_response_is_one_send():
    """Test that ``HAPServerHandler`` sends the whole response at once."""

    class ConnectionMock:
        # NOTE(review): class-level attribute, so sent_bytes is shared by all
        # ConnectionMock instances -- harmless here since only one instance
        # is created, but confirm before reusing this mock elsewhere.
        sent_bytes = []

        def sendall(self, bytesdata):
            # One list entry per sendall() call; the test asserts there is
            # exactly one entry (headers + body in a single send).
            self.sent_bytes.append([bytesdata])
            return 1

        def getsent(self):
            return self.sent_bytes

    amock = Mock()
    # Patch out socket setup/teardown so the handler can be built without a
    # real connection.
    with patch("pyhap.hap_server.HAPServerHandler.setup"), patch(
        "pyhap.hap_server.HAPServerHandler.handle_one_request"
    ), patch("pyhap.hap_server.HAPServerHandler.finish"):
        handler = hap_server.HAPServerHandler(
            "mocksock", "mockclient_addr", "mockserver", amock
        )
        handler.request_version = "HTTP/1.1"
        handler.connection = ConnectionMock()
        handler.requestline = "GET / HTTP/1.1"
        handler.send_response(200)
        handler.wfile = MagicMock()
        handler.end_response(b"body")
    # Status line, headers and body must arrive in a single sendall() call.
    assert handler.connection.getsent() == [
        [b"HTTP/1.1 200 OK\r\nContent-Length: 4\r\n\r\nbody"]
    ]
    # Header buffer must be flushed and nothing written via wfile.
    assert handler._headers_buffer == []  # pylint: disable=protected-access
    assert handler.wfile.called_once()
def create_schema(conn):
    """Create the trace database schema on a given SQLite3 connection.

    Executes each CREATE TABLE / CREATE INDEX statement in order; raises
    sqlite3.OperationalError if any object already exists.
    """
    statements = (
        '''
        CREATE TABLE processes(
            id INTEGER NOT NULL PRIMARY KEY,
            run_id INTEGER NOT NULL,
            parent INTEGER,
            timestamp INTEGER NOT NULL,
            exit_timestamp INTEGER,
            cpu_time INTEGER,
            is_thread BOOLEAN NOT NULL,
            exitcode INTEGER
        );
        ''',
        '''
        CREATE INDEX proc_parent_idx ON processes(parent);
        ''',
        '''
        CREATE TABLE opened_files(
            id INTEGER NOT NULL PRIMARY KEY,
            run_id INTEGER NOT NULL,
            name TEXT NOT NULL,
            timestamp INTEGER NOT NULL,
            mode INTEGER NOT NULL,
            is_directory BOOLEAN NOT NULL,
            process INTEGER NOT NULL
        );
        ''',
        '''
        CREATE INDEX open_proc_idx ON opened_files(process);
        ''',
        '''
        CREATE TABLE executed_files(
            id INTEGER NOT NULL PRIMARY KEY,
            name TEXT NOT NULL,
            run_id INTEGER NOT NULL,
            timestamp INTEGER NOT NULL,
            process INTEGER NOT NULL,
            argv TEXT NOT NULL,
            envp TEXT NOT NULL,
            workingdir TEXT NOT NULL
        );
        ''',
        '''
        CREATE INDEX exec_proc_idx ON executed_files(process);
        ''',
        '''
        CREATE TABLE connections(
            id INTEGER NOT NULL PRIMARY KEY,
            run_id INTEGER NOT NULL,
            timestamp INTEGER NOT NULL,
            process INTEGER NOT NULL,
            inbound INTEGER NOT NULL,
            family TEXT NULL,
            protocol TEXT NULL,
            address TEXT NULL
        );
        ''',
        '''
        CREATE INDEX connections_proc_idx ON connections(process);
        ''',
    )
    for statement in statements:
        conn.execute(statement)
def compute_norm_cond_entropy_corr(data_df, attrs_from, attrs_to):
    """
    Computes the correlations between attributes by calculating
    the normalized conditional entropy between them. The conditional
    entropy is asymmetric, therefore we need pairwise computation.

    The computed correlations are stored in a dictionary in the format:
    {
      attr_a: { cond_attr_i: corr_strength_a_i,
                cond_attr_j: corr_strength_a_j, ... },
      attr_b: { cond_attr_i: corr_strength_b_i, ...}
    }

    :param data_df: dataframe whose columns include attrs_from and attrs_to;
        missing cells are marked with the NULL_REPR sentinel value
    :param attrs_from: attributes whose entropy is conditioned (dict keys)
    :param attrs_to: conditioning attributes (inner dict keys)
    :return: a dictionary of correlations
    """
    corr = {}
    # Compute pair-wise conditional entropy.
    for x in attrs_from:
        corr[x] = {}
        for y in attrs_to:
            # Set correlation to 1 for same attributes.
            if x == y:
                corr[x][y] = 1.0
                continue
            # Drop rows where either attribute is NULL.
            xy_df = data_df[[x, y]]
            xy_df = xy_df.loc[~(xy_df[x] == NULL_REPR) & ~(xy_df[y] == NULL_REPR)]
            x_vals = xy_df[x]
            x_domain_size = x_vals.nunique()
            # Set correlation to 0.0 if entropy of x is 1 (only one possible value).
            if x_domain_size == 1 or len(xy_df) == 0:
                corr[x][y] = 0.0
                continue
            # Compute the conditional entropy H(x|y) = H(x,y) - H(y).
            # H(x,y) denotes H(x U y).
            # If H(x|y) = 0, then y determines x, i.e., y -> x.
            # Use the domain size of x as a log base for normalization.
            y_vals = xy_df[y]
            x_y_entropy = drv.entropy_conditional(x_vals, y_vals, base=x_domain_size).item()
            # The conditional entropy is 0 for strongly correlated attributes and 1 for
            # completely independent attributes. We reverse this to reflect the correlation.
            corr[x][y] = 1.0 - x_y_entropy
    return corr
def should_retry_http_code(status_code):
    """
    :param status_code: (int) http status code to check for retry eligibility
    :return: (bool) whether or not responses with the status_code should be retried
    """
    # 2xx/3xx/4xx responses are final; informational (<200) and server
    # errors (>=500) are eligible for retry.
    non_retryable = range(200, 500)
    return status_code not in non_retryable
def softmax(x):
    """Calculate the softmax of each row of the input x.

    Works for matrices of shape (n, m); a row vector must be shaped (1, m).

    Argument:
    x -- A numpy matrix of shape (n, m)

    Returns:
    s -- A numpy matrix equal to the softmax of x, of shape (n, m)
    """
    exps = np.exp(x)
    # Row sums kept 2-D so broadcasting divides each row by its own total.
    row_totals = exps.sum(axis=1, keepdims=True)
    return exps / row_totals
def detect_gid_list(ibs, gid_list, verbose=VERBOSE_AZURE, **kwargs):
    """Detect gid_list with azure.

    Args:
        ibs (wbia.IBEISController): image analysis api
        gid_list (list of int): the list of IBEIS image_rowids that need detection
        verbose (bool): verbosity flag forwarded to the detector

    Kwargs (optional): refer to the Azure documentation for configuration settings
        (detector, config_filepath, weights_filepath, verbose)

    Yields:
        tuple: (gid, gpath, result_list)
    """
    # Get new gpaths if downsampling
    # Detection runs on 900px thumbnails rendered without annotation overlays.
    config = {
        'draw_annots': False,
        'thumbsize': 900,
    }
    gpath_list = ibs.get_image_thumbpath(gid_list, ensure_paths=True, **config)
    size_list = ibs.get_image_sizes(gid_list)
    # Run detection
    results_iter = detect(gpath_list, verbose=verbose, **kwargs)
    # Upscale the results
    _iter = zip(gid_list, size_list, results_iter)
    for gid, size, (gpath, result_list) in _iter:
        width, height = size
        # Upscale the results back up to the original image size
        # NOTE(review): scaling by the full image size implies the detector
        # returns coordinates normalized to [0, 1] -- confirm against detect().
        for result in result_list:
            result['xtl'] = int(np.around(result['xtl'] * width))
            result['ytl'] = int(np.around(result['ytl'] * height))
            result['width'] = int(np.around(result['width'] * width))
            result['height'] = int(np.around(result['height'] * height))
        yield (gid, gpath, result_list)
def inertia_tensor_eigvals(image, mu=None, T=None):
    """Compute the eigenvalues of the inertia tensor of the image.

    The inertia tensor measures covariance of the image intensity along
    the image axes. (See `inertia_tensor`.) The relative magnitude of the
    eigenvalues of the tensor is thus a measure of the elongation of a
    (bright) object in the image.

    Parameters
    ----------
    image : array
        The input image.
    mu : array, optional
        The pre-computed central moments of ``image``.
    T : array, shape ``(image.ndim, image.ndim)``
        The pre-computed inertia tensor. If ``T`` is given, ``mu`` and
        ``image`` are ignored.

    Returns
    -------
    eigvals : list of float, length ``image.ndim``
        The eigenvalues of the inertia tensor of ``image``, in descending
        order.

    Notes
    -----
    Computing the eigenvalues requires the inertia tensor of the input image.
    This is much faster if the central moments (``mu``) are provided, or,
    alternatively, one can provide the inertia tensor (``T``) directly.
    """
    tensor = inertia_tensor(image, mu) if T is None else T
    values = np.linalg.eigvalsh(tensor)
    # The tensor is positive semidefinite, but floating point round-off can
    # produce tiny negative eigenvalues; clamp them to zero to avoid problems
    # downstream.
    np.clip(values, 0, None, out=values)
    return sorted(values, reverse=True)
def test_info():
    """Test for qcengine info"""
    # Run every sub-command; "all" and "config" output changes call-to-call
    # (e.g. with available memory), so only the stable ones are compared.
    labelled_outputs = [
        (choice, run_qcengine_cli(["info", choice])) for choice in cli.info_choices
    ]
    default_output = run_qcengine_cli(["info"])
    for choice, text in labelled_outputs:
        if choice not in {"all", "config"}:
            assert text in default_output
def extract_values(inst):
    """Return the Python value(s) carried by *inst* unchanged.

    :param inst: the instance
    :return: python values extracted from the instance
    """
    # Identity: instances reaching this point are already plain Python values.
    return inst
def voc_ap(rec, prec, use_07_metric=False):
    """Compute VOC average precision given recall and precision arrays.

    If ``use_07_metric`` is true, uses the VOC 07 11-point interpolation;
    otherwise the exact area under the interpolated PR curve (default: False).
    """
    if use_07_metric:
        # 11 point metric: average the max precision at recall >= t.
        ap = 0.
        for threshold in np.arange(0., 1.1, 0.1):
            mask = rec >= threshold
            interpolated = np.max(prec[mask]) if np.sum(mask) != 0 else 0
            ap = ap + interpolated / 11.
        return ap
    # correct AP calculation: append sentinel values at both ends.
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))
    # Build the precision envelope (monotonically non-increasing).
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # Area under the PR curve: sum (delta recall) * precision at the points
    # where the recall value changes.
    changed = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[changed + 1] - mrec[changed]) * mpre[changed + 1])
def is_on_path(prog):
    """Checks if a given executable is on the current PATH."""
    # Delegate to the shell: `which` fails when prog cannot be resolved.
    r = runcmd("which %s" % prog)
    if r.failed:
        return False
    else:
        # NOTE(review): returns the (truthy) command result object rather
        # than True, presumably so callers can inspect the resolved path --
        # confirm this is intentional before changing to a plain bool.
        return r
def validate_entry(new_text) -> bool:
    """Validation callback for the entry fields of the ExperimentPCR window.

    Called by the entry widget every time the user tries to insert a value.
    A valid entry must satisfy both requirements:
      - it consists only of an integer number, and
      - it is at most 3 characters long.

    :param new_text: Prospective field content, passed by the entry widget.
    :return: bool - whether the entry is valid (reported back to the widget).
    """
    if new_text == '':  # allow backspace / clearing the field
        return True
    try:
        int(new_text)
    except ValueError:
        return False
    # The original fell through to an implicit None for valid numbers longer
    # than 3 characters; return an explicit bool so the widget always
    # receives a proper truth value.
    return len(new_text) <= 3
def eia_cbecs_land_call(*, resp, url, **_):
    """
    Convert response for calling url to pandas dataframe, begin
    parsing df into FBA format

    :param resp: df, response from url call
    :param url: string, url
    :return: pandas dataframe of original source data
    """
    # Convert response to dataframe
    # Each workbook carries values on the 'data' sheet and relative standard
    # errors (RSE) on the 'rse' sheet.
    df_raw_data = pd.read_excel(io.BytesIO(resp.content),
                                sheet_name='data')
    df_raw_rse = pd.read_excel(io.BytesIO(resp.content),
                               sheet_name='rse')
    if "b5.xlsx" in url:
        # skip rows and remove extra rows at end of dataframe
        df_data = pd.DataFrame(df_raw_data.loc[15:32]).reindex()
        df_rse = pd.DataFrame(df_raw_rse.loc[15:32]).reindex()
        # Table B5 columns are census divisions.
        df_data.columns = ["Name", "All buildings", "New England",
                           "Middle Atlantic", "East North Central",
                           "West North Central", "South Atlantic",
                           "East South Central", "West South Central",
                           "Mountain", "Pacific"]
        df_rse.columns = ["Name", "All buildings", "New England",
                          "Middle Atlantic", "East North Central",
                          "West North Central", "South Atlantic",
                          "East South Central", "West South Central",
                          "Mountain", "Pacific"]
        # Reshape wide -> long: one row per (Name, Location) value.
        df_rse = df_rse.melt(id_vars=["Name"],
                             var_name="Location",
                             value_name="Spread")
        df_data = df_data.melt(id_vars=["Name"],
                               var_name="Location",
                               value_name="FlowAmount")
    if "b12.xlsx" in url:
        # skip rows and remove extra rows at end of dataframe
        df_data1 = pd.DataFrame(df_raw_data[4:5]).reindex()
        df_data2 = pd.DataFrame(df_raw_data.loc[46:50]).reindex()
        df_data = pd.concat([df_data1, df_data2], ignore_index=True)
        df_rse1 = pd.DataFrame(df_raw_rse[4:5]).reindex()
        df_rse2 = pd.DataFrame(df_raw_rse.loc[46:50]).reindex()
        df_rse = pd.concat([df_rse1, df_rse2], ignore_index=True)
        # drop the empty columns at end of df
        df_data = df_data.iloc[:, 0:9]
        df_rse = df_rse.iloc[:, 0:9]
        # Table B12 columns are principal building activities.
        df_data.columns = ["Description", "All buildings", "Office",
                           "Warehouse and storage", "Service",
                           "Mercantile", "Religious worship",
                           "Education", "Public assembly"]
        df_rse.columns = ["Description", "All buildings", "Office",
                          "Warehouse and storage", "Service",
                          "Mercantile", "Religious worship",
                          "Education", "Public assembly"]
        df_rse = df_rse.melt(id_vars=["Description"],
                             var_name="Name",
                             value_name="Spread")
        df_data = df_data.melt(id_vars=["Description"],
                               var_name="Name",
                               value_name="FlowAmount")
    if "b14.xlsx" in url:
        # skip rows and remove extra rows at end of dataframe
        df_data = pd.DataFrame(df_raw_data.loc[27:31]).reindex()
        df_rse = pd.DataFrame(df_raw_rse.loc[27:31]).reindex()
        # drop the empty columns at end of df
        df_data = df_data.iloc[:, 0:8]
        df_rse = df_rse.iloc[:, 0:8]
        # Table B14 columns are further building activities.
        df_data.columns = ["Description", "All buildings", "Food service",
                           "Food sales", "Lodging", "Health care In-Patient",
                           "Health care Out-Patient",
                           "Public order and safety"]
        df_rse.columns = ["Description", "All buildings", "Food service",
                          "Food sales", "Lodging", "Health care In-Patient",
                          "Health care Out-Patient", "Public order and safety"]
        df_rse = df_rse.melt(id_vars=["Description"],
                             var_name="Name",
                             value_name="Spread")
        df_data = df_data.melt(id_vars=["Description"],
                               var_name="Name",
                               value_name="FlowAmount")
    # Merge the values with their spreads on the shared id columns.
    # NOTE(review): if url matches none of b5/b12/b14, df_rse and df_data are
    # unbound and this raises NameError -- confirm callers only pass those urls.
    df = pd.merge(df_rse, df_data)
    return df
def add_l2_interface(interface_name, interface_desc=None, interface_admin_state="up", **kwargs):
    """
    Perform a POST call to create an Interface table entry for a physical L2
    interface, dispatching to the REST API version indicated by the URL.

    :param interface_name: Alphanumeric Interface name
    :param interface_desc: Optional description for the interface. Defaults to nothing if not specified.
    :param interface_admin_state: Optional administratively-configured state of the interface.
        Defaults to "up" if not specified
    :param kwargs:
        keyword s: requests.session object with loaded cookie jar
        keyword url: URL in main() function
    :return: True if successful, False otherwise
    """
    # v1 uses the legacy Port table; later versions (e.g. v10.04) use the
    # Interface table directly.
    use_v1 = kwargs["url"].endswith("/v1/")
    if use_v1:
        return port.add_l2_port(interface_name, interface_desc, interface_admin_state, **kwargs)
    return _add_l2_interface(interface_name, interface_desc, interface_admin_state, **kwargs)
def language_descriptions():
    """
    Return a dict of `LanguageDesc` instances keyed by language name.

    The registry is built lazily on first call by scanning installed
    distributions for ``textx_languages`` entry points, then cached in the
    module-level ``languages`` global for subsequent calls.
    """
    global languages
    if languages is None:
        languages = {}
        for language in pkg_resources.WorkingSet().iter_entry_points(
                group='textx_languages'):
            # Register the loaded language together with the metadata of the
            # distribution that provides it.
            register_language_with_project(language.load(),
                                           language.dist.project_name,
                                           language.dist.version)
    return languages
async def test_async_get_next_pickup_event_no_event_left(aresponses):
    """Test getting the next pickup event. No event after today."""
    # Mock the ekosystem.wroc.pl AJAX endpoint with fixture data that
    # contains no pickup events after today's date.
    aresponses.add(
        "ekosystem.wroc.pl",
        "/wp-admin/admin-ajax.php",
        "post",
        aresponses.Response(
            text=load_fixture("pickup_data_response_1.json"),
            status=200,
            headers={"Content-Type": "text/html"},
        ),
    )
    async with ClientSession() as session:
        client = Client(TEST_LOCALIZATION_ID, TEST_STREET_ID, session=session)
        # With no future event available, the client must raise DataError.
        with pytest.raises(DataError):
            await client.async_get_next_pickup_event()
def load_fixtures(fixtures_dict=None):
    """
    Loads fixtures specified in fixtures_dict. This method must be
    used for fixtures that don't have associated data models. We
    simply want to load the meta into dict objects.
    fixtures_dict should be of the form:
    {
        'actionchains': ['actionchain1.json', 'actionchain2.json'],
        'workflows': ['workflow.yaml']
    }
    :param fixtures_dict: Dictionary specifying the fixtures to load for each type.
    :type fixtures_dict: ``dict``
    :rtype: ``dict``
    """
    if fixtures_dict is None:
        fixtures_dict = {}
    all_fixtures = {}
    fixtures_base_path = get_fixtures_base_path()
    # Plain dict.items() instead of six.iteritems: Python 3 dicts iterate
    # lazily already, so the six shim is unnecessary.
    for fixture_type, fixtures in fixtures_dict.items():
        loaded_fixtures = {}
        for fixture in fixtures:
            fixture_path = fixtures_base_path + '/' + fixture
            loaded_fixtures[fixture] = load_content(fixture_path)
        all_fixtures[fixture_type] = loaded_fixtures
    return all_fixtures
def generate_sbm(sizes, probs, maxweight=1):
    """Generate a Stochastic Block Model graph with random integer edge weights.

    Each edge weight is drawn uniformly from {1, ..., maxweight}.

    sizes : list of sizes (int) of the blocks
    probs : matrix of probabilities (in [0, 1]) of edge creation
            between nodes depending on the blocks they belong to
    maxweight : maximum value of the weights to randomly assign
                (default 1, resulting in weights all equal to 1)
    """
    graph = nx.stochastic_block_model(sizes, probs)
    # np.random.choice(maxweight, n) yields values in [0, maxweight); shift
    # by one so weights fall in [1, maxweight].
    weight_values = np.random.choice(maxweight, len(graph.edges)) + 1
    nx.set_edge_attributes(graph, dict(zip(graph.edges, weight_values)), 'weight')
    return graph
def eq_portions(actual: str, expected: str):
    """
    Compare whether actual matches portions of expected. The portions to ignore are of two types:
    - ***: ignore anything in between the left and right portions, including empty
    - +++: ignore anything in between left and right, but non-empty
    :param actual: string to test
    :param expected: expected string, containing at least one of the two patterns
    :return: a list of the portions ignored; if empty, it means there is no match.
    >>> eq_portions('', '+++aaaaaa***ccccc+++eeeeeee+++')
    ()
    >>> eq_portions('_1__aaaaaa__2__ccccc_3__eeeeeee_4_', '+++aaaaaa***ccccc+++eeeeeee+++')
    ('_1__', '__2__', '_3__', '_4_')
    >>> eq_portions('_1__aaaaaaccccc_3__eeeeeee_4_', '+++aaaaaa***ccccc+++eeeeeee+++')
    ('_1__', '', '_3__', '_4_')
    >>> eq_portions('_1__aaaaaaccccc_3__eeeeeee', '+++aaaaaa***ccccc+++eeeeeee+++')
    ()
    >>> eq_portions('aaaaaaccccc_3__eeeeeee', '+++aaaaaa***ccccc+++eeeeeee')
    ()
    >>> eq_portions('aaaaaa_1__ccccc__2_eeeeeee', '***aaaaaa***ccccc+++eeeeeee***')
    ('', '_1__', '__2_', '')
    >>> eq_portions('aaaaaa___ccccc___eeeeeee', '***aaaaaa')
    ()
    >>> eq_portions('aaaaaa___ccccc___eeeeeee', 'aaaaaa')
    Traceback (most recent call last):
    ...
    ValueError: The 'expected' argument must contain at least one *** OR +++
    """
    pattern = re.escape(expected)
    # After re.escape, '***' appears as '\*\*\*' (and '+++' as '\+\+\+');
    # build patterns that match those escaped wildcard markers literally.
    wildcard_any = re.escape('\\*' * 3)
    wildcard_some = re.escape('\\+' * 3)
    if not re.search(wildcard_any, pattern) and not re.search(wildcard_some, pattern):
        raise ValueError("The 'expected' argument must contain at least one *** OR +++")
    pattern = re.sub(wildcard_some, '(.+)', pattern)
    pattern = re.sub(wildcard_any, '(.*)', pattern)
    match = re.fullmatch(pattern, actual)
    return match.groups() if match else ()
def _get_encoder(
        in_features: int,
        embed_dim: int,
        dropout_input: float,
        pos_conv_kernel: int,
        pos_conv_groups: int,
        num_layers: int,
        num_heads: int,
        attention_dropout: float,
        ff_interm_features: int,
        ff_interm_dropout: float,
        dropout: float,
        layer_norm_first: bool,
        layer_drop: float,
) -> Encoder:
    """Construct the wav2vec2 ``Encoder`` (feature projection + transformer).

    The quoted names below refer to the corresponding fairseq wav2vec2
    configuration options; see
    https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py
    for their definitions and the pretraining/finetuning YAML configs for the
    Base / Large values noted here.

    Args:
        in_features (int): The number of input features.
        embed_dim (int): Embedding dimension ("encoder_embed_dim").
            768 for the Base architecture, 1024 for Large.
        dropout_input (float): Dropout applied after the input feature is
            projected to ``embed_dim`` ("dropout_input"). 0.1 for Base/Large.
        pos_conv_kernel (int): Kernel size of the convolutional positional
            embedding ("conv_pos"). 128 for Base/Large.
        pos_conv_groups (int): Number of groups of the convolutional
            positional embedding ("conv_pos_groups"). 16 for Base/Large.
        num_layers (int): Number of self-attention layers in the transformer
            block ("encoder_layers"). 12 for Base, 24 for Large.
        num_heads (int): Number of heads in the self-attention layers
            ("encoder_attention_heads"). 12 for Base, 16 for Large.
        attention_dropout (float): Dropout applied after softmax in the
            self-attention layer ("attention_dropout"). 0.1 for Base,
            0.0 for Large.
        ff_interm_features (int): Hidden size of the feed-forward layer
            ("encoder_ffn_embed_dim"). 3072 for Base, 4096 for Large.
        ff_interm_dropout (float): Dropout applied inside the feed-forward
            layer ("activation_dropout"). 0.1 for Base/Large.
        dropout (float): Dropout applied at the end of the feed-forward layer
            ("dropout"). 0.1 for Base, 0.0 for Large.
        layer_norm_first (bool): Controls the order of layer norm in the
            transformer block and in each encoder layer ("layer_norm_first").
            If True, layer norm is applied before features enter the encoder
            layers, and each encoder layer normalizes before and after
            self-attention. If False, layer norm is applied after the encoder
            layers, and within each layer after self-attention, before and
            after the feed-forward block. False for Base, True for Large.
        layer_drop (float): Probability to drop each encoder layer during
            training ("layerdrop"). 0.1 for Base/Large.
    """
    feature_projection = FeatureProjection(in_features, embed_dim, dropout_input)
    pos_conv = ConvolutionalPositionalEmbedding(embed_dim, pos_conv_kernel, pos_conv_groups)

    def _build_encoder_layer():
        # One self-attention + feed-forward encoder layer; mirrors
        # https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L768-L782
        return EncoderLayer(
            attention=SelfAttention(
                embed_dim=embed_dim,
                num_heads=num_heads,
                dropout=attention_dropout,
            ),
            dropout=dropout,
            layer_norm_first=layer_norm_first,
            feed_forward=FeedForward(
                io_features=embed_dim,
                intermediate_features=ff_interm_features,
                intermediate_dropout=ff_interm_dropout,
                output_dropout=dropout,
            ),
        )

    encoder_layers = nn.ModuleList(_build_encoder_layer() for _ in range(num_layers))
    transformer = Transformer(
        pos_conv_embed=pos_conv,
        dropout=dropout,
        layers=encoder_layers,
        layer_norm_first=not layer_norm_first,
        layer_drop=layer_drop,
    )
    return Encoder(feature_projection, transformer)
def print_tree(tree, indent=0):
    """Prints the parse tree for debugging purposes.

    HTML entities are not expanded here; that should be done after
    processing templates.
    """
    assert isinstance(tree, (WikiNode, str))
    assert isinstance(indent, int)
    pad = " " * indent
    if isinstance(tree, str):
        # Leaf text node: show its repr so whitespace is visible.
        print("{}{}".format(pad, repr(tree)))
        return
    print("{}{} {}".format(pad, tree.kind.name, tree.args))
    for attr_name, attr_value in tree.attrs.items():
        print("{} {}={}".format(pad, attr_name, attr_value))
    for child in tree.children:
        print_tree(child, indent + 2)
def _soft_threshold(a, b):
"""Soft-threshold operator for the LASSO and elastic net."""
return np.sign(a) * np.clip(np.abs(a) - b, a_min=0, a_max=None) | 28,041 |
def predict(input_tokens):
    """Registered predict method for pangu-alpha serving.

    Preprocesses the input into token ids, invokes the (single-output)
    servable to get logits, and postprocesses with top-k sampling to pick
    the next token.
    """
    token_ids, valid_length = register.call_preprocess(preprocess, input_tokens)
    logits = register.call_servable(token_ids)
    return register.call_postprocess(postprocess_topk, logits, valid_length)
def get_sim_data():
    """
    Create the data needed to initialize a simulation

    Performs the steps necessary to set up a stratified plume model
    simulation and passes the input variables to the `Model` object and
    `Model.simulate()` method.

    Returns
    -------
    profile : `ambient.Profile` object
        Return a profile object from the BM54 CTD data
    particles : list of `PlumeParticle` objects
        List of `PlumeParticle` objects containing the dispersed phase
        initial conditions
    z : float
        Depth of the release port (m)
    R : float
        Radius of the release port (m)
    maxit : float
        Maximum number of iterations to converge between inner and outer
        plumes
    toler : float
        Relative error tolerance to accept for convergence (--)
    delta_z : float
        Maximum step size to use in the simulation (m). The ODE solver
        in `calculate` is set up with adaptive step size integration, so
        in theory this value determines the largest step size in the
        output data, but not the numerical stability of the calculation.
    """
    # Ambient CTD data
    profile = get_profile()

    # Release geometry
    release_depth = 300.
    port_radius = 0.15

    particles = []

    # Dissolving bubble: pure oxygen in an O2/N2/Ar system
    o2 = dbm.FluidParticle(['oxygen', 'nitrogen', 'argon'])
    particles.append(stratified_plume_model.particle_from_Q(
        profile, release_depth, o2, np.array([1.0, 0., 0.]),
        150. / 60. / 60., 0.005, 0.85))

    # Insoluble oil droplet
    oil = dbm.InsolubleParticle(True, True)
    particles.append(stratified_plume_model.particle_from_mb0(
        profile, release_depth, oil, [1.], 50., 0.01, 0.8))

    # Solver parameters: max inner/outer iterations, convergence tolerance,
    # and maximum output step size (m)
    maxit = 2
    toler = 0.2
    delta_z = 1.0

    return (profile, particles, release_depth, port_radius, maxit, toler,
            delta_z)
def _organize_arch(fils, pth):
"""Allocate data from each specific type of file (keys from the input dict) to a new dict
Arguments:
fils {dict} -- Dictionary containing type of files and list of files
Returns:
[dict] -- [description]
"""
import numpy as np
imgdata = dict()
for i in fils.keys():
images = dict()
for ii in np.arange(len(fils[i])):
images[str('img_' + str(ii+1))] = {'path': pth + str('\\') + str(fils[i][ii]),
'coords': np.loadtxt(pth + str('\\') + str(fils[i][ii]), skiprows=1, usecols=(-2, -1))}
imgdata[i] = images
return imgdata | 28,044 |
def gather_allele_freqs(record, all_samples, males, females, pop_dict, pops, no_combos = False):
    """
    Wrapper to compute allele frequencies for all sex & population pairings

    Annotates ``record.info`` via calc_allele_freq for the full cohort, per
    sex, per population, and (unless ``no_combos``) per population x sex,
    then sets POPMAX_AF for biallelic sites.
    """
    # Overall and per-sex allele frequencies
    calc_allele_freq(record, all_samples)
    if len(males) > 0:
        calc_allele_freq(record, males, prefix = 'MALE')
    if len(females) > 0:
        calc_allele_freq(record, females, prefix = 'FEMALE')
    # Per-population (and population x sex) allele frequencies
    if len(pops) > 0:
        for pop in pops:
            pop_samps = [s for s in all_samples if pop_dict.get(s, None) == pop]
            calc_allele_freq(record, pop_samps, prefix = pop)
            if len(males) > 0 and not no_combos:
                calc_allele_freq(record, [s for s in pop_samps if s in males],
                                 prefix = pop + '_MALE')
            if len(females) > 0 and not no_combos:
                calc_allele_freq(record, [s for s in pop_samps if s in females],
                                 prefix = pop + '_FEMALE')
    # Get POPMAX AF for biallelic sites.
    # Guard on len(pops) > 0: with no populations the list of AFs would be
    # empty and max([]) raises ValueError (and no per-pop AF fields exist).
    if len(pops) > 0 and 'MULTIALLELIC' not in record.filter and len(record.alleles) <= 2:
        AFs = [record.info['{0}_AF'.format(pop)][0] for pop in pops]
        record.info['POPMAX_AF'] = max(AFs)
    return record
def get_selected(n=1):
    """
    Return the first n selected objects.

    With n == 1 returns the single first selected object; with n == -1
    returns a copy of the whole selection; otherwise returns the first n.
    Returns an empty list when nothing is selected.
    """
    if not get_selection_len():
        return []
    selection = bpy.context.selected_objects
    if n == 1:
        return selection[0]
    if n == -1:
        return selection[:]
    return selection[:n]
def label_src_vertno_sel(label, src):
    """ Find vertex numbers and indices from label
    Parameters
    ----------
    label : Label
        Source space label
    src : dict
        Source space
    Returns
    -------
    vertno : list of length 2
        Vertex numbers for lh and rh
    src_sel : array of int (len(idx) = len(vertno[0]) + len(vertno[1]))
        Indices of the selected vertices in source space
    Raises
    ------
    ValueError
        If the source space is not a surface source space.
    """
    if src[0]['type'] != 'surf':
        # Bug fix: this previously *returned* an Exception instance instead
        # of raising it, so callers silently received an Exception object.
        raise ValueError('Label are only supported with surface source spaces')
    vertno = [src[0]['vertno'], src[1]['vertno']]
    if label.hemi == 'lh':
        vertno_sel = np.intersect1d(vertno[0], label.vertices)
        src_sel = np.searchsorted(vertno[0], vertno_sel)
        vertno[0] = vertno_sel
        vertno[1] = np.array([])
    elif label.hemi == 'rh':
        vertno_sel = np.intersect1d(vertno[1], label.vertices)
        # Offset by the lh vertex count: rh vertices follow lh in the
        # concatenated source-space ordering.
        src_sel = np.searchsorted(vertno[1], vertno_sel) + len(vertno[0])
        vertno[0] = np.array([])
        vertno[1] = vertno_sel
    elif label.hemi == 'both':
        vertno_sel_lh = np.intersect1d(vertno[0], label.lh.vertices)
        src_sel_lh = np.searchsorted(vertno[0], vertno_sel_lh)
        vertno_sel_rh = np.intersect1d(vertno[1], label.rh.vertices)
        src_sel_rh = np.searchsorted(vertno[1], vertno_sel_rh) + len(vertno[0])
        src_sel = np.hstack((src_sel_lh, src_sel_rh))
        vertno = [vertno_sel_lh, vertno_sel_rh]
    else:
        raise Exception("Unknown hemisphere type")
    return vertno, src_sel
def get_subjects(creative_work):
    """
    Returns generated html of subjects associated with the
    Creative Work HTML or 0-length string

    Parameters:
    creative_work -- Creative Work
    """
    #! Using LOC Facet as proxy for subjects
    annotation_key = "{0}:hasAnnotation".format(creative_work.redis_key)
    html_chunks = []
    for facet in REDIS_DATASTORE.smembers(annotation_key):
        if not facet.startswith("bf:Facet"):
            continue
        loc_key = facet.split(":")[-1]
        subject_template = loader.get_template('subject-icon.html')
        context = Context({
            'name': REDIS_DATASTORE.hget('bf:Annotation:Facet:LOCFirstLetters',
                                         loc_key),
            'letter': loc_key})
        html_chunks.append(subject_template.render(context))
    return mark_safe(''.join(html_chunks))
def run_tests(runner: cmake_runner.CMakeRunner, args: argparse.Namespace,
              build_config: str) -> bool:
    """Run tests for the current project.

    Args:
      runner: Cmake runner object.
      args: Arguments for cmake.
      build_config: Name of configuration target.

    Returns:
      True when testing ran successfully, False otherwise.
    """
    try:
        runner.test(args=args.cmake_test_regex)
    except (subprocess.CalledProcessError, RuntimeError) as error:
        logging.exception('Tests failed for %s CMake project %s: %s', build_config,
                          args.cmake_source_project_root, error)
        return False
    else:
        # No exception: the test run completed successfully.
        return True
def test_api_versions_synced_with_botocore(api_version_args):
    """Verify both boto3 and botocore clients stay in sync."""
    service_name, botocore_session, boto3_session = api_version_args
    # Latest API version as seen through the boto3 resource layer.
    boto3_api_version = (
        boto3_session.resource(service_name)
        .meta.client.meta.service_model.api_version
    )
    # Latest API version as seen by a raw botocore client.
    botocore_client = botocore_session.create_client(
        service_name,
        region_name='us-east-1',
        aws_access_key_id='foo',
        aws_secret_access_key='bar'
    )
    botocore_api_version = botocore_client.meta.service_model.api_version
    err = (
        f"Different latest API versions found for {service_name}: "
        f"{botocore_api_version} (botocore), {boto3_api_version} (boto3)\n"
    )
    assert botocore_api_version == boto3_api_version, err
def build(new_opts=None):
    """ Build graph
    Args:
        new_opts: optional dict with additional opts, which will be merged
            into the module-level opts dict. Defaults to no extra options.
    """
    # None default instead of a mutable {} default: a shared dict default
    # would be a latent bug if it were ever mutated.
    if new_opts:
        opts.update(new_opts)
    images_ph, labels_ph, train_phase_ph = placeholder_inputs()
    logits = inference(images_ph, train_phase_ph)
    loss_out = loss(logits, labels_ph)
    train = training(loss_out)
    eval_out = evaluation(logits, labels_ph)
def bubblesort(s, debug=1):
    """Sort the list ``s`` in place using bubble sort.

    Repeatedly sweeps the list, swapping adjacent out-of-order elements;
    after pass ``i`` the largest ``i + 1`` elements sit in their final
    positions at the end of the list. (The previous docstring incorrectly
    described selection sort.)

    :param s: list to sort (modified in place)
    :param debug: when truthy, print each swap, the intermediate list
        states, the final list, and the total swap count
    """
    swap_count = 0
    for i in range(len(s) - 1):
        # Elements beyond len(s) - i are already in place.
        for j in range(1, len(s) - i):
            if s[j - 1] > s[j]:
                if debug:
                    print('j-1=', j - 1, 'j=', j)
                s[j - 1], s[j] = s[j], s[j - 1]
                if debug:
                    print(s)
                swap_count += 1
    # Only emit the summary when debugging; previously the swap count was
    # printed unconditionally, even with debug=0.
    if debug:
        print(s)
        print(swap_count)
def sort(request):
    """Parametrized boolean ``sort`` keyword for concat and DataFrame.append."""
    param_value = request.param
    return param_value
def is_comprehension(leaf):
    """
    Return true if the leaf is the beginning of a list/set/dict comprehension.
    Returns true for generators as well
    """
    if leaf.type != 'operator':
        return False
    if leaf.value not in ('[', '(', '{'):
        return False
    sibling = leaf.get_next_sibling()
    if sibling.type not in ('testlist_comp', 'dictorsetmaker'):
        return False
    # The comprehension body ends in a sync_comp_for node.
    return sibling.children[-1].type == 'sync_comp_for'
def get_class(x):
    """
    Return the class index of x: the number of distribution thresholds
    strictly below x.

    x: index
    """
    # Example distribution of class boundaries.
    distribution = [0, 2000, 4000, 6000, 8000, 10000]
    return sum(1 for threshold in distribution if x > threshold)
def generate_checksum(message, previous_csum=0):
    """Generate checksum for messages with
    CALL_REQ, CALL_REQ_CONTINUE,
    CALL_RES, CALL_RES_CONTINUE types.

    Non-checksum message types are left untouched.

    :param message: outgoing message
    :param previous_csum: accumulated checksum value
    """
    if message.message_type not in CHECKSUM_MSG_TYPES:
        return
    checksum_type = message.checksum[0]
    csum = compute_checksum(checksum_type, message.args, previous_csum)
    message.checksum = (checksum_type, csum)
def square(x):
    """Elementwise square of a tensor.

    Thin wrapper around ``T.sqr`` (presumably Theano's tensor module —
    confirm against the file's imports).
    """
    return T.sqr(x)
def update_email_body(parsed_email, key):
    """
    Finds and updates the "text/html" and "text/plain" email body parts.
    Parameters
    ----------
    parsed_email: email.message.Message, required
        EmailMessage representation the downloaded email
    key: string, required
        The object key that will be used for storing the message in S3
    Returns
    -------
    email.message.Message
        EmailMessage representation the updated email
    """
    # Template in the key for purposes of optional displaying to the recipient.
    # str.replace instead of re.sub: the key is data, and re.sub would
    # interpret backslashes / group references in the replacement string.
    this_disclaimer_text = disclaimer_text.replace("{key}", key)
    this_footer_text = footer_text.replace("{key}", key)
    text_charset = None
    if parsed_email.is_multipart():
        # Walk over message parts of this multipart email.
        for part in parsed_email.walk():
            content_type = part.get_content_type()
            content_disposition = str(part.get_content_disposition())
            if content_type == 'text/plain' and 'attachment' not in content_disposition:
                transfer_encoding = part['Content-Transfer-Encoding']
                text_charset = part.get_content_charset()
                new_text_body = update_text_content(part, this_disclaimer_text, this_footer_text)
                part.set_content(new_text_body, "plain", charset=text_charset, cte=transfer_encoding)
            elif content_type == 'text/html' and 'attachment' not in content_disposition:
                transfer_encoding = part['Content-Transfer-Encoding']
                html_charset = part.get_content_charset()
                new_html_body = update_html_content(part, this_disclaimer_text, this_footer_text)
                if new_html_body is not None:
                    part.set_content(new_html_body.encode(html_charset), "text", "html", cte=transfer_encoding)
                    part.set_charset(html_charset)
    else:
        # Its a plain email with text/plain body
        transfer_encoding = parsed_email['Content-Transfer-Encoding']
        text_charset = parsed_email.get_content_charset()
        new_text_body = update_text_content(parsed_email, this_disclaimer_text, this_footer_text)
        parsed_email.set_content(new_text_body, "plain", charset=text_charset, cte=transfer_encoding)
    return parsed_email
def create_collaborators(collaborators, destination_url, destination, credentials):
    """Add collaborators to a GitHub repository.

    INPUT:
    collaborators: python list of dicts containing collaborators info to be POSTED to GitHub
    destination_url: the root url for the GitHub API
    destination: the team and repo '<team>/<repo>' to add collaborators to
    credentials: dict containing at least the authenticated 'user_name'
    OUTPUT: {"done": "true"} once every collaborator has been processed
    """
    for collaborator in collaborators:
        # Skip the authenticated user; they already own the repository.
        if collaborator['login'] == credentials['user_name']:
            continue
        url = (destination_url + "repos/" + destination +
               "/collaborators/" + collaborator["login"])
        # Admins keep admin rights; everyone else gets push access.
        # (The redundant re-check of credentials['user_name'] here was dead
        # code: that case is unreachable after the `continue` above.)
        perm = "admin" if collaborator["permissions"]["admin"] else "push"
        r = put_req(url, json.dumps({"permission": perm}), credentials)
        status = check_res(r)
        print(status)
    return {"done": "true"}
def complexity_recurrence(signal, delay=1, dimension=3, tolerance="default", show=False):
    """Recurrence matrix (Python implementation)

    Fast Python implementation of the recurrence matrix (tested against
    pyRQA). Computes the pairwise distance matrix of the time-delay embedded
    signal and binarizes it at ``tolerance``.

    Parameters
    ----------
    signal : Union[list, np.ndarray, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int
        Time delay (often denoted 'Tau', sometimes referred to as 'lag')
        used for the embedding.
    dimension : int
        Embedding dimension (often denoted 'm' or 'd'), typically 2 or 3.
    tolerance : float
        Tolerance (similarity threshold, often denoted 'r'): the radius used
        for detecting neighbours (states considered as recurrent). If
        ``"default"``, it is estimated from the signal's standard deviation.
    show : bool
        Visualise recurrence matrix.

    See Also
    --------
    complexity_embedding, complexity_tolerance

    Returns
    -------
    np.ndarray
        The recurrence matrix (zeros and ones).
    np.ndarray
        The distance matrix (the non-binarized version of the former).

    References
    ----------
    - Rawald, T., Sips, M., Marwan, N., & Dransch, D. (2014). Fast computation of recurrences
      in long time series. In Translational Recurrences (pp. 17-29). Springer, Cham.
    - Dabiré, H., Mestivier, D., Jarnet, J., Safar, M. E., & Chau, N. P. (1998). Quantification of
      sympathetic and parasympathetic tones by nonlinear indexes in normotensive rats. American
      Journal of Physiology-Heart and Circulatory Physiology, 275(4), H1290-H1297.
    """
    if tolerance == "default":
        tolerance, _ = complexity_tolerance(
            signal, method="sd", delay=None, dimension=None, show=False
        )

    # Time-delay embedding of the signal
    embedded = complexity_embedding(signal, delay=delay, dimension=dimension)

    # Pairwise Euclidean distances between embedded states, flipped
    # vertically to match the traditional RQA orientation.
    d = scipy.spatial.distance.cdist(embedded, embedded, metric="euclidean")
    d = np.flip(d, axis=0)

    # Binarize: mark as recurrent (1) every pair within the tolerance radius.
    recmat = np.zeros(d.shape)
    recmat[d <= tolerance] = 1

    if show is True:
        try:
            fig, axes = plt.subplots(ncols=2)
            axes[0].imshow(recmat, cmap="Greys")
            axes[0].set_title("Recurrence Matrix")
            im = axes[1].imshow(d)
            axes[1].set_title("Distance")
            cbar = fig.colorbar(im, ax=axes[1], fraction=0.046, pad=0.04)
            cbar.ax.plot([0, 1], [tolerance] * 2, color="r")
        except MemoryError as e:
            raise MemoryError(
                "NeuroKit error: complexity_rqa(): the recurrence plot is too large to display. ",
                "You can recover the matrix from the parameters and try to display parts of it.",
            ) from e
    return recmat, d
def _git_enable_release_branch():
    """Enable the configured release branch for the duration of the context.

    Generator that delegates to ``_git_enable_branch`` with the module-level
    ``RELEASE_BRANCH`` name, yielding control back to the caller while the
    branch is active (presumably wrapped with ``contextlib.contextmanager``
    at the definition site — confirm).
    """
    with _git_enable_branch(RELEASE_BRANCH):
        yield
def train(params):
"""
Trains error model.
Arguments:
params (dict): hyperparameters with which to train
"""
p, x = load_error_data()
# calculate means
p_mean = p.mean(axis=(0, 1))
p_std = p.std(axis=(0, 1))
x_mean = x.mean(axis=(0, 1))
x_std = x.std(axis=(0, 1))
# TODO - does this make sense?
# delta = x[:,2::2] - x[:,:-2:2]
# the number to look ahead
delta = x[:, 1:] - x[:, :-1]
delta_mean = delta.mean(axis=(0, 1))
delta_std = delta.std(axis=(0, 1))
# send to torch tensors
p_mean, p_std = torch.Tensor(p_mean).to(device), torch.Tensor(p_std).to(device)
x_mean, x_std = torch.Tensor(x_mean).to(device), torch.Tensor(x_std).to(device)
delta_mean, delta_std = (
torch.Tensor(delta_mean).to(device),
torch.Tensor(delta_std).to(device),
)
# parameters
buffer_size = int(params["buffer size"])
activation = params["activation"]
# train val split
training_split = 0.8
n = len(p)
k = int(np.ceil(n * training_split))
train_p, val_p = p[:k], p[k:]
train_x, val_x = x[:k], x[k:]
n_ahead = 1
train_dataset = LookaheadDataset(states=train_x, actions=train_p, n_ahead=n_ahead)
val_dataset = LookaheadDataset(states=val_x, actions=val_p, n_ahead=n_ahead)
action_size = len(train_dataset[0][0][0])
state_size = len(train_dataset[0][1])
output_size = len(train_dataset[0][2][0])
model_path = params.get("model path", None)
dropout = params["dropout"]
hidden_layers = int(params["hidden layers"])
hidden_size = int(params["hidden size"])
# LOAD ANALYTICAL MDOEL
analytical_model = Network(
action_size=action_size,
state_size=state_size,
output_size=output_size,
hidden_layers=hidden_layers,
hidden_size=hidden_size,
dropout=dropout,
activation=activation,
action_mean=p_mean,
action_std=p_std,
state_mean=x_mean,
state_std=x_std,
output_mean=delta_mean,
output_std=delta_std,
)
analytical_model.to(device)
analytical_path = params["analytical model path"]
analytical_model.load_state_dict(torch.load(analytical_path))
model = Network(
action_size=action_size,
state_size=state_size,
output_size=output_size,
hidden_layers=hidden_layers,
hidden_size=hidden_size,
dropout=dropout,
activation=activation,
action_mean=p_mean,
action_std=p_std,
state_mean=x_mean,
state_std=x_std,
output_mean=delta_mean,
output_std=delta_std,
)
model.to(device)
if params.get("load", False):
model.load_state_dict(torch.load(model_path))
learning_rate = params["learning rate"]
batch_size = int(params["batch size"])
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=True)
train_losses = []
val_losses = []
best_loss = np.inf
print_info = params.get("print", False)
epochs = int(params["epochs"])
max_batches = np.inf
if print_info:
loop = tqdm(total=min(len(train_dataloader), max_batches) * epochs)
def step(state, deltas):
s = state + deltas
return s
for epoch in range(epochs):
model.train()
# new_n_ahead = min((epoch + 1) * 5, 100)
new_n_ahead = 10
if new_n_ahead != n_ahead:
n_ahead = new_n_ahead
if print_info:
print(n_ahead)
train_dataset = LookaheadDataset(
states=train_x, actions=train_p, n_ahead=n_ahead
)
val_dataset = LookaheadDataset(states=val_x, actions=val_p, n_ahead=n_ahead)
train_dataloader = DataLoader(
train_dataset, batch_size=batch_size, shuffle=True
)
val_dataloader = DataLoader(
val_dataset, batch_size=batch_size, shuffle=True
)
for b, (a, s, d) in enumerate(train_dataloader):
s = s.float().to(device)
a = a.float().to(device)
d = d.float().to(device)
d_est = torch.zeros(d.shape).to(device)
for i in range(n_ahead):
d_hat = model(a[:, i], s) + analytical_model(a[:, i], s)
if i == 0:
# d_est[:,i] = d_est[:,i] + d_hat
d_est[:, i] = d_hat
else:
d_est[:, i] = d_est[:, i - 1] + d_hat
s = s + d_hat
# normalize d
d = (d - delta_mean) / delta_std
d_est = (d_est - delta_mean) / delta_std
loss = loss_function(d, d_est)
if print_info:
if not val_losses:
loop.set_description("loss: {:.3f}".format(loss.item()))
else:
loop.set_description(
"loss: {:.4f}, val loss: {:.4f}".format(
loss.item(), val_losses[-1]
)
)
train_losses.append(loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
if print_info:
loop.update(1)
if b > max_batches:
break
with torch.no_grad():
model.eval()
epoch_losses = []
for b, (a, s, d) in enumerate(val_dataloader):
s = s.float().to(device)
a = a.float().to(device)
d = d.float().to(device)
d_est = torch.zeros(d.shape).to(device)
for i in range(n_ahead):
d_hat = model(a[:, i], s) + analytical_model(a[:, i], s)
if i == 0:
# d_est[:,i] = d_est[:,i] + d_hat
d_est[:, i] = d_hat
else:
d_est[:, i] = d_est[:, i - 1] + d_hat
s = s + d_hat
# normalize d
d = (d - delta_mean) / delta_std
d_est = (d_est - delta_mean) / delta_std
loss = loss_function(d, d_est)
epoch_losses.append(loss.item())
if b > max_batches:
break
val_losses.append(np.mean(epoch_losses))
if np.mean(epoch_losses) < best_loss:
best_loss = np.mean(epoch_losses)
if model_path:
torch.save(model.state_dict(), model_path)
if print_info:
print("Best val loss: {:.4}".format(best_loss))
n_ahead = 100
val_dataset = LookaheadDataset(states=val_x, actions=val_p, n_ahead=n_ahead)
val_dataloader = DataLoader(val_dataset, batch_size=100, shuffle=True)
# calculate HZ
start = time()
with torch.no_grad():
model.eval()
for b, (a, s, d) in enumerate(val_dataloader):
s = s.float().to(device)
a = a.float().to(device)
d = d.float().to(device)
d_est = torch.zeros(d.shape).to(device)
for i in range(n_ahead):
d_hat = model(a[:, i], s) + analytical_model(a[:, i], s)
if i == 0:
# d_est[:,i] = d_est[:,i] + d_hat
d_est[:, i] = d_hat
else:
d_est[:, i] = d_est[:, i - 1] + d_hat
s = s + d_hat
elapsed = time() - start
speed = elapsed / len(val_dataloader)
return val_losses[-1].item(), speed | 28,062 |
def RunAll(test_spark=False):
  """Running all tests.

  Args:
    test_spark: when True, also runs the Presto and Trino dialect tests.
  """
  # Uncomment to test writing tables.
  # RunTest("ground_test")
  # RunTest("ground_psql_test")
  # RunTest("closure_test")
  # RunTest("dialects/trino/grounding_test")
  RunTest("in_expr_test")
  RunTest("equals_true_test")
  if test_spark:
    # The same three suites run for each supported SQL dialect.
    for dialect in ("presto", "trino"):
      for suite in ("basics_test", "arg_min_max_test", "joins_test"):
        RunTest("dialects/%s/%s" % (dialect, suite))
  # Tests identified by name alone; RunTest derives the other files.
  for test_name in ("functor_arg_update_test",
                    "ultra_short_cycle_test",
                    "rec_small_cycle_test",
                    "rec_cycle_test",
                    "psql_recursion_test",
                    "sqlite_recursion",
                    "sqlite_rec_depth",
                    "sqlite_rec_functor",
                    "sqlite_pagerank",
                    "sqlite_composite_test",
                    "sqlite_reachability",
                    "psql_test",
                    "psql_arg_min_test",
                    "psql_arg_min_max_k_test",
                    "psql_pair_test",
                    "sqlite_subquery_test",
                    "sqlite_test",
                    "sub_if_test"):
    RunTest(test_name)
  RunTest(
      name="functor_chain_test",
      src="functor_chain_test.l",
      golden="functor_chain_test.txt"
  )
  RunTest(
      name="quote_escape_test",
      src="quote_escape_test.l",
      golden="quote_escape_test.txt",
      predicate="Q",
      user_flags={'name': 'Dwayne "Rock" Johnson'}
  )
  RunTest("array_test")
  RunTest("udf_test")
  RunTest("with_test")
  # Tests that compare one predicate's output against a golden file.
  # Each entry is (name, source file, golden file, predicate).
  for test_name, src, golden, predicate in (
      ("factorial_test", "factorial_test.l", "factorial_test.txt", "Test"),
      ("sql_expr_test", "sql_expr_test.l", "sql_expr_test.txt", "Test"),
      ("unnest_order_test", "unnest_order_test.l", "unnest_order_test.txt",
       "Test"),
      ("nested_combines_test", "nested_combines_test.l",
       "nested_combines_test.txt", "Test"),
      ("analytic_test", "analytic_test.l", "analytic_test.txt",
       "ReadableTest"),
      ("simple_functors_test", "simple_functors_test.l",
       "simple_functors_test.txt", "Test"),
      ("composite_functor_test", "composite_functor_test.l",
       "composite_functor_test.txt", "AnonymizedTrafficUS"),
      ("long_functor_test", "long_functor_test.l", "long_functor_test.txt",
       "F7"),
      ("nontrivial_restof_test", "nontrivial_restof_test.l",
       "nontrivial_restof_test.txt", "Test"),
      ("cast_test", "cast_test.l", "cast_test.txt", "T"),
      ("disjunction_test", "disjunction_test.l", "disjunction_test.txt",
       "Answer"),
      ("arg_min_max_test", "arg_min_max_test.l", "arg_min_max_test.txt",
       "Test"),
      ("operation_order_test", "operation_order_test.l",
       "operation_order_test.txt", "Test"),
      ("no_from_test", "no_from_test.l", "no_from_test.txt", "Test"),
      ("if_then_test", "if_then.l", "if_then_QualifiedSummary.txt",
       "QualifiedSummary"),
      ("modification_inside_test", "modification_inside.l",
       "modification_inside.txt", "BetterCountry"),
      ("outer_join_test", "outer_join.l", "outer_join_test.txt",
       "PersonPhonesAndEmails"),
      ("outer_join_some_value_test", "outer_join_some_value.l",
       "outer_join_verbose_test.txt", "PersonPhoneAndEmail"),
      ("outer_join_disjunction_test", "outer_join_disjunction.l",
       "outer_join_verbose_test.txt", "PersonPhoneAndEmail"),
      ("outer_join_combine_test", "outer_join_combine.l",
       "outer_join_verbose_test.txt", "PersonPhoneAndEmail"),
      ("outer_join_verbose_test", "outer_join_verbose.l",
       "outer_join_verbose_test.txt", "PersonPhoneAndEmail"),
      ("multi_body_aggregation_test", "multi_body_aggregation.l",
       "multi_body_aggregation_test.txt", "TestOutput"),
      ("bulk_functions_test", "bulk_functions.l", "bulk_functions_test.txt",
       "Test"),
      ("define_aggregation_test", "define_aggregation.l",
       "define_aggregation_test.txt", "SampledPeople"),
      ("unary_test", "unary_test.l", "unary_test.txt", "Test"),
      ("sql_string_table_test", "sql_string_table_test.l",
       "sql_string_table_test.txt", "Test")):
    RunTest(name=test_name, src=src, golden=golden, predicate=predicate)
  # RunTest("reachability_test")
def test_md042_bad_link_empty_fragment():
    """
    Test to make sure we get the expected behavior after scanning a bad file from the
    test/resources/rules/md042 directory that contains a link whose destination is
    only an empty fragment, which should trigger rule MD042 (no empty links).
    """
    # Arrange
    scanner = MarkdownScanner()
    supplied_arguments = [
        "scan",
        "test/resources/rules/md042/bad_link_empty_fragment.md",
    ]
    # Return code 1 indicates at least one rule violation was reported.
    expected_return_code = 1
    expected_output = (
        "test/resources/rules/md042/bad_link_empty_fragment.md:2:1: "
        + "MD042: No empty links (no-empty-links)"
    )
    expected_error = ""
    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)
    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
def check_max_line_length_configured(repo: RepoLinter) -> None:
    """ checks for the max-line-length setting in .pylintrc

    Compares the `max-line-length` value in the repository's .pylintrc
    ([MASTER] section) against the expected value from the linter's own
    configuration, reporting a warning/error on the repo as appropriate.
    """
    # default setting
    # NOTE(review): this guard tests for a "pylintrc" key but then reads
    # repo.config[CATEGORY]. If those keys differ, the read of
    # repo.config[CATEGORY]["max_line_length"] further below may raise
    # KeyError when "pylintrc" is absent -- confirm the intended key.
    if "pylintrc" in repo.config:
        if "max_line_length" not in repo.config[CATEGORY]:
            logger.debug("max_line_length not set in config, no need to run.")
            return
    config: Optional[ConfigParser] = load_pylintrc(repo)
    if not config:
        repo.warning(CATEGORY, ".pylintrc not found")
        return
    if "MASTER" not in config.sections():
        # Without a MASTER section there is nothing to compare against.
        logger.debug("Can't find MASTER entry, dumping config")
        logger.debug(json.dumps(config, indent=4, default=str, ensure_ascii=False))
        return
    try:
        # pylintrc stores the limit under [MASTER] as "max-line-length".
        linelength = config.get("MASTER", "max-line-length")
    except NoOptionError:
        repo.warning(CATEGORY, "max-line-length not configured")
        return
    expected = repo.config[CATEGORY]["max_line_length"]
    # Compare numerically so "120" and 120 are treated as equal.
    if int(linelength) != int(expected):
        repo.error(
            CATEGORY,
            f"max-line-length wrong, is {linelength}, should be {expected}",
        )
    return
def assert_bake_ok(result: Result):
    """Check bake result is ok.

    Asserts that the bake exited successfully and produced a project
    directory. (The original repeated the directory assertion twice --
    the duplicate has been removed.)
    """
    assert result.exit_code == 0
    assert result.project_path.is_dir()
def get_awb_shutter(f):
    """
    Get AWB and shutter speed from file object.

    This routine extracts the R and B white balance gains and the shutter speed
    from a jpeg file made using the Raspberry Pi camera. These are stored as text in
    a custom Makernote.

    The autoexposure and AWB white balance values are not available directly until
    a picture is taken and are saved in a Jpeg.

    Returns a ``(gain_r, gain_b, shutter)`` tuple, with 0 for any value
    that is not found in the header.
    """
    def _parse_number(text):
        # Safely parse "33000" or "1.50" -- replaces the original eval(),
        # which would execute arbitrary expressions found in the file.
        try:
            return int(text)
        except ValueError:
            return float(text)

    f.seek(256)
    s = f.read(512)  # Only part of the header is needed
    r_pos = s.find('gain_r=')
    b_pos = s.find('gain_b=')
    s_pos = s.find(' exp=')
    # Fixed-width slices bound the value text; split() trims at whitespace.
    gain_r = _parse_number(s[r_pos+7:r_pos+12].split()[0]) if r_pos > -1 else 0
    gain_b = _parse_number(s[b_pos+7:b_pos+12].split()[0]) if b_pos > -1 else 0
    shutter = _parse_number(s[s_pos+5:s_pos+12].split()[0]) if s_pos > -1 else 0
    return (gain_r, gain_b, shutter)
def rgb2lab(rgb_arr):
    """
    Convert colour from RGB to CIE 1976 L*a*b*
    Parameters
    ----------
    rgb_arr: ndarray
        Color in RGB
    Returns
    -------
    lab_arr: ndarray
        Color in CIE 1976 L*a*b*
    """
    # Two-step conversion via the intermediate CIE XYZ colour space.
    return xyz2lab(rgb2xyz(rgb_arr))
def format_rfidcard(rfidcard):
    """Serialize an RFID card into a JSON-ready dict.

    :type rfidcard: apps.billing.models.RfidCard
    """
    def nonempty_or_none(value):
        # ATQA/SAK may be stored as empty strings; the API exposes them as null.
        return value if len(value) > 0 else None

    return {
        'atqa': nonempty_or_none(rfidcard.atqa),
        'sak': nonempty_or_none(rfidcard.sak),
        'uid': rfidcard.uid,
        'registered_at': rfidcard.registered_at.isoformat(),
        'user': rfidcard.user.username,
    }
def make_exposure_shares(exposure_levels, geography="geo_nm", variable="rank"):
    """Aggregate shares of activity at different levels of exposure
    Args:
        exposure_levels (df): employment by lad and sector and exposure ranking
        geography (str): geography to aggregate over
        variable (str): variable we want to calculate shares over
    Returns:
        DataFrame with an added `share` column: within each
        (geography, month_year) group, each exposure level's `value` total
        expressed as a fraction of the group total.
    """
    # Step 1: sum `value` over (month_year, exposure level, geography).
    # Step 2: within each (geography, month_year) group, normalise those
    # totals so the shares across exposure levels sum to 1.
    exp_distr = (
        exposure_levels.groupby(["month_year", variable, geography])["value"]
        .sum()
        .reset_index(drop=False)
        .groupby([geography, "month_year"])
        .apply(lambda x: x.assign(share=lambda df: df["value"] / df["value"].sum()))
    ).reset_index(drop=True)
    return exp_distr
def init_ranks(mpi_comm):
    """Returns rank information of the local process in `mpi_comm`.
    Args:
        mpi_comm (type:TODO)
            MPI Communicator from mpi4py
    Returns:
        rank_info (list):
            Elements are:
                * rank (`mpi_comm.rank`)
                * intra_rank (rank within the local computing node)
                * intra_size (number of processes on the node)
                * inter_rank (rank of the node)
                * inter_size (number of computing nodes)
    """
    # Collect every process's host name on rank 0 (None on all other ranks).
    global_names = mpi_comm.gather(mpi4py.MPI.Get_processor_name())
    if mpi_comm.rank == 0:
        # Group global ranks by host name.
        name_to_global_ranks = collections.defaultdict(list)
        for global_rank, name in enumerate(global_names):
            name_to_global_ranks[name].append(global_rank)
        for global_ranks in name_to_global_ranks.values():
            global_ranks.sort()
        # Order nodes by the ranks they host so node numbering is deterministic.
        inter_names = sorted(
            set(global_names), key=lambda name: name_to_global_ranks[name])
        name_to_inter_rank = {
            name: inter_rank
            for inter_rank, name in enumerate(inter_names)
        }
        inter_size = len(inter_names)
        # Build the per-process rank tuple for every global rank.
        all_ranks = []
        for global_rank, name in enumerate(global_names):
            ranks = name_to_global_ranks[name]
            intra_rank = ranks.index(global_rank)
            intra_size = len(ranks)
            inter_rank = name_to_inter_rank[name]
            all_ranks.append((
                global_rank, intra_rank, intra_size,
                inter_rank, inter_size))
        # Distribute each process its own tuple.
        my_ranks = mpi_comm.scatter(all_ranks)
    else:
        # Non-root ranks participate in the scatter with no send data.
        my_ranks = mpi_comm.scatter(None)
    # Sanity check: the first element must be this process's own rank.
    assert my_ranks[0] == mpi_comm.rank
    return my_ranks
def boxscores(sports=None, output="dict", live_only=True, verbose=False):
    """
    Fetch box scores for the given sports (~10 seconds).

    Args:
        sports: list of sport paths, e.g. ["basketball/nba"]; defaults to
            ["basketball/nba"]. A None sentinel replaces the original
            mutable-list default, which is shared between calls.
        output: output-format selector (not referenced in this function;
            kept for interface compatibility).
        live_only: restrict to currently live games.
        verbose: forwarded to boxlinks for progress output.

    Returns:
        list of box scores, one per game link.
    """
    if sports is None:
        sports = ["basketball/nba"]
    links = boxlinks(sports=sports, live_only=live_only, verbose=verbose)
    return [boxscore(link) for link in links]
def pygame_loop():
    """ To be called every iteration. Updates the screen if needed.

    Handles pygame events (quit and the pen/robot hotkeys), then redraws
    the screen only when the module-level `needs_update` flag is set.
    Raises KeyboardInterrupt when the user closes the window.
    """
    global needs_update, message, robot_position, pen_down, pen_surface, show_robot
    # Go through events to check whether user has quit.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            # To indicate quitting, we raise a KeyboardInterrupt.
            raise KeyboardInterrupt
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_DOWN:
                pen_down = True
            elif event.key == pygame.K_UP:
                pen_down = False
            elif event.key == pygame.K_c:
                # Clear the drawing layer.
                clear_pen_surface()
                needs_update = True
            elif event.key == pygame.K_h:
                show_robot = False
                needs_update = True
            elif event.key == pygame.K_s:
                show_robot = True
                needs_update = True
    # To avoid wasting resources painting the same image, we only
    # update if there has been a change in the robot's position.
    # (These two notes were no-op string literals; now real comments.)
    if not needs_update: return
    if message is not None:
        # Show error message.
        screen.fill((255, 255, 255))
        font_surface = font.render(message, True, (0, 0, 0))
        font_coordinates = (screen_width - font_surface.get_width()) // 2, (screen_height - font_surface.get_height()) // 2
        screen.blit(font_surface, font_coordinates)
    elif robot_position is None:
        # Show "No connection message" when waiting for connection.
        screen.fill((255, 255, 255))
        font_surface = font.render("Waiting for connection...", True, (0, 0, 0))
        font_coordinates = (screen_width - font_surface.get_width()) // 2, (screen_height - font_surface.get_height()) // 2
        screen.blit(font_surface, font_coordinates)
    else:
        # Paint background first.
        screen.blit(field_img, (0,0))
        # Robot painting.
        # Destructure robot position
        robot_x, robot_y, robot_heading = robot_position
        # Rotate robot to correct heading.
        robot_rotated = pygame.transform.rotate(robot_img, robot_heading)
        # Compute robot's position on the screen.
        robot_screen_x, robot_screen_y = field_to_pixels((robot_x, robot_y), screen_size)
        # Retrieve width and height of the robot sprite after rotation
        robot_w, robot_h = robot_rotated.get_size()
        # Compute the coordinates we need to give pygame (top-left corner).
        robot_pygame_x = robot_screen_x - robot_w // 2
        robot_pygame_y = robot_screen_y - robot_h // 2
        robot_pygame_pos = robot_pygame_x, robot_pygame_y
        if pen_down:
            # Mark the robot's current position on the persistent pen layer.
            pen_position = int(robot_screen_x), int(robot_screen_y)
            pen_surface.set_at(pen_position, (255, 0, 0, 255))
        screen.blit(pen_surface, (0, 0))
        if show_robot:
            screen.blit(robot_rotated, robot_pygame_pos)
    pygame.display.flip()
    needs_update = False
def test_encode_tags2():
    """
    Test encoding tags with TimeSeries.

    Covers validation failures (missing tags/name, empty name) and the
    happy path where tags are JSON-encoded and appended to the name.
    """
    # Invalid inputs must raise ValueError. The unused `as ex` bindings
    # from the original have been removed.
    with assert_raises(ValueError):
        TimeSeriesName.encode_metric("cpu", None)
    with assert_raises(ValueError):
        TimeSeriesName.encode_metric(None, {1: 2})
    with assert_raises(ValueError):
        TimeSeriesName.encode_metric("", None)
    # Valid input: tags serialized as JSON appended to the metric name.
    tags = {"tk1": "tv1", "tk2": "tv2"}
    name = "test"
    assert_equals('test{"tk1": "tv1", "tk2": "tv2"}', TimeSeriesName.encode_metric(name, tags))
def runningMedian(seq, M):
    """
    Purpose: Find the median for the points in a sliding window (odd number in size)
    as it is moved from left to right by one point at a time.
    Inputs:
          seq -- iterable containing items for which a running median (in a sliding window)
                 is to be calculated
          M -- number of items in window (window size) -- must be an integer > 1
    Outputs:
          medians -- list of medians with size N - M + 1
    Note:
        1. The median of a finite list of numbers is the "center" value when this list
        is sorted in ascending order.
        2. If M is an even number the two elements in the window that
        are close to the center are averaged to give the median (this
        is not by definition)
    """
    seq = iter(seq)
    m = M // 2  # index of the middle element of the sorted window

    def median():
        # Middle element for odd M; average of the two middle elements for even M.
        return s[m] if M & 1 else (s[m - 1] + s[m]) * 0.5

    # s is the window kept in sorted order; d mirrors it in arrival order so
    # the oldest element can be located for removal. (The redundant initial
    # `s = []` assignment from the original has been dropped.)
    s = [item for item in islice(seq, M)]
    d = deque(s)
    s.sort()
    medians = [median()]
    # Slide the window one point at a time: binary-search out the oldest
    # value and insert the newest in sorted position, so no re-sort is needed.
    for item in seq:
        old = d.popleft()           # pop oldest from left
        d.append(item)              # push newest in from right
        del s[bisect_left(s, old)]  # locate insertion point and then remove old
        insort(s, item)             # insert newest, keeping s sorted
        medians.append(median())
    return medians
def min_max_normalize(img):
    """ Rescale the given array linearly into the range [0, 1].

    Parameters:
    ----------
    img: np.ndarray
    """
    lo = img.min()
    hi = img.max()
    span = hi - lo
    return (img - lo) / span
def find_common_features(experiment: FCSExperiment,
                         samples: list or None = None):
    """
    Generate a list of common features present in all given samples of an experiment. By 'feature' we mean
    a variable measured for a particular sample e.g. CD4 or FSC-A (forward scatter)
    Parameters
    ----------
    experiment: FCSExperiment
        Experiment to extract features from
    samples: list, optional
        List of samples to get common features of. If None, will search all samples in experiment.
    Returns
    -------
    List
    """
    # Fetch the sample list once; the original re-queried the experiment for
    # every membership test inside all().
    all_samples = experiment.list_samples()
    if samples is None:
        samples = all_samples
    assert all(s in all_samples for s in samples), \
        'One or more samples specified do not belong to experiment'
    features = [_get_features(experiment, sample_id=s) for s in samples]
    # The common features are the intersection of every sample's feature list.
    return list(set(features[0]).intersection(*features[1:]))
def propMove(*args, **kwargs):
    """
    Performs a proportional translate, scale or rotate operation on any number of objects.
    Returns: None
    """
    # Signature-only stub -- appears to be an auto-generated placeholder whose
    # real implementation is supplied by the host application at runtime.
    pass
def generate_blend_weights(positions, new_p, n_neighbors):
    """ Use inverse distance and K-Nearest-Neighbors Interpolation to estimate weights
    according to [Johansen 2009] Section 6.2.4

    Args:
        positions: mapping of sample name -> position vector (np.ndarray)
        new_p: query position (np.ndarray)
        n_neighbors: number of nearest samples to blend between

    Returns:
        dict mapping sample name -> blend weight. If the query coincides with
        a sample, that sample gets weight 1.0; otherwise weights are
        inverse-distance based and sum to 1 over the k nearest samples.
    """
    # BUG FIX: the original heappush'ed into a list and then sliced it
    # (`distances[:n_neighbors]`) -- a heap's list order is NOT sorted, so the
    # slice was not the k nearest samples. nsmallest returns them sorted.
    distances = [(np.linalg.norm(new_p - p), n) for n, p in positions.items()]
    nearest = heapq.nsmallest(n_neighbors, distances)
    weights = dict()
    if nearest[0][0] <= 0:
        # Exact hit on a sample: it receives all the weight.
        weights[nearest[0][1]] = 1.0
    else:
        # Inverse-distance weights, shifted so the k-th neighbor gets 0.
        inv_k_distance = 1.0 / nearest[-1][0]
        inv_distances = [(1.0 / d) - inv_k_distance for d, n in nearest]
        new_weights = inv_distances / np.sum(inv_distances)
        for idx, (_, n) in enumerate(nearest):
            weights[n] = new_weights[idx]
    return weights
def revert_rst(sitename, doc_name, directory=''):
    """Reset a source document in the given directory to the previous contents.

    Restores the `.bak` backup over the current source file.

    Raises:
        AttributeError: on missing document name.
        FileNotFoundError('no_document'): if the document doesn't exist.
        FileNotFoundError('no_backup'): if no backup is present.
    """
    if not doc_name:
        raise AttributeError('no_name')
    if doc_name not in list_docs(sitename, 'src', directory):
        raise FileNotFoundError("no_document")  # .format(doc_name))
    # Build the full path to the source document, forcing the src extension.
    path = WEBROOT / sitename / SRC_LOC
    if directory:
        path /= directory
    path = path / doc_name
    ext = LOC2EXT['src']
    if path.suffix != ext:
        path = path.with_suffix(ext)
    oldpath = pathlib.Path(str(path) + '.bak')
    try:
        oldpath.rename(path)
    except FileNotFoundError:
        # Re-raise with the documented message; the low-level cause (missing
        # .bak file) is an implementation detail, so suppress chaining.
        raise FileNotFoundError('no_backup') from None
def check_method(adata):
    """Check that method output fits expected API."""
    # Both neighbor-graph outputs must have been written to .obsp.
    for required_key in ("connectivities", "distances"):
        assert required_key in adata.obsp
    return True
def load_model(Model, params, checkpoint_path='', device=None):
    """ loads a model from a checkpoint or from scratch if checkpoint_path='' """
    if checkpoint_path == '':
        # Build a fresh model from the supplied hyper-parameters.
        model = Model(params['model_params'], **params['data_params'])
    else:
        # NOTE(review): when restoring from a checkpoint, `params` is ignored;
        # hyper-parameters are taken from the checkpoint itself -- confirm
        # that is intended for all call sites.
        print("model:", Model)
        print(f'-> Loading model checkpoint: {checkpoint_path}')
        model = Model.load_from_checkpoint(checkpoint_path)
    if device is not None:
        # Switch to inference mode and move to the requested GPU.
        model = model.eval().cuda(device)
    return model
def uncom_ec2_sync_cmdb():
    """Sync EC2 instances that do not use a compliant AMI into the CMDB."""
    # Columns copied verbatim from each fetched record into the ORM object.
    fields = (
        "instance_id", "ami_id", "instance_type", "key_name", "launch_time",
        "placement", "private_dns_name", "private_ip_address",
        "public_dns_name", "public_ip_address",
    )
    with DBContext('w') as session:
        uncom_ec2_list = get_uncom_ec2()
        # Clear all existing records before re-importing the fresh snapshot.
        session.query(UnComEc2).delete(synchronize_session=False)
        for uncom_ec2 in uncom_ec2_list:
            row = UnComEc2(**{field: uncom_ec2[field] for field in fields})
            session.add(row)
        session.commit()
def install_miniconda(install_path):
    """Bootstrap miniconda to a given path."""
    # -b runs the installer in batch (non-interactive) mode; -p sets the prefix.
    command = "bash miniconda.sh -b -p {}".format(install_path)
    execute(command)
def get_widget_type_choices():
    """
    Generates Django model field choices based on widgets
    in holodeck.widgets.
    """
    # Every widget class except the Widget base becomes a
    # ("module.ClassName", display name) pair.
    return [
        ("%s.%s" % (member.__module__, member.__name__), member.name)
        for _, member in inspect.getmembers(widgets, inspect.isclass)
        if member != widgets.Widget
    ]
def iau2000a(jd_tt):
    """Compute Earth nutation based on the IAU 2000A nutation model.
    `jd_tt` - Terrestrial Time: Julian date float, or NumPy array of floats
    Returns a tuple ``(delta_psi, delta_epsilon)`` measured in tenths of
    a micro-arcsecond. Each value is either a float, or a NumPy array
    with the same dimensions as the input argument.
    """
    # Interval between fundamental epoch J2000.0 and given date.
    # 36525.0 = number of days in a Julian century.
    t = (jd_tt - T0) / 36525.0
    # Compute fundamental arguments from Simon et al. (1994), in radians.
    a = fundamental_arguments(t)
    # ** Luni-solar nutation **
    # Summation of luni-solar nutation series (in reverse order).
    # nals_t and the coefficient tables are module-level model constants.
    arg = nals_t.dot(a)
    fmod(arg, TAU, out=arg)  # in-place: reduce angles to [0, 2*pi) w/o copies
    sarg = sin(arg)
    carg = cos(arg)
    # Sine/cosine terms with their linear-in-time companions.
    stsc = array((sarg, t * sarg, carg)).T
    ctcs = array((carg, t * carg, sarg)).T
    dpsi = tensordot(stsc, lunisolar_longitude_coefficients)
    deps = tensordot(ctcs, lunisolar_obliquity_coefficients)
    # Compute and add in planetary components.
    if getattr(t, 'shape', ()) == ():
        # Scalar time: simple linear combination.
        a = t * anomaly_coefficient + anomaly_constant
    else:
        # Array of times: broadcast coefficients over every epoch.
        a = (outer(anomaly_coefficient, t).T + anomaly_constant).T
    a[-1] *= t
    fmod(a, TAU, out=a)
    arg = napl_t.dot(a)
    fmod(arg, TAU, out=arg)
    sc = array((sin(arg), cos(arg))).T
    dpsi += tensordot(sc, nutation_coefficients_longitude)
    deps += tensordot(sc, nutation_coefficients_obliquity)
    return dpsi, deps
def numpy_ndarray(pa_arr):
    """Return numpy.ndarray view of a pyarrow.Array

    Raises NotImplementedError when the array contains nulls, since the
    validity bitmap has no direct ndarray representation.
    """
    if pa_arr.null_count == 0:
        # TODO: would memoryview.cast approach be more efficient? see xnd_xnd.
        return pa_arr.to_numpy()
    # Unpack the (validity bitmap, data) buffer pair before bailing out.
    pa_nul, pa_buf = pa_arr.buffers()
    raise NotImplementedError('numpy.ndarray view of pyarrow.Array with nulls')
async def _async_get_states_and_events_with_filter(
    hass: HomeAssistant, sqlalchemy_filter: Filters, entity_ids: set[str]
) -> tuple[list[Row], list[Row]]:
    """Get states from the database based on a filter.

    Sets a state and fires an event for every entity id, waits for the
    recorder to persist them, then returns the entity-id sets that survive
    the filter's states predicate and events predicate respectively.
    """
    # Seed the recorder with one state and one event per entity.
    for entity_id in entity_ids:
        hass.states.async_set(entity_id, STATE_ON)
        hass.bus.async_fire("any", {ATTR_ENTITY_ID: entity_id})
    await async_wait_recording_done(hass)

    def _get_states_with_session():
        # Entity ids whose state rows pass the filter's states predicate.
        with session_scope(hass=hass) as session:
            return session.execute(
                select(States.entity_id).filter(
                    sqlalchemy_filter.states_entity_filter()
                )
            ).all()

    filtered_states_entity_ids = {
        row[0]
        for row in await get_instance(hass).async_add_executor_job(
            _get_states_with_session
        )
    }

    def _get_events_with_session():
        # Shared event payloads whose rows pass the filter's events predicate.
        with session_scope(hass=hass) as session:
            return session.execute(
                select(EventData.shared_data).filter(
                    sqlalchemy_filter.events_entity_filter()
                )
            ).all()

    filtered_events_entity_ids = set()
    for row in await get_instance(hass).async_add_executor_job(
        _get_events_with_session
    ):
        event_data = json.loads(row[0])
        if ATTR_ENTITY_ID not in event_data:
            continue
        # Reuse the already-parsed payload; the original re-ran json.loads
        # on the same row here.
        filtered_events_entity_ids.add(event_data[ATTR_ENTITY_ID])

    return filtered_states_entity_ids, filtered_events_entity_ids
def decrypt_password(private_key: PrivateKey, encrypted: str) -> str:
    """Return decrypt the given encrypted password using private_key and the RSA cryptosystem.

    Each character's code point is treated as an RSA ciphertext and decrypted
    with modular exponentiation: m = c^d mod (p*q).
    """
    modulus = private_key.p * private_key.q
    decrypted_chars = []
    for character in encrypted:
        plain_code = pow(ord(character), private_key.d, modulus)
        decrypted_chars.append(chr(plain_code))
    return ''.join(decrypted_chars)
def load_jed(fn):
    """
    Parse a JEDEC fuse-map file into a dict with keys "description",
    "vendor", "part", "len" and "data" (OrderedDict of address -> bit string).

    Example input:

    JEDEC file generated by 1410/84 from PALCE20V8H-15 06/28/20 22:42:11*
    DM AMD*
    DD PALCE20V8H-15*
    QF2706*
    G0*
    F0*
    L00000 0000000000000000000000000100000000000000*
    """
    ret = {}
    d = OrderedDict()
    with open(fn) as f:
        li = 0
        for l in f:
            li += 1
            # remove *, newline
            # NOTE(review): this drops the last character unconditionally,
            # so it assumes every line ends with a '*' terminator.
            l = l.strip()[0:-1]
            if not l:
                continue
            if li == 2:
                # Line 2 is the free-form description header.
                ret["description"] = l
                continue
            parts = l.split(" ")
            main_line = " ".join(parts[1:])
            if parts[0] == "DM":
                # Device manufacturer.
                ret["vendor"] = main_line
            elif parts[0] == "DD":
                # Device / part number.
                ret["part"] = main_line
            elif l[0:2] == "QF":
                # Total fuse count.
                ret["len"] = int(l[2:])
            elif l[0] == "L":
                # Fuse data row:
                # L00000 0000000000000000000000000100000000000000*
                addr, bits = l.split(" ")
                addr = int(addr[1:], 10)
                d[addr] = bits
            else:
                # Other record types (G/F/checksum, ...) are ignored.
                continue
    ret["data"] = d
    return ret
def setup(client):
    """
    Setup function for testing_cog extension

    Entry point invoked when this module is loaded as a bot extension;
    it registers the cog on the client.

    Args:
        client (app.client.BotClient): Client that connects to discord API
    """
    client.add_cog(TestCog(client))
def plaintext(text, keeplinebreaks=True):
    """Extract the text elements from (X)HTML content
    >>> plaintext('<b>1 < 2</b>')
    u'1 < 2'
    >>> plaintext(tag('1 ', tag.b('<'), ' 2'))
    u'1 < 2'
    >>> plaintext('''<b>1
    ... <
    ... 2</b>''', keeplinebreaks=False)
    u'1 < 2'
    :param text: `unicode` or `Fragment`
    :param keeplinebreaks: optionally keep linebreaks
    """
    if isinstance(text, Fragment):
        extracted = text.as_text()
    else:
        # Plain markup string: strip the tags, then decode entities.
        extracted = stripentities(striptags(text))
    if keeplinebreaks:
        return extracted
    return extracted.replace(u'\n', u' ')
def check_out_dir(out_dir, base_dir):
    """Creates the output folder (if needed) and returns its path.

    When `out_dir` is None, a default directory under `base_dir` is used.

    Raises:
        IOError: if the directory cannot be created.
    """
    if out_dir is None:
        out_dir = pjoin(base_dir, default_out_dir_name)
    try:
        os.makedirs(out_dir, exist_ok=True)
    except OSError as exc:
        # Narrowed from a bare `except:` and chained so the underlying cause
        # (permissions, bad path, ...) is preserved in the traceback.
        raise IOError('Unable to create the output directory as requested.') from exc
    return out_dir
def test_nested_conditional_events(circuit):
    """Test tested conditional events (an edge case that nobody needs)."""
    cnt = edzed.Counter('counter')
    init(circuit)
    assert cnt.output == 0
    # Truthy value resolves the outer EventCond to its first branch, which is
    # itself a conditional that (again truthy) resolves to 'inc'.
    cnt.event(edzed.EventCond(edzed.EventCond('inc', 'ERR'), None), value=True)
    assert cnt.output == 1
    # Falsy value walks the false branches: outer -> inner -> innermost 'dec'.
    cnt.event(edzed.EventCond(
        'ERR', edzed.EventCond(None, edzed.EventCond('ERR', 'dec'))), value=0)
    assert cnt.output == 0
def setup():
    """Initial deployment setup

    Creates the remote virtualenv, the shared directories under the vhost
    path, and then runs the remote setup task.
    """
    _set_venv_name()
    run("mkvirtualenv {venv_name}".format(**env))
    with cd(env.vhost_path):
        run('mkdir -p {shared_dirs}'.format(**env))
    execute(setup_remote)
def fetch_rows(product):
    """
    Returns the product and a list of timestamp and price for the given product in the current DATE,
    ordered by timestamp.
    """
    # We query the data lake by passing a SQL query to maystreet_data.query
    # Note that when we filter by month/day, they need to be 0-padded strings,
    # e.g. January is '01' and not 1 -- the ':02d' format spec handles that
    # (the original used str(...).rjust(2, '0')).
    # NOTE(review): `product` is interpolated directly into the SQL text; the
    # query API does not appear to accept bound parameters here, so callers
    # must not pass untrusted product strings.
    query = f"""
    SELECT
        ExchangeTimestamp AS ts,
        price
    FROM
        "prod_lake"."p_mst_data_lake".mt_trade
    WHERE
        y = '{DATE.year}'
        AND m = '{DATE.month:02d}'
        AND d = '{DATE.day:02d}'
        AND product = '{product}'
    ORDER BY
        ExchangeTimestamp
    """
    return product, list(md.query(md.DataSource.DATA_LAKE, query))
def create_learner(sm_writer, model_helper):
    """Create the learner as specified by FLAGS.learner.
    Args:
    * sm_writer: TensorFlow's summary writer
    * model_helper: model helper with definitions of model & dataset
    Returns:
    * learner: the specified learner
    """
    # Dispatch table replacing the original if/elif chain.
    learner_classes = {
        'full-prec': FullPrecLearner,
        'weight-sparse': WeightSparseLearner,
        'channel': ChannelPrunedLearner,
        'chn-pruned-gpu': ChannelPrunedGpuLearner,
        'chn-pruned-rmt': ChannelPrunedRmtLearner,
        'dis-chn-pruned': DisChnPrunedLearner,
        'uniform': UniformQuantLearner,
        'uniform-tf': UniformQuantTFLearner,
        'non-uniform': NonUniformQuantLearner,
    }
    if FLAGS.learner not in learner_classes:
        raise ValueError('unrecognized learner\'s name: ' + FLAGS.learner)
    return learner_classes[FLAGS.learner](sm_writer, model_helper)
def train(env_id, num_timesteps, run, kappa, vf_phi_update_interval, log):
    """
    Train TRPO model for the mujoco environment, for testing purposes
    :param env_id: (str) Environment ID
    :param num_timesteps: (int) The total number of samples
    :param run: (int) Run index; also used as the random seed
    :param kappa: coefficient forwarded to the TRPO constructor
    :param vf_phi_update_interval: (int) how often the value-function phi is updated
    :param log: (bool) when True, log to the default location instead of the
        per-run experiment directory
    """
    with tf_util.single_threaded_session():
        rank = MPI.COMM_WORLD.Get_rank()
        # Per-run experiment directory encodes kappa, update interval and run id.
        log_path = './experiments/'+str(env_id)+'./updated_nkappa_x7_ent_0.01_new/'+str(kappa)+'_'+str(vf_phi_update_interval)+'_'+str(run)
        if not log:
            # Only MPI rank 0 writes full logs; other workers are silenced.
            if rank == 0:
                logger.configure(log_path)
            else:
                logger.configure(log_path, format_strs=[])
                logger.set_level(logger.DISABLED)
        else:
            if rank == 0:
                logger.configure()
            else:
                logger.configure(format_strs=[])
                logger.set_level(logger.DISABLED)
        seed = run
        # Give each MPI worker a distinct environment seed.
        workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank()
        env = make_mujoco_env(env_id, workerseed)
        test_env = None#make_mujoco_env(env_id, workerseed)
        model = TRPO(MlpPolicy, env, test_env=test_env, timesteps_per_batch=1024, max_kl=0.01, cg_iters=10, cg_damping=0.1, entcoeff=0.01,
                     gamma=0.99, kappa=kappa, vf_iters=5, vf_stepsize=1e-3, verbose=1, vf_phi_update_interval=vf_phi_update_interval, seed=run)
        model.learn(total_timesteps=num_timesteps, seed=run)
        #model.save("./"+str(env_id)+"./models/"+str(kappa)+"_"+str(run)+'_final_nkappa_x7_ent_0.01_'+str(vf_phi_update_interval)+'.pkl')
        env.close()
def load_decoder(autoencoder):
    """
    Return the decoder sub-models of the given autoencoder.

    Models with two inputs carry separate magnitude and phase decoders;
    single-input models carry one plain decoder. Returns the decoder list
    and a flag indicating the magnitude/phase layout.
    """
    n_inputs = len(autoencoder.get_config()['input_layers'])
    if n_inputs == 2:
        decoder_names = ['mag_decoder', 'phase_decoder']
        mag_phase_flag = True
    else:
        decoder_names = ['decoder']
        mag_phase_flag = False
    decoders = [autoencoder.get_layer(name) for name in decoder_names]
    return decoders, mag_phase_flag
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.