content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def most_frequent_code(features, common_table, config):
    """Flag (name, code) pairs whose code is the most common one for the name.

    :param features: mapping from (lgname, lgcode) pair to a dict of feature values
    :param common_table: mapping from language name to its most common code(s)
    :param config: feature configuration; the 'GL-multi-word-name' entry
        toggles the multi-word flag
    :return: None (mutates ``features`` in place)
    """
    for name, code in features:
        if name not in common_table:
            continue
        if code not in common_table[name]:
            continue
        feature_dict = features[(name, code)]
        feature_dict['GL-most-frequent-code'] = True
        # Multi-word names get an extra flag, but only when enabled in config.
        if len(name.split()) > 1 and config['features']['GL-multi-word-name'] == 'yes':
            feature_dict['GL-multi-word-name'] = True
def scrape_data_post(userName, id, scan_list, section, elements_path, save_status, file_names, workEducation):
    """Scrape friends/photos/videos/about/posts (statuses) of a profile.

    Builds the list of profile sub-page URLs, loads each with the module-level
    ``driver`` (Selenium WebDriver), extracts elements by XPath and hands them
    to ``save_post`` for persistence.

    :param userName: profile owner's name, forwarded to ``save_post``
    :param id: base profile URL
    :param scan_list: items to scan; only its length drives the loop
    :param section: URL suffixes appended to ``id`` for each sub-page
    :param elements_path: one XPath expression per sub-page
    :param save_status: scrape mode; 4 means posts/statuses, 3 skips scrolling
    :param file_names: output file name per sub-page
    :param workEducation: forwarded to ``save_post``
    """
    page = []
    if save_status == 4:
        # Posts are scraped from the profile root page as well.
        page.append(id)
    for suffix in section:
        page.append(id + suffix)
    for i in range(len(scan_list)):
        try:
            driver.get(page[i])
            if save_status != 3:
                # "About" pages (status 3) fit on one screen; others need scrolling.
                scroll()
            data = driver.find_elements_by_xpath(elements_path[i])
            if len(data) == 0 and save_status == 4:
                # Fall back to the generic post container class.
                data = driver.find_elements_by_xpath(
                    '//div[@class="_1dwg _1w_m _q7o"]')
            save_post(userName, file_names[i],
                      data, save_status, i, workEducation)
        except Exception:
            # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; also removed the unused ``folder`` and
            # pre-loop ``data`` locals.
            print("Exception (scrape_data)", str(i), "Status =",
                  str(save_status), sys.exc_info()[0])
    return
def broadcast_dynamic_shape(shape_x, shape_y):
    """Compute the broadcast result shape from two symbolic shapes.

    Given two shape tensors (e.g. the results of calling ``tf.shape``),
    return a tensor holding the shape produced by broadcasting tensors of
    shapes ``shape_x`` and ``shape_y`` against each other. For example,
    shapes [1, 2, 3] and [5, 1, 3] broadcast to [5, 2, 3]. Useful when the
    operands' shapes are not statically known.

    Args:
      shape_x: A rank 1 integer `Tensor`, representing the shape of x.
      shape_y: A rank 1 integer `Tensor`, representing the shape of y.

    Returns:
      A rank 1 integer `Tensor` representing the broadcasted shape.
    """
    return gen_array_ops.broadcast_args(shape_x, shape_y)
def extract_args_for_httpie_main(context, method=None):
    """Build the argument list for HTTPie's main() from a Context.

    Order matters for HTTPie: options first, then the (uppercased) HTTP
    method if given, then the URL, then the request items.
    """
    args = _extract_httpie_options(context)
    if method:
        args.append(method.upper())
    args.append(context.url)
    args.extend(_extract_httpie_request_items(context))
    return args
def apply_configuration(dist: "Distribution", filepath: _Path) -> "Distribution":
    """Apply a ``setup.cfg`` file's configuration onto *dist* and return it.

    Re-finalizes the requirement metadata after applying, since the config
    may have changed install/extras requirements.
    """
    _apply(dist, filepath)
    dist._finalize_requires()
    return dist
def test_double_prepare_slash(db):
    """The chain head must not move unless the alternative fork gains more commits."""
    scenario = 'B J0 B B S0 B P0 B1 C0 B1 R0 B P0 B1 C0 B1 X0 B J1 J2 B B P1 P2 B1 C1 C2 B P1 P2 B1 C1 C2 B1'
    TestLangHybrid(scenario, 15, 100, 0.02, 0.002).parse()
def bitbucketBaseIssue():
    """Generate and return the base data dict for a BitbucketIssue."""
    reporter = {
        'display_name': 'user_name',
        'account_id': 'user_name',
    }
    return dict(
        assignee=None,
        component=None,
        content='issue_summary',
        content_updated_on='4000-01-01T00:00:00Z',
        created_on='3000-01-01T00:00:00Z',
        edited_on='4000-01-01T00:00:00Z',
        id=1,
        kind='task',
        milestone=None,
        priority='major',
        reporter=reporter,
        status='open',
        title='issue_title',
        updated_on='4000-01-01T00:00:00Z',
        version=None,
        watchers=[],
        voters=[],
    )
def FindChromeCandidates(overlay_dir):
    """Return a tuple of chrome's unstable ebuild and stable ebuilds.

    Args:
      overlay_dir: The path to chrome's portage overlay dir.
    Returns:
      Tuple [unstable_ebuild, stable_ebuilds].
    Raises:
      Exception: if no unstable ebuild exists for Chrome.
    """
    stable_ebuilds = []
    unstable_ebuilds = []
    for entry in os.listdir(overlay_dir):
        path = os.path.join(overlay_dir, entry)
        if not path.endswith('.ebuild'):
            continue
        ebuild = ChromeEBuild(path)
        if not ebuild.chrome_version:
            cros_build_lib.Warning('Poorly formatted ebuild found at %s' % path)
        elif '9999' in ebuild.version:
            unstable_ebuilds.append(ebuild)
        else:
            stable_ebuilds.append(ebuild)
    # Sanity checks: the 9999 (unstable) ebuild is mandatory, stable is not.
    if not unstable_ebuilds:
        raise Exception('Missing 9999 ebuild for %s' % overlay_dir)
    if not stable_ebuilds:
        cros_build_lib.Warning('Missing stable ebuild for %s' % overlay_dir)
    return portage_utilities.BestEBuild(unstable_ebuilds), stable_ebuilds
def is_hour_staffed(coverage_events_for_hour, level_mappings):
    """Return whether the hour's shift is correctly staffed.

    ``coverage_events_for_hour`` is the list of CoverageOffered JSON objects
    for the hour; the staffing rules themselves live in
    ``check_shift_coverage``.
    """
    return check_shift_coverage(coverage_events_for_hour, level_mappings)
def sqliteAdminBlueprint(
        dbPath,
        bpName='sqliteAdmin',
        tables=None,
        title='标题',
        h1='页标题',
        baseLayout='flask_sqlite_admin/sqlite_base.html',
        extraRules=None,
        decorator=defaultDecorator):
    """Create a Flask Blueprint exposing admin routes for a sqlite database.

    :param dbPath: path to the sqlite database (used in error messages)
    :param bpName: blueprint name, also used for url_for endpoints
    :param tables: tables to administer (default: all)
    :param title: page title
    :param h1: page heading
    :param baseLayout: template the admin pages extend
    :param extraRules: extra rules forwarded to sqliteAdminFunctions
    :param decorator: decorator applied to every view (e.g. login_required)
    :return: the configured Blueprint
    """
    # Fix: mutable default arguments ([]) were shared across calls; use None
    # sentinels instead.
    if tables is None:
        tables = []
    if extraRules is None:
        extraRules = []
    sqlite = Blueprint(bpName, __name__, template_folder='templates', static_folder='static')

    @sqlite.route('/', methods=['GET', 'POST'])
    @decorator
    def index():
        sf = sqliteAdminFunctions(global_db, tables=tables, extraRules=extraRules)
        if request.method == 'POST':
            # POST on the index adds a column to a table.
            add_form = AddFieldForm()
            if add_form.validate_on_submit():
                sf.addCol(add_form.field_name.data,
                          add_form.field_type.data,
                          add_form.field_table.data)
        res = sf.tableList(tables)
        if len(res) == 0:
            raise ValueError('No sqlite db and/or tables found at path = %s' % dbPath)
        else:
            return render_template('flask_sqlite_admin/sqlite.html', res=res, title=title, h1=h1, baseLayout=baseLayout, bpName=bpName)

    @sqlite.route('/api', methods=['GET', 'POST', 'PUT', 'DELETE'])
    @decorator
    def api():
        sf = sqliteAdminFunctions(global_db, tables=tables, extraRules=extraRules)
        # GET request: render one table's contents.
        if request.method == 'GET':
            q = request.args
            try:
                res = sf.tableContents(request.args['table'], request.args['sort'], request.args['dir'], request.args['offset'])
            except Exception as e:
                return render_template('flask_sqlite_admin/sqlite_ajax.html', table=request.args['table'], error='{}'.format(e))
            add_form = AddFieldForm()
            add_form.field_table.default = request.args['table']
            add_form.field_table.data = request.args['table']
            return render_template('flask_sqlite_admin/sqlite_ajax.html', add_form=add_form, data=res, title=title, h1=h1, baseLayout=baseLayout, bpName=bpName, q=q, qJson=json.dumps(q))
        # POST request: mutate rows/columns per the JSON "command".
        elif request.method == 'POST':
            # Fix: default to an error response so an unknown/missing command
            # no longer leaves ``res`` unbound (previously UnboundLocalError).
            res = {'status': 0, 'error': 'unknown command'}
            try:
                request_data = request.get_json()
                if "command" in request_data:
                    if request_data['command'] == 'del_col':
                        sf.delCol(request_data['data'], request_data['table'])
                        res = {'status': 1, 'message': '<a href="" class="alert-link">Refresh Page</a>'}
                    elif request_data['command'] == 'save_row':
                        sf.saveRow(request_data['row'], request_data['table'], request_data['id'])
                        res = {'status': 1, 'message': '<a href="" class="alert-link">Refresh Page</a>'}
                    elif request_data['command'] == 'del_row':
                        sf.delRow(request_data['table'], request_data['id'])
                        res = {'status': 1, 'message': '<a href="" class="alert-link">Refresh Page</a>'}
                    elif request_data['command'] == 'save_detail':
                        sf.addRow(request_data['table'], request_data['row'])
                        res = {'status': 1, 'message': '<a href="" class="alert-link">Refresh Page</a>'}
            except Exception as e:
                res = {'status': 0, 'error': '{}'.format(e)}
            return json.dumps(res)

    @sqlite.route('/selected', methods=['POST'])
    @decorator
    def selected():
        # Placeholder endpoint: acknowledges the POST with an empty response.
        return make_response()

    return sqlite
def AccountsUrl(command):
    """Generate the Google Accounts URL for a command.

    Args:
      command: The command to execute.
    Returns:
      A URL for the given command.
    """
    return '{0}/{1}'.format(GOOGLE_ACCOUNTS_BASE_URL, command)
def iter_input_annotation_output_df_df(inspection_index, input_df, annotation_df, output_df):
    """Create an efficient iterator of InspectionInputUnaryOperator objects.

    Yields one object per row, pairing the input row, this inspection's
    annotation value and the output row.
    """
    # pylint: disable=too-many-locals
    # Performance tips:
    # https://stackoverflow.com/questions/16476924/how-to-iterate-over-rows-in-a-dataframe-in-pandas
    assert isinstance(input_df, DataFrame)
    assert isinstance(output_df, DataFrame)
    # Restrict annotations to the single column belonging to this inspection.
    annotations = annotation_df.iloc[:, inspection_index:inspection_index + 1]
    row_iterators = (get_df_row_iterator(input_df),
                     get_df_row_iterator(annotations),
                     get_df_row_iterator(output_df))
    return (InspectionInputUnaryOperator(*rows) for rows in zip(*row_iterators))
def measure_curvatures(left_fit, right_fit, ym_per_pix=1., xm_per_pix=1.,
                       y_eval=0):
    """Calculate left and right lane line curvature radii.

    Args:
        left_fit: `numpy.ndarray` second order fit of the left lane line
        right_fit: `numpy.ndarray` second order fit of the right lane line
        ym_per_pix: `float` [m/pix] vertical pixel to meters relation
        xm_per_pix: `float` [m/pix] horizontal pixel to meters relation
        y_eval: `int` y value at which curvature is evaluated
    Returns:
        tuple ``(right_curvature, left_curvature)`` in meters; a side whose
        fit is None yields 0.
    """
    def _radius(fit):
        # Rescale the pixel-space polynomial coefficients to meters, then
        # apply the standard radius-of-curvature formula.
        coeff_a = fit[0] * (xm_per_pix / (ym_per_pix ** 2))
        coeff_b = fit[1] * (xm_per_pix / ym_per_pix)
        return ((1 + (2 * coeff_a * y_eval + coeff_b) ** 2) ** 1.5) / np.absolute(2 * coeff_a)

    left_curvature = _radius(left_fit) if left_fit is not None else 0
    right_curvature = _radius(right_fit) if right_fit is not None else 0
    return right_curvature, left_curvature
def ensure_present(params, check_mode):
    """
    Ensure that the specified Hipersockets adapter exists and has the
    specified properties set.

    Parameters:
      params (dict): Module input parameters (hmc_host, hmc_auth, cpc_name,
        name, and optionally properties and _faked_session).
      check_mode (bool): If True, nothing is changed on the HMC; the returned
        result reflects the input values instead of actual HMC state.

    Returns:
      tuple (changed, result): whether anything changed, and the adapter
      properties dict including a 'ports' entry.

    Raises:
      ParameterError: An issue with the module parameters.
      Error: Other errors during processing.
      zhmcclient.Error: Any zhmcclient exception can happen.
    """
    # Note: Defaults specified in argument_spec will be set in params dict
    host = params['hmc_host']
    userid, password = get_hmc_auth(params['hmc_auth'])
    cpc_name = params['cpc_name']
    adapter_name = params['name']
    _faked_session = params.get('_faked_session', None)  # No default specified
    changed = False
    # NOTE(review): if get_session() itself raises, the finally clause below
    # references an unbound ``session`` — confirm and consider pre-binding it.
    try:
        session = get_session(_faked_session, host, userid, password)
        client = zhmcclient.Client(session)
        cpc = client.cpcs.find(name=cpc_name)
        # The default exception handling is sufficient for the above.
        try:
            adapter = cpc.adapters.find(name=adapter_name)
        except zhmcclient.NotFound:
            adapter = None
        if not adapter:
            # It does not exist. The only possible adapter type
            # that can be created is a Hipersockets adapter, but before
            # creating one we check the 'type' input property to verify that
            # the intention is really Hipersockets creation, and not just a
            # mispelled name.
            input_props = params.get('properties', None)
            if input_props is None:
                adapter_type = None
            else:
                adapter_type = input_props.get('type', None)
            if adapter_type is None:
                raise ParameterError(
                    "Input property 'type' missing when creating "
                    "Hipersockets adapter {0!r} (must specify 'hipersockets')".
                    format(adapter_name))
            if adapter_type != 'hipersockets':
                raise ParameterError(
                    "Input property 'type' specifies {0!r} when creating "
                    "Hipersockets adapter {1!r} "
                    "(must specify 'hipersockets').".
                    format(adapter_type, adapter_name))
            create_props, update_props, _chg_adapter_type, _chg_crypto_type = \
                process_properties(adapter, params)
            # This is specific to Hipersockets: There are no update-only
            # properties, so any remaining such property is an input error
            invalid_update_props = {}
            for name in update_props:
                if name not in create_props:
                    invalid_update_props[name] = update_props[name]
            if invalid_update_props:
                raise ParameterError(
                    "Invalid input properties specified when creating "
                    "Hipersockets adapter {0!r}: {1!r}".
                    format(adapter_name, invalid_update_props))
            # While the 'type' input property is required for verifying
            # the intention, it is not allowed as input for the
            # Create Hipersocket HMC operation.
            del create_props['type']
            if not check_mode:
                adapter = cpc.adapters.create_hipersocket(create_props)
                adapter.pull_full_properties()
                result = adapter.properties  # from actual values
            else:
                adapter = None
                result = dict()
                result.update(create_props)  # from input values
            changed = True
        else:
            # It does exist.
            # Update its properties and change adapter and crypto type, if
            # needed.
            adapter.pull_full_properties()
            result = adapter.properties
            create_props, update_props, chg_adapter_type, chg_crypto_type = \
                process_properties(adapter, params)
            if update_props:
                if not check_mode:
                    adapter.update_properties(update_props)
                else:
                    result.update(update_props)  # from input values
                changed = True
            if chg_adapter_type:
                if not check_mode:
                    adapter.change_adapter_type(chg_adapter_type)
                else:
                    result['type'] = chg_adapter_type
                changed = True
            if chg_crypto_type:
                if not check_mode:
                    adapter.change_crypto_type(chg_crypto_type)
                else:
                    result['crypto-type'] = chg_crypto_type
                changed = True
            # Re-read actual state after any real (non-check-mode) change.
            if changed and not check_mode:
                adapter.pull_full_properties()
                result = adapter.properties  # from actual values
        if adapter:
            # Attach the full property dicts of all ports to the result.
            ports = adapter.ports.list()
            result_ports = list()
            for port in ports:
                port.pull_full_properties()
                result_ports.append(port.properties)
            result['ports'] = result_ports
        else:
            # For now, we return no ports when creating in check mode
            result['ports'] = dict()
        return changed, result
    finally:
        session.logoff()
def getRelation(pid, rid, out):
    """Recursively crawl an OSM relation and write its metadata to *out*.

    pid: parent relation id for this relation
    rid: relation id to crawl
    out: open text stream receiving one ';'-separated row per relation
    """
    full_url = 'https://api.openstreetmap.org/api/0.6/relation/' + rid + '/full'
    response = requests.get(full_url)
    # Keep a local copy of the raw XML for this relation.
    dump_file = codecs.open('boundary/' + rid + '.xml', 'w', 'utf-8')
    dump_file.write(response.text)
    dump_file.close()
    # Parse the XML with lxml and pull the relation's metadata.
    sel = etree.fromstring(response.content)
    en = getRelationValue(sel, rid, 'name:en', 'alt_name:en')
    zh = getRelationValue(sel, rid, 'name:zh', 'name')
    admin_lvl = sel.xpath('//relation[@id="' + rid + '"]/tag[@k="admin_level"]/@v')
    if len(admin_lvl) > 0:
        out.write('%s;%s;%s;%s;%s\n' % (pid, rid, en, zh, admin_lvl[0],))
    # Recurse into member sub-relations.
    sub_area_rid_path = '//relation[@id="' + rid + '"]/member[@type="relation"]/@ref'
    for ref in sel.xpath(sub_area_rid_path):
        getRelation(rid, ref, out)
def make_viz():
    """Produce the GP prior visualizations."""
    y_limits = [-5, 5]
    gpv.update_rc_params()
    # Hyperparameters for the "true" and alternative GPs.
    hypers_true = {'ls': 1., 'alpha': 1.5, 'sigma': 1e-1}
    hypers_alt = {'ls': 3., 'alpha': 1., 'sigma': 1e-1}
    # One-dimensional domain [-10, 10].
    domain = RealDomain({'min_max': [(-10., 10.)]})
    gp_true = SimpleGp(hypers_true)
    gp_alt = SimpleGp(hypers_alt)
    # Plot both GP priors with an empty dataset.
    data = Namespace(X=[], y=np.array([]))
    gpv.visualize_gp(gp_true, domain, data, std_mult=2, ylim=y_limits, save_str='viz_prior_true')
    plt.close()
    gpv.visualize_gp(gp_alt, domain, data, std_mult=2, ylim=y_limits, save_str='viz_prior')
    plt.close()
def execute_parallel_get(og_obj_id, tar_par_loc, remove_after):
    """Copy a file from a mounted file system via the MPI helper script.

    :param og_obj_id: object id forwarded to ``getParallel.py``
    :param tar_par_loc: target parallel location (currently unused)
    :param remove_after: whether to remove after copy (currently unused)
    """
    # TODO: download file from PARALLEL
    # (removed a redundant trailing ``pass``; the unused parameters are kept
    # for interface stability and documented above)
    subprocess.check_output(['mpiexec', '-n', '1', '-usize', '17', 'python', 'getParallel.py', og_obj_id])
def test_to_time_string_wih_datetime(berlin_datetime):
    """Converting a Berlin datetime to a server time string yields UTC,
    e.g. `16:00:00+00:00`."""
    result = datetime_services.to_time_string(berlin_datetime, to_server=True)
    assert result == '16:00:00+00:00'
def get_processed_image(username):
    """Fetch the user's processed images from the server and decode them.

    Requests the base64 strings of the processed images and decodes each one
    to raw image bytes. (The previous docstring claimed a single
    JpegImageFile was returned; the code returns a list of bytes.)

    Args:
        username (tkinter.StringVar): user-specified username to identify
            each unique user

    Returns:
        list[bytes]: decoded image bytes, one entry per processed image
    """
    proc_images_bytes = []
    r = requests.get(URL+'/processed_image/'+username.get())
    r_json = r.json()
    proc_b64_strings = r_json['processed_images']
    for i in range(len(proc_b64_strings)):
        proc_image_bytes = base64.b64decode(proc_b64_strings[i])
        proc_images_bytes.append(proc_image_bytes)
    return proc_images_bytes
def make_cse_path(raw_ticker: str, raw_industry: str) -> str:
    """Make the CSE listing URL (slug) for a ticker.

    Parameters:
        raw_ticker - cse ticker from xlsx sheet
        raw_industry - verbatim industry from ticker, not slugified
    Returns:
        URL of the CSE page for the ticker, or "" when the industry is missing
    """
    if pd.isna(raw_industry):
        return ""
    # verify raw_industry is in industry do later
    cse_industries = [
        "Industry",
        "Mining",
        "Diversified Industries",
        "Life Sciences",
        "Oil and Gas",
        "Technology",
        "CleanTech",
    ]
    industry_slug = raw_industry.lower().replace(" ", "-")
    ticker_slug = transform_name_to_slug(raw_ticker)
    return f"https://thecse.com/en/listings/{industry_slug}/{ticker_slug}"
def readTMY(filepath=os.path.join("TMY", "Germany DEU Koln (INTL).csv")):
    """
    Reads a typical meteorological year file and gets the GHI, DHI and DNI from it.

    :param filepath: TMY file path relative to the weather-data folders
        (default: Cologne, Germany)
    :return: tuple (data, location) — hourly weather DataFrame indexed over a
        full year in Europe/Berlin time, and a dict with name/latitude/longitude
    """
    # get data
    data = pd.read_csv(
        os.path.join(PATH, "weatherdata", filepath),
        skiprows=([0, 1]),
        sep=",",
    )
    # Fixed 8760-hour index (non-leap year), local Berlin time.
    data.index = pd.date_range(
        "2010-01-01 00:30:00", periods=8760, freq="H", tz="Europe/Berlin"
    )
    data = data.rename(
        columns={"Beam": "DNI", "Diffuse": "DHI", "Tdry": "T", "Wspd": "WS"}
    )
    # NOTE(review): the location header is read from the "profiles" tree while
    # the data above comes from "weatherdata" — confirm both hold this file.
    location_data = pd.read_csv(
        os.path.join(PATH, "profiles", filepath), nrows=1, sep=","
    )
    location = {
        "name": location_data["City"].values[0],
        "latitude": location_data["Latitude"].values[0],
        "longitude": location_data["Longitude"].values[0],
    }
    return data, location
def interface(inp, xdata, ydata):
    """Split the C gaussjac output into the RMSE and its gradient.

    Returns a tuple ``(rmse, drmse_dparams)`` where the second element is the
    derivative of the RMSE with respect to the parameters.
    """
    result = _gauss.gaussjac(xdata, ydata, inp)
    return result[0], result[1:]
def generateCoverText_BERT(mod, tok, startOfText, ranks, completeMessage):
    """
    Build the cover text Alice sends to Bob from the secret text's ranks.

    :param mod: language model returning next-token logits
    :param tok: matching tokenizer
    :param startOfText: seed text the cover text starts from
    :param ranks: rank sequence; each entry selects the rank-th most likely
        next token, thereby encoding the secret
    :param completeMessage: when truthy, completeMessage_BERT finishes the
        text after the encoded tokens
    :return: tuple (cover_text, token_id_list)
    """
    inputs = tok.encode(startOfText, return_tensors="pt", add_special_tokens=False)
    for s in ranks:
        tab = inputs.numpy()
        pred = mod(inputs)[0]
        # Pick the s-th most probable token for the last position and append it.
        index = torch.argsort(pred[0, -1, :], descending=True)[s]
        tab = [np.append(tab[0],index)]
        inputs = torch.Tensor(tab).type(torch.long)
    inputs=inputs.tolist()[0]
    if (completeMessage):
        inputs=completeMessage_BERT(mod, tok, inputs)
    cover_text = tok.decode(inputs)
    return cover_text, inputs
async def _(event: GroupMessageEvent):
    """
    Handle the "来点色图" ("send a setu") group command: reply to the
    triggering message with a random image picked from SETU_PATH.
    """
    msg = MessageSegment.reply(event.message_id) + '您点的一份色图~' + MessageSegment.image(
        f"file:///{Path(SETU_PATH + choice(os.listdir(SETU_PATH))).absolute()}")
    await setu.finish(msg)
def infer_from_did_elasticnet(did, debug=False):
    """Returns `inferred` tuple (interaction_coefficients, growth_rates)
    Will return info as well if debug=True

    :param did: dataset id forwarded to prepare_data_for_inference
    :param debug: when True, return ``(inferred, info)`` where ``info`` holds
        the per-species intermediates
    :return: ``(slopes, intercepts)`` stacked arrays; or
        ``("broken", did, info)`` if an ElasticNet fit raises ValueError
    """
    df_geom, df_dlogydt, df_nzmask, n_species = prepare_data_for_inference(did)
    ####### BEGIN THE INFERENCE!!!!! #######
    regs = []
    intercepts = []
    slopes = []
    # For debugging if needed
    info = dict(dlogydts=[], masks=[], gmeans=[], species=[], shapes=[])
    # Begin inference for each and every focal_species
    for focal_species in range(n_species):
        # Get the y to be predicted
        cur_dlogydt = np.concatenate(df_dlogydt.loc[focal_species].values)
        cur_mask = np.concatenate(df_nzmask.loc[focal_species].values)
        # Get the X to predict, only take valid intervals
        cur_gmeans = np.array(
            [np.concatenate(df_geom.loc[i, :].values) for i in range(n_species)]
        ).T
        cur_gmeans = cur_gmeans[cur_mask, :].copy()
        # Update debug info
        info["dlogydts"].append(cur_dlogydt)
        info["masks"].append(cur_mask)
        info["gmeans"].append(cur_gmeans)
        info["species"].append(focal_species)
        info["shapes"].append(
            dict(dlogydt=cur_dlogydt.shape, mask=cur_mask.shape, gmeans=cur_gmeans.shape)
        )
        # If focal_species has no intervals, return NaNs for inferred.
        if len(cur_dlogydt) == 0:
            regs.append(np.nan)
            slopes.append(np.repeat(np.nan, n_species))
            intercepts.append(np.array([np.nan]))
        # Otherwise, regress.
        else:
            try:
                reg = linear_model.ElasticNet().fit(cur_gmeans, cur_dlogydt)
            except ValueError:
                # Surface the failing dataset and intermediates to the caller.
                return ("broken", did, info)
            regs.append(reg)
            intercepts.append(reg.intercept_)
            slopes.append(reg.coef_)
    # Return all solutions!
    # make em arrays
    slopes = np.vstack(slopes)
    intercepts = np.vstack(intercepts)
    inferred = (slopes, intercepts)
    if debug:
        return (inferred, info)
    else:
        return inferred
def Fanstatic(app,
              publisher_signature=fanstatic.DEFAULT_SIGNATURE,
              injector=None,
              **config):
    """Fanstatic WSGI framework component.

    :param app: The WSGI app to wrap with Fanstatic.
    :param publisher_signature: Optional argument to define the
      signature of the publisher in a URL. The default is ``fanstatic``.
    :param injector: A injector callable.
    :param ``**config``: Optional keyword arguments. These are
      passed to :py:class:`NeededInclusions` when it is constructed.
    """
    # The injector rewrites responses; the delegator then routes resource
    # URLs to the publisher and everything else to the injector-wrapped app.
    wrapped_app = Injector(
        app,
        publisher_signature=publisher_signature,
        injector=injector,
        **config)
    publisher = Publisher(LibraryRegistry.instance())
    return Delegator(
        wrapped_app,
        publisher,
        publisher_signature=publisher_signature)
def predicate_id(namespace, predicate, cursor=None):
    """
    Get a RDF predicate ID, creating it if necessary.

    :param namespace: predicate namespace
    :param predicate: predicate name within the namespace
    :param cursor: optional DB cursor to reuse; a new one is obtained when None
    :return: the predicate's id (the 'id' column of the fetched row)
    """
    data = {'namespace': namespace, 'predicate': predicate}
    cursor = relations_reader.predicate_id(data, cursor=cursor)
    if not cursor.rowcount:
        # Not found: insert it. NOTE(review): this assumes the upsert leaves
        # the new row's id fetchable on the same cursor — confirm in
        # relations_writer.upsert_predicate.
        relations_writer.upsert_predicate(data, cursor=cursor)
    return cursor.fetchone()['id']
def _create_filter_list(user_list, request=None):
    """Build the filter definition list for the user list screen.

    [Args]
        user_list: rows produced by _getUserData(filters)
        request: used so logger.logic_log can show userId / sessionID
    [Returns]
        filter_list: one filter definition dict per column
    """
    logger.logic_log('LOSI00001', 'user_list: %s' % len(user_list), request=request)

    def _pulldown(values):
        # Sorted unique values with an empty entry first meaning "no filter".
        items = [{'k': v, 'v': v} for v in sorted(set(values))]
        items.insert(0, {'k': '', 'v': ''})
        return items

    def _column(colno, caption, like=None, pulldown=None, calendar=None):
        # One filter column definition; 'fromto' is unused on this screen.
        return {
            'colno': colno,
            'caption': caption,
            'like': like,
            'fromto': None,
            'pulldown': [] if pulldown is None else pulldown,
            'calendar': calendar,
        }

    # Refactor: the six near-identical blocks of the original are collapsed
    # into the two helpers above; output structure is unchanged.
    filter_list = [
        _column('user_name', 'ユーザ名', like=True,
                pulldown=_pulldown(u['user_name'] for u in user_list)),
        _column('login_id', 'ログインID', like=True,
                pulldown=_pulldown(u['login_id'] for u in user_list)),
        _column('mail_address', 'メールアドレス', like=True,
                pulldown=_pulldown(u['mail'] for u in user_list)),
        # Names of the groups active users belong to, ascending.
        _column('group_name', 'グループ名', like=True,
                pulldown=_pulldown(gn for u in user_list for gn in u['group_name'])),
        _column('last_update_user', '最終更新者', like=True,
                pulldown=_pulldown(u['upd_user'] for u in user_list)),
        _column('last_update_timestamp', '最終更新日時', calendar=True),
    ]
    logger.logic_log('LOSI00002', 'filter_list: %s' % filter_list, request=request)
    return filter_list
def run_script(cli_string, die=True, verbose=True):
    """Run the cli_string UNIX CLI command and record output.

    Args:
        cli_string: String of command to run (split on spaces; no shell)
        die: Exit the process (status 2) if the command fails
        verbose: Print progress and failure diagnostics

    Returns:
        (returncode, stdoutdata, stderrdata):
            Execution code, STDOUT output and STDERR output (both bytes).
    """
    # Initialize key variables
    messages = []
    stdoutdata = ''.encode()
    stderrdata = ''.encode()
    returncode = 1
    # Enable verbose mode if True
    if verbose is True:
        print('Running Command: "{}"'.format(cli_string))
    do_command_list = list(cli_string.split(' '))
    # Create the subprocess object
    try:
        process = subprocess.Popen(
            do_command_list,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        stdoutdata, stderrdata = process.communicate()
        returncode = process.returncode
    except Exception:
        # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
        # propagate instead of being reported as a command failure.
        (exc_type, exc_value, exc_traceback) = sys.exc_info()
        messages.append('''\
Bug: Exception Type:{}, Exception Instance: {}, Stack Trace Object: {}]\
'''.format(exc_type, exc_value, exc_traceback))
        messages.append(traceback.format_exc())
    # Collect diagnostics if the return code is not 0
    if bool(returncode) is True:
        messages.append(
            'Return code:{}'.format(returncode)
        )
        for line in stdoutdata.decode().split('\n'):
            messages.append(
                'STDOUT: {}'.format(line)
            )
        for line in stderrdata.decode().split('\n'):
            messages.append(
                'STDERR: {}'.format(line)
            )
    # Log messages, then optionally exit on failure
    if verbose is True:
        print('messages: {}'.format(messages))
    if bool(messages) is True:
        for log_message in messages:
            if verbose is True:
                print(log_message)
        if bool(die) is True:
            # All done
            sys.exit(2)
    # Return
    return (returncode, stdoutdata, stderrdata)
def get_events() -> List[Dict]:
    """Represents set of sales events"""
    rows = [
        (1, "Product 2", 2, 489.5),
        (2, "Product 3 XL", 2, 411.8),
        (3, "Product 4", 2, 56.15),
        (4, "Product 4 XL", 5, 197.7),
        (5, "Product 3", 7, 222.3),
    ]
    return [
        {"Txid": txid, "productname": name, "qty": qty, "sales": sales}
        for txid, name, qty, sales in rows
    ]
def jump_handler(s):
    """Handling single and double jumps

    Decides the jump button state from the state object ``s``. Reads
    attributes poG, ljump, lljump, fz, airtime, dT (presumably: on-ground
    flag, last / last-last jump states, vertical force, time airborne,
    frame delta — TODO confirm) and zeroes pitch/yaw/roll on a fresh press.
    """
    # Precedence: reads as ``s.poG or (s.ljump and not s.poG and s.fz > 0)``
    # — jump when grounded, or mid-air while still rising with jump held.
    jump = 1 if s.poG or s.ljump and not s.poG and s.fz > 0 else 0
    # Precedence: ``(jump and s.ljump != s.lljump) or not s.ljump`` — reset
    # the rotation inputs on a state edge or while jump is released.
    if jump and s.ljump != s.lljump or not s.ljump:
        s.pitch = s.yaw = s.roll = 0
    # Double-jump window: enough airtime and still moving up strongly;
    # toggling against ljump produces the second press.
    if min(0.18, s.dT + 0.05) < s.airtime and s.fz > 100:
        jump = not s.ljump
    return jump
def init():
    """Initialize this node's connections and neighbor strategy.

    Let's get it started...in here! Developer note: the advantage of putting
    the wiring here is that me.py doesn't need to import anything.
    """
    global neighborStrategy
    me.init(nodes.CurrentNode())
    debug("Init called. Node is " + me.getUid(), info=True)
    debug("#".join(["New", me.getMe().getShortUid(), me.getUid()]),
          monitor=True)
    neighborStrategy = neighbors.neighborStrategyFactory(
        config.GOSSIP_NEIGHBOR_STRATEGY)
def sec2hms(sec):
    """Split a number of seconds into (hours, minutes, seconds)."""
    h = int(sec / 3600)
    m = int((sec - 3600 * h) / 60)
    s = int(sec - 3600 * h - 60 * m)
    return h, m, s
def EnableProfiler(enable): # real signature unknown; restored from __doc__
    """
    EnableProfiler(enable: bool)
    Enable or disable profiling for the current ScriptEngine. This will only affect code
    that is compiled after the setting is changed; previously-compiled code will retain
    whatever setting was active when the code was originally compiled.
    The easiest way to recompile a module is to reload() it.
    """
    # Stub: the real implementation lives in the hosting runtime; this
    # placeholder (signature restored from __doc__) is a no-op.
    pass
def to_english_like_sentence(sentence: str, tokenizer=None):
    """Tokenize *sentence* and join the tokens with single spaces.

    :param sentence: input sentence (e.g. Chinese text)
    :param tokenizer: callable mapping a sentence to an iterable of tokens;
        defaults to a Jieba tokenizer
    :return: space-separated token string
    """
    if tokenizer is None:
        # Fix: previously ``tokenizers.JiebaTokenizer()`` was constructed at
        # import time as the default argument; build it lazily per call.
        tokenizer = tokenizers.JiebaTokenizer()
    return ' '.join(tokenizer(sentence))
def get_month_range(start_date):
    """Get the start and end datetimes for the month.

    :param start_date: any datetime inside the target month
    :type start_date: datetime.datetime()
    :return: tuple (start_datetime, end_datetime)
    """
    month_start = datetime(start_date.year, start_date.month, 1)
    # End of month = "max" time of the day one month after the start.
    month_end = utils.date_to_datetime(
        utils.add_months(month_start.date(), 1),
        'max'
    )
    return month_start, month_end
def generate_rendition(in_path: str, out_dir: str, size: int) -> str:
    """Given an image path and a size, save a JPEG rendition to disk.

    :param str in_path: Path to the file
    :param str out_dir: Directory to store the file in
    :param int size: Rendition size
    :returns: the rendition's file name
    """
    rendition = generate_image(in_path, size)
    stem = os.path.splitext(os.path.basename(in_path))[0]
    out_file = slugify_filename(f'{stem}.{size}.jpg')
    rendition.convert('RGB').save(os.path.join(out_dir, out_file))
    return out_file
def dump(curse_manifest, filelike):
    """Write a curse manifest into a file-like object as UTF-8 JSON.

    Arguments
    curse_manifest -- curse manifest to dump (serialized with 4-space indent)
    filelike -- filelike object that has a write() method accepting bytes
    """
    encoded = json.dumps(curse_manifest, indent=4).encode("utf-8")
    filelike.write(encoded)
def convert_to_rotation_fdot(
    gwfrequency=None,
    rotationfrequency=None,
    rotationperiod=None,
    gwfdot=None,
    rotationpdot=None,
    **kwargs,
):
    """
    Convert the GW frequency (assumed to be twice the rotation frequency) or
    the rotation period and GW rotation frequency derivative or rotation
    period derivative into rotation frequency derivative.

    Raises ValueError when the required inputs are not present.
    """
    # Rotation frequency: GW frequency takes precedence, then period, then
    # the rotation frequency itself.
    if gwfrequency is not None:
        freq = gwfrequency / 2.0
    elif rotationperiod is not None:
        freq = 1.0 / rotationperiod
    else:
        freq = rotationfrequency
    # Frequency derivative: GW fdot takes precedence over the period
    # derivative; d(1/P)/dt = -Pdot / P^2 = -Pdot * f^2.
    fdot = None
    if freq is not None:
        if gwfdot is not None:
            fdot = gwfdot / 2.0
        elif rotationpdot is not None:
            fdot = -rotationpdot * freq ** 2
    if freq is None or fdot is None:
        raise ValueError("Required conversion parameters are not present")
    return fdot
def send_block(
    cmd=None,
    section=None,
    item=None,
    identifier=None,
    zone=None,
    owner=None,
    ttl=None,
    rtype=None,
    data=None,
    flags=None,
    filter_=None,
):
    """Send block command to Libknot server control.

    Returns the JSON-formatted response, or the control error's payload text
    when the call fails (e.g. a socket timeout after removing a zone).
    """
    ctl = connect_knot()
    resp = None
    try:
        ctl.send_block(
            cmd=cmd,
            section=section,
            item=item,
            identifier=identifier,
            zone=zone,
            owner=owner,
            ttl=ttl,
            rtype=rtype,
            data=data,
            # NOTE(review): the ``flags`` parameter is ignored and "B" is
            # always sent — confirm this is intended.
            flags="B",
            filter=filter_,
        )
        resp_ = ctl.receive_block()
        if resp_:
            # Fix: previously dumped ``resp`` (always None at this point)
            # instead of the received block ``resp_``.
            resp = json.dumps(resp_, indent=4)
    except libknot.control.KnotCtlError as knot_error:
        # most of the time, after removing a zone
        # socket connection will be time out
        resp = str(knot_error.data)
    finally:
        ctl.send(libknot.control.KnotCtlType.END)
        ctl.close()
    return resp
def _poisson_covariance(dist, lambda0):
""" Poisson covariance model on the sphere.
Parameters
----------
dist: float
Great circle distance.
lambda0: float
Lengthscale parameter.
"""
cov = (1 - lambda0**2) / (1 - 2*lambda0*np.cos(dist) + lambda0**2)**(3/2)
return cov | 26,240 |
def ch_mkdir(directory):
    """
    ch_mkdir : This function creates a directory if it does not exist.
    Arguments:
        directory (string): Path to the directory.
    --------
    Returns:
        null.
    """
    try:
        # exist_ok makes the call idempotent and closes the check-then-create
        # race of the original os.path.exists() guard.
        os.makedirs(directory, exist_ok=True)
    except OSError:
        # Narrowed from a bare ``except``; failures are reported rather than
        # raised, matching the original best-effort behavior.
        print('could not make the directory!')
def snot(N=None, target=None):
    """Quantum object representing the SNOT (2-qubit Hadamard) gate.

    When both ``N`` and ``target`` are given, the single-qubit gate is
    expanded to an ``N``-qubit operator acting on qubit ``target``.

    Returns
    -------
    snot_gate : qobj
        Quantum object representation of SNOT gate.
    Examples
    --------
    >>> snot()
    Quantum object: dims = [[2], [2]], \
shape = [2, 2], type = oper, isHerm = True
    Qobj data =
    [[ 0.70710678+0.j  0.70710678+0.j]
     [ 0.70710678+0.j -0.70710678+0.j]]
    """
    if N is not None and target is not None:
        return gate_expand_1toN(snot(), N, target)
    # H = (X + Z) / sqrt(2). The original's unused basis vectors u, d and the
    # intermediate Q were removed; `not N is None` rewritten idiomatically.
    return 1.0 / sqrt(2.0) * (sigmax() + sigmaz())
def _do_wrapper(
    func: Callable,
    *,
    responses: Dict[str, Type[BaseModel]] = None,
    header: Type[BaseModel] = None,
    cookie: Type[BaseModel] = None,
    path: Type[BaseModel] = None,
    query: Type[BaseModel] = None,
    form: Type[BaseModel] = None,
    body: Type[BaseModel] = None,
    **kwargs: Any
) -> Response:
    """
    Validate requests and responses

    :param func: view func
    :param responses: response model
    :param header: header model
    :param cookie: cookie model
    :param path: path model
    :param query: query model
    :param form: form model
    :param body: body model
    :param kwargs: path parameters
    :return:
    """
    parsed_kwargs = dict()
    # Each request part is validated in order; any pydantic failure
    # short-circuits into a 422 JSON response below.
    validators = (
        (header, lambda model: _do_header(model, parsed_kwargs)),
        (cookie, lambda model: _do_cookie(model, parsed_kwargs)),
        (path, lambda model: _do_path(model, kwargs, parsed_kwargs)),
        (query, lambda model: _do_query(model, parsed_kwargs)),
        (form, lambda model: _do_form(model, parsed_kwargs)),
        (body, lambda model: _do_body(model, parsed_kwargs)),
    )
    try:
        for model, run in validators:
            if model:
                run(model)
    except ValidationError as e:
        error_response = make_response(e.json())
        error_response.headers['Content-Type'] = 'application/json'
        error_response.status_code = 422
        return error_response
    # Handle the request itself with the validated/parsed arguments.
    response = func(**parsed_kwargs)
    if current_app.config.get("VALIDATE_RESPONSE", False) and responses:
        validate_response(response, responses)
    return response
def make_divisible(v, divisor, min_val=None):
    """
    This function is taken from the original tf repo.
    It ensures that all layers have a channel number that is divisible by 8
    It can be seen here:
    https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
    """
    if min_val is None:
        min_val = divisor
    rounded = int(v + divisor / 2) // divisor * divisor
    new_v = max(min_val, rounded)
    # Make sure that rounding down does not go down by more than 10%.
    return new_v + divisor if new_v < 0.9 * v else new_v
def mock_query_object(LCClient):
    """
    Creating a Query Response object and prefilling it with some information
    """
    # Fixed one-day window used by the mocked response.
    start, end = '2016/1/1', '2016/1/2'
    meta = {
        'TimeRange': TimeRange(parse_time(start), parse_time(end)),
        'Time_start': parse_time(start),
        'Time_end': parse_time(end),
        'source': 'Proba2',
        'instrument': 'lyra',
        'physobs': 'irradiance',
        'provider': 'esa',
    }
    urls = ['http://proba2.oma.be/lyra/data/bsd/2016/01/01/lyra_20160101-000000_lev2_std.fits',
            'http://proba2.oma.be/lyra/data/bsd/2016/01/02/lyra_20160102-000000_lev2_std.fits']
    return QueryResponse.create(meta, urls, client=LCClient)
def edit(photo_id):
    """
    Edit uploaded photo information.

    GET renders the edit form; PUT applies a JSON payload of photo fields.

    :param photo_id: target photo id
    :return: HTML template for edit form or Json data
    """
    if request.method == 'GET':
        # Render the edit form pre-populated with the photo's current data.
        photo = Photo.get(current_user.id, photo_id)
        return render_template('upload.html', photo=photo, gmaps_key=conf['GMAPS_KEY'])
    elif request.method == 'PUT':
        # Apply the JSON payload's fields to the photo and persist it.
        data = request.get_json()
        try:
            photo = Photo.get(current_user.id, photo_id)
            photo.tags = data['tags']
            photo.desc = data['desc']
            photo.geotag_lat = data['lat']
            photo.geotag_lng = data['lng']
            photo.city = data['city']
            photo.nation = data['nation']
            photo.address = data['address']
            photo.save()
            return jsonify(update='success')
        except Exception as e:
            # Missing payload keys or storage failures become a soft failure.
            app.logger.error(e)
            return jsonify(update='fail')
    else:
        # NOTE(review): url_for() expects an endpoint name, not a path like
        # "/" -- confirm this fallback branch is reachable and working.
        return redirect(url_for("/", gmaps_key=conf['GMAPS_KEY'])) | 26,246
def crop_only(net1, net2):
    """
    the size(net1) <= size(net2)
    """
    small_shape = net1.get_shape().as_list()
    big_shape = net2.get_shape().as_list()
    # Center-crop net2 spatially (axes 1 and 2, NHWC layout) down to
    # net1's height/width; batch and channel axes are kept whole (-1).
    row_offset = (big_shape[1] - small_shape[1]) // 2
    col_offset = (big_shape[2] - small_shape[2]) // 2
    begin = [0, row_offset, col_offset, 0]
    extent = [-1, small_shape[1], small_shape[2], -1]
    return tf.slice(net2, begin, extent)
def test_load_missing_key(engine):
    """Trying to load objects with missing hash and range keys raises"""
    with pytest.raises(MissingKey):
        engine.load(User(age=2))
    # ComplexModel needs both a hash and a range key; each of these is
    # missing at least one of them.
    incomplete_models = (
        ComplexModel(),
        ComplexModel(name=uuid.uuid4()),
        ComplexModel(date="no hash"),
    )
    for incomplete in incomplete_models:
        with pytest.raises(MissingKey):
            engine.load(incomplete)
def normalize(image):
    """ Normalize to 0-255, dtype: np.uint8
    """
    lo = np.min(image)
    if lo < 0:
        # Shift so the minimum sits at zero before scaling.
        image = image - lo
    hi = np.max(image)
    if hi != 0:
        image = image / hi
    return np.uint8(image * 255)
def temp_file(contents, suffix=''):
    """Create a self-deleting temp file with the given content"""
    # NamedTemporaryFile deletes itself when the handle is closed / GC'd.
    handle = tempfile.NamedTemporaryFile(suffix=suffix, dir=get_test_tmpdir())
    handle.write(contents)
    handle.flush()
    return handle
def string_to_charlist(string):
    """Return a list of characters with extra empty strings after wide chars"""
    # Fast path: pure-ASCII strings need no wide-character padding.
    if not set(string) - ASCIIONLY:
        return list(string)
    result = []
    if PY3:
        for c in string:
            result.append(c)
            # Pad East-Asian wide characters with an empty slot so list
            # indices line up with rendered display columns.
            if east_asian_width(c) in WIDE_SYMBOLS:
                result.append('')
    else:
        # Python 2 path: work on a unicode string, re-encode each char.
        try:
            # This raised a "UnicodeEncodeError: 'ascii' codec can't encode
            # character u'\xe4' in position 10: ordinal not in range(128)"
            # for me once. I thought errors='ignore' means IGNORE THE DAMN
            # ERRORS but apparently it doesn't.
            string = string.decode('utf-8', 'ignore')
        except UnicodeEncodeError:
            return []
        for c in string:
            result.append(c.encode('utf-8'))
            if east_asian_width(c) in WIDE_SYMBOLS:
                result.append('')
    return result | 26,251
def test_bad_pipeline_param_error():
    """
    Should return error if pipeline parameter is not valid.
    """
    cli_args = [
        '--host', 'test_host',
        '--collection', 'test_collection',
        '--db', 'test_db',
        '--port', '8080',
        '--pipeline', 'dummyPipeline',
    ]
    result = CliRunner().invoke(maggport.maggport, cli_args)
    # An unparsable pipeline should abort with a ValueError and exit code 1.
    assert result.exit_code == 1
    assert isinstance(result.exception, ValueError)
def urlparams(url_, hash=None, **query):
    """Add a fragment and/or query paramaters to a URL.

    New query params will be appended to exising parameters, except duplicate
    names, which will be replaced.
    """
    parts = urlparse.urlparse(url_)
    new_fragment = parts.fragment if hash is None else hash
    # dict(parse_qsl) keeps a single value per name instead of lists.
    params = dict(urlparse.parse_qsl(smart_text(parts.query))) if parts.query else {}
    params.update((k, v) for k, v in query.items())
    # Drop params explicitly set to None, then re-encode.
    encoded = _urlencode([(k, v) for k, v in params.items() if v is not None])
    rebuilt = urlparse.ParseResult(parts.scheme, parts.netloc, parts.path,
                                   parts.params, encoded, new_fragment)
    return rebuilt.geturl()
def get(word, cache=True):
    """
    Load the word 'word' and return the DudenWord instance
    """
    html_content = request_word(word, cache=cache)  # pylint: disable=unexpected-keyword-arg
    if html_content is None:
        return None
    parsed = bs4.BeautifulSoup(html_content, 'html.parser')
    return DudenWord(parsed)
def cubespace(start, stop=False, num=10, include_start=True):
    """
    Return sequence of *num* floats between *start* and *stop*.

    Analogously to numpy's linspace, values in returned list are chosen
    so that their cubes (hence name) are spread evenly in equal distance.

    If the parameter *stop* is not given, the value of *start* is used as
    the upper limit instead. In such case the lower limit is set to 0.
    The values of lower limit, *start*, and upper limit, *stop*,
    are included in the list. The *start* value can be excluded
    by setting the *include_start* keyword to False.

    :example:
    >>> cubespace(10, num=3)
    array([ 0.        ,  7.93700526, 10.        ])
    >>> cubespace(0, -10, num=3)
    array([ 0.        , -7.93700526, -10.        ])
    >>> cubespace(0, 10, num=3, include_start=False)
    array([ 6.93361274,  8.73580465, 10.        ])

    :param start: The starting value of a sequence.
    :type start: float
    :param stop: The ending value of a sequence. If False (default), *start*
        is used as a the ending value, while the starting value is set to 0.
    :type stop: float
    :param num: Number of samples to generate. Default is 10.
    :type num: int
    :param include_start: If False, the value of *start* is not included in
        returned list. Nonetheless, it is still considered as a starting point.
    :type include_start: bool
    :return: An array with *num* spaced samples in the *start*-*stop* interval.
    :rtype: numpy.ndarray
    """
    if stop is False:
        start, stop = 0.0, start
    if include_start is False:
        # Generate one extra point and drop the first, so *start* still
        # anchors the spacing but is excluded from the result.
        return cubespace(start, stop, num=num + 1, include_start=True)[1:]
    # Space the cubes linearly, then take the signed cube root.
    cubes = linspace(pow(start, 3), pow(stop, 3), num)
    return sign(cubes) * power(abs(cubes), 1 / 3)
def to_ndarray(arr):
    """
    Convert a list of lists to a multidimensional numpy array
    @param arr:
    @return:
    """
    rows = [np.array(row) for row in arr]
    return np.array(rows)
def spectre_avatar(avatar, size="sm", **kwargs):
    """
    render avatar

    :param avatar:
    :param size:
    :param kwargs:
    :return: HTML
    """
    context = dict(kwargs)
    context['avatar_url'] = avatar
    context['size'] = size
    # Fall back to the default Spectre purple when no background is given.
    context.setdefault('background', "#5755d9")
    return context
def update_models(scouseobject, key, models, selection):
    """
    Here we update the model selection based on the users instructions

    :param scouseobject: scouse session object holding ``indiv_dict`` of spectra
    :param key: key of the spectrum to update within ``scouseobject.indiv_dict``
    :param models: list of alternative model solutions for this spectrum
    :param selection: indices the user selected; empty => refit manually,
        first index 0 => keep current best fit, anything else => adopt that
        alternative model
    """
    spectrum = scouseobject.indiv_dict[key]
    if np.size(selection) == 0.0:
        # If no selection is made - refit manually
        # Make pyspeckit be quiet
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            old_log = log.level
            log.setLevel('ERROR')
            # generate a spectrum
            spec = get_spec(scouseobject, spectrum)
        log.setLevel(old_log)
        #bf = interactive_fitting(scouseobject, spectrum, spec)
        bf = Stage6Fitter()(scouseobject, spectrum, spec)
        # Now add this as the best-fitting model and add the others to models
        add_bf_model(spectrum, bf)
        update_model_list(spectrum, models)
        decision = 'refit'
        add_decision(spectrum, decision)
    elif selection[0] != 0.0:
        # If any spectrum other than the first is selected then swap this to the
        # model and the current best fit to models
        bf = models[selection[0]]
        models.remove(models[selection[0]])
        # Now add this as the best-fitting model and add the others to models
        add_bf_model(spectrum, bf)
        update_model_list(spectrum, models)
        decision = 'alternative'
        add_decision(spectrum, decision)
        print("")
        print("Selection acknowledged. "+
              colors.fg._lightgreen_+"Alternative spectrum selected"+colors._endc_+".")
        print_fit_information(bf, init_guess=False)
    else:
        # If the first spectrum was selected then the user has chosen to accept
        # the current best-fitting solution - so do nothing.
        print("")
        print("Selection acknowledged. "+
              colors.fg._lightgreen_+"Original solution retained"+colors._endc_+".")
        print_fit_information(spectrum.model, init_guess=False)
        pass | 26,258
def get_modules_by_appid(app_id):
    """
    Query all module information under a business (app).

    Note: the enterprise edition should use get_modules_by_property instead.
    """
    return bk.cc.get_modules_by_property(app_id=app_id)
def length(time_array, voltage_array):
    """Check that the time and voltage arrays are of equal length.

    :param time_array: array of time values
    :param voltage_array: array of voltage values
    :type time_array: ndarray
    :type voltage_array: ndarray
    :raises Exception: if the two arrays differ in length
    """
    if len(time_array) != len(voltage_array):
        # Include both lengths in the message so the mismatch is debuggable
        # (the previous bare ``raise Exception`` carried no information).
        raise Exception(
            "time_array (len %d) and voltage_array (len %d) must be the "
            "same length" % (len(time_array), len(voltage_array)))
def get_bed_from_nx_graph(
        graph,
        bed_file,
        interval_key="active",
        merge=True,
        return_key="region"):
    """get BED file from nx examples

    Writes one BED line per example stored in ``graph.graph["examples"]``
    (each example is a ';'-separated list of ``key=value`` pairs whose
    ``interval_key`` entry holds a "chrom:start-stop" interval), then
    optionally sort/merge/renumber the file via shell tools.

    :param graph: networkx graph with an "examples" graph attribute
    :param bed_file: output BED path (bgzipped when merge=True)
    :param interval_key: which metadata key holds the interval string
    :param merge: if True, sort + bedtools merge + renumber the output
    :param return_key: which metadata key to collect into the return list
    :return: list of the ``return_key`` values, one per example
    """
    if isinstance(graph.graph["examples"], basestring):
        graph.graph["examples"] = graph.graph["examples"].split(",")
    examples = list(graph.graph["examples"])
    return_intervals = []
    with open(bed_file, "w") as fp:
        for region_metadata in examples:
            # Parse "k1=v1;k2=v2;..." metadata into a dict.
            interval_types = region_metadata.split(";")
            interval_types = dict([
                interval_type.split("=")[0:2]
                for interval_type in interval_types])
            interval_string = interval_types[interval_key]
            return_intervals.append(interval_types[return_key])
            # Interval format is "chrom:start-stop".
            chrom = interval_string.split(":")[0]
            start = interval_string.split(":")[1].split("-")[0]
            stop = interval_string.split("-")[1]
            fp.write("{0}\t{1}\t{2}\n".format(chrom, start, stop))
    if merge:
        # merge
        # NOTE(review): relies on sort/bedtools/bgzip being on PATH and
        # shells out via os.system -- bed_file must be a trusted path.
        tmp_bed_file = "{}.tmp.bed".format(bed_file.split(".bed")[0])
        os.system("mv {} {}".format(bed_file, tmp_bed_file))
        os.system("cat {} | sort -k1,1 -k2,2n | bedtools merge -i stdin | bgzip > {}".format(
            tmp_bed_file, bed_file))
        os.system("rm {}".format(tmp_bed_file))
        # renumber
        tmp_bed_file = "{}.tmp.bed.gz".format(bed_file.split(".bed")[0])
        os.system("mv {} {}".format(bed_file, tmp_bed_file))
        #os.system("zcat {} | awk -F '\t' '{{ print $1\"\t\"$2\"\t\"$3\"\t\"$1\":\"$2\"-\"$3\"\t\"NR\"\t+\" }}' | bgzip -c > {}".format(
        #    tmp_bed_file, bed_file))
        os.system("zcat {} | awk -F '\t' '{{ print $1\"\t\"$2\"\t\"$3\"\ttest\"NR\"\t\"NR\"\t.\" }}' | bgzip > {}".format(
            tmp_bed_file, bed_file))
        os.system("rm {}".format(tmp_bed_file))
        # bgzip
        #new_tmp_bed_file = "{}.bed".format(tmp_bed_file.split(".tmp")[0])
        #os.system("mv {} {}".format(tmp_bed_file, new_tmp_bed_file))
        #os.system("bgzip {}".format(new_tmp_bed_file))
        #os.system("rm {}".format(tmp_bed_file))
    return return_intervals | 26,261
def do_json_counts(df, target_name):
    """ count of records where name=target_name in a dataframe with column 'name' """
    matching = df.filter(df.name == target_name)
    return matching.count()
def is_a(file_name):
    """
    Tests whether a given file_name corresponds to a ICEYE file. Returns a reader instance, if so.

    Parameters
    ----------
    file_name : str|BinaryIO
        the file_name to check

    Returns
    -------
    ICEYEReader|None
        `ICEYEReader` instance if ICEYE file, `None` otherwise
    """
    # File-like objects are not supported, only on-disk paths.
    if is_file_like(file_name):
        return None
    # ICEYE products are HDF5 containers; bail out early otherwise.
    if not is_hdf5(file_name):
        return None
    if h5py is None:
        return None
    try:
        iceye_details = ICEYEDetails(file_name)
        logger.info('File {} is determined to be a ICEYE file.'.format(file_name))
        return ICEYEReader(iceye_details)
    except SarpyIOError:
        # Not a recognizable ICEYE product.
        return None | 26,263
def exclusiveFields(obj, fields=()):
    """
    Validates that obj has fields only from specified list

    :param obj: mapping whose keys are validated
    :param fields: allowed field names (any container supporting ``in``)
    :raises AssertionError: if obj contains a key not listed in fields
    """
    # An immutable tuple default replaces the mutable-default ``[]``
    # (shared across calls) pitfall; membership semantics are unchanged.
    for k in obj.keys():
        assert k in fields, k + ':invalid'
def split_graph(infile, outdir, k_outdirs=None, num_outputs=None, seedfile=None, dagfile=None):
    """
    Load the dependency graph from infile and split it into num_outputs.

    :param infile: pickled networkx DiGraph (.pickle), optionally inside a
        .tgz archive containing a single member
    :param outdir: base output directory for the subgraph pickles
    :param k_outdirs: when set, spread the outputs over k numbered outdirs
    :param num_outputs: split the node set randomly into this many chunks;
        each chunk is closed under descendants, so subgraphs may overlap
    :param seedfile: CSV with a 'package name' column; extract the single
        subgraph reachable from those seeds instead of a random split
    :param dagfile: optional dag template whose contents are rewritten to
        point at each generated subgraph
    """
    if infile.endswith('.tgz'):
        # there is only one file in the tarball archive
        tmp_graph = tarfile.open(infile, "r:gz")
        for member in tmp_graph.getmembers():
            graph = pickle.load(tmp_graph.extractfile(member))
        infile = splitext(infile)[0]
    elif infile.endswith('.pickle'):
        graph = pickle.load(open(infile, 'rb'))
    else:
        raise Exception("Unexpected format/extension of dep graph file: %s" % infile)
    graph_size = graph.number_of_nodes()
    infname = basename(infile)
    if num_outputs:
        assert num_outputs > 1
        # NOTE(review): under Python 3 this division yields a float and the
        # range() below would raise; this code presumably targets Python 2
        # (basestring is used elsewhere in this file) -- confirm.
        subgraph_size = graph_size / (num_outputs - 1)
        logging.warning("spliting graph with %d nodes into %d outputs", graph_size, num_outputs)
        outbase, outext = splitext(infname)
        # Shuffle so each chunk is a random sample of the node set.
        graph_nodes = list(graph.nodes())
        random.shuffle(graph_nodes)
        subgraphs = [graph_nodes[s: min(s+subgraph_size, graph_size)] for s in range(0, graph_size, subgraph_size)]
        for index, subgraph_nodes in enumerate(subgraphs):
            logging.warning("generating the %d subgraph, with %d nodes", index, len(subgraph_nodes))
            outfname = '%s_%d%s' % (outbase, index, outext)
            # if k_outdirs is specified, split the outfiles into k outdirs
            if k_outdirs:
                index_outdir = outdir.rstrip('/') + str(index / (len(subgraphs)/k_outdirs))
            else:
                index_outdir = outdir
            if not exists(index_outdir):
                os.makedirs(index_outdir)
            outfile = join(index_outdir, outfname)
            subgraph_node_set = set(subgraph_nodes)
            # Descendants
            # https://networkx.github.io/documentation/networkx-1.9.1/reference/generated/networkx.algorithms.dag.descendants.html
            for subgraph_node in subgraph_nodes:
                subgraph_node_set.update(networkx.descendants(graph, subgraph_node))
            # generate the subgraph and dump it to disk
            logging.warning("the complete subgraph has %d nodes", len(subgraph_node_set))
            subgraph = networkx.DiGraph(graph.subgraph(subgraph_node_set))
            pickle.dump(subgraph, open(outfile, 'wb'))
            # generate the dag file for subgraph if specified
            if dagfile and exists(dagfile):
                logging.warning("generating the dag file for %d subgraph", index)
                dag_content = open(dagfile, 'r').read()
                dagfname = basename(dagfile)
                dagbase, dagext = splitext(dagfname)
                outdagfname = '%s_%d%s' % (dagbase, index, dagext)
                outdagbase = '%s_%d' % (dagbase, index)
                out_dagfile = join(index_outdir, outdagfname)
                # FIXME: the indir/outdir is not handled here, and assumes that dagfile points to the dags folder.
                out_dag_content = dag_content.replace(dagbase, outdagbase).replace(infname, outfname)
                open(out_dagfile, 'w').write(out_dag_content)
    elif seedfile:
        seed_nodes = [row['package name'] for row in csv.DictReader(open(seedfile, 'r'))]
        logging.warning("generating subgraph, with %d seed nodes", len(seed_nodes))
        subgraph_node_set = set(seed_nodes)
        seed_fname = splitext(basename(seedfile))[0]
        outfname = seed_fname + ".pickle"
        # if k_outdirs is specified, split the seedfile into the first outdir
        if k_outdirs:
            index_outdir = outdir.rstrip('/') + "0"
        else:
            index_outdir = outdir
        if not exists(index_outdir):
            os.makedirs(index_outdir)
        outfile = join(index_outdir, outfname)
        # Close the seed set under descendants so the subgraph is complete.
        for seed_node in seed_nodes:
            subgraph_node_set.update(networkx.descendants(graph, seed_node))
        # generate the subgraph and dump it to disk
        logging.warning("the complete subgraph has %d nodes, saving to %s", len(subgraph_node_set), outfile)
        subgraph = networkx.DiGraph(graph.subgraph(subgraph_node_set))
        pickle.dump(subgraph, open(outfile, 'wb'))
        # generate the dag file for subraph if specified
        if dagfile and exists(dagfile):
            logging.warning("generating the dag file for %s, graph in %s", seedfile, outfile)
            dag_content = open(dagfile, 'r').read()
            dagfname = basename(dagfile)
            dagbase, dagext = splitext(dagfname)
            outdagfname = "%s%s" % (seed_fname, dagext)
            out_dagfile = join(index_outdir, outdagfname)
            out_dag_content = dag_content.replace(dagbase, seed_fname).replace(infname, outfname)
            open(out_dagfile, 'w').write(out_dag_content)
    else:
        raise Exception("Invalid num_outputs %s and seedfile %s!" % (num_outputs, seedfile)) | 26,265
def fetch_optimizer(args, model):
    """ Create the optimizer and learning rate scheduler """
    optimizer = optim.AdamW(
        model.parameters(),
        lr=args.lr,
        weight_decay=args.wdecay,
        eps=args.epsilon,
    )
    # Linear one-cycle schedule with a short 5% warmup; the +100 pads the
    # schedule slightly past the training horizon.
    scheduler = optim.lr_scheduler.OneCycleLR(
        optimizer=optimizer,
        max_lr=args.lr,
        total_steps=args.num_steps + 100,
        pct_start=0.05,
        cycle_momentum=False,
        anneal_strategy='linear',
    )
    return optimizer, scheduler
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings, freeze_bert, finetune_module,
                     num_train_examples):
    """Returns `model_fn` closure for TPUEstimator.

    The closure builds a multi-label BERT classifier (via create_model),
    optionally restores weights from init_checkpoint, and emits the
    appropriate TPUEstimatorSpec for TRAIN / EVAL / PREDICT modes.
    """
    def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
        """The `model_fn` for TPUEstimator."""
        tf.logging.info("*** Features ***")
        for name in sorted(features.keys()):
            tf.logging.info(" name = %s, shape = %s" %
                            (name, features[name].shape))
        input_ids = features["input_ids"]
        input_mask = features["input_mask"]
        segment_ids = features["segment_ids"]
        label_ids = features["label_ids"]
        is_real_example = None
        label_mask = None
        # is_real_example distinguishes padding examples (added to fill TPU
        # batches) from real ones; defaults to all-ones when absent.
        if "is_real_example" in features:
            is_real_example = tf.cast(features["is_real_example"],
                                      dtype=tf.float32)
        else:
            is_real_example = tf.ones(tf.shape(label_ids)[0], dtype=tf.float32)
        # Per-label mask; defaults to all-ones when the feature is absent.
        if "label_mask" in features:
            label_mask = tf.cast(features["label_mask"], dtype=tf.float32)
        else:
            label_mask = tf.ones(tf.shape(label_ids), dtype=tf.float32)
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        # Build the classification head; also returns a per-label variance
        # (uncertainty) estimate alongside logits/probabilities.
        (total_loss, logits, probabilities, variance) = create_model(
            bert_config, is_training, input_ids, input_mask, segment_ids,
            label_ids, num_labels, use_one_hot_embeddings, freeze_bert,
            finetune_module, num_train_examples, is_real_example, label_mask)
        tvars = tf.trainable_variables()
        initialized_variable_names = {}
        scaffold_fn = None
        # Restore pretrained weights; on TPU this must happen inside a
        # Scaffold so initialization runs on the TPU host.
        if init_checkpoint:
            (assignment_map, initialized_variable_names
             ) = modeling.get_assignment_map_from_checkpoint(
                 tvars, init_checkpoint)
            if use_tpu:
                def tpu_scaffold():
                    tf.train.init_from_checkpoint(init_checkpoint,
                                                  assignment_map)
                    return tf.train.Scaffold()
                scaffold_fn = tpu_scaffold
            else:
                tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
        tf.logging.info("**** Trainable Variables ****")
        for var in tvars:
            init_string = ""
            if var.name in initialized_variable_names:
                init_string = ", *INIT_FROM_CKPT*"
            tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
                            init_string)
        output_spec = None
        if mode == tf.estimator.ModeKeys.TRAIN:
            train_op = optimization.create_optimizer(total_loss, learning_rate,
                                                     num_train_steps,
                                                     num_warmup_steps, use_tpu)
            output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=total_loss,
                train_op=train_op,
                scaffold_fn=scaffold_fn)
        elif mode == tf.estimator.ModeKeys.EVAL:
            def metric_fn(label_ids, logits, probabilities, variance,
                          is_real_example):
                # "hemming" (sic) -- a weighted, unnormalized Hamming-style
                # loss: mean over batches of the summed |label - prob| error,
                # with padding examples zero-weighted via is_real_example.
                def hemming_loss(labels,
                                 probabilities,
                                 weights=None,
                                 metrics_collections=None,
                                 updates_collections=None,
                                 name=None):
                    probabilities.get_shape().assert_is_compatible_with(
                        labels.get_shape())
                    prob = tf.cast(probabilities, dtype=tf.float32)
                    lab = tf.cast(labels, dtype=tf.float32)
                    total_error = tf.reduce_sum(
                        tf.abs(lab - prob) * is_real_example)
                    h_loss, update_op = tf.metrics.mean(total_error)
                    if metrics_collections:
                        tf.compat.v1.add_to_collections(metrics_collections,
                                                        h_loss)
                    if updates_collections:
                        tf.compat.v1.add_to_collections(updates_collections,
                                                        update_op)
                    return h_loss, update_op
                # Threshold probabilities at 0.5 to get hard predictions.
                predictions = tf.cast(tf.round(probabilities), dtype=tf.int32)
                label_ids = tf.cast(label_ids, dtype=tf.int32)
                # Split per label so each label gets its own metric suite.
                pred_split = tf.split(predictions, num_labels, axis=-1)
                probs_split = tf.split(probabilities, num_labels, axis=-1)
                label_ids_split = tf.split(label_ids, num_labels, axis=-1)
                variance_split = tf.split(variance, num_labels, axis=-1)
                eval_dict = dict()
                # NOTE(review): LABEL_COLUMN is a module-level list of label
                # names; it must have at least num_labels entries.
                for j in range(num_labels):
                    eval_dict[LABEL_COLUMN[j] + ' variance'] = tf.metrics.mean(
                        variance_split[j], weights=is_real_example)
                    eval_dict[LABEL_COLUMN[j] +
                              ' accuracy'] = tf.metrics.accuracy(
                                  label_ids_split[j],
                                  pred_split[j],
                                  weights=is_real_example)
                    eval_dict[LABEL_COLUMN[j] + ' auc'] = tf.metrics.auc(
                        label_ids_split[j],
                        probs_split[j],
                        weights=is_real_example)
                    eval_dict[LABEL_COLUMN[j] +
                              ' f1'] = tf.contrib.metrics.f1_score(
                                  label_ids_split[j],
                                  probs_split[j],
                                  weights=is_real_example)
                    eval_dict[LABEL_COLUMN[j] + ' recall'] = tf.metrics.recall(
                        label_ids_split[j],
                        pred_split[j],
                        weights=is_real_example)
                    eval_dict[LABEL_COLUMN[j] +
                              ' precision'] = tf.metrics.precision(
                                  label_ids_split[j],
                                  pred_split[j],
                                  weights=is_real_example)
                    eval_dict[
                        LABEL_COLUMN[j] +
                        ' recall_at_precision_90'] = tf.contrib.metrics.recall_at_precision(
                            label_ids_split[j],
                            probs_split[j],
                            0.9,
                            weights=is_real_example)
                    eval_dict[
                        LABEL_COLUMN[j] +
                        ' recall_at_precision_95'] = tf.contrib.metrics.recall_at_precision(
                            label_ids_split[j],
                            probs_split[j],
                            0.95,
                            weights=is_real_example)
                    eval_dict[LABEL_COLUMN[j] +
                              ' true_positives'] = tf.metrics.true_positives(
                                  label_ids_split[j],
                                  pred_split[j],
                                  weights=is_real_example)
                    eval_dict[LABEL_COLUMN[j] +
                              ' false_positives'] = tf.metrics.false_positives(
                                  label_ids_split[j],
                                  pred_split[j],
                                  weights=is_real_example)
                    eval_dict[LABEL_COLUMN[j] +
                              ' true_negatives'] = tf.metrics.true_negatives(
                                  label_ids_split[j],
                                  pred_split[j],
                                  weights=is_real_example)
                    eval_dict[LABEL_COLUMN[j] +
                              ' false_negatives'] = tf.metrics.false_negatives(
                                  label_ids_split[j],
                                  pred_split[j],
                                  weights=is_real_example)
                eval_dict['hemming_loss'] = hemming_loss(
                    label_ids, probabilities, weights=is_real_example)
                eval_dict["mean_variance"] = tf.metrics.mean(
                    values=variance, weights=is_real_example)
                return eval_dict
            eval_metrics = (metric_fn, [
                label_ids, logits, probabilities, variance, is_real_example
            ])
            output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=total_loss,
                eval_metrics=eval_metrics,
                scaffold_fn=scaffold_fn)
        else:
            # PREDICT mode: surface raw probabilities, logits and variance.
            output_spec = tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
                mode=mode,
                predictions={
                    "probs": probabilities,
                    "logits": logits,
                    "variance": variance
                },
                scaffold_fn=scaffold_fn)
        return output_spec
    return model_fn | 26,267
def var_dump(*obs):
    """
    shows structured information of a object, list, tuple etc
    """
    for index, ob in enumerate(obs):
        dump(ob, 0, index, '', object, True)
def logout(odj: ODI) -> None:
    """Logout from ODI and remove the local cache."""
    # NOTE(review): ``odj`` is accepted but never used; the imported helper
    # performs the whole logout -- confirm the parameter is required by callers.
    from odi.cli.command import implement_logout
    implement_logout() | 26,269
def load_series_spectrum_df(series_dict_channels):
    """
    Takes a series of dictionaries generated by pd.Series.apply(load_channels)
    and returns a dataframe with the frequencies expanded as columns.

    If the frequencies are not identically overlapping across rows, the
    resulting set of columns will be the union of all the different frequency
    sets, where rows not containing a given frequency will be NaN.
    """
    freq_columns = {}
    for row_idx, channels in series_dict_channels.items():
        if not channels:
            # Empty / falsy rows contribute no values.
            continue
        for _, channel in channels.items():
            sample_count = len(channel['value_y'])
            # Frequency axis: delta_x scaled over the sample indices.
            freqs = np.array(channel['delta_x']).dot(np.arange(sample_count))
            for pos, freq in enumerate(freqs):
                column = freq_columns.setdefault(freq, {})
                column[row_idx] = channel['value_y'][pos]
    return pd.DataFrame.from_dict(freq_columns)
def compare_balloon_states(b1: balloon.BalloonState,
                           b2: balloon.BalloonState,
                           check_not_equal: Sequence[str] = ()):
  """Function for comparing balloon states.

  Args:
    b1: First BalloonState.
    b2: Second BalloonState.
    check_not_equal: A sequence of strings. Each string corresponds to a field
      in BaloonStates to check if they are not equal. If not empty or None, not
      equal will be asserted of the values or objects corresponding to these
      fields.
  """
  # pylint: disable=protected-access
  for field in dataclasses.fields(balloon.BalloonState):
    key = field.name
    x = b1.__dict__[key]
    y = b2.__dict__[key]
    # Safety-layer fields are intentionally excluded from comparison.
    if 'safety_layer' in key:
      continue
    if isinstance(x, (np.ndarray, jnp.ndarray)):
      absltest.TestCase().assertIsInstance(y, (np.ndarray, jnp.ndarray))
      if key in check_not_equal:
        np.testing.assert_raises(
            AssertionError, np.testing.assert_array_equal, x, y)
      elif not check_not_equal:
        np.testing.assert_equal(x, y)
    else:
      if key in check_not_equal:
        # Bug fix: this previously compared the dataclasses.Field object
        # itself against the string 'power' (always unequal), so the
        # battery_charge branch below was unreachable. Compare the field
        # NAME, matching the elif branch below.
        if key != 'power':
          absltest.TestCase().assertNotEqual(x, y)
        else:
          absltest.TestCase().assertNotEqual(
              x.battery_charge, y.battery_charge)
      elif not check_not_equal:
        if key != 'power':
          absltest.TestCase().assertEqual(x, y)
        else:
          absltest.TestCase().assertEqual(x.battery_charge, y.battery_charge)
  # pylint: enable=protected-access
def get_mujoco_features_dict(
    builder_config: BuilderConfig,
    ds_config: DatasetConfig) -> Dict[str, tfds.features.FeatureConnector]:
  """Builds the features dict of a Mujoco dataset.

  Args:
    builder_config: builder config of the Mujoco dataset.
    ds_config: config of the Mujoco dataset containing the specs.

  Returns:
    Dictionary with the features of this dataset.
  """
  float_type = builder_config.float_type
  # Per-step features shared by all Mujoco datasets (RLDS-style step fields).
  steps_dict = {
      'observation':
          tfds.features.Tensor(shape=(ds_config.obs_len,), dtype=float_type),
      'action':
          tfds.features.Tensor(shape=(ds_config.action_len,), dtype=float_type),
      'reward':
          float_type,
      'is_terminal':
          tf.bool,
      'is_first':
          tf.bool,
      'discount':
          float_type,
  }
  if builder_config.has_step_metadata:
    steps_dict['infos'] = {
        # Infos correspond to state information.
        # See https://github.com/rail-berkeley/d4rl/wiki/Tasks#gym.
        'action_log_probs':
            float_type,
        'qpos':
            tfds.features.Tensor(shape=(ds_config.qpos_len,), dtype=float_type),
        'qvel':
            tfds.features.Tensor(shape=(ds_config.qvel_len,), dtype=float_type),
    }
  episode_metadata = {}
  if builder_config.has_episode_metadata:
    episode_metadata.update({
        'algorithm': tf.string,
        'iteration': tf.int32,
    })
  if builder_config.has_policy_metadata:
    episode_metadata.update({
        # The policy dictionary contains the weights of the policy used to
        # generate the dataset.
        # See https://github.com/rail-berkeley/d4rl/wiki/Tasks#gym.
        # Layout: two 256-unit hidden layers, then mean and log-std heads.
        'policy': {
            'fc0': {
                'bias':
                    tfds.features.Tensor(shape=(256,), dtype=float_type),
                'weight':
                    tfds.features.Tensor(
                        shape=(256, ds_config.obs_len), dtype=float_type),
            },
            'fc1': {
                'bias':
                    tfds.features.Tensor(shape=(256,), dtype=float_type),
                'weight':
                    tfds.features.Tensor(shape=(256, 256), dtype=float_type),
            },
            'last_fc': {
                'bias':
                    tfds.features.Tensor(
                        shape=(ds_config.action_len,), dtype=float_type),
                'weight':
                    tfds.features.Tensor(
                        shape=(ds_config.action_len, 256), dtype=float_type),
            },
            'last_fc_log_std': {
                'bias':
                    tfds.features.Tensor(
                        shape=(ds_config.action_len,), dtype=float_type),
                'weight':
                    tfds.features.Tensor(
                        shape=(ds_config.action_len, 256), dtype=float_type),
            },
            'nonlinearity': tf.string,
            'output_distribution': tf.string,
        },
    })
  features_dict = {
      'steps': tfds.features.Dataset(steps_dict),
  }
  if episode_metadata:
    features_dict.update(episode_metadata)
  return features_dict | 26,272
def copy_and_extract_documents(agency_directory, d):
    """ Extract text from documents, and yield a tuple containing
    documentation details, the document path, and the extracted text
    for each entry in the date directory's manifest. """
    date_dir = os.path.join(agency_directory, d)
    manifest_path = os.path.join(date_dir, 'manifest.yml')
    # safe_load avoids arbitrary object construction from the manifest
    # (yaml.load without an explicit Loader is unsafe and deprecated);
    # the 'with' blocks also close the previously-leaked file handles.
    with open(manifest_path, 'r') as manifest_file:
        manifest = yaml.safe_load(manifest_file)
    for document in manifest:
        doc_filename = document['document']['document_id']
        doc_path = os.path.join(date_dir, doc_filename)
        text_path = convert_to_text(doc_filename, doc_path)
        with open(text_path, 'r') as text_file:
            text_contents = text_file.read()
        yield (document, doc_path, text_contents)
def weighted_cross_entropy(y_true, y_pred):
    """
    Weighted cross entropy loss

    :param y_true: Ground truth; channel axis stacks [segmentation, weight map]
    :param y_pred: Prediction (probabilities in (0, 1))
    :return: Loss value between y_true and y_pred
    """
    try:
        # Split the stacked ground truth into the segmentation target and
        # the per-pixel weight map.
        [seg, weight] = tf.unstack(y_true, 2, axis=3)
        seg = tf.expand_dims(seg, -1)
        weight = tf.expand_dims(weight, -1)
    except Exception:
        # test purpose
        seg = tf.zeros((1,128,128,1))
        weight = tf.ones((1, 128, 128, 1))
    # Clip probabilities away from 0/1, then convert back to logits.
    epsilon = tf.convert_to_tensor(10e-8, y_pred.dtype.base_dtype)
    y_pred = tf.clip_by_value(y_pred, epsilon, 1 - epsilon)
    y_pred = tf.math.log(y_pred / (1 - y_pred))
    # Numerically stable sigmoid cross-entropy from logits:
    # max(z, 0) - z * target + log(1 + exp(-|z|)).
    zeros = tf.zeros_like(y_pred, dtype=y_pred.dtype) # array_ops
    cond = y_pred >= zeros
    relu_logits = tf.where(cond, y_pred, zeros)
    neg_abs_logits = tf.where(cond, -y_pred, y_pred)
    entropy = tf.math.add(
        relu_logits - y_pred * seg,
        tf.math.log1p(tf.math.exp(neg_abs_logits)),
        name=None,
    )
    # Apply the per-pixel weight map before averaging over the channel axis.
    return K.mean(tf.multiply(weight, entropy), axis=-1) | 26,274
def get_config(custom_path=None):
    """
    Get config file and load it with yaml

    :param custom_path: optional explicit path to the config file; when
        omitted, CONFIG_DIRS are searched for CONFIG_FILENAME.
    :returns: loaded config in yaml, as a dict object
    :raises FileNotFoundError: if no configuration file can be found
    """
    if custom_path:
        config_path = custom_path
    else:
        # Use the first existing candidate; if none exists, the last
        # candidate is attempted below and the error path is taken.
        for d in CONFIG_DIRS:
            config_path = os.path.join(d, CONFIG_FILENAME)
            if os.path.isfile(config_path):
                break
    try:
        with open(config_path, "r") as config_file:
            return yaml.safe_load(config_file)
    except FileNotFoundError as e:
        logger.debug(e)
        if custom_path:
            logger.error("Configuration file {} not found.".format(custom_path))
        else:
            logger.error(
                "No configuration file can be found. Please create a "
                "config.yml in one of these directories:\n"
                "{}".format(", ".join(CONFIG_DIRS))
            )
        # Re-raise the original exception so the failing path and traceback
        # are preserved (``raise FileNotFoundError`` discarded both).
        raise
def union(a, b):
    """Find union of two lists, sequences, etc.

    Returns a list that includes repetitions if they occur in the input lists.
    """
    return [*a, *b]
def update_intervals(M, s, B):
    """
    After found the s value, compute the new list of intervals

    :param M: current list of (a, b) intervals; cleared in place before return
    :param s: the s value found in the current search step
    :param B: bound constant used for the 2B / 3B-1 interval limits
        (presumably the Bleichenbacher'98 padding-oracle bound -- confirm)
    :return: the narrowed, merged list of intervals
    """
    # NOTE(review): ceil/floor here take two arguments, so they are project
    # helpers (integer ceiling/floor division), not math.ceil/floor; ``n``
    # is a module-level modulus.
    M_new = []
    for a, b in M:
        r_lower = ceil(a * s - 3 * B + 1, n)
        r_upper = ceil(b * s - 2 * B, n)
        for r in range(r_lower, r_upper):
            lower_bound = max(a, ceil(2 * B + r * n, s))
            upper_bound = min(b, floor(3 * B - 1 + r * n, s))
            interval = Interval(lower_bound, upper_bound)
            # safe_interval_insert merges overlapping intervals on insert.
            M_new = safe_interval_insert(M_new, interval)
    # The old interval list is mutated empty; callers must use the return value.
    M.clear()
    return M_new | 26,277
def mixer_l16_224_in21k(pretrained=False, **kwargs):
    """ Mixer-L/16 224x224. ImageNet-21k pretrained weights.
    Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
    """
    # Large variant: 24 mixer blocks, 1024-dim embeddings, 16x16 patches.
    return _create_mixer(
        'mixer_l16_224_in21k',
        pretrained=pretrained,
        patch_size=16,
        num_blocks=24,
        embed_dim=1024,
        **kwargs)
def compileInterpolatableTTFs(ufos,
                              preProcessorClass=TTFInterpolatablePreProcessor,
                              outlineCompilerClass=OutlineTTFCompiler,
                              featureCompilerClass=FeatureCompiler,
                              featureWriters=None,
                              glyphOrder=None,
                              useProductionNames=None,
                              cubicConversionError=None,
                              reverseDirection=True,
                              inplace=False):
    """Build interpolatable FontTools TrueType fonts from a list of UFOs.

    Cubic curves are converted compatibly to quadratic curves using the
    Cu2Qu conversion algorithm. Returns an iterator yielding one TTFont
    per input UFO.
    """
    # Pre-process all UFOs together so the quadratic conversions stay
    # point-compatible across masters.
    glyphSets = preProcessorClass(
        ufos, inplace=inplace,
        conversionError=cubicConversionError,
        reverseDirection=reverseDirection).process()
    for source, glyphs in zip(ufos, glyphSets):
        # Compile outlines, then layout features, then post-process names.
        font = outlineCompilerClass(
            source, glyphSet=glyphs, glyphOrder=glyphOrder).compile()
        featureCompilerClass(
            source, font, featureWriters=featureWriters,
            mtiFeatures=_getMtiFeatures(source)).compile()
        yield PostProcessor(font, source).process(useProductionNames)
def autofill(blf_dict: dict, user_dict: dict):
    """Fill in missing halves of BLF entries using the user directory.

    Each value in ``blf_dict`` is a ``(destination, label)`` pair. Where the
    destination is blank, look up the (unique) extension whose user name
    matches the label; where the label is blank, look up the user name for
    the destination. Ambiguous or unresolvable entries are left untouched.

    :param blf_dict: maps BLF key number -> (destination extension, label)
    :param user_dict: maps extension number -> user name
    :return: the same ``blf_dict``, updated in place
    """
    for blf_number in blf_dict:
        blf = blf_dict[blf_number]
        destination, label = blf[0], blf[1]
        if destination == "":
            # More than one extension can share the same user name, so only
            # fill in the destination when the match is unambiguous.
            # TODO: give the user feedback when the label is ambiguous.
            possible_destinations = [
                extension for extension, user_name in user_dict.items()
                if user_name == label
            ]
            if len(possible_destinations) == 1:
                blf_dict[blf_number] = (possible_destinations[0], label)
        elif label == "":
            # Guard the lookup: the destination may not be a known extension,
            # in which case the entry is left as-is instead of raising.
            if destination in user_dict:
                blf_dict[blf_number] = (destination, user_dict[destination])
    return blf_dict
def isoslice(var, iso_values, grid=None, iso_array=None, axis="Z"):
    """Interpolate var to iso_values.
    This wraps `xgcm` `transform` function for slice interpolation,
    though `transform` has additional functionality.
    Inputs
    ------
    var: DataArray
        Variable to operate on.
    iso_values: list, ndarray
        Values to interpolate to. If calculating var at fixed depths,
        iso_values are the fixed depths, which should be negative if
        below mean sea level. If input as array, should be 1D.
    grid: xgcm.grid, optional
        Grid object associated with var. Optional because checks var
        attributes for grid.
    iso_array: DataArray, optional
        Array that var is interpolated onto (e.g., z coordinates or
        density). If calculating var on fixed depth slices, iso_array
        contains the depths [m] associated with var. In that case and
        if None, will use z coordinate attached to var. Also use this
        option if you want to interpolate with z depths constant in
        time and input the appropriate z coordinate.
    dim: str, optional
        Dimension over which to calculate isoslice. If calculating var
        onto fixed depths, `dim='Z'`. Options are 'Z', 'Y', and 'X'.
    Returns
    -------
    DataArray of var interpolated to iso_values. Dimensionality will be the
    same as var except with dim dimension of size of iso_values.
    Notes
    -----
    var cannot have chunks in the dimension dim.
    cf-xarray should still be usable after calling this function.
    Example usage
    -------------
    To calculate temperature onto fixed depths:
    >>> xroms.isoslice(ds.temp, np.linspace(0, -30, 50))
    To calculate temperature onto salinity:
    >>> xroms.isoslice(ds.temp, np.arange(0, 36), iso_array=ds.salt, axis='Z')
    Calculate lat-z slice of salinity along a constant longitude value (-91.5):
    >>> xroms.isoslice(ds.salt, -91.5, iso_array=ds.lon_rho, axis='X')
    Calculate slice of salt at 28 deg latitude
    >>> xroms.isoslice(ds.salt, 28, iso_array=ds.lat_rho, axis='Y')
    Interpolate temp to salinity values between 0 and 36 in the X direction
    >>> xroms.isoslice(ds.temp, np.linspace(0, 36, 50), iso_array=ds.salt, axis='X')
    Interpolate temp to salinity values between 0 and 36 in the Z direction
    >>> xroms.isoslice(ds.temp, np.linspace(0, 36, 50), iso_array=ds.salt, axis='Z')
    Calculate the depth of a specific isohaline (33):
    >>> xroms.isoslice(ds.salt, 33, iso_array=ds.z_rho, axis='Z')
    Calculate dye 10 meters above seabed. Either do this on the vertical
    rho grid, or first change to the w grid and then use `isoslice`. You may prefer
    to do the latter if there is a possibility that the distance above the seabed you are
    interpolating to (10 m) could be below the deepest rho grid depth.
    * on rho grid directly:
    >>> height_from_seabed = ds.z_rho + ds.h
    >>> height_from_seabed.name = 'z_rho'
    >>> xroms.isoslice(ds.dye_01, 10, iso_array=height_from_seabed, axis='Z')
    * on w grid:
    >>> var_w = ds.dye_01.xroms.to_grid(scoord='w').chunk({'s_w': -1})
    >>> ds['dye_01_w'] = var_w  # currently this is the easiest way to reattached coords xgcm variables
    >>> height_from_seabed = ds.z_w + ds.h
    >>> height_from_seabed.name = 'z_w'
    >>> xroms.isoslice(ds['dye_01_w'], 10, iso_array=height_from_seabed, axis='Z')
    """
    words = "Either grid should be input or var should be DataArray with grid in attributes."
    assert (grid is not None) or (
        isinstance(var, xr.DataArray) and "grid" in var.attrs
    ), words
    if grid is None:
        grid = var.attrs["grid"]
    assert isinstance(grid, xgcm.Grid), "grid must be `xgcm` grid object."
    attrs = var.attrs  # save to reinstitute at end
    # make sure iso_values are array-like
    if isinstance(iso_values, (int, float)):
        iso_values = [iso_values]
    # interpolate to the z coordinates associated with var
    if iso_array is None:
        # Pick the z coordinate attached to var; names containing "0" are
        # skipped — presumably the time-invariant z_*0 coords. TODO confirm.
        key = [coord for coord in var.coords if "z_" in coord and "0" not in coord][
            0
        ]  # str
        assert (
            len(key) > 0
        ), "z coordinates associated with var could not be identified."
        iso_array = var[key]
    else:
        # Use the array's own name as the output coordinate name if it has one.
        if isinstance(iso_array, xr.DataArray) and iso_array.name is not None:
            key = iso_array.name
        else:
            key = "z"
    # perform interpolation
    transformed = grid.transform(var, axis, iso_values, target_data=iso_array)
    if key not in transformed.coords:
        transformed = transformed.assign_coords({key: iso_array})
    # bring along attributes for cf-xarray
    transformed[key].attrs["axis"] = axis
    transformed.attrs["grid"] = grid
    # add original attributes back in
    transformed.attrs = {**attrs, **transformed.attrs}
    # save key names for later
    # perform interpolation for other coordinates if needed
    if "longitude" in var.cf.coordinates:
        lonkey = var.cf["longitude"].name
        if lonkey not in transformed.coords:
            # this interpolation won't work for certain combinations of var[latkey] and iso_array
            # without the following step
            if "T" in iso_array.reset_coords(drop=True).cf.axes:
                iso_array = iso_array.cf.isel(T=0).drop_vars(
                    iso_array.cf["T"].name, errors="ignore"
                )
            if "Z" in iso_array.reset_coords(drop=True).cf.axes:
                iso_array = iso_array.cf.isel(Z=0).drop_vars(
                    iso_array.cf["Z"].name, errors="ignore"
                )
            transformedlon = grid.transform(
                var[lonkey], axis, iso_values, target_data=iso_array
            )
            transformed = transformed.assign_coords({lonkey: transformedlon})
            transformed[lonkey].attrs["standard_name"] = "longitude"
    if "latitude" in var.cf.coordinates:
        latkey = var.cf["latitude"].name
        if latkey not in transformed.coords:
            # this interpolation won't work for certain combinations of var[latkey] and iso_array
            # without the following step
            if "T" in iso_array.reset_coords(drop=True).cf.axes:
                iso_array = iso_array.cf.isel(T=0).drop_vars(
                    iso_array.cf["T"].name, errors="ignore"
                )
            if "Z" in iso_array.reset_coords(drop=True).cf.axes:
                iso_array = iso_array.cf.isel(Z=0).drop_vars(
                    iso_array.cf["Z"].name, errors="ignore"
                )
            transformedlat = grid.transform(
                var[latkey], axis, iso_values, target_data=iso_array
            )
            transformed = transformed.assign_coords({latkey: transformedlat})
            transformed[latkey].attrs["standard_name"] = "latitude"
    if "vertical" in var.cf.coordinates:
        zkey = var.cf["vertical"].name
        if zkey not in transformed.coords:
            transformedZ = grid.transform(
                var[zkey], axis, iso_values, target_data=iso_array
            )
            transformed = transformed.assign_coords({zkey: transformedZ})
        transformed[zkey].attrs["positive"] = "up"
    # Drop length-1 dimensions and let cf-xarray re-identify axis attributes.
    transformed = transformed.squeeze().cf.guess_coord_axis()
    # reorder back to normal ordering in case changed
    transformed = xroms.order(transformed)
    return transformed | 26,281 |
def game_stats(history):
    """Display win/loss statistics for the current game session.

    :param history: dict with integer game counts under "won" and "loss"
    """
    lost_games = history["loss"]
    won_games = history["won"]
    total_games = won_games + lost_games
    print("total no of games played are:", total_games)  # displays total number of games played
    if total_games != 0:
        win_prec = (won_games / total_games) * 100
        print("winning percentage are:", win_prec)  # displays winning percentage
    else:
        # Bug fix: this line previously ran unconditionally, so "0" was
        # printed even after the real percentage. It also guards the
        # division by zero when no game has been played yet.
        print("winning percentage are:", 0)
def user_prompt(msg: str, sound: bool = False, timeout: int = -1):
    """Build the encoded UserPrompt command.

    :param msg: text shown to the user
    :param sound: whether to play a notification sound (encoded as 0/1)
    :param timeout: prompt timeout, -1 meaning no timeout
    :return: the command line as bytes
    """
    command = 'B;UserPrompt("{}",{:d},{});'.format(msg, sound, timeout)
    return command.encode()
def ComputeWk(X, labels, classes):
    """Compute the total within-cluster sum of squared deviations.

    X - (d x n) data matrix, where n is samples and d is dimentionality
    labels - n dimensional vector of class labels in range(classes)
    """
    Wk = 0
    for c in range(classes):
        # Columns belonging to class c, centered on the class centroid.
        members = X[:, labels == c]
        centroid = np.mean(members, axis=1, keepdims=True)
        Wk = Wk + np.sum((members - centroid) ** 2)
    return Wk
def vad_split(audio, rate, frame_duration, aggressiveness=1):
    """Split audio into voiced segments using WebRTC voice activity detection.

    Consecutive speech frames are merged into one segment; non-speech frames
    act as separators.

    params:
        audio: A numpy ndarray, 1-dimensional, values in [-1.0, 1.0]
        rate: An integer sample rate (8000, 16000, 32000 or 48000)
        frame_duration: A float frame length in seconds (.01, .02 or .03)
        aggressiveness: An integer 0-3 filtering aggressiveness for the VAD
    return: A list of 1-dimensional numpy ndarrays with values in
        [-1.0, 1.0] (or a single-element ndarray wrapping the full audio
        when no speech is detected)
    """
    assert rate in (8000, 16000, 32000, 48000), (
        'Invalid Rate, use 8000, 16000, 32000, or 48000'
    )
    assert frame_duration in (.01, .02, .03), (
        'Invalid frame_dur, use .01, .02, .03'
    )
    assert 0 <= aggressiveness <= 3, (
        'Invalid aggressiveness, must be between 0 and 3'
    )
    vad = webrtcvad.Vad(aggressiveness)
    samples_per_frame = int(rate * frame_duration)
    max_amplitude = np.iinfo('int16').max
    segments = []
    in_speech = False
    start = 0
    while start + samples_per_frame < len(audio):
        frame = audio[start:start + samples_per_frame]
        # The VAD expects 16-bit PCM bytes.
        payload = np.int16(frame * max_amplitude).tobytes()
        if vad.is_speech(payload, rate):
            if not in_speech:
                in_speech = True
                segments.append([frame])
            else:
                segments[-1].append(frame)
        else:
            in_speech = False
        start += samples_per_frame
    if not segments:
        return np.array([audio])
    return [np.hstack(group) for group in segments]
def is_extant_string(string):
    """Return True when a HyperlinkModel row with this internal value exists."""
    matches = HyperlinkModel.objects.filter(internal=string)
    return matches.exists()
def time_to_convergence(T, P0, final_beliefs=False, tolerance=0, max_iter=10000):
    """
    Count the periods until opinions stop changing in the DeGroot model.

    :param T: (n x n) row-stochastic trust matrix
    :param P0: length-n initial belief profile
    :param final_beliefs: when True, also return the final belief vector
    :param tolerance: convergence threshold on the norm of the belief change
    :param max_iter: upper bound on the number of update steps
    :return: the number of periods t, or (t, beliefs) when final_beliefs is
        True; None when the input shapes are inconsistent
    """
    T = np.asarray(T)
    current = np.transpose(np.asarray(P0))
    n, m = T.shape
    # Shape sanity check (kept as print+None return for backward compatibility).
    if n != m or m != current.size:
        print("Trust matrix should be squared and number of agents should be consistent in T and P0.")
        return
    t = 1
    previous = current
    current = T.dot(previous)
    while t < max_iter and np.linalg.norm(current - previous) > tolerance:
        previous = current
        current = T.dot(previous)
        t += 1
    if final_beliefs:
        return t, current
    return t
def sound_settings_function():
    """Handle a key event while the player is in the sound settings menu.

    Lets the player move between the music/sound toggle buttons and the exit
    button, toggle sounds and music on/off, and leave the menu.

    NOTE(review): relies on module-level ``event``, ``soundSettings``,
    ``soundDict`` and ``pygame`` — presumably called from the main event loop
    with ``event`` already set; confirm. These instructions are repeated in
    different parts of the game.
    """
    if event.key == K_LEFT:
        # Left/right only moves between the two toggle buttons of a row.
        if soundSettings.selectedButton == soundSettings.soundOffButton or soundSettings.selectedButton == soundSettings.soundOnButton:
            soundDict["select"].play()
            soundSettings.selectNextButton("left")
    elif event.key == K_RIGHT:
        if soundSettings.selectedButton == soundSettings.musicOffButton or soundSettings.selectedButton == soundSettings.musicOnButton:
            soundDict["select"].play()
            soundSettings.selectNextButton("right")
    elif event.key == K_UP:
        # Up moves off the exit button back to the toggles.
        if soundSettings.selectedButton != soundSettings.exitButton:
            soundDict["select"].play()
            soundSettings.selectNextButton("up")
    elif event.key == K_DOWN:
        if soundSettings.selectedButton == soundSettings.exitButton:
            soundDict["select"].play()
            soundSettings.selectNextButton("down")
    elif event.key == K_RETURN:
        if soundSettings.soundOnButton.selected:
            # NOTE(review): unlike the other branches this one does not play
            # "enter" — presumably intentional since sound is being muted;
            # confirm.
            soundSettings.set_sound(False)
            for sound in soundDict.keys():
                soundDict[sound].set_volume(0)
        elif soundSettings.soundOffButton.selected:
            soundDict["enter"].play()
            soundSettings.set_sound(True)
            for sound in soundDict.keys():
                soundDict[sound].set_volume(0.5)
        elif soundSettings.musicOnButton.selected:
            soundDict["enter"].play()
            soundSettings.set_music(False)
            pygame.mixer.music.pause()
        elif soundSettings.musicOffButton.selected:
            soundDict["enter"].play()
            soundSettings.set_music(True)
            pygame.mixer.music.unpause()
        elif soundSettings.exitButton.selected:
            # Flag checked elsewhere to leave the sound settings screen.
            soundSettings.close = True | 26,288 |
def count_subset_sum_recur(arr, total, n):
    """Count the subsets of arr[0..n] whose elements sum to ``total``.

    Plain recursion over include/exclude decisions.
    Time complexity: O(2^n), where n is the number of considered elements.
    Space complexity: O(1) beyond the recursion stack.
    """
    if total == 0:
        # The empty subset always achieves a zero target.
        return 1
    if total < 0 or n < 0:
        return 0
    excluded = count_subset_sum_recur(arr, total, n - 1)
    if total < arr[n]:
        # arr[n] alone overshoots the target, so it can only be excluded.
        return excluded
    included = count_subset_sum_recur(arr, total - arr[n], n - 1)
    return included + excluded
def monthcalendar(year, month):
    """Return a matrix representing a month's calendar.

    Each row is one week of seven day numbers; positions that fall outside
    this month are zero.
    """
    firstweekday_of_month, ndays = monthrange(year, month)
    # Day number of the first column of the first row; <= 0 means padding.
    day = (_firstweekday - firstweekday_of_month + 6) % 7 - 5
    rows = []
    while day <= ndays:
        week = [d if 1 <= d <= ndays else 0 for d in range(day, day + 7)]
        rows.append(week)
        day += 7
    return rows
def Contap_HCurve2dTool_Bezier(*args):
    """
    SWIG-generated static wrapper: delegates to the native binding to get the
    Bezier representation of an adaptor 2d curve.

    :param C:
    :type C: Handle_Adaptor2d_HCurve2d &
    :rtype: Handle_Geom2d_BezierCurve
    """
    # Thin pass-through to the compiled _Contap extension module.
    return _Contap.Contap_HCurve2dTool_Bezier(*args) | 26,291 |
def api_view_issue(repo, issueid, username=None, namespace=None):
    """
    Issue information
    -----------------
    Retrieve information of a specific issue.
    ::
    GET /api/0/<repo>/issue/<issue id>
    GET /api/0/<namespace>/<repo>/issue/<issue id>
    ::
    GET /api/0/fork/<username>/<repo>/issue/<issue id>
    GET /api/0/fork/<username>/<namespace>/<repo>/issue/<issue id>
    The identifier provided can be either the unique identifier or the
    regular identifier used in the UI (for example ``24`` in
    ``/forks/user/test/issue/24``)
    Sample response
    ^^^^^^^^^^^^^^^
    ::
        {
          "assignee": null,
          "blocks": [],
          "comments": [],
          "content": "This issue needs attention",
          "date_created": "1431414800",
          "depends": ["4"],
          "id": 1,
          "private": false,
          "status": "Open",
          "tags": [],
          "title": "test issue",
          "user": {
            "fullname": "PY C",
            "name": "pingou"
          }
        }
    """
    # The "comments" query parameter (default true) controls whether comments
    # are embedded in the JSON payload.
    comments = is_true(flask.request.args.get("comments", True))
    repo = _get_repo(repo, username, namespace)
    # Validate that the project has an issue tracker and the token applies.
    _check_issue_tracker(repo)
    _check_token(repo, project_token=False)
    # A numeric identifier is the UI issue id; anything else is treated as
    # the issue's unique uid.
    issue_id = issue_uid = None
    try:
        issue_id = int(issueid)
    except (ValueError, TypeError):
        issue_uid = issueid
    issue = _get_issue(repo, issue_id, issueuid=issue_uid)
    # Presumably rejects callers without access to private issues — see helper.
    _check_private_issue_access(issue)
    jsonout = flask.jsonify(issue.to_json(public=True, with_comments=comments))
    return jsonout | 26,292 |
def mktest():
    """Create testsuite.

    Runs dktest's mktest.py from inside the ``tests`` directory, but only
    when that directory's contents have changed.
    """
    if changed('tests'):
        # Restore the working directory afterwards so this task does not
        # leak a cwd change into subsequent tasks (the original chdir was
        # never undone).
        previous_dir = os.getcwd()
        os.chdir('tests')
        try:
            run("python ../dktest/mktest.py")
        finally:
            os.chdir(previous_dir)
def mcc_class_loss(
    predictions: torch.tensor,
    targets: torch.tensor,
    data_weights: torch.tensor,
    mask: torch.tensor,
) -> torch.tensor:
    """
    Classification loss based on a soft Matthews Correlation Coefficient.

    Soft confusion-matrix counts are accumulated over the batch axis and
    combined as 1 - (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)).

    :param predictions: Model predictions with shape(batch_size, tasks).
    :param targets: Target values with shape(batch_size, tasks).
    :param data_weights: Float weights for each datapoint in training with shape(batch_size, 1).
    :param mask: Boolean values selecting which predictions contribute to the loss, shape(batch_size, tasks).
    :return: A tensor containing loss values of shape(tasks).
    """
    true_pos = torch.sum(targets * predictions * data_weights * mask, axis=0)
    false_pos = torch.sum((1 - targets) * predictions * data_weights * mask, axis=0)
    false_neg = torch.sum(targets * (1 - predictions) * data_weights * mask, axis=0)
    true_neg = torch.sum((1 - targets) * (1 - predictions) * data_weights * mask, axis=0)
    numerator = true_pos * true_neg - false_pos * false_neg
    denominator = torch.sqrt(
        (true_pos + false_pos)
        * (true_pos + false_neg)
        * (true_neg + false_pos)
        * (true_neg + false_neg)
    )
    return 1 - numerator / denominator
def inverse(fun, value):
    """Find x with fun(x) == value by bisection over (1e-9, 1e9).

    Assumes fun is increasing on the search interval; the result is accurate
    to roughly 1e-9 (the interval-width stopping criterion).
    """
    epsilon = 1e-09
    lo, hi = 1e-09, 1e9
    mid = lo
    while abs(hi - lo) > epsilon:
        mid = (lo + hi) / 2
        diff = fun(mid) - value
        if isclose(diff, 0.0):
            break
        if diff > 0 and fun(lo) < value:
            hi = mid
        else:
            lo = mid
    return mid
def run_architecture_validator_test(
    mocker,
    config,
    constrained_param_section,
    constrained_param_name,
    param_name,
    param_val,
    validator,
    expected_warnings,
    expected_errors,
):
    """Run a test for a validator that's concerned with the architecture param.

    Builds a mocked pcluster config from ``config``, invokes ``validator`` on
    ``(param_name, param_val)``, checks the validator consulted the
    constrained section/param, and matches produced warnings/errors against
    the expected messages (treated as literal text via ``re.escape``).
    """
    mocked_pcluster_config = make_pcluster_config_mock(mocker, config)
    errors, warnings = validator(param_name, param_val, mocked_pcluster_config)
    # The validator must have looked up the section holding the constraint...
    mocked_pcluster_config.get_section.assert_called_once_with(constrained_param_section)
    # ...and read the constrained parameter from it.
    # NOTE(review): invoking ``side_effect(...)`` directly is unusual for a
    # mock — presumably it re-runs the configured side_effect to obtain the
    # mocked section; confirm this is intended rather than ``return_value``.
    mocked_pcluster_config.get_section.side_effect(constrained_param_section).get_param_value.assert_called_with(
        constrained_param_name
    )
    assert_that(len(warnings)).is_equal_to(len(expected_warnings))
    # NOTE(review): the loop variables shadow the lists they iterate over —
    # it works, but renaming to (warning, expected_warning) would be clearer.
    for warnings, expected_warnings in zip(warnings, expected_warnings):
        assert_that(warnings).matches(re.escape(expected_warnings))
    assert_that(len(errors)).is_equal_to(len(expected_errors))
    for errors, expected_errors in zip(errors, expected_errors):
        assert_that(errors).matches(re.escape(expected_errors)) | 26,296 |
def get_test_files(dirname):
    """Return sorted "dirname/entry" paths for everything in the directory.

    Yields an empty list when ``dirname`` does not name a directory.
    """
    if not os.path.isdir(dirname):
        return []
    # Paths are built with a literal "/" to match the historical output.
    return [dirname + "/{}".format(entry) for entry in sorted(os.listdir(dirname))]
def test_index_references_are_valid(repo):
    """
    demonstrate that all keys in the index reference valid TOC entries in data pages.
    this means:
      - all pages exist
      - all IDs exist
      - the reported size equals the allocated size
    Args:
        repo (cim.CIM): the deleted-instance repo
    Returns:
        None
    """
    index = cim.Index(repo.cim_type, repo.logical_index_store)
    # Walk every key under the namespace root ('NS_') of the index tree.
    for key in index.lookup_keys(cim.Key('NS_')):
        # Keys that do not reference data records have nothing to validate.
        if not key.is_data_reference:
            continue
        is_found = False
        entry_size = 0
        # Scan the referenced page's table of contents for the record id.
        page = repo.logical_data_store.get_page(key.data_page)
        for i in range(page.toc.count):
            entry = page.toc[i]
            if entry.record_id == key.data_id:
                is_found = True
                entry_size = entry.size
                break
        # The record must exist and its TOC size must match the key's length.
        assert is_found is True
        assert key.data_length == entry_size | 26,298 |
def setup(verbose, cpu, memory, update):
    """
    Uses Terraform to create a full Numerai Compute cluster in AWS.
    Prompts for your AWS and Numerai API keys on first run, caches them in $HOME/.numerai.
    Will output two important URLs at the end of running:
        - submission_url:   The webhook url you will provide to Numerai. A copy is stored in .numerai/submission_url.txt.
        - docker_repo:      Used for "numerai docker ..."
    """
    # Thin CLI entry point: all provisioning logic lives in terraform_setup.
    # NOTE(review): the parameters look like CLI options — presumably this is
    # decorated as a click command elsewhere; confirm.
    terraform_setup(verbose, cpu, memory, update) | 26,299 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.