def main():
"""Main function"""
parser = ArgumentParser()
parser.add_argument("config", help="json cluster configuration file")
parser.add_argument("pattern", help="Filename pattern to download")
args = parser.parse_args()
config = get_config(parser, args)
pattern = args.pattern
copy_files(config, pattern)
logger.info("Done.")
| 18,500
|
def make_object(page, d):
"""Object block
Block loads a Object Model (Wavefront) along with it's *.mtl file. PARAM1
must be equal to *.obj and *.mtl filename (use lowercase extension). Files
must share same filename and must be loaded in the media/document folder.
If PARAM2 is set to 'scale', object will be scaled.
"""
identity = f'{page.id}-model-{d["num"]}'
position = f'0 {round(-d["43"]/2, 4)} 0'
blob = f'id=:{identity}=;position=:{position}'
if d['PARAM2'] == 'scale':
blob += f'=;scale=:{round(fabs(d["41"]), 4)} {round(fabs(d["43"]), 4)} '
blob += f'{round(fabs(d["42"]), 4)}'
if d['NAME'] == 'obj-mtl':
blob += f'=;obj-model=:{d["PARAM1"]}'
elif d['NAME'] == 'gltf':
blob += f'=;gltf-model=:{d["PARAM1"]}'
blob += f'=;layer=:{d["layer"]}=;tag=:{d["tag"]}=;closing=:{d["closing"]+1}'
page.ent_dict[identity] = blob
return
| 18,501
|
def read_xls_as_dict(filename, header="top"):
"""
    Read an xls file as a dictionary.
@param filename File name (*.xls or *.xlsx)
@param header Header position. Options: "top", "left"
@return Dictionary with header as key
"""
table = read_xls(filename)
if (header == "top"):
return read_table_header_top(table)
elif (header == "left"):
return read_table_header_left(table)
else:
return {}
| 18,502
|
def check_hostgroup(zapi, region_name, cluster_id):
"""check hostgroup from region name if exists
:region_name: region name of hostgroup
:returns: true or false
"""
return zapi.hostgroup.exists(name="Region [%s %s]" % (region_name, cluster_id))
| 18,503
|
def matlab_to_tt(ttemps, eng, is_orth=True, backend="numpy", mode="l"):
"""Load matlab.object representing TTeMPS into Python as TT"""
    fd, f = tempfile.mkstemp(suffix=".mat")
    os.close(fd)  # close the OS-level handle; only the path is used below
eng.TTeMPS_to_Py(f, ttemps, nargout=0)
tt = load_matlab_tt(f, is_orth=is_orth, mode=mode, backend=backend)
return tt
| 18,504
|
def encode_mecab(tagger, string):
"""
string을 mecab을 이용해서 형태소 분석
:param tagger: 형태소 분석기 객체
:param string: input text
:return tokens: 형태소 분석 결과
:return indexs: 띄어쓰기 위치
"""
string = string.strip()
if len(string) == 0:
return [], []
words = string.split()
nodes = tagger.pos(" ".join(words))
tokens = []
for node in nodes:
surface = node[0].strip()
if 0 < len(surface):
            for s in surface.split():  # handle multi-word surfaces in the MeCab output (e.g. '영치기 영차')
tokens.append(s)
indexs = []
index, start, end = -1, 0, 100000
    for i, token in enumerate(tokens):  # verify the tokenization aligns with the original words
if end < len(words[index]):
start = end
end += len(token)
else:
index += 1
start = 0
end = len(token)
            indexs.append(i)  # record the index of the token where each word actually starts
assert words[index][start:end] == token, f"{words[index][start:end]} != {token}"
return tokens, indexs
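# Usage sketch (added for illustration; assumes `tagger` is an object with a
# .pos(text) method returning (surface, tag) pairs, e.g. konlpy.tag.Mecab):
#
# from konlpy.tag import Mecab
# tokens, indexs = encode_mecab(Mecab(), "영치기 영차")
# # `tokens` holds the morphemes; `indexs` holds, for each whitespace-separated
# # word, the index of the token where that word starts.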
| 18,505
|
def __vector__init__(self, obj=None):
"""Initializes the vector.
Argument 'obj':
    If 'obj' is an integer, the vector is initialized as a vector of
    'obj' elements. If 'obj' is a Python sequence, the vector is
    initialized as an equivalent of 'obj' by invoking self.fromlist().
"""
self.__old_init__()
if isinstance(obj, int):
self.resize(obj)
    elif obj is not None:
self.create(obj)
| 18,506
|
def nutrient_limited_growth(X, idx_A, idx_B, growth_rate, half_saturation):
""" non-linear response with respect to *destination/predator* compartment
Similar to holling_type_II and is a reparameterization of holling II.
The response with respect to the origin compartment 'B' is approximately
linear for small 'B' and converges towards an upper limit governed by the
'growth_rate' for large 'B'.
For examples see:
`Examples <https://gist.github.com/465b/cce390f58d64d70613a593c8038d4dc6>`_
Parameters
----------
X : np.array
containing the current state of the contained quantity of each
compartment
idx_A : integer
index of the element representing the destination/predator compartment
idx_B : integer
        index of the element representing the origin/prey compartment
growth_rate : float
first parameter of the interaction.
governs the upper limit of the response.
half_saturation : float
second parameter of the interaction.
governs the slope of the response.
Returns
-------
    df : float
        change in the origin and destination compartments, calculated as
        df = growth_rate * (B / (half_saturation + B)) * A
"""
A = X[idx_A] # quantity of compartment A (predator/consumer)
B = X[idx_B] # quantity of compartment B (prey/nutrient)
df = growth_rate*(B/(half_saturation+B))*A
return df
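# Worked example (added for illustration): with half_saturation = 1.0 the
# per-capita response B / (1 + B) is near-linear for small B and saturates,
# so df approaches growth_rate * A for large B.
X_demo = np.array([2.0, 0.5])  # A (predator) = 2.0, B (prey) = 0.5
df_demo = nutrient_limited_growth(X_demo, 0, 1, growth_rate=1.5, half_saturation=1.0)
assert abs(df_demo - 1.0) < 1e-12  # 1.5 * (0.5 / 1.5) * 2.0 == 1.0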
| 18,507
|
def aes128_decrypt(AES_KEY, _data):
"""
AES 128 位解密
:param requestData:
:return:
"""
    # cipher instance for the given key
    newAes = getAesByKey(AES_KEY)
    # decrypt
    data = newAes.decrypt(_data)
    rawDataLength = len(data)
    # strip the padding bytes appended to the end of the data
paddingNum = ord(data[rawDataLength - 1])
if paddingNum > 0 and paddingNum <= 16:
data = data[0:(rawDataLength - paddingNum)]
return data
| 18,508
|
def db(app):
"""Yield a database for the tests."""
_db.app = app
with app.app_context():
_db.create_all()
yield _db
# Explicitly close DB connection
_db.session.close()
_db.drop_all()
| 18,509
|
def generate_lists(project_id):
"""takes a scopus facettes export and extracts the two columns for the most common journals and the moste common
keywords. writes the list to the two files 'keywords_facettes.txt and journal_facettes.txt"""
with app.app_context():
location = app.config.get("LIBINTEL_DATA_DIR")
journal_facettes = []
keyword_facettes = []
with open(location + '/out/' + project_id + '/' + 'facettes.csv', 'r', encoding='utf-8-sig') as csvfile:
linereader = csv.reader(csvfile, delimiter=',')
for row in linereader:
if row.__len__() < 16:
continue
# skip header line
if row[12] == 'SOURCE TITLE':
continue
# skip empty data
if row[12] != '':
journal_facettes.append(row[12])
if row[14] != '':
keyword_facettes.append(row[14])
save_facettes_list(project_id, keyword_facettes)
save_facettes_list(project_id, journal_facettes, 'journal')
| 18,510
|
def test_finish_secret_finished():
""" Test that it doesn't update the secret if it's already AWSCURRENT"""
mock_secrets = Mock()
token = 'ver1'
mock_secrets.describe_secret.return_value = {
'VersionIdsToStages': {
'ver1': ['AWSCURRENT'],
'ver2': ['AWSPENDING']
}
}
rotation.finish_secret(mock_secrets, ARN, token)
mock_secrets.describe_secret.assert_called_with(SecretId=ARN)
mock_secrets.update_secret_version_stage.assert_not_called()
| 18,511
|
def human_readable_size(size, decimals=1):
"""Transform size in bytes into human readable text."""
for unit in ["B", "KB", "MB", "GB", "TB"]:
if size < 1000:
break
size /= 1000
return f"{size:.{decimals}f} {unit}"
| 18,512
|
def spec_save_pkl_matrix(w, f, filout, verb=False):
"""
Save pickle
"""
tpl = {'w': w, 'f': f}
with open(filout, 'wb') as handle:
pickle.dump(tpl, handle, protocol=pickle.HIGHEST_PROTOCOL)
# with open('filename.pickle', 'rb') as handle:
# b = pickle.load(handle)
    if verb: print('Pickle with processed template saved in {}'.format(filout))
return
| 18,513
|
def generichash_blake2b_final(statebuf, digest_size):
"""Finalize the blake2b hash state and return the digest.
    :param statebuf: the initialized blake2b state buffer
    :type statebuf: bytes
    :param digest_size: the desired digest size in bytes
    :type digest_size: int
:return: the blake2 digest of the passed-in data stream
:rtype: bytes
"""
_digest = ffi.new("unsigned char[]", crypto_generichash_BYTES_MAX)
rc = lib.crypto_generichash_blake2b_final(statebuf, _digest, digest_size)
ensure(rc == 0, 'Unexpected failure',
raising=exc.RuntimeError)
return ffi.buffer(_digest, digest_size)[:]
| 18,514
|
def walker_input_formatter(t, obs):
"""
    This function formats the data to give as input to the controller.
    :param t: current timestep (unused)
    :param obs: the observation to forward
    :return: the observation, unchanged
"""
return obs
| 18,515
|
def test_parse_nextflow_exec_report_html():
"""Test finding and parsing of Nextflow workflow execution HTML report"""
path = exec_report.find_exec_report(dirpath)
assert str(path.absolute()) == str(input_html.absolute())
info = exec_report.get_info(dirpath)
assert isinstance(info, exec_report.NextflowWorkflowExecInfo)
assert info.dict() == expected_report_info
| 18,516
|
def outpost_controller(
self: MonitoredTask, outpost_pk: str, action: str = "up", from_cache: bool = False
):
"""Create/update/monitor/delete the deployment of an Outpost"""
logs = []
if from_cache:
outpost: Outpost = cache.get(CACHE_KEY_OUTPOST_DOWN % outpost_pk)
LOGGER.debug("Getting outpost from cache to delete")
else:
outpost: Outpost = Outpost.objects.filter(pk=outpost_pk).first()
LOGGER.debug("Getting outpost from DB")
if not outpost:
LOGGER.warning("No outpost")
return
self.set_uid(slugify(outpost.name))
try:
controller_type = controller_for_outpost(outpost)
if not controller_type:
return
with controller_type(outpost, outpost.service_connection) as controller:
logs = getattr(controller, f"{action}_with_logs")()
LOGGER.debug("---------------Outpost Controller logs starting----------------")
for log in logs:
LOGGER.debug(log)
LOGGER.debug("-----------------Outpost Controller logs end-------------------")
except (ControllerException, ServiceConnectionInvalid) as exc:
self.set_status(TaskResult(TaskResultStatus.ERROR).with_error(exc))
else:
self.set_status(TaskResult(TaskResultStatus.SUCCESSFUL, logs))
| 18,517
|
def build_url_base(url):
"""Normalize and build the final url
:param url: The given url
:type url: str
:return: The final url
:rtype: str
"""
normalize = normalize_url(url=url)
final_url = "{url}/api".format(url=normalize)
return final_url
| 18,518
|
def build_decoder(
latent_dim: int,
input_shape: Tuple,
encoder_shape: Tuple,
filters: List[int],
kernels: List[Tuple[int, int]],
strides: List[int]
) -> Model:
"""Return decoder model.
Parameters
----------
    latent_dim:int,
        Size of the latent vector.
    input_shape:Tuple,
        Shape of the original input; used for the final reshape.
encoder_shape:Tuple,
Output shape of the last convolutional layer
of the encoder model.
filters:List[int],
List of filters for the convolutional layer.
kernels:List[Tuple[int, int]],
List of kernel sizes for the convolutional layer.
strides:List[int]
List of strides for the convolutional layer.
"""
decoder_input = Input(
shape=(latent_dim,),
name='decoder_input'
)
x = Dense(np.prod(encoder_shape))(decoder_input)
x = Reshape(encoder_shape)(x)
x = decoder_blocks(
x,
reversed(filters),
reversed(kernels),
reversed(strides)
)
decoder_output = Conv2DTranspose(
filters=1,
kernel_size=kernels[0],
activation=axis_softmax,
padding='same',
name='decoder_output'
)(x)
reshape = Reshape(input_shape)(decoder_output)
# Instantiate Decoder Model
return Model(
decoder_input,
reshape,
name='decoder'
)
| 18,519
|
def execute_with_python_values(executable, arguments=(), backend=None):
"""Execute on one replica with Python values as arguments and output."""
backend = backend or get_local_backend()
def put(arg):
return Buffer.from_pyval(
arg, device=executable.DeviceOrdinals()[0], backend=backend)
arguments = [put(arg) for arg in arguments]
return executable.Execute(arguments).to_py()
| 18,520
|
def update_kubeproxy(token, ca, master_ip, api_port, hostname_override):
"""
Configure the kube-proxy
:param token: the token to be in the kubeconfig
:param ca: the ca
:param master_ip: the master node IP
:param api_port: the API server port
:param hostname_override: the hostname override in case the hostname is not resolvable
"""
create_kubeconfig(token, ca, master_ip, api_port, "proxy.config", "kubeproxy")
set_arg("--master", None, "kube-proxy")
if hostname_override:
set_arg("--hostname-override", hostname_override, "kube-proxy")
subprocess.check_call("systemctl restart snap.microk8s.daemon-proxy.service".split())
| 18,521
|
def vm_create(ctx, name, uuid, vport_id, mac, ipaddress):
"""Create VM for a given ID"""
params = {'name': name,
'UUID': uuid,
'interfaces': [{'VPortID': vport_id,
'MAC': mac,
'IPAddress': ipaddress}]}
result = ctx.obj['nc'].post("vms", params)[0]
print_object(result, exclude=['interfaces', 'resyncInfo'],
only=ctx.obj['show_only'])
| 18,522
|
def run_cmd(cmd, cwd=None):
"""
    Runs the given command and returns the output decoded as UTF-8.
"""
return subprocess.check_output(cmd,
cwd=cwd, encoding="utf-8", errors="ignore")
| 18,523
|
def access_some_envs():
"""
A simple function used to demonstrate accessing environment
    vars from within the context of a virtual environment handled by Poetry
"""
my_env_var = os.environ.get("MY_ENV_VAR")
print(my_env_var)
| 18,524
|
def _get_label_members(X, labels, cluster):
"""
Helper function to get samples of a specified cluster.
Args:
X (np.ndarray): ndarray with dimensions [n_samples, n_features]
data to check validity of clustering
labels (np.array): clustering assignments for data X
cluster (int): cluster of interest
Returns: members (np.ndarray)
array of dimensions (n_samples, n_features) of samples of the
specified cluster.
"""
indices = np.where(labels == cluster)[0]
members = X[indices]
return members
| 18,525
|
def get_spectrum_by_close_values(
mz: list,
it: list,
left_border: float,
right_border: float,
*,
eps: float = 0.0
) -> Tuple[list, list, int, int]:
"""int
Function to get segment of spectrum by left and right
border
:param mz: m/z array
:param it: it intensities
:param left_border: left border
:param right_border: right border
:param eps: epsilon to provide regulation of borders
:return: closest to left and right border values of spectrum, left and right
"""
mz, it = mz.copy(), it.copy()
left = bisect_left(mz, left_border - eps)
right = bisect_right(mz, right_border + eps)
return mz[left:right].copy(), it[left:right].copy(), left, right
| 18,526
|
def speak():
"""API call to have BiBli speak a phrase."""
subprocess.call("amixer sset Master 100%", shell=True)
data = request.get_json()
lang = data["lang"] if "lang" in data and len(data["lang"]) else "en"
#espeak.set_parameter(espeak.Parameter.Rate, 165)
#espeak.set_parameter(espeak.Parameter.Pitch, 70)
if lang == "es":
# espeak.set_voice("europe/es")
os.system("sudo espeak -v es '" + data["msg"] + "' -a 165 -p 70")
else:
os.system("sudo espeak -v en-us '" + data["msg"] + "' -a 165 -p 70")
# espeak.set_voice("en-us")
# espeak.synth(data["msg"])
# subprocess.call('espeak -v%s+f3 -a200 %s &' % (lang, "'\"%s\"'" % data["msg"]), shell=True)
return jsonify({})
| 18,527
|
def refresh_wrapper(trynum, maxtries, *args, **kwargs):
"""A @retry argmod_func to refresh a Wrapper, which must be the first arg.
When using @retry to decorate a method which modifies a Wrapper, a common
cause of retry is etag mismatch. In this case, the retry should refresh
the wrapper before attempting the modifications again. This method may be
passed to @retry's argmod_func argument to effect such a refresh.
Note that the decorated method must be defined such that the wrapper is its
first argument.
"""
arglist = list(args)
# If we get here, we *usually* have an etag mismatch, so specifying
# use_etag=False *should* be redundant. However, for scenarios where we're
# retrying for some other reason, we want to guarantee a fresh fetch to
# obliterate any local changes we made to the wrapper (because the retry
# should be making those changes again).
arglist[0] = arglist[0].refresh(use_etag=False)
return arglist, kwargs
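# Usage sketch (added; the decorated function and its arguments are
# hypothetical). Per the docstring, this helper is passed as @retry's
# argmod_func, and the wrapper must be the decorated method's first argument:
#
# @retry(argmod_func=refresh_wrapper)
# def rename_wrapper(wrapper, new_name):
#     wrapper.name = new_name
#     return wrapper.update()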
| 18,528
|
def compute_frames_per_animation(
attacks_per_second: float,
base_animation_length: int,
speed_coefficient: float = 1.0,
engine_tick_rate: int = 60,
is_channeling: bool = False) -> int:
"""Calculates frames per animation needed to resolve a certain ability at attacks_per_second.
Args:
attacks_per_second: attacks per second of character
base_animation_length: animation length of ability
speed_coefficient: speed-up scalar of ability
engine_tick_rate: server tick rate
is_channeling: whether or not the ability is a channeling skill
Returns:
int: number of frames one casts needs to resolve for
"""
_coeff = engine_tick_rate / (attacks_per_second * speed_coefficient)
    if is_channeling:
        return int(np.floor(_coeff))
    else:
        return int(np.ceil((base_animation_length - 1) / base_animation_length * _coeff))
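# Worked example (added): at 1.4 attacks per second on a 60 Hz engine,
# _coeff = 60 / 1.4 ≈ 42.857. A non-channeled 20-frame animation needs
# ceil(19 / 20 * 42.857) = 41 frames; a channeled one needs floor(42.857) = 42.
assert compute_frames_per_animation(1.4, 20) == 41
assert compute_frames_per_animation(1.4, 20, is_channeling=True) == 42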
| 18,529
|
def pad_rect(rect, move):
"""Returns padded rectangles given specified padding"""
if rect['dx'] > 2:
rect['x'] += move[0]
rect['dx'] -= 1*move[0]
if rect['dy'] > 2:
rect['y'] += move[1]
rect['dy'] -= 1*move[1]
return rect
| 18,530
|
async def yes_no(ctx: commands.Context,
message: str="Are you sure? Type **yes** within 10 seconds to confirm. o.o"):
"""Yes no helper. Ask a confirmation message with a timeout of 10 seconds.
ctx - The context in which the question is being asked.
    message - Optional message that the question should ask.
"""
await ctx.send(message)
try:
message = await ctx.bot.wait_for("message", timeout=10,
check=lambda message: message.author == ctx.message.author)
except asyncio.TimeoutError:
await ctx.send("Timed out waiting. :<")
return False
if message.clean_content.lower() not in ["yes", "y"]:
await ctx.send("Command cancelled. :<")
return False
return True
| 18,531
|
def test_read_source_spaces():
"""Test reading of source space meshes
"""
src = read_source_spaces(fname, add_geom=True)
# 3D source space
lh_points = src[0]['rr']
lh_faces = src[0]['tris']
lh_use_faces = src[0]['use_tris']
rh_points = src[1]['rr']
rh_faces = src[1]['tris']
rh_use_faces = src[1]['use_tris']
assert_true(lh_faces.min() == 0)
assert_true(lh_faces.max() == lh_points.shape[0] - 1)
assert_true(lh_use_faces.min() >= 0)
assert_true(lh_use_faces.max() <= lh_points.shape[0] - 1)
assert_true(rh_faces.min() == 0)
assert_true(rh_faces.max() == rh_points.shape[0] - 1)
assert_true(rh_use_faces.min() >= 0)
assert_true(rh_use_faces.max() <= rh_points.shape[0] - 1)
| 18,532
|
def test_activity_history(api):
"""
Test that the FacebookAPI class can retrieve account activity history.
"""
activity_history = api.get_ad_activity_history(
start_date=datetime.utcnow() - timedelta(days=7),
end_date=datetime.utcnow()
)
assert activity_history is not None
| 18,533
|
def __validation(size: int, it1: int, it2: int, it3: int, it4: int) -> bool:
""" Проверка на корректность тура
size: размер маршрута
it1, it2, it3, it4: индексы городов: t1, t2i, t2i+1, t2i+2
return: корректен или нет
"""
return between(size, it1, it3, it4) and between(size, it4, it2, it1)
| 18,534
|
def get_type_associations(base_type, generic_base_type): # type: (t.Type[TType], t.Type[TValue]) -> t.List[t.Tuple[t.Type[TValue], t.Type[TType]]]
"""Create and return a list of tuples associating generic_base_type derived types with a corresponding base_type derived type."""
return [item for item in [(get_generic_type(sc_type, generic_base_type), sc_type) for sc_type in get_subclasses(base_type)] if item[1]]
| 18,535
|
def df_to_asc(dfname, outname):
"""
:param outname:
:param dfname:
:return:
"""
dfname.to_csv(outname, sep='\t', encoding='utf-8', index=False)
| 18,536
|
def identify_empty_droplets(data, min_cells=3, **kw):
"""Detect empty droplets using DropletUtils
"""
    import rpy2.robjects as robj
    from rpy2.robjects.packages import importr
    import anndata2ri
importr("DropletUtils")
adata = data.copy()
col_sum = adata.X.sum(0)
if hasattr(col_sum, 'A'):
col_sum = col_sum.A.squeeze()
keep = col_sum > min_cells
adata = adata[:,keep]
#adata.X = adata.X.tocsc()
anndata2ri.activate()
robj.globalenv["X"] = adata
res = robj.r('res <- emptyDrops(assay(X))')
anndata2ri.deactivate()
keep = res.loc[res.FDR<0.01,:]
data = data[keep.index,:]
data.obs['empty_FDR'] = keep['FDR']
return data
| 18,537
|
def catalog_category_RSS(category_id):
"""
Return an RSS feed containing all items in the specified category_id
"""
items = session.query(Item).filter_by(
category_id=category_id).all()
doc = jaxml.XML_document()
doc.category(str(category_id))
for item in items:
doc._push()
doc.item()
doc.id(item.id)
doc.name(item.name)
doc.description(item.description)
doc.imagepath('"' + item.image + '"')
doc.category_id(item.category_id)
doc.user_id(item.user_id)
doc._pop()
    return repr(doc)
| 18,538
|
def get_caller_name(N=0, allow_genexpr=True):
"""
get the name of the function that called you
Args:
N (int): (defaults to 0) number of levels up in the stack
allow_genexpr (bool): (default = True)
Returns:
str: a function name
CommandLine:
python -m utool.util_dbg get_caller_name
python -m utool get_caller_name
python ~/code/utool/utool/__main__.py get_caller_name
python ~/code/utool/utool/__init__.py get_caller_name
python ~/code/utool/utool/util_dbg.py get_caller_name
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dbg import * # NOQA
>>> import utool as ut
>>> N = list(range(0, 13))
>>> allow_genexpr = True
>>> caller_name = get_caller_name(N, allow_genexpr)
>>> print(caller_name)
"""
if isinstance(N, (list, tuple, range)):
name_list = []
for N_ in N:
try:
name_list.append(get_caller_name(N_))
except AssertionError:
name_list.append('X')
return '[' + ']['.join(name_list) + ']'
parent_frame = get_stack_frame(N=N + 2)
caller_name = parent_frame.f_code.co_name
co_filename = parent_frame.f_code.co_filename
if not allow_genexpr:
count = 0
while True:
count += 1
if caller_name == '<genexpr>':
parent_frame = get_stack_frame(N=N + 1 + count)
caller_name = parent_frame.f_code.co_name
else:
break
#try:
# if 'func' in parent_frame.f_locals:
# caller_name += '(' + meta_util_six.get_funcname(parent_frame.f_locals['func']) + ')'
#except Exception:
# pass
if caller_name == '<module>':
# Make the caller name the filename
caller_name = splitext(split(co_filename)[1])[0]
if caller_name in {'__init__', '__main__'}:
# Make the caller name the filename
caller_name = basename(dirname(co_filename)) + '.' + caller_name
return caller_name
| 18,539
|
def display_pos(output):
"""Render the `output` of a POS tagging model on the screen.
    This makes use of the excellent visualization tools in spaCy's displaCy.
"""
tokens = output["words"]
text = " ".join(tokens)
tags = [tag.upper() for tag in output["tags"]]
labels = [label.upper() for label in load_labels("pos-tagging")]
display_with_displacy(text, labels, tags)
| 18,540
|
def make_build_dir(prefix=""):
"""Creates a temporary folder with given prefix to be used as a build dir.
Use this function instead of tempfile.mkdtemp to ensure any generated files
will survive on the host after the FINN Docker container exits."""
try:
inst_prefix = os.environ["FINN_INST_NAME"] + "/"
tempfile.tempdir = get_finn_root() + "/tmp/"
return tempfile.mkdtemp(prefix=inst_prefix + prefix)
except KeyError:
raise Exception(
"""Environment variable FINN_INST_NAME must be set
        correctly. Please ensure you have launched the Docker container correctly.
"""
)
| 18,541
|
def unblock_node_port_random(genesis_file: str,
transactions: Union[str,int] = None,
pause_before_synced_check: Union[str,int] = None, best_effort: bool = True,
did: str = DEFAULT_CHAOS_DID, seed: str = DEFAULT_CHAOS_SEED,
wallet_name: str = DEFAULT_CHAOS_WALLET_NAME,
wallet_key: str = DEFAULT_CHAOS_WALLET_KEY, pool: str = DEFAULT_CHAOS_POOL,
ssh_config_file: str = DEFAULT_CHAOS_SSH_CONFIG_FILE) -> bool:
"""
Unblock nodes randomly selected by calling block_node_port_random
State file "block_node_port_random" located in the chaos temp dir (see
get_chaos_temp_dir for details) is shared with the following functions
block_node_port_random
unblock_node_port_random
unblocked_nodes_are_caught_up
Because the aforementioned functions share a state file, they are intended
to be used together. The typical/suggested workflow would be:
1. Block the node port on some nodes (block_node_port_random)
2. Optionally do something while node ports are blocked (i.e. generate load)
3. Unblock node port on the set of nodes selected in step 1 above.
4. Optionally do something while nodes are catching up.
5. Check if nodes unblocked in step 3 above are caught up.
:param genesis_file: The relative or absolute path to a genesis file.
Required.
:type genesis_file: str
:param transactions: Expected number of transactions on the domain ledger
after catchup has completed.
Optional. (Default: None)
:type transactions: Union[str,int]
:param pause_before_synced_check: Seconds to pause before checking if a node
is synced.
Optional. (Default: None)
:type pause_before_synced_check: Union[str,int]
:param best_effort: Attempt to unblock ports blocked when calling
block_node_port_random. Do not fail if the block_node_port_random state
file does not exist, if an error/exception is encountered while
unblocking a node port on any of the nodes, or if fewer than expected
nodes were unblocked.
Optional. (Default: True)
:type best_effort: bool
:param did: A steward or trustee DID. A did OR a seed is required, but not
both. The did will be used if both are given. Needed to get validator
info.
Optional. (Default: chaosindy.common.DEFAULT_CHAOS_DID)
:type did: str
:param seed : A steward or trustee seed. A did OR a seed is required, but
not both. The did will be used if both are given. Needed to get
validator info.
Optional. (Default: chaosindy.common.DEFAULT_CHAOS_SEED)
:type seed: str
:param wallet_name: The name of the wallet to use when getting validator
info.
Optional. (Default: chaosindy.common.DEFAULT_CHAOS_WALLET_NAME)
:type wallet_name: str
:param wallet_key: The key to use when opening the wallet designated by
wallet_name.
Optional. (Default: chaosindy.common.DEFAULT_CHAOS_WALLET_KEY)
:type wallet_key: str
:param pool: The pool to connect to when getting validator info.
Optional. (Default: chaosindy.common.DEFAULT_CHAOS_POOL)
:type pool: str
:param ssh_config_file: The relative or absolute path to the SSH config
file.
Optional. (Default: chaosindy.common.DEFAULT_CHAOS_SSH_CONFIG_FILE)
:type ssh_config_file: str
:return: bool
"""
# TODO: Use the traffic shaper tool Kelly is using.
# http://www.uponmyshoulder.com/blog/2013/simulating-bad-network-conditions-on-linux/
#
# This function assumes that block_node_port_random has been called and a
# "block_node_port_random" file has been created in a temporary directory
# created using rules defined by get_chaos_temp_dir()
output_dir = get_chaos_temp_dir()
blocked_ports = {}
try:
with open(join(output_dir, "block_node_port_random"), "r") as f:
blocked_ports = json.load(f)
except Exception as e:
# Do not fail on exceptions like FileNotFoundError if best_effort is
# True
if best_effort:
return True
else:
raise e
selected = blocked_ports.keys()
unblocked = 0
tried_to_unblock = 0
# Keep track of nodes/ports that could not be unblocked either by the
# experiment's method or rollback segments and write it back to
# block_node_port_random in the experiement's temp directory
still_blocked_ports = {}
for node in selected:
logger.debug("node alias to unblock: %s", node)
try:
if unblock_port_by_node_name(node, str(blocked_ports[node]),
ssh_config_file):
unblocked += 1
else:
still_blocked_ports[node] = blocked_ports[node]
        except Exception:
            if not best_effort:
                raise
tried_to_unblock += 1
logger.debug("unblocked: %s -- tried_to_unblock: %s -- len-aliases: %s",
unblocked, tried_to_unblock, len(selected))
if not best_effort and unblocked < len(selected):
return False
# Only check if resurrected nodes are caught up if both a pause and number
# of transactions are given.
if pause_before_synced_check and transactions:
logger.debug("Pausing %s seconds before checking if unblocked nodes " \
"are synced...", pause_before_synced_check)
# TODO: Use a count down timer? May be nice for those who are running
# experiments manually.
sleep(int(pause_before_synced_check))
logger.debug("Checking if unblocked nodes are synced and report %s " \
"transactions...", transactions)
return unblocked_nodes_are_caught_up(genesis_file, transactions, did,
seed, wallet_name, wallet_key,
pool, ssh_config_file)
return True
| 18,542
|
def main():
"""Main method of Ansible module
"""
result = dict(
changed=False,
msg='',
specs=[]
)
module = AnsibleModule(
argument_spec=yaml.safe_load(DOCUMENTATION)['options'],
supports_check_mode=True,
)
# Set payload defaults
result['failed'] = False
specs = []
errors = []
# Collect inputs
deployed_metalsmith = module.params.get('deployed_metalsmith')
tripleo_ansible_inventory = module.params.get('tripleo_ansible_inventory')
new_ceph_spec = module.params.get('new_ceph_spec')
ceph_service_types = module.params.get('ceph_service_types')
tripleo_roles = module.params.get('tripleo_roles')
osd_spec = module.params.get('osd_spec')
fqdn = module.params.get('fqdn')
crush = module.params.get('crush_hierarchy')
# Set defaults
if ceph_service_types is None:
ceph_service_types = ['mon', 'mgr', 'osd']
if new_ceph_spec is None:
new_ceph_spec = "/home/stack/ceph_spec.yaml"
if tripleo_roles is None:
tripleo_roles = "/usr/share/openstack-tripleo-heat-templates/roles_data.yaml"
if osd_spec is None:
osd_spec = {}
if fqdn is None:
fqdn = False
if crush is None:
crush = {}
# Validate inputs
# 0. Are they using metalsmith xor an inventory as their method?
method = ""
required_files = []
if deployed_metalsmith is None and tripleo_ansible_inventory is not None:
method = 'inventory'
required_files.append(tripleo_ansible_inventory)
elif deployed_metalsmith is not None and tripleo_ansible_inventory is None:
method = 'metal'
required_files.append(deployed_metalsmith)
required_files.append(tripleo_roles)
else:
error = "You must provide either the "
error += "tripleo_ansible_inventory or deployed_metalsmith "
error += "parameter (but not both)."
errors.append(error)
result['failed'] = True
# 1. The required files must all be an existing path to a file
for fpath in required_files:
if not os.path.isfile(fpath):
error = str(fpath) + " is not a valid file."
errors.append(error)
result['failed'] = True
# 2. The directory for the spec file must be an existing path
fpath = os.path.dirname(new_ceph_spec)
if not os.path.isdir(fpath):
error = str(fpath) + " is not a valid directory."
errors.append(error)
result['failed'] = True
# 3. argument_spec already ensures osd_spec is a dictionary
# 4. Must be one of the ceph_spec.ALLOWED_DAEMONS used in the SERVICE_MAP
supported_services = flatten(SERVICE_MAP.values())
for service_type in ceph_service_types:
if service_type not in supported_services:
error = "'" + str(service_type) + "' must be one of "
error += str(supported_services)
errors.append(error)
result['failed'] = True
# 5. fqdn is only supported for the inventory method
if method != 'inventory' and fqdn:
error = "The fqdn option may only be true when using tripleo_ansible_inventory"
errors.append(error)
result['failed'] = True
if not result['failed']:
# Build data structures to map roles/services/hosts/labels
if method == 'metal':
roles_to_svcs = get_roles_to_svcs_from_roles(tripleo_roles)
roles_to_hosts = get_deployed_roles_to_hosts(deployed_metalsmith,
roles_to_svcs.keys())
hosts_to_ips = get_deployed_hosts_to_ips(deployed_metalsmith)
elif method == 'inventory':
with open(tripleo_ansible_inventory, 'r') as stream:
inventory = yaml.safe_load(stream)
roles_to_svcs = get_roles_to_svcs_from_inventory(inventory)
roles_to_hosts = get_inventory_roles_to_hosts(inventory,
roles_to_svcs.keys(),
fqdn)
hosts_to_ips = get_inventory_hosts_to_ips(inventory,
roles_to_svcs.keys(),
fqdn)
# regardless of how we built our maps, assign the correct labels
label_map = get_label_map(hosts_to_ips, roles_to_svcs,
roles_to_hosts, ceph_service_types)
# Build specs as list of ceph_spec objects from data structures
specs = get_specs(hosts_to_ips, label_map, ceph_service_types, osd_spec, crush)
# Render specs list to file
render(specs, new_ceph_spec)
# Set payloads
result['msg'] = " ".join(errors)
result['specs'] = specs
# exit and pass the key/value results
module.exit_json(**result)
| 18,543
|
def create_user(
*,
db: Session = Depends(deps.get_db),
user_in: schema_in.UserCreateIn,
) -> Any:
"""
Create new user.
"""
new_user = User(**{k: v for k, v in user_in.dict().items() if k != 'password'})
new_user.hashed_password = get_password_hash(user_in.password)
new_user.gid = -1
try:
db.add(new_user)
db.commit()
except IntegrityError:
db.rollback()
raise HTTPException(
status_code=400,
detail="The user with this username already exists in the system.",
)
new_role = Role(uid=new_user.uid, nickname=new_user.nickname, avatar=new_user.avatar, gid=-1)
new_role.reset()
db.add(new_role)
db.commit()
return GameEnum.OK.digest()
| 18,544
|
def number_format(interp, num_args, number, decimals=0, dec_point='.',
thousands_sep=','):
"""Format a number with grouped thousands."""
if num_args == 3:
return interp.space.w_False
ino = int(number)
dec = abs(number - ino)
rest = ""
if decimals == 0 and dec >= 0.5:
if number > 0:
ino += 1
else:
ino -= 1
elif decimals > 0:
s_dec = str(dec)
if decimals + 2 < len(s_dec):
if ord(s_dec[decimals + 2]) >= ord('5'):
dec += math.pow(10, -decimals)
if dec >= 1:
if number > 0:
ino += 1
else:
ino -= 1
rest = "0" * decimals
else:
s_dec = str(dec)
if not rest:
rest = s_dec[2:decimals + 2]
else:
rest = s_dec[2:] + "0" * (decimals - len(s_dec) + 2)
s = str(ino)
res = []
i = 0
while i < len(s):
res.append(s[i])
if s[i] != '-' and i != len(s) - 1 and (len(s) - i - 1) % 3 == 0:
for item in thousands_sep:
res.append(item)
i += 1
if decimals > 0:
for item in dec_point:
res.append(item)
return interp.space.wrap("".join(res) + rest)
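# Worked trace (added, illustrative): number_format(interp, 2, 1234.5678, 2)
# gives ino = 1234 and dec ≈ 0.5678. The digit after the two kept decimals is
# '7' >= '5', so dec is rounded up and rest becomes "57". The grouping loop
# inserts thousands_sep after the '1' (len(s) - i - 1 == 3), yielding
# "1,234" + "." + "57" -> "1,234.57".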
| 18,545
|
def _serialization_expr(value_expr: str, a_type: mapry.Type,
py: mapry.Py) -> Optional[str]:
"""
Generate the expression of the serialization of the given value.
If no serialization expression can be generated (e.g., in case of nested
structures such as arrays and maps), None is returned.
:param value_expr: Python expression of the value to be serialized
:param a_type: the mapry type of the value
:param py: Python settings
:return: generated expression, or None if not possible
"""
result = None # type: Optional[str]
if isinstance(a_type,
(mapry.Boolean, mapry.Integer, mapry.Float, mapry.String)):
result = value_expr
elif isinstance(a_type, mapry.Path):
if py.path_as == 'str':
result = value_expr
elif py.path_as == 'pathlib.Path':
result = 'str({})'.format(value_expr)
else:
raise NotImplementedError(
"Unhandled path_as: {}".format(py.path_as))
elif isinstance(a_type, (mapry.Date, mapry.Datetime, mapry.Time)):
result = '{value_expr}.strftime({dt_format!r})'.format(
value_expr=value_expr, dt_format=a_type.format)
elif isinstance(a_type, mapry.TimeZone):
if py.timezone_as == 'str':
result = value_expr
elif py.timezone_as == 'pytz.timezone':
result = 'str({})'.format(value_expr)
else:
raise NotImplementedError(
'Unhandled timezone_as: {}'.format(py.timezone_as))
elif isinstance(a_type, mapry.Duration):
result = '_duration_to_string({})'.format(value_expr)
elif isinstance(a_type, mapry.Array):
result = None
elif isinstance(a_type, mapry.Map):
result = None
elif isinstance(a_type, mapry.Class):
result = "{}.id".format(value_expr)
elif isinstance(a_type, mapry.Embed):
result = "serialize_{}({})".format(
mapry.py.naming.as_variable(a_type.name), value_expr)
else:
raise NotImplementedError(
"Unhandled serialization expression of type: {}".format(a_type))
return result
| 18,546
|
def get_init_hash():
""" 获得一个初始、空哈希值 """
return imagehash.ImageHash(np.zeros([8, 8]).astype(bool))
| 18,547
|
def get_vertex_between_points(point1, point2, at_distance):
"""Returns vertex between point1 and point2 at a distance from point1.
Args:
point1: First vertex having tuple (x,y) co-ordinates.
point2: Second vertex having tuple (x,y) co-ordinates.
at_distance: A distance at which to locate the vertex on the line joining point1 and point2.
Returns:
A Point object.
"""
line = LineString([point1, point2])
new_point = line.interpolate(at_distance)
return new_point
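# Example (added): the line from (0, 0) to (3, 4) has length 5, so the vertex
# at distance 2.5 from point1 is the midpoint (1.5, 2.0).
mid_point = get_vertex_between_points((0, 0), (3, 4), 2.5)
assert abs(mid_point.x - 1.5) < 1e-9 and abs(mid_point.y - 2.0) < 1e-9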
| 18,548
|
def disable_cache(response: Response) -> Response:
"""Prevents cached responses"""
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate, public, max-age=0"
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
return response
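# Usage sketch (added; assumes the Response type above is FastAPI's, which
# matches the annotation style; the app and route names are illustrative):
from fastapi import FastAPI, Response

app = FastAPI()

@app.get("/health")
def health(response: Response) -> dict:
    disable_cache(response)  # mark this response as non-cacheable
    return {"status": "ok"}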
| 18,549
|
def get_dir(src_point, rot_rad):
"""Rotate the point by `rot_rad` degree."""
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
src_result = [0, 0]
src_result[0] = src_point[0] * cs - src_point[1] * sn
src_result[1] = src_point[0] * sn + src_point[1] * cs
return src_result
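# Example (added): rotating the unit x-vector by pi/2 radians yields the unit
# y-vector, up to floating-point error.
rotated = get_dir([1.0, 0.0], np.pi / 2)
assert abs(rotated[0]) < 1e-12 and abs(rotated[1] - 1.0) < 1e-12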
| 18,550
|
def sparse_gauss_seidel(A,b,maxiters=100,tol=1e-8):
"""Returns the solution to the system Ax = b using the Gauss-Seidel method.
Inputs:
A (array) - 2D scipy.sparse matrix
b (array) - 1D NumPy array
maxiters (int, optional) - maximum iterations for algorithm to perform.
tol (float) - tolerance for convergence
Returns:
x (array) - solution to system Ax = b.
x_approx (list) - list of approximations at each iteration.
"""
if type(A) != spar.csr_matrix:
A = spar.csr_matrix(A)
n = A.shape[0]
x0 = np.zeros(n)
x = np.ones(n)
x_approx = []
    for k in range(maxiters):
x = x0.copy()
diag = A.diagonal()
        for i in range(n):
rowstart = A.indptr[i]
rowend = A.indptr[i+1]
Aix = np.dot(A.data[rowstart:rowend],
x[A.indices[rowstart:rowend]])
x[i] += (b[i] - Aix)/diag[i]
if np.max(np.abs(x0-x)) < tol:
return x, x_approx
x0 = x
x_approx.append(x)
print "Maxiters hit!"
return x, x_approx
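# Example (added for illustration): a small diagonally dominant system, for
# which Gauss-Seidel is guaranteed to converge.
A_demo = spar.csr_matrix(np.array([[4.0, 1.0, 0.0],
                                   [1.0, 4.0, 1.0],
                                   [0.0, 1.0, 4.0]]))
b_demo = np.array([1.0, 2.0, 3.0])
x_demo, _ = sparse_gauss_seidel(A_demo, b_demo)
assert np.allclose(A_demo.dot(x_demo), b_demo, atol=1e-6)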
| 18,551
|
def delete_user(user_id):
"""Delete user from Users database and their permissions
from SurveyPermissions and ReportPermissions.
:Route: /api/user/<int:user_id>
:Methods: DELETE
:Roles: s
:param user_id: user id
:return dict: {"delete": user_id}
"""
user = database.get_user(user_id)
database.delete_user(user)
return {"delete": user.id}
| 18,552
|
def selectTopFive(sortedList):
"""
从sortedList中选出前五,返回对应的名字与commit数量列成的列表
:param sortedList:按值从大到小进行排序的authorDict
:return:size -- [commit数量]
labels -- [名字]
"""
size = []
labels = []
for i in range(5):
labels.append(sortedList[i][0])
size.append(sortedList[i][1])
return size, labels
| 18,553
|
def get_license(file):
"""Returns the license from the input file.
"""
# Collect the license
lic = ''
for line in file:
if line.startswith('#include') or line.startswith('#ifndef'):
break
else:
lic += line
return lic
| 18,554
|
def quote_query_string(chars):
"""
Multibyte charactor string is quoted by double quote.
Because english analyzer of Elasticsearch decomposes
multibyte character strings with OR expression.
e.g. 神保町 -> 神 OR 保 OR 町
"神保町"-> 神保町
"""
    if not isinstance(chars, str):
chars = chars.decode('utf-8')
token = u''
qs = u''
in_escape = False
in_quote = False
in_token = False
for c in chars:
# backslash escape
if in_escape:
token += c
in_escape = False
continue
if c == u'\\':
token += c
in_escape = True
continue
# quote
if c != u'"' and in_quote:
token += c
continue
if c == u'"' and in_quote:
token += c
qs += token
token = u''
in_quote = False
continue
# otherwise: not in_quote
if _is_delimiter(c) or c == u'"':
if in_token:
qs += _quote_token(token)
token = u''
in_token = False
if c == u'"':
token += c
in_quote = True
else:
qs += c
continue
# otherwise: not _is_delimiter(c)
token += c
in_token = True
if token:
qs += _quote_token(token)
return qs
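# The helpers used above are not part of this snippet; a minimal sketch of
# what they are assumed to do (hypothetical implementations):
#
# def _is_delimiter(c):
#     return c.isspace()
#
# def _quote_token(token):
#     # wrap tokens containing non-ASCII (multibyte) characters in quotes
#     return u'"%s"' % token if any(ord(ch) > 127 for ch in token) else token
#
# With these, quote_query_string(u'神保町 tokyo') -> u'"神保町" tokyo'.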
| 18,555
|
def change_team():
"""Change the value of the global variable team."""
# Use team in global scope
global team
# Change the value of team in global: team
team = "justice league"
| 18,556
|
def run_generate_per_camera_videos(data_root: str, output_dir: str, num_workers: int) -> None:
"""Click entry point for ring camera .mp4 video generation."""
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
generate_per_camera_videos(data_root=Path(data_root), output_dir=Path(output_dir), num_workers=num_workers)
| 18,557
|
def copy_params(params: ParamsDict) -> ParamsDict:
"""copy a parameter dictionary
Args:
params: the parameter dictionary to copy
Returns:
the copied parameter dictionary
Note:
this copy function works recursively on all subdictionaries of the params
dictionary but does NOT copy any non-dictionary values.
"""
validate_params(params)
params = {**params}
if all(isinstance(v, dict) for v in params.values()):
return {k: copy_params(params[k]) for k in params}
return params
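# Example (added): sub-dictionaries are copied recursively, so mutating a
# copy's leaf entry leaves the original untouched (validate_params is the
# module's own checker, assumed to accept this structure).
original = {"wg": {"neff": 2.34}, "dc": {"coupling": 0.5}}
copied = copy_params(original)
copied["wg"]["neff"] = 1.0
assert original["wg"]["neff"] == 2.34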
| 18,558
|
def ConvertRaster2LatLong(InputRasterFile,OutputRasterFile):
"""
Convert a raster to lat long WGS1984 EPSG:4326 coordinates for global plotting
MDH
"""
# import modules
import rasterio
from rasterio.warp import reproject, calculate_default_transform as cdt, Resampling
# read the source raster
with rasterio.open(InputRasterFile) as src:
#get input coordinate system
Input_CRS = src.crs
# define the output coordinate system
Output_CRS = {'init': "epsg:4326"}
# set up the transform
Affine, Width, Height = cdt(Input_CRS,Output_CRS,src.width,src.height,*src.bounds)
kwargs = src.meta.copy()
kwargs.update({
'crs': Output_CRS,
'transform': Affine,
'affine': Affine,
'width': Width,
'height': Height
})
with rasterio.open(OutputRasterFile, 'w', **kwargs) as dst:
for i in range(1, src.count+1):
reproject(
source=rasterio.band(src, i),
destination=rasterio.band(dst, i),
src_transform=src.affine,
src_crs=src.crs,
dst_transform=Affine,
dst_crs=Output_CRS,
resampling=Resampling.bilinear)
| 18,559
|
def create_feature_from_school(train_df, test_df):
"""
Since schools generally play an important role in house hunting, let us create some variables around school.
"""
train_df["ratio_preschool"] = train_df["children_preschool"] / train_df["preschool_quota"].astype("float")
test_df["ratio_preschool"] = test_df["children_preschool"] / test_df["preschool_quota"].astype("float")
train_df["ratio_school"] = train_df["children_school"] / train_df["school_quota"].astype("float")
test_df["ratio_school"] = test_df["children_school"] / test_df["school_quota"].astype("float")
| 18,560
|
def watch_list(request):
"""
Get watchlist or create a watchlist, or delete from watchlist
:param request:
:return:
"""
if request.method == 'GET':
watchlist = WatchList.objects.filter(user=request.user)
serializer = WatchListSerializer(watchlist, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
elif request.method == 'POST':
movie_id = request.data.get('movie_id')
if movie_id is not None:
# check if movie is in db
try:
movie = Movie_Collected.objects.get(pk=movie_id)
watchlist = WatchList.objects.filter(user=request.user, movie=movie).exists()
if watchlist:
message = {"error": "Movie already in watchlist"}
return Response(data=message, status=status.HTTP_400_BAD_REQUEST)
else:
watchlist = WatchList.objects.create(user=request.user, movie=movie)
serializer = WatchListSerializer(watchlist)
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
except Movie_Collected.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
else:
message = {'error': 'Movie id is required'}
return Response(data=message, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
movie_id = request.data.get('movie_id')
if movie_id is not None:
try:
movie = Movie_Collected.objects.get(pk=movie_id)
WatchList.objects.filter(user=request.user, movie=movie).delete()
return Response(status=status.HTTP_204_NO_CONTENT)
except Movie_Collected.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
else:
message = {'error': 'Movie id is required'}
return Response(data=message, status=status.HTTP_400_BAD_REQUEST)
| 18,561
|
def write_coordinate_volumes(reference_filename,
output_x_filename,
output_y_filename,
output_z_filename):
"""Write 3 coordinate volumes in the geometry of a reference volume."""
reference_header = read_reference_header(reference_filename)
x_vect, y_vect, z_vect = create_coordinate_vectors(reference_header)
dim = [len(x_vect), len(y_vect), len(z_vect)]
# Volumes are written with force_disk_data_type in order to prevent the
# soma-io Nifti writer (libsomanifti) from encoding the data as scaled
# int16 (we want to have the full precision of float32, moreover external
# software would most probably not play well with such encoded data).
vol_out = aims.Volume(dim[0], dim[1], dim[2], dtype='FLOAT')
vol_out.copyHeaderFrom(reference_header)
np.asarray(vol_out)[..., 0] = x_vect[:, np.newaxis, np.newaxis]
aims.write(vol_out, output_x_filename,
options={'force_disk_data_type': True})
vol_out = aims.Volume(dim[0], dim[1], dim[2], dtype='FLOAT')
vol_out.copyHeaderFrom(reference_header)
np.asarray(vol_out)[..., 0] = y_vect[np.newaxis, :, np.newaxis]
aims.write(vol_out, output_y_filename,
options={'force_disk_data_type': True})
vol_out = aims.Volume(dim[0], dim[1], dim[2], dtype='FLOAT')
vol_out.copyHeaderFrom(reference_header)
np.asarray(vol_out)[..., 0] = z_vect[np.newaxis, np.newaxis, :]
aims.write(vol_out, output_z_filename,
options={'force_disk_data_type': True})
| 18,562
|
async def user_has_pl(api, room_id, mxid, pl=100):
"""
    Determine whether a user has the given power level (default 100, i.e. admin) in a room.
"""
pls = await api.get_power_levels(room_id)
users = pls["users"]
user_pl = users.get(mxid, 0)
return user_pl == pl
| 18,563
|
def ram_plot(df, ax, marker, markersize=None, markerfacecolor=None, color='None', linestyle='None', linewidth=None,
mew=None):
"""The function was named after Ram Yazdi that helped to solve this challenge in a dark hour"""
bars = df['topic'].unique()
mapping_name_to_index = {name: index for index, name in enumerate(bars)}
df['topic'] = df['topic'].replace(mapping_name_to_index)
pos = [0, 50, 100, 150, 200, 249] if len(bars) > 100 else [0, 50, 100]
df.set_index('topic').plot(legend=True, marker=marker, markersize=markersize, linestyle=linestyle, color=color,
markerfacecolor=markerfacecolor, grid=False, linewidth=linewidth, mew=mew, ax=ax)
plt.xticks(np.array(pos), pos, rotation=0)
plt.yticks(np.arange(0, 1.2, 0.2))
plt.legend()
| 18,564
|
def _set_read_options(request, eventual, transaction_id):
"""Validate rules for read options, and assign to the request.
Helper method for ``lookup()`` and ``run_query``.
:raises: :class:`ValueError` if ``eventual`` is ``True`` and the
``transaction_id`` is not ``None``.
"""
if eventual and (transaction_id is not None):
raise ValueError('eventual must be False when in a transaction')
opts = request.read_options
if eventual:
opts.read_consistency = _datastore_pb2.ReadOptions.EVENTUAL
elif transaction_id:
opts.transaction = transaction_id
| 18,565
|
def get_cgi_parameter_bool_or_default(form: cgi.FieldStorage,
key: str,
default: bool = None) -> Optional[bool]:
"""
Extracts a boolean parameter from a CGI form (``"1"`` = ``True``,
other string = ``False``, absent/zero-length string = default value).
"""
s = get_cgi_parameter_str(form, key)
if s is None or len(s) == 0:
return default
return is_1(s)
| 18,566
|
def loss_function_1(y_true, y_pred):
""" Probabilistic output loss """
a = tf.clip_by_value(y_pred, 1e-20, 1)
b = tf.clip_by_value(tf.subtract(1.0, y_pred), 1e-20, 1)
cross_entropy = - tf.multiply(y_true, tf.log(a)) - tf.multiply(tf.subtract(1.0, y_true), tf.log(b))
cross_entropy = tf.reduce_mean(cross_entropy, 0)
loss = tf.reduce_mean(cross_entropy)
return loss
| 18,567
|
def invert(seq: Sequence, axis_pitch=None) -> Iterator[Event]:
"""Invert the pitches about a given axis (mirror it
upside down). If axis_pitch is not given, invert about
the first pitch in the sequence.
"""
if axis_pitch is None:
try:
evt = next(seq.events)
except StopIteration:
return
axis_pitch = evt.pitches[0]
yield evt
for evt in seq.events:
delta = evt.pitches[0]-axis_pitch
if delta < 0: # note is below axis
yield Event([axis_pitch-delta], evt.duration)
elif delta > 0: # note is above axis
yield Event([axis_pitch-delta], evt.duration)
        else:  # it's the axis, so it stays the same
yield Event([evt.pitches[0]], evt.duration)
| 18,568
|
def sanitize_for_json(tag):
"""eugh the tags text is in comment strings"""
return tag.text.replace('<!--', '').replace('-->', '')
| 18,569
|
def load_data_and_labels(dataset_name):
"""
Loads MR polarity data from files, splits the data into words and generates labels.
Returns split sentences and labels.
"""
    # Load data from files
    positive_examples = list(open('data/' + str(dataset_name) + '/' + str(dataset_name) + '.pos', encoding="utf-8").readlines())
    # positive_examples = positive_examples[0:1000]
    positive_examples = [s.strip() for s in positive_examples]
    negative_examples = list(open('data/' + str(dataset_name) + '/' + str(dataset_name) + '.neg', encoding="utf-8").readlines())
    # negative_examples = negative_examples[0:1000]
    negative_examples = [s.strip() for s in negative_examples]
    # Split by words
    x_text = positive_examples + negative_examples
    x_text = [clean_str(sent) for sent in x_text]
    x_text = [s.split(" ") for s in x_text]
    # Generate labels
    positive_labels = [[0, 1] for _ in positive_examples]
    negative_labels = [[1, 0] for _ in negative_examples]
    y = np.concatenate([positive_labels, negative_labels], 0)
    return [x_text, y]
| 18,570
|
def tileswrap(ihtORsize, numtilings, floats, wrapwidths, ints=[], readonly=False):
"""Returns num-tilings tile indices corresponding to the floats and ints, wrapping some floats"""
qfloats = [floor(f * numtilings) for f in floats]
Tiles = []
for tiling in range(numtilings):
tilingX2 = tiling * 2
coords = [tiling]
b = tiling
for q, width in zip_longest(qfloats, wrapwidths):
c = (q + b % numtilings) // numtilings
coords.append(c % width if width else c)
b += tilingX2
coords.extend(ints)
Tiles.append(hashcoords(coords, ihtORsize, readonly))
return Tiles
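# Usage sketch (added; assumes IHT and hashcoords come from Sutton & Barto's
# tiles3 tile-coding software, whose API this function mirrors):
#
# from math import pi
# iht = IHT(2048)
# # 8 tilings over an angle scaled to [0, 10), wrapping at width 10 so that
# # 0 and 2*pi fall into the same tiles:
# indices = tileswrap(iht, 8, [angle * 10 / (2 * pi)], wrapwidths=[10])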
| 18,571
|
def coerce(data, egdata):
"""Coerce a python object to another type using the AE coercers"""
pdata = pack(data)
pegdata = pack(egdata)
pdata = pdata.AECoerceDesc(pegdata.type)
return unpack(pdata)
| 18,572
|
def setup_logging(level):
"""Setups a basic logger for this app.
Args:
level (str): What the log level should be set to i.e. INFO.
"""
logging.basicConfig(
stream=sys.stdout,
level=level,
format="%(asctime)s.%(msecs)03d %(levelname)s %(module)s: %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
| 18,573
|
def distorted_inputs(data_dir, batch_size, num_train_files, train_num_examples, boardsize, num_channels):
"""Construct distorted input for training using the Reader ops.
Args:
data_dir: Path to the data directory.
batch_size: Number of images per batch.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
"""
filenames = []
for fn in os.listdir(data_dir):
if 'test' not in fn and 'prop' not in fn:
filenames.append(os.path.join(data_dir, fn))
    print('filenames:{} in cnn_input.distorted_inputs()'.format(filenames))
#filenames = [os.path.join(data_dir, +'_%d.bin' % i)
# for i in xrange(1, num_train_files + 1)]
for f in filenames:
if not tf.gfile.Exists(f):
raise ValueError('Failed to find file: ' + f)
# Create a queue that produces the filenames to read.
filename_queue = tf.train.string_input_producer(filenames)
# Read examples from files in the filename queue.
read_input = read_data(filename_queue, boardsize, num_channels)
reshaped_image = read_input.uint8image
height = boardsize
width = boardsize
# Image processing for training the network. Note the many random
# distortions applied to the image.
distorted_image = tf.cast(tf.reshape(reshaped_image, [height, width, num_channels]), tf.float32)
# Ensure that the random shuffling has good mixing properties.
min_fraction_of_examples_in_queue = 0.4
min_queue_examples = int(min(train_num_examples *
min_fraction_of_examples_in_queue, 20000))
#min_queue_examples=64
print('Filling queue with %d images before starting to train. '
'This will take a few minutes.' % min_queue_examples)
# Generate a batch of images and labels by building up a queue of examples.
return _generate_image_and_label_batch(distorted_image, read_input.label,
min_queue_examples, batch_size,
shuffle=True)
| 18,574
|
def _write_mkaero1(model: Union[BDF, OP2Geom], name: str,
mkaero1s: List[MKAERO1], ncards: int,
op2_file, op2_ascii, endian: bytes, nastran_format: str='nx') -> int:
"""writes the MKAERO1
data = (1.3, -1, -1, -1, -1, -1, -1, -1,
0.03, 0.04, 0.05, -1, -1, -1, -1, -1)
"""
key = (3802, 38, 271)
makero1s_temp = []
makero1s_final = []
for mkaero in mkaero1s:
nmachs = len(mkaero.machs)
nkfreqs = len(mkaero.reduced_freqs)
assert nmachs > 0, mkaero
assert nkfreqs > 0, mkaero
if nmachs <= 8 and nkfreqs <= 8:
# no splitting required
makero1s_final.append((mkaero.machs, mkaero.reduced_freqs))
elif nmachs <= 8 or nkfreqs <= 8:
# one of machs or kfreqs < 8
makero1s_temp.append((mkaero.machs, mkaero.reduced_freqs))
else:
# both machs and kfreqs > 8
nloops_mach = int(np.ceil(nmachs/8))
for i in range(nloops_mach):
machs_temp = _makero_temp(mkaero.machs, i, nloops_mach)
assert len(machs_temp) > 0, (i, nloops_mach, machs_temp)
makero1s_temp.append((machs_temp, mkaero.reduced_freqs))
for (machs, reduced_freqs) in makero1s_temp:
nmachs = len(machs)
nkfreqs = len(reduced_freqs)
assert nmachs > 0, nmachs
assert nkfreqs > 0, nkfreqs
if nmachs <= 8 and nkfreqs <= 8: # pragma: no cover
raise RuntimeError(f'this should never happen...nmachs={nmachs} knfreqs={nkfreqs}')
if nmachs <= 8:
# nkfreqs > 8
nloops = int(np.ceil(nkfreqs/8))
for i in range(nloops):
reduced_freqs_temp = _makero_temp(reduced_freqs, i, nloops)
makero1s_final.append((machs, reduced_freqs_temp))
elif nkfreqs <= 8:
# nmachs > 8
nloops = int(np.ceil(nmachs/8))
for i in range(nloops):
machs_temp = _makero_temp(machs, i, nloops)
assert len(machs_temp) > 0, (i, nloops_mach, machs_temp)
makero1s_final.append((machs_temp, reduced_freqs))
else: # pragma: no cover
raise RuntimeError(f'this should never happen...nmachs={nmachs} knfreqs={nkfreqs}')
#raise RuntimeError((nmachs, nkfreqs))
ncards = len(makero1s_final)
nfields = 16
nbytes = write_header(name, nfields, ncards, key, op2_file, op2_ascii)
for machs, reduced_freqs in makero1s_final:
data = []
nmachs = len(machs)
nkfreqs = len(reduced_freqs)
assert nmachs > 0, machs
assert nkfreqs > 0, reduced_freqs
nint_mach = 8 - nmachs
nint_kfreq = 8 - nkfreqs
fmt1 = b'%if' % nmachs + b'i' * nint_mach
fmt2 = b'%if' % nkfreqs + b'i' * nint_kfreq
spack = Struct(endian + fmt1 + fmt2)
data.extend(machs.tolist())
assert nint_mach < 8, nint_mach
if nint_mach:
data.extend([-1]*nint_mach)
data.extend(reduced_freqs.tolist())
if nint_kfreq:
data.extend([-1]*nint_kfreq)
op2_ascii.write(f' mkaero1 data={data}\n')
op2_file.write(spack.pack(*data))
return nbytes
| 18,575
|
def _mesh_obj_large():
"""build a large, random mesh model/dataset"""
n_tri, n_pts = 400, 1000
node = np.random.randn(n_pts, 2)
element = np.array([np.random.permutation(n_pts)[:3] for _ in range(n_tri)])
perm = np.random.randn(n_tri)
np.random.seed(0)
el_pos = np.random.permutation(n_pts)[:16]
return PyEITMesh(node=node, element=element, perm=perm, el_pos=el_pos, ref_node=0)
| 18,576
|
def cursor():
"""Return a database cursor."""
return util.get_dbconn("mesosite").cursor()
| 18,577
|
def custom_field_check(issue_in, attrib, name=None):
""" This method allows the user to get in the comments customfiled that are not common
to all the project, in case the customfiled does not existe the method returns an
empty string.
"""
if hasattr(issue_in.fields, attrib):
value = str(eval('issue_in.fields.%s'%str(attrib)))
if name != None:
return str("%s : %s"%(name,value))
else:
return str(value)
else:
return str("")
| 18,578
|
def validate_all_files(*yaml_files):
"""
Validate each YAML file
"""
for yaml_file in yaml_files:
validate_regex_in_yaml_file(yaml_file)
| 18,579
|
def header(text, color='black', gen_text=None):
"""Create an HTML header"""
if gen_text:
raw_html = f'<h1 style="margin-top:16px;color: {color};font-size:54px"><center>' + str(
text) + '<span style="color: red">' + str(gen_text) + '</center></h1>'
else:
raw_html = f'<h1 style="margin-top:12px;color: {color};font-size:54px"><center>' + str(
text) + '</center></h1>'
return raw_html
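# Example (added for illustration): a plain header, and one whose trailing
# text is highlighted in red as generated output.
plain = header("Results", color="navy")
highlighted = header("Prompt: ", gen_text="generated continuation")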
| 18,580
|
def test_06() -> None:
""" Runs OK """
run(SAMPLE2, 5)
| 18,581
|
def replace_dataset_targets(dataset, num_classes):
"""
Replaces dataset targets with random targets. These random targets are also broadcasted to other processes from
the main process when using distributed training.
:param dataset: dataset whose targets are to be replaced.
:param num_classes: number of classes in the dataset.
    :return: None. The dataset's targets are replaced in place.
"""
targets = torch.randint(0, num_classes, (len(dataset.targets),))
dist_utils.broadcast_from_main(targets)
dataset.targets = targets.numpy().tolist()
| 18,582
|
def block_pose(detection, block_size=0.05):
# type: (AprilTagDetection, float) -> PoseStamped
"""Given a tag detection (id == 0), return the block's pose. The block pose
    has the same orientation as the tag detection, but its position is
translated to be at the cube's center.
Args:
detection: The AprilTagDetection.
block_size: The block's side length in meters.
"""
transform = tf.transformations.concatenate_matrices(
tf.transformations.translation_matrix(
[detection.pose.pose.position.x,
detection.pose.pose.position.y,
detection.pose.pose.position.z]
),
tf.transformations.quaternion_matrix(
[detection.pose.pose.orientation.x,
detection.pose.pose.orientation.y,
detection.pose.pose.orientation.z,
detection.pose.pose.orientation.w]
),
tf.transformations.translation_matrix(
[0, 0, -block_size / 2]
)
)
t = tf.transformations.translation_from_matrix(transform)
q = tf.transformations.quaternion_from_matrix(transform)
ps = PoseStamped()
ps.header.frame_id = detection.pose.header.frame_id
ps.header.stamp = detection.pose.header.stamp
ps.pose.position = Point(*t)
ps.pose.orientation = Quaternion(*q)
return ps
| 18,583
|
async def batch_omim_similarity(
data: models.POST_OMIM_Batch,
method: str = 'graphic',
combine: str = 'funSimAvg',
kind: str = 'omim'
) -> dict:
"""
Similarity score between one HPOSet and several OMIM Diseases
"""
other_sets = []
for other in data.omim_diseases:
try:
disease = Omim.get(other)
hpos = ','.join([str(x) for x in disease.hpo])
except KeyError:
hpos = ''
other_sets.append(
models.POST_HPOSet(
set2=hpos,
name=other
)
)
res = await terms.batch_similarity(
data=models.POST_Batch(
set1=data.set1,
other_sets=other_sets
),
method=method,
combine=combine,
kind=kind
)
return res
| 18,584
|
def test_pylint_libs(libfiles_parametrize):
""" Run pylint on each lib """
command = "pylint --rcfile='{}' '{}'".format(os.path.join(pytest.sfauto_dir, "pylintrc"), libfiles_parametrize)
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
stdout, stderr = process.communicate()
retcode = process.returncode
if retcode != 0 and stdout:
print(stdout)
# pylint return codes are a bitmap of:
# 0 : no errors
# 1 : fatal error in pylint itself
# 2 : python error messages
# 4 : python warning messages
# 8 : python refactor messages
# 16 : python conventions messages
# 32 : usage error
    assert retcode in (0, 16)
| 18,585
|
def vector(*,
unit: _Union[_cpp.Unit, str, None] = default_unit,
value: _Union[_np.ndarray, list]):
"""Constructs a zero dimensional :class:`Variable` holding a single length-3
vector.
:param value: Initial value, a list or 1-D numpy array.
:param unit: Optional, unit. Default=dimensionless
:returns: A scalar (zero-dimensional) Variable.
:seealso: :py:func:`scipp.vectors`
"""
return _cpp.vectors(dims=[], unit=unit, values=value)
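# Usage sketch, assuming scipp's documented API is available as `sc`:
# import scipp as sc
# v = sc.vector(value=[1.0, 2.0, 3.0], unit='m')
# v.values  -> array([1., 2., 3.])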
| 18,586
|
def read_lidar(filename, **kwargs):
"""Read a LAS file.
Args:
filename (str): Path to a LAS file.
Returns:
LasData: The LasData object return by laspy.read.
"""
try:
import laspy
except ImportError:
print(
"The laspy package is required for this function. Use pip install laspy to install it."
)
return
return laspy.read(filename, **kwargs)
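# Usage sketch ("points.las" is a placeholder path; requires laspy):
# las = read_lidar("points.las")
# if las is not None:
#     print(las.header.point_count)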
| 18,587
|
async def async_setup_entry(
hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
"""Set up WLED sensor based on a config entry."""
wled: WLED = hass.data[DOMAIN][entry.entry_id][DATA_WLED_CLIENT]
sensors = [
WLEDEstimatedCurrentSensor(entry.entry_id, wled),
WLEDUptimeSensor(entry.entry_id, wled),
WLEDFreeHeapSensor(entry.entry_id, wled),
]
async_add_entities(sensors, True)
| 18,588
|
def get_gene_starting_with(gene_symbol: str, verbose: bool = True):
""" get the genes that start with the symbol given
Args:
- gene_symbol: str
- verbose: bool
Returns:
- list of str
- None
"""
gene_symbol = gene_symbol.strip().upper()
ext = "search/symbol/{}*".format(gene_symbol)
data = get_api_response("{}/{}".format(URL, ext))
res = data["response"]["docs"]
    if not res:
        if verbose:
            print("No gene found starting with {}".format(gene_symbol))
        return
    else:
        gene_symbols = [doc["symbol"] for doc in res]
        if verbose:
            print("Found these genes starting with {}:".format(gene_symbol))
            for symbol in gene_symbols:
                print(symbol)
        return gene_symbols
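# Example call (assumes network access to the genenames search API):
# get_gene_starting_with("BRCA")
# -> prints and returns e.g. ["BRCA1", "BRCA2", ...]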
| 18,589
|
def initialize_engine(ip, port, username, password, project_name):
"""Initializes the calm dsl engine"""
set_server_details(ip, port, username, password, project_name)
init_db()
sync_cache()
click.echo("\nHINT: To get started, follow the 3 steps below:")
click.echo("1. Initialize an example blueprint DSL: calm init bp")
click.echo(
"2. Create and validate the blueprint: calm create bp --file HelloBlueprint/blueprint.py"
)
click.echo(
"3. Start an application using the blueprint: calm launch bp HelloBlueprint --app_name HelloApp01 -i"
)
click.echo("\nKeep Calm and DSL On!\n")
| 18,590
|
def gen_timestamp():
"""
    Generates a unique (let's hope!) whole-number timestamp: microseconds
    since the unix epoch.
"""
return int(time() * 1e6)
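# Quick check of the microsecond resolution that makes collisions unlikely,
# though not impossible (assumes `from time import time` is in scope):
# t1, t2 = gen_timestamp(), gen_timestamp()
# t2 >= t1, and both are ints on the order of 1.7e15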
| 18,591
|
def learningCurve(X, y, Xval, yval, Lambda):
"""returns the train and
cross validation set errors for a learning curve. In particular,
it returns two vectors of the same length - error_train and
error_val. Then, error_train(i) contains the training error for
i examples (and similarly for error_val(i)).
In this function, you will compute the train and test errors for
dataset sizes from 1 up to m. In practice, when working with larger
datasets, you might want to do this in larger intervals.
"""
# Number of training examples
m, _ = X.shape
# You need to return these values correctly
error_train = np.zeros(m)
error_val = np.zeros(m)
for i in range(m):
theta = trainLinearReg(X[:i + 1], y[:i + 1], Lambda)
error_train[i], _ = linearRegCostFunction(X[:i + 1], y[:i + 1], theta, 0)
error_val[i], _ = linearRegCostFunction(Xval, yval, theta, 0)
return error_train, error_val
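# Plotting sketch, assuming X/y/Xval/yval are already-loaded numpy arrays
# (e.g. from the ex5 regularized-regression dataset) and Lambda = 0:
# import matplotlib.pyplot as plt
# error_train, error_val = learningCurve(X, y, Xval, yval, 0)
# plt.plot(range(1, len(error_train) + 1), error_train, label='Train')
# plt.plot(range(1, len(error_val) + 1), error_val, label='Cross validation')
# plt.xlabel('Number of training examples'); plt.ylabel('Error'); plt.legend()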
| 18,592
|
def decode_xml(text):
"""Parse an XML document into a dictionary. This assume that the
document is only 1 level, i.e.:
<top>
<child1>content</child1>
<child2>content</child2>
</top>
will be parsed as: child1=content, child2=content"""
xmldoc = minidom.parseString(text)
return dict([(x.tagName, x.firstChild.nodeValue)
for x in xmldoc.documentElement.childNodes
if x.childNodes.length == 1])
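# Worked example of the shape described in the docstring (assumes
# `from xml.dom import minidom` is in scope for decode_xml):
# decode_xml("<top><child1>a</child1><child2>b</child2></top>")
# -> {'child1': 'a', 'child2': 'b'}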
| 18,593
|
def _qcheminputfile(ccdata, templatefile, inpfile):
"""
Generate input file from geometry (list of lines) depending on job type
:ccdata: ccData object
:templatefile: templatefile - tells us which template file to use
:inpfile: OUTPUT - expects a path/to/inputfile to write inpfile
"""
string = ''
if hasattr(ccdata, 'charge'):
charge = ccdata.charge
else:
charge = 0
if hasattr(ccdata, 'mult'):
mult = ccdata.mult
else:
print('Multiplicity not found, set to 1 by default')
mult = 1
# $molecule
string += '$molecule\n'
string += '{0} {1}\n'.format(charge, mult)
    # Geometry: prepend each atom's element symbol to its coordinate row
    atomnos = [pt.Element[x] for x in ccdata.atomnos]
    atomcoords = ccdata.atomcoords[-1]
    if not isinstance(atomcoords, list):
        atomcoords = atomcoords.tolist()
for i in range(len(atomcoords)):
atomcoords[i].insert(0, atomnos[i])
for atom in atomcoords:
string += ' {0} {1:10.8f} {2:10.8f} {3:10.8f}\n'.format(*atom)
string += '$end\n\n'
# $end
# $rem
    with open(templates.get(templatefile), 'r') as templatehandle:
        templatelines = templatehandle.readlines()
for line in templatelines:
string += line
# $end
return string
| 18,594
|
def arrangements(ns):
"""
    counts adapter arrangements: each run of consecutive 1-gaps multiplies the
    count by a "tribonacci" factor (the prime factors of the sample answer 19208
    hint at this); runs never exceed four 1s, so trib is only needed up to trib(4)
"""
trib = {0: 1, 1: 1, 2: 2, 3: 4, 4: 7}
count = 1
one_seq = 0
for n in ns:
if n == 1:
one_seq += 1
if n == 3:
count *= trib[one_seq]
one_seq = 0
return count
# # one-liner...
# return reduce(lambda c, n: (c[0]*trib[c[1]], 0) if n == 3 else (c[0], c[1]+1), ns, (1,0))[0]
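# Worked examples (self-contained; each run of k consecutive 1-gaps
# multiplies the count by trib[k]):
assert arrangements([1, 1, 1, 3]) == 4     # one run of three 1s -> trib[3]
assert arrangements([1, 1, 3, 1, 3]) == 2  # trib[2] * trib[1] = 2 * 1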
| 18,595
|
def vtpnt(x, y, z=0):
"""坐标点转化为浮点数"""
return win32com.client.VARIANT (pythoncom.VT_ARRAY | pythoncom.VT_R8, (x, y, z))
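# Usage sketch for passing a point to AutoCAD's COM interface
# (`acad` is a hypothetical win32com Application handle):
# pt = vtpnt(100.0, 50.0)
# acad.ActiveDocument.ModelSpace.AddCircle(pt, 10.0)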
| 18,596
|
def get_time_delta(pre_date: datetime.datetime):
    """
    Return the difference, in whole days, between the given time and now.
    Args:
        pre_date: the earlier datetime to compare against
    Returns:
        the number of days elapsed since pre_date
    """
date_delta = datetime.datetime.now() - pre_date
return date_delta.days
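# Example (the value depends on when "now" is evaluated):
# import datetime
# get_time_delta(datetime.datetime(2020, 1, 1))  # -> e.g. 1500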
| 18,597
|
def test_wfasnd_ndst_updatespl_1(plugin):
""" workflow as a node
workflow-node with one task,
splitter for node added after add
"""
wfnd = Workflow(name="wfnd", input_spec=["x"])
wfnd.add(add2(name="add2", x=wfnd.lzin.x))
wfnd.set_output([("out", wfnd.add2.lzout.out)])
# TODO: without this the test is failing
wfnd.plugin = plugin
wfnd.inputs.x = [2, 4]
wf = Workflow(name="wf", input_spec=["x"])
wf.add(wfnd)
wfnd.add2.split("x")
wf.set_output([("out", wf.wfnd.lzout.out)])
wf.plugin = plugin
with Submitter(plugin=plugin) as sub:
sub(wf)
results = wf.result()
assert results.output.out == [4, 6]
# checking the output directory
assert wf.output_dir.exists()
| 18,598
|
def render_checkbox_list(soup_body: object) -> object:
"""As the chosen markdown processor does not support task lists (lists with checkboxes), this function post-processes
a bs4 object created from outputted HTML, replacing instances of '[ ]' (or '[]') at the beginning of a list item
with an unchecked box, and instances of '[x]' (or '[X]') at the beginning of a list item with a checked box.
Args:
soup_body: bs4 object input
Returns:
modified bs4 object
"""
if not isinstance(soup_body, BeautifulSoup):
raise TypeError('Input must be a bs4.BeautifulSoup object')
for ul in soup_body.find_all('ul'):
for li in ul.find_all('li', recursive=False):
if (li.contents[0].string[:2] == '[]') or (li.contents[0].string[:3] == '[ ]'):
unchecked = soup_body.new_tag("input", disabled="", type="checkbox")
li.contents[0].string.replace_with(li.contents[0].string.replace('[] ', u'\u2002'))
li.contents[0].string.replace_with(li.contents[0].string.replace('[ ] ', u'\u2002'))
li.contents[0].insert_before(unchecked)
li.find_parent('ul')['style'] = 'list-style-type: none; padding-left: 0.5em; margin-left: 0.25em;'
elif (li.contents[0].string[:3] == '[x]') or (li.contents[0].string[:3] == '[X]'):
checked = soup_body.new_tag("input", disabled="", checked="", type="checkbox")
li.contents[0].string.replace_with(li.contents[0].string.replace('[x] ', u'\u2002'))
li.contents[0].string.replace_with(li.contents[0].string.replace('[X] ', u'\u2002'))
li.contents[0].insert_before(checked)
li.find_parent('ul')['style'] = 'list-style-type: none; padding-left: 0.5em; margin-left: 0.25em;'
return soup_body
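# Worked example, assuming bs4 is installed:
# from bs4 import BeautifulSoup
# soup = BeautifulSoup("<ul><li>[x] done</li><li>[ ] todo</li></ul>", "html.parser")
# out = render_checkbox_list(soup)
# str(out) now contains checked/unchecked <input type="checkbox"> elements
# in place of the [x] / [ ] markers.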
| 18,599
|