| content (string, lengths 22-815k) | id (int64, 0-4.91M) |
|---|---|
def drag_eqn(times,g,r):
"""define scenario and integrate"""
param = np.array([ g, r])
hinit = np.array([0.0,0.0]) # initial values (position and velocity, respectively)
h = odeint(deriv, hinit, times, args = (param,))
return h[:,0], h[:,1]
| 5,340,800
|
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up LGE device climate based on config_entry."""
entry_config = hass.data[DOMAIN]
lge_devices = entry_config.get(LGE_DEVICES)
if not lge_devices:
return
_LOGGER.debug("Starting LGE ThinQ climate setup...")
lge_climates = []
# AC devices
lge_climates.extend(
[
LGEACClimate(lge_device)
for lge_device in lge_devices.get(DeviceType.AC, [])
]
)
# Refrigerator devices
lge_climates.extend(
[
LGERefrigeratorClimate(lge_device, refrigerator_desc)
for refrigerator_desc in REFRIGERATOR_CLIMATE
for lge_device in lge_devices.get(DeviceType.REFRIGERATOR, [])
]
)
async_add_entities(lge_climates)
| 5,340,801
|
def virtual_potential_temperature_monc(theta, thref, q_v, q_cl):
"""
Virtual potential temperature.
Derived variable name: th_v_monc
Approximate form as in MONC
Parameters
----------
theta : numpy array or xarray DataArray
Potential Temperature. (K)
thref : numpy array or xarray DataArray
Reference Potential Temperature (usually 1D). (K)
q_v : numpy array or xarray DataArray
specific humidity
q_cl : numpy array or xarray DataArray
specific cloud liquid water content.
Returns
-------
theta_v: numpy array or xarray DataArray
Virtual potential temperature (K)
"""
th_v = theta + thref * (tc.c_virtual * q_v - q_cl)
if isinstance(th_v, xr.DataArray):
th_v.name = 'th_v_monc'
return th_v
| 5,340,802
|
def promote_user(username):
"""Give admin privileges from a normal user."""
user = annotator.credentials.find_one({'username': username})
if user:
if user['admin']:
flash("User {0} is already an administrator".format(username), 'warning')
else:
annotator.credentials.update_one(user, {'$set': {'admin': True}})
flash("User {0} promoted to administrator successfully".format(username), 'info')
else:
flash("Cannot promote unknown user {0} to administrator".format(username), 'warning')
return redirect(url_for('manage_users'))
| 5,340,803
|
def shutdown():
""" ensures the C++ node handle is shut down cleanly. It's good to call this a the end of any program
where you called rospy_and_cpp_init """
rospy.signal_shutdown('ros_init shutdown')
roscpp_initializer.shutdown()
| 5,340,804
|
def slide_number_from_xml_file(filename):
"""
Integer slide number from filename
Assumes /path/to/Slidefile/somekindofSlide36.something
"""
return int(filename[filename.rfind("Slide") + 5:filename.rfind(".")])
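# Illustrative usage (hypothetical filename, following the convention in the docstring above):
#   slide_number_from_xml_file("/deck/Slides/IntroSlide36.xml")  ->  36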
| 5,340,805
|
def massivescan(websites):
"""scan multiple websites / urls"""
# scan each website one by one
vulnerables = []
for website in websites:
io.stdout("scanning {}".format(website))
if scanner.scan(website):
io.stdout("SQL injection vulnerability found")
vulnerables.append(website)
if vulnerables:
return vulnerables
io.stdout("no vulnerable websites found")
return False
| 5,340,806
|
def is_strong_pass(password):
"""
Verify the strength of 'password'
Returns True if the password is strong, False otherwise
A password is considered strong if it has:
8 characters or more
1 digit or more
1 symbol or more
1 uppercase letter or more
1 lowercase letter or more
"""
# calculating the length
length_error = len(password) < 8
# searching for digits
digit_error = re.search(r"\d", password) is None
# searching for uppercase
uppercase_error = re.search(r"[A-Z]", password) is None
# searching for lowercase
lowercase_error = re.search(r"[a-z]", password) is None
# searching for symbols
symbol_error = re.search(r"[ !#$@%&'()*+,-./[\\\]^_`{|}~" + r'"]', password) is None
# overall result
password_ok = not (length_error or digit_error or uppercase_error or lowercase_error or symbol_error)
return password_ok
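# Illustrative usage (hypothetical passwords; assumes `re` is imported at module level):
#   is_strong_pass("Str0ng!pass")  ->  True   (length, digit, symbol, upper- and lowercase all present)
#   is_strong_pass("weakpass")     ->  False  (no digit, symbol or uppercase letter)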
| 5,340,807
|
def plot_config(config, settings=None):
"""
plot_config: obj -> obj
---------------------------------------------------------------
Sets the defaults for a custom experiment plot configuration object
from configobj.
The defaults are only set if the setting does not exist (thus,
it is implied that the user needs a default).
Required Parameters
-------------------
* config: obj
The configobj instance object to scan if defaults
have been customized.
Optional Parameters
-------------------
* settings: None
The global settings to use if it exists, otherwise
use the defaults.
Returns
-------
* config: obj
The configobj instance after defaults have been set
if applicable.
---------------------------------------------------------------
"""
config = _global_config(config, settings)
config['plot_style'] = 'whitegrid' if 'plot_style' not in config else config['plot_style']
config['plot_color'] = 'gray' if 'plot_color' not in config else config['plot_color']
config['plot_dpi'] = 300 if 'plot_dpi' not in config else config['plot_dpi']
config['plot_ext'] = '.png' if 'plot_ext' not in config else config['plot_ext']
return config
| 5,340,808
|
async def notify(event):
"""Notify all subscribers of ``event``."""
for subscriber in async_subscribers:
await subscriber(event)
| 5,340,809
|
def clear_waf_timestamp_files(conf):
"""
Remove timestamp files to force builds of generate_uber_files and project gen even if
some command after configure failes
:param conf:
"""
# Remove timestamp files to force builds even if some command after configure fail
force_timestamp_files = CONFIGURE_FORCE_TIMESTAMP_FILES
for force_timestamp_file in force_timestamp_files:
try:
force_timestamp_node = conf.get_bintemp_folder_node().make_node(force_timestamp_file)
os.stat(force_timestamp_node.abspath())
except OSError:
pass
else:
force_timestamp_node.delete()
# Add timestamp files for files generated by configure
check_timestamp_files = CONFIGURE_TIMESTAMP_FILES
for check_timestamp_file in check_timestamp_files:
check_timestamp_node = conf.get_bintemp_folder_node().make_node(check_timestamp_file)
check_timestamp_node.write('')
| 5,340,810
|
def load_spectrogram(spectrogram_path):
"""Load a cante100 dataset spectrogram file.
Args:
spectrogram_path (str): path to audio file
Returns:
np.array: spectrogram
"""
if not os.path.exists(spectrogram_path):
raise IOError("spectrogram_path {} does not exist".format(spectrogram_path))
parsed_spectrogram = np.genfromtxt(spectrogram_path, delimiter=' ')
spectrogram = parsed_spectrogram.astype(float)
return spectrogram
| 5,340,811
|
def evaluate_fN(model, NHI):
""" Evaluate an f(N,X) model at a set of NHI values
Parameters
----------
NHI : array
log NHI values
Returns
-------
log_fN : array
f(NHI,X) values
"""
# Evaluate without z dependence
log_fNX = model.__call__(NHI)
return log_fNX
| 5,340,812
|
def generate_volume_data(img_data):
"""
Generate volume data from img_data.
:param img_data: A NIfTI get_data() object; img_data[:][x][y][z] is the tensor matrix information of voxel (x, y, z).
:return: vtkImageData object which stores the volume data used for volume rendering.
"""
dims = [148, 190, 160] # size of input data. Temporarily only support test file.
#TODO: Modify the code to handle more files.
image = vtk.vtkImageData()
image.SetDimensions(dims[0] - 2 , dims[1] - 2 , dims[2] - 2 )
image.SetSpacing(1, 1, 1) # set spacing
image.SetOrigin(0, 0, 0)
image.SetExtent(0, dims[0] - 1, 0, dims[1] - 1, 0, dims[2] - 1)
image.AllocateScalars(vtk.VTK_UNSIGNED_SHORT, 1)
for z in range(0, dims[2]-1):
for y in range(0, dims[1]-1 ):
for x in range(0, dims[0]-1 ):
scalardata = img_data[0][x][y][z] # set confidence as each voxel's scalardata
image.SetScalarComponentFromFloat(x, y, z, 0, scalardata)
return image
| 5,340,813
|
def pipe(bill_texts_df):
"""
soup = bs(text, 'html.parser')
raw_text = extractRawText(soup)
clean_text = cleanRawText(raw_text)
metadata = extract_metadata(soup)
"""
bill_texts_df['soup'] = \
bill_texts_df['html'].apply(lambda x: bs(x, 'html.parser'))
bill_texts_df['content'] = \
bill_texts_df['soup'].apply(lambda x: extractRawText(x.body))
bill_texts_df['long_title'] = \
bill_texts_df['soup'].apply(lambda x: extractLongTitle(x.body))
bill_texts_df['table_info'] = \
bill_texts_df['soup'].apply(lambda x: extractTableContent(x.body))
return None
| 5,340,814
|
def _as_static(data, fs):
"""Get data into the Pyglet audio format."""
fs = int(fs)
if data.ndim not in (1, 2):
raise ValueError('Data must have one or two dimensions')
n_ch = data.shape[0] if data.ndim == 2 else 1
audio_format = AudioFormat(channels=n_ch, sample_size=16,
sample_rate=fs)
data = data.T.ravel('C')
data[data < -1] = -1
data[data > 1] = 1
data = (data * (2 ** 15)).astype('int16').tobytes()
return StaticMemorySourceFixed(data, audio_format)
| 5,340,815
|
def plot_success(exp_data_dict,
# task_names, exp_names,
title_str='Success Rate (%)', y_label_str="Success Rate (%)"):
"""
:param exp_data_dict: [name:list(1,2,3)]
:param task_names: ['Reaching task', 'Lifting task', ]
:param exp_names: ['PNO', 'PNR', 'POR']
:param title_str: 'Fig'
:param y_label_str: 'Success Rate (%)'
:return: None
"""
bar_width = 0.6
# Position of each of the experiments across the different tasks.
x_axis_tick = np.arange(len(exp_data_dict))
values = []
labels = []
for key, value in exp_data_dict.items():
values.append(value[0])
labels.append(key)
print("x_axis_tick:", x_axis_tick)
print("values:", values)
print("labels:", labels)
for index in range(len(exp_data_dict)):
rect_mean = plt.bar(x=x_axis_tick[index],
height=values[index],
width=bar_width,
align="center",
label=labels[index],
)
# Set the legend font size
# plt.legend(loc=0, numpoints=1)
plt.legend(loc='upper center',
borderaxespad=0.,
)
leg = plt.gca().get_legend()
text = leg.get_texts()
plt.setp(text, fontsize=legendFontSize)
# Label each bar with its mean value.
for i, y in enumerate(values):
plt.text(x_axis_tick[i], 1*y + 1.5, '%s' % round(y, 2), ha='center', fontsize=barFontSize)
plt.xticks(x_axis_tick, labels, fontsize=xTicksFontSize)
# Keep the y-axis ticks between 0 and 100, one tick every 20 units.
y_ticks = np.arange(0, 120, 20)
print("y_ticks:", y_ticks)
plt.yticks(y_ticks, fontsize=yTicksFontSize)
plt.ylabel(y_label_str, fontsize=yLabelFontSize)
# plt.title(title_str, fontsize=titleFontSize)
plt.show()
| 5,340,816
|
def preprocess_data(cubes, time_slice: dict = None):
"""Regrid the data to the first cube and optional time-slicing."""
# Increase TEST_REVISION anytime you make changes to this function.
if time_slice:
cubes = [extract_time(cube, **time_slice) for cube in cubes]
first_cube = cubes[0]
# regrid to first cube
regrid_kwargs = {
'grid': first_cube,
'scheme': iris.analysis.Nearest(),
}
cubes = [cube.regrid(**regrid_kwargs) for cube in cubes]
return cubes
| 5,340,817
|
def password_to_str(password):
"""
加密
:param password:
:return:
"""
def add_to_16(password):
while len(password) % 16 != 0:
password += '\0'
return str.encode(password) # returns bytes
key = 'saierwangluo' # secret key
aes = AES.new(add_to_16(key), AES.MODE_ECB) # initialize the AES cipher
des3 = DES3.new(add_to_16(key), DES3.MODE_ECB) # initialize the 3DES cipher
# AES encryption
encrypted_text = str(
base64.encodebytes(
aes.encrypt(add_to_16(password))), encoding='utf8'
).replace('\n', '')
des_encrypted_text = str(
base64.encodebytes(des3.encrypt(add_to_16(encrypted_text))), encoding='utf8'
).replace('\n', '') # 3DES encryption
# return the encrypted data
return des_encrypted_text
| 5,340,818
|
def email_checker_mailru(request: Request, email: str):
"""
This API check email from mail.ru<br>
<pre>
:return: JSON<br>
</pre>
Example:<br>
<br>
<code>
https://server1.majhcc.xyz/api/email/checker/mailru?email=oman4omani@mail.ru
</code>
"""
from src.Emails.checker.mailru import checker
# regex mail.ru
if re.match(r'^[a-zA-Z0-9.!#$%&’*+/=?^_`{|}~-]+@mail\.ru', email):
try:
result = checker(email)
if result:
return {
'status': 'success',
'available': True
}
elif not result:
return {
'status': 'success',
'available': False
}
elif result == None:
return {
'status': 'error please try again or contact us ==> instagram: @majhcc'
}
else:
return {
'status': 'error please try again or contact us ==> instagram: @majhcc'
}
except Exception as e:
data = {
'content': f'Check email from mail.ru api Error: ***{str(e)}***'
}
requests.post(WEBHOOKURL, data=data)
return {
'status': 'error please try again or contact us ==> instagram: @majhcc'}
else:
return {
'status': 'error',
'result': 'Invalid email'
}
| 5,340,819
|
def guild_only() -> Callable:
"""A :func:`.check` that indicates this command must only be used in a
guild context only. Basically, no private messages are allowed when
using the command.
This check raises a special exception, :exc:`.NoPrivateMessage`
that is inherited from :exc:`.CheckFailure`.
"""
def predicate(ctx: InteractionContext) -> bool:
if ctx.guild is None:
raise NoPrivateMessage()
return True
return check(predicate)
| 5,340,820
|
def make_lvis_metrics(
save_folder=None,
filename_prefix="model_output",
iou_types: Union[str, List[str]] = "bbox",
summarize_to_stdout: bool = True,
evaluator_factory: Callable[
[Any, List[str]], DetectionEvaluator
] = LvisEvaluator,
gt_api_def: Sequence[
SupportedDatasetApiDef
] = DEFAULT_SUPPROTED_DETECTION_DATASETS,
):
"""
Returns an instance of :class:`DetectionMetrics` initialized for the LVIS
dataset.
:param save_folder: path to the folder where to write model output
files. Defaults to None, which means that the model output of
test instances will not be stored.
:param filename_prefix: prefix common to all model outputs files.
Ignored if `save_folder` is None. Defaults to "model_output"
:param iou_types: list of (or a single string) strings describing
the iou types to use when computing metrics.
Defaults to "bbox". Valid values are "bbox" and "segm".
:param summarize_to_stdout: if True, a summary of evaluation metrics
will be printed to stdout (as a table) using the Lvis API.
Defaults to True.
:param evaluator_factory: Defaults to :class:`LvisEvaluator` constructor.
:param gt_api_def: Defaults to the list of supported datasets (LVIS is
supported in Avalanche through :class:`LvisDataset`).
:return: A metric plugin that can compute metrics on the LVIS dataset.
"""
return DetectionMetrics(
evaluator_factory=evaluator_factory,
gt_api_def=gt_api_def,
save_folder=save_folder,
filename_prefix=filename_prefix,
iou_types=iou_types,
summarize_to_stdout=summarize_to_stdout,
)
| 5,340,821
|
def _calculate_cos_loop(graph, threebody_cutoff=4.0):
"""
Calculate the cosine theta of triplets using loops
Args:
graph: List
Returns: a list of cosine theta values
"""
pair_vector = get_pair_vector_from_graph(graph)
_, _, n_sites = tf.unique_with_counts(graph[Index.BOND_ATOM_INDICES][:, 0])
start_index = 0
cos = []
for n_site in n_sites:
for i in range(n_site):
for j in range(n_site):
if i == j:
continue
vi = pair_vector[i + start_index].numpy()
vj = pair_vector[j + start_index].numpy()
di = np.linalg.norm(vi)
dj = np.linalg.norm(vj)
if (di <= threebody_cutoff) and (dj <= threebody_cutoff):
cos.append(vi.dot(vj) / (di * dj))
start_index += n_site
return cos
| 5,340,822
|
def load_plugin():
""" Returns plugin available in this module
"""
return HostTestPluginCopyMethod_Firefox()
| 5,340,823
|
def temp_url_page(rid):
"""
Temporary page where receipts are stored. The user, which visits it first, get the receipt.
:param rid: (str) receipt id (user is assigned to receipt with this id)
"""
if not user_handler.assign_rid_user(rid, flask.session['username']):
logging.warning('Trying to steal receipt! {ip} has visited page: {url}! Cancelling request!'.
format(ip=flask.request.remote_addr, url=flask.request.url))
flask.abort(400)
return
return flask.redirect(flask.url_for('dashboard_page'))
| 5,340,824
|
def extract_dependencies(content):
"""
Extract the dependencies from the CMake code.
The `find_package()` and `pkg_check_modules` calls must be on a single line
and the first argument must be a literal string for this function to be
able to extract the dependency name.
:param str content: The CMake source code
:returns: The dependencies name
:rtype: list
"""
return \
extract_find_package_calls(content) | \
_extract_pkg_config_calls(content)
| 5,340,825
|
def _block(x, out_channels, name, conv=conv2d, kernel=(3, 3), strides=(2, 2), dilations=(1, 1), update_collection=None,
act=tf.nn.leaky_relu, pooling='avg', padding='SAME', batch_norm=False):
"""Builds the residual blocks used in the discriminator in GAN.
Args:
x: The 4D input vector.
out_channels: Number of features in the output layer.
name: The variable scope name for the block.
conv: Convolution function. Options conv2d or snconv2d
kernel: The height and width of the convolution kernel filter (Default value = (3, 3))
strides: Rate of convolution strides (Default value = (2, 2))
dilations: Rate of convolution dilation (Default value = (1, 1))
update_collection: The update collections used in the in the spectral_normed_weight. (Default value = None)
act: The activation function used in the block. (Default value = tf.nn.leaky_relu)
pooling: Strategy of pooling. Default: average pooling. Otherwise, no pooling, just using strides
padding: Padding type (Default value = 'SAME')
batch_norm: A flag that determines if batch norm should be used (Default value = False)
Returns:
A tensor representing the output of the operation.
"""
with tf.variable_scope(name):
if batch_norm:
bn0 = BatchNorm(name='bn_0')
bn1 = BatchNorm(name='bn_1')
input_channels = x.shape.as_list()[-1]
x_0 = x
x = conv(x, out_channels, kernel, dilations=dilations, name='conv1', padding=padding)
if batch_norm:
x = bn0(x)
x = act(x, name="before_downsampling")
x = down_sampling(x, conv, pooling, out_channels, kernel, strides, update_collection, 'conv2', padding)
if batch_norm:
x = bn1(x)
if strides[0] > 1 or strides[1] > 1 or input_channels != out_channels:
x_0 = down_sampling(x_0, conv, pooling, out_channels, kernel, strides, update_collection, 'conv3',
padding)
out = x_0 + x # No RELU: http://torch.ch/blog/2016/02/04/resnets.html
return out
| 5,340,826
|
def createLaplaceGaussianKernel(sigma, size):
"""构建高斯拉普拉斯卷积核
Args:
sigma ([float]): 高斯函数的标准差
size ([tuple]): 高斯核的大小,奇数
Returns:
[ndarray]: 高斯拉普拉斯卷积核
"""
H, W = size
r, c = np.mgrid[0:H:1, 0:W:1]
r = r - (H - 1) / 2
c = c - (W - 1) / 2
sigma2 = pow(sigma, 2.0)
norm2 = np.power(r, 2.0) + np.power(c, 2.0)
LoGKernel = (norm2 / sigma2 - 2)*np.exp(-norm2 / (2 * sigma2))
return LoGKernel
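# Illustrative usage (hypothetical values; assumes `numpy as np` is imported at module level):
#   kernel = createLaplaceGaussianKernel(sigma=1.0, size=(5, 5))
#   kernel.shape  ->  (5, 5)   # 5 x 5 LoG kernel centred on the middle pixel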
| 5,340,827
|
def test_datafile_success(session_obj):
"""
Normally a good practice is to have expected response as a string like in other tests.
Here we are exceptionally making expected response a dict for easier comparison.
String was causing some issues with extra white space characters.
:param session_obj: session object
"""
payload = '{"userId": "matjaz", "userAttributes": {"attr_1": "hola"}}'
params = {"featureKey": "feature_1"}
resp = create_and_validate_request_and_response(ENDPOINT_DATAFILE, 'get', session_obj,
bypass_validation_request=False,
payload=payload, params=params)
assert expected_response == resp.json()
assert resp.status_code == 200, resp.text
| 5,340,828
|
def test_DeAST_class():
"""Can the DeAST class be instantiated?"""
from ast import NodeVisitor
from deast import DeAST
deaster = DeAST()
assert isinstance(deaster, DeAST)
assert isinstance(deaster, NodeVisitor)
| 5,340,829
|
def is_file_type(fpath, filename, ext_list):
"""Returns true if file is valid, not hidden, and has extension of given type"""
file_parts = filename.split('.')
# invalid file
if not os.path.isfile(os.path.join(fpath, filename)):
return False
# hidden file
elif filename.startswith('.'):
return False
# no extension
elif len(file_parts) < 2:
return False
# check file type
extension = file_parts[-1].lower()
return extension in ext_list
| 5,340,830
|
def xp_rirgen2(room, source_loc, mic_loc, c=340, fs=16000, t60=0.5,
beta=None, nsamples=None, htw=None, hpfilt=True, method=1):
"""Generates room impulse responses corresponding to each source-microphone pair placed in a room.
Args:
room (numpy/cupy array) = room dimensions in meters, shape: (3, 1)
source_loc (numpy/cupy array) = source locations in meters, shape: (3, nsrc)
mic_loc (numpy/cupy array) = microphone locations in meters, shape: (3, nmic)
kwargs:
c (float) = speed of sound in meters/second (default: 340)
fs (float) = sampling rate in Hz (default: 16000)
t60 (float) = t60 or rt60 in seconds or None to use beta parameters (default: 0.5)
beta (numpy/cupy array) = beta parameters of reflections for each side, shape (6,1) (default: None)
nsamples (int) = number of output samples (default: auto from t60)
htw (int) = half size in samples of the time window used for sinc function interpolation (default automatic)
hpfilt (bool) = use post-generation highpass filter or not (default True)
method (int) = 1 or 2, 2 is not tested thoroughly and is very slow, so use 1 always (default 1)
Returns:
room impulse responses in time-domain of shape (nsrc, nmic, nsamples)
Notes:
1. If input arrays are cupy arrays (on GPU), the code runs with cupy, otherwise with numpy
2. if you do not want to install cupy or not interested in GPU processing,
remove line "import cupy" and replace "xp=cupy.get..." with "xp=np"
.. seealso:: :func:`pyrirgen.RirGenerator`
.. seealso:: :url:https://github.com/ehabets/RIR-Generator/blob/master/rir_generator.cpp
>>> ### DOCTEST ###
>>> room = np.array([4,7,3]).reshape(3,1)
>>> source_loc = np.random.uniform(0,1,(3,2)) * room
>>> mic_loc = np.random.uniform(0,1,(3,4)) * room
>>> t60=0.3
>>> rirs_np = xp_rirgen(room, source_loc, mic_loc, t60=t60)
>>> #import matplotlib.pyplot as plt
>>> #plt.plot(rirs_np[0,0,:] , label='rir for src1 and mic1')
>>> croom = cupy.array(room)
>>> csource_loc = cupy.array(source_loc)
>>> cmic_loc = cupy.array(mic_loc)
>>> rirs_cp = xp_rirgen(croom, csource_loc, cmic_loc, t60=t60)
>>> cupy.testing.assert_allclose(rirs_np, cupy.asnumpy(rirs_cp), atol=1e-5, rtol=1e-5)
>>> beta = np.random.uniform(0.1, 0.9, size=6)
>>> rirs_np = xp_rirgen(room, source_loc, mic_loc, beta=beta, t60=None)
>>> cbeta = cupy.array(beta)
>>> rirs_cp = xp_rirgen(croom, csource_loc, cmic_loc, beta=cbeta, t60=None)
>>> cupy.testing.assert_allclose(rirs_np, cupy.asnumpy(rirs_cp), atol=1e-5, rtol=1e-5)
"""
xp = cupy.get_array_module(room, source_loc, mic_loc, beta)  # replace with "xp = np" if cupy is not installed
if beta is None and t60 is None:
raise Exception('Either t60 or beta array must be provided')
elif beta is None:
V = xp.prod(room)
S = 2 * (room[0] * room[2] + room[1] * room[2] + room[0] * room[1])
alpha = 24 * V * xp.log(10) / (c * S * t60)
if alpha < 1:
beta = xp.ones(6, ) * xp.sqrt(1 - alpha)
else:
raise Exception('t60 value {} too small for the room'.format(t60))
else:
if xp.max(beta) >= 1.0 or xp.min(beta) <= 0.0:
raise Exception('beta array values should be in the interval (0,1).')
if t60 is not None:
print('Overwriting provided t60 value using provided beta array')
alpha = 1 - beta**2
V = xp.prod(room)
Se = 2 * (room[1] * room[2] * (alpha[0] + alpha[1]) + room[0] * room[2] * (alpha[2] + alpha[3]) + room[0] * room[1] * (alpha[4] + alpha[5]))
t60 = 24 * xp.log(10.0) * V / (c * Se)
if htw is None:
htw = np.minimum(32, int(xp.min(room) / 10 / c * fs))
tw_idx = xp.arange(0, 2 * htw).reshape(2 * htw, 1)
try:
assert(xp.all(room.T - mic_loc.T > 0) and xp.all(room.T - source_loc.T > 0))
assert(xp.all(mic_loc.T > 0) and xp.all(source_loc.T > 0))
except:
raise Exception('Room dimensions and source and mic locations are not compatible.')
cTs = c / fs
# convert distances in meters to time-delays in samples
room = room / cTs
mic_loc = mic_loc / cTs
src_loc = source_loc / cTs
nmic = mic_loc.shape[-1]
nsrc = source_loc.shape[-1]
if nsamples is None:
nsamples = int(fs * t60)
def get_reflection_candidates():
nxrefl = int(nsamples / (room[0]))
nyrefl = int(nsamples / (room[1]))
nzrefl = int(nsamples / (room[2]))
xro = xp.arange(-nxrefl, nxrefl + 1)
yro = xp.arange(-nyrefl, nyrefl + 1)
zro = xp.arange(-nzrefl, nzrefl + 1)
xr = xro.reshape(2 * nxrefl + 1, 1, 1)
yr = yro.reshape(1, 2 * nyrefl + 1, 1)
zr = zro.reshape(1, 1, 2 * nzrefl + 1)
RoughDelays = xp.sqrt((2 * xr * room[0]) ** 2 + (2 * yr * room[1]) ** 2 + (2 * zr * room[2]) ** 2)
RoughGains = (beta[0] * beta[1]) ** xp.abs(xr) * (beta[2] * beta[3]) ** xp.abs(yr) * (beta[4] * beta[5]) ** xp.abs(zr) / (
RoughDelays + 0.5 / c * fs) # assume src-mic distance at least .5 metres
maxgain = xp.max(RoughGains)
vreflidx = xp.vstack(xp.nonzero(xp.logical_and(RoughDelays < nsamples, RoughGains > maxgain / 1.0e4)))
nrefl = vreflidx.shape[-1]
reflidx = xp.arange(nrefl).reshape(1, 1, nrefl, 1, 1, 1)
xrefl = xro[vreflidx[..., reflidx][0]]
yrefl = yro[vreflidx[..., reflidx][1]]
zrefl = zro[vreflidx[..., reflidx][2]]
return xrefl, yrefl, zrefl
xrefl, yrefl, zrefl = get_reflection_candidates()
def get_delays_and_gains():
xside = xp.arange(0, 2).reshape(1, 1, 1, 2, 1, 1)
yside = xp.arange(0, 2).reshape(1, 1, 1, 1, 2, 1)
zside = xp.arange(0, 2).reshape(1, 1, 1, 1, 1, 2)
imic = xp.arange(nmic).reshape(1, nmic, 1, 1, 1, 1)
isrc = xp.arange(nsrc).reshape(nsrc, 1, 1, 1, 1, 1)
Delays = xp.sqrt((2 * xrefl * room[0] - mic_loc[0, imic] + (1 - 2 * xside) * src_loc[0, isrc]) ** 2 + (2 * yrefl * room[1] - mic_loc[1, imic] + (1 - 2 * yside) * src_loc[1, isrc]) ** 2 + (2 * zrefl * room[2] - mic_loc[2, imic] + (1 - 2 * zside) * src_loc[2, isrc]) ** 2)
Refl_x = beta[0] ** (xp.abs(xrefl - xside)) * beta[1] ** (xp.abs(xrefl))
Refl_y = beta[2] ** (xp.abs(yrefl - yside)) * beta[3] ** (xp.abs(yrefl))
Refl_z = beta[4] ** (xp.abs(zrefl - zside)) * beta[5] ** (xp.abs(zrefl))
Gains = Refl_x * Refl_y * Refl_z / (4 * np.pi * Delays * cTs)
# Gains[Delays > nsamples] = 0.0
return Delays, Gains
Delays, Gains = get_delays_and_gains()
rirs = xp.zeros((nsrc, nmic, nsamples), dtype=np.float32)
for src in xp.arange(nsrc):
for mic in xp.arange(nmic):
dnow = Delays[src, mic, ...].flatten()
gnow = Gains[src, mic, ...].flatten()
if method == 1:
gnow = gnow[dnow < nsamples - htw - 2]
dnow = dnow[dnow < nsamples - htw - 2]
dnow_floor = xp.floor(dnow)
dnow_dist = dnow - dnow_floor
dnow_floor = dnow_floor.reshape(1, dnow.shape[0])
dnow_dist = dnow_dist.reshape(1, dnow.shape[0])
gnow = gnow.reshape(1, dnow.shape[0])
dnow_ext = dnow_floor + tw_idx - htw + 1
garg = np.pi * (-dnow_dist + 1 + tw_idx - htw)
gnow_ext = gnow * 0.5 * (1.0 - xp.cos(np.pi + garg / htw)) * xp.where(garg == 0.0, 1.0, xp.sin(garg) / garg)
dnow = dnow_ext.flatten().astype(np.uint32)
gnow = gnow_ext.flatten().astype(np.float32)
rirnow = xp.zeros((nsamples,), dtype=np.float32)
if xp == np:
np.add.at(rirnow, dnow, gnow)
else:
xp.scatter_add(rirnow, dnow, gnow)
rirs[src, mic, ...] = rirnow
elif method == 2: ## this is too slow and may not be accurate as well
gnow = gnow[dnow < nsamples]
dnow = dnow[dnow < nsamples]
frange = xp.arange(0, 0.5 + 0.5 / nsamples, 1.0 / nsamples)
rirfft = xp.zeros(frange.shape, dtype=np.complex128)
for i in range(len(frange)):
rirfft[i] = xp.sum(gnow * xp.exp(-1j * 2 * np.pi * frange[i] * dnow))
rirs[src, mic, :] = xp.real(xp.fft.irfft(rirfft)).astype(dtype=np.float32)
if hpfilt:
rirs[:, :, 1:-1] += -0.5 * rirs[:, :, 2:] -0.5 * rirs[:, : , :-2]
return rirs
| 5,340,831
|
def mean_bias_removal(hindcast, alignment, cross_validate=True, **metric_kwargs):
"""Calc and remove bias from py:class:`~climpred.classes.HindcastEnsemble`.
Args:
hindcast (HindcastEnsemble): hindcast.
alignment (str): which inits or verification times should be aligned?
- maximize/None: maximize the degrees of freedom by slicing ``hind`` and
``verif`` to a common time frame at each lead.
- same_inits: slice to a common init frame prior to computing
metric. This philosophy follows the thought that each lead should be
based on the same set of initializations.
- same_verif: slice to a common/consistent verification time frame prior
to computing metric. This philosophy follows the thought that each lead
should be based on the same set of verification dates.
cross_validate (bool): Use properly defined mean bias removal function. This
excludes the given initialization from the bias calculation. With False,
include the given initialization in the calculation, which is much faster
but yields similar skill with a large N of initializations.
Defaults to True.
Returns:
HindcastEnsemble: bias removed hindcast.
"""
if hindcast.get_initialized().lead.attrs["units"] != "years":
warnings.warn(
"HindcastEnsemble.remove_bias() is still experimental and is only tested "
"for annual leads. Please consider contributing to "
"https://github.com/pangeo-data/climpred/issues/605"
)
def bias_func(a, b, **kwargs):
return a - b
bias_metric = Metric("bias", bias_func, True, False, 1)
# calculate bias lead-time dependent
bias = hindcast.verify(
metric=bias_metric,
comparison="e2o",
dim=[], # not used by bias func, therefore best to add [] here
alignment=alignment,
**metric_kwargs,
).squeeze()
# how to remove bias
if cross_validate: # more correct
mean_bias_func = _mean_bias_removal_cross_validate
else: # faster
mean_bias_func = _mean_bias_removal_quick
bias_removed_hind = mean_bias_func(hindcast._datasets["initialized"], bias, "init")
bias_removed_hind = bias_removed_hind.squeeze()
# remove groupby label from coords
for c in ["dayofyear", "skill", "week", "month"]:
if c in bias_removed_hind.coords and c not in bias_removed_hind.dims:
del bias_removed_hind.coords[c]
# replace raw with bias-reduced initialized dataset
hindcast_bias_removed = hindcast.copy()
hindcast_bias_removed._datasets["initialized"] = bias_removed_hind
return hindcast_bias_removed
| 5,340,832
|
def request_sudoku_valid(sudoku: str) -> bool:
"""valid request"""
is_valid = False
provider_request = requests.get(f"{base_url}/valid/{sudoku}")
if provider_request.status_code == 200:
request_data = provider_request.json()
is_valid = request_data["result"]
# TODO: else raise exception
return is_valid
| 5,340,833
|
def index():
"""
Main view
"""
return "<i>API RestFull PARCES Version 0.1</i>"
| 5,340,834
|
def show_config(ctx):
"""
Prints the resolved config to the console
"""
click.echo(ctx.obj["config"].to_yaml())
| 5,340,835
|
def tag(dicts, key, value):
"""Adds the key value to each dict in the sequence"""
for d in dicts:
d[key] = value
return dicts
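# Illustrative usage (hypothetical dicts):
#   tag([{"a": 1}, {"a": 2}], "source", "csv")
#   ->  [{"a": 1, "source": "csv"}, {"a": 2, "source": "csv"}]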
| 5,340,836
|
def init_emitter():
"""Ensure emit is always clean, and initted (in test mode).
Note that the `init` is done in the current instance that all modules already
acquired.
"""
# init with a custom log filepath so user directories are not involved here; note that
# we're not using pytest's standard tmp_path as Emitter would write logs there, and in
# effect we would be polluting that temporary directory (potentially messing with
# tests, that may need that empty), so we use another one.
temp_fd, temp_logfile = tempfile.mkstemp(prefix="emitter-logs")
os.close(temp_fd)
temp_logfile = pathlib.Path(temp_logfile)
messages.TESTMODE = True
messages.emit.init(
messages.EmitterMode.QUIET, "test-emitter", "Hello world", log_filepath=temp_logfile
)
yield
# end machinery (just in case it was not ended before; note it's ok to "double end")
messages.emit.ended_ok()
temp_logfile.unlink()
| 5,340,837
|
def plot_roc(fpr, tpr):
"""Plot the ROC curve"""
plt.plot(fpr, tpr)
plt.title('Receiver Operating Characteristic (ROC)')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.savefig(Directories.ROC_CURVE_DIR.value)
plt.show()
plt.close()
| 5,340,838
|
def openpairshelf(filename, flag='c', protocol=None, writeback=False):
"""Returns a ProteinPairDB object, with similar functionality to shelve.open()"""
return ProteinPairDB(filename, flag, protocol, writeback)
| 5,340,839
|
def createUser(emailid, password, contact_no, firstname, lastname, category, address, description, company_url, image_url, con=None, cur=None, db=None):
"""
Tries to create a new user with the given data.
Returns:
- dict: dict object containing all user data, if query was successful
- False: If query was unsuccessful
"""
sql = """Insert into users(
emailid,
password,
firstname,
lastname,
contact_no,
category,
address,
description,
company_url,
image_url
) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
db(sql, (emailid,
password,
firstname,
lastname,
contact_no,
category,
address,
description,
company_url,
image_url))
con.commit()
# close database connection
user = getUserUsingEmail(emailid)
return user or False
| 5,340,840
|
def sensor_pull_storage(appname, accesskey, timestring, *, data_folder=None, ttn_version=3):
"""
Pull data from TTN via the TTN storage API.
appname is the name of the TTN app
accesskey is the full accesskey from ttn. For TTN V3, this is is the
secret that is output when a key is created. For TTN V2, this is
the string from the console, starting with 'ttn-account-v2.'
timestring indicates amount of data needed, e.g. '100h'.
ttn_version should be 2 or 3; 3 is default.
If data_folder is supplied, it is a string or a Path; it is taken as a directory,
and the name "sensors_lastperiod.json" is appended to form an output file name, and
the data is written to the resulting file, replacing any previous contents.
Otherwise, the data is returned as a Python array (for V3) or a string (for V2).
We've not really tested V2 extensively.
"""
args = [ "curl" ]
if ttn_version == 2:
args += [
"-X", "GET",
"--header", "Accept: application/json",
"--header", f"Authorization: key {accesskey}",
f"https://{appname}.data.thethingsnetwork.org/api/v2/query?last={timestring}"
]
elif ttn_version == 3:
args += [
"-G", f"https://nam1.cloud.thethings.network/api/v3/as/applications/{appname}/packages/storage/uplink_message",
"--header", f"Authorization: Bearer {accesskey}",
"--header", "Accept: text/event-stream",
"-d", f"last={timestring}",
"-d", "field_mask=up.uplink_message.decoded_payload",
]
else:
raise FetchError(f"Illegal ttn_version (not 2 or 3)")
# if the user supplied a data_folder, then tack on the output-file args.
# list1 += list2 syntax means "append each element of list2 to list 1"
# pathlib.Path allows combining the folder and the file name portably
if data_folder is not None:
args += [ "-o", pathlib.Path(data_folder, "sensors_lastperiod.json") ]
result = subprocess.run(
args, shell=False, check=True, capture_output=True
)
sresult = result.stdout
if ttn_version == 3:
return list(map(json.loads, re.sub(r'\n+', '\n', sresult.decode()).splitlines()))
else:
return sresult
| 5,340,841
|
async def discordView(cls:"PhaazebotWeb", WebRequest:ExtendedRequest) -> Response:
"""
Default url: /discord/view/{guild_id:\d+}
"""
PhaazeDiscord:"PhaazebotDiscord" = cls.BASE.Discord
if not PhaazeDiscord:
return await cls.Tree.errors.notAllowed(cls, WebRequest, msg="Discord module is not active")
guild_id:str = WebRequest.match_info.get("guild_id", "")
Guild:discord.Guild = discord.utils.get(PhaazeDiscord.guilds, id=int(guild_id))
if not Guild:
return await cls.Tree.Discord.discordinvite.discordInvite(WebRequest, msg=f"Phaaze is not on this Server", guild_id=guild_id)
ViewPage:HTMLFormatter = HTMLFormatter("Platforms/Web/Content/Html/Discord/view.html")
ViewPage.replace(
guild_id=Guild.id,
guild_icon_url=Guild.icon_url,
guild_name=Guild.name
)
site:str = cls.HTMLRoot.replace(
replace_empty=True,
title="Phaaze | Discord - View",
header=getNavbar(active="discord"),
main=ViewPage
)
return cls.response(
body=site,
status=200,
content_type='text/html'
)
| 5,340,842
|
def update_max_braking_decel(vehicle, mbd):
"""
Updates the max braking decel of the vehicle
:param vehicle: vehicle
:param mbd: new max braking decel
:type vehicle: VehicleProfile
:return: Updated vehicle
"""
return vehicle.update_max_braking_decel(mbd)
| 5,340,843
|
def get_outlier_removal_mask(xcoords, ycoords, nth_neighbor=10, quantile=.9):
"""
Parameters
----------
xcoords :
ycoords :
nth_neighbor :
(Default value = 10)
quantile :
(Default value = .9)
Returns
-------
"""
from scipy.spatial.distance import pdist, squareform
D = squareform(pdist(np.vstack((xcoords, ycoords)).T))
distances = D[np.argsort(D, axis=0)[nth_neighbor - 1, :], 0]
return distances <= np.quantile(distances, quantile)
| 5,340,844
|
def _kp(a, b):
"""Special case Kronecker tensor product of a[i] and b[i] at each
time interval i for i = 0 .. N-1
It is specialized for the case where both a and b are shape N x m x 1
"""
if a.shape != b.shape or a.shape[-1] != 1:
raise(ValueError)
N = a.shape[0]
# take the outer product over the last two axes, then reshape:
return np.einsum('ijk,ilk->ijkl', a, b).reshape(N, -1, 1)
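# Illustrative usage (hypothetical shapes; assumes `numpy as np` is imported at module level):
#   a = np.ones((4, 3, 1)); b = np.ones((4, 3, 1))
#   _kp(a, b).shape  ->  (4, 9, 1)   # per-interval outer product of the two m x 1 columns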
| 5,340,845
|
def omics():
"""Manage -omics."""
| 5,340,846
|
def strategy(history, alivePlayers, whoami, memory):
"""
history contains all previous rounds (key : id of player (shooter), value : id of player (target))
alivePlayers is a list of all player ids
whoami is your own id (to not kill yourself by mistake)
memory is None by default and transferred over (if you set it to 1, it will be 1 in the next round)
memory is NOT shared between games (subject to changes)
"""
# Your code would be here but this strategy is dumb...
"""
You must return an id of a player (if not : you shoot in the air)
Memory must be set to something but can be anything (None included )
"""
return alivePlayers[0], None
| 5,340,847
|
def parse_fn(serialized_example: bytes) -> FeaturesType:
"""Parses and converts Tensors for this module's Features.
This casts the audio_raw_pcm16 feature to float32 and scales it into the range
[-1.0, 1.0].
Args:
serialized_example: A serialized tf.train.ExampleProto with the features
dict keys declared in the :py:class:Features enum.
Returns:
Tensor-valued dict of features. The keys are those declared in the
:py:class:Features enum.
"""
features = tf.io.parse_single_example(
serialized_example, {f.value.name: f.value.spec for f in Features})
audio_key: str = Features.AUDIO.value.name
features[audio_key] = tf.cast(tf.io.decode_raw(features[audio_key], tf.int16),
tf.float32) / np.iinfo(np.int16).max
return features
| 5,340,848
|
def cli(ctx: click.Context) -> int:
"""
Method used to declare root CLI command through decorators.
"""
return 0
| 5,340,849
|
def parse_clock(line):
"""Parse clock information"""
search = parse(REGEX_CLOCK, line)
if search:
return int(search.group('clock'))
else:
return None
| 5,340,850
|
def block_latest(self, **kwargs):
"""
Return the latest block available to the backends, also known as the tip of the blockchain.
https://docs.blockfrost.io/#tag/Cardano-Blocks/paths/~1blocks~1latest/get
:param return_type: Optional. "object", "json" or "pandas". Default: "object".
:type return_type: str
:returns BlockResponse object.
:rtype BlockResponse
:raises ApiError: If API fails
:raises Exception: If the API response is somehow malformed.
"""
return requests.get(
url=f"{self.url}/blocks/latest",
headers=self.default_headers
)
| 5,340,851
|
def get_courses():
"""
Route to display all courses
"""
params = format_dict(request.args)
if params:
try:
result = Course.query.filter_by(**params).order_by(Course.active.desc())
except InvalidRequestError:
return { 'message': 'One or more parameter(s) does not exist' }, 400
else:
result = Course.query.order_by(Course.active.desc())
return { "courses": [c.serialize for c in result] }
| 5,340,852
|
def zero_adam_param_states(state: flax.optim.OptimizerState, selector: str):
"""Applies a gradient for a set of parameters.
Args:
state: a named tuple containing the state of the optimizer
selector: a path string defining which parameters to freeze.
Returns:
A tuple containing the new parameters and the new optimizer state.
"""
step = state.step
params = flax.core.unfreeze(state.param_states)
flat_params = {
"/".join(k): v for k, v in traverse_util.flatten_dict(params).items()
}
for k in flat_params:
if k.startswith(selector):
v = flat_params[k]
# pylint: disable=protected-access
flat_params[k] = flax.optim.adam._AdamParamState(
jnp.zeros_like(v.grad_ema), jnp.zeros_like(v.grad_sq_ema)
)
new_param_states = traverse_util.unflatten_dict(
{tuple(k.split("/")): v for k, v in flat_params.items()}
)
new_param_states = dict(flax.core.freeze(new_param_states))
new_state = flax.optim.OptimizerState(step, new_param_states)
return new_state
| 5,340,853
|
def main():
""" Main Function.
"""
inputQuery = input("Enter query string:")
tic = time.time()
docsAsShingleSets, allShingles, PostingDict, docIDlist = shingle(inputQuery)
toc = time.time()
print("Time taken = ", toc-tic)
tic = time.time()
matrix = matrixGenerator(allShingles,PostingDict)
print(matrix)
toc = time.time()
print("Time taken = ", toc-tic)
tic = time.time()
sign_matrix = find_sign_matrix(matrix,len(allShingles))
toc = time.time()
print("Time taken = ", toc-tic)
tic = time.time()
BANDS=20
docth,buckets = lsh(BANDS,docIDlist,sign_matrix)
toc = time.time()
print("Time taken = ", toc-tic)
query_id = len(docsAsShingleSets)-1
inputDocID=query_id
tic = time.time()
sim_docs = get_similar(int(inputDocID),docIDlist,buckets,docth,docsAsShingleSets,sign_matrix)
print("\n Calculating Jaccard similarities....\n")
found = 0
for sim, doc in sim_docs:
if sim >= threshold:
found = 1
print('Document Name: ' + str(doc), 'Similarity: ' + str(sim) + '\n')
if found == 0:
print("NO similar docs for the given threshold")
toc = time.time()
print("Time taken = ", toc-tic)
| 5,340,854
|
def leftFitNormal(population):
"""
Obtain mode and standard deviation from the left side of a population.
>>> pop = np.random.normal(loc=-20, scale=3, size=15000)
>>> mode, sigma = leftFitNormal(pop)
>>> -22 < mode < -18
True
>>> round(sigma)
3
>>> pop[pop > -18] += 10 # perturb right side
>>> mode, sigma = leftFitNormal(pop)
>>> -22 < mode < -18
True
>>> round(sigma) == 3
True
>>> pop[pop < -22] -= 10 # perturb left side
>>> mode, sigma = leftFitNormal(pop)
>>> -22 < mode < -18
True
>>> round(sigma) == 3
False
"""
# TODO: Can this function be omitted?
# Quick alternative robust fit:
# median = np.nanmedian(population)
# MADstd = np.nanmedian(np.abs(population - median)) * 1.4826
# Could still modify this estimator to ignore samples > median.
# Note, if the distribution is right-skewed or bimodal (e.g. if there is
# some land amongst mostly open water) then other relative frequencies
# will proportionally be depressed, favouring the fit of a broader
# Gaussian (perhaps also shifted slightly rightward) to the left side
# of the histogram (compared to if the distribution was normal).
# Could address this by normalising the interval area.
#
# Currently the tests for perturbed distributions bypass this limitation
# by _conditionally_ replacing existing samples, rather than by mixing
# additional components into the population i.e. avoiding
# pop[:5000] = np.linspace(-15, -5, 5000).
std = np.nanstd(population) # naive initial estimate
Y, X = hist_fixedwidth(population)
# Take left side of distribution
pos = Y.argmax()
mode = X[pos]
X = X[:pos+1]
Y = Y[:pos+1]
# fit gaussian to (left side of) distribution
def gaussian(x, mean, sigma):
return np.exp(-0.5 * ((x - mean)/sigma)**2) / (sigma * (2*np.pi)**0.5)
(mean, std), cov = scipy.optimize.curve_fit(gaussian, X, Y, p0=[mode, std])
return mode, std
| 5,340,855
|
def test_neg_init():
""" Tests that init does not run main on package import """
with mock.patch.object(__main__, "__name__", "not-main"):
assert __main__.init() is None
| 5,340,856
|
def get_stock_market_list(corp_cls: str, include_corp_name=True) -> dict:
""" 상장 회사 dictionary 반환
Parameters
----------
corp_cls: str
Y: stock market(코스피), K: kosdaq market(코스닥), N: konex Market(코넥스)
include_corp_name: bool, optional
if True, returning dictionary includes corp_name(default: True)
Returns
-------
dict of {stock_code: information}
상장 회사 정보 dictionary 반환( 회사 이름, 섹터, 물품)
"""
if corp_cls.upper() == 'E':
raise ValueError('ETC market is not supported')
corp_cls_to_market = {
"Y": "stockMkt",
"K": "kosdaqMkt",
"N": "konexMkt",
}
url = 'http://kind.krx.co.kr/corpgeneral/corpList.do'
referer = 'http://kind.krx.co.kr/corpgeneral/corpList.do?method=loadInitPage'
market_type = corp_cls_to_market[corp_cls.upper()]
payload = {
'method': 'download',
'pageIndex': 1,
'currentPageSize': 5000,
'orderMode': 3,
'orderStat': 'D',
'searchType': 13,
'marketType': market_type,
'fiscalYearEnd': 'all',
'location': 'all',
}
stock_market_list = dict()
resp = request.post(url=url, payload=payload, referer=referer)
html = BeautifulSoup(resp.text, 'html.parser')
rows = html.find_all('tr')
for row in rows:
cols = row.find_all('td')
if len(cols) > 0:
corp_name = cols[0].text.strip()
stock_code = cols[1].text.strip()
sector = cols[2].text.strip()
product = cols[3].text.strip()
corp_info = {'sector': sector, 'product': product, 'corp_cls': corp_cls}
if include_corp_name:
corp_info['corp_name'] = corp_name
stock_market_list[stock_code] = corp_info
return stock_market_list
| 5,340,857
|
def evaluate_template(template: dict) -> dict:
"""
This function resolves the template by parsing the T2WML expressions
and replacing them by the class trees of those expressions
:param template:
:return:
"""
response = dict()
for key, value in template.items():
if key == 'qualifier':
response[key] = []
for i in range(len(template[key])):
temp_dict = dict()
for k, v in template[key][i].items():
if isinstance(v, (ItemExpression, ValueExpression, BooleanEquation)):
col, row, temp_dict[k] = v.evaluate_and_get_cell(bindings)
temp_dict['cell'] = get_actual_cell_index((col, row))
else:
temp_dict[k] = v
if "property" in temp_dict and temp_dict["property"] == "P585":
if "format" in temp_dict:
try:
datetime_string, precision = parse_datetime_string(temp_dict["value"], additional_formats=[temp_dict["format"]])
if "precision" not in temp_dict:
temp_dict["precision"] = int(precision.value.__str__())
else:
temp_dict["precision"] = translate_precision_to_integer(temp_dict["precision"])
temp_dict["value"] = datetime_string
except Exception as e:
raise e
response[key].append(temp_dict)
else:
if isinstance(value, (ItemExpression, ValueExpression, BooleanEquation)):
col, row, response[key] = value.evaluate_and_get_cell(bindings)
if key == "item":
response['cell'] = get_actual_cell_index((col, row))
else:
response[key] = value
return response
| 5,340,858
|
def get_CommandeProduits(path, prefix='CP_',cleaned=False):
"""
Read CSV (CommandeProduits) into Dataframe. All relevant columns are kept and renamed with prefix.
Args:
path (str): file path to CommandeProduits.csv
prefix (str): All relevant columns are renamed with prefix
Returns:
df (Dataframe): Resulting dataframe
"""
col = {'Id':prefix+'Id',
'Commande_Id':'Commande_Id',
'OffreProduit_Id':'OffreProduit_Id',
'QuantiteTotale':prefix+'QuantiteTotale',
'QuantiteUnite':prefix+'QuantiteUnite',
'QuantiteValeur':prefix+'QuantiteValeur',
'MontantTotal':prefix+'MontantTotal',
'Weight':prefix+'Weight'}
dt = {'Id': 'int64',
'Commande_Id': 'int64',
'OffreProduit_Id':'int64',
'QuantiteTotale':'float64',
'QuantiteUnite':'object',
'QuantiteValeur':'float64',
'MontantTotal':'float64',
'Weight':'float64'}
if not cleaned:
df = pd.read_csv(path, sep='\t', encoding='utf-8', usecols=list(col.keys()), dtype=dt)
df = df.rename(index=str, columns=col)
else:
df = pd.read_csv(path, sep='\t', encoding='utf-8',index_col=0)
return df
| 5,340,859
|
def hist_equal(image, hist):
"""
Equalize an image based on a histogram.
Parameters
----------
image : af.Array
- A 2D arrayfire array representing an image, or
- A multi-dimensional array representing a batch of images.
hist : af.Array
- Containing the histogram of an image.
Returns
---------
output : af.Array
- The equalized image.
"""
output = Array()
safe_call(backend.get().af_hist_equal(c_pointer(output.arr), image.arr, hist.arr))
return output
| 5,340,860
|
def get_close_icon(x1, y1, height, width):
"""percentage = 0.1
height = -1
while height < 15 and percentage < 1.0:
height = int((y2 - y1) * percentage)
percentage += 0.1
return (x2 - height), y1, x2, (y1 + height)"""
return x1, y1, x1 + 15, y1 + 15
| 5,340,861
|
def train_model(network, data, labels, batch_size,
epochs, validation_data=None, verbose=True, shuffle=False):
"""
Train
"""
model = network.fit(
data,
labels,
batch_size=batch_size,
epochs=epochs,
validation_data=validation_data,
shuffle=shuffle,
verbose=verbose)
return model
| 5,340,862
|
def taoyuan_agrichannel_irrigation_transfer_loss_rate():
"""
Real Name: TaoYuan AgriChannel Irrigation Transfer Loss Rate
Original Eqn: 0
Units: m3/m3
Limits: (None, None)
Type: constant
Subs: None
This is "no loss rate" version.
"""
return 0
| 5,340,863
|
def mocked_requests() -> MockedRequests:
"""Return mocked requests library."""
mocked_requests = MockedRequests()
with patch("libdyson.cloud.account.requests.request", mocked_requests.request):
yield mocked_requests
| 5,340,864
|
def Setup():
"""Sets up the logging environment."""
build_info = buildinfo.BuildInfo()
log_file = r'%s\%s' % (GetLogsPath(), constants.BUILD_LOG_FILE)
file_util.CreateDirectories(log_file)
debug_fmt = ('%(levelname).1s%(asctime)s.%(msecs)03d %(process)d {} '
'%(filename)s:%(lineno)d] %(message)s').format(
build_info.ImageID())
info_fmt = '%(levelname).1s%(asctime)s %(filename)s:%(lineno)d] %(message)s'
debug_formatter = logging.Formatter(debug_fmt, datefmt=DATE_FMT)
info_formatter = logging.Formatter(info_fmt, datefmt=DATE_FMT)
# Set default logger
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# Create empty list of handlers to enable multiple streams.
logger.handlers = []
# Create console handler and set level
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(info_formatter)
logger.addHandler(ch)
# Create file handler and set level
try:
fh = logging.FileHandler(log_file)
except IOError:
raise LogError('Failed to open log file %s.' % log_file)
fh.setLevel(logging.DEBUG)
fh.setFormatter(debug_formatter)
logger.addHandler(fh)
# Create Event Log handler and set level
if not winpe.check_winpe():
eh = logging.handlers.NTEventLogHandler('GlazierBuildLog')
eh.setLevel(logging.DEBUG)
eh.setFormatter(debug_formatter)
logger.addHandler(eh)
| 5,340,865
|
def test_camera_association(focuser):
""" Test association of Focuser with Camera after initialisation (getter, setter) """
sim_camera_1 = Camera()
sim_camera_2 = Camera()
# The Focuser in the fixture hasn't been associated with a Camera yet, so this should work
focuser.camera = sim_camera_1
assert focuser.camera is sim_camera_1
# Attempting to associate with a second Camera should fail, though.
focuser.camera = sim_camera_2
assert focuser.camera is sim_camera_1
| 5,340,866
|
def lambda_handler(event, context):
"""
Find and replace following words and outputs the result.
Oracle -> Oracle©
Google -> Google©
Microsoft -> Microsoft©
Amazon -> Amazon©
Deloitte -> Deloitte©
Example input: “We really like the new security features of Google Cloud”.
Expected output: “We really like the new security features of Google© Cloud”.
"""
# Return 400 if event is none or strToReplace is blank
if not event or not event['strToReplace']:
return {
'statusCode': 400,
'body': "Input string not provided."
}
# Input String
replacementString = event['strToReplace']
# Dictionary of words with replacement words
wordsToReplaceDict = {'Oracle': 'Oracle©', 'Google': 'Google©', 'Microsoft': 'Microsoft©', 'Amazon': 'Amazon©', 'Deloitte': 'Deloitte©'}
# Iterate over all key-value pairs in dictionary
for key, value in wordsToReplaceDict.items():
# Replace words in string
replacementString = replacementString.replace(key, value)
return {
'statusCode': 200,
'body': replacementString
}
| 5,340,867
|
def update_inv(X, X_inv, i, v):
"""Computes a rank 1 update of the the inverse of a symmetrical matrix.
Given a symmerical matrix X and its inverse X^{-1}, this function computes
the inverse of Y, which is a copy of X, with the i'th row&column replaced
by given vector v.
Parameters
----------
X : ndarray, shape (N, N)
A symmetrical matrix.
X_inv : nparray, shape (N, N)
The inverse of X.
i : int
The index of the row/column to replace.
v : ndarray, shape (N,)
The values to replace the row/column with.
Returns
-------
Y_inv : ndarray, shape (N, N)
The inverse of Y.
"""
U = v[:, np.newaxis] - X[:, [i]]
mask = np.zeros((len(U), 1))
mask[i] = 1
U = np.hstack((U, mask))
V = U[:, [1, 0]].T
V[1, i] = 0
C = np.eye(2)
X_inv_U = X_inv.dot(U)
V_X_inv = V.dot(X_inv)
Y_inv = X_inv - X_inv_U.dot(pinv(C + V_X_inv.dot(U))).dot(V_X_inv)
return Y_inv
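# Illustrative check (hypothetical data; assumes `numpy as np` and `pinv` are imported as in this module):
#   X = np.array([[2., 1.], [1., 3.]]); X_inv = np.linalg.inv(X)
#   v = np.array([5., 1.])
#   Y = X.copy(); Y[0, :] = v; Y[:, 0] = v          # replace row/column 0 with v
#   np.allclose(update_inv(X, X_inv, 0, v), np.linalg.inv(Y))  ->  True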
| 5,340,868
|
def read_barcode_lineno_map(stream):
"""Build a map of barcodes to line number from a stream
This builds a one based dictionary of barcode to line numbers.
"""
barcodes = {}
reader = csv.reader(stream, delimiter="\t")
for i, line in enumerate(reader):
barcodes[line[0]] = i + 1
return barcodes
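# Illustrative usage (hypothetical barcodes; assumes `csv` is imported at module level):
#   import io
#   read_barcode_lineno_map(io.StringIO("AAAC\nGGTT\n"))  ->  {"AAAC": 1, "GGTT": 2}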
| 5,340,869
|
def match_in_candidate_innings(entry, innings, summary_innings, entities):
"""
:param entry:
:param innings: innings to be searched in
:param summary_innings: innings mentioned in the summary segment
:param entities: total entities in the segment
:return: the matched inning, or -1 if no additional inning needs to be searched
"""
entities_in_summary_inning = set()
for summary_inning in summary_innings:
intersection = get_matching_entities_in_inning(entry, summary_inning, entities)
entities_in_summary_inning.update(intersection)
entities_not_found = entities.difference(entities_in_summary_inning)
matched_inning = -1
if len(entities_not_found) > 1:
remaining_innings = set(innings).difference(set(summary_innings))
ordered_remaining_innings = [inning for inning in innings if inning in remaining_innings]
matched_inning = get_inning_all_entities_set_intersection(entry, ordered_remaining_innings, entities_not_found)
return matched_inning
| 5,340,870
|
def checkpoint(
name: Optional[str] = None,
on_error: bool = True,
cond: Union[bool, Callable[..., bool]] = False,
) -> Callable[[Callable], Any]:
"""
Create a checkpointing decorator.
Args:
name (Optional[str]): Name of the checkpoint when saved. Defaults to the wrapped function's name.
on_error (bool): Whether to save checkpoint when an error occurs.
cond (Union[bool, Callable[..., bool]]): Condition under which to save checkpoint.
If a Callable, all parameters of the wrapped function should be passed
and it has to return a boolean.
Returns:
A decorator function.
"""
def ckpt_worker(func: Callable):
if name is None:
ckpt_name = func.__name__
else:
ckpt_name = name
return CkptWrapper(func=func, ckpt_name=ckpt_name, on_error=on_error, cond=cond)
return ckpt_worker
| 5,340,871
|
def log_batch_stats(observes, actions, advantages, disc_sum_rew, episode, logger):
""" Log various batch statistics """
logger.log({'_mean_obs': np.mean(observes),
'_min_obs': np.min(observes),
'_max_obs': np.max(observes),
'_std_obs': np.mean(np.var(observes, axis=0)),
'_mean_act': np.mean(actions),
'_min_act': np.min(actions),
'_max_act': np.max(actions),
'_std_act': np.mean(np.var(actions, axis=0)),
'_mean_adv': np.mean(advantages),
'_min_adv': np.min(advantages),
'_max_adv': np.max(advantages),
'_std_adv': np.var(advantages),
'_mean_discrew': np.mean(disc_sum_rew),
'_min_discrew': np.min(disc_sum_rew),
'_max_discrew': np.max(disc_sum_rew),
'_std_discrew': np.var(disc_sum_rew),
'_Episode': episode
})
| 5,340,872
|
def return_int(bit_len, unsigned=False):
"""
This function return the decorator that change return value to valid value.
The target function of decorator should return only one value
e.g. func(*args, **kargs) -> value:
"""
if bit_len not in VALID_BIT_LENGTH_OF_INT:
err = "Value of bit_len should be the one of {}, but your bit_len={}."
raise ByteDatasValueError(err.format(VALID_BIT_LENGTH_OF_INT, bit_len))
# calculate max_value for changing raw value to valid value
max_value = 2**bit_len
def decorator(function):
"""decorator function"""
@wraps(function)
def wrapper(*args, **kwargs):
"""
change valid to positive if value < 0
check value than call function or return False directly
"""
value = function(*args, **kwargs)
if value >= max_value or value < 0:
err = ("Returned value of {} should be between 0 and {}, but your "
"value = {}.")
raise ByteDatasValueError(err.format(function.__name__, max_value, value))
if unsigned is False:
# if value > max_value//2 , it means the top bit of value is
# 1 , it is a negative value, so we should change it to negative
value = value - max_value if value > max_value//2 else value
return value
return wrapper
return decorator
| 5,340,873
|
def get_integral_curve(f, init_xy, x_end, delta):
"""
solve ode 'dy/dx=f(x,y)' with Euler method
"""
(x, y) = init_xy
xs, ys = [x], [y]
for i in np.arange(init_xy[0], x_end, delta):
y += delta*f(x, y)
x += delta
xs.append(x)
ys.append(y)
return xs, ys
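# Illustrative usage (hypothetical ODE dy/dx = y with y(0) = 1; assumes `numpy as np` is imported):
#   xs, ys = get_integral_curve(lambda x, y: y, init_xy=(0.0, 1.0), x_end=1.0, delta=0.01)
#   ys[-1]  ->  ~2.7048   # Euler approximation of e after 100 steps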
| 5,340,874
|
def compute_atime_posteriors(sg, proposals,
global_srate=1.0,
use_ar=False,
raw_data=False,
event_idx=None):
"""
compute the bayesian cross-correlation (logodds of signal under an AR noise model)
for all signals in the historical library, against all signals in the current SG.
This is quite expensive so should in general be run only once, and the results cached.
"""
atime_lls = []
i = 0
for idx, (x, signals) in enumerate(proposals):
if event_idx is not None and event_idx != idx:
continue
sta_lls = dict()
for (sta, chan, band, phase), c in signals.items():
wns = sg.station_waves[sta]
if len(wns) == 0:
continue
elif len(wns) > 1:
raise Exception("haven't worked out correlation proposals with multiple wns from same station")
wn = wns[0]
if raw_data:
sdata = wn.get_value().data.copy()
sdata[np.isnan(sdata)] = 0.0
else:
sdata = wn.unexplained_kalman()
if use_ar:
lls = ar_advantage(sdata, c, wn.nm)
else:
normed_sdata = sdata / wn.nm_env.c #np.std(sdata)
lls = np.sqrt(iid_advantage(normed_sdata, c)) # sqrt for laplacian noise, essentially
tt_array, tt_mean = build_ttr_model_array(sg, x, sta, wn.srate, phase=phase)
origin_ll, origin_stime = atime_likelihood_to_origin_likelihood(lls, wn.st, wn.srate, tt_mean, tt_array, global_srate)
signal_scale = wn.nm_env.c
sta_lls[(wn.label, phase)] = origin_ll, origin_stime, signal_scale
sg.logger.info("computed advantage for %s %s %s" % (x, wn.label, phase))
i += 1
atime_lls.append((x, sta_lls))
return atime_lls
| 5,340,875
|
def search4vowels(phrase: str) -> set:
    """Return any vowels found in a supplied word."""
    vowels = set('aeiou')
    return vowels.intersection(set(phrase))
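
# Minimal usage examples for search4vowels; the result is a set, so unordered.
assert search4vowels("hitch-hiker") == {'e', 'i'}
assert search4vowels("sky") == set()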
| 5,340,876
|
async def set_time(ctx, time: int):
"""Configures the timer countdown duration."""
if time <= 0: # Force duration to be 1 minute or longer
em = Embed(title=':warning: Invalid `settime` Command Usage',
description='Invalid timer duration. Duration must be 1+ minutes. \nFormat: `settime #`',
color=MsgColors.YELLOW.value)
else:
config.set('CURRENT_SETTINGS', 'time', str(time))
with open('settings.ini', 'w') as configFile:
config.write(configFile)
em = Embed(title=':gear: Timer Duration Changed',
description='Timer duration has been set to `' + str(time) + ' minute(s)`.',
color=MsgColors.BLACK.value)
await ctx.send(embed=em)
| 5,340,877
|
def rsort(s):
"""Sort sequence s in ascending order.
>>> rsort([])
[]
>>> rsort([1])
[1]
>>> rsort([1, 1, 1])
[1, 1, 1]
>>> rsort([1, 2, 3])
[1, 2, 3]
>>> rsort([3, 2, 1])
[1, 2, 3]
>>> rsort([1, 2, 1])
[1, 1, 2]
>>> rsort([1,2,3, 2, 1])
[1, 1, 2, 2, 3]
"""
if len(s) <= 1:
return s
else:
        return [rmin(s)] + rsort(remove(rmin(s), s))
| 5,340,878
|
def xdg_data_home():
"""Base directory where user specific data files should be stored."""
value = os.getenv('XDG_DATA_HOME') or '$HOME/.local/share/'
return os.path.expandvars(value)
| 5,340,879
|
def read_starlight_output_syn_spec(lines):
""" read syn_spec of starlight output """
Nl_obs = len(lines)
    wave = Column(np.zeros((Nl_obs, ), dtype=float), 'wave')
    flux_obs = Column(np.zeros((Nl_obs, ), dtype=float), 'flux_obs')
    flux_syn = Column(np.zeros((Nl_obs, ), dtype=float), 'flux_syn')
    weight = Column(np.zeros((Nl_obs, ), dtype=float), 'weight')
    for i, line in enumerate(lines):
        line_split = line.split()
        wave[i] = float(line_split[0])
        flux_obs[i] = float(line_split[1])
        flux_syn[i] = float(line_split[2])
        weight[i] = float(line_split[3])
return Table([wave, flux_obs, flux_syn, weight])
| 5,340,880
|
def print_buttons(ids):
"""ids - [(int, str), ...]
"""
    for ID, name in ids[::-1]:
        print(f'<button id="button{ID}">')
        print(name)
        print('</button>')
| 5,340,881
|
def get_stations_trips(station_id):
"""
https://api.rasp.yandex.net/v1.0/schedule/ ?
     apikey=<key>
     & format=<format>
     & station=<station code>
     & lang=<language>
     & [date=<date>]
     & [transport_types=<transport type>]
     & [system=<current coding system>]
     & [show_systems=<codes in the response>]
"""
params = {
'apikey': RASP_KEY,
'format': 'json',
'station': station_id,
'lang': 'ua',
'transport_types': 'suburban'
}
url = 'https://api.rasp.yandex.net/v1.0/schedule/'
return get_json(url, params)
| 5,340,882
|
def createDir(dirPath):
"""
Creates a directory if it does not exist.
:type dirPath: string
:param dirPath: the path of the directory to be created.
"""
try:
if os.path.dirname(dirPath) != "":
os.makedirs(os.path.dirname(dirPath), exist_ok=True) # Python 3.2+
except TypeError:
try: # Python 3.2-
if os.path.dirname(dirPath) != "":
os.makedirs(os.path.dirname(dirPath))
except OSError as exception:
if exception.errno != 17:
raise
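
# Minimal usage sketch for createDir: note that it expects a *file* path and
# creates the parent directory. The path below is illustrative only.
createDir("output/plots/figure.png")  # ensures the "output/plots" directory exists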
| 5,340,883
|
def _compute_pairwise_kpt_distance(a, b):
"""
Args:
a, b (poses): Two sets of poses to match
Each "poses" is represented as a list of 3x17 or 4x17 np.ndarray
"""
res = np.zeros((len(a), len(b)))
for i in range(len(a)):
for j in range(len(b)):
res[i, j] = pck_distance(a[i], b[j])
return res
| 5,340,884
|
def _extract_dialog_node_name(dialog_nodes):
"""
For each dialog_node (node_id) of type *standard*, check if *title exists*.
If exists, use the title for the node_name. otherwise, use the dialog_node
For all other cases, use the dialog_node
dialog_node: (dialog_node_title, dialog_node_type)
In the case of Login Issues,
"title": "Login Issue",
"dialog_node": "Login Issues",
the record will be created as:
"Login Issues": ("Login Issue", "standard")
"""
nodes_dict = {}
nodes_type = {}
for obj in dialog_nodes:
if (obj['type']=='standard') and ('title' in obj):
if (obj['title'] is not None):
nodes_dict[obj['dialog_node']] = (obj['title'],obj['type'])
else:
nodes_dict[obj['dialog_node']] = (obj['dialog_node'],obj['type'])
else:
nodes_dict[obj['dialog_node']] = (obj['dialog_node'],obj['type'])
return nodes_dict
| 5,340,885
|
def _get_content(tax_id):
"""Get Kazusa content, either from cached file or remotely."""
target_file = os.path.join(DATA_DIR, "%s.txt" % tax_id)
if not os.path.exists(target_file):
url = (
"http://www.kazusa.or.jp/codon/cgi-bin/showcodon.cgi?"
+ "aa=1&style=N&species=%s" % tax_id
)
urlretrieve(url, target_file)
with open(target_file) as fle:
return fle.read()
| 5,340,886
|
def create_supervisor_config_file(
site_dir_name, wwwhisper_path, site_config_path, supervisor_config_path):
"""Creates site-specific supervisor config file.
The file allows to start the wwwhisper application for the site.
"""
settings = """[program:wwwhisper-%s]
command=%s/run_wwwhisper_for_site.sh -d %s
user=%s
group=%s
autorestart=true
stopwaitsecs=2
stopsignal=INT
stopasgroup=true
""" % (site_dir_name, wwwhisper_path, site_config_path, WWWHISPER_USER,
WWWHISPER_GROUP)
write_to_file(
supervisor_config_path, SUPERVISOR_CONFIG_FILE, settings)
| 5,340,887
|
def search_all_entities(bsp, **search: Dict[str, str]) -> Dict[str, List[Dict[str, str]]]:
"""search_all_entities(key="value") -> {"LUMP": [{"key": "value", ...}]}"""
out = dict()
for LUMP_name in ("ENTITIES", *(f"ENTITIES_{s}" for s in ("env", "fx", "script", "snd", "spawn"))):
entity_lump = getattr(bsp, LUMP_name, shared.Entities(b""))
results = entity_lump.search(**search)
if len(results) != 0:
out[LUMP_name] = results
return out
| 5,340,888
|
def pull_verbose(docker_client, repository, tag=None, log=True):
"""
Use low-level docker-py API to show status while pulling docker containers.
Attempts to replicate docker command line output
log - if True, logs the output, if False, prints the output
"""
for update in docker_client.api.pull(repository, tag=tag, stream=True, decode=True):
tokens = []
for key in ("id", "status", "progress"):
value = update.get(key)
if value is not None:
tokens.append(value)
output = ": ".join(tokens)
if log:
logger.info(output)
else:
print(output)
| 5,340,889
|
def expose_all(root_module: types.ModuleType, container_type: type):
"""
    Exposes all sub-modules and namespaces as lazily imported attributes of the given container type (class).
    Args:
        root_module (types.ModuleType): package whose sub-directories are exposed
        container_type (type): namespace type (class) that receives the attributes
"""
for path in root_module.__path__:
for name in os.listdir(path):
if not os.path.isdir(os.path.join(path, name)) or name == "__pycache__" or name.startswith("_"):
continue
lazy_import_property = get_lazy_import_property(name, root_module, container_type)
setattr(container_type, name, lazy_import_property)
| 5,340,890
|
def is_probably_beginning_of_sentence(line):
"""Return True if this line begins a new sentence."""
# Check heuristically for a parameter list.
for token in ['@', '-', r'\*']:
if re.search(r'\s' + token + r'\s', line):
return True
stripped_line = line.strip()
is_beginning_of_sentence = re.match(r'[^\w"\'`\(\)]', stripped_line)
is_pydoc_ref = re.match(r'^:\w+:', stripped_line)
return is_beginning_of_sentence and not is_pydoc_ref
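
# A few illustrative checks for the heuristic above (their results depend only
# on the regular expressions in the function itself):
assert is_probably_beginning_of_sentence('- starts a new item')
assert not is_probably_beginning_of_sentence('word continues on')
assert not is_probably_beginning_of_sentence(':returns: some value')  # pydoc ref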
| 5,340,891
|
def default_attack_handler(deck, discard, hand, turn, supply, attack):
"""Handle some basic attacks in a default manner. Returns True iff the
attack was handled."""
covertool.cover("domsim.py:219")
if attack == COUNCIL_ROOM:
# Not really an attack, but this is an easy way to handle it.
covertool.cover("domsim.py:221")
hand += draw(deck, discard, 1)
covertool.cover("domsim.py:222")
return True
elif MOAT in hand:
covertool.cover("domsim.py:224")
return True
elif attack == MINION and len(hand) > 4:
covertool.cover("domsim.py:226")
discard += hand
covertool.cover("domsim.py:227")
hand[:] = draw(deck, discard, 4)
covertool.cover("domsim.py:228")
return True
elif attack == WITCH:
covertool.cover("domsim.py:230")
gain(CURSE, supply, discard)
covertool.cover("domsim.py:231")
return True
elif attack == SEA_HAG:
covertool.cover("domsim.py:233")
discard += draw(deck, discard, 1)
covertool.cover("domsim.py:234")
gain(CURSE, supply, deck)
covertool.cover("domsim.py:235")
return True
else:
covertool.cover("domsim.py:237")
return False
| 5,340,892
|
def solve_word_jumble(word_perms):
"""Solve a word jumble by unscrambling four jumbles, then a final jumble.
Parameters:
- words: list of strings, each is the scrambled letters for a single word
- circles: list of strings, each marks whether the letter at that position
in the solved anagram word will be used to solve the final jumble.
This string contains only two different characters:
1. O (letter "oh") = the letter is in the final jumble
2. _ (underscore) = the letter is not in the final jumble
- final: list of strings in the same format as circles parameter that shows
how the final jumble's letters are arranged into a word or phrase."""
# Get all English words in the built-in dictionary
all_words = get_file_lines()
| 5,340,893
|
def deprecate(remove_in, use_instead, module_name=None, name=None):
"""
Decorator that marks a function or class as deprecated.
When the function or class is used, a warning will be issued.
Args:
remove_in (str):
The version in which the decorated type will be removed.
use_instead (str):
The function or class to use instead.
module_name (str):
The name of the containing module. This will be used to
generate more informative warnings.
Defaults to None.
name (str):
The name of the object being deprecated.
If not provided, this is automatically determined based on the decorated type.
Defaults to None.
"""
def deprecate_impl(obj):
if config.INTERNAL_CORRECTNESS_CHECKS and version(polygraphy.__version__) >= version(remove_in):
G_LOGGER.internal_error("{:} should have been removed in version: {:}".format(obj, remove_in))
nonlocal name
name = name or obj.__name__
if inspect.ismodule(obj):
class DeprecatedModule(object):
def __getattr__(self, attr_name):
warn_deprecated(name, use_instead, remove_in, module_name)
self = obj
return getattr(self, attr_name)
def __setattr__(self, attr_name, value):
warn_deprecated(name, use_instead, remove_in, module_name)
self = obj
return setattr(self, attr_name, value)
DeprecatedModule.__doc__ = "Deprecated: Use {:} instead".format(use_instead)
return DeprecatedModule()
elif inspect.isclass(obj):
class Deprecated(obj):
def __init__(self, *args, **kwargs):
warn_deprecated(name, use_instead, remove_in, module_name)
super().__init__(*args, **kwargs)
Deprecated.__doc__ = "Deprecated: Use {:} instead".format(use_instead)
return Deprecated
elif inspect.isfunction(obj):
def wrapped(*args, **kwargs):
warn_deprecated(name, use_instead, remove_in, module_name)
return obj(*args, **kwargs)
wrapped.__doc__ = "Deprecated: Use {:} instead".format(use_instead)
return wrapped
else:
G_LOGGER.internal_error("deprecate is not implemented for: {:}".format(obj))
return deprecate_impl
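
# A minimal usage sketch for the deprecate decorator above; `old_helper`,
# `new_helper()` and the version string are illustrative only.
@deprecate(remove_in="9.9.9", use_instead="new_helper()", module_name="example.module")
def old_helper(x):
    return x * 2

# old_helper(3) still returns 6 but emits a deprecation warning pointing at new_helper().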
| 5,340,894
|
def get_available_processors():
"""Return the list of available processors modules."""
modules = [item.replace('.py', '')
for item in os.listdir(PROCESSORS_DIR)
if isfile(join(PROCESSORS_DIR, item))]
return modules
| 5,340,895
|
def append_after(filename="", search_string="", new_string=""):
"""
Inserts a line of text to a file, after
each line containing a specific string.
"""
out = ""
with open(filename, 'r') as f:
for line in f:
out += line
if search_string in line:
out += new_string
with open(filename, 'w') as f:
f.write(out)
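
# A minimal usage sketch for append_after; the file name and strings below are
# illustrative. new_string should normally end with '\n' so the inserted text
# lands on its own line.
append_after("config.txt", search_string="[section]", new_string="key = value\n")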
| 5,340,896
|
def hello_world():
"""return bool if exists -> take in email"""
email = request.json['email']
c = conn.cursor()
c.execute("select * from Users where Users.email = {}".format(email))
result = False
conn.commit()
conn.close()
return result
| 5,340,897
|
def get_model_relations(
model: Callable,
model_args: Optional[tuple] = None,
model_kwargs: Optional[dict] = None,
):
"""
Infer relations of RVs and plates from given model and optionally data.
See https://github.com/pyro-ppl/pyro/issues/949 for more details.
This returns a dictionary with keys:
- "sample_sample" map each downstream sample site to a list of the upstream
sample sites on which it depend;
- "sample_dist" maps each sample site to the name of the distribution at
that site;
- "plate_sample" maps each plate name to a list of the sample sites within
that plate; and
- "observe" is a list of observed sample sites.
For example for the model::
def model(data):
m = pyro.sample('m', dist.Normal(0, 1))
sd = pyro.sample('sd', dist.LogNormal(m, 1))
with pyro.plate('N', len(data)):
pyro.sample('obs', dist.Normal(m, sd), obs=data)
the relation is::
{'sample_sample': {'m': [], 'sd': ['m'], 'obs': ['m', 'sd']},
'sample_dist': {'m': 'Normal', 'sd': 'LogNormal', 'obs': 'Normal'},
'plate_sample': {'N': ['obs']},
'observed': ['obs']}
:param callable model: A model to inspect.
:param model_args: Optional tuple of model args.
:param model_kwargs: Optional dict of model kwargs.
:rtype: dict
"""
if model_args is None:
model_args = ()
if model_kwargs is None:
model_kwargs = {}
with torch.random.fork_rng(), torch.no_grad(), pyro.validation_enabled(False):
with TrackProvenance():
trace = poutine.trace(model).get_trace(*model_args, **model_kwargs)
sample_sample = {}
sample_dist = {}
plate_sample = defaultdict(list)
observed = []
for name, site in trace.nodes.items():
if site["type"] != "sample" or site_is_subsample(site):
continue
sample_sample[name] = [
upstream
for upstream in get_provenance(site["fn"].log_prob(site["value"]))
if upstream != name
]
sample_dist[name] = _get_dist_name(site["fn"])
for frame in site["cond_indep_stack"]:
plate_sample[frame.name].append(name)
if site["is_observed"]:
observed.append(name)
def _resolve_plate_samples(plate_samples):
for p, pv in plate_samples.items():
pv = set(pv)
for q, qv in plate_samples.items():
qv = set(qv)
if len(pv & qv) > 0 and len(pv - qv) > 0 and len(qv - pv) > 0:
plate_samples_ = plate_samples.copy()
plate_samples_[q] = pv & qv
plate_samples_[q + "__CLONE"] = qv - pv
return _resolve_plate_samples(plate_samples_)
return plate_samples
plate_sample = _resolve_plate_samples(plate_sample)
# convert set to list to keep order of variables
plate_sample = {
k: [name for name in trace.nodes if name in v] for k, v in plate_sample.items()
}
return {
"sample_sample": sample_sample,
"sample_dist": sample_dist,
"plate_sample": dict(plate_sample),
"observed": observed,
}
| 5,340,898
|
def progress_enabled():
"""
Checks if progress is enabled. To disable:
export O4_PROGRESS=false
"""
return os.environ.get('O4_PROGRESS', 'true') == 'true'
| 5,340,899
|