| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def batchify_rays(rays_flat, chunk=1024*32, random_directions=None, background_color=None, **kwargs):
"""Render rays in smaller minibatches to avoid OOM.
"""
all_ret = {}
for i in range(0, rays_flat.shape[0], chunk):
ret = render_rays(rays_flat[i:i+chunk], random_directions=random_directions, background_color=background_color, **kwargs)
if random_directions is not None:
ret, mean_regularization_term = ret
for k in ret:
if k not in all_ret:
all_ret[k] = []
all_ret[k].append(ret[k])
all_ret = {k : torch.cat(all_ret[k], 0) for k in all_ret}
if random_directions is None:
return all_ret
else:
return all_ret, mean_regularization_term
| 5,337,900
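The function above is the standard chunk-and-concatenate pattern for keeping peak GPU memory bounded. A minimal, self-contained sketch of the same pattern follows; `fake_render` is a hypothetical stand-in for `render_rays`, which is not shown in this snippet.

import torch

def fake_render(rays):
    # Stand-in for render_rays: returns a dict of per-ray outputs.
    return {"rgb": rays * 2.0, "depth": rays.sum(dim=-1, keepdim=True)}

def batchify(rays, chunk=4):
    out = {}
    for i in range(0, rays.shape[0], chunk):
        ret = fake_render(rays[i:i + chunk])
        for k, v in ret.items():
            out.setdefault(k, []).append(v)
    # Concatenate the per-chunk results back into full-size tensors.
    return {k: torch.cat(v, 0) for k, v in out.items()}

rays = torch.randn(10, 3)
result = batchify(rays, chunk=4)
print(result["rgb"].shape, result["depth"].shape)  # torch.Size([10, 3]) torch.Size([10, 1])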
|
def extract_surfaces(pvol):
""" Extracts surfaces from a volume.
:param pvol: input volume
:type pvol: abstract.Volume
:return: extracted surface
:rtype: dict
"""
if not isinstance(pvol, BSpline.abstract.Volume):
raise TypeError("The input should be an instance of abstract.Volume")
# Get data from the volume object
vol_data = pvol.data
rational = vol_data['rational']
degree_u = vol_data['degree'][0]
degree_v = vol_data['degree'][1]
degree_w = vol_data['degree'][2]
kv_u = vol_data['knotvector'][0]
kv_v = vol_data['knotvector'][1]
kv_w = vol_data['knotvector'][2]
size_u = vol_data['size'][0]
size_v = vol_data['size'][1]
size_w = vol_data['size'][2]
cpts = vol_data['control_points']
# Determine object type
obj = NURBS.Surface if rational else BSpline.Surface
# u-v plane
surflist_uv = []
for w in range(size_w):
surf = obj()
surf.degree_u = degree_u
surf.degree_v = degree_v
surf.ctrlpts_size_u = size_u
surf.ctrlpts_size_v = size_v
surf.ctrlpts2d = [[cpts[v + (size_v * (u + (size_u * w)))] for v in range(size_v)] for u in range(size_u)]
surf.knotvector_u = kv_u
surf.knotvector_v = kv_v
surflist_uv.append(surf)
# u-w plane
surflist_uw = []
for v in range(size_v):
surf = obj()
surf.degree_u = degree_u
surf.degree_v = degree_w
surf.ctrlpts_size_u = size_u
surf.ctrlpts_size_v = size_w
surf.ctrlpts2d = [[cpts[v + (size_v * (u + (size_u * w)))] for w in range(size_w)] for u in range(size_u)]
surf.knotvector_u = kv_u
surf.knotvector_v = kv_w
surflist_uw.append(surf)
# v-w plane
surflist_vw = []
for u in range(size_u):
surf = obj()
surf.degree_u = degree_v
surf.degree_v = degree_w
surf.ctrlpts_size_u = size_v
surf.ctrlpts_size_v = size_w
surf.ctrlpts2d = [[cpts[v + (size_v * (u + (size_u * w)))] for w in range(size_w)] for v in range(size_v)]
surf.knotvector_u = kv_v
surf.knotvector_v = kv_w
surflist_vw.append(surf)
# Return shapes as a dict object
return dict(uv=surflist_uv, uw=surflist_uw, vw=surflist_vw)
| 5,337,901
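The extraction above hinges on the flattened control-point index `v + size_v * (u + size_u * w)`. A small pure-Python check of that ordering (hypothetical sizes, no geomdl objects involved):

# Build a fake control-point list in w-outer, u-middle, v-inner order and verify
# that the index expression used above recovers the expected (u, v, w) triplets.
size_u, size_v, size_w = 3, 4, 2
cpts = [(u, v, w) for w in range(size_w) for u in range(size_u) for v in range(size_v)]

w = 1  # fix one w-slice, as in the u-v plane loop
slice_uv = [[cpts[v + size_v * (u + size_u * w)] for v in range(size_v)] for u in range(size_u)]
assert all(pt == (u, v, w) for u, row in enumerate(slice_uv) for v, pt in enumerate(row))
print("u-v slice at w=1 recovered correctly")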
|
def read_file(filepath: str, config: Config = DEFAULT_CONFIG) -> pd.DataFrame:
"""
Read .csv, .xlsx, .xls to pandas dataframe. Read only a certain sheet name and skip
to header row using sheet_name and header_index.
:filepath: path to file (str)
:config: dtype.Config
Returns pd.DataFrame
"""
filename = os.path.basename(filepath).lower()
excel_header_row = utils._or(config.excel_header_row, config.header_row)
csv_header_row = utils._or(config.csv_header_row, config.header_row)
if filename == "pdappend.csv":
logging.warning("Cannot read reserved result filename (pdappend.csv)")
return pd.DataFrame()
if not is_filetype(filename):
raise ValueError(f"file {filename} is not .csv, .xslx, or .xls")
if ".xls" in filename:
return pd.read_excel(
filepath,
sheet_name=config.sheet_name,
skiprows=list(range(0, int(excel_header_row))),
)
if filename.endswith(".csv"):
return pd.read_csv(filepath, skiprows=list(range(0, int(csv_header_row))))
| 5,337,902
|
async def alterar_nome(usuario: Usuario, nome: str) -> None:
"""
Changes the name of a user who is already connected.
:param usuario: The user that will receive the name.
:param nome: The name to assign to the user.
"""
nome_antigo = usuario.nome
if await usuario.atribuir_nome(nome):
await usuario.msg.enviar_para_todos(f'*. Usuário {nome_antigo} é conhecido como {usuario.nome}')
| 5,337,903
|
def euler2quaternion( euler_angs ):
"""
Description
-----------
This code is directly from the following reference
[REF] https://computergraphics.stackexchange.com/questions/8195/how-to-convert-euler-angles-to-quaternions-and-get-the-same-euler-angles-back-fr
Converting a R4 quaternion vector (w, x, y, z) to Euler Angle (Roll, Pitch, Yaw)
Arguments
---------
[NAME] [TYPE] [DESCRIPTION]
(1) yaw, pitch, roll The euler angles of the given quaternion vector.
[OUTPUTS]
-----------
[NAME] [TYPE] [DESCRIPTION]
(1) quatVec List The quaternion vector, ordered in w, x, y and z
"""
yaw, pitch, roll = euler_angs[ : ]
cy = np.cos( yaw * 0.5 )
sy = np.sin( yaw * 0.5 )
cp = np.cos( pitch * 0.5 )
sp = np.sin( pitch * 0.5 )
cr = np.cos( roll * 0.5 )
sr = np.sin( roll * 0.5 )
w = cr * cp * cy + sr * sp * sy
x = sr * cp * cy - cr * sp * sy
y = cr * sp * cy + sr * cp * sy
z = cr * cp * sy - sr * sp * cy
return w,x,y,z
| 5,337,904
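A quick sanity check of the conversion above (assuming `euler2quaternion` is in scope): a pure 90-degree yaw should give w = cos(pi/4), z = sin(pi/4) and zero x and y.

import numpy as np

# (yaw, pitch, roll) = (pi/2, 0, 0) -> unit quaternion (cos(pi/4), 0, 0, sin(pi/4))
w, x, y, z = euler2quaternion([np.pi / 2, 0.0, 0.0])
assert np.allclose([w, x, y, z], [np.cos(np.pi / 4), 0.0, 0.0, np.sin(np.pi / 4)])
assert np.isclose(w**2 + x**2 + y**2 + z**2, 1.0)  # result is a unit quaternion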
|
def run_server(server, thread=False, port=8080):
"""
Runs the server.
@param server if None, it becomes ``HTTPServer(('localhost', 8080), SimpleHandler)``
@param thread if True, the server is run in a thread
and the function returns right away,
otherwise, it runs the server.
@param port port to use
@return server if thread is False, the thread otherwise (the thread is started)
@warning If you kill the python program while the thread is still running, python interpreter might be closed completely.
"""
if server is None:
server = HTTPServer(('localhost', port), SimpleHandler)
if thread:
th = ThreadServer(server)
th.start()
return th
else:
server.serve_forever()
return server
| 5,337,905
|
def validate(aLine):
"""
>>> validate(b"$GPGSA,A,2,29,19,28,,,,,,,,,,23.4,12.1,20.0*0F")
[b'GPGSA', b'A', b'2', b'29', b'19', b'28', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'23.4', b'12.1', b'20.0']
>>> validate(b"$GPGSA,A,2,29,19,28,,,,,,,,,,23.4,") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/doctest.py", line 1330, in __run
compileflags, 1), test.globs)
File "<doctest __main__.validate[1]>", line 1, in <module>
validate(b"$GPGSA,A,2,29,19,28,,,,,,,,,,23.4,")
File "/Users/slott/Documents/Projects/NMEA-Tools/nmea_checksum.py", line 23, in validate
assert sentence[0] in b'$!', "Unexpected {} not in ({}, {})".format(sentence[0], b'$', b'!')
IndexError: index out of range
>>> validate(b"29,19,28,,,,,,,,,,23.4,12.1,20.0*0F") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/doctest.py", line 1330, in __run
compileflags, 1), test.globs)
File "<doctest __main__.validate[2]>", line 1, in <module>
validate(b"29,19,28,,,,,,,,,,23.4,12.1,20.0*0F") # doctest: +IGNORE_EXCEPTION_DETAIL
File "/Users/slott/Documents/Projects/NMEA-Tools/nmea_checksum.py", line 32, in validate
assert sentence[0] in b'$!', "Unexpected {} not in ({}, {})".format(sentence[0], b'$', b'!')
AssertionError: Unexpected 50 not in (b'$', b'!')
>>> validate(b'$GPGLL,2542.9243,N,08013.6310,W,162823.000,A*29')
[b'GPGLL', b'2542.9243', b'N', b'08013.6310', b'W', b'162823.000', b'A']
"""
sentence, star, checksum = aLine.rpartition(b'*')
assert sentence[0] in b'$!', f"Unexpected {sentence[0]} not in b'$!'"
if star == b'*':
cs = reduce(xor, sentence[1:])
assert int(checksum, 16) == cs
return sentence[1:].split(b',')
| 5,337,906
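The checksum logic in `validate` is the XOR of every byte between '$' and '*', compared with the two hex digits after '*'. A standalone computation for the GPGLL sentence from the doctest above:

from functools import reduce
from operator import xor

line = b"$GPGLL,2542.9243,N,08013.6310,W,162823.000,A*29"
body, _, checksum = line.rpartition(b'*')
computed = reduce(xor, body[1:])      # XOR all bytes, skipping the leading '$'
assert computed == int(checksum, 16)  # matches the transmitted 0x29
print(hex(computed))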
|
def matlabize(s):
"""Make string s suitable for use as a MATLAB function/script name"""
s = s.replace(' ', '_')
s = s.replace('.', '_')
s = s.replace('-', '_')
assert len(s) <= 63 # MATLAB function/script name length limitation
return s
| 5,337,907
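A tiny usage example of `matlabize` (assuming it is in scope): spaces, dots and dashes all become underscores.

assert matlabize("my analysis-v1.2") == "my_analysis_v1_2"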
|
def getApiResults(case, installer, version, criteria):
"""
Get Results by calling the API
criteria: if True, only the last N results are considered for the case success criteria
"""
results = json.dumps([])
# to remove proxy (to be removed at the end for local test only)
# proxy_handler = urllib2.ProxyHandler({})
# opener = urllib2.build_opener(proxy_handler)
# urllib2.install_opener(opener)
# url = "http://127.0.0.1:8000/results?case=" + case + \
# "&period=30&installer=" + installer
period = get_config('general.period')
url_base = get_config('testapi.url')
nb_tests = get_config('general.nb_iteration_tests_success_criteria')
url = (url_base + "?case=" + case +
"&period=" + str(period) + "&installer=" + installer +
"&version=" + version)
if criteria:
url += "&last=" + str(nb_tests)
proxy = get_config('general.proxy')
response = requests.get(url, proxies=proxy)
try:
results = json.loads(response.content)
except Exception: # pylint: disable=broad-except
print "Error when retrieving results form API"
return results
| 5,337,908
|
def generate_noisy_gaussian(center, std_dev, height, x_domain, noise_domain,
n_datapoints):
"""
Generate a gaussian with some aspect of noise.
Input:
center = central x value
std_dev = standard deviation of the function
height = height (y-offset) of the function
noise_domain = range of the uniform random noise added to the perfect Gaussian
x_domain = absolute domain of the Gaussian function
n_datapoints = total number of datapoints of the Gaussian function
Output: x_values,y_values
x_values = the x-axial array of the gaussian function within the domain
y_values = the y-axial array of the gaussian function within the domain
"""
# Type check.
center = valid.validate_float_value(center)
std_dev = valid.validate_float_value(std_dev, greater_than=0)
height = valid.validate_float_value(height)
x_domain = valid.validate_float_array(x_domain, shape=(2,), size=2)
noise_domain = valid.validate_float_array(noise_domain, shape=(2,), size=2)
n_datapoints = valid.validate_int_value(n_datapoints, greater_than=0)
# Generate the gaussian function and map to an output with the input
# parameters.
x_values, y_values = generate_gaussian(center, std_dev, height,
x_domain=x_domain,
n_datapoints=n_datapoints)
# Imbue the gaussian with random noise.
y_values = misc.generate_noise(y_values, noise_domain,
distribution='uniform')
return x_values, y_values
| 5,337,909
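The helpers `generate_gaussian`, `valid` and `misc` are not shown in this snippet, so here is a self-contained NumPy sketch of the same idea (a sampled Gaussian curve, offset by `height`, plus uniform noise); it is an approximation of the behaviour described in the docstring, not the original implementation.

import numpy as np

def noisy_gaussian(center, std_dev, height, x_domain, noise_domain, n_datapoints, rng=None):
    rng = np.random.default_rng() if rng is None else rng
    x = np.linspace(x_domain[0], x_domain[1], n_datapoints)
    y = height + np.exp(-0.5 * ((x - center) / std_dev) ** 2)  # Gaussian with y-offset
    y += rng.uniform(noise_domain[0], noise_domain[1], size=n_datapoints)
    return x, y

x, y = noisy_gaussian(0.0, 1.0, 2.0, (-5, 5), (-0.1, 0.1), 201, rng=np.random.default_rng(0))
print(x.shape, y.shape)  # (201,) (201,)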
|
def run_corrsearch3d(
path_to_original: Path,
path_to_new: Path,
tomogram_trimming: float
):
"""Reads mrc files size, sets patch size, numbers, boundaries, and runs IMODs corrsearch3d function
"""
#Read mrc files
mrc_original = mrcfile.open(path_to_original)
mrc_new = mrcfile.open(path_to_new)
#Extract dimensions
ori_shape = mrc_original.data.shape
new_shape = mrc_new.data.shape
#If dimensions don't match, bail
if ori_shape != new_shape:
print(f"The dimensions of matched original tomogram {path_to_original.name} and new tomogram {path_to_new} do not match. Bailing...")
sys.exit()
#Set XYZ dimensions
x_dim = ori_shape[2]
y_dim = ori_shape[1]
z_dim = ori_shape[0]
#Set XYZ min and max by applying trimming
actual_trim = (tomogram_trimming / 2) / 100
x_min = 0 + (x_dim * actual_trim)
x_max = x_dim - ((x_dim * actual_trim))
y_min = 0 + (y_dim * actual_trim)
y_max = y_dim - ((y_dim * actual_trim))
z_min = 0 + (z_dim * actual_trim)
z_max = z_dim - ((z_dim * actual_trim))
#Other parameters, set for the user automatically
largest_dimension = {'x_dim': x_dim,'y_dim': y_dim,'z_dim':z_dim}
result = (max(largest_dimension,key=largest_dimension.get))
largest_dimension = largest_dimension[result]
patch_size = largest_dimension / 10
max_shift = largest_dimension / 5
number_patches = 5
#Run command
os.system(f"corrsearch3d -ref {str(path_to_original)} -align {str(path_to_new)} -maxshift {int(max_shift)} -size {int(patch_size)},{int(patch_size)},{int(patch_size)} -number {int(number_patches)},{int(number_patches)},{int(number_patches)} -xminmax {int(x_min)},{int(x_max)} -yminmax {int(y_min)},{int(y_max)} -zminmax {int(z_min)},{int(z_max)} -output {path_to_original.stem}.txt")
| 5,337,910
|
def filter_group_delay(
sos_or_fir_coef: np.ndarray,
N: int = 2048,
fs: float = None,
sos: bool = True,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Given filter spec in second order sections or (num, den) form, return group delay.
Uses method in [1], which is cited by `scipy.signal.group_delay` but incorrectly implemented.
Inputs:
- sos_or_fir_coef: np.ndarray, second order section of iir filter or fir_coef of a
FIR filter.
- N: int, number of samples to calculate for the impulse and frequency response
- fs: float, sampling rate in Hz. If not None, will return the frequency in Hz,
otherwise normalized frequency will be returned.
- sos: bool. If true, assume `sos_or_fir_coef` is sos, otherwise as fir_coef
Output:
- frequency: np.ndarray, frequency of the frequency response. If fs is None,
unit will be in radians/sample (ranging from 0 to np.pi),
otherwise will be in Hz (ranging from 0 to fs / 2).
- group_delay: np.ndarray, group delay of filter as function of frequency, unit
is in samples.
[1] Richard G. Lyons, "Understanding Digital Signal Processing, 3rd edition", p. 830.
"""
impulse_response = filter_impulse_response(sos_or_fir_coef, N, sos=sos)
k = np.arange(N)
fft_gd = np.real(fft(k * impulse_response) / fft(impulse_response))[0 : N // 2]
omega = (fftfreq(N) * 2 * np.pi)[0 : N // 2]
if fs is not None:
freq = omega_to_f(omega, fs)[0 : N // 2]
return freq, fft_gd
else:
return omega, fft_gd
| 5,337,911
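A self-contained sketch of the FFT-based group-delay estimate from [1]: since `filter_impulse_response` and `omega_to_f` are not shown, the impulse response is computed directly with SciPy here, so this illustrates the method rather than the original code path.

import numpy as np
from numpy.fft import fft, fftfreq
from scipy import signal

N = 2048
sos = signal.butter(4, 0.2, output="sos")  # example IIR filter in second-order sections
impulse = np.zeros(N)
impulse[0] = 1.0
h = signal.sosfilt(sos, impulse)           # length-N impulse response

k = np.arange(N)
group_delay = np.real(fft(k * h) / fft(h))[: N // 2]  # group delay in samples
omega = (fftfreq(N) * 2 * np.pi)[: N // 2]            # rad/sample, from 0 to pi
print(omega.shape, group_delay.shape)                 # (1024,) (1024,)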
|
def plotmodel(axs, m, x, z, vmin, vmax,
params=('VP', 'VS', 'Rho'),
cmap='gist_rainbow', title=None):
"""Quick visualization of model
"""
for ip, param in enumerate(params):
axs[ip].imshow(m[:, ip],
extent=(x[0], x[-1], z[-1], z[0]),
vmin=vmin, vmax=vmax, cmap=cmap)
axs[ip].set_title('%s - %s' %(param, title))
axs[ip].axis('tight')
plt.setp(axs[1].get_yticklabels(), visible=False)
plt.setp(axs[2].get_yticklabels(), visible=False)
| 5,337,912
|
def create_custom_job(
type,
gcp_project,
gcp_region,
payload,
gcp_resources,
):
"""
Create and poll custom job status till it reaches a final state.
This follows the typical launching logic
1. Read if the custom job already exists in gcp_resources
- If already exists, jump to step 3 and poll the job status. This happens if the
launcher container experienced unexpected termination, such as preemption
2. Deserialize the payload into the job spec and create the custom job.
3. Poll the custom job status every _POLLING_INTERVAL_IN_SECONDS seconds
- If the custom job is succeeded, return succeeded
- If the custom job is cancelled/paused, it's an unexpected scenario so return failed
- If the custom job is running, continue polling the status
Also retry on ConnectionError up to _CONNECTION_ERROR_RETRY_LIMIT times during the poll.
"""
client_options = {"api_endpoint": gcp_region + '-aiplatform.googleapis.com'}
client_info = gapic_v1.client_info.ClientInfo(
user_agent="google-cloud-pipeline-components",
)
# Initialize client that will be used to create and send requests.
job_client = aiplatform.gapic.JobServiceClient(
client_options=client_options,
client_info=client_info
)
# Check if the Custom job already exists
if path.exists(gcp_resources) and os.stat(gcp_resources).st_size != 0:
with open(gcp_resources) as f:
custom_job_name = f.read()
logging.info(
'CustomJob name already exists: %s. Continue polling the status',
custom_job_name
)
else:
parent = f"projects/{gcp_project}/locations/{gcp_region}"
job_spec = json.loads(payload, strict=False)
create_custom_job_response = job_client.create_custom_job(
parent=parent, custom_job=job_spec
)
custom_job_name = create_custom_job_response.name
# Write the job id to output
with open(gcp_resources, 'w') as f:
f.write(custom_job_name)
# Poll the job status
get_custom_job_response = job_client.get_custom_job(name=custom_job_name)
retry_count = 0
while get_custom_job_response.state not in _JOB_COMPLETE_STATES:
time.sleep(_POLLING_INTERVAL_IN_SECONDS)
try:
get_custom_job_response = job_client.get_custom_job(
name=custom_job_name
)
logging.info(
'GetCustomJob response state =%s', get_custom_job_response.state
)
retry_count = 0
# Handle transient connection error.
except ConnectionError as err:
retry_count += 1
if retry_count < _CONNECTION_ERROR_RETRY_LIMIT:
logging.warning(
'ConnectionError (%s) encountered when polling job: %s. Trying to '
'recreate the API client.', err, custom_job_name
)
# Recreate the Python API client.
job_client = aiplatform.gapic.JobServiceClient(
client_options=client_options
)
else:
logging.error(
'Request failed after %s retries.',
_CONNECTION_ERROR_RETRY_LIMIT
)
raise
if get_custom_job_response.state in _JOB_ERROR_STATES:
raise RuntimeError(
"Job failed with:\n%s" % get_custom_job_response.state
)
else:
logging.info(
'CustomJob %s completed with response state =%s', custom_job_name,
get_custom_job_response.state
)
| 5,337,913
|
def query():
"""Perform a query on the dataset, where the search terms are given by the saleterm parameter"""
# If redis hasn't been populated, stick some tweet data into it.
if redis_db.get("tweet_db_status") != "loaded":
tweet_scraper.add_tweets(default_num_tweets_to_try)
sale_term = request.form['saleterm']
subterms = re.split(r'\W+', sale_term)
saleterm_keys = ['saleterm-{}'.format(w) for w in subterms if len(w) > 1]
result_dict = {}
num_tweets = 0
if saleterm_keys:
common_tweet_ids = redis_db.sinter(saleterm_keys)
if common_tweet_ids:
result_dict['tweets'] = [redis_db.hgetall(tweet_id) for tweet_id in common_tweet_ids]
num_tweets = len(common_tweet_ids)
result_dict['num_tweets'] = num_tweets
result_dict['saleterm'] = sale_term
resp = jsonify(result_dict)
resp.status_code = 200
return resp
| 5,337,914
|
def _set_random_seed(seed, deterministic=False):
"""Set random seed.
Args:
seed (int): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Default: False.
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
logger.info(f'Training seed was set to {seed} w/ deterministic={deterministic}.')
if deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
| 5,337,915
|
def get_spec_res(z=2.2, spec_res=2.06, pix_size=1.8):
""" Calculates the pixel size (pix_size) and spectral resolution (spec_res) in
km/s for the MOCK SPECTRA.
arguments: z, redshift. spec_res, spectral resolution in Angstrom. pix_size,
pixel size in Angstrom.
returns:
(pix_size, spec_res) in km/s
"""
# conversion factor from Angstrom to km/s at any redshift
conv_fac = 3e5*0.000823/(1+z)
return(pix_size*conv_fac, spec_res*conv_fac)
| 5,337,916
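The constant 0.000823 is close to 1/1215.67, so the conversion factor reads as c [km/s] divided by the redshifted Lyman-alpha wavelength in Angstrom; that interpretation is an assumption, not stated in the docstring. A quick check (assuming `get_spec_res` is in scope):

import numpy as np

z = 2.2
conv_fac = 3e5 * 0.000823 / (1 + z)
# Assumption: 0.000823 ~ 1/1215.67 (Lyman-alpha rest wavelength in Angstrom)
assert np.isclose(conv_fac, 3e5 / (1215.67 * (1 + z)), rtol=1e-3)
pix_kms, res_kms = get_spec_res(z=z)
print(round(pix_kms, 2), round(res_kms, 2))  # pixel size and resolution in km/s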
|
def _process_general_config(config: ConfigType) -> ConfigType:
"""Process the `general` section of the config
Args:
config (ConfigType): Config object
Returns:
[ConfigType]: Processed config
"""
general_config = deepcopy(config.general)
general_config.id = general_config.id.replace("/", "_")
if not general_config.commit_id:
general_config.commit_id = utils.get_current_commit_id()
if not general_config.date:
general_config.date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
slurm_id = []
env_var_names = ["SLURM_JOB_ID", "SLURM_STEP_ID"]
for var_name in env_var_names:
if var_name in os.environ:
slurm_id.append(str(os.environ[var_name]))
if slurm_id:
general_config.slurm_id = "-".join(slurm_id)
config.general = general_config
return config
| 5,337,917
|
def api_response(response):
"""Response generation for ReST API calls"""
# Errors present
if response.message:
messages = response.message
if not isinstance(messages, list):
messages = [messages]
# Report the errors
return Response({'errors': messages}, status=status.HTTP_400_BAD_REQUEST)
# All OK
return Response(response.data) if not isinstance(response.data, HttpResponseBase) else response.data
| 5,337,918
|
def batch_norm(name, inpvar, decay=0.9, epsilon=1e-5, use_affine=True, param_dtype=__default_dtype__):
"""
Batch normalization.
:param name: operator name
:param inpvar: input tensor, in NHWC layout
:param decay: decay for moving average
:param epsilon: epsilon
:param use_affine: add affine transformation after the normalization (to preserve the bias and scale)
:param param_dtype: param dtype
:return: output tensor
"""
from tensorflow.python.training import moving_averages
assign_moving_average = moving_averages.assign_moving_average
inpvar = as_varnode(inpvar)
shape = inpvar.static_shape
assert len(shape) in [2, 4]
nr_channels = shape[-1]
if len(shape) == 2:
inpvar = inpvar.reshape(-1, 1, 1, nr_channels)
if use_affine:
beta = O.variable('beta', tf.constant_initializer(), shape=[nr_channels], dtype=param_dtype)
gamma = O.variable('gamma', tf.constant_initializer(1.0), shape=[nr_channels], dtype=param_dtype)
else:
beta = O.zeros([nr_channels], name='beta')
gamma = O.ones([nr_channels], name='gamma')
moving_mean = O.variable('mean/ema', tf.constant_initializer(), shape=[nr_channels], trainable=False)
moving_var = O.variable('variance/ema', tf.constant_initializer(1.0), shape=[nr_channels], trainable=False)
env = get_default_env()
if env.flags.compute_update_batch_normalization(name):
xn, batch_mean, batch_var = tf.nn.fused_batch_norm(inpvar, gamma, beta, epsilon=epsilon, is_training=True, name='bn')
else:
xn = tf.nn.batch_normalization(inpvar, moving_mean, moving_var, beta, gamma, variance_epsilon=epsilon, name='bn')
if len(shape) == 2:
xn = O.remove_axis(xn, [1, 2])
if env.flags.compute_update_batch_normalization(name) and \
(not env.has_current_dpc() or env.current_dpc.is_master_device):
update_mean_op = assign_moving_average(moving_mean.impl, batch_mean, decay, zero_debias=False, name='mean_ema_op')
update_var_op = assign_moving_average(moving_var.impl, batch_var, decay, zero_debias=False, name='var_ema_op')
with tf.control_dependencies([update_mean_op, update_var_op]):
return tf.identity(xn, name='out')
else:
return tf.identity(xn, name='out')
| 5,337,919
|
def get_engine(onnx_file_path, engine_file_path="", input_shapes=((1, 3, 640, 640),), force_rebuild=False):
"""Attempts to load a serialized engine if available, otherwise builds a new TensorRT engine and saves it."""
assert len(input_shapes) in [1, 3], 'length of input_shapes should be 1 or 3, 3 for dynamic input size, got {}'.format(len(input_shapes))
def build_engine():
"""Takes an ONNX file and creates a TensorRT engine to run inference with"""
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER) as parser, builder.create_builder_config() as config:
builder.strict_type_constraints = True
# builder.max_workspace_size = 1 << 30 # deprecated, use config to set max_workspace_size
# builder.fp16_mode = True # deprecated, use config to set FP16 mode
# builder.max_batch_size = 1 # deprecated, use EXPLICIT_BATCH
config.set_flag(trt.BuilderFlag.FP16)
config.max_workspace_size=GiB(1)
# Parse model file
# Try to load a previously generated graph in ONNX format:
if not os.path.exists(onnx_file_path):
print('ONNX file {} not found, please generate it first.'.format(
onnx_file_path))
exit(0)
print('Loading ONNX file from path {}...'.format(onnx_file_path))
with open(onnx_file_path, 'rb') as model:
print('Beginning ONNX file parsing')
if not parser.parse(model.read()):
print('ERROR: Failed to parse the ONNX file.')
for error in range(parser.num_errors):
print(parser.get_error(error))
exit(0)
# Reference: https://blog.csdn.net/weixin_43953045/article/details/103937295
last_layer = network.get_layer(network.num_layers - 1)
if not last_layer.get_output(0):
network.mark_output(last_layer.get_output(0))
print("input shape {}".format(network.get_input(0).shape))
network.get_input(0).shape = [1, 3, -1, -1] if len(input_shapes) != 1 else input_shapes[0]
print('Completed parsing of ONNX file')
print('Building an engine from file {}; this may take a while...'.format(onnx_file_path))
# ######################## SET DYNAMIC INPUT SHAPE #################################
if len(input_shapes) == 3:
profile = builder.create_optimization_profile()
profile.set_shape(network.get_input(0).name, *input_shapes)
config.add_optimization_profile(profile)
engine = builder.build_engine(network, config)
else:
engine = builder.build_cuda_engine(network)
# ########################################################
print("Completed creating Engine")
with open(engine_file_path, "wb") as f:
f.write(engine.serialize())
return engine
# return build_engine()
if os.path.exists(engine_file_path) and not force_rebuild:
# If a serialized engine exists, use it instead of building an engine.
print("Reading engine from file {}".format(engine_file_path))
with open(engine_file_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
return runtime.deserialize_cuda_engine(f.read())
else:
return build_engine()
| 5,337,920
|
def describe_adjustment_types():
"""
Describes the available adjustment types for Amazon EC2 Auto Scaling scaling policies. These settings apply to step scaling policies and simple scaling policies; they do not apply to target tracking scaling policies.
The following adjustment types are supported:
See also: AWS API Documentation
Exceptions
Examples
This example describes the available adjustment types.
Expected Output:
:example: response = client.describe_adjustment_types()
:rtype: dict
ReturnsResponse Syntax{
'AdjustmentTypes': [
{
'AdjustmentType': 'string'
},
]
}
Response Structure
(dict) --
AdjustmentTypes (list) --The policy adjustment types.
(dict) --Describes a policy adjustment type.
AdjustmentType (string) --The policy adjustment type. The valid values are ChangeInCapacity , ExactCapacity , and PercentChangeInCapacity .
Exceptions
AutoScaling.Client.exceptions.ResourceContentionFault
Examples
This example describes the available adjustment types.
response = client.describe_adjustment_types(
)
print(response)
Expected Output:
{
'AdjustmentTypes': [
{
'AdjustmentType': 'ChangeInCapacity',
},
{
'AdjustmentType': 'ExactCapacity',
},
{
'AdjustmentType': 'PercentChangeInCapacity',
},
],
'ResponseMetadata': {
'...': '...',
},
}
:return: {
'AdjustmentTypes': [
{
'AdjustmentType': 'string'
},
]
}
:returns:
AutoScaling.Client.exceptions.ResourceContentionFault
"""
pass
| 5,337,921
|
def test_empty_directories_non_current(empty_directory):
"""Find empty directory.
See https://docs.python.org/3/library/os.html#os.listdir
"""
for instance in empty_directory:
with mock.patch("os.walk", return_value=instance):
album_path = instance[0][0]
with mock.patch("os.listdir", return_value=[album_path]):
result = teeb.find.empty_directories(album_path)
assert result
| 5,337,922
|
def _window_when(closing_mapper: Callable[[], Observable]) -> Callable[[Observable], Observable]:
"""Projects each element of an observable sequence into zero or
more windows.
Args:
closing_mapper: A function invoked to define the closing of each produced window.
Returns:
An observable sequence of windows.
"""
def window_when(source: Observable) -> Observable:
def subscribe(observer, scheduler=None):
m = SerialDisposable()
d = CompositeDisposable(m)
r = RefCountDisposable(d)
window = Subject()
observer.on_next(add_ref(window, r))
def on_next(value):
window.on_next(value)
def on_error(error):
window.on_error(error)
observer.on_error(error)
def on_completed():
window.on_completed()
observer.on_completed()
d.add(source.subscribe_(on_next, on_error, on_completed, scheduler))
def create_window_on_completed():
try:
window_close = closing_mapper()
except Exception as exception:
observer.on_error(exception)
return
def on_completed():
nonlocal window
window.on_completed()
window = Subject()
observer.on_next(add_ref(window, r))
create_window_on_completed()
m1 = SingleAssignmentDisposable()
m.disposable = m1
m1.disposable = window_close.pipe(ops.take(1)).subscribe_(noop, on_error, on_completed, scheduler)
create_window_on_completed()
return r
return Observable(subscribe)
return window_when
| 5,337,923
|
def generate_features(df):
"""Generate features for a stock/index based on historical price and performance
Args:
df(dataframe with columns "Open", "Close", "High", "Low", "Volume", "Adjusted Close")
Returns:
dataframe, data set with new features
"""
df_new = pd.DataFrame()
# 6 original features
df_new['open'] = df['Open']
df_new['open_1'] = df['Open'].shift(1)
# Shift index by 1, in order to take the value of previous day. For example, [1, 3, 4, 2] -> [N/A, 1, 3, 4]
df_new['close_1'] = df['Close'].shift(1)
df_new['high_1'] = df['High'].shift(1)
df_new['low_1'] = df['Low'].shift(1)
df_new['volume_1'] = df['Volume'].shift(1)
# 31 generated features
# average price
df_new['avg_price_5'] = df['Close'].rolling(5).mean().shift(1)
df_new['avg_price_30'] = df['Close'].rolling(21).mean().shift(1)
df_new['avg_price_365'] = df['Close'].rolling(252).mean().shift(1)
df_new['ratio_avg_price_5_30'] = df_new['avg_price_5'] / df_new['avg_price_30']
df_new['ratio_avg_price_5_365'] = df_new['avg_price_5'] / df_new['avg_price_365']
df_new['ratio_avg_price_30_365'] = df_new['avg_price_30'] / df_new['avg_price_365']
# average volume
df_new['avg_volume_5'] = df['Volume'].rolling(5).mean().shift(1)
df_new['avg_volume_30'] = df['Volume'].rolling(21).mean().shift(1)
df_new['avg_volume_365'] = df['Volume'].rolling(252).mean().shift(1)
df_new['ratio_avg_volume_5_30'] = df_new['avg_volume_5'] / df_new['avg_volume_30']
df_new['ratio_avg_volume_5_365'] = df_new['avg_volume_5'] / df_new['avg_volume_365']
df_new['ratio_avg_volume_30_365'] = df_new['avg_volume_30'] / df_new['avg_volume_365']
# standard deviation of prices
df_new['std_price_5'] = df['Close'].rolling(5).std().shift(1)
# rolling(window).std() calculates the moving standard deviation over the given window
df_new['std_price_30'] = df['Close'].rolling(21).std().shift(1)
df_new['std_price_365'] = df['Close'].rolling(252).std().shift(1)
df_new['ratio_std_price_5_30'] = df_new['std_price_5'] / df_new['std_price_30']
df_new['ratio_std_price_5_365'] = df_new['std_price_5'] / df_new['std_price_365']
df_new['ratio_std_price_30_365'] = df_new['std_price_30'] / df_new['std_price_365']
# standard deviation of volumes
df_new['std_volume_5'] = df['Volume'].rolling(5).std().shift(1)
df_new['std_volume_30'] = df['Volume'].rolling(21).std().shift(1)
df_new['std_volume_365'] = df['Volume'].rolling(252).std().shift(1)
df_new['ratio_std_volume_5_30'] = df_new['std_volume_5'] / df_new['std_volume_30']
df_new['ratio_std_volume_5_365'] = df_new['std_volume_5'] / df_new['std_volume_365']
df_new['ratio_std_volume_30_365'] = df_new['std_volume_30'] / df_new['std_volume_365']
# return
df_new['return_1'] = ((df['Close'] - df['Close'].shift(1)) / df['Close'].shift(1)).shift(1)
df_new['return_5'] = ((df['Close'] - df['Close'].shift(5)) / df['Close'].shift(5)).shift(1)
df_new['return_30'] = ((df['Close'] - df['Close'].shift(21)) / df['Close'].shift(21)).shift(1)
df_new['return_365'] = ((df['Close'] - df['Close'].shift(252)) / df['Close'].shift(252)).shift(1)
df_new['moving_avg_5'] = df_new['return_1'].rolling(5).mean()
df_new['moving_avg_30'] = df_new['return_1'].rolling(21).mean()
df_new['moving_avg_365'] = df_new['return_1'].rolling(252).mean()
# the target
df_new['close'] = df['Close']
df_new = df_new.dropna(axis=0)
# This will drop rows with any N/A value, which is by-product of moving average/std.
return df_new
| 5,337,924
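A usage sketch of `generate_features` on synthetic OHLCV data (in practice the input would be real price history with the same column names); note how `dropna` removes the rows that the 252-day rolling windows cannot fill.

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
n = 300
close = 100 + np.cumsum(rng.normal(0, 1, n))
df = pd.DataFrame({
    "Open": close + rng.normal(0, 0.5, n),
    "High": close + 1.0,
    "Low": close - 1.0,
    "Close": close,
    "Volume": rng.integers(1_000, 10_000, n).astype(float),
})
features = generate_features(df)
print(features.shape)  # far fewer rows than n, because the 252-day windows need history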
|
def assert_equal(left, right):
"""compare two terms and display them when unequal
"""
try:
assert left == right
except AssertionError:
print(left)
print(' is not the same as')
print(right)
raise
| 5,337,925
|
def format_plate(barcode: str) -> Dict[str, Union[str, bool, Optional[int]]]:
"""Used by flask route /plates to format each plate. Determines whether there is sample data for the barcode and if
so, how many samples meet the fit to pick rules.
Arguments:
barcode (str): barcode of plate to get sample information for.
Returns:
Dict[str, Union[str, bool, Optional[int]]]: sample information for the plate barcode
"""
logger.info(f"Getting information for plate with barcode: {barcode}")
(
fit_to_pick_samples,
count_fit_to_pick_samples,
count_must_sequence,
count_preferentially_sequence,
count_filtered_positive,
) = get_fit_to_pick_samples_and_counts(barcode)
return {
"plate_barcode": barcode,
"has_plate_map": fit_to_pick_samples is not None and len(fit_to_pick_samples) > 0,
"count_fit_to_pick_samples": count_fit_to_pick_samples if count_fit_to_pick_samples is not None else 0,
"count_must_sequence": count_must_sequence if count_must_sequence is not None else 0,
"count_preferentially_sequence": count_preferentially_sequence
if count_preferentially_sequence is not None
else 0,
"count_filtered_positive": count_filtered_positive if count_filtered_positive is not None else 0,
}
| 5,337,926
|
def method_only_in(*states):
"""
Checks if function has a MethodMeta representation, calls wrap_method to
create one if it doesn't and then adds only_in to it from *states
Args:
*states (list): List of state names, like DefaultStateMachine.RESETTING
Returns:
function: Updated function
"""
def decorator(func):
if not hasattr(func, "MethodMeta"):
MethodMeta.wrap_method(func)
func.MethodMeta.only_in = states
return func
return decorator
| 5,337,927
|
def restart():
"""
Restart gunicorn worker processes for the project.
If the processes are not running, they will be started.
"""
pid_path = "%s/gunicorn.pid" % env.proj_path
if exists(pid_path):
run("supervisorctl restart gunicorn_%s" % env.proj_name)
else:
run("supervisorctl update")
| 5,337,928
|
def to_igraph(adjacency_matrix:Image, centroids:Image=None):
"""
Converts a given adjacency matrix to an igraph [1] graph data structure.
Note: the given centroids typically have one entry less than the adjacency matrix is wide, because
those matrices contain a first row and column representing background. When exporting the igraph
graph, that first row and column will be ignored.
Parameters
----------
adjacency_matrix : Image
m*m touch-matrix, proximal-neighbor-matrix or n-nearest-neighbor-matrix
centroids : Image, optional
d*(m-1) matrix, position list of centroids
Returns
-------
iGraph graph
See Also
--------
..[1] https://igraph.org/
"""
try:
import igraph
except ImportError:
raise ImportError("igraph is not installed. Please refer to the documentation https://igraph.org/python/")
igraph_graph = igraph.Graph(adjacency_matrix.shape[0] - 1)
edge_list = np.nonzero(np.asarray(adjacency_matrix)[1:,1:])
igraph_graph.add_edges(np.asarray(edge_list).T)
if centroids is not None:
igraph_graph.vs['x'] = centroids[0]
igraph_graph.vs['y'] = centroids[1]
if centroids.shape[0] > 2: # 3D data
igraph_graph.vs['z'] = centroids[2]
return igraph_graph
| 5,337,929
|
def Laplacian(src, ddepth, dst=None, ksize=1, scale=1, delta=0, borderType=cv2.BORDER_DEFAULT):
"""dst = cv.Laplacian( src, ddepth[, dst[, ksize[, scale[, delta[, borderType]]]]] )
Executes the Laplacian operator on hardware if input parameters fit to hardware constraints.
Otherwise the OpenCV Laplacian function is called."""
if (ksize == 1 or ksize ==3 or ksize == 5) and (scale == 1) and (delta == 0) and (borderType == cv2.BORDER_DEFAULT) :
if (src.dtype == np.uint8) and (src.ndim == 2) :
if (src.shape[0] <= cv2pynq.MAX_HEIGHT) and (src.shape[0] > 0) and (src.shape[1] <= cv2pynq.MAX_WIDTH) and (src.shape[1] > 0) :
if (ddepth == -1) :
return c.Laplacian(src, ddepth, dst, ksize)
return cv2.Laplacian(src, ddepth, dst, ksize, scale, delta, borderType)
| 5,337,930
|
def get_docptr(n_dw_matrix):
"""
Parameters
----------
n_dw_matrix: array-like
Returns
-------
np.array
row indices for the provided matrix
"""
return _get_docptr(n_dw_matrix.shape[0], n_dw_matrix.indptr)
| 5,337,931
|
def check_if_shift_v0(data, column_name, start_index, end_index, check_period):
""" using median to see if it changes significantly in shift """
period_before = data[column_name][start_index - check_period: start_index]
period_in_the_middle = data[column_name][start_index:end_index]
period_after = data[column_name][end_index: end_index + check_period]
period_before_median = abs(np.nanmedian(period_before))
period_in_the_middle_median = abs(np.nanmedian(period_in_the_middle))
period_after_median = abs(np.nanmedian(period_after))
upper_threshold = period_in_the_middle_median * 2
down_threshold = period_in_the_middle_median / 2
if (upper_threshold < period_before_median and upper_threshold < period_after_median) or\
(down_threshold > period_before_median and down_threshold > period_after_median):
return True
else:
return False
| 5,337,932
|
def opening2d(value, kernel, stride=1, padding="SAME"):
"""
erode and then dilate
Parameters
----------
value : Tensor
4-D with shape [batch, in_height, in_width, depth].
kernel : Tensor
Must have the same type as 'value'. 3-D with shape '[kernel_height, kernel_width, depth]'
stride : int
The stride of the sliding window for the spatial dimensions '[1, 2]' of the input tensor.
padding : string
from '"SAME", "VALID"'. The type of padding algorithm to use.
Returns
-------
out : tensor
opened output
"""
strides = [1, stride, stride, 1]
rates = [1, 1, 1, 1]
out = tf.nn.erosion2d(value, kernel, strides, rates, padding)
out = tf.nn.dilation2d(out, kernel, strides, rates, padding)
return out
| 5,337,933
|
def convert_npy_mat(user_num, item_num, df):
"""
method of convert dataframe to numpy matrix
Parameters
----------
user_num : int, the number of users
item_num : int, the number of items
df : pd.DataFrame, rating dataframe
Returns
-------
mat : np.ndarray, rating matrix
"""
mat = np.zeros((user_num, item_num))
for _, row in df.iterrows():
u, i, r = row['user'], row['item'], row['rating']
mat[int(u), int(i)] = float(r)
return mat
| 5,337,934
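A tiny usage example of `convert_npy_mat`, plus a vectorized equivalent, since `iterrows` becomes slow on large rating frames (the example frame is hypothetical).

import numpy as np
import pandas as pd

ratings = pd.DataFrame({"user": [0, 0, 2], "item": [1, 3, 0], "rating": [4.0, 2.5, 5.0]})
mat = convert_npy_mat(user_num=3, item_num=4, df=ratings)

# Vectorized equivalent: index the matrix with the user/item columns directly.
fast = np.zeros((3, 4))
fast[ratings["user"].to_numpy(int), ratings["item"].to_numpy(int)] = ratings["rating"].to_numpy(float)
assert np.array_equal(mat, fast)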
|
def split_kernel2(S, r, out):
"""
:param S: B x NY x NX x 2
:param r: K x2
:param out: K x MY x MX x 2
:return:
"""
n = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
K, MY, MX, B, _ = out.shape
N = K * MY * MX * B
k = n // (B)
b = (n - k * B)
if n < N:
for my in range(MY):
for mx in range(MX):
y = r[k, 0]
x = r[k, 1]
out[k, my, mx, b, 0] = S[b, y + my, x + mx, 0]
out[k, my, mx, b, 1] = S[b, y + my, x + mx, 1]
| 5,337,935
|
def user_enter_state_change_response():
"""
Prompts the user to enter a key event response.
nothing -> str
"""
return input('>> ')
| 5,337,936
|
def test_simulate_outbreaks(SimulationAlgo, shared_datadir):
"""Test against changes in simulation behavior."""
simulation_model = SimulationAlgo(seed=1)
if "state_weight" in inspect.signature(simulation_model.simulate).parameters:
simulated = simulation_model.simulate(length=100, state_weight=1)
else:
simulated = simulation_model.simulate(length=100)
saved_simulation = load_simulations(
shared_datadir / f"{SimulationAlgo.__name__}_simulation.csv"
)
assert_frame_equal(simulated, saved_simulation)
| 5,337,937
|
def call_worker(job_spec):
"""Calls command `cron_worker run <job_spec>` and parses the output"""
output = call_command("cron_worker", "run", job_spec)
status = exc_class_name = exc_message = None
if output:
result_match = RESULT_PATTERN.match(output)
if result_match:
status = result_match.group("status")
else:
exc_match = EXCEPTION_PATTERN.match(output)
if exc_match:
exc_class_name = exc_match.group("exc_class_name")
exc_message = exc_match.group("exc_message")
ok = status == "OK"
return CronWorkerRunResult(output, status, exc_class_name, exc_message, ok)
| 5,337,938
|
def overlap(a, b):
"""check if two intervals overlap.
Positional arguments:
a -- First interval.
b -- Second interval.
"""
return a[1] > b[0] and a[0] < b[1]
| 5,337,939
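Usage of `overlap`: the strict inequalities mean that intervals sharing only an endpoint do not count as overlapping.

assert overlap((0, 2), (1, 3))      # partial overlap
assert not overlap((0, 2), (2, 3))  # merely touching at an endpoint
assert not overlap((0, 1), (5, 6))  # disjoint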
|
def contAvg_headpos(condition, method='median', folder=[], summary=False):
"""
Calculate average transformation from dewar to head coordinates, based
on the continuous head position estimated from MaxFilter
Parameters
----------
condition : str
String containing part of common filename, e.g. "task" for files
task-1.fif, task-2.fif, etc. Consistent naming of files is mandatory!
method : str
How to calculate the average: "mean" or "median" (default = "median")
folder : str
Path to input files. Default = current dir.
Returns
-------
MNE-Python transform object
4x4 transformation matrix
"""
# Check that the method works
method = method.lower()
if method not in ['median','mean']:
raise RuntimeError('Wrong method. Must be either \"mean\" or "median"!')
if not condition:
raise RuntimeError('You must provide a condition!')
# Get and set folders
if not folder:
rawdir = getcwd() # [!] Match up with bash script !
else:
rawdir = folder
print(rawdir)
quatdir = op.join(rawdir,'quat_files')
mean_trans_folder = op.join(rawdir, 'trans_files')
if not op.exists(mean_trans_folder): # Make sure output folder exists
mkdir(mean_trans_folder)
mean_trans_file = op.join(mean_trans_folder, condition+'-trans.fif')
if op.isfile(mean_trans_file):
warnings.warn('"%s" already exists in %s. Delete it if you want to rerun.' % (mean_trans_file, mean_trans_folder), RuntimeWarning)
return
# Change to subject dir
files2combine = find_condition_files(quatdir, condition)
files2combine.sort()
if not files2combine:
raise RuntimeError('No files called \"%s\" found in %s' % (condition, quatdir))
allfiles = []
for ff in files2combine:
fl = ff.split('_')[0]
tmplist = [f for f in listdir(quatdir) if fl in f and '_quat' in f]
#Fix order
if len(tmplist) > 1:
tmplist.sort()
if any("-" in f for f in tmplist):
firstfile = tmplist[-1] # The file without a number will always be last!
tmpfs = sorted(tmplist[:-1], key=lambda a: int(re.split('-|.fif', a)[-2]) ) # Assuming consistent naming!!!
tmplist[0] = firstfile
tmplist[1:] = tmpfs
allfiles = allfiles + tmplist
if len(allfiles) > 1:
print('Files used for average head pos:')
for ib in range(len(allfiles)):
print('{:d}: {:s}'.format(ib + 1, allfiles[ib]))
else:
print('Will find average head pos in %s' % files2combine)
# LOAD DATA
# raw = read_raw_fif(op.join(quatdir,firstfile), preload=True, allow_maxshield=True, verbose=False).pick_types(meg=False, chpi=True)
# Use files2combine instead of allfiles as MNE will find split files automatically.
for idx, ffs in enumerate(files2combine):
if idx == 0:
raw = read_raw_fif(op.join(quatdir,ffs), preload=True, allow_maxshield=True).pick_types(meg=False, chpi=True)
else:
raw.append(read_raw_fif(op.join(quatdir,ffs), preload=True, allow_maxshield=True).pick_types(meg=False, chpi=True))
quat, times = raw.get_data(return_times=True)
gof = quat[6,] # Goodness of fit channel
# fs = raw.info['sfreq']
# In case "record raw" started before "cHPI"
if np.any(gof < 0.98):
begsam = np.argmax(gof>0.98)
raw.crop(tmin=raw.times[begsam])
quat = quat[:,begsam:].copy()
times = times[begsam:].copy()
# Make summaries
if summary:
plot_movement(quat, times, dirname=rawdir, identifier=condition)
total_dist_moved(quat, times, write=True, dirname=rawdir, identifier=condition)
# Get continous transformation
print('Reading transformation. This will take a while...')
H = np.empty([4,4,len(times)]) # Initiate transforms
init_rot_angles = np.empty([len(times),3])
for i,t in enumerate(times):
Hi = np.eye(4,4)
Hi[0:3,3] = quat[3:6,i].copy()
Hi[:3,:3] = quat_to_rot(quat[0:3,i])
init_rot_angles[i,:] = rotation_angles(Hi[:3,:3])
assert(np.sum(Hi[-1]) == 1.0) # sanity check result
H[:,:,i] = Hi.copy()
if method in ["mean"]:
H_mean = np.mean(H, axis=2) # stack, then average over new dim
mean_rot_xfm = rotation3d(*tuple(np.mean(init_rot_angles, axis=0))) # stack, then average, then make new xfm
elif method in ["median"]:
H_mean = np.median(H, axis=2) # stack, then average over new dim
mean_rot_xfm = rotation3d(*tuple(np.median(init_rot_angles, axis=0))) # stack, then average, then make new xfm
H_mean[:3,:3] = mean_rot_xfm
assert(np.sum(H_mean[-1]) == 1.0) # sanity check result
# Create the mean structure and save as .fif
mean_trans = raw.info['dev_head_t'] # use the last info as a template
mean_trans['trans'] = H_mean.copy()
# Write file
write_trans(mean_trans_file, mean_trans)
print("Wrote "+mean_trans_file)
return mean_trans
| 5,337,940
|
def Scan_SlitSize(slit,start,stop,step,setslit=None,scanIOC=None,scanDIM=1,**kwargs):
"""
Scans the slit size:
slit='1H','1V','2H' or '2V'
Slit 1A is set to (1.75,1.75,0,0) unless setslit= not None
Logging is automatic: use **kwargs or the optional logging arguments see scanlog() for details
default: scanDIM=1
"""
if setslit is None:
SetSlit1A(1.75,1.75,0,0) #why is that?
#SetSlit1A(4.5,4.5,0,0,'q') # would open all the way?
#SetSlit2B(6.0,8.0,0,0,'q')
if scanIOC is None:
scanIOC = BL_ioc()
VAL='29idb:Slit'+slit+'size.VAL'
RBV='29idb:Slit'+slit+'t2.C'
Scan_FillIn(VAL,RBV,scanIOC,scanDIM,start,stop,step)
Scan_Go(scanIOC,scanDIM=scanDIM,**kwargs)
| 5,337,941
|
def getconstantfunc(name, **kwargs):
"""Get constants from file by name."""
from . import __path__ as path
from numpy import fromfile
from os.path import join
from os import listdir
path = path[0]
if not name in listdir(path):
from ..IO.output import printError
printError("File {0} not exists.".format(name))
raise ValueError("File {0} not exists.".format(name))
temp = fromfile(join(path, name))
return temp
| 5,337,942
|
def scalar_prod_logp0pw_beta_basis_npf(pw, p0, DV, alpha):
"""
From normalized p_fact
Args:
pw: a batch of probabilities (row:word, column:chi)
DV: centered statistics (for p0, to be consistent)
p0: the central probability on which tangent space to project (row vector)
alpha: the value of alpha
Returns:
scalar product between Logmaps of each point in the batch and the basis of the tangent space
.. math:: \left\langle \mathrm{Log}^{(\alpha)}_{p_0} p_w, \beta_i^{(\alpha)} \right\rangle_{\mathbb{R}^n_{(\alpha)}}
"""
p_fact_normalized, l_scale = get_norm_p_fact(p0, pw, alpha)
ldv_alpha = np.matmul(p_fact_normalized, DV)
return ldv_alpha, l_scale
| 5,337,943
|
def get_seminars() -> List[Tuple[str, str, datetime, str]]:
"""
Returns summary information for upcoming ITEE seminars, comprising
seminar date, seminar title, venue, and an information link.
"""
html = BeautifulSoup(get_seminar_summary_page(), 'html.parser')
summary_table = html.find('table', summary='ITEE Seminar List')
if (summary_table is None) or (summary_table.tbody is None):
# When no seminars are scheduled, no table is shown.
return []
seminar_rows = summary_table.tbody.find_all('tr')
seminar_summaries = map(get_seminar_summary, seminar_rows)
return list(seminar_summaries)
| 5,337,944
|
def cluster_hierarchically(active_sites,num_clusters=7):
"""
Cluster the given set of ActiveSite instances using a hierarchical algorithm.
Input: a list of ActiveSite instances
(OPTIONAL): number of clusters (default 7)
Output: a list of clusterings
(each clustering is a list of lists of ActiveSite instances)
"""
labels = centroid_linkage(active_sites,num_clusters)
clustering = []
for clust in np.unique(labels):
clustering.append([active_sites[ind] for ind, val in enumerate(labels.tolist()) if val == clust])
return clustering
| 5,337,945
|
def test_shap_rfe_group_cv(X, y, groups, sample_weight, capsys):
"""
Test ShapRFECV with StratifiedGroupKFold.
"""
clf = DecisionTreeClassifier(max_depth=1, random_state=1)
cv = StratifiedGroupKFold(n_splits=2, shuffle=True, random_state=1)
with pytest.warns(None) as record:
shap_elimination = ShapRFECV(
clf,
random_state=1,
step=1,
cv=cv,
scoring="roc_auc",
n_jobs=4,
)
shap_elimination = shap_elimination.fit(
X, y, groups=groups, sample_weight=sample_weight, approximate=True, check_additivity=False
)
assert shap_elimination.fitted
shap_elimination._check_if_fitted()
report = shap_elimination.compute()
assert report.shape[0] == 3
assert shap_elimination.get_reduced_features_set(1) == ["col_3"]
_ = shap_elimination.plot(show=False)
# Ensure that number of warnings was 0
assert len(record) == 0
# Check if there is any prints
out, _ = capsys.readouterr()
assert len(out) == 0
| 5,337,946
|
def test_pickelable_tinydb_can_be_pickled_and_unpickled():
"""PickleableTinyDB should be able to be pickled and unpickled."""
test_dict = {'test_key': ['test', 'values']}
db = PickleableTinyDB(storage=MemoryStorage)
db.insert(test_dict)
db = pickle.loads(pickle.dumps(db))
assert db.search(where('test_key').exists())[0] == test_dict
| 5,337,947
|
def insert(table: _DMLTableArgument) -> Insert:
"""Construct an :class:`_expression.Insert` object.
E.g.::
from sqlalchemy import insert
stmt = (
insert(user_table).
values(name='username', fullname='Full Username')
)
Similar functionality is available via the
:meth:`_expression.TableClause.insert` method on
:class:`_schema.Table`.
.. seealso::
:ref:`tutorial_core_insert` - in the :ref:`unified_tutorial`
:param table: :class:`_expression.TableClause`
which is the subject of the
insert.
:param values: collection of values to be inserted; see
:meth:`_expression.Insert.values`
for a description of allowed formats here.
Can be omitted entirely; a :class:`_expression.Insert` construct
will also dynamically render the VALUES clause at execution time
based on the parameters passed to :meth:`_engine.Connection.execute`.
:param inline: if True, no attempt will be made to retrieve the
SQL-generated default values to be provided within the statement;
in particular,
this allows SQL expressions to be rendered 'inline' within the
statement without the need to pre-execute them beforehand; for
backends that support "returning", this turns off the "implicit
returning" feature for the statement.
If both :paramref:`_expression.Insert.values` and compile-time bind
parameters are present, the compile-time bind parameters override the
information specified within :paramref:`_expression.Insert.values` on a
per-key basis.
The keys within :paramref:`_expression.Insert.values` can be either
:class:`~sqlalchemy.schema.Column` objects or their string
identifiers. Each key may reference one of:
* a literal data value (i.e. string, number, etc.);
* a Column object;
* a SELECT statement.
If a ``SELECT`` statement is specified which references this
``INSERT`` statement's table, the statement will be correlated
against the ``INSERT`` statement.
.. seealso::
:ref:`tutorial_core_insert` - in the :ref:`unified_tutorial`
"""
return Insert(table)
| 5,337,948
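A minimal runnable usage of `insert()` against an in-memory SQLite database, roughly in the SQLAlchemy 1.4+ style of the docstring; the table definition here is illustrative, not part of the original.

from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine, insert, select

engine = create_engine("sqlite://")  # in-memory database
metadata = MetaData()
user_table = Table(
    "user_account", metadata,
    Column("id", Integer, primary_key=True),
    Column("name", String(30)),
    Column("fullname", String(60)),
)
metadata.create_all(engine)

stmt = insert(user_table).values(name="username", fullname="Full Username")
with engine.begin() as conn:
    conn.execute(stmt)
    print(conn.execute(select(user_table)).all())  # [(1, 'username', 'Full Username')]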
|
def parallelize_window_generation_imap(passed_args, procs=None):
"""Produce window files, in a parallel fashion
This method calls the get_win function as many times as sets of arguments
specified in passed_args. starmap is used to pass the list of arguments to
each invocation of get_win. The pool is created with either the number of
provided processors, or half the number of the available processors (be
kind, don't allocate everything).
Parameters
----------
passed_args : list
A list of lists, each one containing all the arguments to pass to an
invocation of the get_win function.
procs : int
The number of processors to use. Defaulted to None, will use half of
the available cores.
Returns
-------
list
A list containing the paths of all the results from the get_win calls.
"""
pool = mp.Pool(procs or int(mp.cpu_count() / 2))
results = pool.starmap(get_win, passed_args)
pool.close()
pool.join()
return results
| 5,337,949
|
def build_train(q_func, ob_space, ac_space, optimizer, sess, grad_norm_clipping=None,
scope="deepq", reuse=None, full_tensorboard_log=False):
"""
Creates the train function:
:param q_func: (DQNPolicy) the policy
:param ob_space: (Gym Space) The observation space of the environment
:param ac_space: (Gym Space) The action space of the environment
:param optimizer: (tf.train.Optimizer) optimizer to use for the Q-learning objective.
:param sess: (TensorFlow session) The current TensorFlow session
:param grad_norm_clipping: (float) clip gradient norms to this value. If None no clipping is performed.
:param scope: (str or VariableScope) optional scope for variable_scope.
:param reuse: (bool) whether or not the variables should be reused. To be able to reuse the scope must be given.
:param full_tensorboard_log: (bool) enable additional logging when using tensorboard
WARNING: this logging can take a lot of space quickly
:return: (tuple)
act
train: (function (Any, numpy float, numpy float, Any, numpy bool, numpy float): numpy float)
optimize the error in Bellman's equation. See the top of the file for details.
step_model: (DQNPolicy) Policy for evaluation
"""
n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n
with tf.variable_scope(scope, reuse=reuse):
policy = q_func(sess, ob_space, ac_space, 1, 1, None)
act = build_act(policy)
q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + "/model")
with tf.variable_scope("loss", reuse=reuse):
# set up placeholders
act_t_ph = tf.placeholder(tf.int32, [None], name="action")
rew_t_ph = tf.placeholder(tf.float32, [None], name="reward")
act_mask = tf.one_hot(act_t_ph, n_actions)
labels = tf.nn.relu(tf.math.sign(rew_t_ph))
dist = tf.nn.softmax(policy.q_values)
pred = tf.reduce_sum(dist * act_mask, axis=1)
loss = tf.math.reduce_mean(tf.keras.losses.binary_crossentropy(y_true=labels, y_pred=pred))
tf.summary.scalar("loss", loss)
# compute optimization op (potentially with gradient clipping)
gradients = optimizer.compute_gradients(loss, var_list=q_func_vars)
if grad_norm_clipping is not None:
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var)
with tf.variable_scope("input_info", reuse=False):
tf.summary.scalar('rewards', tf.reduce_mean(rew_t_ph))
if full_tensorboard_log:
tf.summary.histogram('rewards', rew_t_ph)
optimize_expr = optimizer.apply_gradients(gradients)
summary = tf.summary.merge_all()
# Create callable functions
_train = tf_util.function(
inputs=[
policy._obs_ph,
act_t_ph,
rew_t_ph,
policy.targets_ph,
policy.train_ph
],
outputs=summary,
updates=[optimize_expr]
)
def train(obses, actions, rewards, targets, **kwargs):
return _train(obses, actions, rewards, targets, True, **kwargs)
return act, train, policy
| 5,337,950
|
def function_that_prints_stuff():
"""
>>> function_that_prints_stuff()
stuff
"""
print('stuff')
| 5,337,951
|
def _make_global_var_name(element):
"""creates a global name for the MAP element"""
if element.tag != XML_map:
raise ValueError('Expected element <%s> for variable name definition, found <%s>' % (XML_map,element.tag))
base_name = _get_attrib_or_None(element,XML_attr_name)
if base_name is None:
# a MAP element needs to have a name
raise ValueError('Element <%s> is missing attribute "%s".' % (XML_map,XML_attr_name))
# walk up in the hierarchy until we find the group element
total_group_name = ''
while True:
element = element.find('..')
if element is None:
# no group element --> we could raise an exception
if total_group_name =='': total_group_name = 'NO_GROUP'
break
if element.tag == XML_group:
group_name = _get_attrib_or_None(element,XML_attr_name)
if group_name is None:
# perfectly legal case
group_name = 'EMPTY_GROUP_NAME'
total_group_name = group_name+'_'+total_group_name
#... and keep looking in case there are nested groups
h=str(hash(total_group_name+base_name)) # we calculate the hash in case somebody uses names in non-ASCII characters only
if h[0]=='-': h='M'+h[1:]
name= '_VAR_'+total_group_name+base_name+'_'+h
return _sanitized_var_name(name)
| 5,337,952
|
def list_user_images(user_id):
"""
Given a user_id, returns a list of Image objects scoped to that user.
:param user_id: str user identifier
:return: List of Image (message) objects
"""
db = get_session()
try:
imgs = [msg_mapper.db_to_msg(i).to_dict() for i in db.query(Image).filter(Image.user_id == user_id).all()]
finally:
db.close()
return imgs
| 5,337,953
|
def is_lower_cased_megatron(pretrained_model_name):
"""
Returns if the megatron is cased or uncased
Args:
pretrained_model_name (str): pretrained model name
Returns:
do_lower_cased (bool): whether the model uses lower cased data
"""
return MEGATRON_CONFIG_MAP[pretrained_model_name]["do_lower_case"]
| 5,337,954
|
def unsuspend_trip(direction, day, driver):
"""
Removes a trip from its suspension.
:param direction: "Salita" or "Discesa".
:param day: A weekday of the work week ("Lunedì" through "Venerdì", i.e. Monday through Friday).
:param driver: The chat_id of the driver.
:return:
"""
dt.groups[direction][day][driver]["Suspended"] = False
| 5,337,955
|
def merge(incr_a, incr_b):
"""Yield the elements of strictly increasing iterables incr_a and incr_b, removing
repeats. Assume that incr_a and incr_b have no repeats. incr_a or incr_b may or may not
be infinite sequences.
>>> m = merge([0, 2, 4, 6, 8, 10, 12, 14], [0, 3, 6, 9, 12, 15])
>>> type(m)
<class 'generator'>
>>> list(m)
[0, 2, 3, 4, 6, 8, 9, 10, 12, 14, 15]
>>> def big(n):
... k = 0
... while True: yield k; k += n
>>> m = merge(big(2), big(3))
>>> [next(m) for _ in range(11)]
[0, 2, 3, 4, 6, 8, 9, 10, 12, 14, 15]
"""
iter_a, iter_b = iter(incr_a), iter(incr_b)
next_a, next_b = next(iter_a, None), next(iter_b, None)
"*** YOUR CODE HERE ***"
while next_a is not None and next_b is not None:
if next_a>next_b:
yield next_b
next_b = next(iter_b,None)
elif next_a<next_b:
yield next_a
next_a = next(iter_a,None)
else:
yield next_a
next_a = next(iter_a,None)
next_b = next(iter_b,None)
while next_a is not None:
yield next_a
next_a = next(iter_a,None)
while next_b is not None:
yield next_b
next_b = next(iter_b,None)
| 5,337,956
|
def bright(args, return_data=True):
"""
    Executes CA Brightside with the arguments of this function. The response is returned as Python data structures.
    Parameter ``return_data`` is ``True`` by default, which causes only the data section to be returned, without metadata.
    Metadata are processed automatically; if they indicate that the command was not successful, a ``BrightCallError`` exception
    is raised.
Example:
jobs = bright("zos-jobs list jobs")
# jobs is equal to:
[{'class': 'A',
'files-url': 'https://ca32.ca.com:1443/zosmf/restjobs/jobs/J0038667USILCA11D4B949F2.......%3A/files',
'job-correlator': 'J0038667USILCA11D4B949F2.......:',
'jobid': 'JOB38667',
'jobname': 'PLAPALLC',
'owner': 'PLAPE03',
'phase': 20,
'phase-name': 'Job is on the hard copy queue',
'retcode': 'SEC ERROR',
'status': 'OUTPUT',
'subsystem': 'JES2',
'type': 'JOB',
'url': 'https://ca32.ca.com:1443/zosmf/restjobs/jobs/J0038667USILCA11D4B949F2.......%3A'}]
"""
if not isinstance(args, str):
args = subprocess.list2cmdline(args)
command = f"bright --rfj {args}"
try:
j, cp = _call_command_and_parse_json(command)
if j is None:
return None
if not j.get("success"):
be = BrightCallError(cp.returncode, command,
args, output=cp.stdout, stderr=cp.stderr)
be.errors = j.get("errors")
be.message = j.get("message")
else:
if "data" in j and return_data:
return j["data"]
return j
except CalledProcessError as e:
log.debug("error: %s, output=%s" % (repr(e), e.output))
if e.stderr:
raise BrightCallError(e.returncode, e.cmd,
args, output=e.output, stderr=e.stderr)
else:
j = json.loads(e.output)
be = BrightCallError(e.returncode, e.cmd, args, output=j.get(
"stdout"), stderr=j.get("stderr"))
be.errors = j.get("errors")
be.message = j.get("message")
raise be
| 5,337,957
|
def handle_mentions(utils, mentions):
"""
Processes a batch (list) of mentions.
Parameters
----------
utils : `Utils object`
extends tweepy api wrapper
mentions : `list`
list of Status objects (mentions)
"""
for mention in mentions:
if mention.user.screen_name == "rundown_bot":
continue
text = mention.text.lower()
try:
if "#doesfollow" in text:
handle_doesfollow(utils, mention)
elif "#articlepeople" in text:
handle_articlepeople(utils, mention)
elif "#articleplaces" in text:
handle_articleplaces(utils, mention)
elif "#articleorgs" in text:
handle_articleorgs(utils, mention)
elif "#articleabstract" in text:
handle_articleabstract(utils, mention)
else:
handle_generic_reply(utils, mention)
except tweepy.TweepError as e:
if e.api_code == 187:
#duplicate message. shouldn't happen once since_id is handled
continue
else:
raise e
| 5,337,958
|
def decipher(string, key, a2i_dict, i2a_dict):
"""
This function is BASED on https://github.com/jameslyons/pycipher
"""
key = [k.upper() for k in key]
ret = ''
for (i, c) in enumerate(string):
i = i % len(key)
ret += i2a_dict[(a2i_dict[c] - a2i_dict[key[i]]) % len(a2i_dict)]
return ret
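
# Illustrative usage sketch (not from the original source). The alphabet
# dictionaries below are assumptions for an uppercase A-Z Vigenere-style cipher.
a2i = {chr(ord('A') + i): i for i in range(26)}   # 'A' -> 0, ..., 'Z' -> 25
i2a = {i: chr(ord('A') + i) for i in range(26)}   # 0 -> 'A', ..., 25 -> 'Z'
# Classic Vigenere example: ciphertext "LXFOPVEFRNHR" with key "LEMON"
# deciphers to "ATTACKATDAWN".
print(decipher("LXFOPVEFRNHR", "LEMON", a2i, i2a))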
| 5,337,959
|
def keep_samples_from_pcoa_data(headers, coords, sample_ids):
"""Controller function to filter coordinates data according to a list
Parameters
----------
headers : list, str
list of sample identifiers, if used for jackknifed data, this
should be a list of lists containing the sample identifiers
coords : numpy.ndarray
2-D numpy array with the float data in the coordinates, if used for
jackknifed data, coords should be a list of 2-D numpy arrays
sample_ids : list, str
list of sample ids that should be kept
Returns
-------
out_headers : list, str
list of headers
out_coords : list, np.array
list of coordinates
"""
    # if the coords are a list then it means that the input is jackknifed
if type(coords) == list:
out_coords, out_headers = [], []
for single_headers, single_coords in zip(headers, coords):
a, b = filter_samples_from_coords(single_headers, single_coords,
sample_ids)
out_headers.append(a)
out_coords.append(b)
return out_headers, out_coords
else:
out_headers, out_coords = filter_samples_from_coords(headers,
coords,
sample_ids)
return out_headers, out_coords
| 5,337,960
|
def get_default_command() -> str:
"""get_default_command returns a command to execute the default output of g++ or clang++. The value is basically `./a.out`, but `.\a.exe` on Windows.
The type of return values must be `str` and must not be `pathlib.Path`, because the strings `./a.out` and `a.out` are different as commands but same as a path.
"""
if platform.system() == 'Windows':
return r'.\a.exe'
return './a.out'
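
# A small usage sketch (an assumption, not part of the original source):
# run the default compiler output via the shell so the platform-specific
# string ('./a.out' or '.\a.exe') is interpreted as a command line.
import subprocess
result = subprocess.run(get_default_command(), shell=True)
print("exit code:", result.returncode)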
| 5,337,961
|
def roi_circle(roi_index, galactic=True, radius=5.0):
""" return (lon,lat,radius) tuple for given nside=12 position
"""
from skymaps import Band
sdir = Band(12).dir(roi_index)
return (sdir.l(),sdir.b(), radius) if galactic else (sdir.ra(),sdir.dec(), radius)
| 5,337,962
|
def find_by_user_defined_key(user_defined_key: str) -> List[models.BBoundingBoxDTO]:
"""Get a list of bounding boxes by a user-defined key."""
res_json = BoundingBoxes.get('query/userdefinedkey/{}'.format(user_defined_key))
return list(map(models.BBoundingBoxDTO.from_dict, res_json))
| 5,337,963
|
def object_type(r_name):
"""
Derives an object type (i.e. ``user``) from a resource name (i.e. ``users``)
:param r_name:
Resource name, i.e. would be ``users`` for the resource index URL
``https://api.pagerduty.com/users``
:returns: The object type name; usually the ``type`` property of an instance
of the given resource.
:rtype: str
"""
if r_name.endswith('ies'):
# Because English
return r_name[:-3]+'y'
else:
return r_name.rstrip('s')
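
# A few input/output pairs one would expect from the rule above
# (an illustrative sketch, not from the source):
assert object_type('users') == 'user'
assert object_type('escalation_policies') == 'escalation_policy'  # 'ies' -> 'y'
assert object_type('services') == 'service'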
| 5,337,964
|
def test_atomic_g_month_enumeration_1_nistxml_sv_iv_atomic_g_month_enumeration_2_3(mode, save_output, output_format):
"""
Type atomic/gMonth is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/atomic/gMonth/Schema+Instance/NISTSchema-SV-IV-atomic-gMonth-enumeration-2.xsd",
instance="nistData/atomic/gMonth/Schema+Instance/NISTXML-SV-IV-atomic-gMonth-enumeration-2-3.xml",
class_name="NistschemaSvIvAtomicGMonthEnumeration2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 5,337,965
|
def fix_ascendancy_positions(path: os.PathLike) -> None:
"""Normalise the relative positions of ascendancy nodes on the passive skill tree.
Ascendancy positions in the passive skill tree data we receive from GGG look
scrambled, which is why we have to fix them before importing the skill tree in PoB.
.. warning:: Overwrites the input file in-place.
:param path: File path to a JSON passive skill tree data file.
:return:
"""
with open(path, "rb") as f:
data = json.load(f)
ascendancy_groups = [
(data["nodes"][group["nodes"][0]]["ascendancyName"], group)
for group in data["groups"].values()
if "ascendancyName" in data["nodes"][group["nodes"][0]]
]
ascendancy_starting_point = {
ascendancy: Point2D(group["x"], group["y"])
for ascendancy, group in ascendancy_groups
for node in group["nodes"]
if "isAscendancyStart" in data["nodes"][node]
}
for ascendancy, group in ascendancy_groups:
offset = NODE_GROUPS[ascendancy] - ascendancy_starting_point[ascendancy]
group["x"] += offset.x
group["y"] += offset.y
with open(path, "w", encoding="utf-8") as o:
json.dump(data, o, indent=4)
| 5,337,966
|
def create_comment(args, global_var):
"""
    Create a new comment (at most 1,000 comments may be created per day).
    -------------
    About the uuid:
    1. Use data["content"] as raw_str.
    2. The generated comment_id contains dashes, which looks odd, so it is recommended to remove them: uuid = "-".join(uuid)
"""
can_create = hit_daily_comment_creation_threshold(global_var)
if not can_create:
res = {
"code": "FAILURE",
"message": "Hit max daily comment creation threshold, please try to comment tomorrow."
}
return res
data = args["data"]
db = global_var["db"]
comment_id = create_uuid(raw_str=data["content"])
comment_id = "".join(comment_id.split("-"))
try:
current_user_name = get_jwt_identity()
        # look up the user_id from current_user_name
user_query = db.session.query(User.user_id).filter(User.name == current_user_name).first()
current_user_id = user_query.user_id
record = Comment(
comment_id=comment_id,
content=data["content"],
creator_user_id=getValueWithDefault(data, "creator_user_id", current_user_id)
)
db.session.add(record)
db.session.commit()
res = {
"code": "SUCCESS",
"data": {"id": record.id,
"creator": current_user_name}
}
global_var["today_already_created_comment_count"][1] += 1
except Exception as e:
res = {
"code": "FAILURE",
"message": traceback.format_exc()
}
return res
| 5,337,967
|
def gen_value(schema: DataType):
"""
VALUE -> OBJECT
| ARRAY
| STRING
| NUMBER
| BOOL
"""
    if isinstance(schema, StructType):
        yield from gen_object(schema)
    elif isinstance(schema, ArrayType):
        yield from gen_array(schema.elementType)
    elif isinstance(schema, StringType):
        yield from gen_string()
    elif isinstance(schema, BooleanType):
        yield from gen_bool()
    elif isinstance(schema, IntegerType):
        yield from gen_integer()
    elif isinstance(schema, (FloatType, DoubleType)):
        yield from gen_number()
    else:
        raise Exception("unsupported schema type")
| 5,337,968
|
def get_aggregated_metrics(expr: ExperimentResource):
"""
Get aggregated metrics using experiment resource and metric resources.
"""
versions = [expr.spec.versionInfo.baseline]
if expr.spec.versionInfo.candidates is not None:
versions += expr.spec.versionInfo.candidates
# messages not working as intended...
messages = []
# initialize aggregated metrics object
iam = get_builtin_metrics(expr)
# check if start time is greater than now
# this is problematic.... start time is set by etc3 but checked by analytics.
# clocks are not synced, so this is not right...
if expr.status.startTime > (datetime.now(timezone.utc)):
messages.append(Message(MessageLevel.ERROR, "Invalid startTime: greater than current time"))
iam.message = Message.join_messages(messages)
return iam
# there are no metrics to be fetched
if expr.status.metrics is None:
iam.message = Message.join_messages(messages)
return iam
for metric_info in expr.status.metrics:
        # only custom metrics are handled below, not builtin metrics
if metric_info.metricObj.spec.provider is None or \
metric_info.metricObj.spec.provider != "iter8":
iam.data[metric_info.name] = AggregatedMetric(data = {})
# fetch the metric value for each version...
for version in versions:
# initialize metric object for this version...
iam.data[metric_info.name].data[version.name] = VersionMetric()
val, err = get_metric_value(metric_info.metricObj, version, \
expr.status.startTime)
if err is None and val is not None:
iam.data[metric_info.name].data[version.name].value = val
else:
try:
val = float(expr.status.analysis.aggregated_metrics.data\
[metric_info.name].data[version.name].value)
except AttributeError:
val = None
iam.data[metric_info.name].data[version.name].value = val
if err is not None:
messages.append(Message(MessageLevel.ERROR, \
f"Error from metrics backend for metric: {metric_info.name} \
and version: {version.name}"))
iam.message = Message.join_messages(messages)
logger.debug("Analysis object after metrics collection")
logger.debug(pprint.PrettyPrinter().pformat(iam))
return iam
| 5,337,969
|
def lsa_main(args):
"""Runs lsa on a data directory
:args: command line argument namespace
"""
if args.outfile is None:
os.makedirs(os.path.join("data", "tmp"), exist_ok=True)
args.outfile = os.path.join("data", "tmp", f"lsa-{args.n_components}.pkl")
print("LSA Embedding will be stored at:", args.outfile)
lsa = Pipeline(
[
("tfidf", TfidfVectorizer(input='filename', stop_words='english', max_features=50000)),
("svd", TruncatedSVD(n_components=args.n_components))
]
)
all_papers = glob.glob(os.path.join(args.data, "*"))
print("Run {}-dim LSA on {} papers.".format(args.n_components, len(all_papers)))
lsa_embedding = lsa.fit_transform(all_papers)
print("Explained variance ratio sum:", lsa.named_steps.svd.explained_variance_ratio_.sum())
# save_word2vec_format(OUTFILE, [identifier_from_path(p) for p in all_papers], LSA_EMBEDDING)
labels = [identifier_from_path(p) for p in all_papers]
if args.annotate is not None:
with open(args.annotate, 'rb') as fhandle:
id2title = pickle.load(fhandle)
# Replace identifier labels with title labels (if possible)
labels = [id2title.get(x, x) for x in labels]
embedding_bf = {
'labels': labels,
'embeddings': lsa_embedding
}
with open(args.outfile, 'wb') as outfile:
pickle.dump(embedding_bf, outfile)
| 5,337,970
|
def get_bart(folder_path, checkpoint_file):
"""
Returns a pretrained BART model.
Args:
folder_path: str, path to BART's model, containing the checkpoint.
checkpoint_file: str, name of BART's checkpoint file (starting from BART's folder).
"""
from fairseq.models.bart import BARTModel
bart = BARTModel.from_pretrained(model_name_or_path=folder_path + '/',
checkpoint_file=checkpoint_file)
if torch.cuda.is_available():
bart.cuda()
print("Using BART on GPU...")
bart.eval()
print("BART loaded (in evaluation mode).\n")
return bart
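
# Hypothetical call (the folder and checkpoint names are placeholders, not
# from the source); assumes a fairseq BART checkpoint was downloaded locally.
bart = get_bart(folder_path="checkpoints/bart.large", checkpoint_file="model.pt")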
| 5,337,971
|
async def test_login():
"""Test login."""
# assert os.environ.get("NOIP_USERNAME"), 'You need to set NOIP_USERNAME, e.g. set NOIP_USERNAME="your noip_username or email address or set it up in .env (refer to .env.sample)'
# assert os.environ.get("NOIP_PASSWORD"), 'You need to set NOIP_USERNAME, e.g. set NOIP_USERNAME="your noip_username or email address or set it up in .env (refer to .env.sample)'
try:
page = await login_noip()
assert "DASHBOARD" in (await page.content())
# await page.close()
# await BROWSER.close()
# await BROWSER.killChrome()
await page.browser.close()
await asyncio.sleep(0.4)
except Exception as exc:
logger.error(exc)
| 5,337,972
|
def construct_s3_raw_data_path(study_id, filename):
""" S3 file paths for chunks are of this form:
RAW_DATA/study_id/user_id/data_type/time_bin.csv """
return os.path.join(RAW_DATA_FOLDER, study_id, filename)
| 5,337,973
|
def reset_password_email(recipient, link):
""" Sends out an email telling the recipients that
their password is able to be reset. Passed in is
a recipient to receive the password reset, along
with a password reset link.
"""
subject = "NordicSHIFT password reset request"
message = "Your password has recently been reset.\n" + \
"If you requested a password to be reset, follow the link below. \n" + \
"If you did not request a password reset, ignore this email. \n" + \
"%s \n" % (link) + \
"Thank you."
send_email(recipient, subject, message)
| 5,337,974
|
def trip_duration_stats(df):
"""Displays statistics on the total and average trip duration."""
print('\nCalculating Trip Duration...\n')
start_time = time.time()
# TO DO: display total travel time
#time_taken is a series formed of subtraction of end and start times
df['End Time'] = pd.to_datetime(df['End Time'])
#We make a series of the time taken (difference between start and end times)
time_taken = df['End Time'] - df['Start Time']
print("The sum of time taken for all trips is: {} days.".format(time_taken.sum()))
# TO DO: display mean travel time
#First, we calculate the mean, then convert it into seconds for more clarity
mean_in_seconds = time_taken.mean().total_seconds()
print("The mean time for a trip is: {} seconds.".format(mean_in_seconds))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
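
# Minimal, self-contained sketch of how the function could be exercised; the
# two trips below are made up (30 and 45 minutes long). Assumes the surrounding
# module already imports pandas and time, as the function itself does.
import pandas as pd
df = pd.DataFrame({
    'Start Time': pd.to_datetime(['2017-06-01 09:00:00', '2017-06-01 10:00:00']),
    'End Time': ['2017-06-01 09:30:00', '2017-06-01 10:45:00'],
})
trip_duration_stats(df)   # total: 0 days 01:15:00, mean: 2250.0 seconds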
| 5,337,975
|
def _mixed_s2(x, filters, name=None):
"""Utility function to implement the 'stride-2' mixed block.
# Arguments
x: input tensor.
filters: a list of filter sizes.
name: name of the ops
# Returns
Output tensor after applying the 'stride-2' mixed block.
"""
if len(filters) != 2:
raise ValueError('filters should have 2 components')
name1 = name + '_3x3' if name else None
branch3x3 = _depthwise_conv2d_bn(x, filters[0],
kernel_size=(3, 3),
strides=(2, 2),
name=name1)
name1 = name + '_5x5' if name else None
branch5x5 = _depthwise_conv2d_bn(x, filters[1],
kernel_size=(5, 5),
strides=(2, 2),
name=name1)
name1 = name + '_pool' if name else None
branchpool = layers.MaxPooling2D(pool_size=(3, 3), padding='same',
strides=(2, 2), name=name1)(x)
concat_axis = 1 if backend.image_data_format() == 'channels_first' else 3
x = layers.concatenate([branch3x3, branch5x5, branchpool],
axis=concat_axis,
name=name)
return x
| 5,337,976
|
def get_convertor(cls: Union[Type, str]) -> Convertor:
"""Returns Convertor for data type.
Arguments:
cls: Type or type name. The name could be simple class name, or full name that includes
the module name.
Note:
When `cls` is a type name:
1. If class name is NOT registered via `register_class()`, it's not possible to perform
           lookup for base classes.
2. If simple class name is provided and multiple classes of the same name but from
different modules have registered convertors, the first one found is used. If you
want to avoid this situation, use full names.
Raises:
        TypeError: If there is no convertor for `cls` or any of its base classes.
"""
if (conv := _get_convertor(cls)) is None:
raise TypeError(f"Type '{cls.__name__ if isinstance(cls, type) else cls}' has no Convertor")
return conv
| 5,337,977
|
def cli():
"""The pypi CLI.
\b
Examples:
\b
pypi stat Django
pypi browse Flask
To get help with a subcommand, add the --help option after the command.
\b
pypi stat --help
"""
pass
| 5,337,978
|
def adjust_learning_rate(optimizer, epoch):
"""For resnet, the lr starts from 0.1, and is divided by 10 at 80 and 120 epochs"""
if epoch < 80:
lr = 0.1
elif epoch < 120:
lr = 0.01
else:
lr = 0.001
for param_group in optimizer.param_groups:
param_group['lr'] = lr
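
# Small PyTorch sketch of the intended call pattern; the model and optimizer
# below are placeholders, not from the original source.
import torch
model = torch.nn.Linear(10, 2)                          # stand-in model
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
for epoch in range(160):
    adjust_learning_rate(optimizer, epoch)              # 0.1 -> 0.01 at 80 -> 0.001 at 120
    # ... one training epoch would go here ...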
| 5,337,979
|
def partPos2video(pos, fname='video.mp4', limits=[2, 4, 2, 4, 5, 7], ftime=100, partSize=5, shadowSize=1, psf=[0.15, 0.15, 0.45]):
"""
Convert SPAD-FCS particle positions to video
===========================================================================
Input Meaning
---------- ---------------------------------------------------------------
pos [Np x 3 x Nf] data array with
Np number of particles
3 x, y, z coordinates of the particle (float)
Nf number of frames
fname File name
limits Plot axes limits [xmin, xmax, ymin, ymax, zmin, zmax]
ftime Frame time [ms]
partSize Size of the dots for the particles
shadowSize Size of the shadows of the particles, use 0 for no shadows
psf [wx, wy, z0] array with psf size
===========================================================================
Output Meaning
---------- ---------------------------------------------------------------
video
===========================================================================
"""
Nf = np.shape(pos)[2]
Np = np.shape(pos)[0]
# plot limits
xmin = limits[0]
xmax = limits[1]
ymin = limits[2]
ymax = limits[3]
zmin = limits[4]
zmax = limits[5]
pos = np.asarray(pos)
posShadow = np.copy(pos)
posShadow[:, 2, :] = zmin
pos = np.concatenate((pos, posShadow), axis=0)
# set default color of particles to blue
colorList = np.zeros((2*Np, 3, Nf))
colorList[0:Np, :, :] = np.swapaxes(np.tile(np.array([31/256, 121/256, 182/256]), (Np, Nf, 1)), 1, 2)
# change color of particles in laser to green
for f in range(Nf):
for p in range(Np):
if np.abs(pos[p, 0, f] - 3) < psf[0] and np.abs(pos[p, 1, f] - 3) < psf[1] and np.abs(pos[p, 2, f] - 3) < psf[2]:
colorList[p, :, f] = np.array([0/256, 256/256, 0/256])
sizeList = np.zeros((2*Np, Nf))
sizeList[0:Np, :] = np.tile(np.array([partSize]), (Np, Nf))
if shadowSize > 0:
sizeList[Np:2*Np, :] = np.tile(np.array([shadowSize]), (Np, Nf))
else:
sizeList[Np:2*Np, :] = np.tile(np.array([0]), (Np, Nf))
# do not show particles outside of plot boundaries
sizeList[pos[:,0,:] > xmax] = 0
sizeList[pos[:,0,:] < xmin] = 0
sizeList[pos[:,1,:] > ymax] = 0
sizeList[pos[:,1,:] < ymin] = 0
sizeList[pos[:,2,:] > zmax] = 0
sizeList[pos[:,2,:] < zmin] = 0
# if particles outside of boundaries, also remove shadows
sizeList[np.concatenate((sizeList[0:Np,:] == 0, sizeList[0:Np,:] == 0), axis=0)] = 0
fig = plt.figure()
ax = plt.axes(projection = "3d")
#ax.set_xticklabels([])
#ax.set_yticklabels([])
#ax.set_zticklabels([])
ims = []
for i in range(Nf):
#[x, y, z] = drawEllipsoid(psf[0], psf[1], psf[2], plotFig=False)
#ax.plot_surface(x+2, y+2, z+6, rstride=4, cstride=4, color='b', alpha=0.2)
im = ax.scatter3D(pos[:, 0, i], pos[:, 1, i], pos[:, 2, i], color=colorList[:,:,i], s=sizeList[:,i])
ax.set_xlim([xmin, xmax])
ax.set_ylim([ymin, ymax])
ax.set_zlim([zmin, zmax])
ims.append([im])
ani = animation.ArtistAnimation(fig, ims, interval=ftime, blit=True)
fname = checkfname(fname, 'mp4')
ani.save(fname)
| 5,337,980
|
def get_all_todo_list(request):
"""
This gets all the todolist associated with the user
:param request:
:return:
"""
pass
| 5,337,981
|
def _is_mchedr(filename):
"""
Checks whether a file format is mchedr
(machine-readable Earthquake Data Report).
:type filename: str
:param filename: Name of the mchedr file to be checked.
:rtype: bool
:return: ``True`` if mchedr file.
.. rubric:: Example
>>> _is_mchedr('/path/to/mchedr.dat') # doctest: +SKIP
True
"""
if not isinstance(filename, (str, native_str)):
return False
with open(filename, 'rb') as fh:
for line in fh.readlines():
# skip blank lines at beginning, if any
if line.strip() == b'':
continue
# first record has to be 'HY':
if line[0:2] == b'HY':
return True
else:
return False
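
# Self-contained sketch of the check (assumes the module-level imports of the
# original, e.g. native_str, are available). The file content is a fabricated
# record that merely starts with 'HY', just to exercise the first-record test.
import os
import tempfile
with tempfile.NamedTemporaryFile('wb', suffix='.dat', delete=False) as tmp:
    tmp.write(b'HY 2012 01 01 ...\n')    # fake first record starting with 'HY'
    fname = tmp.name
print(_is_mchedr(fname))                 # True
os.remove(fname)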
| 5,337,982
|
def ddr3_8x8_profiling(
trace_file=None,
word_sz_bytes=1,
page_bits=8192, # number of bits for a dram page/row
min_addr_word=0,
max_addr_word=100000
):
"""
this code takes non-stalling dram trace and reorganizes the trace to meet the bandwidth requirement.
currently, it does not generate new trace, which can be improved later.
all default values are for ddr3, the output values are used by cacti "main memory" type
in this model, the burst behavior in the dram is not modeled, as such the reported cycle count will be larger, i.e., a pessimistic estimation
"""
# output list
tot_word = 0
max_word = 0
tot_access = 0
tot_row_access = 0
act_cycles = 0
shift_cycles = 0
ideal_start_cycle = 0
ideal_end_cycle = 0
bank=8 # number of banks in a chip. banks can be interleaved to reduce access latency. not modelled for simplicity.
burst=8 # number of bytes for a single bank row and col address, and burst is sequential. not modelled for simplicity.
prefetch=8 # number of prefetches/chips, with each chip referring to 1 prefetch. prefetch is parallel
    io_bits=8 # number of bits provided by all chips, with each chip providing io_bits/prefetch bits, each 8 bits provided by a single bank in the chip
# number of words per page
page_byte = page_bits / 8
# per cycle ddr bandwidth in word
io_byte = io_bits / 8
requests = open(trace_file, 'r')
# applied address mapping: row + bank + col + chip
# this mapping is just for modeling, and actual implementation can be different
# for default ddr3 setting, 14-b row + 3-b bank + 10-b col + 3-b chip
# more info about ddr3 can be found here: http://mermaja.act.uji.es/docencia/is37/data/DDR3.pdf page 15
# parallel prefetch via chip has higher priority than sequential burst in a bank
# prefetch_buf format (row idx, col idx, chip idx)
# consecutive addresses are transmitted using prefetech instead of burst, as they are from the same bank but different chips
# bank interleaving is not simulated here, as they will not incur high access overhead
prefetch_buf_new = []
prefetch_buf_old = []
current_prefetch = []
first = True
for entry in requests:
elems = entry.strip().split(',')
elems = prune(elems)
elems = [float(x) for x in elems]
valid_word = 0
if first == True:
first = False
ideal_start_cycle = elems[0]
ideal_end_cycle = elems[0]
prefetch_buf_new = []
# memory row index and col index generation inside a chip
for e in range(1, len(elems)): # each element here is a word
# only count legal address
if (elems[e] >= min_addr_word) and (elems[e] < max_addr_word):
# get the byte addr of the element, as dram is byte addressable
elem_addr_byte = math.floor((elems[e] - min_addr_word) * word_sz_bytes)
# this row index contain both row and bank in the address
row_idx = math.floor(elem_addr_byte / page_byte)
# col idx inside a chip
col_idx = math.floor((elem_addr_byte % page_byte) / prefetch)
# chip index
chip_idx = math.floor(elem_addr_byte % prefetch)
prefetch_buf_new.append((row_idx, col_idx, chip_idx))
valid_word += 1
act_cycles += (len(prefetch_buf_new) > 0)
# add addresses for multi-byte word
tmp_prefetch_buf = list(prefetch_buf_new)
for w in range(math.ceil(word_sz_bytes) - 1):
for (x, y, z) in tmp_prefetch_buf:
# get the byte addr of the element, as dram is byte addressable
elem_addr_byte = x * page_byte + y * prefetch + z + (w + 1)
# this row index contain both row and bank in the address
row_idx = math.floor(elem_addr_byte / page_byte)
# col idx inside a chip
col_idx = math.floor((elem_addr_byte % page_byte) / prefetch)
# chip index
chip_idx = math.floor(elem_addr_byte % prefetch)
prefetch_buf_new.append((row_idx, col_idx, chip_idx))
tot_word += valid_word
if max_word < valid_word:
max_word = valid_word
# merge the repeated accesses in byte granularity
prefetch_buf_new = list(set(prefetch_buf_new))
new_access = 0
# update the prefetch start addr
prefetch_row_col_new = list(set([(x, y) for (x, y, z) in prefetch_buf_new]))
prefetch_row_col_old = list(set([(x, y) for (x, y, z) in prefetch_buf_old]))
for (x, y) in prefetch_row_col_new:
# a new start address for prefetch
if (x, y) not in prefetch_row_col_old:
start_chip = 1000000
for (i, j, k) in prefetch_buf_new:
if x == i and j == y and k < start_chip:
# add a new prefetch
start_chip = k
current_prefetch.append((x, y))
# each prefetch means an access
new_access += 1
tot_access += new_access
for (x, y) in prefetch_row_col_old:
if (x, y) not in prefetch_row_col_new:
# remove a prefetch if it's not used anymore
current_prefetch.remove((x, y))
# print(current_prefetch)
# only new row accesses from the last load are counted, as old are already buffered
new_row_access = 0
# only different blocks are accessed, old accesses are buffered already and required no access
prefetch_row_new = list(set([x for (x, y, z) in prefetch_buf_new]))
prefetch_row_old = list(set([x for (x, y, z) in prefetch_buf_old]))
for a in range(len(prefetch_row_new)):
if prefetch_row_new[a] not in prefetch_row_old:
new_row_access += 1
tot_row_access += new_row_access
prefetch_buf_old = prefetch_buf_new
# divided by two because of ddr
shift_cycles = max((math.ceil(tot_access / 2) - act_cycles), 0 )
requests.close()
return tot_word, max_word, tot_access, tot_row_access, act_cycles, shift_cycles, ideal_start_cycle, ideal_end_cycle
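
# Tiny worked example of the address split used inside the loop above
# (numbers chosen purely for illustration): with the default page_bits=8192
# (page_byte=1024) and prefetch=8, byte address 5000 maps to row 4, col 113, chip 0.
import math
page_byte, prefetch = 8192 / 8, 8
elem_addr_byte = 5000
row_idx = math.floor(elem_addr_byte / page_byte)                # 4
col_idx = math.floor((elem_addr_byte % page_byte) / prefetch)   # 113
chip_idx = math.floor(elem_addr_byte % prefetch)                # 0
print(row_idx, col_idx, chip_idx)                               # 4 113 0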
| 5,337,983
|
def get_sites_by_latlon(latlon, filter_date='', **kwargs):
"""Gets list of sites from BETYdb, filtered by a contained point.
latlon (tuple) -- only sites that contain this point will be returned
filter_date -- YYYY-MM-DD to filter sites to specific experiment by date
"""
latlon_api_arg = "%s,%s" % (latlon[0], latlon[1])
return get_sites(filter_date=filter_date, containing=latlon_api_arg, **kwargs)
| 5,337,984
|
def event_setup():
"""
Loads event dictionary into memory
:return: nothing
"""
global _EVENT_DICT
_EVENT_DICT = _get_event_dictionary()
| 5,337,985
|
def get_context(book, chapter, pharse):
"""
Given book, chapter, and pharse number, return the bible context.
"""
try:
context = repository['{} {}:{}'.format(book, chapter, pharse)]
return context
except KeyError:
bookname = bookid2chinese[book]
pharse_name = '{}{}:{}'.format(bookname, chapter, pharse)
logging.warning('Cannot find this pharse:' + pharse_name)
raise KeyError('Cannot find this pharse')
| 5,337,986
|
def find_cocotb_base (path = "", debug = False):
"""
    Find the Cocotb base directory in the normal installation path. If the user
    specifies a location, it attempts to find cocotb in that directory. This
    function fails quietly because most people will probably not use cocotb
    on the full design, so it's not a big deal if it fails
    Args:
        path (string): Path to cocotb base if cocotb is not installed in the
            default location (can be left blank)
Returns:
(String): Path to cocotb on the local machine, returns an empty string
if none is found
Raises: Nothing
"""
#Normally cocotb is installed (on Linux) at
if os.name == "posix":
if len(path) == 0:
path = DEFAULT_POSIX_COCOTB
else:
raise CocotbError("Error, Windows and Mac are not supported for " +
"cocotb utils")
    dirs = os.listdir(path)
    if debug: print("Look for directory")
    if debug: print("path: %s" % path)
    for s in dirs:
        if "cocotb" in s:
            path = os.path.join(path, s)
            if debug: print("Found: %s" % path)
            return path
    raise CocotbWarning("Did not find Cocotb in %s" % path)
| 5,337,987
|
def read_dicom_volume(dcm_path):
"""
This function reads all dicom volumes in a folder as a volume.
"""
    dcm_files = [f for f in os.listdir(dcm_path) if f.endswith('.dcm')]
    dcm_files = ns.natsorted(dcm_files, alg=ns.IGNORECASE)
    Z = len(dcm_files)
    reference = dicom.read_file(os.path.join(dcm_path, dcm_files[0]))
    H, W = reference.pixel_array.shape
    dtype = reference.pixel_array.dtype
    volume = np.zeros((H, W, Z), dtype=dtype)
    for ii, dcm_slice in enumerate(dcm_files):
        volume[:, :, ii] = dicom.read_file(os.path.join(dcm_path, dcm_slice)).pixel_array
    return volume
| 5,337,988
|
def _compare_namelists(gold_namelists, comp_namelists, case):
###############################################################################
"""
Compare two namelists. Print diff information if any.
Returns comments
Note there will only be comments if the namelists were not an exact match
Expect args in form: {namelist -> {key -> value} }.
value can be an int, string, list, or dict
>>> teststr = '''&nml
... val = 'foo'
... aval = 'one','two', 'three'
... maval = 'one', 'two', 'three', 'four'
... dval = 'one -> two', 'three -> four'
... mdval = 'one -> two', 'three -> four', 'five -> six'
... nval = 1850
... /
... &nml2
... val2 = .false.
... /
... '''
>>> _compare_namelists(_parse_namelists(teststr.splitlines(), 'foo'), _parse_namelists(teststr.splitlines(), 'bar'), None)
''
>>> teststr1 = '''&nml1
... val11 = 'foo'
... /
... &nml2
... val21 = 'foo'
... val22 = 'foo', 'bar', 'baz'
... val23 = 'baz'
... val24 = '1 -> 2', '2 -> 3', '3 -> 4'
... /
... &nml3
... val3 = .false.
... /'''
>>> teststr2 = '''&nml01
... val11 = 'foo'
... /
... &nml2
... val21 = 'foo0'
... val22 = 'foo', 'bar0', 'baz'
... val230 = 'baz'
... val24 = '1 -> 20', '2 -> 3', '30 -> 4'
... /
... &nml3
... val3 = .false.
... /'''
>>> comments = _compare_namelists(_parse_namelists(teststr1.splitlines(), 'foo'), _parse_namelists(teststr2.splitlines(), 'bar'), None)
>>> print(comments)
Missing namelist: nml1
Differences in namelist 'nml2':
BASE: val21 = 'foo'
COMP: val21 = 'foo0'
BASE: val22 list item 1 = 'bar'
COMP: val22 list item 1 = 'bar0'
missing variable: 'val23'
BASE: val24 dict item 1 = 2
COMP: val24 dict item 1 = 20
dict variable 'val24' missing key 3 with value 4
dict variable 'val24' has extra key 30 with value 4
found extra variable: 'val230'
Found extra namelist: nml01
<BLANKLINE>
>>> teststr1 = '''&rad_cnst_nl
... icecldoptics = 'mitchell'
... logfile = 'cpl.log.150514-001533'
... case_name = 'ERB.f19_g16.B1850C5.sandiatoss3_intel.C.150513-230221'
... runid = 'FOO'
... model_version = 'cam5_3_36'
... username = 'jgfouca'
... iceopticsfile = '/projects/ccsm/inputdata/atm/cam/physprops/iceoptics_c080917.nc'
... liqcldoptics = 'gammadist'
... liqopticsfile = '/projects/ccsm/inputdata/atm/cam/physprops/F_nwvl200_mu20_lam50_res64_t298_c080428.nc'
... mode_defs = 'mam3_mode1:accum:=', 'A:num_a1:N:num_c1:num_mr:+',
... 'A:so4_a1:N:so4_c1:sulfate:/projects/ccsm/inputdata/atm/cam/physprops/sulfate_rrtmg_c080918.nc:+', 'A:pom_a1:N:pom_c1:p-organic:/projects/ccsm/inputdata/atm/cam/physprops/ocpho_rrtmg_c101112.nc:+',
... 'A:soa_a1:N:soa_c1:s-organic:/projects/ccsm/inputdata/atm/cam/physprops/ocphi_rrtmg_c100508.nc:+', 'A:bc_a1:N:bc_c1:black-c:/projects/ccsm/inputdata/atm/cam/physprops/bcpho_rrtmg_c100508.nc:+',
... 'A:dst_a1:N:dst_c1:dust:/projects/ccsm/inputdata/atm/cam/physprops/dust4_rrtmg_c090521.nc:+', 'A:ncl_a1:N:ncl_c1:seasalt:/projects/ccsm/inputdata/atm/cam/physprops/ssam_rrtmg_c100508.nc',
... 'mam3_mode2:aitken:=', 'A:num_a2:N:num_c2:num_mr:+',
... 'A:so4_a2:N:so4_c2:sulfate:/projects/ccsm/inputdata/atm/cam/physprops/sulfate_rrtmg_c080918.nc:+', 'A:soa_a2:N:soa_c2:s-organic:/projects/ccsm/inputdata/atm/cam/physprops/ocphi_rrtmg_c100508.nc:+',
... 'A:ncl_a2:N:ncl_c2:seasalt:/projects/ccsm/inputdata/atm/cam/physprops/ssam_rrtmg_c100508.nc', 'mam3_mode3:coarse:=',
... 'A:num_a3:N:num_c3:num_mr:+', 'A:dst_a3:N:dst_c3:dust:/projects/ccsm/inputdata/atm/cam/physprops/dust4_rrtmg_c090521.nc:+',
... 'A:ncl_a3:N:ncl_c3:seasalt:/projects/ccsm/inputdata/atm/cam/physprops/ssam_rrtmg_c100508.nc:+', 'A:so4_a3:N:so4_c3:sulfate:/projects/ccsm/inputdata/atm/cam/physprops/sulfate_rrtmg_c080918.nc'
... rad_climate = 'A:Q:H2O', 'N:O2:O2', 'N:CO2:CO2',
... 'N:ozone:O3', 'N:N2O:N2O', 'N:CH4:CH4',
... 'N:CFC11:CFC11', 'N:CFC12:CFC12', 'M:mam3_mode1:/projects/ccsm/inputdata/atm/cam/physprops/mam3_mode1_rrtmg_c110318.nc',
... 'M:mam3_mode2:/projects/ccsm/inputdata/atm/cam/physprops/mam3_mode2_rrtmg_c110318.nc', 'M:mam3_mode3:/projects/ccsm/inputdata/atm/cam/physprops/mam3_mode3_rrtmg_c110318.nc'
... /'''
>>> teststr2 = '''&rad_cnst_nl
... icecldoptics = 'mitchell'
... logfile = 'cpl.log.150514-2398745'
... case_name = 'ERB.f19_g16.B1850C5.sandiatoss3_intel.C.150513-1274213'
... runid = 'BAR'
... model_version = 'cam5_3_36'
... username = 'hudson'
... iceopticsfile = '/something/else/inputdata/atm/cam/physprops/iceoptics_c080917.nc'
... liqcldoptics = 'gammadist'
... liqopticsfile = '/something/else/inputdata/atm/cam/physprops/F_nwvl200_mu20_lam50_res64_t298_c080428.nc'
... mode_defs = 'mam3_mode1:accum:=', 'A:num_a1:N:num_c1:num_mr:+',
... 'A:so4_a1:N:so4_c1:sulfate:/something/else/inputdata/atm/cam/physprops/sulfate_rrtmg_c080918.nc:+', 'A:pom_a1:N:pom_c1:p-organic:/something/else/inputdata/atm/cam/physprops/ocpho_rrtmg_c101112.nc:+',
... 'A:soa_a1:N:soa_c1:s-organic:/something/else/inputdata/atm/cam/physprops/ocphi_rrtmg_c100508.nc:+', 'A:bc_a1:N:bc_c1:black-c:/something/else/inputdata/atm/cam/physprops/bcpho_rrtmg_c100508.nc:+',
... 'A:dst_a1:N:dst_c1:dust:/something/else/inputdata/atm/cam/physprops/dust4_rrtmg_c090521.nc:+', 'A:ncl_a1:N:ncl_c1:seasalt:/something/else/inputdata/atm/cam/physprops/ssam_rrtmg_c100508.nc',
... 'mam3_mode2:aitken:=', 'A:num_a2:N:num_c2:num_mr:+',
... 'A:so4_a2:N:so4_c2:sulfate:/something/else/inputdata/atm/cam/physprops/sulfate_rrtmg_c080918.nc:+', 'A:soa_a2:N:soa_c2:s-organic:/something/else/inputdata/atm/cam/physprops/ocphi_rrtmg_c100508.nc:+',
... 'A:ncl_a2:N:ncl_c2:seasalt:/something/else/inputdata/atm/cam/physprops/ssam_rrtmg_c100508.nc', 'mam3_mode3:coarse:=',
... 'A:num_a3:N:num_c3:num_mr:+', 'A:dst_a3:N:dst_c3:dust:/something/else/inputdata/atm/cam/physprops/dust4_rrtmg_c090521.nc:+',
... 'A:ncl_a3:N:ncl_c3:seasalt:/something/else/inputdata/atm/cam/physprops/ssam_rrtmg_c100508.nc:+', 'A:so4_a3:N:so4_c3:sulfate:/something/else/inputdata/atm/cam/physprops/sulfate_rrtmg_c080918.nc'
... rad_climate = 'A:Q:H2O', 'N:O2:O2', 'N:CO2:CO2',
... 'N:ozone:O3', 'N:N2O:N2O', 'N:CH4:CH4',
... 'N:CFC11:CFC11', 'N:CFC12:CFC12', 'M:mam3_mode1:/something/else/inputdata/atm/cam/physprops/mam3_mode1_rrtmg_c110318.nc',
... 'M:mam3_mode2:/something/else/inputdata/atm/cam/physprops/mam3_mode2_rrtmg_c110318.nc', 'M:mam3_mode3:/something/else/inputdata/atm/cam/physprops/mam3_mode3_rrtmg_c110318.nc'
... /'''
>>> _compare_namelists(_parse_namelists(teststr1.splitlines(), 'foo'), _parse_namelists(teststr2.splitlines(), 'bar'), 'ERB.f19_g16.B1850C5.sandiatoss3_intel')
''
"""
different_namelists = OrderedDict()
for namelist, gold_names in gold_namelists.items():
if (namelist not in comp_namelists):
different_namelists[namelist] = ["Missing namelist: {}\n".format(namelist)]
else:
comp_names = comp_namelists[namelist]
for name, gold_value in gold_names.items():
if (name not in comp_names):
different_namelists.setdefault(namelist, []).append(" missing variable: '{}'\n".format(name))
else:
comp_value = comp_names[name]
comments = _compare_values(name, gold_value, comp_value, case)
if comments != "":
different_namelists.setdefault(namelist, []).append(comments)
for name in comp_names:
if (name not in gold_names):
different_namelists.setdefault(namelist, []).append(" found extra variable: '{}'\n".format(name))
for namelist in comp_namelists:
if (namelist not in gold_namelists):
different_namelists[namelist] = ["Found extra namelist: {}\n".format(namelist)]
comments = ""
for namelist, nlcomment in different_namelists.items():
if len(nlcomment) == 1:
comments += nlcomment[0]
else:
comments += "Differences in namelist '{}':\n".format(namelist)
comments += "".join(nlcomment)
return comments
| 5,337,989
|
def directMultiCreate( data, cfg_params='', *, dtype='',
doInfo = True, doScatter = True, doAbsorption = True ):
"""Convenience function which creates Info, Scatter, and Absorption objects
directly from a data string rather than an on-disk or in-memory
file. Such usage obviously precludes proper caching behind the scenes,
and is intended for scenarios where the same data should not be used
repeatedly.
"""
if not dtype and not data.startswith('NCMAT') and 'NCMAT' in data:
if data.strip().startswith('NCMAT'):
raise NCBadInput('NCMAT data must have "NCMAT" as the first 5 characters (must not be preceded by whitespace)')
rawi,raws,rawa = _rawfct['multicreate_direct'](data,dtype,cfg_params,doInfo,doScatter,doAbsorption)
info = Info( ('_rawobj_',rawi) ) if rawi else None
scatter = Scatter( ('_rawobj_',raws) ) if raws else None
absorption = Absorption( ('_rawobj_',rawa) ) if rawa else None
class MultiCreated:
def __init__(self,i,s,a):
self.__i,self.__s,self.__a = i,s,a
@property
def info(self):
"""Info object (None if not present)."""
return self.__i
@property
def scatter(self):
"""Scatter object (None if not present)."""
return self.__s
@property
def absorption(self):
"""Absorption object (None if not present)."""
return self.__a
def __str__(self):
fmt = lambda x : str(x) if x else 'n/a'
return 'MultiCreated(Info=%s, Scatter=%s, Absorption=%s)'%(fmt(self.__i),
fmt(self.__s),
fmt(self.__a))
return MultiCreated(info,scatter,absorption)
| 5,337,990
|
def write_date_json(date: str, df: DataFrame) -> str:
""" Just here so we can log in the list comprehension """
file_name = f"pmg_reporting_data_{date}.json"
print(f"Writing file {file_name}")
df.to_json(file_name, orient="records", date_format="iso")
print(f"{file_name} written")
return file_name
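
# Short sketch of how this helper might be driven; the column names and dates
# below are made up for illustration.
import pandas as pd
df = pd.DataFrame({"campaign": ["a", "b"], "clicks": [10, 20]})
files = [write_date_json(date, df) for date in ("2021-01-30", "2021-01-31")]
# -> ["pmg_reporting_data_2021-01-30.json", "pmg_reporting_data_2021-01-31.json"]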
| 5,337,991
|
def fit_composite_peak(bands, intensities, locs, num_peaks=2, max_iter=10,
fit_kinds=('lorentzian', 'gaussian'), log_fn=print,
band_resolution=1):
"""Fit several peaks to a single spectral feature.
locs : sequence of float
Contains num_peaks peak-location guesses,
or a single feature-location guess.
fit_kinds : sequence of str
Specifies all the peak types that the composite may be made of.
Not all fit_kinds are guaranteed to appear in the final composite fit.
See fit_single_peak for details about the other arguments.
"""
# deal with bad scaling
intensities, scale = _scale_spectrum(intensities)
# get the appropriate function(s) to fit
fit_funcs = {k: _get_peak_function(k, None, False) for k in fit_kinds}
# find reasonable approximations for initial parameters: (loc, area, fwhm)
if len(locs) == num_peaks:
loc_guesses = locs
elif len(locs) == 1:
loc_guesses = np.linspace(locs[0]-band_resolution, locs[0]+band_resolution,
num_peaks)
else:
raise ValueError('Number of locs (%d) != number of peaks (%d)' % (
len(locs), num_peaks))
mean_loc = np.mean(locs)
area_guess = _guess_area(bands, intensities, mean_loc) / num_peaks
fwhm_guess = 2 * band_resolution / num_peaks
init_params = (tuple(loc_guesses) +
(area_guess,) * num_peaks +
(fwhm_guess,) * num_peaks)
loc_idx = slice(0, num_peaks)
# try all combinations of peaks, use the one that matches best
combs = []
for fit_keys in combinations_with_replacement(fit_funcs, num_peaks):
label = '+'.join(fit_keys)
fit_func = _combine_peak_functions([fit_funcs[k] for k in fit_keys])
params, pstd = _weighted_curve_fit(
bands, intensities, mean_loc, fit_func, init_params,
max_iter=max_iter, log_fn=log_fn, log_label=label,
band_resolution=band_resolution, loc_idx=loc_idx)
mask, peak_x, peak_y = _select_top99(bands, fit_func, params)
residual = np.linalg.norm(peak_y - intensities[mask])
log_fn('composite %s residual: %g' % (label, residual))
combs.append((residual, fit_keys, fit_func, params, pstd,
mask, peak_x, peak_y))
residual, fit_keys, fit_func, params, pstd, mask, peak_x, peak_y = min(combs)
# Calculate peak info, with original scaling
peak_data = dict(xmin=float(peak_x[0]), xmax=float(peak_x[-1]),
fit_kinds=fit_keys, height=[], center=[], area=[], fwhm=[],
center_std=[], area_std=[], fwhm_std=[])
peak_ys = [peak_y * scale]
for i, k in enumerate(fit_keys):
fn = fit_funcs[k]
loc, area, fwhm = map(float, params[i::num_peaks])
loc_std, area_std, fwhm_std = map(float, pstd[i::num_peaks])
peak_ys.append(fn(peak_x, loc, area, fwhm) * scale)
height = float(fn(loc, loc, area, fwhm))
peak_data['height'].append(height * scale)
peak_data['center'].append(loc)
peak_data['center_std'].append(loc_std)
peak_data['area'].append(area * scale)
peak_data['area_std'].append(area_std * scale)
peak_data['fwhm'].append(fwhm)
peak_data['fwhm_std'].append(fwhm_std)
peak_y *= scale
return mask, peak_ys, peak_data
| 5,337,992
|
def main(args):
"""
main function
:param args: input arguments
"""
# make kowiki dir
if not os.path.exists(args.data_dir):
os.makedirs(args.data_dir)
print(f"make kowiki data dir: {args.data_dir}")
# download latest kowiki dump
filename = download_kowiki(args.data_dir)
    # run WikiExtractor
    wiki_list, wiki_out = exec_WikiExtractor(args.data_dir, filename)
    # remove runs of multiple newlines from the wiki text
    data_list = trim_wiki(wiki_list)
    # save the wiki as a txt file
wiki_txt = os.path.join(args.data_dir, "kowiki.txt")
dump_txt(data_list, wiki_txt)
# zip
with zipfile.ZipFile(os.path.join(args.data_dir, "kowiki.txt.zip"), "w") as z:
z.write(wiki_txt, os.path.basename(wiki_txt))
os.remove(filename)
shutil.rmtree(wiki_out)
os.remove(wiki_txt)
| 5,337,993
|
def plot_score(model, title=None, figsize=(12,4), directory=None, filename=None):
"""Plots training score (and optionally validation score) by epoch."""
# Validate request
if not model.metric:
raise Exception("No metric designated for score.")
if not isinstance(model, Estimator):
raise ValueError("Model is not a valid Estimator or subclass object.")
if not isinstance(figsize, tuple):
raise TypeError("figsize is not a valid tuple.")
# If val score is on the log, plot both training and validation score
if 'val_score' in model.history.epoch_log:
fig, _, title = _plot_train_val_score(model, title=title,
figsize=figsize)
else:
fig, _, title = _plot_train_score(model, title=title,
figsize=figsize)
# Save figure if directory is not None
if directory is not None:
title = title.replace('\n', ' ')
save_plot(fig, directory, filename, title)
# Show plot
fig.tight_layout()
plt.show()
| 5,337,994
|
def entry_generator(tree):
"""
    Since the RSS feed has a fixed structure, the method
    relies on simply finding the nodes belonging to the
    same level: in short, it yields a new dictionary for
    every <entry> tag it finds; this is useful for
    appending them to a list (see how it is called in a
    list comprehension in the main() method).
"""
for entry in tree.findall(relative("entry")):
entry_dict = {
'title': entry.find(relative("title")).text,
'summary': entry.find(relative("summary")).text,
'issued': entry.find(relative("issued")).text,
'id': entry.find(relative("id")).text,
'name': entry.find(relative("author"))
.find(relative("name")).text,
'email': entry.find(relative("author"))
.find(relative("email")).text}
yield entry_dict
| 5,337,995
|
def eprint(*args, **kwargs):
"""
Print to stderr
"""
print(*args, file=sys.stderr, **kwargs)
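
# Trivial illustration: the message goes to sys.stderr rather than stdout.
eprint("warning: falling back to defaults")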
| 5,337,996
|
def test_session():
"""test of Session class
"""
session = MockSession()
assert session.remote_address.full_address == 'mock'
assert str(session) == '<mprpc.transport.Session: remote_address=mock>'
assert repr(session) == '<mprpc.transport.Session: remote_address=mock>'
| 5,337,997
|
def param():
"""
Create a generic Parameter object with generic name, description and no value defined
"""
return parameter.Parameter("generic_param",template_units.kg_s,"A generic param")
| 5,337,998
|
def colorneighborhood(graph, start_node, colors, color_last_edges=True):
"""
Color nodes and edges according to how far they are from a specific starting node.
Likely only useful for debugging. Arguments:
- graph: NetworkX DiGraph to color in-place
- start_node: node ID of starting node
- colors: list of colors for nodes and outgoing edges at each level (starting node first)
- color_last_edges: whether to color outgoing edges of the last level of colored nodes
"""
current_ring = set([start_node])
next_ring = set()
level = 0
while level < len(colors):
for n in current_ring:
graph.nodes[n]['color'] = colors[level]
if color_last_edges or level < len(colors) - 1:
for neighbor in graph.successors(n):
graph.edges[n, neighbor]['color'] = colors[level]
if 'color' not in graph.nodes[neighbor] and neighbor not in current_ring:
next_ring.add(neighbor)
current_ring = next_ring
next_ring = set()
level += 1
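
# Minimal sketch with a toy graph; node ids and colors are arbitrary.
import networkx as nx
g = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 4)])
colorneighborhood(g, 0, colors=['red', 'orange', 'yellow'])
print(nx.get_node_attributes(g, 'color'))   # {0: 'red', 1: 'orange', 2: 'yellow'}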
| 5,337,999
|