content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def testIBP():
    """Render IBP-generated binary matrices for several parameter settings."""
    from pybrain.tools.plotting.colormaps import ColorMap
    import pylab
    # Fixed number of customers for every run.
    n = 50
    # Parameter settings: either (alpha,) or (alpha, beta).
    ps = [(10, 0.1),
          (10,), (50,),
          (50, 0.5),
          ]
    # Draw one matrix per parameter setting, left-ordering each for display.
    matrices = []
    for params in ps:
        if len(params) > 1:
            sample = generateIBP(n, params[0], params[1])
        else:
            sample = generateIBP(n, params[0])
        matrices.append(leftordered(sample))
    # Show each matrix in its own color map, then display the figures.
    for sample in matrices:
        ColorMap(sample, pixelspervalue=3)
    pylab.show()
def rel_error(model, X, Y, meanY=0):
    """Compute the per-sample relative L2 error of a model's predictions.

    model : trained model exposing ``predict``; used to compute predicted data
    X : input data
    Y : reference output data (possibly mean-centered)
    meanY : the mean of the untreated Y, added back for the normalization term

    Returns an array with one relative error per leading-axis sample:
    ||model.predict(X) - Y|| / ||Y + meanY||, reduced over all non-leading axes.
    """
    Yhat = model.predict(X)
    dY = Yhat - Y
    # Reduce over every axis except the first (sample) axis.  `dY.ndim` is the
    # idiomatic replacement for the original `np.size(dY.shape)`.
    axis = tuple(range(1, dY.ndim))
    return np.sqrt(np.sum(dY**2, axis=axis) /
                   np.sum((Y + meanY)**2, axis=axis))
def read_data(vocab, path):
    """Reads a bAbI dataset.
    Args:
        vocab (collections.defaultdict): A dictionary storing word IDs.
        path (str): Path to bAbI data file.
    Returns:
        list of Query of Sentence: Parsed lines, grouped into stories.
    """
    stories = []
    current = []
    with open(path) as f:
        for line in f:
            sid, content = line.strip().split(' ', 1)
            # A sentence id of '1' marks the start of a new story: flush the
            # one accumulated so far (if any) before parsing this line.
            if sid == '1' and current:
                stories.append(current)
                current = []
            current.append(parse_line(vocab, content))
    if current:
        stories.append(current)
    return stories
def get_dataset(cfg: DatasetConfig,
                shard_id: int,
                num_shards: int,
                feature_converter_cls: Type[seqio.FeatureConverter],
                num_epochs: Optional[int] = None,
                continue_from_last_checkpoint: bool = False) -> tf.data.Dataset:
    """Returns a dataset from SeqIO based on a `DatasetConfig`."""
    # Checkpoint continuation is not implemented by this dataset fn; reject the
    # flag explicitly rather than silently ignoring it.
    if continue_from_last_checkpoint:
        raise ValueError(
            '`continue_from_last_checkpoint` must be set to False as this is not '
            'supported by this dataset fn.')
    del continue_from_last_checkpoint
    # Optionally import a module that registers the SeqIO task/mixture.
    if cfg.module:
        import_module(cfg.module)
    # Every shard must receive an equal share of each batch.
    if cfg.batch_size % num_shards:
        raise ValueError(
            f'Batch size ({cfg.batch_size}) must be divisible by number of '
            f'shards ({num_shards}).')
    shard_info = seqio.ShardInfo(index=shard_id, num_shards=num_shards)
    if cfg.seed is None:
        # Use a shared timestamp across devices as the seed.
        seed = multihost_utils.broadcast_one_to_all(np.int32(time.time()))
    else:
        seed = cfg.seed
    return get_dataset_inner(cfg, shard_info, feature_converter_cls, seed,
                             num_epochs)
def contour_distances_2d(image1, image2, dx=1):
    """
    Calculate contour distances between binary masks.
    The region of interest must be encoded by 1
    Args:
        image1: 2D binary mask 1
        image2: 2D binary mask 2
        dx: physical size of a pixel (e.g. 1.8 (mm) for UKBB)
    Returns:
        (mean_contour_dist, hausdorff_dist): symmetric mean contour distance
        and directed Hausdorff distance, both scaled by `dx`
    """
    # convert to contiguous array and data type uint8 as required by the cv2 function
    image1 = np.ascontiguousarray(image1, dtype=np.uint8)
    image2 = np.ascontiguousarray(image2, dtype=np.uint8)

    def _contour_points(image):
        # Extract contour points and stack all contours into one (N, 2) array.
        contours, _ = cv2.findContours(image.astype('uint8'), cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_NONE)
        return np.vstack([np.array(c)[:, 0, :] for c in contours])

    contour1_pts = _contour_points(image1)
    contour2_pts = _contour_points(image2)
    # Pairwise Euclidean distance matrix via broadcasting; replaces the former
    # O(N*M) Python double loop with one vectorized computation.  Cast to
    # float64 so the subtraction cannot wrap on integer coordinate types.
    diff = (contour1_pts[:, None, :].astype(np.float64) -
            contour2_pts[None, :, :].astype(np.float64))
    dist_matrix = np.linalg.norm(diff, axis=2)
    # symmetrical mean contour distance
    mean_contour_dist = 0.5 * (np.mean(np.min(dist_matrix, axis=0)) +
                               np.mean(np.min(dist_matrix, axis=1)))
    # calculate Hausdorff distance using the accelerated method
    hausdorff_dist = directed_hausdorff(contour1_pts, contour2_pts)[0]
    return mean_contour_dist * dx, hausdorff_dist * dx
def weighted_sequence_identity(a, b, weights, gaps='y'):
    """Compute the position-weighted sequence identity between two aligned sequences.

    The meaning of "identity" depends on how gap positions are treated, which
    is controlled by *gaps*. See
    `this page <https://pyaln.readthedocs.io/en/latest/tutorial.html#sequence-identity>`_
    for details and examples.

    Parameters
    ----------
    a : str
        first sequence, with gaps encoded as "-"
    b : str
        second sequence, with gaps encoded as "-"
    weights : list of float
        per-position weights (same length as the sequences, gaps included).
        The score is divided by the summed weight of the positions considered.
    gaps : str
        gap-handling mode:
        - 'y' : gaps count as mismatches; positions gapped in both are ignored.
        - 'n' : positions gapped in either sequence are ignored.
        - 't' : terminal gap runs in either sequence are ignored; otherwise as 'y'.
        - 'a' : gaps are ordinary characters; gap-to-gap matches count as identities.

    Returns
    -------
    float
        weighted sequence identity between the two sequences

    Examples
    --------
    >>> weighted_sequence_identity('ATGCA',
    ...                            'ATGCC', weights=[1, 1, 1, 1, 6])
    0.4
    >>> weighted_sequence_identity('ATGCA',
    ...                            'ATGCC', weights=[1, 1, 1, 1, 1])
    0.8

    Note
    ----
    For many sequences, :func:`~pyaln.Alignment.score_similarity` is faster.

    See also
    --------
    pyaln.Alignment.score_similarity, weighted_sequence_identity
    """
    if len(a) != len(b):
        raise IndexError('sequence_identity ERROR sequences do not have the same length')
    if len(weights) != len(a):
        raise IndexError('sequence_identity ERROR weights must be the same length as sequences')
    if gaps == 'a':
        # Every position counts; gap-to-gap pairs score as identities.
        denom = sum(weights)
        score = sum(w for ca, cb, w in zip(a, b, weights) if ca == cb)
        return score / denom if denom else 0.0
    if gaps == 'y':
        excluded = {i for i, (ca, cb) in enumerate(zip(a, b))
                    if ca == '-' and cb == '-'}
    elif gaps == 'n':
        excluded = {i for i, (ca, cb) in enumerate(zip(a, b))
                    if ca == '-' or cb == '-'}
    elif gaps == 't':
        excluded = {i for i, (ca, cb) in enumerate(zip(a, b))
                    if ca == '-' and cb == '-'}
        # Additionally drop terminal gap runs from both ends of each sequence.
        for seq in (a, b):
            for idx in range(len(seq)):
                if seq[idx] != '-':
                    break
                excluded.add(idx)
            for idx in range(len(seq) - 1, -1, -1):
                if seq[idx] != '-':
                    break
                excluded.add(idx)
    else:
        raise Exception('sequence_identity ERROR gaps argument must be one of {a, y, n, t}')
    denom = sum(w for i, w in enumerate(weights) if i not in excluded)
    score = sum(w for i, (ca, cb, w) in enumerate(zip(a, b, weights))
                if i not in excluded and ca == cb and ca != '-')
    return score / denom if denom else 0.0
def loss_fd(batch,model,reconv_psf_image,step=0.01):
    """
    Defines a loss respective to unit shear response
    Args:
        batch: tf batch
            Image stamps as ['obs_image'] and psf models as ['psf_image']
        model: callable
            Measurement method, forwarded to the response computation
        reconv_psf_image: tf tensor
            Synthetic reconvolution psf
        step: float
            Step size for the finite differences
    Returns:
        lost: float
            Distance between the shear response matrix and unity.
    """
    # NOTE(review): `batch_size` is a free (module-level/global) name here, not
    # a parameter — presumably defined elsewhere in the file; confirm it
    # matches the leading dimension of batch['obs_image'].
    shears = tf.random.uniform((batch_size,2),-.1,.1,dtype=tf.float32)
    # compute response; index [1] selects the response matrix R from the
    # finite-difference metacal helper's return value.
    R = get_metacal_response_finitediff(batch['obs_image'],
                                        batch['psf_image'],
                                        reconv_psf_image,
                                        shear=shears,
                                        step=step,
                                        method=model)[1]
    # Frobenius-norm distance between R and the 2x2 identity.
    lost = tf.norm(R - tf.eye(2))
    return lost
def batch_tokenize(sentences: List[str],
                   tokenizer: yttm.BPE,
                   batch_size: int = 256,
                   bos: bool = True,
                   eos: bool = True) -> List[List[int]]:
    """
    Tokenize input sentences in batches.
    :param sentences: sentences to tokenize
    :param tokenizer: trained tokenizer model
    :param batch_size: amount of sentences in each batch
    :param bos: whether to add <BOS> symbol at the beginning of each sentence
    :param eos: whether to add <EOS> symbol at the end of each sentence
    :return: a list of tokenized sentences, where each sentence is represented as a list of integers
    """
    n_batches = math.ceil(len(sentences) / batch_size)
    encoded = []
    # Feed the tokenizer one slice of `batch_size` sentences at a time.
    for start in range(0, n_batches * batch_size, batch_size):
        chunk = list(sentences[start:start + batch_size])
        encoded.extend(tokenizer.encode(chunk, bos=bos, eos=eos))
    return encoded
def blog_index(request):
    """The index of all blog posts"""
    # Pinned entries first, then most recent first.
    entries = Page.objects.filter(blog_entry=True).order_by(
        '-pinned', '-date_created')
    paginator = Paginator(entries, 10)
    page_num = request.GET.get('page')
    ctx = {'BASE_URL': settings.BASE_URL}
    try:
        page = paginator.page(page_num)
    except PageNotAnInteger:
        # Missing or non-numeric page parameter -> first page.
        page = paginator.page(1)
    except EmptyPage:
        # Out-of-range page parameter -> last page.
        page = paginator.page(paginator.num_pages)
    ctx['entries'] = page
    return render(request, 'cms/blog_index.html', ctx)
def read_ini_file(file_path):
    """Read an ini file and return the profile data.
    If the profile name begins with 'profile ', remove it.
    Parameters
    ----------
    - file_path - the path to the file to read
    Returns
    -------
    The profile data: a dict mapping profile name to its options (plus a
    '__name__' entry holding the cleaned profile name).
    """
    LOG.info('Reading ini file from %s', file_path)
    profiles = {}
    if os.path.exists(file_path):
        parser = ConfigParser.ConfigParser()
        parser.read(file_path)
        for profile in parser.sections():
            # Strip the optional 'profile ' prefix once, instead of
            # recomputing it for every option as before.
            name = profile.replace('profile ', '')
            profiles[name] = {'__name__': name}
            for option in parser.options(profile):
                profiles[name][option] = parser.get(profile, option)
    else:
        safe_print('AWSume Error: Directory [' + file_path + '] does not exist')
    return profiles
def pair_time(pos_k, pos_l, vel_k, vel_l, radius):
    """Return the time until two disks of the given radius collide.

    pos_k, pos_l, vel_k, vel_l all have two elements as a list (2D positions
    and velocities of disks k and l).

    Returns the collision time (>= 0) if the disks are approaching and will
    touch, otherwise infinity.
    """
    t_0 = 0.0
    # Relative position and velocity of disk l with respect to disk k.
    delta_pos = np.array([pos_l[0] - pos_k[0], pos_l[1] - pos_k[1]])
    delta_vel = np.array([vel_l[0] - vel_k[0], vel_l[1] - vel_k[1]])
    b = delta_pos.dot(delta_vel)
    # Discriminant of |delta_pos + t*delta_vel|^2 == (2*radius)^2.
    upsilon = b ** 2 - delta_vel.dot(delta_vel) * (delta_pos.dot(delta_pos) - 4.0 * radius ** 2)
    # Collision requires a real root (upsilon > 0) and approaching disks (b < 0).
    if upsilon > 0.0 and b < 0.0:
        return t_0 - (b + m.sqrt(upsilon)) / delta_vel.dot(delta_vel)
    # BUGFIX: the original returned float(oo), relying on an external `oo`
    # (e.g. sympy.oo); float('inf') is equivalent and dependency-free.
    return float('inf')
def test_systeminit():
    """
    Test that initializing a ``System`` class produces a list of ``Prior``
    objects of the correct length when:
    - parallax and total mass are fixed
    - parallax and total mass errors are given
    - parallax is fixed, total mass error is given
    - parallax error is given, total mass error is fixed
    Test that the different types of data are parsed correctly
    when initializing a ``System`` object.
    """
    testdir = orbitize.DATADIR
    input_file = os.path.join(testdir, 'test_val.csv')
    data_table = read_input.read_file(input_file)
    # Manually set 'object' column of data table
    data_table['object'] = 1
    data_table['object'][1] = 2
    # Map each (plx_err, mass_err) combination to the expected prior count.
    plx_mass_errs2lens = {
        (0., 0.): 14,
        (1., 1.): 14,
        (0., 1.): 14,
        (1., 0.): 14
    }
    for plx_e, mass_e in plx_mass_errs2lens.keys():
        testSystem_priors = system.System(
            2, data_table, 10., 10., plx_err=plx_e, mass_err=mass_e
        )
        assert len(testSystem_priors.sys_priors) == \
            plx_mass_errs2lens[(plx_e, mass_e)]
    testSystem_parsing = system.System(
        2, data_table, 10., 10.,
        plx_err=0.5, mass_err=0.5
    )
    # Row counts per epoch for sep/PA vs RA/Dec style measurements.
    assert len(data_table[testSystem_parsing.seppa[0]]) == 0
    assert len(data_table[testSystem_parsing.seppa[1]]) == 1
    assert len(data_table[testSystem_parsing.seppa[2]]) == 1
    assert len(data_table[testSystem_parsing.radec[0]]) == 0
    assert len(data_table[testSystem_parsing.radec[1]]) == 1
    assert len(data_table[testSystem_parsing.radec[2]]) == 0
    # Fitted-parameter labels: six orbital elements per body, plus plx/mtot.
    assert testSystem_parsing.labels == [
        'sma1', 'ecc1', 'inc1', 'aop1', 'pan1', 'tau1', 'sma2',
        'ecc2', 'inc2', 'aop2', 'pan2', 'tau2', 'plx', 'mtot'
    ]
def replace_version(search_dir: Path = _DEFAULT_SEARCH_DIR, *, old: str, new: str):
    """Replaces the current version number with a new version number.
    Args:
        search_dir: the search directory for modules.
        old: the current version number.
        new: the new version number.
    Raises:
        ValueError: if `old` does not match the current version, or if there is not exactly one
            version number in the found modules.
    """
    current = get_version(search_dir=search_dir)
    if current != old:
        raise ValueError(f"{old} does not match current version: {current}")
    _validate_version(new)
    # Rewrite the version file of every module, including the parent package.
    for module in list_modules(search_dir=search_dir, include_parent=True):
        vfile = _find_version_file(search_dir / module.root)
        updated = vfile.read_text("UTF-8").replace(old, new)
        vfile.write_text(updated)
def coco_to_shapely(inpath_json: Union[Path, str],
                    categories: List[int] = None) -> Dict:
    """Transforms COCO annotations to shapely geometry format.
    Args:
        inpath_json: Input filepath coco json file.
        categories: Categories will filter to specific categories and images that contain at least one
            annotation of that category.
    Returns:
        Dictionary of image file name (str) to shapely MultiPolygon.
    """
    data = utils.other.load_json(inpath_json)
    annotations = data['annotations']
    if categories is not None:
        annotations = [x for x in annotations if x['category_id'] in categories]
    # Group annotations by image id in one pass instead of re-scanning the
    # full annotation list for every image (was O(images * annotations)).
    anns_by_image = {}
    for ann in annotations:
        anns_by_image.setdefault(ann['image_id'], []).append(ann)
    # BUGFIX: map image ids to file names explicitly.  The previous code
    # zipped a sorted id list against an order-of-appearance filtered
    # file-name list, which pairs ids with the wrong names whenever
    # data['images'] is not sorted by id.
    id_to_file_name = {x['id']: x['file_name'] for x in data['images']}
    extracted_geometries = {}
    for image_id in sorted(anns_by_image):
        segments = [ann['segmentation'][0] for ann in anns_by_image[image_id]]  # format [x,y,x1,y1,...]
        # Create shapely Multipolygons from COCO format polygons.
        mp = MultiPolygon([
            Polygon(np.array(segment).reshape((int(len(segment) / 2), 2)))
            for segment in segments
        ])
        extracted_geometries[str(id_to_file_name[image_id])] = mp
    return extracted_geometries
def test_ssl_configuration():
    """
    Given:
    - Kafka initialization parameters
    When:
    - Initializing KafkaCommunicator object
    Then:
    - Assert initialization is as expected.
    """
    kafka = KafkaCommunicator(brokers='brokers',
                              ca_cert='ca_cert',
                              client_cert='client_cert',
                              client_cert_key='client_cert_key',
                              ssl_password='ssl_password',
                              offset='earliest',
                              trust_any_cert=False)
    # Expected consumer config: earliest offset, manual commits, fixed group.
    expected_consumer_conf = {
        'auto.offset.reset': 'earliest',
        'bootstrap.servers': 'brokers',
        'enable.auto.commit': False,
        'group.id': 'xsoar_group',
        'security.protocol': 'ssl',
        'session.timeout.ms': 10000,
        'ssl.ca.location': os.path.abspath(kafka.ca_path),
        'ssl.certificate.location': os.path.abspath(kafka.client_cert_path),
        'ssl.key.location': os.path.abspath(kafka.client_key_path),
        'ssl.key.password': 'ssl_password'
    }
    # Expected producer config: same SSL material, no consumer-only keys.
    expected_producer_conf = {
        'bootstrap.servers': 'brokers',
        'security.protocol': 'ssl',
        'ssl.ca.location': os.path.abspath(kafka.ca_path),
        'ssl.certificate.location': os.path.abspath(kafka.client_cert_path),
        'ssl.key.location': os.path.abspath(kafka.client_key_path),
        'ssl.key.password': 'ssl_password'
    }
    assert kafka.conf_consumer == expected_consumer_conf
    assert kafka.conf_producer == expected_producer_conf
    # The communicator writes the certificate material to disk; verify it.
    with open(kafka.ca_path, 'r') as f:
        assert f.read() == 'ca_cert'
    with open(kafka.client_cert_path, 'r') as f:
        assert f.read() == 'client_cert'
    with open(kafka.client_key_path, 'r') as f:
        assert f.read() == 'client_cert_key'
    # Clean up the temporary certificate files created by the constructor.
    os.remove(kafka.ca_path)
    os.remove(kafka.client_cert_path)
    os.remove(kafka.client_key_path)
def create_activity_specific_breathing_rate_df(
    person_breathing_in,
    time,
    event,
    breathing_rate_key,
    rounding=5
):
    """
    Generate breathing rates taking into account age and activity intensity.
    Parameters:
        person_breathing_in: string
            E.g. "person 1"
        time: string
            E.g. "2022-01"
        event: string
            E.g. "work", "party"
        breathing_rate_key: string
            Column name used for the breathing-rate values.
        rounding: int
            Decimal places kept for the tabulated rates.
    Returns: pd.DataFrame
        One row per (activity, age band, rate) combination; rates are in
        cubic meters per hour.
    """
    age_key = f'age_({person_breathing_in})'
    activity_key = f'activity_{index_name(time, event, person_breathing_in)}'
    # Tabulated breathing-rate ranges, in cubic meters per *minute*.
    rate_table = {
        'Sleep or Nap': {
            '31 to <41':
                np.arange(0.0046, 0.0066, 0.0001).round(rounding),
        },
        'Sedentary/Passive': {
            '31 to <41':
                np.arange(0.0043, 0.0066, 0.0001).round(rounding),
        },
        'Light Intensity': {
            '31 to <41':
                np.arange(0.012, 0.016, 0.0001).round(rounding),
        },
        'Moderate Intensity': {
            '31 to <41':
                np.arange(0.026, 0.038, 0.0001).round(rounding),
        },
        'High Intensity': {
            '31 to <41':
                np.arange(0.049, 0.072, 0.0001).round(rounding),
        }
    }
    frames = []
    for activity_name, by_age in rate_table.items():
        for age_band, rates in by_age.items():
            # Scalar columns broadcast across all rows, matching the
            # original per-row repetition.
            frames.append(pd.DataFrame({
                breathing_rate_key: rates,
                activity_key: activity_name,
                age_key: age_band,
                'value': 1
            }))
    cpt_df = pd.concat(frames)
    # Convert so that the result is in cubic meters per hour.
    cpt_df[breathing_rate_key] = cpt_df[breathing_rate_key] * 60
    return cpt_df
def find_valid_imported_name(name):
    """return a name preceding an import op, or False if there isn't one"""
    # If `name` ends with the import marker, strip the import op and return
    # the remainder; otherwise `endswith` short-circuits the `and` to False.
    return name.endswith(MARKER) and remove_import_op(name)
def parseText(text1, nlp):
    """Run the Spacy parser on the input text and return the parsed document."""
    return nlp(text1)
def read_rc(rcpath):
    """Retrieve color values from the rc file.
    Arguments:
        rcpath (str): path to the rc file.
    Returns:
        3-tuple of integers representing R,G,B, or None if the file does not
        exist or does not contain exactly three numeric lines.
    """
    if not os.path.exists(rcpath):
        return None
    colors = []
    with open(rcpath) as rc:
        for ln in rc:
            stripped = ln.strip()
            # BUGFIX: the original indexed `ln.strip()[0]` unconditionally,
            # which raised IndexError on blank/whitespace-only lines.
            if stripped and stripped[0] in '1234567890':
                colors.append(int(stripped))
    if len(colors) != 3:
        return None
    return colors
def load_gij_coeff_matrix(friction_coeffs_root, translation, order_g12, eps=1e-8):
    """
    Load Fourier expansion coefficients of friction coefficients gii and gij.
    Represent result as a matrix (numpy.array)
    :param friction_coeffs_root: root directory holding the tabulated coefficient files
    :param translation: means relative position of the second oscillator
    :param order_g12: pair of maximal Fourier orders for g12; reversed when symmetry is used
    :param eps: tolerance below which a coordinate is treated as exactly zero
    """
    # Determine if translation vector is in lower half-plane - then use coeffs from upper part of the plane
    # but swap cilia indices
    if translation[1] < 0 or (abs(translation[1]) < eps and translation[0] < 0):
        in_lower_halfplane = True
        translation = - translation
    else:
        in_lower_halfplane = False
    # Replace very small coordinates with zero - to correctly compile the folder name
    for i, ti in enumerate(translation):
        if abs(ti) < eps:
            translation[i] = 0
    # Get file name and load g_ij as a function
    translation3D = (*translation, 0.)
    translation_folder = get_translation_rotation_folder(translation3D, (0, 0))
    if in_lower_halfplane:  # fix symmetry
        filename = os.path.join(friction_coeffs_root, translation_folder, 'g21_ft.dat')  # should be the same as g12
        order = order_g12[::-1]  # reverse order
    else:
        filename = os.path.join(friction_coeffs_root, translation_folder, 'g12_ft.dat')
        order = order_g12
    # Load friction as a function (Fourier sum); swap order of input when needed
    df = load_coeffs_from_file(filename, order_max=order, truncate_triangular=False)
    coeffs = np.array(df['coeff'])
    coeff_ids = [(m, n) for (m, n) in zip(df['n1'], df['n2'])]
    order = max(order_g12)  # determine matrix size
    # Place each (m, n) coefficient at its offset position in a
    # (2*order+1) x (2*order+1) matrix centered at index (order, order).
    coeff_mat = np.zeros([2 * order + 1, 2 * order + 1])
    for (i, j), coeff in zip(coeff_ids, coeffs):
        imat = i + order
        jmat = j + order
        coeff_mat[imat, jmat] = coeff
    # If in lower half-plane, we used data from the upper half-plane;
    # swap arguments (transpose the fourier coeffs matrix)
    if in_lower_halfplane:
        coeff_mat = coeff_mat.T
    return coeff_mat
def view_pebbles_home(request):
    """Serve up the workspace, the current home page.
    Include global js settings"""
    app_config = AppConfiguration.get_config()
    if app_config is None:
        return HttpResponseRedirect(reverse('view_no_domain_config_error'))
    # Is this D3M Mode? If so, make sure:
    # (1) there is D3M config information
    # (2) user is logged in
    #
    if app_config.is_d3m_domain():
        # (1) Is there a valid D3M config?
        d3m_config = get_latest_d3m_config()
        if not d3m_config:
            return HttpResponseRedirect(\
                reverse('view_d3m_config_error'))
        # (2) Is the user authenticated?
        # NOTE(review): `is_authenticated()` is called as a method — the
        # pre-Django-1.10 API; confirm against the project's Django version.
        if not request.user.is_authenticated():
            return HttpResponseRedirect(\
                reverse('login'))
    session_key = get_session_key(request)
    # Template context: page title, session key, and app config as a dict.
    dinfo = dict(title='TwoRavens',
                 session_key=session_key,
                 app_config=app_config.convert_to_dict())
    return render(request,
                  'index.html',
                  dinfo)
def save_to_pickle(value, path, file_name):
    """Save `value` into a pickle file named `file_name` inside `path`.

    Creates `path` — including any missing intermediate directories — if it
    does not exist; the original `os.mkdir` failed when parents were missing.
    """
    os.makedirs(path, exist_ok=True)
    with open(os.path.join(path, file_name), 'wb') as handle:
        pickle.dump(value, handle, protocol=pickle.HIGHEST_PROTOCOL)
def multi_pretty(request):
    """Bulk-add pretty numbers from an uploaded Excel file."""
    from openpyxl import load_workbook
    if request.method == "GET":
        return render(request, 'multi_pretty.html')
    # POST: open the uploaded workbook and read its first sheet.
    file_object = request.FILES.get("exc")
    wb = load_workbook(file_object)
    sheet = wb.worksheets[0]
    # Expected column layout (data starts at row 2, below the header):
    # mobile | price | level | status
    for row in sheet.iter_rows(min_row=2):
        mobile = row[0].value
        price = row[1].value
        level = row[2].value
        status = row[3].value
        # Skip numbers that already exist to avoid duplicate records.
        exists = models.PrettyNum.objects.filter(mobile=mobile).exists()
        if not exists:
            models.PrettyNum.objects.create(mobile=mobile, price=price, level=level, status=status)
    return redirect('/pretty/list/')
def is_port_open(host, port, timeout=5):
    """
    verifies if a port is open in a remote host
    :param host: IP of the remote host
    :type host: str
    :param port: port to check
    :type port: int
    :param timeout: timeout max to check
    :type timeout: int
    :return: True if the port is open
    :rtype: bool
    """
    # Context manager closes the socket (the original leaked it), and `== 0`
    # replaces the `is 0` identity comparison, which is unreliable for ints
    # and a SyntaxWarning on Python 3.8+.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(timeout)
        return sock.connect_ex((host, port)) == 0
def check_transport_reaction_gpr_presence(model):
    """Return the list of transport reactions that have no associated gpr."""
    transport_rxns = helpers.find_transport_reactions(model)
    # Keep only reactions whose gene-reaction rule is empty/falsy.
    return [rxn for rxn in transport_rxns if not rxn.gene_reaction_rule]
def get_RSA_modulus(b, num):
    """
    Generates a list of RSA modulus' of bit length b
    such that each modulus, n = pq, where p and q are
    both primes.
    :param b: bit length of the modulus
    :param num: number of modulus' you want
    :returns: a list of RSA modulus'
    """
    p_lst = []
    # generate primes of length b/2 because when we multiply 2 together, the length
    # will be b-bits long
    for _ in range(0, num + 1):
        p_lst.append(get_prime(b // 2))
    # NOTE(review): consecutive moduli share a prime (n_i = p_i * p_{i+1}),
    # so gcd(n_i, n_{i+1}) recovers the shared factor.  That is insecure for
    # real RSA — presumably intentional for a cryptanalysis exercise; confirm
    # before reusing this elsewhere.
    n_list = []
    for i in range(0, num):
        n_list.append(p_lst[i] * p_lst[i + 1])
    return n_list
def generate_single_color_images(width, height, destination_dir):
    """ Generates and saves to disk about 1000 single-color rectangle-shaped images """
    from itertools import permutations
    # (value range, low anchor, high anchor) for each brightness tier.  The
    # original unrolled 42 near-identical generate_image calls by hand.
    tiers = [
        (range(69, 231, 3), 69, 230),   # light colours
        (range(46, 154, 2), 46, 153),   # medium colours
        (range(23, 78, 1), 23, 77),     # dark colours
    ]
    index = 1
    for values, low, high in tiers:
        for value in values:
            # All 6 orderings of (low, high, value) as RGB channels.
            # itertools.permutations yields them in exactly the same order
            # as the original hand-written call sequence, so the produced
            # file names and colours are identical.
            for r, g, b in permutations((low, high, value)):
                generate_image(width, height, r, g, b,
                               'color_' + str(index) + '.jpg', destination_dir)
                index += 1
def reset_data():
    """Reset the datastore to a known state, populated with test data."""
    setup.reset_datastore()
    # Seed one Authorization record per permission scenario the tests exercise.
    db.put([
        Authorization.create(
            'haiti', 'test_key', domain_write_permission='test.google.com'),
        Authorization.create(
            'haiti', 'domain_test_key',
            domain_write_permission='mytestdomain.com'),
        Authorization.create(
            'haiti', 'reviewed_test_key',
            domain_write_permission='test.google.com',
            mark_notes_reviewed=True),
        Authorization.create(
            'haiti', 'not_allow_believed_dead_test_key',
            domain_write_permission='test.google.com',
            believed_dead_permission=False),
        Authorization.create(
            'haiti', 'allow_believed_dead_test_key',
            domain_write_permission='test.google.com',
            believed_dead_permission=True),
        Authorization.create(
            'haiti', 'other_key', domain_write_permission='other.google.com'),
        Authorization.create(
            'haiti', 'read_key', read_permission=True),
        Authorization.create(
            'haiti', 'full_read_key', full_read_permission=True),
        Authorization.create(
            'haiti', 'search_key', search_permission=True),
        Authorization.create(
            'haiti', 'subscribe_key', subscribe_permission=True),
        # Keys created under '*' apply to every repository, not just 'haiti'.
        Authorization.create(
            '*', 'global_test_key',
            domain_write_permission='globaltestdomain.com'),
        Authorization.create(
            '*', 'global_search_key', search_permission=True),
    ])
def use_processors(n_processes):
    """
    Map the requested worker count onto the processors available on this
    machine, falling back to single-threaded execution when the result is 0.
    """
    from multiprocessing import cpu_count
    max_procs = cpu_count()
    # NOTE(review): the modulo wraps requests above the CPU count (e.g.
    # max_procs + 2 maps to 1) — presumably intended as a cheap clamp;
    # confirm before relying on it for exact counts.
    requested = n_processes % (max_procs + 1)
    if requested == 0:
        requested = 1
        print('WARNING: Found n_processes = 0. Falling back to default single-threaded execution (n_processes = 1).')
    return requested
def test_gauge_absolute_negative_rate_tcp(mock_random):
    """TCPStatsClient.gauge works with absolute negative value and rate."""
    # Delegate to the shared absolute-negative-rate scenario, run over TCP.
    cl = _tcp_client()
    _test_gauge_absolute_negative_rate(cl, 'tcp', mock_random)
def climate_radio_thermostat_ct101_multiple_temp_units_state_fixture():
    """Load the climate multiple temp units node state fixture data."""
    # Parse the stored JSON describing a CT101 thermostat node whose sensors
    # report in multiple temperature units.
    return json.loads(
        load_fixture(
            "zwave_js/climate_radio_thermostat_ct101_multiple_temp_units_state.json"
        )
    )
def get_TR_and_ntpts(dtseries_path, wb_command_path):
    """
    :param dtseries_path: String, the full path to a .dtseries.nii file
    :param wb_command_path: String, the full path to the wb_command executable
    :return: Tuple of 2 numbers: (repetition time, number of timepoints) —
        note this order matches the return statement, not the function name.
    """
    # NOTE(review): sys.exit in a library helper aborts the whole process on a
    # missing file — presumably intentional for this pipeline; confirm.
    if not os.path.exists(dtseries_path):
        sys.exit('Error: {} does not exist'.format(dtseries_path))
    else:
        ntpts = wb_command_get_info(wb_command_path, dtseries_path,
                                    'number-of-maps')
        rep_time = wb_command_get_info(wb_command_path, dtseries_path,
                                       'step-interval')
    return rep_time, ntpts
def GetPseudoAAC1(ProteinSequence, lamda=30, weight=0.05, AAP=None):
    """
    #######################################################################################
    Computing the first 20 of type I pseudo-amino acid compostion descriptors based on the given
    properties.

    ProteinSequence: the protein sequence to describe.
    lamda: number of sequence-order correlation tiers to accumulate.
    weight: weight factor applied to the correlation term.
    AAP: list of amino-acid property dicts (defaults to an empty list).

    Returns a dict mapping "PAAC1".."PAAC20" to rounded descriptor values.
    ########################################################################################
    """
    # Avoid a mutable default argument; None stands in for the original [].
    if AAP is None:
        AAP = []
    # Accumulate sequence-order correlation factors for tiers 1..lamda.
    rightpart = 0.0
    for i in range(lamda):
        rightpart = rightpart + GetSequenceOrderCorrelationFactor(
            ProteinSequence, i + 1, AAP
        )
    AAC = GetAAComposition(ProteinSequence)
    result = {}
    temp = 1 + weight * rightpart
    # Normalize each amino-acid composition entry by the correlation term.
    for index, aa in enumerate(AALetter):
        result["PAAC" + str(index + 1)] = round(AAC[aa] / temp, 3)
    return result
def reversedict(dct):
    """ Reverse the {key:val} in dct to
    {val:key}

    Note: if several keys share a value, the last one iterated wins.
    """
    # `iteritems()` is Python 2 only; a dict comprehension over items() is
    # the Python 3 equivalent of the original loop.
    return {val: key for key, val in dct.items()}
def createNewEnsemble(templateVars, project, targetPath, mono):
    """
    If "localEnv" is in templateVars, clone that ensemble;
    otherwise create one from a template with templateVars

    Returns (destDir, manifest) for the newly created ensemble.
    """
    # targetPath is relative to the project root
    from unfurl import yamlmanifest
    assert not os.path.isabs(targetPath)
    # Resolve destination directory and manifest file name from targetPath.
    if not targetPath or targetPath == ".":
        destDir, manifestName = DefaultNames.EnsembleDirectory, DefaultNames.Ensemble
    elif targetPath.endswith(".yaml") or targetPath.endswith(".yml"):
        destDir, manifestName = os.path.split(targetPath)
    else:
        destDir = targetPath
        manifestName = DefaultNames.Ensemble
    # choose a destDir that doesn't conflict with an existing folder
    # (i.e. if default ensemble already exists)
    destDir = project.getUniquePath(destDir)
    # destDir is now absolute
    targetPath = os.path.normpath(os.path.join(destDir, manifestName))
    if "localEnv" not in templateVars:
        # we found a template file to clone
        assert project
        sourceDir = os.path.normpath(
            os.path.join(project.projectRoot, templateVars["sourceDir"])
        )
        specRepo, relPath, revision, bare = project.findPathInRepos(sourceDir)
        if not specRepo:
            raise UnfurlError(
                '"%s" is not in a git repository. Cloning from plain file directories not yet supported'
                % os.path.abspath(sourceDir)
            )
        manifestPath = writeEnsembleManifest(
            os.path.join(project.projectRoot, destDir),
            manifestName,
            specRepo,
            sourceDir,
            templateVars,
        )
        localEnv = LocalEnv(manifestPath, project=project)
        manifest = yamlmanifest.ReadOnlyManifest(localEnv=localEnv)
    elif templateVars:
        # didn't find a template file
        # look for an ensemble at the given path or use the source project's default
        manifest = yamlmanifest.clone(templateVars["localEnv"], targetPath)
    else:
        raise UnfurlError("can't find anything to clone")
    # Create the ensemble's git repo; with mono, reuse the project repo.
    _createEnsembleRepo(manifest, mono and project.projectRepo)
    return destDir, manifest
    # XXX need to add manifest to unfurl.yaml
def pi_tune():
    """Attempts to automatically tune the PI loop."""
    # Placeholder: auto-tuning is not yet implemented.
    print("TO BE IMPLEMENTED")  # TODO
def mincut_graph_tool(edges: Iterable[Sequence[np.uint64]],
                      affs: Sequence[np.uint64],
                      sources: Sequence[np.uint64],
                      sinks: Sequence[np.uint64],
                      logger: Optional[logging.Logger] = None) -> np.ndarray:
    """ Computes the min cut on a local graph
    :param edges: n x 2 array of uint64s
    :param affs: float array of length n
    :param sources: uint64
    :param sinks: uint64
    :return: m x 2 array of uint64s
        edges that should be removed
    NOTE(review): on early exit this returns a plain empty list, not an
    ndarray; callers appear to only check emptiness, so that is preserved.
    """
    time_start = time.time()
    original_edges = edges
    # Stitch supervoxels across chunk boundaries and represent those that are
    # connected with a cross chunk edge with a single id. This may cause id
    # changes among sinks and sources that need to be taken care of.
    edges, affs, mapping, remapping = merge_cross_chunk_edges(edges.copy(),
                                                             affs.copy())
    dt = time.time() - time_start
    if logger is not None:
        logger.debug("Cross edge merging: %.2fms" % (dt * 1000))
    time_start = time.time()
    mapping_vec = np.vectorize(lambda a: mapping[a] if a in mapping else a)
    if len(edges) == 0:
        return []
    if len(mapping) > 0:
        assert np.unique(list(mapping.keys()), return_counts=True)[1].max() == 1
    remapped_sinks = mapping_vec(sinks)
    remapped_sources = mapping_vec(sources)
    sinks = remapped_sinks
    sources = remapped_sources
    # Assemble edges: Edges after remapping combined with edges between sinks
    # and sources (infinite-capacity edges keep each side internally connected)
    sink_edges = list(itertools.product(sinks, sinks))
    source_edges = list(itertools.product(sources, sources))
    comb_edges = np.concatenate([edges, sink_edges, source_edges])
    comb_affs = np.concatenate([affs, [float_max, ] *
                                (len(sink_edges) + len(source_edges))])
    # To make things easier for everyone involved, we map the ids to
    # [0, ..., len(unique_ids) - 1]
    # Generate weighted graph with graph_tool
    weighted_graph, cap, gt_edges, unique_ids = \
        flatgraph_utils.build_gt_graph(comb_edges, comb_affs,
                                       make_directed=True)
    sink_graph_ids = np.where(np.in1d(unique_ids, sinks))[0]
    source_graph_ids = np.where(np.in1d(unique_ids, sources))[0]
    if logger is not None:
        logger.debug(f"{sinks}, {sink_graph_ids}")
        logger.debug(f"{sources}, {source_graph_ids}")
    dt = time.time() - time_start
    if logger is not None:
        logger.debug("Graph creation: %.2fms" % (dt * 1000))
    time_start = time.time()
    # # Get rid of connected components that are not involved in the local
    # # mincut
    # cc_prop, ns = graph_tool.topology.label_components(weighted_graph)
    #
    # if len(ns) > 1:
    #     cc_labels = cc_prop.get_array()
    #
    #     for i_cc in range(len(ns)):
    #         cc_list = np.where(cc_labels == i_cc)[0]
    #
    #         # If connected component contains no sources and/or no sinks,
    #         # remove its nodes from the mincut computation
    #         if not np.any(np.in1d(source_graph_ids, cc_list)) or \
    #                 not np.any(np.in1d(sink_graph_ids, cc_list)):
    #             weighted_graph.delete_vertices(cc)  # wrong
    # Compute mincut
    src, tgt = weighted_graph.vertex(source_graph_ids[0]), \
               weighted_graph.vertex(sink_graph_ids[0])
    res = graph_tool.flow.boykov_kolmogorov_max_flow(weighted_graph,
                                                     src, tgt, cap)
    part = graph_tool.flow.min_st_cut(weighted_graph, src, cap, res)
    labeled_edges = part.a[gt_edges]
    cut_edge_set = gt_edges[labeled_edges[:, 0] != labeled_edges[:, 1]]
    dt = time.time() - time_start
    if logger is not None:
        logger.debug("Mincut comp: %.2fms" % (dt * 1000))
    time_start = time.time()
    if len(cut_edge_set) == 0:
        return []
    time_start = time.time()
    # Make sure we did not do something wrong: Check if sinks and sources are
    # among each other and not in different sets
    for i_cc in np.unique(part.a):
        # Make sure to read real ids and not graph ids
        # BUG FIX: np.int was deprecated in NumPy 1.20 and later removed;
        # use the builtin int dtype instead.
        cc_list = unique_ids[np.array(np.where(part.a == i_cc)[0],
                                      dtype=int)]
        # if logger is not None:
        #     logger.debug("CC size = %d" % len(cc_list))
        if np.any(np.in1d(sources, cc_list)):
            assert np.all(np.in1d(sources, cc_list))
            assert ~np.any(np.in1d(sinks, cc_list))
        if np.any(np.in1d(sinks, cc_list)):
            assert np.all(np.in1d(sinks, cc_list))
            assert ~np.any(np.in1d(sources, cc_list))
    dt = time.time() - time_start
    if logger is not None:
        logger.debug("Verifying local graph: %.2fms" % (dt * 1000))
    # Extract original ids
    # This has potential to be optimized
    remapped_cutset = []
    for s, t in flatgraph_utils.remap_ids_from_graph(cut_edge_set, unique_ids):
        if s in remapping:
            s = remapping[s]
        else:
            s = [s]
        if t in remapping:
            t = remapping[t]
        else:
            t = [t]
        # include both orientations so the undirected edge matches either way
        remapped_cutset.extend(list(itertools.product(s, t)))
        remapped_cutset.extend(list(itertools.product(t, s)))
    remapped_cutset = np.array(remapped_cutset, dtype=np.uint64)
    remapped_cutset_flattened_view = remapped_cutset.view(dtype='u8,u8')
    edges_flattened_view = original_edges.view(dtype='u8,u8')
    # keep only cut edges that actually exist in the original edge list
    cutset_mask = np.in1d(remapped_cutset_flattened_view, edges_flattened_view)
    return remapped_cutset[cutset_mask]
def mock_graph_literal():
    """Creates a mock tree
    Metasyntactic variables: https://www.ietf.org/rfc/rfc3092.txt

    Returns a list of nested node dicts; each node has a "frame" (name and
    optional type), "metrics" (inclusive and exclusive time), and optional
    "children".
    """
    # Two root nodes ("foo" and "waldo"); several subtrees (e.g. bar->baz/grault)
    # appear under multiple parents, mimicking a call graph with shared callees.
    graph_dict = [
        {
            "frame": {"name": "foo", "type": "function"},
            "metrics": {"time (inc)": 130.0, "time": 0.0},
            "children": [
                {
                    "frame": {"name": "bar"},
                    "metrics": {"time (inc)": 20.0, "time": 5.0},
                    "children": [
                        {
                            "frame": {"name": "baz", "type": "function"},
                            "metrics": {"time (inc)": 5.0, "time": 5.0},
                        },
                        {
                            "frame": {"name": "grault"},
                            "metrics": {"time (inc)": 10.0, "time": 10.0},
                        },
                    ],
                },
                {
                    "frame": {"name": "qux", "type": "function"},
                    "metrics": {"time (inc)": 60.0, "time": 0.0},
                    "children": [
                        {
                            "frame": {"name": "quux"},
                            "metrics": {"time (inc)": 60.0, "time": 5.0},
                            "children": [
                                {
                                    "frame": {"name": "corge", "type": "function"},
                                    "metrics": {"time (inc)": 55.0, "time": 10.0},
                                    "children": [
                                        {
                                            "frame": {"name": "bar"},
                                            "metrics": {
                                                "time (inc)": 20.0,
                                                "time": 5.0,
                                            },
                                            "children": [
                                                {
                                                    "frame": {
                                                        "name": "baz",
                                                        "type": "function",
                                                    },
                                                    "metrics": {
                                                        "time (inc)": 5.0,
                                                        "time": 5.0,
                                                    },
                                                },
                                                {
                                                    "frame": {"name": "grault"},
                                                    "metrics": {
                                                        "time (inc)": 10.0,
                                                        "time": 10.0,
                                                    },
                                                },
                                            ],
                                        },
                                        {
                                            "frame": {"name": "grault"},
                                            "metrics": {
                                                "time (inc)": 10.0,
                                                "time": 10.0,
                                            },
                                        },
                                        {
                                            "frame": {
                                                "name": "garply",
                                                "type": "function",
                                            },
                                            "metrics": {
                                                "time (inc)": 15.0,
                                                "time": 15.0,
                                            },
                                        },
                                    ],
                                }
                            ],
                        }
                    ],
                },
                {
                    "frame": {"name": "waldo", "type": "function"},
                    "metrics": {"time (inc)": 50.0, "time": 0.0},
                    "children": [
                        {
                            "frame": {"name": "fred", "type": "function"},
                            "metrics": {"time (inc)": 35.0, "time": 5.0},
                            "children": [
                                {
                                    "frame": {"name": "plugh", "type": "function"},
                                    "metrics": {"time (inc)": 5.0, "time": 5.0},
                                },
                                {
                                    "frame": {"name": "xyzzy", "type": "function"},
                                    "metrics": {"time (inc)": 25.0, "time": 5.0},
                                    "children": [
                                        {
                                            "frame": {
                                                "name": "thud",
                                                "type": "function",
                                            },
                                            "metrics": {
                                                "time (inc)": 25.0,
                                                "time": 5.0,
                                            },
                                            "children": [
                                                {
                                                    "frame": {
                                                        "name": "baz",
                                                        "type": "function",
                                                    },
                                                    "metrics": {
                                                        "time (inc)": 5.0,
                                                        "time": 5.0,
                                                    },
                                                },
                                                {
                                                    "frame": {
                                                        "name": "garply",
                                                        "type": "function",
                                                    },
                                                    "metrics": {
                                                        "time (inc)": 15.0,
                                                        "time": 15.0,
                                                    },
                                                },
                                            ],
                                        }
                                    ],
                                },
                            ],
                        },
                        {
                            "frame": {"name": "garply", "type": "function"},
                            "metrics": {"time (inc)": 15.0, "time": 15.0},
                        },
                    ],
                },
            ],
        },
        {
            "frame": {"name": "waldo", "type": "function"},
            "metrics": {"time (inc)": 30.0, "time": 10.0},
            "children": [
                {
                    "frame": {"name": "bar"},
                    "metrics": {"time (inc)": 20.0, "time": 5.0},
                    "children": [
                        {
                            "frame": {"name": "baz", "type": "function"},
                            "metrics": {"time (inc)": 5.0, "time": 5.0},
                        },
                        {
                            "frame": {"name": "grault"},
                            "metrics": {"time (inc)": 10.0, "time": 10.0},
                        },
                    ],
                }
            ],
        },
    ]
    return graph_dict
def parse_footer(fn):
    """
    Parse the downloaded FOOTER file, which contains a header for each program
    and (usually) a description line.
    Yields either a nested 2-tuple of (header-program-name,
    (description-program-name, description-text)) if a description can be
    found, or a 1-tuple of (header-program-name,) if no description found.

    :param fn: path of the FOOTER file to parse.
    """
    # BUG FIX: the file handle was opened and never closed (resource leak);
    # use a context manager and iterate the file directly.
    block = []
    with open(fn) as f:
        for line in f:
            m1 = re_header.match(line)
            if m1:
                # New header: flush any pending block first.
                if block:
                    yield block
                    block = []
                name = m1.groups()[0]
                block.append(name)
                continue
            m = re_summary.match(line)
            if m:
                # A summary before any header is ignored.
                if not block:
                    continue
                block.append(m.groups())
                yield block
                block = []
    # Flush a trailing header that had no description line.
    if block:
        yield block
def parse_ADD_ins(tokens):
    """Attempts to parse an ADD instruction.

    Returns a statement object on success, or None when the token stream
    does not start with ADD or no operand form matches.
    """
    assert len(tokens) > 0
    if tokens[0].text.upper() != 'ADD':
        return None
    statement = Obj()
    statement.type = 'STATEMENT'
    statement.statement_type = 'INSTRUCTION'
    statement.instruction = 'ADD'
    # Try each operand form in order: register form first, then immediate.
    for operand_parser in (parse_operands_DR_SR1_SR2, parse_operands_DR_SR1_imm5):
        operands = operand_parser(tokens[1:])
        if operands:
            statement.operands = operands
            return statement
    return None
def commonIntegerPredicate(field):
    """Return a tuple of all digit runs (integer substrings) found in *field*."""
    # BUG FIX: use a raw string for the regex; "\d" in a plain string is an
    # invalid escape sequence (DeprecationWarning, error in future Pythons).
    return tuple(re.findall(r"\d+", field))
def spatialft(image, cosine_window=True, rmdc=True):
    """Take the fourier transform of an image (or flow field).
    shift the quadrants around so that low spatial frequencies are in
    the center of the 2D fourier transformed image

    Returns (power_spectrum, [row_freqs, col_freqs]).
    """
    rows, cols = np.shape(image)
    # Raised-cosine (Hanning) taper to suppress border artifacts.
    if cosine_window:
        taper = np.outer(np.hanning(rows), np.hanning(cols))
        image = image * taper
    # Remove the DC component so it does not dominate the spectrum.
    if rmdc:
        image = image - np.mean(image)
    power = np.abs(np.fft.fftshift(np.fft.fft2(image))) ** 2
    freqs = [np.fft.fftshift(np.fft.fftfreq(rows)),
             np.fft.fftshift(np.fft.fftfreq(cols))]
    return (power, freqs)
def _get_instrument_parameters(ufile, filemetadata):
    """ Return a dictionary containing instrument parameters.

    :param ufile: UF file object exposing pulse widths, rays, polarizations,
        PRTs and Nyquist velocities.
    :param filemetadata: callable returning a metadata dict template for a
        given parameter name.
    """
    # pulse width
    pulse_width = filemetadata('pulse_width')
    pulse_width['data'] = ufile.get_pulse_widths() / _LIGHT_SPEED  # m->sec
    # assume that the parameters in the first ray represent the beam widths,
    # bandwidth and frequency in the entire volume
    first_ray = ufile.rays[0]
    field_header = first_ray.field_headers[0]
    # fixed-point fields: divide by the format's scaling factors
    beam_width_h = field_header['beam_width_h'] / 64.
    beam_width_v = field_header['beam_width_v'] / 64.
    bandwidth = field_header['bandwidth'] / 16. * 1.e6
    wavelength_cm = field_header['wavelength_cm'] / 64.
    wavelength_hz = _LIGHT_SPEED / (wavelength_cm / 100.)
    # radar_beam_width_h
    radar_beam_width_h = filemetadata('radar_beam_width_h')
    radar_beam_width_h['data'] = np.array([beam_width_h], dtype='float32')
    # radar_beam_width_v
    # BUG FIX: metadata key was misspelled 'radar_beam_width_w'; it must be
    # 'radar_beam_width_v' to match the horizontal counterpart.
    radar_beam_width_v = filemetadata('radar_beam_width_v')
    radar_beam_width_v['data'] = np.array([beam_width_v], dtype='float32')
    # radar_receiver_bandwidth
    radar_receiver_bandwidth = filemetadata('radar_receiver_bandwidth')
    radar_receiver_bandwidth['data'] = np.array([bandwidth], dtype='float32')
    # polarization_mode
    polarization_mode = filemetadata('polarization_mode')
    polarization_mode['data'] = ufile.get_sweep_polarizations()
    # frequency
    frequency = filemetadata('frequency')
    frequency['data'] = np.array([wavelength_hz], dtype='float32')
    # prt
    prt = filemetadata('prt')
    prt['data'] = ufile.get_prts() / 1e6  # us->sec
    instrument_parameters = {
        'pulse_width': pulse_width,
        'radar_beam_width_h': radar_beam_width_h,
        'radar_beam_width_v': radar_beam_width_v,
        'radar_receiver_bandwidth': radar_receiver_bandwidth,
        'polarization_mode': polarization_mode,
        'frequency': frequency,
        'prt': prt,
    }
    # nyquist velocity if defined
    nyquist_velocity = filemetadata('nyquist_velocity')
    nyquist_velocity['data'] = ufile.get_nyquists()
    if nyquist_velocity['data'] is not None:
        instrument_parameters['nyquist_velocity'] = nyquist_velocity
    return instrument_parameters
def get_deployment_mode(path):
    """
    Work out the 'deployment mode' from the global attributes in a NetCDF file
    :param path: path to dataset
    :return: Mode as a value from `DeploymentModes` enumeration
    :raises ValueError: if mode cannot be determined or is invalid
    """
    fname = os.path.basename(path)
    # BUG FIX: the Dataset was never closed, leaking the file handle; read
    # the attribute inside try/finally so the file is always released.
    d = Dataset(path)
    try:
        try:
            mode_str = d.deployment_mode
        except AttributeError:
            raise ValueError("Attribute 'deployment_mode' not found in '{}'".format(fname))
    finally:
        d.close()
    # NOTE(review): comparison assumes the file stores the mode lowercase;
    # mode_str itself is not normalised -- confirm against producers.
    for mode in DeploymentModes:
        if mode.value.lower() == mode_str:
            return mode
    raise ValueError(
        "Unrecognised deployment mode '{}' in '{}'".format(mode_str, fname)
    )
def split_reaction(reac):
    """ split a CHEMKIN reaction into reactants and products
    :param reac: reaction string
    :type reac: str
    :returns: reactants and products
    :rtype: (tuple of strings, tuple of strings)
    """
    # Pattern matching a trailing third-body ("+M" or "(+M)") term.
    em_pattern = one_of_these([PAREN_PLUS_EM + STRING_END,
                               PLUS_EM + STRING_END])

    def _side_to_reagents(side_str):
        # Strip the third-body term, split on '+', expand each reagent.
        side_str = re.sub(em_pattern, '', side_str)
        expanded = [_expand_en_reagents(strip_spaces(token))
                    for token in re.split(PADDED_PLUS, side_str)]
        return tuple(chain(*expanded))

    reactant_str, product_str = re.split(PADDED_ARROW, reac)
    return _side_to_reagents(reactant_str), _side_to_reagents(product_str)
def bond_stereo_parities(sgr):
    """ bond parities, as a dictionary

    :param sgr: stereo graph whose bond values carry a stereo-parity slot.
    :returns: mapping of bond key -> stereo parity, extracted at position
        BND_STE_PAR_POS from each bond's value tuple.
    """
    return mdict.by_key_by_position(bonds(sgr), bond_keys(sgr),
                                    BND_STE_PAR_POS)
def archive_fs(locations):
    """Fixture to check the BagIt file generation.

    Opens the archive location's filesystem, yields it to the test, and on
    teardown force-removes every directory created during the test.
    """
    archive_path = locations['archive'].uri
    # NOTE(review): writeable=False with create_dir=True -- presumably the
    # opener still creates the root dir; confirm against the fs opener docs.
    fs = opener.opendir(archive_path, writeable=False, create_dir=True)
    yield fs
    # teardown: remove everything the test wrote into the archive
    for d in fs.listdir():
        fs.removedir(d, force=True)
def encode(text):
    """
    Return the UTF-8 byte values of *text* as a list of ints.

    BUG FIX: the previous docstring claimed base64 encoding, but the code
    returns raw UTF-8 byte values; the docstring now matches the behavior.
    """
    # iterating a bytes object yields ints directly; no per-item cast needed
    return list(text.encode('utf8'))
def _get_ip():
"""
:return: This computer's default AF_INET IP address as a string
"""
# find ip using answer with 75 votes
# https://stackoverflow.com/questions/166506/finding-local-ip-addresses-using-pythons-stdlib
ip = ''
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# apparently any IP will work
sock.connect(('192.168.1.1', 1))
ip = sock.getsockname()[0]
except Exception as e:
print(e)
print('Error: Couldn\'t get IP! Make sure you are connected to a network.')
finally:
sock.close()
return str(ip) | 5,328,748 |
def _get_value(cav, _type):
"""Get value of custom attribute item"""
if _type == 'Map:Person':
return cav["attribute_object"]["id"] \
if cav.get("attribute_object") else None
if _type == 'Checkbox':
return cav["attribute_value"] == '1'
return cav["attribute_value"] | 5,328,749 |
def test_post_order_with_empty_input():
    """To test post_order method with an empty tree.

    Traversing an empty BST's root must raise TypeError with the expected
    message.
    """
    tree_new = BST()
    with pytest.raises(TypeError) as err:
        tree_new.post_order_traversal(tree_new.root)
    # NOTE(review): the f-string prefix is unnecessary here (no placeholders)
    assert str(err.value) == (f'There is no node to traverse.')
def get_inference(model, vectorizer, topics, text, threshold):
    """
    runs inference on text input
    paramaters
    ----------
    model: loaded model to use to transform the input
    vectorizer: instance of the vectorizer e.g TfidfVectorizer(ngram_range=(2, 3))
    topics: the list of topics in the model
    text: input string to be classified
    threshold: float of threshold to use to output a topic
    returns
    -------
    tuple => top score

    NOTE(review): the return types are inconsistent -- ('None', -1, set())
    when no topic clears the threshold, but a single topic string otherwise;
    callers must handle both shapes. Also, `labels` is only used for the
    emptiness check; the successful return is the argmax topic regardless of
    which labels cleared the threshold.
    """
    v_text = vectorizer.transform([text])
    score = model.transform(v_text)
    labels = set()
    # collect every topic whose score clears the threshold
    for i in range(len(score[0])):
        if score[0][i] > threshold:
            labels.add(topics[i])
    if not labels:
        return 'None', -1, set()
    # argmax over the (single-row) score matrix picks the top topic
    return topics[np.argmax(score)]
def get_message_id(update: dict, status_update: str) -> int:
    """Return the message id from a bot update.

    Parameters
    ----------
    update : dict
        a new message payload received from the bot
    status_update : str
        which message variant to read: 'message' for a new message or
        'edited_message' for an edited one
    Returns
    -------
    int
        the 'message_id' of the selected message variant
    """
    return update[status_update]['message_id']
def gmm_clustering_predict(model, X):
    """
    X is a (N, 1) array

    Clamps the samples to [-2.5, 2.5] before delegating to model.predict.
    """
    clipped = np.clip(X, -2.5, 2.5)
    return model.predict(clipped)
def http_request(url, method='GET', timeout=2, **kwargs):
    """Generic task to make an http request.

    :param url: target URL.
    :param method: HTTP verb (case-insensitive); must be in METHOD_CHOICES.
    :param timeout: per-request timeout in seconds.
    :param kwargs: optional 'headers', 'params' and (for post/put) 'data'.
    :returns: dict with status_code/headers/text, or None on request failure.
    :raises ValueError: when the method is not supported.
    """
    headers = kwargs.get('headers', {})
    params = kwargs.get('params', {})
    data = kwargs.get('data', {})
    request_kwargs = {}
    if headers:
        request_kwargs['headers'] = headers
    if params:
        request_kwargs['params'] = params
    # BUG FIX: the previous check compared the raw method against lowercase
    # names, so data was silently dropped for 'POST'/'PUT'; compare
    # case-insensitively.
    if method.lower() in ['post', 'put']:
        request_kwargs['data'] = data
    s = requests.Session()
    if method not in METHOD_CHOICES:
        raise ValueError(f'{method} not supported!')
    method = method.lower()
    request = getattr(s, method)
    try:
        response = request(url, timeout=timeout, **request_kwargs)
        # response.raise_for_status()
    except requests.exceptions.RequestException:
        logger.exception('%s request to url %s failed!', method, url)
        return None
    else:
        logger.info('%s request to url %s successfull!', method, url)
        return {
            'status_code': response.status_code,
            'headers': dict(response.headers),
            'text': response.text
        }
def start_grpc_server(server: Server):
    """
    Start the given gRPC `server`.

    This does not block so, if you don't have a mainloop, this will simply
    finish immediately with the thread/process.

    :param server: a gRPC Server instance (already configured with ports
        and servicers) whose start() method is invoked.
    """
    server.start()
def unify_nest(args: Type[MultiNode], kwargs: Type[MultiNode], node_str, mode, axis=0, max_depth=1):
    """
    Unify the input nested arguments, which consist of sub-arrays spread across arbitrary nodes, to unified arrays
    on the single target node.
    :param args: The nested positional arguments to unify.
    :type args: MultiNode
    :param kwargs: The nested keyword arguments to unify.
    :type kwargs: MultiNode
    :param node_str: The node to unify the nested arguments to.
    :type node_str: str
    :param mode: The mode by which to unify, must be one of [ concat | mean | sum ]
    :type mode: str
    :param axis: The axis along which to concattenate the sub-arrays. Default is 0.
    :type axis: int, optional
    :param max_depth: The maximum nested depth to reach. Default is 1. Increase this if the nest is deeper.
    :type max_depth: int, optional
    :return: nested arguments unified to the target node
    """
    # Unwrap MultiNodeIter containers to their underlying nest before mapping.
    args = args._data if isinstance(args, MultiNodeIter) else args
    kwargs = kwargs._data if isinstance(kwargs, MultiNodeIter) else kwargs
    # Apply the per-leaf unify() to every array in both nests.
    args_uni = ivy.nested_map(args, lambda x: unify(x, node_str, mode, axis), max_depth=max_depth)
    kwargs_uni = ivy.nested_map(kwargs, lambda x: unify(x, node_str, mode, axis), max_depth=max_depth)
    return args_uni, kwargs_uni
def ast_for_inv_exp(inv: 'Ast', ctx: 'ReferenceDict'):
    """
    invExp ::= atomExpr (atomExpr | invTrailer)*;

    Folds an atom expression with its trailers: plain atomExpr trailers are
    accumulated as call arguments, invTrailer items apply the accumulated
    result as an argument to the trailer's expression.
    """
    assert inv.name is UNameEnum.invExp
    atom_expr, *inv_trailers = inv
    res = ast_for_atom_expr(atom_expr, ctx)
    # BUG FIX: `len(...) is 1` relied on CPython's small-int identity caching;
    # use `== 1` for a proper equality comparison.
    if len(inv_trailers) == 1:
        # Fast path for a single trailer.
        [each] = inv_trailers
        if each.name is UNameEnum.atomExpr:
            return res(ast_for_atom_expr(each, ctx))
        return ast_for_atom_expr(each[0], ctx)(res)
    stack = []
    for each in inv_trailers:
        if each.name is UNameEnum.atomExpr:
            # accumulate call arguments until the next invTrailer
            stack.append(ast_for_atom_expr(each, ctx))
            continue
        if stack:
            res = res(*stack)
            stack.clear()
        res = (ast_for_atom_expr(each[0], ctx))(res)
    if stack:
        res = res(*stack)
    return res
def json_writer(docs, json_file_path: str, source: str = None):
    """Converts a collection of Spacy Doc objects to a JSON format,
    such that it can be used to train the Spacy NER model. (for Spacy v2)
    Source must be an aggregated source (defined in user_data["agg_spans"]), which
    will correspond to the target values in the JSON file.

    :param docs: iterable of spacy Doc objects.
    :param json_file_path: destination path for the JSON training file.
    :param source: optional annotation source to substitute for NER labels.
    :raises RuntimeError: when running under Spacy v3+.
    """
    import spacy
    if int(spacy.__version__[0]) > 2:
        raise RuntimeError("Only supported for Spacy v2")
    import spacy.gold  # type: ignore
    # We start opening up the JSON file
    print("Writing JSON file to", json_file_path)
    # BUG FIX: use a context manager so the file is closed even if
    # serialization of a document raises mid-way.
    with open(json_file_path, "wt") as out_fd:
        out_fd.write("[{\"id\": 0, \"paragraphs\": [\n")
        for i, doc in enumerate(docs):
            # We replace the NER labels with the annotation source
            if source is not None:
                doc = replace_ner_spans(doc, source)
            # We dump the JSON content to the file
            d = spacy.gold.docs_to_json([doc])
            s = json.dumps(d["paragraphs"]).strip("[]")
            if i > 0:
                s = ",\n" + s
            out_fd.write(s)
            if i > 0 and i % 1000 == 0:
                print("Converted documents:", i)
                out_fd.flush()
        out_fd.write("]}]\n")
def euler42_():
    """Solution for problem 42 (placeholder, not yet implemented)."""
    return None
def create_input_pipeline(files,
                          batch_size,
                          n_epochs,
                          shape,
                          crop_shape=None,
                          crop_factor=1.0,
                          n_threads=2):
    """Creates a pipefile from a list of image files.
    Includes batch generator/central crop/resizing options.
    The resulting generator will dequeue the images batch_size at a time until
    it throws tf.errors.OutOfRangeError when there are no more images left in
    the queue.
    Parameters
    ----------
    files : list
        List of paths to image files.
    batch_size : int
        Number of image files to load at a time.
    n_epochs : int
        Number of epochs to run before raising tf.errors.OutOfRangeError
    shape : list
        [height, width, channels]
    crop_shape : list
        [height, width] to crop image to; pass None to skip resize/crop.
    crop_factor : float
        Percentage of image to take starting from center.
    n_threads : int, optional
        Number of threads to use for batch shuffling
    Returns
    -------
    TYPE
        Description
    """
    # We first create a "producer" queue. It creates a production line which
    # will queue up the file names and allow another queue to deque the file
    # names all using a tf queue runner.
    # Put simply, this is the entry point of the computational graph.
    # It will generate the list of file names.
    # We also specify it's capacity beforehand.
    producer = tf.train.string_input_producer(
        files, capacity=len(files), num_epochs=n_epochs)
    # We need something which can open the files and read its contents.
    reader = tf.WholeFileReader()
    # We pass the filenames to this object which can read the file's contents.
    # This will create another queue running which dequeues the previous queue.
    keys, vals = reader.read(producer)
    # And then have to decode its contents as we know it is a jpeg image
    imgs = tf.image.decode_jpeg(
        vals, channels=3 if len(shape) > 2 and shape[2] == 3 else 0)
    # We have to explicitly define the shape of the tensor.
    # This is because the decode_jpeg operation is still a node in the graph
    # and doesn't yet know the shape of the image. Future operations however
    # need explicit knowledge of the image's shape in order to be created.
    imgs.set_shape(shape)
    # Next we'll centrally crop the image to the requested crop_shape.
    # This operation requires explicit knowledge of the image's shape.
    # BUG FIX: the resize-shape computation dereferenced crop_shape
    # unconditionally, so crop_shape=None raised TypeError despite the later
    # guard; all resize/crop work is now inside the crop_shape check.
    if crop_shape is not None:
        # Preserve aspect ratio: scale the short side to crop/crop_factor.
        if shape[0] > shape[1]:
            rsz_shape = [
                int(shape[0] / shape[1] * crop_shape[0] / crop_factor), int(
                    crop_shape[1] / crop_factor)
            ]
        else:
            rsz_shape = [
                int(crop_shape[0] / crop_factor),
                int(shape[1] / shape[0] * crop_shape[1] / crop_factor)
            ]
        rszs = tf.image.resize_images(imgs, rsz_shape)
        crops = tf.image.resize_image_with_crop_or_pad(rszs, crop_shape[0],
                                                       crop_shape[1])
    else:
        crops = imgs
    # Now we'll create a batch generator that will also shuffle our examples.
    # We tell it how many it should have in its buffer when it randomly
    # permutes the order.
    min_after_dequeue = len(files) // 100
    # The capacity should be larger than min_after_dequeue, and determines how
    # many examples are prefetched.  TF docs recommend setting this value to:
    # min_after_dequeue + (num_threads + a small safety margin) * batch_size
    capacity = min_after_dequeue + (n_threads + 1) * batch_size
    # Randomize the order and output batches of batch_size.
    batch = tf.train.shuffle_batch(
        [crops],
        enqueue_many=False,
        batch_size=batch_size,
        capacity=capacity,
        min_after_dequeue=min_after_dequeue,
        num_threads=n_threads)
    # alternatively, we could use shuffle_batch_join to use multiple reader
    # instances, or set shuffle_batch's n_threads to higher than 1.
    return batch
def validate_supervise(key_name: str, sup_conf: Dict, params: Dict) -> None:
    """
    Validate the supervise config subblock to be used for ETLBlock supervise operation.
    Parameters
    ----------
    key_name : The subkey that maps to this extract subblock.
    sup_conf : The extract sub-config dict or config['supervise'].
    params : Any additional passed-in params including datetime and target cols.

    NOTE(review): validation is assert-based, so it is silently skipped when
    Python runs with -O; raising ValueError would be safer but changes the
    exception type callers catch.
    """
    # TEST 1) Validate root 'key' in params ##################################
    assert (
        'key' in params.keys()
    ), "Missing the root key for this ETL block."
    assert isinstance(
        params['key'], str
    ), "'key' value in param must be a str type."
    assert params['key'] != '', "'key' value in param cannot be blank."
    # path prefix reused by all subsequent error messages
    path = f"'{params['key']}'->'{key_name}'->"
    # TEST 2) Validate 'alias' sub-config key (optional) #####################
    if 'alias' in sup_conf.keys():
        assert isinstance(
            sup_conf['alias'], str
        ), f"{path}'alias' value must be a str type."
        assert sup_conf['alias'] != '', f"{path}'alias' value cannot be blank."
    # TEST 3) Validate 'training_period' sub-config key ######################
    assert (
        'training_period' in sup_conf.keys()
    ), f"{path}'training_period' key is not found in the config file."
    assert isinstance(
        sup_conf['training_period'], str
    ), f"{path}'training_period' value must be a str type."
    # TEST 4) Validate 'forecast_period' sub-config key ######################
    assert (
        'forecast_period' in sup_conf.keys()
    ), f"{path}'forecast_period' key is not found in the config file."
    assert isinstance(
        sup_conf['forecast_period'], str
    ), f"{path}'forecast_period' value must be a str type."
    # TEST 5) Validate 'validation_set' sub-config key (optional) ############
    if 'validation_set' in sup_conf.keys():
        assert (
            'validation_set' in sup_conf.keys()
        ), f"{path}'validation_set' key is not found in the config file."
        assert isinstance(
            sup_conf['validation_set'], str
        ), f"{path}'validation_set' value must be a str type."
    # TEST 6) Validate 'test_set' sub-config key #############################
    assert (
        'test_set' in sup_conf.keys()
    ), f"{path}'test_set' key is not found in the config file."
    assert isinstance(
        sup_conf['test_set'], str
    ), f"{path}'test_set' value must be a str type."
    # TEST 7) Validate 'max_gap' sub-config key ##############################
    assert (
        'max_gap' in sup_conf.keys()
    ), f"{path}'max_gap' key is not found in the config file."
    assert isinstance(
        sup_conf['max_gap'], float
    ), f"{path}'max_gap' value must be a float type."
    # finally validate the shared datetime/target column params
    validate_datetime_target(params)
def confirm_email_page():
    """Returns page for users that have not confirmed their
    email address.

    Activated users are redirected (via an auth-token response) to `next`
    or the articles index; unactivated users see the confirmation page.
    """
    if not g.loggedIn:
        return redirect(url_for('general.loginPage'))
    next = request.args.get('next')
    if general_db.is_activated(g.user):
        # BUG FIX: `next is not ''` compared identity with a str literal
        # (a SyntaxWarning in modern Python); use inequality instead.
        if next != '':
            return make_auth_token_response(g.user, g.email, next)
        return make_auth_token_response(g.user, g.email,
                                        url_for('articles.index'))
    if next:
        err = 'You must confirm your email to access this endpoint'
        flash(err, 'danger')
        return render_template('confirm_email.html', next=next, email=g.email)
    return render_template('confirm_email.html', email=g.email)
def get_movie_names(url_data):
    """Get all the movies from the webpage.

    :param url_data: raw HTML of the catalogue page.
    :returns: list of href strings from anchors inside ul.ctlg-holder,
        excluding missing and '#' placeholder links.
    """
    soup = BeautifulSoup(url_data, 'html.parser')
    # All <ul class="ctlg-holder"> catalogue containers on the page.
    data = soup.findAll('ul', attrs={'class': 'ctlg-holder'})
    movie_list = []
    for div in data:
        for a in div.findAll('a'):
            href = a.get('href', None)
            # BUG FIX: the original compared the Tag object itself with
            # `a is not "#"` (always True) and appended None hrefs; filter
            # on the href value instead.
            if href is not None and href != "#":
                movie_list.append(href)
    print("Movie Names Obtained")
    return movie_list
def message_results():
    """Shows the user their message, with the letters in sorted order.

    Reads 'message' from the submitted form, transforms it with
    sort_letters, and renders the results template.
    """
    message = request.form.get('message')
    encrypted_message = sort_letters(message)
    return render_template('message_results.html', message=encrypted_message)
def fsum(iterable):  # real signature unknown; restored from __doc__
    """
    fsum(iterable)
    Return an accurate floating point sum of values in the iterable.
    Assumes IEEE-754 floating point arithmetic.
    """
    # Stub restored from the C implementation's docstring; the real body
    # lives in the math extension module.
    pass
def flatten_dict(dicts, keys):
    """
    Input is list of dicts. This operation pulls out the key in each dict and combines the values into a new list mapped to the original key. A new dictionary is formed with these key -> list mappings.
    """
    flattened = {}
    for key in keys:
        # gather this key's value from every dict, then flatten the batch
        flattened[key] = flatten_n([d[key] for d in dicts])
    return flattened
def main(
    source_files: List[str], destination_file: Optional[str], fail_on_failures: bool
) -> None:
    """
    Convert2JUnit
    This tool allows you to convert various reports into the JUnit format.

    :param source_files: input report files to convert.
    :param destination_file: output JUnit XML path (None for the default).
    :param fail_on_failures: exit non-zero when the report contains failures.
    :raises click.ClickException: when fail_on_failures is set and the
        converted report has failures.
    """
    report = initialize_report(destination_file, source_files)
    apply_source_files(report, source_files)
    report.write()
    if fail_on_failures and report.has_failures():
        raise click.ClickException(
            "report2junit detected that the report contains failures"
        )
def delete_file(file_name):
    """
    Delete a file, does nothing if file does not exist.
    :param file_name: file to delete
    .. versionadded:: 9.3.1
    """
    # Empty/None names are a no-op, matching the original guard.
    if not file_name:
        return
    try:
        os.remove(file_name)
    except (FileNotFoundError, PermissionError):
        # best-effort delete: missing files and permission errors are ignored
        pass
def render_url(fullpath, notebook=False):  # , prefix="files"):
    """Converts a path relative to the notebook (i.e. kernel) to a URL that
    can be served by the notebook server, by prepending the notebook
    directory.

    :param fullpath: path relative to the kernel, or an absolute http:// URL
        which is passed through unchanged.
    :param notebook: use the notebook URL root instead of the file URL root.
    NOTE(review): only 'http://' is passed through; 'https://' URLs would be
    prefixed like relative paths -- confirm whether that is intended.
    """
    if fullpath.startswith('http://'):
        url = fullpath
    else:
        url = (radiopadre.FILE_URL_ROOT if not notebook else radiopadre.NOTEBOOK_URL_ROOT) + fullpath
    # print "{} URL is {}".format(fullpath, url)
    return url
def register():
    """Register User route.

    Reads 'email' and 'password' from the posted form and attempts to
    create the user; responds 201 on success, 400 on failure.
    """
    email = request.form.get('email')
    password = request.form.get('password')
    new_user = User.register(email, password)
    if new_user:
        return jsonify({'message': 'Registration successful.'}), 201
    return jsonify({'message': 'Invalid username or password.'}), 400
def route_sns_task(event, context):
    """
    Gets SNS Message, deserialises the message,
    imports the function, calls the function with args

    Only the first SNS record of the event is processed.
    """
    raw_message = event['Records'][0]['Sns']['Message']
    return run_message(json.loads(raw_message))
def RAND_pseudo_bytes(*args, **kwargs):  # real signature unknown
    """
    Generate n pseudo-random bytes.
    Return a pair (bytes, is_cryptographic). is_cryptographic is True
    if the bytes generated are cryptographically strong.
    """
    # Stub restored from the C extension's docstring; the real body lives
    # in the _ssl/OpenSSL binding.
    pass
def release_dp_mean_absolute_deviation(x, bounds, epsilon):
    """Release the dp mean absolute deviation.
    Assumes dataset size len(`x`) is public.
    Theorem 27: https://arxiv.org/pdf/2001.02285.pdf

    :param x: numeric dataset (array-like).
    :param bounds: (lower, upper) clipping bounds.
    :param epsilon: privacy budget for the Laplace release.
    """
    lower, upper = bounds
    # per-record sensitivity of the MAD with public n
    sensitivity = (upper - lower) * 2. / len(x)
    x = np.clip(x, *bounds)
    # BUG FIX: np.clip returns an ndarray, which has no .abs() method
    # (that is a pandas accessor); use np.abs so both inputs work.
    mad = np.abs(x - x.mean()).mean()
    base_lap = binary_search_chain(lambda s: make_base_laplace(s), sensitivity, epsilon)
    return base_lap(mad)
def train(hparams, datas=None, scope=None):
    """build the train process

    Creates the model and session, restores or initialises weights, then
    loops over the dataset until hparams.train_steps, logging loss and
    perplexity and checkpointing every hparams.save_batch steps.

    NOTE(review): the `datas` parameter is immediately shadowed by the
    `for datas in iter_data(...)` loop variable, so the argument is unused;
    `scope` is also unused -- confirm whether either is still needed.
    """
    # 1. create the model
    model_creator = select_model_creator(hparams)
    train_model = _mh.create_model(model_creator, hparams, 'train')
    # eval_model = _mh.create_model(model_creator, hparams, 'eval')
    # infer_model = _mh.create_model(model_creator, hparams, 'infer')
    # 2. create the session
    sess_conf = tf.ConfigProto(intra_op_parallelism_threads=8, inter_op_parallelism_threads=8)
    sess_conf.gpu_options.allow_growth = True
    train_sess = tf.Session(config=sess_conf, graph=train_model.graph)
    # eval_sess = tf.Session(config=sess_conf, graph=eval_model.graph)
    # infer_sess = tf.Session(config=sess_conf, graph=infer_model.graph)
    with train_model.graph.as_default():
        # restore the latest checkpoint if one exists, else fresh init
        loaded_train_model, global_step = _mh.create_or_load_model(
            train_model.model, hparams.out_dir, train_sess)
    # Summary writer
    summary_writer = tf.summary.FileWriter(
        os.path.join(hparams.out_dir, hparams.summary_name), train_model.graph)
    num_steps = hparams.train_steps
    # running totals used for the periodic perplexity estimate
    stats = {'loss': 0., 'count': 0}
    batch_size = hparams.batch_size
    while global_step < num_steps:
        for datas in iter_data(hparams.data_path):
            if datas is None:
                _error('Feeding data function has not been writen')
                raise NotImplementedError
            else:
                feed_dict = datas
            res = loaded_train_model.train(train_sess, feed_dict)
            _info('STEP : {} \n\t LOSS : {:2f} LOSS_PER : {:2f} KL_LOSS : {:2f}'.format(
                global_step, res[1].train_loss, res[1].loss_per_token, res[1].kl_loss))
            # add loss record to tensorboard
            global_step = res[1].global_step
            summary_writer.add_summary(res[1].train_summary, global_step)
            # update statistic
            statistic(res[1].train_loss, res[1].predict_count, batch_size, stats)
            if global_step % 100 == 0:
                # calculate Perplexity
                try:
                    ppl = math.exp(stats['loss'] / stats['count'])
                except OverflowError:
                    ppl = float('inf')
                finally:
                    _info('Perplexity : {:2f}'.format(ppl))
                    reset_statistic(stats)
                # # evaluate the model
                # res_infer = infer(hparams, datas)
                # # TODO idx -> str
                # print(res_infer)
            if global_step % hparams.save_batch == 0:
                loaded_train_model.saver.save(
                    train_sess,
                    os.path.join(hparams.out_dir, 'nmt.ckpt'),
                    global_step=global_step)
                _info('Save model at step {}th'.format(global_step))
    summary_writer.close()
def create_test_account(test_case, username="TestUser"):
    """Register a test user (default name ``TestUser``) via the signup view.

    Posts the registration form through *test_case*'s HTTP client; the
    password is always ``"password"``.
    """
    form_data = {
        "username": username,
        "password": "password",
        "confirm_password": "password",
    }
    test_case.client.post(reverse("users:register"), form_data)
def get_configuration(spec_path):
    """Get mrunner experiment specification and gin-config overrides.

    First tries to unpickle *spec_path*; if that fails, executes it as a
    Python script that defines ``experiments_list`` and takes the first entry.

    Args:
        spec_path: Path to a cloudpickled specification or a spec script.

    Returns:
        Tuple ``(specification, gin_bindings)``; ``gin_bindings`` is a list
        of gin-config binding strings built from the spec parameters.
    """
    try:
        with open(spec_path, 'rb') as f:
            specification = cloudpickle.load(f)
    except pickle.UnpicklingError:
        with open(spec_path) as f:
            vars_ = {'script': os.path.basename(spec_path)}
            exec(f.read(), vars_)  # pylint: disable=exec-used
            specification = vars_['experiments_list'][0].to_dict()
            print('NOTE: Only the first experiment from the list will be run!')

    parameters = specification['parameters']
    gin_bindings = []
    for key, value in parameters.items():
        if key == 'imports':
            for module_str in value:
                gin_bindings.append(f'import {module_str}')
            continue

        # Quote plain strings; leave gin references (@), macros (%) and
        # container literals ({, (, [) unquoted.  startswith() also handles
        # the empty string safely, where value[0] would raise IndexError.
        if isinstance(value, str) and not value.startswith(('@', '%', '{', '(', '[')):
            binding = f'{key} = "{value}"'
        else:
            binding = f'{key} = {value}'
        gin_bindings.append(binding)

    return specification, gin_bindings
def inspect_single_run(raw_df, tag):
    """Print diagnostic details for every row of *raw_df* whose tag matches.

    For each matching run, the embedded D-Wave solution table is decoded and
    the two lowest-energy states are compared element-wise.
    """
    tagged = raw_df[raw_df['tag'] == tag].copy()
    print(len(tagged))
    print(tagged[['seed_input_graph', 'seed_embedding', 'solution_frequency']])
    print(tagged.columns)
    print(len(tagged))
    print(tagged.head())

    for row_idx in range(len(tagged)):
        print(f'======{row_idx}=========')
        solution_df = pd.read_json(tagged['dwave_solution_df'].values[row_idx])
        print(solution_df)
        print(solution_df[['energy']].iloc[0].values)
        best_state = utils.convert_list_of_strings_to_list_of_tuples(
            eval(solution_df[['state']].iloc[0].values[0])
        )
        runner_up = utils.convert_list_of_strings_to_list_of_tuples(
            eval(solution_df[['state']].iloc[1].values[0])
        )
        print(best_state)
        print(runner_up)
        # Show the symmetric difference between the two best states.
        print(set(best_state) - set(runner_up))
        print(set(runner_up) - set(best_state))
def LoadScores(firstfile, prevfile):
    """Load the first and previous scores. For each peptide, compute a prize
    that is -log10(min p-value across all time points). Assumes the scores
    are p-values or equivalent scores in (0, 1]. Do not allow null or missing
    scores.

    Args:
        firstfile: Tab-separated score file; peptides in the index column.
        prevfile: Tab-separated score file with the same shape as firstfile.

    Returns:
        pandas.DataFrame with the merged scores plus a "prize" column.
    """
    first_df = pd.read_csv(firstfile, sep="\t", comment="#", header=None, index_col=0)
    prev_df = pd.read_csv(prevfile, sep="\t", comment="#", header=None, index_col=0)
    first_shape = first_df.shape
    assert first_shape == prev_df.shape, "First and previous score files must have the same number of peptides and time points"
    assert not first_df.isnull().values.any(), "First scores file contains N/A values. Replace with 1.0"
    assert not prev_df.isnull().values.any(), "Previous scores file contains N/A values. Replace with 1.0"
    # Converted from a Python 2 print statement to the print() function.
    print("Loaded {} peptides and {} scores in the first and previous score files".format(first_shape[0], first_shape[1]))

    # Merge the two types of scores
    merged_df = pd.concat([first_df, prev_df], axis=1, join="outer")
    merged_shape = merged_df.shape
    assert merged_shape[0] == first_shape[0], "First and previous significance scores contain different peptides"
    assert merged_shape[1] == 2*first_shape[1], "Unexpected number of significance scores after merging first and previous scores"

    # Compute prizes
    merged_df["prize"] = merged_df.apply(CalcPrize, axis=1)
    return merged_df
def add_makeflags(job_core_count, cmd):
    """
    Correct for multi-core if necessary (especially important in case coreCount=1 to limit parallel make).

    :param job_core_count: core count from the job definition (int).
    :param cmd: payload execution command (string).
    :return: updated payload execution command (string).
    """
    # ATHENA_PROC_NUMBER is set in Node.py using the schedconfig value
    try:
        core_count = int(os.environ.get('ATHENA_PROC_NUMBER'))
    except (TypeError, ValueError):
        core_count = -1
    if core_count == -1:
        # Fall back to the core count from the job definition.  Previously the
        # fallback value was parsed here but never used to build MAKEFLAGS.
        try:
            core_count = int(job_core_count)
        except (TypeError, ValueError):
            core_count = -1
    if core_count >= 1:
        # Note: the original request (AF) was to use j%d and not -j%d, now using the latter
        cmd += "export MAKEFLAGS=\'-j%d QUICK=1 -l1\';" % (core_count)

    # make sure that MAKEFLAGS is always set
    if "MAKEFLAGS=" not in cmd:
        cmd += "export MAKEFLAGS=\'-j1 QUICK=1 -l1\';"

    return cmd
def process_map(file_in, validate):
    """Iteratively process each XML element and write to csv(s)"""
    with codecs.open(NODES_PATH, 'w') as nodes_file, \
         codecs.open(NODE_TAGS_PATH, 'w') as nodes_tags_file, \
         codecs.open(WAYS_PATH, 'w') as ways_file, \
         codecs.open(WAY_NODES_PATH, 'w') as way_nodes_file, \
         codecs.open(WAY_TAGS_PATH, 'w') as way_tags_file:

        # One CSV writer per output file, keyed for lookup in the loop below.
        writers = {
            'node': UnicodeDictWriter(nodes_file, NODE_FIELDS),
            'node_tags': UnicodeDictWriter(nodes_tags_file, NODE_TAGS_FIELDS),
            'way': UnicodeDictWriter(ways_file, WAY_FIELDS),
            'way_nodes': UnicodeDictWriter(way_nodes_file, WAY_NODES_FIELDS),
            'way_tags': UnicodeDictWriter(way_tags_file, WAY_TAGS_FIELDS),
        }
        for writer in writers.values():
            writer.writeheader()

        validator = cerberus.Validator()

        for element in get_element(file_in, tags=('node', 'way')):
            shaped = shape_element(element)
            if not shaped:
                continue
            if validate is True:
                validate_element(shaped, validator)
            if element.tag == 'node':
                writers['node'].writerow(shaped['node'])
                writers['node_tags'].writerows(shaped['node_tags'])
            elif element.tag == 'way':
                writers['way'].writerow(shaped['way'])
                writers['way_nodes'].writerows(shaped['way_nodes'])
                writers['way_tags'].writerows(shaped['way_tags'])
def _gen_find(subseq, generator):
"""Returns the first position of `subseq` in the generator or -1 if there is no such position."""
if isinstance(subseq, bytes):
subseq = bytearray(subseq)
subseq = list(subseq)
pos = 0
saved = []
for c in generator:
saved.append(c)
if len(saved) > len(subseq):
saved.pop(0)
pos += 1
if saved == subseq:
return pos
return -1 | 5,328,781 |
def fnCalculate_Bistatic_RangeAndDoppler(pos_target,vel_target,pos_rx,pos_tx,wavelength):
    """
    Calculate the measurement vector [bistatic range; Doppler shift] for the
    3D bistatic radar case.

    pos_rx, pos_tx = position of Rx and Tx in [km].
    pos_target, vel_target = target position and velocity in [km], [km/s].
    wavelength = wavelength of radar transmitter in [km].

    Validated in main_iss_bistatic_rangedopp_01.py
    Date: 27/12/16
    Edited:
    22/01/17: include the Doppler contribution of the transmitter leg as well.
    """
    rx_to_target = np.subtract(pos_target, pos_rx)
    tx_to_target = np.subtract(pos_target, pos_tx)

    measurement = np.zeros([2], dtype=np.float64)
    # Bistatic range: sum of the receiver and transmitter slant ranges.
    measurement[0] = np.linalg.norm(rx_to_target) + np.linalg.norm(tx_to_target)

    # Doppler shift: receiver-leg plus transmitter-leg contributions.
    state = np.hstack((pos_target, vel_target))
    measurement[1] = (fnCalculate_Doppler_Shift_3D(wavelength, state, pos_rx)
                      + fnCalculate_Doppler_Shift_3D(wavelength, state, pos_tx))
    return measurement
def all_permits(target_dynamo_table):
    """
    Return every item stored in the given DynamoDB table.

    Scans the table page by page, following ``LastEvaluatedKey`` until the
    result set is exhausted.

    :param target_dynamo_table: boto3 DynamoDB Table resource.
    :return: list of all items in the table.
    """
    page = target_dynamo_table.scan()
    items = page['Items']
    # Scans are paginated; keep fetching while a continuation key is present.
    while page.get('LastEvaluatedKey', False):
        page = target_dynamo_table.scan(ExclusiveStartKey=page['LastEvaluatedKey'])
        items.extend(page['Items'])
    return items
def maximum_difference_sort_value(contributions):
    """
    Auxiliary function to sort the contributions for the compare_plot.
    Returns the value of the maximum difference between values in contributions[0].

    Parameters
    ----------
    contributions: list
        list containing 2 elements:
        a Numpy.ndarray of contributions of the indexes compared, and the features' names.

    Returns
    -------
    value_max_difference : float
        Value of the maximum difference contribution, or the single
        contribution value when only one element is present.
    """
    values = contributions[0]
    if len(values) <= 1:
        # Nothing to compare against: fall back to the lone value itself.
        return values[0]
    # The largest pairwise absolute difference is simply max - min,
    # computed in O(n) instead of the former O(n^2) double loop.
    return max(values) - min(values)
def load_test_val_train_files(version):
    """Load the test, validation and train labels and images from the data folder.
    Also does the basic preprocessing (converting to the right datatype, clamping and rescaling etc.)
    return images_train, images_validation, images_test, labels_train, labels_validation, labels_test"""
    # Find the label files; their numeric suffix decides the split below.
    labels_pattern = re.compile(r'labels-(\d+).npy')
    labels_files = [f for f in os.listdir(INPUT_DIR) if re.match(labels_pattern, f)]

    images_train = np.array([])
    images_validation = np.array([])
    images_test = np.array([])
    labels_train, labels_validation, labels_test = [], [], []

    for filename in labels_files:
        # Load images alongside labels so both stay in the same order.
        file_num = int(re.match(labels_pattern, filename).group(1))
        new_labels = np.load(os.path.join(INPUT_DIR, filename)).tolist()
        split = file_num % 5
        if split == 0:
            # Test file
            labels_test.extend(new_labels)
            images_test = add_images_from_file(images_test, file_num)
        elif split == 1:
            # Validation file
            labels_validation.extend(new_labels)
            images_validation = add_images_from_file(images_validation, file_num)
        else:
            # Train file
            labels_train.extend(new_labels)
            images_train = add_images_from_file(images_train, file_num)

    # Add an explicit single color channel and rescale 0-255 -> 0-1
    # (datatype change from uint8 to float64).
    images_test = images_test[:, :, :, np.newaxis] / 255.0
    images_validation = images_validation[:, :, :, np.newaxis] / 255.0
    images_train = images_train[:, :, :, np.newaxis] / 255.0

    labels_test = np.array(labels_test, dtype=bool)
    labels_validation = np.array(labels_validation, dtype=bool)
    labels_train = np.array(labels_train, dtype=bool)

    return images_train, images_validation, images_test, labels_train, labels_validation, labels_test
def get_primer_target_sequence(id, svStartChr, svStartPos, svEndChr, svEndPos, svType, svComment, primerTargetSize, primerOffset, blastdbcmd, genomeFile):
    """Get the sequences in which primers will be placed.

    Extracts two genomic windows of length ``primerTargetSize``, offset by
    ``primerOffset`` from the structural-variant breakpoints; which side of
    each breakpoint is used, and whether the window is reverse-complemented,
    depends on ``svType``.  ``id`` and ``svComment`` are accepted but unused
    here.  Returns the upper-cased tuple ``(targetSeq1, targetSeq2)``.

    NOTE(review): an svType not matched by any branch leaves targetSeq1/2
    unassigned and the return raises UnboundLocalError -- presumably callers
    only pass the svType values listed below; confirm.
    """
    # --- Window 1: anchored near the SV start breakpoint (end for invRefB) ---
    if svType in ["del", "inv3to3", "trans3to3", "trans3to5", "snv", "invRefA", "invAltA"]:
        # Window upstream of the start breakpoint, forward orientation.
        targetSeq1Start = svStartPos - primerOffset - primerTargetSize
        targetSeq1End = svStartPos - primerOffset
        targetSeq1 = get_DNA_sequence(svStartChr, targetSeq1Start, targetSeq1End, blastdbcmd, genomeFile).upper()
    elif svType in ["invRefB"]:
        # max() keeps the window from reaching past the opposite breakpoint.
        targetSeq1Start = max(svEndPos - primerOffset - primerTargetSize, svStartPos + primerOffset)
        targetSeq1End = svEndPos - primerOffset
        targetSeq1 = get_DNA_sequence(svStartChr, targetSeq1Start, targetSeq1End, blastdbcmd, genomeFile).upper()
    elif svType in ["trans5to3", "trans5to5"]:
        # Window downstream of the start breakpoint, reverse-complemented.
        targetSeq1Start = svStartPos + primerOffset
        targetSeq1End = svStartPos + primerOffset + primerTargetSize
        targetSeq1 = reverseComplementSequence(get_DNA_sequence(svStartChr, targetSeq1Start, targetSeq1End, blastdbcmd, genomeFile).upper())
    elif svType in ["dup", "inv5to5", "invAltB"]:
        # min() keeps the window from reaching past the opposite breakpoint.
        targetSeq1Start = svStartPos + primerOffset
        targetSeq1End = min(svStartPos + primerOffset + primerTargetSize, svEndPos - primerOffset)
        targetSeq1 = reverseComplementSequence(get_DNA_sequence(svStartChr, targetSeq1Start, targetSeq1End, blastdbcmd, genomeFile).upper())
    # --- Window 2: anchored near the SV end breakpoint (start for invRefA) ---
    if svType in ["del", "inv5to5", "snv", "invRefB", "invAltB"]:
        # Window downstream of the end breakpoint, forward orientation.
        targetSeq2Start = svEndPos + primerOffset
        targetSeq2End = svEndPos + primerOffset + primerTargetSize
        targetSeq2 = get_DNA_sequence(svStartChr, targetSeq2Start, targetSeq2End, blastdbcmd, genomeFile).upper()
    elif svType in ["invRefA"]:
        targetSeq2Start = svStartPos + primerOffset
        targetSeq2End = min(svStartPos + primerOffset + primerTargetSize, svEndPos - primerOffset)
        targetSeq2 = get_DNA_sequence(svStartChr, targetSeq2Start, targetSeq2End, blastdbcmd, genomeFile).upper()
    elif svType in ["dup", "inv3to3", "invAltA"]:
        targetSeq2Start = max(svEndPos - primerTargetSize - primerOffset, svStartPos + primerOffset)
        targetSeq2End = svEndPos - primerOffset
        targetSeq2 = reverseComplementSequence(get_DNA_sequence(svStartChr, targetSeq2Start, targetSeq2End, blastdbcmd, genomeFile).upper())
    elif svType in ["trans3to5", "trans5to5"]:
        # Translocation: the end breakpoint lives on svEndChr, not svStartChr.
        targetSeq2Start = svEndPos + primerOffset
        targetSeq2End = svEndPos + primerOffset + primerTargetSize
        targetSeq2 = get_DNA_sequence(svEndChr, targetSeq2Start, targetSeq2End, blastdbcmd, genomeFile).upper()
    elif svType in ["trans3to3", "trans5to3"]:
        targetSeq2Start = svEndPos - primerTargetSize - primerOffset
        targetSeq2End = svEndPos - primerOffset
        targetSeq2 = reverseComplementSequence(get_DNA_sequence(svEndChr, targetSeq2Start, targetSeq2End, blastdbcmd, genomeFile).upper())
    return (targetSeq1, targetSeq2) | 5,328,786 |
def human_format(num):
    """
    :param num: A number to print in a nice readable way.
    :return: A string representing this number in a readable way (e.g. 1000 --> 1.00K).
    """
    suffixes = ['', 'K', 'M', 'G', 'T', 'P']
    magnitude = 0
    # Stop at the largest known suffix so huge inputs no longer raise IndexError.
    while abs(num) >= 1000 and magnitude < len(suffixes) - 1:
        magnitude += 1
        num /= 1000.0
    return '%.2f%s' % (num, suffixes[magnitude])
def torchserve(
    model_path: str,
    management_api: str,
    image: str = TORCHX_IMAGE,
    params: Optional[Dict[str, object]] = None,
) -> specs.AppDef:
    """Deploys the provided model to the given torchserve management API
    endpoint.

    >>> from torchx.components.serve import torchserve
    >>> torchserve(
    ...     model_path="s3://your-bucket/your-model.pt",
    ...     management_api="http://torchserve:8081",
    ... )
    AppDef(name='torchx-serve-torchserve', ...)

    Args:
        model_path: The fsspec path to the model archive file.
        management_api: The URL to the root of the torchserve management API.
        image: Container to use.
        params: torchserve parameters.
            See https://pytorch.org/serve/management_api.html#register-a-model

    Returns:
        specs.AppDef: the Torchx application definition
    """
    serve_args = [
        "torchx/apps/serve/serve.py",
        "--model_path",
        model_path,
        "--management_api",
        management_api,
    ]
    if params is not None:
        # Forward each torchserve parameter as a "--key value" pair.
        for name, value in params.items():
            serve_args.extend([f"--{name}", str(value)])

    role = specs.Role(
        name="torchx-serve-torchserve",
        image=image,
        entrypoint="python3",
        args=serve_args,
        port_map={"model-download": 8222},
    )
    return specs.AppDef(name="torchx-serve-torchserve", roles=[role])
def ExpandRange(r,s=1):
    """expand 1-5 to [1..5], step by 1-10/2"""
    # A trailing "/N" selects the step size, cron-style.
    if REGEX_PATTERNS['step'].search(r):
        bounds, step_str = r.split('/')
        s = int(step_str)
    else:
        bounds = r
    lo, hi = bounds.split('-')
    return list(range(int(lo), int(hi) + 1, s))
def write_file_if_changed(name, data):
    """ Write a file if the contents have changed. Returns True if the file was written. """
    # Treat a missing file as empty so a first write always happens.
    existing = read_file(name) if path_exists(name) else ''
    if data == existing:
        return False
    write_file(name, data)
    return True
def _get_preprocessor_loader(plugin_name):
    """Get a class that loads a preprocessor class.

    The returned class exposes a single ``transform`` classmethod that, when
    invoked, looks up the named ResourceType plugin and delegates to its
    ``transform``.  This indirection is needed because ``convert()`` runs as
    a decorator at import time, before all ResourceType plugins are
    guaranteed to be registered -- deferring the lookup to the moment
    ``preprocess()`` actually calls the preprocessor means every plugin is
    loaded by then and name resolution succeeds.
    """
    def transform(cls, *args, **kwargs):
        # Resolve the plugin lazily, at call time, when registries are complete.
        return ResourceType.get(plugin_name).transform(*args, **kwargs)

    loader_name = "PluginLoader_%s" % plugin_name
    return type(loader_name, (object,), {"transform": classmethod(transform)})
def preprocess_sample(data, word_dict):
    """Convert one raw sample into model-ready indices.

    Args:
        data (dict): raw sample with an 'Abstract' field ('$$$'-separated
            sentences) and, optionally, a 'Task 2' label.
        word_dict: vocabulary used to map tokens to indices.

    Returns:
        dict: 'Abstract' (list of index lists) plus, when labelled,
        'Label' (one-hot encoding of 'Task 2').
    """
    sentences = data['Abstract'].split('$$$')
    processed = {
        'Abstract': [sentence_to_indices(s, word_dict) for s in sentences],
    }
    if 'Task 2' in data:
        processed['Label'] = label_to_onehot(data['Task 2'])
    return processed
def read_input_file(input_file_path):
    """
    Read and normalize the YAML input file at ``input_file_path``.

    Converts the learning rate to float, resolves the torch device (the
    requested GPU only when CUDA is actually available, otherwise CPU),
    normalizes the early-stopping patience and hardcodes the CSV separator.

    :param input_file_path: path to the YAML configuration file.
    :return: dict of normalized deep-learning inputs.
    """
    cprint('[INFO]', bc.dgreen, "read input file: {}".format(input_file_path))
    with open(input_file_path, 'r') as input_file_read:
        dl_inputs = yaml.load(input_file_read, Loader=yaml.FullLoader)

        dl_inputs['gru_lstm']['learning_rate'] = float(dl_inputs['gru_lstm']['learning_rate'])

        # initialize before checking if GPU actually exists
        device = torch.device("cpu")
        dl_inputs['general']['is_cuda'] = False
        if dl_inputs['general']['use_gpu'] and torch.cuda.is_available():
            device = torch.device(dl_inputs["general"]["gpu_device"])
            dl_inputs['general']['is_cuda'] = True
        elif dl_inputs['general']['use_gpu']:
            cprint('[INFO]', bc.lred, 'GPU was requested but not available.')

        dl_inputs['general']['device'] = device
        cprint('[INFO]', bc.lgreen, 'pytorch will use: {}'.format(dl_inputs['general']['device']))

        # Normalize early-stopping patience: missing or non-positive -> disabled.
        if "early_stopping_patience" not in dl_inputs["gru_lstm"]:
            dl_inputs["gru_lstm"]["early_stopping_patience"] = False
        if dl_inputs['gru_lstm']["early_stopping_patience"] <= 0:
            dl_inputs['gru_lstm']["early_stopping_patience"] = False

        # XXX separation in the input CSV file
        # Hardcoded, see issue #38
        dl_inputs['preprocessing']['csv_sep'] = "\t"

    return dl_inputs
async def test_burn(
    async_stubbed_sender, async_stubbed_sender_token_account_pk, test_token
):  # pylint: disable=redefined-outer-name
    """Test burning tokens."""
    amount_to_burn = 200
    remaining_amount = 300

    burn_response = await test_token.burn(
        account=async_stubbed_sender_token_account_pk,
        owner=async_stubbed_sender,
        amount=amount_to_burn,
        multi_signers=None,
        opts=TxOpts(skip_confirmation=False),
    )
    assert_valid_response(burn_response)

    # The account balance should drop by exactly the burned amount.
    balance_resp = await test_token.get_balance(async_stubbed_sender_token_account_pk)
    balance = balance_resp["result"]["value"]
    assert balance["amount"] == str(remaining_amount)
    assert balance["decimals"] == 6
    assert balance["uiAmount"] == 0.0003
def test_cMpc_to_z_array():
    """
    Test passing a comoving distance array returns a redshift array
    """
    comoving_distance_array = np.array([0.0, 3000])
    # Must return an array
    expected_redshift_array = np.array([0.0, 0.8479314667609102])
    calculated_redshift_array = pyxcosmo.cMpc_to_z(comoving_distance_array)
    # BUG FIX: ".all" without parentheses is a bound method, which is always
    # truthy, so the assertion could never fail.  It must be called.
    assert np.isclose(expected_redshift_array, calculated_redshift_array).all()
def PWebFetch(url, args, outfile, err):
    """
    Generic Fetch a web product
    Sends a http post request to url and saves the response to outfile
    Throws exception on error
    * url     = where the request to be sent,
                e.g."https://www.cv.nrao.edu/cgi-bin/postage.pl"
    * args    = dict or arguments, e.g. {"arg1":"arg1"}
    * outfile = Name of the output file, absolute path or relative to CWD
                None => use name from server
    * err     = Python Obit Error/message stack
    """
    ################################################################
    # Package parameters (removed unused NVSShost constant)
    encoded_args = six.moves.urllib.parse.urlencode(args)
    # fetch
    try:
        request = six.moves.urllib.request.Request(url)
        # urlopen requires a bytes POST body under Python 3.
        response = six.moves.urllib.request.urlopen(request, encoded_args.encode('utf-8'))
        data = response.read()
    except Exception as exception:
        print(exception)
        OErr.PLog(err, OErr.Error, "Request from server failed")
        # NOTE(review): if printErrMsg does not raise, the code below would hit
        # an unbound 'response'/'data' -- presumably it raises on a set error.
        OErr.printErrMsg(err)
    if outfile is None:   # Name from server?
        outfile = os.path.basename(response.headers['URI'])
    fd = open(outfile, "wb")
    fd.write(data)
    fd.close()
    # Info
    print("Response code =", response.code, response.msg)
    print("Response type =", response.headers["Content-Type"])
def run_client(
    cfg: DictConfig,
    comm: MPI.Comm,
    model: nn.Module,
    loss_fn: nn.Module,
    num_clients: int,
    train_data: Dataset,
    test_data: Dataset = Dataset(),
):
    """Run PPFL simulation clients, each of which updates its own local parameters of model

    Every gather/scatter/bcast below must pair with a matching call on the
    server (rank 0); do not reorder the communication steps.

    Args:
        cfg (DictConfig): the configuration for this run
        comm: MPI communicator
        model (nn.Module): neural network model to train
        loss_fn (nn.Module): loss function used for local client training
        num_clients (int): the number of clients used in PPFL simulation
        train_data (Dataset): training data
        test_data (Dataset): testing data
    """
    comm_size = comm.Get_size()
    comm_rank = comm.Get_rank()

    ## We assume to have as many GPUs as the number of MPI processes.
    if cfg.device == "cuda":
        device = f"cuda:{comm_rank-1}"
    else:
        device = cfg.device
    # NOTE(review): 'device' is computed but not referenced again in this
    # function -- presumably clients read cfg.device themselves; confirm.

    # Rank 0 is the server; client ids are split across the remaining ranks.
    num_client_groups = np.array_split(range(num_clients), comm_size - 1)

    """ log for clients"""
    outfile = {}
    for _, cid in enumerate(num_client_groups[comm_rank - 1]):
        output_filename = cfg.output_filename + "_client_%s" % (cid)
        outfile[cid] = client_log(cfg.output_dirname, output_filename)

    """
    Send the number of data to a server
    Receive "weight_info" from a server
    (fedavg) "weight_info" is not needed as of now.
    (iceadmm+iiadmm) "weight_info" is needed for constructing coefficients of the loss_function
    """
    num_data = {}
    for _, cid in enumerate(num_client_groups[comm_rank - 1]):
        num_data[cid] = len(train_data[cid])
    # Paired with the server's gather of per-client dataset sizes.
    comm.gather(num_data, root=0)
    weight = None
    weight = comm.scatter(weight, root=0)

    batchsize = {}
    for _, cid in enumerate(num_client_groups[comm_rank - 1]):
        batchsize[cid] = cfg.train_data_batch_size
        if cfg.batch_training == False:
            # Full-batch training: one batch covering the whole local dataset.
            batchsize[cid] = len(train_data[cid])

    "Run validation if test data is given or the configuration is enabled."
    if cfg.validation == True and len(test_data) > 0:
        test_dataloader = DataLoader(
            test_data,
            num_workers=cfg.num_workers,
            batch_size=cfg.test_data_batch_size,
            shuffle=cfg.test_data_shuffle,
        )
    else:
        cfg.validation = False
        test_dataloader = None

    # NOTE(review): eval() resolves the client class by name from the config;
    # this executes arbitrary code if the config is untrusted.
    clients = [
        eval(cfg.fed.clientname)(
            cid,
            weight[cid],
            copy.deepcopy(model),
            loss_fn,
            DataLoader(
                train_data[cid],
                num_workers=cfg.num_workers,
                batch_size=batchsize[cid],
                shuffle=cfg.train_data_shuffle,
                pin_memory=True,
            ),
            cfg,
            outfile[cid],
            test_dataloader,
            **cfg.fed.args,
        )
        for _, cid in enumerate(num_client_groups[comm_rank - 1])
    ]

    ## name of parameters
    # Collected once from the first client (all clients share the same model).
    model_name = []
    for client in clients:
        for name, _ in client.model.named_parameters():
            model_name.append(name)
        break

    do_continue = comm.bcast(None, root=0)
    local_states = OrderedDict()

    while do_continue:
        """Receive "global_state" """
        global_state = comm.bcast(None, root=0)

        """ Update "local_states" based on "global_state" """
        for client in clients:
            cid = client.id
            ## initial point for a client model
            for name in client.model.state_dict():
                if name not in model_name:
                    # Keep state_dict entries that are not trainable parameters
                    # (e.g. buffers) from the local model rather than the server.
                    global_state[name] = client.model.state_dict()[name]
            client.model.load_state_dict(global_state)

            ## client update
            local_states[cid] = client.update()

        """ Send "local_states" to a server """
        comm.gather(local_states, root=0)
        do_continue = comm.bcast(None, root=0)

    for client in clients:
        client.outfile.close() | 5,328,797 |
def task_deploy_docs() -> DoitTask:
    """Deploy docs to the Github `gh-pages` branch.

    Returns:
        DoitTask: doit task

    """
    if _is_mkdocs_local():  # pragma: no cover
        # Local-style docs (no directory URLs) cannot be deployed yet.
        message = 'ERROR: Not yet configured to deploy documentation without "use_directory_urls"'
        return debug_task([(echo, (message,))])
    return debug_task([Interactive('poetry run mkdocs gh-deploy')])
def plugin(version: str) -> 'Plugin':
    """Return the application plugin (*version* is accepted but unused)."""
    return XPXPlugin
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.