| content (string, lengths 22-815k) | id (int64, 0-4.91M) |
|---|---|
def read_conf_file_interface(config_name):
"""
Get interface settings.
@param config_name: Name of WG interface
@type config_name: str
@return: Dictionary with interface settings
@rtype: dict
"""
conf_location = WG_CONF_PATH + "/" + config_name + ".conf"
with open(conf_location, 'r', encoding='utf-8') as file_object:
file = file_object.read().split("\n")
data = {}
for i in file:
if not regex_match("#(.*)", i):
if len(i) > 0:
if i != "[Interface]":
tmp = re.split(r'\s*=\s*', i, 1)
if len(tmp) == 2:
data[tmp[0]] = tmp[1]
return data
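# Illustration only (not part of the snippet above): a minimal sketch of the expected
# behaviour, assuming WG_CONF_PATH is a module-level global pointing at the WireGuard
# config directory and regex_match() is a thin wrapper around re.match().
import os
import re
import tempfile

def regex_match(pattern, text):
    # assumed helper, used above to skip comment lines
    return re.match(pattern, text)

demo_dir = tempfile.mkdtemp()
with open(os.path.join(demo_dir, "wg0.conf"), "w", encoding="utf-8") as fh:
    fh.write("[Interface]\n# server settings\nAddress = 10.0.0.1/24\nListenPort = 51820\n")

WG_CONF_PATH = demo_dir  # hypothetical: point the global at the demo directory
print(read_conf_file_interface("wg0"))
# {'Address': '10.0.0.1/24', 'ListenPort': '51820'}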
| 5,341,600
|
def test_basic_reliable_data_transfer():
"""Basic test: Check that when you run server and client starter code
that the input file equals the output file
"""
# Can you think of how you can test this? Give it a try!
pass
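# One possible approach, sketched below. The script names, flags, and file names are
# assumptions, not part of the starter code; adapt them to the actual project layout.
import filecmp
import subprocess
import time

def test_basic_reliable_data_transfer_sketch():
    # hypothetical entry points: a receiver writing output.txt and a sender reading input.txt
    server = subprocess.Popen(["python", "server.py", "--out", "output.txt"])
    time.sleep(1)  # give the server a moment to start listening
    subprocess.run(["python", "client.py", "--in", "input.txt"], check=True)
    server.wait(timeout=30)
    # the transfer is reliable if the received file matches the sent file byte for byte
    assert filecmp.cmp("input.txt", "output.txt", shallow=False)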
| 5,341,601
|
def add_to_history(media, watched_at=None):
"""Add a :class:`Movie`, :class:`TVShow`, or :class:`TVEpisode` to your
watched history.
:param media: The media object to add to your history
:param watched_at: A `datetime.datetime` object indicating the time at
which this media item was viewed
"""
if watched_at is None:
watched_at = datetime.now(tz=timezone.utc)
data = dict(watched_at=timestamp(watched_at))
data.update(media.ids)
result = yield 'sync/history', {media.media_type: [data]}
yield result
| 5,341,602
|
def book_transformer(query_input, book_dict_input):
"""grabs the book and casts it to a list"""
sample_version = versions_dict.versions_dict()
query_input[1] = query_input[1].replace('[', '').replace(']', '').lstrip().rstrip().upper()
for i in list(book_dict_input.keys()):
result = re.search(i, query_input[1])
if result is not None:
book = book_dict_input[result.group(0)]
reduced_query = query_input[1].replace(result.group(0), '')
return [query_input[0], book, reduced_query]
return [sample_version['KJV'], 'error book not found']
| 5,341,603
|
def custom_response(message, status, mimetype):
"""handle custom errors"""
resp = Response(json.dumps({"message": message, "status_code": status}),
status=status,
mimetype=mimetype)
return resp
| 5,341,604
|
def demo():
"""
Let the user play around with the standard scene using programmatic
instructions passed directly to the controller.
The environment will be displayed in a graphics window. The user can type
various commands into the graphics window to query the scene and control
the grasper. Type `help` to get a list of commands.
"""
pygame.init()
screen_info = pygame.display.Info()
screen_width = screen_info.current_w
screen_height = screen_info.current_h
screen = pygame.display.set_mode((screen_width // 2, screen_height // 2))
Viewer(screen, "SHRDLU Blocks Demo", demo_callback,
initial_output='Type "help" for a list of available commands.').run()
| 5,341,605
|
def embed_network(input_net, layers, reuse_variables=False):
"""Convolutional embedding."""
n_layers = int(len(layers)/3)
tf.logging.info('Number of layers: %d' % n_layers)
# set normalization and activation functions
normalizer_fn = None
activation_fn = tf.nn.softplus
tf.logging.info('Softplus activation')
net = input_net
for ilayer in range(n_layers):
tf.logging.info('Building layer: %d, %d, %d'
% (int(layers[ilayer*3 + 1]), int(layers[ilayer*3]),
int(layers[ilayer*3 + 2])))
net = slim.conv2d(net, int(layers[ilayer*3 + 1]),
int(layers[ilayer*3]),
stride=int(layers[ilayer*3 + 2]),
scope='layer_wt_%d' % ilayer,
reuse=reuse_variables,
normalizer_fn=normalizer_fn,
activation_fn=activation_fn)
return net
| 5,341,606
|
def write_pair(readID, read1, read2, fh_out_R1, fh_out_R2):
"""Write paired reads to two files"""
fh_out_R1.write("@%s\n%s\n+\n%s\n" % (readID + "/1", read1[0], read1[1]))
fh_out_R2.write("@%s\n%s\n+\n%s\n" % (readID + "/2", read2[0], read2[1]))
| 5,341,607
|
def sample_variance(sample1, sample2):
"""
    Calculate the pooled sample variance of two samples. After learn.co.
"""
n_1, n_2 = len(sample1), len(sample2)
var_1, var_2 = variance(sample1), variance(sample2)
return (var_1 + var_2)/((n_1 + n_2)-2)
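# The denominator (n_1 + n_2 - 2) matches the pooled variance used in a two-sample
# t-test, which assumes the `variance` helper returns the sum of squared deviations
# rather than their mean. A minimal sketch of that assumed helper and a usage check:
def variance(sample):
    mean = sum(sample) / len(sample)
    return sum((x - mean) ** 2 for x in sample)

print(sample_variance([1, 2, 3, 4, 5], [2, 4, 6, 8]))  # 30/7 ~= 4.29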
| 5,341,608
|
def dwritef2(obj, path):
"""The dwritef2() function writes the object @p obj to the Python
pickle file whose path is pointed to by @p path. Non-existent
directories of @p path are created as necessary.
@param obj Object to write, as created by e.g. dpack()
@param path Path of output file
@return Path of output file
"""
dirname = os.path.dirname(path)
if dirname != "" and not os.path.isdir(dirname):
os.makedirs(dirname)
easy_pickle.dump(path, obj)
return path
| 5,341,609
|
def extract_test_zip(pattern, dirname, fileext):
"""Extract a compressed zip given the search pattern.
There must be one and only one matching file.
arg 1- search pattern for the compressed file.
arg 2- name of the output top-level directory.
arg 3- a list of search patterns for the source files.
"""
arcdir = libastyle.get_archive_directory(True)
testdir = libastyle.get_test_directory(True)
    # extract the zip
files = glob.glob(arcdir + pattern)
if len(files) == 0:
libastyle.system_exit("No zip to extract")
if len(files) > 1:
libastyle.system_exit(str(files) + "\nToo many zips to extract")
call_7zip(files[0], testdir + dirname.strip(), fileext)
| 5,341,610
|
def test_imageprocessor_read():
"""Test the Imageprocessor."""
# Test with 4 channels
transform_sequence = [ToPILImage('RGBA')]
p = ImageProcessor(transform_sequence)
img_out = p.apply_transforms(test_input)
assert np.array(img_out).shape == (678, 1024, 4)
# Test with 3 channels
transform_sequence = [ToPILImage('RGB')]
p = ImageProcessor(transform_sequence)
img_out = p.apply_transforms(test_input)
assert np.array(img_out).shape == (678, 1024, 3)
# Test the values of the image
transform_sequence = [ToPILImage('RGBA'), PILtoarray()]
p = ImageProcessor(transform_sequence)
img_out = p.apply_transforms(test_input)
assert np.min(img_out) >= 0
assert np.max(img_out) <= 255
# Test the values of the image
transform_sequence = [ToPILImage('L'), PILtoarray()]
p = ImageProcessor(transform_sequence)
img_out = p.apply_transforms(test_input)
assert np.min(img_out) >= 0
assert np.max(img_out) <= 255
| 5,341,611
|
def app(*tokens):
"""command function to add a command for an app with command name."""
apps_handler.add(*tokens)
| 5,341,612
|
def test_ant():
"""
target_files = [
"tests/apache-ant/main/org/apache/tools/ant/types/ArchiveFileSet.java",
"tests/apache-ant/main/org/apache/tools/ant/types/TarFileSet.java",
"tests/apache-ant/main/org/apache/tools/ant/types/ZipFileSet.java"
]
"""
ant_dir = "/home/ali/Desktop/code/TestProject/"
print("Success!" if PushDownField(
utils2.get_filenames_in_dir(ant_dir),
"test_package",
"App",
"push_down",
[],
# lambda x: "tests/pushdown_field_ant/" + x[len(ant_dir):]
).do_refactor() else "Cannot refactor.")
| 5,341,613
|
def calculate_area(geometry):
"""
Calculate geometry area
:param geometry: GeoJSON geometry
:return: the geometry area
"""
coords = get_coords_from_geometry(
geometry, ["Polygon", "MultiPolygon"], raise_exception=False
)
if get_input_dimensions(coords) >= 4:
areas = list(map(lambda sub_item: calculate_area(sub_item), coords))
return sum(areas)
elif get_input_dimensions(coords) == 3:
polygon(coords)
return polygon_area(coords)
else:
return 0
| 5,341,614
|
def read_prb(file):
"""
Read a PRB file and return a ProbeGroup object.
    Since the PRB format does not store contact shapes, circles with a 5 um
    radius are used. Likewise, a dummy 'tip' shape is created for each probe.
    The PRB format contains no information about the physical channels of the
    probe; only the channel index on the device is given.
"""
file = Path(file).absolute()
assert file.is_file()
with file.open('r') as f:
contents = f.read()
contents = re.sub(r'range\(([\d,]*)\)', r'list(range(\1))', contents)
prb = {}
exec(contents, None, prb)
prb = {k.lower(): v for (k, v) in prb.items()}
if 'channel_groups' not in prb:
raise ValueError('This file is not a standard PRB file')
probegroup = ProbeGroup()
for i, group in prb['channel_groups'].items():
probe = Probe(ndim=2, si_units='um')
chans = np.array(group['channels'], dtype='int64')
positions = np.array([group['geometry'][c] for c in chans],
dtype='float64')
probe.set_contacts(positions=positions, shapes='circle',
shape_params={'radius': 5})
probe.create_auto_shape(probe_type='tip')
probe.set_device_channel_indices(chans)
probegroup.add_probe(probe)
return probegroup
| 5,341,615
|
def alt_credits():
""" Route for alt credits page. Uses json list to generate page body """
alternate_credits = tasks.json_list(os.path.join(pathlib.Path(__file__).parent.absolute(),'static/alt_credits.json'))
return render_template('alt_credits.html',title='collegeSMART - Alternative College Credits',alt_credits=alternate_credits)
| 5,341,616
|
def CreateNode(parent, node_type, position, wx_id):
""" Create an instance of a node associated with the specified name.
:param parent: parent of the node object (usually a wx.Window)
:param node_type: type of node from registry - the IDName
:param position: default position for the node
:param wx_id: id for the node. Usually an id generated by wxPython.
:returns: Node object
:raises: NodeNotFoundError if the node is not registered in the Node Registry
"""
if node_type in REGISTERED_NODES:
# Initialize the base class here so that a new instance
# is created for each node. We also set some important
# values for the position and type of the node.
node = REGISTERED_NODES[node_type]
node = node(wx_id)
node.SetPosition(position)
node.Model.SetType(node_type)
node.Model.SetParent(parent)
return node
else:
raise exceptions.NodeNotFoundError(node_type)
| 5,341,617
|
def decode_json_content(content):
"""
Decodes a given string content to a JSON object
:param str content: content to be decoded to JSON.
:return: A JSON object if the string could be successfully decoded and None otherwise
:rtype: json or None
"""
try:
return json.loads(content) if content is not None else None
except JSONDecodeError:
print("The given content could not be decoded as a JSON file")
return None
| 5,341,618
|
def plot_dist(noise_feats, label=None, ymax=1.1, color=None, title=None, save_path=None):
"""
Kernel density plot of the number of noisy features included in explanations,
for a certain number of test samples
"""
if not any(noise_feats): # handle special case where noise_feats=0
noise_feats[0] = 0.5
# plt.switch_backend("agg")
sns.set_style('darkgrid')
ax = sns.distplot(noise_feats, hist=False, kde=True,
kde_kws={'label': label}, color=color)
plt.xlim(-3, 11)
plt.ylim(ymin=0.0, ymax=ymax)
if title:
plt.title(title)
if save_path:
plt.savefig(save_path)
return ax
| 5,341,619
|
def simple_simulate(choosers, spec, nest_spec,
skims=None, locals_d=None,
chunk_size=0, custom_chooser=None,
log_alt_losers=False,
want_logsums=False,
estimator=None,
trace_label=None, trace_choice_name=None, trace_column_names=None):
"""
Run an MNL or NL simulation for when the model spec does not involve alternative
specific data, e.g. there are no interactions with alternative
properties and no need to sample from alternatives.
"""
trace_label = tracing.extend_trace_label(trace_label, 'simple_simulate')
assert len(choosers) > 0
result_list = []
# segment by person type and pick the right spec for each person type
for i, chooser_chunk, chunk_trace_label \
in chunk.adaptive_chunked_choosers(choosers, chunk_size, trace_label):
choices = _simple_simulate(
chooser_chunk, spec, nest_spec,
skims=skims,
locals_d=locals_d,
custom_chooser=custom_chooser,
log_alt_losers=log_alt_losers,
want_logsums=want_logsums,
estimator=estimator,
trace_label=chunk_trace_label,
trace_choice_name=trace_choice_name,
trace_column_names=trace_column_names)
result_list.append(choices)
chunk.log_df(trace_label, f'result_list', result_list)
if len(result_list) > 1:
choices = pd.concat(result_list)
    assert len(choices.index) == len(choosers.index)
return choices
| 5,341,620
|
def state(predicate):
"""DBC helper for reusable, simple predicates for object-state tests used in both preconditions and postconditions"""
@wraps(predicate)
def wrapped_predicate(s, *args, **kwargs):
return predicate(s)
return wrapped_predicate
| 5,341,621
|
def dpp(kernel_matrix, max_length, epsilon=1E-10):
"""
Our proposed fast implementation of the greedy algorithm
:param kernel_matrix: 2-d array
:param max_length: positive int
:param epsilon: small positive scalar
:return: list
"""
item_size = kernel_matrix.shape[0]
cis = np.zeros((max_length, item_size))
di2s = np.copy(np.diag(kernel_matrix))
selected_items = list()
selected_item = np.argmax(di2s)
selected_items.append(selected_item)
while len(selected_items) < max_length:
k = len(selected_items) - 1
ci_optimal = cis[:k, selected_item]
di_optimal = math.sqrt(di2s[selected_item])
elements = kernel_matrix[selected_item, :]
eis = (elements - np.dot(ci_optimal, cis[:k, :])) / di_optimal
cis[k, :] = eis
di2s -= np.square(eis)
di2s[selected_item] = -np.inf
selected_item = np.argmax(di2s)
if di2s[selected_item] < epsilon:
break
selected_items.append(selected_item)
return selected_items
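# Usage sketch (illustrative, with random data): the kernel is typically built as
# L = diag(q) . S . diag(q), where q holds relevance scores and S is a cosine
# similarity matrix over L2-normalised item embeddings.
import numpy as np

rng = np.random.default_rng(0)
n_items, dim = 100, 16
scores = rng.exponential(scale=1.0, size=n_items)        # relevance q_i > 0
feats = rng.normal(size=(n_items, dim))
feats /= np.linalg.norm(feats, axis=1, keepdims=True)    # unit-norm embeddings
kernel = scores[:, None] * (feats @ feats.T) * scores[None, :]

print(dpp(kernel, max_length=10))   # indices of up to 10 diverse, high-scoring items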
| 5,341,622
|
def adjust_image_resolution(data):
"""Given image data, shrink it to no greater than 1024 for its larger
dimension."""
inputbytes = cStringIO.StringIO(data)
output = cStringIO.StringIO()
try:
im = Image.open(inputbytes)
im.thumbnail((240, 240), Image.ANTIALIAS)
# could run entropy check to see if GIF makes more sense given an item.
im.save(output, 'JPEG')
except IOError:
return None
return output.getvalue()
| 5,341,623
|
def timing_function():
"""
There's a better timing function available in Python 3.3+
Otherwise use the old one.
TODO: This could be a static analysis at the top of the module
"""
    if sys.version_info >= (3, 3):
return time.monotonic()
else:
return time.time()
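# Typical use is to take two readings and subtract them (illustrative sketch;
# assumes `sys` and `time` are imported, as the function itself requires):
start = timing_function()
sum(i * i for i in range(1000000))      # some work to time
elapsed = timing_function() - start
print("took %.3f s" % elapsed)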
| 5,341,624
|
async def test_volume_down(mock_device, heos):
"""Test the volume_down command."""
await heos.get_players()
player = heos.players.get(1)
with pytest.raises(ValueError):
await player.volume_down(0)
with pytest.raises(ValueError):
await player.volume_down(11)
mock_device.register(
const.COMMAND_VOLUME_DOWN, {"pid": "1", "step": "6"}, "player.volume_down"
)
await player.volume_down(6)
| 5,341,625
|
def parse_date(txt):
""" Returns None or parsed date as {h, m, D, M, Y}. """
date = None
clock = None
for word in txt.split(' '):
if date is None:
try:
date = datetime.strptime(word, "%d-%m-%Y")
continue
except ValueError:
pass
try:
date = datetime.strptime(word, "%d.%m.%Y")
continue
except ValueError:
pass
if clock is None:
try:
clock = datetime.strptime(word, "%H:%M")
continue
except ValueError:
pass
    if date is not None and clock is not None:
        return {'h': clock.hour,
                'm': clock.minute,
                'D': date.day,
                'M': date.month,
                'Y': date.year}
return None
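# A couple of illustrative calls (assumes `datetime` is imported from the
# `datetime` module, as the function requires):
print(parse_date("backup at 15:40 on 14.02.2018"))
# {'h': 15, 'm': 40, 'D': 14, 'M': 2, 'Y': 2018}
print(parse_date("just a date: 14-02-2018"))   # no time of day present
# None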
| 5,341,626
|
def log_success(msg):
"""
Log success message
:param msg: Message to be logged
:return: None
"""
print("[+] " + str(msg))
sys.stdout.flush()
| 5,341,627
|
def diff_configurations(model_config, bench_config, model_bundle, bench_bundle):
"""
    Compare the configuration of a model run against a benchmark run.
    Args:
        model_config: path to the model configuration file
        bench_config: path to the benchmark configuration file
        model_bundle: a LIVVkit model bundle object
        bench_bundle: a LIVVkit model bundle object
    Returns:
        An elements.file_diff element recording, section by section, which
        configuration values match between the model and the benchmark
"""
diff_dict = LIVVDict()
model_data = model_bundle.parse_config(model_config)
bench_data = bench_bundle.parse_config(bench_config)
if model_data == {} and bench_data == {}:
return elements.error("Configuration Comparison",
"Could not open file: " + model_config.split(os.path.sep)[-1])
model_sections = set(six.iterkeys(model_data))
bench_sections = set(six.iterkeys(bench_data))
all_sections = set(model_sections.union(bench_sections))
for s in all_sections:
model_vars = set(six.iterkeys(model_data[s])) if s in model_sections else set()
bench_vars = set(six.iterkeys(bench_data[s])) if s in bench_sections else set()
all_vars = set(model_vars.union(bench_vars))
for v in all_vars:
model_val = model_data[s][v] if s in model_sections and v in model_vars else 'NA'
bench_val = bench_data[s][v] if s in bench_sections and v in bench_vars else 'NA'
same = True if model_val == bench_val and model_val != 'NA' else False
diff_dict[s][v] = (same, model_val, bench_val)
return elements.file_diff("Configuration Comparison", diff_dict)
| 5,341,628
|
def update_statvar_dcids(statvar_list: list, config: dict):
"""Given a list of statvars, generates the dcid for each statvar after
accounting for dependent PVs.
"""
for d in statvar_list:
ignore_props = get_dpv(d, config)
dcid = get_statvar_dcid(d, ignore_props=ignore_props)
d['Node'] = dcid
| 5,341,629
|
def _extract_archive(file_path, path='.', archive_format='auto'):
"""Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.
Arguments:
file_path: path to the archive file
path: path to extract the archive file
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
Returns:
True if a match was found and an archive extraction was completed,
False otherwise.
"""
if archive_format is None:
return False
if archive_format == 'auto':
archive_format = ['tar', 'zip']
if isinstance(archive_format, six.string_types):
archive_format = [archive_format]
for archive_type in archive_format:
if archive_type == 'tar':
open_fn = tarfile.open
is_match_fn = tarfile.is_tarfile
if archive_type == 'zip':
open_fn = zipfile.ZipFile
is_match_fn = zipfile.is_zipfile
if is_match_fn(file_path):
with open_fn(file_path) as archive:
try:
archive.extractall(path)
except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
raise
return True
return False
| 5,341,630
|
def calc_angle(m, n):
"""
Calculate the cosθ,
where θ is the angle between 2 vectors, m and n.
"""
if inner_p_s(m, n) == -1:
print('Error! The 2 vectors should belong on the same space Rn!')
elif inner_p_s(m,n) == 0:
print('The cosine of the two vectors is 0, so these vectors are orthogonal!')
else:
angle = (inner_p_s(m, n))/(calc_norm(m) * calc_norm(n))
return angle
| 5,341,631
|
def Seuil_var(img):
"""
    This function computes a threshold value. First the image histogram is
    calculated; then, after 80% of the histogram mass has been accumulated,
    the threshold is set to the first histogram index i satisfying
    DH(i) >= 0, DH(i)/H(i-1) <= 0.1 and H(i) <= 1% of the norm.
    In : img : IPL image : image to be processed
    Out: seuil : int : value of the threshold
"""
dim=255
MaxValue=np.amax(np.asarray(img[:]))
Norm = np.asarray(img[:]).shape[0]*np.asarray(img[:]).shape[1]
scale=MaxValue/dim
Wdim=dim*scale
MaxValue=np.amax(np.asarray(img[:]))
bins= [float(x) for x in range(dim)]
hist,bin_edges = np.histogram(np.asarray(img[:]), bins)
Norm = Norm -hist[0]
median=np.median(hist)
mean=0
var=0
i=1
som = 0
while (som < 0.8*Norm and i <len(hist)-1):
som = som + hist[i]
i=i+1
while ((hist[i]-hist[i-1] < 0 or (hist[i]-hist[i-1])/hist[i-1]>0.1 or hist[i]> 0.01*Norm ) and i < len(hist)-1):
i=i+1
if( i == len(hist)-1):
seuil=0
seuil = i
var = 0
return seuil
| 5,341,632
|
def convert_numpy(file_path, dst=None, orient='row', hold=False, axisf=False, *arg):
"""
Extract an array of data stored in a .npy file or DATABLOCK
Parameters
---------
file_path : path (str)
Full path to the file to be extracted.
dst : str
Full path to the file where data will be appended as bytes.
In the case of None value, a temporary file is created and the path is returned.
orient : str
orientation of the spectra in the file. Defaults to spectra as row.
hold : bool
If true, limits parts of the code to only get data type and parameters. (faster)
axisf : bool
Extracts the 1st axis and set it as the file axis as it is being converted.
Return
------
Asgard_param : dict
Stores update to Asgard parameters (i.e. spec_amount, spec_len, Axis for sif)
dst : path (str)
        Full path to the file where the data were written, be it temporary or user selected
"""
if dst is None and hold is False:
dst = TemporaryFile('wb', delete=False).name
try :
arr = load(file_path, allow_pickle=True, mmap_mode='r')
except ValueError :
raise Exc.FileFormatError('Selected file is not a valid numpy array')
if orient != 'row' :
arr = arr.T
if len(arr.shape) == 1:
arr = arr.reshape([1, arr.shape[0]])
if len(arr.shape) != 2 :
raise Exc.FileFormatError('Selected file contains an array with more than 2 dimensions')
Asgard_param = {'Spec len':arr.shape[1], 'Spec amount':arr.shape[0]}
if hold is True :
if axisf is True :
Asgard_param['Spec amount'] -= 1
axis = arr[0,:]
return Asgard_param, axis
else :
return Asgard_param
else :
        with open(dst, 'ab') as f :
            for spec in range(arr.shape[0]):
                if axisf is True and spec == 0:
                    # the first row holds the axis; return it instead of writing it
                    Asgard_param['Spec amount'] -= 1
                    axis = arr[0, :]
                else :
                    for pix in arr[spec, :]:
                        f.write(bytearray(pack('f', pix)))
if axisf is True :
return dst, Asgard_param, axis
else :
return dst, Asgard_param
| 5,341,633
|
def index():
"""Every time the html page refreshes this function is called.
Checks for any activity from the user (setting an alarm, deleting an alarm,
or deleting a notification)
:return: The html template with alarms and notifications added
"""
notification_scheduler.run(blocking=False)
# get the inputs from the users alarm submission
alarm_time = request.args.get("alarm")
alarm_title = request.args.get("two")
alarm_news = request.args.get("news")
alarm_weather = request.args.get("weather")
check_for_delete_request()
if alarm_title and alarm_time:
alarm = {"alarm time": str(alarm_time), "title": str(alarm_title), "content": "",
"weather": alarm_weather is not None, "news": alarm_news is not None}
notification_delay = get_notification_delay(alarm["alarm time"])
        # if the notification delay is negative then the alarm is set in the past, which is invalid
if notification_delay > 0 and valid_alarm_title(alarm["title"]):
alarm_date_time = alarm_time.split("T")
alarm["content"] = format_alarm_content(alarm_date_time, alarm_news, alarm_weather)
notification_scheduler.enter(notification_delay, len(notifications),
set_off_alarms, (alarm,))
log.info("Alarm set: %s", alarm)
log.info("Delay for alarm: %d seconds", notification_delay)
alarms.append(alarm)
else:
log.error("INVALID ALARM: %s", alarm)
return render_template('index.html', title='Daily update', alarms=alarms,
notifications=notifications, image="alarm_clock.jpg",
favicon="static/images/favicon.jpg")
| 5,341,634
|
def team_6_adv():
"""
Team 6's refactored chapter.
Originally by lovelle, refactored by lovelle.
:return: None
"""
global dead
direction = input("Which direction would you like to go? [North/South/East/West] ")
if direction == "East":
# Good choice
print()
print("You come upon an underground lake, fed by a glistening stream.")
print()
print("The sound of the water soothes your troubled nerves.")
sleep(delay)
print()
elif direction == "South":
# Bad choice
print()
print("Ever so suddenly, you find yourself surrounded by ogres twice your size.")
print("They realize you are harmless and you catch your breath. It seems they might let you pass...")
sleep(delay * 5)
print()
print("They strike up a song, ready to continue on their way.")
print("Oh, but how loud their voices are! And you aren't feeling so good...")
sleep(delay * 5)
print()
print("The leader asks you to rank the quality of their singing, on a scale of 1 to 10.")
rating = int(input("What do you say? Choose wisely; your life depends on it... "))
print()
if rating < 10:
print("You fall to the ground, feeling the power of a cursed song. Looks like your time is up, friend.")
dead = True
sleep(delay)
print()
else:
print("The ogre thanks you for the complement and sends you on your merry way.")
sleep(delay)
print()
else:
# Neutral choice
print()
print("Phew, you're still on solid ground. But still in the dark. Think fast!")
sleep(delay)
print()
if dead == True:
print("Oh no! You died. And what a shame it had to happen this way.")
print("Better luck next time - try again by hitting the green play button!")
quit()
| 5,341,635
|
def do_upgrade_show(cc, args):
"""Show software upgrade details and attributes."""
upgrades = cc.upgrade.list()
if upgrades:
_print_upgrade_show(upgrades[0])
else:
print('No upgrade in progress')
| 5,341,636
|
def check_icinga_should_run(state_file: str) -> bool:
"""Return True if the script should continue to update the state file, False if the state file is fresh enough."""
try:
with open(state_file) as f:
state = json.load(f)
except Exception as e:
logger.error('Failed to read Icinga state from %s: %s', state_file, e)
return True
delta = time.time() - state['timestamp']
logger.info('Last run was %d seconds ago with exit code %d', delta, state['exit_code'])
if state['exit_code'] == 0:
if delta > ICINGA_RUN_EVERY_MINUTES * 60:
return True
logger.info('Skipping')
return False
if delta > ICINGA_RETRY_ON_FAILURE_MINUTES * 60:
return True
logger.info('Skipping')
return False
| 5,341,637
|
def ln_new_model_to_gll(py, new_flag_dir, output_dir):
"""
make up the new gll directory based on the OUTPUT_MODEL.
"""
script = f"{py} -m seisflow.scripts.structure_inversion.ln_new_model_to_gll --new_flag_dir {new_flag_dir} --output_dir {output_dir}; \n"
return script
| 5,341,638
|
def compare_tuple(Procedure, cfg):
"""Validate the results using a tuple
"""
profile = DummyData()
tp = {}
for v in profile.keys():
if isinstance(profile[v], ma.MaskedArray) and profile[v].mask.any():
profile[v][profile[v].mask] = np.nan
profile.data[v] = profile[v].data
tp[v] = tuple(profile.data[v])
y = Procedure(profile, "TEMP", cfg)
y2 = Procedure(tp, "TEMP", cfg, attrs=profile.attrs)
assert isinstance(y2["TEMP"], tuple), "It didn't preserve the tuple type"
for f in y.features:
assert np.allclose(y.features[f], y2.features[f], equal_nan=True)
assert y.features[f].dtype == y2.features[f].dtype
for f in y.flags:
assert type(y.flags[f]) == type(y2.flags[f])
assert y.flags[f].dtype == y2.flags[f].dtype
assert np.allclose(y.flags[f], y2.flags[f], equal_nan=True)
| 5,341,639
|
def deserialize_item(item: dict):
"""Deserialize DynamoDB item to Python types.
Args:
item: item to deserialize
Return: deserialized item
"""
return {k: DDB_DESERIALIZER.deserialize(v) for k, v in item.items()}
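# Usage sketch, assuming DDB_DESERIALIZER is boto3's TypeDeserializer (the snippet
# does not show how it is constructed):
from boto3.dynamodb.types import TypeDeserializer

DDB_DESERIALIZER = TypeDeserializer()

raw = {"pk": {"S": "user#1"}, "visits": {"N": "3"}, "active": {"BOOL": True}}
print(deserialize_item(raw))
# {'pk': 'user#1', 'visits': Decimal('3'), 'active': True}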
| 5,341,640
|
def good_result(path_value, pred, source=None, target_path=''):
"""Constructs a JsonFoundValueResult where pred returns value as valid."""
source = path_value.value if source is None else source
return jp.PathValueResult(pred=pred, source=source, target_path=target_path,
path_value=path_value, valid=True)
| 5,341,641
|
def add_task(task_name):
"""Handles the `add_task` command.
Syntax: `add_task <task_name>`
Function: register a new task in TASK_NAME_CACHE
Precondition on argument `task_name`:
    - `task_name` is non-empty; input validation against the global object task_keys
      has already been handled by the do_start() function in the interpreter stage.
"""
task_name = task_name.lower()
if task_name not in task_keys:
task_keys.append(task_name)
with open(TASK_NAME_CACHE, 'a') as f:
f.write(task_name + '\n')
f.flush()
print("Task {} has been added.".format(task_name))
else:
print("Task {} has been added already.".format(task_name))
| 5,341,642
|
def bot_properties(bot_id):
"""
Return all available properties for the given bot. The bot id should be
available in the `app.config` dictionary.
"""
bot_config = app.config['BOTS'][bot_id]
return [pd[0] for pd in bot_config['properties']]
| 5,341,643
|
def find_path(ph_tok_list, dep_parse, link_anchor, ans_anchor, edge_dict, ph_dict):
"""
    :param ph_tok_list: token list of the sentence
    :param dep_parse: dependency graph
    :param link_anchor: token index of the focus word (0-based)
    :param ans_anchor: token index of the answer (0-based)
    :param edge_dict: <head-dep, rel> dict
    :param ph_dict: <token_idx, ph> dict
    :return: list of tokens/placeholders interleaved with edge labels along the
        shortest dependency path from the answer to the focus word
"""
if ans_anchor != link_anchor:
edges = []
for head, rel, dep in triples(dep_parse=dep_parse):
edges.append((head, dep))
graph = nx.Graph(edges)
path_nodes = nx.shortest_path(graph, source=ans_anchor+1, target=link_anchor+1) #[0, 1, 2, 3, 4]
else:
path_nodes = [link_anchor]
path_tok_list = []
path_len = len(path_nodes)
if path_len > 0:
for position in range(path_len-1):
edge = edge_dict['%d-%d' % (path_nodes[position], path_nodes[position+1])]
cur_token_idx = path_nodes[position] - 1
if cur_token_idx in ph_dict:
path_tok_list.append(ph_dict[cur_token_idx])
else:
path_tok_list.append(ph_tok_list[cur_token_idx])
path_tok_list.append(edge)
if link_anchor in ph_dict:
path_tok_list.append(ph_dict[link_anchor])
else:
path_tok_list.append('<E>')
return path_tok_list
| 5,341,644
|
def get_theme_section_directories(theme_folder: str, sections: list = None) -> list:
"""Gets a list of the available sections for a theme
Explanation
-----------
Essentially this function goes into a theme folder (full path to a theme), looks for a folder
called sections and returns a list of all the .jinja files available stripped of the extension
so i.e. if `<theme folder>/sections` had 3 files `education.jinja`, `work_experience.jinja` and
`volunteering_experience.jinja` this function would return ['education', 'work_experience', 'volunteering_experience']
Parameters
----------
    sections : (list, optional)
        A list of section names; pass None (the default) or an empty list to have
        them searched for in the theme folder
theme_folder : str
The full path to the theme folder (typically from calling locate_theme_directory() )
Returns
-------
list
The name(s) of the section templates that exist within the sections list without extensions
"""
    if sections:
        return sections
    sections = []
    if os.path.exists(os.path.join(theme_folder, "sections")):
for section in os.listdir(os.path.join(theme_folder, "sections")):
if section.endswith(".jinja"):
section = section.replace(".jinja", "")
sections.append(section)
return sections
| 5,341,645
|
def download_video_url(
video_url: str,
pipeline: PipelineContext,
destination="%(title)s.%(ext)s",
progress=ProgressMonitor.NULL,
):
"""Download a single video from the ."""
config = pipeline.config
logger = logging.getLogger(__name__)
logger.info("Starting video download from URL: %s", video_url)
# Setup progress-tracking
progress.scale(total_work=1.0)
progress_tracker = YDLProgressTracker(show_progress_bar=True)
# Resolve destination path template
output_template = complete_template(config.sources.root, destination)
logger.info("Output template: %s", output_template)
ydl_opts = {
"format": "mp4",
"logger": YDLLogger(logger),
"progress_hooks": [progress_tracker.hook],
"outtmpl": output_template,
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
# Determine destination file name
video_info = ydl.extract_info(video_url, download=False)
file_name = ydl.prepare_filename(video_info)
logger.info("Downloading file: %s", file_name)
# Download file
with progress_tracker.track(progress):
ydl.download([video_url])
progress.complete()
return file_name
| 5,341,646
|
def get_block_name(source):
"""Get block name version from source."""
url_parts = urlparse(source)
file_name = url_parts.path
extension = file_name.split(".")[-1]
new_path = file_name.replace("." + extension, "_block." + extension)
new_file_name = urlunparse(
(
url_parts.scheme,
url_parts.netloc,
new_path,
url_parts.params,
url_parts.query,
url_parts.fragment,
)
)
return new_file_name
| 5,341,647
|
def int_to_symbol(i):
""" Convert numeric symbol or token to a desriptive name.
"""
try:
return symbol.sym_name[i]
except KeyError:
return token.tok_name[i]
| 5,341,648
|
def test_iron_skillet(g):
"""
Test the "iron-skillet" snippet
"""
sc = g.build()
test_stack = 'snippets'
test_snippets = ['all']
push_test(sc, test_stack, test_snippets)
| 5,341,649
|
def write_radius_pkix_cd_manage_trust_infile(policy_json, roles, trust_infile_path):
"""Write the input file for radius_pkix_cd_manage_trust.py."""
role_list = roles.split(",")
ti_file_lines = []
for role in role_list:
for policy_role in policy_json["roles"]:
if policy_role["name"] == role:
lines = ["{}|{}".format(role, supplicant) for supplicant in policy_role["members"]]
ti_file_lines.extend(lines)
if not ti_file_lines:
print("No policy lines for trust infile! Ensure that roles map to policy roles!")
return
ti_file_contents = "\n".join(ti_file_lines)
with open(trust_infile_path, "w") as ti_file:
ti_file.write(ti_file_contents)
print("Updated {}".format(trust_infile_path))
| 5,341,650
|
def debugger(parser, token):
"""
Activates a debugger session in both passes of the template renderer
"""
pudb.set_trace()
return DebuggerNode()
| 5,341,651
|
def extra_init(app):
"""Extra blueprint initialization that requires application context."""
if 'header_links' not in app.jinja_env.globals:
app.jinja_env.globals['header_links'] = []
# Add links to 'header_links' var in jinja globals. This allows header_links
# to be read by all templates in the app instead of just this blueprint.
app.jinja_env.globals['header_links'].extend([
("Feedback", 'feedback.feedback'),
])
| 5,341,652
|
def get_openmm_energies(system_pdb, system_xml):
"""
    Prints the decomposed OpenMM energies (bond, angle, torsion and nonbonded
    terms) for the given system.
Parameters
----------
system_pdb : str
Input PDB file
system_xml : str
Forcefield file in XML format
"""
pdb = simtk.openmm.app.PDBFile(system_pdb)
ff_xml_file = open(system_xml, "r")
system = simtk.openmm.XmlSerializer.deserialize(ff_xml_file.read())
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(pdb.topology, system, integrator)
simulation.context.setPositions(pdb.positions)
state = simulation.context.getState(
getEnergy=True, getParameters=True, getForces=True
)
force_group = []
for i, force in enumerate(system.getForces()):
force_group.append(force.__class__.__name__)
forcegroups = {}
for i in range(system.getNumForces()):
force = system.getForce(i)
force.setForceGroup(i)
forcegroups[force] = i
energies = {}
for f, i in forcegroups.items():
energies[f] = (
simulation.context.getState(getEnergy=True, groups=2 ** i)
.getPotentialEnergy()
._value
)
decomposed_energy = []
for key, val in energies.items():
decomposed_energy.append(val)
df_energy_openmm = pd.DataFrame(
list(zip(force_group, decomposed_energy)),
columns=["Energy_term", "Energy_openmm_params"],
)
energy_values = [
list(
df_energy_openmm.loc[
df_energy_openmm["Energy_term"] == "HarmonicBondForce"
].values[0]
)[1],
list(
df_energy_openmm.loc[
df_energy_openmm["Energy_term"] == "HarmonicAngleForce"
].values[0]
)[1],
list(
df_energy_openmm.loc[
df_energy_openmm["Energy_term"] == "PeriodicTorsionForce"
].values[0]
)[1],
list(
df_energy_openmm.loc[
df_energy_openmm["Energy_term"] == "NonbondedForce"
].values[0]
)[1],
]
energy_group = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_open_mm = pd.DataFrame(
list(zip(energy_group, energy_values)),
columns=["Energy_term", "Energy_openmm_params"],
)
df_energy_open_mm = df_energy_open_mm.set_index("Energy_term")
print(df_energy_open_mm)
| 5,341,653
|
def cranimp(i, s, m, N):
"""
Calculates the result of c_i,s^dag a_s acting on an integer m. Returns the new basis state and the fermionic prefactor.
Spin: UP - s=0, DOWN - s=1.
"""
offi = 2*(N-i)-1-s
offimp = 2*(N+1)-1-s
m1 = flipBit(m, offimp)
if m1<m:
m2=flipBit(m1, offi)
if m2>m1:
prefactor = prefactor_offset(m1, offi, N)
prefactor *= prefactor_offset_imp(m, s, N)
return prefactor, m2
return 0, 0
| 5,341,654
|
def _can_beeify():
""" Determines if the random chance to beeify has occured """
return randint(0, 12) == 0
| 5,341,655
|
def test_relax_parameters_vol_shape(init_relax_parameters):
"""Test volume and shape relaxation combinations."""
del init_relax_parameters.relax.positions
massager = ParametersMassage(init_relax_parameters)
parameters = massager.parameters[_DEFAULT_OVERRIDE_NAMESPACE]
assert parameters.isif == 6
| 5,341,656
|
def p_for_header(p):
"""
for_header : for_simple
| for_complex
"""
p[0] = p[1]
| 5,341,657
|
def get_object_classes(db):
"""return a list of all object classes"""
    classes = []
    for item in classinfo:
        classes.append(item)
    return classes
| 5,341,658
|
def load_arviz_data(dataset=None, data_home=None):
"""Load a local or remote pre-made dataset.
Run with no parameters to get a list of all available models.
The directory to save to can also be set with the environement
variable `ARVIZ_HOME`. The checksum of the dataset is checked against a
hardcoded value to watch for data corruption.
Run `az.clear_data_home` to clear the data directory.
Parameters
----------
dataset : str
Name of dataset to load.
data_home : str, optional
Where to save remote datasets
Returns
-------
xarray.Dataset
"""
if dataset in LOCAL_DATASETS:
resource = LOCAL_DATASETS[dataset]
return from_netcdf(resource.filename)
elif dataset in REMOTE_DATASETS:
remote = REMOTE_DATASETS[dataset]
home_dir = get_data_home(data_home=data_home)
file_path = os.path.join(home_dir, remote.filename)
if not os.path.exists(file_path):
http_type = rcParams["data.http_protocol"]
# Replaces http type. Redundant if http_type is http, useful if http_type is https
url = remote.url.replace("http", http_type)
urlretrieve(url, file_path)
checksum = _sha256(file_path)
if remote.checksum != checksum:
raise IOError(
"{} has an SHA256 checksum ({}) differing from expected ({}), "
"file may be corrupted. Run `arviz.clear_data_home()` and try "
"again, or please open an issue.".format(file_path, checksum, remote.checksum)
)
return from_netcdf(file_path)
else:
if dataset is None:
return dict(itertools.chain(LOCAL_DATASETS.items(), REMOTE_DATASETS.items()))
else:
raise ValueError(
"Dataset {} not found! The following are available:\n{}".format(
dataset, list_datasets()
)
)
| 5,341,659
|
def get_ax(rows=1, cols=1, size=8):
"""Return a Matplotlib Axes array to be used in
all visualizations in the notebook. Provide a
central point to control graph sizes.
Change the default size attribute to control the size
of rendered images
"""
_, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
return ax
| 5,341,660
|
def prepare_ablation_from_config(config: Mapping[str, Any], directory: str, save_artifacts: bool):
"""Prepare a set of ablation study directories."""
metadata = config['metadata']
optuna_config = config['optuna']
ablation_config = config['ablation']
evaluator = ablation_config['evaluator']
evaluator_kwargs = ablation_config['evaluator_kwargs']
evaluation_kwargs = ablation_config['evaluation_kwargs']
it = itt.product(
ablation_config['datasets'],
ablation_config['create_inverse_triples'],
ablation_config['models'],
ablation_config['loss_functions'],
ablation_config['regularizers'],
ablation_config['optimizers'],
ablation_config['training_loops'],
)
directories = []
for counter, (
dataset,
create_inverse_triples,
model,
loss,
regularizer,
optimizer,
training_loop,
) in enumerate(it):
experiment_name = f'{counter:04d}_{normalize_string(dataset)}_{normalize_string(model)}'
output_directory = os.path.join(directory, experiment_name)
os.makedirs(output_directory, exist_ok=True)
# TODO what happens if already exists?
_experiment_optuna_config = optuna_config.copy()
_experiment_optuna_config['storage'] = f'sqlite:///{output_directory}/optuna_results.db'
if save_artifacts:
save_model_directory = os.path.join(output_directory, 'artifacts')
os.makedirs(save_model_directory, exist_ok=True)
_experiment_optuna_config['save_model_directory'] = save_model_directory
hpo_config = dict()
for retain_key in ('stopper', 'stopper_kwargs'):
if retain_key in ablation_config:
logger.info(f'Retaining {retain_key} configuration in HPO')
hpo_config[retain_key] = deepcopy(ablation_config[retain_key])
for error_key in ('early_stopping', 'early_stopping_kwargs'):
if error_key in ablation_config:
raise ValueError(f'Outdated key: {error_key}. Please update')
# TODO incorporate setting of random seed
# pipeline_kwargs=dict(
# random_seed=random.randint(1, 2 ** 32 - 1),
# ),
def _set_arguments(key: str, value: str) -> None:
"""Set argument and its values."""
d = {key: value}
kwargs = ablation_config[f'{key}_kwargs'][model][value]
if kwargs:
d[f'{key}_kwargs'] = kwargs
kwargs_ranges = ablation_config[f'{key}_kwargs_ranges'][model][value]
if kwargs_ranges:
d[f'{key}_kwargs_ranges'] = kwargs_ranges
hpo_config.update(d)
# Add dataset to current_pipeline
hpo_config['dataset'] = dataset
logger.info(f"Dataset: {dataset}")
hpo_config['dataset_kwargs'] = dict(create_inverse_triples=create_inverse_triples)
logger.info(f"Add inverse triples: {create_inverse_triples}")
hpo_config['model'] = model
model_kwargs = ablation_config['model_kwargs'][model]
if model_kwargs:
hpo_config['model_kwargs'] = ablation_config['model_kwargs'][model]
hpo_config['model_kwargs_ranges'] = ablation_config['model_kwargs_ranges'][model]
logger.info(f"Model: {model}")
# Add loss function to current_pipeline
_set_arguments(key='loss', value=loss)
logger.info(f"Loss function: {loss}")
# Add regularizer to current_pipeline
_set_arguments(key='regularizer', value=regularizer)
logger.info(f"Regularizer: {regularizer}")
# Add optimizer to current_pipeline
_set_arguments(key='optimizer', value=optimizer)
logger.info(f"Optimizer: {optimizer}")
# Add training approach to current_pipeline
hpo_config['training_loop'] = training_loop
logger.info(f"Training loop: {training_loop}")
if normalize_string(training_loop, suffix=_TRAINING_LOOP_SUFFIX) == 'slcwa':
negative_sampler = ablation_config['negative_sampler']
_set_arguments(key='negative_sampler', value=negative_sampler)
logger.info(f"Negative sampler: {negative_sampler}")
# Add training kwargs and kwargs_ranges
training_kwargs = ablation_config['training_kwargs'][model][training_loop]
if training_kwargs:
hpo_config['training_kwargs'] = training_kwargs
hpo_config['training_kwargs_ranges'] = ablation_config['training_kwargs_ranges'][model][training_loop]
# Add evaluation
hpo_config['evaluator'] = evaluator
if evaluator_kwargs:
hpo_config['evaluator_kwargs'] = evaluator_kwargs
hpo_config['evaluation_kwargs'] = evaluation_kwargs
logger.info(f"Evaluator: {evaluator}")
rv_config = dict(
type='hpo',
metadata=metadata,
pipeline=hpo_config,
optuna=_experiment_optuna_config,
)
rv_config_path = os.path.join(output_directory, 'hpo_config.json')
with open(rv_config_path, 'w') as file:
json.dump(rv_config, file, indent=2, ensure_ascii=True)
directories.append((output_directory, rv_config_path))
return directories
| 5,341,661
|
def spin_coherent(j, theta, phi, type='ket'):
"""Generates the spin state |j, m>, i.e. the eigenstate
of the spin-j Sz operator with eigenvalue m.
Parameters
----------
j : float
The spin of the state.
theta : float
Angle from z axis.
phi : float
Angle from x axis.
type : string {'ket', 'bra', 'dm'}
Type of state to generate.
Returns
-------
state : qobj
Qobj quantum object for spin coherent state
"""
Sp = jmat(j, '+')
Sm = jmat(j, '-')
psi = (0.5 * theta * np.exp(1j * phi) * Sm -
0.5 * theta * np.exp(-1j * phi) * Sp).expm() * spin_state(j, j)
if type == 'ket':
return psi
elif type == 'bra':
return psi.dag()
elif type == 'dm':
return ket2dm(psi)
else:
raise ValueError("invalid value keyword argument 'type'")
| 5,341,662
|
def vgg_upsampling(classes, target_shape=None, scale=1, weight_decay=0., block_name='featx'):
"""A VGG convolutional block with bilinear upsampling for decoding.
:param classes: Integer, number of classes
:param scale: Float, scale factor to the input feature, varing from 0 to 1
:param target_shape: 4D Tuples with targe_height, target_width as
the 2nd, 3rd elements if `channels_last` or as the 3rd, 4th elements if
`channels_first`.
>>> from keras_fcn.blocks import vgg_upsampling
>>> feat1, feat2, feat3 = feat_pyramid[:3]
>>> y = vgg_upsampling(classes=21, target_shape=(None, 14, 14, None),
>>> scale=1, block_name='feat1')(feat1, None)
>>> y = vgg_upsampling(classes=21, target_shape=(None, 28, 28, None),
>>> scale=1e-2, block_name='feat2')(feat2, y)
>>> y = vgg_upsampling(classes=21, target_shape=(None, 224, 224, None),
>>> scale=1e-4, block_name='feat3')(feat3, y)
"""
def f(x, y):
score = Conv2D(filters=classes, kernel_size=(1, 1),
activation='linear',
padding='valid',
kernel_initializer='he_normal',
kernel_regularizer=l2(weight_decay),
name='score_{}'.format(block_name))(x)
if y is not None:
def scaling(xx, ss=1):
return xx * ss
scaled = Lambda(scaling, arguments={'ss': scale},
name='scale_{}'.format(block_name))(score)
score = add([y, scaled])
upscore = BilinearUpSampling2D(
target_shape=target_shape,
name='upscore_{}'.format(block_name))(score)
return upscore
return f
| 5,341,663
|
def show_toolbar(request):
"""Determine if toolbar will be displayed."""
return settings.DEBUG
| 5,341,664
|
def compute_metrics(logits, labels, weights):
"""Compute summary metrics."""
loss, weight_sum = compute_weighted_cross_entropy(logits, labels, weights)
acc, _ = compute_weighted_accuracy(logits, labels, weights)
metrics = {
'loss': loss,
'accuracy': acc,
'denominator': weight_sum,
}
return metrics
| 5,341,665
|
def test_wild080_wild080_v1_xml(mode, save_output, output_format):
"""
Consistency of governing type declarations between locally-declared
elements and lax wildcards in a content model No violation of Element
Declarations Consistent with a skip wildcard
"""
assert_bindings(
schema="saxonData/Wild/wild080.xsd",
instance="saxonData/Wild/wild080.v1.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 5,341,666
|
def test_try_decorator():
"""Test try and log decorator."""
# for pydocstyle
@try_decorator("oops", default_return="failed")
def fn():
raise Exception("expected")
assert fn() == "failed"
| 5,341,667
|
def start_session(web_session=None):
"""Starts a SQL Editor Session
Args:
web_session (object): The web_session object this session will belong to
Returns:
A dict holding the result message
"""
new_session = SqleditorModuleSession(web_session)
result = Response.ok("New SQL Editor session created successfully.", {
"module_session_id": new_session.module_session_id
})
return result
| 5,341,668
|
def show_box(box_data):
"""from box_data produce a 3D image of surfaces and display it"""
use_color_flag=True
reverse_redshift_flag=True
(dimx,dimy,dimz)=box_data.shape
print box_data.shape
mycolor='black-white'
if use_color_flag:
mycolor='blue-red'
mycolor='RdBu'
#need to set color scale to be symmetric around zero
if reverse_redshift_flag:
        print 'want to be able to reverse long axis ordering'
box_data=numpy.flipud(box_data)
#box_data=box_data[:,::-1,:]
print box_data.min(), box_data.max()
#clip_data
clip=30.0
vmax=clip
vmin= -clip
box_data[box_data>vmax]=vmax
box_data[box_data<vmin]=vmin
print box_data.min(), box_data.max()
c_black=(0.0,0.0,0.0)
c_white=(1.0,1.0,1.0)
#s=box_data
mlab.figure(1,bgcolor=c_white,fgcolor=c_black)
planex=mlab.pipeline.image_plane_widget(mlab.pipeline.scalar_field(box_data),
plane_orientation='x_axes',
slice_index=dimx-1,colormap=mycolor
)
planey=mlab.pipeline.image_plane_widget(mlab.pipeline.scalar_field(box_data),
plane_orientation='y_axes',
slice_index=dimy-1,colormap=mycolor
)
planez=mlab.pipeline.image_plane_widget(mlab.pipeline.scalar_field(box_data),
plane_orientation='z_axes',
slice_index=dimz-1,colormap=mycolor
)
# colormap='blue-red' may be useful for 21 cm brightness maps
# although it passes through white not black
#now try to invert the color scheme
lut=planez.module_manager.scalar_lut_manager.lut.table.to_array()
lut=numpy.flipud(lut)
#lut=lutForTBMap()
#print help(lut)
planex.module_manager.scalar_lut_manager.lut.table = lut
planey.module_manager.scalar_lut_manager.lut.table = lut
planez.module_manager.scalar_lut_manager.lut.table = lut
mlab.draw() #force update of figure
mlab.colorbar()
mlab.outline(color=c_black)
#mlab.show()
filename='volume_slice.png'
figsave=mlab.gcf()
#mlab.savefig(filename,figure=figsave,magnification=4.)
mlab.savefig(filename,size=(1800,800),figure=figsave,magnification=4)
mlab.close()
#Can't get MayaVi to output a good eps, so use PIL to convert
im = Image.open(filename)
im.save("volume_slice.eps")
return
| 5,341,669
|
async def error_middleware(request: Request, handler: t.Callable[[Request], t.Awaitable[Response]]) -> Response:
"""logs an exception and returns an error message to the client
"""
try:
return await handler(request)
except Exception as e:
logger.exception(e)
return json_response(text=str(e), status=HTTPInternalServerError.status_code)
| 5,341,670
|
def init_mobility_accordion():
"""
Initialize the accordion for mobility tab.
Args: None
Returns:
mobility_accordion (object): dash html.Div that contains individual accordions
"""
accord_1 = init_accordion_element(
title="Mobility Index",
id='id_mobility_index',
tab_n=4,
group_n=1
)
accord_2 = init_accordion_element(
title="Comming Soon!",
id='id_metro_accordion',
tab_n=4,
group_n=2
)
accord_3 = init_accordion_element(
title="Comming Soon!",
id='id_tram_accordion',
tab_n=4,
group_n=3
)
accord_4 = init_accordion_element(
title="Comming Soon!",
id='id_bikes_accordion',
tab_n=4,
group_n=4
)
mobility_accordion = [
accord_1,
accord_2,
accord_3,
accord_4
]
return assemble_accordion(mobility_accordion)
| 5,341,671
|
def per_image_whiten(X):
""" Subtracts the mean of each image in X and renormalizes them to unit norm.
"""
num_examples, height, width, depth = X.shape
X_flat = X.reshape((num_examples, -1))
X_mean = X_flat.mean(axis=1)
X_cent = X_flat - X_mean[:, None]
X_norm = np.sqrt( np.sum( X_cent * X_cent, axis=1) )
X_out = X_cent / X_norm[:, None]
X_out = X_out.reshape(X.shape)
return X_out
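# Quick check of the stated invariant (illustrative; assumes numpy is imported
# as np, as the function above requires):
X = np.random.rand(2, 8, 8, 3).astype(np.float32)
flat = per_image_whiten(X).reshape(2, -1)
print(flat.mean(axis=1))                # ~0 for each image
print(np.linalg.norm(flat, axis=1))     # ~1 for each image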
| 5,341,672
|
def test_is_read_values_any_allowed_cdf_single(
cursor, make_user_roles, new_cdf_forecast, other_obj):
"""That that a user can (and can not) perform the specified action"""
info = make_user_roles('read_values', other_obj, True)
authid = info['user']['auth0_id']
obj = new_cdf_forecast(org=info['org'])
for id_ in obj['constant_values'].keys():
binid = uuid_to_bin(UUID(id_))
cursor.execute('SELECT is_read_values_any_allowed(%s, %s)',
(authid, binid))
if other_obj == 'cdf_forecasts':
assert cursor.fetchone()[0] == 1
else:
assert cursor.fetchone()[0] == 0
| 5,341,673
|
def fill_defaults(data, vals) -> dict:
"""Fill defaults if source is not present"""
for val in vals:
_name = val['name']
_type = val['type'] if 'type' in val else 'str'
_source = val['source'] if 'source' in val else _name
if _type == 'str':
_default = val['default'] if 'default' in val else ''
if 'default_val' in val and val['default_val'] in val:
_default = val[val['default_val']]
if _name not in data:
data[_name] = from_entry([], _source, default=_default)
elif _type == 'bool':
_default = val['default'] if 'default' in val else False
_reverse = val['reverse'] if 'reverse' in val else False
if _name not in data:
data[_name] = from_entry_bool([], _source, default=_default, reverse=_reverse)
return data
| 5,341,674
|
def crossValPlot(skf,classifier,X_,y_):
"""Code adapted from:
"""
X = np.asarray(X_)
y = np.asarray(y_)
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
f,ax = plt.subplots(figsize=(10,7))
i = 0
for train, test in skf.split(X, y):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
tprs.append(interp(mean_fpr, fpr, tpr))
tprs[-1][0] = 0.0
roc_auc = auc(fpr, tpr)
aucs.append(roc_auc)
ax.plot(fpr, tpr, lw=1, alpha=0.3,
label='ROC fold %d (AUC = %0.2f)' % (i, roc_auc))
i += 1
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Luck', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
ax.plot(mean_fpr, mean_tpr, color='b',
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
label=r'$\pm$ 1 std. dev.')
ax.set_xlim([-0.05, 1.05])
ax.set_ylim([-0.05, 1.05])
ax.set_xlabel('False Positive Rate')
ax.set_ylabel('True Positive Rate')
ax.set_title('Receiver operating characteristic example')
ax.legend(bbox_to_anchor=(1,1))
| 5,341,675
|
def delete_product(info):
"""*
"""
# get objectid
import pdb; pdb.set_trace()
params = 'where={"name": "%s"}' % info['name']
res = requests.get(CLASSES_BASE_URL + 'products', headers=RAW_HEADERS,
params=params)
if res.status_code == 200:
record = json.loads(res.content.decode())
try:
objectid = record['results'][0]['objectId']
except:
print('No objectid found with %s' % info['name'])
return
# delete record
res = requests.delete('%sproducts/%s' % (CLASSES_BASE_URL, objectid),
headers=RAW_HEADERS)
if res.status_code == 200:
print('Delete %s OK.' % info['name'])
else:
print('Delete %s failed.' % info['name'])
else:
print('Get objectid of %s failed.' % info['name'])
| 5,341,676
|
def static_shuttle_between():
"""
Route endpoint to show real shuttle data within a certain time range at once.
Returns:
rendered website displaying all points at once.
Example:
http://127.0.0.1:5000/?start_time=2018-02-14%2015:40:00&end_time=2018-02-14%2016:02:00
"""
start_time = request.args.get('start_time', default="2018-02-14 13:00:00")
end_time = request.args.get('end_time', default="2018-02-14 17:00:00")
return render_to_static(start_time, end_time)
| 5,341,677
|
def read_data(inargs, infiles, ref_cube=None):
"""Read data."""
clim_dict = {}
trend_dict = {}
for filenum, infile in enumerate(infiles):
cube = iris.load_cube(infile, gio.check_iris_var(inargs.var))
if ref_cube:
branch_time = None if inargs.branch_times[filenum] == 'default' else str(inargs.branch_times[filenum])
time_constraint = timeseries.get_control_time_constraint(cube, ref_cube, inargs.time, branch_time=branch_time)
cube = cube.extract(time_constraint)
iris.util.unify_time_units([ref_cube, cube])
cube.coord('time').units = ref_cube.coord('time').units
cube.replace_coord(ref_cube.coord('time'))
else:
time_constraint = gio.get_time_constraint(inargs.time)
cube = cube.extract(time_constraint)
#cube = uconv.convert_to_joules(cube)
if inargs.perlat:
grid_spacing = grids.get_grid_spacing(cube)
cube.data = cube.data / grid_spacing
trend_cube = calc_trend_cube(cube.copy())
clim_cube = cube.collapsed('time', iris.analysis.MEAN)
clim_cube.remove_coord('time')
model = cube.attributes['model_id']
realization = 'r' + str(cube.attributes['realization'])
physics = 'p' + str(cube.attributes['physics_version'])
key = (model, physics, realization)
trend_dict[key] = trend_cube
clim_dict[key] = clim_cube
experiment = cube.attributes['experiment_id']
experiment = 'historicalAA' if experiment == "historicalMisc" else experiment
trend_ylabel = get_ylabel(cube, 'trend', inargs)
clim_ylabel = get_ylabel(cube, 'climatology', inargs)
metadata_dict = {infile: cube.attributes['history']}
return cube, trend_dict, clim_dict, experiment, trend_ylabel, clim_ylabel, metadata_dict
| 5,341,678
|
async def create_account(*, user):
"""
Open an account for a user
Save account details in json file
"""
with open("mainbank.json", "r") as f:
users = json.load(f)
if str(user.id) in users:
return False
else:
users[str(user.id)] = {"wallet": 0, "bank": 0}
with open("mainbank.json", "w") as f:
json.dump(users, f)
| 5,341,679
|
def commonpath(paths):
"""Given a sequence of path names, returns the longest common sub-path."""
if not paths:
raise ValueError('commonpath() arg is an empty sequence')
if isinstance(paths[0], bytes):
sep = b'\\'
altsep = b'/'
curdir = b'.'
else:
sep = '\\'
altsep = '/'
curdir = '.'
try:
drivesplits = [ntpath.splitdrive(p.replace(altsep, sep).lower()) for p in paths]
split_paths = [p.split(sep) for d, p in drivesplits]
try:
isabs, = set(p[:1] == sep for d, p in drivesplits)
except ValueError:
raise ValueError("Can't mix absolute and relative paths")
# Check that all drive letters or UNC paths match. The check is made only
# now otherwise type errors for mixing strings and bytes would not be
# caught.
if len(set(d for d, p in drivesplits)) != 1:
raise ValueError("Paths don't have the same drive")
drive, path = ntpath.splitdrive(paths[0].replace(altsep, sep))
common = path.split(sep)
common = [c for c in common if c and c != curdir]
split_paths = [[c for c in s if c and c != curdir] for s in split_paths]
s1 = min(split_paths)
s2 = max(split_paths)
for i, c in enumerate(s1):
if c != s2[i]:
common = common[:i]
break
else:
common = common[:len(s1)]
prefix = drive + sep if isabs else drive
return prefix + sep.join(common)
except (TypeError, AttributeError):
genericpath._check_arg_types('commonpath', *paths)
raise
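# Illustrative call (Windows path semantics: drives are compared case-insensitively
# and the casing of the first argument is kept; assumes ntpath and genericpath are
# imported, as the function requires):
print(commonpath([r"C:\Projects\app\src", r"C:\projects\app\docs"]))
# C:\Projects\app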
| 5,341,680
|
def test_set_container_uid_and_pod_fs_gid():
"""
Test specification of the simplest possible pod specification
"""
assert api_client.sanitize_for_serialization(make_pod(
name='test',
image='jupyter/singleuser:latest',
cmd=['jupyterhub-singleuser'],
port=8888,
run_as_uid=1000,
fs_gid=1000,
image_pull_policy='IfNotPresent'
)) == {
"metadata": {
"name": "test",
"annotations": {},
"labels": {},
},
"spec": {
'automountServiceAccountToken': False,
"containers": [
{
"securityContext": {
"runAsUser": 1000,
},
"env": [],
"name": "notebook",
"image": "jupyter/singleuser:latest",
"imagePullPolicy": "IfNotPresent",
"args": ["jupyterhub-singleuser"],
"ports": [{
"name": "notebook-port",
"containerPort": 8888
}],
'volumeMounts': [],
"resources": {
"limits": {},
"requests": {}
}
}
],
'restartPolicy': 'OnFailure',
'securityContext': {
'fsGroup': 1000,
},
'volumes': [],
},
"kind": "Pod",
"apiVersion": "v1"
}
| 5,341,681
|
def reshape(x, shape):
""" Reshape array to new shape
This is a parallelized version of the ``np.reshape`` function with the
following limitations:
1. It assumes that the array is stored in `row-major order`_
2. It only allows for reshapings that collapse or merge dimensions like
``(1, 2, 3, 4) -> (1, 6, 4)`` or ``(64,) -> (4, 4, 4)``
.. _`row-major order`: https://en.wikipedia.org/wiki/Row-_and_column-major_order
When communication is necessary this algorithm depends on the logic within
rechunk. It endeavors to keep chunk sizes roughly the same when possible.
See Also
--------
dask.array.rechunk
numpy.reshape
"""
# Sanitize inputs, look for -1 in shape
from .slicing import sanitize_index
shape = tuple(map(sanitize_index, shape))
known_sizes = [s for s in shape if s != -1]
    if len(known_sizes) < len(shape):
        if len(shape) - len(known_sizes) > 1:
            raise ValueError("can only specify one unknown dimension")
# Fastpath for x.reshape(-1) on 1D arrays, allows unknown shape in x
# for this case only.
if len(shape) == 1 and x.ndim == 1:
return x
missing_size = sanitize_index(x.size / reduce(mul, known_sizes, 1))
shape = tuple(missing_size if s == -1 else s for s in shape)
if np.isnan(sum(x.shape)):
raise ValueError("Array chunk size or shape is unknown. shape: %s", x.shape)
if reduce(mul, shape, 1) != x.size:
raise ValueError("total size of new array must be unchanged")
if x.shape == shape:
return x
meta = meta_from_array(x, len(shape))
name = "reshape-" + tokenize(x, shape)
if x.npartitions == 1:
key = next(flatten(x.__dask_keys__()))
dsk = {(name,) + (0,) * len(shape): (M.reshape, key, shape)}
chunks = tuple((d,) for d in shape)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])
return Array(graph, name, chunks, meta=meta)
# Logic for how to rechunk
inchunks, outchunks = reshape_rechunk(x.shape, shape, x.chunks)
x2 = x.rechunk(inchunks)
# Construct graph
in_keys = list(product([x2.name], *[range(len(c)) for c in inchunks]))
out_keys = list(product([name], *[range(len(c)) for c in outchunks]))
shapes = list(product(*outchunks))
dsk = {a: (M.reshape, b, shape) for a, b, shape in zip(out_keys, in_keys, shapes)}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x2])
return Array(graph, name, outchunks, meta=meta)
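# Usage sketch (illustrative, not part of the original source); relies on the
# dask.array package this function is part of.
def _example_reshape():
    import dask.array as da
    x = da.ones((4, 4, 4), chunks=(2, 4, 4))
    y = reshape(x, (4, 16))    # merge the last two dimensions: (4, 4, 4) -> (4, 16)
    assert y.shape == (4, 16)
    z = reshape(x, (-1,))      # a single -1 infers the remaining size
    assert z.shape == (64,)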
| 5,341,682
|
def update_optimizer_lr(optimizer, lr):
"""
为了动态更新learning rate, 加快训练速度
:param optimizer: torch.optim type
:param lr: learning rate
:return:
"""
for group in optimizer.param_groups:
group['lr'] = lr
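# Usage sketch (illustrative, not part of the original source): decay the
# learning rate of a torch optimizer by a factor of 10 every 10 epochs.
def _example_lr_decay(model_parameters, epochs=30, base_lr=0.1):
    import torch
    optimizer = torch.optim.SGD(model_parameters, lr=base_lr)
    for epoch in range(epochs):
        update_optimizer_lr(optimizer, base_lr * (0.1 ** (epoch // 10)))
        # ... run one training epoch here ...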
| 5,341,683
|
async def test_kafka_provider_metric_unavailable(aiohttp_server, loop):
"""Test the resilience of the Kafka provider when the name of a metric taken from a
Metric resource is not found.
"""
columns = ["id", "value"]
rows = [["first_id", [0.42, 1.0]]]
kafka = await aiohttp_server(make_kafka("my_table", columns, rows))
metric = GlobalMetricFactory(
spec__provider__name="my-provider", spec__provider__metric="wrong_id"
)
metrics_provider = GlobalMetricsProviderFactory(
metadata__name="my-provider",
spec__type="kafka",
spec__kafka__comparison_column="id",
spec__kafka__value_column="value",
spec__kafka__table="my_table",
spec__kafka__url=server_endpoint(kafka),
)
async with ClientSession() as session:
provider = metrics.Provider(metrics_provider=metrics_provider, session=session)
assert isinstance(provider, metrics.Kafka)
with pytest.raises(
MetricError, match="The value of the metric 'wrong_id' cannot be read"
):
await provider.query(metric)
| 5,341,684
|
def test_create_extended(setup_teardown_file):
"""Create an extended dataset."""
f = setup_teardown_file[3]
grp = f.create_group("test")
dset = grp.create_dataset('foo', (63,))
assert dset.shape == (63,)
assert dset.size == 63
dset = f.create_dataset('bar', (6, 10))
assert dset.shape == (6, 10)
assert dset.size == (60)
| 5,341,685
|
def clean_json(data_type: str):
"""deletes all the data from the json corresponding to the data type.
data_type is a string corresponding to key of the dictionary "data_types",
where the names of json files for each data type are stored."""
# check if correct data type
if not data_type in json_files.keys():
logger.error("Incorrect data type detected while tying to clear the json file.")
return
with open(json_files[data_type], "w") as app_data_file:
json.dump([], app_data_file)
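# Illustrative sketch (not part of the original source): ``json_files`` and
# ``logger`` are module-level objects assumed by clean_json. A minimal setup
# might look like this:
#
#     json_files = {"tasks": "tasks.json", "notes": "notes.json"}
#     clean_json("tasks")    # resets tasks.json to an empty list
#     clean_json("unknown")  # logs an error and leaves all files untouched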
| 5,341,686
|
def get_state(module_instance, incremental_state, key_postfix):
""" Helper for extracting incremental state """
if incremental_state is None:
return None
full_key = _get_full_key(module_instance, key_postfix)
return incremental_state.get(full_key, None)
| 5,341,687
|
def get_lenovo_urls(from_date, to_date):
"""
    Extracts the URLs at which data about vulnerabilities is available.
:param from_date: start of date interval
:param to_date: end of date interval
:return: urls
"""
lenovo_url = config['vendor-cve']['lenovo_url']
len_p = LenovoMainPageParser(lenovo_url, from_date, to_date)
len_p.parse()
return len_p.entities
| 5,341,688
|
def PrintTabular(rows, header):
"""Prints results in LaTeX tabular format.
rows: list of rows
header: list of strings
"""
s = r'\hline ' + ' & '.join(header) + r' \\ \hline'
print(s)
for row in rows:
s = ' & '.join(row) + r' \\'
print(s)
print(r'\hline')
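# Usage sketch (illustrative, not part of the original source): cells must
# already be strings, since rows are joined with ' & '.
def _example_print_tabular():
    header = ['model', 'trend', 'climatology']
    rows = [['A', '0.12', '3.4'], ['B', '0.08', '3.1']]
    PrintTabular(rows, header)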
| 5,341,689
|
def get_object_attributes(DirectoryArn=None, ObjectReference=None, ConsistencyLevel=None, SchemaFacet=None, AttributeNames=None):
"""
Retrieves attributes within a facet that are associated with an object.
See also: AWS API Documentation
Exceptions
:example: response = client.get_object_attributes(
DirectoryArn='string',
ObjectReference={
'Selector': 'string'
},
ConsistencyLevel='SERIALIZABLE'|'EVENTUAL',
SchemaFacet={
'SchemaArn': 'string',
'FacetName': 'string'
},
AttributeNames=[
'string',
]
)
:type DirectoryArn: string
:param DirectoryArn: [REQUIRED]\nThe Amazon Resource Name (ARN) that is associated with the Directory where the object resides.\n
:type ObjectReference: dict
    :param ObjectReference: [REQUIRED]\nReference that identifies the object whose attributes will be retrieved.\n\nSelector (string) --A path selector supports easy selection of an object by the parent/child links leading to it from the directory root. Use the link names from each parent/child link to construct the path. Path selectors start with a slash (/) and link names are separated by slashes. For more information about paths, see Access Objects . You can identify an object in one of the following ways:\n\n$ObjectIdentifier - An object identifier is an opaque string provided by Amazon Cloud Directory. When creating objects, the system will provide you with the identifier of the created object. An object's identifier is immutable and no two objects will ever share the same object identifier\n/some/path - Identifies the object based on path\n#SomeBatchReference - Identifies the object in a batch call\n\n\n\n
:type ConsistencyLevel: string
:param ConsistencyLevel: The consistency level at which to retrieve the attributes on an object.
:type SchemaFacet: dict
:param SchemaFacet: [REQUIRED]\nIdentifier for the facet whose attributes will be retrieved. See SchemaFacet for details.\n\nSchemaArn (string) --The ARN of the schema that contains the facet with no minor component. See arns and In-Place Schema Upgrade for a description of when to provide minor versions.\n\nFacetName (string) --The name of the facet.\n\n\n
:type AttributeNames: list
:param AttributeNames: [REQUIRED]\nList of attribute names whose values will be retrieved.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Attributes': [
{
'Key': {
'SchemaArn': 'string',
'FacetName': 'string',
'Name': 'string'
},
'Value': {
'StringValue': 'string',
'BinaryValue': b'bytes',
'BooleanValue': True|False,
'NumberValue': 'string',
'DatetimeValue': datetime(2015, 1, 1)
}
},
]
}
Response Structure
(dict) --
Attributes (list) --
The attributes that are associated with the object.
(dict) --
The combination of an attribute key and an attribute value.
Key (dict) --
The key of the attribute.
SchemaArn (string) --
The Amazon Resource Name (ARN) of the schema that contains the facet and attribute.
FacetName (string) --
The name of the facet that the attribute exists within.
Name (string) --
The name of the attribute.
Value (dict) --
The value of the attribute.
StringValue (string) --
A string data value.
BinaryValue (bytes) --
A binary data value.
BooleanValue (boolean) --
A Boolean data value.
NumberValue (string) --
A number data value.
DatetimeValue (datetime) --
A date and time value.
Exceptions
CloudDirectory.Client.exceptions.InternalServiceException
CloudDirectory.Client.exceptions.InvalidArnException
CloudDirectory.Client.exceptions.RetryableConflictException
CloudDirectory.Client.exceptions.ValidationException
CloudDirectory.Client.exceptions.LimitExceededException
CloudDirectory.Client.exceptions.AccessDeniedException
CloudDirectory.Client.exceptions.DirectoryNotEnabledException
CloudDirectory.Client.exceptions.ResourceNotFoundException
CloudDirectory.Client.exceptions.FacetValidationException
:return: {
'Attributes': [
{
'Key': {
'SchemaArn': 'string',
'FacetName': 'string',
'Name': 'string'
},
'Value': {
'StringValue': 'string',
'BinaryValue': b'bytes',
'BooleanValue': True|False,
'NumberValue': 'string',
'DatetimeValue': datetime(2015, 1, 1)
}
},
]
}
:returns:
CloudDirectory.Client.exceptions.InternalServiceException
CloudDirectory.Client.exceptions.InvalidArnException
CloudDirectory.Client.exceptions.RetryableConflictException
CloudDirectory.Client.exceptions.ValidationException
CloudDirectory.Client.exceptions.LimitExceededException
CloudDirectory.Client.exceptions.AccessDeniedException
CloudDirectory.Client.exceptions.DirectoryNotEnabledException
CloudDirectory.Client.exceptions.ResourceNotFoundException
CloudDirectory.Client.exceptions.FacetValidationException
"""
pass
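# Illustrative sketch (not part of the original source): this stub only documents
# the Cloud Directory API; the real call goes through a boto3 client. The ARNs,
# selector, and facet name below are placeholders.
def _example_get_object_attributes():
    import boto3
    client = boto3.client('clouddirectory')
    return client.get_object_attributes(
        DirectoryArn='arn:aws:clouddirectory:...:directory/EXAMPLE',
        ObjectReference={'Selector': '/some/path'},
        ConsistencyLevel='EVENTUAL',
        SchemaFacet={'SchemaArn': 'arn:aws:clouddirectory:...:schema/EXAMPLE',
                     'FacetName': 'Person'},
        AttributeNames=['Name'],
    )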
| 5,341,690
|
def normalize_batch_in_training(x, gamma, beta,
reduction_axes, epsilon=1e-3):
"""
Computes mean and std for batch then apply batch_normalization on batch.
# Arguments
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
# Returns
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
if gamma is None:
gamma = ones_like(x)
if beta is None:
beta = zeros_like(x)
mean = av.ops.reduce_mean(x, reduction_axes, True)
variance = av.ops.reduce_mean(av.ops.square(x - mean), reduction_axes, True)
normalized_tensor = batch_normalization(
x, mean, variance, beta, gamma, axis=reduction_axes, epsilon=epsilon)
return normalized_tensor, mean, variance
| 5,341,691
|
def supercell_scaling_by_target_atoms(structure, min_atoms=60, max_atoms=120,
target_shape='sc', lower_search_limit=-2, upper_search_limit=2,
verbose=False):
"""
Find a the supercell scaling matrix that gives the most cubic supercell for a
structure, where the supercell has between the minimum and maximum nubmer of atoms.
Parameters
----------
structure : pymatgen.Structure
Unitcell of a structure
min_atoms : target number of atoms in the supercell, defaults to 5
max_atoms : int
Maximum number of atoms allowed in the supercell
target_shape : str
Target shape of supercell. Could choose 'sc' for simple cubic or 'fcc' for face centered
cubic. Default is 'sc'.
lower_search_limit : int
How far to search below the 'ideal' cubic scaling. Default is -2.
upper_search_limit : int
How far to search below the 'ideal' cubic scaling. Default is 2.
verbose : bool
Whether to print extra details on the cell shapes and scores. Useful for debugging.
Returns
-------
numpy.ndarray
2d array of a scaling matrix, e.g. [[3,0,0],[0,3,0],[0,0,3]]
Notes
-----
The motiviation for this is for use in phonon calculations and defect calculations.
It is important that defect atoms are far enough apart that they do not interact.
Scaling unit cells that are not cubic by even dimensions might result in interacting
defects. An example would be a tetragonal cell with 2x8x8 Ang lattice vectors being
made into a 2x2x2 supercell. Atoms along the first dimension would not be very far
apart.
We are using a pure Python implementation from ASE, which is not very fast for a given
supercell size. This allows for a variable supercell size, so it's going to be slow
for a large range of atoms.
The search limits are passed directloy to ``find_optimal_cell_shape``.
They define the search space for each individual supercell based on the "ideal" scaling.
For example, a cell with 4 atoms and a target size of 110 atoms might have an ideal scaling
of 3x3x3. The search space for a lower and upper limit of -2/+2 would be 1-5. Since the
calculations are based on the cartesian product of 3x3 matrices, large search ranges are
very expensive.
"""
from ase.build import get_deviation_from_optimal_cell_shape, find_optimal_cell_shape
# range of supercell sizes in number of unitcells
supercell_sizes = range(min_atoms//len(structure), max_atoms//len(structure) + 1)
optimal_supercell_shapes = [] # numpy arrays of optimal shapes
optimal_supercell_scores = [] # will correspond to supercell size
# find the target shapes
for sc_size in supercell_sizes:
        optimal_shape = find_optimal_cell_shape(structure.lattice.matrix, sc_size, target_shape, upper_limit=upper_search_limit, lower_limit=lower_search_limit, verbose=verbose)
optimal_supercell_shapes.append(optimal_shape)
optimal_supercell_scores.append(get_deviation_from_optimal_cell_shape(optimal_shape, target_shape))
if verbose:
for i in range(len(supercell_sizes)):
print('{} {:0.4f} {}'.format(supercell_sizes[i], optimal_supercell_scores[i], optimal_supercell_shapes[i].tolist()))
# find the most optimal cell shape along the range of sizes
optimal_sc_shape = optimal_supercell_shapes[np.argmin(optimal_supercell_scores)]
return optimal_sc_shape
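# Usage sketch (illustrative, not part of the original source), assuming a
# pymatgen Structure; multiplying a Structure by a 3x3 scaling matrix builds
# the corresponding supercell.
def _example_build_supercell(structure):
    scaling = supercell_scaling_by_target_atoms(structure, min_atoms=60,
                                                max_atoms=120, target_shape='sc')
    return structure * scaling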
| 5,341,692
|
def itemAPIEndpoint(categoryid):
"""Return page to display JSON formatted information of item."""
items = session.query(Item).filter_by(category_id=categoryid).all()
return jsonify(Items=[i.serialize for i in items])
| 5,341,693
|
def graph_response_function(
func: Callable,
    start: float = 0.001,
    stop: float = 1000,
number_of_observations: int = 1000000,
):
"""
Generates a graph for the passed in function in the same style as the log
based graphs in the assignment.
Args:
func: The function to graph.
start: At what point to start the graph
stop: At what point to stop the graph.
number_of_observations: Number of observations to plot. Given the
curvature of the sigmoidal functions, a very high observation
count is recommended, or you will end up with angular graphs.
"""
x = np.linspace(start, stop, number_of_observations)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim([0.001, 1000])
ax.set_ylim([0.001, 1000])
ax.set_yscale("log")
ax.set_xscale("log")
ax.yaxis.set_major_formatter(ticker.FormatStrFormatter("%0.3f"))
ax.xaxis.set_major_formatter(ticker.FormatStrFormatter("%0.3f"))
plt.plot(x, list(map(func, x)))
plt.show()
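# Usage sketch (illustrative, not part of the original source): plot a simple
# sigmoidal response on the log-log axes set up above, with a lighter
# observation count than the default.
def _example_graph_logistic():
    def logistic(x, k=1.0, x0=1.0, ceiling=100.0):
        return ceiling / (1.0 + np.exp(-k * (np.log(x) - np.log(x0))))
    graph_response_function(logistic, start=0.001, stop=1000,
                            number_of_observations=100000)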
| 5,341,694
|
def command(settings_module,
command,
bin_env=None,
pythonpath=None,
*args, **kwargs):
"""
run arbitrary django management command
"""
da = _get_django_admin(bin_env)
cmd = "{0} {1} --settings={2}".format(da, command, settings_module)
if pythonpath:
cmd = "{0} --pythonpath={1}".format(cmd, pythonpath)
for arg in args:
cmd = "{0} --{1}".format(cmd, arg)
    for key, value in kwargs.items():
if not key.startswith("__"):
cmd = '{0} --{1}={2}'.format(cmd, key, value)
return __salt__['cmd.run'](cmd)
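# Illustrative sketch (not part of the original source): within a salt execution
# module context this would run a management command such as ``migrate``; the
# exact django-admin binary comes from _get_django_admin().
#
#     command('mysite.settings', 'migrate', pythonpath='/srv/mysite',
#             database='default')
#     # builds: <django-admin> migrate --settings=mysite.settings
#     #         --pythonpath=/srv/mysite --database=default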
| 5,341,695
|
def read_input(fpath):
"""
Read an input file, and return a list of tuples, each item
containing a single line.
Args:
fpath (str): File path of the file to read.
Returns:
list of tuples:
[ (xxx, xxx, xxx) ]
"""
with open(fpath, 'r') as f:
data = [line.strip() for line in f.readlines()]
rows = [tuple(map(int, d.split())) for d in data]
columns = format_into_columns(data)
return rows, columns
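# Illustrative sketch (not part of the original source): with an input file
# containing whitespace-separated integers, e.g.
#
#     5 10 25
#     3  4  5
#
# read_input() returns rows as [(5, 10, 25), (3, 4, 5)] plus whatever
# column-wise grouping format_into_columns() (defined elsewhere) produces
# from the raw lines.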
| 5,341,696
|
def get_parquet_lists():
"""
Load all .parquet files and get train and test splits
"""
parquet_files = [f for f in os.listdir(
Config.data_dir) if f.endswith(".parquet")]
train_files = [f for f in parquet_files if 'train' in f]
test_files = [f for f in parquet_files if 'test' in f]
return train_files, test_files
| 5,341,697
|
def test_masked_values():
"""Test specific values give expected results"""
data = np.zeros((2, 2), dtype=np.float32)
data = np.ma.masked_array(data, [[True, False], [False, False]])
input_cube = set_up_variable_cube(
data, name="snow_fraction", units="1", standard_grid_metadata="uk_ens",
)
with pytest.raises(
NotImplementedError, match="SignificantPhaseMask cannot handle masked data"
):
SignificantPhaseMask()(input_cube, "snow")
| 5,341,698
|
def find_level(key):
"""
Find the last 15 bits of a key, corresponding to a level.
"""
return key & LEVEL_MASK
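# Worked example (illustrative, not part of the original source): LEVEL_MASK is
# assumed to be the low-15-bit mask, i.e. (1 << 15) - 1 == 0x7FFF.
#
#     find_level(0x12345)          # 0x12345 & 0x7FFF == 0x2345
#     find_level((7 << 15) | 42)   # -> 42, the low 15 bits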
| 5,341,699
|