| content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
|---|---|---|
import random
import collections
def load_papertext(train_rate=0.8, dev_rate=0.1, test_rate=0.1, max_length=50, download_from_label_studio=True):
    """
    Aspect-based sentiment analysis
    :param kind: whether to load the papertext data or the dem8 data
    :return:
    :rtype:
    """
    export_dir = "/opt/nlp/data/papertext/"
    if download_from_label_studio:
        json_path = export_data(hostname='http://127.0.0.1:8080/api/', dirpath=export_dir, jsonfile="0707.json")
    data = collect_json(dirpath=export_dir)
    valid_data = []
    for one in data:
        for complete in one['completions']:
            if complete.get('was_cancelled'):
                # the annotation was cancelled, so skip it
                continue
            else:
                # only take the first annotation result; there is only one annotation per sample
                if complete['result']:
                    result_one = complete['result'][0]
                    label = result_one['value']['choices'][0]
                    location = one['data']['location']
                    location = location.replace('行数','lines num').replace('段落宽度','paragraph width').replace('段落高度','paragraph height').replace('页面宽','page width').replace('页面高','page height')
                    text = one['data']['text']
                    valid_data.append([text,location,label])
    print(f'Collected {len(valid_data)} valid samples out of {len(data)} total')
    random.seed(30)
    random.shuffle(valid_data)
    total = len(valid_data)
    train_num = int(total * train_rate)
    dev_num = int(total * dev_rate)
    test_num = int(total * test_rate)
    train_data = valid_data[:train_num]
    dev_data = valid_data[train_num:train_num+dev_num]
    test_data = valid_data[train_num+dev_num:]
    # convert each split into the format used for saving
    def change_data(kind_data, name):
        cnts = collections.Counter()
        rows = []
        for idx, one_data in enumerate(kind_data):
            content, location, label = one_data
            # label_id = labels2id[label]
            assert label in ['作者','页眉','页脚','段落','标题','参考','表格','图像','公式','其它'], "label is not one of the expected keywords, so the labels in the paper_task_def.yml config file cannot be parsed and an error will occur"
            sample = {'uid': idx, 'premise': content, 'hypothesis': location, 'label': label}
            cnts[label] += 1
            rows.append(sample)
        print(f"Label counts for the {name} split: {cnts}")
        return rows
    papertext_train_data = change_data(train_data, name='train')
    papertext_dev_data = change_data(dev_data, name='dev')
    papertext_test_data = change_data(test_data, name='test')
    return papertext_train_data, papertext_dev_data, papertext_test_data
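# A minimal usage sketch (not part of the source); assumes the Label Studio export already
# sits in export_dir, so the re-download step can be skipped:
# train_rows, dev_rows, test_rows = load_papertext(download_from_label_studio=False)
# Each row is a dict like {'uid': 0, 'premise': <text>, 'hypothesis': <layout info>, 'label': '段落'}.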
|
b0c4747aaf61dce82612162652218ce001a7f17e
| 3,649,800
|
import logging
def set_log_level_for_all_handlers(logger, level=logging.DEBUG):
"""
Set a log level for all the handlers on the provided logger.
"""
logger.setLevel(level)
handlers = logger.handlers
for handler in handlers:
handler.setLevel(level)
return logger
|
c217284e813f46b16d29de5aa2393e06f26981b7
| 3,649,801
|
import json
def _load_cmake_spec():
"""Load and return the CMake spec from disk"""
try:
with open(CMAKE_SPEC_FILE()) as fp:
return json.load(fp)
except (OSError, IOError, ValueError):
return None
|
32d239ec667aa6f24da6f426d0c2dbf1984f3409
| 3,649,802
|
import openai
import pandas as pd
from flask import request, jsonify
from firebase_admin import auth  # assumed providers of request/jsonify/auth used below; GPT3QA is defined elsewhere
def compare_ask_ai_question():
"""
compare_ask_ai_question(): Ask one question about many products (GPT-3)
"""
try:
id_token = request.headers['Authorization']
claims = auth.verify_id_token(id_token)
uid = claims['uid']
data = request.json['data']
question = data['question']
product_ids = data['product_ids']
if claims['Enterprise'] is True:
product_answers = []
for product_id in product_ids:
todo = GPT3QA.document(product_id)
todo_dict = todo.get().to_dict()
if todo_dict['company_id'] == uid:
response = openai.Answer.create(
n=3,
temperature=0.35,
search_model="ada",
model="curie",
question=str(question),
file=todo_dict['gpt3_form_id'],
examples_context="In 2017, U.S. life expectancy was 78.6 years. With a 2019 population of 753,675, it is the largest city in both the state of Washington and the Pacific Northwest",
examples=[["What is human life expectancy in the United States?", "78 years."],
["what is the population of Seattle?", "Seattle's population is 724,305"]],
max_tokens=40,
stop=["\n", "<|endoftext|>"],
)
document_list = response['selected_documents']
df = pd.DataFrame(data=document_list)
text_list = df.nlargest(3, 'score')['text'].tolist()
answer_response = response['answers']
product_answers.append(
{"AI Answers": answer_response, "Reviews": text_list})
else:
return ("You are not authorized to view this page"), 403
return (jsonify(product_answers), 200)
else:
return ("You are not authorized to view this page"), 403
except Exception as e:
return f"An Error Occured: {e}"
|
097f7161fded9b5452b7373d0bcbc1b18ceb6590
| 3,649,803
|
def read():
"""
Read temperature
:return: temperature
"""
# global ds18b20
location = '/sys/bus/w1/devices/' + ds18b20 + '/w1_slave'
tfile = open(location)
text = tfile.read()
tfile.close()
secondline = text.split("\n")[1]
temperaturedata = secondline.split(" ")[9]
temperature = float(temperaturedata[2:])
temperature = temperature / 1000
return temperature
|
7e4c689d5cce6b28c28314eb7e1773e9af1a5061
| 3,649,804
|
import time
def wine(root):
"""Title of Database: Wine recognition data
Updated Sept 21, 1998 by C.Blake : Added attribute information
These data are the results of a chemical analysis of
wines grown in the same region in Italy but derived from three
different cultivars.
The analysis determined the quantities of 13 constituents
found in each of the three types of wines.
Number of Instances
class 1 59
class 2 71
class 3 48
Data storage directory:
root = `/user/.../mydata`
wine data:
`root/wine/wine.txt`
`root/wine/wine.json`
Args:
root: str, Store the absolute path of the data directory.
example:if you want data path is `/user/.../mydata/wine`,
root should be `/user/.../mydata`.
Returns:
Store the absolute path of the data directory, is `root/wine`.
"""
start = time.time()
task_path = assert_dirs(root, 'wine')
url_introduce = 'http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.names'
url_txt = 'http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data'
rq.files(url_introduce, gfile.path_join(task_path, 'introduce.txt'), verbose=0)
rq.table(url_txt, gfile.path_join(task_path, 'wine.txt'),
names=['label', 'Alcohol', 'Malic acid', 'Ash', 'Alcalinity of ash', 'Magnesium',
'Total phenols', 'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins',
'Color intensity', 'Hue', 'OD280/OD315 of diluted wines', 'Proline'])
print('wine dataset download completed, run time %d min %.2f sec' %divmod((time.time()-start), 60))
return task_path
|
f2a9a3b66b276b563dc03919becc326f35d77b3a
| 3,649,805
|
def initialize_scenario_data():
"""Will initialize the Scenario Data.
:return an empty ScenarioData named tuple
:rtype ScenarioData
"""
actors = {}
companies = {}
scenario_data = ScenarioData(actors, companies)
return scenario_data
|
4bbb26b84abef89fc6636bd382d0308cbc8e7573
| 3,649,806
|
def dynamicMass(bulk_density, lat, lon, height, jd, velocity, decel, gamma=1.0, shape_factor=1.21):
""" Calculate dynamic mass at the given point on meteor's trajectory.
Either a single set of values can be given (i.e. every argument is a float number), or all arguments
must be numpy arrays.
Arguments:
bulk_density: [float] Bulk density of the meteoroid in kg/m^3.
lat: [float] Latitude of the meteor (radians).
lon: [float] Longitude of the meteor (radians).
height: [float] Height of the meteor (meters).
jd: [float] Julian date of the meteor.
velocity: [float] Velocity of the meteor (m/s).
decel: [float] Deceleration in m/s^2.
Keyword arguments:
gamma: [float] Drag coefficient. 1 by default.
shape_factor: [float] Shape factor for the body. 1.21 (sphere) by default. Other values:
- sphere = 1.21
- hemisphere = 1.92
- cube = 1.0
- brick 2:3:5 = 1.55
Return:
dyn_mass: [float] Dynamic mass in kg.
"""
# Calculate the atmosphere density at the given point
atm_dens = getAtmDensity_vect(lat, lon, height, jd)
# Calculate the dynamic mass
dyn_mass = (1.0/(bulk_density**2))*((gamma*(velocity**2)*atm_dens*shape_factor)/decel)**3
return dyn_mass
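# Illustrative back-of-the-envelope check (not from the source): plugging an assumed atmosphere
# density of ~1e-4 kg/m^3 into the formula above for a 3500 kg/m^3 meteoroid moving at 30 km/s
# and decelerating at 500 m/s^2:
rho_body, rho_atm, v, a, gamma_c, A = 3500.0, 1e-4, 30e3, 500.0, 1.0, 1.21
dyn_mass_example = (1.0 / rho_body**2) * ((gamma_c * v**2 * rho_atm * A) / a)**3
print(dyn_mass_example)  # ~0.8 kg with these illustrative numbers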
|
48920ecaef4c039672a387f4da45297861b6da56
| 3,649,807
|
def input_fn_tfrecords(files_name_pattern, num_epochs, batch_size, mode):
"""
Input functions which parses TFRecords.
:param files_name_pattern: File name to TFRecords.
:param num_epochs: Number of epochs.
:param batch_size: Batch size.
:param mode: Input function mode.
:return: features and label.
"""
return tf.data.experimental.make_batched_features_dataset(
file_pattern=files_name_pattern,
batch_size=batch_size,
features=get_metadata().schema.as_feature_spec(),
reader=tf.data.TFRecordDataset,
num_epochs=num_epochs,
shuffle=True if mode == tf.estimator.ModeKeys.TRAIN else False,
shuffle_buffer_size=1 + (batch_size * 2),
prefetch_buffer_size=1,
)
|
bd2b5bf41c2ea9fbb28d7e2cdc5c8f22e8bbac93
| 3,649,808
|
def validate(number):
"""Check if the number provided is a valid RUC number. This checks the
length, formatting, check digit and check sum."""
number = compact(number)
if len(number) != 13:
raise InvalidLength()
if not number.isdigit():
raise InvalidFormat()
if number[:2] < '01' or number[:2] > '24':
raise InvalidComponent() # invalid province code
if number[2] < '6':
# 0..5 = natural RUC: CI plus establishment number
if number[-3:] == '000':
raise InvalidComponent() # establishment number wrong
ci.validate(number[:10])
elif number[2] == '6':
# 6 = public RUC
if number[-4:] == '0000':
raise InvalidComponent() # establishment number wrong
if _checksum(number[:9], (3, 2, 7, 6, 5, 4, 3, 2, 1)) != 0:
raise InvalidChecksum()
elif number[2] == '9':
# 9 = juridical RUC
if number[-3:] == '000':
raise InvalidComponent() # establishment number wrong
if _checksum(number[:10], (4, 3, 2, 7, 6, 5, 4, 3, 2, 1)) != 0:
raise InvalidChecksum()
else:
raise InvalidComponent() # third digit wrong
return number
|
c09602c8b3301c6f1d4d467a1b7bfd607656c436
| 3,649,809
|
def parse_raw(data: bytes) -> dict:
"""
Parse the contents of an environment retrieved from flash or memory
and provide an equivalent dictionary.
The provided *data* should begin at the start of the variable definitions.
It **must not** contain the ``env_t`` metadata, such as the CRC32 word
and the ``flags`` value (only present when compiled with
``CONFIG_SYS_REDUNDAND_ENVIRONMENT``).
A :py:exc:`ValueError` is raised if no environment variables are found.
"""
results = {}
regex = raw_var_regex()
for match in regex.finditer(data):
name = match.group('name').decode('ascii')
value = match.group('value').decode('ascii')
results[name] = value
if not results:
raise ValueError('No environment variables found')
return results
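# Minimal usage sketch (not from the source): U-Boot stores the environment as NUL-terminated
# "NAME=VALUE" pairs, so a plausible stand-in for raw_var_regex() could look like this.
import re
def _example_raw_var_regex():
    # hypothetical replacement for raw_var_regex(); each pair ends with b'\x00'
    return re.compile(rb'(?P<name>[^=\x00]+)=(?P<value>[^\x00]*)\x00')
sample = b'bootdelay=2\x00baudrate=115200\x00\x00'
print({m.group('name').decode('ascii'): m.group('value').decode('ascii')
       for m in _example_raw_var_regex().finditer(sample)})
# -> {'bootdelay': '2', 'baudrate': '115200'}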
|
c40c08a099d7468a4ec19da90ce9062d8ddd6ed1
| 3,649,810
|
from typing import List
def _list_registered_paths() -> List[str]:
"""List available paths registered to this service."""
paths = []
for rule in application.url_map.iter_rules():
rule = str(rule)
if rule.startswith("/api/v1"):
paths.append(rule)
return paths
|
56f27aa4b33191cbd779e0e173295431670d26ab
| 3,649,811
|
def input_fn(request_body, request_content_type):
"""An input_fn that loads a pickled numpy array"""
if request_content_type == "application/python-pickle":
array = np.load(BytesIO(request_body), allow_pickle=True)
return array
else:
raise Exception("Please provide 'application/python-pickle' as a request content type")
|
0f6387dffc3ade2097888a92ef1af99f4d367ef8
| 3,649,812
|
def game(x_train, x_test, y_train, y_test, algo='rf', show_train_scores=True):
    """Standard algorithms: fit and return scores.
    * Default random state is set to 192 when possible.
    * Available models - dc, rf, gb, knn, mc_ovo_rf, mc_ova_rf
    """
    if algo == 'dc':
        clf = DummyClassifier(strategy='most_frequent', random_state=192)
    elif algo == 'rf':
        clf = RandomForestClassifier(n_jobs=-1, random_state=192)
    elif algo == 'gb':
        clf = GradientBoostingClassifier(random_state=192)
    elif algo == 'knn':
        clf = KNeighborsClassifier()
    elif algo == 'mc_ovo_rf':
        clf = OneVsOneClassifier(RandomForestClassifier(n_jobs=-1,
                                                        random_state=192))
    elif algo == 'mc_ova_rf':
        clf = OneVsRestClassifier(RandomForestClassifier(n_jobs=-1,
                                                         random_state=192))
    else:
        print('improper model name, please check help')
        return 0, 0
    clf = clf.fit(x_train, y_train)
    # defaults if the user does not opt into training scores
    ac_score, f1_score = 0, 0
    if show_train_scores:
        print('Training Scores')
        ac_score, f1_score = check_metric(clf.predict(x_train), y_train)
    print('\nTesting Scores')
    ac_score1, f1_score1 = check_metric(clf.predict(x_test), y_test)
    # keys aligned with the variables: ac_score/f1_score come from the training set,
    # ac_score1/f1_score1 from the test set
    ret = {'classifier': clf,
           'train_ac_score': ac_score,
           'train_f1_score': f1_score,
           'test_ac_score': ac_score1,
           'test_f1_score': f1_score1,
           }
    return ret
|
9a225f04d5d883bc70c4f4f9036ddfee7b206dbc
| 3,649,813
|
def get_convolutional_model(vocab_size: int,
input_length: int,
num_classes: int,
embedding_size: int=300,
model_size: str='small'
) -> Model:
"""Create a character convolutional model
Parameters
----------
vocab_size: the number of characters in the vocabulary
input_length: the size of the input sequences (must be least 160)
num_classes: the number of output classes
embedding_size: the vector size of character representations
model_size: 'large' or 'small' feature sizes
Returns
-------
tf.keras.Model: an uncompiled keras model
"""
if model_size.lower() == 'small':
conv_filters = 256
dnn_size = 1024
elif model_size.lower() == 'large':
conv_filters = 1024
dnn_size = 2048
else:
raise ValueError("model size must be either 'small' or 'large'")
if input_length < 160:
raise ValueError('The input sequences must be at least 160 characters long')
model = Sequential()
model.add(layers.Embedding(
vocab_size,
embedding_size,
input_length=input_length,
name='character_embedding'
))
model.add(layers.Dropout(0.2, name='input_dropout'))
model.add(layers.Conv1D(
filters=conv_filters,
kernel_size=7,
activation='relu',
name='conv_1'))
model.add(layers.MaxPooling1D(
pool_size=3,
name='pooling_1'))
model.add(layers.Conv1D(
filters=conv_filters,
kernel_size=7,
activation='relu',
name='conv_2'))
model.add(layers.MaxPooling1D(
pool_size=3,
name='pooling_2'))
model.add(layers.Conv1D(
filters=conv_filters,
kernel_size=3,
activation='relu',
name='conv_3'))
model.add(layers.Conv1D(
filters=conv_filters,
kernel_size=3,
activation='relu',
name='conv_4'))
model.add(layers.Conv1D(
filters=conv_filters,
kernel_size=3,
activation='relu',
name='conv_5'))
model.add(layers.Conv1D(
filters=conv_filters,
kernel_size=7,
activation='relu',
name='conv_6'))
model.add(layers.MaxPooling1D(
pool_size=3,
name='pooling_3'))
model.add(layers.Flatten(name='flatten'))
model.add(layers.Dense(dnn_size,
activation='relu',
name='dense_out_1'))
model.add(layers.Dropout(0.5, name='post_dropout_1'))
model.add(layers.Dense(dnn_size,
activation='relu',
name='dense_out_2'))
model.add(layers.Dropout(0.5, name='post_dropout_2'))
model.add(layers.Dense(num_classes,
activation='softmax',
name='output'))
return model
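# Usage sketch (illustrative, not from the source); assumes `Sequential`, `layers` and `Model`
# come from tf.keras as the snippet implies, and that inputs are integer-encoded characters:
# model = get_convolutional_model(vocab_size=70, input_length=1014, num_classes=4, model_size='small')
# model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])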
|
aafd9fe6141a05c433508ff0a9583d9c42a7de5b
| 3,649,814
|
def parse_test_config(doc):
""" Get the configuration element. """
test_config = doc.documentElement
if test_config.tagName != 'configuration':
raise RuntimeError('expected configuration tag at root')
return test_config
|
c61c2f4e43c5501c461bb92b63609162b2918860
| 3,649,815
|
import textwrap
def _get_control_vars(control_vars):
"""
Create the section of control variables
Parameters
----------
control_vars: str
Functions to define control variables.
Returns
-------
text: str
Control variables section and header of model variables section.
"""
text = textwrap.dedent("""
##########################################################################
# CONTROL VARIABLES #
##########################################################################
def _init_outer_references(data):
for key in data:
__data[key] = data[key]
def time():
return __data['time']()
""")
text += control_vars
text += textwrap.dedent("""
##########################################################################
# MODEL VARIABLES #
##########################################################################
""")
return text
|
614a6ca5bc8ac7354f63bfceabaff4eb4b93208a
| 3,649,816
|
def echo():
"""Echo data"""
return request.get_data() + '\n'
|
75aad93e46925ed086be87b18a96d756fa1c6425
| 3,649,817
|
import os
import csv
def get_ids():
"""
Get all SALAMI IDs related to RWC
"""
# Filename for SALAMI RWC metadata
metadata_file = os.path.join(
dpath.SALAMI, 'metadata', 'id_index_rwc.csv')
ids = []
with open(metadata_file, "r") as rwc_file:
reader = csv.reader(rwc_file)
next(reader) #skip header
for row in reader:
ids.append(int(row[0]))
ids = ids[1:] # First one has no annotations!?
return ids
|
ad55be00b1a43f62c51b9ce6bb025bda9bdb1756
| 3,649,818
|
import logging
def _get_signature_def(signature_def_key, export_dir, tags):
"""Construct a `SignatureDef` proto."""
signature_def_key = (
signature_def_key or
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)
metagraph_def = saved_model_cli.get_meta_graph_def(export_dir, tags)
try:
signature_def = signature_def_utils.get_signature_def_by_key(
metagraph_def,
signature_def_key)
except ValueError as e:
try:
formatted_key = _DEFAULT_INPUT_ALTERNATIVE_FORMAT.format(
signature_def_key)
signature_def = signature_def_utils.get_signature_def_by_key(
metagraph_def, formatted_key)
logging.warning('Could not find signature def "%s". '
'Using "%s" instead', signature_def_key, formatted_key)
except ValueError:
raise ValueError(
'Got signature_def_key "{}". Available signatures are {}. '
'Original error:\n{}'.format(
signature_def_key, list(metagraph_def.signature_def), e))
return signature_def
|
d0bedd323fb68ad41553034a08b64dc73f85faf3
| 3,649,819
|
def illuminance_to_exposure_value(E, S, c=250):
"""
Computes the exposure value :math:`EV` from given scene illuminance
:math:`E` in :math:`Lux`, *ISO* arithmetic speed :math:`S` and
*incident light calibration constant* :math:`c`.
Parameters
----------
E : array_like
Scene illuminance :math:`E` in :math:`Lux`.
S : array_like
*ISO* arithmetic speed :math:`S`.
c : numeric, optional
*Incident light calibration constant* :math:`c`.
With a flat receptor, *ISO 2720:1974* recommends a range for
:math:`c`. of 240 to 400 with illuminance in :math:`Lux`; a value of
250 is commonly used. With a hemispherical receptor, *ISO 2720:1974*
recommends a range for :math:`c` of 320 to 540 with illuminance in
:math:`Lux`; in practice, values typically are between 320 (Minolta)
and 340 (Sekonic).
Returns
-------
ndarray
Exposure value :math:`EV`.
Notes
-----
- The exposure value :math:`EV` indicates a combination of camera
settings rather than the focal plane exposure, i.e. luminous exposure,
photometric exposure, :math:`H`. The focal plane exposure is
time-integrated illuminance.
References
----------
:cite:`Wikipediabj`
Examples
--------
>>> illuminance_to_exposure_value(2.5, 100)
0.0
"""
E = as_float_array(E)
S = as_float_array(S)
c = as_float_array(c)
EV = np.log2(E * S / c)
return EV
|
7c03f816e801f04735687a2a2adb6f4969877bb2
| 3,649,820
|
from typing import Counter
def feedback(code, guess):
"""
Return a namedtuple Feedback(blacks, whites) where
blacks is the number of pegs from the guess that
are correct in both color and position and
whites is the number of pegs of the right color but wrong position.
"""
blacks = sum(g == c for g, c in zip(guess, code))
whites = sum((Counter(guess) & Counter(code)).values()) - blacks
return Feedback(blacks, whites)
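# Worked example (illustrative; assumes Feedback is the namedtuple the docstring describes):
from collections import Counter, namedtuple
Feedback = namedtuple('Feedback', ['blacks', 'whites'])
code, guess = (1, 1, 2, 2), (1, 2, 1, 3)
blacks = sum(g == c for g, c in zip(guess, code))                  # 1 exact match (first peg)
whites = sum((Counter(guess) & Counter(code)).values()) - blacks   # 2 right colour, wrong position
print(Feedback(blacks, whites))  # Feedback(blacks=1, whites=2)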
|
bab57da2d7c60869988d6c24b69b8eab1c7da173
| 3,649,821
|
import os
import logging
import functools
def Logging(logfile=None):
"""Custom logging function.
Args:
logfile: The name of log files. Log will be stored in logs_dir.
Returns:
The same output of the call function with logging information.
"""
# Create logs_dir if the directory logs is not exist.
logs_dir = 'logs'
if os.path.isdir(logs_dir) is False:
os.mkdir(logs_dir)
def Logging_decorator(func):
# Define logger, set the logger name as func.__name__
logger = logging.getLogger(func.__name__) # run logger.name to check
# Set level for logger
logger.setLevel(logging.DEBUG)
# Define the handler and formatter for console logging
consoleHandler = logging.StreamHandler() # Define StreamHandler
consoleHandler.setLevel(logging.DEBUG) # Set level
concolsFormatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s') # Define formatter
consoleHandler.setFormatter(concolsFormatter) # # Set formatter
logger.addHandler(consoleHandler) # Add handler to logger
# Define the handler and formatter for file logging
if logfile is not None:
fileHandler = logging.FileHandler(f'{logs_dir}/{logfile}.log') # Define FileHandler
fileHandler.setLevel(logging.WARNING) # Set level
fileFormatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') # Define formatter
fileHandler.setFormatter(fileFormatter) # Set formatter
logger.addHandler(fileHandler) # Add handler to logger
@functools.wraps(func)
def wrapper_decorator(*args, **kwargs):
# Before running func
#logger.debug(f"{func.__name__} - {args} - {kwargs}")
logger.debug(f"{func.__name__}({args}, {kwargs})")
try:
output = func(*args, **kwargs)
except Exception:
logger.exception(f"{func.__name__}({args}, {kwargs})")
raise  # re-raise so callers still see the error; otherwise `output` would be undefined below
# After running func
return output
return wrapper_decorator
return Logging_decorator
|
cbb89ce22a2f1d8234dcb30430fd2f9a80421a5c
| 3,649,822
|
from datetime import date
from .models import PlacedDateBet
def find_winning_dates(placed_bets, winning_date):
"""
Finds the placed bets with the dates closest to the winning date
:param placed_bets: iterable of PlacedDateBet
:param winning_date: datetime.date
:return: list of winning PlacedDateBets
"""
assert isinstance(winning_date, date)
dates = []
for placed_bet in placed_bets:
assert isinstance(placed_bet, PlacedDateBet)
dates.append(placed_bet.placed_date)
timedeltas = []
for date in dates:
timedeltas.append(abs(winning_date - date))
closest = min(timedeltas)
indices = []
for i in range(0, len(timedeltas)):
if timedeltas[i] == closest:
indices.append(i)
winning_bets = []
for index in indices:
winning_bets.append(placed_bets[index])
return winning_bets
|
73315f2bebfcc0290f9372af935ded78011c7d4b
| 3,649,823
|
def create_greedy_policy(Q):
"""
Creates a greedy policy based on Q values.
Args:
Q: A dictionary that maps from state -> action values
Returns:
A function that takes an observation as input and returns a vector
of action probabilities.
"""
def policy_fn(observation):
a_probs = np.zeros(len(Q[observation]))
a_probs[np.argmax(Q[observation])] = 1.0
return a_probs
return policy_fn
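# Tiny usage sketch (illustrative; assumes the snippet's `np` is numpy):
import numpy as np
Q = {'s0': np.array([0.1, 0.5, 0.2])}
policy = create_greedy_policy(Q)
print(policy('s0'))  # [0. 1. 0.] -- all probability mass on the argmax action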
|
01966964034504454e3be9926236706371c626c8
| 3,649,824
|
def get_tags(rule, method, **options):
"""
gets the valid tags for given rule.
:param pyrin.api.router.handlers.base.RouteBase rule: rule instance to be processed.
:param str method: http method name.
:rtype: list[str]
"""
return get_component(SwaggerPackage.COMPONENT_NAME).get_tags(rule, method, **options)
|
4671d1d9c66934d6b22bee74801d07b30635b3b6
| 3,649,825
|
def get_paybc_transaction_request():
"""Return a stub payment transaction request."""
return {
'clientSystemUrl': 'http://localhost:8080/abcd',
'payReturnUrl': 'http://localhost:8081/xyz'
}
|
b913438562d4f2b0883e340b48843f9954faa8a4
| 3,649,826
|
def dropout_forward(x, dropout_param):
"""
Performs the forward pass for (inverted) dropout.
Inputs:
- x: Input data, of any shape
- dropout_param: A dictionary with the following keys:
- p: Dropout parameter. We drop each neuron output with probability p.
- mode: 'test' or 'train'. If the mode is train, then perform dropout;
if the mode is test, then just return the input.
- seed: Seed for the random number generator. Passing seed makes this
function deterministic, which is needed for gradient checking but not in
real networks.
Outputs:
- out: Array of the same shape as x.
- cache: A tuple (dropout_param, mask). In training mode, mask is the dropout
mask that was used to multiply the input; in test mode, mask is None.
"""
p, mode = dropout_param['p'], dropout_param['mode']
if 'seed' in dropout_param:
np.random.seed(dropout_param['seed'])
mask = None
out = None
if mode == 'train':
###########################################################################
# TODO: Implement the training phase forward pass for inverted dropout. #
# Store the dropout mask in the mask variable. #
###########################################################################
mask = (np.random.rand(*x.shape)<(1-p))/(1-p)
out = x*mask
###########################################################################
# END OF YOUR CODE #
###########################################################################
elif mode == 'test':
###########################################################################
# TODO: Implement the test phase forward pass for inverted dropout. #
###########################################################################
out = x
###########################################################################
# END OF YOUR CODE #
###########################################################################
cache = (dropout_param, mask)
out = out.astype(x.dtype, copy=False)
return out, cache
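# Minimal usage sketch (illustrative; assumes the snippet's `np` is numpy):
import numpy as np
x = np.random.randn(4, 5)
out_train, cache = dropout_forward(x, {'p': 0.3, 'mode': 'train', 'seed': 0})
out_test, _ = dropout_forward(x, {'p': 0.3, 'mode': 'test'})
# In train mode roughly 30% of the activations are zeroed and the survivors are scaled by
# 1/(1-p) (inverted dropout), so E[out_train] matches out_test; in test mode x passes through.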
|
4d4442ab5e612888628f43574e60b53342873d83
| 3,649,827
|
def pretreatment(filename):
"""pretreatment"""
poems = []
file = open(filename, "r")
for line in file: #every line is a poem
#print(line)
title, poem = line.strip().split(":") #get title and poem
poem = poem.replace(' ','')
if '_' in poem or '《' in poem or '[' in poem or '(' in poem or '(' in poem:
continue
if len(poem) < 10 or len(poem) > 128: #filter poem
continue
poem = '[' + poem + ']' #add start and end signs
poems.append(poem)
print("唐诗总数: %d"%len(poems))
#counting words
allWords = {}
for poem in poems:
for word in poem:
if word not in allWords:
allWords[word] = 1
else:
allWords[word] += 1
#'''
# erase words which are not common
erase = []
for key in allWords:
if allWords[key] < 2:
erase.append(key)
for key in erase:
del allWords[key]
#'''
wordPairs = sorted(allWords.items(), key = lambda x: -x[1])
words, a= zip(*wordPairs)
#print(words)
words += (" ", )
wordToID = dict(zip(words, range(len(words)))) #word to ID
wordTOIDFun = lambda A: wordToID.get(A, len(words))
poemsVector = [([wordTOIDFun(word) for word in poem]) for poem in poems] # poem to vector
#print(poemsVector)
#padding length to batchMaxLength
batchNum = (len(poemsVector) - 1) // batchSize
X = []
Y = []
#create batch
for i in range(batchNum):
batch = poemsVector[i * batchSize: (i + 1) * batchSize]
maxLength = max([len(vector) for vector in batch])
temp = np.full((batchSize, maxLength), wordTOIDFun(" "), np.int32)
for j in range(batchSize):
temp[j, :len(batch[j])] = batch[j]
X.append(temp)
temp2 = np.copy(temp) #copy!!!!!!
temp2[:, :-1] = temp[:, 1:]
Y.append(temp2)
return X, Y, len(words) + 1, wordToID, words
|
5aa85b3bda72d3efb3067ebcc06d7f4038d9990e
| 3,649,828
|
def forward_fdm(order, deriv, adapt=1, **kw_args):
"""Construct a forward finite difference method.
Further takes in keyword arguments of the constructor of :class:`.fdm.FDM`.
Args:
order (int): Order of the method.
deriv (int): Order of the derivative to estimate.
adapt (int, optional): Number of recursive calls to higher-order
derivatives to dynamically determine the step size. Defaults to `1`.
Returns:
:class:`.fdm.FDM`: The desired finite difference method.
"""
return FDM(
list(range(order)),
deriv,
bound_estimator=_construct_bound_estimator(
forward_fdm, order, adapt, **kw_args
),
**kw_args
)
|
7b5c46fcdfc1a186079b2a4f94a129b8f79dbfde
| 3,649,829
|
import torch
def lrp_linear_torch(hin, w, b, hout, Rout, bias_nb_units, eps, bias_factor=0.0, debug=False):
"""
LRP for a linear layer with input dim D and output dim M.
Args:
- hin: forward pass input, of shape (D,)
- w: connection weights, of shape (D, M)
- b: biases, of shape (M,)
- hout: forward pass output, of shape (M,) (unequal to np.dot(w.T,hin)+b if more than one incoming layer!)
- Rout: relevance at layer output, of shape (M,)
- bias_nb_units: total number of connected lower-layer units (onto which the bias/stabilizer contribution is redistributed for sanity check)
- eps: stabilizer (small positive number)
- bias_factor: set to 1.0 to check global relevance conservation, otherwise use 0.0 to ignore bias/stabilizer redistribution (recommended)
Returns:
- Rin: relevance at layer input, of shape (D,)
"""
sign_out = torch.where(hout.cpu() >= 0 , torch.Tensor([1.]), torch.Tensor([-1.])).view(1,-1) # shape (1, M)
numer = (w * hin.view(-1,1)).cpu() + ( bias_factor * (b.view(1,-1)*1. + eps*sign_out*1.) / bias_nb_units ) # shape (D, M)
# Note: here we multiply the bias_factor with both the bias b and the stabilizer eps since in fact
# using the term (b[na,:]*1. + eps*sign_out*1.) / bias_nb_units in the numerator is only useful for sanity check
# (in the initial paper version we were using (bias_factor*b[na,:]*1. + eps*sign_out*1.) / bias_nb_units instead)
denom = hout.view(1,-1) + (eps*sign_out*1.) # shape (1, M)
message = (numer/denom) * Rout.view(1,-1) # shape (D, M)
Rin = message.sum(axis=1) # shape (D,)
if debug:
print("local diff: ", Rout.sum() - Rin.sum())
# Note:
# - local layer relevance conservation if bias_factor==1.0 and bias_nb_units==D (i.e. when only one incoming layer)
# - global network relevance conservation if bias_factor==1.0 and bias_nb_units set accordingly to the total number of lower-layer connections
# -> can be used for sanity check
return Rin
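# Relevance-conservation sanity check (illustrative; assumes the function above is in scope):
import torch
D, M = 4, 3
hin, w, b = torch.randn(D), torch.randn(D, M), torch.randn(M)
hout = w.t() @ hin + b                      # single incoming layer, so hout == w.T @ hin + b
Rout = torch.randn(M).abs()
Rin = lrp_linear_torch(hin, w, b, hout, Rout, bias_nb_units=D, eps=1e-6, bias_factor=1.0)
print(float(Rout.sum() - Rin.sum()))        # ~0: relevance is conserved in this setting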
|
1939ef92f3c06a79148e41397f0a3668b273d716
| 3,649,830
|
import requests
def get_list_by_ingredient(ingredient):
""" this should return data for filtered recipes by ingredient """
res = requests.get(f'{API_URL}/{API_KEY}/filter.php', params={"i":ingredient})
return res.json()
|
5bb34ffe635499a93decc5d4c080c638ee92c1b5
| 3,649,831
|
def chk_sudo():
"""\
Type: decorator.
The command can only be executed if the author is the owner or has the required permissions.
"""
async def predicate(ctx):
if is_sudoers(ctx.author):
return True
await ctx.message.add_reaction("🛑")
raise excepts.NotMod()
return commands.check(predicate)
|
45ddad31e761c9cf227a19fb78e3b3f52414c966
| 3,649,832
|
def have_same_items(list1, list2):
    """ Checks whether list1 and list2 consist of the same number
    of the same elements
    Parameters
    ----------
    list1 : list[int]
        array of unique elements sorted in ascending order
    list2 : list[int]
        array of arbitrary length containing arbitrary numbers
    Returns
    -------
    bool
    """
    # list1 is sorted and unique, so sorting list2 and comparing checks both
    # the element values and their count
    return sorted(list2) == list1
|
2973a1961e25686fcbd2003dd366429cbd4c67eb
| 3,649,833
|
def analyze(geometry_filenames, mode='global', training_info=None, stride=None,
box_size=None, configs=None, descriptor=None, model=None,
format_=None, descriptors=None, save_descriptors=False,
save_path_descriptors=None, nb_jobs=-1, **kwargs):
"""
Apply ARISE to given list of geometry files.
This function is key to reproduce the single- and polycrystalline predictions in:
[1] A. Leitherer, A. Ziletti, and L.M. Ghiringhelli, arXiv ??? (2021).
Parameters:
geometry_filenames: list
list of geometry files to be analyzed.
mode: str (default='global')
If 'global', a global descriptor will be calculated and a global label (plus uncertainty) predicted.
If 'local', the strided pattern matching algorithm introduced in [1] is applied.
stride: float (default=None)
Step size in strided pattern matching algorithm. Only relevant if mode='local'.
If no value is specified, a stride of 4 Angstroem in each direction, for each of the geometry files
is used.
box_size: float (default=None)
Size of the box employed in strided pattern matching algorithm. Only relevant if mode='local'.
If no value is specified, a box size of 16 Angstroem is used, for each of the geometry files.
configs: object (default=None)
configuration object, defining folder structure. For more details, please have a look at the function set_configs from ai4materials.utils.utils_config
descriptor: ai4materials descriptor object (default=None)
If None, the SOAP descriptor as implemented in the quippy package (see ai4materials.descritpors.quippy_soap_descriptor)
with the standard settings employed in [1] will be used.
model: str, (default=None)
If None, the model of [1] will be automatically loaded. Otherwise the path to the model h5 file needs to be specified alongside
information on the training set (in particular, the relation between integer class labels and
class labels).
training_info: path to dict (default=None)
Information on the relation between int labels and structure labels. If model=None, training information
of [1] will be loaded regardless of this keyword. If model not None,
then specification of training_info is mandatory. The structure of this dictionary
is defined as dict = {'data': ['nb_classes': 108,
'classes': [text label class 0, text label class 1, ... ie ordered class labels]]}
format_: str, optional (default=None)
format of geometry files. If not specified, the input files are assumed to have aims format in case of
global mode, and xyz format in case of local mode.
descriptors: path to desc or numpy array, optional (default=None)
If mode=local, then this must be a path to a desc file containing the descriptors.
If mode=global, then this must be a numpy array containing the descriptors.
save_descriptors: bool, optional (default=False)
Decides whether to save calculated descriptors into specified savepath or not (only for mode=local).
save_path_descriptors: str, optional (default=None)
path into which descriptors are saved (for mode=global)
"""
if not model == None:
if training_info == None:
raise ValueError("No information on the relation between int and str class labels is provided.")
#if not (type(model) == str or type(model)==keras.engine.training.Model):
# raise NotImplementedError("Either specifiy path or model loaded from h5 via keras.models.load_model")
if stride == None:
stride = [[4.0, 4.0, 4.0] for _ in range(len(geometry_filenames))]
if box_size == None:
box_size = [16.0 for _ in range(len(geometry_filenames))]
if format_ == None:
if mode == 'global':
format_ = 'aims'
elif mode == 'local':
format_ = 'xyz'
if not model == None:
try:
model_file_ending = model.split('.')[-1]
except AttributeError:
raise ValueError("Model must be a path to a h5 file or None. In the latter case, a pretrained model is loaded.")
# split('.') drops the dot, so compare against 'h5'; the original compared to '.h5' and always failed
if model_file_ending != 'h5':
raise NotImplementedError("Model path must link to h5 file.")
if mode == 'global':
predictions, uncertainty = global_(geometry_filenames, descriptor=descriptor,
model=model, format_=format_,
descriptors=descriptors, save_descriptors=save_descriptors,
save_path_descriptors=save_path_descriptors, **kwargs)
elif mode == 'local':
predictions, uncertainty = local(geometry_filenames, stride, box_size, configs,
descriptor=descriptor, model=model, format_=format_,
desc_filename=descriptors, nb_jobs=nb_jobs, **kwargs)
else:
raise ValueError("Argument 'mode' must either be 'local' or 'global'.")
return predictions, uncertainty
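# Usage sketch (illustrative): strided pattern matching on a single xyz file with the defaults
# described above (4 Angstroem stride, 16 Angstroem boxes, pretrained model of [1]):
# predictions, uncertainty = analyze(['my_structure.xyz'], mode='local', configs=configs)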
|
eeec9ac33a91b41b8a90f825aef0fc7605bdbf58
| 3,649,834
|
def get_params(name, seed):
"""Some default parameters.
Note that this will initially include training parameters that you won't need for metalearning since we have our own training loop."""
configs = []
overrides = {}
overrides["dataset_reader"] = {"lazy": True}
configs.append(Params(overrides))
configs.append(
Params({"random_seed": seed, "numpy_seed": seed, "pytorch_seed": seed})
)
configs.append(Params.from_file("config/ud/en/udify_bert_finetune_en_ewt.json"))
configs.append(Params.from_file("config/udify_base.json"))
return util.merge_configs(configs)
|
02d70be07a2d7afe793e657d6fb38fefe99171ce
| 3,649,835
|
def rgb2hex(rgb):
"""Converts an RGB 3-tuple to a hexadeximal color string.
EXAMPLE
-------
>>> rgb2hex((0,0,255))
'#0000FF'
"""
return ('#%02x%02x%02x' % tuple(rgb)).upper()
|
4c3323e34fcd2c1b4402ebe5f433c5fd9320cce9
| 3,649,836
|
from typing import Union
import re
from typing import Optional
def path_regex(
path_regex: Union[str, re.Pattern], *, disable_stage_removal: Optional[bool] = False
):
"""Validate the path in the event against the given path pattern.
The following APIErrorResponse subclasses are used:
PathNotFoundError: When the path doesn't match.
Args:
path: A regular expression to validate against.
disable_stage_removal (bool): Preserve the original path with stage.
"""
return _get_decorator(
validate_path_regex,
path_regex=path_regex,
disable_stage_removal=disable_stage_removal,
update_event=True,
)
|
5c54d71a20fa7795b9e6eefa508de5b8516378a6
| 3,649,837
|
async def root():
"""Health check"""
return {"status": "OK"}
|
80c3c7ff9e1abebbb9f38dc11a5ecd5a7fe5414a
| 3,649,838
|
from typing import Dict
from typing import List
def build_foreign_keys(
resources: Dict[str, dict],
prune: bool = True,
) -> Dict[str, List[dict]]:
"""Build foreign keys for each resource.
A resource's `foreign_key_rules` (if present) determines which other resources will
be assigned a foreign key (`foreign_keys`) to the reference's primary key:
* `fields` (List[List[str]]): Sets of field names for which to create a foreign key.
These are assumed to match the order of the reference's primary key fields.
* `exclude` (Optional[List[str]]): Names of resources to exclude.
Args:
resources: Resource descriptors by name.
prune: Whether to prune redundant foreign keys.
Returns:
Foreign keys for each resource (if any), by resource name.
* `fields` (List[str]): Field names.
* `reference['resource']` (str): Reference resource name.
* `reference['fields']` (List[str]): Reference resource field names.
Examples:
>>> resources = {
... 'x': {
... 'schema': {
... 'fields': ['z'],
... 'primary_key': ['z'],
... 'foreign_key_rules': {'fields': [['z']]}
... }
... },
... 'y': {
... 'schema': {
... 'fields': ['z', 'yy'],
... 'primary_key': ['z', 'yy'],
... 'foreign_key_rules': {'fields': [['z', 'zz']]}
... }
... },
... 'z': {'schema': {'fields': ['z', 'zz']}}
... }
>>> keys = build_foreign_keys(resources)
>>> keys['z']
[{'fields': ['z', 'zz'], 'reference': {'resource': 'y', 'fields': ['z', 'yy']}}]
>>> keys['y']
[{'fields': ['z'], 'reference': {'resource': 'x', 'fields': ['z']}}]
>>> keys = build_foreign_keys(resources, prune=False)
>>> keys['z'][0]
{'fields': ['z'], 'reference': {'resource': 'x', 'fields': ['z']}}
"""
tree = _build_foreign_key_tree(resources)
keys = {}
for name in tree:
firsts = []
followed = []
for fields in tree[name]:
path = _traverse_foreign_key_tree(tree, name, fields)
firsts.append(path[0])
followed.extend(path[1:])
keys[name] = firsts
if prune:
# Keep key if not on path of other key
keys[name] = [key for key in keys[name] if key not in followed]
return keys
|
96cb032a03445400eeee57a23a4024ae06f62573
| 3,649,839
|
import ipaddress
def port_scan(ip):
"""Run a scan to determine what services are responding.
Returns nmap output in JSON format.
"""
# validate input
valid_ip = ipaddress.ip_address(ip)
# nmap requires a `-6` option if the target is IPv6
v6_flag = '-6 ' if valid_ip.version == 6 else ''
nmap_command = f'sudo nmap {v6_flag}{valid_ip} --stats-every 60 -oX - ' \
'-R -Pn -T4 --host-timeout 120m --max-scan-delay 5ms ' \
'--max-retries 2 --min-parallelism 32 ' \
'--defeat-rst-ratelimit -sV -O -sS -p1-65535'
completed_process = run_it(nmap_command)
xml_string = completed_process.stdout.decode()
data = bf.data(fromstring(xml_string))
return data
|
c33cd56635338d3476e4ce5348376a1f6b2cfd68
| 3,649,840
|
def create_table(p, table_name, schema):
"""Create a new Prism table.
Parameters
----------
p : Prism
Instantiated Prism class from prism.Prism()
table_name : str
The name of the table to obtain details about. If the default value
of None is specified, details regarding first 100 tables is returned.
schema : list
A list of dictionaries containing the schema
Returns
-------
If the request is successful, a dictionary containing information about
the table is returned.
"""
p.create_bearer_token()
table = p.create_table(table_name, schema=schema)
return table
|
43c8c789d4e212d2d98d68f4f22e3f0fb0a97552
| 3,649,841
|
def get_args():
"""
Parses and processes args, returning the modified arguments as a dict.
This is to maintain backwards compatibility with the old way of parsing
arguments.
"""
parser = make_parser()
args = parser.parse_args()
process_args(args)
return vars(args)
|
8a6f31bd0c9547a007bdd7644d148e8ba0e126d1
| 3,649,842
|
from typing import Iterable
def run_asm_pprinter(ir: gtirb.IR, args: Iterable[str] = ()) -> str:
"""
Runs the pretty-printer to generate an assembly output.
:param ir: The IR object to print.
:param args: Any additional arguments for the pretty printer.
:returns: The assembly string.
"""
asm, _ = run_asm_pprinter_with_outputput(ir, args)
return asm
|
8d71a4b91f90cb449f65d5c95ec740d78836a071
| 3,649,843
|
import re
def fix_ccdsec(hdu):
""" Fix CCDSEC keywords in image extensions """
section_regexp = re.compile(SECTION_STRING)
# In unbinned space
ccdsec = _get_key_value(hdu, 'CCDSEC')
detsec = _get_key_value(hdu, 'DETSEC')
if None in [ccdsec, detsec]:
raise ValueError("CCDSEC {}; detsec {}".format(ccdsec, detsec))
updated = False
ccd_coords = list(section_regexp.match(ccdsec).groups())
detector_coords = list(section_regexp.match(detsec).groups())
# Y coordinates should match!
if ccd_coords[2:4] != detector_coords[2:4]:
raise ValueError("Y values: {} {}".format(ccdsec, detsec))
# X coordinates maybe wrong
if ccd_coords[0:2] != detector_coords[0:2]:
for i, x in enumerate(detector_coords[0:2]):
offset_x = int(x) - CCDWIDTH
if offset_x <= 0:
if ccd_coords[i] != detector_coords[i]:
# Use DETSEC
ccd_coords[i] = detector_coords[i]
updated = True
else:
# Reset offset to x
offset_x = x
elif offset_x > CCDWIDTH:
updated = True
offset_x -= CCDWIDTH
# update ccd_coords
ccd_coords[i] = offset_x
# Reset CCDSEC
ccdsec = "[{}:{},{}:{}]".format(ccd_coords[0],
ccd_coords[1],
ccd_coords[2],
ccd_coords[3])
hdu.header['CCDSEC'] = ccdsec
return updated
|
1ce3e7e519f47f63f8894c3a29e269ca77d7cf5d
| 3,649,844
|
def reload(hdf):
"""Reload a hdf file, hdf = reload(hdf)"""
filename = hdf.filename
return load(filename)
|
6eb17d171b1181ac4ed974de6c36f83c00e72c57
| 3,649,845
|
def read_image(im_name, n_channel, data_dir='', batch_size=1, rescale=None):
""" function for create a Dataflow for reading images from a folder
This function returns a Dataflow object for images with file
name containing 'im_name' in directory 'data_dir'.
Args:
im_name (str): part of image names (i.e. 'jpg' or 'im_').
n_channel (int): number of channels (3 for color images and 1 for grayscale images)
data_dir (str): directory of images
batch_size (int): number of images read from Dataflow for each batch
rescale (int or list[int]): target size to rescale the image to (an int is treated as a square size)
Returns:
Image (object): batch images can be access by Image.next_batch_dict()['image']
"""
def rescale_im(im, short_side=416):
""" Pre-process for images
images are rescaled so that the shorter side = 224
"""
im = np.array(im)
h, w = im.shape[0], im.shape[1]
if h >= w:
new_w = short_side
im = imagetool.rescale_image(im, (int(h * new_w / w), short_side))
# im = skimage.transform.resize(
# im, (int(h * new_w / w), short_side), preserve_range=True)
else:
new_h = short_side
im = imagetool.rescale_image(im, (short_side, int(w * new_h / h)))
# im = skimage.transform.resize(
# im, (short_side, int(w * new_h / h)), preserve_range=True)
# return im.astype('uint8')
return im
def normalize_im(im, *args):
im = imagetool.rescale_image(im, rescale)
# im = skimage.transform.resize(
# im, rescale, preserve_range=True)
# im = rescale_im(im, short_side=rescale)
im = np.array(im)
if np.amax(im) > 1:
im = im / 255.
return np.clip(im, 0., 1.)
# if rescale:
# pf_fnc = rescale_im
# else:
# pf_fnc = normalize_im
if isinstance(rescale, int):
rescale = [rescale, rescale]
else:
assert len(rescale) == 2
image_data = Image(
im_name=im_name,
data_dir=data_dir,
n_channel=n_channel,
shuffle=False,
batch_dict_name=['image', 'shape'],
pf_list=(normalize_im,()))
image_data.setup(epoch_val=0, batch_size=batch_size)
return image_data
|
017878c8afce1be73160b338407a920c4f01a286
| 3,649,846
|
def build_optimizer(config, model):
"""
Build optimizer, set weight decay of normalization to 0 by default.
"""
skip = {}
skip_keywords = {}
if hasattr(model, 'no_weight_decay'):
skip = model.no_weight_decay()
if hasattr(model, 'no_weight_decay_keywords'):
skip_keywords = model.no_weight_decay_keywords()
parameters = set_weight_decay(model, skip, skip_keywords)
opt_lower = config.TRAIN.OPTIMIZER.NAME.lower()
optimizer = None
if opt_lower == 'sgd':
optimizer = optim.SGD(parameters, momentum=config.TRAIN.OPTIMIZER.MOMENTUM, nesterov=True,
lr=config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY)
elif opt_lower == 'adamw':
optimizer = optim.AdamW(parameters, eps=config.TRAIN.OPTIMIZER.EPS, betas=config.TRAIN.OPTIMIZER.BETAS,
lr=config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY)
return optimizer
|
83a09ed34c24caff7367ba1e43e051f362dfa85c
| 3,649,847
|
def ising2d_worm(T_range, mcsteps, L):
"""T = temperature [K]; L = Length of grid."""
def new_head_position(worm, lattice):
"""
Extract current worm head position indices,
then randomly set new worm head position index.
lattice.occupied points to either lattice.bonds_x or lattice.bonds_y.
"""
[i, j] = worm.head
bond_type = np.random.randint(1, worm.q)
direction = ["Up", "Down", "Left", "Right"][np.random.randint(0, 4)]
if direction=="Right":
# use current indices to check for bond
bond = [i, j]
site = [0 if i==L-1 else i+1, j]
lattice.bond_idx = 0
elif direction=="Left":
# use new indices to check for bond
site = [L-1 if i==0 else i-1, j]
bond = [site[0], site[1]]
lattice.bond_idx = 0
elif direction=="Up":
# use current indices to check for bond
bond = [i, j]
site = [i, 0 if j==L-1 else j+1]
lattice.bond_idx = 1
elif direction=="Down":
# use new indices to check for bond
site = [i, L-1 if j==0 else j-1]
bond = [site[0], site[1]]
lattice.bond_idx = 1
return bond, bond_type, site, lattice
def accept_movement(current_bond, bond_type, temperature):
"""
Bond creation/deletion using Boltzman factor.
Bonds are always deleted since 1/exp(-2/T) > 1 for all T>0.
"""
if current_bond:
if current_bond==bond_type:
# new_bond = 0 will delete the current bond
accept_probability, new_bond = 1, 0
else:
accept_probability, new_bond = 1-np.exp(-2/temperature), 0
else:
accept_probability, new_bond = np.exp(-2/temperature), bond_type
accept_move = True if np.random.rand()<accept_probability else False
"""
if current_bond==bond_type:
accept_probability, new_bond = 1, 0
else:
accept_probability, new_bond = np.exp(-2/temperature), bond_type
accept_move = True if np.random.rand()<accept_probability else False
"""
return accept_move, new_bond
def monte_carlo_step(lattice, worm, temperature):
"""
Since the lattice matrix is indexed as [column, row], we need to input the
i, j indices in reversed order, as lattice.bond.occupied[j, i].
Measured quantities per step:
Nb_step = number of bonds per step.
G_micro = 2pt correlation function per micro_step corresponding to the
partition function of the worm algorithm for the 2D Ising model.
G_step = 2pt correlation function per step corresponding to the partition
function of the metropolis algorithm for the 2D Ising model.
* Note that G_micro(|i-j|) == G_step(|i-j|) when |i-j|=0.
"""
Nb_step = np.zeros((2))
G_micro, G_step = np.zeros((L+1)), np.zeros((L+1))
G_step_bool = np.zeros((L+1), dtype=bool)
for micro_step in range(2*L**2):
# propose head movement; [i, j] = new bond indices.
[i, j], bond_type, new_site, lattice = new_head_position(worm, lattice)
accept_move, bond_type = accept_movement(lattice.bonds[lattice.bond_idx, j, i], bond_type, temperature)
if accept_move:
# move worm head and either change or delete the bond.
lattice.bonds[lattice.bond_idx, j, i] = bond_type
worm.head = new_site
# Update correlation function every microstep.
diameter = worm.Diameter()
G_micro[diameter] += 1
G_step_bool[diameter] = True
if np.all(worm.head==worm.tail):
# measure observables and reset worm when path is closed.
G_step[G_step_bool] += 1
G_step_bool[:] = False
B=(lattice.bonds>0).sum()
Nb_step += B, B**2
worm.ResetPosition()
return lattice, worm, G_micro, G_step, Nb_step
# initialize main structures.
print('Initializing Worm Algorithm.')
observables = Observables(q, L, T_range, mcsteps)
lattice = Lattice(L)
worm = Worm(q, L)
# correlation, correlation2, and bond_number each act as a pointer.
correlation = observables.correlation #relates to G_micro
correlation2 = observables.correlation2 #relates to G_step
bond_number = observables.mean_bonds #relates to Nb_step
print('Starting thermalization cycle ...')
for step in range(int(mcsteps/5)):
lattice, worm, G_micro, G_step, Nb_step = monte_carlo_step(lattice, worm, T_range[0])
print('Starting measurement cycle ...')
for T_idx, T in enumerate(T_range):
print(" ", "Running temperature =", T, "...")
for step in range(mcsteps):
lattice, worm, G_micro, G_step, Nb_step = monte_carlo_step(lattice, worm, T)
# sum observables
correlation[:, T_idx] += G_micro
correlation2[:, T_idx] += G_step
bond_number[:, T_idx] += Nb_step
# average and store observables.
observables.AverageObservables()
print('Simulation Complete!')
return observables, lattice, worm
|
6fba36aceb70f19605e20a460db7054b81264224
| 3,649,848
|
def valid_chapter_name(chapter_name):
    """
    Check whether the chapter name is reasonable
    Args:
        chapter_name ([type]): [description]
    """
    # reject names containing "目录" (table of contents)
    for each in ["目录"]:
        if each in chapter_name:
            return False
    return True
|
9ec71837503f969808a6a666a3bf999ee3290f03
| 3,649,849
|
from typing import Iterable
from typing import Tuple
def lex_min(perms: Iterable[Perm]) -> Tuple[Perm, ...]:
"""Find the lexicographical minimum of the sets of all symmetries."""
return min(all_symmetry_sets(perms))
|
4cbb7e78de32c46684c9e621db90708934bb5e33
| 3,649,850
|
def subfield(string, delim, occurrence):
"""
function to extract specified occurence of subfield from string
using specified field delimiter
eg select subfield('abc/123/xyz','/',0) returns 'abc'
eg select subfield('abc/123/xyz','/',1) returns '123'
eg select subfield('abc/123/xyz','/',2) returns 'xyz'
eg select subfield('abc/123/xyz','/',3) returns ''
"""
"""
# this logic matches the functions written for msql and psql,
# because they do not have a string method to do this
ans = ''
found = 0
for ch in string:
if ch == delim:
found += 1
if found == occurrence + 1:
break
elif found == occurrence:
ans += ch
if not found:
ans = '' # else it returns the entire string
return ans
"""
# python does have a suitable string method, so use it
if delim in string:
try:
return string.split(delim)[occurrence]
except IndexError: # equivalent to the last example above
return ''
else:
return ''
|
ef022d0ca05e969e8ad69e4644cd24d1b7f47cb8
| 3,649,851
|
def in_hull(points, hull):
"""
Test if the rows of `points` are inside `hull`
`points` should be an `NxK` array of coordinates of `N` points in `K` dimensions
`hull` is either a scipy.spatial.Delaunay object or the `MxK` array of the
coordinates of `M` points in `K`dimensions for which Delaunay triangulation
will be computed
"""
# if not isinstance(hull,Delaunay):
del points['flight_name']
del points['output']
del points['TEMPS']
del hull['flight_name']
del hull['output']
del hull['TEMPS']
hull = Delaunay(hull.as_matrix())
return hull.find_simplex(points.as_matrix())>=0
|
ab116c17b42c26648b02930824dd0ae591b32eef
| 3,649,852
|
def sample_random(X_all, N):
"""Given an array of (x,t) points, sample N points from this."""
set_seed(0) # this can be fixed for all N_f
idx = np.random.choice(X_all.shape[0], N, replace=False)
X_sampled = X_all[idx, :]
return X_sampled
|
b2297c13cf7cf40dcdf82ea97e2029a96d7554ef
| 3,649,853
|
from typing import Optional
def read(db,
query: Optional[dict] = None,
pql: any = None,
order_by: Optional[str] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
disable_count_total: bool = False,
**kwargs):
"""Read data from DB.
Args:
db (MontyCollection): DB connection
query (dict or Query): Query to select items
pql (PQL) Python-Query-Language to select items
order_by (list): column name to sort by with format [ ( column1, 1 or -1 ), ... ]
limit (int): number of items to return per a page
offset (int): offset of cursor
disable_count_total (bool): set True to avoid counting total number of records
**kwargs: kwargs for function `pandas.read_sql_query`
or `influxdb.DataFrameClient.query`
Returns:
(list, int): list of data and total number of records
"""
if limit is None:
limit = 0
if offset is None:
offset = 0
if pql is not None and query is not None:
raise ValueError('Either query or pql can be specified')
if pql:
query = PQL.find(pql)
if query:
query = _fix_query_exists(query)
if order_by is None:
data = db.find(query).skip(offset).limit(limit)
count_total = db.count(query) if not disable_count_total else None
else:
data = db.find(query).sort(order_by).skip(offset).limit(limit)
count_total = db.count(query) if not disable_count_total else None
else:
if order_by is None:
data = db.find().skip(offset).limit(limit)
count_total = db.count({}) if not disable_count_total else None
else:
data = db.find().sort(order_by).skip(offset).limit(limit)
count_total = db.count({}) if not disable_count_total else None
data = list(data)
count_total = count_total if count_total is not None else len(data)
return data, count_total
|
b2153ce1b83de7f3f7dd5311a619a0623aedc01b
| 3,649,854
|
def check_horizontal(board: list) -> bool:
"""
Function check if in each line are unique elements.
It there are function return True. False otherwise.
>>> check_horizontal(["**** ****",\
"***1 ****",\
"** 3****",\
"* 4 1****",\
" 9 5 ",\
" 6 83 *",\
"3 1 **",\
" 8 2***",\
" 12 ****"])
True
>>> check_horizontal(["**** ****",\
"***1 ****",\
"** 3****",\
"* 4 1****",\
" 9 5 ",\
" 6 83 *",\
"3 1 **",\
" 8 2***",\
"112 ****"])
False
"""
unique = True
for line in board:
if not check_unique(list(line)):
unique = False
break
return unique
|
0769f0821637c78c1a18e387eb64d6234a0ced5c
| 3,649,855
|
import os
def IsDir(msg=None):
"""Verify the directory exists."""
def f(v):
if os.path.isdir(v):
return v
else:
raise Invalid(msg or 'not a directory')
return f
|
415e5c8f5a3f1414640fa298b07b5cb64b0293d4
| 3,649,856
|
import math
def update_events(dt: float, pos_x: float, pos_y: float, dir_x: float, dir_y: float, plane_x: float, plane_y: float):
""" Updates player position in response to user input.
"""
for e in pygame.event.get():
if e.type == pygame.KEYDOWN:
if e.key == pygame.K_ESCAPE:
pygame.quit()
raise SystemExit
elif e.type == pygame.QUIT:
pygame.quit()
raise SystemExit
move_speed: float = dt * 5.0
rot_speed: float = dt * 3.0
pressed = pygame.key.get_pressed()
new_xpos_plus: int = int(pos_x + dir_x * move_speed)
new_ypos_plus: int = int(pos_y + dir_y * move_speed)
if pressed[pygame.K_UP]:
if not WORLD_MAP[new_xpos_plus][int(pos_y)]:
pos_x += dir_x * move_speed
if not WORLD_MAP[int(pos_x)][new_ypos_plus]:
pos_y += dir_y * move_speed
new_xpos_minus: int = int(pos_x - dir_x * move_speed)
new_ypos_minus: int = int(pos_y - dir_y * move_speed)
if pressed[pygame.K_DOWN]:
if not WORLD_MAP[new_xpos_minus][int(pos_y)]:
pos_x -= dir_x * move_speed
if not WORLD_MAP[int(pos_x)][new_ypos_minus]:
pos_y -= dir_y * move_speed
if pressed[pygame.K_RIGHT]:
old_dir_x: float = dir_x
dir_x = dir_x * math.cos(-rot_speed) - dir_y * math.sin(-rot_speed)
dir_y = old_dir_x * math.sin(-rot_speed) + dir_y * math.cos(-rot_speed)
old_plane_x: float = plane_x
plane_x = plane_x * math.cos(-rot_speed) - plane_y * math.sin(-rot_speed)
plane_y = old_plane_x * math.sin(-rot_speed) + plane_y * math.cos(-rot_speed)
if pressed[pygame.K_LEFT]:
old_dir_x: float = dir_x
dir_x = dir_x * math.cos(rot_speed) - dir_y * math.sin(rot_speed)
dir_y = old_dir_x * math.sin(rot_speed) + dir_y * math.cos(rot_speed)
old_plane_x: float = plane_x
plane_x = plane_x * math.cos(rot_speed) - plane_y * math.sin(rot_speed)
plane_y = old_plane_x * math.sin(rot_speed) + plane_y * math.cos(rot_speed)
return pos_x, pos_y, dir_x, dir_y, plane_x, plane_y
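# The K_LEFT/K_RIGHT branches above apply a standard 2D rotation to the direction and camera
# plane vectors; a compact, self-contained sketch of that step:
import math
def _rotate(vx: float, vy: float, angle: float):
    # rotate the vector (vx, vy) by `angle` radians counter-clockwise
    return (vx * math.cos(angle) - vy * math.sin(angle),
            vx * math.sin(angle) + vy * math.cos(angle))
print(_rotate(1.0, 0.0, math.pi / 2))  # ~(0.0, 1.0): the unit x-vector rotated by 90 degrees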
|
e43cc7a2e6ab3f35637bf4ab37baefed96279656
| 3,649,857
|
def deg_to_xyz(lat_deg, lon_deg, altitude):
"""
http://www.oc.nps.edu/oc2902w/coord/geodesy.js
lat,lon,altitude to xyz vector
input:
lat_deg geodetic latitude in deg
lon_deg longitude in deg
altitude altitude in km
output:
returns vector x 3 long ECEF in km
"""
clat = cos(radians(lat_deg))
slat = sin(radians(lat_deg))
clon = cos(radians(lon_deg))
slon = sin(radians(lon_deg))
_, rn, _ = radcur(lat_deg)
ecc = EARTH_Ecc
esq = ecc * ecc
x = (rn + altitude) * clat * clon
y = (rn + altitude) * clat * slon
z = ((1 - esq) * rn + altitude) * slat
return [x, y, z]
|
0493132eb0658026727d7a292862fcf2d5d6d48b
| 3,649,858
|
def remove_unused_colours(ip, line_colours):
"""
>>> remove_unused_colours(np.array([[0,0,3], [1,5,1], [2,0,6], [2,2,2],[4,4,0]]), {2, 4})
array([[0, 0, 0],
[0, 0, 0],
[2, 0, 0],
[2, 2, 2],
[4, 4, 0]])
"""
#get a list of all unique colours
all_colours = list(np.unique(ip))
# remove the background colour 0
all_colours.remove(0)
#remove the line colours
for line_colour in line_colours:
all_colours.remove(line_colour)
# for all remaining colours (i.e. neither the background colour 0 nor the line colours), set them to the background colour 0
for each_colour in all_colours:
ip[np.where(ip == each_colour)]= 0
return ip
|
7e80cbb2e3e9ac86da4cf7d6e99a6d9bf2edeead
| 3,649,859
|
def extract_info(spec):
"""Extract information from the instance SPEC."""
info = {}
info['name'] = spec.get('InstanceTypeId')
info['cpu'] = spec.get('CpuCoreCount')
info['memory'] = spec.get('MemorySize')
info['nic_count'] = spec.get('EniQuantity')
info['disk_quantity'] = spec.get('DiskQuantity')
if spec.get('LocalStorageAmount'):
info['disk_count'] = spec.get('LocalStorageAmount')
info['disk_size'] = spec.get('LocalStorageCapacity')
info['disk_type'] = spec.get('LocalStorageCategory')
# Some special families use NVMe driver for local disks
_families = ['ecs.i3', 'ecs.i3g']
if spec.get('InstanceTypeFamily') in _families:
info['local_disk_driver'] = 'nvme'
else:
info['local_disk_driver'] = 'virtio_blk'
# Some special families use NVMe driver for cloud disks
_families = ['ecs.g7se']
if spec.get('InstanceTypeFamily') in _families:
info['cloud_disk_driver'] = 'nvme'
else:
info['cloud_disk_driver'] = 'virtio_blk'
# Some security-enhanced instance families have 50% encrypted memory
_families = ['ecs.c7t', 'ecs.g7t', 'ecs.r7t']
if spec.get('InstanceTypeFamily') in _families:
info['memory'] = int(info['memory'] * 0.5)
return info
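# Illustrative usage added by the editor; the spec dict below is a hypothetical, trimmed-down
# DescribeInstanceTypes record, not real Alibaba Cloud API output.
if __name__ == "__main__":
    sample_spec = {
        'InstanceTypeId': 'ecs.g7t.large',
        'InstanceTypeFamily': 'ecs.g7t',
        'CpuCoreCount': 2,
        'MemorySize': 8,
        'EniQuantity': 2,
        'DiskQuantity': 17,
    }
    # Security-enhanced family, so the reported memory is halved to 4.
    print(extract_info(sample_spec))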
|
7f93dcad1a8d99743a30d441dad64c2b9af08037
| 3,649,860
|
def sum_values(**d):
    # docstring example: write it the way you would write a git commit message
    """Return the sum of the dict's values.
    The keys are ignored entirely.
"""
return sum_func(*d.values())
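# Editor's note (illustrative, not in the original snippet): `sum_func` is defined elsewhere
# in the source module; with a plain additive sum_func, sum_values(a=1, b=2, c=3) would
# forward the values positionally as sum_func(1, 2, 3) and return 6.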
|
29b90a04760376d2b8f6844994a7341fa742f05d
| 3,649,861
|
def parse_title(title):
"""Parse strings from lineageos json
:param title: format should be `code - brand phone`
"""
split_datum = title.split(' - ')
split_name = split_datum[1].split(' ')
device = split_datum[0]
brand = split_name[0]
name = ' '.join(split_name[1:])
return [brand, name, device, device]
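# Illustrative usage added by the editor; the title below is a made-up example of the
# "code - brand phone" convention described in the docstring.
if __name__ == "__main__":
    print(parse_title("cheeseburger - Google Pixel"))
    # -> ['Google', 'Pixel', 'cheeseburger', 'cheeseburger']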
|
c3783ab36f4f7e021bdd5f0f781bb289ab2d458f
| 3,649,862
|
def addCountersTransactions(b):
"""Step 2 : The above list with count as the last element should be
[
[1, 1, 0, 1],
[0, 0, 0, 4],
[1, 1, 1, 3]
]
converted to the following way
[
[1, 1, 0, 1, 0],
[1, 1, 1, 3, 4]
]
with cnt 1 and cnt 2 for anti-mirroring technique
Algorithm
=========
Check for the first element in the listitem.
If it is 1,
cnt2 = 0
If it is 0,
Not the values of the list except the last item (count)
Check the Not valued list is matching with existing 1valued list
If it is matching,
then add the last count to cnt2 of that matched list
else
add a new entry with last count as cnt2 and cnt1 as 0
"""
# n = list(b)
# x = b[:]
# cnt1 = []
# cnt2 = []
temp_list2 = []
t1list = []
zlist = []
for i in range(len(b)):
#print b[i], b[i][0]
if b[i][0] == 1:
b[i] = b[i] + [0]
#adding this list item to another list
zlist = remove_counts(b[i],t1list)
#print 'zlist = ',zlist
temp_list2.append(b[i])
#print 'temp_list appended ', temp_list
#print b
if b[i][0] == 0:
#print 'Found an item that starts with 0'
for item in range(len(b[i])):
#print b[i][item],i,item, len(b[i])
if b[i][item] == 0:
#print 'Found a 0 item, change it to 1'
b[i][item] = 1
else:
#print 'Found a 1 item, change it to 0'
if item != len(b[i])-1:
#print 'Not the last element, so it is changed here (NOT)'
b[i][item] = 0
else:
b[i] = b[i] + [b[i][item]]
b[i][item] = 0
#print 'Changed cos'
#print 'Present list item inside loop is ', b[i]
#print 'Present list item is ', b[i]
temp = b[i]
#print temp
tlist = []
telist = remove_counts(temp,tlist)
temp_list2.append(b[i])
#########print 'temp_list appended \n', temp_list2
#print 'telist = ',telist
#print 'y is ', y
# if telist in temp_list2:
# print 'HEY FOUND HIM'
# #b[i] = b[i] + [b[i][item]]
# else:
# print'Else not found'
return temp_list2
'''Step 3: Do {I1} {I2} and {In}
Then check for support and prune the list
Do the above step for all the subsets and prune with support
To compute {I1}, {I2}, ... {In}
1. For loop i to len(items)
2. Check for ith item in lists,
If it is 1,
Sum up Cnt1 and put it in Ii
If it is 0,
Sum up Cnt2 and put it in Ii
2. Print all Ii's
'''
|
44fb81280fc7540c796e6f8308219147993c6b7a
| 3,649,863
|
import typing
import numpy as np
import torch
def aggregate_layers(
    hidden_states: dict, mode: typing.Union[str, typing.Callable]
) -> dict:
"""Input a hidden states dictionary (key = layer, value = 2D array of n_tokens x emb_dim)
Args:
hidden_states (dict): key = layer (int), value = 2D PyTorch tensor of shape (n_tokens, emb_dim)
Raises:
NotImplementedError
Returns:
dict: key = layer, value = array of emb_dim
"""
states_layers = dict()
emb_aggregation = mode
# iterate over layers
for i in hidden_states.keys():
if emb_aggregation == "last":
state = hidden_states[i][-1, :] # get last token
elif emb_aggregation == "first":
state = hidden_states[i][0, :] # get first token
elif emb_aggregation == "mean":
state = torch.mean(hidden_states[i], dim=0) # mean over tokens
elif emb_aggregation == "median":
state = torch.median(hidden_states[i], dim=0) # median over tokens
elif emb_aggregation == "sum":
state = torch.sum(hidden_states[i], dim=0) # sum over tokens
        elif emb_aggregation == "all" or emb_aggregation is None:
            state = hidden_states[i]  # keep the full (n_tokens, emb_dim) tensor for this layer
elif callable(emb_aggregation):
state = emb_aggregation(hidden_states[i])
else:
raise NotImplementedError(
f"Sentence embedding method [{emb_aggregation}] not implemented"
)
states_layers[i] = state.detach().cpu().numpy()
return states_layers
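# Illustrative usage added by the editor (not part of the original snippet); the tensors
# below are random stand-ins for per-layer hidden states of shape (n_tokens, emb_dim).
if __name__ == "__main__":
    dummy_states = {0: torch.randn(5, 8), 1: torch.randn(5, 8)}
    pooled = aggregate_layers(dummy_states, mode="mean")
    print({layer: vec.shape for layer, vec in pooled.items()})  # {0: (8,), 1: (8,)}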
|
21c91a4c031c561b6776a604aa653c3880d69b15
| 3,649,864
|
def get_bg_stat_info(int_faces, adj_list, face_inds, face_inds_new):
"""
    Output a list of faces and a list of verts for each stat.
"""
stat_faces = []
stat_verts = []
for k in range(len(int_faces)):
# Check if face already exists.
if int_faces[k] != 0:
continue
else:
# See if there are any adjacent faces.
for j in range(len(adj_list[k])):
if int_faces[adj_list[k][j]] != 0 and int_faces[adj_list[k][j-1]] != 0:
#stat_faces.append([k, adj_list[k][j], adj_list[k][j-1]])
# Find relevant verticies
stat_verts_new = find_vertex_ind(k,
adj_list[k][j],
adj_list[k][j-1],
face_inds,
face_inds_new)
#remaining_verts = set(face_inds_new[k])
#remaining_verts.remove(vert_0)
#remaining_verts = list(remaining_verts)
#stat_verts_new = [vert_0]
#print stat_verts_new, vert_0, remaining_verts, k, j
                    if stat_verts_new is not None:
stat_faces.append([k, adj_list[k][j], adj_list[k][j-1]])
stat_verts.append(stat_verts_new)
#assert len(stat_verts_new) == 3, "ERROR: stat_verts incorectly computed"
return stat_faces, stat_verts
|
262130ffcb4fe474ece01ed6a63705efdaac360c
| 3,649,865
|
import socket
import sys
def _build_server_data():
"""
Returns a dictionary containing information about the server environment.
"""
# server environment
server_data = {
'host': socket.gethostname(),
'argv': sys.argv
}
for key in ['branch', 'root']:
if SETTINGS.get(key):
server_data[key] = SETTINGS[key]
return server_data
|
55eda8203b527952aea8f8682b980d7c3cb12ce5
| 3,649,866
|
def config_data() -> dict:
"""Dummy config data."""
return {
"rabbit_connection": {
"user": "guest",
"passwd": "guest",
"host": "localhost",
"port": 5672,
"vhost": "/",
},
"queues": {"my_queue": {"settings": {"durable": True}, "limit": 0}},
"queue_limits": {0: ["my_queue"], 1: ["my_other_queue"]},
"notifiers": {
"smtp": {
"host": "localhost",
"user": None,
"passwd": None,
"from_addr": "test@test.com",
"to_addr": ["test@yourtest.com"],
"subject": "AMQPeek - RMQ Monitor",
},
"slack": {"api_key": "apikey", "username": "ampeek", "channel": "#general"},
},
}
|
cbbed3baf79b5928be47d3d00c747ac6be625ae5
| 3,649,867
|
def copy_linear(net, net_old_dict):
"""
Copy linear layers stored within net_old_dict to net.
"""
net.linear.weight.data = net_old_dict["linears.0.weight"].data
net.linear.bias.data = net_old_dict["linears.0.bias"].data
return net
|
8ba7f40e72b65ebef9948025b3404cbc5a660960
| 3,649,868
|
async def read_book(request: Request) -> dict:
"""Read single book."""
data = await request.json()
query = readers_books.insert().values(**data)
last_record_id = await database.execute(query)
return {"id": last_record_id}
|
e2ec15df60e2e8a5974c16688a9e5caa8c4452d8
| 3,649,869
|
def setup_dev():
"""Runs the set-up needed for local development."""
return setup_general()
|
889153114ffecd74c50530e867a03128279fc26f
| 3,649,870
|
def countAllAnnotationLines(
mpqa_dir="mpqa_dataprocessing\\database.mpqa.cleaned", doclist_filename='doclist.2.0'
):
"""
It counts all annotation lines available in all documents of a corpus.
:return: an integer
"""
m2d = mpqa2_to_dict(mpqa_dir=mpqa_dir)
mpqadict = m2d.corpus_to_dict(doclist_filename=doclist_filename)
count = 0
for doc in mpqadict['doclist']: # Iterate over all docs
count += len(mpqadict['docs'][doc]['annotations'].keys())
return count
|
2a1c981db125db163e072eb495144be2b004a096
| 3,649,871
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
def convergence(report: Report, **kwargs):
"""
Function that displays the convergence using a antco.report.Report object.
Parameters
----------
report: antco.report.Report
antco.report.Report instance returned by the antco.run() function.
**kwargs
figsize: tuple, default=(8, 5)
Tuple indicating the size of the figure.
title: str, default='Convergence'
Plot title.
        alpha_grid: float, default=0.2
            Transparency of the grid lines of the plot.
        alpha_graph: float, default=0.7
            Transparency of the lines of the plot.
save_plot: str, default=None
File in which to save the generated graph, if no value is provided the graph will not
be saved.
Returns
-------
:matplotlib.pyplot.Fig
Figure with convergence graph.
"""
def _draw(ax_, params_: dict, alpha_: float, color_: str, label_: str, linestyle_: str, linewidth_: int):
x = np.arange(len(params_))
y = [np.mean(vals) for vals in params_.values()]
ax_.plot(x, y, color=color_, label=label_, alpha=alpha_, linestyle=linestyle_, linewidth=linewidth_)
return ax_
# Check that the parameters necessary to represent convergence can be obtained.
try:
report.get('mean_cost')
except Exception:
raise Exception(
'The Report instance does not have the "mean_cost" value, make sure you have saved the "mean_cost" value '
'throughout the interactions of the algorithm using the method report.save("mean_cost").')
try:
report.get('max_cost')
except Exception:
raise Exception(
'The Report instance does not have the "max_cost" value, make sure you have saved the "max_cost" value '
'throughout the interactions of the algorithm using the method report.save("max_cost").')
parameters = {
'mean_cost': {'color': '#85C1E9', 'label': 'Average cost', 'linestyle': 'solid', 'linewidth': 3},
'max_cost': {'color': '#AF7AC5', 'label': 'Max cost', 'linestyle': 'dashed', 'linewidth': 2}}
# Get optional arguments
figsize = kwargs.get('figsize', (8, 5))
title = kwargs.get('title', 'Convergence')
alpha_graph = kwargs.get('alpha_graph', 0.7)
alpha_grid = kwargs.get('alpha_grid', 0.2)
save_plot = kwargs.get('save_plot', None)
fig, ax = plt.subplots(figsize=figsize)
for param, values in parameters.items():
ax = _draw(ax, report.get(param), alpha_graph,
values['color'], values['label'], values['linestyle'], values['linewidth'])
ax.set_xlabel('Iteration')
ax.set_ylabel('Cost')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.grid(alpha=alpha_grid)
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 0.1),
fancybox=True, shadow=True, ncol=len(parameters))
ax.set_title(title)
if save_plot is not None:
plt.savefig(save_plot, dpi=150)
return fig
|
523e64b68d88d705f22a5c31faecee51e5e59b2d
| 3,649,872
|
def scene():
""" Check that the scene is valid for submission and creates a report """
xrs.validation_report.new_report()
valid = True
# Start by getting into object mode with nothing selected
bpy.ops.object.mode_set(mode="OBJECT")
bpy.ops.object.select_all(action='DESELECT')
if (xrs.collection.collection_has_objects("master") == False):
xrs.validation_report.write_error("master collection not found or has no objects")
valid = False
else:
xrs.validation_report.write_ok("master collection found")
if (xrs.collection.collection_has_objects("web") == False):
valid = False
xrs.validation_report.write_error(
"web collection not found or has no objects"
)
else:
# Check all objects in the web collection
web_objects = xrs.collection.get_objects("web")
base_objects = xrs.collection.get_objects("master")
transparent_object_count = 0
total_triangles = 0
xrs.validation_report.write_ok(
"web collection found. object count: " + str(len(web_objects))
)
xrs.validation_report.write_hr()
# TODO: Additional checks for master objects
if ('dimensions_cube' not in bpy.data.objects):
valid = False
xrs.validation_report.write_error(
"dimensions_cube not found"
)
else:
dimensions_cube = bpy.data.objects['dimensions_cube']
tolerance = 1.05
web_dimensions = xrs.collection.get_dimensions("web")
# (WARN) Width
if (
web_dimensions[0] > dimensions_cube.dimensions.x * tolerance
):
xrs.validation_report.write_warning(
"Model width is too big (" +
str(web_dimensions[0]) + " > " +
str(dimensions_cube.dimensions.x) + ")"
)
elif (
web_dimensions[0] < dimensions_cube.dimensions.x / tolerance
):
xrs.validation_report.write_warning(
"Model width is too small (" +
str(web_dimensions[0]) + " < " +
str(dimensions_cube.dimensions.x) + ")"
)
else:
xrs.validation_report.write_ok(
"Model width is " + str(web_dimensions[0])
)
# (WARN) Depth
if (
web_dimensions[1] > dimensions_cube.dimensions.y * tolerance
):
xrs.validation_report.write_warning(
"Model depth is too big (" +
str(web_dimensions[1]) + " > " +
str(dimensions_cube.dimensions.y) + ")"
)
elif (
web_dimensions[1] < dimensions_cube.dimensions.y / tolerance
):
xrs.validation_report.write_warning(
"Model depth is too small (" +
str(web_dimensions[1]) + " < " +
str(dimensions_cube.dimensions.y) + ")"
)
else:
xrs.validation_report.write_ok(
"Model depth is " + str(web_dimensions[1])
)
# (WARN) Height
if (
web_dimensions[2] > dimensions_cube.dimensions.z * tolerance
):
xrs.validation_report.write_warning(
"Model height is too big (" +
str(web_dimensions[2]) + " > " +
str(dimensions_cube.dimensions.z) + ")"
)
elif (
web_dimensions[2] < dimensions_cube.dimensions.z / tolerance
):
xrs.validation_report.write_warning(
"Model height is too small (" +
str(web_dimensions[2]) + " < " +
str(dimensions_cube.dimensions.z) + ")"
)
else:
xrs.validation_report.write_ok(
"Model height is " + str(web_dimensions[2])
)
xrs.validation_report.write_hr()
# Base Collection
for obj in base_objects:
# (ERR) Modifiers need to be applied
if (len(obj.modifiers) > 0):
valid = False
xrs.validation_report.write_error(
obj.name + " needs to have all modifiers applied"
)
else:
xrs.validation_report.write_ok(
obj.name + " has no modifiers"
)
# (ERR) Transforms Not Applied (loc!=0,0,0;rot!=0,0,0;scale!=1)
if (xrs.object.transforms_are_applied(obj) == False):
valid = False
xrs.validation_report.write_error(
obj.name + " needs to have transforms applied"
)
else:
xrs.validation_report.write_ok(
obj.name + " transforms are correct"
)
# Web Collection
for obj in web_objects:
# (ERR) Modifiers need to be applied
if (len(obj.modifiers) > 0):
valid = False
xrs.validation_report.write_error(
obj.name + " needs to have all modifiers applied"
)
else:
xrs.validation_report.write_ok(
obj.name + " has no modifiers"
)
# (ERR) Non-manifold geometry
            non_manifold_vertex_count = xrs.object.non_manifold_vertex_count(obj)
            if (non_manifold_vertex_count > 0):
                xrs.validation_report.write_warning(
                    obj.name + " has non-manifold geometry (" + str(non_manifold_vertex_count) + " vertices)"
)
else:
xrs.validation_report.write_ok(
obj.name + " has no non-manifold geometry"
)
# (ERR) Triangle count over 100k
triangle_count = xrs.object.get_triangle_count(obj)
total_triangles = total_triangles + triangle_count
if (triangle_count > 100000):
valid = False
xrs.validation_report.write_error(
obj.name + " has " +
str(triangle_count) +
" triangles. The web collection model must be less than 100,000 triangles"
)
# (WARN) Triangle count over 60k
if (triangle_count > 60000):
xrs.validation_report.write_warning(
obj.name + " has " +
str(triangle_count) +
" triangles. This web collection model should be optimized where possible"
)
else:
xrs.validation_report.write_ok(
obj.name + " has " + str(triangle_count) + " triangles"
)
# (ERR) Transforms Not Applied (loc!=0,0,0;rot!=0,0,0;scale!=1)
if (xrs.object.transforms_are_applied(obj) == False):
valid = False
xrs.validation_report.write_error(
obj.name + " needs to have transforms applied"
)
else:
xrs.validation_report.write_ok(
obj.name + " transforms are correct"
)
# (WARN) Object names match mesh names
if (obj.name != obj.data.name):
xrs.validation_report.write_warning(
obj.name + " mesh is named " +
obj.data.name +
". Names should match"
)
else:
xrs.validation_report.write_ok(
obj.name + " mesh name matches"
)
# Materials
material_count = len(obj.material_slots)
# (ERR) No material
if (material_count == 0):
valid = False
xrs.validation_report.write_error(
obj.name + " needs to have a material"
)
else:
# (WARN) >1 Material per web object
if (material_count > 1):
xrs.validation_report.write_warning(
obj.name + " has " +
str(material_count) +
" materials and should only have 1"
)
else:
xrs.validation_report.write_ok(
obj.name + " has 1 material"
)
#web collection should have ao
if xrs.tools.check_ao() == False:
xrs.validation_report.write_warning(
obj.name +
" does not have an AO map. Please make one for the web collection model and put in the textures folder"
)
else:
xrs.validation_report.write_ok(
obj.name + " has the needed AO map in the web collection"
)
# (ERR) Material names are correct
for slot in obj.material_slots:
mat = slot.material
# (ERR) Empty Material Slot
if (mat is None):
valid = False
xrs.validation_report.write_error(
obj.name + " has an empty material slot, which must be removed"
)
else:
# (WARN) Material name should be lower case
if (mat.name.islower() == False):
xrs.validation_report.write_warning(
mat.name + " name should be lower case with _s"
)
else:
xrs.validation_report.write_ok(
mat.name + " name is valid"
)
# (ERR) Material uses nodes
if (mat.use_nodes == False):
valid = False
xrs.validation_report.write_error(
mat.name + " needs to use nodes"
)
else:
xrs.validation_report.write_ok(
mat.name + " uses nodes"
)
# (ERR) Material does not use a BSDF Shader
bsdf = xrs.material.get_one_node_of_type(
mat.node_tree.nodes,
"BSDF_PRINCIPLED"
)
if (bsdf is None):
valid = False
xrs.validation_report.write_error(
mat.name + " needs to use a Principled BSDF shader"
)
else:
xrs.validation_report.write_ok(
mat.name + " has a Principled BSDF shader"
)
# Base Color
if (check_and_report_material(
bsdf, mat, "Base Color", "diffuse"
) == False):
valid = False
# Metallic (TODO: enforce 0 or 1)
if (check_and_report_material(
bsdf, mat, "Metallic", "metallic"
) == False):
valid = False
# Roughness
if (check_and_report_material(
bsdf, mat, "Roughness", "roughness"
) == False):
valid = False
# Emission
if (check_and_report_material(
bsdf, mat, "Emission", "emissive"
) == False):
valid = False
# Alpha (TODO: get count)
# Alpha is in the diffuse texture
if (check_and_report_material(
bsdf, mat, "Alpha", "diffuse"
) == False):
valid = False
# Normal
if (check_and_report_normal_material(
bsdf, mat
) == False):
valid = False
xrs.validation_report.write_hr()
# (WARN) web object count should only be > 1 if some are transparent
#TODO
# (WARN) Total triangle count for web collection
if (total_triangles > 65000):
xrs.validation_report.write_warning(
"web collection meshes have " +
str(total_triangles) +
" triangles. There should be less than 65,000 triangles where possible"
)
# Nice to have:
# Preview Render
# GLB export
# Show the report after it is complete
xrs.validation_report.show_report()
return valid
|
6885509d95868c64666d63d9f0daa738e6a40269
| 3,649,873
|
def ca_set_container_policies(h_session, h_container, policies):
"""
Set multiple container policies.
:param int h_session: Session handle
:param h_container: target container handle
:param policies: dict of policy ID ints and value ints
:return: result code
"""
h_sess = CK_SESSION_HANDLE(h_session)
container_id = CK_ULONG(h_container)
pol_id_list = list(policies.keys())
pol_val_list = list(policies.values())
pol_ids = AutoCArray(data=pol_id_list, ctype=CK_ULONG)
pol_vals = AutoCArray(data=pol_val_list, ctype=CK_ULONG)
ret = CA_SetContainerPolicies(
h_sess, container_id, pol_ids.size.contents, pol_ids.array, pol_vals.array
)
return ret
|
b4c56108d137d8caa6fa65f6ffcfd8c649af1840
| 3,649,874
|
from os.path import join
from subprocess import Popen, PIPE
def run_trial(benchmark):
"""Runs the benchmark once and returns the elapsed time."""
args = ['.build/debug/slox', join('test', 'benchmark', benchmark + '.lox')]
proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
out = out.decode("utf-8").replace('\r\n', '\n')
# Remove the trailing last empty line.
out_lines = out.split('\n')
if out_lines[-1] == '':
del out_lines[-1]
# The benchmark should print the elapsed time last.
return float(out_lines[-1])
|
6c92e09134d4e12a022a5a7ae4bb4951f878be37
| 3,649,875
|
import numpy as np
def extend(arr, num=1, log=True, append=False):
"""Extend the given array by extraplation.
Arguments
---------
arr <flt>[N] : array to extend
num <int> : number of points to add (on each side, if ``both``)
log <bool> : extrapolate in log-space
append <bool> : add the extended points onto the given array
Returns
-------
retval <flt>[M] : extension (or input ``arr`` with extension added, if ``append``).
"""
if(log): useArr = np.log10(arr)
else: useArr = np.array(arr)
steps = np.arange(1, num+1)
left = useArr[0] + (useArr[0] - useArr[1])*steps[::-1].squeeze()
rigt = useArr[-1] + (useArr[-1] - useArr[-2])*steps.squeeze()
if(log):
left = np.power(10.0, left)
rigt = np.power(10.0, rigt)
if(append): return np.hstack([left, arr, rigt])
return [left, rigt]
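# Illustrative usage added by the editor: linear (log=False) extrapolation by two points
# on each side of a simple evenly spaced array.
if __name__ == "__main__":
    print(extend([1.0, 2.0, 3.0], num=2, log=False, append=True))
    # -> [-1.  0.  1.  2.  3.  4.  5.]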
|
e5f8b7fea74b1a92dba19aed527be1c823c058f9
| 3,649,876
|
import sys
import logging
import os
def to_relative(path, root, relative):
"""Converts any absolute path to a relative path, only if under root."""
if sys.platform == 'win32':
path = path.lower()
root = root.lower()
relative = relative.lower()
if path.startswith(root):
logging.info('%s starts with %s' % (path, root))
path = os.path.relpath(path, relative)
else:
logging.info('%s not under %s' % (path, root))
return path
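# Editor's note (illustrative, not in the original snippet): on a POSIX system,
# to_relative('/home/user/project/src/main.c', '/home/user/project', '/home/user/project')
# returns 'src/main.c', while a path outside the root is returned unchanged.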
|
50911c6cec942e9be0d694f95213053e23d2707a
| 3,649,877
|
def do_associate_favorite(parser, token):
"""
@object - object to return the favorite count for
"""
try:
tag, node, user = token.split_contents()
except ValueError:
        raise template.TemplateSyntaxError("%r tag requires two arguments" % token.contents.split()[0])
return AssociateFavorite(node, user)
|
90ed604936a0b7639adf356911a803ae755a9653
| 3,649,878
|
from typing import Type
from pydantic import BaseModel # noqa: E0611
from typing import Tuple
from typing import List
def parse_cookie(cookie: Type[BaseModel]) -> Tuple[List[Parameter], dict]:
"""Parse cookie model"""
schema = get_schema(cookie)
parameters = []
components_schemas = dict()
properties = schema.get('properties')
definitions = schema.get('definitions')
if properties:
for name, value in properties.items():
data = {
"name": name,
"in": ParameterInType.cookie,
"description": value.get("description"),
"required": name in schema.get("required", []),
"schema": Schema(**value)
}
parameters.append(Parameter(**data))
if definitions:
for name, value in definitions.items():
components_schemas[name] = Schema(**value)
return parameters, components_schemas
|
797c876676b1e002b4e54a7943f77301ed82efb1
| 3,649,879
|
def bdev_nvme_add_error_injection(client, name, opc, cmd_type, do_not_submit, timeout_in_us,
err_count, sct, sc):
"""Add error injection
Args:
name: Name of the operating NVMe controller
opc: Opcode of the NVMe command
cmd_type: Type of NVMe command. Valid values are: admin, io
do_not_submit: Do not submit commands to the controller
timeout_in_us: Wait specified microseconds when do_not_submit is true
err_count: Number of matching NVMe commands to inject errors
sct: NVMe status code type
sc: NVMe status code
Returns:
True on success, RPC error otherwise
"""
params = {'name': name,
'opc': opc,
'cmd_type': cmd_type}
if do_not_submit:
params['do_not_submit'] = do_not_submit
if timeout_in_us:
params['timeout_in_us'] = timeout_in_us
if err_count:
params['err_count'] = err_count
if sct:
params['sct'] = sct
if sc:
params['sc'] = sc
return client.call('bdev_nvme_add_error_injection', params)
|
3833256e71f47a49eef2643bf8c244308795a0b1
| 3,649,880
|
def tetheredYN(L0, KxStar, Rtot, Kav, fully=True):
""" Compare tethered (bispecific) vs monovalent """
if fully:
return polyc(L0, KxStar, Rtot, [[1, 1]], [1.0], Kav)[2][0] / \
polyfc(L0 * 2, KxStar, 1, Rtot, [0.5, 0.5], Kav)[0]
else:
return polyc(L0, KxStar, Rtot, [[1, 1]], [1.0], Kav)[0][0] / \
polyfc(L0 * 2, KxStar, 1, Rtot, [0.5, 0.5], Kav)[0]
|
a8a4be3c7b217164d690eed29eb8ab1acca45e05
| 3,649,881
|
def valid_payload(request):
"""
Fixture that yields valid data payload values.
"""
return request.param
|
0c02e52a02b9089e4832ccf2e9c37fc2d355e893
| 3,649,882
|
import os
def lookup_content(path, source_id):
"""
Look for a filename in the form of:
ARCHIVE_SOURCEID.[extension]
"""
content_filename = None
files = [f for f in os.listdir(path) if not f.endswith(".xml")]
for f in files:
tokens = os.path.splitext(f)[0].split("_")
if len(tokens) == 0:
continue
if tokens[-1] == source_id:
log.info("Content file FOUND: {0}", f)
# content_path = os.path.join(path, f)
content_filename = f
break
return content_filename
|
f356a3d522a5c79f615c20b46cc9a3738b211417
| 3,649,883
|
import os
import time
import signal
def external(pgm, inp, out, cor, tim=5):
"""
The external checker is used to check for outputs using an external
program that reads the input and the generated output and writes
to stdout the veredict. If the program runs for more than tim seconds,
'IE' is returned. 'IE' also returned for non-existing pgm.
"""
if not util.file_exists(pgm):
return 'IE'
tmp = util.tmp_file()
pid = os.fork()
if pid == 0:
# Child
os.system('./%s %s %s %s > %s' % (pgm, inp, out, cor, tmp))
os._exit(0)
else:
# Parent
c = 0
while c <= tim:
ret = os.waitpid(pid, os.WNOHANG)
if ret[0] != 0:
# Ok!
ver = util.read_file(tmp).strip()
util.del_file(tmp)
return ver
time.sleep(0.1)
c += 0.1
os.kill(pid, signal.SIGKILL)
return 'IE'
|
17ca353623b5094ac9158671cefcb82e7a44c235
| 3,649,884
|
def prune_deg_one_nodes(sampled_graph):
""" prune out degree one nodes from graph """
deg_one_nodes = []
for v in sampled_graph.nodes():
if sampled_graph.degree(v) == 1:
deg_one_nodes.append(v)
for v in deg_one_nodes:
sampled_graph.remove_node(v)
return sampled_graph
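# Illustrative usage added by the editor; it assumes `sampled_graph` is a networkx graph,
# which matches the .nodes()/.degree/.remove_node API used above.
if __name__ == "__main__":
    import networkx as nx
    g = nx.path_graph(4)                  # 0-1-2-3: nodes 0 and 3 have degree one
    pruned = prune_deg_one_nodes(g)
    print(sorted(pruned.nodes()))         # [1, 2] after a single pruning pass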
|
c4df72a66c6fb57d5d42a1b877a846338f32f42a
| 3,649,885
|
def reduce_clauses(clauses):
"""
Reduce a clause set by eliminating redundant clauses
"""
used = []
unexplored = clauses
while unexplored:
cl, unexplored = unexplored[0], unexplored[1:]
if not subsume(used, cl) and not subsume(unexplored,cl):
used.append(cl)
return used
|
d28fc08f214a04aac433827560251143204fa290
| 3,649,886
|
import os
def get_pretrained_t2v(name, model_dir=MODEL_DIR):
"""
    Useful when you want to switch from a token list to a vector easily.
Parameters
----------
name:str
select the pretrained model
e.g.:
d2v_all_256,
d2v_sci_256,
d2v_eng_256,
d2v_lit_256,
w2v_eng_300,
w2v_lit_300.
model_dir:str
the path of model, default: MODEL_DIR = '~/.EduNLP/model'
Returns
-------
t2v model: T2V
Examples
--------
>>> item = [{'ques_content':'有公式$\\FormFigureID{wrong1?}$和公式$\\FormFigureBase64{wrong2?}$,\
... 如图$\\FigureID{088f15ea-8b7c-11eb-897e-b46bfc50aa29}$,若$x,y$满足约束条件$\\SIFSep$,则$z=x+7 y$的最大值为$\\SIFBlank$'}]
>>> i2v = get_pretrained_t2v("test_d2v", "examples/test_model/data/d2v") # doctest: +ELLIPSIS
>>> print(i2v(item)) # doctest: +ELLIPSIS
[array([...dtype=float32)]
"""
if name not in PRETRAINED_MODELS:
raise KeyError(
"Unknown pretrained model %s, use one of the provided pretrained models: %s" % (
name, ", ".join(PRETRAINED_MODELS.keys()))
)
url, model_name, *args = PRETRAINED_MODELS[name]
model_path = get_data(url, model_dir)
if model_name in ["d2v", "w2v"]:
postfix = ".bin" if model_name == "d2v" else ".kv"
model_path = path_append(model_path, os.path.basename(model_path) + postfix, to_str=True)
return T2V(model_name, model_path, *args)
|
369def1a01a5ffa132db484a3340de2738f4b6c9
| 3,649,887
|
from io import BytesIO
import numpy as np
def get_np_io(arr, **kwargs) -> BytesIO:
"""Get the numpy object as bytes.
:param arr: Array-like
:param kwargs: Additional kwargs to pass to :func:`numpy.save`.
:return: A bytes object that can be used as a file.
"""
bio = BytesIO()
np.save(bio, arr, **kwargs)
bio.seek(0)
return bio
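# Illustrative round-trip added by the editor: the buffer produced by get_np_io can be
# handed straight to numpy.load.
if __name__ == "__main__":
    original = np.arange(6).reshape(2, 3)
    restored = np.load(get_np_io(original))
    print(np.array_equal(original, restored))  # True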
|
278a452dc97d8ca74398771bd34545c7505c191f
| 3,649,888
|
from typing import Mapping
def get_deep_attr(obj, keys):
""" Helper for DeepKey"""
cur = obj
for k in keys:
if isinstance(cur, Mapping) and k in cur:
cur = cur[k]
continue
else:
try:
cur = getattr(cur, k)
continue
except AttributeError:
pass
raise DataError(error='Unexistent key')
return cur
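# Illustrative usage added by the editor; the mapping branch is exercised here, and the
# attribute branch would kick in for objects such as namespaces or ORM rows.
if __name__ == "__main__":
    print(get_deep_attr({"a": {"b": {"c": 42}}}, ["a", "b", "c"]))  # 42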
|
f7e3af73c2e45a5448e882136811b6898cc45e29
| 3,649,889
|
def fork_node_item_inline_editor(item, view, pos=None) -> bool:
"""Text edit support for Named items."""
@transactional
def update_text(text):
item.subject.joinSpec = text
return True
def escape():
item.subject.joinSpec = join_spec
subject = item.subject
if not subject:
return False
join_spec = subject.joinSpec or ""
box = view.get_item_bounding_box(view.hovered_item)
entry = popup_entry(join_spec, update_text)
show_popover(entry, view, box, escape)
return True
|
7c4b0bdbe321bab427e22440e7225539262806f2
| 3,649,890
|
import selfies as sf
def get_selfies_alphabet(smiles_list):
"""Returns a sorted list of all SELFIES tokens required to build a
SELFIES string for each molecule."""
selfies_list = list(map(sf.encoder, smiles_list))
all_selfies_symbols = sf.get_alphabet_from_selfies(selfies_list)
all_selfies_symbols.add('[nop]')
selfies_alphabet = list(all_selfies_symbols)
selfies_alphabet.sort()
return selfies_alphabet
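# Illustrative usage added by the editor; it assumes the `selfies` package is installed
# (imported above as `sf`). The exact tokens depend on the selfies version, so no output
# is asserted here.
if __name__ == "__main__":
    print(get_selfies_alphabet(["CCO", "c1ccccc1"]))  # sorted SELFIES tokens plus '[nop]'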
|
f18206e0c4c03ab75db3efd693655a1a1cacb9e2
| 3,649,891
|
import datasets
import random
import os.path as osp
import numpy as np
from torch.utils.data import DataLoader, SubsetRandomSampler
from torchvision import transforms
def get_face_angular_dataloader(dataset_path, input_size, batch_size, num_workers, train_portion=1):
""" Prepare dataset for training and evaluating pipeline
Args:
dataset_path (str)
input_size (int)
batch_size (int)
num_workers (int)
train_portion (float)
Return:
train_loader (torch.utils.data.DataLoader)
val_loader (torch.utils.data.DataLoader)
test_loader (torch.utils.data.DataLoader)
"""
train_transform = transforms.Compose([
transforms.Resize(input_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(FACE_MEAN, FACE_STD)
])
test_transform = transforms.Compose([
transforms.ToTensor(),
])
train_dataset = datasets.ImageFolder(root=osp.join(dataset_path, "face", "train", "CASIA-WebFace"),
transform=train_transform)
test_dataset = PairFaceDataset(root=osp.join(dataset_path, "face", "test", "LFW"),
transform=test_transform)
if train_portion != 1:
train_len = len(train_dataset)
indices = list(range(train_len))
random.shuffle(indices)
split = int(np.floor(train_portion * train_len))
train_idx, val_idx = indices[:split], indices[split:]
train_sampler = SubsetRandomSampler(train_idx)
val_sampler = SubsetRandomSampler(val_idx)
train_loader = DataLoader(
train_dataset,
num_workers=num_workers,
batch_size=batch_size,
sampler=train_sampler,
pin_memory=True)
val_loader = DataLoader(
train_dataset,
num_workers=num_workers,
batch_size=batch_size,
sampler=val_sampler,
pin_memory=True)
else:
train_loader = DataLoader(
train_dataset,
batch_size=batch_size,
num_workers=num_workers,
shuffle=True)
val_loader = None
test_loader = DataLoader(
dataset=test_dataset,
shuffle=False,
batch_size=batch_size,
num_workers=num_workers)
return train_loader, val_loader, test_loader
|
5aa6d62c98ca942e79bbfaca192b11353a0a2fe1
| 3,649,892
|
from sqlalchemy import func
def compile_sql_numericize(element, compiler, **kw):
"""
Turn common number formatting into a number. use metric abbreviations, remove stuff like $, etc.
"""
arg, = list(element.clauses)
def sql_only_numeric(text):
# Returns substring of numeric values only (-, ., numbers, scientific notation)
# return func.nullif(func.substring(text, r'([+\-]?(\d\.?\d*[Ee][+\-]?\d+|(\d+\.\d*|\d*\.\d+)|\d+))'), '')
return func.coalesce(
func.substring(text, r'([+\-]?(\d+\.?\d*[Ee][+\-]?\d+))'), # check for valid scientific notation
func.nullif(
func.regexp_replace(text, r'[^0-9\.\+\-]+', '', 'g'), # remove all the non-numeric characters
''
)
)
return compiler.process(sql_only_numeric(arg), **kw)
|
ef8631e98cd74b276ad00731c75a5c1c907eb303
| 3,649,893
|
def run_sgd(model, epochs):
"""
Runs SGD for a predefined number of epochs and saves the resulting model.
"""
print("Training full network")
weights_rand_init = model.optimize(epochs=epochs)
# weights_rand_init = model.optimize(epochs=epochs, batch_size=55000, learning_rate=0.1)
print("Model optimized!!!")
return [model.get_model_weights(), weights_rand_init]
|
14c6fd1ffa8aab3a783b5738093d69771d036411
| 3,649,894
|
def get_all_outcome_links_for_context_courses(request_ctx, course_id, outcome_style=None, outcome_group_style=None, per_page=None, **request_kwargs):
"""
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param course_id: (required) ID
:type course_id: string
:param outcome_style: (optional) The detail level of the outcomes. Defaults to "abbrev". Specify "full" for more information.
:type outcome_style: string or None
:param outcome_group_style: (optional) The detail level of the outcome groups. Defaults to "abbrev". Specify "full" for more information.
:type outcome_group_style: string or None
:param per_page: (optional) Set how many results canvas should return, defaults to config.LIMIT_PER_PAGE
:type per_page: integer or None
:return: Get all outcome links for context
:rtype: requests.Response (with array data)
"""
if per_page is None:
per_page = request_ctx.per_page
path = '/v1/courses/{course_id}/outcome_group_links'
payload = {
'outcome_style' : outcome_style,
'outcome_group_style' : outcome_group_style,
'per_page' : per_page,
}
url = request_ctx.base_api_url + path.format(course_id=course_id)
response = client.get(request_ctx, url, payload=payload, **request_kwargs)
return response
|
78026eff6aef5a486d920a888d4dfdabc94bfc00
| 3,649,895
|
def GetContentResourceSpec():
"""Gets Content resource spec."""
return concepts.ResourceSpec(
'dataplex.projects.locations.lakes.content',
resource_name='content',
projectsId=concepts.DEFAULT_PROJECT_ATTRIBUTE_CONFIG,
locationsId=LocationAttributeConfig(),
lakesId=LakeAttributeConfig(),
contentId=ContentAttributeConfig())
|
434cb149fdeff6154928a4514d1f6241d44c85a7
| 3,649,896
|
from typing import Optional
def softplus(
x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
"""This operator computes the softplus value of Blob.
The equation is:
.. math::
out = log(e^x+1)
Args:
x (oneflow._oneflow_internal.BlobDesc): A Blob
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: The result Blob
For example:
.. code-block:: python
import oneflow.compatible.single_client as flow
import numpy as np
import oneflow.compatible.single_client.typing as tp
@flow.global_function()
def softplus_Job(x: tp.Numpy.Placeholder((3,))
) -> tp.Numpy:
return flow.math.softplus(x)
x = np.array([-1, 0, 1]).astype(np.float32)
out = softplus_Job(x)
# out [0.31326166 0.6931472 1.3132616 ]
"""
return build_unary_elemwise_math_op("softplus", x, name)
|
2bef1db640e0e5b3e9971b1d9b4fbe23e4eba808
| 3,649,897
|
from typing import Tuple
from typing import List
def diff_gcs_directories(
base_directory_url: str, target_directory_url: str
) -> Tuple[List[str], List[str], List[str]]:
"""
Compare objects under different GCS prefixes.
:param base_directory_url: URL for base directory
:param target_directory_url: URL for target directory
:returns: Tuple with 3 elements:
List of objects in base directory that are not present in target directory
List of objects in target directory that are not present in base directory
List of objects with different content in base and target directory
"""
base = urlparse(base_directory_url)
target = urlparse(target_directory_url)
if base.scheme != "gs":
raise ValueError("base_directory_url must be a gs:// URL")
if target.scheme != "gs":
raise ValueError("target_directory_url must be a gs:// URL")
client = Client(project=None)
base_blobs = client.list_blobs(base.hostname, prefix=base.path.strip("/") + "/")
base_blobs = {
_remove_prefix(blob.name, base.path.strip("/")): blob for blob in base_blobs
}
missing_objects = set(base_blobs.keys())
extra_objects = []
changed_objects = []
target_blobs = client.list_blobs(
target.hostname, prefix=target.path.strip("/") + "/"
)
for blob in target_blobs:
key = _remove_prefix(blob.name, target.path.strip("/"))
missing_objects.discard(key)
try:
if blob.md5_hash != base_blobs[key].md5_hash:
changed_objects.append(key)
except KeyError:
extra_objects.append(key)
return GCSDiffResult(list(missing_objects), extra_objects, changed_objects)
|
1e7727fb352d320c79de16d6efdd6f46120e89d7
| 3,649,898
|
from typing import List
from urllib.error import URLError
def load_compatible_apps(file_name: str) -> List[Product]:
"""Loads from file and from github and merges results"""
local_list = load_installable_apps_from_file(file_name)
try:
github_list = load_compatible_apps_from_github()
except (URLError, IOError):
github_list = []
return list(set(local_list) | set(github_list))
|
efbde4a2c2f4589bc73497017d89631e0333081c
| 3,649,899
|