content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def dict_merge(lft, rgt):
    """
    Recursive dict merge.

    Recursively merges dicts: not just a simple ``lft['key'] = rgt['key']`` —
    if both ``lft`` and ``rgt`` have a key whose value is a dict, ``dict_merge``
    is called on both values and the result stored in the returned dictionary.

    :param lft: base dictionary; never mutated (a deep copy is taken).
    :param rgt: overriding value; returned as-is when it is not a dict.
    :return: a new dict combining ``lft`` and ``rgt``.
    """
    if not isinstance(rgt, dict):
        return rgt
    result = deepcopy(lft)
    # Py3 fix: dict.iteritems() was removed in Python 3; use items().
    for key, val in rgt.items():
        if key in result and isinstance(result[key], dict):
            result[key] = dict_merge(result[key], val)
        else:
            result[key] = deepcopy(val)
    return result
def modified_zscore(x: np.ndarray) -> np.ndarray:
    """
    Apply the modified z-score transformation.

    Because it centers and scales with the median (and the median absolute
    deviation) rather than the mean, the modified z-score is less influenced
    by outliers than the standard z-score.

    Parameters
    ----------
    x: (N,) np.ndarray
        numbers

    Returns
    -------
    z: (N,) np.ndarray
        z-scored numbers computed using modified z-score
    """
    center = np.median(x)
    mad = np.median(np.abs(x - center))
    return (x - center) / (1.486 * mad)
def get_packet_unique(data: Union[bytes, memoryview], packet_type: str) -> Optional[Packet]:
    """
    Find a packet of the given type in the data, or None.

    NOTE(review): the original summary said "Get all the packets of a
    particular type", but the return annotation is ``Optional[Packet]``
    (a single packet or None) — presumably only the first/unique match is
    returned; confirm against the implementation.

    WARNING: packets are generated with non-header data as a `memoryview`.
    :param data: The data to search through.
    :param packet_type: The type of packet to look for. PACKET_TYPES.keys()
    """
def test_get_single_ride(test_client):
    """
    Test request returns correct ride with specified ID
    """
    response = test_client.get('/api/v1/rides/2')
    payload = json.loads(response.data)
    assert response.status_code == 200
    ride = payload['ride']
    # Expected fixture values for ride #2.
    expected = {
        'origin': 'Kisumu',
        'destination': 'Lodwar',
        'travel_date': '25th June 2018',
        'time': '12:00 am',
        'price': 400,
        'requests': [],
    }
    for field, value in expected.items():
        assert ride[field] == value
def update_profile(email, username, name, bio, interest, picture=None):
    """Update the profile of the user identified by *email*.

    If the user has no profile yet, insert a new row into ``profiles`` and
    link it back to the user; otherwise update the existing row (the
    picture column is only touched when a new picture is supplied).

    :param email: email used to look up the user.
    :param username: profile user name.
    :param name: display name.
    :param bio: biography text.
    :param interest: interests text.
    :param picture: optional picture reference; when falsy, the stored
        picture is left unchanged on update.
    :return: always ``True``.
    """
    db = get_db()
    cursor = db.cursor()
    # query user
    user = get_user_by_email(email)
    email = user['email']
    profile_id = user['profile_id']
    if profile_id is None:
        # add profile
        cursor.execute(
            "INSERT INTO profiles (username, name, bio, interest, picture) VALUES (?, ?, ?, ?, ?)",
            (username, name, bio, interest, picture)
        )
        db.commit()
        profile_id = cursor.lastrowid
        # Link the freshly created profile back to the user record.
        cursor.execute(
            "UPDATE users SET profile_id = ? WHERE email=?",
            (profile_id, email)
        )
        db.commit()
    else:
        # Update profile
        if picture:
            sql = "UPDATE profiles SET username=?,name=?,bio=?,interest=?,picture=? WHERE id=?"
            values = (username, name, bio, interest, picture, profile_id)
        else:
            # No new picture supplied: keep the existing one.
            sql = "UPDATE profiles SET username=?,name=?,bio=?,interest=? WHERE id=?"
            values = (username, name, bio, interest, profile_id)
        cursor.execute(
            sql,
            values,
        )
        db.commit()
    return True
def serialize(object):
    """
    Serialize a value into compressed bytes.

    The value is first marshalled (marshal format version 2) and the
    resulting byte string is then compressed with zlib.

    Args:
        object: a value

    Returns:
        Returns a bytes object containing compressed with zlib data.
    """
    marshalled = marshal.dumps(object, 2)
    return zlib.compress(marshalled)
def logo_if(interp, expr, block, elseBlock=None):
    """
    IF tf instructionlist
    (IF tf instructionlist1 instructionlist2)

    command.  Runs the second input when the first input is TRUE.  Does
    nothing when the first input is FALSE, unless a third input is given,
    in which case IF acts like IFELSE.  It is an error if the first input
    is not either TRUE or FALSE.
    """
    # Guard-first form: handle the FALSE branch, then the TRUE branch.
    if not expr:
        if elseBlock is None:
            return None
        return logo_eval(interp, elseBlock)
    return logo_eval(interp, block)
def select_model_general(
    df,
    grid_search,
    target_col_name,
    frequency,
    partition_columns=None,
    parallel_over_columns=None,
    executor=None,
    include_rules=None,
    exclude_rules=None,
    country_code_column=None,
    output_path="",
    persist_cv_results=False,
    persist_cv_data=False,
    persist_model_reprs=False,
    persist_best_model=False,
    persist_partition=False,
    persist_model_selector_results=False,
):
    """Run cross validation on data and select best model

    Best models are selected for each timeseries and if wanted persisted.

    Parameters
    ----------
    df : pandas.DataFrame
        Container holding historical data for training
    grid_search : sklearn.model_selection.GridSearchCV
        Preconfigured grid search definition which determines which models
        and parameters will be tried
    target_col_name : str
        Name of target column
    frequency : str
        Temporal frequency of data.
        Data with different frequency will be resampled to this frequency.
    partition_columns : list, tuple
        Column names based on which the data should be split up / partitioned
    parallel_over_columns : list, tuple
        Subset of partition_columns, that are used to parallel split.
    executor : prefect.engine.executors
        Provide prefect's executor. Only valid when `parallel_over_columns` is set.
        For more information see https://docs.prefect.io/api/latest/engine/executors.html
    include_rules : dict
        Dictionary with keys being column names and values being list of values to include in
        the output.
    exclude_rules : dict
        Dictionary with keys being column names and values being list of values to exclude
        from the output.
    country_code_column : str
        Name of the column with country code, which can be used for supplying holiday
        (i.e. having gridsearch with HolidayTransformer with argument `country_code_column`
        set to this one).
    output_path : str
        Path to directory for storing the output, default behavior is current working directory
    persist_cv_results : bool
        If True cv_results of sklearn.model_selection.GridSearchCV as pandas df
        will be saved as pickle for each partition
    persist_cv_data : bool
        If True the pandas df detail cv data
        will be saved as pickle for each partition
    persist_model_reprs : bool
        If True model reprs will be saved as json for each partition
    persist_best_model : bool
        If True best model will be saved as pickle for each partition
    persist_partition : bool
        If True dictionary of partition label will be saved as json for each partition
    persist_model_selector_results : bool
        If True ModelSelectoResults with all important information
        will be saved as pickle for each partition

    Returns
    -------
    list
        List of ModelSelectorResult
    """
    if parallel_over_columns is not None:
        # run prefect flow with paralellism
        # NOTE: **locals() forwards *every* local defined so far as keyword
        # arguments — do not rename parameters or introduce new locals above
        # this line, or the call signature of run_model_selection changes.
        flow_result = run_model_selection(**locals())
        # access result of select_model and flatten it
        result = flow_result[1].result[flow_result[0].get_tasks("select_model")[0]].result
        flat_list = list(itertools.chain.from_iterable(result))
        return flat_list
    else:
        partition_columns = partition_columns if partition_columns is not None else []
        # run without prefect: filter rows, then resample/partition the data
        df_prep = df.pipe(filter_data, include_rules=include_rules, exclude_rules=exclude_rules).pipe(
            prepare_data_for_training,
            frequency=frequency,
            partition_columns=partition_columns,
            country_code_column=country_code_column,
        )
        result = select_model(
            df=df_prep,
            target_col_name=target_col_name,
            partition_columns=partition_columns,
            grid_search=grid_search,
            parallel_over_dict=None,
            frequency=frequency,
            country_code_column=country_code_column,
        )
        # Persist outputs only when at least one persist_* flag is set.
        if any(
            [
                persist_cv_results,
                persist_cv_data,
                persist_model_reprs,
                persist_partition,
                persist_best_model,
                persist_model_selector_results,
            ]
        ):
            persist_experts_in_physical_partition(
                results=result,
                folder_path=output_path,
                persist_cv_results=persist_cv_results,
                persist_cv_data=persist_cv_data,
                persist_model_reprs=persist_model_reprs,
                persist_partition=persist_partition,
                persist_best_model=persist_best_model,
                persist_model_selector_results=persist_model_selector_results,
            )
        return result
def create_arma_sample(ar_order=1, ma_order=1, size=100):
    """Get a random ARMA sample.

    Parameters
    ----------
    ar_order, ma_order, size : int
        Values for the desired AR order, MA order and sample size.

    Returns
    -------
    An ARMA sample as a pandas Series.
    """
    # Arbitrary, evenly spaced AR/MA polynomial coefficients.
    ar = np.linspace(1, -0.9, ar_order + 1)
    ma = np.linspace(1, 0.9, ma_order + 1)
    values = tsa.ArmaProcess(ar, ma).generate_sample(size)
    # Daily index starting today, one point per sample.
    dates = pd.date_range(start=date.today(), periods=size, freq="D")
    return pd.Series(values, index=dates, name="sample")
def list_field_to_reference(web2py_path, app , new_table_name , new_list_field , list_field_name , old_table_id_field , old_table):
    """
    This method handles the migration in which a new table with a column for the
    values they'll get from the list field is made and maybe some empty columns to be filled in later.
    That new table has a foreign key reference back to the original table.
    Then for each value in the list field for each record in the original table,
    they create one record in the new table that points back to the original record.

    Method of Calling
    import migration_scripts
    migration_scripts.list_field_to_reference(web2py_path,app,new_table_name , new_list_field , list_field_name , old_table_id_field , old_table)

    @param web2py_path : The path to the web2py containing the Eden app (i.e "/home/web2py")
    @param app : The name of the eden application whose database needs to be migrated (i.e "eden")
    @param new_table_name : The name of the new table to which the list field needs to be migrated
    @param new_list_field : The name of the field in the new table which will hold the content of the list field
    @param list_field_name : The name of the list field in the original table
    @param old_table_id_field : The name of the id field in the original table
    @param old_table : The name of the original table
    """
    # Initialize module globals (db handle etc.) for the target app.
    set_globals(web2py_path,app)
    # Create the new table with the reference column, then populate it.
    creating_new_table(new_table_name , new_list_field , list_field_name , old_table_id_field , old_table)
    fill_the_new_table(new_table_name , new_list_field , list_field_name , old_table_id_field , old_table )
def get_img(file_path, gray=False):
    """
    Load an input image from disk.

    :param file_path: path of the image file
    :param gray: whether to convert the image to grayscale ('L' mode)
    :return: a PIL Image, or None if the file cannot be opened as an image
    """
    try:
        img = Image.open(file_path)
        if gray:
            img = img.convert('L')
        return img
    except Exception:
        # Best-effort: report the failure and return None.
        # (Runtime message is Chinese for "unsupported image format".)
        print("不支持的图片格式")
        return None
def working_days(days: int):
    """Return a list of N workingdays

    Builds the last *days* calendar dates (today going backwards), then for
    every weekend date shifts that date and all later (older) entries back:
    by 2 days on a Sunday, by 1 day on a Saturday.

    NOTE(review): shifted dates are not re-checked afterwards, so a shift
    that lands on an earlier weekend is not corrected — confirm whether
    callers rely on every returned date being a weekday.

    Keyword arguments:
    days -- days past
    """
    dates = []
    # Naive UTC "today" as the anchor.
    today = datetime.utcnow()
    for i in range(days):
        day = today - timedelta(days=i)
        day = day.date()
        dates.append(day)
    # NOTE: the loop variable shadows any imported `date` name within this scope.
    for idx, date in enumerate(dates):
        if date.weekday() == 6:  # Sunday: push this and all older dates back 2 days
            for i in range(idx, len(dates)):
                dates[i] = dates[i] - timedelta(days=2)
        if date.weekday() == 5:  # Saturday: push this and all older dates back 1 day
            for i in range(idx, len(dates)):
                dates[i] = dates[i] - timedelta(days=1)
    return dates
def GetFile(message=None, title=None, directory=None, fileName=None,
            allowsMultipleSelection=False, fileTypes=None):
    """
    A get-file dialog.

    Optionally a `message`, `title`, `directory`, `fileName` and
    `allowsMultipleSelection` can be provided.

    ::

        from fontParts.ui import GetFile
        print(GetFile())
    """
    # Delegate to the environment-specific implementation registered in the dispatcher.
    return dispatcher["GetFile"](message=message, title=title, directory=directory,
                                 fileName=fileName,
                                 allowsMultipleSelection=allowsMultipleSelection,
                                 fileTypes=fileTypes)
def str_to_dtype(s):
    """Convert dtype string to numpy dtype.

    :param s: name of an attribute of ``np`` (e.g. ``"float32"``, ``"int64"``).
    :return: the corresponding numpy type object.
    :raises AttributeError: if ``s`` is not a numpy attribute name.
    """
    # getattr() instead of eval('np.' + s): identical result for
    # attribute-style names without executing arbitrary code.
    return getattr(np, s)
def integrate_const(
    f: Callable,
    t_span: Tuple,
    dt: float,
    y0: np.ndarray,
    method: str = 'runge_kutta4'
) -> Tuple[np.ndarray, np.ndarray]:
    """
    A Python wrapper for Boost::odeint runge_kutta4 (the only one supported right now)
    stepper and ODE integration.

    :param f:
        The ODE system RHS.
    :param t_span:
        Integration range, given as a (t_initial, t_final) tuple.
    :param dt:
        Time-step used to advance from t_span[0] to t_span[1].
    :param y0:
        Initial conditions for the system state.
    :param method:
        The stepper method. Only 'runge_kutta4' is supported at the moment.
    :return:
        A (time, solution) tuple of arrays.  `time` holds the time points
        from integration; `solution` is a matrix whose columns are the
        per-state trajectories: solution[:, 0] is state 0, solution[:, 1]
        is state 1, and so forth.
    """
    raw_time, raw_solution = _integrate_const(f, t_span, dt, y0, method)
    return np.array(raw_time), np.array(raw_solution)
def test_metadata():
    """The metadata of the json file should be correct."""
    # Note: The json file should have been created with previous tests
    with open(file_struct.features_file) as f:
        data = json.load(f)
    assert "metadata" in data.keys()
    metadata = data["metadata"]
    assert "timestamp" in metadata.keys()
    # Library versions recorded at feature-extraction time must match
    # the currently installed versions.
    versions = metadata["versions"]
    assert versions["numpy"] == np.__version__
    assert versions["msaf"] == msaf.__version__
    assert versions["librosa"] == librosa.__version__
def test_execute_error_response(mocker: MockerFixture) -> None:
    """
    Test error response handling on execute.

    Wires the GSheets adapter to a mocked HTTP transport that returns a
    gviz "error" payload; executing a query must surface the detailed
    message as a ProgrammingError.
    """
    # Register the fake adapter entry point so connect() can discover it.
    entry_points = [FakeEntryPoint("gsheetsapi", GSheetsAPI)]
    mocker.patch(
        "shillelagh.backends.apsw.db.iter_entry_points",
        return_value=entry_points,
    )
    # Route all HTTPS traffic through requests_mock instead of the network.
    adapter = requests_mock.Adapter()
    session = requests.Session()
    session.mount("https://", adapter)
    mocker.patch(
        "shillelagh.adapters.api.gsheets.adapter.GSheetsAPI._get_session",
        return_value=session,
    )
    # Canned gviz error response for the sheet query URL.
    adapter.register_uri(
        "GET",
        "https://docs.google.com/spreadsheets/d/6/gviz/tq?gid=0&tq=SELECT%20%2A%20LIMIT%201",
        json={
            "version": "0.6",
            "reqId": "0",
            "status": "error",
            "errors": [
                {
                    "reason": "invalid_query",
                    "message": "INVALID_QUERY",
                    "detailed_message": "Invalid query: NO_COLUMN: C",
                },
            ],
        },
    )
    connection = connect(":memory:", ["gsheetsapi"])
    cursor = connection.cursor()
    sql = '''SELECT * FROM "https://docs.google.com/spreadsheets/d/6/edit#gid=0"'''
    # The gviz error must be translated into a ProgrammingError carrying
    # the detailed message.
    with pytest.raises(ProgrammingError) as excinfo:
        cursor.execute(sql)
    assert str(excinfo.value) == "Invalid query: NO_COLUMN: C"
def cleanup(serialized):
    """
    Remove all missing values. Sometimes it's useful for object methods
    to return the missing value in order to not include that value in the
    json format.

    Examples::

        >>> User(Serializable):
        ...     def attributes():
        ...         return ['id', 'name', 'birthday', 'somefunc']
        ...     def age():
        ...         if birthday:
        ...             return empty
        ...         else:
        ...             return calc_age(self.birthday)

    Now if some user has a birthday, the age function is going to return the
    age.  However if the user doesn't have a birthday, the age function
    returns a special empty value which tells the jsonifier not to include
    that key in the json format.

        >>> User(id=1, name='someone').as_json()
        {'id': 1, 'name': 'Someone'}
    """
    # Keep only entries whose value is not the `empty` sentinel.
    return {key: value for key, value in serialized.items() if value is not empty}
async def discover_devices(
    wave_devices: Optional[List[WaveDevice]] = None,
) -> List[WaveDevice]:
    """Discovers all valid, accessible Airthings Wave devices.

    :param wave_devices: optional pre-existing list to append discovered
        devices to; any non-list value is replaced with a fresh list.
    :return: the list of discovered ``WaveDevice`` instances.
    """
    wave_devices = wave_devices if isinstance(wave_devices, list) else []
    device: BLEDevice  # Typing annotation
    for device in await discover():
        # A parseable manufacturer-data serial identifies a genuine Wave device.
        serial = WaveDevice.parse_manufacturer_data(
            device.metadata.get("manufacturer_data")
        )
        if serial:
            wave_devices.append(WaveDevice(device, serial))
        else:
            _logger.debug(f"Device: ({device.address}) is not a valid Wave device.")
            continue
    return wave_devices
def convert_examples_to_feats_lstm(examples, max_seq_length, glove_vocab, feat_file, language):
    """Loads a data file into a list of `InputBatch`s in glove+lstm manner

    Features are cached in *feat_file*: when the pickle exists it is loaded
    and returned directly; otherwise each example is tokenized and
    POS-tagged with spaCy, mapped to GloVe / POS / label ids, padded to
    *max_seq_length*, and the resulting features are grouped into chunks
    of `n_class` (a module-level global) before being pickled.
    """
    print("#examples", len(examples))
    if os.path.exists(feat_file):
        # Cache hit: reuse previously computed features.
        with open(feat_file, 'rb') as f:
            features = pickle.load(f)
        return features
    else:
        features = [[]]
        # Pick the spaCy pipeline by language (English vs. Chinese).
        if language == 'en':
            nlp = spacy.load("en_core_web_sm")
        else:
            nlp = spacy.load("zh_core_web_md")
        for (ex_index, example) in enumerate(examples):
            # NOTE(review): `abandon` is never set to True anywhere below,
            # so the `if not abandon:` guard always passes — confirm whether
            # a filtering step was removed or is still planned.
            abandon = False
            dialog = nlp(example.text_a)
            dialog_tokens = [token.text for token in dialog]
            dialog_pos = [token.tag_ for token in dialog]
            #label to id
            # NOTE(review): assumes map_label_to_ids returns a list (it is
            # truncated, padded and len()-checked below) — confirm.
            label_ids = map_label_to_ids(example.label)
            # print(dialog_tokens)
            # print(label_ids)
            # print(len(dialog_tokens))
            # print(len(label_ids))
            # exit(0)
            # Truncate token/POS/label sequences in place to max_seq_length.
            truncate(dialog_tokens, max_seq_length)
            truncate(dialog_pos, max_seq_length)
            truncate(label_ids, max_seq_length)
            # convert tokens to index
            input_ids = glove_vocab.map(dialog_tokens)
            # convert pos to index
            pos_ids = map_to_ids(dialog_pos, constant.POS_TO_ID)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1] * len(input_ids)
            segment_ids = [0] * len(input_ids)  # actually not used
            # Zero-pad up to the sequence length.
            while len(input_ids) < max_seq_length:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)
                pos_ids.append(0)
                label_ids.append(0)
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(pos_ids) == max_seq_length
            assert len(label_ids) == max_seq_length
            # Log the first couple of examples for debugging.
            if ex_index < 2:
                logger.info("*** Example ***")
                logger.info("guid: %s" % (example.guid))
                logger.info("tokens: %s" % " ".join([str(token) for token in dialog_tokens]))
                logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
            if not abandon:
                features[-1].append(
                    InputFeatures(
                        input_ids=input_ids,
                        input_mask=input_mask,
                        segment_ids=segment_ids,
                        label_id=label_ids,
                        pos_ids=pos_ids))
                # Start a new group once the current one holds n_class features.
                if len(features[-1]) == n_class:
                    features.append([])
        # Drop a trailing empty group left over by the chunking above.
        if len(features[-1]) == 0:
            features = features[:-1]
        print('#features', len(features))
        # Cache the computed features for subsequent runs.
        with open(feat_file, 'wb') as f:
            pickle.dump(features, f)
        return features
def experiments():
    """
    All statistics we run for our paper
    :return:
    """
    logger = logging.getLogger(__name__)
    # (banner title, analysis function) pairs, run in paper order.
    sections = [
        ('Citations aggregate:', classify_citations_basic),
        ('Citations per discipline:', classify_citations_discipline),
        ('Citations per year:', classify_citations_year),
        ('Readership aggregate:', classify_readership_basic),
        ('Readership per discipline:', classify_readership_discipline),
        ('Readership per year:', classify_readership_year),
    ]
    for title, run in sections:
        # Banner width matches the title length, as in the original logs.
        banner = '=' * len(title)
        logger.info(banner)
        logger.info(title)
        logger.info(banner)
        run()
    return
def PyValueToMessage(message_type, value):
    """Convert the given python value to a message of type message_type.

    The value is round-tripped through JSON: serialized with json.dumps,
    then parsed into the target message type by JsonToMessage.
    """
    return JsonToMessage(message_type, json.dumps(value))
def istype(obj: Any, annotation: type) -> bool:
    """Check if object is consistent with the annotation.

    Only plain (non-generic) annotations are supported; a subscripted
    generic such as ``list[int]`` raises NotImplementedError.
    """
    # Subscripted generics (list[int], dict[str, int], ...) have an origin.
    if get_origin(annotation) is not None:
        raise NotImplementedError("Currently only the basic types are supported")
    # Treat a bare `None` annotation as "must be the None singleton".
    if annotation is None:
        return obj is None
    return isinstance(obj, annotation)
def autolabel(rects):
    """Attach a text label above each bar in *rects*, displaying its height.

    NOTE(review): relies on module-level globals ``ax`` (target axes),
    ``yformat`` (format string applied to the height) and ``fsz`` (font
    size) — confirm they are defined before this is called.
    """
    for rect in rects:
        height = rect.get_height()
        # Center the label horizontally on the bar, anchored at the bar top.
        ax.annotate(yformat.format(height),
                    xy=(rect.get_x() + rect.get_width() / 2, height),
                    xytext=(0, 0),
                    textcoords="offset points",
                    ha='center', va='bottom', fontsize=fsz)
def test_cand_gen(caplog):
    """Test extracting candidates from mentions from documents.

    End-to-end integration test: parses HTML/PDF fixtures into a Postgres
    session, extracts Part/Temp/Volt/Fig mentions, then exercises the
    CandidateExtractor (arity validation, throttlers, clear_all, and
    cascade behavior between Candidates and Mentions).  The hard-coded
    counts below are tied to the fixture documents.
    """
    caplog.set_level(logging.INFO)
    # macOS multiprocessing is unreliable here; fall back to a single core.
    if platform == "darwin":
        logger.info("Using single core.")
        PARALLEL = 1
    else:
        logger.info("Using two cores.")
        PARALLEL = 2  # Travis only gives 2 cores

    def do_nothing_matcher(fig):
        # Accept every figure; used to build a permissive figure matcher.
        return True

    max_docs = 10
    session = Meta.init("postgresql://localhost:5432/" + DB).Session()
    docs_path = "tests/data/html/"
    pdf_path = "tests/data/pdf/"
    # Parsing
    logger.info("Parsing...")
    doc_preprocessor = HTMLDocPreprocessor(docs_path, max_docs=max_docs)
    corpus_parser = Parser(
        session, structural=True, lingual=True, visual=True, pdf_path=pdf_path
    )
    corpus_parser.apply(doc_preprocessor, parallelism=PARALLEL)
    assert session.query(Document).count() == max_docs
    assert session.query(Sentence).count() == 5548
    docs = session.query(Document).order_by(Document.name).all()
    # Mention Extraction
    part_ngrams = MentionNgramsPart(parts_by_doc=None, n_max=3)
    temp_ngrams = MentionNgramsTemp(n_max=2)
    volt_ngrams = MentionNgramsVolt(n_max=1)
    figs = MentionFigures(types="png")
    Part = mention_subclass("Part")
    Temp = mention_subclass("Temp")
    Volt = mention_subclass("Volt")
    Fig = mention_subclass("Fig")
    fig_matcher = LambdaFunctionFigureMatcher(func=do_nothing_matcher)
    # Mismatched (classes, ngrams, matchers) arities must be rejected.
    with pytest.raises(ValueError):
        mention_extractor = MentionExtractor(
            session,
            [Part, Temp, Volt],
            [part_ngrams, volt_ngrams],  # Fail, mismatched arity
            [part_matcher, temp_matcher, volt_matcher],
        )
    with pytest.raises(ValueError):
        mention_extractor = MentionExtractor(
            session,
            [Part, Temp, Volt],
            [part_ngrams, temp_matcher, volt_ngrams],
            [part_matcher, temp_matcher],  # Fail, mismatched arity
        )
    mention_extractor = MentionExtractor(
        session,
        [Part, Temp, Volt, Fig],
        [part_ngrams, temp_ngrams, volt_ngrams, figs],
        [part_matcher, temp_matcher, volt_matcher, fig_matcher],
    )
    mention_extractor.apply(docs, parallelism=PARALLEL)
    # Expected mention counts for the fixture corpus.
    assert session.query(Part).count() == 234
    assert session.query(Volt).count() == 107
    assert session.query(Temp).count() == 136
    assert session.query(Fig).count() == 223
    part = session.query(Part).order_by(Part.id).all()[0]
    volt = session.query(Volt).order_by(Volt.id).all()[0]
    temp = session.query(Temp).order_by(Temp.id).all()[0]
    logger.info("Part: {}".format(part.context))
    logger.info("Volt: {}".format(volt.context))
    logger.info("Temp: {}".format(temp.context))
    # Candidate Extraction
    PartTemp = candidate_subclass("PartTemp", [Part, Temp])
    PartVolt = candidate_subclass("PartVolt", [Part, Volt])
    # Throttler arity must match the number of candidate classes.
    with pytest.raises(ValueError):
        candidate_extractor = CandidateExtractor(
            session,
            [PartTemp, PartVolt],
            throttlers=[
                temp_throttler,
                volt_throttler,
                volt_throttler,
            ],  # Fail, mismatched arity
        )
    with pytest.raises(ValueError):
        candidate_extractor = CandidateExtractor(
            session,
            [PartTemp],  # Fail, mismatched arity
            throttlers=[temp_throttler, volt_throttler],
        )
    # Test that no throttler in candidate extractor
    candidate_extractor = CandidateExtractor(
        session, [PartTemp, PartVolt]
    )  # Pass, no throttler
    candidate_extractor.apply(docs, split=0, parallelism=PARALLEL)
    assert session.query(PartTemp).count() == 4141
    assert session.query(PartVolt).count() == 3610
    assert session.query(Candidate).count() == 7751
    candidate_extractor.clear_all(split=0)
    assert session.query(Candidate).count() == 0
    # Test with None in throttlers in candidate extractor
    candidate_extractor = CandidateExtractor(
        session, [PartTemp, PartVolt], throttlers=[temp_throttler, None]
    )
    candidate_extractor.apply(docs, split=0, parallelism=PARALLEL)
    assert session.query(PartTemp).count() == 3879
    assert session.query(PartVolt).count() == 3610
    assert session.query(Candidate).count() == 7489
    candidate_extractor.clear_all(split=0)
    assert session.query(Candidate).count() == 0
    # Both throttlers active: candidate counts shrink further.
    candidate_extractor = CandidateExtractor(
        session, [PartTemp, PartVolt], throttlers=[temp_throttler, volt_throttler]
    )
    candidate_extractor.apply(docs, split=0, parallelism=PARALLEL)
    assert session.query(PartTemp).count() == 3879
    assert session.query(PartVolt).count() == 3266
    assert session.query(Candidate).count() == 7145
    assert docs[0].name == "112823"
    assert len(docs[0].parts) == 70
    assert len(docs[0].volts) == 33
    assert len(docs[0].temps) == 24
    # Test that deletion of a Candidate does not delete the Mention
    session.query(PartTemp).delete()
    assert session.query(PartTemp).count() == 0
    assert session.query(Temp).count() == 136
    assert session.query(Part).count() == 234
    # Test deletion of Candidate if Mention is deleted
    assert session.query(PartVolt).count() == 3266
    assert session.query(Volt).count() == 107
    session.query(Volt).delete()
    assert session.query(Volt).count() == 0
    assert session.query(PartVolt).count() == 0
def spending_from_savings(take_home_pay: float, savings: float) -> Decimal:
    """
    Calculate monthly spending from take-home pay and amount saved.

    Useful with what Paula Pant calls the anti-budget: instead of tracking
    spending in detail, spending is simply what remains after saving.  The
    result can be used as input for the savings_rate function.

    Args:
        take_home_pay: monthly take-home pay
        savings: amount of money saved towards FI

    Returns:
        The amount of money spent
    """
    pay = Decimal(take_home_pay)
    saved = Decimal(savings)
    return pay - saved
def find_all_run_dirs(
    base_output_dir: pathlib.Path,
    state: str,
    site: str,
    stage: PipelineStage,
) -> Iterator[pathlib.Path]:
    """Yield run directories for a pipeline stage, newest first.

    :param base_output_dir: root directory of all pipeline output.
    :param state: state identifier (first path component).
    :param site: site identifier (second path component).
    :param stage: pipeline stage whose output directory is searched.
    :return: iterator over run directories sorted newest-first; yields
        nothing when the stage directory does not exist.
    """
    stage_dir = base_output_dir / state / site / STAGE_OUTPUT_NAME[stage]
    if not stage_dir.exists():
        return
    for run_dir in sorted(stage_dir.iterdir(), reverse=True):
        # Skip private/hidden entries ("_..." and "...") in one check.
        if run_dir.name.startswith(("_", ".")):
            continue
        yield run_dir
def stdlib_public_names(module: str, *, version: "str | None" = None) -> set[str]:
    """
    Return a set of public names of a stdlib module, in specific Python version.

    If no version is given, default to the current version.

    The `version` parameter takes argument of the form `3.9`, `4.7`, etc.

    :raises ValueError: if *module* is not an importable stdlib module.
    """
    if module not in IMPORTABLE_STDLIB_MODULES:
        raise ValueError(f"{module} is not importable stdlib module")
    # Default to the running interpreter's "major.minor" version string.
    version = version or ".".join(str(c) for c in sys.version_info[:2])
    return set(load_stdlib_public_names(version)[module])
def large_asymmetric_bulge(data):
    """
    :param data: image data as array
    :return: the width and location of the largest asymmetric bulge (if any) in the sequence,
        as ``(width, (half, index))``; ``(nan, (nan, nan))`` when no bulge is found.
    """
    # retrieve the lengths of the bars in the sequences (the counts) from the palindrome function
    score, upper_half_counts, lower_half_counts, len_premiRNA = palindrome(data)
    # zip the count lists and check whether a large asymmetric bulge is included (pixel bar reaching image border)
    # and in which of the two image halves the bulge is located
    bulge_array = []
    bulge_locations = []
    # go over the bar lengths in the counts arrays and check whether they match the large asymmetric bulge requirements
    # NOTE(review): the magic bar lengths 2 and 12 presumably encode "bar
    # collapsed" vs "bar reaching the image border" — confirm against the
    # palindrome() encoding.
    for pixel_upper, pixel_lower in zip(upper_half_counts[0:len_premiRNA], lower_half_counts[0:len_premiRNA]):
        if pixel_upper == pixel_lower:
            bulge_array.append(0)
            bulge_locations.append(0)
        else:
            # check for large asymmetric bulge in lower half of image
            if pixel_upper == 2 and pixel_lower == 12:
                bulge_array.append(1)
                bulge_locations.append('lower')
            # check for large asymmetric bulge in upper half of image
            elif pixel_upper == 12 and pixel_lower == 2:
                bulge_array.append(1)
                bulge_locations.append('upper')
            else:
                # if above conditions do not hold, the sequence does not contain a large asymmetric bulge
                bulge_array.append(0)
                bulge_locations.append(0)
    # find the exact location and width of the large asymmetric bulge in the sequence by going over the bulge_array
    widths = []
    bulge_width = 0
    bulge_exact_locations = []
    bulge_exact_location = []
    # NOTE(review): the loop stops at len(bulge_array) - 1, so a bulge that
    # extends to the very last position is never "closed" and is dropped —
    # confirm whether that is intended.
    for i in range(len(bulge_array) - 1):
        # if the integer in the bulge_array is 1, we are at a large asymmetric bulge and we should increment the width
        if bulge_array[i] == 1:
            bulge_width += 1
            bulge_exact_location.append((bulge_locations[i], i))
            # if the next integer in bulge_array is 0, we have reached the end of the bulge and we should store the
            # width and all location info
            if bulge_array[i + 1] == 0:
                widths.append(bulge_width)
                bulge_width = 0
                bulge_exact_locations.append(bulge_exact_location)
                bulge_exact_location = []
        else:
            # NOTE(review): this `i += 1` is a no-op — `i` is reassigned by
            # the for loop on the next iteration.
            i += 1
    # create empty values for the attributes of interest if there is no large asymmetric bulge found in the sequence
    if not widths:
        largest_bulge = np.nan
        largest_bulge_location = (np.nan, np.nan)
    # if there is at least one large asymmetric bulge, find the widest one among all and store this as the largest
    # asymmetric bulge of the sequence
    else:
        largest_bulge = np.max(widths)
        largest_bulge_index = np.argmax(widths)
        largest_bulge_location = bulge_exact_locations[largest_bulge_index]
        # Report the half ('upper'/'lower') of the bulge start and the index of its middle position.
        middle_bulge_location = int(len(largest_bulge_location) / 2)
        largest_bulge_location = (largest_bulge_location[0][0],
                                  largest_bulge_location[middle_bulge_location][1])
    return largest_bulge, largest_bulge_location
def test_mcari(spectral_index_test_data):
    """Test for PlantCV.

    Computes the MCARI spectral index on the fixture hyperspectral image,
    then checks the output array shape and that the pseudo-RGB rendering
    reaches the 8-bit maximum value.
    """
    index_array = spectral_index.mcari(spectral_index_test_data.load_hsi(), distance=20)
    assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_compound_includes(client, compound_factory, endpoint, substance_factory):
    """Tests that /compound endpoints can be provided an include parameter"""
    compound = compound_factory().instance
    # Create a substance linked to the compound so the JSON:API
    # `include=substance` query has something to side-load.
    substance_factory(
        associated_compound={"type": compound.serializer_name, "id": compound.pk}
    )
    response = json.loads(client.get(endpoint, {"include": "substance"}).content)
    # JSON:API side-loaded resources appear under "included".
    assert response["included"] is not None
def request(host, path, bearer_token, url_params):
    """Given a bearer token, send a GET request to the API.

    Args:
        host (str): The domain host of the API.
        path (str): The path of the API after the domain.
        bearer_token (str): OAuth bearer token, obtained using client_id and client_secret.
        url_params (dict): An optional set of query parameters in the request.

    Returns:
        dict: The JSON response from the request.

    Raises:
        HTTPError: An error occurs from the HTTP request.
    """
    # Fall back to an empty query-parameter dict when none is supplied.
    params = url_params or {}
    url = '{0}{1}'.format(host, quote(path.encode('utf8')))
    auth_header = {
        'Authorization': 'Bearer %s' % bearer_token,
    }
    response = requests.request('GET', url, headers=auth_header, params=params)
    return response.json()
def find_unique_distances(distance_ij: pd.Series) -> np.ndarray:
    """Finds the unique distances that define the neighbor groups.

    :param distance_ij: A pandas ``Series`` of pairwise neighbor distances.
    :return: An array of unique neighbor distances.
    """
    sorted_unique = np.sort(distance_ij.unique())
    # Drop values numerically indistinguishable from their predecessor,
    # keeping the first representative of each near-equal group.
    keep_mask = np.logical_not(np.isclose(sorted_unique[1:], sorted_unique[:-1]))
    return np.concatenate((sorted_unique[:1], sorted_unique[1:][keep_mask]))
def push_msg(msg_type, content, key):
    """Push a message to a WeCom (Enterprise WeChat) group via webhook.

    See https://work.weixin.qq.com/api/doc/90000/90136/91770 for the
    expected ``content`` format.

    :param msg_type: message type understood by the webhook (e.g. "text").
    :param content: payload dict matching ``msg_type``.
    :param key: webhook key identifying the target group.
    """
    api_send = "https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key={}".format(key)
    data = {"msgtype": msg_type, msg_type: content}
    response = requests.post(api_send, json=data)
    # Fix: the previous `assert` inside a bare `except:` disappears under
    # `python -O` and swallowed even KeyboardInterrupt.  Keep the same
    # best-effort behavior with an explicit check instead.
    try:
        delivered = response.json().get('errmsg') == 'ok'
    except Exception:
        delivered = False
    if not delivered:
        print("消息推送失败")
def session(monkeypatch: pytest.MonkeyPatch) -> nox.Session:
    """Fixture for a Nox session.

    Registers a throwaway session named ``test`` in a fresh registry,
    builds a one-runner manifest from it, creates its (no-op) venv and
    returns a ready-to-use ``nox.Session``.
    """
    registry: Dict[str, Any] = {}
    # Patch the global Nox registry so the "test" session does not leak
    # into other tests.
    monkeypatch.setattr("nox.registry._REGISTRY", registry)
    @nox.session(venv_backend="none")
    def test(session: nox.Session) -> None:
        """Example session."""
    config = nox._options.options.namespace(posargs=[])
    # Exactly one session was registered, so the manifest has one runner.
    [runner] = nox.manifest.Manifest(registry, config)
    runner._create_venv()
    return nox.Session(runner)
def _load_yaml_with_clear_tag(stream):
    """Like yaml.safe_load(), but everything with a !clear tag before it
    will be wrapped in ClearedValue().

    :param stream: a string or file-like object containing one YAML document.
    :return: the deserialized document.
    """
    # Fix: Loader.add_constructor is a classmethod, so calling it on a
    # SafeLoader *instance* registered '!clear' on yaml.SafeLoader itself,
    # permanently affecting every subsequent yaml.safe_load() in the
    # process.  Register the tag on a throwaway subclass instead.
    class _ClearTagLoader(yaml.SafeLoader):
        pass

    _ClearTagLoader.add_constructor('!clear', _cleared_value_constructor)
    loader = _ClearTagLoader(stream)
    try:
        return loader.get_single_data()
    finally:
        if hasattr(loader, 'dispose'):  # it doesn't in PyYAML 3.09
            loader.dispose()
def get_7K_info(device_dict, collection):
    """
    Checks Palo firewall to see if model and family are in the 7K family,
    sets variables, and then gets 7K info.

    Parameters
    ----------
    device_dict : dict
        A dictionary of Panorama connected devices
    collection : Collection
        A MongoDB database collection
    """
    logger.info('Starting')
    key = config.paloalto['key']
    for device in device_dict:
        fw_dict = device_dict.get(device)
        model = fw_dict.get('model')
        ip_addr = fw_dict.get('ip-address')
        if model == 'PA-7080':
            smc_slot = '6'
            lpc_slot = '7'
            ps_total = 8
            slot_total = 12
        elif model == 'PA-7050':
            smc_slot = '4'
            lpc_slot = '8'
            ps_total = 4
            slot_total = 8
        else:
            # Fix: previously a non-7K model fell through with the slot
            # variables unbound (NameError on first iteration) or stale
            # from an earlier device; skip anything that is not a
            # 7000-series chassis.
            logger.info('Skipping %s: model %s is not in the 7K family', ip_addr, model)
            continue
        fw = firewall.Firewall(hostname=ip_addr, api_key=key)
        get_7K_chassis_info(fw, collection, ip_addr, slot_total)
        get_7K_power_info(fw, collection, ip_addr, smc_slot, ps_total)
        get_7K_fan_info(fw, collection, ip_addr, smc_slot)
        get_7K_amc_info(fw, collection, ip_addr, lpc_slot)
def highlights(state_importance_df, exec_traces, budget, context_length, minimum_gap=0,
               overlay_limit=0):
    """Generate a HIGHLIGHTS summary: pick up to `budget` of the most
    important states whose context windows do not overlap, and return the
    surrounding trajectory for each.

    :param state_importance_df: dataframe with 'state' ((trace_idx, state_idx)
        tuples) and 'importance' columns
    :param exec_traces: list of execution traces, each exposing a `.states` list
    :param budget: maximum number of summary states to select
    :param context_length: number of surrounding states to include per summary state
    :param minimum_gap: minimum spacing between selected windows
    :param overlay_limit: allowed overlap between windows
    :return: dict mapping (trace_idx, state_idx) -> list of context states
    """
    sorted_df = state_importance_df.sort_values(['importance'], ascending=False)
    summary_states = []
    seen_indexes = {x: [] for x in range(len(exec_traces))}
    seen_importance = []
    # Walk states from most to least important.
    for index, row in sorted_df.iterrows():
        state = row['state']
        # Unique importance score required (Frogger-specific de-duplication).
        if row["importance"] in seen_importance:
            continue
        seen_importance.append(row["importance"])
        trace_len = len(exec_traces[state[0]].states)
        lower, upper = get_relevant_range(state[1], trace_len, context_length, minimum_gap,
                                          overlay_limit)
        # Keep the state only if its context window does not touch a
        # previously selected window in the same trace.
        if lower not in seen_indexes[state[0]] and upper not in seen_indexes[state[0]]:
            seen_indexes[state[0]] += list(range(lower, upper + 1))
            summary_states.append(state)
            if len(summary_states) == budget:
                break
    # Extract the surrounding trajectory for every selected state.
    # (A large block of commented-out legacy trajectory-selection code was
    # removed here; it was dead and unreferenced.)
    summary_state_trajectories = {}
    for t_i, s_i in summary_states:
        t = exec_traces[t_i].states
        lower, upper = get_relevant_range(s_i, len(t), context_length)
        summary_state_trajectories[(t_i, s_i)] = t[lower:upper]
    return summary_state_trajectories
def draw_figure(canvas, figure, loc=(0, 0)):
    """
    Render a matplotlib figure onto a Tk canvas.

    loc is the pixel position of the figure's top-left corner on the canvas.
    Inspired by matplotlib source: lib/matplotlib/backends/backend_tkagg.py.
    Returns the PhotoImage handle; the caller must keep a live reference to
    it, otherwise Tk releases the image and the picture disappears.
    """
    agg_canvas = FigureCanvasAgg(figure)
    agg_canvas.draw()
    _bx, _by, width, height = figure.bbox.bounds
    width, height = int(width + 1), int(height + 1)
    photo = tk.PhotoImage(master=canvas, width=width, height=height)
    # Tk anchors images at their centre, so shift from the top-left corner.
    canvas.create_image(loc[0] + width / 2, loc[1] + height / 2, image=photo)
    # There is no public accessor for the native renderer, hence the
    # private-attribute access below.
    blit(photo, agg_canvas.get_renderer()._renderer, colormode=2)
    return photo
def beautify_file(sources_file_path, len: int):
    """
    Re-align line-continuation backslashes in a sources Makefile, in place.

    Lines mentioning ``.c`` get one tab before the trailing backslash,
    ``+=`` lines (except ``_BONUS`` ones) get two, ``SOURCES_BONUS`` lines
    get three; every other line is left untouched.

    Note: the second parameter is named ``len`` (shadowing the builtin) to
    keep the public keyword interface unchanged.
    """
    for line in fileinput.input(sources_file_path, inplace=True):
        if ".c" in line:
            tabs = "\t"
        elif "+=" in line and "_BONUS" not in line:
            tabs = "\t\t"
        elif "SOURCES_BONUS" in line:
            tabs = "\t\t\t"
        else:
            print(line, end='')
            continue
        print('{0:{len}}{1}'.format(line.rstrip(), tabs + "\\\n", len=len), end='')
def rpi_audio_support_install(junk, audio=None):
    """
    Install audio support drivers (pimoroni or waveshare) on the remote
    Raspberry Pi device.

    :param junk: unused placeholder argument (kept for the task runner's
        calling convention).
    :param audio: name of the audio driver to install; 'pimoroni' or
        'waveshare'.  Anything else prints a usage hint.
    """
    if audio in ("pimoroni", "waveshare"):
        install_audio_drivers(orig_cxn, audio)
    else:
        # Bug fix: the usage message was missing its closing parenthesis.
        print("Please provide a Raspberry Pi audio driver name (supported: pimoroni, waveshare)")
    # Player packages are installed regardless of the driver choice
    # (matches the original behavior).
    orig_cxn.sudo("apt-get update && apt-get -y install omxplayer mpg123 mpg321 mplayer")
def find_struct(lines):
    """Parse the aligned-structure block out of program output lines.

    Collects the two aligned sequences and the consensus structure from
    '; ALIGN ...' lines, stopping at the '; ========' separator.  Returns a
    one-element list ``[[pairs, seq1, seq2]]`` where ``pairs`` is the sorted
    pair representation of the Vienna structure.
    """
    structure_chars = ''
    seq1, seq2 = '', ''
    name1, name2 = '', ''
    parsed = []
    for line in lines:
        if line.startswith('; ========'):
            break
        if line.startswith('; ALIGNING'):
            fields = line.split()
            name1, name2 = fields[2], fields[4]
        elif line.startswith('; ALIGN %s' % name1):
            seq1 += ''.join(line.split()[3:])
        elif line.startswith('; ALIGN %s' % name2):
            seq2 += ''.join(line.split()[3:])
        elif line.startswith('; ALIGN Structure'):
            structure_chars += ''.join(line.split()[3:])
    pairs = ViennaStructure(structure_chars).toPairs()
    pairs.sort()
    parsed.append([pairs, seq1, seq2])
    return parsed
def check_config_db():
    """Check that the config DB has the configured PBs.

    Only run this step if the config DB is enabled.  Loads the Configure
    command payload from data/command_Configure.json and asserts that every
    processing block id it lists is present in the configuration database.
    """
    if ska_sdp_config is not None \
            and SDPSubarray.is_feature_active('config_db'):
        filename = join(dirname(__file__), 'data', 'command_Configure.json')
        with open(filename, 'r') as file:
            config = json.load(file)
        config_db_client = ska_sdp_config.Config()
        # txn() yields the transaction (re-running the body on conflicts
        # until it commits cleanly).
        for txn in config_db_client.txn():
            pb_ids = txn.list_processing_blocks()
            for pb in config['processingBlocks']:
                assert pb['id'] in pb_ids
def chain(*fs: Callable) -> Callable:
    """
    Compose given functions in reversed order.
    Given functions f, g, the result of chain is chain(f, g) = g o f.
    >>> def f(x: int) -> int:
    ...     return x + 1
    >>> def g(x: int) -> str:
    ...     return str(x)
    >>> chain(f, g)(41)
    '42'
    Chaining single function is the function itself.
    >>> chain(f) is f
    True
    Empty function chain is identity.
    >>> chain()(42)
    42
    """
    composed: Callable = compose(*fs[::-1])
    return composed
def astra_fp_2d_fan(volume, angles, source_object, object_det):
    """
    Forward-project a 2-D volume with a fan-beam geometry via ASTRA.

    :param volume: 2-D array; its second dimension is used as the detector size
    :param angles: projection angles, degrees
    :param source_object: source-to-object distance (presumably in geometry
        units of build_proj_geometry_fan_2d — confirm)
    :param object_det: object-to-detector distance (same caveat as above)
    :return: the forward projection (sinogram) produced by astra_fp_2d
    """
    detector_size = volume.shape[1]
    proj_geom = build_proj_geometry_fan_2d(detector_size, angles, source_object, object_det)
    rec = astra_fp_2d(volume, proj_geom)
    return rec
def get_remappings_prefix() -> Mapping[str, str]:
    """Get the remappings for xrefs based on the prefix.

    .. note:: Doesn't take into account the semicolon `:`
    """
    registry = _get_curated_registry()
    return registry['remappings']['prefix']
def generate_schema():
    """Build a table schema from today's dataset CSV and dump it to JSON.

    Reads ``complete_dataset_<dd_mm_YYYY>.csv`` from the working directory
    and writes the inferred schema to ``json_schema_for_big_query.json``.
    Always returns None.
    """
    today = date.today().strftime("%d_%m_%Y")
    dataset = pd.read_csv("complete_dataset_{}.csv".format(today))
    schema = pd.io.json.build_table_schema(dataset)
    with open("json_schema_for_big_query.json", "w", encoding="utf-8") as out:
        json.dump(schema, out, ensure_ascii=False, indent=4)
    return None
def upsample_labels_in_dir(labels_dir,
                           target_res,
                           result_dir,
                           path_label_list=None,
                           path_freesurfer='/usr/local/freesurfer/',
                           recompute=True):
    """This function upsamples all label maps within a folder. Importantly, each label map is converted into probability
    maps for all label values, and all these maps are upsampled separately. The upsampled label maps are recovered by
    taking the argmax of the label values probability maps.
    :param labels_dir: path of directory with label maps to upsample
    :param target_res: resolution at which to upsample the label maps. can be a single number (isotropic), or a list.
    :param result_dir: path of directory where the upsampled label maps will be written
    :param path_label_list: (optional) path of numpy array containing all label values.
    Computed automatically if not given.
    :param path_freesurfer: (optional) path freesurfer home (upsampling performed with mri_convert)
    :param recompute: (optional) whether to recompute result files even if they already exists
    """
    # prepare result dir
    utils.mkdir(result_dir)
    # set up FreeSurfer
    os.environ['FREESURFER_HOME'] = path_freesurfer
    os.system(os.path.join(path_freesurfer, 'SetUpFreeSurfer.sh'))
    mri_convert = os.path.join(path_freesurfer, 'bin/mri_convert')
    # list label maps
    path_labels = utils.list_images_in_folder(labels_dir)
    labels_shape, aff, n_dims, _, h, _ = utils.get_volume_info(path_labels[0], max_channels=3)
    # build command
    target_res = utils.reformat_to_list(target_res, length=n_dims)
    post_cmd = '-voxsize ' + ' '.join([str(r) for r in target_res]) + ' -odt float'
    # load label list and corresponding LUT to make sure that labels go from 0 to N-1
    label_list, _ = utils.get_list_labels(path_label_list, labels_dir=path_labels, FS_sort=False)
    new_label_list = np.arange(len(label_list), dtype='int32')
    lut = utils.get_mapping_lut(label_list)
    # loop over label maps
    loop_info = utils.LoopInfo(len(path_labels), 5, 'upsampling', True)
    for idx, path_label in enumerate(path_labels):
        loop_info.update(idx)
        path_result = os.path.join(result_dir, os.path.basename(path_label))
        if (not os.path.isfile(path_result)) | recompute:  # '|' acts as boolean OR here (both operands are bools)
            # load volume
            labels, aff, h = utils.load_volume(path_label, im_only=False)
            labels = lut[labels.astype('int')]
            # create individual folders for label map
            basefilename = utils.strip_extension(os.path.basename(path_label))
            indiv_label_dir = os.path.join(result_dir, basefilename)
            upsample_indiv_label_dir = os.path.join(result_dir, basefilename + '_upsampled')
            utils.mkdir(indiv_label_dir)
            utils.mkdir(upsample_indiv_label_dir)
            # loop over label values: write one binary mask per label, then
            # upsample each mask as a float probability map with mri_convert
            for label in new_label_list:
                path_mask = os.path.join(indiv_label_dir, str(label)+'.nii.gz')
                path_mask_upsampled = os.path.join(upsample_indiv_label_dir, str(label)+'.nii.gz')
                if not os.path.isfile(path_mask):
                    mask = (labels == label) * 1.0
                    utils.save_volume(mask, aff, h, path_mask)
                if not os.path.isfile(path_mask_upsampled):
                    cmd = utils.mkcmd(mri_convert, path_mask, path_mask_upsampled, post_cmd)
                    os.system(cmd)
            # compute argmax of upsampled probability maps (upload them one at a time)
            probmax, aff, h = utils.load_volume(os.path.join(upsample_indiv_label_dir, '0.nii.gz'), im_only=False)
            labels = np.zeros(probmax.shape, dtype='int')
            for label in new_label_list:
                prob = utils.load_volume(os.path.join(upsample_indiv_label_dir, str(label) + '.nii.gz'))
                idx = prob > probmax
                labels[idx] = label
                probmax[idx] = prob[idx]
            # map compact labels back to original label values before saving
            utils.save_volume(label_list[labels], aff, h, path_result, dtype='int32')
def test_zamichani_hry():
    """Every game should be different (the deck must be shuffled)."""
    from klondike import udelej_hru
    hra1 = udelej_hru()
    hra2 = udelej_hru()
    # There is a 1 in 80658175170943878571660636856403766975289505440883277824000000000000
    # (i.e. 52!) chance that two random games come out identical.
    # It is probably more likely that the computer running this test dies
    # mid-run than that the cards shuffle the same way twice.
    assert hra1 != hra2, 'Karty nejsou zamíchané!'
def create_search_index(*args, **kwargs):
    """
    Creates the search index in elastic search.

    Accepts (and ignores) arbitrary arguments so it can be wired up as a
    signal/task callback; delegates to the 'setup_index' management command.
    """
    manage('setup_index')
def read_hdf5(filename, **kwargs):
    """
    read a grid file in hdf5 into a microquake.core.data.grid.GridCollection
    object
    :param filename: filename
    :param kwargs: additional keyword argument passed from wrapper.
    :return: microquake.core.data.grid.GridCollection

    NOTE(review): the body is not implemented here — the function currently
    returns None. Confirm whether the implementation lives elsewhere or is
    still TODO.
    """
def binary_truncated_sprt_with_llrs(llrs, labels, alpha, beta, order_sprt):
    """ Used in run_truncated_sprt_with_llrs .
    Runs a binary truncated SPRT on precomputed log-likelihood ratios: at
    every frame the LLR is compared to the two SPRT thresholds, undecided
    samples are forced to a decision at the final frame, and the confusion
    matrix / hitting-time statistics are computed.
    Args:
        llrs: A Tensor with shape (batch, duration). LLRs (or scores) of all frames.
        labels: A Tensor with shape (batch,).
        alpha : A float. Type-I error bound used to derive the thresholds.
        beta: A float. Type-II error bound used to derive the thresholds.
        order_sprt: An int. (Unused in this function body.)
    Returns:
        confmx: A Tensor with shape (2, 2).
        mean_hittime: A scalar Tensor.
        var_hittime: A scalar Tensor.
        truncate_rate: A scalar Tensor (fraction of samples still undecided
            at the last frame).
    """
    llrs_shape = llrs.shape
    duration = int(llrs_shape[1])
    batch_size = llrs_shape[0]
    assert batch_size != 0
    # Calc thresholds (classic Wald SPRT bounds from alpha and beta)
    thresh = [np.log(beta/(1-alpha)), np.log((1-beta)/alpha)]
    if not ( (thresh[1] >= thresh[0]) and (thresh[1] * thresh[0] < 0) ):
        raise ValueError("thresh must be thresh[1] >= thresh[0] and thresh[1] * thresh[0] < 0. Now thresh = {}".format(thresh))
    # Calc all predictions and waits
    signs1 = (tf.sign(llrs - thresh[1]) + 1)/2 # 1:hit, 0:wait
    signs0 = (-1 - tf.sign(thresh[0] - llrs))/2 # -1:hit, 0:wait
    preds_all_frames = signs1 + signs0 # (batch, duration), value= +1, 0, -1
    # Calc truncate rate: product over time is 1 only if the sample waited
    # at every single frame (never crossed a threshold).
    hit_or_wait_all_frames = -(tf.abs(preds_all_frames) - 1) # wait=1, hit=0
    truncate_rate = tf.reduce_mean(tf.reduce_prod(hit_or_wait_all_frames, 1), 0)
    # Truncate survivors (forced decision): the last frame always decides by
    # the sign of its LLR.
    preds_last_frame = tf.sign(llrs[:,-1]) # (batch,) value= +1, -1
    preds_last_frame = tf.expand_dims(preds_last_frame, -1) # (batch, 1)
    preds_all_frames_trunc = tf.concat([preds_all_frames[:,:-1], preds_last_frame], -1) # (batch, duration-1)+(batch,1)=(batch, duration)
    if duration == 1:
        # Calc mean hitting time and confusion matrix
        mean_hittime = tf.constant(1., tf.float32)
        preds = preds_all_frames_trunc[:,0] # (batch,)
        preds = tf.cast((preds + 1) / 2, tf.int32)
        confmx = tf.math.confusion_matrix(labels, preds, num_classes=2, dtype=tf.int32)
    else:
        # Calc mean hitting time.  The countdown mask (duration..1) weights
        # earlier frames higher, so the max/min of the masked predictions
        # identifies the earliest frame at which each threshold was crossed.
        mask = tf.constant([i+1 for i in range(duration)][::-1], tf.float32)
        mask = tf.tile(mask, [batch_size,])
        mask = tf.reshape(mask, [batch_size, duration])
        masked = preds_all_frames_trunc * mask # (batch, duration)
        signed_hittimes1 = tf.reduce_max(masked, 1, keepdims=True)
        signed_hittimes0 = tf.reduce_min(masked, 1, keepdims=True)
        signed_hittimes0_abs = tf.abs(signed_hittimes0)
        signed_hittimes_twin = tf.concat([signed_hittimes1, signed_hittimes0], 1)
        hittimes_twin = tf.abs(signed_hittimes_twin)
        # Whichever threshold was crossed first (larger countdown value) wins.
        answers1 = tf.greater(signed_hittimes1, signed_hittimes0_abs)
        answers0 = tf.less(signed_hittimes1, signed_hittimes0_abs)
        answers = tf.concat([answers1, answers0], 1)
        hittimes = hittimes_twin[answers]
        hittimes = duration - hittimes + 1
        mean_hittime, var_hittime = tf.nn.moments(hittimes, axes=[0])
        # Calc confusion matrix (map prediction signs {-1,+1} to labels {0,1})
        signs_twin = tf.sign(signed_hittimes_twin)
        preds = signs_twin[answers]
        preds = tf.cast((preds + 1) / 2, tf.int32)
        confmx = tf.math.confusion_matrix(labels, preds, num_classes=2, dtype=tf.int32)
    return confmx, mean_hittime, var_hittime, truncate_rate
def backoff(action, condition, max_attempts=40):
    """
    Calls result = action() up to max_attempts times until condition(result)
    becomes true, with a 30 s pause between attempts.

    :param action: zero-argument callable producing a result to test
    :param condition: predicate applied to the result of action()
    :param max_attempts: maximum number of attempts before giving up
    :return: True if condition(result) was met within max_attempts, else False
    """
    timeout = 30
    for attempt in range(max_attempts):
        result = action()
        if condition(result):
            return True
        # Bug fix: this was `printf(...)`, which is undefined in Python and
        # raised NameError on every unsuccessful attempt.
        print("Condition not met, retrying in {0} seconds...".format(timeout))
        time.sleep(timeout)
    return False
def get_file_info(repo, path):
    """Return (change_count, last_change, nbr_committers) for *path*.

    Iterates the commits touching the path; last_change is the committed
    date of the final commit yielded by the iterator.
    """
    distinct_committers = set()
    last_change = None
    change_count = 0
    for commit in repo.iter_commits(paths=path):
        distinct_committers.add(commit.committer)
        last_change = commit.committed_date
        change_count += 1
    return change_count, last_change, len(distinct_committers)
def retryable_session(session: requests.Session, retries: int = 8) -> requests.Session:
    """
    Configure *session* to retry failed requests automatically.

    :param session: Session to download with
    :param retries: How many retries to attempt
    :return: the same session, with retrying adapters mounted for http/https
    """
    retry_policy = urllib3.util.retry.Retry(
        total=retries,
        read=retries,
        connect=retries,
        backoff_factor=0.3,
        status_forcelist=(500, 502, 504),
    )
    adapter = requests.adapters.HTTPAdapter(max_retries=retry_policy)
    for scheme in ("http://", "https://"):
        session.mount(scheme, adapter)
    return session
def load_model(checkpoint: Dict[str, Dict], model: Optional['BaseMethod'] = None,
               buffer: Optional['BufferBase'] = None,
               datasets: Optional[Dict[str, Dataset]] = None) -> None:
    """
    Loads the state dicts of the model, buffer and datasets.

    Args:
        checkpoint (Dict[str, Dict]): A dictionary of the state dictionaries
        model (Optional[BaseMethod]): The Method object (subclass of BaseMethod) whose state
            should be restored (Default: None)
        buffer (Optional[BufferBase]): The buffer object whose state should be restored
            (Default: None)
        datasets (Optional[Dict[str, Dataset]]): The dataset splits whose state should be
            restored (Default: None)
    """
    if model is not None:
        model.load_method_state_dict(checkpoint["method_state_dict"])
    if buffer is not None:
        buffer.load_buffer_state_dict(checkpoint["buffer_state_dict"])
    if datasets is not None:
        dataset_states = checkpoint['datasets_state_dict']
        for split, dataset in datasets.items():
            dataset.load_dataset_state_dict(dataset_states[split])
def apply_affine(x, y, z, affine):
    """ Apply the affine matrix to the given coordinates.

    Parameters
    ----------
    x: number or ndarray
        The x coordinates
    y: number or ndarray
        The y coordinates
    z: number or ndarray
        The z coordinates
    affine: 4x4 ndarray
        The affine matrix of the transformation

    Returns
    -------
    x, y, z : ndarrays
        The transformed coordinates, reshaped to the input shape.

    Raises
    ------
    ValueError
        If the three coordinate arrays do not share the same shape.
    """
    # Accept plain numbers as documented: the previous implementation
    # accessed `.shape` directly and crashed on scalar inputs.
    x = np.asarray(x)
    y = np.asarray(y)
    z = np.asarray(z)
    shape = x.shape
    # Raise instead of assert so the check survives `python -O`.
    if y.shape != shape or z.shape != shape:
        raise ValueError('Coordinate shapes are not equal')
    # Ravel, but avoiding a copy if possible
    x = np.reshape(x, (-1,))
    y = np.reshape(y, (-1,))
    z = np.reshape(z, (-1,))
    # Stack homogeneous coordinates (x, y, z, 1) as columns.
    in_coords = np.c_[x,
                      y,
                      z,
                      np.ones(x.shape)].T
    x, y, z, _ = np.dot(affine, in_coords)
    x = np.reshape(x, shape)
    y = np.reshape(y, shape)
    z = np.reshape(z, shape)
    return x, y, z
def binary_dilation_circle(input, radius):
    """Dilate with disk of given radius.

    Parameters
    ----------
    input : array_like
        Input array (treated as a boolean mask)
    radius : float
        Dilation radius (pix)

    Returns
    -------
    ndarray
        Boolean array: `input` dilated with the disk-shaped structuring
        element produced by ``binary_disk(radius)``.
    """
    from scipy.ndimage import binary_dilation
    structure = binary_disk(radius)
    return binary_dilation(input, structure)
def test_default_values():
    """
    Tests that default values seem to have a physical meaning,
    e.g. rho decreases with T, increases with S
    """
    T_ref = 10
    S_ref = 35
    # Reference surface density at (T=10, S=35) should be ~1026 kg/m^3.
    assert isclose(eos.compute_rho(T=T_ref, S=S_ref, z=0), 1026)
    all_T = np.linspace(-1, 30, 10)
    all_S = np.linspace(31, 37, 10)
    # Density must be strictly decreasing in temperature...
    for (t1, t2) in zip(all_T[:-1], all_T[1:]):
        # t1 < t2
        assert eos.compute_rho(T=t1, S=S_ref, z=0) > eos.compute_rho(T=t2, S=S_ref, z=0)
    # ...and strictly increasing in salinity.
    for (s1, s2) in zip(all_S[:-1], all_S[1:]):
        # s1 < s2
        assert eos.compute_rho(T=T_ref, S=s1, z=0) < eos.compute_rho(T=T_ref, S=s2, z=0)
    # alpha (thermal expansion) should increase with the temperature
    for (t1, t2) in zip(all_T[:-1], all_T[1:]):
        # t1 < t2
        assert eos.compute_alpha(T=t1, S=S_ref, z=0) < eos.compute_alpha(
            T=t2, S=S_ref, z=0
        )
def set_debug(enabled):
    """
    Enable or disable the debug mode. In debug mode, a bunch of extra checks in claripy will be executed. You'll want to
    disable debug mode if you are running performance critical code.

    :param enabled: truthy to enable the extra checks, falsy to disable them
        (stored as-is in the module-level _DEBUG flag).
    """
    global _DEBUG
    _DEBUG = enabled
def square_valid(board: Board, n: int, pawn_value: int, x: int, y: int) -> bool:
    """Return True when (x, y) lies on the n-sized board and the square can
    receive a pawn of the given value."""
    if not coordinates_within_board(n, x, y):
        return False
    return square_playable(board, pawn_value, x, y)
def test_Remove_Disk_At__Overflow_Disk(score, max_score):
    """Function remove_disk_at: overflow disk.

    Grading-harness test: places a visible disk above the board top (y=5),
    removes it, and checks the disk below is untouched.  Points are only
    awarded when every assertion passes; any exception simply means no
    points (hence the deliberate bare try/except).
    """
    max_score.value += 2
    try:
        set_up()
        disk = Disk.init_disk(Disk.VISIBLE,4)
        Board.set_disk_at(test_board_4,(3,5),disk)
        Board.remove_disk_at(test_board_4, (3, 5))
        assert not Board.has_disk_at(test_board_4, (3, 5))
        # The disk one row below must survive the removal.
        assert Board.get_disk_at(test_board_4, (3, 4)) == visible_disk_value_3
        score.value += 2
    except:
        # Swallow all failures: the test scores 0 instead of erroring out.
        pass
def epb2jd(epb):
    """ Besselian epoch to Julian date.
    :param epb: Besselian epoch.
    :type epb: float
    :returns: a tuple of two items:
        * MJD zero-point, always 2400000.5 (float)
        * modified Julian date (float).
    .. seealso:: |MANUAL| page 76
    """
    # SOFA writes its results through C double out-parameters.
    mjd_zero = _ct.c_double()
    mjd = _ct.c_double()
    _sofa.iauEpb2jd(epb, _ct.byref(mjd_zero), _ct.byref(mjd))
    return mjd_zero.value, mjd.value
def one_hot_decision_function(y):
    """
    Convert per-row class scores into a one-hot matrix marking the argmax.

    Examples
    --------
    >>> y = [[0.1, 0.4, 0.5],
    ...      [0.8, 0.1, 0.1],
    ...      [0.2, 0.2, 0.6],
    ...      [0.3, 0.4, 0.3]]
    >>> one_hot_decision_function(y)
    array([[ 0.,  0.,  1.],
           [ 1.,  0.,  0.],
           [ 0.,  0.,  1.],
           [ 0.,  1.,  0.]])
    """
    result = np.zeros_like(y)
    winners = np.argmax(y, axis=1)
    rows = np.arange(len(result))
    result[rows, winners] = 1
    return result
def combine_incomes(toshl_income, excel_income):
    """
    Combines two data sources of incomes: toshl incomes and incomes from cashflow excel.
    :param toshl_income: Preprocessed dataframe of toshl incomes (after cleaning and splitting);
        must have Date, Tags and Amount columns
    :param excel_income: Raw excel income data with German columns Datum/Art/Betrag
    :return: Total income summed per calendar month and tag
    """
    df_in = toshl_income.reset_index().copy()
    # Collapse the personal income tags into a single "Salary" tag.
    df_in["Tags"] = df_in["Tags"].apply(lambda x: "Salary" if x in ["Privat", "NHK", "OL"] else x)
    df_in2 = excel_income.copy()
    # Map the German excel columns onto the toshl schema.
    df_in2 = df_in2[["Datum", "Art", "Betrag"]].rename(columns={"Datum": "Date",
                                                                "Art": "Tags",
                                                                "Betrag": "Amount"}).dropna()
    df_in2["Date"] = pd.to_datetime(df_in2["Date"], format="%d.%m.%Y")
    df_in2["Tags"] = df_in2["Tags"].apply(lambda x: "Salary" if x in ["Gehalt", "Sodexo"] else x)
    df_income = pd.concat([df_in, df_in2], ignore_index=True)
    # Sanity check: concatenation must not drop or duplicate any row.
    assert df_income.count()[0] == df_in.count()[0] + df_in2.count()[0], "Some income rows were lost!"
    # Aggregate to monthly totals per tag.
    df_income = df_income.groupby([pd.Grouper(key='Date', freq='1M'), 'Tags']).sum()
    return(df_income)
def generate_k(data_set, k):
    """
    Given `data_set`, which is an array of arrays,
    find the minimum and maximum for each coordinate (a range) and draw
    `k` uniformly random points inside those per-dimension ranges.
    Return the list of random points.
    """
    dimensions = len(data_set[0])
    # Per-dimension minima and maxima, seeded from the first point.
    lows = list(data_set[0])
    highs = list(data_set[0])
    for point in data_set:
        for dim in range(dimensions):
            value = point[dim]
            if value < lows[dim]:
                lows[dim] = value
            if value > highs[dim]:
                highs[dim] = value
    # Draw k points; dimension order matters for RNG reproducibility.
    centers = []
    for _ in range(k):
        centers.append([uniform(lows[dim], highs[dim]) for dim in range(dimensions)])
    return centers
def __ping_url(url: str) -> bool:
    """Check a link for rotting.

    Issues a HEAD request and reports whether the server answered with a
    healthy status code; any network failure counts as a rotten link.
    """
    try:
        # A timeout is essential here: without one a single unresponsive
        # host could hang the whole link check indefinitely.
        r = requests.head(url, timeout=10)
        return r.status_code in (
            requests.codes.ok,
            requests.codes.created,
            requests.codes.no_content,
            requests.codes.not_modified,
        )
    except Exception:
        # Deliberate best-effort: any error (DNS, SSL, timeout, ...) just
        # marks the link as unreachable.
        return False
def get_all_list_data():
    """
    Handles the GET request to '/get-all-list-data'.

    Fetches all types, tags, research groups and employees from the database
    (inactive entries excluded where the DAO takes a flag) and serialises
    them into one JSON payload.

    :return: Json with all list data
    """
    conn = get_db()
    # False -> exclude inactive/archived entries (per the DAO flag).
    all_types = TypeDataAccess(conn).get_types(False)
    all_tags = TagDataAccess(conn).get_tags()
    all_groups = ResearchGroupDataAccess(conn).get_research_groups(False)
    all_employees = EmployeeDataAccess(conn).get_employees(False)
    result = {
        "types": [obj.to_dict() for obj in all_types],
        "tags": all_tags,
        "research groups": [obj.to_dict() for obj in all_groups],
        "employees": [obj.to_dict() for obj in all_employees]
    }
    return jsonify(result)
def truncate_range(data, percMin=0.25, percMax=99.75, discard_zeros=True):
    """Truncate too low and too high values (in place).

    Values below/above the given percentiles are clipped to the percentile
    bounds; NaNs are left untouched, and (optionally) zero voxels are
    excluded from the percentile computation and restored afterwards.

    Parameters
    ----------
    data : np.ndarray
        Image to be truncated (modified in place and also returned).
    percMin : float
        Percentile minimum.
    percMax : float
        Percentile maximum.
    discard_zeros : bool
        Discard voxels with value 0 from truncation.

    Returns
    -------
    data : np.ndarray
        Truncated data (same object as the input).
    pMin : float
        Minimum truncation threshold which is used.
    pMax : float
        Maximum truncation threshold which is used.
    """
    if discard_zeros:
        nonzero = ~np.isclose(data, 0.)
        pMin, pMax = np.nanpercentile(data[nonzero], [percMin, percMax])
    else:
        pMin, pMax = np.nanpercentile(data, [percMin, percMax])
    finite = ~np.isnan(data)
    data[finite] = np.clip(data[finite], pMin, pMax)
    if discard_zeros:
        data[~nonzero] = 0  # put back masked out voxels
    return data, pMin, pMax
def obtain_dcdb_to_drugbank(biana_cnx, unification_protocol, output_pickle_file):
    """
    Obtain a dictionary {dcdb : set(drugbank ids)} by joining DCDB and
    DrugBank external entities through the BIANA unification tables, and
    pickle it to `output_pickle_file`.

    Python 2 code (cPickle, text-mode 'w' pickle file).

    :param biana_cnx: open MySQL connection to the BIANA database
    :param unification_protocol: name used to resolve the unification table
    :param output_pickle_file: path the resulting dict is pickled to
    """
    up_table = return_unification_protocol_table(biana_cnx, unification_protocol)
    # Join DCDB drug ids to DrugBank ids via the shared userEntityID.
    query = ('''SELECT DC.value, DB.value FROM externalEntityDCDB_drugID DC, {} U1, {} U2, externalEntityDrugBankID DB
                WHERE DC.externalEntityID = U1.externalEntityID AND U1.userEntityID = U2.userEntityID AND U2.externalEntityID = DB.externalEntityID
             '''.format(up_table, up_table))
    cursor = biana_cnx.cursor()
    cursor.execute(query)
    dcdb_to_drugbank = {}
    # One DCDB drug can unify to several DrugBank entries, hence the set.
    for items in cursor:
        dcdb = items[0]
        drugbank = items[1]
        dcdb_to_drugbank.setdefault(dcdb, set())
        dcdb_to_drugbank[dcdb].add(drugbank)
    cursor.close()
    print(dcdb_to_drugbank)  # NOTE(review): debug print left in — consider removing
    cPickle.dump(dcdb_to_drugbank, open(output_pickle_file, 'w'))
    return dcdb_to_drugbank
def duo_username(user):
    """Return the Duo username for *user* (its ``username`` attribute)."""
    return user.username
def random_number_list(data=None):
    """Append `list_length` random integers between 0 and 10 (both inclusive)
    to `data` and return it.

    Note: randint(0, 10) is inclusive of both ends, so values span 0..10
    (the old docstring claimed 0..9).

    :param data: optional list to extend; a fresh list is created when
        omitted.  (Bug fix: the original used a mutable default argument,
        so repeated calls kept appending to one shared list.)
    """
    if data is None:
        data = []
    for _ in range(list_length):
        # append a random int to the data list
        data.append(random.randint(0, 10))
    return data
def _UpdateFailureInfoBuilds(failed_steps, builds):
    """Deletes builds that are before the farthest last_pass.

    Finds the smallest ``last_pass`` build number across all failed steps and
    removes every older build from ``builds`` in place.  Bails out without
    deleting anything if any step has no ``last_pass`` yet.  (Python 2 code:
    itervalues(); keys() returns a list, so deleting while looping is safe.)
    """
    build_numbers_in_builds = builds.keys()
    latest_last_pass = -1
    for failed_step in failed_steps.itervalues():
        if not failed_step.last_pass:
            return
        # Track the minimum last_pass seen so far (-1 means "unset").
        if (latest_last_pass < 0 or latest_last_pass > failed_step.last_pass):
            latest_last_pass = failed_step.last_pass
    for build_number in build_numbers_in_builds:
        if build_number < latest_last_pass:
            del builds[build_number]
def _matrix_method_reshape(df: pd.DataFrame) -> pd.DataFrame:
"""
Reshape df for matrix method and deal with missing values.
We first drop columns which contain all missing values, transpose
the dataframe and then fill the remaining missing values with zero,
to deal with missing items in some periods.
Parameters
----------
df : pd.DataFrame
The dataframe to reshape.
Returns
-------
pd.DataFrame
The reshaped dataframe.
"""
return df.dropna(how='all', axis=1).T.fillna(0) | 5,329,573 |
def orbitrap(file_path):
    """Import Orbitrap data from XCalibur export. Designed for scan by scan Orbitrap data.
    Original export of example data performed by Cech lab @ UNCG. Example data in MS_data external in Cech directory

    Reads every ``*.mzML.binary.<scan>.txt`` file in `file_path`, extracts the
    retention time from the comment header and all (m/z, intensity) pairs
    above ``config.intensity_cutoff``, and returns them as a DataFrame with
    columns scan, rt, mz, drift, intensity (drift is always None here).
    """
    headers = ["scan", "rt", "mz", "drift", "intensity"]
    input_data = []
    intensity_cutoff = config.intensity_cutoff
    for path_name in glob.glob(os.path.join(file_path, "*.mzML.binary.*.txt")):
        # NOTE(review): splitting on "/" assumes POSIX paths — consider os.path.basename.
        file_name = path_name.split("/")[-1]
        scan_number = int(file_name.split(".")[-2])
        # First pass: pull the retention time from the comment header.
        with open(path_name) as f:
            for row in f:
                if row.startswith("# retentionTime:"):
                    retention_time = float(row.split(" ")[-1])
                    break
        # Second pass: read tab-separated (m/z, intensity) rows, skipping
        # comment lines and peaks below the configured cutoff.
        with open(path_name) as f:
            csv_f = csv.reader(f, delimiter="\t")
            for row in csv_f:
                if not row[0].startswith("#"):
                    intensity = round(float(row[1]), 0)
                    mass = round(float(row[0]), 4)
                    if intensity >= intensity_cutoff:
                        input_data.append([scan_number, retention_time, mass, None, intensity])
    # NOTE(review): index=str passes the builtin `str` as from_records' index
    # argument — confirm this is intended.
    orbitrap_dataframe = pd.DataFrame.from_records(input_data, columns=headers, index=str)
    return orbitrap_dataframe
def sim_bursty_oscillator(T, Fs, freq, prob_enter_burst=.1,
                          prob_leave_burst=.1, cycle_features=None,
                          return_cycle_df=False):
    """Simulate a bursty oscillator: a two-state Markov chain decides cycle
    by cycle whether the oscillation is on or off, and each oscillating
    cycle's amplitude, period and rise-decay symmetry are drawn from
    configurable distributions.  (The previous docstring header described a
    band-pass filtered 1/f^2 signal, which was a copy-paste error.)
    Parameters
    ----------
    freq : float
        oscillator frequency
    T : float
        signal duration (seconds)
    Fs : float
        signal sampling rate
    prob_enter_burst : float
        probability of a cycle being oscillating given
        the last cycle is not oscillating
    prob_leave_burst : float
        probability of a cycle not being oscillating
        given the last cycle is oscillating
    cycle_features : dict
        specify the mean and standard deviations
        (within and across bursts) of each cycle's
        amplitude, period, and rise-decay symmetry.
        This can include a complete or incomplete set
        (using defaults) of the following keys:
        amp_mean - mean cycle amplitude
        amp_std - standard deviation of cycle amplitude
        amp_burst_std - std. of mean amplitude for each burst
        period_mean - mean period (computed from `freq`)
        period_std - standard deviation of period (samples)
        period_burst_std - std. of mean period for each burst
        rdsym_mean - mean rise-decay symmetry
        rdsym_std - standard deviation of rdsym
        rdsym_burst_std - std. of mean rdsym for each burst
    return_cycle_df : bool
        if True, return the dataframe that contains the simulation
        parameters for each cycle. This may be useful for computing
        power, for example. Because the power of the oscillator
        should only be considered over the times where there's
        bursts, not when there's nothing.
    Returns
    -------
    signal : np.array
        bursty oscillator
    df : pd.DataFrame
        cycle-by-cycle properties of the simulated oscillator
    """
    # Define default parameters for cycle features
    mean_period_samples = int(Fs / freq)
    cycle_features_use = {'amp_mean': 1, 'amp_burst_std': .1, 'amp_std': .2,
                          'period_mean': mean_period_samples,
                          'period_burst_std': .1 * mean_period_samples,
                          'period_std': .1 * mean_period_samples,
                          'rdsym_mean': .5, 'rdsym_burst_std': .05, 'rdsym_std': .05}
    # Overwrite default cycle features with those specified
    if cycle_features is not None:
        for k in cycle_features:
            cycle_features_use[k] = cycle_features[k]
    # Determine number of cycles to generate (overestimate by 2x, trimmed later)
    N_samples = T * Fs
    N_cycles_overestimate = int(np.ceil(N_samples / mean_period_samples * 2))
    # Simulate if a series of cycles are oscillating or not oscillating
    # (two-state Markov chain driven by the enter/leave probabilities)
    is_oscillating = [False]
    N_cycles_current = 1
    while N_cycles_current < N_cycles_overestimate:
        rand_num = np.random.rand()
        if is_oscillating[-1]:
            is_oscillating.append(rand_num > prob_leave_burst)
        else:
            is_oscillating.append(rand_num < prob_enter_burst)
        N_cycles_current += 1
    # Determine period, amp, and rdsym for each cycle
    periods = []
    amps = []
    rdsyms = []
    for is_osc in is_oscillating:
        if is_osc is False:
            # Silent cycle: only a period is drawn; amp/rdsym are NaN, and the
            # per-burst means are reset so the next burst draws fresh ones.
            period = cycle_features_use['period_mean'] + \
                np.random.randn() * cycle_features_use['period_std']
            periods.append(int(period))
            amps.append(np.nan)
            rdsyms.append(np.nan)
            current_burst_period_mean = np.nan
            current_burst_amp_mean = np.nan
            current_burst_rdsym_mean = np.nan
        else:
            # First cycle of a burst: draw the burst-level means.
            if np.isnan(current_burst_period_mean):
                current_burst_period_mean = cycle_features_use['period_mean'] + \
                    np.random.randn() * cycle_features_use['period_burst_std']
                current_burst_amp_mean = cycle_features_use['amp_mean'] + \
                    np.random.randn() * cycle_features_use['amp_burst_std']
                current_burst_rdsym_mean = cycle_features_use['rdsym_mean'] + \
                    np.random.randn() * cycle_features_use['rdsym_burst_std']
            # Rejection-sample until all three features are positive.
            N_iter = 0
            period, amp, rdsym = 0, 0, 0
            while np.min([period, amp, rdsym]) <= 0:
                if N_iter > 0:
                    if period < 0:
                        feat0 = 'period'
                    elif rdsym < 0:
                        feat0 = 'rise-decay symmetry'
                    else:
                        feat0 = 'amp'
                    warnings.warn('Simulation settings are such that the {:s} is occasionally computed to be negative. You may want to reset your simulation settings'.format(feat0))
                period = current_burst_period_mean + \
                    np.random.randn() * cycle_features_use['period_std']
                amp = current_burst_amp_mean + \
                    np.random.randn() * cycle_features_use['amp_std']
                rdsym = current_burst_rdsym_mean + \
                    np.random.randn() * cycle_features_use['rdsym_std']
                N_iter += 1
            periods.append(int(period))
            amps.append(amp)
            rdsyms.append(rdsym)
    df = pd.DataFrame({'is_cycle': is_oscillating, 'period': periods,
                       'amp': amps, 'rdsym': rdsyms})
    df['start_sample'] = np.insert(df['period'].cumsum().values[:-1], 0, 0)
    # Shorten df to only cycles that start within the requested duration
    df = df[df['start_sample'] < N_samples]
    # Simulate time series for each cycle
    x = np.array([])
    last_cycle_oscillating = False
    for i, row in df.iterrows():
        # NOTE(review): 'is False' on a value pulled out of a DataFrame may
        # compare against np.bool_ rather than the Python bool singleton —
        # verify this branch actually triggers (consider `not row['is_cycle']`).
        if row['is_cycle'] is False:
            # If last cycle was oscillating, add a decay to 0 then 0s
            if last_cycle_oscillating:
                decay_pha = np.linspace(0, np.pi / 2, int(row['period'] / 4))
                decay_t = np.cos(decay_pha) * x[-1]
                x = np.append(x, decay_t)
                cycle_t = np.zeros(row['period'] - int(row['period'] / 4))
                x = np.append(x, cycle_t)
            else:
                # Add a blank cycle
                cycle_t = np.zeros(row['period'])
                x = np.append(x, cycle_t)
            last_cycle_oscillating = False
        else:
            # If the last cycle was NOT oscillating, overwrite the tail of the
            # silence with a rise from 0 up to this cycle's amplitude
            if not last_cycle_oscillating:
                rise_pha = np.linspace(-np.pi / 2, 0,
                                       int(row['period'] / 4))[1:]
                rise_t = np.cos(rise_pha) * row['amp']
                x[-len(rise_t):] = rise_t
            # Add a cycle with rdsym
            rise_samples = int(np.round(row['period'] * row['rdsym']))
            decay_samples = row['period'] - rise_samples
            pha_t = np.hstack([np.linspace(0, np.pi, decay_samples + 1)[1:],
                               np.linspace(-np.pi, 0, rise_samples + 1)[1:]])
            cycle_t = np.cos(pha_t)
            # Adjust decay if the last cycle was oscillating, so consecutive
            # cycles join smoothly at the previous sample's amplitude
            if last_cycle_oscillating:
                scaling = (row['amp'] + x[-1]) / 2
                offset = (x[-1] - row['amp']) / 2
                cycle_t[:decay_samples] = cycle_t[:decay_samples] * \
                    scaling + offset
                cycle_t[decay_samples:] = cycle_t[decay_samples:] * row['amp']
            else:
                cycle_t = cycle_t * row['amp']
            x = np.append(x, cycle_t)
            last_cycle_oscillating = True
    x = x[:N_samples]
    if return_cycle_df:
        return x, df
    else:
        return x
def visualize_percent_diff(df):
    """Creates a visualization of difference in percentage of tweets of a topic
    across the entire US and returns the mean sentiment felt about the
    topic across the entire US

    Parameters:
    -----------
    df: pd.DataFrame
        dataframe containing all tweets. Must contain the columns
            - state
            - sentiment

    Returns:
    --------
    map: folium.Map
        Choropleth map of the US, where the color refers to the percentage
        of expected tweet volume per state
    avg_sentiment: float
        The average sentiment of a topic
    """
    avg_sentiment = df.sentiment.mean()
    tweet_processor = process_tweets.TweetProcessor('models/stemmed_lr.pk')
    default_rate = tweet_processor.get_default_rate()
    # Share (%) of all tweets contributed by each state.
    df_grouped = df[['sentiment', 'state']].groupby(['state']).count()
    df_grouped['sentiment'] = 100.*df_grouped['sentiment']\
                              /df_grouped['sentiment'].sum()
    # Right-join on the expected per-state rate so every state appears,
    # even states with no tweets in `df`.
    df_grouped = pd.merge(df_grouped, default_rate, how='right',
                          left_index=True, right_index=True)
    # BUG FIX: fillna() returns a new frame and is not in-place; without
    # assigning the result back, states absent from `df` keep NaN and the
    # normalized ratio computed below becomes NaN for them.
    df_grouped = df_grouped.fillna(0.)
    # Observed share as a percentage of the expected share.
    df_grouped['sentiment'] = 100*df_grouped['sentiment']/df_grouped['rate']
    gdf = gpd.read_file('data/cb_2016_us_state_20m.dbf')
    merged_df = gdf.merge(df_grouped, how='left', left_on='NAME',
                          right_index=True)
    merged_df = merged_df.fillna(0)
    data_df = merged_df[['NAME', 'sentiment']].fillna(0)
    geo_str = merged_df[['NAME', 'geometry']].to_json()
    # Six evenly spaced color bins from 0 (or the data minimum, if negative)
    # up to the maximum value.
    threshold_scale = np.linspace(min(0, data_df['sentiment'].min()),
                                  data_df['sentiment'].max(),
                                  6)
    threshold_scale = list(threshold_scale)
    map1 = folium.Map(location=[+37, -100],
                      tiles='Cartodb Positron',
                      zoom_start=4)
    map1.choropleth(geo_data=geo_str,
                    data=data_df,
                    columns=['NAME', 'sentiment'],
                    fill_color='YlGn',
                    legend_name='percentage of expected',
                    name='topic: sentiment = {:.2f}'.format(avg_sentiment),
                    threshold_scale=threshold_scale,
                    key_on='feature.properties.NAME')
    return map1, avg_sentiment
def basic_compare(first, second, strict=False):
    """
    Comparison used for custom match functions,
    can do pattern matching, function evaluation or simple equality.
    Returns traceback if something goes wrong.
    """
    try:
        if is_regex(second):
            # Regex match: coerce non-strings unless strict mode is on.
            value = first
            if not strict and not isinstance(value, six.string_types):
                value = str(value)
            outcome = second.match(value) is not None
        elif callable(second):
            # Predicate match: truthiness of the callable's result.
            outcome = bool(second(first))
        else:
            # Plain equality.
            outcome = first == second
        return outcome, None
    except Exception as exc:
        return None, format_trace(inspect.trace(), exc)
def usage():
    """Print simple usage information for the bench-comparison script.

    BUG FIX: the original used Python 2 ``print`` statements, which are a
    syntax error under Python 3; converted to ``print()`` calls.
    """
    print('-o <file> the old bench output file.')
    print('-n <file> the new bench output file.')
    print('-h causes headers to be output.')
    print('-f <fieldSpec> which fields to output and in what order.')
    print('   Not specifying is the same as -f "bcondp".')
    print('   b: bench')
    print('   c: config')
    print('   o: old time')
    print('   n: new time')
    print('   d: diff')
    print('   p: percent diff')
def display(lines, out):
    """Display text to user.

    Args:
        lines (iterable of str): lines of text to display
        out (object): file object used by interpreter for output

    Note:
        original behavior: lines displayed in scrollable window, similar to
        "less" shell command.
        behavior with patch: lines printed to the console in standard format.
    """
    # Anything that is not the interpreter's stdout/stderr is redirected
    # to stdout so output always lands on the console.
    target = out if out in (sys.stdout, sys.stderr) else sys.stdout
    target.write('\n'.join(lines) + '\n')
def parse_line(line,):
    """Return a list of 2-tuples of the possible atomic valences for a given
    line from the APS defining sheet.

    The first four fields of the line are skipped; each remaining field is
    either "*" (no entry) or an integer count, paired with its valence index.
    """
    return [(valence, int(entry))
            for valence, entry in enumerate(line[4:])
            if entry != "*"]
def first_order_moments(X, min_words=3):
    """First-Order Moments

    Generate first order moment of the document-word frequency matrix.

    Parameters
    ----------
    X : array-like or sparse matrix, shape=(n_samples, n_features)
        Matrix of document-word frequency. `n_samples` is the
        number of documents and `n_features` is the number of unique
        words in the corpus.

    min_words : Integer, default=3
        Minimum number of words in each document. In LDA, the number
        is 3 since we need 3rd order moments.

    Returns
    -------
    e1 : array, shape=(n_features,)
        Expectation of each word in the input matrix.

    ignored: integer
        Number of ignored documents.
    """
    n_samples, n_features = X.shape
    is_sparse_x = sp.issparse(X)

    # Total word count per document; squeeze because sparse .sum(axis=1)
    # yields a column matrix.
    doc_word_cnts = np.asarray(X.sum(axis=1))
    if len(doc_word_cnts.shape) > 1:
        doc_word_cnts = np.squeeze(doc_word_cnts)

    if is_sparse_x:
        X_data = X.data
        X_indices = X.indices
        X_indptr = X.indptr

    ignored_docs = 0
    e1 = np.zeros(n_features)
    # TODO: optimize for loop with cython
    # BUG FIX: `xrange` does not exist in Python 3; use `range`.
    for idx_d in range(n_samples):
        # get word_id and count in each document
        words_cnt = doc_word_cnts[idx_d]
        if words_cnt < min_words:
            # Too short to contribute to third-order moments; skip it.
            ignored_docs += 1
            continue

        if is_sparse_x:
            ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
            cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
        else:
            ids = np.nonzero(X[idx_d, :])[0]
            cnts = X[idx_d, ids]

        # Accumulate each word's within-document relative frequency.
        for w_id, w_cnt in zip(ids, cnts):
            e1[w_id] += (w_cnt / float(words_cnt))

    # Average over the documents that were actually used.
    e1 /= (n_samples - ignored_docs)
    return (e1, ignored_docs)
def dashboard():
    """Return site-wide statistics (admin only)."""
    user = get_user_from_request()
    if not user.is_admin:
        # Only administrators may see the dashboard.
        return errors.no_access()
    total_users = User.select().count()
    # Users active within the last week.
    week_ago = datetime.datetime.now() - datetime.timedelta(days=7)
    active_last_week = (
        User.select().where(User.last_active_date > week_ago).count()
    )
    return jsonify(
        {"success": 1, "users": total_users,
         "active_users_7_days": active_last_week}
    )
def dct(f, axis=-1):
    """
    Compute the Discrete Cosine Transform over the specified axis.

    This is the orthonormal DCT-II: coefficient k is
    ``alpha_k * sum_i f[i] * cos(pi * k * (2i + 1) / (2n))`` with
    ``alpha_0 = 1/sqrt(n)`` and ``alpha_k = sqrt(2/n)`` otherwise.

    :param f: The input array.
    :param axis: Axis along which the DCT is computed. The default is over the last axis.
    :return c: The computed DCT.
    """
    n = f.shape[axis]
    k = np.arange(n)
    # Sample abscissae x_i = (2i + 1) / (2n), i = 0..n-1.
    x = (2 * np.arange(n) + 1) / (2 * n)
    # (n, n) matrix of cos(x_i * k * pi) values.
    basis = np.cos(np.outer(x, k * np.pi))
    # Orthonormalization factors.
    alpha = np.full(n, np.sqrt(2.0 / n))
    alpha[0] = 1.0 / np.sqrt(n)
    w = alpha * basis
    # Contract the chosen axis of f against the basis; tensordot leaves the
    # contracted axis last, so move it back into place.
    c = np.tensordot(f, w, axes=(axis, 0))
    return np.moveaxis(c, -1, axis)
def get_create_data_dir():
    """Get the data directory.

    When the directory does not exist it is created.
    """
    # Resolve the base data directory, expanding '~'.
    base = Path(get_data_dir()).expanduser()
    # Per-dataset subdirectory named after the configured dataset.
    target = base / _dataset_settings['name']
    target.mkdir(parents=True, exist_ok=True)
    return target
def test_gate_translation(gate):
    """Test gate operations with MyQLM interface"""
    # Each fixture entry is (operation, expected MyQLM translation).
    operation, expected = gate
    assert myqlm_call_operation(operation=operation, qureg=qubits) == expected
def interpolate_bezier(points, steps=100, **kwargs):
    """Generates an array of waypoints which lie on a 2D Bezier curve described by n (x, y) points. The trajectory is
    guaranteed to include the start and end points though only on (x, y, z) axes.
    The curve generated is of the nth degree, where n = len(points) - 1
        1st point is the start point.
        2nd point indicates the orientation at the start point.
        (n-1)th point indicates the orientation at the end point.
        nth point is the end point.
    For information about Bezier curve look at:
        - http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/Bezier/bezier-der.html

    :param points: (n, 2+) array of waypoints; rows must have 6 columns
        (x, y, z, roll, pitch, yaw) since the start pose is copied verbatim
    :param steps: number of interpolated points to generate
    :return: trajectory with interpolated points, shape (steps, 6)
    """
    # FIX: scipy.misc.comb was removed from SciPy; math.comb (Python 3.8+)
    # is an exact stdlib replacement for the binomial coefficient.
    from math import comb

    n = len(points) - 1
    t = np.linspace(0, 1, steps).reshape((steps, 1))
    B = np.zeros((steps, 6))
    # Sum the Bernstein basis terms: B(t) = sum_i C(n,i) (1-t)^(n-i) t^i P_i.
    # FIX: `xrange` is Python 2 only; use `range`.
    for i in range(n + 1):
        e1 = ((1 - t) ** (n - i) * t ** i).reshape((steps, 1))
        e2 = points[i, 0:2].reshape((1, 2))
        B[:, 0:2] += comb(n, i) * np.dot(e1, e2)
    # z is interpolated linearly between the start and end points.
    B[:, 2] = np.linspace(points[0, 2], points[-1, 2], steps)
    B[:, 3:5] = 0
    # calculate the xy slope (heading) at each point of the curve
    der_x = np.diff(B[:, 0])
    der_y = np.diff(B[:, 1])
    B[1:, 5] = np.arctan2(der_y, der_x)
    # add the initial point (restores the full 6-DOF start pose)
    B[0, :] = points[0]
    return B
def show_update_log_p_src_gives_correct_qualitative_behavior_for_examples(
        pos=(1.5, 0.7), dt=0.1, w=0.5,
        d=0.05, r=100, a=0.003, tau=1000, src_radius=0.02):
    """
    Plot the resulting source posteriors (where prior is uniform) following
    miss and hit at given sampling position.
    """
    from infotaxis import build_log_src_prior, update_log_p_src

    xs = np.linspace(0, 2, 101)
    ys = np.linspace(0, 1, 51)

    log_prior = build_log_src_prior('uniform', xs, ys)
    prior = np.exp(log_prior)

    def posterior_after(h):
        """Normalized posterior after one sample with hit indicator h."""
        log_p = update_log_p_src(
            pos=pos, xs=xs, ys=ys, dt=dt, h=h, w=w,
            d=d, r=r, a=a, tau=tau, src_radius=src_radius,
            log_p_src=log_prior)
        p = np.exp(log_p)
        return p / p.sum()

    p_miss = posterior_after(0)
    p_hit = posterior_after(1)

    # Image extent: half a grid cell beyond the first/last sample points.
    half_dx = np.diff(xs).mean() / 2
    half_dy = np.diff(ys).mean() / 2
    extent = [xs[0] - half_dx, xs[-1] + half_dx,
              ys[0] - half_dy, ys[-1] + half_dy]

    axs = plt.subplots(3, 1, figsize=(7, 10), tight_layout=True)[1]

    panels = zip(axs,
                 (prior, p_miss, p_hit),
                 ('prior', 'posterior after miss', 'posterior after hit'))
    for ax, field, title in panels:
        ax.imshow(field.T, origin='lower', extent=extent, cmap='hot')
        ax.set_xlabel('x (m)')
        ax.set_ylabel('y (m)')
        ax.set_title(title)
        set_font_size(ax, 14)
def update_readme(image_name: str, readme_path: str) -> None:
    """Update the README section for an image on DockerHub.

    FIX: the return annotation was ``-> str`` but the function never
    returns a value; annotated as ``-> None`` to match actual behavior.
    Also fixed the "seciton" typo in the log message.

    :param image_name: Name of the Docker image whose description to update.
    :param readme_path: Path to the local README file to upload.
    """
    show_info(f"Updating README section for {image_name}")
    repo = DOCKER_REPOSITORY.split("/")[-1]
    uri = f"{DOCKER_API_URL}/repositories/{repo}/{image_name}/"
    with open(readme_path) as fd:
        readme_content = fd.read()
    token = get_docker_token()
    # NOTE(review): the response is discarded, so a failed PATCH goes
    # unnoticed; consider checking the status code.
    requests.patch(
        uri,
        data=json.dumps({"full_description": readme_content}),
        headers={
            "Content-Type": "application/json",
            "Authorization": f"JWT {token}",
        },
    )
def _EAMS(track, Xmin=0.55, i0=12):
    """
    Early-Age Main Sequence. Without this, the low-mass tracks do not
    reach an EEP past the ZAMS before 15 Gyr.
    """
    # Thin wrapper: delegate directly to the shared IorT-AMS locator.
    return _IorT_AMS(track, Xmin, i0)
def neighboring_pairs(dataset, text_key='text', reuse_sentences=True):
    """Create a dataset consisting of neighboring sentence pairs.
    The input examples should have a key text_key associated with a tf.string
    value.
    The output examples have keys 'first' and 'second'.
    We only take sentence pairs from within the same line since lines seem to
    represent paragraph-like structures in our text datasets. Empty lines and
    1-sentence lines will thus be ignored.
    The argument reuse_sentences determines whether a sentence can be used as both
    the first and last element in the pair. For example, the input with sentences
    A,B,C,D will return (A,B),(B,C),(C,D) if reuse_sentences is True and
    (A,B),(C,D) if reuse_sentences is False.
    Args:
      dataset: a tf.data.Dataset
      text_key: a string, the key for the text feature to preprocess in the
        dataset examples.
      reuse_sentences: a boolean
    Returns:
      a tf.data.Dataset
    """
    def split_by_lines(dataset):
        """Splits text in dataset by line, removing empty lines."""
        def my_fn(text):
            # Split on newlines and strip surrounding whitespace per line.
            lines = tf.strings.split([text], sep='\n').values
            return tf.strings.strip(lines)

        dataset = dataset.map(
            my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
        # Flatten line batches into individual line examples.
        dataset = dataset.unbatch()
        return dataset.filter(lambda x: tf.strings.length(x) > 0)

    def split_into_pairs(line):
        """Split a given text example into pairs of neighboring sentences."""
        # TODO(mmatena): Use better sentence segmentation.
        # A fresh UUID serves as a sentinel separator that is vanishingly
        # unlikely to occur in natural text.
        sep = str(uuid.uuid4())
        # Insert the sentinel after each run of sentence-ending punctuation.
        sentences = tf.strings.regex_replace(line, r'((?:\.|\!|\?)+)', r'\1' + sep)
        sentences = tf.strings.strip(tf.strings.split([sentences], sep).values)
        if reuse_sentences:
            # Overlapping pairs: (s0,s1), (s1,s2), ...
            firsts = sentences[:-1]
            seconds = sentences[1:]
        else:
            # Disjoint pairs: (s0,s1), (s2,s3), ...
            firsts = sentences[:-1:2]
            seconds = sentences[1::2]
        return {
            'first': firsts,
            'second': seconds,
        }

    def example_len(x):
        # Length of the shorter member of the pair; 0 if either is empty.
        return tf.math.minimum(
            tf.strings.length(x['first']), tf.strings.length(x['second']))

    # Split by lines.
    dataset = dataset.map(
        lambda x: x[text_key], num_parallel_calls=tf.data.experimental.AUTOTUNE)
    dataset = split_by_lines(dataset)

    # Get pairs of neighboring sentences.
    dataset = dataset.map(
        split_into_pairs, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    dataset = dataset.unbatch()

    # Remove examples with empty strings.
    dataset = dataset.filter(lambda x: example_len(x) > 0)
    return dataset
def decode(tokenizer, token):
    """Decode a sequence of token ids into an answer string using the
    given tokenizer, dropping special tokens."""
    tokens = tokenizer.convert_ids_to_tokens(token, skip_special_tokens=True)
    return tokenizer.convert_tokens_to_string(tokens)
def test_clear_location():
    """
    Tests clear location
    :return:
    """
    a = ListFile()
    # try to clear location that isnt defined
    with pytest.raises(AssertionError):
        a.clear_location(1234)
    a.insert_data(1234, 'AAAA')
    # BUG FIX: `is` compares object identity, not value; relying on
    # string-literal interning is an implementation detail (and emits a
    # SyntaxWarning since Python 3.8). Use `==`.
    assert a.get_starting_data(1234) == 'AAAA'
    a.clear_location(1234)
    # Cleared location must raise again on access.
    with pytest.raises(AssertionError):
        a.get_starting_data(1234)
def davis_jaccard_measure(fg_mask, gt_mask):
    """ Compute region similarity as the Jaccard Index.

    :param fg_mask: (ndarray): binary segmentation map.
    :param gt_mask: (ndarray): binary annotation map.
    :return: jaccard (float): region similarity
    """
    # BUG FIX: the `np.bool` alias was removed in NumPy 1.24; the builtin
    # `bool` is the documented replacement.
    gt_mask = gt_mask.astype(bool)
    fg_mask = fg_mask.astype(bool)
    # Both masks empty: define similarity as perfect agreement.
    if np.isclose(np.sum(gt_mask), 0) and np.isclose(np.sum(fg_mask), 0):
        return 1
    else:
        # |intersection| / |union|
        return np.sum((gt_mask & fg_mask)) / \
            np.sum((gt_mask | fg_mask), dtype=np.float32)
def extract_logits(logits = None, seq_pos = None):
    """
    Args
        logits: Tensor(batch_size,seq_length,vocab_size) e.g.(8,1024,50257)
        seq_pos: list(batch_size)
    Return:
        output_logits: Tensor(batch_size,1,vocab_size) extract the Specified logit according to the seq_pos list .
    """
    output_logits = None
    for batch_idx in range(logits.shape[0]):
        pos = seq_pos[batch_idx]
        # Slice out the single (1, 1, vocab_size) logit for this batch row.
        selected = logits[batch_idx:batch_idx + 1:1, pos:pos + 1:1, ::]
        if output_logits is None:
            output_logits = selected
        else:
            # Stack along the batch dimension.
            output_logits = P.Concat()((output_logits, selected))
    return output_logits
def CollectSONAME(args):
    """Replaces: readelf -d $sofile | grep SONAME"""
    readelf = subprocess.Popen(wrapper_utils.CommandToRun(
        [args.readelf, '-d', args.sofile]),
                               stdout=subprocess.PIPE,
                               bufsize=-1,
                               universal_newlines=True)
    # Keep only dynamic-section lines mentioning SONAME.
    matching = [line for line in readelf.stdout if 'SONAME' in line]
    # Return the readelf exit status along with the collected lines.
    return readelf.wait(), ''.join(matching)
def padRect(rect, padTop, padBottom, padLeft, padRight, bounds, clipExcess = True):
    """
    Pads a rectangle by the specified values on each individual side,
    ensuring the padded rectangle falls within the specified bounds.

    The input rectangle and return value are tuples of (x, y, w, h).
    `bounds` is (height, width) — note the y-first ordering: the overflow
    checks compare (y + h) against bounds[0] and (x + w) against bounds[1].

    When `clipExcess` is truthy, any part of the padded rectangle outside
    `bounds` is clipped off; otherwise the rectangle is shifted back inside
    first, and only what still overflows after shifting is clipped.
    """
    # Unpack the rectangle
    x, y, w, h = rect

    # Pad by the specified value
    x -= padLeft
    y -= padTop
    w += (padLeft + padRight)
    h += (padTop + padBottom)

    # Determine if we are clipping overflows/underflows or
    # shifting the centre of the rectangle to compensate
    # FIX: test truthiness directly instead of the `== True` anti-idiom.
    if clipExcess:
        # Clip any underflows
        x = max(0, x)
        y = max(0, y)

        # Clip any overflows
        overflowY = max(0, (y + h) - bounds[0])
        overflowX = max(0, (x + w) - bounds[1])
        h -= overflowY
        w -= overflowX
    else:
        # Compensate for any underflows by shifting right/down
        underflowX = max(0, 0 - x)
        underflowY = max(0, 0 - y)
        x += underflowX
        y += underflowY

        # Compensate for any overflows by shifting left/up
        overflowY = max(0, (y + h) - bounds[0])
        overflowX = max(0, (x + w) - bounds[1])
        x -= overflowX
        w += overflowX
        y -= overflowY
        h += overflowY

        # If there are still overflows or underflows after our
        # modifications, we have no choice but to clip them
        x, y, w, h = padRect((x, y, w, h), 0, 0, 0, 0, bounds, True)

    # Re-pack the padded rect
    return (x, y, w, h)
def gcs_csv_to_table(full_table_id: str, remote_csv_path: str) -> Table:
    """
    Insert CSV from Google Storage to BigQuery Table.

    :param full_table_id: Full ID of a Google BigQuery table.
    :type full_table_id: str
    :param remote_csv_path: Path to uploaded CSV.
    :type remote_csv_path: str

    :returns: Table -- the reloaded BigQuery table after the load completes,
        or None if the load failed (errors are logged, not raised).
    """
    try:
        gcs_csv_uri = f"gs://{GCP_BUCKET_NAME}/{remote_csv_path}"
        # Let BigQuery infer the schema; skip the CSV header row.
        job_config = LoadJobConfig(
            autodetect=True,
            skip_leading_rows=1,
            source_format=SourceFormat.CSV,
        )
        load_job = gbq.load_table_from_uri(
            gcs_csv_uri, full_table_id, job_config=job_config
        )
        LOGGER.info(f"Starting job {load_job.job_id}.")
        LOGGER.info(load_job.result())  # Waits for table load to complete.
        return gbq.get_table(full_table_id)
    except BadRequest as e:
        # Malformed request (bad URI, schema mismatch, etc.) — log and return None.
        LOGGER.error(f"Invalid GCP request when creating table `{full_table_id}`: {e}")
    except Exception as e:
        LOGGER.error(f"Unexpected error when creating table `{full_table_id}`: {e}")
def _get_client(args: argparse.Namespace) -> NodeClient:
    """Returns a pycspr client instance.
    """
    # Build connection details from the parsed CLI arguments.
    connection = NodeConnectionInfo(
        host=args.node_host,
        port_sse=args.node_port_sse,
    )
    return NodeClient(connection)
def test_keyword__DeleteForm__1(address_book, KeywordFactory, browser):
    """Deletion can be canceled in the `DeleteForm`."""
    # Create a keyword so the edit form has something to delete.
    KeywordFactory(address_book, u'friend')
    browser.login('editor')
    browser.open(browser.KEYWORD_EDIT_URL)
    browser.getControl('Delete').click()
    # There is a confirmation dialog where the user has to decide if he
    # really wants to delete the keyword. If he decides not to delete the
    # entry he is led back to the keyword's edit form:
    assert browser.KEYWORD_DELETE_URL == browser.url
    browser.getControl('No, cancel').click()
    # Cancelling shows a status message and returns to the edit form.
    assert 'Deletion canceled.' == browser.message
    assert browser.KEYWORD_EDIT_URL == browser.url
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.