Dataset schema (columns in row order; ⌀ marks columns that may be null):
hexsha: stringlengths 40-40
size: int64 4-1.02M
ext: stringclasses 8 values
lang: stringclasses 1 value
max_stars_repo_path: stringlengths 4-209
max_stars_repo_name: stringlengths 5-121
max_stars_repo_head_hexsha: stringlengths 40-40
max_stars_repo_licenses: listlengths 1-10
max_stars_count: int64 1-191k ⌀
max_stars_repo_stars_event_min_datetime: stringlengths 24-24 ⌀
max_stars_repo_stars_event_max_datetime: stringlengths 24-24 ⌀
max_issues_repo_path: stringlengths 4-209
max_issues_repo_name: stringlengths 5-121
max_issues_repo_head_hexsha: stringlengths 40-40
max_issues_repo_licenses: listlengths 1-10
max_issues_count: int64 1-67k ⌀
max_issues_repo_issues_event_min_datetime: stringlengths 24-24 ⌀
max_issues_repo_issues_event_max_datetime: stringlengths 24-24 ⌀
max_forks_repo_path: stringlengths 4-209
max_forks_repo_name: stringlengths 5-121
max_forks_repo_head_hexsha: stringlengths 40-40
max_forks_repo_licenses: listlengths 1-10
max_forks_count: int64 1-105k ⌀
max_forks_repo_forks_event_min_datetime: stringlengths 24-24 ⌀
max_forks_repo_forks_event_max_datetime: stringlengths 24-24 ⌀
content: stringlengths 4-1.02M
avg_line_length: float64 1.07-66.1k
max_line_length: int64 4-266k
alphanum_fraction: float64 0.01-1
5431e40cf94a88341629a34f7423dff257d33bc7 | 13,705 | py | Python | data_steward/cdr_cleaner/cleaning_rules/measurement_table_suppression.py | ratuagga/curation | 047b984f20643e21bf3ab1e309903abaf816ecd5 | ["MIT"] | 1 | 2019-03-18T18:22:41.000Z | 2019-03-18T18:22:41.000Z | data_steward/cdr_cleaner/cleaning_rules/measurement_table_suppression.py | nishanthpp93/curation | ac9f38b2f4580ae806121dd929293159132c7d2a | ["MIT"] | null | null | null | data_steward/cdr_cleaner/cleaning_rules/measurement_table_suppression.py | nishanthpp93/curation | ac9f38b2f4580ae806121dd929293159132c7d2a | ["MIT"] | 1 | 2021-09-16T14:25:19.000Z | 2021-09-16T14:25:19.000Z |
"""
Removing irrelevant measurement records from the RDR dataset.
Original Issue: DC-481, 699
The intent is to clean data in the measurement table. The rule intends to
reset invalid field values to null, drop records that do not provide
meaningful data, and drop duplicate records.
"""
# Python Imports
import logging
# Project imports
from common import MEASUREMENT, JINJA_ENV
from cdr_cleaner.cleaning_rules.base_cleaning_rule import BaseCleaningRule
from constants.bq_utils import WRITE_TRUNCATE
from constants.cdr_cleaner import clean_cdr as cdr_consts
LOGGER = logging.getLogger(__name__)
ISSUE_NUMBERS = ['DC-699', 'DC-481']
INVALID_VALUES_RECORDS = 'dc699_save_9999999_as_null'
SITES_WITH_ONLY_BAD_DATA = 'dc699_sites_with_only_null_or_zero_meas_data'
SAVE_BAD_SITE_DATA = 'dc699_save_bad_site_data'
SAVE_NULL_VALUE_RECORDS = 'dc699_save_null_records_from_measurement'
SAVE_DUPLICATE_RECORDS = 'dc699_save_measurement_duplicates'
# Save rows that will be altered to a sandbox dataset.
NULL_VALUES_SAVE_QUERY = JINJA_ENV.from_string("""
CREATE OR REPLACE TABLE `{{project}}.{{sandbox}}.{{save_table}}` AS (
SELECT *
FROM `{{project}}.{{dataset}}.measurement`
WHERE value_as_number = 9999999
)""")
# Alter rows by changing 9999999 to NULL
NULL_VALUES_UPDATE_QUERY = JINJA_ENV.from_string("""
SELECT
measurement_id, person_id, measurement_concept_id, measurement_date,
measurement_datetime, measurement_type_concept_id, operator_concept_id,
CASE
WHEN value_as_number = 9999999 THEN NULL
ELSE value_as_number
END AS value_as_number,
value_as_concept_id, unit_concept_id, range_low, range_high, provider_id,
visit_occurrence_id, measurement_source_value, measurement_source_concept_id,
unit_source_value, value_source_value
from `{{project}}.{{dataset}}.measurement`
""")
# Identify sites that submitted only "junk" data, i.e. nothing but nulls or zero
# values in the value_as_number field. These sites are saved to a table to make
# programmatic access easier.
SITES_TO_REMOVE_DATA_FOR = JINJA_ENV.from_string("""
CREATE OR REPLACE TABLE `{{project}}.{{sandbox}}.{{save_table}}` AS (
-- join the measurement and mapping table and only store EHR site records --
WITH joined_table AS (
SELECT *
FROM `{{project}}.{{dataset}}.measurement` AS m
JOIN `{{project}}.{{dataset}}.measurement_ext` AS me
USING (measurement_id)
WHERE src_id LIKE 'EHR site%'
),
-- get the src_id of sites having something greater than 0 in the value_as_number field --
values_containing_srcs AS(
SELECT DISTINCT(src_id)
FROM joined_table AS jt
GROUP BY src_id, value_as_number
HAVING value_as_number > 0
),
-- get the src_id of sites having either 0 or null in the value_as_number field --
junk_srcs AS (
SELECT DISTINCT(src_id)
FROM joined_table AS jt
GROUP BY src_id, value_as_number
HAVING value_as_number = 0 OR value_as_number IS NULL)
-- select those src_ids from junk_srcs that do not exist in value containing sources --
-- this means the site never submitted anything other than 0 or null in the --
-- value_as_number field --
SELECT js.src_id
FROM junk_srcs AS js
WHERE js.src_id NOT IN (SELECT src_id FROM values_containing_srcs)
)""")
# store data from sites that will be dropped.
NULL_AND_ZERO_VALUES_SAVE_QUERY = JINJA_ENV.from_string("""
CREATE OR REPLACE TABLE `{{project}}.{{sandbox}}.{{save_table}}` AS (
SELECT *
FROM `{{project}}.{{dataset}}.measurement` AS m
JOIN `{{project}}.{{dataset}}.measurement_ext` AS me
USING (measurement_id)
WHERE me.src_id IN (SELECT src_id FROM `{{project}}.{{sandbox}}.{{id_table}}`)
AND m.value_as_number = 0
)""")
# Update value_as_number for any site that has only submitted junk, i.e. 0 or null
# for value_as_number
SET_NULL_WHEN_ONLY_ZEROS_SUBMITTED = JINJA_ENV.from_string("""
UPDATE `{{project}}.{{dataset}}.measurement` as m
SET value_as_number = NULL
FROM `{{project}}.{{dataset}}.measurement_ext` AS me
WHERE m.measurement_id = me.measurement_id
AND me.src_id IN (SELECT src_id FROM `{{project}}.{{sandbox}}.{{id_table}}`)
""")
# Save records that will be dropped when
# value_as_number IS NULL AND value_as_concept_id IS NULL
SAVE_NULL_DROP_RECORDS = JINJA_ENV.from_string("""
CREATE OR REPLACE TABLE `{{project}}.{{sandbox}}.{{save_table}}` AS (
SELECT *
FROM `{{project}}.{{dataset}}.measurement` AS m
WHERE m.value_as_number IS NULL AND m.value_as_concept_id IS NULL
)""")
# Only select records that we want to keep
SELECT_RECORDS_WITH_VALID_DATA = JINJA_ENV.from_string("""
SELECT *
FROM `{{project}}.{{dataset}}.measurement` AS m
WHERE m.value_as_number IS NOT NULL OR m.value_as_concept_id IS NOT NULL
""")
# Sandbox duplicate records based on the fields: person_id,
# measurement_source_concept_id, unit_concept_id, measurement_concept_id,
# measurement_datetime, value_as_number, value_as_concept_id.
# value_as_number is CAST to a string in the PARTITION BY clause because BigQuery
# does not allow partitioning by a FLOAT64 column.
SANDBOX_DUPLICATES = JINJA_ENV.from_string("""
-- identify duplicates with this context table statement --
-- only add duplicate field identifiers to this statement --
CREATE OR REPLACE TABLE `{{project}}.{{sandbox}}.{{save_table}}` AS (
WITH
cte AS (
SELECT
*,
ROW_NUMBER() OVER (PARTITION BY person_id, measurement_source_concept_id, unit_concept_id, measurement_concept_id, measurement_datetime, CAST(value_as_number AS string),
value_as_concept_id
ORDER BY
person_id,
measurement_source_concept_id,
unit_concept_id,
measurement_concept_id,
measurement_datetime,
value_as_number,
value_as_concept_id,
measurement_id) AS row_num
FROM
`{{project}}.{{dataset}}.measurement`
)
-- select all fields from the table for sandboxing --
SELECT *
FROM
cte
WHERE row_num > 1
)""")
REMOVE_DUPLICATES = JINJA_ENV.from_string("""
-- Select only the records that have not been sandboxed --
SELECT *
FROM `{{project}}.{{dataset}}.measurement`
WHERE measurement_id NOT IN
(SELECT measurement_id FROM `{{project}}.{{sandbox}}.{{id_table}}`)
""")
class MeasurementRecordsSuppression(BaseCleaningRule):
"""
Suppress measurement rows by their values.
The rule will:
1. convert value_as_number = 9999999 to value_as_number IS NULL,
2. drop rows where value_as_number IS NULL AND value_as_concept_id IS NULL,
3. drop all measurement data from a site if value_as_number = 0 for all
records submitted by the site.
4. Eliminate duplicate rows based on the fields:
person_id, measurement_source_concept_id, unit_concept_id,
measurement_concept_id, measurement_datetime, value_as_number,
and value_as_concept_id.
"""
def __init__(self, project_id, dataset_id, sandbox_dataset_id):
"""
Initialize the class with proper info.
Set the issue numbers, description and affected datasets. As other
tickets may affect this SQL, append them to the list of Jira Issues.
DO NOT REMOVE ORIGINAL JIRA ISSUE NUMBERS!
"""
desc = (f'Clean the measurement table after it was de-identified. '
f'Remove rows that do not contribute high quality data.')
super().__init__(issue_numbers=ISSUE_NUMBERS,
description=desc,
affected_datasets=[
cdr_consts.DEID_BASE,
cdr_consts.CONTROLLED_TIER_DEID_CLEAN
],
project_id=project_id,
dataset_id=dataset_id,
sandbox_dataset_id=sandbox_dataset_id,
affected_tables=[MEASUREMENT])
def get_query_specs(self):
"""
Return a list of dictionary query specifications.
:return: A list of dictionaries. Each dictionary contains a
single query and a specification for how to execute that query.
The specifications are optional but the query is required.
"""
save_null_values = {
cdr_consts.QUERY:
NULL_VALUES_SAVE_QUERY.render(
project=self.project_id,
dataset=self.dataset_id,
sandbox=self.sandbox_dataset_id,
save_table=INVALID_VALUES_RECORDS),
}
update_to_null_values = {
cdr_consts.QUERY:
NULL_VALUES_UPDATE_QUERY.render(project=self.project_id,
dataset=self.dataset_id),
cdr_consts.DESTINATION_TABLE:
MEASUREMENT,
cdr_consts.DESTINATION_DATASET:
self.dataset_id,
cdr_consts.DISPOSITION:
WRITE_TRUNCATE
}
identify_bad_sites = {
cdr_consts.QUERY:
SITES_TO_REMOVE_DATA_FOR.render(
project=self.project_id,
dataset=self.dataset_id,
sandbox=self.sandbox_dataset_id,
save_table=SITES_WITH_ONLY_BAD_DATA)
}
save_data_from_bad_sites = {
cdr_consts.QUERY:
NULL_AND_ZERO_VALUES_SAVE_QUERY.render(
project=self.project_id,
dataset=self.dataset_id,
sandbox=self.sandbox_dataset_id,
save_table=SAVE_BAD_SITE_DATA,
id_table=SITES_WITH_ONLY_BAD_DATA)
}
set_null_for_zero_from_bad_sites = {
cdr_consts.QUERY:
SET_NULL_WHEN_ONLY_ZEROS_SUBMITTED.render(
project=self.project_id,
dataset=self.dataset_id,
sandbox=self.sandbox_dataset_id,
id_table=SITES_WITH_ONLY_BAD_DATA)
}
save_null_records_before_dropping = {
cdr_consts.QUERY:
SAVE_NULL_DROP_RECORDS.render(
project=self.project_id,
dataset=self.dataset_id,
sandbox=self.sandbox_dataset_id,
save_table=SAVE_NULL_VALUE_RECORDS)
}
keep_records_with_good_data = {
cdr_consts.QUERY:
SELECT_RECORDS_WITH_VALID_DATA.render(project=self.project_id,
dataset=self.dataset_id),
cdr_consts.DESTINATION_TABLE:
MEASUREMENT,
cdr_consts.DESTINATION_DATASET:
self.dataset_id,
cdr_consts.DISPOSITION:
WRITE_TRUNCATE
}
sandbox_duplicates = {
cdr_consts.QUERY:
SANDBOX_DUPLICATES.render(project=self.project_id,
dataset=self.dataset_id,
sandbox=self.sandbox_dataset_id,
save_table=SAVE_DUPLICATE_RECORDS)
}
remove_duplicates = {
cdr_consts.QUERY:
REMOVE_DUPLICATES.render(project=self.project_id,
dataset=self.dataset_id,
sandbox=self.sandbox_dataset_id,
id_table=SAVE_DUPLICATE_RECORDS),
cdr_consts.DESTINATION_TABLE:
MEASUREMENT,
cdr_consts.DESTINATION_DATASET:
self.dataset_id,
cdr_consts.DISPOSITION:
WRITE_TRUNCATE
}
return [
save_null_values, update_to_null_values, identify_bad_sites,
save_data_from_bad_sites, set_null_for_zero_from_bad_sites,
save_null_records_before_dropping, keep_records_with_good_data,
sandbox_duplicates, remove_duplicates
]
def setup_rule(self, client):
"""
Function to run any data upload options before executing a query.
"""
pass
def setup_validation(self, client):
"""
Run required steps for validation setup
This abstract method was added to the base class after this rule was authored.
This rule needs to implement logic to set up validation for cleaning rules that
will be updating or deleting values.
A tracking issue has not been created for this work yet.
"""
raise NotImplementedError("Please fix me.")
def validate_rule(self):
"""
Validates the cleaning rule which deletes or updates the data from the tables
This abstract method was added to the base class after this rule was authored.
This rule needs to implement logic to run validation on cleaning rules that will
be updating or deleting values.
A tracking issue has not been created for this work yet.
"""
raise NotImplementedError("Please fix me.")
def get_sandbox_tablenames(self):
return [
INVALID_VALUES_RECORDS, SITES_WITH_ONLY_BAD_DATA,
SAVE_BAD_SITE_DATA, SAVE_NULL_VALUE_RECORDS, SAVE_DUPLICATE_RECORDS
]
if __name__ == '__main__':
import cdr_cleaner.args_parser as parser
import cdr_cleaner.clean_cdr_engine as clean_engine
ARGS = parser.parse_args()
if ARGS.list_queries:
clean_engine.add_console_logging()
query_list = clean_engine.get_query_list(
ARGS.project_id, ARGS.dataset_id, ARGS.sandbox_dataset_id,
[(MeasurementRecordsSuppression,)])
for query in query_list:
LOGGER.info(query)
else:
clean_engine.add_console_logging(ARGS.console_log)
clean_engine.clean_dataset(ARGS.project_id, ARGS.dataset_id,
ARGS.sandbox_dataset_id,
[(MeasurementRecordsSuppression,)])
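# ---------------------------------------------------------------------------
# Illustrative sketch only (not executed by the pipeline above): rendering one of
# the Jinja templates defined in this module with hypothetical project/dataset
# names shows the SQL that the cleaning rule submits. The identifiers below are
# placeholders, not real datasets.
#
#     sql = NULL_VALUES_SAVE_QUERY.render(
#         project='my-project',
#         dataset='my_cdr_dataset',
#         sandbox='my_sandbox_dataset',
#         save_table=INVALID_VALUES_RECORDS)
#
#     # CREATE OR REPLACE TABLE `my-project.my_sandbox_dataset.dc699_save_9999999_as_null` AS (
#     #     SELECT *
#     #     FROM `my-project.my_cdr_dataset.measurement`
#     #     WHERE value_as_number = 9999999
#     # )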
| 37.651099 | 173 | 0.667056 |
e48acc6cd30ca365c381557de5bb8b05ce37398d | 1,903 | py | Python | tests/test_coax.py | BetaPollux/EMToolbox | c9e8e06dd45b764fe7f29f0118c78cf3ae31a4a8 | ["MIT"] | 1 | 2021-02-19T22:36:14.000Z | 2021-02-19T22:36:14.000Z | tests/test_coax.py | BetaPollux/EMToolbox | c9e8e06dd45b764fe7f29f0118c78cf3ae31a4a8 | ["MIT"] | null | null | null | tests/test_coax.py | BetaPollux/EMToolbox | c9e8e06dd45b764fe7f29f0118c78cf3ae31a4a8 | ["MIT"] | null | null | null |
#!/usr/bin/python3
'''Test TLine Coax functions'''
import math
from pytest import approx
import emtoolbox.tline.coax as coax
import emtoolbox.utils.conversions as convert
import emtoolbox.utils.constants as const
FREQ = 100e6
W = 2 * math.pi * FREQ
EPSR = 2.3
RADIUS_W = convert.meters_from_mils(16)
RADIUS_S = convert.meters_from_mils(58)
def test_impedance():
zc = coax.impedance(RADIUS_W, RADIUS_S, EPSR)
assert zc == approx(50.0, abs=1.0)
def test_inductance():
ind = coax.inductance(RADIUS_W, RADIUS_S)
assert ind == approx(0.2576e-6, rel=1e-3)
def test_capacitance():
cap = coax.capacitance(RADIUS_W, RADIUS_S, EPSR)
assert cap == approx(99.2e-12, rel=1e-3)
def test_conductance_simple():
sigma = 10
con = coax.conductance_simple(RADIUS_W, RADIUS_S, sigma)
cap = coax.capacitance(RADIUS_W, RADIUS_S, EPSR)
assert con == approx(sigma / (const.EPS0 * EPSR) * cap, rel=1e-3)
def test_conductance_loss_tangent():
tan = 0.02
con = coax.conductance_loss_tangent(RADIUS_W, RADIUS_S, EPSR, tan)
cap = coax.capacitance(RADIUS_W, RADIUS_S, EPSR)
assert con(W) == approx(W * tan * cap, rel=1e-3)
def test_conductance_loss_tangent_mult():
tan = 0.02
con1 = coax.conductance_loss_tangent(RADIUS_W, RADIUS_S, EPSR, tan)
con2 = coax.conductance_loss_tangent(RADIUS_W, 2 * RADIUS_S, EPSR, tan)
assert con1(W) != con2(W)
def test_resistance_dc():
rad = convert.wire_radius_awg(28)
res = coax.resistance_dc(rad, const.COND_CU)
assert res == approx(0.213, rel=1e-3)
def test_resistance_skin_effect():
res = coax.resistance_skin_effect(RADIUS_W, const.COND_CU)
assert res(W) == approx(1.022, rel=1e-3)
def test_resistance_skin_effect_mult():
res1 = coax.resistance_skin_effect(RADIUS_W, const.COND_CU)
res2 = coax.resistance_skin_effect(2 * RADIUS_W, const.COND_CU)
assert res1(W) != res2(W)
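# Reference sketch (illustrative, not part of emtoolbox): the tests above appear to
# exercise the standard closed-form per-unit-length parameters of a lossless coaxial
# line with inner-wire radius rw and shield radius rs. The free-space constants are
# written out explicitly rather than assuming names in emtoolbox.utils.constants.
MU0_SKETCH = 4e-7 * math.pi        # permeability of free space, H/m
EPS0_SKETCH = 8.854187817e-12      # permittivity of free space, F/m


def _coax_params_sketch(radius_w, radius_s, epsr):
    """Return (L', C', Zc): inductance [H/m], capacitance [F/m], impedance [ohm]."""
    log_ratio = math.log(radius_s / radius_w)
    ind = MU0_SKETCH / (2 * math.pi) * log_ratio
    cap = 2 * math.pi * EPS0_SKETCH * epsr / log_ratio
    return ind, cap, math.sqrt(ind / cap)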
| 27.57971 | 75 | 0.712559 |
e53a114bd5dbf81fdf7ba717dd01ea93e5b84065 | 5,669 | py | Python | robocop/checkers/__init__.py | MalikMlitat/robotframework-robocop | 615d535f30e72a8638a6084a692f683b97ee1fcb | ["Apache-2.0"] | null | null | null | robocop/checkers/__init__.py | MalikMlitat/robotframework-robocop | 615d535f30e72a8638a6084a692f683b97ee1fcb | ["Apache-2.0"] | 2 | 2021-11-04T20:27:48.000Z | 2022-03-24T00:23:54.000Z | robocop/checkers/__init__.py | MalikMlitat/robotframework-robocop | 615d535f30e72a8638a6084a692f683b97ee1fcb | ["Apache-2.0"] | null | null | null |
"""
Robocop rules are internally grouped into checkers. Each checker can scan for multiple related issues
(for example, ``LengthChecker`` checks both the minimum and the maximum length of a keyword). You can refer to a
specific rule reported by a checker by its name or id (for example `too-long-keyword` or `0501`).
Checkers are categorized into the following groups:
* 01: base
* 02: documentation
* 03: naming
* 04: errors
* 05: lengths
* 06: tags
* 07: comments
* 08: duplications
* 09: misc
* 10: spacing
* 11-50: not yet used: reserved for future internal checkers
* 51-99: reserved for external checkers
Checkers come in two basic types:
- ``VisitorChecker`` uses the Robot Framework parsing API and Python's `ast` module to traverse Robot code as nodes,
- ``RawFileChecker`` simply reads the Robot file as a plain text file and scans every line.
Every rule has a `unique id` made of 4 digits, where the first 2 digits are the `checker id` and the last 2 are the `rule id`.
`Unique id` as well as `rule name` can be used to refer to the rule (e.g. in include/exclude statements,
configurations etc.). You can optionally configure rule severity or other parameters.
"""
import inspect
try:
from robot.api.parsing import ModelVisitor
except ImportError:
from robot.parsing.model.visitor import ModelVisitor
from robot.utils import FileReader
from robocop.utils import modules_from_paths, modules_in_current_dir
from robocop.exceptions import RuleNotFoundError, RuleParamNotFoundError, RuleReportsNotFoundError
class BaseChecker:
rules = None
def __init__(self):
self.disabled = False
self.source = None
self.lines = None
self.issues = []
self.rules = {}
self.templated_suite = False
def param(self, rule, param_name):
try:
return self.rules[rule].config[param_name].value
except KeyError:
if rule not in self.rules:
raise RuleNotFoundError(rule, self) from None
if param_name not in self.rules[rule].config:
raise RuleParamNotFoundError(self.rules[rule], param_name, self) from None
def report(
self,
rule,
node=None,
lineno=None,
col=None,
end_lineno=None,
end_col=None,
ext_disablers=None,
**kwargs,
):
if rule not in self.rules:
raise ValueError(f"Missing definition for message with name {rule}")
message = self.rules[rule].prepare_message(
source=self.source,
node=node,
lineno=lineno,
col=col,
end_lineno=end_lineno,
end_col=end_col,
ext_disablers=ext_disablers,
**kwargs,
)
if message.enabled:
self.issues.append(message)
class VisitorChecker(BaseChecker, ModelVisitor): # noqa
type = "visitor_checker"
def scan_file(self, ast_model, filename, in_memory_content, templated=False):
self.issues = []
self.source = filename
self.templated_suite = templated
if in_memory_content is not None:
self.lines = in_memory_content.splitlines(keepends=True)
else:
self.lines = None
self.visit_File(ast_model)
return self.issues
def visit_File(self, node): # noqa
"""Perform generic ast visit on file node."""
self.generic_visit(node)
class RawFileChecker(BaseChecker): # noqa
type = "rawfile_checker"
def scan_file(self, ast_model, filename, in_memory_content, templated=False):
self.issues = []
self.source = filename
self.templated_suite = templated
if in_memory_content is not None:
self.lines = in_memory_content.splitlines(keepends=True)
else:
self.lines = None
self.parse_file()
return self.issues
def parse_file(self):
"""Read file line by line and for each call check_line method."""
if self.lines is not None:
for lineno, line in enumerate(self.lines):
self.check_line(line, lineno + 1)
else:
with FileReader(self.source) as file_reader:
for lineno, line in enumerate(file_reader.readlines()):
self.check_line(line, lineno + 1)
def check_line(self, line, lineno):
raise NotImplementedError
def init(linter):
"""For each module get `rules` dictionary and visitors. Instantiate each visitor and map it to the
rule class instance using `reports` visitor attribute."""
for module in get_modules(linter.config.ext_rules):
classes = inspect.getmembers(module, inspect.isclass)
module_rules = {rule.name: rule for rule in getattr(module, "rules", {}).values()}
for checker in classes:
if issubclass(checker[1], BaseChecker) and getattr(checker[1], "reports", False):
checker_instance = checker[1]()
for reported_rule in checker_instance.reports:
if reported_rule not in module_rules:
raise RuleReportsNotFoundError(reported_rule, checker_instance) from None
checker_instance.rules[reported_rule] = module_rules[reported_rule]
linter.register_checker(checker_instance)
def get_modules(ext_rules):
yield from modules_in_current_dir(__file__, __name__)
yield from modules_from_paths(ext_rules)
def get_rules():
for module in modules_in_current_dir(__file__, __name__):
module_name = module.__name__.split(".")[-1]
for rule in getattr(module, "rules", {}).values():
yield module_name, rule
| 34.779141 | 113 | 0.656553 |
267cd8c622ef7863a820e02d9f56fb5c310108bf | 190 | py | Python | PE20.py | TIJMacLean/Project-Euler | a7b7297c9fed2026d063e7f209f9ff61f45f0ccb | ["Apache-2.0"] | null | null | null | PE20.py | TIJMacLean/Project-Euler | a7b7297c9fed2026d063e7f209f9ff61f45f0ccb | ["Apache-2.0"] | null | null | null | PE20.py | TIJMacLean/Project-Euler | a7b7297c9fed2026d063e7f209f9ff61f45f0ccb | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri May 15 00:06:30 2020
@author: Tom
"""
from math import factorial
number = str(factorial(100))
print(number)
print(sum([int(i) for i in number]))
| 15.833333 | 36 | 0.657895 |
5078d3c7316d47ce99d16a0359f5067724af6afd | 38,012 | py | Python | mesonbuild/rewriter.py | regolith-linux/meson | 938e7037319852dd75c9da17c402d6903ab22fe3 | ["Apache-2.0"] | 1 | 2020-01-03T07:15:50.000Z | 2020-01-03T07:15:50.000Z | mesonbuild/rewriter.py | regolith-linux/meson | 938e7037319852dd75c9da17c402d6903ab22fe3 | ["Apache-2.0"] | null | null | null | mesonbuild/rewriter.py | regolith-linux/meson | 938e7037319852dd75c9da17c402d6903ab22fe3 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
# Copyright 2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This class contains the basic functionality needed to run any interpreter
# or an interpreter-based tool.
# This tool is used to manipulate an existing Meson build definition.
#
# - add a file to a target
# - remove files from a target
# - move targets
# - reindent?
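# Illustrative example (placeholder names): the "command" sub-command accepts a JSON
# array of objects shaped by the rewriter_keys definitions further below, e.g.
#
#     [
#       {"type": "target", "target": "myexe", "operation": "src_add", "sources": ["foo.c"]},
#       {"type": "kwargs", "function": "project", "id": "/", "operation": "set",
#        "kwargs": {"version": "1.2.3"}}
#     ]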
from .ast import IntrospectionInterpreter, build_target_functions, AstConditionLevel, AstIDGenerator, AstIndentationGenerator, AstPrinter
from mesonbuild.mesonlib import MesonException
from . import mlog, environment
from functools import wraps
from typing import List, Dict, Optional
from .mparser import Token, ArrayNode, ArgumentNode, AssignmentNode, BaseNode, BooleanNode, ElementaryNode, IdNode, FunctionNode, StringNode
import json, os, re, sys
class RewriterException(MesonException):
pass
def add_arguments(parser, formatter=None):
parser.add_argument('-s', '--sourcedir', type=str, default='.', metavar='SRCDIR', help='Path to source directory.')
parser.add_argument('-V', '--verbose', action='store_true', default=False, help='Enable verbose output')
parser.add_argument('-S', '--skip-errors', dest='skip', action='store_true', default=False, help='Skip errors instead of aborting')
subparsers = parser.add_subparsers(dest='type', title='Rewriter commands', description='Rewrite command to execute')
# Target
tgt_parser = subparsers.add_parser('target', help='Modify a target', formatter_class=formatter)
tgt_parser.add_argument('-s', '--subdir', default='', dest='subdir', help='Subdirectory of the new target (only for the "add_target" action)')
tgt_parser.add_argument('--type', dest='tgt_type', choices=rewriter_keys['target']['target_type'][2], default='executable',
help='Type of the target to add (only for the "add_target" action)')
tgt_parser.add_argument('target', help='Name or ID of the target')
tgt_parser.add_argument('operation', choices=['add', 'rm', 'add_target', 'rm_target', 'info'],
help='Action to execute')
tgt_parser.add_argument('sources', nargs='*', help='Sources to add/remove')
# KWARGS
kw_parser = subparsers.add_parser('kwargs', help='Modify keyword arguments', formatter_class=formatter)
kw_parser.add_argument('operation', choices=rewriter_keys['kwargs']['operation'][2],
help='Action to execute')
kw_parser.add_argument('function', choices=list(rewriter_func_kwargs.keys()),
help='Function type to modify')
kw_parser.add_argument('id', help='ID of the function to modify (can be anything for "project")')
kw_parser.add_argument('kwargs', nargs='*', help='Pairs of keyword and value')
# Default options
def_parser = subparsers.add_parser('default-options', help='Modify the project default options', formatter_class=formatter)
def_parser.add_argument('operation', choices=rewriter_keys['default_options']['operation'][2],
help='Action to execute')
def_parser.add_argument('options', nargs='*', help='Key, value pairs of configuration option')
# JSON file/command
cmd_parser = subparsers.add_parser('command', help='Execute a JSON array of commands', formatter_class=formatter)
cmd_parser.add_argument('json', help='JSON string or file to execute')
class RequiredKeys:
def __init__(self, keys):
self.keys = keys
def __call__(self, f):
@wraps(f)
def wrapped(*wrapped_args, **wrapped_kwargs):
assert(len(wrapped_args) >= 2)
cmd = wrapped_args[1]
for key, val in self.keys.items():
typ = val[0] # The expected type of the value
default = val[1] # The default value; None means the key is required
choices = val[2] # Valid choices; None means any value is allowed
if key not in cmd:
if default is not None:
cmd[key] = default
else:
raise RewriterException('Key "{}" is missing in object for {}'
.format(key, f.__name__))
if not isinstance(cmd[key], typ):
raise RewriterException('Invalid type of "{}". Required is {} but provided was {}'
.format(key, typ.__name__, type(cmd[key]).__name__))
if choices is not None:
assert(isinstance(choices, list))
if cmd[key] not in choices:
raise RewriterException('Invalid value of "{}": Possible values are {} but provided was "{}"'
.format(key, choices, cmd[key]))
return f(*wrapped_args, **wrapped_kwargs)
return wrapped
class MTypeBase:
def __init__(self, node: Optional[BaseNode] = None):
if node is None:
self.node = self._new_node() # lgtm [py/init-calls-subclass] (node creation does not depend on base class state)
else:
self.node = node
self.node_type = None
for i in self.supported_nodes(): # lgtm [py/init-calls-subclass] (listing nodes does not depend on base class state)
if isinstance(self.node, i):
self.node_type = i
def _new_node(self):
# Overwrite in derived class
return BaseNode()
def can_modify(self):
return self.node_type is not None
def get_node(self):
return self.node
def supported_nodes(self):
# Overwrite in derived class
return []
def set_value(self, value):
# Overwrite in derived class
mlog.warning('Cannot set the value of type', mlog.bold(type(self).__name__), '--> skipping')
def add_value(self, value):
# Overwrite in derived class
mlog.warning('Cannot add a value of type', mlog.bold(type(self).__name__), '--> skipping')
def remove_value(self, value):
# Overwrite in derived class
mlog.warning('Cannot remove a value of type', mlog.bold(type(self).__name__), '--> skipping')
def remove_regex(self, value):
# Overwrite in derived class
mlog.warning('Cannot remove a regex in type', mlog.bold(type(self).__name__), '--> skipping')
class MTypeStr(MTypeBase):
def __init__(self, node: Optional[BaseNode] = None):
super().__init__(node)
def _new_node(self):
return StringNode(Token('', '', 0, 0, 0, None, ''))
def supported_nodes(self):
return [StringNode]
def set_value(self, value):
self.node.value = str(value)
class MTypeBool(MTypeBase):
def __init__(self, node: Optional[BaseNode] = None):
super().__init__(node)
def _new_node(self):
return BooleanNode(Token('', '', 0, 0, 0, None, False))
def supported_nodes(self):
return [BooleanNode]
def set_value(self, value):
self.node.value = bool(value)
class MTypeID(MTypeBase):
def __init__(self, node: Optional[BaseNode] = None):
super().__init__(node)
def _new_node(self):
return IdNode(Token('', '', 0, 0, 0, None, ''))
def supported_nodes(self):
return [IdNode]
def set_value(self, value):
self.node.value = str(value)
class MTypeList(MTypeBase):
def __init__(self, node: Optional[BaseNode] = None):
super().__init__(node)
def _new_node(self):
return ArrayNode(ArgumentNode(Token('', '', 0, 0, 0, None, '')), 0, 0, 0, 0)
def _new_element_node(self, value):
# Overwrite in derived class
return BaseNode()
def _ensure_array_node(self):
if not isinstance(self.node, ArrayNode):
tmp = self.node
self.node = self._new_node()
self.node.args.arguments += [tmp]
def _check_is_equal(self, node, value) -> bool:
# Overwrite in derived class
return False
def _check_regex_matches(self, node, regex: str) -> bool:
# Overwrite in derived class
return False
def get_node(self):
if isinstance(self.node, ArrayNode):
if len(self.node.args.arguments) == 1:
return self.node.args.arguments[0]
return self.node
def supported_element_nodes(self):
# Overwrite in derived class
return []
def supported_nodes(self):
return [ArrayNode] + self.supported_element_nodes()
def set_value(self, value):
if not isinstance(value, list):
value = [value]
self._ensure_array_node()
self.node.args.arguments = [] # Remove all current nodes
for i in value:
self.node.args.arguments += [self._new_element_node(i)]
def add_value(self, value):
if not isinstance(value, list):
value = [value]
self._ensure_array_node()
for i in value:
self.node.args.arguments += [self._new_element_node(i)]
def _remove_helper(self, value, equal_func):
def check_remove_node(node):
for j in value:
if equal_func(node, j):
return True
return False
if not isinstance(value, list):
value = [value]
self._ensure_array_node()
removed_list = []
for i in self.node.args.arguments:
if not check_remove_node(i):
removed_list += [i]
self.node.args.arguments = removed_list
def remove_value(self, value):
self._remove_helper(value, self._check_is_equal)
def remove_regex(self, regex: str):
self._remove_helper(regex, self._check_regex_matches)
class MTypeStrList(MTypeList):
def __init__(self, node: Optional[BaseNode] = None):
super().__init__(node)
def _new_element_node(self, value):
return StringNode(Token('', '', 0, 0, 0, None, str(value)))
def _check_is_equal(self, node, value) -> bool:
if isinstance(node, StringNode):
return node.value == value
return False
def _check_regex_matches(self, node, regex: str) -> bool:
if isinstance(node, StringNode):
return re.match(regex, node.value) is not None
return False
def supported_element_nodes(self):
return [StringNode]
class MTypeIDList(MTypeList):
def __init__(self, node: Optional[BaseNode] = None):
super().__init__(node)
def _new_element_node(self, value):
return IdNode(Token('', '', 0, 0, 0, None, str(value)))
def _check_is_equal(self, node, value) -> bool:
if isinstance(node, IdNode):
return node.value == value
return False
def _check_regex_matches(self, node, regex: str) -> bool:
if isinstance(node, StringNode):
return re.match(regex, node.value) is not None
return False
def supported_element_nodes(self):
return [IdNode]
rewriter_keys = {
'default_options': {
'operation': (str, None, ['set', 'delete']),
'options': (dict, {}, None)
},
'kwargs': {
'function': (str, None, None),
'id': (str, None, None),
'operation': (str, None, ['set', 'delete', 'add', 'remove', 'remove_regex', 'info']),
'kwargs': (dict, {}, None)
},
'target': {
'target': (str, None, None),
'operation': (str, None, ['src_add', 'src_rm', 'target_rm', 'target_add', 'info']),
'sources': (list, [], None),
'subdir': (str, '', None),
'target_type': (str, 'executable', ['both_libraries', 'executable', 'jar', 'library', 'shared_library', 'shared_module', 'static_library']),
}
}
rewriter_func_kwargs = {
'dependency': {
'language': MTypeStr,
'method': MTypeStr,
'native': MTypeBool,
'not_found_message': MTypeStr,
'required': MTypeBool,
'static': MTypeBool,
'version': MTypeStrList,
'modules': MTypeStrList
},
'target': {
'build_by_default': MTypeBool,
'build_rpath': MTypeStr,
'dependencies': MTypeIDList,
'gui_app': MTypeBool,
'link_with': MTypeIDList,
'export_dynamic': MTypeBool,
'implib': MTypeBool,
'install': MTypeBool,
'install_dir': MTypeStr,
'install_rpath': MTypeStr,
'pie': MTypeBool
},
'project': {
'default_options': MTypeStrList,
'meson_version': MTypeStr,
'license': MTypeStrList,
'subproject_dir': MTypeStr,
'version': MTypeStr
}
}
class Rewriter:
def __init__(self, sourcedir: str, generator: str = 'ninja', skip_errors: bool = False):
self.sourcedir = sourcedir
self.interpreter = IntrospectionInterpreter(sourcedir, '', generator, visitors = [AstIDGenerator(), AstIndentationGenerator(), AstConditionLevel()])
self.skip_errors = skip_errors
self.modefied_nodes = []
self.to_remove_nodes = []
self.to_add_nodes = []
self.functions = {
'default_options': self.process_default_options,
'kwargs': self.process_kwargs,
'target': self.process_target,
}
self.info_dump = None
def analyze_meson(self):
mlog.log('Analyzing meson file:', mlog.bold(os.path.join(self.sourcedir, environment.build_filename)))
self.interpreter.analyze()
mlog.log(' -- Project:', mlog.bold(self.interpreter.project_data['descriptive_name']))
mlog.log(' -- Version:', mlog.cyan(self.interpreter.project_data['version']))
def add_info(self, cmd_type: str, cmd_id: str, data: dict):
if self.info_dump is None:
self.info_dump = {}
if cmd_type not in self.info_dump:
self.info_dump[cmd_type] = {}
self.info_dump[cmd_type][cmd_id] = data
def print_info(self):
if self.info_dump is None:
return
sys.stderr.write(json.dumps(self.info_dump, indent=2))
def on_error(self):
if self.skip_errors:
return mlog.cyan('-->'), mlog.yellow('skipping')
return mlog.cyan('-->'), mlog.red('aborting')
def handle_error(self):
if self.skip_errors:
return None
raise MesonException('Rewriting the meson.build failed')
def find_target(self, target: str):
def check_list(name: str) -> List[BaseNode]:
result = []
for i in self.interpreter.targets:
if name == i['name'] or name == i['id']:
result += [i]
return result
targets = check_list(target)
if targets:
if len(targets) == 1:
return targets[0]
else:
mlog.error('There are multiple targets matching', mlog.bold(target))
for i in targets:
mlog.error(' -- Target name', mlog.bold(i['name']), 'with ID', mlog.bold(i['id']))
mlog.error('Please try again with the unique ID of the target', *self.on_error())
self.handle_error()
return None
# Check the assignments
tgt = None
if target in self.interpreter.assignments:
node = self.interpreter.assignments[target][0]
if isinstance(node, FunctionNode):
if node.func_name in ['executable', 'jar', 'library', 'shared_library', 'shared_module', 'static_library', 'both_libraries']:
tgt = self.interpreter.assign_vals[target][0]
return tgt
def find_dependency(self, dependency: str):
def check_list(name: str):
for i in self.interpreter.dependencies:
if name == i['name']:
return i
return None
dep = check_list(dependency)
if dep is not None:
return dep
# Check the assignments
if dependency in self.interpreter.assignments:
node = self.interpreter.assignments[dependency][0]
if isinstance(node, FunctionNode):
if node.func_name in ['dependency']:
name = self.interpreter.flatten_args(node.args)[0]
dep = check_list(name)
return dep
@RequiredKeys(rewriter_keys['default_options'])
def process_default_options(self, cmd):
# First, remove the old values
kwargs_cmd = {
'function': 'project',
'id': "/",
'operation': 'remove_regex',
'kwargs': {
'default_options': ['{}=.*'.format(x) for x in cmd['options'].keys()]
}
}
self.process_kwargs(kwargs_cmd)
# Then add the new values
if cmd['operation'] != 'set':
return
kwargs_cmd['operation'] = 'add'
kwargs_cmd['kwargs']['default_options'] = []
cdata = self.interpreter.coredata
options = {
**cdata.builtins,
**cdata.builtins_per_machine.host,
**{'build.' + k: o for k, o in cdata.builtins_per_machine.build.items()},
**cdata.backend_options,
**cdata.base_options,
**cdata.compiler_options.host,
**{'build.' + k: o for k, o in cdata.compiler_options.build.items()},
**cdata.user_options,
}
for key, val in sorted(cmd['options'].items()):
if key not in options:
mlog.error('Unknown options', mlog.bold(key), *self.on_error())
self.handle_error()
continue
try:
val = options[key].validate_value(val)
except MesonException as e:
mlog.error('Unable to set', mlog.bold(key), mlog.red(str(e)), *self.on_error())
self.handle_error()
continue
kwargs_cmd['kwargs']['default_options'] += ['{}={}'.format(key, val)]
self.process_kwargs(kwargs_cmd)
@RequiredKeys(rewriter_keys['kwargs'])
def process_kwargs(self, cmd):
mlog.log('Processing function type', mlog.bold(cmd['function']), 'with id', mlog.cyan("'" + cmd['id'] + "'"))
if cmd['function'] not in rewriter_func_kwargs:
mlog.error('Unknown function type', cmd['function'], *self.on_error())
return self.handle_error()
kwargs_def = rewriter_func_kwargs[cmd['function']]
# Find the function node to modify
node = None
arg_node = None
if cmd['function'] == 'project':
if cmd['id'] != '/':
mlog.error('The ID for the function type project must be "/"', *self.on_error())
return self.handle_error()
node = self.interpreter.project_node
arg_node = node.args
elif cmd['function'] == 'target':
tmp = self.find_target(cmd['id'])
if tmp:
node = tmp['node']
arg_node = node.args
elif cmd['function'] == 'dependency':
tmp = self.find_dependency(cmd['id'])
if tmp:
node = tmp['node']
arg_node = node.args
if not node:
mlog.error('Unable to find the function node')
assert(isinstance(node, FunctionNode))
assert(isinstance(arg_node, ArgumentNode))
# Print kwargs info
if cmd['operation'] == 'info':
info_data = {}
for key, val in sorted(arg_node.kwargs.items()):
info_data[key] = None
if isinstance(val, ElementaryNode):
info_data[key] = val.value
elif isinstance(val, ArrayNode):
data_list = []
for i in val.args.arguments:
element = None
if isinstance(i, ElementaryNode):
element = i.value
data_list += [element]
info_data[key] = data_list
self.add_info('kwargs', '{}#{}'.format(cmd['function'], cmd['id']), info_data)
return # Nothing else to do
# Modify the kwargs
num_changed = 0
for key, val in sorted(cmd['kwargs'].items()):
if key not in kwargs_def:
mlog.error('Cannot modify unknown kwarg', mlog.bold(key), *self.on_error())
self.handle_error()
continue
# Remove the key from the kwargs
if cmd['operation'] == 'delete':
if key in arg_node.kwargs:
mlog.log(' -- Deleting', mlog.bold(key), 'from the kwargs')
del arg_node.kwargs[key]
num_changed += 1
else:
mlog.log(' -- Key', mlog.bold(key), 'is already deleted')
continue
if key not in arg_node.kwargs:
arg_node.kwargs[key] = None
modifyer = kwargs_def[key](arg_node.kwargs[key])
if not modifyer.can_modify():
mlog.log(' -- Skipping', mlog.bold(key), 'because it is too complex to modify')
# Apply the operation
val_str = str(val)
if cmd['operation'] == 'set':
mlog.log(' -- Setting', mlog.bold(key), 'to', mlog.yellow(val_str))
modifyer.set_value(val)
elif cmd['operation'] == 'add':
mlog.log(' -- Adding', mlog.yellow(val_str), 'to', mlog.bold(key))
modifyer.add_value(val)
elif cmd['operation'] == 'remove':
mlog.log(' -- Removing', mlog.yellow(val_str), 'from', mlog.bold(key))
modifyer.remove_value(val)
elif cmd['operation'] == 'remove_regex':
mlog.log(' -- Removing all values matching', mlog.yellow(val_str), 'from', mlog.bold(key))
modifyer.remove_regex(val)
# Write back the result
arg_node.kwargs[key] = modifyer.get_node()
num_changed += 1
if num_changed > 0 and node not in self.modefied_nodes:
self.modefied_nodes += [node]
def find_assignment_node(self, node: BaseNode) -> AssignmentNode:
if hasattr(node, 'ast_id') and node.ast_id in self.interpreter.reverse_assignment:
return self.interpreter.reverse_assignment[node.ast_id]
return None
@RequiredKeys(rewriter_keys['target'])
def process_target(self, cmd):
mlog.log('Processing target', mlog.bold(cmd['target']), 'operation', mlog.cyan(cmd['operation']))
target = self.find_target(cmd['target'])
if target is None and cmd['operation'] != 'target_add':
mlog.error('Unknown target', mlog.bold(cmd['target']), *self.on_error())
return self.handle_error()
# Make source paths relative to the current subdir
def rel_source(src: str) -> str:
subdir = os.path.abspath(os.path.join(self.sourcedir, target['subdir']))
if os.path.isabs(src):
return os.path.relpath(src, subdir)
elif not os.path.exists(src):
return src # Trust the user when the source doesn't exist
# Make sure that the path is relative to the subdir
return os.path.relpath(os.path.abspath(src), subdir)
if target is not None:
cmd['sources'] = [rel_source(x) for x in cmd['sources']]
# Utility function to get a list of the sources from a node
def arg_list_from_node(n):
args = []
if isinstance(n, FunctionNode):
args = list(n.args.arguments)
if n.func_name in build_target_functions:
args.pop(0)
elif isinstance(n, ArrayNode):
args = n.args.arguments
elif isinstance(n, ArgumentNode):
args = n.arguments
return args
to_sort_nodes = []
if cmd['operation'] == 'src_add':
node = None
if target['sources']:
node = target['sources'][0]
else:
node = target['node']
assert(node is not None)
# Generate the current source list
src_list = []
for i in target['sources']:
for j in arg_list_from_node(i):
if isinstance(j, StringNode):
src_list += [j.value]
# Generate the new String nodes
to_append = []
for i in sorted(set(cmd['sources'])):
if i in src_list:
mlog.log(' -- Source', mlog.green(i), 'is already defined for the target --> skipping')
continue
mlog.log(' -- Adding source', mlog.green(i), 'at',
mlog.yellow('{}:{}'.format(os.path.join(node.subdir, environment.build_filename), node.lineno)))
token = Token('string', node.subdir, 0, 0, 0, None, i)
to_append += [StringNode(token)]
# Append to the AST at the right place
arg_node = None
if isinstance(node, (FunctionNode, ArrayNode)):
arg_node = node.args
elif isinstance(node, ArgumentNode):
arg_node = node
assert(arg_node is not None)
arg_node.arguments += to_append
# Mark the node as modified
if arg_node not in to_sort_nodes and not isinstance(node, FunctionNode):
to_sort_nodes += [arg_node]
if node not in self.modefied_nodes:
self.modefied_nodes += [node]
elif cmd['operation'] == 'src_rm':
# Helper to find the exact string node and its parent
def find_node(src):
for i in target['sources']:
for j in arg_list_from_node(i):
if isinstance(j, StringNode):
if j.value == src:
return i, j
return None, None
for i in cmd['sources']:
# Try to find the node with the source string
root, string_node = find_node(i)
if root is None:
mlog.warning(' -- Unable to find source', mlog.green(i), 'in the target')
continue
# Remove the found string node from the argument list
arg_node = None
if isinstance(root, (FunctionNode, ArrayNode)):
arg_node = root.args
elif isinstance(root, ArgumentNode):
arg_node = root
assert(arg_node is not None)
mlog.log(' -- Removing source', mlog.green(i), 'from',
mlog.yellow('{}:{}'.format(os.path.join(string_node.subdir, environment.build_filename), string_node.lineno)))
arg_node.arguments.remove(string_node)
# Mark the node as modified
if arg_node not in to_sort_nodes and not isinstance(root, FunctionNode):
to_sort_nodes += [arg_node]
if root not in self.modefied_nodes:
self.modefied_nodes += [root]
elif cmd['operation'] == 'target_add':
if target is not None:
mlog.error('Can not add target', mlog.bold(cmd['target']), 'because it already exists', *self.on_error())
return self.handle_error()
id_base = re.sub(r'[- ]', '_', cmd['target'])
target_id = id_base + ('_exe' if cmd['target_type'] == 'executable' else '_lib')
source_id = id_base + '_sources'
# Build src list
src_arg_node = ArgumentNode(Token('string', cmd['subdir'], 0, 0, 0, None, ''))
src_arr_node = ArrayNode(src_arg_node, 0, 0, 0, 0)
src_far_node = ArgumentNode(Token('string', cmd['subdir'], 0, 0, 0, None, ''))
src_fun_node = FunctionNode(cmd['subdir'], 0, 0, 0, 0, 'files', src_far_node)
src_ass_node = AssignmentNode(cmd['subdir'], 0, 0, source_id, src_fun_node)
src_arg_node.arguments = [StringNode(Token('string', cmd['subdir'], 0, 0, 0, None, x)) for x in cmd['sources']]
src_far_node.arguments = [src_arr_node]
# Build target
tgt_arg_node = ArgumentNode(Token('string', cmd['subdir'], 0, 0, 0, None, ''))
tgt_fun_node = FunctionNode(cmd['subdir'], 0, 0, 0, 0, cmd['target_type'], tgt_arg_node)
tgt_ass_node = AssignmentNode(cmd['subdir'], 0, 0, target_id, tgt_fun_node)
tgt_arg_node.arguments = [
StringNode(Token('string', cmd['subdir'], 0, 0, 0, None, cmd['target'])),
IdNode(Token('string', cmd['subdir'], 0, 0, 0, None, source_id))
]
src_ass_node.accept(AstIndentationGenerator())
tgt_ass_node.accept(AstIndentationGenerator())
self.to_add_nodes += [src_ass_node, tgt_ass_node]
elif cmd['operation'] == 'target_rm':
to_remove = self.find_assignment_node(target['node'])
if to_remove is None:
to_remove = target['node']
self.to_remove_nodes += [to_remove]
mlog.log(' -- Removing target', mlog.green(cmd['target']), 'at',
mlog.yellow('{}:{}'.format(os.path.join(to_remove.subdir, environment.build_filename), to_remove.lineno)))
elif cmd['operation'] == 'info':
# List all sources in the target
src_list = []
for i in target['sources']:
for j in arg_list_from_node(i):
if isinstance(j, StringNode):
src_list += [j.value]
test_data = {
'name': target['name'],
'sources': src_list
}
self.add_info('target', target['id'], test_data)
# Sort files
for i in to_sort_nodes:
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
path_sorter = lambda key: ([(key.count('/') <= idx, alphanum_key(x)) for idx, x in enumerate(key.split('/'))])
unknown = [x for x in i.arguments if not isinstance(x, StringNode)]
sources = [x for x in i.arguments if isinstance(x, StringNode)]
sources = sorted(sources, key=lambda x: path_sorter(x.value))
i.arguments = unknown + sources
def process(self, cmd):
if 'type' not in cmd:
raise RewriterException('Command has no key "type"')
if cmd['type'] not in self.functions:
raise RewriterException('Unknown command "{}". Supported commands are: {}'
.format(cmd['type'], list(self.functions.keys())))
self.functions[cmd['type']](cmd)
def apply_changes(self):
assert(all(hasattr(x, 'lineno') and hasattr(x, 'colno') and hasattr(x, 'subdir') for x in self.modefied_nodes))
assert(all(hasattr(x, 'lineno') and hasattr(x, 'colno') and hasattr(x, 'subdir') for x in self.to_remove_nodes))
assert(all(isinstance(x, (ArrayNode, FunctionNode)) for x in self.modefied_nodes))
assert(all(isinstance(x, (ArrayNode, AssignmentNode, FunctionNode)) for x in self.to_remove_nodes))
# Sort based on line and column in reversed order
work_nodes = [{'node': x, 'action': 'modify'} for x in self.modefied_nodes]
work_nodes += [{'node': x, 'action': 'rm'} for x in self.to_remove_nodes]
work_nodes = list(sorted(work_nodes, key=lambda x: (x['node'].lineno, x['node'].colno), reverse=True))
work_nodes += [{'node': x, 'action': 'add'} for x in self.to_add_nodes]
# Generating the new replacement string
str_list = []
for i in work_nodes:
new_data = ''
if i['action'] == 'modify' or i['action'] == 'add':
printer = AstPrinter()
i['node'].accept(printer)
printer.post_process()
new_data = printer.result.strip()
data = {
'file': os.path.join(i['node'].subdir, environment.build_filename),
'str': new_data,
'node': i['node'],
'action': i['action']
}
str_list += [data]
# Load build files
files = {}
for i in str_list:
if i['file'] in files:
continue
fpath = os.path.realpath(os.path.join(self.sourcedir, i['file']))
fdata = ''
# Create an empty file if it does not exist
if not os.path.exists(fpath):
with open(fpath, 'w'):
pass
with open(fpath, 'r') as fp:
fdata = fp.read()
# Generate line offsets numbers
m_lines = fdata.splitlines(True)
offset = 0
line_offsets = []
for j in m_lines:
line_offsets += [offset]
offset += len(j)
files[i['file']] = {
'path': fpath,
'raw': fdata,
'offsets': line_offsets
}
# Replace in source code
def remove_node(i):
offsets = files[i['file']]['offsets']
raw = files[i['file']]['raw']
node = i['node']
line = node.lineno - 1
col = node.colno
start = offsets[line] + col
end = start
if isinstance(node, (ArrayNode, FunctionNode)):
end = offsets[node.end_lineno - 1] + node.end_colno
# Only removal is supported for assignments
elif isinstance(node, AssignmentNode) and i['action'] == 'rm':
if isinstance(node.value, (ArrayNode, FunctionNode)):
remove_node({'file': i['file'], 'str': '', 'node': node.value, 'action': 'rm'})
raw = files[i['file']]['raw']
while raw[end] != '=':
end += 1
end += 1 # Handle the '='
while raw[end] in [' ', '\n', '\t']:
end += 1
files[i['file']]['raw'] = raw[:start] + i['str'] + raw[end:]
for i in str_list:
if i['action'] in ['modify', 'rm']:
remove_node(i)
elif i['action'] in ['add']:
files[i['file']]['raw'] += i['str'] + '\n'
# Write the files back
for key, val in files.items():
mlog.log('Rewriting', mlog.yellow(key))
with open(val['path'], 'w') as fp:
fp.write(val['raw'])
target_operation_map = {
'add': 'src_add',
'rm': 'src_rm',
'add_target': 'target_add',
'rm_target': 'target_rm',
'info': 'info',
}
def list_to_dict(in_list: List[str]) -> Dict[str, str]:
result = {}
it = iter(in_list)
try:
for i in it:
# calling next(it) is not a mistake, we're taking the next element from
# the iterator, avoiding the need to preprocess it into a sequence of
# key value pairs.
result[i] = next(it)
except StopIteration:
raise TypeError('in_list parameter of list_to_dict must have an even length.')
return result
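# For example (illustrative values): list_to_dict(['cpp_std', 'c++17', 'unity', 'on'])
# returns {'cpp_std': 'c++17', 'unity': 'on'}; an odd-length list raises TypeError.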
def generate_target(options) -> List[dict]:
return [{
'type': 'target',
'target': options.target,
'operation': target_operation_map[options.operation],
'sources': options.sources,
'subdir': options.subdir,
'target_type': options.tgt_type,
}]
def generate_kwargs(options) -> List[dict]:
return [{
'type': 'kwargs',
'function': options.function,
'id': options.id,
'operation': options.operation,
'kwargs': list_to_dict(options.kwargs),
}]
def generate_def_opts(options) -> List[dict]:
return [{
'type': 'default_options',
'operation': options.operation,
'options': list_to_dict(options.options),
}]
def generate_cmd(options) -> List[dict]:
if os.path.exists(options.json):
with open(options.json, 'r') as fp:
return json.load(fp)
else:
return json.loads(options.json)
# Map options.type to the actual type name
cli_type_map = {
'target': generate_target,
'tgt': generate_target,
'kwargs': generate_kwargs,
'default-options': generate_def_opts,
'def': generate_def_opts,
'command': generate_cmd,
'cmd': generate_cmd,
}
def run(options):
if not options.verbose:
mlog.set_quiet()
try:
rewriter = Rewriter(options.sourcedir, skip_errors=options.skip)
rewriter.analyze_meson()
if options.type is None:
mlog.error('No command specified')
return 1
commands = cli_type_map[options.type](options)
if not isinstance(commands, list):
raise TypeError('Command is not a list')
for i in commands:
if not isinstance(i, dict):
raise TypeError('Command is not an object')
rewriter.process(i)
rewriter.apply_changes()
rewriter.print_info()
return 0
except Exception as e:
raise e
finally:
mlog.set_verbose()
| 39.390674 | 156 | 0.567242 |
3ed53783999823af30b91fa38dd3764fa219c256 | 3,088 | py | Python | core/settings/common.py | themightychris/prevention-point | a92f98b25d32dd30bb33e7cb1ac7f10439f5203f | ["MIT"] | 1 | 2020-07-18T02:14:51.000Z | 2020-07-18T02:14:51.000Z | core/settings/common.py | themightychris/prevention-point | a92f98b25d32dd30bb33e7cb1ac7f10439f5203f | ["MIT"] | null | null | null | core/settings/common.py | themightychris/prevention-point | a92f98b25d32dd30bb33e7cb1ac7f10439f5203f | ["MIT"] | 1 | 2020-02-22T18:21:04.000Z | 2020-02-22T18:21:04.000Z |
"""
Django settings for preventionpoint project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'gz!+^ra#d*j&rpq@^udd89hwzq7%z%%!c=pgl)wac!+rcp8+d-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework_simplejwt.authentication.JWTAuthentication',
],
}
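# Illustrative note (not part of this settings module): with JWTAuthentication enabled
# above, token endpoints are typically wired up in the project's urls.py, for example:
#
#     from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView
#     urlpatterns += [
#         path('api/token/', TokenObtainPairView.as_view()),
#         path('api/token/refresh/', TokenRefreshView.as_view()),
#     ]
#
# The URL paths are placeholders; this project's actual core/urls.py is not shown here.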
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'core',
'rest_framework_swagger',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'core.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'core.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| 26.169492 | 91 | 0.701425 |
1567a8bc81286172983347092f627c5cf7988cbc | 3,629 | py | Python | python_experiments/paper_figures/vldbj/draw_index_update_time.py | RapidsAtHKUST/SimRank | 3a601b08f9a3c281e2b36b914e06aba3a3a36118 | ["MIT"] | 8 | 2020-04-14T23:17:00.000Z | 2021-06-21T12:34:04.000Z | python_experiments/paper_figures/vldbj/draw_index_update_time.py | RapidsAtHKUST/SimRank | 3a601b08f9a3c281e2b36b914e06aba3a3a36118 | ["MIT"] | null | null | null | python_experiments/paper_figures/vldbj/draw_index_update_time.py | RapidsAtHKUST/SimRank | 3a601b08f9a3c281e2b36b914e06aba3a3a36118 | ["MIT"] | 1 | 2021-01-17T16:26:50.000Z | 2021-01-17T16:26:50.000Z |
import matplotlib.pyplot as plt
import numpy as np
from data_analysis.vldbj_data_parsing.generate_index_markdown import *
def get_name_dict():
with open('data_names.json') as ifs:
return eval(''.join(ifs.readlines()))
# data set abbreviation dictionary
data_names = get_name_dict()
# figure parameters
FIG_SIZE_MULTIPLE = (32, 6)
LABEL_SIZE = 22
TICK_SIZE = 22
LEGEND_SIZE = 22
def get_algorithm_indexing_time_lst(tag):
if tag in [tsf_tag]:
return [4.36215e-06, 3.85213e-06, 4.10121e-06, 4.10403e-06, 4.64019e-06, 1.18873e-05,
# 2.71164e-05,
1.72031e-05, 1.69887e-05, 2.52288e-05, 1.39529e-05, 1.50784e-05, 5.99948e-05,
1.0361e-05, 1.42169e-05]
elif tag in [reads_d_tag]:
return [0.00522976, 0.00612305, 0.00374263, 0.00103439, 0.00722572, 0.00366369,
# 0.00847435,
0.00673668, 0.020272, 0.0117233, 0.00976721, 0.0828088, 0,
0.00699347, 0.0430287]
elif tag in [reads_rq_tag]:
return [0.000235797, 0.000275629, 0.00020318, 7.15575e-05, 0.000356041, 0.000165915,
# 0.000449374,
0.000430675, 0.00149114, 0.000618879, 0.000762481, 0.00199481, 0.0045728,
0.000455444, 0.00191903]
else:
return [0.00130415, 0.00241406, 0.00237629, 0.00163426, 0.0139743, 0.0493718,
# 0.124753,
0.102021, 0.271308, 0.268973, 1.25391, 2.47118, 9.09415,
0.56654, 8.8932]
def draw_index_update_time():
data_set_lst = [
'ca-GrQc', 'ca-HepTh', 'p2p-Gnutella06', 'wiki-Vote',
'email-Enron', 'email-EuAll',
'web-Stanford', 'web-BerkStan', 'web-Google',
'cit-Patents', 'soc-LiveJournal1',
'wiki-Link',
'digg-friends',
'flickr-growth', ]
algorithm_lst = [tsf_tag, reads_d_tag, reads_rq_tag, local_push_tag]
time_dict = dict()
for algo in algorithm_lst:
time_dict[algo] = dict(zip(data_set_lst, get_algorithm_indexing_time_lst(algo)))
with open('data_set_lst.json') as ifs:
data_set_lst = json.load(ifs)
g_names = list(map(lambda data: data_names[data], data_set_lst))
size_of_fig = (FIG_SIZE_MULTIPLE[0], FIG_SIZE_MULTIPLE[1])
fig, ax = plt.subplots()
N = len(g_names)
# indent lst
width = 0.2
ind = 1.2 * np.arange(N) # the x locations for the groups
indent_lst = list(map(lambda idx: ind + idx * width, range(5)))
# other lst
hatch_lst = ["//", '', 'O', '--', ]
algorithm_tag_lst = [local_push_tag, reads_d_tag, reads_rq_tag, tsf_tag]
label_lst = ["FLP", "READS_D", "READS-Rq", "TSF"]
color_lst = ['blue', '#fe01b1', '#ceb301', 'brown']
# 1st: bars
for idx, tag in enumerate(algorithm_tag_lst):
ax.bar(indent_lst[idx], [time_dict[tag][data] for data in data_set_lst], width, hatch=hatch_lst[idx],
label=label_lst[idx], edgecolor=color_lst[idx], fill=False)
# 2nd: x and y's ticks and labels
ax.set_xticks(ind + 1.5 * width)
ax.set_xticklabels(g_names, fontsize=LABEL_SIZE)
plt.xticks(fontsize=TICK_SIZE)
plt.yscale('log')
ax.set_ylabel("Avg Update Time (s)", fontsize=LABEL_SIZE)
plt.yticks(fontsize=TICK_SIZE)
plt.ylim(10 ** -6, 13)
# 3rd: figure properties
fig.set_size_inches(*size_of_fig) # set ratio
plt.legend(prop={'size': LEGEND_SIZE, "weight": "bold"}, loc="upper left", ncol=5)
fig.savefig("./figures/" + 'index_update_time.pdf', bbox_inches='tight', dpi=300)
if __name__ == '__main__':
os.system('mkdir -p figures')
draw_index_update_time()
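# --- Added note (illustrative, not part of the original script) ---
# How the grouped bars above are laid out: each algorithm's series is shifted a
# fixed `width` to the right of the shared group origins `ind`, e.g. with
# width = 0.2 and three groups:
#
#     ind = 1.2 * np.arange(3)   # array([0. , 1.2, 2.4])  group origins
#     ind + 1 * 0.2              # array([0.2, 1.4, 2.6])  second algorithm's bars
#
# The x tick positions `ind + 1.5 * width` sit midway across each group's four
# bars (offsets 0, 0.2, 0.4, 0.6).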
| 34.561905
| 109
| 0.628823
|
2642b0f1c4d7d81e2520c391ca6ecca925d68467
| 7,508
|
py
|
Python
|
src/compas_blender/conversions/mesh.py
|
Sam-Bouten/compas
|
011c7779ded9b69bb602568b470bb0443e336f62
|
[
"MIT"
] | null | null | null |
src/compas_blender/conversions/mesh.py
|
Sam-Bouten/compas
|
011c7779ded9b69bb602568b470bb0443e336f62
|
[
"MIT"
] | null | null | null |
src/compas_blender/conversions/mesh.py
|
Sam-Bouten/compas
|
011c7779ded9b69bb602568b470bb0443e336f62
|
[
"MIT"
] | null | null | null |
import bpy
import bmesh
from compas.datastructures import Mesh
from compas.geometry import Point
from ._geometry import BlenderGeometry
class BlenderMesh(BlenderGeometry):
"""Wrapper for Blender meshes.
Examples
--------
.. code-block:: python
pass
"""
@property
def object(self):
""":blender:`bpy.types.Object` - The Blender scene object."""
return self._object
@object.setter
def object(self, obj):
mesh = bpy.data.meshes.new_from_object(obj)
self._object = obj
self._geometry = mesh
@property
def geometry(self):
""":blender:`bpy.types.Mesh` - The mesh data block."""
return self._geometry
@geometry.setter
def geometry(self, data):
self._object = None
self._geometry = data
@property
def bmesh(self):
""":blender:`bpy.types.BMesh` - The mesh data structure."""
        return bmesh.from_edit_mesh(self.geometry)
    @property
    def location(self):
        """:class:`compas.geometry.Point` - The location of the object in the scene (the origin if there is no scene object)."""
        return Point(*self.object.location) if self.object else Point(0, 0, 0)
@property
def vertices(self):
"""List[:class:`compas.geometry.Point`] - The mesh vertex locations."""
point = self.location
return [point + list(vertex.co) for vertex in self.geometry.vertices]
@property
def faces(self):
"""List[List[:obj:`int`]] - The mesh face vertices."""
return [list(face.vertices) for face in self.geometry.polygons]
@classmethod
def from_bmesh(cls, bm, name=None, free=True):
"""Construct a Blender mesh wrappper from a BMesh.
Parameters
----------
bm : :blender:`bpy.types.BMesh`
The Blender mesh data structure.
name : :obj:`str`, optional
The name of the data block.
free : :obj:`bool`, optional
Free the data structure once the data block is created.
Returns
-------
:class:`compas_blender.conversions.BlenderMesh`
"""
data = bpy.data.meshes.new(name or 'Mesh')
bm.to_mesh(data)
if free:
bm.free()
mesh = cls()
mesh.geometry = data
return mesh
@classmethod
def from_monkey(cls, name=None):
"""Construct a Blender mesh wrappper from the Blender monkey.
Parameters
----------
name : :obj:`str`, optional
The name of the data block.
Returns
-------
:class:`compas_blender.conversions.BlenderMesh`
"""
bm = bmesh.new()
bmesh.ops.create_monkey(bm)
data = bpy.data.meshes.new(name or 'Mesh')
bm.to_mesh(data)
bm.free()
mesh = cls()
mesh.geometry = data
return mesh
def to_compas(self, cls=None):
"""Convert the Blender mesh to a COMPAS mesh.
Parameters
----------
cls : :class:`compas.datastructures.Mesh`, optional
The type of COMPAS mesh.
Returns
-------
        :class:`compas.datastructures.Mesh`
"""
cls = cls or Mesh
return cls.from_vertices_and_faces(self.vertices, self.faces)
# def get_vertex_coordinates(self, vertex):
# return add_vectors(self.location, self.geometry.vertices[vertex].co)
# def get_vertices_coordinates(self):
# xyzs = [vertex.co for vertex in self.geometry.vertices]
# return {vertex: add_vectors(self.location, xyz) for vertex, xyz in enumerate(xyzs)}
# def set_vertices_coordinates(self, xyzs):
# for vertex, xyz in xyzs.items():
# self.geometry.vertices[vertex].co = subtract_vectors(xyz, self.location)
# def get_vertices_colors(self, vertices=None):
# colors = {}
# col = self.geometry.vertex_colors.active
# if col:
# if not vertices:
# vertices = range(len(self.geometry.vertices))
# for face in self.geometry.polygons:
# for i in face.loop_indices:
# j = self.geometry.loops[i].vertex_index
# if (j in vertices) and (not colors.get(j, None)):
# colors[j] = list(col.data[i].color)[:3]
# return colors
# def set_vertices_colors(self, colors):
# if self.geometry.vertex_colors:
# col = self.geometry.vertex_colors.active
# else:
# col = self.geometry.vertex_colors.new()
# for face in self.geometry.polygons:
# for i in face.loop_indices:
# j = self.geometry.loops[i].vertex_index
# if j in colors:
# col.data[i].color = list(colors[j]) + [1]
# def unset_vertices_colors(self):
# vertex_colors = self.geometry.vertex_colors
# while vertex_colors:
# vertex_colors.remove(vertex_colors[0])
# def get_edge_vertex_indices(self, edge):
# return list(self.geometry.edges[edge].vertices)
# def get_edges_vertex_indices(self, edges=None):
# if not edges:
# edges = range(len(self.geometry.edges))
# return {edge: self.get_edge_vertex_indices(edge=edge) for edge in edges}
# def edge_length(self, edge):
# u, v = self.geometry.edges[edge].vertices
# sp, ep = [list(self.geometry.vertices[i].co) for i in [u, v]]
# return distance_point_point(sp, ep)
# def edges_lengths(self, edges=None):
# if not edges:
# edges = range(len(self.geometry.edges))
# return {edge: self.edge_length(edge=edge) for edge in edges}
# def get_face_vertex_indices(self, face):
# return list(self.geometry.polygons[face].vertices)
# def get_faces_vertex_indices(self, faces=None):
# if not faces:
# faces = range(len(self.geometry.polygons))
# return {face: self.get_face_vertex_indices(face=face) for face in faces}
# def face_normal(self, face):
# return list(self.geometry.polygons[face].normal)
# def faces_normals(self, faces=None):
# if not faces:
# faces = range(len(self.geometry.polygons))
# return {face: self.face_normal(face=face) for face in faces}
# def face_area(self, face):
# return self.geometry.polygons[face].area
# def faces_areas(self, faces=None):
# if not faces:
# faces = range(len(self.geometry.polygons))
# return {face: self.face_area(face=face) for face in faces}
# def bevel(self, width=0.2, segments=1, only_vertices=False):
# self.object.modifiers.new('bevel', type='BEVEL')
# self.object.modifiers['bevel'].width = width
# self.object.modifiers['bevel'].segments = segments
# self.object.modifiers['bevel'].use_only_vertices = only_vertices
# self.refresh()
# def subdivide(self, levels=1, type='SIMPLE'):
# self.object.modifiers.new('subdivision', type='SUBSURF')
# self.object.modifiers['subdivision'].levels = levels
# self.object.modifiers['subdivision'].subdivision_type = type # or 'CATMULL_CLARK'
# self.refresh()
# def triangulate(self):
# self.object.modifiers.new('triangulate', type='TRIANGULATE')
# self.refresh()
# def get_vertices_and_faces(self):
# vertices = self.get_vertices_coordinates()
# faces = self.get_faces_vertex_indices()
# return vertices, faces
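# --- Added usage sketch (illustrative, not part of the original module) ---
# Round trip of the wrapper defined above, intended for Blender's Python
# console where `bpy` and `bmesh` are available; the data block name is
# arbitrary.
#
#     blender_mesh = BlenderMesh.from_monkey(name="Suzanne")
#     compas_mesh = blender_mesh.to_compas()
#     print(compas_mesh.number_of_vertices(), compas_mesh.number_of_faces())
#
# For a geometry-only wrapper (no scene object) the location falls back to the
# origin, so the COMPAS vertices coincide with the raw Blender coordinates.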
| 33.07489
| 93
| 0.598428
|
52255ac7b03099ea5f57578d88a1865bc054d6ba
| 2,255
|
py
|
Python
|
xlsxwriter/test/comparison/test_chart_axis16.py
|
totdiao/XlsxWriter
|
3d65858d8933bddb8262d500bcc2005f28fde645
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
xlsxwriter/test/comparison/test_chart_axis16.py
|
totdiao/XlsxWriter
|
3d65858d8933bddb8262d500bcc2005f28fde645
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
xlsxwriter/test/comparison/test_chart_axis16.py
|
totdiao/XlsxWriter
|
3d65858d8933bddb8262d500bcc2005f28fde645
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_axis16.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {'xl/charts/chart1.xml': ['<c:formatCode']}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'stock'})
date_format = workbook.add_format({'num_format': 14})
chart.axis_ids = [43572608, 43812736]
data = [
[39083, 39084, 39085, 39086, 39087],
[27.2, 25.03, 19.05, 20.34, 18.5],
[23.49, 19.55, 15.12, 17.84, 16.34],
[25.45, 23.05, 17.32, 20.45, 17.34],
]
for row in range(5):
worksheet.write(row, 0, data[0][row], date_format)
worksheet.write(row, 1, data[1][row])
worksheet.write(row, 2, data[2][row])
worksheet.write(row, 3, data[3][row])
worksheet.set_column('A:D', 11)
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$B$1:$B$5',
})
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$C$1:$C$5',
})
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$D$1:$D$5',
})
chart.set_x_axis({
'minor_unit': 14,
'major_unit': 1,
'major_unit_type': 'months'
})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| 27.5
| 79
| 0.531707
|
ddf2cbe644bbef8c11949874032bd8bbb57979b7
| 7,592
|
py
|
Python
|
symphony/bdk/gen/pod_model/stream_list.py
|
symphony-elias/symphony-bdk-python
|
0d1cd94a9982e3687ea52c49acdb5f942ecd9bec
|
[
"Apache-2.0"
] | 17
|
2018-09-06T09:58:18.000Z
|
2021-07-13T12:54:20.000Z
|
symphony/bdk/gen/pod_model/stream_list.py
|
symphony-elias/symphony-bdk-python
|
0d1cd94a9982e3687ea52c49acdb5f942ecd9bec
|
[
"Apache-2.0"
] | 59
|
2018-11-21T15:17:57.000Z
|
2021-08-03T10:00:43.000Z
|
symphony/bdk/gen/pod_model/stream_list.py
|
symphony-elias/symphony-bdk-python
|
0d1cd94a9982e3687ea52c49acdb5f942ecd9bec
|
[
"Apache-2.0"
] | 37
|
2018-09-01T03:07:48.000Z
|
2021-07-06T10:21:50.000Z
|
"""
Pod API
This document refers to Symphony API calls that do not need encryption or decryption of content. - sessionToken can be obtained by calling the authenticationAPI on the symphony back end and the key manager respectively. Refer to the methods described in authenticatorAPI.yaml. - Actions are defined to be atomic, ie will succeed in their entirety or fail and have changed nothing. - If it returns a 40X status then it will have made no change to the system even if ome subset of the request would have succeeded. - If this contract cannot be met for any reason then this is an error and the response code will be 50X. # noqa: E501
The version of the OpenAPI document: 20.13.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from typing import List
from symphony.bdk.gen.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from symphony.bdk.gen.pod_model.stream_attributes import StreamAttributes
globals()['StreamAttributes'] = StreamAttributes
class StreamList(ModelSimple):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a pod_model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'value': ([StreamAttributes],),
}
@cached_property
def discriminator():
return None
attribute_map = {}
_composed_schemas = None
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
"""StreamList - a pod_model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] ([StreamAttributes]): A list of streams of which the requesting user is a member.. # noqa: E501
Keyword Args:
value ([StreamAttributes]): A list of streams of which the requesting user is a member.. # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the pod_model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value: List[StreamAttributes] = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
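# --- Added usage sketch (illustrative, not part of the original module) ---
# StreamList is a ModelSimple wrapper around a single `value` attribute, so it
# is constructed either positionally or with the `value` keyword; the
# StreamAttributes instances below are hypothetical.
#
#     streams = StreamList(value=[])               # empty, still type-checked
#     streams = StreamList([attr_one, attr_two])   # positional form
#     streams.value                                # -> the wrapped list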
| 41.26087
| 637
| 0.597866
|
0ded02dbd45a8b53d8326ab6a12b226ec75dd7ee
| 8,780
|
py
|
Python
|
DynatraceSyntheticConverter/commands/generate.py
|
Appdynamics/DynatraceSyntheticConverter
|
6a43357640a140ac6d0625731175cf3f3a557c0f
|
[
"Apache-2.0"
] | 2
|
2021-09-06T07:08:02.000Z
|
2022-01-20T20:37:42.000Z
|
DynatraceSyntheticConverter/commands/generate.py
|
Appdynamics/DynatraceSyntheticConverter
|
6a43357640a140ac6d0625731175cf3f3a557c0f
|
[
"Apache-2.0"
] | null | null | null |
DynatraceSyntheticConverter/commands/generate.py
|
Appdynamics/DynatraceSyntheticConverter
|
6a43357640a140ac6d0625731175cf3f3a557c0f
|
[
"Apache-2.0"
] | 3
|
2021-02-09T19:32:43.000Z
|
2021-04-19T13:42:36.000Z
|
import os
import re
from click import command
import glob
import json
from pathlib import Path
import logging
CONVERSION_SNIPPET_BASE = 'DynatraceSyntheticConverter/resources/conversionSnippets/'
@command(
name='generate',
help='''
Generate python scripts from Dynatrace synthetic monitor JSON.
Generated scripts are placed in the output directory and will overwrite existing scripts of the same name.
''')
def generate():
logging.info(f'-----Launching generate step-----')
if not os.path.exists('output'):
os.makedirs('output')
for file in glob.iglob('input/*.json'):
filename = Path(file).stem
schema = json.loads(open(file).read())
# if file is the complete synthetic config taken from API, not just downloaded script
if 'script' in schema:
schema = schema['script']
if 'configuration' not in schema or 'events' not in schema:
continue
if schema['type'] == 'clickpath' or schema['type'] == 'availability':
logging.info(f'Converting {filename}')
events = schema['events']
code = open(CONVERSION_SNIPPET_BASE + 'base.txt').read()
eventsCode = ''
hasUnsupportedElements = False
for event in events:
if event['type'] == 'navigate':
eventsCode += __genNavigateCode(event)
elif event['type'] == 'keystrokes':
eventsCode += __genKeystrokesCode(event)
elif event['type'] == 'click':
eventsCode += __genClickCode(event)
elif event['type'] == 'javascript':
eventsCode += __genJsCode(event)
elif event['type'] == 'selectOption':
eventsCode += __genSelectOptionCode(event)
else:
hasUnsupportedElements = True
logging.debug(f'{event["type"]} is not yet supported')
if 'validate' in event:
eventsCode += __genTextMatchCode(event)
if hasUnsupportedElements:
logging.info(f'{filename} not fully converted, contains unsupported elements')
# trim trailing newline when we append our events code
code = code.replace('# $EVENT_STEPS', eventsCode[:-1])
with open(f'output/{Path(file).stem}.py', 'w') as outfile:
logging.debug(f'Saving {filename}')
outfile.write(code)
else:
logging.error(f'Schema type {schema["type"]} for {filename} is not supported. Skipping...')
def __genNavigateCode(event) -> str:
url = event['url']
description = event['description']
code = open(CONVERSION_SNIPPET_BASE + 'actions/navigate.txt').read() \
.replace('$URL', url) \
.replace('$DESCRIPTION', description)
return code
def __genKeystrokesCode(event):
# next event target locator where the type is css and value contains a #, else grab the first one
locators = event['target']['locators']
selector = __selectorFromLocators(locators)
keys = event['textValue']
description = event['description']
code = open(CONVERSION_SNIPPET_BASE + 'actions/keystrokes.txt').read() \
.replace('$SELECTOR_TYPE', selector['selectorType']) \
.replace('$SELECTOR_STRING', selector['selectorString']) \
.replace('$KEYS', keys) \
.replace('$DESCRIPTION', description)
return code
def __genClickCode(event):
locators = event['target']['locators']
selector = __selectorFromLocators(locators)
description = event['description']
code = open(CONVERSION_SNIPPET_BASE + 'actions/click.txt').read() \
.replace('$SELECTOR_TYPE', selector['selectorType']) \
.replace('$SELECTOR_STRING', selector['selectorString']) \
.replace('$DESCRIPTION', description)
return code
def __genSelectOptionCode(event):
locators = event['target']['locators']
selector = __selectorFromLocators(locators)
description = event['description']
selections = event['selections'][0]['index'] # TODO: this'll not work for multi selects
code = open(CONVERSION_SNIPPET_BASE + 'actions/selectOption.txt').read() \
.replace('$SELECTOR_TYPE', selector['selectorType']) \
.replace('$SELECTOR_STRING', selector['selectorString']) \
.replace('$DESCRIPTION', description) \
.replace('$INDEX', str(selections))
return code
def __genTextMatchCode(event):
code = ""
for validator in event['validate']:
if validator['type'] == 'content_match' or validator['type'] == 'text_match':
if validator['failIfFound']:
code += open(CONVERSION_SNIPPET_BASE + 'validators/textMatchFailIfFound.txt').read()
else:
code += open(CONVERSION_SNIPPET_BASE + 'validators/textMatchFailIfNotFound.txt').read()
code = code.replace('$TEXT', validator['match'])
if validator['type'] == 'element_match':
locators = validator['target']['locators']
selector = __selectorFromLocators(locators)
if validator['failIfFound']:
code += open(
CONVERSION_SNIPPET_BASE + 'validators/elementMatchFailIfFound.txt').read()
else:
code += open(CONVERSION_SNIPPET_BASE + 'validators/elementMatchFailIfNotFound.txt').read()
code = code.replace('$SELECTOR_TYPE', selector['selectorType']) \
.replace('$SELECTOR_STRING', selector['selectorString'])
return code
def __genJsCode(event):
description = event['description']
code = open(CONVERSION_SNIPPET_BASE + 'actions/jsCode.txt').read() \
.replace('$DESCRIPTION', description) \
.replace('$JS_CODE', event['javaScript'].replace('\n', '\t\t'))
return code
def __selectorFromLocators(locators):
cssIdLocator = \
next((locator for locator in locators if locator['type'] == 'css' and 'contains' not in locator['value']), None)
if cssIdLocator is not None:
cssID = cssIdLocator['value'].replace("\"", "\\\"").replace('\'', '')
return {
'selectorType': 'By.CSS_SELECTOR',
'selectorString': cssID
}
cssContainsLocator = \
next((locator for locator in locators if locator['type'] == 'css' and 'contains' in locator['value']), None)
if cssContainsLocator is not None:
val = cssContainsLocator['value'].replace('\'', '')
content = re.search(r'\((.*)\)', val).group(1).replace("\"", "\\\"")
tag = val.split(':')[0]
return {
'selectorType': 'By.XPATH',
'selectorString': f"//{tag}[contains(text(), {content})]"
}
if next((locator for locator in locators if locator['type'] == 'dom'), None) is not None:
if (locator := next((locator for locator in locators if 'getElementsByName' in locator['value']), None)) is not None:
val = locator['value'].replace('\'', '')
content = re.search(r'\((.*)\)', val).group(1)
return {
'selectorType': 'By.NAME',
'selectorString': content
}
if (locator := next((locator for locator in locators if 'querySelector' in locator['value']), None)) is not None:
val = locator['value'].replace('\'', '')
content = re.search(r'\((.*)\)', val).group(1)
return {
'selectorType': 'By.CSS_SELECTOR',
'selectorString': content
}
if (locator := next((locator for locator in locators if 'getElementsByClassName' in locator['value']), None)) is not None:
val = locator['value'].replace('\'', '')
content = re.search(r'\((.*)\)', val).group(1)
return {
'selectorType': 'By.CLASS_NAME',
'selectorString': content
}
if (locator := next((locator for locator in locators if 'getElementsByName' in locator['value']), None)) is not None:
val = locator['value'].replace('\'', '')
content = re.search(r'\((.*)\)', val).group(1)
return {
'selectorType': 'By.NAME',
'selectorString': content
}
if (locator := next((locator for locator in locators if 'getElementById' in locator['value']), None)) is not None:
val = locator['value'].replace('\'', '')
content = re.search(r'\((.*)\)', val).group(1)
return {
'selectorType': 'By.ID',
'selectorString': content
}
return 'None # TODO: locator found is ' + locators[0]["value"].replace("\"", "\\\"")
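# --- Added usage sketch (illustrative, not part of the original module) ---
# Quick check of the locator-to-Selenium-selector mapping above; the locator
# value is made up but follows the shape of Dynatrace clickpath JSON.
if __name__ == '__main__':
    sample_locators = [{'type': 'css', 'value': '#login-button'}]
    # Expected: {'selectorType': 'By.CSS_SELECTOR', 'selectorString': '#login-button'}
    print(__selectorFromLocators(sample_locators))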
| 40.460829
| 130
| 0.586902
|
7de39c671abe872e0ac1d88cfa9a40523de61ce8
| 322
|
py
|
Python
|
vec_env/vec_remove_dict_obs.py
|
leesharkey/train-procgen-pfrl
|
16d39132d284a3f826633945694be83dd659d471
|
[
"Apache-2.0"
] | 20
|
2020-10-27T09:24:20.000Z
|
2021-07-20T12:22:42.000Z
|
vec_env/vec_remove_dict_obs.py
|
leesharkey/train-procgen-pfrl
|
16d39132d284a3f826633945694be83dd659d471
|
[
"Apache-2.0"
] | 1
|
2021-04-04T23:59:39.000Z
|
2021-04-08T18:40:30.000Z
|
vec_env/vec_remove_dict_obs.py
|
leesharkey/train-procgen-pfrl
|
16d39132d284a3f826633945694be83dd659d471
|
[
"Apache-2.0"
] | 1
|
2021-03-07T19:01:04.000Z
|
2021-03-07T19:01:04.000Z
|
from .vec_env import VecEnvObservationWrapper
class VecExtractDictObs(VecEnvObservationWrapper):
def __init__(self, venv, key):
self.key = key
super().__init__(venv=venv,
observation_space=venv.observation_space.spaces[self.key])
def process(self, obs):
return obs[self.key]
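# --- Added usage sketch (illustrative, not part of the original module) ---
# Procgen's vectorized environments return dict observations keyed by "rgb";
# wrapping them with VecExtractDictObs exposes only that array. Assumes the
# `procgen` package is installed and that this file is run as a module
# (python -m ...) so the relative import above resolves; the settings are
# arbitrary.
if __name__ == "__main__":
    from procgen import ProcgenEnv
    venv = ProcgenEnv(num_envs=2, env_name="coinrun", num_levels=0, start_level=0)
    venv = VecExtractDictObs(venv, key="rgb")
    obs = venv.reset()
    print(obs.shape)  # e.g. (2, 64, 64, 3) uint8 frames instead of a dict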
| 26.833333
| 70
| 0.695652
|
b1cc72e889686b228106bab5037c7d98a0ca7e2b
| 775
|
py
|
Python
|
Version0.2/Python/Gencode/runcode.py
|
glwagner/Exasim
|
ee4540443435f958fa2ca78d59cbf9cff0fe69de
|
[
"MIT"
] | 37
|
2020-12-09T20:24:36.000Z
|
2022-02-18T17:19:23.000Z
|
Version0.2/Python/Gencode/runcode.py
|
glwagner/Exasim
|
ee4540443435f958fa2ca78d59cbf9cff0fe69de
|
[
"MIT"
] | 25
|
2020-11-25T20:37:33.000Z
|
2022-02-25T15:53:11.000Z
|
Version0.2/Python/Gencode/runcode.py
|
glwagner/Exasim
|
ee4540443435f958fa2ca78d59cbf9cff0fe69de
|
[
"MIT"
] | 8
|
2020-11-30T15:34:06.000Z
|
2022-01-09T21:06:00.000Z
|
import os
def runcode(app):
print("run code...");
mpirun = app['mpirun'];
if app['platform'] == "cpu":
if app['mpiprocs']==1:
mystr = "./app/serial" + app['appname'] + " datain/ dataout/out";
else:
mystr = mpirun + " -np " + str(app['mpiprocs']) + " ./app/mpi" + app['appname'] + " datain/ dataout/out";
os.system(mystr);
elif app['platform'] == "gpu":
if app['mpiprocs']==1:
mystr = "./app/gpu" + app['appname'] + " datain/ dataout/out";
else:
mystr = mpirun + " -np " + str(app['mpiprocs']) + " ./app/gpumpi" + app['appname'] + " datain/ dataout/out";
os.system(mystr);
else:
error("app['platform'] must be either cpu or gpu");
return mystr
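# --- Added note (illustrative, not part of the original module) ---
# The command string assembled above depends only on app['platform'],
# app['mpiprocs'], app['mpirun'] and app['appname']; the values below are
# made up.
#
#     app = {'platform': 'cpu', 'mpiprocs': 4, 'mpirun': 'mpirun', 'appname': 'poisson'}
#     # runcode(app) executes: "mpirun -np 4 ./app/mpipoisson datain/ dataout/out"
#
#     app = {'platform': 'gpu', 'mpiprocs': 1, 'mpirun': 'mpirun', 'appname': 'poisson'}
#     # runcode(app) executes: "./app/gpupoisson datain/ dataout/out"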
| 32.291667
| 120
| 0.507097
|
b8cc96e05ebc5ce702aa839784ee990fcba30467
| 587
|
py
|
Python
|
military-overlay/utils-source-and-test-data/pairwise-test-generation/Complete_test_scripts/scripts/change data source.py
|
Esri/military-features-data
|
20ad73fc34385f9ac78bc274e7cab5777980df61
|
[
"Apache-2.0"
] | 35
|
2015-01-01T16:45:43.000Z
|
2022-03-05T15:10:46.000Z
|
military-overlay/utils-source-and-test-data/pairwise-test-generation/Complete_test_scripts/scripts/change data source.py
|
Esri/military-features-data
|
20ad73fc34385f9ac78bc274e7cab5777980df61
|
[
"Apache-2.0"
] | 285
|
2015-01-02T18:03:12.000Z
|
2019-04-11T04:34:57.000Z
|
military-overlay/utils-source-and-test-data/pairwise-test-generation/Complete_test_scripts/scripts/change data source.py
|
Esri/military-features-data
|
20ad73fc34385f9ac78bc274e7cab5777980df61
|
[
"Apache-2.0"
] | 31
|
2015-01-01T16:45:44.000Z
|
2021-09-13T12:16:03.000Z
|
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------
# change data source.py
# Created on: 2016-08-29 10:26:31.00000
# (generated by ArcGIS/ModelBuilder)
# Description:
# ---------------------------------------------------------------------------
# Import arcpy module
import arcpy
inputProject = arcpy.mp.ArcGISProject("CURRENT")
inputFeatureDS = arcpy.GetParameterAsText(0)
outputFeatureDS = arcpy.GetParameterAsText(1)
inputProject.updateConnectionProperties(inputFeatureDS, outputFeatureDS)
inputProject.save()
del inputProject
| 30.894737
| 77
| 0.575809
|
c41c8909d025db75790ed493d8ba34a5911c39f8
| 1,983
|
py
|
Python
|
tests/models/test_train.py
|
nathdip/unit_testing_python
|
b9c01f4d4c32f5e5c7b40988ccc7837e8cd5e17f
|
[
"MIT"
] | null | null | null |
tests/models/test_train.py
|
nathdip/unit_testing_python
|
b9c01f4d4c32f5e5c7b40988ccc7837e8cd5e17f
|
[
"MIT"
] | null | null | null |
tests/models/test_train.py
|
nathdip/unit_testing_python
|
b9c01f4d4c32f5e5c7b40988ccc7837e8cd5e17f
|
[
"MIT"
] | null | null | null |
import pytest
import numpy as np
import sys
from models.train import split_into_training_and_testing_sets
# Declare the test class
class TestSplitIntoTrainingAndTestingSets(object):
# Fill in with the correct mandatory argument
def test_on_one_row(self):
test_argument = np.array([[1382.0, 390167.0]])
with pytest.raises(ValueError) as exc_info:
split_into_training_and_testing_sets(test_argument)
expected_error_msg = "Argument data_array must have at least 2 rows,"
" it actually has just 1"
assert exc_info.match(expected_error_msg)
# Add a reason for the expected failure
@pytest.mark.xfail(reason="Using TDD, model_test() has not "
                          "yet been implemented")
class TestModelTest(object):
def test_on_linear_data(self):
test_input = np.array([[1.0, 3.0], [2.0, 5.0], [3.0, 7.0]])
expected = 1.0
actual = model_test(test_input, 2.0, 1.0)
message = "model_test({0}) should return {1}, but it actually returned {2}".format(test_input, expected, actual)
assert actual == pytest.approx(expected), message
def test_on_one_dimensional_array(self):
test_input = np.array([1.0, 2.0, 3.0, 4.0])
with pytest.raises(ValueError) as exc_info:
model_test(test_input, 1.0, 1.0)
class TestGetDataAsNumpyArray(object):
# Add a reason for skipping the test
@pytest.mark.skipif(sys.version_info > (2, 7), reason="Works only on Python 2.7 or lower")
def test_on_clean_file(self):
expected = np.array([[2081.0, 314942.0],
[1059.0, 186606.0],
[1148.0, 206186.0]
]
)
actual = get_data_as_numpy_array("example_clean_data.txt", num_columns=2)
message = "Expected return value: {0}, Actual return value: {1}".format(expected, actual)
assert actual == pytest.approx(expected), message
| 43.108696
| 120
| 0.640444
|
a93f057a249a0371ffa4613d50e19691fdd576e0
| 9,397
|
py
|
Python
|
mrcnn/config.py
|
CaelynCheung1996/CLS-Mask_R-CNN
|
e6a640bfbe0797e9a12d3f903025ca8f5ef6090f
|
[
"MIT"
] | 1
|
2021-08-09T08:06:59.000Z
|
2021-08-09T08:06:59.000Z
|
mrcnn/config.py
|
comp-imaging-sci/CLS-Mask_R-CNN
|
e6a640bfbe0797e9a12d3f903025ca8f5ef6090f
|
[
"MIT"
] | null | null | null |
mrcnn/config.py
|
comp-imaging-sci/CLS-Mask_R-CNN
|
e6a640bfbe0797e9a12d3f903025ca8f5ef6090f
|
[
"MIT"
] | null | null | null |
"""
Mask R-CNN
Base Configurations class.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import numpy as np
# Base Configuration Class
# Don't use this class directly. Instead, sub-class it and override
# the configurations you need to change.
class Config(object):
"""Base configuration class. For custom configurations, create a
sub-class that inherits from this one and override properties
that need to be changed.
"""
# Name the configurations. For example, 'COCO', 'Experiment 3', ...etc.
# Useful if your code needs to do things differently depending on which
# experiment is running.
NAME = None # Override in sub-classes
# NUMBER OF GPUs to use. When using only a CPU, this needs to be set to 1.
GPU_COUNT = 1
# Number of images to train with on each GPU. A 12GB GPU can typically
# handle 2 images of 1024x1024px.
# Adjust based on your GPU memory and image sizes. Use the highest
# number that your GPU can handle for best performance.
IMAGES_PER_GPU = 2
# Number of training steps per epoch
# This doesn't need to match the size of the training set. Tensorboard
# updates are saved at the end of each epoch, so setting this to a
# smaller number means getting more frequent TensorBoard updates.
# Validation stats are also calculated at each epoch end and they
# might take a while, so don't set this too small to avoid spending
# a lot of time on validation stats.
STEPS_PER_EPOCH = 1000
# Number of validation steps to run at the end of every training epoch.
# A bigger number improves accuracy of validation stats, but slows
# down the training.
VALIDATION_STEPS = 50
# Backbone network architecture
# Supported values are: resnet50, resnet101.
# You can also provide a callable that should have the signature
# of model.resnet_graph. If you do so, you need to supply a callable
# to COMPUTE_BACKBONE_SHAPE as well
BACKBONE = "resnet101"
# Only useful if you supply a callable to BACKBONE. Should compute
# the shape of each layer of the FPN Pyramid.
# See model.compute_backbone_shapes
COMPUTE_BACKBONE_SHAPE = None
# The strides of each layer of the FPN Pyramid. These values
# are based on a Resnet101 backbone.
BACKBONE_STRIDES = [4, 8, 16, 32, 64]
# Size of the fully-connected layers in the classification graph
FPN_CLASSIF_FC_LAYERS_SIZE = 1024
# Size of the top-down layers used to build the feature pyramid
TOP_DOWN_PYRAMID_SIZE = 256
# Number of classification classes (including background)
NUM_CLASSES = 1 # Override in sub-classes
# Length of square anchor side in pixels
RPN_ANCHOR_SCALES = (32, 64, 128, 256, 512)
# Ratios of anchors at each cell (width/height)
# A value of 1 represents a square anchor, and 0.5 is a wide anchor
RPN_ANCHOR_RATIOS = [0.5, 1, 2]
# Anchor stride
# If 1 then anchors are created for each cell in the backbone feature map.
# If 2, then anchors are created for every other cell, and so on.
RPN_ANCHOR_STRIDE = 1
# Non-max suppression threshold to filter RPN proposals.
    # You can increase this during training to generate more proposals.
RPN_NMS_THRESHOLD = 0.7
# How many anchors per image to use for RPN training
RPN_TRAIN_ANCHORS_PER_IMAGE = 256
# ROIs kept after tf.nn.top_k and before non-maximum suppression
PRE_NMS_LIMIT = 6000
# ROIs kept after non-maximum suppression (training and inference)
POST_NMS_ROIS_TRAINING = 2000
POST_NMS_ROIS_INFERENCE = 1000
# If enabled, resizes instance masks to a smaller size to reduce
# memory load. Recommended when using high-resolution images.
USE_MINI_MASK = True
MINI_MASK_SHAPE = (56, 56) # (height, width) of the mini-mask
# Input image resizing
# Generally, use the "square" resizing mode for training and predicting
# and it should work well in most cases. In this mode, images are scaled
# up such that the small side is = IMAGE_MIN_DIM, but ensuring that the
# scaling doesn't make the long side > IMAGE_MAX_DIM. Then the image is
# padded with zeros to make it a square so multiple images can be put
# in one batch.
# Available resizing modes:
# none: No resizing or padding. Return the image unchanged.
# square: Resize and pad with zeros to get a square image
# of size [max_dim, max_dim].
# pad64: Pads width and height with zeros to make them multiples of 64.
# If IMAGE_MIN_DIM or IMAGE_MIN_SCALE are not None, then it scales
# up before padding. IMAGE_MAX_DIM is ignored in this mode.
# The multiple of 64 is needed to ensure smooth scaling of feature
# maps up and down the 6 levels of the FPN pyramid (2**6=64).
# crop: Picks random crops from the image. First, scales the image based
# on IMAGE_MIN_DIM and IMAGE_MIN_SCALE, then picks a random crop of
# size IMAGE_MIN_DIM x IMAGE_MIN_DIM. Can be used in training only.
# IMAGE_MAX_DIM is not used in this mode.
IMAGE_RESIZE_MODE = "square"
IMAGE_MIN_DIM = 800
IMAGE_MAX_DIM = 1024
    # Minimum scaling ratio. Checked after IMAGE_MIN_DIM and can force further
    # up scaling. For example, if set to 2 then images are scaled up to double
    # the width and height, or more, even if IMAGE_MIN_DIM doesn't require it.
# However, in 'square' mode, it can be overruled by IMAGE_MAX_DIM.
IMAGE_MIN_SCALE = 0
# Number of color channels per image. RGB = 3, grayscale = 1, RGB-D = 4
# Changing this requires other changes in the code. See the WIKI for more
# details: https://github.com/matterport/Mask_RCNN/wiki
IMAGE_CHANNEL_COUNT = 3
# Image mean (RGB)
MEAN_PIXEL = np.array([123.7, 116.8, 103.9])
# Number of ROIs per image to feed to classifier/mask heads
# The Mask RCNN paper uses 512 but often the RPN doesn't generate
# enough positive proposals to fill this and keep a positive:negative
# ratio of 1:3. You can increase the number of proposals by adjusting
# the RPN NMS threshold.
TRAIN_ROIS_PER_IMAGE = 200
# Percent of positive ROIs used to train classifier/mask heads
ROI_POSITIVE_RATIO = 0.33
# Pooled ROIs
POOL_SIZE = 7
MASK_POOL_SIZE = 14
# Shape of output mask
# To change this you also need to change the neural network mask branch
MASK_SHAPE = [28, 28]
# Maximum number of ground truth instances to use in one image
MAX_GT_INSTANCES = 100
# Bounding box refinement standard deviation for RPN and final detections.
RPN_BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
# Max number of final detections
DETECTION_MAX_INSTANCES = 100
# Minimum probability value to accept a detected instance
# ROIs below this threshold are skipped
DETECTION_MIN_CONFIDENCE = 0.7
# Non-maximum suppression threshold for detection
DETECTION_NMS_THRESHOLD = 0.3
# Learning rate and momentum
# The Mask RCNN paper uses lr=0.02, but on TensorFlow it causes
# weights to explode. Likely due to differences in optimizer
# implementation.
LEARNING_RATE = 0.001
LEARNING_MOMENTUM = 0.9
# Weight decay regularization
WEIGHT_DECAY = 0.0001
# Loss weights for more precise optimization.
# Can be used for R-CNN training setup.
LOSS_WEIGHTS = {
"rpn_class_loss": 1.,
"rpn_bbox_loss": 1.,
"mrcnn_class_loss": 1.,
"mrcnn_bbox_loss": 1.,
"mrcnn_mask_loss": 1.
}
# Use RPN ROIs or externally generated ROIs for training
# Keep this True for most situations. Set to False if you want to train
# the head branches on ROI generated by code rather than the ROIs from
# the RPN. For example, to debug the classifier head without having to
# train the RPN.
USE_RPN_ROIS = True
# Train or freeze batch normalization layers
# None: Train BN layers. This is the normal mode
# False: Freeze BN layers. Good when using a small batch size
# True: (don't use). Set layer in training mode even when predicting
TRAIN_BN = False # Defaulting to False since batch size is often small
# Gradient norm clipping
GRADIENT_CLIP_NORM = 5.0
def __init__(self):
"""Set values of computed attributes."""
# Effective batch size
self.BATCH_SIZE = self.IMAGES_PER_GPU * self.GPU_COUNT
# Input image size
if self.IMAGE_RESIZE_MODE == "crop":
self.IMAGE_SHAPE = np.array([self.IMAGE_MIN_DIM, self.IMAGE_MIN_DIM,
self.IMAGE_CHANNEL_COUNT])
else:
self.IMAGE_SHAPE = np.array([self.IMAGE_MAX_DIM, self.IMAGE_MAX_DIM,
self.IMAGE_CHANNEL_COUNT])
# Image meta data length
# See compose_image_meta() for details
self.IMAGE_META_SIZE = 1 + 3 + 3 + 4 + 1 + self.NUM_CLASSES
def display(self):
"""Display Configuration values."""
print("\nConfigurations:")
for a in dir(self):
if not a.startswith("__") and not callable(getattr(self, a)):
print("{:30} {}".format(a, getattr(self, a)))
print("\n")
| 39.483193
| 80
| 0.689156
|
eaec92fa3bc58170d4c0437232e0da3bccb8f274
| 3,251
|
py
|
Python
|
package/awesome_panel/express/fontawesome/share_link.py
|
slamer59/awesome-panel
|
91c30bd6d6859eadf9c65b1e143952f7e64d5290
|
[
"Apache-2.0"
] | 179
|
2019-12-04T14:54:53.000Z
|
2022-03-30T09:08:38.000Z
|
package/awesome_panel/express/fontawesome/share_link.py
|
slamer59/awesome-panel
|
91c30bd6d6859eadf9c65b1e143952f7e64d5290
|
[
"Apache-2.0"
] | 62
|
2019-12-14T16:51:28.000Z
|
2022-03-19T18:47:12.000Z
|
package/awesome_panel/express/fontawesome/share_link.py
|
slamer59/awesome-panel
|
91c30bd6d6859eadf9c65b1e143952f7e64d5290
|
[
"Apache-2.0"
] | 35
|
2019-12-08T13:19:53.000Z
|
2022-03-25T10:33:02.000Z
|
"""This module provides button to share on social media"""
import urllib.parse
import panel as pn
import param
DEFAULT_URL = "https://awesome-panel.org"
DEFAULT_CLASS = "fas fa-external-link-alt"
DEFAULT_TEXT = "Checkout"
FACEBOOK_CLASS = "fab fa-facebook-f"
LINKEDIN_CLASS = "fab fa-linkedin-in"
TWITTER_CLASS = "fab fa-twitter"
REDDIT_CLASS = "fab fa-reddit-alien"
MAIL_CLASS = "fas fa-envelope"
class ShareOnBase(param.Parameterized):
"""Base class for implementing ShareOnFacebook, ShareOnLinkedIn links etc.
- The href property should be overridden
"""
url = param.String(DEFAULT_URL)
icon_class = param.String(DEFAULT_CLASS)
text = param.String(DEFAULT_TEXT)
@property
def _url_parsed(
self,
):
return urllib.parse.quote(self.url).replace(
"/",
"%2F",
)
@property
def href(
self,
) -> str:
"""The href to goto when clicked
Override this method in a base class
Raises:
NotImplementedError:
Returns:
str: A href string
"""
raise NotImplementedError()
def __html__(
self,
) -> str:
"""A html string with link and icon tags
Returns:
str: A html string with link and icon tags
"""
return (
f'<a href="{self.href}" class="button-share-link">'
f'<i class="{self.icon_class}"></i></a>'
)
@param.depends(
"url",
"icon_class",
)
def view(
self,
) -> pn.pane.HTML:
"""A HTML pane with the a link and icon
Returns:
pn.pane.HTML: A HTML pane with the link and icon
"""
return pn.pane.HTML(self.__html__())
class ShareOnFacebook(ShareOnBase):
"""A Share on Facebook button"""
icon_class = param.String(FACEBOOK_CLASS)
@property
def href(
self,
):
return f"https://www.facebook.com/sharer/sharer.php?u={self._url_parsed}"
class ShareOnLinkedIn(ShareOnBase):
"""A Share on LinkedIn button"""
icon_class = param.String(LINKEDIN_CLASS)
@property
def href(
self,
):
return (
f"http://www.linkedin.com/shareArticle?mini=true&url={self._url_parsed}"
f"&title={self.text}"
)
class ShareOnTwitter(ShareOnBase):
"""A Share on Twitter button"""
icon_class = param.String(TWITTER_CLASS)
@property
def href(
self,
):
return f"https://twitter.com/intent/tweet?url={self._url_parsed}&text={self.text}"
class ShareOnReddit(ShareOnBase):
"""A Share on Reddit button"""
icon_class = param.String(REDDIT_CLASS)
@property
def href(
self,
):
return f"https://reddit.com/submit?url={self._url_parsed}&title={self.text}"
class ShareOnMail(ShareOnBase):
"""A Share on Mail button"""
icon_class = param.String(MAIL_CLASS)
@property
def href(
self,
):
return f"mailto:?subject={self._url_parsed}&body={self.text} {self._url_parsed}"
| 23.056738
| 98
| 0.574285
|
78d9d12b23563627abfef8b8596cdb6fbecf9dbe
| 1,830
|
py
|
Python
|
backend/db/admin.py
|
ChansongJo/PsychoLingExperiment
|
0d731b0552a92127a29df7b5929dcbc54e6c81fb
|
[
"MIT"
] | null | null | null |
backend/db/admin.py
|
ChansongJo/PsychoLingExperiment
|
0d731b0552a92127a29df7b5929dcbc54e6c81fb
|
[
"MIT"
] | 7
|
2020-07-23T07:21:19.000Z
|
2021-05-30T04:48:57.000Z
|
backend/db/admin.py
|
ChansongJo/PsychoLingExperiment
|
0d731b0552a92127a29df7b5929dcbc54e6c81fb
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from import_export.admin import ImportExportModelAdmin
from import_export import resources
from backend.db.models import Trial, Subject, Stimulus
class TrialResource(resources.ModelResource):
class Meta:
model = Trial
def dehydrate_session_id(self, obj):
return obj.session_id.to_representation()
def dehydrate_stimulus(self, obj):
return obj.stimulus.to_representation()
def after_export(self, queryset, data, *args, **kwargs):
stimulus = data['stimulus']
del data['stimulus']
subject = data['session_id']
del data['session_id']
# fast transpose zip(*arr)
subject_columns = queryset[0].session_id.get_representation_columns()
for col_name, col_data in zip(subject_columns, zip(*subject)):
data.append_col(col_data, header='subject_' + col_name)
stimulus_columns = queryset[0].stimulus.get_representation_columns()
for col_name, col_data in zip(stimulus_columns, zip(*stimulus)):
data.append_col(col_data, header='stimulus_' + col_name)
class SubjectResource(resources.ModelResource):
class Meta:
model = Subject
class StimulusResource(resources.ModelResource):
class Meta:
model = Stimulus
fields = ('sentence', 'is_grammatical', 'type', 'group', 'event', 'id')
import_id_fields = []
@admin.register(Trial)
class TrialAdmin(ImportExportModelAdmin):
resource_class = TrialResource
list_filter = ('session_id', 'exp_date')
@admin.register(Subject)
class SubjectAdmin(ImportExportModelAdmin):
resource_class = SubjectResource
@admin.register(Stimulus)
class StimulusAdmin(ImportExportModelAdmin):
resource_class = StimulusResource
list_filter = ('is_grammatical', 'type', 'group', 'id')
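# --- Added note (illustrative, not part of the original module) ---
# The `zip(*rows)` idiom used in TrialResource.after_export() transposes the
# exported row tuples into columns so each representation field can be appended
# as its own column. For example (made-up values):
#
#     rows = [("alice", 23), ("bob", 31)]
#     list(zip(*rows))  # -> [('alice', 'bob'), (23, 31)]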
| 31.016949
| 79
| 0.708743
|
de998d5ca6bb3018acfc0429fcc03f941d577d2c
| 3,102
|
py
|
Python
|
project/cms_menus.py
|
kremazar/Django
|
64a29d911f92a24cce84ba928860d94703c88c9e
|
[
"MIT"
] | null | null | null |
project/cms_menus.py
|
kremazar/Django
|
64a29d911f92a24cce84ba928860d94703c88c9e
|
[
"MIT"
] | null | null | null |
project/cms_menus.py
|
kremazar/Django
|
64a29d911f92a24cce84ba928860d94703c88c9e
|
[
"MIT"
] | null | null | null |
from menus.base import NavigationNode
from menus.menu_pool import menu_pool
from django.utils.translation import ugettext_lazy as _
from cms.menu_bases import CMSAttachMenu
from menus.base import Menu, Modifier
from django.urls import reverse
from cms.models import Page
class TestMenu(CMSAttachMenu):
name = _("test menu")
def get_nodes(self, request):
nodes = []
n = NavigationNode(_('sample root page'), "/", 1)
n2 = NavigationNode(_('sample settings page'), "/bye/", 2)
n3 = NavigationNode(_('sample account page'), "/hello/", 3)
n4 = NavigationNode(_('sample my profile page'), "/hello/world/", 4, 3)
nodes.append(n)
nodes.append(n2)
nodes.append(n3)
nodes.append(n4)
return nodes
class UserMenu(Menu):
    def get_nodes(self, request):
        # The URL names below are assumed to exist in the project's urlconf.
        return [
            NavigationNode(_("Profile"), reverse("profile"), 1, attr={'visible_for_anonymous': False}),
            NavigationNode(_("Log in"), reverse("login"), 3, attr={'visible_for_authenticated': False}),
            NavigationNode(_("Sign up"), reverse("logout"), 4, attr={'visible_for_authenticated': False}),
            NavigationNode(_("Log out"), reverse("logout"), 2, attr={'visible_for_anonymous': False}),
        ]
class MyExampleModifier(Modifier):
"""
This modifier makes the changed_by attribute of a page
accessible for the menu system.
"""
def modify(self, request, nodes, namespace, root_id, post_cut, breadcrumb):
# only do something when the menu has already been cut
if post_cut:
# only consider nodes that refer to cms pages
# and put them in a dict for efficient access
page_nodes = {n.id: n for n in nodes if n.attr["is_page"]}
# retrieve the attributes of interest from the relevant pages
pages = Page.objects.filter(id__in=page_nodes.keys()).values('id', 'changed_by')
# loop over all relevant pages
for page in pages:
# take the node referring to the page
node = page_nodes[page['id']]
# put the changed_by attribute on the node
node.attr["changed_by"] = page['changed_by']
return nodes
class Level(Modifier):
"""
marks all node levels
"""
post_cut = True
def modify(self, request, nodes, namespace, root_id, post_cut, breadcrumb):
if breadcrumb:
return nodes
for node in nodes:
if not node.parent:
if post_cut:
node.menu_level = 0
else:
node.level = 0
self.mark_levels(node, post_cut)
return nodes
def mark_levels(self, node, post_cut):
for child in node.children:
if post_cut:
child.menu_level = node.menu_level + 1
else:
child.level = node.level + 1
self.mark_levels(child, post_cut)
menu_pool.register_modifier(Level)
menu_pool.register_modifier(MyExampleModifier)
menu_pool.register_menu(TestMenu)
| 37.373494
| 108
| 0.60735
|
be009f6115bef7e90b9bc196adb39a83603a92bb
| 2,748
|
py
|
Python
|
backend/constants.py
|
jak2030/recurse-interview
|
9920f8b1b0cc4c7b1eed75e3b7c086178ad8b817
|
[
"MIT"
] | 1
|
2019-07-26T18:40:12.000Z
|
2019-07-26T18:40:12.000Z
|
backend/constants.py
|
jak2030/bardi-b
|
9920f8b1b0cc4c7b1eed75e3b7c086178ad8b817
|
[
"MIT"
] | 7
|
2020-09-06T15:46:23.000Z
|
2022-02-18T06:18:29.000Z
|
backend/constants.py
|
jak2030/recurse-coding-exercise
|
9920f8b1b0cc4c7b1eed75e3b7c086178ad8b817
|
[
"MIT"
] | null | null | null |
import os
DATA_DIR = os.getenv("BARDI_B_DATA_DIR")
# The directory in which to store all models.
MODELS_DIR = os.path.join(DATA_DIR, "models/")
# The root directory for all data generated by the app...
# TODO: Use a database :P
SHAKESPEARE_DATA_DIR = os.path.join(DATA_DIR, "shakespeare/")
VILLAINS = [
{"play": "hamlet", "character": "claudius"},
{"play": "king-lear", "character": "regan"},
{"play": "king-lear", "character": "goneral"},
{"play": "king-lear", "character": "edmund"},
{"play": "othello", "character": "iago"},
{"play": "much-ado-about-nothing", "character": "don-john"},
{"play": "macbeth", "character": "lady-macbeth"},
{"play": "richard-iii", "character": "richard-iii"},
{"play": "julius-caesar", "character": "cassius"},
{"play": "romeo-and-juliet", "character": "tybalt"},
]
JESTERS = [
{"character": "a-fool", "play": "timon-of-athens"},
{"character": "autolycus", "play": "the-winters-tale"},
{"character": "citizen", "play": "julius-caesar"},
{"character": "cloten", "play": "cymbeline"},
{"character": "clown", "play": "othello"},
{"character": "clown", "play": "titus-andronicus"},
{"character": "costard", "play": "loves-labours-lost"},
{"character": "dogberry", "play": "much-ado-about-nothing"},
{"character": "dromio of ephesus", "play": "the-comedy-of-errors"},
{"character": "dromio of syracuse", "play": "the-comedy-of-errors"},
{"character": "falstaff", "play": "henry-iv-part-1"},
{"character": "falstaff", "play": "henry-iv-part-2"},
{"character": "feste", "play": "twelfth-night"},
{"character": "grumio", "play": "the-taming-of-the-shrew"},
{"character": "launce", "play": "two-gentlemen-of-verona"},
{"character": "launcelot gobbo", "play": "the-merchant-of-venice"},
{"character": "lavache", "play": "alls-well-that-ends-well"},
{"character": "nick bottom", "play": "a-midsummer-nights-dream"},
{"character": "pompey", "play": "measure-for-measure"},
{"character": "puck", "play": "a-midsummer-nights-dream"},
{"character": "speed", "play": "two-gentlemen-of-verona"},
{"character": "the-fool", "play": "king-lear"},
{"character": "the-gravediggers", "play": "hamlet"},
{"character": "the-porter", "play": "macbeth"},
{"character": "thersites", "play": "troilus-and-cressida"},
{"character": "touchstone", "play": "as-you-like-it"},
{"character": "trinculo", "play": "the-tempest"},
{"character": "yorick", "play": "hamlet"},
]
DREAMERS = [
{"character": "ophelia", "play": "hamlet"},
{"character": "hamlet", "play": "hamlet"},
{"character": "julia", "play": "romeo-and-juliet"},
{"character": "romeo", "play": "romeo-and-juliet"},
]
| 45.8
| 72
| 0.598253
|
1327a485957358b1e27a970be2d0cf98ec6a170f
| 1,891
|
py
|
Python
|
popbl_servicesapp/flask_app/auth/application/models.py
|
xetxezarreta/master-popbl1
|
253880b9ba358f63f666893cdbbffe7391fcd096
|
[
"MIT"
] | null | null | null |
popbl_servicesapp/flask_app/auth/application/models.py
|
xetxezarreta/master-popbl1
|
253880b9ba358f63f666893cdbbffe7391fcd096
|
[
"MIT"
] | 1
|
2021-06-02T00:57:11.000Z
|
2021-06-02T00:57:11.000Z
|
popbl_servicesapp/flask_app/auth/application/models.py
|
xetxezarreta/master-popbl1
|
253880b9ba358f63f666893cdbbffe7391fcd096
|
[
"MIT"
] | null | null | null |
from sqlalchemy import Column, DateTime, Integer, String, TEXT, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.sql import func
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class BaseModel(Base):
__abstract__ = True
creation_date = Column(DateTime(timezone=True), server_default=func.now())
update_date = Column(DateTime, nullable=False,
server_default=func.now(), onupdate=func.now())
def __repr__(self):
fields = ""
for c in self.__table__.columns:
if fields == "":
fields = "{}='{}'".format(c.name, getattr(self, c.name))
else:
fields = "{}, {}='{}'".format(
fields, c.name, getattr(self, c.name))
return "<{}({})>".format(self.__class__.__name__, fields)
@staticmethod
def list_as_dict(items):
return [i.as_dict() for i in items]
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class User(BaseModel):
ROL_CLIENT = "client"
ROL_ADMIN = "admin"
__tablename__ = "user"
id = Column(Integer, primary_key=True)
name = Column(TEXT, nullable=False)
password = Column(TEXT, nullable=False)
rol = Column(TEXT, nullable=False)
permissions = relationship("Permission", lazy="joined", cascade='all, delete-orphan')
def as_dict(self):
d = super().as_dict()
return d
class Permission(BaseModel):
C_OWN_ORDER = "C OWN ORDER"
R_OWN_ORDER = "R OWN ORDER"
U_OWN_ORDER = "U OWN ORDER"
D_OWN_ORDER = "D OWN ORDER"
C_ALL = "C ALL ALL"
R_ALL = "R ALL ALL"
U_ALL = "U ALL ALL"
D_ALL = "D ALL ALL"
__tablename__ = "permission"
rol = Column(TEXT, ForeignKey('user.rol'), primary_key=True )
permission = Column(TEXT, primary_key=True)
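# --- Added usage sketch (illustrative, not part of the original module) ---
# Quick demonstration of the BaseModel helpers using a throwaway in-memory
# SQLite database; the user values are placeholders.
if __name__ == "__main__":
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    session.add(User(name="alice", password="hashed-secret", rol=User.ROL_CLIENT))
    session.commit()
    users = session.query(User).all()
    print(User.list_as_dict(users))  # list of {column name: value} dicts
    print(users[0])                  # __repr__ lists every column as name='value'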
| 30.015873
| 89
| 0.628239
|
54c6faced63184f3ceb3d8f31a1f157150c8e909
| 28,086
|
py
|
Python
|
Fuzzy_clustering/ver_tf2/skopt/searchcv.py
|
joesider9/forecasting_library
|
db07ff8f0f2693983058d49004f2fc6f8849d197
|
[
"Apache-2.0"
] | null | null | null |
Fuzzy_clustering/ver_tf2/skopt/searchcv.py
|
joesider9/forecasting_library
|
db07ff8f0f2693983058d49004f2fc6f8849d197
|
[
"Apache-2.0"
] | null | null | null |
Fuzzy_clustering/ver_tf2/skopt/searchcv.py
|
joesider9/forecasting_library
|
db07ff8f0f2693983058d49004f2fc6f8849d197
|
[
"Apache-2.0"
] | null | null | null |
try:
from collections.abc import Sized
except ImportError:
from collections import Sized
from collections import defaultdict
from functools import partial
import numpy as np
from scipy.stats import rankdata
import sklearn
from sklearn.base import is_classifier, clone
from joblib import Parallel, delayed
from sklearn.model_selection._search import BaseSearchCV
from sklearn.utils import check_random_state
from sklearn.utils.fixes import MaskedArray
from sklearn.utils.validation import indexable, check_is_fitted
try:
from sklearn.metrics import check_scoring
except ImportError:
from sklearn.metrics.scorer import check_scoring
from . import Optimizer
from .utils import point_asdict, dimensions_aslist, eval_callbacks
from .space import check_dimension
from .callbacks import check_callback
class BayesSearchCV(BaseSearchCV):
"""Bayesian optimization over hyper parameters.
BayesSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
Parameters are presented as a list of skopt.space.Dimension objects.
Parameters
----------
estimator : estimator object.
A object of that type is instantiated for each search point.
This object is assumed to implement the scikit-learn estimator api.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
search_spaces : dict, list of dict or list of tuple containing
(dict, int).
One of these cases:
1. dictionary, where keys are parameter names (strings)
and values are skopt.space.Dimension instances (Real, Integer
or Categorical) or any other valid value that defines skopt
dimension (see skopt.Optimizer docs). Represents search space
over parameters of the provided estimator.
2. list of dictionaries: a list of dictionaries, where every
dictionary fits the description given in case 1 above.
If a list of dictionary objects is given, then the search is
performed sequentially for every parameter space with maximum
number of evaluations set to self.n_iter.
3. list of (dict, int > 0): an extension of case 2 above,
where first element of every tuple is a dictionary representing
some search subspace, similarly as in case 2, and second element
is a number of iterations that will be spent optimizing over
this subspace.
n_iter : int, default=50
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution. Consider increasing
``n_points`` if you want to try more parameter settings in
parallel.
optimizer_kwargs : dict, optional
Dict of arguments passed to :class:`Optimizer`. For example,
``{'base_estimator': 'RF'}`` would use a Random Forest surrogate
instead of the default Gaussian Process.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel. At maximum there are
``n_points`` times ``cv`` jobs available during each iteration.
n_points : int, default=1
Number of parameter settings to sample in parallel. If this does
        not align with ``n_iter``, the last iteration will sample fewer
points. See also :func:`~Optimizer.ask`
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
        this BayesSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
return_train_score : boolean, default=False
If ``'True'``, the ``cv_results_`` attribute will include training
scores.
Examples
--------
>>> from skopt import BayesSearchCV
>>> # parameter ranges are specified by one of below
>>> from skopt.space import Real, Categorical, Integer
>>>
>>> from sklearn.datasets import load_iris
>>> from sklearn.svm import SVC
>>> from sklearn.model_selection import train_test_split
>>>
    >>> X, y = load_iris(return_X_y=True)
>>> X_train, X_test, y_train, y_test = train_test_split(X, y,
... train_size=0.75,
... random_state=0)
>>>
>>> # log-uniform: understand as search over p = exp(x) by varying x
>>> opt = BayesSearchCV(
... SVC(),
... {
... 'C': Real(1e-6, 1e+6, prior='log-uniform'),
... 'gamma': Real(1e-6, 1e+1, prior='log-uniform'),
... 'degree': Integer(1,8),
... 'kernel': Categorical(['linear', 'poly', 'rbf']),
... },
... n_iter=32
... )
>>>
>>> # executes bayesian optimization
>>> _ = opt.fit(X_train, y_train)
>>>
>>> # model can be saved, used for predictions or scoring
>>> print(opt.score(X_test, y_test))
0.973...
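    A further minimal sketch (illustrative only; the dimensions below are
    placeholders, not a recommended configuration): ``search_spaces`` given
    as a list of ``(dict, int)`` tuples (case 3 above), so each subspace
    gets its own iteration budget.
    >>> opt_multi = BayesSearchCV(
    ...     SVC(),
    ...     [
    ...         ({'C': Real(1e-6, 1e+6, prior='log-uniform')}, 16),
    ...         ({'gamma': Real(1e-6, 1e+1, prior='log-uniform'),
    ...           'kernel': Categorical(['linear', 'rbf'])}, 16),
    ...     ],
    ... )
    >>> # opt_multi.fit(X_train, y_train) would spend 16 iterations in each
    >>> # subspace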
Attributes
----------
cv_results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
imported into a pandas ``DataFrame``.
For instance the below given table
+--------------+-------------+-------------------+---+---------------+
| param_kernel | param_gamma | split0_test_score |...|rank_test_score|
+==============+=============+===================+===+===============+
| 'rbf' | 0.1 | 0.8 |...| 2 |
+--------------+-------------+-------------------+---+---------------+
| 'rbf' | 0.2 | 0.9 |...| 1 |
+--------------+-------------+-------------------+---+---------------+
| 'rbf' | 0.3 | 0.7 |...| 1 |
+--------------+-------------+-------------------+---+---------------+
will be represented by a ``cv_results_`` dict of::
{
'param_kernel' : masked_array(data = ['rbf', 'rbf', 'rbf'],
mask = False),
'param_gamma' : masked_array(data = [0.1 0.2 0.3], mask = False),
'split0_test_score' : [0.8, 0.9, 0.7],
'split1_test_score' : [0.82, 0.5, 0.7],
'mean_test_score' : [0.81, 0.7, 0.7],
'std_test_score' : [0.02, 0.2, 0.],
'rank_test_score' : [3, 1, 1],
'split0_train_score' : [0.8, 0.9, 0.7],
'split1_train_score' : [0.82, 0.5, 0.7],
'mean_train_score' : [0.81, 0.7, 0.7],
'std_train_score' : [0.03, 0.03, 0.04],
'mean_fit_time' : [0.73, 0.63, 0.43, 0.49],
'std_fit_time' : [0.01, 0.02, 0.01, 0.01],
'mean_score_time' : [0.007, 0.06, 0.04, 0.04],
'std_score_time' : [0.001, 0.002, 0.003, 0.005],
'params' : [{'kernel' : 'rbf', 'gamma' : 0.1}, ...],
}
NOTE that the key ``'params'`` is used to store a list of parameter
settings dict for all the parameter candidates.
The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and
``std_score_time`` are all in seconds.
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
best_index_ : int
The index (of the ``cv_results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.cv_results_['params'][search.best_index_]`` gives
the parameter setting for the best model, that gives the highest
mean score (``search.best_score_``).
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
n_splits_ : int
The number of cross-validation splits (folds/iterations).
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
"""
def __init__(self, estimator, search_spaces, optimizer_kwargs=None,
n_iter=50, scoring=None, fit_params=None, n_jobs=1,
n_points=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', random_state=None,
error_score='raise', return_train_score=False):
self.search_spaces = search_spaces
self.n_iter = n_iter
self.n_points = n_points
self.random_state = random_state
self.optimizer_kwargs = optimizer_kwargs
self._check_search_space(self.search_spaces)
# Temporary fix for compatibility with sklearn 0.20 and 0.21
# See scikit-optimize#762
# To be consistent with sklearn 0.21+, fit_params should be deprecated
# in the constructor and be passed in ``fit``.
self.fit_params = fit_params
super(BayesSearchCV, self).__init__(
estimator=estimator, scoring=scoring,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score,
return_train_score=return_train_score)
def _check_search_space(self, search_space):
"""Checks whether the search space argument is correct"""
if len(search_space) == 0:
            raise ValueError(
                "The search_spaces parameter should contain at least one "
                "non-empty search space, got %s" % search_space
)
# check if space is a single dict, convert to list if so
if isinstance(search_space, dict):
search_space = [search_space]
# check if the structure of the space is proper
if isinstance(search_space, list):
# convert to just a list of dicts
dicts_only = []
# 1. check the case when a tuple of space, n_iter is provided
for elem in search_space:
if isinstance(elem, tuple):
if len(elem) != 2:
                        raise ValueError(
                            "All tuples in list of search spaces should have "
                            "length 2, and contain (dict, int), got %s" % elem
)
subspace, n_iter = elem
if (not isinstance(n_iter, int)) or n_iter < 0:
                        raise ValueError(
                            "Number of iterations in search space should be "
"positive integer, got %s in tuple %s " %
(n_iter, elem)
)
# save subspaces here for further checking
dicts_only.append(subspace)
elif isinstance(elem, dict):
dicts_only.append(elem)
else:
                    raise TypeError(
                        "A search space should be provided as a dict or "
                        "tuple (dict, int), got %s" % elem)
# 2. check all the dicts for correctness of contents
for subspace in dicts_only:
for k, v in subspace.items():
check_dimension(v)
else:
            raise TypeError(
                "Search space should be provided as a dict or list of dict, "
                "got %s" % search_space)
# copied for compatibility with 0.19 sklearn from 0.18 BaseSearchCV
@property
def best_score_(self):
check_is_fitted(self, 'cv_results_')
return self.cv_results_['mean_test_score'][self.best_index_]
# copied for compatibility with 0.19 sklearn from 0.18 BaseSearchCV
@property
def best_params_(self):
check_is_fitted(self, 'cv_results_')
return self.cv_results_['params'][self.best_index_]
# copied for compatibility with 0.19 sklearn from 0.18 BaseSearchCV
def _fit(self, X, y, groups, parameter_iterable):
"""
Actual fitting, performing the search over parameters.
Taken from https://github.com/scikit-learn/scikit-learn/blob/0.18.X
.../sklearn/model_selection/_search.py
"""
estimator = self.estimator
cv = sklearn.model_selection._validation.check_cv(
self.cv, y, classifier=is_classifier(estimator))
self.scorer_ = check_scoring(
self.estimator, scoring=self.scoring)
X, y, groups = indexable(X, y, groups)
n_splits = cv.get_n_splits(X, y, groups)
if self.verbose > 0 and isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(n_splits, n_candidates,
n_candidates * n_splits))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
cv_iter = list(cv.split(X, y, groups))
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(delayed(sklearn.model_selection._validation._fit_and_score)(
clone(base_estimator),
X, y, self.scorer_,
train, test, self.verbose, parameters,
fit_params=self.fit_params,
return_train_score=self.return_train_score,
return_n_test_samples=True,
return_times=True, return_parameters=True,
error_score=self.error_score
)
for parameters in parameter_iterable
for train, test in cv_iter)
# if one choose to see train score, "out" will contain train score info
if self.return_train_score:
(train_scores, test_scores, test_sample_counts,
fit_time, score_time, parameters) = zip(*out)
else:
(test_scores, test_sample_counts,
fit_time, score_time, parameters) = zip(*out)
candidate_params = parameters[::n_splits]
n_candidates = len(candidate_params)
results = dict()
def _store(key_name, array, weights=None, splits=False, rank=False):
"""A small helper to store the scores/times to the cv_results_"""
array = np.array(array, dtype=np.float64).reshape(n_candidates,
n_splits)
if splits:
for split_i in range(n_splits):
results["split%d_%s"
% (split_i, key_name)] = array[:, split_i]
array_means = np.average(array, axis=1, weights=weights)
results['mean_%s' % key_name] = array_means
# Weighted std is not directly available in numpy
array_stds = np.sqrt(np.average((array -
array_means[:, np.newaxis]) ** 2,
axis=1, weights=weights))
results['std_%s' % key_name] = array_stds
if rank:
results["rank_%s" % key_name] = np.asarray(
rankdata(-array_means, method='min'), dtype=np.int32)
# Computed the (weighted) mean and std for test scores alone
# NOTE test_sample counts (weights) remain the same for all candidates
test_sample_counts = np.array(test_sample_counts[:n_splits],
                                      dtype=int)
_store('test_score', test_scores, splits=True, rank=True,
weights=test_sample_counts if self.iid else None)
if self.return_train_score:
_store('train_score', train_scores, splits=True)
_store('fit_time', fit_time)
_store('score_time', score_time)
best_index = np.flatnonzero(results["rank_test_score"] == 1)[0]
best_parameters = candidate_params[best_index]
# Use one MaskedArray and mask all the places where the param is not
# applicable for that candidate. Use defaultdict as each candidate may
# not contain all the params
param_results = defaultdict(partial(
MaskedArray,
np.empty(n_candidates,),
mask=True,
dtype=object))
for cand_i, params in enumerate(candidate_params):
for name, value in params.items():
# An all masked empty array gets created for the key
                # `"param_%s" % name` at the first occurrence of `name`.
# Setting the value at an index also unmasks that index
param_results["param_%s" % name][cand_i] = value
results.update(param_results)
# Store a list of param dicts at the key 'params'
results['params'] = candidate_params
self.cv_results_ = results
self.best_index_ = best_index
self.n_splits_ = n_splits
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best_parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
def _fit_best_model(self, X, y):
"""Fit the estimator copy with best parameters found to the
provided data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output],
Target relative to X for classification or regression.
Returns
-------
self
"""
self.best_estimator_ = clone(self.estimator)
self.best_estimator_.set_params(**self.best_params_)
self.best_estimator_.fit(X, y, **(self.fit_params or {}))
return self
def _make_optimizer(self, params_space):
"""Instantiate skopt Optimizer class.
Parameters
----------
params_space : dict
Represents parameter search space. The keys are parameter
names (strings) and values are skopt.space.Dimension instances,
one of Real, Integer or Categorical.
Returns
-------
        optimizer: Instance of the `Optimizer` class used for search
in some parameter space.
"""
kwargs = self.optimizer_kwargs_.copy()
kwargs['dimensions'] = dimensions_aslist(params_space)
optimizer = Optimizer(**kwargs)
return optimizer
def _step(self, X, y, search_space, optimizer, groups=None, n_points=1):
"""Generate n_jobs parameters and evaluate them in parallel.
"""
# get parameter values to evaluate
params = optimizer.ask(n_points=n_points)
# convert parameters to python native types
params = [[np.array(v).item() for v in p] for p in params]
# make lists into dictionaries
params_dict = [point_asdict(search_space, p) for p in params]
# HACK: self.cv_results_ is reset at every call to _fit, keep current
all_cv_results = self.cv_results_
# HACK: this adds compatibility with different versions of sklearn
refit = self.refit
self.refit = False
self._fit(X, y, groups, params_dict)
self.refit = refit
# merge existing and new cv_results_
for k in self.cv_results_:
all_cv_results[k].extend(self.cv_results_[k])
all_cv_results["rank_test_score"] = list(np.asarray(
rankdata(-np.array(all_cv_results['mean_test_score']),
method='min'), dtype=np.int32))
if self.return_train_score:
all_cv_results["rank_train_score"] = list(np.asarray(
rankdata(-np.array(all_cv_results['mean_train_score']),
method='min'), dtype=np.int32))
self.cv_results_ = all_cv_results
self.best_index_ = np.argmax(self.cv_results_['mean_test_score'])
# feed the point and objective back into optimizer
local_results = self.cv_results_['mean_test_score'][-len(params):]
# optimizer minimizes objective, hence provide negative score
return optimizer.tell(params, [-score for score in local_results])
@property
def total_iterations(self):
"""
Count total iterations that will be taken to explore
all subspaces with `fit` method.
Returns
-------
max_iter: int, total number of iterations to explore
"""
total_iter = 0
for elem in self.search_spaces:
if isinstance(elem, tuple):
space, n_iter = elem
else:
n_iter = self.n_iter
total_iter += n_iter
return total_iter
def _run_search(self, x):
pass
def fit(self, X, y=None, groups=None, callback=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples] or [n_samples, n_output]
Target relative to X for classification or regression (class
labels should be integers or strings).
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
callback: [callable, list of callables, optional]
If callable then `callback(res)` is called after each parameter
combination tested. If list of callables, then each callable in
the list is called.
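            A hedged usage sketch (the threshold below is a placeholder): a
            callback may stop the search early by returning True, e.g.
            ``opt.fit(X, y, callback=lambda res: len(res.x_iters) >= 10)``.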
"""
# check if space is a single dict, convert to list if so
search_spaces = self.search_spaces
if isinstance(search_spaces, dict):
search_spaces = [search_spaces]
callbacks = check_callback(callback)
if self.optimizer_kwargs is None:
self.optimizer_kwargs_ = {}
else:
self.optimizer_kwargs_ = dict(self.optimizer_kwargs)
random_state = check_random_state(self.random_state)
self.optimizer_kwargs_['random_state'] = random_state
# Instantiate optimizers for all the search spaces.
optimizers = []
for search_space in search_spaces:
if isinstance(search_space, tuple):
search_space = search_space[0]
optimizers.append(self._make_optimizer(search_space))
self.optimizers_ = optimizers # will save the states of the optimizers
self.cv_results_ = defaultdict(list)
self.best_index_ = None
self.multimetric_ = False
n_points = self.n_points
for search_space, optimizer in zip(search_spaces, optimizers):
# if not provided with search subspace, n_iter is taken as
# self.n_iter
if isinstance(search_space, tuple):
search_space, n_iter = search_space
else:
n_iter = self.n_iter
# do the optimization for particular search space
while n_iter > 0:
# when n_iter < n_points points left for evaluation
n_points_adjusted = min(n_iter, n_points)
optim_result = self._step(
X, y, search_space, optimizer,
groups=groups, n_points=n_points_adjusted
)
n_iter -= n_points
if eval_callbacks(callbacks, optim_result):
break
# Refit the best model on the the whole dataset
if self.refit:
self._fit_best_model(X, y)
return self
| 40.645441
| 79
| 0.596667
|
a19c627fc940c953e5199b96a6d56234703313f5
| 1,588
|
py
|
Python
|
image-downloader/instagram-image-download.py
|
MayankShrivastava17/mini-project
|
c6fa2c95caba792ec4641eaeafaa5ab18f9e7abd
|
[
"MIT"
] | 1
|
2021-05-25T13:30:05.000Z
|
2021-05-25T13:30:05.000Z
|
image-downloader/instagram-image-download.py
|
MayankShrivastava17/mini-project
|
c6fa2c95caba792ec4641eaeafaa5ab18f9e7abd
|
[
"MIT"
] | null | null | null |
image-downloader/instagram-image-download.py
|
MayankShrivastava17/mini-project
|
c6fa2c95caba792ec4641eaeafaa5ab18f9e7abd
|
[
"MIT"
] | null | null | null |
from sys import argv
import urllib.request
from bs4 import BeautifulSoup
import datetime
def ShowHelp():
    print('Insta Image Downloader')
    print('')
    print('Usage:')
    print('insta.py [OPTION] [URL]')
    print('')
    print('Options:')
    print('-u [Instagram URL]\tDownload single photo from Instagram URL')
    print('-f [File path]\t\tDownload Instagram photo(s) using file list')
    print('-h, --help\t\tShow this help message')
    print('')
    print('Example:')
    print('python insta.py -u https://instagram.com/p/xxxxx')
    print('python insta.py -f /home/username/filelist.txt')
    print('')
    exit()
def DownloadSingleFile(fileURL):
    print('Downloading image...')
    # fetch the page and read the og:image meta tag that holds the photo URL
    f = urllib.request.urlopen(fileURL)
    htmlSource = f.read()
    soup = BeautifulSoup(htmlSource, 'html.parser')
    metaTag = soup.find_all('meta', {'property': 'og:image'})
    imgURL = metaTag[0]['content']
    fileName = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S") + '.jpg'
    urllib.request.urlretrieve(imgURL, fileName)
    print('Done. Image saved to disk as ' + fileName)
if __name__ == '__main__':
if len(argv) == 1:
ShowHelp()
if argv[1] in ('-h', '--help'):
ShowHelp()
elif argv[1] == '-u':
instagramURL = argv[2]
DownloadSingleFile(instagramURL)
elif argv[1] == '-f':
filePath = argv[2]
f = open(filePath)
line = f.readline()
while line:
instagramURL = line.rstrip('\n')
DownloadSingleFile(instagramURL)
line = f.readline()
f.close()
| 33.083333
| 78
| 0.593199
|
a9cd1d4dfae01e8e61648e2c63a50ff0b3b56b77
| 616
|
py
|
Python
|
admin_privileges.py
|
julencosme/python-crash-course
|
6b37d7346e235273c266110932207cd67ce4eb0e
|
[
"MIT"
] | null | null | null |
admin_privileges.py
|
julencosme/python-crash-course
|
6b37d7346e235273c266110932207cd67ce4eb0e
|
[
"MIT"
] | null | null | null |
admin_privileges.py
|
julencosme/python-crash-course
|
6b37d7346e235273c266110932207cd67ce4eb0e
|
[
"MIT"
] | null | null | null |
# Module for classes: Admin and Privileges.
class Privileges():
"""Represent admin user's privileges."""
def __init__(self):
self.privileges = ["can add a post", "can delete a post",
"can ban a user", "can add admin user"]
def describe_admin_privileges(self):
print("Privileges of the admin user(s) are as follows: " +
str(self.privileges))
class Admin(Privileges):
"""Represent the privileges, specific to an admin user."""
    def __init__(self):
        """Initialize the attributes of the parent class."""
super().__init__()
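# Usage sketch (illustrative): Admin inherits both the privilege list and the
# describe method from Privileges.
#     admin = Admin()
#     admin.describe_admin_privileges()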
| 28
| 66
| 0.613636
|
060d9086996942f4a71b95d24fa49db47dc99bdb
| 3,602
|
py
|
Python
|
tests/test_spacingd.py
|
function2-llx/MONAI
|
4cddaa830b61b88ec78e089bb5f21e05bb1a78f4
|
[
"Apache-2.0"
] | 1
|
2022-02-15T09:32:28.000Z
|
2022-02-15T09:32:28.000Z
|
tests/test_spacingd.py
|
function2-llx/MONAI
|
4cddaa830b61b88ec78e089bb5f21e05bb1a78f4
|
[
"Apache-2.0"
] | null | null | null |
tests/test_spacingd.py
|
function2-llx/MONAI
|
4cddaa830b61b88ec78e089bb5f21e05bb1a78f4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typing import List, Tuple
import numpy as np
import torch
from parameterized import parameterized
from monai.transforms import Spacingd
from monai.utils.enums import PostFix
from tests.utils import TEST_NDARRAYS, assert_allclose
TESTS: List[Tuple] = []
for p in TEST_NDARRAYS:
TESTS.append(
(
"spacing 3d",
{"image": p(np.ones((2, 10, 15, 20))), PostFix.meta("image"): {"affine": p(np.eye(4))}},
dict(keys="image", pixdim=(1, 2, 1.4)),
("image", PostFix.meta("image"), "image_transforms"),
(2, 10, 8, 15),
p(np.diag([1, 2, 1.4, 1.0])),
)
)
TESTS.append(
(
"spacing 2d",
{"image": np.ones((2, 10, 20)), PostFix.meta("image"): {"affine": np.eye(3)}},
dict(keys="image", pixdim=(1, 2)),
("image", PostFix.meta("image"), "image_transforms"),
(2, 10, 10),
np.diag((1, 2, 1)),
)
)
TESTS.append(
(
"spacing 2d no metadata",
{"image": np.ones((2, 10, 20))},
dict(keys="image", pixdim=(1, 2)),
("image", PostFix.meta("image"), "image_transforms"),
(2, 10, 10),
np.diag((1, 2, 1)),
)
)
TESTS.append(
(
"interp all",
{
"image": np.arange(20).reshape((2, 1, 10)),
"seg": np.ones((2, 1, 10)),
PostFix.meta("image"): {"affine": np.eye(4)},
PostFix.meta("seg"): {"affine": np.eye(4)},
},
dict(keys=("image", "seg"), mode="nearest", pixdim=(1, 0.2)),
("image", PostFix.meta("image"), "image_transforms", "seg", PostFix.meta("seg"), "seg_transforms"),
(2, 1, 46),
np.diag((1, 0.2, 1, 1)),
)
)
TESTS.append(
(
"interp sep",
{
"image": np.ones((2, 1, 10)),
"seg": np.ones((2, 1, 10)),
PostFix.meta("image"): {"affine": np.eye(4)},
PostFix.meta("seg"): {"affine": np.eye(4)},
},
dict(keys=("image", "seg"), mode=("bilinear", "nearest"), pixdim=(1, 0.2)),
("image", PostFix.meta("image"), "image_transforms", "seg", PostFix.meta("seg"), "seg_transforms"),
(2, 1, 46),
np.diag((1, 0.2, 1, 1)),
)
)
class TestSpacingDCase(unittest.TestCase):
@parameterized.expand(TESTS)
def test_spacingd(self, _, data, kw_args, expected_keys, expected_shape, expected_affine):
res = Spacingd(**kw_args)(data)
if isinstance(data["image"], torch.Tensor):
self.assertEqual(data["image"].device, res["image"].device)
self.assertEqual(expected_keys, tuple(sorted(res)))
np.testing.assert_allclose(res["image"].shape, expected_shape)
assert_allclose(res[PostFix.meta("image")]["affine"], expected_affine)
if __name__ == "__main__":
unittest.main()
| 36.02
| 111
| 0.54442
|
30f3884a50ae2a4a205514fdc73d2de677bd9d6b
| 3,691
|
py
|
Python
|
app/models.py
|
ephantuskaranja/Pitches
|
eaa25816199ac7060f5deb5882760071e5b74979
|
[
"Unlicense"
] | null | null | null |
app/models.py
|
ephantuskaranja/Pitches
|
eaa25816199ac7060f5deb5882760071e5b74979
|
[
"Unlicense"
] | null | null | null |
app/models.py
|
ephantuskaranja/Pitches
|
eaa25816199ac7060f5deb5882760071e5b74979
|
[
"Unlicense"
] | null | null | null |
from . import db
from flask_login import UserMixin
from . import login_manager
from werkzeug.security import generate_password_hash, check_password_hash
from sqlalchemy.sql import func
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(255))
email = db.Column(db.String(255), unique=True, index=True)
password_hash = db.Column(db.String(255))
pitches = db.relationship('Content', backref='user', lazy='dynamic')
comments = db.relationship('Comment', backref='user', lazy='dynamic')
votes = db.relationship('Vote', backref='user', lazy='dynamic')
def __repr__(self):
        return f'User {self.username}'
pass_secure = db.Column(db.String(255))
@property
def password(self):
raise AttributeError('You cannot read the password attribute')
@password.setter
def password(self, password):
self.pass_secure = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.pass_secure, password)
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class Category(db.Model):
__tablename__ = 'categories'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
pitches = db.relationship('Content', backref='category', lazy='dynamic')
def save_category(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_categories(cls):
categories = Category.query.all()
return categories
class Content(db.Model):
__tablename__ = 'pitches'
id = db.Column(db.Integer, primary_key=True)
content = db.Column(db.String(255))
category_id = db.Column(db.Integer, db.ForeignKey("categories.id"))
user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
comments = db.relationship('Comment', backref='pitch', lazy='dynamic')
votes = db.relationship('Vote', backref='content', lazy='dynamic')
def save_content(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_pitches(cls, category_id):
pitches = Content.query.order_by(Content.id.desc()).filter_by(category_id=category_id).all()
return pitches
class Comment(db.Model):
__tablename__ = 'comments'
id = db.Column(db.Integer, primary_key=True)
comment = db.Column(db.String(255))
content_id = db.Column(db.Integer, db.ForeignKey("pitches.id"))
user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
def save_comment(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_comments(cls, content_id):
comments = Comment.query.order_by(Comment.id.desc()).filter_by(content_id=content_id).all()
return comments
class Vote(db.Model):
__tablename__ = 'votes'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
content_id = db.Column(db.Integer, db.ForeignKey("pitches.id"))
vote_number = db.Column(db.Integer)
def save_vote(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_votes(cls,user_id,content_id):
votes = Vote.query.filter_by(user_id=user_id,content_id=content_id).all()
return votes
@classmethod
def num_vote(cls,content_id):
found_votes = db.session.query(func.sum(Vote.vote_number))
found_votes = found_votes.filter_by(content_id=content_id).group_by(Vote.content_id)
votes_list = sum([i[0] for i in found_votes.all()])
return votes_list
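# Usage sketch (illustrative; assumes an application context and seeded data):
#     pitches = Content.get_pitches(category_id=1)  # newest pitches first
#     total_votes = Vote.num_vote(content_id=1)     # summed vote_number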
| 31.547009
| 100
| 0.684638
|
615ca80daebf4c7b55a408dbb693c3c298d0d7a7
| 220
|
py
|
Python
|
tests/date/test_construct.py
|
shammellee/pendulum
|
bb179c8fb6ef92b7bfc471a46338abbfac9fafca
|
[
"MIT"
] | 5,049
|
2016-07-04T07:16:34.000Z
|
2022-03-31T07:41:48.000Z
|
tests/date/test_construct.py
|
shammellee/pendulum
|
bb179c8fb6ef92b7bfc471a46338abbfac9fafca
|
[
"MIT"
] | 536
|
2016-07-05T22:46:29.000Z
|
2022-03-22T12:41:54.000Z
|
tests/date/test_construct.py
|
shammellee/pendulum
|
bb179c8fb6ef92b7bfc471a46338abbfac9fafca
|
[
"MIT"
] | 373
|
2016-07-05T19:51:51.000Z
|
2022-03-23T16:57:46.000Z
|
from pendulum import Date
from ..conftest import assert_date
def test_construct():
d = Date(2016, 10, 20)
assert_date(d, 2016, 10, 20)
def test_today():
d = Date.today()
assert isinstance(d, Date)
| 13.75
| 34
| 0.659091
|
57b18f30f170a4f198a6d7504c6e843a81117428
| 31,352
|
py
|
Python
|
astropy/table/index.py
|
jayvdb/astropy
|
bc6d8f106dd5b60bf57a8e6e29c4e2ae2178991f
|
[
"BSD-3-Clause"
] | 445
|
2019-01-26T13:50:26.000Z
|
2022-03-18T05:17:38.000Z
|
astropy/table/index.py
|
jayvdb/astropy
|
bc6d8f106dd5b60bf57a8e6e29c4e2ae2178991f
|
[
"BSD-3-Clause"
] | 242
|
2019-01-29T15:48:27.000Z
|
2022-03-31T22:09:21.000Z
|
astropy/table/index.py
|
jayvdb/astropy
|
bc6d8f106dd5b60bf57a8e6e29c4e2ae2178991f
|
[
"BSD-3-Clause"
] | 31
|
2019-03-10T09:51:27.000Z
|
2022-02-14T23:11:12.000Z
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The Index class can use several implementations as its
engine. Any implementation should implement the following:
__init__(data, row_index) : initialize index based on key/row list pairs
add(key, row) -> None : add (key, row) to existing data
remove(key, data=None) -> boolean : remove data from self[key], or all of
self[key] if data is None
shift_left(row) -> None : decrement row numbers after row
shift_right(row) -> None : increase row numbers >= row
find(key) -> list : list of rows corresponding to key
range(lower, upper, bounds) -> list : rows in self[k] where k is between
lower and upper (<= or < based on bounds)
sort() -> None : make row order align with key order
sorted_data() -> list of rows in sorted order (by key)
replace_rows(row_map) -> None : replace row numbers based on slice
items() -> list of tuples of the form (key, data)
Notes
-----
When a Table is initialized from another Table, indices are
(deep) copied and their columns are set to the columns of the new Table.
Column creation:
Column(c) -> deep copy of indices
c[[1, 2]] -> deep copy and reordering of indices
c[1:2] -> reference
array.view(Column) -> no indices
"""
from copy import deepcopy
import numpy as np
from .bst import MinValue, MaxValue
from .sorted_array import SortedArray
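# Illustrative sketch (table and column names are placeholders): the classes
# below are normally reached through the public Table API rather than used
# directly, e.g.
#     >>> from astropy.table import Table
#     >>> t = Table({'id': [3, 1, 2], 'val': ['c', 'a', 'b']})
#     >>> t.add_index('id')      # builds an Index (SortedArray engine)
#     >>> row = t.loc[2]         # TableLoc: row lookup by indexed value
#     >>> ordered = t.iloc[:]    # TableILoc: rows in index sort order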
class QueryError(ValueError):
'''
Indicates that a given index cannot handle the supplied query.
'''
pass
class Index:
'''
The Index class makes it possible to maintain indices
on columns of a Table, so that column values can be queried
quickly and efficiently. Column values are stored in lexicographic
sorted order, which allows for binary searching in O(log n).
Parameters
----------
columns : list or None
List of columns on which to create an index. If None,
create an empty index for purposes of deep copying.
engine : type, instance, or None
Indexing engine class to use (from among SortedArray, BST,
FastBST, FastRBT, and SCEngine) or actual engine instance.
If the supplied argument is None (by default), use SortedArray.
unique : bool (defaults to False)
Whether the values of the index must be unique
'''
def __new__(cls, *args, **kwargs):
self = super().__new__(cls)
# If (and only if) unpickling for protocol >= 2, then args and kwargs
# are both empty. The class __init__ requires at least the `columns`
# arg. In this case return a bare `Index` object which is then morphed
# by the unpickling magic into the correct SlicedIndex object.
if not args and not kwargs:
return self
self.__init__(*args, **kwargs)
return SlicedIndex(self, slice(0, 0, None), original=True)
def __init__(self, columns, engine=None, unique=False):
# Local imports to avoid import problems.
from .table import Table, Column
from astropy.time import Time
if engine is not None and not isinstance(engine, type):
# create from data
self.engine = engine.__class__
self.data = engine
self.columns = columns
return
# by default, use SortedArray
self.engine = engine or SortedArray
if columns is None: # this creates a special exception for deep copying
columns = []
data = []
row_index = []
elif len(columns) == 0:
raise ValueError("Cannot create index without at least one column")
elif len(columns) == 1:
col = columns[0]
row_index = Column(col.argsort())
data = Table([col[row_index]])
else:
num_rows = len(columns[0])
# replace Time columns with approximate form and remainder
new_columns = []
for col in columns:
if isinstance(col, Time):
new_columns.append(col.jd)
remainder = col - col.__class__(col.jd, format='jd', scale=col.scale)
new_columns.append(remainder.jd)
else:
new_columns.append(col)
# sort the table lexicographically and keep row numbers
table = Table(columns + [np.arange(num_rows)], copy_indices=False)
sort_columns = new_columns[::-1]
try:
lines = table[np.lexsort(sort_columns)]
except TypeError: # arbitrary mixins might not work with lexsort
lines = table[table.argsort()]
data = lines[lines.colnames[:-1]]
row_index = lines[lines.colnames[-1]]
self.data = self.engine(data, row_index, unique=unique)
self.columns = columns
def __len__(self):
'''
Number of rows in index.
'''
return len(self.columns[0])
def replace_col(self, prev_col, new_col):
'''
Replace an indexed column with an updated reference.
Parameters
----------
prev_col : Column
Column reference to replace
new_col : Column
New column reference
'''
self.columns[self.col_position(prev_col.info.name)] = new_col
def reload(self):
'''
Recreate the index based on data in self.columns.
'''
self.__init__(self.columns, engine=self.engine)
def col_position(self, col_name):
'''
Return the position of col_name in self.columns.
Parameters
----------
col_name : str
Name of column to look up
'''
for i, c in enumerate(self.columns):
if c.info.name == col_name:
return i
raise ValueError(f"Column does not belong to index: {col_name}")
def insert_row(self, pos, vals, columns):
'''
Insert a new row from the given values.
Parameters
----------
pos : int
Position at which to insert row
vals : list or tuple
List of values to insert into a new row
columns : list
Table column references
'''
key = [None] * len(self.columns)
for i, col in enumerate(columns):
try:
key[i] = vals[self.col_position(col.info.name)]
except ValueError: # not a member of index
continue
num_rows = len(self.columns[0])
if pos < num_rows:
# shift all rows >= pos to the right
self.data.shift_right(pos)
self.data.add(tuple(key), pos)
def get_row_specifier(self, row_specifier):
'''
Return an iterable corresponding to the
input row specifier.
Parameters
----------
row_specifier : int, list, ndarray, or slice
'''
if isinstance(row_specifier, (int, np.integer)):
# single row
return (row_specifier,)
elif isinstance(row_specifier, (list, np.ndarray)):
return row_specifier
elif isinstance(row_specifier, slice):
col_len = len(self.columns[0])
return range(*row_specifier.indices(col_len))
raise ValueError("Expected int, array of ints, or slice but "
"got {} in remove_rows".format(row_specifier))
def remove_rows(self, row_specifier):
'''
Remove the given rows from the index.
Parameters
----------
row_specifier : int, list, ndarray, or slice
Indicates which row(s) to remove
'''
rows = []
# To maintain the correct row order, we loop twice,
# deleting rows first and then reordering the remaining rows
for row in self.get_row_specifier(row_specifier):
self.remove_row(row, reorder=False)
rows.append(row)
# second pass - row order is reversed to maintain
# correct row numbers
for row in reversed(sorted(rows)):
self.data.shift_left(row)
def remove_row(self, row, reorder=True):
'''
Remove the given row from the index.
Parameters
----------
row : int
Position of row to remove
reorder : bool
Whether to reorder indices after removal
'''
# for removal, form a key consisting of column values in this row
if not self.data.remove(tuple([col[row] for col in self.columns]), row):
raise ValueError(f"Could not remove row {row} from index")
# decrement the row number of all later rows
if reorder:
self.data.shift_left(row)
def find(self, key):
'''
Return the row values corresponding to key, in sorted order.
Parameters
----------
key : tuple
Values to search for in each column
'''
return self.data.find(key)
def same_prefix(self, key):
'''
Return rows whose keys contain the supplied key as a prefix.
Parameters
----------
key : tuple
Prefix for which to search
'''
return self.same_prefix_range(key, key, (True, True))
def same_prefix_range(self, lower, upper, bounds=(True, True)):
'''
Return rows whose keys have a prefix in the given range.
Parameters
----------
lower : tuple
Lower prefix bound
upper : tuple
Upper prefix bound
bounds : tuple (x, y) of bools
Indicates whether the search should be inclusive or
exclusive with respect to the endpoints. The first
argument x corresponds to an inclusive lower bound,
and the second argument y to an inclusive upper bound.
'''
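        # Worked sketch (values illustrative): with three indexed columns and
        # bounds=(True, True), lower=('a',) is padded below to
        # ('a', MinValue, MinValue) and upper=('a',) to ('a', MaxValue,
        # MaxValue), so the range query matches every key starting with 'a'.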
n = len(lower)
ncols = len(self.columns)
a = MinValue() if bounds[0] else MaxValue()
b = MaxValue() if bounds[1] else MinValue()
# [x, y] search corresponds to [(x, min), (y, max)]
# (x, y) search corresponds to ((x, max), (x, min))
lower = lower + tuple((ncols - n) * [a])
upper = upper + tuple((ncols - n) * [b])
return self.data.range(lower, upper, bounds)
def range(self, lower, upper, bounds=(True, True)):
'''
Return rows within the given range.
Parameters
----------
lower : tuple
Lower prefix bound
upper : tuple
Upper prefix bound
bounds : tuple (x, y) of bools
Indicates whether the search should be inclusive or
exclusive with respect to the endpoints. The first
argument x corresponds to an inclusive lower bound,
and the second argument y to an inclusive upper bound.
'''
return self.data.range(lower, upper, bounds)
def replace(self, row, col_name, val):
'''
Replace the value of a column at a given position.
Parameters
----------
row : int
Row number to modify
col_name : str
Name of the Column to modify
val : col.info.dtype
Value to insert at specified row of col
'''
self.remove_row(row, reorder=False)
key = [c[row] for c in self.columns]
key[self.col_position(col_name)] = val
self.data.add(tuple(key), row)
def replace_rows(self, col_slice):
'''
Modify rows in this index to agree with the specified
slice. For example, given an index
{'5': 1, '2': 0, '3': 2} on a column ['2', '5', '3'],
an input col_slice of [2, 0] will result in the relabeling
{'3': 0, '2': 1} on the sliced column ['3', '2'].
Parameters
----------
col_slice : list
Indices to slice
'''
row_map = dict((row, i) for i, row in enumerate(col_slice))
self.data.replace_rows(row_map)
def sort(self):
'''
Make row numbers follow the same sort order as the keys
of the index.
'''
self.data.sort()
def sorted_data(self):
'''
Returns a list of rows in sorted order based on keys;
essentially acts as an argsort() on columns.
'''
return self.data.sorted_data()
def __getitem__(self, item):
'''
Returns a sliced version of this index.
Parameters
----------
item : slice
Input slice
Returns
-------
SlicedIndex
A sliced reference to this index.
'''
return SlicedIndex(self, item)
def __str__(self):
return str(self.data)
def __repr__(self):
return str(self)
def __deepcopy__(self, memo):
'''
Return a deep copy of this index.
Notes
-----
The default deep copy must be overridden to perform
a shallow copy of the index columns, avoiding infinite recursion.
Parameters
----------
memo : dict
'''
# Bypass Index.__new__ to create an actual Index, not a SlicedIndex.
index = super().__new__(self.__class__)
index.__init__(None, engine=self.engine)
index.data = deepcopy(self.data, memo)
index.columns = self.columns[:] # new list, same columns
memo[id(self)] = index
return index
class SlicedIndex:
'''
This class provides a wrapper around an actual Index object
to make index slicing function correctly. Since numpy expects
array slices to provide an actual data view, a SlicedIndex should
retrieve data directly from the original index and then adapt
it to the sliced coordinate system as appropriate.
Parameters
----------
index : Index
The original Index reference
index_slice : slice
The slice to which this SlicedIndex corresponds
original : bool
Whether this SlicedIndex represents the original index itself.
For the most part this is similar to index[:] but certain
copying operations are avoided, and the slice retains the
length of the actual index despite modification.
'''
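    # Coordinate-mapping sketch (illustrative): for a slice with start=2 and
    # step=3, sliced row r maps to original row 2 + 3 * r via orig_coords(),
    # while sliced_coords() inverts the mapping and drops original rows that
    # fall outside the slice.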
def __init__(self, index, index_slice, original=False):
self.index = index
self.original = original
self._frozen = False
if isinstance(index_slice, tuple):
self.start, self._stop, self.step = index_slice
else: # index_slice is an actual slice
num_rows = len(index.columns[0])
self.start, self._stop, self.step = index_slice.indices(num_rows)
@property
def length(self):
return 1 + (self.stop - self.start - 1) // self.step
@property
def stop(self):
'''
The stopping position of the slice, or the end of the
index if this is an original slice.
'''
return len(self.index) if self.original else self._stop
def __getitem__(self, item):
'''
Returns another slice of this Index slice.
Parameters
----------
item : slice
Index slice
'''
if self.length <= 0:
# empty slice
return SlicedIndex(self.index, slice(1, 0))
start, stop, step = item.indices(self.length)
new_start = self.orig_coords(start)
new_stop = self.orig_coords(stop)
new_step = self.step * step
return SlicedIndex(self.index, (new_start, new_stop, new_step))
def sliced_coords(self, rows):
'''
Convert the input rows to the sliced coordinate system.
Parameters
----------
rows : list
Rows in the original coordinate system
Returns
-------
sliced_rows : list
Rows in the sliced coordinate system
'''
if self.original:
return rows
else:
rows = np.array(rows)
row0 = rows - self.start
if self.step != 1:
correct_mod = np.mod(row0, self.step) == 0
row0 = row0[correct_mod]
if self.step > 0:
ok = (row0 >= 0) & (row0 < self.stop - self.start)
else:
ok = (row0 <= 0) & (row0 > self.stop - self.start)
return row0[ok] // self.step
def orig_coords(self, row):
'''
Convert the input row from sliced coordinates back
to original coordinates.
Parameters
----------
row : int
Row in the sliced coordinate system
Returns
-------
orig_row : int
Row in the original coordinate system
'''
return row if self.original else self.start + row * self.step
def find(self, key):
return self.sliced_coords(self.index.find(key))
def where(self, col_map):
return self.sliced_coords(self.index.where(col_map))
def range(self, lower, upper):
return self.sliced_coords(self.index.range(lower, upper))
def same_prefix(self, key):
return self.sliced_coords(self.index.same_prefix(key))
def sorted_data(self):
return self.sliced_coords(self.index.sorted_data())
def replace(self, row, col, val):
if not self._frozen:
self.index.replace(self.orig_coords(row), col, val)
def copy(self):
if not self.original:
# replace self.index with a new object reference
self.index = deepcopy(self.index)
return self.index
def insert_row(self, pos, vals, columns):
if not self._frozen:
self.copy().insert_row(self.orig_coords(pos), vals,
columns)
def get_row_specifier(self, row_specifier):
return [self.orig_coords(x) for x in
self.index.get_row_specifier(row_specifier)]
def remove_rows(self, row_specifier):
if not self._frozen:
self.copy().remove_rows(row_specifier)
def replace_rows(self, col_slice):
if not self._frozen:
self.index.replace_rows([self.orig_coords(x) for x in col_slice])
def sort(self):
if not self._frozen:
self.copy().sort()
def __repr__(self):
if self.original:
return repr(self.index)
return 'Index slice {} of\n{}'.format(
(self.start, self.stop, self.step), self.index)
def __str__(self):
return repr(self)
def replace_col(self, prev_col, new_col):
self.index.replace_col(prev_col, new_col)
def reload(self):
self.index.reload()
def col_position(self, col_name):
return self.index.col_position(col_name)
def get_slice(self, col_slice, item):
'''
Return a newly created index from the given slice.
Parameters
----------
col_slice : Column object
Already existing slice of a single column
item : list or ndarray
Slice for retrieval
'''
from .table import Table
if len(self.columns) == 1:
return Index([col_slice], engine=self.data.__class__)
t = Table(self.columns, copy_indices=False)
with t.index_mode('discard_on_copy'):
new_cols = t[item].columns.values()
return Index(new_cols, engine=self.data.__class__)
@property
def columns(self):
return self.index.columns
@property
def data(self):
return self.index.data
def get_index(table, table_copy=None, names=None):
"""
    Inputs a table and either a subset of its columns as ``table_copy`` or a
    list/tuple of column names as ``names``, and returns the index
    corresponding to that subset of columns, or None if no such index exists.
Parameters
----------
table : `Table`
Input table
table_copy : `Table`, optional
Subset of the columns in the ``table`` argument
names : list, tuple, optional
Subset of column names in the ``table`` argument
Returns
-------
Index of columns or None
"""
if names is not None and table_copy is not None:
raise ValueError('one and only one argument from "table_copy" or'
' "names" is required')
if names is None and table_copy is None:
raise ValueError('one and only one argument from "table_copy" or'
' "names" is required')
if names is not None:
names = set(names)
else:
names = set(table_copy.colnames)
if not names <= set(table.colnames):
raise ValueError(f'{names} is not a subset of table columns')
for name in names:
for index in table[name].info.indices:
if set([col.info.name for col in index.columns]) == names:
return index
return None
def get_index_by_names(table, names):
'''
Returns an index in ``table`` corresponding to the ``names`` columns or None
if no such index exists.
Parameters
----------
table : `Table`
Input table
    names : tuple, list
Column names
'''
names = list(names)
for index in table.indices:
index_names = [col.info.name for col in index.columns]
if index_names == names:
return index
else:
return None
class _IndexModeContext:
'''
A context manager that allows for special indexing modes, which
are intended to improve performance. Currently the allowed modes
are "freeze", in which indices are not modified upon column modification,
"copy_on_getitem", in which indices are copied upon column slicing,
and "discard_on_copy", in which indices are discarded upon table
copying/slicing.
'''
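    # Usage sketch (hedged; the table name is a placeholder), via the public
    # Table.index_mode context manager:
    #     with t.index_mode('freeze'):
    #         t['id'][:] = new_values  # indices refresh when the block exits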
_col_subclasses = {}
def __init__(self, table, mode):
'''
Parameters
----------
table : Table
The table to which the mode should be applied
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
which one intends to make many additions or modifications on an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
'''
self.table = table
self.mode = mode
# Used by copy_on_getitem
self._orig_classes = []
if mode not in ('freeze', 'discard_on_copy', 'copy_on_getitem'):
raise ValueError("Expected a mode of either 'freeze', "
"'discard_on_copy', or 'copy_on_getitem', got "
"'{}'".format(mode))
def __enter__(self):
if self.mode == 'discard_on_copy':
self.table._copy_indices = False
elif self.mode == 'copy_on_getitem':
for col in self.table.columns.values():
self._orig_classes.append(col.__class__)
col.__class__ = self._get_copy_on_getitem_shim(col.__class__)
else:
for index in self.table.indices:
index._frozen = True
def __exit__(self, exc_type, exc_value, traceback):
if self.mode == 'discard_on_copy':
self.table._copy_indices = True
elif self.mode == 'copy_on_getitem':
for col in reversed(self.table.columns.values()):
col.__class__ = self._orig_classes.pop()
else:
for index in self.table.indices:
index._frozen = False
index.reload()
def _get_copy_on_getitem_shim(self, cls):
"""
This creates a subclass of the column's class which overrides that
class's ``__getitem__``, such that when returning a slice of the
column, the relevant indices are also copied over to the slice.
Ideally, rather than shimming in a new ``__class__`` we would be able
to just flip a flag that is checked by the base class's
``__getitem__``. Unfortunately, since the flag needs to be a Python
variable, this slows down ``__getitem__`` too much in the more common
case where a copy of the indices is not needed. See the docstring for
``astropy.table._column_mixins`` for more information on that.
"""
if cls in self._col_subclasses:
return self._col_subclasses[cls]
def __getitem__(self, item):
value = cls.__getitem__(self, item)
if type(value) is type(self):
value = self.info.slice_indices(value, item, len(self))
return value
clsname = f'_{cls.__name__}WithIndexCopy'
new_cls = type(str(clsname), (cls,), {'__getitem__': __getitem__})
self._col_subclasses[cls] = new_cls
return new_cls
class TableIndices(list):
'''
A special list of table indices allowing
for retrieval by column name(s).
Parameters
----------
lst : list
List of indices
'''
def __init__(self, lst):
super().__init__(lst)
def __getitem__(self, item):
'''
Retrieve an item from the list of indices.
Parameters
----------
item : int, str, tuple, or list
Position in list or name(s) of indexed column(s)
'''
if isinstance(item, str):
item = [item]
if isinstance(item, (list, tuple)):
item = list(item)
for index in self:
try:
for name in item:
index.col_position(name)
if len(index.columns) == len(item):
return index
except ValueError:
pass
# index search failed
raise IndexError(f"No index found for {item}")
return super().__getitem__(item)
class TableLoc:
"""
A pseudo-list of Table rows allowing for retrieval
of rows by indexed column values.
Parameters
----------
table : Table
Indexed table to use
"""
def __init__(self, table):
self.table = table
self.indices = table.indices
if len(self.indices) == 0:
raise ValueError("Cannot create TableLoc object with no indices")
def _get_rows(self, item):
"""
        Retrieve Table row indices by value slice.
"""
if isinstance(item, tuple):
key, item = item
else:
key = self.table.primary_key
index = self.indices[key]
if len(index.columns) > 1:
raise ValueError("Cannot use .loc on multi-column indices")
if isinstance(item, slice):
# None signifies no upper/lower bound
start = MinValue() if item.start is None else item.start
stop = MaxValue() if item.stop is None else item.stop
rows = index.range((start,), (stop,))
else:
if not isinstance(item, (list, np.ndarray)): # single element
item = [item]
# item should be a list or ndarray of values
rows = []
for key in item:
p = index.find((key,))
if len(p) == 0:
raise KeyError(f'No matches found for key {key}')
else:
rows.extend(p)
return rows
def __getitem__(self, item):
"""
Retrieve Table rows by value slice.
Parameters
----------
item : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
"""
rows = self._get_rows(item)
if len(rows) == 0: # no matches found
raise KeyError(f'No matches found for key {item}')
elif len(rows) == 1: # single row
return self.table[rows[0]]
return self.table[rows]
def __setitem__(self, key, value):
"""
Assign Table row's by value slice.
Parameters
----------
key : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
value : New values of the row elements.
Can be a list of tuples/lists to update the row.
"""
rows = self._get_rows(key)
if len(rows) == 0: # no matches found
raise KeyError(f'No matches found for key {key}')
elif len(rows) == 1: # single row
self.table[rows[0]] = value
else: # multiple rows
if len(rows) == len(value):
for row, val in zip(rows, value):
self.table[row] = val
else:
raise ValueError('Right side should contain {} values'.format(len(rows)))
class TableLocIndices(TableLoc):
def __getitem__(self, item):
"""
        Retrieve Table row indices by value slice.
Parameters
----------
item : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
"""
rows = self._get_rows(item)
if len(rows) == 0: # no matches found
raise KeyError(f'No matches found for key {item}')
elif len(rows) == 1: # single row
return rows[0]
return rows
class TableILoc(TableLoc):
'''
A variant of TableLoc allowing for row retrieval by
indexed order rather than data values.
Parameters
----------
table : Table
Indexed table to use
'''
def __init__(self, table):
super().__init__(table)
def __getitem__(self, item):
if isinstance(item, tuple):
key, item = item
else:
key = self.table.primary_key
index = self.indices[key]
rows = index.sorted_data()[item]
table_slice = self.table[rows]
if len(table_slice) == 0: # no matches found
raise IndexError(f'Invalid index for iloc: {item}')
return table_slice
| 32.658333
| 89
| 0.577794
|
5af1c6181ea714a1dccc6bd6184735400939d2d7
| 8,296
|
py
|
Python
|
__main__.py
|
qntnt/badman-scraper
|
f54f1b5f48dfa64d455c39c86c34cad99202c893
|
[
"MIT"
] | null | null | null |
__main__.py
|
qntnt/badman-scraper
|
f54f1b5f48dfa64d455c39c86c34cad99202c893
|
[
"MIT"
] | null | null | null |
__main__.py
|
qntnt/badman-scraper
|
f54f1b5f48dfa64d455c39c86c34cad99202c893
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import pandas as pd
import csv
from multiprocessing.pool import ThreadPool
from get_result import get_result_with_context
from alive_progress import alive_bar
import argparse
import os
from loguru import logger
import sys
from textutils import similarity, clean_address
import tkinter as tk
from tkinter import filedialog, simpledialog
if __name__ == '__main__':
def noneOrEmpty(s: str):
return s is None or len(s.strip()) == 0
root = tk.Tk()
try:
TMP_DIR = '.tmp'
if TMP_DIR not in os.listdir():
os.makedirs(TMP_DIR)
# Parse command line arguments
parser = argparse.ArgumentParser(description='Find detailed parcel data based on xlsx formatted parcel listings.', allow_abbrev=True)
parser.add_argument('-input_file', type=str, required=False, help='Path to parcel listings .xlsx file',)
parser.add_argument('-concurrency', type=int, required=False, help='The number of concurrent threads to process parcels (be careful not to set this too high)')
parser.add_argument('-output_file', type=str, required=False, help='The output .csv file')
parser.add_argument(
'-log',
type=str,
required=False,
help='The log level for output',
choices=('CRITICAL', 'ERROR','WARNING', 'SUCCESS', 'INFO', 'DEBUG', 'TRACE'),
default='INFO'
)
args = parser.parse_args()
logger.configure(
handlers = [
{
'sink': sys.stdout,
'format':'<green>{time:YYYY:MM:DD HH:mm:ss zz}</green> | <level>{level}: {message}</level>',
'level': args.log,
},
],
)
input_file = args.input_file
if input_file is None:
root.withdraw()
input_file = filedialog.askopenfilename(title='Select input Excel file', filetypes=[('Excel files', '.xlsx .xls',)])
output_file = args.output_file
if output_file is None:
root.withdraw()
output_file = filedialog.asksaveasfilename(
title='Save output as',
filetypes=[('Csv files', '.csv',)],
initialfile='output.csv'
)
concurrency = args.concurrency
if concurrency is None:
root.withdraw()
concurrency = simpledialog.askinteger(
'Set concurrency',
'Set the number of documents to request and process at the same time.',
minvalue=1,
maxvalue=200,
initialvalue=20
)
concurrency = max(1, min(200, concurrency))
df = pd.read_excel(input_file)
field_names = [
'parcel_number',
'alt_parcel_number',
'county',
'township',
'property_address',
'owner',
'owner_address',
'tax_district',
'neighborhood',
'property_class',
'property_subclass',
]
parcels = {}
unprocessed_parcels = set()
parcel_ids = df['PARCEL_C']
# Find unprocessed parcels
if os.path.exists(output_file):
logger.info('Checking input for unprocessed parcels')
with open(output_file, 'r', newline='') as out_csv:
out_reader = csv.DictReader(out_csv)
with alive_bar(len(parcel_ids)) as progress:
for row in out_reader:
parcel_id = row['parcel_number']
owner = row['owner']
property_address = row['property_address']
if (not noneOrEmpty(owner) and 'property address' not in property_address.lower()) and parcel_id not in parcels:
parcels[parcel_id] = row
for parcel_id in parcel_ids:
if str(parcel_id) not in parcels:
unprocessed_parcels.add(parcel_id)
progress()
else:
unprocessed_parcels = parcel_ids
logger.info('{count} unprocessed parcels', count=len(unprocessed_parcels))
broken_parcels = set()
# Process unprocessed parcels
logger.info('Processing parcels. Writing to {file}. This may take a while...', file=output_file)
with open(output_file, 'w', newline='') as out_csv:
out_writer = csv.DictWriter(out_csv, fieldnames=field_names)
# Set up file
out_writer.writeheader()
for processed_parcel in parcels.values():
out_writer.writerow(processed_parcel)
out_csv.flush()
with alive_bar(len(unprocessed_parcels)) as progress, ThreadPool(concurrency) as pool:
for result in pool.imap(get_result_with_context(broken_parcels), unprocessed_parcels):
try:
out_writer.writerow(result)
out_csv.flush()
progress()
except Exception as e:
print(e)
progress()
broken_file = None
# Report broken parcels
if len(broken_parcels) > 0:
path, file = os.path.split(output_file)
filename = os.path.join(path, 'broken_parcels.csv')
logger.warning('Some parcels are broken. Writing their ids to \"{file}\"', file=filename)
with open(filename, 'w', newline='') as broken_csv:
out_writer = csv.DictWriter(broken_csv, fieldnames=['broken_parcel_id'])
out_writer.writeheader()
for parcel_id in broken_parcels:
out_writer.writerow({ 'broken_parcel_id': parcel_id })
broken_file = filename
# Clean up files
logger.info('Cleaning temporary files')
for rootpath, dirs, files in os.walk(TMP_DIR, topdown=False):
for name in files:
os.remove(os.path.join(rootpath, name))
for name in dirs:
os.rmdir(os.path.join(rootpath, name))
os.rmdir(TMP_DIR)
# Post-process output
path, ext = os.path.splitext(output_file)
post_processed_output_filename = path + '_post_processed' + ext
logger.info('Post-processing output to {file}', file=post_processed_output_filename)
rows = []
with open(output_file, 'r', newline='') as results_csv:
out_reader = csv.DictReader(results_csv)
for row in out_reader:
rows.append(row)
with open(post_processed_output_filename, 'w', newline='') as outfile:
post_processed_field_names = field_names + [ 'property_address_owner_address_similarity' ]
outwriter = csv.DictWriter(outfile, fieldnames=post_processed_field_names)
outwriter.writeheader()
with alive_bar(len(rows)) as progress:
for row in rows:
processed_row = row
processed_row['property_address'] = clean_address(processed_row['property_address'])
processed_row['owner_address'] = clean_address(processed_row['owner_address'])
processed_row['property_address_owner_address_similarity'] = similarity(row['property_address'], row['owner_address'])
outwriter.writerow(processed_row)
outfile.flush()
progress()
broken_message = ''
if not noneOrEmpty(broken_file):
broken_message = '\nBroken parcels written to file: {}'.format(broken_file)
tk.messagebox.showinfo(
'badman-scraper',
'''Processing complete!
Processed {} files
Output to files: "{}" and "{}"{}'''.format(
len(parcel_ids),
output_file,
post_processed_output_filename,
broken_message
),
)
except Exception as e:
root.withdraw()
tk.messagebox.showerror('badman-scraper', '{}'.format(e))
raise e
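# The post-processing step above relies on clean_address() and similarity() from
# textutils, whose implementation is not shown in this file. A rough sketch of
# what such helpers might look like (purely an assumption, using difflib):
import re
from difflib import SequenceMatcher

def _clean_address_sketch(address):
    """Normalize whitespace and case so addresses compare more reliably."""
    if address is None:
        return ''
    return re.sub(r'\s+', ' ', address).strip().upper()

def _similarity_sketch(a, b):
    """Return a 0..1 similarity ratio between two cleaned address strings."""
    return SequenceMatcher(None, _clean_address_sketch(a), _clean_address_sketch(b)).ratio()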
| 41.273632
| 167
| 0.566056
|
0205d1fa24498ef2142fa494f41a70730ccd75ff
| 473
|
py
|
Python
|
boil/template/environment.py
|
soursoup/boil
|
dbd9711cc082d2fd357f9bb66311ba524102d2e7
|
[
"MIT"
] | 4
|
2018-04-01T18:02:14.000Z
|
2022-02-28T16:58:37.000Z
|
boil/template/environment.py
|
bzurkowski/boil
|
dbd9711cc082d2fd357f9bb66311ba524102d2e7
|
[
"MIT"
] | null | null | null |
boil/template/environment.py
|
bzurkowski/boil
|
dbd9711cc082d2fd357f9bb66311ba524102d2e7
|
[
"MIT"
] | null | null | null |
import jinja2
from boil.common import filters
class Environment(jinja2.Environment):
def __init__(self, package_name):
loader = jinja2.PackageLoader(package_name)
super(Environment, self).__init__(
loader=loader,
keep_trailing_newline=True,
trim_blocks=True,
lstrip_blocks=True)
self._setup_filters()
def _setup_filters(self):
self.filters.update(filters.TEMPLATE_FILTERS.copy())
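# Illustrative usage only: 'my_plugin' and 'Dockerfile.j2' are placeholder names
# and assume the package ships a jinja2 'templates/' directory (the default
# location searched by PackageLoader).
env = Environment('my_plugin')
rendered = env.get_template('Dockerfile.j2').render(project_name='demo')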
| 24.894737
| 60
| 0.668076
|
22f434fc3e638032d8123b8ec6e90228a56cd5ae
| 487
|
py
|
Python
|
sam/layer/python/liftlog/custom_encoder.py
|
nickznaj/lift-log
|
022f80c3daaaea268f162f55545e63277fde59d8
|
[
"MIT"
] | null | null | null |
sam/layer/python/liftlog/custom_encoder.py
|
nickznaj/lift-log
|
022f80c3daaaea268f162f55545e63277fde59d8
|
[
"MIT"
] | null | null | null |
sam/layer/python/liftlog/custom_encoder.py
|
nickznaj/lift-log
|
022f80c3daaaea268f162f55545e63277fde59d8
|
[
"MIT"
] | null | null | null |
from decimal import Decimal
import datetime
import json
class CustomEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, Decimal):
if o % 1 != 0:
return float(o)
else:
return int(o)
elif isinstance(o, datetime.datetime):
return o.strftime("%Y-%m-%d")
elif isinstance(o, datetime.date):
return o.strftime("%Y-%m-%d")
return json.JSONEncoder.default(self, o)
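# Example: whole-number Decimals serialize as ints, fractional ones as floats,
# and date/datetime values as "YYYY-MM-DD" strings (sample values are illustrative).
payload = {
    'reps': Decimal('5'),                      # -> 5
    'weight': Decimal('72.5'),                 # -> 72.5
    'logged_on': datetime.date(2021, 3, 14),   # -> "2021-03-14"
}
print(json.dumps(payload, cls=CustomEncoder))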
| 28.647059
| 48
| 0.562628
|
46ced261fe7e78ab37a6f98b22eea860cf16a799
| 3,525
|
py
|
Python
|
-Loan-Defaulters/code.py
|
nishantmaru01/ga-learner-dsmp-repo
|
959fa690d32c4d06efa274cc9b69b06c6dd7ea0a
|
[
"MIT"
] | null | null | null |
-Loan-Defaulters/code.py
|
nishantmaru01/ga-learner-dsmp-repo
|
959fa690d32c4d06efa274cc9b69b06c6dd7ea0a
|
[
"MIT"
] | null | null | null |
-Loan-Defaulters/code.py
|
nishantmaru01/ga-learner-dsmp-repo
|
959fa690d32c4d06efa274cc9b69b06c6dd7ea0a
|
[
"MIT"
] | null | null | null |
# --------------
#Importing header files
import pandas as pd
from sklearn.model_selection import train_test_split
# Code starts here
data=pd.read_csv(path)
X=data.drop(['customer.id','paid.back.loan'],axis=1)
y=data['paid.back.loan']
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size = 0.3,random_state = 0)
# Code ends here
# --------------
#Importing header files
import matplotlib.pyplot as plt
# Code starts here
fully_paid=y_train.value_counts()
fully_paid.plot(kind='bar')
# Code ends here
# --------------
#Importing header files
import numpy as np
from sklearn.preprocessing import LabelEncoder
# Code starts here
X_train['int.rate']=X_train['int.rate'].str.replace("%","").astype(float)/100
X_test['int.rate']=X_test['int.rate'].str.replace("%","").astype(float)/100
num_df=X_train.select_dtypes(exclude='object')
cat_df=X_train.select_dtypes(include='object')
# Code ends here
# --------------
#Importing header files
import seaborn as sns
# Code starts here
cols=num_df.columns
fig, axes = plt.subplots(nrows=9, ncols=1)
for i in range(9):
sns.boxplot(x=y_train, y=num_df[cols[i]], ax=axes[i])
# Code ends here
# --------------
# Code starts here
cols=cat_df.columns
fig, axes = plt.subplots(nrows=2, ncols=2)
for i in range(2):
for j in range(2):
sns.countplot(x=X_train[cols[i*2+j]], hue=y_train, ax=axes[i, j])
# Code ends here
# --------------
#Importing header files
from sklearn.tree import DecisionTreeClassifier
# Code starts here
for col in cat_df.columns:
#Filling null values with 'NA'
X_train[col].fillna('NA',inplace=True)
#Initalising a label encoder object
le=LabelEncoder()
#Fitting and transforming the column in X_train with 'le'
X_train[col]=le.fit_transform(X_train[col])
#Filling null values with 'NA'
X_test[col].fillna('NA',inplace=True)
#Fitting the column in X_test with 'le'
X_test[col]=le.transform(X_test[col])
# Replacing the values of y_train
y_train.replace({'No':0,'Yes':1},inplace=True)
# Replacing the values of y_test
y_test.replace({'No':0,'Yes':1},inplace=True)
#Initialising 'Decision Tree' model
model=DecisionTreeClassifier(random_state=0)
#Training the 'Decision Tree' model
model.fit(X_train, y_train)
#Finding the accuracy of 'Decision Tree' model
acc=model.score(X_test, y_test)
#Printing the accuracy
print(acc)
# Code ends here
# --------------
#Importing header files
from sklearn.model_selection import GridSearchCV
#Parameter grid
parameter_grid = {'max_depth': np.arange(3,10), 'min_samples_leaf': range(10,50,10)}
# Code starts here
model_2=DecisionTreeClassifier(random_state=0)
p_tree=GridSearchCV(estimator=model_2, param_grid=parameter_grid, cv=5)
p_tree.fit(X_train,y_train)
acc_2=p_tree.score(X_test,y_test)
# Code ends here
# --------------
#Importing header files
from io import StringIO
from sklearn.tree import export_graphviz
from sklearn import tree
from sklearn import metrics
from IPython.display import Image
import pydotplus
# Code starts here
dot_data=export_graphviz(decision_tree=p_tree.best_estimator_, out_file=None, feature_names=X.columns, filled = True,class_names=['loan_paid_back_yes','loan_paid_back_no'])
graph_big=pydotplus.graph_from_dot_data(dot_data)
# show graph - do not delete/modify the code below this line
img_path = user_data_dir+'/file.png'
graph_big.write_png(img_path)
plt.figure(figsize=(20,15))
plt.imshow(plt.imread(img_path))
plt.axis('off')
plt.show()
# Code ends here
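# Optional check (assumes the cells above were run): inspect the parameters the
# grid search settled on and compare the two accuracies.
print(p_tree.best_params_)   # best max_depth / min_samples_leaf found by GridSearchCV
print(acc, acc_2)            # untuned vs. tuned decision-tree accuracy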
| 22.03125
| 172
| 0.719716
|
2eaeae5b653de99ab8b322973bae0d2745ad4c1c
| 154
|
py
|
Python
|
src/main.py
|
SergioSV96/MyOwnBlockchain
|
ee62ccedfa42b22ded3501557d5ad16d83329de0
|
[
"MIT"
] | 1
|
2021-11-04T22:20:39.000Z
|
2021-11-04T22:20:39.000Z
|
src/main.py
|
SergioSV96/MyOwnBlockchain
|
ee62ccedfa42b22ded3501557d5ad16d83329de0
|
[
"MIT"
] | null | null | null |
src/main.py
|
SergioSV96/MyOwnBlockchain
|
ee62ccedfa42b22ded3501557d5ad16d83329de0
|
[
"MIT"
] | null | null | null |
from src.blockchain import Blockchain
blockchain = Blockchain()
blockchain.add_block("Second!")
blockchain.add_block("Third!")
print(blockchain)
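# Hypothetical sketch of the Blockchain/Block classes imported above; the real
# src.blockchain module is not shown here, so every name and field is an assumption.
import hashlib
import time

class Block:
    def __init__(self, data, previous_hash):
        self.timestamp = time.time()
        self.data = data
        self.previous_hash = previous_hash
        self.hash = hashlib.sha256(
            f'{self.timestamp}{data}{previous_hash}'.encode()).hexdigest()

class Blockchain:
    def __init__(self):
        self.chain = [Block('Genesis', '0')]

    def add_block(self, data):
        self.chain.append(Block(data, self.chain[-1].hash))

    def __str__(self):
        return ' -> '.join(block.data for block in self.chain)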
| 19.25
| 38
| 0.75974
|
5abae6098056742e71eac805d6881655c858b488
| 2,439
|
py
|
Python
|
2020/util.py
|
vypxl/aoc
|
4187837ecd8bf7464efa4953588b8c53d5675cfb
|
[
"WTFPL"
] | 1
|
2022-01-08T23:39:52.000Z
|
2022-01-08T23:39:52.000Z
|
2020/util.py
|
vypxl/aoc
|
4187837ecd8bf7464efa4953588b8c53d5675cfb
|
[
"WTFPL"
] | null | null | null |
2020/util.py
|
vypxl/aoc
|
4187837ecd8bf7464efa4953588b8c53d5675cfb
|
[
"WTFPL"
] | 2
|
2020-12-19T16:44:54.000Z
|
2020-12-19T19:00:55.000Z
|
from os.path import basename
import re
import itertools as it # pylint: disable=unused-import
import numpy as np # pylint: disable=unused-import
import networkx as nx # pylint: disable=unused-import
import matplotlib.pyplot as plt # pylint: disable=unused-import
from toolz.curried import * # pylint: disable=unused-wildcard-import
import __main__ as mainmodule
def get_day():
return re.findall(r"\d+", basename(mainmodule.__file__))[0]
def data(day=None):
return open(f"{day or get_day()}.in").read()
def data_lines(day=None):
return data(day).splitlines()
def data_nums(day=None, by=None):
return nums(data(day), by)
def data_lines_nums(day=None, by=None):
return list(map(lambda l: nums(l, by), data_lines(day=day)))
def nums(s, by=None):
if by is None:
by = r"[-\d]+"
return list(map(int, re.findall(by, s)))
def call(f):
return f()
def init(xs):
return xs[:-1]
async def qCollect(q):
xs = []
while not q.empty():
item = await q.get()
xs.append(item)
return xs
async def qFill(q, xs):
for x in xs:
await q.put(x)
return q
def tr(s, a, b):
return s.translate(str.maketrans(a, b))
def nx_from_node_list(nodes, directed=False, weighted=False):
ctor = (lambda x: nx.DiGraph(x, directed=True)) if directed else nx.Graph  # parenthesized so the undirected branch still builds a graph from the edge list
if weighted:
el = [(group[0], dest[0], { 'weight': dest[1] }) for group in nodes for dest in group[1] if dest]
else:
el = [(group[0], dest) for group in nodes for dest in group[1] if dest]
return ctor(el)
def nx_draw_graph(G, weighted=False):
pos = nx.shell_layout(G)
if weighted:
edge_labels = dict([((u, v, ), d['weight']) for u, v, d in G.edges(data=True)])
nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)
nx.draw(G, pos, arrows=True, with_labels=True, node_size=1800)
plt.show()
def np_print_grid(grid, chars):
for i in range(grid.shape[0]):
for j in range(grid.shape[1]):
print(chars[grid[i, j]], end='')
print('')
lmap = compose(list, map)
attr = curry(flip(getattr)) # pylint: disable=no-value-for-parameter
mapattr = compose(map, attr)
applyN = curry(lambda f, n, x: reduce(lambda x, f: f(x), [f] * n, x))
swap = lambda t: (t[1], t[0])
flatten = lambda l: [item for sublist in l for item in sublist]
prod = reduce(lambda a, b: a * b)
fst = lambda x: x[0]
snd = lambda x: x[1]
thd = lambda x: x[2]
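# A few of the helpers above in action (values are illustrative); guarded so the
# module stays side-effect free when imported by a puzzle solution.
if __name__ == '__main__':
    print(nums('x=3, y=-7 z=42'))            # [3, -7, 42]
    print(applyN(lambda x: x + 1, 3, 10))    # 13
    print(lmap(swap, [(1, 'a'), (2, 'b')]))  # [('a', 1), ('b', 2)]
    print(prod([2, 3, 4]))                   # 24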
| 29.385542
| 105
| 0.645346
|
7b6d9bea24f3b8938ef97c1125ff2b2eb5992cbf
| 574
|
py
|
Python
|
neighbor/urls.py
|
LewisNjagi/neighborhood
|
cbefac55b930999629dba202a784a096799949a4
|
[
"MIT"
] | null | null | null |
neighbor/urls.py
|
LewisNjagi/neighborhood
|
cbefac55b930999629dba202a784a096799949a4
|
[
"MIT"
] | null | null | null |
neighbor/urls.py
|
LewisNjagi/neighborhood
|
cbefac55b930999629dba202a784a096799949a4
|
[
"MIT"
] | 2
|
2021-04-14T05:56:06.000Z
|
2021-04-15T14:20:02.000Z
|
from django.urls import path, include
from . import views
urlpatterns=[
path('',views.index,name = 'index'),
path('profile/<str:username>/',views.profile,name = 'profile'),
path('hoods/',views.hoods,name = 'hoods'),
path('single_hood/<hood_id>',views.single_hood,name = 'single_hood'),
path('results/',views.search_profile,name = 'results'),
path('join_hood/<id>',views.join,name='join'),
path('leave_hood/<id>',views.leave,name='leave'),
path('api/profile/', views.ProfileList.as_view()),
path('api/users/', views.UserList.as_view()),
]
| 41
| 73
| 0.667247
|
e8528ac4c8b1b91a44ff3ccd628ea7e9a7dc8c5f
| 35,020
|
py
|
Python
|
airflow/contrib/hooks/bigquery_hook.py
|
siyuan-platform/airflow
|
a6c28f34de52f9aa6b62111aac8fa586694e6807
|
[
"Apache-2.0"
] | null | null | null |
airflow/contrib/hooks/bigquery_hook.py
|
siyuan-platform/airflow
|
a6c28f34de52f9aa6b62111aac8fa586694e6807
|
[
"Apache-2.0"
] | null | null | null |
airflow/contrib/hooks/bigquery_hook.py
|
siyuan-platform/airflow
|
a6c28f34de52f9aa6b62111aac8fa586694e6807
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains a BigQuery Hook, as well as a very basic PEP 249
implementation for BigQuery.
"""
from builtins import range
from past.builtins import basestring
import logging
import time
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
from airflow.hooks.dbapi_hook import DbApiHook
from apiclient.discovery import build, HttpError
from pandas.io.gbq import GbqConnector, \
_parse_data as gbq_parse_data, \
_check_google_client_version as gbq_check_google_client_version, \
_test_google_api_imports as gbq_test_google_api_imports
from pandas.tools.merge import concat
logging.getLogger("bigquery").setLevel(logging.INFO)
class BigQueryHook(GoogleCloudBaseHook, DbApiHook):
"""
Interact with BigQuery. This hook uses the Google Cloud Platform
connection.
"""
conn_name_attr = 'bigquery_conn_id'
def __init__(self,
bigquery_conn_id='bigquery_default',
delegate_to=None):
super(BigQueryHook, self).__init__(
conn_id=bigquery_conn_id,
delegate_to=delegate_to)
def get_conn(self):
"""
Returns a BigQuery PEP 249 connection object.
"""
service = self.get_service()
project = self._get_field('project')
return BigQueryConnection(service=service, project_id=project)
def get_service(self):
"""
Returns a BigQuery service object.
"""
http_authorized = self._authorize()
return build('bigquery', 'v2', http=http_authorized)
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
"""
Insertion is currently unsupported. Theoretically, you could use
BigQuery's streaming API to insert rows into a table, but this hasn't
been implemented.
"""
raise NotImplementedError()
def get_pandas_df(self, bql, parameters=None):
"""
Returns a Pandas DataFrame for the results produced by a BigQuery
query. The DbApiHook method must be overridden because Pandas
doesn't support PEP 249 connections, except for SQLite. See:
https://github.com/pydata/pandas/blob/master/pandas/io/sql.py#L447
https://github.com/pydata/pandas/issues/6900
:param bql: The BigQuery SQL to execute.
:type bql: string
"""
service = self.get_service()
project = self._get_field('project')
connector = BigQueryPandasConnector(project, service)
schema, pages = connector.run_query(bql)
dataframe_list = []
while len(pages) > 0:
page = pages.pop()
dataframe_list.append(gbq_parse_data(schema, page))
if len(dataframe_list) > 0:
return concat(dataframe_list, ignore_index=True)
else:
return gbq_parse_data(schema, [])
class BigQueryPandasConnector(GbqConnector):
"""
This connector behaves identically to GbqConnector (from Pandas), except
that it allows the service to be injected, and disables a call to
self.get_credentials(). This allows Airflow to use BigQuery with Pandas
without forcing a three legged OAuth connection. Instead, we can inject
service account credentials into the binding.
"""
def __init__(self, project_id, service, reauth=False, verbose=False):
gbq_check_google_client_version()
gbq_test_google_api_imports()
self.project_id = project_id
self.reauth = reauth
self.service = service
self.verbose = verbose
class BigQueryConnection(object):
"""
BigQuery does not have a notion of a persistent connection. Thus, these
objects are small stateless factories for cursors, which do all the real
work.
"""
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
def close(self):
""" BigQueryConnection does not have anything to close. """
pass
def commit(self):
""" BigQueryConnection does not support transactions. """
pass
def cursor(self):
""" Return a new :py:class:`Cursor` object using the connection. """
return BigQueryCursor(*self._args, **self._kwargs)
def rollback(self):
raise NotImplementedError(
"BigQueryConnection does not have transactions")
class BigQueryBaseCursor(object):
"""
The BigQuery base cursor contains helper methods to execute queries against
BigQuery. The methods can be used directly by operators, in cases where a
PEP 249 cursor isn't needed.
"""
def __init__(self, service, project_id):
self.service = service
self.project_id = project_id
def run_query(
self, bql, destination_dataset_table = False,
write_disposition = 'WRITE_EMPTY',
allow_large_results=False,
udf_config = False,
use_legacy_sql=True):
"""
Executes a BigQuery SQL query. Optionally persists results in a BigQuery
table. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param bql: The BigQuery SQL to execute.
:type bql: string
:param destination_dataset_table: The dotted <dataset>.<table>
BigQuery table to save the query results.
:param write_disposition: What to do if the table already exists in
BigQuery.
:param allow_large_results: Whether to allow large results.
:type allow_large_results: boolean
:param udf_config: The User Defined Function configuration for the query.
See https://cloud.google.com/bigquery/user-defined-functions for details.
:type udf_config: list
:param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
:type use_legacy_sql: boolean
"""
configuration = {
'query': {
'query': bql,
'useLegacySql': use_legacy_sql
}
}
if destination_dataset_table:
assert '.' in destination_dataset_table, (
'Expected destination_dataset_table in the format of '
'<dataset>.<table>. Got: {}').format(destination_dataset_table)
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_dataset_table,
default_project_id=self.project_id)
configuration['query'].update({
'allowLargeResults': allow_large_results,
'writeDisposition': write_disposition,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
}
})
if udf_config:
assert isinstance(udf_config, list)
configuration['query'].update({
'userDefinedFunctionResources': udf_config
})
return self.run_with_configuration(configuration)
def run_extract( # noqa
self, source_project_dataset_table, destination_cloud_storage_uris,
compression='NONE', export_format='CSV', field_delimiter=',',
print_header=True):
"""
Executes a BigQuery extract command to copy data from BigQuery to
Google Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param source_project_dataset_table: The dotted <dataset>.<table>
BigQuery table to use as the source data.
:type source_project_dataset_table: string
:param destination_cloud_storage_uris: The destination Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). Follows
convention defined here:
https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
:type destination_cloud_storage_uris: list
:param compression: Type of compression to use.
:type compression: string
:param export_format: File format to export.
:type export_format: string
:param field_delimiter: The delimiter to use when extracting to a CSV.
:type field_delimiter: string
:param print_header: Whether to print a header for a CSV file extract.
:type print_header: boolean
"""
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
configuration = {
'extract': {
'sourceTable': {
'projectId': source_project,
'datasetId': source_dataset,
'tableId': source_table,
},
'compression': compression,
'destinationUris': destination_cloud_storage_uris,
'destinationFormat': export_format,
}
}
if export_format == 'CSV':
# Only set fieldDelimiter and printHeader fields if using CSV.
# Google does not like it if you set these fields for other export
# formats.
configuration['extract']['fieldDelimiter'] = field_delimiter
configuration['extract']['printHeader'] = print_header
return self.run_with_configuration(configuration)
def run_copy(self,
source_project_dataset_tables,
destination_project_dataset_table,
write_disposition='WRITE_EMPTY',
create_disposition='CREATE_IF_NEEDED'):
"""
Executes a BigQuery copy command to copy data from one BigQuery table
to another. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
For more details about these parameters.
:param source_project_dataset_tables: One or more dotted
(project:|project.)<dataset>.<table>
BigQuery tables to use as the source data. Use a list if there are
multiple source tables.
If <project> is not included, project will be the project defined
in the connection json.
:type source_project_dataset_tables: list|string
:param destination_project_dataset_table: The destination BigQuery
table. Format is: (project:|project.)<dataset>.<table>
:type destination_project_dataset_table: string
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: string
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: string
"""
source_project_dataset_tables = (
[source_project_dataset_tables]
if not isinstance(source_project_dataset_tables, list)
else source_project_dataset_tables)
source_project_dataset_tables_fixup = []
for source_project_dataset_table in source_project_dataset_tables:
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
source_project_dataset_tables_fixup.append({
'projectId': source_project,
'datasetId': source_dataset,
'tableId': source_table
})
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id)
configuration = {
'copy': {
'createDisposition': create_disposition,
'writeDisposition': write_disposition,
'sourceTables': source_project_dataset_tables_fixup,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table
}
}
}
return self.run_with_configuration(configuration)
def run_load(self,
destination_project_dataset_table,
schema_fields, source_uris,
source_format='CSV',
create_disposition='CREATE_IF_NEEDED',
skip_leading_rows=0,
write_disposition='WRITE_EMPTY',
field_delimiter=','):
"""
Executes a BigQuery load command to load data from Google Cloud Storage
to BigQuery. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param destination_project_dataset_table:
The dotted (<project>.|<project>:)<dataset>.<table> BigQuery table to load
data into. If <project> is not included, project will be the project defined
in the connection json.
:type destination_project_dataset_table: string
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
:type schema_fields: list
:param source_uris: The source Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). A single wildcard
per object name can be used.
:type source_uris: list
:param source_format: File format to export.
:type source_format: string
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: string
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: string
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: string
"""
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id,
var_name='destination_project_dataset_table')
configuration = {
'load': {
'createDisposition': create_disposition,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
},
'schema': {
'fields': schema_fields
},
'sourceFormat': source_format,
'sourceUris': source_uris,
'writeDisposition': write_disposition,
}
}
if source_format == 'CSV':
configuration['load']['skipLeadingRows'] = skip_leading_rows
configuration['load']['fieldDelimiter'] = field_delimiter
return self.run_with_configuration(configuration)
def run_with_configuration(self, configuration):
"""
Executes a BigQuery SQL query. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about the configuration parameter.
:param configuration: The configuration parameter maps directly to
BigQuery's configuration field in the job object. See
https://cloud.google.com/bigquery/docs/reference/v2/jobs for
details.
"""
jobs = self.service.jobs()
job_data = {
'configuration': configuration
}
# Send query and wait for reply.
query_reply = jobs \
.insert(projectId=self.project_id, body=job_data) \
.execute()
job_id = query_reply['jobReference']['jobId']
job = jobs.get(projectId=self.project_id, jobId=job_id).execute()
# Wait for query to finish.
while not job['status']['state'] == 'DONE':
logging.info('Waiting for job to complete: %s, %s', self.project_id, job_id)
time.sleep(5)
job = jobs.get(projectId=self.project_id, jobId=job_id).execute()
# Check if job had errors.
if 'errorResult' in job['status']:
raise Exception(
'BigQuery job failed. Final error was: {}. The job was: {}'.format(
job['status']['errorResult'], job
)
)
return job_id
def get_schema(self, dataset_id, table_id):
"""
Get the schema for a given dataset.table.
see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:param dataset_id: the dataset ID of the requested table
:param table_id: the table ID of the requested table
:return: a table schema
"""
tables_resource = self.service.tables() \
.get(projectId=self.project_id, datasetId=dataset_id, tableId=table_id) \
.execute()
return tables_resource['schema']
def get_tabledata(self, dataset_id, table_id,
max_results=None, page_token=None, start_index=None):
"""
Get the data of a given dataset.table.
see https://cloud.google.com/bigquery/docs/reference/v2/tabledata/list
:param dataset_id: the dataset ID of the requested table.
:param table_id: the table ID of the requested table.
:param max_results: the maximum results to return.
:param page_token: page token, returned from a previous call,
identifying the result set.
:param start_index: zero based index of the starting row to read.
:return: map containing the requested rows.
"""
optional_params = {}
if max_results:
optional_params['maxResults'] = max_results
if page_token:
optional_params['pageToken'] = page_token
if start_index:
optional_params['startIndex'] = start_index
return (
self.service.tabledata()
.list(
projectId=self.project_id, datasetId=dataset_id,
tableId=table_id, **optional_params)
.execute()
)
def run_table_delete(self, deletion_dataset_table, ignore_if_missing=False):
"""
Delete an existing table from the dataset;
If the table does not exist, return an error unless ignore_if_missing
is set to True.
:param deletion_dataset_table: A dotted
(<project>.|<project>:)<dataset>.<table> that indicates which table
will be deleted.
:type deletion_dataset_table: str
:param ignore_if_missing: if True, then return success even if the
requested table does not exist.
:type ignore_if_missing: boolean
:return:
"""
assert '.' in deletion_dataset_table, (
'Expected deletion_dataset_table in the format of '
'<dataset>.<table>. Got: {}').format(deletion_dataset_table)
deletion_project, deletion_dataset, deletion_table = \
_split_tablename(table_input=deletion_dataset_table,
default_project_id=self.project_id)
try:
tables_resource = self.service.tables() \
.delete(projectId=deletion_project,
datasetId=deletion_dataset,
tableId=deletion_table) \
.execute()
logging.info('Deleted table %s:%s.%s.',
deletion_project, deletion_dataset, deletion_table)
except HttpError:
if not ignore_if_missing:
raise Exception(
'Table deletion failed. Table does not exist.')
else:
logging.info('Table does not exist. Skipping.')
def run_table_upsert(self, dataset_id, table_resource, project_id=None):
"""
creates a new, empty table in the dataset;
If the table already exists, update the existing table.
Since BigQuery does not natively allow table upserts, this is not an
atomic operation.
:param dataset_id: the dataset to upsert the table into.
:type dataset_id: str
:param table_resource: a table resource. see
https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:type table_resource: dict
:param project_id: the project to upsert the table into. If None,
project will be self.project_id.
:return:
"""
# check to see if the table exists
table_id = table_resource['tableReference']['tableId']
project_id = project_id if project_id is not None else self.project_id
tables_list_resp = self.service.tables().list(projectId=project_id,
datasetId=dataset_id).execute()
while True:
for table in tables_list_resp.get('tables', []):
if table['tableReference']['tableId'] == table_id:
# found the table, do update
logging.info('table %s:%s.%s exists, updating.',
project_id, dataset_id, table_id)
return self.service.tables().update(projectId=project_id,
datasetId=dataset_id,
tableId=table_id,
body=table_resource).execute()
# If there is a next page, we need to check the next page.
if 'nextPageToken' in tables_list_resp:
tables_list_resp = self.service.tables()\
.list(projectId=project_id,
datasetId=dataset_id,
pageToken=tables_list_resp['nextPageToken'])\
.execute()
# If there is no next page, then the table doesn't exist.
else:
# do insert
logging.info('table %s:%s.%s does not exist. creating.',
project_id, dataset_id, table_id)
return self.service.tables().insert(projectId=project_id,
datasetId=dataset_id,
body=table_resource).execute()
def run_grant_dataset_view_access(self,
source_dataset,
view_dataset,
view_table,
source_project = None,
view_project = None):
"""
Grant authorized view access of a dataset to a view table.
If this view has already been granted access to the dataset, do nothing.
This method is not atomic. Running it may clobber a simultaneous update.
:param source_dataset: the source dataset
:type source_dataset: str
:param view_dataset: the dataset that the view is in
:type view_dataset: str
:param view_table: the table of the view
:type view_table: str
:param source_project: the project of the source dataset. If None,
self.project_id will be used.
:type source_project: str
:param view_project: the project that the view is in. If None,
self.project_id will be used.
:type view_project: str
:return: the datasets resource of the source dataset.
"""
# Apply default values to projects
source_project = source_project if source_project else self.project_id
view_project = view_project if view_project else self.project_id
# we don't want to clobber any existing accesses, so we have to get
# info on the dataset before we can add view access
source_dataset_resource = self.service.datasets().get(projectId=source_project,
datasetId=source_dataset).execute()
access = source_dataset_resource['access'] if 'access' in source_dataset_resource else []
view_access = {'view': {'projectId': view_project,
'datasetId': view_dataset,
'tableId': view_table}}
# check to see if the view we want to add already exists.
if view_access not in access:
logging.info('granting table %s:%s.%s authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table,
source_project, source_dataset)
access.append(view_access)
return self.service.datasets().patch(projectId=source_project,
datasetId=source_dataset,
body={'access': access}).execute()
else:
# if view is already in access, do nothing.
logging.info('table %s:%s.%s already has authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table,
source_project, source_dataset)
return source_dataset_resource
class BigQueryCursor(BigQueryBaseCursor):
"""
A very basic BigQuery PEP 249 cursor implementation. The PyHive PEP 249
implementation was used as a reference:
https://github.com/dropbox/PyHive/blob/master/pyhive/presto.py
https://github.com/dropbox/PyHive/blob/master/pyhive/common.py
"""
def __init__(self, service, project_id):
super(BigQueryCursor, self).__init__(service=service, project_id=project_id)
self.buffersize = None
self.page_token = None
self.job_id = None
self.buffer = []
self.all_pages_loaded = False
@property
def description(self):
""" The schema description method is not currently implemented. """
raise NotImplementedError
def close(self):
""" By default, do nothing """
pass
@property
def rowcount(self):
""" By default, return -1 to indicate that this is not supported. """
return -1
def execute(self, operation, parameters=None):
"""
Executes a BigQuery query, and returns the job ID.
:param operation: The query to execute.
:type operation: string
:param parameters: Parameters to substitute into the query.
:type parameters: dict
"""
bql = _bind_parameters(operation, parameters) if parameters else operation
self.job_id = self.run_query(bql)
def executemany(self, operation, seq_of_parameters):
"""
Execute a BigQuery query multiple times with different parameters.
:param operation: The query to execute.
:type operation: string
:param parameters: List of dictionary parameters to substitute into the
query.
:type parameters: list
"""
for parameters in seq_of_parameters:
self.execute(operation, parameters)
def fetchone(self):
""" Fetch the next row of a query result set. """
return self.next()
def next(self):
"""
Helper method for fetchone, which returns the next row from a buffer.
If the buffer is empty, attempts to paginate through the result set for
the next page, and load it into the buffer.
"""
if not self.job_id:
return None
if len(self.buffer) == 0:
if self.all_pages_loaded:
return None
query_results = (
self.service.jobs()
.getQueryResults(
projectId=self.project_id,
jobId=self.job_id,
pageToken=self.page_token)
.execute()
)
if 'rows' in query_results and query_results['rows']:
self.page_token = query_results.get('pageToken')
fields = query_results['schema']['fields']
col_types = [field['type'] for field in fields]
rows = query_results['rows']
for dict_row in rows:
typed_row = ([
_bq_cast(vs['v'], col_types[idx])
for idx, vs in enumerate(dict_row['f'])
])
self.buffer.append(typed_row)
if not self.page_token:
self.all_pages_loaded = True
else:
# Reset all state since we've exhausted the results.
self.page_token = None
self.job_id = None
self.page_token = None
return None
return self.buffer.pop(0)
def fetchmany(self, size=None):
"""
Fetch the next set of rows of a query result, returning a sequence of sequences (e.g. a
list of tuples). An empty sequence is returned when no more rows are available.
The number of rows to fetch per call is specified by the parameter. If it is not given, the
cursor's arraysize determines the number of rows to be fetched. The method should try to
fetch as many rows as indicated by the size parameter. If this is not possible due to the
specified number of rows not being available, fewer rows may be returned.
An :py:class:`~pyhive.exc.Error` (or subclass) exception is raised if the previous call to
:py:meth:`execute` did not produce any result set or no call was issued yet.
"""
if size is None:
size = self.arraysize
result = []
for _ in range(size):
one = self.fetchone()
if one is None:
break
else:
result.append(one)
return result
def fetchall(self):
"""
Fetch all (remaining) rows of a query result, returning them as a sequence of sequences
(e.g. a list of tuples).
"""
result = []
while True:
one = self.fetchone()
if one is None:
break
else:
result.append(one)
return result
def get_arraysize(self):
""" Specifies the number of rows to fetch at a time with .fetchmany() """
return self._buffersize if self.buffersize else 1
def set_arraysize(self, arraysize):
""" Specifies the number of rows to fetch at a time with .fetchmany() """
self.buffersize = arraysize
arraysize = property(get_arraysize, set_arraysize)
def setinputsizes(self, sizes):
""" Does nothing by default """
pass
def setoutputsize(self, size, column=None):
""" Does nothing by default """
pass
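# Illustrative PEP 249-style use of the cursor above. The project, dataset and
# SQL are placeholders, and a configured 'bigquery_default' Airflow connection
# is assumed; parameters are bound with the %(name)s syntax handled below.
def _example_cursor_usage():
    hook = BigQueryHook(bigquery_conn_id='bigquery_default')
    conn = hook.get_conn()        # BigQueryConnection
    cursor = conn.cursor()        # BigQueryCursor
    cursor.execute(
        'SELECT name, value FROM my_dataset.my_table WHERE value > %(threshold)s',
        {'threshold': 10})
    for row in cursor.fetchmany(size=5):
        print(row)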
def _bind_parameters(operation, parameters):
""" Helper method that binds parameters to a SQL query. """
# inspired by MySQL Python Connector (conversion.py)
string_parameters = {}
for (name, value) in parameters.items():
if value is None:
string_parameters[name] = 'NULL'
elif isinstance(value, basestring):
string_parameters[name] = "'" + _escape(value) + "'"
else:
string_parameters[name] = str(value)
return operation % string_parameters
def _escape(s):
""" Helper method that escapes parameters to a SQL query. """
e = s
e = e.replace('\\', '\\\\')
e = e.replace('\n', '\\n')
e = e.replace('\r', '\\r')
e = e.replace("'", "\\'")
e = e.replace('"', '\\"')
return e
def _bq_cast(string_field, bq_type):
"""
Helper method that casts a BigQuery row to the appropriate data types.
This is useful because BigQuery returns all fields as strings.
"""
if string_field is None:
return None
elif bq_type == 'INTEGER' or bq_type == 'TIMESTAMP':
return int(string_field)
elif bq_type == 'FLOAT':
return float(string_field)
elif bq_type == 'BOOLEAN':
assert string_field in set(['true', 'false'])
return string_field == 'true'
else:
return string_field
def _split_tablename(table_input, default_project_id, var_name=None):
assert default_project_id is not None, "INTERNAL: No default project is specified"
def var_print(var_name):
if var_name is None:
return ""
else:
return "Format exception for {var}: ".format(var=var_name)
cmpt = table_input.split(':')
if len(cmpt) == 1:
project_id = None
rest = cmpt[0]
elif len(cmpt) == 2:
project_id = cmpt[0]
rest = cmpt[1]
else:
raise Exception((
'{var}Expect format of (<project>:)<dataset>.<table>, '
'got {input}'
).format(var=var_print(var_name), input=table_input))
cmpt = rest.split('.')
if len(cmpt) == 3:
assert project_id is None, (
"{var}Use either : or . to specify project"
).format(var=var_print(var_name))
project_id = cmpt[0]
dataset_id = cmpt[1]
table_id = cmpt[2]
elif len(cmpt) == 2:
dataset_id = cmpt[0]
table_id = cmpt[1]
else:
raise Exception((
'{var}Expect format of (<project>.|<project>:)<dataset>.<table>, '
'got {input}'
).format(var=var_print(var_name), input=table_input))
if project_id is None:
if var_name is not None:
logging.info(
'project not included in {var}: '
'{input}; using project "{project}"'.format(
var=var_name, input=table_input, project=default_project_id))
project_id = default_project_id
return project_id, dataset_id, table_id
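# Quick, self-contained sanity checks for the module-level helpers above; the
# table names are placeholders and the expected results follow from the parsing
# rules implemented in _split_tablename and _bq_cast.
def _example_helper_checks():
    assert _split_tablename('alt-project:my_dataset.my_table', 'default-project') == \
        ('alt-project', 'my_dataset', 'my_table')
    assert _split_tablename('my_dataset.my_table', 'default-project') == \
        ('default-project', 'my_dataset', 'my_table')
    assert _bq_cast('42', 'INTEGER') == 42
    assert _bq_cast('true', 'BOOLEAN') is True
    assert _bq_cast(None, 'STRING') is None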
| 39.795455
| 99
| 0.604483
|
6a9d3727fac72c95f23838e45e0ddcbcadeab746
| 7,923
|
py
|
Python
|
features/steps/knowledge-source-steps.py
|
YaphetKG/translator-testing-framework
|
e6125d8c35a90588c8b5701e340e427a8e0991c9
|
[
"BSD-3-Clause"
] | 5
|
2019-08-06T18:09:02.000Z
|
2020-07-16T19:58:31.000Z
|
features/steps/knowledge-source-steps.py
|
YaphetKG/translator-testing-framework
|
e6125d8c35a90588c8b5701e340e427a8e0991c9
|
[
"BSD-3-Clause"
] | 1
|
2019-08-20T21:12:18.000Z
|
2019-08-20T21:12:18.000Z
|
features/steps/knowledge-source-steps.py
|
YaphetKG/translator-testing-framework
|
e6125d8c35a90588c8b5701e340e427a8e0991c9
|
[
"BSD-3-Clause"
] | 15
|
2019-07-10T18:10:32.000Z
|
2020-01-13T16:22:45.000Z
|
from behave import given, when, then
import json
import requests
import jsonpath_rw
import logging
from contextlib import closing
"""
Knowledge-source tests
"""
@given('a knowledge source at "{url}"')
def step_impl(context, url):
"""
Given a base URL of a knowledge source
"""
context.base_url = url
@when('we fire "{query}" query')
def step_impl(context, query):
"""
Fire a knowledge-source query
"""
url = context.base_url+query
print('url:',url,'\n')
with closing(requests.get(url)) as response:
context.response = response
context.response_json = response.json()
@when('we fire "{query}" query with the following body')
def step_impl(context, query):
"""
Fire a knowledge-source query
"""
url = context.base_url+query
print('url:',url,'\n')
with closing(requests.post(url, json=json.loads(context.text))) as response:
context.response = response
context.response_json = response.json()
@then('the response contains the following entries in "{key}" of "{parent}"')
def step_impl(context, key, parent):
"""
This step checks whether all values specified in the test are contained in the response
"""
entries = set()
print('Collected entries:')
for entry in context.response_json:
print(' ', entry[parent][key])
entries.add(entry[parent][key])
print('Tested entries:')
for row in context.table:
print(' ', row[key])
assert row[key] in entries
def _get_collected_entries(field_value):
collected_entries = set()
# Some fields may be a list of values
if isinstance(field_value, list):
for item in field_value:
print(' ', item)
collected_entries.add(item)
else: # assume a simple scalar
print(' ', field_value)
collected_entries.add(field_value)
return collected_entries
def _aggregate_collected_entries(context, key):
collected_entries = set()
for entry in context.response_json:
field_value = entry[key]
[collected_entries.add(e) for e in _get_collected_entries(field_value)]
return collected_entries
@then('the response contains the following entries in the field "{key}"')
def step_impl(context, key):
"""
This step checks whether all values specified in the test are contained within the field of the response
"""
print('Collected entries:')
field_value = context.response_json[key]
collected_entries = _get_collected_entries(field_value)
print('Tested entries:')
for row in context.table:
value = row[key]
print(' ', value)
assert value in collected_entries
@then('some entry in the response contains "{value}" in field "{key}"')
def step_impl(context, value, key):
"""
This step checks whether the given value is contained in some entry of the response
"""
print('Collected entries:')
collected_entries = _aggregate_collected_entries(context, key)
print('Tested entry:')
print(' ', value)
assert value in collected_entries
@then('some entry in the response contains one of the following values in field "{key}"')
def step_impl(context, key):
"""
This step checks whether all values specified in the test are contained in the response
"""
print('Collected entries:')
collected_entries = _aggregate_collected_entries(context, key)
print('Tested entries:')
for row in context.table:
value = row[key]
print(' ', value)
assert value in collected_entries
@then('the response entries contain the following entries in the field "{key}"')
def step_impl(context, key):
"""
This step checks whether all values specified in the test are contained within the field of the response
"""
print('Collected entries:')
collected_entries = _aggregate_collected_entries(context, key)
print('Tested entries:')
for row in context.table:
value = row[key]
print(' ', value)
assert value in collected_entries
@then('the response only contains the following entries in "{key}" of "{parent}"')
def step_impl(context, key, parent):
"""
This step checks whether all values found in the response are contained in the test table
"""
collected_entries = set()
print('Collected entries:')
for row in context.table:
field_value = row[key]
# Some fields may be a list of values
if isinstance(field_value, list):
for item in field_value:
print(' ', item)
collected_entries.add(item)
else: # assume a simple scalar
print(' ', field_value)
collected_entries.add(field_value)
print('Tested entries:')
tested_entries = set()
for entry in context.response_json:
field_value = entry.get(parent).get(key)
if isinstance(field_value, list):
for item in field_value:
tested_entries.add(item)
else: # assume a simple scalar
tested_entries.add(field_value)
for item in tested_entries:
print(' ', item)
assert item in collected_entries
@then('the response only contains the following entries in "{key}"')
def step_impl(context, key):
"""
This step checks whether all values found in the response are contained in the test table
"""
entries = set()
print('Collected entries:')
for row in context.table:
print(' ', row[key])
entries.add(row[key])
print('Tested entries:')
for entry in context.response_json:
print(' ', entry[key])
assert entry[key] in entries
@then('the size of the response is {size}')
def step_impl(context, size):
"""
This step checks the size of the response
"""
assert len(context.response_json) == int(size)
@then('the size of entry "{key}" is {size}')
def step_impl(context, key, size):
"""
This step checks the size of the response
"""
assert len(context.response_json[key]) == int(size)
@then('the response should have a field "{field}" with "{data_type}" "{value}"')
def step_impl(context, field, data_type, value):
"""
The response should have a result with a field containing a defined value of a specified data type.
"""
result = context.response_json
field_expr = jsonpath_rw.parse(field)
fields = field_expr.find(result)
assert len(fields) != 0
if data_type == "string":
value = str(value)
elif data_type == "integer":
value = int(value)
elif data_type == "float":
value = float(value)
elif data_type == "boolean":
value = eval(value)
else:
logging.error("Unhandled data_type: {}".format(data_type))
assert False
is_found = False
for f in fields:
if f.value == value:
is_found = True
break
assert is_found is True
@then('the response should have some entry with field "{field}" with "{data_type}" "{value}"')
def step_impl(context, field, data_type, value):
"""
The response should have some entry with a field containing a defined value of a specified data type.
"""
field_expr = jsonpath_rw.parse(field)
for entry in context.response_json:
results = field_expr.find(entry)
assert len(results) != 0
if data_type == "string":
value = str(value)
elif data_type == "integer":
value = int(value)
elif data_type == "float":
value = float(value)
elif data_type == "boolean":
value = eval(value)
else:
logging.error("Unhandled data_type: {}".format(data_type))
assert False
is_found = False
for r in results:
if r.value == value:
is_found = True
break
assert is_found is True
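# Standalone illustration of the jsonpath_rw lookups the steps above rely on;
# the document structure and identifiers are made up for the example.
def _example_jsonpath_lookup():
    doc = {'knowledge_graph': {'nodes': [{'id': 'MONDO:0005737'}, {'id': 'CHEBI:15365'}]}}
    matches = jsonpath_rw.parse('knowledge_graph.nodes[*].id').find(doc)
    return [m.value for m in matches]   # ['MONDO:0005737', 'CHEBI:15365']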
| 30.011364
| 108
| 0.641929
|
35186548e4c5ef295c999ff259cf2ca3bb56e5ea
| 10,048
|
py
|
Python
|
nova_powervm/virt/powervm/event.py
|
openstack/nova-powervm
|
376d9493e2a10313068508daf9054d7ecf6d121f
|
[
"Apache-2.0"
] | 24
|
2015-10-18T02:55:20.000Z
|
2021-11-17T11:43:51.000Z
|
nova_powervm/virt/powervm/event.py
|
openstack/nova-powervm
|
376d9493e2a10313068508daf9054d7ecf6d121f
|
[
"Apache-2.0"
] | null | null | null |
nova_powervm/virt/powervm/event.py
|
openstack/nova-powervm
|
376d9493e2a10313068508daf9054d7ecf6d121f
|
[
"Apache-2.0"
] | 12
|
2015-10-26T17:38:05.000Z
|
2021-07-21T12:45:19.000Z
|
# Copyright 2014, 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet import greenthread
from nova.compute import power_state
from nova.compute import task_states
from nova import context as ctx
from nova import exception
from nova.virt import event
from oslo_concurrency import lockutils
from oslo_log import log as logging
from pypowervm import adapter as pvm_apt
from pypowervm import util as pvm_util
from pypowervm.wrappers import event as pvm_evt
from nova_powervm.virt.powervm import vm
LOG = logging.getLogger(__name__)
_INST_ACTIONS_HANDLED = {'PartitionState', 'NVRAM'}
_NO_EVENT_TASK_STATES = {
task_states.SPAWNING,
task_states.RESIZE_MIGRATING,
task_states.RESIZE_REVERTING,
task_states.REBOOTING,
task_states.REBOOTING_HARD,
task_states.REBOOT_STARTED_HARD,
task_states.PAUSING,
task_states.UNPAUSING,
task_states.SUSPENDING,
task_states.RESUMING,
task_states.POWERING_OFF,
task_states.POWERING_ON,
task_states.RESCUING,
task_states.UNRESCUING,
task_states.REBUILDING,
task_states.REBUILD_SPAWNING,
task_states.MIGRATING,
task_states.DELETING,
task_states.SOFT_DELETING,
task_states.RESTORING,
task_states.SHELVING,
task_states.SHELVING_OFFLOADING,
task_states.UNSHELVING,
}
_LIFECYCLE_EVT_LOCK = 'pvm_lifecycle_event'
_CONTEXT = None
def _get_instance(inst, pvm_uuid):
global _CONTEXT
if inst is not None:
return inst
with lockutils.lock('get_context_once'):
if _CONTEXT is None:
_CONTEXT = ctx.get_admin_context()
LOG.debug('PowerVM Nova Event Handler: Getting inst for id %s', pvm_uuid)
return vm.get_instance(_CONTEXT, pvm_uuid)
class PowerVMNovaEventHandler(pvm_apt.WrapperEventHandler):
"""Used to receive and handle events from PowerVM and convert to Nova."""
def __init__(self, driver):
self._driver = driver
self._lifecycle_handler = PowerVMLifecycleEventHandler(self._driver)
self._uuid_cache = {}
def _get_inst_uuid(self, inst, pvm_uuid):
"""Retrieve instance UUID from cache keyed by the PVM UUID.
:param inst: the instance object.
:param pvm_uuid: the PowerVM uuid of the vm
:return inst: the instance object.
:return inst_uuid: The nova instance uuid
"""
inst_uuid = self._uuid_cache.get(pvm_uuid)
if not inst_uuid:
inst = _get_instance(inst, pvm_uuid)
inst_uuid = inst.uuid if inst else None
if inst_uuid:
self._uuid_cache[pvm_uuid] = inst_uuid
return inst, inst_uuid
def _handle_inst_event(self, inst, pvm_uuid, details):
"""Handle an instance event.
This method will check if an instance event signals a change in the
state of the instance as known to OpenStack and if so, trigger an
event upward.
:param inst: the instance object.
:param pvm_uuid: the PowerVM uuid of the vm
:param details: Parsed Details from the event
:return inst: The nova instance, which may be None
"""
# If the NVRAM has changed for this instance and a store is configured.
if 'NVRAM' in details and self._driver.nvram_mgr is not None:
# Schedule the NVRAM for the instance to be stored.
# We'll need to fetch the instance object in the event we don't
# have the object and the UUID isn't cached. By updating the
# object reference here and returning it the process method will
# save the object in its cache.
inst, inst_uuid = self._get_inst_uuid(inst, pvm_uuid)
if inst_uuid is None:
return None
LOG.debug('Handle NVRAM event for PowerVM LPAR %s', pvm_uuid)
self._driver.nvram_mgr.store(inst_uuid)
# If the state of the vm changed see if it should be handled
if 'PartitionState' in details:
self._lifecycle_handler.process(inst, pvm_uuid)
return inst
def process(self, events):
"""Process the event that comes back from PowerVM.
:param events: The pypowervm Event wrapper.
"""
inst_cache = {}
for pvm_event in events:
try:
if pvm_event.etype in (pvm_evt.EventType.NEW_CLIENT,
pvm_evt.EventType.CACHE_CLEARED):
# TODO(efried): Should we pull and check all the LPARs?
self._uuid_cache.clear()
continue
# See if this uri (from data) ends with a PowerVM UUID.
pvm_uuid = pvm_util.get_req_path_uuid(
pvm_event.data, preserve_case=True)
if pvm_uuid is None:
continue
# Is it an instance event?
if not pvm_event.data.endswith('LogicalPartition/' + pvm_uuid):
continue
# Are we deleting? Meaning we need to clear the cache entry.
if pvm_event.etype == pvm_evt.EventType.DELETE_URI:
try:
del self._uuid_cache[pvm_uuid]
except KeyError:
pass
continue
# Pull all the pieces of the event.
details = (pvm_event.detail.split(',') if pvm_event.detail
else [])
# Is it one we care about?
if not _INST_ACTIONS_HANDLED & set(details):
continue
inst_cache[pvm_event.data] = self._handle_inst_event(
inst_cache.get(pvm_event.data), pvm_uuid, details)
except Exception:
# We deliberately keep this exception clause as broad as
# possible - we don't want *any* error to stop us from
# attempting to process the next event.
LOG.exception('Unable to process PowerVM event %s',
str(pvm_event))
class PowerVMLifecycleEventHandler(object):
"""Because lifecycle events are weird, we need our own handler.
Lifecycle events that come back from the hypervisor are very 'surface
value'. They tell you that it started, stopped, migrated, etc... However,
multiple events may come in quickly that represent a bigger action. For
instance a restart will generate a stop and then a start rapidly.
Nova, being asynchronous, can flip those events around so that the start
would flow through before the stop. That is bad.
We need to make sure that these events that can be linked to bigger
lifecycle events can be wiped out if the converse action is run against
it. Ex. Don't send a stop event up to nova if you received a start event
shortly after it.
"""
def __init__(self, driver):
self._driver = driver
self._delayed_event_threads = {}
@lockutils.synchronized(_LIFECYCLE_EVT_LOCK)
def _emit_event(self, pvm_uuid, inst):
# Get the current state
try:
pvm_state = vm.get_vm_qp(self._driver.adapter, pvm_uuid,
'PartitionState')
except exception.InstanceNotFound:
LOG.debug("LPAR %s was deleted while event was delayed.", pvm_uuid,
instance=inst)
return
LOG.debug('New state %s for partition %s', pvm_state, pvm_uuid,
instance=inst)
inst = _get_instance(inst, pvm_uuid)
if inst is None:
LOG.debug("Not emitting LifecycleEvent: no instance for LPAR %s",
pvm_uuid)
return
# If we're in the middle of a nova-driven operation, no event necessary
if inst.task_state in _NO_EVENT_TASK_STATES:
LOG.debug("Not emitting LifecycleEvent: instance task_state is %s",
inst.task_state, instance=inst)
return
# See if it's really a change of state from what OpenStack knows
transition = vm.translate_event(pvm_state, inst.power_state)
if transition is None:
LOG.debug("No LifecycleEvent necessary for pvm_state(%s) and "
"power_state(%s).", pvm_state,
power_state.STATE_MAP[inst.power_state], instance=inst)
return
# Log as if normal event
lce = event.LifecycleEvent(inst.uuid, transition)
LOG.info('Sending LifecycleEvent for instance state change to: %s',
pvm_state, instance=inst)
self._driver.emit_event(lce)
# Delete out the queue
del self._delayed_event_threads[pvm_uuid]
@lockutils.synchronized(_LIFECYCLE_EVT_LOCK)
def process(self, inst, pvm_uuid):
"""Emits the event, or adds it to the queue for delayed emission.
:param inst: The nova instance. May be None.
:param pvm_uuid: The PowerVM LPAR UUID.
"""
        # Cancel the current delayed event. As a partition goes from
        # SHUTTING_DOWN to NOT_ACTIVATED, multiple delayed events can come in
        # at once; we only want the last one.
if pvm_uuid in self._delayed_event_threads:
self._delayed_event_threads[pvm_uuid].cancel()
# Spawn in the background
elem = greenthread.spawn_after(15, self._emit_event, pvm_uuid, inst)
self._delayed_event_threads[pvm_uuid] = elem
| 38.945736
| 79
| 0.640625
|
97cad1e3d62d043708cb4a9ee2802ac046580b1d
| 559
|
py
|
Python
|
davarocr/davarocr/davar_rcg/models/recognizors/__init__.py
|
icedream2/DAVAR-Lab-OCR
|
c8b82f45516850eeadcab2739fb2a4292f2fdca1
|
[
"Apache-2.0"
] | 387
|
2021-01-02T07:50:15.000Z
|
2022-03-31T04:30:03.000Z
|
davarocr/davarocr/davar_rcg/models/recognizors/__init__.py
|
icedream2/DAVAR-Lab-OCR
|
c8b82f45516850eeadcab2739fb2a4292f2fdca1
|
[
"Apache-2.0"
] | 70
|
2021-05-04T18:28:18.000Z
|
2022-03-31T14:14:52.000Z
|
davarocr/davarocr/davar_rcg/models/recognizors/__init__.py
|
icedream2/DAVAR-Lab-OCR
|
c8b82f45516850eeadcab2739fb2a4292f2fdca1
|
[
"Apache-2.0"
] | 83
|
2021-01-05T08:28:26.000Z
|
2022-03-31T07:14:03.000Z
|
"""
##################################################################################################
# Copyright Info : Copyright (c) Davar Lab @ Hikvision Research Institute. All rights reserved.
# Filename : __init__.py
# Abstract :
# Current Version: 1.0.0
# Date : 2021-05-01
##################################################################################################
"""
from .general import GeneralRecognizor
from .rf_learning import RFLRecognizor
__all__ = [
'GeneralRecognizor',
'RFLRecognizor',
]
| 31.055556
| 98
| 0.418605
|
2beb532b8d9deb40b0e8fb339fd926126f15f204
| 1,739
|
py
|
Python
|
CAPG/Reacher/capg_swimmer.py
|
Mee321/HAPG_exp
|
ccd0d92ad2ffcd8438efbd6bc09123a4c3aafabe
|
[
"MIT"
] | null | null | null |
CAPG/Reacher/capg_swimmer.py
|
Mee321/HAPG_exp
|
ccd0d92ad2ffcd8438efbd6bc09123a4c3aafabe
|
[
"MIT"
] | null | null | null |
CAPG/Reacher/capg_swimmer.py
|
Mee321/HAPG_exp
|
ccd0d92ad2ffcd8438efbd6bc09123a4c3aafabe
|
[
"MIT"
] | null | null | null |
import gym
from garage.baselines import LinearFeatureBaseline
from garage.theano.baselines import GaussianMLPBaseline
from garage.baselines import ZeroBaseline
from garage.envs import normalize
from garage.envs.box2d import CartpoleEnv
from garage.envs.mujoco import SwimmerEnv
from garage.theano.algos.capg import CAPG
from garage.theano.envs import TheanoEnv
from garage.theano.policies import GaussianMLPPolicy
from garage.misc.instrument import run_experiment
from garage.misc.ext import set_seed
import numpy as np
for i in range(5):
seed = np.random.randint(1,10000)
env_name = "CAPG_Reacher"
hidden_sizes = (32, 32)
env = TheanoEnv(normalize(gym.make("Reacher-v2")))
policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=hidden_sizes)
backup_policy = GaussianMLPPolicy(env.spec, hidden_sizes=hidden_sizes)
mix_policy = GaussianMLPPolicy(env.spec, hidden_sizes=hidden_sizes)
pos_eps_policy = GaussianMLPPolicy(env.spec, hidden_sizes=hidden_sizes)
neg_eps_policy = GaussianMLPPolicy(env.spec, hidden_sizes=hidden_sizes)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = CAPG(
env=env,
policy=policy,
backup_policy=backup_policy,
mix_policy=mix_policy,
pos_eps_policy=pos_eps_policy,
neg_eps_policy=neg_eps_policy,
n_timestep=1e6,
learning_rate=0.02,
batch_size=5000,
minibatch_size=500,
n_sub_itr = 10,
center_adv=True,
baseline=baseline,
max_path_length=500,
decay_learing_rate=True,
discount=0.99,
log_dir='./capg/' + env_name + "seed" + str(seed) + '/',
)
algo.train()
| 34.78
| 76
| 0.734905
|
909b284aa8efb4729ee3fd36cbe1a608dbafb85a
| 7,161
|
py
|
Python
|
braindrAnalysis/braindrAnalysis.py
|
amitasviper/braindr-analysis
|
e16ef5042d8b369be7ac68b160dd3f886e3ddf6d
|
[
"MIT"
] | 1
|
2018-08-10T16:33:17.000Z
|
2018-08-10T16:33:17.000Z
|
braindrAnalysis/braindrAnalysis.py
|
amitasviper/braindr-analysis
|
e16ef5042d8b369be7ac68b160dd3f886e3ddf6d
|
[
"MIT"
] | null | null | null |
braindrAnalysis/braindrAnalysis.py
|
amitasviper/braindr-analysis
|
e16ef5042d8b369be7ac68b160dd3f886e3ddf6d
|
[
"MIT"
] | 3
|
2018-07-10T18:00:24.000Z
|
2019-08-31T22:53:43.000Z
|
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
import simplejson as json
import os
from .due import due, Doi
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
__all__ = ["aggregate_braindr_votes", "model"]
# Use duecredit (duecredit.org) to provide a citation to relevant work to
# be cited. This does nothing, unless the user has duecredit installed,
# And calls this with duecredit (as in `python -m duecredit script.py`):
due.cite(Doi("10.1167/13.9.30"),
description="Analysis for braindr",
tags=["reference-implementation"],
path='braindr-results')
log = {}
def model(bdr_pivot, learning_rates=[0.1], n_estimators=[200], max_depth=[2],
test_size=0.33):
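    # Note (added for clarity, not in the original source): fits an
    # XGBClassifier with a grid search over the per-user vote matrix and
    # returns the fitted GridSearchCV object; intermediate results are
    # stashed in the module-level `log` dict.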
# bdr_pivot = pd.DataFrame(braindr_pivot)
X = bdr_pivot[[c for c in bdr_pivot.columns if c not in ['plain_average',
'truth']]].values
y = bdr_pivot.truth.values
log["X_shape"] = X.shape
log['y_shape'] = y.shape
seed = 7
# test_size = 0.33
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=test_size,
random_state=seed,
stratify=y)
log['X_train_shape'] = X_train.shape
# make sure everyone has a vote in the train and test
assert(np.isfinite(X_train).sum(0).all()), 'not everyone has a vote'
assert(np.isfinite(X_test).sum(0).all()), 'not everyone has a vote'
model = XGBClassifier()
# run the grid search
param_grid = dict(learning_rate=learning_rates,
max_depth=max_depth,
n_estimators=n_estimators)
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
grid_search = GridSearchCV(model, param_grid, scoring="neg_log_loss",
n_jobs=-1, cv=kfold)
grid_result = grid_search.fit(X_train, y_train)
# results
log["Best: %f using %s"] = (grid_result.best_score_,
grid_result.best_params_)
y_pred_prob = grid_result.predict_proba(X_test)[:, 1]
log['y_pred_prob'] = y_pred_prob.tolist()
log["y_test"] = y_test.tolist()
B = grid_result.best_estimator_.get_booster()
fscores = B.get_fscore()
fdf = pd.DataFrame([fscores]).T.rename(columns={0: 'F'})
not_col = ['plain_average', 'truth']
users = [c for c in bdr_pivot.columns if c not in not_col]
fdf['user'] = fdf.index.map(lambda x: users[int(x[1:])])
fdf.sort_values('F', inplace=True)
log['user_importance'] = fdf[::-1].to_json(orient='records')
return grid_result
def aggregate_braindr_votes(braindr_data, pass_labels, fail_labels,
learning_rates=[0.1], n_estimators=[200],
max_depth=[2], test_size=0.33):
"""
Function that aggregates braindr data using the XGBoost model
Parameters
----------
braindr_data : string.
This is the path to the braindr data downloaded from firebase or a URL
to the data
pass_labels : list of strings
a list of names that are considered passing
fail_labels : list of strings
a list of names that are considered failing
learning_rates : list of floats
a list of learning rates to grid search in XGBoost
n_estimators : list of ints
a list of number of estimators to grid search in XGBoost
max_depth : list of ints
a list of maximum tree depth for to grid search in XGBoost
test_size : float
fraction of data to put into test set
Returns
-------
    log : dict
        dictionary of logged intermediate results; the aggregated labels are
        stored as JSON under the 'output' key ('average_label' and
        'xgboost_label' columns)
"""
assert isinstance(braindr_data, str), "input a string path to\
braindr_data"
if braindr_data.startswith('http'):
braindr_df = pd.read_csv(braindr_data)
else:
assert(os.path.exists(braindr_data)), "please give a valid path\
to braindr data"
braindr_df = pd.read_table(braindr_data)
braindr_df['subject_name'] = braindr_df.image_id.map(lambda x: x.split('__')[0])
braindr_df_pass_subset = braindr_df[braindr_df.subject_name.isin(pass_labels)]
braindr_df_fail_subset = braindr_df[braindr_df.subject_name.isin(fail_labels)]
braindr_df_pass_subset['truth'] = 1
braindr_df_fail_subset['truth'] = 0
braindr_subset = braindr_df_pass_subset.append(braindr_df_fail_subset,
ignore_index=True)
# count users contributions
user_counts = braindr_subset.groupby('username')\
.apply(lambda x: x.shape[0])
username_keep = user_counts[user_counts >= user_counts.describe()['75%']]\
.index.values
bdr = braindr_subset[braindr_subset.username.isin(username_keep)]
bdr_pivot = braindr_subset.pivot_table(columns="username",
index='image_id',
values='vote',
aggfunc=np.mean)
uname_img_counts = pd.DataFrame()
for uname in bdr_pivot.columns:
uname_img_counts.loc[uname, 'counts'] = (pd.isnull(bdr_pivot[uname]) == False).sum()
username_keep = uname_img_counts[uname_img_counts.counts >= uname_img_counts.describe().loc['75%']['counts']]
username_keep = username_keep.index.values
bdr = braindr_subset[braindr_subset.username.isin(username_keep)]
bdr_pivot = bdr.pivot_table(columns="username", index='image_id',
values='vote', aggfunc=np.mean)
truth_vals = bdr.groupby('image_id').apply(lambda x: x.truth.values[0])
bdr_pivot['truth'] = truth_vals
plain_avg = bdr_pivot[bdr_pivot.columns[:-1]].mean(1)
bdr_pivot['plain_average'] = plain_avg
log['bdr_pivot'] = bdr_pivot.to_json(orient='columns')
grid_result = model(bdr_pivot, learning_rates=learning_rates,
n_estimators=n_estimators, max_depth=max_depth,
test_size=test_size)
modelUsers = [c for c in bdr_pivot.columns if c not in ['plain_average',
'truth']]
braindr_full_pivot = braindr_df[braindr_df.username.isin(modelUsers)]\
.pivot_table(columns='username', index='image_id',
values='vote', aggfunc=np.mean)
# braindr_full_pivot = braindr_full_pivot[modelUsers]
log['braindr_full_pivot_shape'] = braindr_full_pivot.shape
X_all = braindr_full_pivot.values
y_all_pred = grid_result.best_estimator_.predict_proba(X_all)
# model.predict_proba(X_all)
plain_avg = braindr_full_pivot.mean(1)
braindr_full_pivot['average_label'] = plain_avg
braindr_full_pivot['xgboost_label'] = y_all_pred[:, 1]
log['output'] = braindr_full_pivot.to_json(orient='columns')
return log # braindr_full_pivot.to_json(orient='columns')
| 40.230337
| 113
| 0.641391
|
30b9b4ecdbc1d610f0347ce3be7152ab0e1299d3
| 18,639
|
py
|
Python
|
dpnp/dpnp_iface_statistics.py
|
Rubtsowa/dpnp
|
ef404c0f284b0c508ed1e556e140f02f76ae5551
|
[
"BSD-2-Clause"
] | null | null | null |
dpnp/dpnp_iface_statistics.py
|
Rubtsowa/dpnp
|
ef404c0f284b0c508ed1e556e140f02f76ae5551
|
[
"BSD-2-Clause"
] | null | null | null |
dpnp/dpnp_iface_statistics.py
|
Rubtsowa/dpnp
|
ef404c0f284b0c508ed1e556e140f02f76ae5551
|
[
"BSD-2-Clause"
] | null | null | null |
# cython: language_level=3
# distutils: language = c++
# -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright (c) 2016-2020, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
"""
Interface of the statistics function of the DPNP
Notes
-----
This module is a face or public interface file for the library
it contains:
- Interface functions
- documentation for the functions
- The functions parameters check
"""
import numpy
from dpnp.dpnp_algo import *
from dpnp.dparray import dparray
from dpnp.dpnp_utils import *
import dpnp
__all__ = [
'amax',
'amin',
'average',
'correlate',
'cov',
'max',
'mean',
'median',
'min',
'std',
'var',
]
def amax(input, axis=None, out=None):
"""
Return the maximum of an array or maximum along an axis.
For full documentation refer to :obj:`numpy.amax`.
See Also
--------
:obj:`dpnp.amin` : The minimum value of an array along a given axis,
propagating any NaNs.
:obj:`dpnp.nanmax` : The maximum value of an array along a given axis,
ignoring any NaNs.
:obj:`dpnp.maximum` : Element-wise maximum of two arrays,
propagating any NaNs.
:obj:`dpnp.fmax` : Element-wise maximum of two arrays, ignoring any NaNs.
:obj:`dpnp.argmax` : Return the indices of the maximum values.
:obj:`dpnp.nanmin` : Return minimum of an array or minimum along an axis,
ignoring any NaNs.
:obj:`dpnp.minimum` : Element-wise minimum of array elements.
:obj:`dpnp.fmin` : Element-wise minimum of array elements.
Notes
-----
This function works exactly the same as :obj:`dpnp.max`.
"""
return max(input, axis=axis, out=out)
def amin(input, axis=None, out=None):
"""
Return the minimum of an array or minimum along an axis.
For full documentation refer to :obj:`numpy.amin`.
See Also
--------
:obj:`dpnp.amax` : The maximum value of an array along a given axis,
propagating any NaNs.
:obj:`dpnp.nanmin` : Return minimum of an array or minimum along an axis,
ignoring any NaNs.
:obj:`dpnp.minimum` : Element-wise minimum of array elements.
:obj:`dpnp.fmin` : Element-wise minimum of array elements.
:obj:`dpnp.argmin` : Return the indices of the minimum values.
:obj:`dpnp.nanmax` : The maximum value of an array along a given axis,
ignoring any NaNs.
:obj:`dpnp.maximum` : Element-wise maximum of two arrays,
propagating any NaNs.
:obj:`dpnp.fmax` : Element-wise maximum of two arrays, ignoring any NaNs.
Notes
-----
This function works exactly the same as :obj:`dpnp.min`.
"""
return min(input, axis=axis, out=out)
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
For full documentation refer to :obj:`numpy.average`.
Limitations
-----------
Input array is supported as :obj:`dpnp.ndarray`.
    Parameters ``axis`` is supported only with default value ``None``.
    Parameters ``weights`` is supported only with default value ``None``.
    Parameters ``returned`` is supported only with default value ``False``.
Otherwise the function will be executed sequentially on CPU.
Input array data types are limited by supported DPNP :ref:`Data types`.
See Also
--------
:obj:`dpnp.mean` : Compute the arithmetic mean along the specified axis.
Examples
--------
>>> import dpnp as np
>>> data = np.arange(1, 5)
>>> [i for i in data]
[1, 2, 3, 4]
>>> np.average(data)
2.5
"""
if not use_origin_backend(a):
if not isinstance(a, dparray):
pass
elif axis is not None:
pass
elif weights is not None:
pass
elif returned:
pass
else:
array_avg = dpnp_average(a)
# scalar returned
if array_avg.shape == (1,):
return array_avg.dtype.type(array_avg[0])
return array_avg
return call_origin(numpy.average, a, axis, weights, returned)
def correlate(a, v, mode='valid'):
"""
Cross-correlation of two 1-dimensional sequences.
For full documentation refer to :obj:`numpy.correlate`.
Limitations
-----------
Input arrays are supported as :obj:`dpnp.ndarray`.
Size and shape of input arrays are supported to be equal.
    Parameters ``mode`` is supported only with default value ``"valid"``.
Otherwise the function will be executed sequentially on CPU.
Input array data types are limited by supported DPNP :ref:`Data types`.
See Also
--------
:obj:`dpnp.convolve` : Discrete, linear convolution of
two one-dimensional sequences.
Examples
--------
>>> import dpnp as np
>>> x = np.correlate([1, 2, 3], [0, 1, 0.5])
>>> [i for i in x]
[3.5]
"""
if not use_origin_backend(a):
if not isinstance(a, dparray):
pass
elif not isinstance(v, dparray):
pass
elif a.size != v.size or a.size == 0:
pass
elif a.shape != v.shape:
pass
elif mode != 'valid':
pass
else:
return dpnp_correlate(a, v)
return call_origin(numpy.correlate, a, v, mode=mode)
def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, aweights=None):
"""
Estimate a covariance matrix, given data and weights.
For full documentation refer to :obj:`numpy.cov`.
Limitations
-----------
Input array ``m`` is supported as :obj:`dpnp.ndarray`.
Dimension of input array ``m`` is limited by ``m.ndim > 2``.
Size and shape of input arrays are supported to be equal.
    Parameters ``y`` is supported only with default value ``None``.
    Parameters ``rowvar`` is supported only with default value ``True``.
    Parameters ``bias`` is supported only with default value ``False``.
    Parameters ``ddof`` is supported only with default value ``None``.
    Parameters ``fweights`` is supported only with default value ``None``.
    Parameters ``aweights`` is supported only with default value ``None``.
Otherwise the function will be executed sequentially on CPU.
Input array data types are limited by supported DPNP :ref:`Data types`.
.. seealso:: :obj:`dpnp.corrcoef` normalized covariance matrix.
Examples
--------
>>> import dpnp as np
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x.shape
(2, 3)
>>> [i for i in x]
[0, 1, 2, 2, 1, 0]
>>> out = np.cov(x)
>>> out.shape
(2, 2)
>>> [i for i in out]
[1.0, -1.0, -1.0, 1.0]
"""
if not use_origin_backend(m):
if not isinstance(m, dparray):
pass
elif m.ndim > 2:
pass
elif y is not None:
pass
elif not rowvar:
pass
elif bias:
pass
elif ddof is not None:
pass
elif fweights is not None:
pass
elif aweights is not None:
pass
else:
return dpnp_cov(m)
return call_origin(numpy.cov, m, y, rowvar, bias, ddof, fweights, aweights)
def max(input, axis=None, out=None, keepdims=numpy._NoValue, initial=numpy._NoValue, where=numpy._NoValue):
"""
Return the maximum of an array or maximum along an axis.
Limitations
-----------
Input array is supported as :obj:`dpnp.ndarray`.
Otherwise the function will be executed sequentially on CPU.
    Parameters ``out`` is supported only with default value ``None``.
Input array data types are limited by supported DPNP :ref:`Data types`.
Examples
--------
>>> import dpnp as np
>>> a = np.arange(4).reshape((2,2))
>>> a.shape
(2, 2)
>>> [i for i in a]
[0, 1, 2, 3]
>>> np.max(a)
3
"""
if not use_origin_backend(input):
if not isinstance(input, dparray):
pass
elif out is not None:
pass
elif keepdims is not numpy._NoValue:
pass
elif initial is not numpy._NoValue:
pass
elif where is not numpy._NoValue:
pass
else:
result = dpnp_max(input, axis=axis)
# scalar returned
if result.shape == (1,):
return result.dtype.type(result[0])
return result
return call_origin(numpy.max, input, axis, out, keepdims, initial, where)
def mean(a, axis=None, **kwargs):
"""
Compute the arithmetic mean along the specified axis.
For full documentation refer to :obj:`numpy.mean`.
Limitations
-----------
Input array is supported as :obj:`dpnp.ndarray`.
Keyword arguments ``kwargs`` are currently unsupported.
Size of input array is limited by ``a.size > 0``.
Otherwise the function will be executed sequentially on CPU.
Input array data types are limited by supported DPNP :ref:`Data types`.
See Also
--------
:obj:`dpnp.average` : Weighted average.
:obj:`dpnp.std` : Compute the standard deviation along the specified axis.
:obj:`dpnp.var` : Compute the variance along the specified axis.
:obj:`dpnp.nanmean` : Compute the arithmetic mean along the specified axis,
ignoring NaNs.
:obj:`dpnp.nanstd` : Compute the standard deviation along
the specified axis, while ignoring NaNs.
:obj:`dpnp.nanvar` : Compute the variance along the specified axis,
while ignoring NaNs.
Examples
--------
>>> import dpnp as np
>>> a = np.array([[1, 2], [3, 4]])
>>> np.mean(a)
2.5
"""
if not use_origin_backend(a) and not kwargs:
if not isinstance(a, dparray):
pass
elif a.size == 0:
pass
else:
result = dpnp_mean(a, axis=axis)
# scalar returned
if result.shape == (1,):
return result.dtype.type(result[0])
return result
return call_origin(numpy.mean, a, axis=axis, **kwargs)
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
"""
Compute the median along the specified axis.
For full documentation refer to :obj:`numpy.median`.
Limitations
-----------
Input array is supported as :obj:`dpnp.ndarray`.
    Parameters ``axis`` is supported only with default value ``None``.
    Parameters ``out`` is supported only with default value ``None``.
    Parameters ``overwrite_input`` is supported only with default value ``False``.
    Parameters ``keepdims`` is supported only with default value ``False``.
Otherwise the function will be executed sequentially on CPU.
Input array data types are limited by supported DPNP :ref:`Data types`.
See Also
--------
:obj:`dpnp.mean` : Compute the arithmetic mean along the specified axis.
:obj:`dpnp.percentile` : Compute the q-th percentile of the data
along the specified axis.
Examples
--------
>>> import dpnp as np
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> np.median(a)
3.5
"""
if not use_origin_backend(a):
if not isinstance(a, dparray):
pass
elif axis is not None:
pass
elif out is not None:
pass
elif overwrite_input:
pass
elif keepdims:
pass
else:
result = dpnp_median(a)
# scalar returned
if result.shape == (1,):
return result.dtype.type(result[0])
return result
return call_origin(numpy.median, a, axis, out, overwrite_input, keepdims)
def min(input, axis=None, out=None, keepdims=numpy._NoValue, initial=numpy._NoValue, where=numpy._NoValue):
"""
Return the minimum along a given axis.
Limitations
-----------
Input array is supported as :obj:`dpnp.ndarray`.
Otherwise the function will be executed sequentially on CPU.
    Parameters ``out`` is supported only with default value ``None``.
Input array data types are limited by supported DPNP :ref:`Data types`.
Examples
--------
>>> import dpnp as np
>>> a = np.arange(4).reshape((2,2))
>>> a.shape
(2, 2)
>>> [i for i in a]
[0, 1, 2, 3]
>>> np.min(a)
0
"""
if not use_origin_backend(input):
if not isinstance(input, dparray):
pass
elif out is not None:
pass
elif keepdims is not numpy._NoValue:
pass
elif initial is not numpy._NoValue:
pass
elif where is not numpy._NoValue:
pass
else:
result = dpnp_min(input, axis=axis)
# scalar returned
if result.shape == (1,):
return result.dtype.type(result[0])
return result
return call_origin(numpy.min, input, axis, out, keepdims, initial, where)
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=numpy._NoValue):
"""
Compute the standard deviation along the specified axis.
For full documentation refer to :obj:`numpy.std`.
Limitations
-----------
Input array is supported as :obj:`dpnp.ndarray`.
Size of input array is limited by ``a.size > 0``.
    Parameters ``axis`` is supported only with default value ``None``.
    Parameters ``dtype`` is supported only with default value ``None``.
    Parameters ``out`` is supported only with default value ``None``.
    Parameters ``keepdims`` is supported only with default value ``numpy._NoValue``.
Otherwise the function will be executed sequentially on CPU.
Input array data types are limited by supported DPNP :ref:`Data types`.
See Also
--------
:obj:`dpnp.var` : Compute the variance along the specified axis.
:obj:`dpnp.mean` : Compute the arithmetic mean along the specified axis.
:obj:`dpnp.nanmean` : Compute the arithmetic mean along the specified axis,
ignoring NaNs.
:obj:`dpnp.nanstd` : Compute the standard deviation along
the specified axis, while ignoring NaNs.
:obj:`dpnp.nanvar` : Compute the variance along the specified axis,
while ignoring NaNs.
Examples
--------
>>> import dpnp as np
>>> a = np.array([[1, 2], [3, 4]])
>>> np.std(a)
1.118033988749895
"""
if not use_origin_backend(a):
if not isinstance(a, dparray):
pass
elif a.size == 0:
pass
elif axis is not None:
pass
elif dtype is not None:
pass
elif out is not None:
pass
elif keepdims is not numpy._NoValue:
pass
else:
result = dpnp_std(a, ddof)
if result.shape == (1,):
return result.dtype.type(result[0])
return result
return call_origin(numpy.std, a, axis, dtype, out, ddof, keepdims)
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=numpy._NoValue):
"""
Compute the variance along the specified axis.
For full documentation refer to :obj:`numpy.var`.
Limitations
-----------
Input array is supported as :obj:`dpnp.ndarray`.
Size of input array is limited by ``a.size > 0``.
    Parameters ``axis`` is supported only with default value ``None``.
    Parameters ``dtype`` is supported only with default value ``None``.
    Parameters ``out`` is supported only with default value ``None``.
    Parameters ``keepdims`` is supported only with default value ``numpy._NoValue``.
Otherwise the function will be executed sequentially on CPU.
Input array data types are limited by supported DPNP :ref:`Data types`.
See Also
--------
:obj:`dpnp.std` : Compute the standard deviation along the specified axis.
:obj:`dpnp.mean` : Compute the arithmetic mean along the specified axis.
:obj:`dpnp.nanmean` : Compute the arithmetic mean along the specified axis,
ignoring NaNs.
:obj:`dpnp.nanstd` : Compute the standard deviation along
the specified axis, while ignoring NaNs.
:obj:`dpnp.nanvar` : Compute the variance along the specified axis,
while ignoring NaNs.
Examples
--------
>>> import dpnp as np
>>> a = np.array([[1, 2], [3, 4]])
>>> np.var(a)
1.25
"""
if not use_origin_backend(a):
if not isinstance(a, dparray):
pass
elif a.size == 0:
pass
elif axis is not None:
pass
elif dtype is not None:
pass
elif out is not None:
pass
elif keepdims is not numpy._NoValue:
pass
else:
result = dpnp_var(a, ddof)
if result.shape == (1,):
return result.dtype.type(result[0])
return result
return call_origin(numpy.var, a, axis, dtype, out, ddof, keepdims)
| 31.32605
| 107
| 0.597779
|
7675177ca8977aa2ed494932468ee28c1b5fc775
| 19,613
|
py
|
Python
|
hydsensread/file_reader/abstract_file_reader.py
|
cgq-qgc/HydroSensorReader
|
956e522102ad8a4be172123049a43de235123ef6
|
[
"MIT"
] | 1
|
2019-03-11T12:43:51.000Z
|
2019-03-11T12:43:51.000Z
|
hydsensread/file_reader/abstract_file_reader.py
|
cgq-qgc/HydroSensorReader
|
956e522102ad8a4be172123049a43de235123ef6
|
[
"MIT"
] | 19
|
2018-11-22T13:51:59.000Z
|
2022-01-18T15:47:43.000Z
|
hydsensread/file_reader/abstract_file_reader.py
|
cgq-qgc/HydroSensorReader
|
956e522102ad8a4be172123049a43de235123ef6
|
[
"MIT"
] | 4
|
2018-11-22T14:29:51.000Z
|
2021-12-03T01:14:49.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import warnings
import os.path as osp
from abc import abstractmethod, ABCMeta
from collections import defaultdict
from typing import Dict, List, Union, Tuple
from xml.etree import ElementTree as ET
import bs4
import matplotlib.axes as mp_axe
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
from pandas import DataFrame
from pandas.plotting import register_matplotlib_converters
from hydsensread import file_parser
from hydsensread.site_and_records import (
DrillingSite, geographical_coordinates, Sample, SensorPlateform)
register_matplotlib_converters()
sample_ana_type = Dict[str, Sample]
sample_dict = Dict[str, sample_ana_type]
date_list = List[datetime.datetime]
class LineDefinition(object):
"""
Line definition element to pass to the plot method
"""
def __init__(self, parameter,
color: str = 'blue',
linestyle: str = '-',
outward: int = 0,
linewidth: float = 2,
make_grid: bool = False) -> None:
self.param = parameter
self.linestyle = linestyle
self.color = color
self.outward = outward
self.linewidth = linewidth
self.make_grid = make_grid
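# Illustrative pairing of LineDefinition with TimeSeriesFileReader.plot()
# below (the parameter names 'Level_m' and 'Temp_degC' and the `reader`
# variable are assumptions for the example, not part of this module):
#   level_line = LineDefinition('Level_m', color='navy', make_grid=True)
#   temp_line = LineDefinition('Temp_degC', color='red', outward=60)
#   fig, axes = reader.plot(level_line, [temp_line])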
class AbstractFileReader(object, metaclass=ABCMeta):
"""
    Interface for reading a file produced by an arbitrary datalogger; a class
    used to extract data from an arbitrary file.
    A data file is generally made up of:
    - a header describing the data-acquisition environment
    - a header describing the data columns
    - the data columns themselves
"""
TXT_FILE_TYPES = ['dat', 'lev', 'txt']
XLS_FILES_TYPES = ['xls', 'xlsx']
XML_FILES_TYPES = ['xle', 'xml']
CSV_FILES_TYPES = ['csv']
WEB_XML_FILES_TYPES = ['http']
MONTH_S_DAY_S_YEAR_HMS_DATE_STRING_FORMAT = '%m/%d/%y %H:%M:%S'
YEAR_S_MONTH_S_DAY_HM_DATE_STRING_FORMAT = '%Y/%m/%d %H:%M'
YEAR_S_MONTH_S_DAY_HMS_DATE_STRING_FORMAT = (
YEAR_S_MONTH_S_DAY_HM_DATE_STRING_FORMAT + ":%S")
YEAR_S_MONTH_S_DAY_HMSMS_DATE_STRING_FORMAT = (
YEAR_S_MONTH_S_DAY_HMS_DATE_STRING_FORMAT + ".%f")
def __init__(self, file_path: str = None,
header_length: int = 10,
request_params: dict = None,
encoding='utf8',
wait_read=False,
csv_delim_regex: str = None):
"""
:param file_path: path to the file to treat
:param header_length: header length
:param request_params: request parameter for web element
:param encoding: encoding type :default = 'utf-8'
:param wait_read: if wait_read is True, will wait to read the file
        content. This is useful for hierarchical classes.
:param csv_delim_regex: a regex used to determine the delimiter of
        csv files when parsing the data.
See file_reader.compagny_file_reader.solinst_file_reader.py
for an example
"""
self.request_params = request_params
self._file = file_path
self._header_length = header_length
self._encoding = encoding
self._csv_delim_regex = csv_delim_regex
self._site_of_interest = None
self.file_reader = self._set_file_reader()
if not wait_read:
self.file_reader.read_file()
@property
def sites(self):
return self._site_of_interest
def _set_file_reader(self) -> Union[file_parser.CSVFileParser,
file_parser.EXCELFileParser,
file_parser.TXTFileParser,
file_parser.WEBFileParser]:
"""
        Set the appropriate file parser to open and read the provided file.
:return:
"""
file_reader = ""
file_ext = self.file_extension
try:
if file_ext in self.TXT_FILE_TYPES:
file_reader = file_parser.TXTFileParser(
file_path=self._file,
header_length=self._header_length,
encoding=self._encoding)
elif file_ext in self.XLS_FILES_TYPES:
file_reader = file_parser.EXCELFileParser(
file_path=self._file,
header_length=self._header_length)
elif file_ext in self.CSV_FILES_TYPES:
file_reader = file_parser.CSVFileParser(
file_path=self._file,
header_length=self._header_length,
csv_delim_regex=self._csv_delim_regex)
elif file_ext in self.WEB_XML_FILES_TYPES or 'http' in self._file:
file_reader = file_parser.WEBFileParser(
file_path=self._file,
requests_params=self.request_params)
elif file_ext in self.XML_FILES_TYPES:
file_reader = file_parser.XMLFileParser(file_path=self._file)
except ValueError as e:
print(self._file)
print("File ext: {}".format(file_ext))
raise e
else:
return file_reader
def read_file(self):
self._make_site()
self._make_data()
@property
def file_extension(self):
ext = osp.splitext(self._file)[1]
if ext == '':
raise ValueError("The path given doesn't point to a file name.")
else:
return ext[1:].lower()
@property
def file_content(self) -> Union[ET.ElementTree, bs4.BeautifulSoup, list, ]:
return self.file_reader.get_file_content
def _make_site(self):
"""
Create a site object by reading the file header and the data header
to know what was recorded.
"""
self._read_file_header()
self._read_file_data_header()
def _make_data(self):
"""Read and classify the data."""
self._read_file_data()
@abstractmethod
def _read_file_header(self):
"""Read the file header."""
pass
@abstractmethod
def _read_file_data_header(self):
"""Read the data header (what was recorded)."""
pass
@abstractmethod
def _read_file_data(self):
"""Read and classify the data columns."""
pass
class TimeSeriesFileReader(AbstractFileReader):
def __init__(self, file_path: str = None, header_length: int = 10,
encoding='utf8', wait_read: bool = False,
csv_delim_regex: str = None):
super().__init__(file_path, header_length, encoding=encoding,
wait_read=wait_read, csv_delim_regex=csv_delim_regex)
self._site_of_interest = SensorPlateform()
self._date_list = []
self.header_content = {}
if not wait_read:
self.read_file()
@property
def time_series_dates(self):
return self._date_list
@abstractmethod
def _get_date_list(self) -> date_list:
pass
@property
def sites(self) -> SensorPlateform:
return self._site_of_interest
@property
def records(self) -> DataFrame:
return self.sites.records
@records.setter
def records(self, value: DataFrame):
self._site_of_interest.records = value
def plot(self, main_axis_def: LineDefinition, other_axis,
legend_loc='upper left',
*args, **kwargs) -> Tuple[plt.Figure, List[plt.Axes]]:
"""
        :param main_axis_def: LineDefinition for the parameter drawn on the
            main (left) axis
        :param other_axis: iterable of LineDefinition objects, one per
            additional right-hand axis
        :param legend_loc: matplotlib legend location string
        :param args: unused
        :param kwargs: unused
        :return: the created matplotlib Figure and the list of its Axes
"""
fig, main_axis = plt.subplots(figsize=(20, 10))
main_axis = self._add_first_axis(main_axis, main_axis_def)
all_axis = [main_axis]
for lines in other_axis:
new_axis = self._add_axe_to_plot(main_axis, lines)
all_axis.append(new_axis)
self._set_date_time_plot_format(main_axis)
fig.legend(loc=legend_loc)
return fig, all_axis
def remove_duplicates(self) -> DataFrame:
self.records = self.records.drop_duplicates()
return self.records
def _add_axe_to_plot(self, parent_plot,
new_line_def: LineDefinition,
**kwargs) -> mp_axe.Axes:
new_axis = parent_plot.twinx()
new_axis.plot(self.records[new_line_def.param],
color=new_line_def.color, linestyle=new_line_def.linestyle,
linewidth=new_line_def.linewidth, **kwargs)
new_axis.grid(new_line_def.make_grid)
new_axis.set_ylabel(new_line_def.param, color=new_line_def.color)
new_axis.spines["right"].set_color(new_line_def.color)
if new_line_def.outward != 0:
new_axis.spines["right"].set_position(
("outward", new_line_def.outward))
return new_axis
def _add_first_axis(self, main_axis: mp_axe.Axes,
line_def: LineDefinition, **kwargs) -> mp_axe.Axes:
main_axis.plot(self.records[line_def.param],
color=line_def.color,
linestyle=line_def.linestyle,
linewidth=line_def.linewidth, **kwargs)
main_axis.set_ylabel(line_def.param, color=line_def.color)
main_axis.spines['left'].set_color(line_def.color)
main_axis.set_title(self.sites.site_name +
" - Visit date: " +
str(self.sites.visit_date))
main_axis.grid(line_def.make_grid)
return main_axis
@staticmethod
def _set_date_time_plot_format(axis: mp_axe.Axes):
myFmt = mdates.DateFormatter('(%Y-%m-%d) %H:%M')
axis.xaxis.set_major_formatter(myFmt)
axis.grid(True, axis='x')
class GeochemistryFileReader(AbstractFileReader):
def __init__(self, file_path: str = None,
header_length: int = 10, **kwargs):
super().__init__(file_path, header_length)
self._site_of_interest = defaultdict(dict) # dict of Samples
self.project = None
self.report_date = None
self.analysis_methode = None
def _read_file_header(self):
pass
def _read_file_data_header(self):
pass
def create_sample(self, sample_name: str):
sample = Sample(site_name=sample_name)
self._site_of_interest[sample_name] = sample
yield self._site_of_interest[sample_name]
def create_complete_sample(self, site_name: str = None,
visit_date: datetime.datetime = None,
lab_sample_name: str = None,
sample_type: str = None,
project_name: str = None):
sample = Sample(site_name, visit_date, lab_sample_name, sample_type, project_name)
self._site_of_interest[site_name] = sample
class TimeSeriesGeochemistryFileReader(TimeSeriesFileReader, GeochemistryFileReader):
TIME_SERIES_DATA = 'timeSerie'
GEOCHEMISTRY_DATA = 'samples'
def __init__(self, file_path: str = None, header_length: int = 10, encoding='utf-8'):
"""
        Class combining TimeSeriesFileReader and GeochemistryFileReader.
internal data structure is like:
self._site_of_interest
[TIMES_SERIES]
[site_name]
SensorPlateform
[GEOCHEMISTRY]
[date : datetime.datetime]
[sample_name:str]
Sample
"""
warnings.warn("""Deprecated class.
Needs to be adapted to site.py and records.py refactoring
            Don't know if this class is still useful if a pandas.Dataframe is used.
Maybe a MultiIndex can do the trick !
- 2018-04-03""", DeprecationWarning)
# TimeSeriesFileReader.__init__(self, file_path, header_length, encoding=encoding)
# GeochemistryFileReader.__init__(self, file_path, header_length)
super().__init__(file_path, header_length, encoding=encoding)
self._site_of_interest = defaultdict(dict)
self._site_of_interest[self.TIME_SERIES_DATA] = defaultdict(SensorPlateform)
self._site_of_interest[self.GEOCHEMISTRY_DATA] = defaultdict(dict) # dict sorted by [samp_name][samp_date]
def get_sample_by_date(self, p_date, p_samp_name) -> Sample:
try:
return self._site_of_interest[self.GEOCHEMISTRY_DATA][p_date][p_samp_name]
        except KeyError:
return None
def get_time_series_data(self, site_name=None) -> Union[SensorPlateform, dict]:
"""
        get all sites available that have a time series, OR
        get all time series for the given "site_name"
with this structure:
[TIMES_SERIES]
[site_name]
SensorPlateform
:param site_name:
:return:
"""
if site_name is not None:
return self._site_of_interest[self.TIME_SERIES_DATA][site_name]
else:
return self._site_of_interest[self.TIME_SERIES_DATA]
def get_geochemistry_data(self) -> dict:
"""
        get the dictionary for geochemistry in this structure:
[GEOCHEMISTRY]
[date : datetime.datetime]
[sample_name:str]
Sample
:return:
"""
return self._site_of_interest[self.GEOCHEMISTRY_DATA]
def _get_date_list(self, site_name) -> date_list:
"""
        get all dates for the given site_name, regardless of the parameter
:param site_name:
:return:
"""
return self.get_time_series_data(site_name).get_dates()
@TimeSeriesFileReader.time_series_dates.getter
def time_series_dates(self, site_name):
"""
        Override of the time_series_dates property getter. Needs a site_name
        because of the dict structure.
:param site_name:
:return:
"""
self._date_list = self._get_date_list(site_name)
return self._date_list
def makes_samples_with_time_series(self, site_name):
"""
        make samples from all the time series for the given site_name (one Sample per date)
:param site_name:
:return:
"""
sample_name = self.get_time_series_data(site_name).site_name
project = self.get_time_series_data(site_name).project_name
# iterate through all dates
for dates in self._get_date_list(site_name):
# create a sample
samp = Sample(site_name=sample_name,
visit_date=dates,
lab_sample_name=None,
sample_type='automatic',
analysis_type=None,
project_name=project)
# create and add a record to the sample
for rec in self.get_time_series_data(site_name).get_records():
val = rec.get_value_at_date(dates)
param = rec.parameter
unit = rec.parameter_unit
if val is not None:
samp.create_complete_record(dates, param, unit, val, None, dates, None)
# add the sample to the geochemistry datas
self.get_geochemistry_data()[dates][sample_name] = samp
def make_time_series_with_samples(self):
"""
take all the samples in self._site_of_interest[self.GEOCHEMISTRY_DATA]
        and create a time series for each record.
        After all time series are made, they are filled with all the sampling data.
:return:
"""
self._site_of_interest[self.TIME_SERIES_DATA].clear()
self._site_of_interest[self.TIME_SERIES_DATA] = defaultdict(SensorPlateform)
self._create_time_series_with_samples()
self._fill_time_series_with_samples_data()
def _create_time_series_with_samples(self):
"""
        create a time series entry for each parameter available in each sample
remember, geochemistry data structure is like:
[GEOCHEMISTRY]
[date : datetime.datetime]
[sample_name:str]
Sample
:return:
"""
for sampled_dates in self.get_geochemistry_data().keys():
for samples_at_date in self.get_geochemistry_data()[sampled_dates].keys():
for records_in_sample in self.get_sample_by_date(sampled_dates, samples_at_date).get_records():
self._add_time_serie_value_by_geochemistry_record(records_in_sample, samples_at_date)
def _add_time_serie_value_by_geochemistry_record(self, rec, sample_name):
param = rec.parameter
unit = rec.parameter_unit
val = [rec.value]
val_date = [rec.sampling_date]
try:
self.get_time_series_data(sample_name).create_time_serie(param, unit, val_date, val)
        except Exception:
pass
def _fill_time_series_with_samples_data(self):
"""
        fill the time series for the given parameter with all the values available
:return:
"""
for site in self.get_time_series_data():
for ts in self.get_time_series_data(site).get_records():
for _dates in self.get_geochemistry_data():
rec = self.get_sample_by_date(_dates, site).get_record_by_parameter(ts.parameter)
try:
ts.add_value(rec.sampling_date, rec.value)
except KeyError as k:
continue
except AttributeError as a:
pass
except Exception as e:
print(type(e))
print(e)
ts.reorder_values()
class DrillingFileReader(AbstractFileReader):
def __init__(self, file_path: str = None, header_length: int = None, request_params: dict = None):
super().__init__(file_path, header_length, request_params)
self._site_of_interest = defaultdict(dict)
def create_drilling_site(self, site_name: str):
self.create_complete_drilling_site(site_name=site_name)
yield self._site_of_interest[site_name]
def create_complete_drilling_site(self, site_name: str, visit_date: datetime.datetime = None,
project_name: str = None,
other_identifier: str = None,
coordinates_x_y_z: geographical_coordinates = None,
drilling_depth: float = 0.0,
drill_dip: float = 0.0,
drill_azimut: float = 0.0,
drilling_diameter: float = 0.0):
drilling_site = DrillingSite(site_name=site_name,
visit_date=visit_date,
project_name=project_name,
other_identifier=other_identifier,
coordinates_x_y_z=coordinates_x_y_z,
drill_azimut=drill_azimut,
drill_dip=drill_dip,
drilling_depth=drilling_depth,
drilling_diameter=drilling_diameter)
self._site_of_interest[site_name] = drilling_site
return self._site_of_interest[site_name]
| 38.684418
| 115
| 0.607556
|
aa2612977acaacbf43c1c1f8e3f1682ddad4bc9c
| 10,969
|
py
|
Python
|
artworks/views.py
|
mnosinov/artworks_project
|
b6901ff83c1cd370ce1539f09b464ecb995e1ce3
|
[
"MIT"
] | null | null | null |
artworks/views.py
|
mnosinov/artworks_project
|
b6901ff83c1cd370ce1539f09b464ecb995e1ce3
|
[
"MIT"
] | null | null | null |
artworks/views.py
|
mnosinov/artworks_project
|
b6901ff83c1cd370ce1539f09b464ecb995e1ce3
|
[
"MIT"
] | null | null | null |
import os
from django.shortcuts import render, redirect
from django.urls import reverse_lazy
from django.views.generic import View, ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.contrib import messages
from django.forms.models import model_to_dict
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from formtools.wizard.views import SessionWizardView
from .insertdatahelper import InserDataHelper
from .models import Genre, Author, Artwork, ArtworkType, Painting, Media, Book
from . import forms
def index(request):
return render(request, 'artworks/index.html')
class InsertSampleDataView(View):
def get(self, request, *args, **kwargs):
InserDataHelper(request).reset_data()
messages.success(request,
'Sample Data have been successfully inserted.')
return redirect('index')
# Genre -----------------------------------------------
class GenreListView(ListView):
model = Genre
class GenreDetailView(DetailView):
model = Genre
class GenreCreateView(CreateView):
model = Genre
fields = ('title', 'artwork_type')
success_url = reverse_lazy('genres')
class GenreUpdateView(UpdateView):
model = Genre
fields = ('title', 'artwork_type')
success_url = reverse_lazy('genres')
class GenreDeleteView(DeleteView):
model = Genre
success_url = reverse_lazy('genres')
# Author -----------------------------------------------
class AuthorListView(ListView):
model = Author
class AuthorDetailView(DetailView):
model = Author
class AuthorCreateView(CreateView):
model = Author
fields = ('name',)
success_url = reverse_lazy('authors')
class AuthorUpdateView(UpdateView):
model = Author
fields = ('name',)
success_url = reverse_lazy('authors')
class AuthorDeleteView(DeleteView):
model = Author
success_url = reverse_lazy('authors')
# Art Work -----------------------------------------------
class ArtworkListView(ListView):
model = Artwork
class ArtworkDetailView(DetailView):
model = Artwork
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
artwork = kwargs['object']
if artwork.type == ArtworkType.PAINTING:
context['painting'] = Painting.objects.get(pk=artwork.id)
elif artwork.type == ArtworkType.MUSIC:
context['music'] = Media.objects.get(pk=artwork.id)
elif artwork.type == ArtworkType.MOVIE:
context['movie'] = Media.objects.get(pk=artwork.id)
elif artwork.type == ArtworkType.BOOK:
context['book'] = Book.objects.get(pk=artwork.id)
return context
class ArtworkUpdateView(UpdateView):
model = Artwork
fields = ('name',)
success_url = reverse_lazy('artworks')
class ArtworkDeleteView(DeleteView):
model = Artwork
success_url = reverse_lazy('artworks')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
artwork = kwargs['object']
if artwork.type == ArtworkType.PAINTING:
context['painting'] = Painting.objects.get(pk=artwork.id)
elif artwork.type == ArtworkType.MUSIC:
context['music'] = Media.objects.get(pk=artwork.id)
elif artwork.type == ArtworkType.MOVIE:
context['movie'] = Media.objects.get(pk=artwork.id)
elif artwork.type == ArtworkType.BOOK:
context['book'] = Book.objects.get(pk=artwork.id)
return context
# multistep wizard form for artwork -----------------------------------------------
class ArtworkWizard(SessionWizardView):
# form_list = [
# ArtworkForm1,
# ArtworkPaintingForm2, ArtworkBookForm2, ArtworkMovieForm2, ArtworkMusicForm2,
# ]
# template_name = 'artworks/artwork_wizard_single_unified_template.html'
# forms
FORMS = [
("form1", forms.ArtworkForm1),
("painting_form2", forms.ArtworkPaintingForm2),
("painting_form3", forms.ArtworkPaintingForm3),
("painting_form4", forms.ArtworkPaintingForm4),
("painting_form5", forms.ArtworkPaintingForm5),
("music_form2", forms.ArtworkMusicForm2),
("music_form3", forms.ArtworkMusicForm3),
("music_form4", forms.ArtworkMusicForm4),
("movie_form2", forms.ArtworkMovieForm2),
("movie_form3", forms.ArtworkMovieForm3),
("movie_form4", forms.ArtworkMovieForm4),
("book_form2", forms.ArtworkBookForm2),
("book_form3", forms.ArtworkBookForm3),
("book_form4", forms.ArtworkBookForm4),
("book_form5", forms.ArtworkBookForm5),
]
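    # Each step name above doubles as the key into TEMPLATES (the per-step
    # template) and into condition_dict (whether the step is shown for the
    # artwork type chosen in "form1").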
# form templates
TEMPLATES = {
"form1": "artworks/artwork_wizard/form1.html",
"painting_form2": "artworks/artwork_wizard/painting_form2.html",
"painting_form3": "artworks/artwork_wizard/painting_form3.html",
"painting_form4": "artworks/artwork_wizard/painting_form4.html",
"painting_form5": "artworks/artwork_wizard/painting_form5.html",
"music_form2": "artworks/artwork_wizard/music_form2.html",
"music_form3": "artworks/artwork_wizard/music_form3.html",
"music_form4": "artworks/artwork_wizard/music_form4.html",
"movie_form2": "artworks/artwork_wizard/movie_form2.html",
"movie_form3": "artworks/artwork_wizard/movie_form3.html",
"movie_form4": "artworks/artwork_wizard/movie_form4.html",
"book_form2": "artworks/artwork_wizard/book_form2.html",
"book_form3": "artworks/artwork_wizard/book_form3.html",
"book_form4": "artworks/artwork_wizard/book_form4.html",
"book_form5": "artworks/artwork_wizard/book_form5.html",
}
file_storage = FileSystemStorage(location=os.path.join(settings.MEDIA_ROOT, 'book_covers'))
# conditions
def is_painting(wizard):
cleaned_data = wizard.get_cleaned_data_for_step('form1') or {'type': 'NONE'}
return cleaned_data['type'] == ArtworkType.PAINTING
def is_music(wizard):
cleaned_data = wizard.get_cleaned_data_for_step('form1') or {'type': 'NONE'}
return cleaned_data['type'] == ArtworkType.MUSIC
def is_movie(wizard):
cleaned_data = wizard.get_cleaned_data_for_step('form1') or {'type': 'NONE'}
return cleaned_data['type'] == ArtworkType.MOVIE
def is_book(wizard):
cleaned_data = wizard.get_cleaned_data_for_step('form1') or {'type': 'NONE'}
return cleaned_data['type'] == ArtworkType.BOOK
condition_dict = {
"painting_form2": is_painting,
"painting_form3": is_painting,
"painting_form4": is_painting,
"painting_form5": is_painting,
"music_form2": is_music,
"music_form3": is_music,
"music_form4": is_music,
"movie_form2": is_movie,
"movie_form3": is_movie,
"movie_form4": is_movie,
"book_form2": is_book,
"book_form3": is_book,
"book_form4": is_book,
"book_form5": is_book,
}
def get_template_names(self):
return [ArtworkWizard.TEMPLATES[self.steps.current]]
def get_form_initial(self, step):
if 'pk' in self.kwargs:
artwork_id = self.kwargs['pk']
if step in ('form1', 'painting_form2', 'painting_form5', 'music_form2',
'movie_form2', 'book_form2', 'music_form4', 'movie_form4',
'book_form5'):
artwork = Artwork.objects.get(pk=artwork_id)
return model_to_dict(artwork)
elif step in ('painting_form3', 'painting_form4'):
painting = Painting.objects.get(pk=artwork_id)
return model_to_dict(painting)
elif step in ('music_form3', 'movie_form3'):
media = Media.objects.get(pk=artwork_id)
return model_to_dict(media)
elif step in ('book_form3', 'book_form4'):
book = Book.objects.get(pk=artwork_id)
return model_to_dict(book)
else:
return self.initial_dict.get(step, {})
else:
return self.initial_dict.get(step, {})
def done(self, form_list, form_dict, **kwargs):
# save data from all of the steps
artwork_type = form_dict['form1'].cleaned_data['type']
artwork = Artwork(
type=artwork_type,
author=form_dict['form1'].cleaned_data['author'],
title=form_dict['form1'].cleaned_data['title'],
pub_year=form_dict['form1'].cleaned_data['pub_year'],
)
# if pk exists then it is UPDATE mode
if 'pk' in self.kwargs:
artwork.id = self.kwargs['pk']
artwork.save()
if artwork_type == ArtworkType.PAINTING:
genre = form_dict['painting_form2'].cleaned_data['genre']
price = form_dict['painting_form5'].cleaned_data['price']
painting = Painting(
artwork=artwork,
height=form_dict['painting_form3'].cleaned_data['height'],
width=form_dict['painting_form3'].cleaned_data['width'],
paint=form_dict['painting_form4'].cleaned_data['paint'],
)
painting.save()
success_message = 'Painting successfully saved.'
elif artwork_type == ArtworkType.MUSIC:
genre = form_dict['music_form2'].cleaned_data['genre']
price = form_dict['music_form4'].cleaned_data['price']
music = Media(
artwork=artwork,
media_type=Media.Type.MUSIC,
duration=form_dict['music_form3'].cleaned_data['duration'],
)
music.save()
success_message = 'Music successfully saved.'
elif artwork_type == ArtworkType.MOVIE:
genre = form_dict['movie_form2'].cleaned_data['genre']
price = form_dict['movie_form4'].cleaned_data['price']
movie = Media(
artwork=artwork,
media_type=Media.Type.VIDEO,
duration=form_dict['movie_form3'].cleaned_data['duration'],
)
movie.save()
success_message = 'Movie successfully saved.'
elif artwork_type == ArtworkType.BOOK:
genre = form_dict['book_form2'].cleaned_data['genre']
price = form_dict['book_form5'].cleaned_data['price']
book = Book(
artwork=artwork,
pages=form_dict['book_form3'].cleaned_data['pages'],
cover=form_dict['book_form4'].cleaned_data['cover'],
)
book.save()
success_message = 'Book successfully saved.'
artwork.genre = genre
artwork.price = price
artwork.save()
messages.success(self.request, success_message)
return redirect('artworks')
| 36.082237
| 95
| 0.630595
|
b21c29454fa8780097d49b65427a698b46e63dc6
| 1,738
|
py
|
Python
|
pyweather/apmodel.py
|
ravigoel08/py-open-weather
|
08e6945f78ab43112a84f6989677e4b4fd389ecf
|
[
"MIT"
] | 5
|
2021-01-05T09:46:10.000Z
|
2021-05-26T10:58:18.000Z
|
pyweather/apmodel.py
|
ravigoel08/py-open-weather
|
08e6945f78ab43112a84f6989677e4b4fd389ecf
|
[
"MIT"
] | 1
|
2021-07-21T12:32:08.000Z
|
2021-07-21T12:32:08.000Z
|
pyweather/apmodel.py
|
ravigoel08/py-open-weather
|
08e6945f78ab43112a84f6989677e4b4fd389ecf
|
[
"MIT"
] | 3
|
2021-01-18T13:14:13.000Z
|
2021-04-26T14:14:54.000Z
|
from __future__ import annotations
from typing import List, Dict
from pydantic import BaseModel, Field
import orjson
def orjson_dumps(v, *, default):
# orjson.dumps returns bytes, to match standard json.dumps we need to decode
return orjson.dumps(v, default=default).decode()
class Main(BaseModel):
aqi: int = Field(
None,
description="Air Quality Index Possible values: 1, 2, 3, 4, 5. Where 1 = Good, 2 = Fair, 3 = Moderate, 4 = Poor, 5 = Very Poor.",
)
class Components(BaseModel):
    co: float = Field(None, description="Concentration of CO (Carbon monoxide), μg/m3")
    no: float = Field(
        None, description="Concentration of NO (Nitrogen monoxide), μg/m3"
    )
    no2: float = Field(
        None, description="Concentration of NO2 (Nitrogen dioxide), μg/m3"
    )
    o3: float = Field(None, description="Concentration of O3 (Ozone), μg/m3")
    so2: float = Field(
        None, description="Concentration of SO2 (Sulphur dioxide), μg/m3"
    )
    pm2_5: float = Field(
        None, description="Concentration of PM2.5 (Fine particles matter), μg/m3"
    )
    pm10: float = Field(
        None, description="Concentration of PM10 (Coarse particulate matter), μg/m3"
    )
    nh3: float = Field(None, description="Concentration of NH3 (Ammonia), μg/m3")
class ListItem(BaseModel):
dt: int = Field(None, description="Date and time, Unix, UTC")
main: Main
components: Components
class AirpolData(BaseModel):
coord: Dict[str, float] = Field(
None,
description="Coordinates from the specified location (latitude, longitude)",
)
list: List[ListItem]
class Config:
json_loads = orjson.loads
json_dumps = orjson_dumps
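# Illustrative parse of an Air Pollution API payload (the numbers below are
# invented for the example, not real API output):
#   AirpolData.parse_raw(
#       '{"coord": {"lon": 50.0, "lat": 50.0},'
#       ' "list": [{"dt": 1606147200,'
#       ' "main": {"aqi": 2},'
#       ' "components": {"co": 201.94, "no2": 0.77}}]}'
#   )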
| 29.457627
| 137
| 0.661105
|
16e1bc4da530742dcf7746ad524e2b410fa4a288
| 2,355
|
py
|
Python
|
update.py
|
Jimgersnap/DJ-Roomba-legacy
|
3c4ded6635213c7497bfb432a6c7adafe0532c1b
|
[
"MIT"
] | 2
|
2019-07-16T04:42:56.000Z
|
2020-06-02T05:36:57.000Z
|
update.py
|
Jimgersnap/DJ-Roomba-legacy
|
3c4ded6635213c7497bfb432a6c7adafe0532c1b
|
[
"MIT"
] | null | null | null |
update.py
|
Jimgersnap/DJ-Roomba-legacy
|
3c4ded6635213c7497bfb432a6c7adafe0532c1b
|
[
"MIT"
] | null | null | null |
import os
import subprocess
import sys
def y_n(q):
while True:
ri = input('{} (y/n): '.format(q))
if ri.lower() in ['yes', 'y']: return True
elif ri.lower() in ['no', 'n']: return False
def main():
print('Starting...')
# Make sure that we're in a Git repository
if not os.path.isdir('.git'):
raise EnvironmentError("This isn't a Git repository.")
# Make sure that we can actually use Git on the command line
# because some people install Git Bash without allowing access to Windows CMD
try:
subprocess.check_call('git --version', shell=True, stdout=subprocess.DEVNULL)
except subprocess.CalledProcessError:
raise EnvironmentError("Couldn't use Git on the CLI. You will need to run 'git pull' yourself.")
print("Passed Git checks...")
# Check that the current working directory is clean
sp = subprocess.check_output('git status --porcelain', shell=True, universal_newlines=True)
if sp:
oshit = y_n('You have modified files that are tracked by Git (e.g the bot\'s source files).\n'
'We can try to reset your folder to a clean version for you. Continue?')
if oshit:
try:
subprocess.check_call('git reset --hard', shell=True)
except subprocess.CalledProcessError:
raise OSError("Could not reset the directory to a clean state.")
else:
print('Okay. Cancelling update process for now.')
return
print("Attempting to update the bot using Git...")
try:
subprocess.check_call('git pull', shell=True)
except subprocess.CalledProcessError:
raise OSError("Could not update the bot. You will need to run 'git pull' yourself.")
print("Attempting to update dependencies...")
try:
        subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-U', '-r', 'requirements.txt'])
except subprocess.CalledProcessError:
raise OSError("Could not update dependencies. You will need to run '{0} -m pip install -U -r requirements.txt' yourself.".format(sys.executable))
try:
from musicbot.constants import VERSION
print('DJ Roomba is at version {0}'.format(VERSION))
except Exception:
pass
print("Done!")
if __name__ == '__main__':
main()
| 35.681818
| 153
| 0.64034
|
adf2c701927ee7ecd456b6eb9b37a41950e0b429
| 5,223
|
py
|
Python
|
tests/test_evaluator.py
|
denisra/pypoker
|
c6443b215786d6d33f032dc74faaf00ebc193f09
|
[
"MIT"
] | null | null | null |
tests/test_evaluator.py
|
denisra/pypoker
|
c6443b215786d6d33f032dc74faaf00ebc193f09
|
[
"MIT"
] | null | null | null |
tests/test_evaluator.py
|
denisra/pypoker
|
c6443b215786d6d33f032dc74faaf00ebc193f09
|
[
"MIT"
] | null | null | null |
from pypoker.cards import *
from pypoker.evaluator import *
class TestSetup:
def setup(self):
self.deck = CardsDeck()
self.royal_flush = Hand(self.deck[8:13]) # [TS, JS, QS, KS, AS]
self.straight_flush = Hand(self.deck[5:10]) # [7S, 8S, 9S, TS, JS]
self.four_of_kind = Hand(self.deck[0:52:13] + [self.deck[1]]) # [2S, 2D, 2C, 2H, 3S]
self.full_house = Hand(self.deck[0:39:13] + self.deck[11:26:13]) #[2S, 2D, 2C, KS, KD]
self.flush = Hand(self.deck[3:12:2]) # [5S, 7S, 9S, JS, KS]
self.straight = Hand(self.deck[:3] + self.deck[16:18]) # [2S, 3S, 4S, 5D, 6D]
self.low_straight = Hand([Card('A', 'diamonds')] + self.deck[:4]) #[AD, 2S, 3S, 4S, 5S])
self.three_of_kind = Hand(self.deck[0:39:13] + self.deck[1:3]) #[2S, 2D, 2C, 3S, 4S]
self.two_pair = Hand(self.deck[0:26:13] + self.deck[11:26:13] + [self.deck[5]]) #[2S, 2D, KS, KD, 7S]
self.pair = Hand(self.deck[3:12:3] + self.deck[11:25:13]) # [5S, 8S, JS, KS, KD]
self.high_card = Hand(self.deck[9:18:2]) # [JS, KS, 2D, 4D, 6D]
self.high_card_A = Hand(self.deck[9:26:4]) # [JS, 2D, 6D, TD, AD]
self.seven_card_straight_flush = Hand(self.deck[5:12])
self.seven_card_royal_flush = Hand(self.deck[6:13])
class TestHand(TestSetup):
def test_hand(self):
ranks1 = sorted([int(rank) for rank, suit in self.deck[:5]], reverse=True)
suits1 = len(set([suit for rank, suit in self.deck[:5]]))
assert ranks1 == self.straight.ranks
#assert suits1 == len(set(self.straight.suits))
ranks2 = sorted([int(rank) if rank != 'A' else 1 for rank, suit in [
Card('A', 'diamonds')] + self.deck[:4]], reverse=True)
suits2 = len(set([suit for rank, suit in [Card('A', 'diamonds')] + self.deck[:4]]))
assert ranks2 == self.low_straight.ranks
assert suits2 == len(set(self.low_straight.suits))
class TestEvaluator(TestSetup):
def test_evaluator(self):
ev_royal_flush = Evaluator(self.royal_flush)
assert ev_royal_flush.straight_flush()
ev_straight = Evaluator(self.straight)
assert ev_straight.straight()
assert not ev_straight.flush()
assert not ev_straight.straight_flush()
ev_low_straight = Evaluator(self.low_straight)
assert ev_low_straight.straight()
assert ev_low_straight.flush() == False
ev_two_pair = Evaluator(self.two_pair)
assert ev_two_pair.two_pair()
ev_straight_flush = Evaluator(self.straight_flush)
assert ev_straight_flush.straight() and ev_straight_flush.flush()
ev_four_of_kind = Evaluator(self.four_of_kind)
assert ev_four_of_kind.kind(4)
assert not ev_four_of_kind.kind(2)
ev_full_house = Evaluator(self.full_house)
assert ev_full_house.kind(3) and ev_full_house.kind(2)
ev_flush = Evaluator(self.flush)
assert ev_flush.flush()
ev_three_of_kind = Evaluator(self.three_of_kind)
assert ev_three_of_kind.kind(3)
ev_pair = Evaluator(self.pair)
assert ev_pair.kind(2)
ev_high_card = Evaluator(self.high_card)
#assert ev_high_card
ev_seven_card_straight_flush = Evaluator(self.seven_card_straight_flush)
assert ev_seven_card_straight_flush.straight()
assert ev_seven_card_straight_flush.flush()
###### Evaluate hand_value method #####
assert ev_royal_flush.hand_value() == HandValue(800, None, self.royal_flush.ranks)
assert ev_straight_flush.hand_value() == HandValue(800, None, self.straight_flush.ranks)
assert ev_four_of_kind.hand_value() == HandValue(700, [2], self.four_of_kind.ranks)
assert ev_full_house.hand_value() == HandValue(600, [2, 13], None)
assert ev_flush.hand_value() == HandValue(500, None, self.flush.ranks)
assert ev_straight.hand_value() == HandValue(400, None, self.straight.ranks)
assert ev_three_of_kind.hand_value() == HandValue(300, [2], None)
assert ev_two_pair.hand_value() == HandValue(200, [13, 2], self.two_pair.ranks)
assert ev_pair.hand_value() == HandValue(100, [13], self.pair.ranks)
assert ev_high_card.hand_value() == HandValue(0, None, self.high_card.ranks)
##### __gt__ method ####
assert ev_royal_flush > ev_straight_flush
assert not ev_straight_flush > ev_royal_flush
assert ev_straight_flush < ev_royal_flush
assert ev_straight > ev_low_straight
assert ev_low_straight < ev_straight
assert not ev_straight > ev_straight
#### best_hand method ####
assert Evaluator.best_hand([self.royal_flush, self.straight]) == [self.royal_flush]
assert Evaluator.best_hand([self.royal_flush, self.royal_flush]) == [self.royal_flush, self.royal_flush]
assert Evaluator.best_hand([self.low_straight, self.straight]) == [self.straight]
assert Evaluator.best_hand([self.flush, self.full_house, self.pair]) == [self.full_house]
assert Evaluator.best_hand([self.high_card, self.high_card_A]) == [self.high_card_A]
assert Evaluator.best_hand([self.high_card_A, self.three_of_kind]) == [self.three_of_kind]
| 52.23
| 112
| 0.655945
|
8586cd4102f631e11c0fe1adfe6d647a8b2f263a
| 219
|
py
|
Python
|
wagtailcommerce/frontend/context_processors.py
|
theplusagency/wagtail-commerce
|
6047170f29199ccaf2778534976ab0970c2877e7
|
[
"BSD-3-Clause"
] | 3
|
2019-04-12T15:38:43.000Z
|
2019-09-22T10:23:20.000Z
|
wagtailcommerce/frontend/context_processors.py
|
wagtailcommerce/wagtailcommerce
|
308ed8348483806c16062d09a7e69ec44d9a2e73
|
[
"BSD-3-Clause"
] | null | null | null |
wagtailcommerce/frontend/context_processors.py
|
wagtailcommerce/wagtailcommerce
|
308ed8348483806c16062d09a7e69ec44d9a2e73
|
[
"BSD-3-Clause"
] | null | null | null |
from wagtailcommerce.stores.utils import get_store
def store(request):
store = getattr(request, 'store', None)
return {
'store': store,
'store_currency': store.currency if store else ''
}
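# A minimal wiring sketch (assumes Django's standard TEMPLATES setting; not part of the
# original module). Registering the processor exposes `store` and `store_currency` to
# every template:
#
# TEMPLATES = [{
#     'BACKEND': 'django.template.backends.django.DjangoTemplates',
#     'APP_DIRS': True,
#     'OPTIONS': {
#         'context_processors': [
#             'django.template.context_processors.request',
#             'wagtailcommerce.frontend.context_processors.store',
#         ],
#     },
# }]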
| 19.909091
| 57
| 0.648402
|
21ffccac6fdcad481809e1713c166d90cc55b961
| 5,845
|
py
|
Python
|
kitsune/search/utils.py
|
joshua-s/kitsune
|
b4b79b8aa89d0dc3c50cc6dee8a9ceb8cd758cd5
|
[
"BSD-3-Clause"
] | 1
|
2020-11-03T23:46:56.000Z
|
2020-11-03T23:46:56.000Z
|
kitsune/search/utils.py
|
983834572/kitsune
|
d4b3dcdce3294214dbc659c375c8491504473213
|
[
"BSD-3-Clause"
] | null | null | null |
kitsune/search/utils.py
|
983834572/kitsune
|
d4b3dcdce3294214dbc659c375c8491504473213
|
[
"BSD-3-Clause"
] | 1
|
2020-11-03T23:47:55.000Z
|
2020-11-03T23:47:55.000Z
|
import subprocess
import time
from itertools import islice
from django.conf import settings
import bleach
from kitsune.lib.sumo_locales import LOCALES
call = lambda x: subprocess.Popen(x, stdout=subprocess.PIPE).communicate()
class FakeLogger(object):
"""Fake logger that we can pretend is a Python Logger
Why? Well, because Django has logging settings that prevent me
from setting up a logger here that uses the stdout that the Django
    BaseCommand has. At some point while fiddling with it, I
figured, 'screw it--I'll just write my own' and did.
The minor ramification is that this isn't a complete
implementation so if it's missing stuff, we'll have to add it.
"""
def __init__(self, stdout):
self.stdout = stdout
def _out(self, level, msg, *args):
msg = msg % args
self.stdout.write('%s %-8s: %s\n' % (
time.strftime('%H:%M:%S'), level, msg))
def info(self, msg, *args):
self._out('INFO', msg, *args)
def error(self, msg, *args):
self._out('ERROR', msg, *args)
def clean_excerpt(excerpt):
return bleach.clean(excerpt, tags=['b', 'i'])
def locale_or_default(locale):
"""Return `locale` or, if `locale` isn't a known locale, a default.
Default is taken from Django's LANGUAGE_CODE setting.
"""
if locale not in LOCALES:
locale = settings.LANGUAGE_CODE
return locale
def create_batch_id():
"""Returns a batch_id"""
# TODO: This is silly, but it's a good enough way to distinguish
# between batches by looking at a Record. This is just over the
# number of seconds in a day.
return str(int(time.time()))[-6:]
def chunked(iterable, n):
"""Returns chunks of n length of iterable
If len(iterable) % n != 0, then the last chunk will have length
less than n.
Example:
>>> chunked([1, 2, 3, 4, 5], 2)
[(1, 2), (3, 4), (5,)]
"""
iterable = iter(iterable)
while 1:
t = tuple(islice(iterable, n))
if t:
yield t
else:
return
class ComposedList(object):
"""Takes counts and pretends they're sublists of a big list
This helps in the case where you know the lengths of the sublists,
need to treat them all as a big list, but don't want to actually
have to generate the lists.
    With ComposedList, you can do pagination and other things,
    including slicing the list and getting the bounds of the sublists you
    need, allowing you to generate just those tiny bits rather than the
    whole thing.
Handles "length", "index", and "slicing" as if they were
operations on the complete list.
**length**
Length of the ComposedList is the sum of the counts of the
sublists.
**index**
        Returns a tuple (kind, index) for the index as if the ComposedList
        were one big list of (kind, index) tuples.
Raises IndexError if the index exceeds the list.
**slice**
Returns a list of (kind, (start, stop)) tuples for the kinds
that are in the slice bounds. The start and stop are not
indexes--they're slice start and stop, so it's start up to but
not including stop.
For example::
>>> cl = ComposedList()
>>> # group a has 5 items indexed 0 through 4
...
>>> cl.set_count('a', 5)
>>> # group b has 2 items indexed 0 and 1
...
>>> cl.set_count('b', 2)
>>> cl[1:7]
        [('a', (1, 5)), ('b', (0, 2))]
This is the same if this were a real list:
        >>> reallist = [('a', 0), ('a', 1), ('a', 2), ('a', 3),
... ('a', 4), ('b', 0), ('b', 1)]
>>> reallist[1:7]
[('a', 1), ('a', 2), ('a', 3), ('a', 4), ('b', 0), ('b', 1)]
"""
def __init__(self):
self.counts = []
def set_count(self, kind, count):
"""Adds a (kind, count) to the counts
>>> cl = ComposedList()
>>> cl.set_count('wiki', 10)
:arg kind: str. e.g. 'wiki'
:arg count: int. e.g. 40
.. Note::
The order you call set_count() is important. If you have
three groups of things, you need to call set_count() in the
order you want those things returned in a slice.
"""
self.counts.append((kind, count))
def __repr__(self):
return repr(self.counts)
def __len__(self):
"""Returns the total length of the composed list"""
return sum(mem[1] for mem in self.counts)
def __getitem__(self, key):
"""Returns the 'index' or 'slice' of this composed list"""
if isinstance(key, slice):
start = key.start
stop = key.stop
docs = []
# figure out the start
for mem in self.counts:
if start is not None:
if start <= mem[1]:
if stop <= mem[1]:
docs.append((mem[0], (start, stop)))
break
docs.append((mem[0], (start, mem[1])))
start = None
else:
start = start - mem[1]
stop = stop - mem[1]
else:
if stop <= mem[1]:
docs.append((mem[0], (0, stop)))
break
else:
docs.append((mem[0], (0, mem[1])))
stop = stop - mem[1]
return docs
if isinstance(key, int):
for mem in self.counts:
if key < mem[1]:
return (mem[0], key)
else:
key = key - mem[1]
if key >= 0:
raise IndexError('Index exceeded list length.')
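# A small usage sketch mirroring the docstring above (not part of the original module):
#
# >>> cl = ComposedList()
# >>> cl.set_count('wiki', 5)
# >>> cl.set_count('question', 2)
# >>> len(cl)
# 7
# >>> cl[5]
# ('question', 0)
# >>> cl[1:7]
# [('wiki', (1, 5)), ('question', (0, 2))]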
| 28.373786
| 74
| 0.538751
|
64ae0c6fd48d4fa68ab30eceffe74256d7df68d6
| 9,835
|
py
|
Python
|
scripts/log_stats.py
|
aw32/sched
|
b6ef35c5b517875a5954c70e2dc366fab3721a60
|
[
"BSD-2-Clause"
] | null | null | null |
scripts/log_stats.py
|
aw32/sched
|
b6ef35c5b517875a5954c70e2dc366fab3721a60
|
[
"BSD-2-Clause"
] | null | null | null |
scripts/log_stats.py
|
aw32/sched
|
b6ef35c5b517875a5954c70e2dc366fab3721a60
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2019, Alex Wiens <awiens@mail.upb.de>, Achim Lösch <achim.loesch@upb.de>
# SPDX-License-Identifier: BSD-2-Clause
import sys
import json
import io
import math
import datetime
resource_short = {"IntelXeon":"C", "NvidiaTesla":"G", "MaxelerVectis":"F"}
class App:
def __init__(self):
self.tids = []
self.tasks = []
def arrival_time(self):
return self.tasks[0].arrival
def load_apps(tasks):
pools = []
for tid in tasks.keys():
pools += [[tid]]
for tid in tasks.keys():
ownpool = None
# search own pool
for p in pools:
if tid in p:
ownpool = p
pools.remove(p)
break
# search pool with dependencies
deppools = []
for d in tasks[tid].dep:
for p in pools:
if d in p:
deppools.append(p)
pools.remove(p)
break
# merge
for d in deppools:
ownpool += d
# put back
pools.append(ownpool)
apps = []
for p in pools:
a = App()
a.tids = sorted(p)
for tid in a.tids:
a.tasks.append(tasks[tid])
apps.append(a)
return apps
class Task:
def __init__(self):
self.parts = []
self.dep = []
self.tid = None
self.name = None
self.arrival = None
self.checkpoints = None
self.size = None
self.newtask = None
self.endtask = None
self.state = None
self.finish = None
class TaskPart:
def __init__(self):
self.start = None
self.stop = None
self.res = None
self.res_short = None
self.startevent = None
self.stopevent = None
self.startprogress = 0
self.progress = None
class Schedule:
def __init__(self):
self.schedule = None
class Algorithm:
def __init__(self, name, algorithm):
self.name = name
self.algorithm = algorithm
self.parameters = []
class Algo:
def __init__(self):
self.start = None
self.stop = None
class EventLog:
def __init__(self):
self.jsonobjects = []
self.reset()
self.filepath = None
self.schedule_start = None
self.schedule_stop = None
def reset(self):
self.resources = []
self.tasks = {}
self.algos = []
self.minstart = None
self.maxstop = None
self.resstop = {}
self.schedules = []
self.apps = []
self.measure = []
self.algorithm = None
self.algorithm_parameters = []
def get_app_id(self, taskid):
for ix,a in enumerate(self.apps):
if taskid in a.tids:
return ix
return -1
def start_time(self):
if self.schedule_start != None and "realtime" in self.schedule_start:
realtime = self.schedule_start["realtime"]
return datetime.datetime.fromtimestamp(int(realtime[0:realtime.find(".")]))
return None
def stop_time(self):
start = self.start_time()
leng = self.length()
if start != None and leng != None:
return start + datetime.timedelta(seconds=leng)
return None
def length(self):
if self.schedule_start != None and self.schedule_stop != None:
start = self.schedule_start["time"]
stop = self.schedule_stop["time"]
return float(stop) - float(start)
return None
def real_stop_time(self):
start = self.start_time()
leng = self.length()
if start != None and leng != None:
return start + datetime.timedelta(seconds=leng)
return None
def real_length(self):
if self.schedule_start != None and self.schedule_stop != None:
if "walltime" in self.schedule_start and "walltime" in self.schedule_stop:
start = self.schedule_start["walltime"]
stop = self.schedule_stop["walltime"]
else:
start = self.schedule_start["time"]
stop = self.schedule_stop["time"]
return float(stop) - float(start)
return None
def loadEvents(filepath):
# load lines
jsonlines = None
with open(filepath, "r") as jsonf:
jsonlines = jsonf.readlines()
log = EventLog()
log.filepath = filepath
# load json objects from lines
for line in jsonlines:
#print(line)
try:
obj = json.load(io.StringIO(line))
log.jsonobjects.append(obj)
except Exception as e:
if line != None and line != "":
print(e)
for o in log.jsonobjects:
if "event" not in o:
continue
if o["event"] == "SCHEDULER_START":
log.reset()
log.schedule_start = o
elif o["event"] == "SCHEDULER_STOP":
log.schedule_stop = o
elif o["event"] == "ALGORITHM":
log.algorithm = Algorithm(o["algorithm"], o)
elif o["event"] == "ALGORITHM_PARAM":
log.algorithm_parameters.append(o)
elif o["event"] == "NEWTASK":
t = Task()
t.tid = o["id"]
t.dep = o["dep"]
t.name = o["name"]
t.size = o["size"]
t.arrival = o["time"]
t.checkpoints = o["checkpoints"]
t.newtask = o
log.tasks[t.tid] = t
elif o["event"] == "SCHEDULE":
s = Schedule()
s.schedule = o
log.schedules.append(s)
elif o["event"] == "RESOURCES":
log.resources = o["resources"]
elif o["event"] == "ENDTASK":
# tasks.append(o)
t = o["times"]
#if minstart == None or t["started"] < minstart:
# minstart = float(t["started"])
#if maxstop == None or t["finished"] > maxstop:
# maxstop = float(t["finished"])
tid = o["id"]
if tid in log.tasks:
log.tasks[tid].endtask = o
if "state" in o:
log.tasks[tid].state = o["state"]
elif o["event"] == "TASK_START":
tid = o["id"]
t = None
if tid not in log.tasks:
t = Task()
log.tasks[tid] = t
else:
t = log.tasks[tid]
# check if last part was finished (update message)
if len(t.parts) > 0 and t.parts[-1].stop == None:
continue
p = TaskPart()
p.start = float(o["time"]) #* 1000000000.0
#print(tid, p.start, "START")
p.startevent = o
if log.minstart == None or p.start < log.minstart:
log.minstart = float(p.start)
p.res = o["res"]
if p.res in resource_short:
p.res_short = resource_short[p.res]
else:
p.res_short = p.res
# set start progress if available
if len(t.parts) > 0:
p.startprogress = t.parts[-1].progress
t.parts.append(p)
elif o["event"] == "TASK_STARTED":
tid = o["id"]
time = float(o["time"]) #* 1000000000.0
#print(tid, time, "STARTED")
elif o["event"] == "TASK_SUSPENDED" or o["event"] == "TASK_FINISHED":
tid = o["id"]
t = log.tasks[tid]
p = t.parts[-1]
p.stop = float(o["time"]) #* 1000000000.0
p.stopevent = o
#print(tid, p.stop, o["event"])
if o["event"] == "TASK_SUSPENDED":
if "progress" in o:
p.progress = o["progress"]
else:
p.progress = 0
elif o["event"] == "TASK_FINISHED":
p.progress = t.checkpoints
t.finish = o["time"]
if log.maxstop == None or p.stop > log.maxstop:
log.maxstop = float(p.stop)
elif o["event"] == "COMPUTER_ALGOSTART":
a = Algo()
a.start = float(o["time"])
log.algos.append(a)
if log.minstart == None or a.start < log.minstart:
log.minstart = float(a.start)
elif o["event"] == "COMPUTER_ALGOSTOP":
log.algos[-1].stop = float(o["time"])
elif o["event"] == "MEASURE":
log.measure.append(o)
# add parameters to algorithm object
if log.algorithm != None:
log.algorithm.parameters += log.algorithm_parameters
# compute task stop times
for t in log.tasks:
for p in log.tasks[t].parts:
r = p.startevent["res"]
if r not in log.resstop:
log.resstop[r] = 0.0
if p.stop != None and p.stop > log.resstop[r]:
log.resstop[r] = p.stop
# default resources
if len(log.resources) == 0:
log.resources = ["IntelXeon","NvidiaTesla","MaxelerVectis","Scheduler"]
log.apps = App.load_apps(log.tasks)
# sort apps
def min_app(a):
return min(a.tids)
log.apps.sort(key=min_app)
return log
def stat(self, metric):
if metric == "makespan":
return self.maxstop - self.minstart if self.maxstop != None and self.minstart != None else None
elif metric == "mintime":
return self.minstart
elif metric == "maxtime":
return self.maxstop
elif metric == "events":
return len(self.jsonobjects)
return None
class WrapApp:
def __init__(self):
self.tasks = []
self.name = None
self.size = None
self.status = None
self.signaled = None
self.signaled_signal = None
self.id = None
self.added = None
self.started = None
self.finished = None
self.aborted = None
self.state = None
class WrapLog:
def __init__(self):
self.filepath = None
self.jsonobjects = []
self.apps = []
def loadWrapLog(filepath):
log = None
try:
# load lines
jsonlines = None
with open(filepath, "r") as jsonf:
jsonlines = jsonf.readlines()
log = WrapLog()
log.filepath = filepath
for line in jsonlines:
#print(line)
try:
obj = json.load(io.StringIO(line))
log.jsonobjects.append(obj)
except Exception as e:
if line != None and line != "":
print(e)
for o in log.jsonobjects:
if o["event"] == "WRAPAPP":
app = WrapApp()
app.tasks = o["tasks"]
app.name = o["name"]
app.size = o["size"]
app.status = o["status"]
app.signaled = o["signaled"]
app.signaled_signal = o["signaled_signal"]
if "endtask" in o and "id" in o["endtask"]:
app.id = o["endtask"]["id"]
app.added = o["endtask"]["times"]["added"]
app.started = o["endtask"]["times"]["started"]
app.finished = o["endtask"]["times"]["finished"]
app.aborted = o["endtask"]["times"]["aborted"]
app.state = o["endtask"]["state"]
log.apps.append(app)
except Exception as e:
print(e)
return log
if __name__ == "__main__":
if len(sys.argv)<3:
print(sys.argv[0], "eventlogfile", "stat")
print("stats:")
print("\t\tmakespan")
print("\t\tmintime")
print("\t\tmaxtime")
print("\t\tevents")
sys.exit(1)
jsonfilename = sys.argv[1]
statname = sys.argv[2]
#print(len(jsonobjects), "events")
log = EventLog.loadEvents(jsonfilename)
#print(resources)
#print("min",minstart)
#print("max",maxstop)
#for r in resstop:
# print(r, resstop[r])
print(log.stat(statname))
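# Example invocation (sketch; the log file name is made up), matching the usage text
# printed above:
#   python3 log_stats.py scheduler_events.log makespan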
| 25.153453
| 98
| 0.625928
|
051e18928f181a8d2918ae5c52401ac314d25a0f
| 4,429
|
py
|
Python
|
run_examples.py
|
dylanljones/dqmc
|
d4969b6624c1b596d8b2fd0dcaefb16064958eee
|
[
"MIT"
] | 1
|
2022-01-18T22:27:47.000Z
|
2022-01-18T22:27:47.000Z
|
run_examples.py
|
dylanljones/dqmc
|
d4969b6624c1b596d8b2fd0dcaefb16064958eee
|
[
"MIT"
] | null | null | null |
run_examples.py
|
dylanljones/dqmc
|
d4969b6624c1b596d8b2fd0dcaefb16064958eee
|
[
"MIT"
] | null | null | null |
# coding: utf-8
#
# This code is part of dqmc.
#
# Copyright (c) 2022, Dylan Jones
#
# This code is licensed under the MIT License. The copyright notice in the
# LICENSE file in the root directory and this permission notice shall
# be included in all copies or substantial portions of the Software.
import os.path
import numpy as np
import matplotlib.pyplot as plt
from dqmc import map_params, parse
from dqmc.data import Database, compute_datasets
def transform_results(results):
tresults = [list() for _ in range(len(results[0]))]
num_res = len(tresults)
for res in results:
for i in range(num_res):
tresults[i].append(res[i])
    # Last result is the user callback, so it is not guaranteed to be convertible to np.array
out = [np.array(x) for x in tresults[:-1]]
last = tresults[-1]
try:
last = np.array(last)
except Exception: # noqa
pass
out.append(last)
return out
def average_observables(results):
# Only use observables, not Green's functions (first two items)
tresults = transform_results(results)[2:]
# Average observables
for i in range(len(tresults) - 1):
tresults[i] = np.mean(tresults[i], axis=1)
return tresults
def compute_data_temp(db, file, temps, interactions, max_workers=-1, batch=None,
overwrite=False):
print(f"Running simulations for {file}")
p_default = parse(file)
for u in interactions:
p = p_default.copy(u=u)
params = map_params(p, temp=temps)
        # Check which datasets already exist
missing = db.find_missing(params, overwrite)
# Compute missing datasets and store in database
head = f"U={u}"
if missing:
compute_datasets(db, missing, max_workers, batch_size=batch, header=head)
else:
print(f"{head}: Found existing data!")
print("Done!\n")
def plot_local_moment(db, file, temps, interactions, save=True):
p_default = parse(file)
directory, filename = os.path.split(file)
name = os.path.splitext(filename)[0]
figpath = os.path.join(directory, name + "_moment" + ".png")
fig, ax = plt.subplots()
ax.set_xscale("log")
for u in interactions:
p = p_default.copy(u=u)
params = map_params(p, temp=temps)
results = db.get_results(*params)
n_up, n_dn, n_double, moment, _ = average_observables(results)
ax.plot(temps, moment, marker="o", ms=3, label=f"$U={u}$")
ax.set_xlabel(r"$T$")
ax.set_ylabel(r"$\langle m_z^2 \rangle$")
ax.set_ylim(0.48, 1.02)
ax.set_xticks([0.1, 1, 10, 100])
ax.set_xticklabels(["0.1", "1", "10", "100"])
ax.grid()
ax.legend()
fig.tight_layout()
if save:
fig.savefig(figpath)
return fig, ax
def plot_magnetization(db, file, temps, interactions, save=True):
p_default = parse(file)
directory, filename = os.path.split(file)
name = os.path.splitext(filename)[0]
figpath = os.path.join(directory, name + "_mag" + ".png")
fig, ax = plt.subplots()
ax.set_xscale("log")
for u in interactions:
p = p_default.copy(u=u)
params = map_params(p, temp=temps)
results = db.get_results(*params)
n_up, n_dn, n_double, moment, _ = average_observables(results)
mag = n_up - n_dn
ax.plot(temps, mag, marker="o", ms=3, label=f"$U={u}$")
ax.set_xlabel(r"$T$")
ax.set_ylabel(r"$\langle m_z \rangle$")
# ax.set_ylim(0.48, 1.02)
ax.set_xticks([0.1, 1, 10, 100])
ax.set_xticklabels(["0.1", "1", "10", "100"])
ax.grid()
ax.legend()
fig.tight_layout()
if save:
fig.savefig(figpath)
return fig, ax
def main():
root = "examples"
batch = None
overwrite = False
db = Database(os.path.join(root, "examples.hdf5"))
temps = np.geomspace(0.1, 100, 20)
inter = [1, 2, 4, 6, 8]
max_workers = -1
# Find all text files in `root` directory
files = list()
for name in os.listdir(root):
if os.path.splitext(name)[1] == ".txt":
path = os.path.join(root, name)
files.append(path)
print(f"Found {len(files)} input files!")
print()
# Run simulations for each input file
for file in files:
compute_data_temp(db, file, temps, inter, max_workers, batch, overwrite)
plot_local_moment(db, file, temps, inter)
if __name__ == "__main__":
main()
| 28.947712
| 85
| 0.62452
|
0d3f8555557252a45274ebac4e4e8cd0cad05610
| 497
|
py
|
Python
|
plotly/validators/layout/scene/zaxis/_gridcolor.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 12
|
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/validators/layout/scene/zaxis/_gridcolor.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 27
|
2020-04-28T21:23:12.000Z
|
2021-06-25T15:36:38.000Z
|
plotly/validators/layout/scene/zaxis/_gridcolor.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 6
|
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
import _plotly_utils.basevalidators
class GridcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name='gridcolor',
parent_name='layout.scene.zaxis',
**kwargs
):
super(GridcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'plot'),
role=kwargs.pop('role', 'style'),
**kwargs
)
| 26.157895
| 70
| 0.609658
|
7b2b36ccedc8942471d98a57e09c428b3ad651e3
| 7,134
|
py
|
Python
|
nas-search/preprocessing.py
|
jsrimr/single-path-nas
|
2f07922ec51f71d0eab08d07bab3394ab2f423e9
|
[
"Apache-2.0"
] | 428
|
2019-04-08T00:57:29.000Z
|
2022-03-10T02:10:49.000Z
|
nas-search/preprocessing.py
|
jsrimr/single-path-nas
|
2f07922ec51f71d0eab08d07bab3394ab2f423e9
|
[
"Apache-2.0"
] | 15
|
2019-04-12T16:34:54.000Z
|
2021-03-21T14:56:48.000Z
|
nas-search/preprocessing.py
|
jsrimr/single-path-nas
|
2f07922ec51f71d0eab08d07bab3394ab2f423e9
|
[
"Apache-2.0"
] | 67
|
2019-04-08T03:49:49.000Z
|
2021-10-05T07:43:04.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ImageNet preprocessing (TPU models)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
IMAGE_SIZE = 224
CROP_PADDING = 32
def distorted_bounding_box_crop(image_bytes,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0),
max_attempts=100,
scope=None):
"""Generates cropped_image using one of the bboxes randomly distorted.
See `tf.image.sample_distorted_bounding_box` for more documentation.
Args:
image_bytes: `Tensor` of binary image data.
bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]`
where each coordinate is [0, 1) and the coordinates are arranged
as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole
image.
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
area of the image must contain at least this fraction of any bounding
box supplied.
aspect_ratio_range: An optional list of `float`s. The cropped area of the
image must have an aspect ratio = width / height within this range.
area_range: An optional list of `float`s. The cropped area of the image
must contain a fraction of the supplied image within in this range.
max_attempts: An optional `int`. Number of attempts at generating a cropped
region of the image of the specified constraints. After `max_attempts`
failures, return the entire image.
scope: Optional `str` for name scope.
Returns:
cropped image `Tensor`
"""
with tf.name_scope(scope, 'distorted_bounding_box_crop', [image_bytes, bbox]):
shape = tf.image.extract_jpeg_shape(image_bytes)
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
shape,
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, _ = sample_distorted_bounding_box
# Crop the image to the specified bounding box.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
return image
def _at_least_x_are_equal(a, b, x):
"""At least `x` of `a` and `b` `Tensors` are equal."""
match = tf.equal(a, b)
match = tf.cast(match, tf.int32)
return tf.greater_equal(tf.reduce_sum(match), x)
def _decode_and_random_crop(image_bytes, image_size):
"""Make a random crop of image_size."""
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
image = distorted_bounding_box_crop(
image_bytes,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(3. / 4, 4. / 3.),
area_range=(0.08, 1.0),
max_attempts=10,
scope=None)
original_shape = tf.image.extract_jpeg_shape(image_bytes)
bad = _at_least_x_are_equal(original_shape, tf.shape(image), 3)
image = tf.cond(
bad,
lambda: _decode_and_center_crop(image_bytes, image_size),
lambda: tf.image.resize_bicubic([image], # pylint: disable=g-long-lambda
[image_size, image_size])[0])
return image
def _decode_and_center_crop(image_bytes, image_size):
"""Crops to center of image with padding then scales image_size."""
shape = tf.image.extract_jpeg_shape(image_bytes)
image_height = shape[0]
image_width = shape[1]
padded_center_crop_size = tf.cast(
((image_size / (image_size + CROP_PADDING)) *
tf.cast(tf.minimum(image_height, image_width), tf.float32)),
tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
crop_window = tf.stack([offset_height, offset_width,
padded_center_crop_size, padded_center_crop_size])
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
image = tf.image.resize_bicubic([image], [image_size, image_size])[0]
return image
def _flip(image):
"""Random horizontal image flip."""
image = tf.image.random_flip_left_right(image)
return image
def preprocess_for_train(image_bytes, use_bfloat16, image_size=IMAGE_SIZE):
"""Preprocesses the given image for evaluation.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
use_bfloat16: `bool` for whether to use bfloat16.
image_size: image size.
Returns:
A preprocessed image `Tensor`.
"""
image = _decode_and_random_crop(image_bytes, image_size)
image = _flip(image)
image = tf.reshape(image, [image_size, image_size, 3])
image = tf.image.convert_image_dtype(
image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32)
return image
def preprocess_for_eval(image_bytes, use_bfloat16, image_size=IMAGE_SIZE):
"""Preprocesses the given image for evaluation.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
use_bfloat16: `bool` for whether to use bfloat16.
image_size: image size.
Returns:
A preprocessed image `Tensor`.
"""
image = _decode_and_center_crop(image_bytes, image_size)
image = tf.reshape(image, [image_size, image_size, 3])
image = tf.image.convert_image_dtype(
image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32)
return image
def preprocess_image(image_bytes,
is_training=False,
use_bfloat16=False,
image_size=IMAGE_SIZE):
"""Preprocesses the given image.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
is_training: `bool` for whether the preprocessing is for training.
use_bfloat16: `bool` for whether to use bfloat16.
image_size: image size.
Returns:
A preprocessed image `Tensor` with value range of [0, 255].
"""
if is_training:
return preprocess_for_train(image_bytes, use_bfloat16, image_size)
else:
return preprocess_for_eval(image_bytes, use_bfloat16, image_size)
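# A minimal input-pipeline sketch (assumes TF 1.x with tf.data available and a `filenames`
# list of JPEG paths; none of this is part of the original module):
#
# def _parse(filename):
#     image_bytes = tf.read_file(filename)
#     return preprocess_image(image_bytes, is_training=True, image_size=IMAGE_SIZE)
#
# dataset = tf.data.Dataset.from_tensor_slices(filenames).map(_parse).batch(32)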
| 37.350785
| 80
| 0.690496
|
b0adf90c0302054054e805fc6ee46aec9e32bf46
| 4,300
|
py
|
Python
|
authors/apps/notifications/tests/test_models.py
|
MuhweziDeo/Ah-backend-xmen
|
60c830977fa39a7eea9ab978a9ba0c3beb0c4d88
|
[
"BSD-3-Clause"
] | 4
|
2019-01-07T09:15:17.000Z
|
2020-11-09T09:58:54.000Z
|
authors/apps/notifications/tests/test_models.py
|
MuhweziDeo/Ah-backend-xmen
|
60c830977fa39a7eea9ab978a9ba0c3beb0c4d88
|
[
"BSD-3-Clause"
] | 34
|
2019-01-07T15:30:14.000Z
|
2019-03-06T08:23:34.000Z
|
authors/apps/notifications/tests/test_models.py
|
MuhweziDeo/Ah-backend-xmen
|
60c830977fa39a7eea9ab978a9ba0c3beb0c4d88
|
[
"BSD-3-Clause"
] | 10
|
2018-12-18T14:43:52.000Z
|
2020-02-07T08:27:50.000Z
|
from django.test import TestCase
from authors.apps.notifications.models import Notification, NotificationManager
from authors.apps.profiles.models import Profile
from authors.apps.authentication.models import User
class TestModel(TestCase):
def _create_notification(self, sender=None, receiver=None, message=None):
with self.assertRaises(TypeError):
Notification.objects.create_notification(
sender=sender,
receiver=receiver,
message=message
)
def _create_profile(self):
user = User.objects.create(username='jake2', email='jake2@jake.jake')
return Profile.objects.get(user=user)
def test_create_notification_missing_sender_and_receiver_fails(self):
with self.assertRaises(TypeError):
Notification.objects.create_notification(
sender=None,
receiver=None,
message="Your article has received a new comment"
)
def test_create_notification_missing_sender_fails(self):
receiver = self._create_profile()
self._create_notification(
None, receiver, "Your article has received a new comment")
def test_create_notification_missing_receiver_fails(self):
sender = self._create_profile()
self._create_notification(
sender, None, "Your article has received a new comment")
def test_create_notification_missing_message_fails(self):
r_user = User.objects.create(username='jake', email='jake@jake.jake')
receiver = Profile.objects.get(user=r_user)
s_user = User.objects.create(username='jake2', email='jake2@jake.jake')
sender = Profile.objects.get(user=s_user)
with self.assertRaises(TypeError):
Notification.objects.create_notification(
sender=sender,
receiver=receiver,
message=None
)
def test_create_notification_valid_data_succeeds(self):
r_user = User.objects.create(username='jake', email='jake@jake.jake')
receiver = Profile.objects.get(user=r_user)
s_user = User.objects.create(username='jake1', email='jake1@jake.jake')
sender = Profile.objects.get(user=s_user)
notice = Notification.objects.create_notification(
sender=sender,
receiver=receiver,
message="Your article has received a new comment"
)
self.assertIn(notice, Notification.objects.all())
def test_get_all_notifications_succeeds(self):
r_user = User.objects.create(username='jake', email='jake@jake.jake')
receiver = Profile.objects.get(user=r_user)
s_user = User.objects.create(username='jake1', email='jake1@jake.jake')
sender = Profile.objects.get(user=s_user)
Notification.objects.create_notification(
sender=sender,
receiver=receiver,
message="Your article has received a new comment"
)
notices = Notification.objects.get_all(receiver)
self.assertEqual(1,len(list(notices)))
def test_get_unread_notifications_succeeds(self):
r_user = User.objects.create(username='jake', email='jake@jake.jake')
receiver = Profile.objects.get(user=r_user)
s_user = User.objects.create(username='jake1', email='jake1@jake.jake')
sender = Profile.objects.get(user=s_user)
notice = Notification.objects.create_notification(
sender=sender,
receiver=receiver,
message="Your article has received a new comment"
)
notice.is_read = True
notice.save()
Notification.objects.create_notification(
sender=sender,
receiver=receiver,
message="Your article has received a new comment"
)
all_notices = Notification.objects.get_all(receiver)
self.assertEqual(2, len(list(all_notices)))
unread_notices = Notification.objects.get_unread(receiver)
self.assertEqual(1, len(list(unread_notices)))
Notification.objects.mark_all_as_read(receiver)
unread_notices = Notification.objects.get_unread(receiver)
self.assertEqual(0, len(list(unread_notices)))
| 42.156863
| 79
| 0.659302
|
00283d77fce3078d3d2326d537f07d544609e13c
| 703
|
py
|
Python
|
app/main/views.py
|
mzazakeith/news-highlight
|
8ac928bb074ad53e217648a3a76f83969438137a
|
[
"MIT"
] | null | null | null |
app/main/views.py
|
mzazakeith/news-highlight
|
8ac928bb074ad53e217648a3a76f83969438137a
|
[
"MIT"
] | null | null | null |
app/main/views.py
|
mzazakeith/news-highlight
|
8ac928bb074ad53e217648a3a76f83969438137a
|
[
"MIT"
] | null | null | null |
from flask import render_template,request,redirect,url_for
from . import main
from ..requests import get_sources,get_article
@main.route('/')
def index():
"""
Function that returns the index page and all its data
"""
sport_news = get_sources('sports')
tech_news = get_sources('technology')
title = 'ZIGS News-Breaking News,Latest News & Headlines'
return render_template('index.html', title=title, sports=sport_news, technology=tech_news)
#
@main.route('/source/<id>')
def articles(id):
"""
View articles
"""
article = get_article(id)
print(article)
title = f'ZIGS News ~ {id}'
return render_template('news.html', title=title, article=article)
| 27.038462
| 94
| 0.691323
|
b62a0d47a1afd135b5901fbf3fad61804d21b248
| 2,699
|
py
|
Python
|
edcnlp/utils/utils.py
|
edchengg/MyNLP
|
350c14efebc1440d6e132c6c516f0a02625df320
|
[
"MIT"
] | 1
|
2021-10-31T21:40:25.000Z
|
2021-10-31T21:40:25.000Z
|
edcnlp/utils/utils.py
|
edchengg/MyNLP
|
350c14efebc1440d6e132c6c516f0a02625df320
|
[
"MIT"
] | null | null | null |
edcnlp/utils/utils.py
|
edchengg/MyNLP
|
350c14efebc1440d6e132c6c516f0a02625df320
|
[
"MIT"
] | null | null | null |
from edcnlp.utils.constant import MODELS_dict
import torch
from transformers import BertConfig, BertTokenizer
def display(option):
sorted(option.items(), key=lambda s: s[0])
for k, v in option.items():
print(k, '=', v)
def build_pretrained_model_from_huggingface(option, add_tokens=None):
# Define pretrained model
pretrained_model_dic = MODELS_dict[option['pretrained_model']]
ckpt = pretrained_model_dic['checkpoint']
Pretrained_model = pretrained_model_dic['model'].from_pretrained(ckpt, output_hidden_states=True)
if 'uncased' in option['pretrained_model']:
indicator = True
else:
indicator = False
tokenizer = pretrained_model_dic['tokenizer'].from_pretrained(ckpt, do_lower_case=indicator)
if 'Bert' in option['pretrained_model']:
tokenizer.bos_token = '[CLS]'
tokenizer.eos_token = '[SEP]'
tokenizer.unk_token = '[UNK]'
tokenizer.sep_token = '[SEP]'
tokenizer.cls_token = '[CLS]'
tokenizer.mask_token = '[MASK]'
tokenizer.pad_token = '[PAD]'
if add_tokens != None:
tokenizer.add_tokens(add_tokens)
print('We have added', add_tokens, 'tokens')
Pretrained_model.resize_token_embeddings(len(tokenizer))
return Pretrained_model, tokenizer
def build_pretrained_model_from_ckpt(option, add_tokens, device):
'''
Loading a pretrained model from a pytorch ckpt
BERT BASED
:param option:
:return:
'''
# define BERT model
pretrained_model_dic = MODELS_dict['Bert_base_uncased']
# config = BertConfig.from_json_file(option['pretrained_model'] + '/config.json')
# config.output_hidden_states = True
# Pretrained_model = pretrained_model_dic['model'](config=config)
# Pretrained_model.load_state_dict(torch.load(option['pretrained_model'] + '/pytorch_model.bin', map_location=device),
# strict=False)
Pretrained_model = pretrained_model_dic['model'].from_pretrained(option['pretrained_model'])
lower_case_flag = True
print('lower_case_flag: ', lower_case_flag)
tokenizer = BertTokenizer.from_pretrained(
option['pretrained_model'],
do_lower_case=lower_case_flag)
tokenizer.bos_token = '[CLS]'
tokenizer.eos_token = '[SEP]'
tokenizer.unk_token = '[UNK]'
tokenizer.sep_token = '[SEP]'
tokenizer.cls_token = '[CLS]'
tokenizer.mask_token = '[MASK]'
tokenizer.pad_token = '[PAD]'
if add_tokens != None:
tokenizer.add_tokens(add_tokens)
print('We have added', add_tokens, 'tokens')
Pretrained_model.resize_token_embeddings(len(tokenizer))
return Pretrained_model, tokenizer
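# A minimal usage sketch (hedged: the 'Bert_base_uncased' key is assumed to exist in
# MODELS_dict, as the ckpt loader above suggests, and the added token is made up):
#
# option = {'pretrained_model': 'Bert_base_uncased'}
# model, tokenizer = build_pretrained_model_from_huggingface(option, add_tokens=['[ENT]'])
# input_ids = tokenizer.encode('hello world')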
| 38.014085
| 122
| 0.690997
|
c1e6cb405e76572e742225373b25cbee65975e1e
| 5,857
|
py
|
Python
|
repost_sles/main.py
|
SaiFi0102/repost_sles
|
453d671f071135ac62609a4ecdfd15b304d69c49
|
[
"MIT"
] | 1
|
2021-07-27T19:50:07.000Z
|
2021-07-27T19:50:07.000Z
|
repost_sles/main.py
|
SaiFi0102/repost_sles
|
453d671f071135ac62609a4ecdfd15b304d69c49
|
[
"MIT"
] | null | null | null |
repost_sles/main.py
|
SaiFi0102/repost_sles
|
453d671f071135ac62609a4ecdfd15b304d69c49
|
[
"MIT"
] | null | null | null |
import frappe
import datetime
import os
import json
from frappe.utils import flt
def repost_all_stock_vouchers(from_date, repost_gle=True, update_source_doc=False):
import repost_sles.overrides
from manufacturing_dv.events.stock_entry import validate
frappe.flags.ignored_closed_or_disabled = 1
frappe.flags.do_not_update_reserved_qty = 1
frappe.db.auto_commit_on_many_writes = 1
print("Repost GLEs: {0}".format(repost_gle))
print("Update Source Documents: {0}".format(update_source_doc))
date_where_condition = ""
date_and_condition = ""
if from_date:
print("From Date: {0}".format(frappe.format(from_date)))
date_condition = "posting_date >= {0}".format(frappe.db.escape(from_date))
date_where_condition = "where {0}".format(date_condition)
date_and_condition = "and {0}".format(date_condition)
print("Enabling Allow Negative Stock")
frappe.db.set_value("Stock Settings", None, "allow_negative_stock", 1)
filename = "repost_all_stock_vouchers_checkpoint.json"
if not os.path.isfile(filename):
print("No checkpoint found")
print("Updating Purchase Valuation Rates")
precs = frappe.db.sql("select 'Purchase Receipt' as doctype, name from `tabPurchase Receipt` where docstatus=1 {0}".format(date_and_condition))
pinvs = frappe.db.sql("select 'Purchase Invoice' as doctype, name from `tabPurchase Invoice` where docstatus=1 {0}".format(date_and_condition))
for doctype, name in precs + pinvs:
doc = frappe.get_doc(doctype, name)
doc.set_landed_cost_voucher_amount()
doc.update_valuation_rate("items")
for d in doc.items:
d.db_update()
doc.clear_cache()
frappe.db.commit()
print("Getting Stock Vouchers List")
vouchers = frappe.db.sql("""
select distinct voucher_type, voucher_no
from `tabStock Ledger Entry` sle
{0}
order by posting_date, posting_time, creation
""".format(date_where_condition))
print("Deleting SLEs")
frappe.db.sql("delete from `tabStock Ledger Entry` {0}".format(date_where_condition))
if repost_gle:
print("Deleting GLEs")
for voucher_type, voucher_no in vouchers:
frappe.db.sql("""delete from `tabGL Entry` where voucher_type=%s and voucher_no=%s""", (voucher_type, voucher_no))
print()
frappe.db.commit()
else:
print("Checkpoint found")
with open(filename, "r") as f:
vouchers = json.loads(f.read())
start_time = datetime.datetime.now()
print("Starting at: {0}".format(start_time))
i = 0
for voucher_type, voucher_no in vouchers:
try:
if not frappe.db.exists(voucher_type, voucher_no):
i += 1
print("{0} / {1}: {2} {3} NOT FOUND! Skipping"
.format(i, len(vouchers), voucher_type, voucher_no))
continue
doc = frappe.get_doc(voucher_type, voucher_no)
if voucher_type == "Stock Entry":
doc.calculate_rate_and_amount()
validate(doc, 'validate', set_cullet_rate=False)
if update_source_doc:
doc.db_update()
for d in doc.items:
d.db_update()
elif voucher_type=="Purchase Receipt":
if doc.is_subcontracted == "Yes":
doc.validate()
default_cost_center = frappe.get_cached_value("Company", doc.company, "cost_center")
for d in doc.items:
if not d.cost_center:
d.cost_center = default_cost_center
elif voucher_type=="Delivery Note":
for d in doc.items:
if d.target_warehouse:
d.db_set('target_warehouse', None)
doc.update_stock_ledger()
if repost_gle:
doc.make_gl_entries(repost_future_gle=False, from_repost=True)
frappe.db.commit()
i += 1
doc.clear_cache()
now_time = datetime.datetime.now()
total_duration = now_time - start_time
repost_rate = flt(i) / total_duration.seconds if total_duration.seconds else "Inf"
remaining_duration = datetime.timedelta(seconds=(len(vouchers) - i) / flt(repost_rate)) if flt(repost_rate) else "N/A"
print("{0} / {1}: Elapsed Time: {4} | Rate: {5:.2f} Vouchers/Sec | ETA: {6} | {2} {3}".format(i, len(vouchers), voucher_type, voucher_no,
total_duration, flt(repost_rate), remaining_duration))
except:
with open(filename, "w") as f:
print("Creating checkpoint")
f.write(json.dumps(vouchers[i:]))
frappe.db.rollback()
raise
print("Disabling Allow Negative Stock")
frappe.db.set_value("Stock Settings", None, "allow_negative_stock", 0)
frappe.db.commit()
frappe.db.auto_commit_on_many_writes = 0
if os.path.isfile(filename):
print("Deleting Checkpoint")
os.remove(filename)
def set_basic_rate_manually(item_code, rate, from_date, to_date):
rate = flt(rate)
print("Item Code: {0}".format(item_code))
print("Rate: {0}".format(frappe.format(rate)))
print("From Date: {0}".format(from_date))
print("To Date: {0}".format(to_date))
date_condition = ""
if from_date:
date_condition += " and ste.posting_date >= %(from_date)s"
	if to_date:
date_condition += " and ste.posting_date <= %(to_date)s"
args = {
'item_code': item_code,
'from_date': from_date,
'to_date': to_date,
'rate': rate
}
stes = frappe.db.sql_list("""
select distinct ste.name
from `tabStock Entry` ste
inner join `tabStock Entry Detail` d on d.parent = ste.name
where ste.purpose in ('Manufacture', 'Repack', 'Material Receipt')
and ste.docstatus < 2
and d.item_code = %(item_code)s
and ifnull(d.t_warehouse, '') != ''
{0}
""".format(date_condition), args)
if stes:
for name in stes:
print(name)
frappe.db.sql("""
update `tabStock Entry Detail` d
inner join `tabStock Entry` ste on d.parent = ste.name
set d.basic_rate = %(rate)s, d.set_basic_rate_manually = 1
where ste.purpose in ('Manufacture', 'Repack', 'Material Receipt')
and ste.docstatus < 2
and d.item_code = %(item_code)s
and ifnull(d.t_warehouse, '') != ''
{0}
""".format(date_condition), args)
else:
print("No Manufacture/Repack Entry found with Finished Good Item {0} found".format(item_code))
| 30.989418
| 145
| 0.708554
|
671f62ecb41dde76cdf0010ef917b978f1691a8c
| 1,368
|
py
|
Python
|
tools/refactoring/datagetters.py
|
ImagiaViz/inviwo
|
a00bb6b0551bc1cf26dc0366c827c1a557a9603d
|
[
"BSD-2-Clause"
] | 349
|
2015-01-30T09:21:52.000Z
|
2022-03-25T03:10:02.000Z
|
tools/refactoring/datagetters.py
|
liu3xing3long/inviwo
|
69cca9b6ecd58037bda0ed9e6f53d02f189f19a7
|
[
"BSD-2-Clause"
] | 641
|
2015-09-23T08:54:06.000Z
|
2022-03-23T09:50:55.000Z
|
tools/refactoring/datagetters.py
|
liu3xing3long/inviwo
|
69cca9b6ecd58037bda0ed9e6f53d02f189f19a7
|
[
"BSD-2-Clause"
] | 124
|
2015-02-27T23:45:02.000Z
|
2022-02-21T09:37:14.000Z
|
import sys
import os
import re
import colorama
colorama.init()
import refactoring # Note: refactoring.py need to be in the current working directory
#paths = [
# "/Users/petst/Work/Projects/Inviwo-Developent/Private/Inviwo-dev",
# "/Users/petst/Work/Projects/Inviwo-Developent/Private/Inviwo-research"
#]
paths = [
"C:/Users/petst55/Work/Inviwo/Inviwo-dev",
"C:/Users/petst55/Work/Inviwo/Inviwo-research"
]
excludespatterns = ["*/ext/*", "*moc_*", "*cmake*", "*/proteindocking/*", "*/proteindocking2/*", "*/genetree/*", "*/vrnbase/*"];
files = refactoring.find_files(paths, ['*.h', '*.cpp'], excludes=excludespatterns)
def replace(pattern, replacement) :
print("Matches:")
matches = refactoring.find_matches(files, pattern)
print("\n")
print("Replacing:")
refactoring.replace_matches(matches, pattern, replacement)
replacements1 = {
"getValueAsSingleDouble" : "getAsNormalizedDouble",
"getValueAsVec2Double" : "getAsNormalizedDVec2",
"getValueAsVec3Double" : "getAsNormalizedDVec3",
"getValueAsVec4Double" : "getAsNormalizedDVec4",
"setValueFromSingleDouble" : "setFromDouble",
"setValueFromVec2Double" : "setFromDVec2",
"setValueFromVec3Double" : "setFromDVec3",
"setValueFromVec4Double" : "setFromDVec4"
}
print("Looking in " + str(len(files)) + " files")
for k,v in replacements1.items():
replace(r"\b"+k+r"\b", v)
| 29.73913
| 128
| 0.716374
|
9f0930252da80a641b99fa16d582df66bcbfdad7
| 1,829
|
py
|
Python
|
setup.py
|
emory-libraries/emory-baggins
|
53c202782c1b52925b0cbd685a7faa617a6251d0
|
[
"Apache-2.0"
] | 2
|
2016-10-18T21:30:15.000Z
|
2017-03-14T13:40:23.000Z
|
setup.py
|
emory-libraries/emory-baggins
|
53c202782c1b52925b0cbd685a7faa617a6251d0
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
emory-libraries/emory-baggins
|
53c202782c1b52925b0cbd685a7faa617a6251d0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
import baggins
LONG_DESCRIPTION = None
try:
# read the description if it's there
with open('README.md') as desc_f:
LONG_DESCRIPTION = desc_f.read()
except:
pass
CLASSIFIERS = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Archiving',
'Topic :: Utilities'
]
requirements = [
'eulxml',
'requests',
'pymarc',
'bagit',
'eulfedora',
'pyyaml',
'cached-property',
'awesome-slugify',
]
test_requirements = ['pytest', 'pytest-cov', 'mock']
setup(
name='baggins',
version=baggins.__version__,
author='Emory University Libraries',
author_email='libsysdev-l@listserv.cc.emory.edu',
url='https://github.com/emory-libraries/emory-baggins',
license='Apache License, Version 2.0',
packages=find_packages(),
install_requires=requirements,
dependency_links=["git+ssh://git@github.com/LibraryOfCongress/bagit-python.git@master#egg=bagit"],
setup_requires=['pytest-runner'],
tests_require=test_requirements,
extras_require={
'test': test_requirements,
},
description='scripts and utilities for creating bagit archives of digital content',
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
scripts=['scripts/lsdi-bagger'],
package_data={'baggins': [
"lsdi/content/*.*"
]}
)
| 27.712121
| 102
| 0.669218
|
bd88f4ca60a3611487daa4eb999b8a5770ea6f82
| 1,242
|
py
|
Python
|
OOP_the_internet_herokuapp/src/page/form_auth_page.py
|
AntonioIonica/Automation_testing
|
6f7c94c55677b0958e6fada24058f1a00d2c0d0e
|
[
"MIT"
] | null | null | null |
OOP_the_internet_herokuapp/src/page/form_auth_page.py
|
AntonioIonica/Automation_testing
|
6f7c94c55677b0958e6fada24058f1a00d2c0d0e
|
[
"MIT"
] | null | null | null |
OOP_the_internet_herokuapp/src/page/form_auth_page.py
|
AntonioIonica/Automation_testing
|
6f7c94c55677b0958e6fada24058f1a00d2c0d0e
|
[
"MIT"
] | null | null | null |
"""
Login on Form authentication page
taking the user and the password from the text
(italic words)
"""
from selenium.webdriver.common.by import By
from OOP_the_internet_herokuapp.src.page.base_page import BasePage
class FormAuth(BasePage):
USR = (By.ID, 'username')
PWD = (By.ID, 'password')
LOGIN_BTN = (By.XPATH, '//*[@id="login"]/button')
TEXT_AUTH = (By.XPATH, '//*[@id="content"]/div/h4')
def invalid_login(self):
self.driver.find_element(*self.USR).send_keys('admin')
self.driver.find_element(*self.PWD).send_keys('test123')
self.driver.find_element(*self.LOGIN_BTN).click()
self.driver.implicitly_wait(3)
def valid_login(self):
auth_text = self.driver.find_element(*self.TEXT_AUTH).text
text_list = auth_text.split('.')[1].split()
print(text_list)
usr = [text_list[i + 1] for i in range(0, len(text_list)) if text_list[i] == 'Enter']
pwd = [text_list[i + 1] for i in range(0, len(text_list)) if text_list[i] == 'and']
self.driver.find_element(*self.USR).send_keys(usr)
self.driver.find_element(*self.PWD).send_keys(pwd)
self.driver.find_element(*self.LOGIN_BTN).click()
self.driver.implicitly_wait(3)
| 37.636364
| 93
| 0.663446
|
4eaf787ad7501957ff06c5f4067d15003f89f8b1
| 310
|
py
|
Python
|
Hacker Rank/Python/Exception.py
|
Ahmad-Fahad/Python
|
5a5f8f3395f7085947430b8309f6af70b2e25a77
|
[
"Apache-2.0"
] | null | null | null |
Hacker Rank/Python/Exception.py
|
Ahmad-Fahad/Python
|
5a5f8f3395f7085947430b8309f6af70b2e25a77
|
[
"Apache-2.0"
] | null | null | null |
Hacker Rank/Python/Exception.py
|
Ahmad-Fahad/Python
|
5a5f8f3395f7085947430b8309f6af70b2e25a77
|
[
"Apache-2.0"
] | null | null | null |
t = int(input())
while t>0:
t-=1
dividend, divisor = input().split()
try:
q = int(dividend)//int(divisor)
print(q)
except ZeroDivisionError:
print("Error Code: integer division or modulo by zero")
    except ValueError as ve:
        print("Error Code: {}".format(ve))
| 23.846154
| 81
| 0.687097
|
389891c275bd40bbd1a08da7cc62658b9a217ab8
| 1,932
|
py
|
Python
|
models/beta/vss.py
|
YorkSu/hat
|
b646b6689f3d81c985ed13f3d5c23b6c717fd07d
|
[
"Apache-2.0"
] | 1
|
2019-04-10T04:49:30.000Z
|
2019-04-10T04:49:30.000Z
|
models/beta/vss.py
|
Suger131/HAT-tf2.0
|
b646b6689f3d81c985ed13f3d5c23b6c717fd07d
|
[
"Apache-2.0"
] | null | null | null |
models/beta/vss.py
|
Suger131/HAT-tf2.0
|
b646b6689f3d81c985ed13f3d5c23b6c717fd07d
|
[
"Apache-2.0"
] | 1
|
2019-06-14T05:53:42.000Z
|
2019-06-14T05:53:42.000Z
|
"""
VGG-Swish-SE
For Cifar10
  Default total parameter counts for this model [reference baseline: cifar10]:
Total params: 16,402,790
Trainable params: 16,390,246
Non-trainable params: 12,544
"""
from hat.models.advance import AdvNet
class vss(AdvNet):
"""
VSS
"""
def args(self):
self.CONV_KI = 'he_normal'
self.CONV_KR = None # l2(0.0005)
self.TIME = [2, 2, 3, 3, 3]
self.CONV = [64, 128, 256, 512, 512]
self.BATCH_SIZE = 128
self.EPOCHS = 384
self.OPT = 'Adam'
def build_model(self):
# params processing
self.CONV_ = list(zip(
self.TIME,
self.CONV,
))
x_in = self.input(self.INPUT_SHAPE)
x = x_in
for ix, i in enumerate(self.CONV_):
if ix: x = self.poolx(x)
x = self.repeat(self.conv_s, *i)(x)
x = self.SE(x)
x = self.GAPool(x)
x = self.local_s(x, 1024)
x = self.dropout(x, 0.3 )
x = self.local_s(x, 1024)
x = self.dropout(x, 0.3 )
x = self.local(x, self.NUM_CLASSES, activation='softmax')
return self.Model(inputs=x_in, outputs=x, name='vss')
def conv_s(self, x_in, filters, kernel_size=3):
x = self.conv(
x_in,
filters,
kernel_size,
kernel_initializer=self.CONV_KI,
kernel_regularizer=self.CONV_KR
)
x = self.bn(x)
x = self.swish(x)
# x = self.SE(x)
return x
def local_s(self, x_in, units):
x = self.local(
x_in,
units,
activation=None,
kernel_initializer=self.CONV_KI,
)
x = self.bn(x)
x = self.swish(x)
return x
def poolx(self, x_in, pool_size=3, strides=2):
maxx = self.maxpool(x_in, pool_size=pool_size, strides=strides)
avgx = self.avgpool(x_in, pool_size=pool_size, strides=strides)
x = self.add([maxx, avgx])
return x
# test part
if __name__ == "__main__":
mod = vss(DATAINFO={'INPUT_SHAPE': (256, 256, 3), 'NUM_CLASSES': 120}, built=True)
mod.summary()
| 19.714286
| 84
| 0.586439
|
96a789a1572b5b11a0e3a8353f82c7b70e9d7249
| 2,805
|
py
|
Python
|
calc_stats.py
|
jianguoz/DNNC-few-shot-intent
|
ea6f39a225d62f87c05f3628206b06dcaeb56f01
|
[
"MIT"
] | 30
|
2020-11-13T22:21:51.000Z
|
2022-03-02T03:46:46.000Z
|
calc_stats.py
|
jianguoz/DNNC-few-shot-intent
|
ea6f39a225d62f87c05f3628206b06dcaeb56f01
|
[
"MIT"
] | 5
|
2021-04-14T04:48:59.000Z
|
2021-08-04T11:22:43.000Z
|
calc_stats.py
|
jianguoz/DNNC-few-shot-intent
|
ea6f39a225d62f87c05f3628206b06dcaeb56f01
|
[
"MIT"
] | 7
|
2020-11-17T08:56:40.000Z
|
2022-03-30T06:22:48.000Z
|
# Copyright 2020, Salesforce.com, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import glob
import sys
import torch
from models.utils import THRESHOLDS
def main():
file_name = sys.argv[1]
if len(sys.argv) >= 3:
best_index = int(sys.argv[2])
else:
best_index = None
in_acc = []
oos_recall = []
oos_prec = []
oos_f1 = []
with open(file_name, 'r') as f:
for line in f:
in_acc.append([])
oos_recall.append([])
oos_prec.append([])
oos_f1.append([])
for elms in line.strip().split():
elms = elms.split(',')
in_acc[-1].append(float(elms[0]))
oos_recall[-1].append(float(elms[1]))
oos_prec[-1].append(float(elms[2]))
oos_f1[-1].append(float(elms[3]))
in_acc = torch.FloatTensor(in_acc) * 100
oos_recall = torch.FloatTensor(oos_recall) * 100
oos_prec = torch.FloatTensor(oos_prec) * 100
oos_f1 = torch.FloatTensor(oos_f1) * 100
if best_index is None:
best_index = (in_acc.mean(dim = 0) + oos_recall.mean(dim = 0)).max(dim = 0)[1]
print()
print('Best threshold: {} (index: {})'.format(THRESHOLDS[best_index], best_index))
print('Best in_acc: {} std: {}'.format(in_acc.mean(dim = 0)[best_index], in_acc.std(dim = 0)[best_index]))
print('Best oos_recall: {} std: {}'.format(oos_recall.mean(dim = 0)[best_index], oos_recall.std(dim = 0)[best_index]))
print('Best oos_prec: {} std: {}'.format(oos_prec.mean(dim = 0)[best_index], oos_prec.std(dim = 0)[best_index]))
print('Best oos_f1: {} std: {}'.format(oos_f1.mean(dim = 0)[best_index], oos_f1.std(dim = 0)[best_index]))
if __name__ == '__main__':
main()
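# A minimal sketch of the input format the parsing loop above expects: one evaluation run
# per line, one whitespace-separated token per threshold candidate, each token packing
# in_acc,oos_recall,oos_prec,oos_f1 as comma-separated floats. The file name and the
# values below are made up for illustration.
rows = [
    [(0.91, 0.80, 0.85, 0.82), (0.93, 0.77, 0.88, 0.82), (0.95, 0.70, 0.90, 0.79)],
    [(0.90, 0.82, 0.84, 0.83), (0.92, 0.78, 0.87, 0.82), (0.94, 0.72, 0.89, 0.80)],
]
with open('results.txt', 'w') as f:
    for run in rows:
        f.write(' '.join(','.join(str(v) for v in metrics) for metrics in run) + '\n')
# The script would then be invoked as: python calc_stats.py results.txt [best_index]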
| 48.362069
| 462
| 0.653476
|
af63580a7c0606ac4c2771534c5efbdf0ef0ef49
| 1,567
|
py
|
Python
|
aliyun-python-sdk-airec/aliyunsdkairec/request/v20181012/QuerySingleAggregationReportRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | 1
|
2021-03-08T02:59:17.000Z
|
2021-03-08T02:59:17.000Z
|
aliyun-python-sdk-airec/aliyunsdkairec/request/v20181012/QuerySingleAggregationReportRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | 1
|
2020-05-31T14:51:47.000Z
|
2020-05-31T14:51:47.000Z
|
aliyun-python-sdk-airec/aliyunsdkairec/request/v20181012/QuerySingleAggregationReportRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
from aliyunsdkairec.endpoint import endpoint_data
class QuerySingleAggregationReportRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'Airec', '2018-10-12', 'QuerySingleAggregationReport','airec')
self.set_uri_pattern('/openapi/instances/[InstanceId]/sync-reports/single-aggregation-report')
self.set_method('GET')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_InstanceId(self):
return self.get_path_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_path_param('InstanceId',InstanceId)
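# A minimal sketch of driving this request through the core SDK client; the credential,
# region and instance-id placeholders are assumptions, while AcsClient and
# do_action_with_exception come from aliyunsdkcore.
from aliyunsdkcore.client import AcsClient

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
request = QuerySingleAggregationReportRequest()
request.set_InstanceId('<your-instance-id>')
response = client.do_action_with_exception(request)
print(response)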
| 40.179487
| 97
| 0.774729
|
73efe75c50a3a004c875914b98e876a15b6825ba
| 1,895
|
py
|
Python
|
uhd_restpy/testplatform/sessions/ixnetwork/topology/localipv6address_db0b032b0167051f08ee1b875f3bb0d5.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 20
|
2019-05-07T01:59:14.000Z
|
2022-02-11T05:24:47.000Z
|
uhd_restpy/testplatform/sessions/ixnetwork/topology/localipv6address_db0b032b0167051f08ee1b875f3bb0d5.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 60
|
2019-04-03T18:59:35.000Z
|
2022-02-22T12:05:05.000Z
|
uhd_restpy/testplatform/sessions/ixnetwork/topology/localipv6address_db0b032b0167051f08ee1b875f3bb0d5.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 13
|
2019-05-20T10:48:31.000Z
|
2021-10-06T07:45:44.000Z
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
from typing import List, Any, Union
class LocalIpv6Address(Base):
"""
The LocalIpv6Address class encapsulates a required localIpv6Address resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'localIpv6Address'
_SDM_ATT_MAP = {
'Count': 'count',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(LocalIpv6Address, self).__init__(parent, list_op)
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: total number of values
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
| 36.442308
| 157
| 0.718734
|
f914e149a54efbf7a03106daa08f2bb56f200639
| 11,751
|
py
|
Python
|
webdriver_wharf/app.py
|
mshriver/webdriver-wharf
|
d02091805dea2830a0fcd1044ba51339cafa453a
|
[
"MIT"
] | 4
|
2017-12-21T10:30:24.000Z
|
2021-11-27T13:58:55.000Z
|
webdriver_wharf/app.py
|
mshriver/webdriver-wharf
|
d02091805dea2830a0fcd1044ba51339cafa453a
|
[
"MIT"
] | 13
|
2017-05-18T14:58:45.000Z
|
2019-07-02T20:52:59.000Z
|
webdriver_wharf/app.py
|
mshriver/webdriver-wharf
|
d02091805dea2830a0fcd1044ba51339cafa453a
|
[
"MIT"
] | 4
|
2017-05-18T15:46:21.000Z
|
2020-03-17T14:21:56.000Z
|
import logging
import os
from datetime import datetime
from pkg_resources import require
from threading import Thread
from time import sleep, time
from operator import attrgetter
from apscheduler.schedulers.background import BackgroundScheduler
from docker.errors import APIError
from flask import Flask, jsonify, request, render_template
from pytz import utc
from requests.exceptions import RequestException
from webdriver_wharf import db, interactions, lock
pool = set()
logger = logging.getLogger(__name__)
image_name = os.environ.get("WEBDRIVER_WHARF_IMAGE", "cfmeqe/cfme_sel_stable")
# Number of containers to have on "hot standby" for checkout
pool_size = int(os.environ.get("WEBDRIVER_WHARF_POOL_SIZE", 4))
# Max time for an appliance to be checked out before it's forcibly checked in, in seconds.
max_checkout_time = int(os.environ.get("WEBDRIVER_WHARF_MAX_CHECKOUT_TIME", 3600))
pull_interval = int(os.environ.get("WEBDRIVER_WHARF_IMAGE_PULL_INTERVAL", 3600))
rebalance_interval = int(os.environ.get("WEBDRIVER_WHARF_REBALANCE_INTERVAL", 3600 * 6))
no_content = ("", 204)
application = Flask("webdriver_wharf")
@application.route("/checkout")
def checkout():
if not pool:
logger.info("Pool exhausted on checkout, waiting for an available container")
balance_containers.trigger()
# Sleep until we get a container back with selenium running
while True:
try:
with lock:
container = pool.pop()
keepalive(container)
if not interactions.check_selenium(container):
continue
break
except KeyError:
# pool pop blew up, still no containers in the pool
sleep(1)
logger.info("Container %s checked out", container.name)
info = container_info(container)
info.update(expiry_info(container))
balance_containers.trigger()
return jsonify(**{container.name: info})
@application.route("/checkin/<container_name>")
def checkin(container_name):
if container_name == "all":
for container in interactions.containers():
stop_async(container)
logger.info("All containers checked in")
else:
container = db.Container.from_name(container_name)
if container:
stop_async(container)
logger.info("Container %s checked in", container.name)
balance_containers.trigger()
return no_content
@application.route("/pull")
def pull():
pull_latest_image.trigger()
logger.info("Pull latest image triggered")
return no_content
@application.route("/rebalance")
def balance():
balance_containers.trigger()
logger.info("Rebalance triggered")
return no_content
@application.route("/renew/<container_name>")
def renew(container_name):
container = db.Container.from_name(container_name)
if container:
keepalive(container)
logger.info("Container %s renewed", container.name)
out = expiry_info(container)
else:
out = {}
return jsonify(**out)
@application.route("/status")
def status():
containers = interactions.running()
return jsonify(
**{container.name: container_info(container) for container in containers}
)
@application.route("/status/<container_name>")
def container_status(container_name):
container = db.Container.from_name(container_name)
if container:
out = {container_name: container_info(container)}
else:
out = {}
return jsonify(**out)
@application.route("/")
def index():
return render_template("index.html", max_checkout_time=max_checkout_time)
def container_info(container):
host = requesting_host()
host_noport = host.split(":")[0]
data = {
"image_id": container.image_id,
"checked_out": container.checked_out,
"checkin_url": "http://{}/checkin/{}".format(host, container.name),
"renew_url": "http://{}/renew/{}".format(host, container.name),
}
def porturl(key, viewkey, fmt):
the_port = getattr(container, key)
if the_port:
data[key] = the_port
data[viewkey] = fmt.format(host=host_noport, port=the_port)
else:
data.setdefault("missing_keys", []).extend([key, viewkey])
porturl("webdriver_port", "webdriver_url", "http://{host}:{port}/wd/hub")
porturl("vnc_port", "vnc_display", "vnc://{host}:{port}")
porturl("http_port", "fileviewer_url", "http://{host}:{port}/")
return data
def keepalive(container):
with db.transaction() as session:
container.checked_out = datetime.utcnow()
session.merge(container)
def _stop_async_worker(container):
interactions.stop(container)
balance_containers.trigger()
def stop_async(container):
with lock:
try:
pool.remove(container)
except KeyError:
pass
Thread(target=_stop_async_worker, args=(container,)).start()
def expiry_info(container):
    # Expiry time for checkout and renew returns, plus renewal url.
    # Includes 'now' as seen by the wharf in addition to the expire time so
    # the client can choose how to best handle renewals without doing its own clock math.
host = requesting_host()
now = int(time())
expire_time = now + max_checkout_time
return {
"renew_url": "http://{}/renew/{}".format(host, container.name),
"now": now,
"expire_time": expire_time,
"expire_interval": max_checkout_time,
}
def requesting_host():
return request.headers.get("Host")
# Scheduled tasks use docker for state, so use memory for jobs
scheduler = BackgroundScheduler(
{
"apschedule.jobstores.default": {"type": "sqlite", "engine": db.engine()},
"apscheduler.executors.default": {
"class": "apscheduler.executors.pool:ThreadPoolExecutor",
"max_workers": "15",
},
},
daemon=True,
)
@scheduler.scheduled_job(
"interval", id="pull_latest_image", seconds=pull_interval, timezone=utc
)
def pull_latest_image():
# Try to pull a new image
if interactions.pull(image_name):
# If we got a new image, trigger a rebalance
balance_containers.trigger()
pull_latest_image.trigger = lambda: scheduler.modify_job(
"pull_latest_image", next_run_time=datetime.now()
)
def stop_outdated():
# Clean up before interacting with the pool:
# - checks in containers that are checked out longer than the max lifetime
# - stops containers that aren't running if their image is out of date
# - stops containers from the pool not running selenium
for container in interactions.containers():
if container.checked_out:
checked_out_time = (
datetime.utcnow() - container.checked_out
).total_seconds()
if checked_out_time > max_checkout_time:
logger.info(
"Container %s checked out longer than %d seconds, forcing stop",
container.name,
max_checkout_time,
)
interactions.stop(container)
continue
else:
if container.image_id != interactions.last_pulled_image_id:
logger.info("Container %s running an old image", container.name)
interactions.stop(container)
continue
@scheduler.scheduled_job(
"interval", id="balance_containers", seconds=rebalance_interval, timezone=utc
)
def balance_containers():
try:
stop_outdated()
pool_balanced = False
while not pool_balanced:
# Grabs/releases the lock each time through the loop so checkouts don't have to wait
# too long if a container's being destroyed
with lock:
                # Make sure the number of running containers that aren't checked out matches the pool size
containers = interactions.containers()
running = interactions.running(*containers)
not_running = containers - running
checked_out = set(filter(lambda c: bool(c.checked_out), running))
# Reset the global pool based on the current derived state
pool.clear()
pool.update(running - checked_out)
pool_stat_str = "%d/%d - %d checked out - %d to destroy" % (
len(pool),
pool_size,
len(checked_out),
len(not_running),
)
containers_to_start = pool_size - len(pool)
containers_to_stop = len(pool) - pool_size
# Starting containers can happen at-will, and shouldn't be done under lock
# so that checkouts don't have to block unless the pool is exhausted
if containers_to_start > 0:
if containers_to_start > 4:
                    # limit the number of containers to start so we
# don't spend *too* much time refilling the pool if
# there's more work to be done
containers_to_start = 4
logger.info(
"Pool %s, adding %d containers", pool_stat_str, containers_to_start
)
interactions.create_containers(image_name, containers_to_start)
# after starting, continue the loop to ensure that
# starting new containers happens before destruction
continue
# Stopping containers does need to happen under lock to ensure that
# simultaneous balance_containers don't attempt to stop the same container
# This should be rare, since containers are never returned to the pool,
# but can happen if, for example, the configured pool size changes
if containers_to_stop > 0:
logger.debug("%d containers to stop", containers_to_stop)
with lock:
oldest_container = min(pool, key=attrgetter("created"))
logger.info(
"Pool %s, removing oldest container %s",
pool_stat_str,
oldest_container.name,
)
interactions.stop(oldest_container)
# again, continue the loop here to save destroys for last
continue
# If we've made it this far...
logger.info("Pool balanced, %s", pool_stat_str)
pool_balanced = True
except (APIError, RequestException) as exc:
logger.error("%s while balancing containers, retrying." % type(exc).__name__)
logger.exception(exc)
balance_containers.trigger()
balance_containers.trigger = lambda: scheduler.modify_job(
"balance_containers", next_run_time=datetime.now()
)
# starts the scheduler before handling the first request
@application.before_first_request
def _wharf_init():
version = require("webdriver-wharf")[0].version
logger.info("version %s", version)
# Before doing anything else, grab the image or explode
logger.info("Pulling image %s -- this could take a while", image_name)
interactions.pull(image_name)
logger.info("done")
scheduler.start()
balance_containers.trigger()
# Give the scheduler and executor a nice cool glass of STFU
    # This suppresses informational messages about tasks being fired by the scheduler,
# as well as warnings from the executor that the task is already running.
# For our purposes, neither is notable.
logging.getLogger("apscheduler.scheduler").setLevel(logging.ERROR)
logging.getLogger("apscheduler.executors.default").setLevel(logging.ERROR)
logger.info("Initializing pool, ready for checkout")
| 35.077612
| 96
| 0.647434
|
72ade9097be0b41e1fbcf47b7bb112f4cab7d473
| 2,399
|
py
|
Python
|
test/functional/p2p_timeouts.py
|
bitcoinlimitededition/bitcoinlimitededition
|
befc25e21abf8e02f260474210bd074a4f0b07b2
|
[
"MIT"
] | null | null | null |
test/functional/p2p_timeouts.py
|
bitcoinlimitededition/bitcoinlimitededition
|
befc25e21abf8e02f260474210bd074a4f0b07b2
|
[
"MIT"
] | null | null | null |
test/functional/p2p_timeouts.py
|
bitcoinlimitededition/bitcoinlimitededition
|
befc25e21abf8e02f260474210bd074a4f0b07b2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoinlimitededition Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various net timeouts.
- Create three bitcoinlimitededitiond nodes:
no_verack_node - we never send a verack in response to their version
no_version_node - we never send a version (only a ping)
no_send_node - we never send any P2P message.
- Start all three nodes
- Wait 1 second
- Assert that we're connected
- Send a ping to no_verack_node and no_version_node
- Wait 30 seconds
- Assert that we're still connected
- Send a ping to no_verack_node and no_version_node
- Wait 31 seconds
- Assert that we're no longer connected (timeout to receive version/verack is 60 seconds)
"""
from time import sleep
from test_framework.mininode import *
from test_framework.test_framework import BitcoinlimitededitionTestFramework
from test_framework.util import *
class TestP2PConn(P2PInterface):
def on_version(self, message):
# Don't send a verack in response
pass
class TimeoutsTest(BitcoinlimitededitionTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
# Setup the p2p connections and start up the network thread.
no_verack_node = self.nodes[0].add_p2p_connection(TestP2PConn())
no_version_node = self.nodes[0].add_p2p_connection(TestP2PConn(), send_version=False)
no_send_node = self.nodes[0].add_p2p_connection(TestP2PConn(), send_version=False)
network_thread_start()
sleep(1)
assert no_verack_node.connected
assert no_version_node.connected
assert no_send_node.connected
no_verack_node.send_message(msg_ping())
no_version_node.send_message(msg_ping())
sleep(30)
assert "version" in no_verack_node.last_message
assert no_verack_node.connected
assert no_version_node.connected
assert no_send_node.connected
no_verack_node.send_message(msg_ping())
no_version_node.send_message(msg_ping())
sleep(31)
assert not no_verack_node.connected
assert not no_version_node.connected
assert not no_send_node.connected
if __name__ == '__main__':
TimeoutsTest().main()
| 31.565789
| 93
| 0.732805
|
03cef242ac6c2d4d3d1b6c47b7c2ca2da4b57a0b
| 105
|
py
|
Python
|
backend-django/src/alpaca/urls.py
|
ryanpinnock10/Trading-Bot
|
2f8d3b30f0a47423eed66d67566460fc099f188e
|
[
"MIT"
] | 1
|
2021-11-08T01:44:14.000Z
|
2021-11-08T01:44:14.000Z
|
backend-django/src/alpaca/urls.py
|
webclinic017/Trading-Bot-9
|
2f8d3b30f0a47423eed66d67566460fc099f188e
|
[
"MIT"
] | 1
|
2021-03-30T13:11:11.000Z
|
2021-03-30T13:11:11.000Z
|
backend-django/src/alpaca/urls.py
|
webclinic017/Trading-Bot-9
|
2f8d3b30f0a47423eed66d67566460fc099f188e
|
[
"MIT"
] | 1
|
2021-11-08T01:44:11.000Z
|
2021-11-08T01:44:11.000Z
|
from django.urls import path, include
from . import views
urlpatterns = [
path('', views.index),
]
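# A hedged sketch of how this app-level urlconf is typically mounted from the project-level
# urls.py; the 'alpaca/' prefix and the project module layout are assumptions.
from django.urls import path, include

urlpatterns = [
    path('alpaca/', include('alpaca.urls')),
]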
| 13.125
| 37
| 0.67619
|
1582abd02def58da37b0cc1a41f13aea738a7cbc
| 1,235
|
py
|
Python
|
flypy/tests/test_calling_conv.py
|
filmackay/flypy
|
d64e70959c5c8af9e914dcc3ce1068fb99859c3a
|
[
"BSD-2-Clause"
] | null | null | null |
flypy/tests/test_calling_conv.py
|
filmackay/flypy
|
d64e70959c5c8af9e914dcc3ce1068fb99859c3a
|
[
"BSD-2-Clause"
] | null | null | null |
flypy/tests/test_calling_conv.py
|
filmackay/flypy
|
d64e70959c5c8af9e914dcc3ce1068fb99859c3a
|
[
"BSD-2-Clause"
] | 1
|
2020-01-01T00:43:24.000Z
|
2020-01-01T00:43:24.000Z
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import unittest
from flypy import jit
class TestCallingConventionFromPython(unittest.TestCase):
def test_varargs(self):
@jit
def f(a, b, *args):
return [a, b, args[1]]
self.assertEqual(f(1, 2, 0, 3, 0), [1, 2, 3])
class TestCallingFlypyConvention(unittest.TestCase):
def test_varargs(self):
@jit
def g(a, b, *args):
return [a, b, args[1]]
@jit
def f(a, b, c, d, e):
return g(a, b, c, d, e)
self.assertEqual(f(1, 2, 0, 3, 0), [1, 2, 3])
def test_unpacking(self):
@jit
def g(a, b, c):
return [a, b, c]
@jit
def f(*args):
return g(*args)
self.assertEqual(f(1, 2, 3), [1, 2, 3])
def test_unpacking2(self):
raise unittest.SkipTest("unpacking with additional varargs")
@jit
def g(a, b, *args):
return [a, b, args[0]]
@jit
def f(*args):
return g(*args)
self.assertEqual(f(1, 2, 3), [1, 2, 3])
# TODO: Test unpacking with GenericTuple
if __name__ == '__main__':
unittest.main()
| 22.053571
| 68
| 0.526316
|
e0fb16f468e597808b30f6895f84bf9527341d73
| 7,275
|
py
|
Python
|
c3lingo/models.py
|
c3lingo/c3lingo
|
b9d0c84164ec3b2ab60da87b0f6f36e10b4b27e9
|
[
"MIT"
] | 5
|
2020-04-22T19:49:41.000Z
|
2020-12-30T16:17:37.000Z
|
c3lingo/models.py
|
c3lingo/c3lingo
|
b9d0c84164ec3b2ab60da87b0f6f36e10b4b27e9
|
[
"MIT"
] | 7
|
2020-05-16T13:55:40.000Z
|
2021-09-22T18:56:41.000Z
|
c3lingo/models.py
|
c3lingo/c3lingo
|
b9d0c84164ec3b2ab60da87b0f6f36e10b4b27e9
|
[
"MIT"
] | 1
|
2020-04-27T15:19:20.000Z
|
2020-04-27T15:19:20.000Z
|
from django.db import models
from django.contrib.auth.models import User
from django.utils.crypto import get_random_string
from django.utils.text import slugify
import hashlib
class Language(models.Model):
"""A language we support, spoken or otherwise."""
# Language code.
# For spoken languages, if possible, follow Django's convention of
# using RFC 3066 (https://docs.djangoproject.com/en/3.0/topics/i18n/).
# For signed languages, use ISO 639-3.
code = models.CharField(max_length=8)
# User-visible language name, in English, for our team's use.
name_en = models.CharField(max_length=100)
# User-visible language name, in the form users of that language
# will most likely recognize (i.e. in the language itself, if it
# has a written form).
name_self = models.CharField(max_length=100)
def __str__(self):
return self.code
class Conference(models.Model):
"""A public event in which one or more talks will be translated.
Example: the 36C3.
"""
shortname = models.CharField(max_length=100) # e.g. 36c3
# e.g. "36th Chaos Communication Congress"
name = models.TextField()
image_url = models.TextField(blank=True)
start = models.DateTimeField(null=True)
end = models.DateTimeField(null=True)
# The latest version of the Fahrplan JSON we imported.
fahrplan_version = models.TextField(blank=True)
def __str__(self):
return self.shortname
class Room(models.Model):
"""A Room is where Talks are given during a Conference.
E.g. "Hall Ada"
"""
conference = models.ForeignKey(Conference, on_delete=models.PROTECT)
name = models.TextField()
def __str__(self):
return '{conf}/{name}'.format(conf=self.conference, name=self.name)
class TalkBaseModel(models.Model):
"""The translatable properties of a talk: title, subtitle, etc.
In Talk, these are imported untranslated from the Fahrplan."""
title = models.TextField(blank=True)
subtitle = models.TextField(blank=True)
abstract = models.TextField(blank=True)
description = models.TextField(blank=True)
class Meta:
abstract = True
class Talk(TalkBaseModel):
"""A single presentation given at a Conference."""
conference = models.ForeignKey(Conference, on_delete=models.PROTECT)
fahrplan_id = models.CharField(max_length=100)
fahrplan_guid = models.CharField(max_length=100)
    # These fields are untranslated and imported from the Fahrplan
logo_url = models.TextField(blank=True)
talk_type = models.TextField(blank=True)
speakers = models.TextField(blank=True) # comma-separated list of speakers from the Fahrplan
language = models.ForeignKey(Language, on_delete=models.PROTECT)
room = models.ForeignKey(Room, on_delete=models.PROTECT)
start = models.DateTimeField()
end = models.DateTimeField()
class Meta:
unique_together = [['conference', 'fahrplan_id']]
def __str__(self):
return '{conf}/{title}'.format(conf=self.conference, title=self.title)
@property
def slug(self):
return '{conference_name}-{fahrplan_id}-{name}'.format(
            conference_name=self.conference.shortname,
            fahrplan_id=self.fahrplan_id,
            name=slugify(self.title),
)
@property
def watch_url(self):
return 'https://media.ccc.de/v/{slug}'.format(slug=self.slug)
@property
def slides_url(self):
        return 'https://speakers.c3lingo.org/talks/{guid}/'.format(guid=self.fahrplan_guid)
class Translation(TalkBaseModel):
"""A translation of the properties of a talk (title, subtitle, etc.)"""
author = models.ForeignKey(User, null=True, on_delete=models.SET_NULL)
talk = models.ForeignKey(Talk, on_delete=models.CASCADE)
language = models.ForeignKey(Language, on_delete=models.PROTECT)
class Meta:
unique_together = [['talk', 'language']]
def __str__(self):
return '{talk} ({language})'.format(talk=self.talk, language=self.language)
class Translator(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
confirmed = models.BooleanField()
bio = models.TextField(blank=True)
contact_info = models.TextField(blank=True)
# Used to authenticate requests to the iCal URL.
secret_token = models.CharField(max_length=64, default=lambda: get_random_string(64))
@property
def avatar_url(self):
"""Automatically generated Gravatar URL from the email.
TODO: this leaks the email address (MD5 is rainbowed to death)
TODO: how does the Chaos community feel about Gravatar?
"""
return 'https://www.gravatar.com/avatar/{}'.format(
            hashlib.md5(self.user.email.lower().encode('utf-8')).hexdigest(),
)
def __str__(self):
return str(self.user)
class TranslatorSpeaks(models.Model):
"""Indicates that a translator speaks a given language."""
user = models.ForeignKey(User, on_delete=models.CASCADE)
language = models.ForeignKey(Language, on_delete=models.PROTECT)
def __str__(self):
return '{translator}: {language}'.format(translator=self.user, language=self.language)
class Booth(models.Model):
"""Represents a translation booth that contains a console."""
room = models.ForeignKey(Room, on_delete=models.PROTECT)
name = models.TextField() # e.g. "Hall A booth 1"
location = models.TextField() # How to get there (c3nav link, free
# text, ...)
dect = models.CharField(max_length=30, blank=True)
# How many translators we want in this booth for a typical talk
desired_occupancy = models.PositiveIntegerField()
# How many translators can fit in this booth, max
maximum_occupancy = models.PositiveIntegerField()
def __str__(self):
return '{room}/{name}'.format(room=self.room, name=self.name)
class Shift(models.Model):
"""A shift is an opportunity for a number of Translators to translate
a given Talk in a given Booth."""
booth = models.ForeignKey(Booth, on_delete=models.PROTECT)
talk = models.ForeignKey(Talk, on_delete=models.CASCADE)
# The language may not be defined, e.g. if we have a booth used
# for multiple languages based on availability/interest.
language = models.ForeignKey(Language, blank=True, null=True, on_delete=models.PROTECT)
@property
def language_or_any(self):
if self.language is not None:
return self.language
return '*'
def __str__(self):
return '{talk} ({src} -> {dst})'.format(
talk=self.talk,
src=self.talk.language,
dst=self.language_or_any
)
class ShiftAssignment(models.Model):
"""Represents a Translator volunteering for a Shift."""
shift = models.ForeignKey(Shift, on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
waitlisted = models.BooleanField(default=True)
freeloaded = models.BooleanField(default=False)
comment = models.TextField(blank=True)
def __str__(self):
return '{user} {langfrom} -> {langto} {talk}'.format(
user=self.user,
langfrom=self.shift.talk.language,
langto=self.shift.language,
talk=self.shift.talk
)
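# The avatar_url property above follows the standard Gravatar scheme: MD5 of the trimmed,
# lower-cased e-mail address, hex-encoded and appended to the Gravatar base URL. A
# standalone sketch outside Django, with a made-up address:
import hashlib

email = 'translator@example.org'  # hypothetical address
digest = hashlib.md5(email.strip().lower().encode('utf-8')).hexdigest()
print('https://www.gravatar.com/avatar/' + digest)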
| 35.144928
| 97
| 0.683574
|
c26de2405b6a7cad7acd36c8766a6714bacb6c33
| 21,150
|
py
|
Python
|
indicators/ind_ann.py
|
AwesomeTrading/backtrader-live
|
8d29b6161055e711618ab47338e0ef055f3bddf5
|
[
"Apache-2.0"
] | 27
|
2019-09-23T12:21:08.000Z
|
2022-01-21T07:47:27.000Z
|
indicators/ind_ann.py
|
AwesomeTrading/backtrader-live
|
8d29b6161055e711618ab47338e0ef055f3bddf5
|
[
"Apache-2.0"
] | null | null | null |
indicators/ind_ann.py
|
AwesomeTrading/backtrader-live
|
8d29b6161055e711618ab47338e0ef055f3bddf5
|
[
"Apache-2.0"
] | 16
|
2020-01-10T14:09:37.000Z
|
2022-02-23T00:41:58.000Z
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import backtrader as bt
import math
class ANN(bt.Indicator):
lines = ('ann',
'prediction'
)
plotinfo = dict(
subplot=True,
plotlinelabels=True, plotlinevalues=True, plotvaluetags=True,
)
params = (
('threshold', 0.12),
)
def __init__(self):
super(ANN, self).__init__()
l0_0 = l0_1 = l0_2 = l0_3 = l0_4 = l0_5 = l0_6 = l0_7 = l0_8 = l0_9 = l0_10 = l0_11 = l0_12 = l0_13 = l0_14 = self.getDiff()
l1_0 = self.PineActivationFunctionTanh(
l0_0 * 5.040340774 + l0_1 * -1.3025994088 + l0_2 * 19.4225543981 + l0_3 * 1.1796960423 + l0_4 * 2.4299395823 + l0_5 * 3.159003445 + l0_6 * 4.6844527551 + l0_7 * -6.1079267196 + l0_8 * -2.4952869198 + l0_9 * -4.0966081154 + l0_10 * -2.2432843111 + l0_11 * -0.6105764807 + l0_12 * -0.0775684605 + l0_13 * -0.7984753138 + l0_14 * 3.4495907342)
l1_1 = self.PineActivationFunctionTanh(
l0_0 * 5.9559031982 + l0_1 * -3.1781960056 + l0_2 * -1.6337491061 + l0_3 * -4.3623166512 + l0_4 * 0.9061990402 + l0_5 * -0.731285093 + l0_6 * -6.2500232251 + l0_7 * 0.1356087758 + l0_8 * -0.8570572885 + l0_9 * -4.0161353298 + l0_10 * 1.5095552083 + l0_11 * 1.324789197 + l0_12 * -0.1011973878 + l0_13 * -2.3642090162 + l0_14 * -0.7160862442)
l1_2 = self.PineActivationFunctionTanh(
l0_0 * 4.4350881378 + l0_1 * -2.8956461034 + l0_2 * 1.4199762607 + l0_3 * -0.6436844261 + l0_4 * 1.1124274281 + l0_5 * -4.0976954985 + l0_6 * 2.9317456342 + l0_7 * 0.0798318393 + l0_8 * -5.5718144311 + l0_9 * -0.6623352208 + l0_10 * 3.2405203222 + l0_11 * -10.6253384513 + l0_12 * 4.7132919253 + l0_13 * -5.7378151597 + l0_14 * 0.3164836695)
l1_3 = self.PineActivationFunctionTanh(
l0_0 * -6.1194605467 + l0_1 * 7.7935605604 + l0_2 * -0.7587522153 + l0_3 * 9.8382495905 + l0_4 * 0.3274314734 + l0_5 * 1.8424796541 + l0_6 * -1.2256355427 + l0_7 * -1.5968600758 + l0_8 * 1.9937700922 + l0_9 * 5.0417809111 + l0_10 * -1.9369944654 + l0_11 * 6.1013201778 + l0_12 * 1.5832910747 + l0_13 * -2.148403244 + l0_14 * 1.5449437366)
l1_4 = self.PineActivationFunctionTanh(
l0_0 * 3.5700040028 + l0_1 * -4.4755892733 + l0_2 * 0.1526702072 + l0_3 * -0.3553664401 + l0_4 * -2.3777962662 + l0_5 * -1.8098849587 + l0_6 * -3.5198449134 + l0_7 * -0.4369370497 + l0_8 * 2.3350169623 + l0_9 * 1.9328960346 + l0_10 * 1.1824141812 + l0_11 * 3.0565148049 + l0_12 * -9.3253401534 + l0_13 * 1.6778555498 + l0_14 * -3.045794332)
l1_5 = self.PineActivationFunctionTanh(
l0_0 * 3.6784907623 + l0_1 * 1.1623683715 + l0_2 * 7.1366362145 + l0_3 * -5.6756546585 + l0_4 * 12.7019884334 + l0_5 * -1.2347823331 + l0_6 * 2.3656619827 + l0_7 * -8.7191778213 + l0_8 * -13.8089238753 + l0_9 * 5.4335943836 + l0_10 * -8.1441181338 + l0_11 * -10.5688113287 + l0_12 * 6.3964140758 + l0_13 * -8.9714236223 + l0_14 * -34.0255456929)
l1_6 = self.PineActivationFunctionTanh(
l0_0 * -0.4344517548 + l0_1 * -3.8262167437 + l0_2 * -0.2051098003 + l0_3 * 0.6844201221 + l0_4 * 1.1615893422 + l0_5 * -0.404465314 + l0_6 * -0.1465747632 + l0_7 * -0.006282458 + l0_8 * 0.1585655487 + l0_9 * 1.1994484991 + l0_10 * -0.9879081404 + l0_11 * -0.3564970612 + l0_12 * 1.5814717823 + l0_13 * -0.9614804676 + l0_14 * 0.9204822346)
l1_7 = self.PineActivationFunctionTanh(
l0_0 * -4.2700957175 + l0_1 * 9.4328591157 + l0_2 * -4.3045548 + l0_3 * 5.0616868842 + l0_4 * 3.3388781058 + l0_5 * -2.1885073225 + l0_6 * -6.506301518 + l0_7 * 3.8429000108 + l0_8 * -1.6872237349 + l0_9 * 2.4107095799 + l0_10 * -3.0873985314 + l0_11 * -2.8358325447 + l0_12 * 2.4044366491 + l0_13 * 0.636779082 + l0_14 * -13.2173215035)
l1_8 = self.PineActivationFunctionTanh(
l0_0 * -8.3224697492 + l0_1 * -9.4825530183 + l0_2 * 3.5294389835 + l0_3 * 0.1538618049 + l0_4 * -13.5388631898 + l0_5 * -0.1187936017 + l0_6 * -8.4582741139 + l0_7 * 5.1566299292 + l0_8 * 10.345519938 + l0_9 * 2.9211759333 + l0_10 * -5.0471804233 + l0_11 * 4.9255989983 + l0_12 * -9.9626142544 + l0_13 * 23.0043143258 + l0_14 * 20.9391809343)
l1_9 = self.PineActivationFunctionTanh(
l0_0 * -0.9120518654 + l0_1 * 0.4991807488 + l0_2 * -1.877244586 + l0_3 * 3.1416466525 + l0_4 * 1.063709676 + l0_5 * 0.5210126835 + l0_6 * -4.9755780108 + l0_7 * 2.0336532347 + l0_8 * -1.1793121093 + l0_9 * -0.730664855 + l0_10 * -2.3515987428 + l0_11 * -0.1916546514 + l0_12 * -2.2530340504 + l0_13 * -0.2331829119 + l0_14 * 0.7216218149)
l1_10 = self.PineActivationFunctionTanh(
l0_0 * -5.2139618683 + l0_1 * 1.0663790028 + l0_2 * 1.8340834959 + l0_3 * 1.6248173447 + l0_4 * -0.7663740145 + l0_5 * 0.1062788171 + l0_6 * 2.5288021501 + l0_7 * -3.4066549066 + l0_8 * -4.9497988755 + l0_9 * -2.3060668143 + l0_10 * -1.3962486274 + l0_11 * 0.6185583427 + l0_12 * 0.2625299576 + l0_13 * 2.0270246444 + l0_14 * 0.6372015811)
l1_11 = self.PineActivationFunctionTanh(
l0_0 * 0.2020072665 + l0_1 * 0.3885852709 + l0_2 * -0.1830248843 + l0_3 * -1.2408598444 + l0_4 * -0.6365798088 + l0_5 * 1.8736534268 + l0_6 * 0.656206442 + l0_7 * -0.2987482678 + l0_8 * -0.2017485963 + l0_9 * -1.0604095303 + l0_10 * 0.239793356 + l0_11 * -0.3614172938 + l0_12 * 0.2614678044 + l0_13 * 1.0083551762 + l0_14 * -0.5473833797)
l1_12 = self.PineActivationFunctionTanh(
l0_0 * -0.4367517149 + l0_1 * -10.0601304934 + l0_2 * 1.9240604838 + l0_3 * -1.3192184047 + l0_4 * -0.4564760159 + l0_5 * -0.2965270368 + l0_6 * -1.1407423613 + l0_7 * 2.0949647291 + l0_8 * -5.8212599297 + l0_9 * -1.3393321939 + l0_10 * 7.6624548265 + l0_11 * 1.1309391851 + l0_12 * -0.141798054 + l0_13 * 5.1416736187 + l0_14 * -1.8142503125)
l1_13 = self.PineActivationFunctionTanh(
l0_0 * 1.103948336 + l0_1 * -1.4592033032 + l0_2 * 0.6146278432 + l0_3 * 0.5040966421 + l0_4 * -2.4276090772 + l0_5 * -0.0432902426 + l0_6 * -0.0044259999 + l0_7 * -0.5961347308 + l0_8 * 0.3821026107 + l0_9 * 0.6169102373 + l0_10 * -0.1469847611 + l0_11 * -0.0717167683 + l0_12 * -0.0352403695 + l0_13 * 1.2481310788 + l0_14 * 0.1339628411)
l1_14 = self.PineActivationFunctionTanh(
l0_0 * -9.8049980534 + l0_1 * 13.5481068519 + l0_2 * -17.1362809025 + l0_3 * 0.7142100864 + l0_4 * 4.4759163422 + l0_5 * 4.5716161777 + l0_6 * 1.4290884628 + l0_7 * 8.3952862712 + l0_8 * -7.1613700432 + l0_9 * -3.3249489518 + l0_10 * -0.7789587912 + l0_11 * -1.7987628873 + l0_12 * 13.364752545 + l0_13 * 5.3947219678 + l0_14 * 12.5267547127)
l1_15 = self.PineActivationFunctionTanh(
l0_0 * 0.9869461803 + l0_1 * 1.9473351905 + l0_2 * 2.032925759 + l0_3 * 7.4092080633 + l0_4 * -1.9257741399 + l0_5 * 1.8153585328 + l0_6 * 1.1427866392 + l0_7 * -0.3723167449 + l0_8 * 5.0009927384 + l0_9 * -0.2275103411 + l0_10 * 2.8823012914 + l0_11 * -3.0633141934 + l0_12 * -2.785334815 + l0_13 * 2.727981E-4 + l0_14 * -0.1253009512)
l1_16 = self.PineActivationFunctionTanh(
l0_0 * 4.9418118585 + l0_1 * -2.7538199876 + l0_2 * -16.9887588104 + l0_3 * 8.8734475297 + l0_4 * -16.3022734814 + l0_5 * -4.562496601 + l0_6 * -1.2944373699 + l0_7 * -9.6022946986 + l0_8 * -1.018393866 + l0_9 * -11.4094515429 + l0_10 * 24.8483091382 + l0_11 * -3.0031522277 + l0_12 * 0.1513114555 + l0_13 * -6.7170487021 + l0_14 * -14.7759227576)
l1_17 = self.PineActivationFunctionTanh(
l0_0 * 5.5931454656 + l0_1 * 2.22272078 + l0_2 * 2.603416897 + l0_3 * 1.2661196599 + l0_4 * -2.842826446 + l0_5 * -7.9386099121 + l0_6 * 2.8278849111 + l0_7 * -1.2289445238 + l0_8 * 4.571484248 + l0_9 * 0.9447425595 + l0_10 * 4.2890688351 + l0_11 * -3.3228258483 + l0_12 * 4.8866215526 + l0_13 * 1.0693412194 + l0_14 * -1.963203112)
l1_18 = self.PineActivationFunctionTanh(
l0_0 * 0.2705520264 + l0_1 * 0.4002328199 + l0_2 * 0.1592515845 + l0_3 * 0.371893552 + l0_4 * -1.6639467871 + l0_5 * 2.2887318884 + l0_6 * -0.148633664 + l0_7 * -0.6517792263 + l0_8 * -0.0993032992 + l0_9 * -0.964940376 + l0_10 * 0.1286342935 + l0_11 * 0.4869943595 + l0_12 * 1.4498648166 + l0_13 * -0.3257333384 + l0_14 * -1.3496419812)
l1_19 = self.PineActivationFunctionTanh(
l0_0 * -1.3223200798 + l0_1 * -2.2505204324 + l0_2 * 0.8142804525 + l0_3 * -0.848348177 + l0_4 * 0.7208860589 + l0_5 * 1.2033423756 + l0_6 * -0.1403005786 + l0_7 * 0.2995941644 + l0_8 * -1.1440473062 + l0_9 * 1.067752916 + l0_10 * -1.2990534679 + l0_11 * 1.2588583869 + l0_12 * 0.7670409455 + l0_13 * 2.7895972983 + l0_14 * -0.5376152512)
l1_20 = self.PineActivationFunctionTanh(
l0_0 * 0.7382351572 + l0_1 * -0.8778865631 + l0_2 * 1.0950766363 + l0_3 * 0.7312146997 + l0_4 * 2.844781386 + l0_5 * 2.4526730903 + l0_6 * -1.9175165077 + l0_7 * -0.7443755288 + l0_8 * -3.1591419438 + l0_9 * 0.8441602697 + l0_10 * 1.1979484448 + l0_11 * 2.138098544 + l0_12 * 0.9274159536 + l0_13 * -2.1573448803 + l0_14 * -3.7698356464)
l1_21 = self.PineActivationFunctionTanh(
l0_0 * 5.187120117 + l0_1 * -7.7525670576 + l0_2 * 1.9008346975 + l0_3 * -1.2031603996 + l0_4 * 5.917669142 + l0_5 * -3.1878682719 + l0_6 * 1.0311747828 + l0_7 * -2.7529484612 + l0_8 * -1.1165884578 + l0_9 * 2.5524942323 + l0_10 * -0.38623241 + l0_11 * 3.7961317445 + l0_12 * -6.128820883 + l0_13 * -2.1470707709 + l0_14 * 2.0173792965)
l1_22 = self.PineActivationFunctionTanh(
l0_0 * -6.0241676562 + l0_1 * 0.7474455584 + l0_2 * 1.7435724844 + l0_3 * 0.8619835076 + l0_4 * -0.1138406797 + l0_5 * 6.5979359352 + l0_6 * 1.6554154348 + l0_7 * -3.7969458806 + l0_8 * 1.1139097376 + l0_9 * -1.9588417 + l0_10 * 3.5123392221 + l0_11 * 9.4443103128 + l0_12 * -7.4779291395 + l0_13 * 3.6975940671 + l0_14 * 8.5134262747)
l1_23 = self.PineActivationFunctionTanh(
l0_0 * -7.5486576471 + l0_1 * -0.0281420865 + l0_2 * -3.8586839454 + l0_3 * -0.5648792233 + l0_4 * -7.3927282026 + l0_5 * -0.3857538046 + l0_6 * -2.9779885698 + l0_7 * 4.0482279965 + l0_8 * -1.1522499578 + l0_9 * -4.1562500212 + l0_10 * 0.7813134307 + l0_11 * -1.7582667612 + l0_12 * 1.7071109988 + l0_13 * 6.9270873208 + l0_14 * -4.5871357362)
l1_24 = self.PineActivationFunctionTanh(
l0_0 * -5.3603442228 + l0_1 * -9.5350611629 + l0_2 * 1.6749984422 + l0_3 * -0.6511065892 + l0_4 * -0.8424823239 + l0_5 * 1.9946675213 + l0_6 * -1.1264361638 + l0_7 * 0.3228676616 + l0_8 * 5.3562230396 + l0_9 * -1.6678168952 + l0_10 * 1.2612580068 + l0_11 * -3.5362671399 + l0_12 * -9.3895191366 + l0_13 * 2.0169228673 + l0_14 * -3.3813191557)
l1_25 = self.PineActivationFunctionTanh(
l0_0 * 1.1362866429 + l0_1 * -1.8960071702 + l0_2 * 5.7047307243 + l0_3 * -1.6049785053 + l0_4 * -4.8353898931 + l0_5 * -1.4865381145 + l0_6 * -0.2846893475 + l0_7 * 2.2322095997 + l0_8 * 2.0930488668 + l0_9 * 1.7141411002 + l0_10 * -3.4106032176 + l0_11 * 3.0593289612 + l0_12 * -5.0894813904 + l0_13 * -0.5316299133 + l0_14 * 0.4705265416)
l1_26 = self.PineActivationFunctionTanh(
l0_0 * -0.9401400975 + l0_1 * -0.9136086957 + l0_2 * -3.3808688582 + l0_3 * 4.7200776773 + l0_4 * 3.686296919 + l0_5 * 14.2133723935 + l0_6 * 1.5652940954 + l0_7 * -0.2921139433 + l0_8 * 1.0244504511 + l0_9 * -7.6918299134 + l0_10 * -0.594936135 + l0_11 * -1.4559914156 + l0_12 * 2.8056435224 + l0_13 * 2.6103905733 + l0_14 * 2.3412348872)
l1_27 = self.PineActivationFunctionTanh(
l0_0 * 1.1573980186 + l0_1 * 2.9593661909 + l0_2 * 0.4512594325 + l0_3 * -0.9357210858 + l0_4 * -1.2445804495 + l0_5 * 4.2716471631 + l0_6 * 1.5167912375 + l0_7 * 1.5026853293 + l0_8 * 1.3574772038 + l0_9 * -1.9754386842 + l0_10 * 6.727671436 + l0_11 * 8.0145772889 + l0_12 * 7.3108970663 + l0_13 * -2.5005627841 + l0_14 * 8.9604502277)
l1_28 = self.PineActivationFunctionTanh(
l0_0 * 6.3576350212 + l0_1 * -2.9731672725 + l0_2 * -2.7763558082 + l0_3 * -3.7902984555 + l0_4 * -1.0065574585 + l0_5 * -0.7011836061 + l0_6 * -1.0298068578 + l0_7 * 1.201007784 + l0_8 * -0.7835862254 + l0_9 * -3.9863597435 + l0_10 * 6.7851825502 + l0_11 * 1.1120256721 + l0_12 * -2.263287351 + l0_13 * 1.8314374104 + l0_14 * -2.279102097)
l1_29 = self.PineActivationFunctionTanh(
l0_0 * -7.8741911036 + l0_1 * -5.3370618518 + l0_2 * 11.9153868964 + l0_3 * -4.1237170553 + l0_4 * 2.9491152758 + l0_5 * 1.0317132502 + l0_6 * 2.2992199883 + l0_7 * -2.0250502364 + l0_8 * -11.0785995839 + l0_9 * -6.3615588554 + l0_10 * -1.1687644976 + l0_11 * 6.3323478015 + l0_12 * 6.0195076962 + l0_13 * -2.8972208702 + l0_14 * 3.6107747183)
l2_0 = self.PineActivationFunctionTanh(
l1_0 * -0.590546797 + l1_1 * 0.6608304658 + l1_2 * -0.3358268839 + l1_3 * -0.748530283 + l1_4 * -0.333460383 + l1_5 * -0.3409307681 + l1_6 * 0.1916558198 + l1_7 * -0.1200399453 + l1_8 * -0.5166151854 + l1_9 * -0.8537164676 + l1_10 * -0.0214448647 + l1_11 * -0.553290271 + l1_12 * -1.2333302892 + l1_13 * -0.8321813811 + l1_14 * -0.4527761741 + l1_15 * 0.9012545631 + l1_16 * 0.415853215 + l1_17 * 0.1270548319 + l1_18 * 0.2000460279 + l1_19 * -0.1741942671 + l1_20 * 0.419830522 + l1_21 * -0.059839291 + l1_22 * -0.3383001769 + l1_23 * 0.1617814073 + l1_24 * 0.3071848006 + l1_25 * -0.3191182045 + l1_26 * -0.4981831822 + l1_27 * -1.467478375 + l1_28 * -0.1676432563 + l1_29 * 1.2574849126)
l2_1 = self.PineActivationFunctionTanh(
l1_0 * -0.5514235841 + l1_1 * 0.4759190049 + l1_2 * 0.2103576983 + l1_3 * -0.4754377924 + l1_4 * -0.2362941295 + l1_5 * 0.1155082119 + l1_6 * 0.7424215794 + l1_7 * -0.3674198672 + l1_8 * 0.8401574461 + l1_9 * 0.6096563193 + l1_10 * 0.7437935674 + l1_11 * -0.4898638101 + l1_12 * -0.4168668092 + l1_13 * -0.0365111095 + l1_14 * -0.342675224 + l1_15 * 0.1870268765 + l1_16 * -0.5843050987 + l1_17 * -0.4596547471 + l1_18 * 0.452188522 + l1_19 * -0.6737126684 + l1_20 * 0.6876072741 + l1_21 * -0.8067776704 + l1_22 * 0.7592979467 + l1_23 * -0.0768239468 + l1_24 * 0.370536097 + l1_25 * -0.4363884671 + l1_26 * -0.419285676 + l1_27 * 0.4380251141 + l1_28 * 0.0822528948 + l1_29 * -0.2333910809)
l2_2 = self.PineActivationFunctionTanh(
l1_0 * -0.3306539521 + l1_1 * -0.9382247194 + l1_2 * 0.0746711276 + l1_3 * -0.3383838985 + l1_4 * -0.0683232217 + l1_5 * -0.2112358049 + l1_6 * -0.9079234054 + l1_7 * 0.4898595603 + l1_8 * -0.2039825863 + l1_9 * 1.0870698641 + l1_10 * -1.1752901237 + l1_11 * 1.1406403923 + l1_12 * -0.6779626786 + l1_13 * 0.4281048906 + l1_14 * -0.6327670055 + l1_15 * -0.1477678844 + l1_16 * 0.2693637584 + l1_17 * 0.7250738509 + l1_18 * 0.7905904504 + l1_19 * -1.6417250883 + l1_20 * -0.2108095534 + l1_21 * -0.2698557472 + l1_22 * -0.2433656685 + l1_23 * -0.6289943273 + l1_24 * 0.436428207 + l1_25 * -0.8243825184 + l1_26 * -0.8583496686 + l1_27 * 0.0983131026 + l1_28 * -0.4107462518 + l1_29 * 0.5641683087)
l2_3 = self.PineActivationFunctionTanh(
l1_0 * 1.7036869992 + l1_1 * -0.6683507666 + l1_2 * 0.2589197112 + l1_3 * 0.032841148 + l1_4 * -0.4454796342 + l1_5 * -0.6196149423 + l1_6 * -0.1073622976 + l1_7 * -0.1926393101 + l1_8 * 1.5280232458 + l1_9 * -0.6136527036 + l1_10 * -1.2722934357 + l1_11 * 0.2888655811 + l1_12 * -1.4338638512 + l1_13 * -1.1903556863 + l1_14 * -1.7659663905 + l1_15 * 0.3703086867 + l1_16 * 1.0409140889 + l1_17 * 0.0167382209 + l1_18 * 0.6045646461 + l1_19 * 4.2388788116 + l1_20 * 1.4399738234 + l1_21 * 0.3308571935 + l1_22 * 1.4501137667 + l1_23 * 0.0426123724 + l1_24 * -0.708479795 + l1_25 * -1.2100800732 + l1_26 * -0.5536278651 + l1_27 * 1.3547250573 + l1_28 * 1.2906250286 + l1_29 * 0.0596007114)
l2_4 = self.PineActivationFunctionTanh(
l1_0 * -0.462165126 + l1_1 * -1.0996742176 + l1_2 * 1.0928262999 + l1_3 * 1.806407067 + l1_4 * 0.9289147669 + l1_5 * 0.8069022793 + l1_6 * 0.2374237802 + l1_7 * -2.7143979019 + l1_8 * -2.7779203877 + l1_9 * 0.214383903 + l1_10 * -1.3111536623 + l1_11 * -2.3148813568 + l1_12 * -2.4755355804 + l1_13 * -0.6819733236 + l1_14 * 0.4425615226 + l1_15 * -0.1298218043 + l1_16 * -1.1744832824 + l1_17 * -0.395194848 + l1_18 * -0.2803397703 + l1_19 * -0.4505071197 + l1_20 * -0.8934956598 + l1_21 * 3.3232916348 + l1_22 * -1.7359534851 + l1_23 * 3.8540421743 + l1_24 * 1.4424032523 + l1_25 * 0.2639823693 + l1_26 * 0.3597053634 + l1_27 * -1.0470693728 + l1_28 * 1.4133480357 + l1_29 * 0.6248098695)
l2_5 = self.PineActivationFunctionTanh(
l1_0 * 0.2215807411 + l1_1 * -0.5628295071 + l1_2 * -0.8795982905 + l1_3 * 0.9101585104 + l1_4 * -1.0176831976 + l1_5 * -0.0728884401 + l1_6 * 0.6676331658 + l1_7 * -0.7342174108 + l1_8 * 9.4428E-4 + l1_9 * 0.6439774272 + l1_10 * -0.0345236026 + l1_11 * 0.5830977027 + l1_12 * -0.4058921837 + l1_13 * -0.3991888077 + l1_14 * -1.0090426973 + l1_15 * -0.9324780698 + l1_16 * -0.0888749165 + l1_17 * 0.2466351736 + l1_18 * 0.4993304601 + l1_19 * -1.115408696 + l1_20 * 0.9914246705 + l1_21 * 0.9687743445 + l1_22 * 0.1117130875 + l1_23 * 0.7825109733 + l1_24 * 0.2217023612 + l1_25 * 0.3081256411 + l1_26 * -0.1778007966 + l1_27 * -0.3333287743 + l1_28 * 1.0156352461 + l1_29 * -0.1456257813)
l2_6 = self.PineActivationFunctionTanh(
l1_0 * -0.5461783383 + l1_1 * 0.3246015999 + l1_2 * 0.1450605434 + l1_3 * -1.3179944349 + l1_4 * -1.5481775261 + l1_5 * -0.679685633 + l1_6 * -0.9462335139 + l1_7 * -0.6462399371 + l1_8 * 0.0991658683 + l1_9 * 0.1612892194 + l1_10 * -1.037660602 + l1_11 * -0.1044778824 + l1_12 * 0.8309203243 + l1_13 * 0.7714766458 + l1_14 * 0.2566767663 + l1_15 * 0.8649416329 + l1_16 * -0.5847461285 + l1_17 * -0.6393969272 + l1_18 * 0.8014049359 + l1_19 * 0.2279568228 + l1_20 * 1.0565217821 + l1_21 * 0.134738029 + l1_22 * 0.3420395576 + l1_23 * -0.2417397219 + l1_24 * 0.3083072038 + l1_25 * 0.6761739059 + l1_26 * -0.4653817053 + l1_27 * -1.0634057566 + l1_28 * -0.5658892281 + l1_29 * -0.6947283681)
l2_7 = self.PineActivationFunctionTanh(
l1_0 * -0.5450410944 + l1_1 * 0.3912849372 + l1_2 * -0.4118641117 + l1_3 * 0.7124695074 + l1_4 * -0.7510266122 + l1_5 * 1.4065673913 + l1_6 * 0.9870731545 + l1_7 * -0.2609363107 + l1_8 * -0.3583639958 + l1_9 * 0.5436375706 + l1_10 * 0.4572450099 + l1_11 * -0.4651538878 + l1_12 * -0.2180218212 + l1_13 * 0.5241262959 + l1_14 * -0.8529323253 + l1_15 * -0.4200378937 + l1_16 * 0.4997885721 + l1_17 * -1.1121528189 + l1_18 * 0.5992411048 + l1_19 * -1.0263270781 + l1_20 * -1.725160642 + l1_21 * -0.2653995722 + l1_22 * 0.6996703032 + l1_23 * 0.348549086 + l1_24 * 0.6522482482 + l1_25 * -0.7931928436 + l1_26 * -0.5107994359 + l1_27 * 0.0509642698 + l1_28 * 0.8711187423 + l1_29 * 0.8999449627)
l2_8 = self.PineActivationFunctionTanh(
l1_0 * -0.7111081522 + l1_1 * 0.4296245062 + l1_2 * -2.0720732038 + l1_3 * -0.4071818684 + l1_4 * 1.0632721681 + l1_5 * 0.8463224325 + l1_6 * -0.6083948423 + l1_7 * 1.1827669608 + l1_8 * -0.9572307844 + l1_9 * -0.9080517673 + l1_10 * -0.0479029057 + l1_11 * -1.1452853213 + l1_12 * 0.2884352688 + l1_13 * 0.1767851586 + l1_14 * -1.089314461 + l1_15 * 1.2991763966 + l1_16 * 1.6236630806 + l1_17 * -0.7720263697 + l1_18 * -0.5011541755 + l1_19 * -2.3919413568 + l1_20 * 0.0084018338 + l1_21 * 0.9975216139 + l1_22 * 0.4193541029 + l1_23 * 1.4623834571 + l1_24 * -0.6253069691 + l1_25 * 0.6119677341 + l1_26 * 0.5423948388 + l1_27 * 1.0022450377 + l1_28 * -1.2392984069 + l1_29 * 1.5021529822)
self.l3_0 = self.PineActivationFunctionTanh(
l2_0 * 0.3385061186 + l2_1 * 0.6218531956 + l2_2 * -0.7790340983 + l2_3 * 0.1413078332 + l2_4 * 0.1857010624 + l2_5 * -0.1769456351 + l2_6 * -0.3242337911 + l2_7 * -0.503944883 + l2_8 * 0.1540568869)
self.lines.prediction = self.l3_0
# self.lines.prediction = l0_0
self.lines.ann = Cmp2(self.l3_0, self.p.threshold, 1, 0)
def getDiff(self):
yesterday = (self.data1.open(-1) + self.data1.high(-1) + self.data1.low(-1) + self.data1.close(-1)) / 4
today = (self.data1.open(0) + self.data1.high(0) + self.data1.low(0) + self.data1.close(0)) / 4
delta = today-yesterday
percentage = delta / today
return percentage
def exp(self, v):
return pow(math.e, v)
def PineActivationFunctionLinear(self, v):
return v
def PineActivationFunctionTanh(self, v):
return (self.exp(v) - self.exp(-v)) / (self.exp(v) + self.exp(-v))
class Cmp2(bt.Logic):
def __init__(self, a, b, r1, r2):
super(Cmp2, self).__init__(a, b, r1, r2)
self.a = self.args[0]
self.b = self.args[1]
self.r1 = self.args[2]
self.r2 = self.args[3]
    def next(self):
        if self.a[0] > self.b[0]: self[0] = self.r1[0]
        elif self.a[0] < -self.b[0]: self[0] = self.r2[0]
        else: self[0] = self[-1]  # inside the threshold band: hold the previous value (mirrors once())
def once(self, start, end):
# cache python dictionary lookups
dst = self.array
srca = self.a.array
for i in range(start, end):
if srca[i] > self.b: dst[i] = self.r1
elif srca[i] < -self.b: dst[i] = self.r2
else: dst[i] = dst[i-1]
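# A standalone sanity check (independent of backtrader) showing that
# PineActivationFunctionTanh above is just the ordinary hyperbolic tangent;
# pine_tanh below is a hypothetical copy of the same formula.
import math

def pine_tanh(v):
    return (math.exp(v) - math.exp(-v)) / (math.exp(v) + math.exp(-v))

for v in (-2.0, -0.5, 0.0, 0.5, 2.0):
    assert abs(pine_tanh(v) - math.tanh(v)) < 1e-12
print('pine_tanh matches math.tanh on sample inputs')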
| 136.451613
| 708
| 0.650496
|
ee412f12c250f786d2341d88fdeb57aa342bd73f
| 14,104
|
py
|
Python
|
backend/api/tests/ActividadTestCase.py
|
kukiamarilla/polijira
|
510dbc1473db973ac71fc68fa5a9b758b90a780b
|
[
"MIT"
] | 1
|
2022-03-02T02:28:49.000Z
|
2022-03-02T02:28:49.000Z
|
backend/api/tests/ActividadTestCase.py
|
kukiamarilla/polijira
|
510dbc1473db973ac71fc68fa5a9b758b90a780b
|
[
"MIT"
] | 22
|
2021-09-01T17:44:25.000Z
|
2021-10-07T19:39:09.000Z
|
backend/api/tests/ActividadTestCase.py
|
kukiamarilla/polijira
|
510dbc1473db973ac71fc68fa5a9b758b90a780b
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.test.client import Client
from backend.api.models import Actividad, Miembro, MiembroSprint, Proyecto, SprintBacklog, Sprint, Usuario
class ActividadTestCase(TestCase):
"""
    ActividadTestCase Tests for the CRUD functionality of Actividades
    Args:
        TestCase (TestCase): Test case class from the Django test module
"""
fixtures = [
"backend/api/fixtures/testing/auth.json",
"backend/api/fixtures/testing/usuarios.json",
"backend/api/fixtures/testing/permisos.json",
"backend/api/fixtures/testing/roles.json",
"backend/api/fixtures/testing/proyectos.json",
"backend/api/fixtures/testing/permisosProyecto.json",
"backend/api/fixtures/testing/plantillas.json",
"backend/api/fixtures/testing/rolesProyecto.json",
"backend/api/fixtures/testing/miembros.json",
"backend/api/fixtures/testing/horarios.json",
"backend/api/fixtures/testing/user-stories.json",
"backend/api/fixtures/testing/product-backlogs.json",
"backend/api/fixtures/testing/registro-user-stories.json",
"backend/api/fixtures/testing/sprints.json",
"backend/api/fixtures/testing/sprintbacklogs.json",
"backend/api/fixtures/testing/miembrosprints.json",
"backend/api/fixtures/testing/actividades.json"
]
def setUp(self):
"""
        setUp Configures the TestCase
"""
self.client = Client()
def test_crear_actividad(self):
"""
        test_crear_actividad Tests creating an Actividad
        """
        print("\nTesting creating an Actividad")
self.client.login(
username="testing",
password="polijira2021"
)
sprint = Sprint.objects.get(pk=2)
sprint.estado = "A"
sprint.save()
request_data = {
"titulo": "Actividad",
"sprint_backlog": 1,
"descripcion": "Holiii",
"horas": 2
}
response = self.client.post("/api/actividades/", request_data)
self.assertEquals(response.status_code, 200)
actividad = Actividad.objects.filter(**request_data)
self.assertEquals(len(actividad), 1)
def test_modificar_actividad(self):
"""
        test_modificar_actividad Tests modifying an Actividad
        """
        print("\nTesting modifying an Actividad")
self.client.login(
username="testing",
password="polijira2021"
)
sprint = Sprint.objects.get(pk=2)
sprint.estado = "A"
sprint.save()
request_data = {
"titulo": "Actividad",
"descripcion": "Holiii :)",
"horas": 3
}
self.client.put("/api/actividades/1/", request_data, content_type="application/json")
actividad = Actividad.objects.filter(**request_data)
self.assertEquals(len(actividad), 1)
def test_eliminar_actividad(self):
"""
        test_eliminar_actividad Tests deleting an Actividad
        """
        print("\nTesting deleting an Actividad")
self.client.login(
username="testing",
password="polijira2021"
)
response = self.client.delete("/api/actividades/1/")
self.assertEquals(response.status_code, 200)
actividad = Actividad.objects.filter(pk=1)
self.assertEquals(len(actividad), 0)
def test_error_validacion_crear_actividad_campos(self):
"""
test_error_validacion_crear_actividad_campos
        Tests validating the presence of the fields Sprint Backlog, Descripcion and Horas when creating an Actividad
        """
        print("\nTesting validation of the presence of Sprint Backlog, Descripcion and Horas when creating an Actividad")
self.client.login(
username="testing",
password="polijira2021"
)
sprint = Sprint.objects.get(pk=2)
sprint.estado = "A"
sprint.save()
response = self.client.post("/api/actividades/")
self.assertEquals(response.status_code, 422)
body = response.json()
self.assertEquals(body.get("errors").get("sprint_backlog"), ["No se pasó: Sprint Backlog"])
self.assertEquals(body.get("errors").get("descripcion"), ["No se pasó: Descripcion"])
self.assertEquals(body.get("errors").get("horas"), ["No se pasó: Horas"])
def test_error_validacion_crear_actividad_sprint_backlog(self):
"""
test_error_validacion_crear_actividad_sprint_backlog
        Tests validating that the Sprint Backlog exists in the database when creating an Actividad
        """
        print("\nTesting validation that the Sprint Backlog exists in the database when creating an Actividad")
self.client.login(
username="testing",
password="polijira2021"
)
sprint = Sprint.objects.get(pk=2)
sprint.estado = "A"
sprint.save()
request_data = {
"sprint_backlog": 1000,
"descripcion": "Holiii",
"horas": 2
}
response = self.client.post("/api/actividades/", request_data)
self.assertEquals(response.status_code, 422)
body = response.json()
self.assertEquals(body.get("errors").get("sprint_backlog"), [
"No se encontró el Sprint Backlog en la base de datos"])
def test_error_validacion_crear_actividad_horas(self):
"""
test_error_validacion_crear_actividad_horas
        Tests validating that the hours value is greater than zero when creating an Actividad
        """
        print("\nTesting validation that the hours value is greater than zero when creating an Actividad")
self.client.login(
username="testing",
password="polijira2021"
)
sprint = Sprint.objects.get(pk=2)
sprint.estado = "A"
sprint.save()
request_data = {
"sprint_backlog": 1,
"descripcion": "Holiii",
"horas": -2
}
response = self.client.post("/api/actividades/", request_data)
self.assertEquals(response.status_code, 422)
body = response.json()
self.assertEquals(body.get("errors").get("horas"), ["La hora no puede ser negativa"])
def test_validar_miembro_proyecto_crear_actividad(self):
"""
test_validar_miembro_proyecto_crear_actividad
        Tests validating that the user is a member of the project when creating an Actividad
        """
        print("\nTesting validation that the user is a member of the project when creating an Actividad")
self.client.login(
username="testing",
password="polijira2021"
)
sprint = Sprint.objects.get(pk=2)
sprint.estado = "A"
sprint.proyecto = Proyecto.objects.get(pk=4)
sprint.save()
request_data = {
"titulo": "Actividad",
"sprint_backlog": 1,
"descripcion": "Holiii",
"horas": 2
}
response = self.client.post("/api/actividades/", request_data)
self.assertEquals(response.status_code, 403)
body = response.json()
self.assertEquals(body.get("message"), "Usted no es miembro de este Proyecto")
self.assertEquals(body.get("error"), "forbidden")
def test_validar_miembro_sprint_crear_actividad(self):
"""
test_validar_miembro_sprint_crear_actividad
        Tests validating that the user is a member of the sprint when creating an Actividad
        """
        print("\nTesting validation that the user is a member of the sprint when creating an Actividad")
self.client.login(
username="testing",
password="polijira2021"
)
sprint = Sprint.objects.get(pk=2)
sprint.estado = "A"
sprint.save()
miembro_sprint = MiembroSprint.objects.get(pk=2)
miembro_sprint.miembro_proyecto = Miembro.objects.get(pk=2)
miembro_sprint.save()
request_data = {
"titulo": "Actividad",
"sprint_backlog": 1,
"descripcion": "Holiii",
"horas": 2
}
response = self.client.post("/api/actividades/", request_data)
self.assertEquals(response.status_code, 403)
body = response.json()
self.assertEquals(body.get("message"), "Usted no es miembro de este Sprint")
self.assertEquals(body.get("error"), "forbidden")
def test_validar_desarrollador_crear_actividad(self):
"""
test_validar_desarrollador_crear_actividad
        Tests validating that the user is the developer of the User Story when creating an Actividad
        """
        print("\nTesting validation that the user is the developer of the User Story when creating an Actividad")
self.client.login(
username="testing",
password="polijira2021"
)
sprint = Sprint.objects.get(pk=2)
sprint.estado = "A"
sprint.save()
sprint_backlog = SprintBacklog.objects.get(pk=1)
sprint_backlog.desarrollador = MiembroSprint.objects.get(pk=1)
sprint_backlog.save()
request_data = {
"titulo": "Actividad",
"sprint_backlog": 1,
"descripcion": "Holiii",
"horas": 2
}
response = self.client.post("/api/actividades/", request_data)
self.assertEquals(response.status_code, 403)
body = response.json()
self.assertEquals(body.get("message"), "Usted no es desarrollador del User Story")
self.assertEquals(body.get("error"), "forbidden")
    def test_validar_sprint_crear_actividad(self):
        """
        test_validar_sprint_crear_actividad
        Validates that the Sprint is active when creating an Activity
        """
        print("\nTesting that the Sprint is active when creating an Activity")
self.client.login(
username="testing",
password="polijira2021"
)
request_data = {
"titulo": "Actividad",
"sprint_backlog": 1,
"descripcion": "Holiii",
"horas": 2
}
response = self.client.post("/api/actividades/", request_data)
self.assertEquals(response.status_code, 400)
body = response.json()
self.assertEquals(body.get("message"), "Para registrar una actividad el Sprint debe estar Activo")
self.assertEquals(body.get("error"), "bad_request")
    def test_error_validacion_modificar_actividad_campos(self):
        """
        test_error_validacion_modificar_actividad_campos
        Validates that the fields Sprint Backlog, Descripcion and Horas are present when modifying an Activity
        """
        print("\nTesting that the fields Sprint Backlog, Descripcion and Horas are present when modifying an Activity")
self.client.login(
username="testing",
password="polijira2021"
)
sprint = Sprint.objects.get(pk=2)
sprint.estado = "A"
sprint.save()
response = self.client.put("/api/actividades/1/")
self.assertEquals(response.status_code, 422)
body = response.json()
self.assertEquals(body.get("errors").get("descripcion"), ["No se pasó: Descripcion"])
self.assertEquals(body.get("errors").get("horas"), ["No se pasó: Horas"])
    def test_error_validacion_modificar_actividad_horas(self):
        """
        test_error_validacion_modificar_actividad_horas
        Validates that the hours value is greater than zero when modifying an Activity
        """
        print("\nTesting that the hours value is greater than zero when modifying an Activity")
self.client.login(
username="testing",
password="polijira2021"
)
sprint = Sprint.objects.get(pk=2)
sprint.estado = "A"
sprint.save()
request_data = {
"descripcion": "Holiii",
"horas": -2
}
response = self.client.put("/api/actividades/1/", request_data, "application/json")
self.assertEquals(response.status_code, 422)
body = response.json()
self.assertEquals(body.get("errors").get("horas"), ["La hora no puede ser negativa"])
    def test_validar_desarrollador_modificar_actividad(self):
        """
        test_validar_desarrollador_modificar_actividad
        Validates that the user is the developer of the User Story when modifying an Activity
        """
        print("\nTesting that the user is the developer of the User Story when modifying an Activity")
self.client.login(
username="testing",
password="polijira2021"
)
actividad = Actividad.objects.get(pk=1)
actividad.desarrollador = Usuario.objects.get(pk=2)
actividad.save()
request_data = {
"titulo": "Actividad",
"descripcion": "Holiii",
"horas": 2
}
response = self.client.put("/api/actividades/1/", request_data, "application/json")
self.assertEquals(response.status_code, 403)
body = response.json()
self.assertEquals(body.get("message"), "Usted no es desarrollador de esta Actividad")
self.assertEquals(body.get("error"), "forbidden")
    def test_validar_actividad_modificar_actividad(self):
        """
        test_validar_actividad_modificar_actividad
        Validates that the Activity exists in the DB when modifying an Activity
        """
        print("\nTesting that the Activity exists in the DB when modifying an Activity")
self.client.login(
username="testing",
password="polijira2021"
)
request_data = {
"titulo": "Actividad",
"descripcion": "Holiii",
"horas": 2
}
response = self.client.put("/api/actividades/1000/", request_data, "application/json")
self.assertEquals(response.status_code, 404)
body = response.json()
self.assertEquals(body.get("message"), "No existe la Actividad")
self.assertEquals(body.get("error"), "not_found")
| 39.841808
| 113
| 0.620391
|
c111f50abf6f68f4b8047bf46e38cd39b38530b1
| 931
|
py
|
Python
|
benchmark.py
|
marangonico/xplane_airports
|
00259ab889d9c8e1af2f0be54e1ca7bd4b9ffe1f
|
[
"MIT"
] | null | null | null |
benchmark.py
|
marangonico/xplane_airports
|
00259ab889d9c8e1af2f0be54e1ca7bd4b9ffe1f
|
[
"MIT"
] | null | null | null |
benchmark.py
|
marangonico/xplane_airports
|
00259ab889d9c8e1af2f0be54e1ca7bd4b9ffe1f
|
[
"MIT"
] | null | null | null |
import gc
import timeit
from xplane_airports.AptDat import AptDat
from pathlib import Path
xplane_installation = Path('/Users/tyler/design')
assert xplane_installation.is_dir(), f"{xplane_installation} does not exist or is not a directory"
iterations = 3
print(f"Repeating {iterations} iterations of parsing 35,000+ airports from disk (this will take awhile)")
# Tyler observes: We can't just run a bunch of iterations using timeit(), because it disables GC,
# and we use gigabytes of RAM per parse of our giant files.
#
# It's not realistic to benchmark us parsing multiple 300 MB files... there are only so many airports in the world!
total_seconds = 0
for i in range(iterations):
total_seconds += timeit.timeit(lambda: AptDat(xplane_installation / 'Resources/default scenery/default apt dat/Earth nav data/apt.dat'), number=1)
gc.collect()
print(f"Average time over {iterations} runs: {total_seconds / iterations}")
| 42.318182
| 150
| 0.771214
|
9e3eeea2326a13340afe81214bbb5c767deac0f1
| 9,478
|
py
|
Python
|
bokeh/_testing/plugins/project.py
|
samwill/bokeh
|
228132eba4b696b91b2a77f7e9d07771ba868093
|
[
"BSD-3-Clause"
] | 1
|
2021-05-03T15:19:05.000Z
|
2021-05-03T15:19:05.000Z
|
bokeh/_testing/plugins/project.py
|
samwill/bokeh
|
228132eba4b696b91b2a77f7e9d07771ba868093
|
[
"BSD-3-Clause"
] | 3
|
2021-09-08T03:16:42.000Z
|
2022-03-12T00:57:18.000Z
|
bokeh/_testing/plugins/project.py
|
samwill/bokeh
|
228132eba4b696b91b2a77f7e9d07771ba868093
|
[
"BSD-3-Clause"
] | 2
|
2021-01-12T18:22:24.000Z
|
2021-10-30T00:32:02.000Z
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Define a Pytest plugin for Bokeh-specific testing tools
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import socket
import time
from contextlib import closing
from threading import Thread
# External imports
import pytest
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from tornado.ioloop import IOLoop
from tornado.web import RequestHandler
# Bokeh imports
import bokeh.server.views.ws as ws
from bokeh._testing.util.selenium import INIT, RESULTS, wait_for_canvas_resize
from bokeh.io import save
from bokeh.server.server import Server
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
pytest_plugins = (
"bokeh._testing.plugins.project",
"bokeh._testing.plugins.file_server",
"bokeh._testing.plugins.selenium",
)
__all__ = (
'bokeh_app_info',
'bokeh_model_page',
'bokeh_server_page',
'find_free_port',
'output_file_url',
'single_plot_page',
'test_file_path_and_url',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
@pytest.fixture
def output_file_url(request, file_server):
from bokeh.io import output_file
filename = request.function.__name__ + '.html'
file_obj = request.fspath.dirpath().join(filename)
file_path = file_obj.strpath
url = file_path.replace('\\', '/') # Windows-proof
output_file(file_path, mode='inline')
def tear_down():
if file_obj.isfile():
file_obj.remove()
request.addfinalizer(tear_down)
return file_server.where_is(url)
@pytest.fixture
def test_file_path_and_url(request, file_server):
filename = request.function.__name__ + '.html'
file_obj = request.fspath.dirpath().join(filename)
file_path = file_obj.strpath
url = file_path.replace('\\', '/') # Windows-proof
def tear_down():
if file_obj.isfile():
file_obj.remove()
request.addfinalizer(tear_down)
return file_path, file_server.where_is(url)
class _ExitHandler(RequestHandler):
def initialize(self, io_loop):
self.io_loop = io_loop
async def get(self, *args, **kwargs):
self.io_loop.stop()
def find_free_port():
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(('', 0))
return s.getsockname()[1]
@pytest.fixture
def bokeh_app_info(request, driver):
''' Start a Bokeh server app and return information needed to test it.
Returns a tuple (url, message_test_port), where the latter is defined as
namedtuple('MessageTestPort', ['sent', 'received'])
and will contain all messages that the Bokeh Server sends/receives while
running during the test.
'''
def func(modify_doc):
from collections import namedtuple
MessageTestPort = namedtuple('MessageTestPort', ['sent', 'received'])
ws._message_test_port = MessageTestPort([], [])
port = find_free_port()
def worker():
io_loop = IOLoop()
server = Server({'/': modify_doc},
port=port,
io_loop=io_loop,
extra_patterns=[('/exit', _ExitHandler, dict(io_loop=io_loop))])
server.start()
server.io_loop.start()
t = Thread(target=worker)
t.start()
def cleanup():
driver.get("http://localhost:%d/exit" % port)
# XXX (bev) this line is a workaround for https://github.com/bokeh/bokeh/issues/7970
# and should be removed when that issue is resolved
driver.get_log('browser')
ws._message_test_port = None
t.join()
request.addfinalizer(cleanup)
return "http://localhost:%d/" % port, ws._message_test_port
return func
class _BokehPageMixin:
@property
def results(self):
WebDriverWait(self._driver, 10).until(EC.staleness_of(self.test_div))
self.test_div = self._driver.find_element_by_class_name("bokeh-test-div")
return self._driver.execute_script(RESULTS)
@property
def driver(self):
return self._driver
def init_results(self):
self._driver.execute_script(INIT)
self.test_div = self._driver.find_element_by_class_name("bokeh-test-div")
def click_element_at_position(self, element, x, y):
actions = ActionChains(self._driver)
actions.move_to_element_with_offset(element, x, y)
actions.click()
actions.perform()
def double_click_element_at_position(self, element, x, y):
actions = ActionChains(self._driver)
actions.move_to_element_with_offset(element, x, y)
actions.click()
actions.click()
actions.perform()
def drag_element_at_position(self, element, x, y, dx, dy, mod=None):
actions = ActionChains(self._driver)
if mod:
actions.key_down(mod)
actions.move_to_element_with_offset(element, x, y)
actions.click_and_hold()
actions.move_by_offset(dx, dy)
actions.release()
if mod:
actions.key_up(mod)
actions.perform()
def send_keys(self, *keys):
actions = ActionChains(self._driver)
actions.send_keys(*keys)
actions.perform()
def has_no_console_errors(self):
return self._has_no_console_errors(self._driver)
class _BokehModelPage(_BokehPageMixin):
def __init__(self, model, driver, output_file_url, has_no_console_errors):
self._driver = driver
self._model = model
self._has_no_console_errors = has_no_console_errors
save(self._model)
self._driver.get(output_file_url)
self.init_results()
class _CanvasMixin:
def click_canvas_at_position(self, x, y):
self.click_element_at_position(self.canvas, x, y)
def double_click_canvas_at_position(self, x, y):
self.double_click_element_at_position(self.canvas, x, y)
def click_custom_action(self):
button = self._driver.find_element_by_class_name("bk-toolbar-button-custom-action")
button.click()
def drag_canvas_at_position(self, x, y, dx, dy, mod=None):
self.drag_element_at_position(self.canvas, x, y, dx, dy, mod)
def get_toolbar_button(self, name):
return self.driver.find_element_by_class_name('bk-tool-icon-' + name)
@pytest.fixture()
def bokeh_model_page(driver, output_file_url, has_no_console_errors):
def func(model):
return _BokehModelPage(model, driver, output_file_url, has_no_console_errors)
return func
class _SinglePlotPage(_BokehModelPage, _CanvasMixin):
# model may be a layout, but should only contain a single plot
def __init__(self, model, driver, output_file_url, has_no_console_errors):
super().__init__(model, driver, output_file_url, has_no_console_errors)
self.canvas = self._driver.find_element_by_tag_name('canvas')
wait_for_canvas_resize(self.canvas, self._driver)
@pytest.fixture()
def single_plot_page(driver, output_file_url, has_no_console_errors):
def func(model):
return _SinglePlotPage(model, driver, output_file_url, has_no_console_errors)
return func
class _BokehServerPage(_BokehPageMixin, _CanvasMixin):
def __init__(self, modify_doc, driver, bokeh_app_info, has_no_console_errors):
self._driver = driver
self._has_no_console_errors = has_no_console_errors
self._app_url, self.message_test_port = bokeh_app_info(modify_doc)
time.sleep(0.1)
self._driver.get(self._app_url)
self.init_results()
self.canvas = self._driver.find_element_by_tag_name('canvas')
wait_for_canvas_resize(self.canvas, self._driver)
@pytest.fixture()
def bokeh_server_page(driver, bokeh_app_info, has_no_console_errors):
def func(modify_doc):
return _BokehServerPage(modify_doc, driver, bokeh_app_info, has_no_console_errors)
return func
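A minimal usage sketch of the fixtures above from a test module, assuming Bokeh's plotting API is importable; the trivial line glyph and the modify_doc callback are hypothetical stand-ins, not part of this plugin:

from bokeh.plotting import figure

def test_single_plot_renders(single_plot_page):
    # single_plot_page saves the model, loads it through the file server in the
    # browser, and waits for the canvas to resize before returning the page object
    plot = figure()
    plot.line([1, 2, 3], [4, 5, 6])
    page = single_plot_page(plot)
    assert page.has_no_console_errors()

def test_server_app_renders(bokeh_server_page):
    # bokeh_server_page starts a Bokeh server app on a free port and records the
    # websocket messages exchanged while the test runs
    def modify_doc(doc):
        doc.add_root(figure())
    page = bokeh_server_page(modify_doc)
    assert page.has_no_console_errors()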
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 32.682759
| 96
| 0.5977
|
197b7b6d2cfe8653de3bd9be040b2452a756bc7b
| 401
|
py
|
Python
|
questionsApp/wsgi.py
|
javitocor/PsychologicalTest-React-Django-
|
459ef21b85e3e9432ac7c2dfd8d0d12115c925bd
|
[
"MIT"
] | 1
|
2021-09-30T12:12:10.000Z
|
2021-09-30T12:12:10.000Z
|
questionsApp/wsgi.py
|
javitocor/PsychologicalTest-React-Django-
|
459ef21b85e3e9432ac7c2dfd8d0d12115c925bd
|
[
"MIT"
] | null | null | null |
questionsApp/wsgi.py
|
javitocor/PsychologicalTest-React-Django-
|
459ef21b85e3e9432ac7c2dfd8d0d12115c925bd
|
[
"MIT"
] | 1
|
2021-12-23T00:41:41.000Z
|
2021-12-23T00:41:41.000Z
|
"""
WSGI config for questionsApp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'questionsApp.settings')
application = get_wsgi_application()
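A minimal sketch of consuming the module-level application callable with the standard library's WSGI reference server, for local smoke-testing only; a production deployment would instead point gunicorn or uWSGI at questionsApp.wsgi:application:

from wsgiref.simple_server import make_server

from questionsApp.wsgi import application  # the callable exposed above

if __name__ == "__main__":
    # serve the Django app on localhost:8000 with the stdlib reference server
    with make_server("127.0.0.1", 8000, application) as httpd:
        httpd.serve_forever()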
| 23.588235
| 78
| 0.790524
|
2883ede219aab0b942a5717218b15935f5f9e78a
| 66,811
|
py
|
Python
|
compiler/gdsMill/mpmath/tests/test_functions2.py
|
panicmarvin/OpenRAM
|
abf47bab50adb48337c59b72ccd6023c1999f3fc
|
[
"BSD-3-Clause"
] | null | null | null |
compiler/gdsMill/mpmath/tests/test_functions2.py
|
panicmarvin/OpenRAM
|
abf47bab50adb48337c59b72ccd6023c1999f3fc
|
[
"BSD-3-Clause"
] | null | null | null |
compiler/gdsMill/mpmath/tests/test_functions2.py
|
panicmarvin/OpenRAM
|
abf47bab50adb48337c59b72ccd6023c1999f3fc
|
[
"BSD-3-Clause"
] | 1
|
2020-01-23T07:12:52.000Z
|
2020-01-23T07:12:52.000Z
|
import math
from mpmath import *
def test_bessel():
mp.dps = 15
assert j0(1).ae(0.765197686557966551)
assert j0(pi).ae(-0.304242177644093864)
assert j0(1000).ae(0.0247866861524201746)
assert j0(-25).ae(0.0962667832759581162)
assert j1(1).ae(0.440050585744933516)
assert j1(pi).ae(0.284615343179752757)
assert j1(1000).ae(0.00472831190708952392)
assert j1(-25).ae(0.125350249580289905)
assert besselj(5,1).ae(0.000249757730211234431)
assert besselj(5,pi).ae(0.0521411843671184747)
assert besselj(5,1000).ae(0.00502540694523318607)
assert besselj(5,-25).ae(0.0660079953984229934)
assert besselj(-3,2).ae(-0.128943249474402051)
assert besselj(-4,2).ae(0.0339957198075684341)
assert besselj(3,3+2j).ae(0.424718794929639595942 + 0.625665327745785804812j)
assert besselj(0.25,4).ae(-0.374760630804249715)
assert besselj(1+2j,3+4j).ae(0.319247428741872131 - 0.669557748880365678j)
assert (besselj(3, 10**10) * 10**5).ae(0.76765081748139204023)
assert bessely(-0.5, 0) == 0
assert bessely(0.5, 0) == -inf
assert bessely(1.5, 0) == -inf
assert bessely(0,0) == -inf
assert bessely(-0.4, 0) == -inf
assert bessely(-0.6, 0) == inf
assert bessely(-1, 0) == inf
assert bessely(-1.4, 0) == inf
assert bessely(-1.6, 0) == -inf
assert bessely(-1, 0) == inf
assert bessely(-2, 0) == -inf
assert bessely(-3, 0) == inf
assert bessely(0.5, 0) == -inf
assert bessely(1, 0) == -inf
assert bessely(1.5, 0) == -inf
assert bessely(2, 0) == -inf
assert bessely(2.5, 0) == -inf
assert bessely(3, 0) == -inf
assert bessely(0,0.5).ae(-0.44451873350670655715)
assert bessely(1,0.5).ae(-1.4714723926702430692)
assert bessely(-1,0.5).ae(1.4714723926702430692)
assert bessely(3.5,0.5).ae(-138.86400867242488443)
assert bessely(0,3+4j).ae(4.6047596915010138655-8.8110771408232264208j)
assert bessely(0,j).ae(-0.26803248203398854876+1.26606587775200833560j)
assert (bessely(3, 10**10) * 10**5).ae(0.21755917537013204058)
assert besseli(0,0) == 1
assert besseli(1,0) == 0
assert besseli(2,0) == 0
assert besseli(-1,0) == 0
assert besseli(-2,0) == 0
assert besseli(0,0.5).ae(1.0634833707413235193)
assert besseli(1,0.5).ae(0.25789430539089631636)
assert besseli(-1,0.5).ae(0.25789430539089631636)
assert besseli(3.5,0.5).ae(0.00068103597085793815863)
assert besseli(0,3+4j).ae(-3.3924877882755196097-1.3239458916287264815j)
assert besseli(0,j).ae(besselj(0,1))
assert (besseli(3, 10**10) * mpf(10)**(-4342944813)).ae(4.2996028505491271875)
assert besselk(0,0) == inf
assert besselk(1,0) == inf
assert besselk(2,0) == inf
assert besselk(-1,0) == inf
assert besselk(-2,0) == inf
assert besselk(0,0.5).ae(0.92441907122766586178)
assert besselk(1,0.5).ae(1.6564411200033008937)
assert besselk(-1,0.5).ae(1.6564411200033008937)
assert besselk(3.5,0.5).ae(207.48418747548460607)
assert besselk(0,3+4j).ae(-0.007239051213570155013+0.026510418350267677215j)
assert besselk(0,j).ae(-0.13863371520405399968-1.20196971531720649914j)
assert (besselk(3, 10**10) * mpf(10)**4342944824).ae(1.1628981033356187851)
def test_hankel():
mp.dps = 15
assert hankel1(0,0.5).ae(0.93846980724081290423-0.44451873350670655715j)
assert hankel1(1,0.5).ae(0.2422684576748738864-1.4714723926702430692j)
assert hankel1(-1,0.5).ae(-0.2422684576748738864+1.4714723926702430692j)
assert hankel1(1.5,0.5).ae(0.0917016996256513026-2.5214655504213378514j)
assert hankel1(1.5,3+4j).ae(0.0066806866476728165382-0.0036684231610839127106j)
assert hankel2(0,0.5).ae(0.93846980724081290423+0.44451873350670655715j)
assert hankel2(1,0.5).ae(0.2422684576748738864+1.4714723926702430692j)
assert hankel2(-1,0.5).ae(-0.2422684576748738864-1.4714723926702430692j)
assert hankel2(1.5,0.5).ae(0.0917016996256513026+2.5214655504213378514j)
assert hankel2(1.5,3+4j).ae(14.783528526098567526-7.397390270853446512j)
def test_struve():
mp.dps = 15
assert struveh(2,3).ae(0.74238666967748318564)
assert struveh(-2.5,3).ae(0.41271003220971599344)
assert struvel(2,3).ae(1.7476573277362782744)
assert struvel(-2.5,3).ae(1.5153394466819651377)
def test_whittaker():
mp.dps = 15
assert whitm(2,3,4).ae(49.753745589025246591)
assert whitw(2,3,4).ae(14.111656223052932215)
def test_kelvin():
mp.dps = 15
assert ber(2,3).ae(0.80836846563726819091)
assert ber(3,4).ae(-0.28262680167242600233)
assert ber(-3,2).ae(-0.085611448496796363669)
assert bei(2,3).ae(-0.89102236377977331571)
assert bei(-3,2).ae(-0.14420994155731828415)
assert ker(2,3).ae(0.12839126695733458928)
assert ker(-3,2).ae(-0.29802153400559142783)
assert ker(0.5,3).ae(-0.085662378535217097524)
assert kei(2,3).ae(0.036804426134164634000)
assert kei(-3,2).ae(0.88682069845786731114)
assert kei(0.5,3).ae(0.013633041571314302948)
def test_hyper_misc():
mp.dps = 15
assert hyp0f1(1,0) == 1
assert hyp1f1(1,2,0) == 1
assert hyp1f2(1,2,3,0) == 1
assert hyp2f1(1,2,3,0) == 1
assert hyp2f2(1,2,3,4,0) == 1
assert hyp2f3(1,2,3,4,5,0) == 1
# Degenerate case: 0F0
assert hyper([],[],0) == 1
assert hyper([],[],-2).ae(exp(-2))
# Degenerate case: 1F0
assert hyper([2],[],1.5) == 4
#
assert hyp2f1((1,3),(2,3),(5,6),mpf(27)/32).ae(1.6)
assert hyp2f1((1,4),(1,2),(3,4),mpf(80)/81).ae(1.8)
assert hyp2f1((2,3),(1,1),(3,2),(2+j)/3).ae(1.327531603558679093+0.439585080092769253j)
mp.dps = 25
v = mpc('1.2282306665029814734863026', '-0.1225033830118305184672133')
assert hyper([(3,4),2+j,1],[1,5,j/3],mpf(1)/5+j/8).ae(v)
mp.dps = 15
def test_elliptic_integrals():
mp.dps = 15
assert ellipk(0).ae(pi/2)
assert ellipk(0.5).ae(gamma(0.25)**2/(4*sqrt(pi)))
assert ellipk(1) == inf
assert ellipk(1+0j) == inf
assert ellipk(-1).ae('1.3110287771460599052')
assert ellipk(-2).ae('1.1714200841467698589')
assert isinstance(ellipk(-2), mpf)
assert isinstance(ellipe(-2), mpf)
assert ellipk(-50).ae('0.47103424540873331679')
mp.dps = 30
n1 = +fraction(99999,100000)
n2 = +fraction(100001,100000)
mp.dps = 15
assert ellipk(n1).ae('7.1427724505817781901')
assert ellipk(n2).ae(mpc('7.1427417367963090109', '-1.5707923998261688019'))
assert ellipe(n1).ae('1.0000332138990829170')
v = ellipe(n2)
assert v.real.ae('0.999966786328145474069137')
assert (v.imag*10**6).ae('7.853952181727432')
assert ellipk(2).ae(mpc('1.3110287771460599052', '-1.3110287771460599052'))
assert ellipk(50).ae(mpc('0.22326753950210985451', '-0.47434723226254522087'))
assert ellipk(3+4j).ae(mpc('0.91119556380496500866', '0.63133428324134524388'))
assert ellipk(3-4j).ae(mpc('0.91119556380496500866', '-0.63133428324134524388'))
assert ellipk(-3+4j).ae(mpc('0.95357894880405122483', '0.23093044503746114444'))
assert ellipk(-3-4j).ae(mpc('0.95357894880405122483', '-0.23093044503746114444'))
assert isnan(ellipk(nan))
assert isnan(ellipe(nan))
assert ellipk(inf) == 0
assert isinstance(ellipk(inf), mpc)
assert ellipk(-inf) == 0
assert ellipk(1+0j) == inf
assert ellipe(0).ae(pi/2)
assert ellipe(0.5).ae(pi**(mpf(3)/2)/gamma(0.25)**2 +gamma(0.25)**2/(8*sqrt(pi)))
assert ellipe(1) == 1
assert ellipe(1+0j) == 1
assert ellipe(inf) == mpc(0,inf)
assert ellipe(-inf) == inf
assert ellipe(3+4j).ae(1.4995535209333469543-1.5778790079127582745j)
assert ellipe(3-4j).ae(1.4995535209333469543+1.5778790079127582745j)
assert ellipe(-3+4j).ae(2.5804237855343377803-0.8306096791000413778j)
assert ellipe(-3-4j).ae(2.5804237855343377803+0.8306096791000413778j)
assert ellipe(2).ae(0.59907011736779610372+0.59907011736779610372j)
assert ellipe('1e-1000000000').ae(pi/2)
assert ellipk('1e-1000000000').ae(pi/2)
assert ellipe(-pi).ae(2.4535865983838923)
mp.dps = 50
assert ellipk(1/pi).ae('1.724756270009501831744438120951614673874904182624739673')
assert ellipe(1/pi).ae('1.437129808135123030101542922290970050337425479058225712')
assert ellipk(-10*pi).ae('0.5519067523886233967683646782286965823151896970015484512')
assert ellipe(-10*pi).ae('5.926192483740483797854383268707108012328213431657645509')
v = ellipk(pi)
assert v.real.ae('0.973089521698042334840454592642137667227167622330325225')
assert v.imag.ae('-1.156151296372835303836814390793087600271609993858798016')
v = ellipe(pi)
assert v.real.ae('0.4632848917264710404078033487934663562998345622611263332')
assert v.imag.ae('1.0637961621753130852473300451583414489944099504180510966')
mp.dps = 15
def test_exp_integrals():
mp.dps = 15
x = +e
z = e + sqrt(3)*j
assert ei(x).ae(8.21168165538361560)
assert li(x).ae(1.89511781635593676)
assert si(x).ae(1.82104026914756705)
assert ci(x).ae(0.213958001340379779)
assert shi(x).ae(4.11520706247846193)
assert chi(x).ae(4.09647459290515367)
assert fresnels(x).ae(0.437189718149787643)
assert fresnelc(x).ae(0.401777759590243012)
assert airyai(x).ae(0.0108502401568586681)
assert airybi(x).ae(8.98245748585468627)
assert ei(z).ae(3.72597969491314951 + 7.34213212314224421j)
assert li(z).ae(2.28662658112562502 + 1.50427225297269364j)
assert si(z).ae(2.48122029237669054 + 0.12684703275254834j)
assert ci(z).ae(0.169255590269456633 - 0.892020751420780353j)
assert shi(z).ae(1.85810366559344468 + 3.66435842914920263j)
assert chi(z).ae(1.86787602931970484 + 3.67777369399304159j)
assert fresnels(z/3).ae(0.034534397197008182 + 0.754859844188218737j)
assert fresnelc(z/3).ae(1.261581645990027372 + 0.417949198775061893j)
assert airyai(z).ae(-0.0162552579839056062 - 0.0018045715700210556j)
assert airybi(z).ae(-4.98856113282883371 + 2.08558537872180623j)
assert li(0) == 0.0
assert li(1) == -inf
assert li(inf) == inf
assert isinstance(li(0.7), mpf)
assert si(inf).ae(pi/2)
assert si(-inf).ae(-pi/2)
assert ci(inf) == 0
assert ci(0) == -inf
assert isinstance(ei(-0.7), mpf)
assert airyai(inf) == 0
assert airybi(inf) == inf
assert airyai(-inf) == 0
assert airybi(-inf) == 0
assert fresnels(inf) == 0.5
assert fresnelc(inf) == 0.5
assert fresnels(-inf) == -0.5
assert fresnelc(-inf) == -0.5
assert shi(0) == 0
assert shi(inf) == inf
assert shi(-inf) == -inf
assert chi(0) == -inf
assert chi(inf) == inf
def test_ei():
mp.dps = 15
assert ei(0) == -inf
assert ei(inf) == inf
assert ei(-inf) == -0.0
assert ei(20+70j).ae(6.1041351911152984397e6 - 2.7324109310519928872e6j)
# tests for the asymptotic expansion
# values checked with Mathematica ExpIntegralEi
mp.dps = 50
r = ei(20000)
s = '3.8781962825045010930273870085501819470698476975019e+8681'
assert str(r) == s
r = ei(-200)
s = '-6.8852261063076355977108174824557929738368086933303e-90'
assert str(r) == s
    r = ei(20000 + 10*j)
sre = '-3.255138234032069402493850638874410725961401274106e+8681'
sim = '-2.1081929993474403520785942429469187647767369645423e+8681'
assert str(r.real) == sre and str(r.imag) == sim
mp.dps = 15
# More asymptotic expansions
assert chi(-10**6+100j).ae('1.3077239389562548386e+434288 + 7.6808956999707408158e+434287j')
assert shi(-10**6+100j).ae('-1.3077239389562548386e+434288 - 7.6808956999707408158e+434287j')
mp.dps = 15
assert ei(10j).ae(-0.0454564330044553726+3.2291439210137706686j)
assert ei(100j).ae(-0.0051488251426104921+3.1330217936839529126j)
u = ei(fmul(10**20, j, exact=True))
assert u.real.ae(-6.4525128526578084421345e-21, abs_eps=0, rel_eps=8*eps)
assert u.imag.ae(pi)
assert ei(-10j).ae(-0.0454564330044553726-3.2291439210137706686j)
assert ei(-100j).ae(-0.0051488251426104921-3.1330217936839529126j)
u = ei(fmul(-10**20, j, exact=True))
assert u.real.ae(-6.4525128526578084421345e-21, abs_eps=0, rel_eps=8*eps)
assert u.imag.ae(-pi)
assert ei(10+10j).ae(-1576.1504265768517448+436.9192317011328140j)
u = ei(-10+10j)
assert u.real.ae(7.6698978415553488362543e-7, abs_eps=0, rel_eps=8*eps)
assert u.imag.ae(3.141595611735621062025)
def test_e1():
mp.dps = 15
assert e1(0) == inf
assert e1(inf) == 0
assert e1(-inf) == mpc(-inf, -pi)
assert e1(10j).ae(0.045456433004455372635 + 0.087551267423977430100j)
assert e1(100j).ae(0.0051488251426104921444 - 0.0085708599058403258790j)
assert e1(fmul(10**20, j, exact=True)).ae(6.4525128526578084421e-21 - 7.6397040444172830039e-21j, abs_eps=0, rel_eps=8*eps)
assert e1(-10j).ae(0.045456433004455372635 - 0.087551267423977430100j)
assert e1(-100j).ae(0.0051488251426104921444 + 0.0085708599058403258790j)
assert e1(fmul(-10**20, j, exact=True)).ae(6.4525128526578084421e-21 + 7.6397040444172830039e-21j, abs_eps=0, rel_eps=8*eps)
def test_expint():
mp.dps = 15
assert expint(0,0) == inf
assert expint(0,1).ae(1/e)
assert expint(0,1.5).ae(2/exp(1.5)/3)
assert expint(1,1).ae(-ei(-1))
assert expint(2,0).ae(1)
assert expint(3,0).ae(1/2.)
assert expint(4,0).ae(1/3.)
assert expint(-2, 0.5).ae(26/sqrt(e))
assert expint(-1,-1) == 0
assert expint(-2,-1).ae(-e)
assert expint(5.5, 0).ae(2/9.)
assert expint(2.00000001,0).ae(100000000./100000001)
assert expint(2+3j,4-j).ae(0.0023461179581675065414+0.0020395540604713669262j)
assert expint('1.01', '1e-1000').ae(99.9999999899412802)
assert expint('1.000000000001', 3.5).ae(0.00697013985754701819446)
assert expint(2,3).ae(3*ei(-3)+exp(-3))
assert (expint(10,20)*10**10).ae(0.694439055541231353)
assert expint(3,inf) == 0
assert expint(3.2,inf) == 0
assert expint(3.2+2j,inf) == 0
assert expint(1,3j).ae(-0.11962978600800032763 + 0.27785620120457163717j)
assert expint(1,3).ae(0.013048381094197037413)
assert expint(1,-3).ae(-ei(3)-pi*j)
#assert expint(3) == expint(1,3)
assert expint(1,-20).ae(-25615652.66405658882 - 3.1415926535897932385j)
assert expint(1000000,0).ae(1./999999)
assert expint(0,2+3j).ae(-0.025019798357114678171 + 0.027980439405104419040j)
assert expint(-1,2+3j).ae(-0.022411973626262070419 + 0.038058922011377716932j)
assert expint(-1.5,0) == inf
def test_trig_integrals():
mp.dps = 30
assert si(mpf(1)/1000000).ae('0.000000999999999999944444444444446111')
assert ci(mpf(1)/1000000).ae('-13.2382948930629912435014366276')
assert si(10**10).ae('1.5707963267075846569685111517747537')
assert ci(10**10).ae('-4.87506025174822653785729773959e-11')
assert si(10**100).ae(pi/2)
assert (ci(10**100)*10**100).ae('-0.372376123661276688262086695553')
assert si(-3) == -si(3)
assert ci(-3).ae(ci(3) + pi*j)
# Test complex structure
mp.dps = 15
assert mp.ci(50).ae(-0.0056283863241163054402)
assert mp.ci(50+2j).ae(-0.018378282946133067149+0.070352808023688336193j)
assert mp.ci(20j).ae(1.28078263320282943611e7+1.5707963267949j)
assert mp.ci(-2+20j).ae(-4.050116856873293505e6+1.207476188206989909e7j)
assert mp.ci(-50+2j).ae(-0.0183782829461330671+3.0712398455661049023j)
assert mp.ci(-50).ae(-0.0056283863241163054+3.1415926535897932385j)
assert mp.ci(-50-2j).ae(-0.0183782829461330671-3.0712398455661049023j)
assert mp.ci(-2-20j).ae(-4.050116856873293505e6-1.207476188206989909e7j)
assert mp.ci(-20j).ae(1.28078263320282943611e7-1.5707963267949j)
assert mp.ci(50-2j).ae(-0.018378282946133067149-0.070352808023688336193j)
assert mp.si(50).ae(1.5516170724859358947)
assert mp.si(50+2j).ae(1.497884414277228461-0.017515007378437448j)
assert mp.si(20j).ae(1.2807826332028294459e7j)
assert mp.si(-2+20j).ae(-1.20747603112735722103e7-4.050116856873293554e6j)
assert mp.si(-50+2j).ae(-1.497884414277228461-0.017515007378437448j)
assert mp.si(-50).ae(-1.5516170724859358947)
assert mp.si(-50-2j).ae(-1.497884414277228461+0.017515007378437448j)
assert mp.si(-2-20j).ae(-1.20747603112735722103e7+4.050116856873293554e6j)
assert mp.si(-20j).ae(-1.2807826332028294459e7j)
assert mp.si(50-2j).ae(1.497884414277228461+0.017515007378437448j)
assert mp.chi(50j).ae(-0.0056283863241163054+1.5707963267948966192j)
assert mp.chi(-2+50j).ae(-0.0183782829461330671+1.6411491348185849554j)
assert mp.chi(-20).ae(1.28078263320282943611e7+3.1415926535898j)
assert mp.chi(-20-2j).ae(-4.050116856873293505e6+1.20747571696809187053e7j)
assert mp.chi(-2-50j).ae(-0.0183782829461330671-1.6411491348185849554j)
assert mp.chi(-50j).ae(-0.0056283863241163054-1.5707963267948966192j)
assert mp.chi(2-50j).ae(-0.0183782829461330671-1.500443518771208283j)
assert mp.chi(20-2j).ae(-4.050116856873293505e6-1.20747603112735722951e7j)
assert mp.chi(20).ae(1.2807826332028294361e7)
assert mp.chi(2+50j).ae(-0.0183782829461330671+1.500443518771208283j)
assert mp.shi(50j).ae(1.5516170724859358947j)
assert mp.shi(-2+50j).ae(0.017515007378437448+1.497884414277228461j)
assert mp.shi(-20).ae(-1.2807826332028294459e7)
assert mp.shi(-20-2j).ae(4.050116856873293554e6-1.20747603112735722103e7j)
assert mp.shi(-2-50j).ae(0.017515007378437448-1.497884414277228461j)
assert mp.shi(-50j).ae(-1.5516170724859358947j)
assert mp.shi(2-50j).ae(-0.017515007378437448-1.497884414277228461j)
assert mp.shi(20-2j).ae(-4.050116856873293554e6-1.20747603112735722103e7j)
assert mp.shi(20).ae(1.2807826332028294459e7)
assert mp.shi(2+50j).ae(-0.017515007378437448+1.497884414277228461j)
def ae(x,y,tol=1e-12):
return abs(x-y) <= abs(y)*tol
assert fp.ci(fp.inf) == 0
assert ae(fp.ci(fp.ninf), fp.pi*1j)
assert ae(fp.si(fp.inf), fp.pi/2)
assert ae(fp.si(fp.ninf), -fp.pi/2)
assert fp.si(0) == 0
assert ae(fp.ci(50), -0.0056283863241163054402)
assert ae(fp.ci(50+2j), -0.018378282946133067149+0.070352808023688336193j)
assert ae(fp.ci(20j), 1.28078263320282943611e7+1.5707963267949j)
assert ae(fp.ci(-2+20j), -4.050116856873293505e6+1.207476188206989909e7j)
assert ae(fp.ci(-50+2j), -0.0183782829461330671+3.0712398455661049023j)
assert ae(fp.ci(-50), -0.0056283863241163054+3.1415926535897932385j)
assert ae(fp.ci(-50-2j), -0.0183782829461330671-3.0712398455661049023j)
assert ae(fp.ci(-2-20j), -4.050116856873293505e6-1.207476188206989909e7j)
assert ae(fp.ci(-20j), 1.28078263320282943611e7-1.5707963267949j)
assert ae(fp.ci(50-2j), -0.018378282946133067149-0.070352808023688336193j)
assert ae(fp.si(50), 1.5516170724859358947)
assert ae(fp.si(50+2j), 1.497884414277228461-0.017515007378437448j)
assert ae(fp.si(20j), 1.2807826332028294459e7j)
assert ae(fp.si(-2+20j), -1.20747603112735722103e7-4.050116856873293554e6j)
assert ae(fp.si(-50+2j), -1.497884414277228461-0.017515007378437448j)
assert ae(fp.si(-50), -1.5516170724859358947)
assert ae(fp.si(-50-2j), -1.497884414277228461+0.017515007378437448j)
assert ae(fp.si(-2-20j), -1.20747603112735722103e7+4.050116856873293554e6j)
assert ae(fp.si(-20j), -1.2807826332028294459e7j)
assert ae(fp.si(50-2j), 1.497884414277228461+0.017515007378437448j)
assert ae(fp.chi(50j), -0.0056283863241163054+1.5707963267948966192j)
assert ae(fp.chi(-2+50j), -0.0183782829461330671+1.6411491348185849554j)
assert ae(fp.chi(-20), 1.28078263320282943611e7+3.1415926535898j)
assert ae(fp.chi(-20-2j), -4.050116856873293505e6+1.20747571696809187053e7j)
assert ae(fp.chi(-2-50j), -0.0183782829461330671-1.6411491348185849554j)
assert ae(fp.chi(-50j), -0.0056283863241163054-1.5707963267948966192j)
assert ae(fp.chi(2-50j), -0.0183782829461330671-1.500443518771208283j)
assert ae(fp.chi(20-2j), -4.050116856873293505e6-1.20747603112735722951e7j)
assert ae(fp.chi(20), 1.2807826332028294361e7)
assert ae(fp.chi(2+50j), -0.0183782829461330671+1.500443518771208283j)
assert ae(fp.shi(50j), 1.5516170724859358947j)
assert ae(fp.shi(-2+50j), 0.017515007378437448+1.497884414277228461j)
assert ae(fp.shi(-20), -1.2807826332028294459e7)
assert ae(fp.shi(-20-2j), 4.050116856873293554e6-1.20747603112735722103e7j)
assert ae(fp.shi(-2-50j), 0.017515007378437448-1.497884414277228461j)
assert ae(fp.shi(-50j), -1.5516170724859358947j)
assert ae(fp.shi(2-50j), -0.017515007378437448-1.497884414277228461j)
assert ae(fp.shi(20-2j), -4.050116856873293554e6-1.20747603112735722103e7j)
assert ae(fp.shi(20), 1.2807826332028294459e7)
assert ae(fp.shi(2+50j), -0.017515007378437448+1.497884414277228461j)
def test_airy():
mp.dps = 15
assert (airyai(10)*10**10).ae(1.1047532552898687)
assert (airybi(10)/10**9).ae(0.45564115354822515)
assert (airyai(1000)*10**9158).ae(9.306933063179556004)
assert (airybi(1000)/10**9154).ae(5.4077118391949465477)
assert airyai(-1000).ae(0.055971895773019918842)
assert airybi(-1000).ae(-0.083264574117080633012)
assert (airyai(100+100j)*10**188).ae(2.9099582462207032076 + 2.353013591706178756j)
assert (airybi(100+100j)/10**185).ae(1.7086751714463652039 - 3.1416590020830804578j)
def test_hyper_0f1():
mp.dps = 15
v = 8.63911136507950465
assert hyper([],[(1,3)],1.5).ae(v)
assert hyper([],[1/3.],1.5).ae(v)
assert hyp0f1(1/3.,1.5).ae(v)
assert hyp0f1((1,3),1.5).ae(v)
# Asymptotic expansion
assert hyp0f1(3,1e9).ae('4.9679055380347771271e+27455')
assert hyp0f1(3,1e9j).ae('-2.1222788784457702157e+19410 + 5.0840597555401854116e+19410j')
def test_hyper_1f1():
mp.dps = 15
v = 1.2917526488617656673
assert hyper([(1,2)],[(3,2)],0.7).ae(v)
assert hyper([(1,2)],[(3,2)],0.7+0j).ae(v)
assert hyper([0.5],[(3,2)],0.7).ae(v)
assert hyper([0.5],[1.5],0.7).ae(v)
assert hyper([0.5],[(3,2)],0.7+0j).ae(v)
assert hyper([0.5],[1.5],0.7+0j).ae(v)
assert hyper([(1,2)],[1.5+0j],0.7).ae(v)
assert hyper([0.5+0j],[1.5],0.7).ae(v)
assert hyper([0.5+0j],[1.5+0j],0.7+0j).ae(v)
assert hyp1f1(0.5,1.5,0.7).ae(v)
assert hyp1f1((1,2),1.5,0.7).ae(v)
# Asymptotic expansion
assert hyp1f1(2,3,1e10).ae('2.1555012157015796988e+4342944809')
assert (hyp1f1(2,3,1e10j)*10**10).ae(-0.97501205020039745852 - 1.7462392454512132074j)
# Shouldn't use asymptotic expansion
assert hyp1f1(-2, 1, 10000).ae(49980001)
def test_hyper_2f1():
mp.dps = 15
v = 1.0652207633823291032
assert hyper([(1,2), (3,4)], [2], 0.3).ae(v)
assert hyper([(1,2), 0.75], [2], 0.3).ae(v)
assert hyper([0.5, 0.75], [2.0], 0.3).ae(v)
assert hyper([0.5, 0.75], [2.0], 0.3+0j).ae(v)
assert hyper([0.5+0j, (3,4)], [2.0], 0.3+0j).ae(v)
assert hyper([0.5+0j, (3,4)], [2.0], 0.3).ae(v)
assert hyper([0.5, (3,4)], [2.0+0j], 0.3).ae(v)
assert hyper([0.5+0j, 0.75+0j], [2.0+0j], 0.3+0j).ae(v)
v = 1.09234681096223231717 + 0.18104859169479360380j
assert hyper([(1,2),0.75+j], [2], 0.5).ae(v)
assert hyper([0.5,0.75+j], [2.0], 0.5).ae(v)
assert hyper([0.5,0.75+j], [2.0], 0.5+0j).ae(v)
assert hyper([0.5,0.75+j], [2.0+0j], 0.5+0j).ae(v)
v = 0.9625 - 0.125j
assert hyper([(3,2),-1],[4], 0.1+j/3).ae(v)
assert hyper([1.5,-1.0],[4], 0.1+j/3).ae(v)
assert hyper([1.5,-1.0],[4+0j], 0.1+j/3).ae(v)
assert hyper([1.5+0j,-1.0+0j],[4+0j], 0.1+j/3).ae(v)
v = 1.02111069501693445001 - 0.50402252613466859521j
assert hyper([(2,10),(3,10)],[(4,10)],1.5).ae(v)
assert hyper([0.2,(3,10)],[0.4+0j],1.5).ae(v)
assert hyper([0.2,(3,10)],[0.4+0j],1.5+0j).ae(v)
v = 0.76922501362865848528 + 0.32640579593235886194j
assert hyper([(2,10),(3,10)],[(4,10)],4+2j).ae(v)
assert hyper([0.2,(3,10)],[0.4+0j],4+2j).ae(v)
assert hyper([0.2,(3,10)],[(4,10)],4+2j).ae(v)
def test_hyper_2f1_hard():
mp.dps = 15
# Singular cases
assert hyp2f1(2,-1,-1,3).ae(0.25)
assert hyp2f1(2,-2,-2,3).ae(0.25)
assert hyp2f1(2,-1,-1,3,eliminate=False) == 7
assert hyp2f1(2,-2,-2,3,eliminate=False) == 34
assert hyp2f1(2,-2,-3,3) == 14
assert hyp2f1(2,-3,-2,3) == inf
assert hyp2f1(2,-1.5,-1.5,3) == 0.25
assert hyp2f1(1,2,3,0) == 1
assert hyp2f1(0,1,0,0) == 1
assert hyp2f1(0,0,0,0) == 1
assert isnan(hyp2f1(1,1,0,0))
assert hyp2f1(2,-1,-5, 0.25+0.25j).ae(1.1+0.1j)
assert hyp2f1(2,-5,-5, 0.25+0.25j, eliminate=False).ae(163./128 + 125./128*j)
assert hyp2f1(0.7235, -1, -5, 0.3).ae(1.04341)
assert hyp2f1(0.7235, -5, -5, 0.3, eliminate=False).ae(1.2939225017815903812)
assert hyp2f1(-1,-2,4,1) == 1.5
assert hyp2f1(1,2,-3,1) == inf
assert hyp2f1(-2,-2,1,1) == 6
assert hyp2f1(1,-2,-4,1).ae(5./3)
assert hyp2f1(0,-6,-4,1) == 1
assert hyp2f1(0,-3,-4,1) == 1
assert hyp2f1(0,0,0,1) == 1
assert hyp2f1(1,0,0,1,eliminate=False) == 1
assert hyp2f1(1,1,0,1) == inf
assert hyp2f1(1,-6,-4,1) == inf
assert hyp2f1(-7.2,-0.5,-4.5,1) == 0
assert hyp2f1(-7.2,-1,-2,1).ae(-2.6)
assert hyp2f1(1,-0.5,-4.5, 1) == inf
assert hyp2f1(1,0.5,-4.5, 1) == -inf
# Check evaluation on / close to unit circle
z = exp(j*pi/3)
w = (nthroot(2,3)+1)*exp(j*pi/12)/nthroot(3,4)**3
assert hyp2f1('1/2','1/6','1/3', z).ae(w)
assert hyp2f1('1/2','1/6','1/3', z.conjugate()).ae(w.conjugate())
assert hyp2f1(0.25, (1,3), 2, '0.999').ae(1.06826449496030635)
assert hyp2f1(0.25, (1,3), 2, '1.001').ae(1.06867299254830309446-0.00001446586793975874j)
assert hyp2f1(0.25, (1,3), 2, -1).ae(0.96656584492524351673)
assert hyp2f1(0.25, (1,3), 2, j).ae(0.99041766248982072266+0.03777135604180735522j)
assert hyp2f1(2,3,5,'0.99').ae(27.699347904322690602)
assert hyp2f1((3,2),-0.5,3,'0.99').ae(0.68403036843911661388)
assert hyp2f1(2,3,5,1j).ae(0.37290667145974386127+0.59210004902748285917j)
assert fsum([hyp2f1((7,10),(2,3),(-1,2), 0.95*exp(j*k)) for k in range(1,15)]).ae(52.851400204289452922+6.244285013912953225j)
assert fsum([hyp2f1((7,10),(2,3),(-1,2), 1.05*exp(j*k)) for k in range(1,15)]).ae(54.506013786220655330-3.000118813413217097j)
assert fsum([hyp2f1((7,10),(2,3),(-1,2), exp(j*k)) for k in range(1,15)]).ae(55.792077935955314887+1.731986485778500241j)
assert hyp2f1(2,2.5,-3.25,0.999).ae(218373932801217082543180041.33)
# Branches
assert hyp2f1(1,1,2,1.01).ae(4.5595744415723676911-3.1104877758314784539j)
assert hyp2f1(1,1,2,1.01+0.1j).ae(2.4149427480552782484+1.4148224796836938829j)
assert hyp2f1(1,1,2,3+4j).ae(0.14576709331407297807+0.48379185417980360773j)
assert hyp2f1(1,1,2,4).ae(-0.27465307216702742285 - 0.78539816339744830962j)
assert hyp2f1(1,1,2,-4).ae(0.40235947810852509365)
# Other:
# Cancellation with a large parameter involved (bug reported on sage-devel)
assert hyp2f1(112, (51,10), (-9,10), -0.99999).ae(-1.6241361047970862961e-24, abs_eps=0, rel_eps=eps*16)
def test_hyper_3f2_etc():
assert hyper([1,2,3],[1.5,8],-1).ae(0.67108992351533333030)
assert hyper([1,2,3,4],[5,6,7], -1).ae(0.90232988035425506008)
assert hyper([1,2,3],[1.25,5], 1).ae(28.924181329701905701)
assert hyper([1,2,3,4],[5,6,7],5).ae(1.5192307344006649499-1.1529845225075537461j)
assert hyper([1,2,3,4,5],[6,7,8,9],-1).ae(0.96288759462882357253)
assert hyper([1,2,3,4,5],[6,7,8,9],1).ae(1.0428697385885855841)
assert hyper([1,2,3,4,5],[6,7,8,9],5).ae(1.33980653631074769423-0.07143405251029226699j)
assert hyper([1,2.79,3.08,4.37],[5.2,6.1,7.3],5).ae(1.0996321464692607231-1.7748052293979985001j)
assert hyper([1,1,1],[1,2],1) == inf
assert hyper([1,1,1],[2,(101,100)],1).ae(100.01621213528313220)
# slow -- covered by doctests
#assert hyper([1,1,1],[2,3],0.9999).ae(1.2897972005319693905)
def test_hyper_u():
mp.dps = 15
assert hyperu(2,-3,0).ae(0.05)
assert hyperu(2,-3.5,0).ae(4./99)
assert hyperu(2,0,0) == 0.5
assert hyperu(-5,1,0) == -120
assert hyperu(-5,2,0) == inf
assert hyperu(-5,-2,0) == 0
assert hyperu(7,7,3).ae(0.00014681269365593503986) #exp(3)*gammainc(-6,3)
assert hyperu(2,-3,4).ae(0.011836478100271995559)
assert hyperu(3,4,5).ae(1./125)
assert hyperu(2,3,0.0625) == 256
assert hyperu(-1,2,0.25+0.5j) == -1.75+0.5j
assert hyperu(0.5,1.5,7.25).ae(2/sqrt(29))
assert hyperu(2,6,pi).ae(0.55804439825913399130)
assert (hyperu((3,2),8,100+201j)*10**4).ae(-0.3797318333856738798 - 2.9974928453561707782j)
assert (hyperu((5,2),(-1,2),-5000)*10**10).ae(-5.6681877926881664678j)
# XXX: fails because of undetected cancellation in low level series code
# Alternatively: could use asymptotic series here, if convergence test
# tweaked back to recognize this one
#assert (hyperu((5,2),(-1,2),-500)*10**7).ae(-1.82526906001593252847j)
def test_hyper_2f0():
mp.dps = 15
assert hyper([1,2],[],3) == hyp2f0(1,2,3)
assert hyp2f0(2,3,7).ae(0.0116108068639728714668 - 0.0073727413865865802130j)
assert hyp2f0(2,3,0) == 1
assert hyp2f0(0,0,0) == 1
assert hyp2f0(-1,-1,1).ae(2)
assert hyp2f0(-4,1,1.5).ae(62.5)
assert hyp2f0(-4,1,50).ae(147029801)
assert hyp2f0(-4,1,0.0001).ae(0.99960011997600240000)
assert hyp2f0(0.5,0.25,0.001).ae(1.0001251174078538115)
assert hyp2f0(0.5,0.25,3+4j).ae(0.85548875824755163518 + 0.21636041283392292973j)
# Important: cancellation check
assert hyp2f0((1,6),(5,6),-0.02371708245126284498).ae(0.996785723120804309)
# Should be exact; polynomial case
assert hyp2f0(-2,1,0.5+0.5j) == 0
assert hyp2f0(1,-2,0.5+0.5j) == 0
# There used to be a bug in thresholds that made one of the following hang
for d in [15, 50, 80]:
mp.dps = d
assert hyp2f0(1.5, 0.5, 0.009).ae('1.006867007239309717945323585695344927904000945829843527398772456281301440034218290443367270629519483 + 1.238277162240704919639384945859073461954721356062919829456053965502443570466701567100438048602352623e-46j')
def test_hyper_1f2():
mp.dps = 15
assert hyper([1],[2,3],4) == hyp1f2(1,2,3,4)
a1,b1,b2 = (1,10),(2,3),1./16
assert hyp1f2(a1,b1,b2,10).ae(298.7482725554557568)
assert hyp1f2(a1,b1,b2,100).ae(224128961.48602947604)
assert hyp1f2(a1,b1,b2,1000).ae(1.1669528298622675109e+27)
assert hyp1f2(a1,b1,b2,10000).ae(2.4780514622487212192e+86)
assert hyp1f2(a1,b1,b2,100000).ae(1.3885391458871523997e+274)
assert hyp1f2(a1,b1,b2,1000000).ae('9.8851796978960318255e+867')
assert hyp1f2(a1,b1,b2,10**7).ae('1.1505659189516303646e+2746')
assert hyp1f2(a1,b1,b2,10**8).ae('1.4672005404314334081e+8685')
assert hyp1f2(a1,b1,b2,10**20).ae('3.6888217332150976493e+8685889636')
assert hyp1f2(a1,b1,b2,10*j).ae(-16.163252524618572878 - 44.321567896480184312j)
assert hyp1f2(a1,b1,b2,100*j).ae(61938.155294517848171 + 637349.45215942348739j)
assert hyp1f2(a1,b1,b2,1000*j).ae(8455057657257695958.7 + 6261969266997571510.6j)
assert hyp1f2(a1,b1,b2,10000*j).ae(-8.9771211184008593089e+60 + 4.6550528111731631456e+59j)
assert hyp1f2(a1,b1,b2,100000*j).ae(2.6398091437239324225e+193 + 4.1658080666870618332e+193j)
assert hyp1f2(a1,b1,b2,1000000*j).ae('3.5999042951925965458e+613 + 1.5026014707128947992e+613j')
assert hyp1f2(a1,b1,b2,10**7*j).ae('-8.3208715051623234801e+1939 - 3.6752883490851869429e+1941j')
assert hyp1f2(a1,b1,b2,10**8*j).ae('2.0724195707891484454e+6140 - 1.3276619482724266387e+6141j')
assert hyp1f2(a1,b1,b2,10**20*j).ae('-1.1734497974795488504e+6141851462 + 1.1498106965385471542e+6141851462j')
def test_hyper_2f3():
mp.dps = 15
assert hyper([1,2],[3,4,5],6) == hyp2f3(1,2,3,4,5,6)
a1,a2,b1,b2,b3 = (1,10),(2,3),(3,10), 2, 1./16
# Check asymptotic expansion
assert hyp2f3(a1,a2,b1,b2,b3,10).ae(128.98207160698659976)
assert hyp2f3(a1,a2,b1,b2,b3,1000).ae(6.6309632883131273141e25)
assert hyp2f3(a1,a2,b1,b2,b3,10000).ae(4.6863639362713340539e84)
assert hyp2f3(a1,a2,b1,b2,b3,100000).ae(8.6632451236103084119e271)
assert hyp2f3(a1,a2,b1,b2,b3,10**6).ae('2.0291718386574980641e865')
assert hyp2f3(a1,a2,b1,b2,b3,10**7).ae('7.7639836665710030977e2742')
assert hyp2f3(a1,a2,b1,b2,b3,10**8).ae('3.2537462584071268759e8681')
assert hyp2f3(a1,a2,b1,b2,b3,10**20).ae('1.2966030542911614163e+8685889627')
assert hyp2f3(a1,a2,b1,b2,b3,10*j).ae(-18.551602185587547854 - 13.348031097874113552j)
assert hyp2f3(a1,a2,b1,b2,b3,100*j).ae(78634.359124504488695 + 74459.535945281973996j)
assert hyp2f3(a1,a2,b1,b2,b3,1000*j).ae(597682550276527901.59 - 65136194809352613.078j)
assert hyp2f3(a1,a2,b1,b2,b3,10000*j).ae(-1.1779696326238582496e+59 + 1.2297607505213133872e+59j)
assert hyp2f3(a1,a2,b1,b2,b3,100000*j).ae(2.9844228969804380301e+191 + 7.5587163231490273296e+190j)
assert hyp2f3(a1,a2,b1,b2,b3,1000000*j).ae('7.4859161049322370311e+610 - 2.8467477015940090189e+610j')
assert hyp2f3(a1,a2,b1,b2,b3,10**7*j).ae('-1.7477645579418800826e+1938 - 1.7606522995808116405e+1938j')
assert hyp2f3(a1,a2,b1,b2,b3,10**8*j).ae('-1.6932731942958401784e+6137 - 2.4521909113114629368e+6137j')
assert hyp2f3(a1,a2,b1,b2,b3,10**20*j).ae('-2.0988815677627225449e+6141851451 + 5.7708223542739208681e+6141851452j')
def test_hyper_2f2():
mp.dps = 15
assert hyper([1,2],[3,4],5) == hyp2f2(1,2,3,4,5)
a1,a2,b1,b2 = (3,10),4,(1,2),1./16
assert hyp2f2(a1,a2,b1,b2,10).ae(448225936.3377556696)
assert hyp2f2(a1,a2,b1,b2,10000).ae('1.2012553712966636711e+4358')
assert hyp2f2(a1,a2,b1,b2,-20000).ae(-0.04182343755661214626)
assert hyp2f2(a1,a2,b1,b2,10**20).ae('1.1148680024303263661e+43429448190325182840')
def test_orthpoly():
mp.dps = 15
assert jacobi(-4,2,3,0.7).ae(22800./4913)
assert jacobi(3,2,4,5.5) == 4133.125
assert jacobi(1.5,5/6.,4,0).ae(-1.0851951434075508417)
assert jacobi(-2, 1, 2, 4).ae(-0.16)
assert jacobi(2, -1, 2.5, 4).ae(34.59375)
#assert jacobi(2, -1, 2, 4) == 28.5
assert legendre(5, 7) == 129367
assert legendre(0.5,0).ae(0.53935260118837935667)
assert legendre(-1,-1) == 1
assert legendre(0,-1) == 1
assert legendre(0, 1) == 1
assert legendre(1, -1) == -1
assert legendre(7, 1) == 1
assert legendre(7, -1) == -1
assert legendre(8,1.5).ae(15457523./32768)
assert legendre(j,-j).ae(2.4448182735671431011 + 0.6928881737669934843j)
assert chebyu(5,1) == 6
assert chebyt(3,2) == 26
assert legendre(3.5,-1) == inf
assert legendre(4.5,-1) == -inf
assert legendre(3.5+1j,-1) == mpc(inf,inf)
assert legendre(4.5+1j,-1) == mpc(-inf,-inf)
assert laguerre(4, -2, 3).ae(-1.125)
assert laguerre(3, 1+j, 0.5).ae(0.2291666666666666667 + 2.5416666666666666667j)
def test_hermite():
mp.dps = 15
assert hermite(-2, 0).ae(0.5)
assert hermite(-1, 0).ae(0.88622692545275801365)
assert hermite(0, 0).ae(1)
assert hermite(1, 0) == 0
assert hermite(2, 0).ae(-2)
assert hermite(0, 2).ae(1)
assert hermite(1, 2).ae(4)
assert hermite(1, -2).ae(-4)
assert hermite(2, -2).ae(14)
assert hermite(0.5, 0).ae(0.69136733903629335053)
assert hermite(9, 0) == 0
assert hermite(4,4).ae(3340)
assert hermite(3,4).ae(464)
assert hermite(-4,4).ae(0.00018623860287512396181)
assert hermite(-3,4).ae(0.0016540169879668766270)
assert hermite(9, 2.5j).ae(13638725j)
assert hermite(9, -2.5j).ae(-13638725j)
assert hermite(9, 100).ae(511078883759363024000)
assert hermite(9, -100).ae(-511078883759363024000)
assert hermite(9, 100j).ae(512922083920643024000j)
assert hermite(9, -100j).ae(-512922083920643024000j)
assert hermite(-9.5, 2.5j).ae(-2.9004951258126778174e-6 + 1.7601372934039951100e-6j)
assert hermite(-9.5, -2.5j).ae(-2.9004951258126778174e-6 - 1.7601372934039951100e-6j)
assert hermite(-9.5, 100).ae(1.3776300722767084162e-22, abs_eps=0, rel_eps=eps)
assert hermite(-9.5, -100).ae('1.3106082028470671626e4355')
assert hermite(-9.5, 100j).ae(-9.7900218581864768430e-23 - 9.7900218581864768430e-23j, abs_eps=0, rel_eps=eps)
assert hermite(-9.5, -100j).ae(-9.7900218581864768430e-23 + 9.7900218581864768430e-23j, abs_eps=0, rel_eps=eps)
assert hermite(2+3j, -1-j).ae(851.3677063883687676 - 1496.4373467871007997j)
def test_gegenbauer():
mp.dps = 15
assert gegenbauer(1,2,3).ae(12)
assert gegenbauer(2,3,4).ae(381)
assert gegenbauer(0,0,0) == 0
assert gegenbauer(2,-1,3) == 0
assert gegenbauer(-7, 0.5, 3).ae(8989)
assert gegenbauer(1, -0.5, 3).ae(-3)
assert gegenbauer(1, -1.5, 3).ae(-9)
assert gegenbauer(1, -0.5, 3).ae(-3)
assert gegenbauer(-0.5, -0.5, 3).ae(-2.6383553159023906245)
assert gegenbauer(2+3j, 1-j, 3+4j).ae(14.880536623203696780 + 20.022029711598032898j)
#assert gegenbauer(-2, -0.5, 3).ae(-12)
def test_legenp():
mp.dps = 15
assert legenp(2,0,4) == legendre(2,4)
assert legenp(-2, -1, 0.5).ae(0.43301270189221932338)
assert legenp(-2, -1, 0.5, type=3).ae(0.43301270189221932338j)
assert legenp(-2, 1, 0.5).ae(-0.86602540378443864676)
assert legenp(2+j, 3+4j, -j).ae(134742.98773236786148 + 429782.72924463851745j)
assert legenp(2+j, 3+4j, -j, type=3).ae(802.59463394152268507 - 251.62481308942906447j)
assert legenp(2,4,3).ae(0)
assert legenp(2,4,3,type=3).ae(0)
assert legenp(2,1,0.5).ae(-1.2990381056766579701)
assert legenp(2,1,0.5,type=3).ae(1.2990381056766579701j)
assert legenp(3,2,3).ae(-360)
assert legenp(3,3,3).ae(240j*2**0.5)
assert legenp(3,4,3).ae(0)
assert legenp(0,0.5,2).ae(0.52503756790433198939 - 0.52503756790433198939j)
assert legenp(-1,-0.5,2).ae(0.60626116232846498110 + 0.60626116232846498110j)
assert legenp(-2,0.5,2).ae(1.5751127037129959682 - 1.5751127037129959682j)
assert legenp(-2,0.5,-0.5).ae(-0.85738275810499171286)
def test_legenq():
mp.dps = 15
f = legenq
# Evaluation at poles
assert isnan(f(3,2,1))
assert isnan(f(3,2,-1))
assert isnan(f(3,2,1,type=3))
assert isnan(f(3,2,-1,type=3))
# Evaluation at 0
assert f(0,1,0,type=2).ae(-1)
assert f(-2,2,0,type=2,zeroprec=200).ae(0)
assert f(1.5,3,0,type=2).ae(-2.2239343475841951023)
assert f(0,1,0,type=3).ae(j)
assert f(-2,2,0,type=3,zeroprec=200).ae(0)
assert f(1.5,3,0,type=3).ae(2.2239343475841951022*(1-1j))
# Standard case, degree 0
assert f(0,0,-1.5).ae(-0.8047189562170501873 + 1.5707963267948966192j)
assert f(0,0,-0.5).ae(-0.54930614433405484570)
assert f(0,0,0,zeroprec=200).ae(0)
assert f(0,0,0.5).ae(0.54930614433405484570)
assert f(0,0,1.5).ae(0.8047189562170501873 - 1.5707963267948966192j)
assert f(0,0,-1.5,type=3).ae(-0.80471895621705018730)
assert f(0,0,-0.5,type=3).ae(-0.5493061443340548457 - 1.5707963267948966192j)
assert f(0,0,0,type=3).ae(-1.5707963267948966192j)
assert f(0,0,0.5,type=3).ae(0.5493061443340548457 - 1.5707963267948966192j)
assert f(0,0,1.5,type=3).ae(0.80471895621705018730)
# Standard case, degree 1
assert f(1,0,-1.5).ae(0.2070784343255752810 - 2.3561944901923449288j)
assert f(1,0,-0.5).ae(-0.72534692783297257715)
assert f(1,0,0).ae(-1)
assert f(1,0,0.5).ae(-0.72534692783297257715)
assert f(1,0,1.5).ae(0.2070784343255752810 - 2.3561944901923449288j)
# Standard case, degree 2
assert f(2,0,-1.5).ae(-0.0635669991240192885 + 4.5160394395353277803j)
assert f(2,0,-0.5).ae(0.81866326804175685571)
assert f(2,0,0,zeroprec=200).ae(0)
assert f(2,0,0.5).ae(-0.81866326804175685571)
assert f(2,0,1.5).ae(0.0635669991240192885 - 4.5160394395353277803j)
# Misc orders and degrees
assert f(2,3,1.5,type=2).ae(-5.7243340223994616228j)
assert f(2,3,1.5,type=3).ae(-5.7243340223994616228)
assert f(2,3,0.5,type=2).ae(-12.316805742712016310)
assert f(2,3,0.5,type=3).ae(-12.316805742712016310j)
assert f(2,3,-1.5,type=2).ae(-5.7243340223994616228j)
assert f(2,3,-1.5,type=3).ae(5.7243340223994616228)
assert f(2,3,-0.5,type=2).ae(-12.316805742712016310)
assert f(2,3,-0.5,type=3).ae(-12.316805742712016310j)
assert f(2+3j, 3+4j, 0.5, type=3).ae(0.0016119404873235186807 - 0.0005885900510718119836j)
assert f(2+3j, 3+4j, -1.5, type=3).ae(0.008451400254138808670 + 0.020645193304593235298j)
assert f(-2.5,1,-1.5).ae(3.9553395527435335749j)
assert f(-2.5,1,-0.5).ae(1.9290561746445456908)
assert f(-2.5,1,0).ae(1.2708196271909686299)
assert f(-2.5,1,0.5).ae(-0.31584812990742202869)
assert f(-2.5,1,1.5).ae(-3.9553395527435335742 + 0.2993235655044701706j)
assert f(-2.5,1,-1.5,type=3).ae(0.29932356550447017254j)
assert f(-2.5,1,-0.5,type=3).ae(-0.3158481299074220287 - 1.9290561746445456908j)
assert f(-2.5,1,0,type=3).ae(1.2708196271909686292 - 1.2708196271909686299j)
assert f(-2.5,1,0.5,type=3).ae(1.9290561746445456907 + 0.3158481299074220287j)
assert f(-2.5,1,1.5,type=3).ae(-0.29932356550447017254)
def test_agm():
mp.dps = 15
assert agm(0,0) == 0
assert agm(0,1) == 0
assert agm(1,1) == 1
assert agm(7,7) == 7
assert agm(j,j) == j
assert (1/agm(1,sqrt(2))).ae(0.834626841674073186)
assert agm(1,2).ae(1.4567910310469068692)
assert agm(1,3).ae(1.8636167832448965424)
assert agm(1,j).ae(0.599070117367796104+0.599070117367796104j)
assert agm(2) == agm(1,2)
assert agm(-3,4).ae(0.63468509766550907+1.3443087080896272j)
def test_gammainc():
mp.dps = 15
assert gammainc(2,5).ae(6*exp(-5))
assert gammainc(2,0,5).ae(1-6*exp(-5))
assert gammainc(2,3,5).ae(-6*exp(-5)+4*exp(-3))
assert gammainc(-2.5,-0.5).ae(-0.9453087204829418812-5.3164237738936178621j)
assert gammainc(0,2,4).ae(0.045121158298212213088)
assert gammainc(0,3).ae(0.013048381094197037413)
assert gammainc(0,2+j,1-j).ae(0.00910653685850304839-0.22378752918074432574j)
assert gammainc(0,1-j).ae(0.00028162445198141833+0.17932453503935894015j)
assert gammainc(3,4,5,True).ae(0.11345128607046320253)
assert gammainc(3.5,0,inf).ae(gamma(3.5))
assert gammainc(-150.5,500).ae('6.9825435345798951153e-627')
assert gammainc(-150.5,800).ae('4.6885137549474089431e-788')
assert gammainc(-3.5, -20.5).ae(0.27008820585226911 - 1310.31447140574997636j)
assert gammainc(-3.5, -200.5).ae(0.27008820585226911 - 5.3264597096208368435e76j) # XXX real part
assert gammainc(0,0,2) == inf
assert gammainc(1,b=1).ae(0.6321205588285576784)
assert gammainc(3,2,2) == 0
assert gammainc(2,3+j,3-j).ae(-0.28135485191849314194j)
# Regularized upper gamma
assert isnan(gammainc(0, 0, regularized=True))
assert gammainc(-1, 0, regularized=True) == inf
assert gammainc(1, 0, regularized=True) == 1
assert gammainc(0, 5, regularized=True) == 0
assert gammainc(0, 2+3j, regularized=True) == 0
assert gammainc(0, 5000, regularized=True) == 0
assert gammainc(0, 10**30, regularized=True) == 0
assert gammainc(-1, 5, regularized=True) == 0
assert gammainc(-1, 5000, regularized=True) == 0
assert gammainc(-1, 10**30, regularized=True) == 0
assert gammainc(-1, -5, regularized=True) == 0
assert gammainc(-1, -5000, regularized=True) == 0
assert gammainc(-1, -10**30, regularized=True) == 0
assert gammainc(-1, 3+4j, regularized=True) == 0
assert gammainc(1, 5, regularized=True).ae(exp(-5))
assert gammainc(1, 5000, regularized=True).ae(exp(-5000))
assert gammainc(1, 10**30, regularized=True).ae(exp(-10**30))
assert gammainc(1, 3+4j, regularized=True).ae(exp(-3-4j))
assert gammainc(-1000000,2).ae('1.3669297209397347754e-301037', abs_eps=0, rel_eps=8*eps)
assert gammainc(-1000000,2,regularized=True) == 0
assert gammainc(-1000000,3+4j).ae('-1.322575609404222361e-698979 - 4.9274570591854533273e-698978j', abs_eps=0, rel_eps=8*eps)
assert gammainc(-1000000,3+4j,regularized=True) == 0
assert gammainc(2+3j, 4+5j, regularized=True).ae(0.085422013530993285774-0.052595379150390078503j)
assert gammainc(1000j, 1000j, regularized=True).ae(0.49702647628921131761 + 0.00297355675013575341j)
# Generalized
assert gammainc(3,4,2) == -gammainc(3,2,4)
assert gammainc(4, 2, 3).ae(1.2593494302978947396)
assert gammainc(4, 2, 3, regularized=True).ae(0.20989157171631578993)
assert gammainc(0, 2, 3).ae(0.035852129613864082155)
assert gammainc(0, 2, 3, regularized=True) == 0
assert gammainc(-1, 2, 3).ae(0.015219822548487616132)
assert gammainc(-1, 2, 3, regularized=True) == 0
# Should use upper gammas
assert gammainc(5, 10000, 12000).ae('1.1359381951461801687e-4327', abs_eps=0, rel_eps=8*eps)
# Should use lower gammas
assert gammainc(10000, 2, 3).ae('8.1244514125995785934e4765')
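# Illustrative sketch added in this rewrite (not part of the original test
# suite; the function name is an assumption): the lower and upper incomplete
# gamma functions sum to the complete gamma function, and the generalized form
# gammainc(a, x, y) is additive over adjoining intervals.
def test_gammainc_additivity_sketch():
    mp.dps = 15
    for a, z in [(mpf(2.5), mpf(3)), (mpf(4), mpf(0.5))]:
        assert (gammainc(a, 0, z) + gammainc(a, z, inf)).ae(gamma(a))
        assert (gammainc(a, 1, 2) + gammainc(a, 2, 5)).ae(gammainc(a, 1, 5))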
def test_gammainc_expint_n():
# These tests are intended to check all cases of the low-level code
# for upper gamma and expint with small integer index.
# Need to cover positive/negative arguments; small/large/huge arguments
# for both positive and negative indices, as well as indices 0 and 1
# which may be special-cased
mp.dps = 15
assert expint(-3,3.5).ae(0.021456366563296693987)
assert expint(-2,3.5).ae(0.014966633183073309405)
assert expint(-1,3.5).ae(0.011092916359219041088)
assert expint(0,3.5).ae(0.0086278238349481430685)
assert expint(1,3.5).ae(0.0069701398575483929193)
assert expint(2,3.5).ae(0.0058018939208991255223)
assert expint(3,3.5).ae(0.0049453773495857807058)
assert expint(-3,-3.5).ae(-4.6618170604073311319)
assert expint(-2,-3.5).ae(-5.5996974157555515963)
assert expint(-1,-3.5).ae(-6.7582555017739415818)
assert expint(0,-3.5).ae(-9.4615577024835182145)
assert expint(1,-3.5).ae(-13.925353995152335292 - 3.1415926535897932385j)
assert expint(2,-3.5).ae(-15.62328702434085977 - 10.995574287564276335j)
assert expint(3,-3.5).ae(-10.783026313250347722 - 19.242255003237483586j)
assert expint(-3,350).ae(2.8614825451252838069e-155, abs_eps=0, rel_eps=8*eps)
assert expint(-2,350).ae(2.8532837224504675901e-155, abs_eps=0, rel_eps=8*eps)
assert expint(-1,350).ae(2.8451316155828634555e-155, abs_eps=0, rel_eps=8*eps)
assert expint(0,350).ae(2.8370258275042797989e-155, abs_eps=0, rel_eps=8*eps)
assert expint(1,350).ae(2.8289659656701459404e-155, abs_eps=0, rel_eps=8*eps)
assert expint(2,350).ae(2.8209516419468505006e-155, abs_eps=0, rel_eps=8*eps)
assert expint(3,350).ae(2.8129824725501272171e-155, abs_eps=0, rel_eps=8*eps)
assert expint(-3,-350).ae(-2.8528796154044839443e+149)
assert expint(-2,-350).ae(-2.8610072121701264351e+149)
assert expint(-1,-350).ae(-2.8691813842677537647e+149)
assert expint(0,-350).ae(-2.8774025343659421709e+149)
u = expint(1,-350)
assert u.ae(-2.8856710698020863568e+149)
assert u.imag.ae(-3.1415926535897932385)
u = expint(2,-350)
assert u.ae(-2.8939874026504650534e+149)
assert u.imag.ae(-1099.5574287564276335)
u = expint(3,-350)
assert u.ae(-2.9023519497915044349e+149)
assert u.imag.ae(-192422.55003237483586)
assert expint(-3,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps)
assert expint(-2,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps)
assert expint(-1,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps)
assert expint(0,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps)
assert expint(1,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps)
assert expint(2,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps)
assert expint(3,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps)
assert expint(-3,-350000000000000000000000).ae('-3.7805306852415755699e+152003068666138139677871')
assert expint(-2,-350000000000000000000000).ae('-3.7805306852415755699e+152003068666138139677871')
assert expint(-1,-350000000000000000000000).ae('-3.7805306852415755699e+152003068666138139677871')
assert expint(0,-350000000000000000000000).ae('-3.7805306852415755699e+152003068666138139677871')
u = expint(1,-350000000000000000000000)
assert u.ae('-3.7805306852415755699e+152003068666138139677871')
assert u.imag.ae(-3.1415926535897932385)
u = expint(2,-350000000000000000000000)
assert u.imag.ae(-1.0995574287564276335e+24)
assert u.ae('-3.7805306852415755699e+152003068666138139677871')
u = expint(3,-350000000000000000000000)
assert u.imag.ae(-1.9242255003237483586e+47)
assert u.ae('-3.7805306852415755699e+152003068666138139677871')
# Small case; no branch cut
assert gammainc(-3,3.5).ae(0.00010020262545203707109)
assert gammainc(-2,3.5).ae(0.00040370427343557393517)
assert gammainc(-1,3.5).ae(0.0016576839773997501492)
assert gammainc(0,3.5).ae(0.0069701398575483929193)
assert gammainc(1,3.5).ae(0.03019738342231850074)
assert gammainc(2,3.5).ae(0.13588822540043325333)
assert gammainc(3,3.5).ae(0.64169439772426814072)
# Small case; with branch cut
assert gammainc(-3,-3.5).ae(0.03595832954467563286 - 0.52359877559829887308j)
assert gammainc(-2,-3.5).ae(-0.88024704597962022221 - 1.5707963267948966192j)
assert gammainc(-1,-3.5).ae(4.4637962926688170771 - 3.1415926535897932385j)
assert gammainc(0,-3.5).ae(-13.925353995152335292 - 3.1415926535897932385j)
assert gammainc(1,-3.5).ae(33.115451958692313751)
assert gammainc(2,-3.5).ae(-82.788629896730784377)
assert gammainc(3,-3.5).ae(240.08702670051927469)
# Asymptotic case; no branch cut
assert gammainc(-3,350).ae(6.5424095113340358813e-163, abs_eps=0, rel_eps=8*eps)
assert gammainc(-2,350).ae(2.296312222489899769e-160, abs_eps=0, rel_eps=8*eps)
assert gammainc(-1,350).ae(8.059861834133858573e-158, abs_eps=0, rel_eps=8*eps)
assert gammainc(0,350).ae(2.8289659656701459404e-155, abs_eps=0, rel_eps=8*eps)
assert gammainc(1,350).ae(9.9295903962649792963e-153, abs_eps=0, rel_eps=8*eps)
assert gammainc(2,350).ae(3.485286229089007733e-150, abs_eps=0, rel_eps=8*eps)
assert gammainc(3,350).ae(1.2233453960006379793e-147, abs_eps=0, rel_eps=8*eps)
# Asymptotic case; branch cut
u = gammainc(-3,-350)
assert u.ae(6.7889565783842895085e+141)
assert u.imag.ae(-0.52359877559829887308)
u = gammainc(-2,-350)
assert u.ae(-2.3692668977889832121e+144)
assert u.imag.ae(-1.5707963267948966192)
u = gammainc(-1,-350)
assert u.ae(8.2685354361441858669e+146)
assert u.imag.ae(-3.1415926535897932385)
u = gammainc(0,-350)
assert u.ae(-2.8856710698020863568e+149)
assert u.imag.ae(-3.1415926535897932385)
u = gammainc(1,-350)
assert u.ae(1.0070908870280797598e+152)
assert u.imag == 0
u = gammainc(2,-350)
assert u.ae(-3.5147471957279983618e+154)
assert u.imag == 0
u = gammainc(3,-350)
assert u.ae(1.2266568422179417091e+157)
assert u.imag == 0
# Extreme asymptotic case
assert gammainc(-3,350000000000000000000000).ae('5.0362468738874738859e-152003068666138139677990', abs_eps=0, rel_eps=8*eps)
assert gammainc(-2,350000000000000000000000).ae('1.7626864058606158601e-152003068666138139677966', abs_eps=0, rel_eps=8*eps)
assert gammainc(-1,350000000000000000000000).ae('6.1694024205121555102e-152003068666138139677943', abs_eps=0, rel_eps=8*eps)
assert gammainc(0,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps)
assert gammainc(1,350000000000000000000000).ae('7.5575179651273905e-152003068666138139677896', abs_eps=0, rel_eps=8*eps)
assert gammainc(2,350000000000000000000000).ae('2.645131287794586675e-152003068666138139677872', abs_eps=0, rel_eps=8*eps)
assert gammainc(3,350000000000000000000000).ae('9.2579595072810533625e-152003068666138139677849', abs_eps=0, rel_eps=8*eps)
u = gammainc(-3,-350000000000000000000000)
assert u.ae('8.8175642804468234866e+152003068666138139677800')
assert u.imag.ae(-0.52359877559829887308)
u = gammainc(-2,-350000000000000000000000)
assert u.ae('-3.0861474981563882203e+152003068666138139677824')
assert u.imag.ae(-1.5707963267948966192)
u = gammainc(-1,-350000000000000000000000)
assert u.ae('1.0801516243547358771e+152003068666138139677848')
assert u.imag.ae(-3.1415926535897932385)
u = gammainc(0,-350000000000000000000000)
assert u.ae('-3.7805306852415755699e+152003068666138139677871')
assert u.imag.ae(-3.1415926535897932385)
assert gammainc(1,-350000000000000000000000).ae('1.3231857398345514495e+152003068666138139677895')
assert gammainc(2,-350000000000000000000000).ae('-4.6311500894209300731e+152003068666138139677918')
assert gammainc(3,-350000000000000000000000).ae('1.6209025312973255256e+152003068666138139677942')
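# Illustrative sketch added in this rewrite (not part of the original test
# suite; the function name is an assumption): the generalized exponential
# integral and the upper incomplete gamma function are related by
# E_n(z) = z**(n-1) * Gamma(1-n, z) for Re(z) > 0, which is why the two value
# batteries above mirror each other.
def test_expint_gammainc_relation_sketch():
    mp.dps = 15
    for n in [2, 3]:
        for z in [mpf(3.5), mpc(2, 1)]:
            assert expint(n, z).ae(z**(n-1) * gammainc(1-n, z))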
def test_incomplete_beta():
mp.dps = 15
assert betainc(-2,-3,0.5,0.75).ae(63.4305673311255413583969)
assert betainc(4.5,0.5+2j,2.5,6).ae(0.2628801146130621387903065 + 0.5162565234467020592855378j)
assert betainc(4,5,0,6).ae(90747.77142857142857142857)
def test_erf():
mp.dps = 15
assert erf(0) == 0
assert erf(1).ae(0.84270079294971486934)
assert erf(3+4j).ae(-120.186991395079444098 - 27.750337293623902498j)
assert erf(-4-3j).ae(-0.99991066178539168236 + 0.00004972026054496604j)
assert erf(pi).ae(0.99999112385363235839)
assert erf(1j).ae(1.6504257587975428760j)
assert erf(-1j).ae(-1.6504257587975428760j)
assert isinstance(erf(1), mpf)
assert isinstance(erf(-1), mpf)
assert isinstance(erf(0), mpf)
assert isinstance(erf(0j), mpc)
assert erf(inf) == 1
assert erf(-inf) == -1
assert erfi(0) == 0
assert erfi(1/pi).ae(0.371682698493894314)
assert erfi(inf) == inf
assert erfi(-inf) == -inf
assert erf(1+0j) == erf(1)
assert erfc(1+0j) == erfc(1)
assert erf(0.2+0.5j).ae(1 - erfc(0.2+0.5j))
assert erfc(0) == 1
assert erfc(1).ae(1-erf(1))
assert erfc(-1).ae(1-erf(-1))
assert erfc(1/pi).ae(1-erf(1/pi))
assert erfc(-10) == 2
assert erfc(-1000000) == 2
assert erfc(-inf) == 2
assert erfc(inf) == 0
assert isnan(erfc(nan))
assert (erfc(10**4)*mpf(10)**43429453).ae('3.63998738656420')
mp.dps = 50
# This one does not use the asymptotic series
assert (erfc(10)*10**45).ae('2.0884875837625447570007862949577886115608181193212')
# This one does
assert (erfc(50)*10**1088).ae('2.0709207788416560484484478751657887929322509209954')
mp.dps = 15
assert str(erfc(10**50)) == '3.66744826532555e-4342944819032518276511289189166050822943970058036665661144537831658646492088707747292249493384317534'
assert erfinv(0) == 0
assert erfinv(0.5).ae(0.47693627620446987338)
assert erfinv(-0.5).ae(-0.47693627620446987338)
assert erfinv(1) == inf
assert erfinv(-1) == -inf
assert erf(erfinv(0.95)).ae(0.95)
assert erf(erfinv(0.999999999995)).ae(0.999999999995)
assert erf(erfinv(-0.999999999995)).ae(-0.999999999995)
mp.dps = 50
assert erf(erfinv('0.99999999999999999999999999999995')).ae('0.99999999999999999999999999999995')
assert erf(erfinv('0.999999999999999999999999999999995')).ae('0.999999999999999999999999999999995')
assert erf(erfinv('-0.999999999999999999999999999999995')).ae('-0.999999999999999999999999999999995')
mp.dps = 15
# Complex asymptotic expansions
v = erfc(50j)
assert v.real == 1
assert v.imag.ae('-6.1481820666053078736e+1083')
assert erfc(-100+5j).ae(2)
assert (erfc(100+5j)*10**4335).ae(2.3973567853824133572 - 3.9339259530609420597j)
assert erfc(100+100j).ae(0.00065234366376857698698 - 0.0039357263629214118437j)
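# Illustrative sketch added in this rewrite (not part of the original test
# suite; the function name is an assumption): for large real x, erfc(x) is
# dominated by its leading asymptotic term exp(-x**2)/(x*sqrt(pi)), with a
# relative error of roughly 1/(2*x**2); this is what makes huge arguments such
# as erfc(10**4) above tractable.
def test_erfc_leading_asymptotic_sketch():
    mp.dps = 15
    for x in [mpf(10), mpf(50), mpf(200)]:
        leading = exp(-x**2)/(x*sqrt(pi))
        assert abs(erfc(x)/leading - 1) < 1/x**2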
def test_pdf():
mp.dps = 15
assert npdf(-inf) == 0
assert npdf(inf) == 0
assert npdf(5,0,2).ae(npdf(5+4,4,2))
assert quadts(lambda x: npdf(x,-0.5,0.8), [-inf, inf]) == 1
assert ncdf(0) == 0.5
assert ncdf(3,3) == 0.5
assert ncdf(-inf) == 0
assert ncdf(inf) == 1
assert ncdf(10) == 1
# Verify that this is computed accurately
assert (ncdf(-10)*10**24).ae(7.619853024160526)
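# Illustrative sketch added in this rewrite (not part of the original test
# suite; the function name is an assumption): the standard normal CDF tested
# above is a rescaled error function, ncdf(x) = (1 + erf(x/sqrt(2)))/2.
def test_ncdf_erf_relation_sketch():
    mp.dps = 15
    for x in [mpf(-2.5), mpf(0.75), mpf(3)]:
        assert ncdf(x).ae((1 + erf(x/sqrt(2)))/2)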
def test_lambertw():
mp.dps = 15
assert lambertw(0) == 0
assert lambertw(0+0j) == 0
assert lambertw(inf) == inf
assert isnan(lambertw(nan))
assert lambertw(inf,1).real == inf
assert lambertw(inf,1).imag.ae(2*pi)
assert lambertw(-inf,1).real == inf
assert lambertw(-inf,1).imag.ae(3*pi)
assert lambertw(0,-1) == -inf
assert lambertw(0,1) == -inf
assert lambertw(0,3) == -inf
assert lambertw(e).ae(1)
assert lambertw(1).ae(0.567143290409783873)
assert lambertw(-pi/2).ae(j*pi/2)
assert lambertw(-log(2)/2).ae(-log(2))
assert lambertw(0.25).ae(0.203888354702240164)
assert lambertw(-0.25).ae(-0.357402956181388903)
assert lambertw(-1./10000,0).ae(-0.000100010001500266719)
assert lambertw(-0.25,-1).ae(-2.15329236411034965)
assert lambertw(0.25,-1).ae(-3.00899800997004620-4.07652978899159763j)
assert lambertw(-0.25,-1).ae(-2.15329236411034965)
assert lambertw(0.25,1).ae(-3.00899800997004620+4.07652978899159763j)
assert lambertw(-0.25,1).ae(-3.48973228422959210+7.41405453009603664j)
assert lambertw(-4).ae(0.67881197132094523+1.91195078174339937j)
assert lambertw(-4,1).ae(-0.66743107129800988+7.76827456802783084j)
assert lambertw(-4,-1).ae(0.67881197132094523-1.91195078174339937j)
assert lambertw(1000).ae(5.24960285240159623)
assert lambertw(1000,1).ae(4.91492239981054535+5.44652615979447070j)
assert lambertw(1000,-1).ae(4.91492239981054535-5.44652615979447070j)
assert lambertw(1000,5).ae(3.5010625305312892+29.9614548941181328j)
assert lambertw(3+4j).ae(1.281561806123775878+0.533095222020971071j)
assert lambertw(-0.4+0.4j).ae(-0.10396515323290657+0.61899273315171632j)
assert lambertw(3+4j,1).ae(-0.11691092896595324+5.61888039871282334j)
assert lambertw(3+4j,-1).ae(0.25856740686699742-3.85211668616143559j)
assert lambertw(-0.5,-1).ae(-0.794023632344689368-0.770111750510379110j)
assert lambertw(-1./10000,1).ae(-11.82350837248724344+6.80546081842002101j)
assert lambertw(-1./10000,-1).ae(-11.6671145325663544)
assert lambertw(-1./10000,-2).ae(-11.82350837248724344-6.80546081842002101j)
assert lambertw(-1./100000,4).ae(-14.9186890769540539+26.1856750178782046j)
assert lambertw(-1./100000,5).ae(-15.0931437726379218666+32.5525721210262290086j)
assert lambertw((2+j)/10).ae(0.173704503762911669+0.071781336752835511j)
assert lambertw((2+j)/10,1).ae(-3.21746028349820063+4.56175438896292539j)
assert lambertw((2+j)/10,-1).ae(-3.03781405002993088-3.53946629633505737j)
assert lambertw((2+j)/10,4).ae(-4.6878509692773249+23.8313630697683291j)
assert lambertw(-(2+j)/10).ae(-0.226933772515757933-0.164986470020154580j)
assert lambertw(-(2+j)/10,1).ae(-2.43569517046110001+0.76974067544756289j)
assert lambertw(-(2+j)/10,-1).ae(-3.54858738151989450-6.91627921869943589j)
assert lambertw(-(2+j)/10,4).ae(-4.5500846928118151+20.6672982215434637j)
mp.dps = 50
assert lambertw(pi).ae('1.073658194796149172092178407024821347547745350410314531')
mp.dps = 15
# Former bug in generated branch
assert lambertw(-0.5+0.002j).ae(-0.78917138132659918344 + 0.76743539379990327749j)
assert lambertw(-0.5-0.002j).ae(-0.78917138132659918344 - 0.76743539379990327749j)
assert lambertw(-0.448+0.4j).ae(-0.11855133765652382241 + 0.66570534313583423116j)
assert lambertw(-0.448-0.4j).ae(-0.11855133765652382241 - 0.66570534313583423116j)
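# Illustrative sketch added in this rewrite (not part of the original test
# suite; the function name is an assumption): every branch k of the Lambert W
# function satisfies the defining equation w*exp(w) = z, so the branch values
# above can be spot-checked directly.
def test_lambertw_defining_equation_sketch():
    mp.dps = 15
    for z in [mpf(0.25), mpf(3), mpc(3, 4), mpc(-0.2, 0.1)]:
        for k in [0, 1, -1]:
            w = lambertw(z, k)
            assert (w*exp(w)).ae(z)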
def test_meijerg():
mp.dps = 15
assert meijerg([[2,3],[1]],[[0.5,2],[3,4]], 2.5).ae(4.2181028074787439386)
assert meijerg([[],[1+j]],[[1],[1]], 3+4j).ae(271.46290321152464592 - 703.03330399954820169j)
assert meijerg([[0.25],[1]],[[0.5],[2]],0) == 0
assert meijerg([[0],[]],[[0,0,'1/3','2/3'], []], '2/27').ae(2.2019391389653314120)
# Verify 1/z series being used
assert meijerg([[-3],[-0.5]], [[-1],[-2.5]], -0.5).ae(-1.338096165935754898687431)
assert meijerg([[1-(-1)],[1-(-2.5)]], [[1-(-3)],[1-(-0.5)]], -2.0).ae(-1.338096165935754898687431)
assert meijerg([[-3],[-0.5]], [[-1],[-2.5]], -1).ae(-(pi+4)/(4*pi))
a = 2.5
b = 1.25
for z in [mpf(0.25), mpf(2)]:
x1 = hyp1f1(a,b,z)
x2 = gamma(b)/gamma(a)*meijerg([[1-a],[]],[[0],[1-b]],-z)
x3 = gamma(b)/gamma(a)*meijerg([[1-0],[1-(1-b)]],[[1-(1-a)],[]],-1/z)
assert x1.ae(x2)
assert x1.ae(x3)
def test_appellf1():
mp.dps = 15
assert appellf1(2,-2,1,1,2,3).ae(-1.75)
assert appellf1(2,1,-2,1,2,3).ae(-8)
assert appellf1(2,1,-2,1,0.5,0.25).ae(1.5)
assert appellf1(-2,1,3,2,3,3).ae(19)
assert appellf1(1,2,3,4,0.5,0.125).ae( 1.53843285792549786518)
def test_coulomb():
# Note: most tests are doctests
# Test for a bug:
mp.dps = 15
assert coulombg(mpc(-5,0),2,3).ae(20.087729487721430394)
def test_hyper_param_accuracy():
mp.dps = 15
As = [n+1e-10 for n in range(-5,-1)]
Bs = [n+1e-10 for n in range(-12,-5)]
assert hyper(As,Bs,10).ae(-381757055858.652671927)
assert legenp(0.5, 100, 0.25).ae(-2.4124576567211311755e+144)
assert (hyp1f1(1000,1,-100)*10**24).ae(5.2589445437370169113)
assert (hyp2f1(10, -900, 10.5, 0.99)*10**24).ae(1.9185370579660768203)
assert (hyp2f1(1000,1.5,-3.5,-1.5)*10**385).ae(-2.7367529051334000764)
assert hyp2f1(-5, 10, 3, 0.5, zeroprec=500) == 0
assert (hyp1f1(-10000, 1000, 100)*10**424).ae(-3.1046080515824859974)
assert (hyp2f1(1000,1.5,-3.5,-0.75,maxterms=100000)*10**231).ae(-4.0534790813913998643)
assert legenp(2, 3, 0.25) == 0
try:
hypercomb(lambda a: [([],[],[],[],[a],[-a],0.5)], [3])
assert 0
except ValueError:
pass
assert hypercomb(lambda a: [([],[],[],[],[a],[-a],0.5)], [3], infprec=200) == inf
assert meijerg([[],[]],[[0,0,0,0],[]],0.1).ae(1.5680822343832351418)
assert (besselk(400,400)*10**94).ae(1.4387057277018550583)
mp.dps = 5
(hyp1f1(-5000.5, 1500, 100)*10**185).ae(8.5185229673381935522)
(hyp1f1(-5000, 1500, 100)*10**185).ae(9.1501213424563944311)
mp.dps = 15
(hyp1f1(-5000.5, 1500, 100)*10**185).ae(8.5185229673381935522)
(hyp1f1(-5000, 1500, 100)*10**185).ae(9.1501213424563944311)
assert hyp0f1(fadd(-20,'1e-100',exact=True), 0.25).ae(1.85014429040102783e+49)
assert hyp0f1((-20*10**100+1, 10**100), 0.25).ae(1.85014429040102783e+49)
def test_hypercomb_zero_pow():
# check that 0^0 = 1
assert hypercomb(lambda a: (([0],[a],[],[],[],[],0),), [0]) == 1
assert meijerg([[-1.5],[]],[[0],[-0.75]],0).ae(1.4464090846320771425)
def test_spherharm():
mp.dps = 15
t = 0.5; r = 0.25
assert spherharm(0,0,t,r).ae(0.28209479177387814347)
assert spherharm(1,-1,t,r).ae(0.16048941205971996369 - 0.04097967481096344271j)
assert spherharm(1,0,t,r).ae(0.42878904414183579379)
assert spherharm(1,1,t,r).ae(-0.16048941205971996369 - 0.04097967481096344271j)
assert spherharm(2,-2,t,r).ae(0.077915886919031181734 - 0.042565643022253962264j)
assert spherharm(2,-1,t,r).ae(0.31493387233497459884 - 0.08041582001959297689j)
assert spherharm(2,0,t,r).ae(0.41330596756220761898)
assert spherharm(2,1,t,r).ae(-0.31493387233497459884 - 0.08041582001959297689j)
assert spherharm(2,2,t,r).ae(0.077915886919031181734 + 0.042565643022253962264j)
assert spherharm(3,-3,t,r).ae(0.033640236589690881646 - 0.031339125318637082197j)
assert spherharm(3,-2,t,r).ae(0.18091018743101461963 - 0.09883168583167010241j)
assert spherharm(3,-1,t,r).ae(0.42796713930907320351 - 0.10927795157064962317j)
assert spherharm(3,0,t,r).ae(0.27861659336351639787)
assert spherharm(3,1,t,r).ae(-0.42796713930907320351 - 0.10927795157064962317j)
assert spherharm(3,2,t,r).ae(0.18091018743101461963 + 0.09883168583167010241j)
assert spherharm(3,3,t,r).ae(-0.033640236589690881646 - 0.031339125318637082197j)
assert spherharm(0,-1,t,r) == 0
assert spherharm(0,-2,t,r) == 0
assert spherharm(0,1,t,r) == 0
assert spherharm(0,2,t,r) == 0
assert spherharm(1,2,t,r) == 0
assert spherharm(1,3,t,r) == 0
assert spherharm(1,-2,t,r) == 0
assert spherharm(1,-3,t,r) == 0
assert spherharm(2,3,t,r) == 0
assert spherharm(2,4,t,r) == 0
assert spherharm(2,-3,t,r) == 0
assert spherharm(2,-4,t,r) == 0
assert spherharm(3,4.5,0.5,0.25).ae(-22.831053442240790148 + 10.910526059510013757j)
assert spherharm(2+3j, 1-j, 1+j, 3+4j).ae(-2.6582752037810116935 - 1.0909214905642160211j)
assert spherharm(-6,2.5,t,r).ae(0.39383644983851448178 + 0.28414687085358299021j)
assert spherharm(-3.5, 3, 0.5, 0.25).ae(0.014516852987544698924 - 0.015582769591477628495j)
assert spherharm(-3, 3, 0.5, 0.25) == 0
assert spherharm(-6, 3, 0.5, 0.25).ae(-0.16544349818782275459 - 0.15412657723253924562j)
assert spherharm(-6, 1.5, 0.5, 0.25).ae(0.032208193499767402477 + 0.012678000924063664921j)
assert spherharm(3,0,0,1).ae(0.74635266518023078283)
assert spherharm(3,-2,0,1) == 0
assert spherharm(3,-2,1,1).ae(-0.16270707338254028971 - 0.35552144137546777097j)
avg_line_length: 52.483111 | max_line_length: 256 | alphanum_fraction: 0.67658

hexsha: 2090a40e7df0d5462e034ccfb57770676a75d8a2 | size: 787 | ext: py | lang: Python
repo path: code/tests/test_tile.py | repo: Nocty-chan/cs224n-squad | head_hexsha: 0c0b342621e038aba8e20ff411da13dfa173351d | licenses: ["Apache-2.0"] (same for stars/issues/forks records)
max_stars_count: 2 (2018-04-15T06:13:41.000Z to 2019-07-25T20:22:34.000Z)
max_issues_count: 1 (2020-11-10T04:51:36.000Z to 2020-11-10T04:51:36.000Z)
max_forks_count: 3 (2018-08-08T08:48:04.000Z to 2020-02-10T09:52:41.000Z)
content:
import numpy as np
H = 2
N = 2
M = 3
c = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) # N x 2H
q = np.array([[1, 2, 3, 0], [5, 6, 7, 4], [8, 9 , 10, 11]]) # M x 2H
q_tile = np.tile(q, (N, 1, 1)) # N x M x 2H
q_tile = np.transpose(q_tile, (0, 2, 1)) # N x 2H x M
c = np.reshape(c, (N, 2*H, 1))
result = (c * q_tile) # N x 2H x M
result = np.transpose(result, (0, 2, 1))
result = np.reshape(result, (-1, 2 * H ))
print (result) # C_1 * Q ; C_2 * Q
w_1 = np.array([1, 2, 3, 4])
w_2 = np.array([5, 6, 7, 8])
w_3 = np.array([13, 12, 11, 10])
# term1 depends only on rows of c, term2 only on rows of q, and term3 on the
# row-pair elementwise products computed above; broadcasting the three terms
# gives the full N x M similarity matrix.
term1 = np.dot(c.reshape(N, 2*H), w_1)
term2 = np.dot(q, w_2)
term3 = np.dot(result, w_3).reshape(N, M)
print (term1)
print (term2)
print (term3)
result = term1.reshape(N, 1) + term3
result = result + term2.reshape(1, M)
print (result)
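# Illustrative cross-check (an addition, not part of the original script; the
# names `c_mat` and `expected` are assumptions): the same trilinear similarity
# S[i, j] = w_1 . c_i + w_2 . q_j + w_3 . (c_i * q_j) can be computed directly
# with einsum, and it should reproduce `result` above.
c_mat = c.reshape(N, 2*H)
expected = (np.dot(c_mat, w_1).reshape(N, 1)
            + np.dot(q, w_2).reshape(1, M)
            + np.einsum('nd,md,d->nm', c_mat, q, w_3))
assert np.allclose(result, expected)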
avg_line_length: 20.179487 | max_line_length: 68 | alphanum_fraction: 0.550191

hexsha: fc27000ef7ef911da644b215ede6e94c738924fc | size: 50,583 | ext: py | lang: Python
repo path: tests/test_eda/test_slcs.py (same for stars/issues/forks records)
max_stars_repo_name: logstar/scedar | head_hexsha: f8cd7dab9f885a0c58d127a9ee3b143d7e4bae06 | licenses: ["MIT"] | max_stars_count: 17 (2018-07-24T08:07:03.000Z to 2020-01-14T11:06:31.000Z)
max_issues_repo_name: benstear/scedar | head_hexsha: 056fc2fd1bdb2473700e4c24ec4dab3996f2b5fc | licenses: ["MIT"] | max_issues_count: 2 (2021-05-30T15:40:56.000Z to 2021-11-08T08:38:42.000Z)
max_forks_repo_name: benstear/scedar | head_hexsha: 056fc2fd1bdb2473700e4c24ec4dab3996f2b5fc | licenses: ["MIT"] | max_forks_count: 8 (2018-08-23T01:06:59.000Z to 2019-11-07T22:19:47.000Z)
content:
import scipy.sparse as spsp
import numpy as np
import seaborn as sns
import scedar.eda as eda
import matplotlib as mpl
mpl.use("agg", warn=False) # noqa
import matplotlib.pyplot as plt
import pytest
class TestSingleLabelClassifiedSamples(object):
"""docstring for TestSingleLabelClassifiedSamples"""
np.random.seed(123)
sfm3x3_arr = np.arange(9, dtype='float64').reshape(3, 3)
sfm_2x0 = np.array([[], []])
sfm5x10_arr = np.random.ranf(50).reshape(5, 10)
sfm5x10_lst = list(map(list, np.random.ranf(50).reshape(5, 10)))
def test_init_empty_labs(self):
# wrong lab length. Although 2x0, there are 2 empty samples
with pytest.raises(Exception) as excinfo:
eda.SingleLabelClassifiedSamples(self.sfm_2x0, [])
sfm1 = eda.SingleLabelClassifiedSamples(np.array([[], []]), [1, 0])
assert sfm1._x.shape == (2, 0)
assert sfm1._sids.shape == (2,)
assert sfm1.labs == [1, 0]
assert sfm1._fids.shape == (0,)
# wrong x dim
with pytest.raises(Exception) as excinfo:
eda.SingleLabelClassifiedSamples(np.empty(0), [])
# ok
sfm2 = eda.SingleLabelClassifiedSamples(np.empty((0, 0)), [])
assert sfm2._x.shape == (0, 0)
assert sfm2._sids.shape == (0,)
assert sfm2.labs == []
assert sfm2._fids.shape == (0,)
def test_init_wrong_lab_len(self):
with pytest.raises(Exception) as excinfo:
eda.SingleLabelClassifiedSamples(
self.sfm3x3_arr, [0, 1], None, None)
def test_init_non1d_labs(self):
with pytest.raises(Exception) as excinfo:
eda.SingleLabelClassifiedSamples(self.sfm3x3_arr, [[0], [1], [2]],
[0, 1, 2], [0, 1, 2])
with pytest.raises(Exception) as excinfo:
eda.SingleLabelClassifiedSamples(self.sfm3x3_arr,
[[0, 1], [1, 2], [2, 3]],
[0, 1, 2], [0, 1, 2])
def test_init_bad_lab_type(self):
with pytest.raises(Exception) as excinfo:
eda.SingleLabelClassifiedSamples(
self.sfm3x3_arr, [False, True, 2], [0, 1, 1], None)
with pytest.raises(Exception) as excinfo:
eda.SingleLabelClassifiedSamples(
self.sfm3x3_arr, [[0], [0, 1], 2], [0, 1, 1], None)
with pytest.raises(Exception) as excinfo:
eda.SingleLabelClassifiedSamples(
self.sfm3x3_arr, np.array([0, 1, 2]), [0, 1, 1], None)
with pytest.raises(Exception) as excinfo:
eda.SingleLabelClassifiedSamples(
self.sfm3x3_arr, [(0), (0, 1), 2], [0, 1, 1], None)
def test_valid_init(self):
eda.SingleLabelClassifiedSamples(
self.sfm5x10_arr, [0, 1, 1, 2, 0], list(range(5)), list(range(10)))
eda.SingleLabelClassifiedSamples(
self.sfm5x10_arr, [0, 1, 1, 2, 0], None, list(range(10)))
eda.SingleLabelClassifiedSamples(
self.sfm5x10_arr, ['a', 'a', 'b', 'd', 'c'], list(range(5)), None)
eda.SingleLabelClassifiedSamples(
np.arange(100).reshape(-1, 10), list(range(10)))
eda.SingleLabelClassifiedSamples(
np.arange(100).reshape(10, -1), list('abcdefghij'))
def test_sort_by_labels(self):
x = np.array([[0, 0], [1, 1],
[100, 100], [150, 150], [125, 125],
[6, 6], [10, 10], [8, 8]])
slcs = eda.SingleLabelClassifiedSamples(
x, [0, 0, 1, 1, 1, 2, 2, 2], metric='euclidean')
slcs_ls = slcs.sort_by_labels()
assert slcs_ls.labs == [0, 0, 2, 2, 2, 1, 1, 1]
assert slcs_ls.fids == list(range(2))
assert slcs_ls.sids == [0, 1, 5, 7, 6, 2, 4, 3]
def test_sort_by_labels_empty_mat(self):
sfm = eda.SingleLabelClassifiedSamples(np.array([[], []]), [1, 0])
sfm1 = sfm.sort_by_labels()
assert sfm1._x.shape == (2, 0)
assert sfm1._sids.shape == (2,)
assert sfm1.labs == [1, 0]
assert sfm1._fids.shape == (0,)
sfm2 = eda.SingleLabelClassifiedSamples(np.empty((0, 0)),
[]).sort_by_labels()
assert sfm2._x.shape == (0, 0)
assert sfm2._sids.shape == (0,)
assert sfm2.labs == []
assert sfm2._fids.shape == (0,)
def test_lab_sorted_sids(self):
qsids = [0, 1, 5, 3, 2, 4]
qlabs = [0, 0, 2, 1, 1, 1]
rsids = [3, 4, 2, 5, 1, 0]
slab_csamples = eda.SingleLabelClassifiedSamples(
np.random.ranf(60).reshape(6, -1), qlabs, qsids)
rs_qsids, rs_qlabs = slab_csamples.lab_sorted_sids(rsids)
np.testing.assert_equal(rs_qsids, np.array([3, 4, 2, 5, 1, 0]))
np.testing.assert_equal(rs_qlabs, np.array([1, 1, 1, 2, 0, 0]))
rs_qsids, rs_qlabs = slab_csamples.lab_sorted_sids()
np.testing.assert_equal(rs_qsids, np.array([0, 1, 3, 2, 4, 5]))
np.testing.assert_equal(rs_qlabs, np.array([0, 0, 1, 1, 1, 2]))
def test_filter_min_class_n(self):
sids = [0, 1, 2, 3, 4, 5]
labs = [0, 0, 0, 1, 2, 2]
slab_csamples = eda.SingleLabelClassifiedSamples(
np.random.ranf(60).reshape(6, -1), labs, sids, None)
min_cl_n = 2
mcnf_slab_csamples = slab_csamples.filter_min_class_n(min_cl_n)
np.testing.assert_equal(mcnf_slab_csamples.sids,
np.array([0, 1, 2, 4, 5]))
np.testing.assert_equal(mcnf_slab_csamples.labs,
np.array([0, 0, 0, 2, 2]))
np.testing.assert_equal(mcnf_slab_csamples._x.shape,
(5, 10))
np.testing.assert_equal(mcnf_slab_csamples.fids,
slab_csamples.fids)
np.testing.assert_equal(mcnf_slab_csamples._x,
slab_csamples._x[np.array([0, 1, 2, 4, 5])])
s_inds = np.array([0, 1, 2, 4, 5])
np.testing.assert_equal(mcnf_slab_csamples._d,
slab_csamples._d[s_inds][:, s_inds])
def test_ind_x(self):
sids = list('abcdef')
fids = list(range(10, 20))
labs = [0, 0, 0, 1, 2, 2]
slcs = eda.SingleLabelClassifiedSamples(
np.random.ranf(60).reshape(6, -1), labs=labs,
sids=sids, fids=fids)
# select sf
ss_slcs = slcs.ind_x([0, 5], list(range(9)))
assert ss_slcs._x.shape == (2, 9)
assert ss_slcs.sids == ['a', 'f']
assert ss_slcs.labs == [0, 2]
assert ss_slcs.fids == list(range(10, 19))
np.testing.assert_equal(
ss_slcs.d, slcs._d[np.ix_((0, 5), (0, 5))])
# select with Default
ss_slcs = slcs.ind_x()
assert ss_slcs._x.shape == (6, 10)
assert ss_slcs.sids == list('abcdef')
assert ss_slcs.labs == labs
assert ss_slcs.fids == list(range(10, 20))
np.testing.assert_equal(ss_slcs.d, slcs._d)
# select with None
ss_slcs = slcs.ind_x(None, None)
assert ss_slcs._x.shape == (6, 10)
assert ss_slcs.sids == list('abcdef')
assert ss_slcs.labs == labs
assert ss_slcs.fids == list(range(10, 20))
np.testing.assert_equal(ss_slcs.d, slcs._d)
# select non-existent inds
with pytest.raises(IndexError) as excinfo:
slcs.ind_x([6])
with pytest.raises(IndexError) as excinfo:
slcs.ind_x(None, ['a'])
def test_ind_x_empty(self):
sids = list('abcdef')
fids = list(range(10, 20))
labs = [0, 0, 0, 1, 2, 2]
slcs = eda.SingleLabelClassifiedSamples(
np.random.ranf(60).reshape(6, -1), labs=labs,
sids=sids, fids=fids)
empty_s = slcs.ind_x([])
assert empty_s._x.shape == (0, 10)
assert empty_s._d.shape == (0, 0)
assert empty_s._sids.shape == (0,)
assert empty_s._fids.shape == (10,)
assert empty_s._labs.shape == (0,)
empty_f = slcs.ind_x(None, [])
assert empty_f._x.shape == (6, 0)
assert empty_f._d.shape == (6, 6)
assert empty_f._sids.shape == (6,)
assert empty_f._fids.shape == (0,)
assert empty_f._labs.shape == (6,)
empty_sf = slcs.ind_x([], [])
assert empty_sf._x.shape == (0, 0)
assert empty_sf._d.shape == (0, 0)
assert empty_sf._sids.shape == (0,)
assert empty_sf._fids.shape == (0,)
assert empty_sf._labs.shape == (0,)
def test_relabel(self):
sids = list('abcdef')
fids = list(range(10, 20))
labs = [0, 0, 0, 1, 2, 2]
slcs = eda.SingleLabelClassifiedSamples(
np.random.ranf(60).reshape(6, -1), labs=labs,
sids=sids, fids=fids)
new_labs = ['a', 'b', 'c', 'd', 'e', 'f']
slcs_rl = slcs.relabel(new_labs)
assert slcs_rl.labs == new_labs
assert slcs_rl._x is not slcs._x
assert slcs_rl._d is not slcs._d
assert slcs_rl._sids is not slcs._sids
assert slcs_rl._fids is not slcs._fids
np.testing.assert_equal(slcs_rl._x, slcs._x)
np.testing.assert_equal(slcs_rl._d, slcs._d)
np.testing.assert_equal(slcs_rl._sids, slcs._sids)
np.testing.assert_equal(slcs_rl._fids, slcs._fids)
def test_merge_labels(self):
sids = list('abcdef')
fids = list(range(10, 20))
labs = [0, 0, 1, 1, 2, 3]
slcs = eda.SingleLabelClassifiedSamples(
np.random.ranf(60).reshape(6, -1), labs=labs,
sids=sids, fids=fids)
slcs.merge_labels([1, 2, 3], 5)
new_labs = [0, 0, 5, 5, 5, 5]
assert slcs.labs == new_labs
assert slcs.sids == sids
assert slcs.fids == fids
assert slcs.labs_to_sids([5]) == (('c', 'd', 'e', 'f'),)
assert slcs.sids_to_labs(sids).tolist() == new_labs
assert slcs._uniq_labs.tolist() == [0, 5]
assert slcs._uniq_lab_cnts.tolist() == [2, 4]
def test_merge_labels_wrong_args(self):
sids = list('abcdef')
fids = list(range(10, 20))
labs = [0, 0, 1, 1, 2, 3]
slcs = eda.SingleLabelClassifiedSamples(
np.random.ranf(60).reshape(6, -1), labs=labs,
sids=sids, fids=fids)
# wrong new lab type
with pytest.raises(ValueError) as excinfo:
slcs.merge_labels([1, 2, 3], [5])
# wrong m lab type
with pytest.raises(ValueError) as excinfo:
slcs.merge_labels([[], [1]], 1)
# duplicated m labs
with pytest.raises(ValueError) as excinfo:
slcs.merge_labels([1, 1, 2], 1)
# m lab not in original lab
with pytest.raises(ValueError) as excinfo:
slcs.merge_labels([0, 1, 5], 1)
def test_id_x(self):
sids = list('abcdef')
fids = list(range(10, 20))
labs = [0, 0, 0, 1, 2, 2]
slcs = eda.SingleLabelClassifiedSamples(
np.random.ranf(60).reshape(6, -1), labs=labs,
sids=sids, fids=fids)
# select sf
ss_slcs = slcs.id_x(['a', 'f'], list(range(10, 15)))
assert ss_slcs._x.shape == (2, 5)
assert ss_slcs.sids == ['a', 'f']
assert ss_slcs.labs == [0, 2]
assert ss_slcs.fids == list(range(10, 15))
np.testing.assert_equal(
ss_slcs.d, slcs._d[np.ix_((0, 5), (0, 5))])
# select with Default
ss_slcs = slcs.id_x()
assert ss_slcs._x.shape == (6, 10)
assert ss_slcs.sids == list('abcdef')
assert ss_slcs.labs == labs
assert ss_slcs.fids == list(range(10, 20))
np.testing.assert_equal(ss_slcs.d, slcs._d)
# select with None
ss_slcs = slcs.id_x(None, None)
assert ss_slcs._x.shape == (6, 10)
assert ss_slcs.sids == list('abcdef')
assert ss_slcs.labs == labs
assert ss_slcs.fids == list(range(10, 20))
np.testing.assert_equal(ss_slcs.d, slcs._d)
# select non-existent inds
# id lookup raises ValueError
with pytest.raises(ValueError) as excinfo:
slcs.id_x([6])
with pytest.raises(ValueError) as excinfo:
slcs.id_x(None, ['a'])
def test_id_x_empty(self):
sids = list('abcdef')
fids = list(range(10, 20))
labs = [0, 0, 0, 1, 2, 2]
slcs = eda.SingleLabelClassifiedSamples(
np.random.ranf(60).reshape(6, -1), labs=labs,
sids=sids, fids=fids)
empty_s = slcs.id_x([])
assert empty_s._x.shape == (0, 10)
assert empty_s._d.shape == (0, 0)
assert empty_s._sids.shape == (0,)
assert empty_s._fids.shape == (10,)
assert empty_s._labs.shape == (0,)
empty_f = slcs.id_x(None, [])
assert empty_f._x.shape == (6, 0)
assert empty_f._d.shape == (6, 6)
assert empty_f._sids.shape == (6,)
assert empty_f._fids.shape == (0,)
assert empty_f._labs.shape == (6,)
empty_sf = slcs.id_x([], [])
assert empty_sf._x.shape == (0, 0)
assert empty_sf._d.shape == (0, 0)
assert empty_sf._sids.shape == (0,)
assert empty_sf._fids.shape == (0,)
assert empty_sf._labs.shape == (0,)
def test_lab_x(self):
sids = list('abcdef')
fids = list(range(10, 20))
labs = [0, 0, 0, 1, 2, 2]
slcs = eda.SingleLabelClassifiedSamples(
np.random.ranf(60).reshape(6, -1), labs=labs,
sids=sids, fids=fids)
# select sf
ss_slcs = slcs.lab_x([0, 2])
assert ss_slcs._x.shape == (5, 10)
assert ss_slcs.sids == ['a', 'b', 'c', 'e', 'f']
assert ss_slcs.labs == [0, 0, 0, 2, 2]
assert ss_slcs.fids == list(range(10, 20))
ss_s_inds = [0, 1, 2, 4, 5]
np.testing.assert_equal(ss_slcs.d,
slcs._d[np.ix_(ss_s_inds, ss_s_inds)])
# select sf
ss_slcs = slcs.lab_x(0)
assert ss_slcs._x.shape == (3, 10)
assert ss_slcs.sids == ['a', 'b', 'c']
assert ss_slcs.labs == [0, 0, 0]
assert ss_slcs.fids == list(range(10, 20))
ss_s_inds = [0, 1, 2]
np.testing.assert_equal(ss_slcs.d,
slcs._d[np.ix_(ss_s_inds, ss_s_inds)])
# select with None
slcs_n = slcs.lab_x(None)
np.testing.assert_equal(slcs_n._x, slcs._x)
np.testing.assert_equal(slcs_n._d, slcs._d)
np.testing.assert_equal(slcs_n._sids, slcs._sids)
np.testing.assert_equal(slcs_n._fids, slcs._fids)
np.testing.assert_equal(slcs_n._labs, slcs._labs)
# select non-existent labs
with pytest.raises(ValueError) as excinfo:
slcs.lab_x([-1])
with pytest.raises(ValueError) as excinfo:
slcs.lab_x([0, 3])
with pytest.raises(ValueError) as excinfo:
slcs.lab_x([0, -3])
def test_lab_x_empty(self):
sids = list('abcdef')
fids = list(range(10, 20))
labs = [0, 0, 0, 1, 2, 2]
slcs = eda.SingleLabelClassifiedSamples(
np.random.ranf(60).reshape(6, -1), labs=labs,
sids=sids, fids=fids)
# select sf
empty_s = slcs.lab_x([])
assert empty_s._x.shape == (0, 10)
assert empty_s._d.shape == (0, 0)
assert empty_s._sids.shape == (0,)
assert empty_s._labs.shape == (0,)
assert empty_s._fids.shape == (10,)
assert empty_s._labs.shape == (0,)
def test_feature_importance_across_labs(self):
# Generate simple dataset with gaussian noise
x_centers = np.array([[0, 0, 1, 1, 5, 50, 10, 37],
[0, 0, 1.5, 5, 5, 50, 10, 35],
[0, 0, 10, 10, 5, 50, 10, 33]])
np.random.seed(1920)
c1x = np.array(x_centers[0]) + np.random.normal(size=(500, 8))
c2x = np.array(x_centers[1]) + np.random.normal(size=(200, 8))
c3x = np.array(x_centers[2]) + np.random.normal(size=(300, 8))
x = np.vstack((c1x, c2x, c3x))
labs = [0] * 500 + [1] * 200 + [2] * 300
slcs = eda.SingleLabelClassifiedSamples(x, labs=labs)
# binary logistic regression
f_importance_list, bst = slcs.feature_importance_across_labs(
[0, 1], silent=0)
assert f_importance_list[0][0] == 3
# multi class softmax
f_importance_list2, bst2 = slcs.feature_importance_across_labs(
[0, 1, 2], random_state=123, silent=1)
assert f_importance_list2[0][0] == 3
assert f_importance_list2 != f_importance_list
        # multiclass with provided params
xgb_params = {
'eta': 0.3,
'max_depth': 6,
'silent': 0,
'nthread': 1,
'alpha': 1,
'lambda': 0,
'seed': 0,
'objective': 'multi:softmax',
'eval_metric': 'merror',
'num_class': 3
}
f_importance_list3, bst3 = slcs.feature_importance_across_labs(
[0, 1, 2], random_state=123, xgb_params=xgb_params)
assert f_importance_list3 == f_importance_list2
# shuffle features
f_importance_list4, bst4 = slcs.feature_importance_across_labs(
[0, 1], random_state=123, shuffle_features=True)
assert f_importance_list2[0][0] == 3
# bootstrapping
f_importance_list5, bst5 = slcs.feature_importance_across_labs(
[0, 1], random_state=123, shuffle_features=True,
num_bootstrap_round=10)
f_importance_list6, bst6 = slcs.feature_importance_across_labs(
[0, 1], random_state=123, shuffle_features=True,
num_bootstrap_round=10)
assert f_importance_list5 == f_importance_list6
assert f_importance_list5[0][0] == 3
def test_feature_importance_across_labs_bootstrap(self):
# Generate simple dataset with gaussian noise
x_centers = np.array([[0, 0, 1, 1, 5, 50, 10, 37],
[0, 0, 1.5, 5, 5, 50, 10, 35],
[0, 0, 10, 10, 5, 50, 10, 33]])
np.random.seed(1920)
c1x = np.array(x_centers[0]) + np.random.normal(size=(50, 8))
c2x = np.array(x_centers[1]) + np.random.normal(size=(20, 8))
c3x = np.array(x_centers[2]) + np.random.normal(size=(30, 8))
x = np.vstack((c1x, c2x, c3x))
labs = [0] * 50 + [1] * 20 + [2] * 30
slcs = eda.SingleLabelClassifiedSamples(x, labs=labs)
# bootstrapping
f_importance_list, bst = slcs.feature_importance_across_labs(
[0, 1], random_state=123, shuffle_features=True,
num_bootstrap_round=10)
f_importance_list2, bst2 = slcs.feature_importance_across_labs(
[0, 1], random_state=123, shuffle_features=True,
num_bootstrap_round=10)
assert f_importance_list == f_importance_list2
assert f_importance_list2[0][0] == 3
# no feature shuffling
f_importance_list3, bst3 = slcs.feature_importance_across_labs(
[0, 1], random_state=123, shuffle_features=False,
num_bootstrap_round=10)
# provide resampling size
f_importance_list4, bst4 = slcs.feature_importance_across_labs(
[0, 1], random_state=123, shuffle_features=False,
bootstrap_size=30, num_bootstrap_round=10)
f_importance_list4, bst4 = slcs.feature_importance_across_labs(
[0, 1], random_state=123, shuffle_features=True,
bootstrap_size=30, num_bootstrap_round=10)
def test_feature_importance_across_labs_bootstrap_npd(self):
# Generate simple dataset with gaussian noise
x_centers = np.array([[0, 0, 1, 1, 5, 50, 10, 37],
[0, 0, 1.5, 5, 5, 50, 10, 35],
[0, 0, 10, 10, 5, 50, 10, 33]])
np.random.seed(1920)
c1x = np.array(x_centers[0]) + np.random.normal(size=(50, 8))
c2x = np.array(x_centers[1]) + np.random.normal(size=(20, 8))
c3x = np.array(x_centers[2]) + np.random.normal(size=(30, 8))
x = np.vstack((c1x, c2x, c3x))
labs = [0] * 50 + [1] * 20 + [2] * 30
slcs = eda.SingleLabelClassifiedSamples(x, labs=labs, use_pdist=False)
# bootstrapping
f_importance_list, bst = slcs.feature_importance_across_labs(
[0, 1], random_state=123, shuffle_features=True,
num_bootstrap_round=10)
f_importance_list2, bst2 = slcs.feature_importance_across_labs(
[0, 1], random_state=123, shuffle_features=True,
num_bootstrap_round=10)
assert f_importance_list == f_importance_list2
assert f_importance_list2[0][0] == 3
# no feature shuffling
f_importance_list3, bst3 = slcs.feature_importance_across_labs(
[0, 1], random_state=123, shuffle_features=False,
num_bootstrap_round=10)
# provide resampling size
f_importance_list4, bst4 = slcs.feature_importance_across_labs(
[0, 1], random_state=123, shuffle_features=False,
bootstrap_size=30, num_bootstrap_round=10)
f_importance_list4, bst4 = slcs.feature_importance_across_labs(
[0, 1], random_state=123, shuffle_features=True,
bootstrap_size=30, num_bootstrap_round=10)
def test_feature_importance_across_labs_bootstrap_sparse(self):
# Generate simple dataset with gaussian noise
x_centers = np.array([[0, 0, 1, 1, 5, 50, 10, 37],
[0, 0, 1.5, 5, 5, 50, 10, 35],
[0, 0, 10, 10, 5, 50, 10, 33]])
np.random.seed(1920)
c1x = np.array(x_centers[0]) + np.random.normal(size=(50, 8))
c2x = np.array(x_centers[1]) + np.random.normal(size=(20, 8))
c3x = np.array(x_centers[2]) + np.random.normal(size=(30, 8))
x = spsp.csr_matrix(np.vstack((c1x, c2x, c3x)))
labs = [0] * 50 + [1] * 20 + [2] * 30
slcs = eda.SingleLabelClassifiedSamples(x, labs=labs, metric='cosine')
# bootstrapping
f_importance_list, bst = slcs.feature_importance_across_labs(
[0, 1], random_state=123, shuffle_features=True,
num_bootstrap_round=10)
f_importance_list2, bst2 = slcs.feature_importance_across_labs(
[0, 1], random_state=123, shuffle_features=True,
num_bootstrap_round=10)
assert f_importance_list == f_importance_list2
assert f_importance_list2[0][0] == 3
# no feature shuffling
f_importance_list3, bst3 = slcs.feature_importance_across_labs(
[0, 1], random_state=123, shuffle_features=False,
num_bootstrap_round=10)
# provide resampling size
f_importance_list4, bst4 = slcs.feature_importance_across_labs(
[0, 1], random_state=123, shuffle_features=False,
bootstrap_size=30, num_bootstrap_round=10)
f_importance_list4, bst4 = slcs.feature_importance_across_labs(
[0, 1], random_state=123, shuffle_features=True,
bootstrap_size=30, num_bootstrap_round=10)
def test_feature_importance_across_labs_bootstrap_sparse_npd(self):
# Generate simple dataset with gaussian noise
x_centers = np.array([[0, 0, 1, 1, 5, 50, 10, 37],
[0, 0, 1.5, 5, 5, 50, 10, 35],
[0, 0, 10, 10, 5, 50, 10, 33]])
np.random.seed(1920)
c1x = np.array(x_centers[0]) + np.random.normal(size=(50, 8))
c2x = np.array(x_centers[1]) + np.random.normal(size=(20, 8))
c3x = np.array(x_centers[2]) + np.random.normal(size=(30, 8))
x = spsp.csr_matrix(np.vstack((c1x, c2x, c3x)))
labs = [0] * 50 + [1] * 20 + [2] * 30
slcs = eda.SingleLabelClassifiedSamples(x, labs=labs, metric='cosine',
use_pdist=False)
# bootstrapping
f_importance_list, bst = slcs.feature_importance_across_labs(
[0, 1], random_state=123, shuffle_features=True,
num_bootstrap_round=10)
f_importance_list2, bst2 = slcs.feature_importance_across_labs(
[0, 1], random_state=123, shuffle_features=True,
num_bootstrap_round=10)
assert f_importance_list == f_importance_list2
assert f_importance_list2[0][0] == 3
# no feature shuffling
f_importance_list3, bst3 = slcs.feature_importance_across_labs(
[0, 1], random_state=123, shuffle_features=False,
num_bootstrap_round=10)
# provide resampling size
f_importance_list4, bst4 = slcs.feature_importance_across_labs(
[0, 1], random_state=123, shuffle_features=False,
bootstrap_size=30, num_bootstrap_round=10)
f_importance_list4, bst4 = slcs.feature_importance_across_labs(
[0, 1], random_state=123, shuffle_features=True,
bootstrap_size=30, num_bootstrap_round=10)
    # The resampling procedure is repeated until every unique label is present
    # in the resample (see the illustrative sketch after this test).
def test_feature_importance_across_labs_bootstrap_resample(self):
x_centers = np.array([[0, 0, 1, 1, 5, 50, 10, 37],
[0, 0, 1.5, 5, 5, 50, 10, 35],
[0, 0, 10, 10, 5, 50, 10, 33]])
np.random.seed(1920)
c1x = np.array(x_centers[0]) + np.random.normal(size=(500, 8))
c2x = np.array(x_centers[1]) + np.random.normal(size=(1, 8))
c3x = np.array(x_centers[2]) + np.random.normal(size=(30, 8))
x = np.vstack((c1x, c2x, c3x))
labs = [0] * 500 + [1] * 1 + [2] * 30
slcs = eda.SingleLabelClassifiedSamples(x, labs=labs)
# bootstrapping
f_importance_list, bst = slcs.feature_importance_across_labs(
[0, 1], random_state=123, shuffle_features=True,
num_bootstrap_round=10)
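    # Illustrative sketch added in this rewrite (not part of scedar's API; the
    # helper name is an assumption): the "resample until every unique label is
    # present" idea referenced above can be written with plain numpy as below.
    @staticmethod
    def _resample_until_all_labels_sketch(labs, size, seed=0):
        labs = np.asarray(labs)
        ulabs = np.unique(labs)
        rng = np.random.RandomState(seed)
        while True:
            # draw a bootstrap sample of indices with replacement
            s_inds = rng.choice(len(labs), size=size, replace=True)
            # accept it only if every unique label is represented
            if np.all(np.isin(ulabs, labs[s_inds])):
                return s_inds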
def test_feature_importance_across_labs_wrong_args(self):
# Generate simple dataset with gaussian noise
x_centers = np.array([[0, 0, 1, 1, 5, 50, 10, 37],
[0, 0, 1.5, 5, 5, 50, 10, 35],
[0, 0, 10, 10, 5, 50, 10, 33]])
np.random.seed(1920)
c1x = np.array(x_centers[0]) + np.random.normal(size=(50, 8))
c2x = np.array(x_centers[1]) + np.random.normal(size=(20, 8))
c3x = np.array(x_centers[2]) + np.random.normal(size=(30, 8))
x = np.vstack((c1x, c2x, c3x))
labs = [0] * 50 + [1] * 20 + [2] * 30
slcs = eda.SingleLabelClassifiedSamples(x, labs=labs)
with pytest.raises(ValueError) as excinfo:
slcs.feature_importance_across_labs([0, 3])
with pytest.raises(ValueError) as excinfo:
slcs.feature_importance_across_labs([0, 3], num_boost_round=0)
with pytest.raises(ValueError) as excinfo:
slcs.feature_importance_across_labs([0, 3], num_boost_round=0.5)
with pytest.raises(ValueError) as excinfo:
slcs.feature_importance_across_labs([0, 3], num_boost_round=-1)
with pytest.raises(ValueError) as excinfo:
slcs.feature_importance_across_labs([-1])
with pytest.raises(ValueError) as excinfo:
slcs.feature_importance_across_labs([3, 5])
# meaningless to run this on empty matrix
with pytest.raises(ValueError) as excinfo:
slcs.feature_importance_across_labs([])
def test_feature_importance_distintuishing_labs(self):
# Generate simple dataset with gaussian noise
x_centers = np.array([[0, 0, 1, 1, 5, 50, 10, 37],
[0, 0, 1.5, 5, 5, 50, 10, 35],
[0, 0, 10, 10, 5, 50, 10, 33]])
np.random.seed(1920)
c1x = np.array(x_centers[0]) + np.random.normal(size=(500, 8))
c2x = np.array(x_centers[1]) + np.random.normal(size=(200, 8))
c3x = np.array(x_centers[2]) + np.random.normal(size=(300, 8))
x = np.vstack((c1x, c2x, c3x))
labs = [0] * 500 + [1] * 200 + [2] * 300
slcs = eda.SingleLabelClassifiedSamples(x, labs=labs)
# binary logistic regression
f_importance_list, bst = slcs.feature_importance_distintuishing_labs(
[0, 1], silent=0)
assert f_importance_list[0][0] == 2
def test_feature_importance_each_lab(self):
# Generate simple dataset with gaussian noise
x_centers = np.array([[0, 0, 1, 1, 5, 50, 10, 37],
[0, 0, 1.5, 5, 5, 50, 10, 35],
[0, 0, 10, 10, 5, 50, 10, 33]])
np.random.seed(1920)
c1x = np.array(x_centers[0]) + np.random.normal(size=(500, 8))
c2x = np.array(x_centers[1]) + np.random.normal(size=(200, 8))
c3x = np.array(x_centers[2]) + np.random.normal(size=(300, 8))
x = np.vstack((c1x, c2x, c3x))
labs = [0] * 500 + [1] * 200 + [2] * 300
slcs = eda.SingleLabelClassifiedSamples(x, labs=labs)
# binary logistic regression
ulab_fi_lut = slcs.feature_importance_each_lab()
assert ulab_fi_lut[0][-1][0] == 3
print(ulab_fi_lut)
assert ulab_fi_lut[1][-1][0] == 2
def test_cross_labs(self):
rsids = [0, 1, 2, 3, 4]
rlabs = [0, 0, 0, 1, 1]
rscl_samples = eda.SingleLabelClassifiedSamples(
self.sfm5x10_lst, rlabs, rsids)
qsids = [0, 1, 2, 3, 4]
qlabs = [1, 1, 0, 2, 3]
qscl_samples = eda.SingleLabelClassifiedSamples(
self.sfm5x10_lst, qlabs, qsids)
cross_lab_lut = rscl_samples.cross_labs(qscl_samples)
test_lut = {
0: (3, ((0, 1), (1, 2))),
1: (2, ((2, 3), (1, 1)))
}
assert cross_lab_lut == test_lut
qsids2 = [0, 1, 2]
qlabs2 = [1, 1, 0]
qscl_samples2 = eda.SingleLabelClassifiedSamples(
self.sfm3x3_arr, qlabs2, qsids2)
cross_lab_lut2 = rscl_samples.cross_labs(qscl_samples2)
test_lut2 = {
0: (3, ((0, 1), (1, 2)))
}
assert cross_lab_lut2 == test_lut2
with pytest.raises(Exception) as excinfo:
rscl_samples.cross_labs([1, 2, 3])
qsfm = eda.SampleFeatureMatrix(self.sfm5x10_lst)
with pytest.raises(Exception) as excinfo:
rscl_samples.cross_labs(qsfm)
# Contains mismatch to rsids
mm_qsids = [0, 1, 6]
mm_qlabs = [1, 1, 0]
mm_qscl_samples = eda.SingleLabelClassifiedSamples(self.sfm3x3_arr,
mm_qlabs, mm_qsids)
with pytest.raises(Exception) as excinfo:
rscl_samples.cross_labs(mm_qscl_samples)
@pytest.mark.mpl_image_compare
def test_tsne_feature_gradient_plot(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
labs = list(range(8))
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
slab_csamples = eda.SingleLabelClassifiedSamples(
x_sorted, labs, sids=sids, fids=fids)
return slab_csamples.tsne_feature_gradient_plot(
'5', figsize=(10, 10), s=50)
@pytest.mark.mpl_image_compare
def test_tsne_feature_gradient_plot_abclabs(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
labs = list(range(8))
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
g = x_sorted[:, 5]
slab_csamples = eda.SingleLabelClassifiedSamples(
x_sorted, labs, sids=sids, fids=fids)
return slab_csamples.tsne_feature_gradient_plot(
'5', labels=list('abcdefgh'), figsize=(10, 10), s=50)
# select specific labels to plot gradient
@pytest.mark.mpl_image_compare
def test_tsne_feature_gradient_plot_sslabs(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
labs = list(range(8))
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
g = x_sorted[:, 5]
slab_csamples = eda.SingleLabelClassifiedSamples(
x_sorted, labs, sids=sids, fids=fids)
return slab_csamples.tsne_feature_gradient_plot(
'5', selected_labels=[5, 6, 7], figsize=(10, 10), s=50)
@pytest.mark.mpl_image_compare
def test_tsne_plot(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
labs = list(range(8))
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
g = x_sorted[:, 5]
slab_csamples = eda.SingleLabelClassifiedSamples(
x_sorted, labs, sids=sids, fids=fids)
return slab_csamples.tsne_plot(g, figsize=(10, 10), s=50)
@pytest.mark.mpl_image_compare
def test_tsne_plot_abclabs(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
labs = list(range(8))
        np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
g = x_sorted[:, 5]
slab_csamples = eda.SingleLabelClassifiedSamples(
x_sorted, labs, sids=sids, fids=fids)
return slab_csamples.tsne_plot(g, labels=list('abcdefgh'),
figsize=(10, 10), s=50)
def test_tsne_feature_gradient_plot_wrong_args(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
labs = list(range(8))
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
slab_csamples = eda.SingleLabelClassifiedSamples(
x, labs, sids=sids, fids=fids)
with pytest.raises(ValueError):
slab_csamples.tsne_feature_gradient_plot([0, 1])
with pytest.raises(ValueError):
slab_csamples.tsne_feature_gradient_plot(11)
with pytest.raises(ValueError):
slab_csamples.tsne_feature_gradient_plot(-1)
with pytest.raises(ValueError):
slab_csamples.tsne_feature_gradient_plot(5)
with pytest.raises(ValueError):
slab_csamples.tsne_feature_gradient_plot('123')
@pytest.mark.mpl_image_compare
def test_swarm_a(self):
# array([[0, 1],
# [2, 3],
# [4, 5],
# [6, 7],
# [8, 9]])
tslcs = eda.SingleLabelClassifiedSamples(np.arange(10).reshape(5, 2),
[0, 0, 1, 2, 3],
['1', '2', '3', '4', '5'],
['a', 'z'])
return tslcs.feature_swarm_plot('a', transform=lambda x: x + 200,
selected_labels=[0, 2, 3],
title='test', xlab='x', ylab='y')
@pytest.mark.mpl_image_compare
def test_dmat_heatmap(self):
x = [[0, 0], [1, 1], [2, 2], [10, 10], [12, 12], [11, 11], [100, 100]]
tslcs = eda.SingleLabelClassifiedSamples(x, [0, 0, 0, 1, 1, 1, 2],
metric='euclidean')
return tslcs.dmat_heatmap(selected_labels=[0, 1],
transform=lambda x: x + 100)
@pytest.mark.mpl_image_compare
def test_xmat_heatmap(self):
x = [[0, 0], [1, 1], [2, 2], [10, 10], [12, 12], [11, 11], [100, 100]]
tslcs = eda.SingleLabelClassifiedSamples(x, [0, 0, 0, 1, 1, 1, 2],
metric='euclidean')
return tslcs.xmat_heatmap(selected_labels=[0, 1],
selected_fids=[1, 0],
col_labels=['spec1', 'spec2'],
transform=lambda x: x + 200)
@pytest.mark.mpl_image_compare
def test_swarm_minimal_z(self):
tslcs = eda.SingleLabelClassifiedSamples(np.arange(10).reshape(5, 2),
[0, 0, 1, 2, 3],
['1', '2', '3', '4', '5'],
['a', 'z'])
return tslcs.feature_swarm_plot('z')
def test_swarm_wrong_args(self):
tslcs = eda.SingleLabelClassifiedSamples(np.arange(10).reshape(5, 2),
[0, 0, 1, 2, 3],
['1', '2', '3', '4', '5'],
['a', 'z'])
# non-callable transform
with pytest.raises(ValueError) as excinfo:
tslcs.feature_swarm_plot('z', transform=1)
# wrong label size
with pytest.raises(ValueError) as excinfo:
tslcs.feature_swarm_plot('z', labels=[0, 2, 1])
def test_getters(self):
tslcs = eda.SingleLabelClassifiedSamples(np.arange(10).reshape(5, 2),
[0, 0, 1, 2, 3],
['a', 'b', 'c', '1', '2'],
['a', 'z'])
np.testing.assert_equal(tslcs.x, np.array(
np.arange(10).reshape(5, 2), dtype='float64'))
np.testing.assert_equal(
tslcs.sids, np.array(['a', 'b', 'c', '1', '2']))
np.testing.assert_equal(tslcs.fids, np.array(['a', 'z']))
np.testing.assert_equal(tslcs.labs, np.array([0, 0, 1, 2, 3]))
assert tslcs.x is not tslcs._x
assert tslcs.sids is not tslcs._sids
assert tslcs.fids is not tslcs._fids
assert tslcs.labs is not tslcs._labs
def test_lab_to_sids(self):
tslcs = eda.SingleLabelClassifiedSamples(np.arange(10).reshape(5, 2),
[0, 0, 1, 2, 3],
['a', 'b', 'c', '1', '2'],
['a', 'z'])
qsid_arr = tslcs.labs_to_sids((0, 1))
np.testing.assert_equal(qsid_arr, (('a', 'b'), ('c',)))
def test_sids_to_labs(self):
tslcs = eda.SingleLabelClassifiedSamples(np.arange(10).reshape(5, 2),
[0, 0, 1, 2, 3],
['a', 'b', 'c', '1', '2'],
['a', 'z'])
qlab_arr = tslcs.sids_to_labs(('a', 'b', '2'))
np.testing.assert_equal(qlab_arr, np.array([0, 0, 3]))
qlab_arr = tslcs.sids_to_labs(('1', 'a', 'b', '2'))
np.testing.assert_equal(qlab_arr, np.array([2, 0, 0, 3]))
class TestMDLSingleLabelClassifiedSamples(object):
"""docstring for TestMDLSingleLabelClassifiedSamples"""
np.random.seed(5009)
x50x5 = np.vstack((np.zeros((30, 5)), np.random.ranf((20, 5))))
labs50 = [0]*10 + [1]*35 + [2]*5
    # For some reason, parallel computation exceeds a resource limit when
    # running under pytest; downgrading matplotlib to 3.1.0 prevents the
    # problem from happening.
# def test_pytest(self):
# mdl_slcs = eda.MDLSingleLabelClassifiedSamples(
# self.x50x5, labs=self.labs50, encode_type="distance",
# mdl_method=eda.mdl.GKdeMdl, metric="euclidean")
# for i in range(10):
# data_no_lab_mdl = mdl_slcs.no_lab_mdl(nprocs=3)
def test_mdl_computation(self):
mdl_slcs = eda.MDLSingleLabelClassifiedSamples(
self.x50x5, labs=self.labs50, metric="euclidean")
no_lab_mdl = mdl_slcs.no_lab_mdl()
(ulab_mdl_sum, ulab_s_ind_l, ulab_cnt_l, ulab_mdl_l,
cluster_mdl) = mdl_slcs.lab_mdl()
assert ulab_s_ind_l == [list(range(10)), list(range(10, 45)),
list(range(45, 50))]
assert ulab_mdl_sum == np.sum(ulab_mdl_l)
ulab_cnt_l = [10, 35, 5]
for i in range(3):
ci_mdl = eda.MDLSingleLabelClassifiedSamples(
self.x50x5[ulab_s_ind_l[i], :],
labs=[self.labs50[ii] for ii in ulab_s_ind_l[i]],
metric="euclidean")
np.testing.assert_allclose(
ci_mdl.no_lab_mdl(),
ulab_mdl_l[i] - cluster_mdl * ulab_cnt_l[i] / 50)
def test_data_mdl_computation_mp(self):
mdl_slcs = eda.MDLSingleLabelClassifiedSamples(
self.x50x5, labs=self.labs50, metric="euclidean")
no_lab_mdl = mdl_slcs.no_lab_mdl(nprocs=2)
(ulab_mdl_sum, ulab_s_ind_l, ulab_cnt_l,
ulab_mdl_l, cluster_mdl) = mdl_slcs.lab_mdl(nprocs=2)
assert ulab_s_ind_l == [list(range(10)), list(range(10, 45)),
list(range(45, 50))]
assert ulab_mdl_sum == np.sum(ulab_mdl_l)
ulab_cnt_l = [10, 35, 5]
for i in range(3):
ci_mdl = eda.MDLSingleLabelClassifiedSamples(
self.x50x5[ulab_s_ind_l[i], :],
labs=[self.labs50[ii] for ii in ulab_s_ind_l[i]],
metric="euclidean")
np.testing.assert_allclose(
ci_mdl.no_lab_mdl(nprocs=5),
ulab_mdl_l[i] - cluster_mdl * ulab_cnt_l[i] / 50)
def test_distance_mdl_computation(self):
mdl_slcs = eda.MDLSingleLabelClassifiedSamples(
self.x50x5, labs=self.labs50, encode_type="distance",
mdl_method=eda.mdl.GKdeMdl, metric="euclidean")
no_lab_mdl = mdl_slcs.no_lab_mdl(nprocs=1)
(ulab_mdl_sum, ulab_s_ind_l, ulab_cnt_l,
ulab_mdl_l, cluster_mdl) = mdl_slcs.lab_mdl(nprocs=1)
assert ulab_s_ind_l == [list(range(10)), list(range(10, 45)),
list(range(45, 50))]
assert ulab_mdl_sum == np.sum(ulab_mdl_l)
ulab_cnt_l = [10, 35, 5]
for i in range(3):
ci_mdl = eda.MDLSingleLabelClassifiedSamples(
mdl_slcs._x[ulab_s_ind_l[i]],
labs=[self.labs50[ii] for ii in ulab_s_ind_l[i]],
metric="euclidean", encode_type=mdl_slcs._encode_type,
mdl_method=mdl_slcs._mdl_method)
np.testing.assert_allclose(
ci_mdl.no_lab_mdl(nprocs=1),
ulab_mdl_l[i] - cluster_mdl * ulab_cnt_l[i] / 50)
def test_distance_mdl_computation_mp(self):
mdl_slcs = eda.MDLSingleLabelClassifiedSamples(
self.x50x5, labs=self.labs50, encode_type="data",
mdl_method=eda.mdl.GKdeMdl, metric="euclidean")
data_no_lab_mdl = mdl_slcs.no_lab_mdl(nprocs=2)
mdl_slcs = eda.MDLSingleLabelClassifiedSamples(
self.x50x5, labs=self.labs50, encode_type="distance",
mdl_method=eda.mdl.GKdeMdl, metric="euclidean")
no_lab_mdl = mdl_slcs.no_lab_mdl(nprocs=2)
(ulab_mdl_sum, ulab_s_ind_l, ulab_cnt_l,
ulab_mdl_l, cluster_mdl) = mdl_slcs.lab_mdl(nprocs=2)
assert ulab_s_ind_l == [list(range(10)), list(range(10, 45)),
list(range(45, 50))]
assert ulab_mdl_sum == np.sum(ulab_mdl_l)
ulab_cnt_l = [10, 35, 5]
for i in range(3):
ci_mdl = eda.MDLSingleLabelClassifiedSamples(
mdl_slcs._x[ulab_s_ind_l[i]].copy(),
labs=[self.labs50[ii] for ii in ulab_s_ind_l[i]],
metric="euclidean", encode_type="distance",
mdl_method=eda.mdl.GKdeMdl)
ci_mdl_no_lab_res = ci_mdl.no_lab_mdl(nprocs=2)
np.testing.assert_allclose(
ci_mdl_no_lab_res,
ulab_mdl_l[i] - cluster_mdl * ulab_cnt_l[i] / 50)
def test_mdl_method(self):
zigk_mdl_slcs = eda.MDLSingleLabelClassifiedSamples(
self.x50x5, labs=self.labs50, mdl_method=eda.mdl.ZeroIGKdeMdl,
metric="euclidean")
gk_mdl_slcs = eda.MDLSingleLabelClassifiedSamples(
self.x50x5, labs=self.labs50, mdl_method=eda.mdl.GKdeMdl,
metric="euclidean")
assert gk_mdl_slcs._mdl_method is eda.mdl.GKdeMdl
assert zigk_mdl_slcs._mdl_method is eda.mdl.ZeroIGKdeMdl
assert zigk_mdl_slcs.no_lab_mdl() != gk_mdl_slcs.no_lab_mdl()
for mdl_method in eda.MDL_METHODS:
mdl_slcs = eda.MDLSingleLabelClassifiedSamples(
self.x50x5, labs=self.labs50, mdl_method=mdl_method,
metric="euclidean")
(ulab_mdl_sum, ulab_s_ind_l, ulab_cnt_l,
ulab_mdl_l, cluster_mdl) = mdl_slcs.lab_mdl()
assert ulab_s_ind_l == [list(range(10)), list(range(10, 45)),
list(range(45, 50))]
assert ulab_mdl_sum == np.sum(ulab_mdl_l)
ulab_cnt_l = [10, 35, 5]
for i in range(3):
ci_mdl = eda.MDLSingleLabelClassifiedSamples(
self.x50x5[ulab_s_ind_l[i], :],
labs=[self.labs50[ii] for ii in ulab_s_ind_l[i]],
mdl_method=mdl_method,
metric="euclidean")
np.testing.assert_allclose(
ci_mdl.no_lab_mdl(),
ulab_mdl_l[i] - cluster_mdl * ulab_cnt_l[i] / 50)
def test_wrong_mdl_method(self):
with pytest.raises(ValueError) as excinfo:
eda.MDLSingleLabelClassifiedSamples(
self.x50x5, labs=self.labs50, mdl_method="123",
metric="euclidean").no_lab_mdl()
with pytest.raises(ValueError) as excinfo:
eda.MDLSingleLabelClassifiedSamples(
self.x50x5, labs=self.labs50, mdl_method=int,
metric="euclidean").no_lab_mdl()
with pytest.raises(ValueError) as excinfo:
eda.MDLSingleLabelClassifiedSamples(
self.x50x5, labs=self.labs50, mdl_method="ZeroIMdl",
metric="euclidean").no_lab_mdl()
with pytest.raises(ValueError) as excinfo:
eda.MDLSingleLabelClassifiedSamples(
self.x50x5, labs=self.labs50, mdl_method=2,
metric="euclidean").no_lab_mdl()
def test_wrong_encode_type(self):
with pytest.raises(ValueError) as excinfo:
eda.MDLSingleLabelClassifiedSamples(
self.x50x5, labs=self.labs50, encode_type="123",
metric="euclidean").no_lab_mdl()
with pytest.raises(ValueError) as excinfo:
eda.MDLSingleLabelClassifiedSamples(
self.x50x5, labs=self.labs50, encode_type=1,
metric="euclidean").no_lab_mdl()
with pytest.raises(ValueError) as excinfo:
eda.MDLSingleLabelClassifiedSamples(
self.x50x5, labs=self.labs50, encode_type=None,
metric="euclidean").no_lab_mdl()
def test_auto_param(self):
eda.MDLSingleLabelClassifiedSamples(
self.x50x5, labs=self.labs50, encode_type="auto",
mdl_method=None, metric="euclidean")
eda.MDLSingleLabelClassifiedSamples(
np.zeros((100, 101)), labs=[0]*100, encode_type="auto",
mdl_method=None, metric="euclidean")
eda.MDLSingleLabelClassifiedSamples(
np.ones((100, 100)), labs=[0]*100, encode_type="auto",
mdl_method=None, metric="euclidean")
eda.MDLSingleLabelClassifiedSamples(
[[], []], labs=[0]*2, encode_type="auto",
mdl_method=None, metric="euclidean")
def test_lab_mdl_ret_internal(self):
mdl_slcs = eda.MDLSingleLabelClassifiedSamples(
self.x50x5, labs=self.labs50, metric="euclidean")
((ulab_mdl_sum, ulab_s_ind_l, ulab_cnt_l, ulab_mdl_l,
cluster_mdl), mdl_l) = mdl_slcs.lab_mdl(ret_internal=True)
np.testing.assert_allclose(sum(mdl_l) + cluster_mdl,
sum(ulab_mdl_l))
lab_mdl_res = mdl_slcs.lab_mdl()
ulab_mdl_sum2 = lab_mdl_res.ulab_mdl_sum
assert ulab_mdl_sum2 == ulab_mdl_sum
assert ulab_mdl_sum2 == np.sum(lab_mdl_res.ulab_mdls)
def test_per_col_encoders_wrong_xshape(self):
with pytest.raises(ValueError) as excinfo:
eda.MDLSingleLabelClassifiedSamples.per_col_encoders(
np.zeros(10), "data")
with pytest.raises(ValueError) as excinfo:
eda.MDLSingleLabelClassifiedSamples.per_col_encoders(
np.zeros((10, 10, 10)), "data")
def test_encode_mdl(self):
mdl_slcs = eda.MDLSingleLabelClassifiedSamples(
self.x50x5, labs=self.labs50, metric="euclidean")
# wrong dimensions
with pytest.raises(ValueError) as excinfo:
mdl_slcs.encode(np.zeros((10, 3)))
with pytest.raises(ValueError) as excinfo:
mdl_slcs.encode(np.zeros(20))
with pytest.raises(ValueError) as excinfo:
mdl_slcs.encode(np.zeros(20), col_summary_func=1)
with pytest.raises(ValueError) as excinfo:
mdl_slcs.encode(np.zeros(20), col_summary_func=None)
emdl = mdl_slcs.encode(np.arange(100).reshape(-1, 5))
emdl2 = mdl_slcs.encode(np.arange(100).reshape(-1, 5), nprocs=2)
np.testing.assert_approx_equal(emdl, emdl2)
emdl3 = eda.MDLSingleLabelClassifiedSamples(
self.x50x5, mdl_method=eda.mdl.GKdeMdl, labs=self.labs50,
metric="euclidean").encode(np.arange(100).reshape(-1, 5))
assert emdl != emdl3
emdl4 = eda.MDLSingleLabelClassifiedSamples(
np.zeros((50, 5)), mdl_method=eda.mdl.GKdeMdl, labs=self.labs50,
metric="euclidean").encode(np.arange(100).reshape(-1, 5))
assert emdl != emdl4
emdl5 = eda.MDLSingleLabelClassifiedSamples(
np.zeros((50, 5)), mdl_method=eda.mdl.GKdeMdl, labs=self.labs50,
metric="euclidean").encode(np.arange(100).reshape(-1, 5),
non_zero_only=True)
assert emdl5 != emdl3
emdl6 = eda.MDLSingleLabelClassifiedSamples(
self.x50x5, encode_type="distance", mdl_method=eda.mdl.GKdeMdl,
labs=self.labs50, metric="euclidean").encode(
np.arange(100).reshape(-1, 50), non_zero_only=True)
assert emdl5 != emdl3
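# Hedged illustration (not part of the original test suite): the assertions above
# check a decomposition in which each per-label MDL equals that cluster's
# label-free MDL plus a share of the clustering overhead proportional to the
# cluster size. The numbers below are made up purely to show the arithmetic.
def _demo_lab_mdl_decomposition():
    import numpy as np
    n = 50
    ulab_cnt_l = np.array([10, 35, 5])           # hypothetical cluster sizes
    cluster_mdl = 12.0                           # hypothetical clustering overhead
    no_lab_mdls = np.array([40.0, 90.0, 25.0])   # hypothetical label-free MDLs
    # per-label MDL = label-free MDL + proportional share of cluster_mdl
    ulab_mdl_l = no_lab_mdls + cluster_mdl * ulab_cnt_l / n
    ulab_mdl_sum = np.sum(ulab_mdl_l)
    # undoing the share recovers the label-free MDLs, mirroring the asserts above
    np.testing.assert_allclose(ulab_mdl_l - cluster_mdl * ulab_cnt_l / n, no_lab_mdls)
    assert ulab_mdl_sum == np.sum(ulab_mdl_l)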
| 43.456186
| 79
| 0.567681
|
afb6dd7a678773d5f7b1fc15bd92bc36579fb00b
| 3,035
|
py
|
Python
|
scitail/models/entailment/simple_overlap.py
|
allenai/scitail
|
2e57f46a4620d50e85323c4a642114426db67393
|
[
"Apache-2.0"
] | 40
|
2018-02-01T04:20:50.000Z
|
2022-03-08T02:31:08.000Z
|
scitail/models/entailment/simple_overlap.py
|
allenai/scitail
|
2e57f46a4620d50e85323c4a642114426db67393
|
[
"Apache-2.0"
] | 6
|
2018-02-06T03:41:00.000Z
|
2018-10-22T06:13:59.000Z
|
scitail/models/entailment/simple_overlap.py
|
allenai/scitail
|
2e57f46a4620d50e85323c4a642114426db67393
|
[
"Apache-2.0"
] | 8
|
2018-02-08T09:06:16.000Z
|
2019-07-17T23:12:17.000Z
|
from typing import Dict, List, Any
import torch
from allennlp.common import Params
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward
from allennlp.nn import InitializerApplicator
from allennlp.training.metrics import CategoricalAccuracy
@Model.register("simple_overlap")
class SimpleOverlap(Model):
"""
Simple model that applies a feedforward network on overlap-based feature vectors to
compute entailment probability
"""
def __init__(self, vocab: Vocabulary,
classifier: FeedForward,
initializer: InitializerApplicator = InitializerApplicator()) -> None:
super(SimpleOverlap, self).__init__(vocab)
self.linear_mlp = classifier
self._accuracy = CategoricalAccuracy()
self._loss = torch.nn.CrossEntropyLoss()
initializer(self)
def forward(self,
features: torch.Tensor,
metadata: List[Dict[str, Any]] = None,
label: torch.IntTensor = None) -> Dict[str, torch.Tensor]:
"""
Parameters
----------
features: torch.Tensor,
From a ``FloatField`` over the overlap features computed by the SimpleOverlapReader
metadata: List[Dict[str, Any]]
Metadata information
label : torch.IntTensor, optional (default = None)
From a ``LabelField``
Returns
-------
An output dictionary consisting of:
label_logits : torch.FloatTensor
A tensor of shape ``(batch_size, num_labels)`` representing unnormalised log
probabilities of the entailment label.
label_probs : torch.FloatTensor
A tensor of shape ``(batch_size, num_labels)`` representing probabilities of the
entailment label.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
"""
label_logits = self.linear_mlp(features)
        label_probs = torch.nn.functional.softmax(label_logits, dim=-1)
output_dict = {"label_logits": label_logits, "label_probs": label_probs}
if label is not None:
loss = self._loss(label_logits, label.long().view(-1))
self._accuracy(label_logits, label.squeeze(-1))
output_dict["loss"] = loss
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {
'accuracy': self._accuracy.get_metric(reset),
}
@classmethod
def from_params(cls, vocab: Vocabulary, params: Params) -> 'SimpleOverlap':
classifier = FeedForward.from_params(params.pop('classifier'))
init_params = params.pop('initializer', None)
initializer = (InitializerApplicator.from_params(init_params)
if init_params is not None
else InitializerApplicator())
return cls(vocab=vocab,
classifier=classifier,
initializer=initializer)
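# Hedged sketch (not from the SciTail repo): the forward pass above is a plain
# feedforward classifier over precomputed overlap features followed by softmax
# and cross-entropy. The toy sizes and the torch.nn.Sequential stand-in for
# AllenNLP's FeedForward are assumptions made only for illustration.
def _demo_simple_overlap_forward():
    import torch
    batch_size, n_features, n_labels = 4, 3, 2
    classifier = torch.nn.Sequential(torch.nn.Linear(n_features, n_labels))
    features = torch.randn(batch_size, n_features)
    label = torch.randint(0, n_labels, (batch_size,))
    label_logits = classifier(features)                             # (batch, n_labels)
    label_probs = torch.nn.functional.softmax(label_logits, dim=-1)
    loss = torch.nn.CrossEntropyLoss()(label_logits, label)
    return {"label_logits": label_logits, "label_probs": label_probs, "loss": loss}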
| 37.469136
| 95
| 0.637891
|
f2ad3c78a1401c26baf86e913c05027330312ee2
| 3,927
|
py
|
Python
|
spikeextractors/extractors/openephysextractors/openephysextractors.py
|
TRuikes/spikeextractors
|
c3cbdaa18629aeb5ecb52f648e69b503a4f091d2
|
[
"MIT"
] | null | null | null |
spikeextractors/extractors/openephysextractors/openephysextractors.py
|
TRuikes/spikeextractors
|
c3cbdaa18629aeb5ecb52f648e69b503a4f091d2
|
[
"MIT"
] | null | null | null |
spikeextractors/extractors/openephysextractors/openephysextractors.py
|
TRuikes/spikeextractors
|
c3cbdaa18629aeb5ecb52f648e69b503a4f091d2
|
[
"MIT"
] | null | null | null |
from spikeextractors import RecordingExtractor, SortingExtractor
from pathlib import Path
import numpy as np
from spikeextractors.extraction_tools import check_get_traces_args, check_valid_unit_id
try:
import pyopenephys
HAVE_OE = True
except ImportError:
HAVE_OE = False
class OpenEphysRecordingExtractor(RecordingExtractor):
extractor_name = 'OpenEphysRecording'
has_default_locations = False
installed = HAVE_OE # check at class level if installed or not
is_writable = False
mode = 'folder'
installation_mesg = "To use the OpenEphys extractor, install pyopenephys: \n\n pip install pyopenephys\n\n" # error message when not installed
def __init__(self, folder_path, *, experiment_id=0, recording_id=0, dtype='float'):
assert HAVE_OE, "To use the OpenEphys extractor, install pyopenephys: \n\n pip install pyopenephys\n\n"
assert dtype == 'int16' or 'float' in dtype, "'dtype' can be int16 (memory map) or 'float' (load into memory)"
RecordingExtractor.__init__(self)
self._recording_file = folder_path
self._recording = pyopenephys.File(folder_path).experiments[experiment_id].recordings[recording_id]
self._dtype = dtype
self._kwargs = {'folder_path': str(Path(folder_path).absolute()), 'experiment_id': experiment_id,
'recording_id': recording_id, 'dtype': dtype}
def get_channel_ids(self):
return list(range(self._recording.analog_signals[0].signal.shape[0]))
def get_num_frames(self):
return self._recording.analog_signals[0].signal.shape[1]
def get_sampling_frequency(self):
return float(self._recording.sample_rate.rescale('Hz').magnitude)
@check_get_traces_args
def get_traces(self, channel_ids=None, start_frame=None, end_frame=None):
if self._dtype == 'int16':
return self._recording.analog_signals[0].signal[channel_ids, start_frame:end_frame]
elif self._dtype == 'float':
return self._recording.analog_signals[0].signal[channel_ids, start_frame:end_frame] * \
self._recording.analog_signals[0].gain
class OpenEphysSortingExtractor(SortingExtractor):
extractor_name = 'OpenEphysSortingExtractor'
installed = HAVE_OE # check at class level if installed or not
is_writable = False
mode = 'file'
installation_mesg = "To use the OpenEphys extractor, install pyopenephys: \n\n pip install pyopenephys\n\n" # error message when not installed
def __init__(self, folder_path, *, experiment_id=0, recording_id=0):
assert HAVE_OE, "To use the OpenEphys extractor, install pyopenephys: \n\n pip install pyopenephys\n\n"
SortingExtractor.__init__(self)
self._recording_file = folder_path
self._recording = pyopenephys.File(folder_path).experiments[experiment_id].recordings[recording_id]
self._spiketrains = self._recording.spiketrains
self._unit_ids = list([np.unique(st.clusters)[0] for st in self._spiketrains])
self._sampling_frequency = float(self._recording.sample_rate.rescale('Hz').magnitude)
self._kwargs = {'folder_path': str(Path(folder_path).absolute()), 'experiment_id': experiment_id,
'recording_id': recording_id}
def get_unit_ids(self):
return self._unit_ids
@check_valid_unit_id
def get_unit_spike_train(self, unit_id, start_frame=None, end_frame=None):
start_frame, end_frame = self._cast_start_end_frame(start_frame, end_frame)
if start_frame is None:
start_frame = 0
if end_frame is None:
end_frame = np.Inf
st = self._spiketrains[unit_id]
inds = np.where((start_frame <= (st.times * self._recording.sample_rate)) &
((st.times * self._recording.sample_rate) < end_frame))
return (st.times[inds] * self._recording.sample_rate).magnitude
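# Hedged sketch (not part of spikeextractors): get_unit_spike_train above keeps
# the spikes whose time multiplied by the sampling rate falls inside the requested
# frame window. The spike times and sampling rate below are made-up numbers.
def _demo_spike_train_frame_window():
    import numpy as np
    sample_rate = 30000.0                        # Hz, hypothetical
    spike_times_s = np.array([0.001, 0.5, 1.2, 2.7])
    start_frame, end_frame = 0, 60000            # first two seconds of recording
    spike_frames = spike_times_s * sample_rate
    inds = np.where((start_frame <= spike_frames) & (spike_frames < end_frame))
    return spike_frames[inds]                    # -> array([   30., 15000., 36000.])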
| 47.313253
| 147
| 0.708429
|
be4cfef0edb04b8e98a10691a1d87292eec1ec54
| 14,967
|
py
|
Python
|
crowdsourcing/viewsets/task.py
|
ramcn/sept20
|
e6f6e238d0561ebf3353158161f1b20052e8b08b
|
[
"MIT"
] | null | null | null |
crowdsourcing/viewsets/task.py
|
ramcn/sept20
|
e6f6e238d0561ebf3353158161f1b20052e8b08b
|
[
"MIT"
] | null | null | null |
crowdsourcing/viewsets/task.py
|
ramcn/sept20
|
e6f6e238d0561ebf3353158161f1b20052e8b08b
|
[
"MIT"
] | null | null | null |
from crowdsourcing.serializers.task import *
from rest_framework import status, viewsets
from rest_framework.response import Response
from rest_framework.decorators import detail_route, list_route
from django.shortcuts import get_object_or_404
from crowdsourcing.permissions.project import IsProjectOwnerOrCollaborator
from crowdsourcing.models import Task, TaskWorker, TaskWorkerResult, WorkerRequesterRating
from django.utils import timezone
from django.db.models import Q
from rest_framework.permissions import IsAuthenticated
from crowdsourcing.permissions.task import HasExceededReservedLimit
from crowdsourcing.serializers.rating import WorkerRequesterRatingSerializer
from crowdsourcing.experimental_models import SubModule
from datetime import timedelta
class TaskViewSet(viewsets.ModelViewSet):
queryset = Task.objects.all()
serializer_class = TaskSerializer
@detail_route(methods=['post'], permission_classes=[IsProjectOwnerOrCollaborator])
def update_task(self, request, id=None):
task_serializer = TaskSerializer(data=request.data)
task = self.get_object()
if task_serializer.is_valid():
task_serializer.update(task, task_serializer.validated_data)
return Response({'status': 'updated task'})
else:
return Response(task_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def list(self, request, *args, **kwargs):
try:
module = request.query_params.get('module')
task = Task.objects.filter(module=module)
task_serialized = TaskSerializer(task, many=True)
return Response(task_serialized.data)
except:
return Response([])
def destroy(self, request, *args, **kwargs):
task_serializer = TaskSerializer()
task = self.get_object()
task_serializer.delete(task)
return Response({'status': 'deleted task'})
@detail_route(methods=['get'])
def retrieve_with_data(self, request, *args, **kwargs):
task = self.get_object()
serializer = TaskSerializer(instance=task, fields=('id', 'task_template', 'module_data', 'status', 'has_comments'))
rating = models.WorkerRequesterRating.objects.filter(origin=request.user.userprofile.id,
target=task.module.owner.profile.id,
origin_type='worker', module=task.module.id)
requester_alias = task.module.owner.alias
module = task.module.id
target = task.module.owner.profile.id
if rating.count() != 0:
rating_serializer = WorkerRequesterRatingSerializer(instance=rating, many=True,
fields=('id', 'weight'))
return Response({'data': serializer.data,
'rating': rating_serializer.data,
'requester_alias': requester_alias,
'module': module,
'target': target}, status.HTTP_200_OK)
else:
return Response({'data': serializer.data,
'requester_alias': requester_alias,
'module': module,
'target': target}, status.HTTP_200_OK)
@list_route(methods=['get'])
def list_by_module(self, request, **kwargs):
tasks = Task.objects.filter(module=request.query_params.get('module_id'))
task_serializer = TaskSerializer(instance=tasks, many=True, fields=('id', 'status',
'template_items_monitoring',
'task_workers_monitoring',
'has_comments', 'comments'))
response_data = {
'project_name': tasks[0].module.project.name,
'project_id': tasks[0].module.project.id,
'module_name': tasks[0].module.name,
'module_id': tasks[0].module.id,
'tasks': task_serializer.data
}
return Response(response_data, status.HTTP_200_OK)
@list_route(methods=['get'])
def sample_by_submodule(self, request, **kwargs):
submodule = SubModule.objects.get(fake_module_id=request.query_params.get('fake_module_id'))
hours_before_results = submodule.hours_before_results
if submodule.created_timestamp + timedelta(hours=submodule.hours_before_results) <= timezone.now():
results_per_round = submodule.results_per_round
round_exp = submodule.round_exp
sample = len(submodule.taskworkers) == 0
pool = submodule.owner.pool
tasks = Task.objects.filter(module=submodule.origin_module.id)
task_serializer = TaskSerializer(instance=tasks, many=True,
context={'requester': request.user.userprofile.id, 'submodule': submodule.id,
'round_exp': round_exp, 'results_per_round': results_per_round,
'sample': sample, 'pool': pool},
fields=('id', 'status', 'template_items_monitoring', 'has_comments',
'comments', 'task_workers_sampled'))
for task in task_serializer.data:
task['task_workers_monitoring'] = task['task_workers_sampled']
response_data = {
'project_name': tasks[0].module.project.name,
'project_id': tasks[0].module.project.id,
'module_name': tasks[0].module.name,
'module_id': tasks[0].module.id,
'tasks': task_serializer.data
}
return Response(response_data, status.HTTP_200_OK)
else:
return Response([], status.HTTP_200_OK)
@detail_route(methods=['get'])
def list_comments(self, request, **kwargs):
comments = models.TaskComment.objects.filter(task=kwargs['pk'])
serializer = TaskCommentSerializer(instance=comments, many=True, fields=('comment', 'id',))
response_data = {
'task': kwargs['pk'],
'comments': serializer.data
}
return Response(response_data, status.HTTP_200_OK)
@detail_route(methods=['post'])
def post_comment(self, request, **kwargs):
serializer = TaskCommentSerializer(data=request.data)
task_comment_data = {}
if serializer.is_valid():
comment = serializer.create(task=kwargs['pk'], sender=request.user.userprofile)
task_comment_data = TaskCommentSerializer(comment, fields=('id', 'comment',)).data
return Response(task_comment_data, status.HTTP_200_OK)
class TaskWorkerViewSet(viewsets.ModelViewSet):
queryset = TaskWorker.objects.all()
serializer_class = TaskWorkerSerializer
permission_classes = [IsAuthenticated, HasExceededReservedLimit]
lookup_field = 'task__id'
def create(self, request, *args, **kwargs):
serializer = TaskWorkerSerializer(data=request.data)
if serializer.is_valid():
instance, http_status = serializer.create(worker=request.user.userprofile.worker,
module=request.data.get('module', None))
serialized_data = {}
if http_status == 200:
serialized_data = TaskWorkerSerializer(instance=instance).data
return Response(serialized_data, http_status)
else:
return Response(serializer.errors,
status=status.HTTP_400_BAD_REQUEST)
def destroy(self, request, *args, **kwargs):
serializer = TaskWorkerSerializer()
obj = self.queryset.get(task=kwargs['task__id'], worker=request.user.userprofile.worker.id)
instance, http_status = serializer.create(worker=request.user.userprofile.worker, module=obj.task.module_id)
obj.task_status = 6
obj.save()
serialized_data = {}
if http_status == 200:
serialized_data = TaskWorkerSerializer(instance=instance).data
return Response(serialized_data, http_status)
@list_route(methods=['post'])
def bulk_update_status(self, request, *args, **kwargs):
task_status = request.data.get('task_status', -1)
task_workers = TaskWorker.objects.filter(id__in=tuple(request.data.get('task_workers', [])))
task_workers.update(task_status=task_status, last_updated=timezone.now())
return Response(TaskWorkerSerializer(instance=task_workers, many=True,
fields=('id', 'task', 'task_status', 'task_worker_results_monitoring',
'worker_alias', 'updated_delta')).data, status.HTTP_200_OK)
@list_route(methods=['get'])
def list_by_status(self, request, *args, **kwargs):
status_map = {1: 'In Progress', 2: 'Submitted', 3: 'Accepted', 4: 'Rejected', 5: 'Returned'}
response = dict()
for key, value in status_map.iteritems():
task_workers = TaskWorker.objects.filter(worker=request.user.userprofile.worker, task_status=key)
serializer = TaskWorkerSerializer(instance=task_workers, many=True,
fields=(
'id', 'task_status', 'task', 'requester_alias', 'module', 'project_name',
'is_paid', 'last_updated'))
response[value] = serializer.data
return Response(response, status.HTTP_200_OK)
@detail_route(methods=['get'])
def retrieve_with_data_and_results(self, request, *args, **kwargs):
task_worker = TaskWorker.objects.get(id=request.query_params['id'])
serializer = TaskWorkerSerializer(instance=task_worker,
fields=('task', 'task_status', 'task_template', 'has_comments'))
rating = models.WorkerRequesterRating.objects.filter(origin=request.user.userprofile.id,
target=task_worker.task.module.owner.profile.id,
origin_type='worker', module=task_worker.task.module.id)
requester_alias = task_worker.task.module.owner.alias
module = task_worker.task.module.id
target = task_worker.task.module.owner.profile.id
if rating.count() != 0:
rating_serializer = WorkerRequesterRatingSerializer(instance=rating, many=True,
fields=('id', 'weight'))
return Response({'data': serializer.data,
'rating': rating_serializer.data,
'requester_alias': requester_alias,
'module': module,
'target': target}, status.HTTP_200_OK)
else:
return Response({'data': serializer.data,
'requester_alias': requester_alias,
'module': module,
'target': target}, status.HTTP_200_OK)
@list_route(methods=['post'])
def drop_saved_tasks(self, request, *args, **kwargs):
task_ids = request.data.get('task_ids', [])
self.queryset.filter(task_id__in=task_ids, worker=request.user.userprofile.worker.id).update(
task_status=6, last_updated=timezone.now())
return Response('Success', status.HTTP_200_OK)
@list_route(methods=['post'])
def bulk_pay_by_module(self, request, *args, **kwargs):
module = request.data.get('module')
accepted, rejected = 3, 4
task_workers = TaskWorker.objects.filter(task__module=module).filter(
Q(task_status=accepted) | Q(task_status=rejected))
task_workers.update(is_paid=True, last_updated=timezone.now())
return Response('Success', status.HTTP_200_OK)
class TaskWorkerResultViewSet(viewsets.ModelViewSet):
queryset = TaskWorkerResult.objects.all()
serializer_class = TaskWorkerResultSerializer
# permission_classes = [IsOwnerOrReadOnly]
def update(self, request, *args, **kwargs):
task_worker_result_serializer = TaskWorkerResultSerializer(data=request.data)
task_worker_result = self.queryset.filter(id=kwargs['pk'])[0]
status = 1
if 'status' in request.data:
status = request.data['status']
task_worker_result.status = status
task_worker_result.save()
return Response("Success")
def retrieve(self, request, *args, **kwargs):
worker = get_object_or_404(self.queryset, worker=request.worker)
serializer = TaskWorkerResultSerializer(instance=worker)
return Response(serializer.data)
@list_route(methods=['post'], url_path="submit-results")
def submit_results(self, request, *args, **kwargs):
task = request.data.get('task', None)
template_items = request.data.get('template_items', [])
task_status = request.data.get('task_status', None)
saved = request.data.get('saved')
task_worker = TaskWorker.objects.get(worker=request.user.userprofile.worker, task=task)
task_worker.task_status = task_status
task_worker.save()
task_worker_results = TaskWorkerResult.objects.filter(task_worker_id=task_worker.id)
if task_status == 1:
serializer = TaskWorkerResultSerializer(data=template_items, many=True, partial=True)
else:
serializer = TaskWorkerResultSerializer(data=template_items, many=True)
if serializer.is_valid():
if task_worker_results.count() != 0:
serializer.update(task_worker_results, serializer.validated_data)
else:
serializer.create(task_worker=task_worker)
if task_status == 1 or saved:
return Response('Success', status.HTTP_200_OK)
elif task_status == 2 and not saved:
task_worker_serializer = TaskWorkerSerializer()
instance, http_status = task_worker_serializer.create(
worker=request.user.userprofile.worker, module=task_worker.task.module_id)
serialized_data = {}
if http_status == 200:
serialized_data = TaskWorkerSerializer(instance=instance).data
return Response(serialized_data, http_status)
else:
return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)
class CurrencyViewSet(viewsets.ModelViewSet):
from crowdsourcing.models import Currency
queryset = Currency.objects.all()
serializer_class = CurrencySerializer
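# Hedged sketch (not part of the Django app): list_by_status above buckets one
# worker's TaskWorker rows by the integer task_status codes 1-5. This plain-dict
# version shows the same grouping on made-up rows, without the ORM or serializers.
def _demo_group_task_workers_by_status():
    status_map = {1: 'In Progress', 2: 'Submitted', 3: 'Accepted', 4: 'Rejected', 5: 'Returned'}
    task_workers = [{'id': 1, 'task_status': 2},
                    {'id': 2, 'task_status': 3},
                    {'id': 3, 'task_status': 2}]
    response = {name: [tw for tw in task_workers if tw['task_status'] == code]
                for code, name in status_map.items()}
    return response  # e.g. {'Submitted': [ids 1 and 3], 'Accepted': [id 2], ...}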
| 51.43299
| 123
| 0.615955
|
493c7ff03d38e6694699c027de894081f8cfa4d3
| 687
|
py
|
Python
|
Arase/Tools/__init__.py
|
mattkjames7/Arase
|
996167be35a13bbb1fdddfbe75e3a06d124b1d25
|
[
"MIT"
] | null | null | null |
Arase/Tools/__init__.py
|
mattkjames7/Arase
|
996167be35a13bbb1fdddfbe75e3a06d124b1d25
|
[
"MIT"
] | 1
|
2021-06-10T22:51:09.000Z
|
2021-06-10T22:51:09.000Z
|
Arase/Tools/__init__.py
|
mattkjames7/Arase
|
996167be35a13bbb1fdddfbe75e3a06d124b1d25
|
[
"MIT"
] | null | null | null |
from .ReadCDF import ReadCDF
from .CountstoFlux import CountstoFlux
from .CountstoPSD import CountstoPSD
from .FluxtoCounts import FluxtoCounts
from .FluxtoPSD import FluxtoPSD
from .PSDtoCounts import PSDtoCounts
from .PSDtoFlux import PSDtoFlux
from .MaxwellBoltzmannDist import MaxwellBoltzmannDist,MaxwellBoltzmannDistCts,MaxwellBoltzmannDistE
from .KappaDist import KappaDist,KappaDistCts
from .FitMaxwellianDist import FitMaxwellianDist,FitMaxwellianDistCts
from .FitKappaDist import FitKappaDist,FitKappaDistCts
from .ListDates import ListDates
from .SpecCls import SpecCls
from .PSpecCls import PSpecCls
from .PSpecPADCls import PSpecPADCls
from .RelVelocity import RelVelocity
| 40.411765
| 100
| 0.874818
|
5531f3e7ae6541c69293005755bf4fa33c3c7639
| 38,457
|
py
|
Python
|
dannce/engine/processing.py
|
jessedmarshall/dannce
|
6518415fc723de51b766664bd8f2ec3793dc9af8
|
[
"MIT"
] | 1
|
2021-12-07T20:46:58.000Z
|
2021-12-07T20:46:58.000Z
|
dannce/engine/processing.py
|
jessedmarshall/dannce
|
6518415fc723de51b766664bd8f2ec3793dc9af8
|
[
"MIT"
] | null | null | null |
dannce/engine/processing.py
|
jessedmarshall/dannce
|
6518415fc723de51b766664bd8f2ec3793dc9af8
|
[
"MIT"
] | null | null | null |
"""Processing functions for dannce."""
import numpy as np
from skimage.color import rgb2gray
from skimage.transform import downscale_local_mean as dsm
import imageio
import os
import dannce.engine.serve_data_DANNCE as serve_data_DANNCE
import PIL
from six.moves import cPickle
import scipy.io as sio
from dannce.engine import io
import matplotlib
import warnings
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import yaml
import shutil
import time
def initialize_vids(CONFIG_PARAMS, datadict, e, vids, pathonly=True):
"""
Initializes video path dictionaries for a training session. This is different
than a predict session because it operates over a single animal ("experiment")
at a time
"""
for i in range(len(CONFIG_PARAMS["experiment"][e]["camnames"])):
# Rather than opening all vids, only open what is needed based on the
# maximum frame ID for this experiment and Camera
flist = []
for key in datadict.keys():
if int(key.split("_")[0]) == e:
flist.append(
datadict[key]["frames"][
CONFIG_PARAMS["experiment"][e]["camnames"][i]
]
)
flist = max(flist)
# For COM prediction, we don't prepend experiment IDs
# So detect this case and act accordingly.
basecam = CONFIG_PARAMS["experiment"][e]["camnames"][i]
if "_" in basecam:
basecam = basecam.split("_")[1]
if CONFIG_PARAMS["vid_dir_flag"]:
addl = ""
else:
addl = os.listdir(
os.path.join(CONFIG_PARAMS["experiment"][e]["viddir"], basecam,)
)[0]
r = generate_readers(
CONFIG_PARAMS["experiment"][e]["viddir"],
os.path.join(basecam, addl),
maxopt=flist, # Large enough to encompass all videos in directory.
extension=CONFIG_PARAMS["experiment"][e]["extension"],
pathonly=pathonly,
)
if "_" in CONFIG_PARAMS["experiment"][e]["camnames"][i]:
vids[CONFIG_PARAMS["experiment"][e]["camnames"][i]] = {}
for key in r:
vids[CONFIG_PARAMS["experiment"][e]["camnames"][i]][
str(e) + "_" + key
] = r[key]
else:
vids[CONFIG_PARAMS["experiment"][e]["camnames"][i]] = r
return vids
def infer_params(params, dannce_net, prediction):
"""
Some parameters that were previously specified in configs can just be inferred
from others, thus relieving config bloat
"""
# Grab the camnames from *dannce.mat if not in config
if params["camnames"] is None:
f = grab_predict_label3d_file()
params["camnames"] = io.load_camnames(f)
if params["camnames"] is None:
raise Exception("No camnames in config or in *dannce.mat")
# Infer vid_dir_flag and extension and n_channels_in and chunks
# from the videos and video folder organization.
# Look into the video directory / camnames[0]. Is there a video file?
# If so, vid_dir_flag = True
viddir = os.path.join(params["viddir"], params["camnames"][0])
video_files = os.listdir(viddir)
if any([".mp4" in file for file in video_files]) or any(
[".avi" in file for file in video_files]
):
print_and_set(params, "vid_dir_flag", True)
else:
print_and_set(params, "vid_dir_flag", False)
viddir = os.path.join(viddir, video_files[0])
video_files = os.listdir(viddir)
extension = ".mp4" if any([".mp4" in file for file in video_files]) else ".avi"
print_and_set(params, "extension", extension)
video_files = [file for file in video_files if extension in file]
if len(video_files) > 1:
video_files = sorted(video_files, key=lambda x: int(x.split(".")[0]))
chunks = int(video_files[1].split(".")[0]) - int(video_files[0].split(".")[0])
else:
chunks = 10000000000000
camf = os.path.join(viddir, video_files[0])
print_and_set(params, "chunks", chunks)
# Infer n_channels_in from the video info
v = imageio.get_reader(camf)
im = v.get_data(0)
v.close()
print_and_set(params, "n_channels_in", im.shape[-1])
# set the raw im height and width
print_and_set(params, "raw_im_h", im.shape[0])
print_and_set(params, "raw_im_w", im.shape[1])
if dannce_net and params["net"] is None:
# Here we assume that if the network and expval are specified by the user
# then there is no reason to infer anything. net + expval compatibility
# are subsequently verified during check_config()
#
# If both the net and expval are unspecified, then we use the simpler
# 'net_type' + 'train_mode' to select the correct network and set expval.
# During prediction, the train_mode might be missing, and in any case only the
# expval needs to be set.
if params["net_type"] is None:
raise Exception(
"Without a net name, net_type must be specified"
)
if not prediction and params["train_mode"] is None:
            raise Exception("Need to specify train_mode for DANNCE training")
if params["net_type"] == "AVG":
print_and_set(params, "expval", True)
elif params["net_type"] == "MAX":
print_and_set(params, "expval", False)
else:
raise Exception("{} not a valid net_type".format(params["net_type"]))
# if not prediction:
if params["net_type"] == "AVG" and params["train_mode"] == "finetune":
print_and_set(params, "net", "finetune_AVG")
elif params["net_type"] == "AVG":
# This is the network for training from scratch.
# This will also set the network for "continued", but that network
# will be ignored, as for continued training the full model file
# is loaded in without a call to construct the network. However, a value
# for params['net'] is still required for initialization
print_and_set(params, "net", "unet3d_big_expectedvalue")
elif params["net_type"] == "MAX" and params["train_mode"] == "finetune":
print_and_set(params, "net", "finetune_MAX")
elif params["net_type"] == "MAX":
print_and_set(params, "net", "unet3d_big")
elif dannce_net and params["expval"] is None:
if "AVG" in params["net"] or "expected" in params["net"]:
print_and_set(params, "expval", True)
else:
print_and_set(params, "expval", False)
if dannce_net:
        # Infer crop_height and crop_width if None. Just use max dims of video, as
# DANNCE does not need to crop.
if params["crop_height"] is None or params["crop_width"] is None:
im_h = []
im_w = []
for i in range(len(params["camnames"])):
viddir = os.path.join(params["viddir"], params["camnames"][i])
if not params["vid_dir_flag"]:
# add intermediate directory to path
viddir = os.path.join(params["viddir"], params["camnames"][i], os.listdir(viddir)[0])
video_files = os.listdir(viddir)
camf = os.path.join(viddir, video_files[0])
v = imageio.get_reader(camf)
im = v.get_data(0)
v.close()
im_h.append(im.shape[0])
im_w.append(im.shape[1])
if params["crop_height"] is None:
print_and_set(params, "crop_height", [0, np.max(im_h)])
if params["crop_width"] is None:
print_and_set(params, "crop_width", [0, np.max(im_w)])
if params["max_num_samples"] is not None:
if params["max_num_samples"] == "max":
print_and_set(params, "maxbatch", "max")
elif isinstance(params["max_num_samples"], (int, np.integer)):
print_and_set(
params,
"maxbatch",
int(params["max_num_samples"] // params["batch_size"]),
)
else:
raise TypeError("max_num_samples must be an int or 'max'")
else:
print_and_set(params, "maxbatch", "max")
if params["start_sample"] is not None:
if isinstance(params["start_sample"], (int, np.integer)):
print_and_set(
params,
"start_batch",
int(params["start_sample"] // params["batch_size"]),
)
else:
raise TypeError("start_sample must be an int.")
else:
print_and_set(params, "start_batch", 0)
if params["vol_size"] is not None:
print_and_set(params, "vmin", -1 * params["vol_size"] / 2)
print_and_set(params, "vmax", params["vol_size"] / 2)
    # There will be strange behavior if using a mirror acquisition system and cropping images
if params["mirror"] and params["crop_height"][-1] != params["raw_im_h"]:
msg = "Note: You are using a mirror acquisition system with image cropping."
msg = msg + " All coordinates will be flipped relative to the raw image height, so ensure that your labels are also in that reference frame."
warnings.warn(msg)
return params
def print_and_set(params, varname, value):
# Should add new values to params in place, no need to return
params[varname] = value
print("Setting {} to {}.".format(varname, params[varname]))
def check_config(params, dannce_net, prediction):
"""
Add parameter checks and restrictions here.
"""
check_camnames(params)
if params["exp"] is not None:
for expdict in params["exp"]:
check_camnames(expdict)
if dannce_net:
check_net_expval(params)
check_vmin_vmax(params)
def check_vmin_vmax(params):
for v in ["vmin", "vmax", "nvox"]:
if params[v] is None:
raise Exception(
"{} not in parameters. Please add it, or use vol_size instead of vmin and vmax".format(
v
)
)
def get_ft_wt(params):
if params["dannce_finetune_weights"] is not None:
weights = os.listdir(params["dannce_finetune_weights"])
weights = [f for f in weights if ".hdf5" in f]
weights = weights[0]
return os.path.join(
params["dannce_finetune_weights"], weights
)
def check_camnames(camp):
"""
Raises an exception if camera names contain '_'
"""
if "camnames" in camp:
for cam in camp["camnames"]:
if "_" in cam:
raise Exception("Camera names cannot contain '_' ")
def check_net_expval(params):
"""
Raise an exception if the network and expval (i.e. AVG/MAX) are incompatible
"""
if params["net"] is None:
raise Exception("net is None. You must set either net or net_type.")
if params["net_type"] is not None:
if (
params["net_type"] == 'AVG'
and "AVG" not in params["net"]
and "expected" not in params["net"]
):
raise Exception("net_type is set to AVG, but you are using a MAX network")
if (
params["net_type"] == 'MAX'
and "MAX" not in params["net"]
and params["net"] != "unet3d_big"
):
            raise Exception("net_type is set to MAX, but you are using an AVG network")
if (
params["expval"]
and "AVG" not in params["net"]
and "expected" not in params["net"]
):
raise Exception("expval is set to True but you are using a MAX network")
if (
not params["expval"]
and "MAX" not in params["net"]
and params["net"] != "unet3d_big"
):
raise Exception("expval is set to False but you are using an AVG network")
def copy_config(RESULTSDIR, main_config, io_config):
"""
Copies config files into the results directory, and creates results
directory if necessary
"""
print("Saving results to: {}".format(RESULTSDIR))
if not os.path.exists(RESULTSDIR):
os.makedirs(RESULTSDIR)
mconfig = os.path.join(
RESULTSDIR, "copy_main_config_" + main_config.split(os.sep)[-1]
)
dconfig = os.path.join(RESULTSDIR, "copy_io_config_" + io_config.split(os.sep)[-1])
shutil.copyfile(main_config, mconfig)
shutil.copyfile(io_config, dconfig)
def make_data_splits(samples, params, RESULTSDIR, num_experiments):
"""
Make train/validation splits from list of samples, or load in a specific
list of sampleIDs if desired.
"""
# TODO: Switch to .mat from .pickle so that these lists are easier to read
# and change.
partition = {}
if params["load_valid"] is None:
all_inds = np.arange(len(samples))
# extract random inds from each set for validation
v = params["num_validation_per_exp"]
valid_inds = []
if params["num_validation_per_exp"] > 0: # if 0, do not perform validation
for e in range(num_experiments):
tinds = [
i for i in range(len(samples)) if int(samples[i].split("_")[0]) == e
]
valid_inds = valid_inds + list(
np.random.choice(tinds, (v,), replace=False)
)
valid_inds = list(np.sort(valid_inds))
train_inds = [i for i in all_inds if i not in valid_inds]
assert (set(valid_inds) & set(train_inds)) == set()
partition["valid_sampleIDs"] = samples[valid_inds]
partition["train_sampleIDs"] = samples[train_inds]
# Save train/val inds
with open(os.path.join(RESULTSDIR, "val_samples.pickle"), "wb") as f:
cPickle.dump(partition["valid_sampleIDs"], f)
with open(os.path.join(RESULTSDIR, "train_samples.pickle"), "wb") as f:
cPickle.dump(partition["train_sampleIDs"], f)
else:
# Load validation samples from elsewhere
with open(os.path.join(params["load_valid"], "val_samples.pickle"), "rb",) as f:
partition["valid_sampleIDs"] = cPickle.load(f)
partition["train_sampleIDs"] = [
f for f in samples if f not in partition["valid_sampleIDs"]
]
return partition
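# Hedged sketch (not from dannce): make_data_splits above draws, per experiment, a
# fixed number of validation indices at random and keeps the rest for training.
# The sample IDs below are made up; real IDs look like "<experimentID>_<sampleID>".
def _demo_train_valid_split(num_validation_per_exp=2, num_experiments=2):
    import numpy as np
    samples = np.array(["0_10", "0_11", "0_12", "0_13", "1_20", "1_21", "1_22"])
    valid_inds = []
    for e in range(num_experiments):
        tinds = [i for i in range(len(samples)) if int(samples[i].split("_")[0]) == e]
        valid_inds += list(np.random.choice(tinds, (num_validation_per_exp,), replace=False))
    valid_inds = sorted(valid_inds)
    train_inds = [i for i in range(len(samples)) if i not in valid_inds]
    return samples[train_inds], samples[valid_inds]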
def rename_weights(traindir, kkey, mon):
"""
At the end of DANNCe or COM training, rename the best weights file with the epoch #
and value of the monitored quantity
"""
# First load in the training.csv
r = np.genfromtxt(os.path.join(traindir, "training.csv"), delimiter=",", names=True)
e = r["epoch"]
q = r[mon]
minq = np.min(q)
beste = e[np.argmin(q)]
newname = "weights." + str(int(beste)) + "-" + "{:.5f}".format(minq) + ".hdf5"
os.rename(os.path.join(traindir, kkey), os.path.join(traindir, newname))
def make_paths_safe(params):
"""Given a parameter dictionary, loops through the keys and replaces any \\ or / with os.sep
to promote OS agnosticism
"""
for key in params.keys():
if isinstance(params[key], str):
params[key] = params[key].replace("/", os.sep)
params[key] = params[key].replace("\\", os.sep)
return params
def trim_COM_pickle(fpath, start_sample, end_sample, opath=None):
"""Trim dictionary entries to the range [start_sample, end_sample].
spath is the output path for saving the trimmed COM dictionary, if desired
"""
with open(fpath, "rb") as f:
save_data = cPickle.load(f)
sd = {}
for key in save_data:
if key >= start_sample and key <= end_sample:
sd[key] = save_data[key]
with open(opath, "wb") as f:
cPickle.dump(sd, f)
return sd
def save_params(outdir, params):
"""
Save copy of params to outdir as .mat file
"""
sio.savemat(os.path.join(outdir, 'copy_params.mat'),
prepare_save_metadata(params))
return True
def make_none_safe(pdict):
if isinstance(pdict, dict):
for key in pdict:
pdict[key] = make_none_safe(pdict[key])
else:
if pdict is None or (isinstance(pdict, list) and None in pdict) or (isinstance(pdict, tuple) and None in pdict):
return "None"
else:
return pdict
def prepare_save_metadata(params):
"""
To save metadata, i.e. the prediction param values associated with COM or DANNCE
output, we need to convert loss and metrics and net into names, and remove
the 'experiment' field
"""
    # Need to convert None to string but still want to conserve the metadata structure
# format, so we don't want to convert the whole dict to a string
meta = params.copy()
if "experiment" in meta:
del meta["experiment"]
if "loss" in meta:
meta["loss"] = meta["loss"].__name__
if "net" in meta:
meta["net"] = meta["net"].__name__
if "metric" in meta:
meta["metric"] = [
f.__name__ if not isinstance(f, str) else f for f in meta["metric"]
]
make_none_safe(meta)
return meta
def save_COM_dannce_mat(params, com3d, sampleID):
"""
Instead of saving 3D COM to com3d.mat, save it into the dannce.mat file, which
streamlines subsequent dannce access.
"""
com = {}
com["com3d"] = com3d
com["sampleID"] = sampleID
com["metadata"] = prepare_save_metadata(params)
# Open dannce.mat file, add com and re-save
print("Saving COM predictions to " + params["label3d_file"])
rr = sio.loadmat(params["label3d_file"])
# For safety, save old file to temp and delete it at the end
sio.savemat(params["label3d_file"]+".temp", rr)
rr["com"] = com
sio.savemat(params["label3d_file"], rr)
os.remove(params["label3d_file"]+".temp")
def save_COM_checkpoint(save_data, RESULTSDIR, datadict_, cameras, params):
"""
Saves COM pickle and matfiles
"""
# Save undistorted 2D COMs and their 3D triangulations
f = open(os.path.join(RESULTSDIR, "com3d.pickle"), "wb")
cPickle.dump(save_data, f)
f.close()
# We need to remove the eID in front of all the keys in datadict
# for prepare_COM to run properly
datadict_save = {}
for key in datadict_:
datadict_save[int(float(key.split("_")[-1]))] = datadict_[key]
_, com3d_dict = serve_data_DANNCE.prepare_COM(
os.path.join(RESULTSDIR, "com3d.pickle"),
datadict_save,
comthresh=0,
weighted=False,
camera_mats=cameras,
method="median",
)
cfilename = os.path.join(RESULTSDIR, "com3d.mat")
print("Saving 3D COM to {}".format(cfilename))
samples_keys = list(com3d_dict.keys())
c3d = np.zeros((len(samples_keys), 3))
for i in range(len(samples_keys)):
c3d[i] = com3d_dict[samples_keys[i]]
sio.savemat(
cfilename,
{
"sampleID": samples_keys,
"com": c3d,
"metadata": prepare_save_metadata(params),
},
)
# Also save a copy into the label3d file
save_COM_dannce_mat(params, c3d, samples_keys)
def inherit_config(child, parent, keys):
"""
If a key in keys does not exist in child, assigns the key-value in parent to
child.
"""
for key in keys:
if key not in child.keys():
child[key] = parent[key]
print(
"{} not found in io.yaml file, falling back to main config".format(key)
)
return child
def grab_predict_label3d_file(defaultdir=""):
"""
Finds the paths to the training experiment yaml files.
"""
def_ep = os.path.join(".", defaultdir)
label3d_files = os.listdir(def_ep)
label3d_files = [
os.path.join(def_ep, f) for f in label3d_files if "dannce.mat" in f
]
label3d_files.sort()
if len(label3d_files) == 0:
raise Exception("Did not find any *dannce.mat file in {}".format(def_ep))
print("Using the following *dannce.mat files: {}".format(label3d_files[0]))
return label3d_files[0]
def load_expdict(params, e, expdict, _DEFAULT_VIDDIR):
"""
Load in camnames and video directories and label3d files for a single experiment
during training.
"""
exp = params.copy()
exp = make_paths_safe(exp)
exp["label3d_file"] = expdict["label3d_file"]
exp["base_exp_folder"] = os.path.dirname(exp["label3d_file"])
if "viddir" not in expdict:
# if the videos are not at the _DEFAULT_VIDDIR, then it must
# be specified in the io.yaml experiment portion
exp["viddir"] = os.path.join(exp["base_exp_folder"], _DEFAULT_VIDDIR)
else:
exp["viddir"] = expdict["viddir"]
print("Experiment {} using videos in {}".format(e, exp["viddir"]))
l3d_camnames = io.load_camnames(expdict["label3d_file"])
if "camnames" in expdict:
exp["camnames"] = expdict["camnames"]
elif l3d_camnames is not None:
exp["camnames"] = l3d_camnames
print("Experiment {} using camnames: {}".format(e, exp["camnames"]))
return exp
def batch_rgb2gray(imstack):
"""Convert to gray image-wise.
batch dimension is first.
"""
grayim = np.zeros((imstack.shape[0], imstack.shape[1], imstack.shape[2]), "float32")
for i in range(grayim.shape[0]):
grayim[i] = rgb2gray(imstack[i].astype("uint8"))
return grayim
def return_tile(imstack, fac=2):
"""Crop a larger image into smaller tiles without any overlap."""
height = imstack.shape[1] // fac
width = imstack.shape[2] // fac
out = np.zeros(
(imstack.shape[0] * fac * fac, height, width, imstack.shape[3]), "float32"
)
cnt = 0
for i in range(imstack.shape[0]):
for j in np.arange(0, imstack.shape[1], height):
for k in np.arange(0, imstack.shape[2], width):
out[cnt, :, :, :] = imstack[i, j : j + height, k : k + width, :]
cnt = cnt + 1
return out
def tile2im(imstack, fac=2):
    """Reconstruct larger image from tiled data."""
height = imstack.shape[1]
width = imstack.shape[2]
out = np.zeros(
(imstack.shape[0] // (fac * fac), height * fac, width * fac, imstack.shape[3]),
"float32",
)
cnt = 0
for i in range(out.shape[0]):
for j in np.arange(0, out.shape[1], height):
for k in np.arange(0, out.shape[2], width):
out[i, j : j + height, k : k + width, :] = imstack[cnt]
cnt += 1
return out
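# Hedged check (not from dannce): return_tile and tile2im above are meant to be
# inverses whenever the image height and width divide evenly by `fac`. The small
# shapes below are arbitrary and chosen only to make the round trip easy to verify.
def _demo_tile_roundtrip():
    import numpy as np
    imstack = np.arange(2 * 8 * 8 * 3, dtype="float32").reshape(2, 8, 8, 3)
    tiles = return_tile(imstack, fac=2)          # (8, 4, 4, 3): four tiles per image
    restored = tile2im(tiles, fac=2)             # back to (2, 8, 8, 3)
    np.testing.assert_array_equal(restored, imstack)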
def downsample_batch(imstack, fac=2, method="PIL"):
"""Downsample each image in a batch."""
if method == "PIL":
out = np.zeros(
(
imstack.shape[0],
imstack.shape[1] // fac,
imstack.shape[2] // fac,
imstack.shape[3],
),
"float32",
)
if out.shape[-1] == 3:
# this is just an RGB image, so no need to loop over channels with PIL
for i in range(imstack.shape[0]):
out[i] = np.array(
PIL.Image.fromarray(imstack[i].astype("uint8")).resize(
(out.shape[2], out.shape[1]), resample=PIL.Image.LANCZOS
)
)
else:
for i in range(imstack.shape[0]):
for j in range(imstack.shape[3]):
out[i, :, :, j] = np.array(
PIL.Image.fromarray(imstack[i, :, :, j]).resize(
(out.shape[2], out.shape[1]), resample=PIL.Image.LANCZOS
)
)
elif method == "dsm":
out = np.zeros(
(
imstack.shape[0],
imstack.shape[1] // fac,
imstack.shape[2] // fac,
imstack.shape[3],
),
"float32",
)
for i in range(imstack.shape[0]):
for j in range(imstack.shape[3]):
out[i, :, :, j] = dsm(imstack[i, :, :, j], (fac, fac))
elif method == "nn":
out = imstack[:, ::fac, ::fac]
elif fac > 1:
raise Exception("Downfac > 1. Not a valid downsampling method")
return out
def batch_maximum(imstack):
"""Find the location of the maximum for each image in a batch."""
maxpos = np.zeros((imstack.shape[0], 2))
for i in range(imstack.shape[0]):
if np.isnan(imstack[i, 0, 0]):
maxpos[i, 0] = np.nan
maxpos[i, 1] = np.nan
else:
ind = np.unravel_index(
np.argmax(np.squeeze(imstack[i]), axis=None),
np.squeeze(imstack[i]).shape,
)
maxpos[i, 0] = ind[1]
maxpos[i, 1] = ind[0]
return maxpos
def generate_readers(
viddir, camname, minopt=0, maxopt=300000, pathonly=False, extension=".mp4"
):
"""Open all mp4 objects with imageio, and return them in a dictionary."""
out = {}
mp4files = [
os.path.join(camname, f)
for f in os.listdir(os.path.join(viddir, camname))
if extension in f
and int(f.rsplit(extension)[0]) <= maxopt
and int(f.rsplit(extension)[0]) >= minopt
]
# This is a trick (that should work) for getting rid of
# awkward sub-directory folder names when they are being used
mp4files_scrub = [
os.path.join(
os.path.normpath(f).split(os.sep)[0], os.path.normpath(f).split(os.sep)[-1]
)
for f in mp4files
]
pixelformat = "yuv420p"
input_params = []
output_params = []
for i in range(len(mp4files)):
if pathonly:
out[mp4files_scrub[i]] = os.path.join(viddir, mp4files[i])
else:
            print("NOTE: Ignoring {} files numbered above {}".format(extension, maxopt))
out[mp4files_scrub[i]] = imageio.get_reader(
os.path.join(viddir, mp4files[i]),
pixelformat=pixelformat,
input_params=input_params,
output_params=output_params,
)
return out
def cropcom(im, com, size=512):
"""Crops single input image around the coordinates com."""
minlim_r = int(np.round(com[1])) - size // 2
maxlim_r = int(np.round(com[1])) + size // 2
minlim_c = int(np.round(com[0])) - size // 2
maxlim_c = int(np.round(com[0])) + size // 2
out = im[np.max([minlim_r, 0]) : maxlim_r, np.max([minlim_c, 0]) : maxlim_c, :]
dim = out.shape[2]
# pad with zeros if region ended up outside the bounds of the original image
if minlim_r < 0:
out = np.concatenate(
(np.zeros((abs(minlim_r), out.shape[1], dim)), out), axis=0
)
if maxlim_r > im.shape[0]:
out = np.concatenate(
(out, np.zeros((maxlim_r - im.shape[0], out.shape[1], dim))), axis=0
)
if minlim_c < 0:
out = np.concatenate(
(np.zeros((out.shape[0], abs(minlim_c), dim)), out), axis=1
)
if maxlim_c > im.shape[1]:
out = np.concatenate(
(out, np.zeros((out.shape[0], maxlim_c - im.shape[1], dim))), axis=1
)
return out
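# Hedged sketch (not from dannce): when the requested crop extends past the image
# border, cropcom above zero-pads so the output is always size x size. The toy
# image and COM below are assumptions chosen to hit the top/left padding branches.
def _demo_cropcom_padding():
    import numpy as np
    im = np.ones((100, 100, 3), dtype="float32")
    out = cropcom(im, com=(10, 10), size=64)     # COM near the top-left corner
    assert out.shape == (64, 64, 3)
    assert np.all(out[:22, :, :] == 0) and np.all(out[:, :22, :] == 0)  # zero padding
    return out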
def write_config(resultsdir, configdict, message, filename="modelconfig.cfg"):
"""Write a dictionary of k-v pairs to file.
A much more customizable configuration writer. Accepts a dictionary of
key-value pairs and just writes them all to file,
together with a custom message
"""
f = open(resultsdir + filename, "w")
for key in configdict:
f.write("{}: {}\n".format(key, configdict[key]))
f.write("message:" + message)
def read_config(filename):
"""Read configuration file.
:param filename: Path to configuration file.
"""
with open(filename) as f:
CONFIG_PARAMS = yaml.safe_load(f)
return CONFIG_PARAMS
def plot_markers_2d(im, markers, newfig=True):
"""Plot markers in two dimensions."""
if newfig:
plt.figure()
plt.imshow((im - np.min(im)) / (np.max(im) - np.min(im)))
for mark in range(markers.shape[-1]):
ind = np.unravel_index(
np.argmax(markers[:, :, mark], axis=None), markers[:, :, mark].shape
)
plt.plot(ind[1], ind[0], ".r")
def preprocess_3d(im_stack):
"""Easy inception-v3 style image normalization across a set of images."""
im_stack /= 127.5
im_stack -= 1.0
return im_stack
def norm_im(im):
"""Normalize image."""
return (im - np.min(im)) / (np.max(im) - np.min(im))
def plot_markers_3d(stack, nonan=True):
"""Return the 3d coordinates for each of the peaks in probability maps."""
x = []
y = []
z = []
for mark in range(stack.shape[-1]):
ind = np.unravel_index(
np.argmax(stack[:, :, :, mark], axis=None), stack[:, :, :, mark].shape
)
if ~np.isnan(stack[0, 0, 0, mark]) and nonan:
x.append(ind[1])
y.append(ind[0])
z.append(ind[2])
elif ~np.isnan(stack[0, 0, 0, mark]) and not nonan:
x.append(ind[1])
y.append(ind[0])
z.append(ind[2])
elif not nonan:
x.append(np.nan)
y.append(np.nan)
z.append(np.nan)
return x, y, z
def plot_markers_3d_tf(stack, nonan=True):
"""Return the 3d coordinates for each of the peaks in probability maps."""
import tensorflow as tf
with tf.device(stack.device):
n_mark = stack.shape[-1]
indices = tf.math.argmax(tf.reshape(stack, [-1, n_mark]), output_type="int32")
inds = unravel_index(indices, stack.shape[:-1])
if ~tf.math.reduce_any(tf.math.is_nan(stack[0, 0, 0, :])) and (
nonan or not nonan
):
x = inds[1]
y = inds[0]
z = inds[2]
elif not nonan:
x = tf.Variable(tf.cast(inds[1], "float32"))
y = tf.Variable(tf.cast(inds[0], "float32"))
            z = tf.Variable(tf.cast(inds[2], "float32"))
nans = tf.math.is_nan(stack[0, 0, 0, :])
for mark in range(0, n_mark):
if nans[mark]:
x[mark].assign(np.nan)
y[mark].assign(np.nan)
z[mark].assign(np.nan)
return x, y, z
def plot_markers_3d_torch(stack, nonan=True):
"""Return the 3d coordinates for each of the peaks in probability maps."""
import torch
n_mark = stack.shape[-1]
index = stack.flatten(0, 2).argmax(dim=0).to(torch.int32)
inds = unravel_index(index, stack.shape[:-1])
if ~torch.any(torch.isnan(stack[0, 0, 0, :])) and (nonan or not nonan):
x = inds[1]
y = inds[0]
z = inds[2]
elif not nonan:
x = inds[1]
y = inds[0]
z = inds[2]
for mark in range(0, n_mark):
            if torch.isnan(stack[0, 0, 0, mark]):
x[mark] = torch.nan
y[mark] = torch.nan
z[mark] = torch.nan
return x, y, z
def unravel_index(index, shape):
out = []
for dim in reversed(shape):
out.append(index % dim)
index = index // dim
return tuple(reversed(out))
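# Hedged check (not from dannce): for plain integers, unravel_index above matches
# numpy's unravel_index on a C-ordered array; the shape and index are arbitrary.
def _demo_unravel_index():
    import numpy as np
    shape = (4, 5, 6)
    flat_index = 57
    assert unravel_index(flat_index, shape) == np.unravel_index(flat_index, shape)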
def grid_channelwise_max(grid_):
"""Return the max value in each channel over a 3D volume.
input--
grid_: shape (nvox, nvox, nvox, nchannels)
output--
shape (nchannels,)
"""
return np.max(np.max(np.max(grid_, axis=0), axis=0), axis=0)
def moment_3d(im, mesh, thresh=0):
"""Get the normalized spatial moments of the 3d image stack.
inputs--
im: 3d volume confidence map, one for each channel (marker)
i.e. shape (nvox,nvox,nvox,nchannels)
mesh: spatial coordinates for every position on im
thresh: threshold applied to im before calculating moments
"""
x = []
y = []
z = []
for mark in range(im.shape[3]):
# get normalized probabilities
im_norm = (im[:, :, :, mark] * (im[:, :, :, mark] >= thresh)) / np.sum(
im[:, :, :, mark] * (im[:, :, :, mark] >= thresh)
)
x.append(np.sum(mesh[0] * im_norm))
y.append(np.sum(mesh[1] * im_norm))
z.append(np.sum(mesh[2] * im_norm))
return x, y, z
def get_peak_inds(map_):
"""Return the indices of the peak value of an n-d map."""
return np.unravel_index(np.argmax(map_, axis=None), map_.shape)
def get_marker_peaks_2d(stack):
"""Return the concatenated coordinates of all peaks for each map/marker."""
x = []
y = []
for i in range(stack.shape[-1]):
inds = get_peak_inds(stack[:, :, i])
x.append(inds[1])
y.append(inds[0])
return x, y
def savedata_expval(
fname, params, write=True, data=None, num_markers=20, tcoord=True, pmax=False
):
"""Save the expected values."""
if data is None:
f = open(fname, "rb")
data = cPickle.load(f)
f.close()
d_coords = np.zeros((len(list(data.keys())), 3, num_markers))
t_coords = np.zeros((len(list(data.keys())), 3, num_markers))
sID = np.zeros((len(list(data.keys())),))
p_max = np.zeros((len(list(data.keys())), num_markers))
for (i, key) in enumerate(data.keys()):
d_coords[i] = data[key]["pred_coord"]
if tcoord:
t_coords[i] = np.reshape(data[key]["true_coord_nogrid"], (3, num_markers))
if pmax:
p_max[i] = data[key]["pred_max"]
sID[i] = data[key]["sampleID"]
sdict = {
"pred": d_coords,
"data": t_coords,
"p_max": p_max,
"sampleID": sID,
"metadata": prepare_save_metadata(params),
}
if write and data is None:
sio.savemat(
fname.split(".pickle")[0] + ".mat", sdict,
)
elif write and data is not None:
sio.savemat(fname, sdict)
return d_coords, t_coords, p_max, sID
def savedata_tomat(
fname,
params,
vmin,
vmax,
nvox,
write=True,
data=None,
num_markers=20,
tcoord=True,
tcoord_scale=True,
addCOM=None,
):
"""Save pickled data to a mat file.
From a save_data structure saved to a *.pickle file, save a matfile
with useful variables for easier manipulation in matlab.
Also return pred_out_world and other variables for plotting within jupyter
"""
if data is None:
f = open(fname, "rb")
data = cPickle.load(f)
f.close()
d_coords = np.zeros((list(data.keys())[-1] + 1, 3, num_markers))
t_coords = np.zeros((list(data.keys())[-1] + 1, 3, num_markers))
p_max = np.zeros((list(data.keys())[-1] + 1, num_markers))
log_p_max = np.zeros((list(data.keys())[-1] + 1, num_markers))
sID = np.zeros((list(data.keys())[-1] + 1,))
for (i, key) in enumerate(data.keys()):
d_coords[i] = data[key]["pred_coord"]
if tcoord:
t_coords[i] = np.reshape(data[key]["true_coord_nogrid"], (3, num_markers))
p_max[i] = data[key]["pred_max"]
log_p_max[i] = data[key]["logmax"]
sID[i] = data[key]["sampleID"]
vsize = (vmax - vmin) / nvox
# First, need to move coordinates over to centers of voxels
pred_out_world = vmin + d_coords * vsize + vsize / 2
if tcoord and tcoord_scale:
t_coords = vmin + t_coords * vsize + vsize / 2
if addCOM is not None:
# We use the passed comdict to add back in the com, this is useful
        # if one wants to bootstrap on these values for COMnet or otherwise
for i in range(len(sID)):
            pred_out_world[i] = pred_out_world[i] + addCOM[int(sID[i])][:, np.newaxis]
sdict = {
"pred": pred_out_world,
"data": t_coords,
"p_max": p_max,
"sampleID": sID,
"log_pmax": log_p_max,
"metadata": prepare_save_metadata(params),
}
if write and data is None:
sio.savemat(
fname.split(".pickle")[0] + ".mat", sdict,
)
elif write and data is not None:
sio.savemat(
fname, sdict,
)
return pred_out_world, t_coords, p_max, log_p_max, sID
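# Hedged sketch (not from dannce): the key conversion in savedata_tomat above maps
# a voxel index to world coordinates as vmin + index * voxel_size + voxel_size / 2,
# i.e. the center of that voxel. The grid bounds and index below are made up.
def _demo_voxel_to_world():
    vmin, vmax, nvox = -120.0, 120.0, 80
    vsize = (vmax - vmin) / nvox                 # 3 units per voxel, hypothetical
    voxel_index = 40                             # middle of the grid
    return vmin + voxel_index * vsize + vsize / 2   # -> 1.5, center of that voxel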
def spatial_expval(map_):
"""Calculate the spatial expected value of the input.
Note there is probably underflow here that I am ignoring, because this
doesn't need to be *that* accurate
"""
map_ = map_ / np.sum(map_)
x, y = np.meshgrid(np.arange(map_.shape[1]), np.arange(map_.shape[0]))
return np.sum(map_ * x), np.sum(map_ * y)
def spatial_var(map_):
"""Calculate the spatial variance of the input."""
expx, expy = spatial_expval(map_)
map_ = map_ / np.sum(map_)
x, y = np.meshgrid(np.arange(map_.shape[1]), np.arange(map_.shape[0]))
return np.sum(map_ * ((x - expx) ** 2 + (y - expy) ** 2))
def spatial_entropy(map_):
"""Calculate the spatial entropy of the input."""
map_ = map_ / np.sum(map_)
return -1 * np.sum(map_ * np.log(map_))
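# Hedged sketch (not from dannce): spatial_expval/spatial_var/spatial_entropy above
# treat a 2D confidence map as a probability mass function, so a symmetric toy map
# puts the expected value at its center. The Gaussian bump below is an assumption.
def _demo_spatial_stats():
    import numpy as np
    yy, xx = np.mgrid[0:11, 0:11]
    map_ = np.exp(-((xx - 5.0) ** 2 + (yy - 5.0) ** 2) / 4.0)   # peak at (5, 5)
    ex, ey = spatial_expval(map_)
    np.testing.assert_allclose([ex, ey], [5.0, 5.0], atol=1e-6)
    return spatial_var(map_), spatial_entropy(map_)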
def dupe_params(exp, dupes, n_views):
"""
    When the number of views (n_views) required
as input to the network is greater than the
number of actual cameras (e.g. when trying to
fine-tune a 6-camera network on data from a
2-camera system), automatically duplicate necessary
parameters to match the required n_views.
"""
for d in dupes:
val = exp[d]
if n_views % len(val) == 0:
num_reps = n_views // len(val)
exp[d] = val * num_reps
else:
prompt = "The length of the {} list must divide evenly into {}. Duplicate a subset of the views starting from the first camera (y/n)?".format(
d, n_views
)
val_in = input(prompt)
if val_in == 'y':
num_reps = n_views // len(val)
num_extra = n_views % len(val)
duped = val * num_reps
for i in range(num_extra):
duped.append(duped[i])
print("Duping {}. Changed from {} to {}".format(d, val, duped))
exp[d] = duped
else:
raise Exception(
"The length of the {} list must divide evenly into {}. Exiting".format(
d, n_views
)
)
return exp
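# Hedged sketch (not from dannce): in the non-interactive case, dupe_params above
# simply repeats each listed value until the required number of views is reached.
# The camera names and n_views below are made up.
def _demo_dupe_params_even(n_views=6):
    exp = {"camnames": ["Camera1", "Camera2"]}
    for d in ["camnames"]:
        val = exp[d]
        if n_views % len(val) == 0:
            exp[d] = val * (n_views // len(val))
    return exp   # -> camnames repeated three times, giving six entries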
| 33.469974
| 154
| 0.579478
|
0cb725d0ce1c6541bcb13849b3c1ca07f7fae4a4
| 1,796
|
py
|
Python
|
BlenderRenderController/BlenderRenderController/Scripts/get_project_info.py
|
Isti115/BlenderRenderController
|
9f3434326ea9b0f223934d354762ccb417253340
|
[
"MIT"
] | 36
|
2016-05-23T12:44:03.000Z
|
2022-01-09T00:15:37.000Z
|
BlenderRenderController/BlenderRenderController/Scripts/get_project_info.py
|
Isti115/BlenderRenderController
|
9f3434326ea9b0f223934d354762ccb417253340
|
[
"MIT"
] | 16
|
2016-06-01T06:36:13.000Z
|
2021-03-09T00:13:42.000Z
|
BlenderRenderController/BlenderRenderController/Scripts/get_project_info.py
|
Isti115/BlenderRenderController
|
9f3434326ea9b0f223934d354762ccb417253340
|
[
"MIT"
] | 11
|
2016-07-02T16:53:00.000Z
|
2021-08-15T05:47:36.000Z
|
import os
import json
import bpy
import re
from bpy import context
from bpy import ops
from bpy import data
blendPath = bpy.context.blend_data.filepath
projName = bpy.path.display_name_from_filepath(blendPath)
# get number of Scenes and active scene name
n_data = bpy.data.scenes
a_data = bpy.context.scene
# get values from strings
N_of_Scenes = str(n_data).partition('[')[-1].rpartition(']')[0]
ActiveScene = str(a_data).partition('("')[-1].rpartition('")')[0]
# set info according to the active Scene
startFrame = bpy.data.scenes[ActiveScene].frame_start
endFrame = bpy.data.scenes[ActiveScene].frame_end
outputPath = bpy.data.scenes[ActiveScene].render.filepath
# get rendering engine
renderingEngine = bpy.context.scene.render.engine
"""
Error code table:
0: no errors
-1: output unset, length = 0
-2: output invalid, no backslashes in path
-3: output is relative, has // at start
"""
# check if relative
rel_chk = outputPath[0:2]
if len(outputPath) == 0:
errorcode = -1
elif outputPath.count("\\") == 0:
errorcode = -2
elif rel_chk == "//":
errorcode = -3
else:
errorcode = 0
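# Editor's sketch: the same validation expressed as a reusable helper, with
# hypothetical example paths in the comments. Not called by the script above.
def classify_output_path(path):
    # mirrors the checks above: -1 unset, -2 no backslash, -3 relative (//), 0 ok
    if len(path) == 0:
        return -1
    elif path.count("\\") == 0:
        return -2
    elif path[0:2] == "//":
        return -3
    return 0
# classify_output_path("")                    # -> -1
# classify_output_path("frames")              # -> -2
# classify_output_path("//render\\frame_")    # -> -3
# classify_output_path("C:\\renders\\frame_") # ->  0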
# os.path.isabs(my_path) | true = absolute, false = relative
# get output dir minus file name
altdir = str(outputPath).rpartition('\\')[:-1][0]
#print( "Proj Name: %s\n" % (projName) )
#print( "Start: %s\n" % (startFrame) )
#print( "end: %s\n" % (endFrame) )
data = { 'ProjectName': projName, 'StartFrame': startFrame, 'EndFrame': endFrame, 'OutputDirectory': outputPath,
'NumScenes': N_of_Scenes, 'ActiveScene': ActiveScene, 'AltDir': altdir, 'ErrorCode': errorcode,
'RenderingEngine': renderingEngine }
jsonData = json.dumps(data, indent=4, skipkeys=True, sort_keys=True)
with open('blend_info.json', 'w') as f:
print(jsonData, file=f)
print(jsonData)
| 24.944444
| 114
| 0.698775
|
a1ef18504dc85cdc9f1ce1e8c7bcb0a0f4013181
| 1,301
|
py
|
Python
|
PreferencePanes/_metadata.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 4
|
2019-03-11T18:05:49.000Z
|
2021-05-22T21:09:09.000Z
|
PreferencePanes/_metadata.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
PreferencePanes/_metadata.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1
|
2019-03-18T18:53:36.000Z
|
2019-03-18T18:53:36.000Z
|
# This file is generated by objective.metadata
#
# Last update: Mon Jul 18 12:18:57 2016
import objc, sys
if sys.maxsize > 2 ** 32:
def sel32or64(a, b): return b
else:
def sel32or64(a, b): return a
if sys.byteorder == 'little':
def littleOrBig(a, b): return a
else:
def littleOrBig(a, b): return b
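# Editor's note: e.g. sel32or64(b'i', b'q') picks b'q' on a 64-bit interpreter and
# b'i' on a 32-bit one; littleOrBig makes the analogous choice based on byte order.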
misc = {
}
constants = '''$NSPrefPaneHelpMenuAnchorKey$NSPrefPaneHelpMenuInfoPListKey$NSPrefPaneHelpMenuTitleKey$NSPreferencePaneCancelUnselectNotification$NSPreferencePaneDoUnselectNotification$NSPreferencePaneSwitchToPaneNotification$NSPreferencePaneUpdateHelpMenuNotification$NSPreferencePrefPaneIsAvailableNotification$'''
enums = '''$NSUnselectCancel@0$NSUnselectLater@2$NSUnselectNow@1$'''
misc.update({'kNSPrefPaneHelpMenuAnchorKey': b'anchor'.decode("utf-8"), 'kNSPrefPaneHelpMenuTitleKey': b'title'.decode("utf-8"), 'kNSPrefPaneHelpMenuInfoPListKey': b'NSPrefPaneHelpAnchors'.decode("utf-8")})
r = objc.registerMetaDataForSelector
objc._updatingMetadata(True)
try:
r(b'NSPreferencePane', b'autoSaveTextFields', {'retval': {'type': 'Z'}})
r(b'NSPreferencePane', b'isSelected', {'retval': {'type': 'Z'}})
r(b'NSPreferencePane', b'replyToShouldUnselect:', {'arguments': {2: {'type': 'Z'}}})
finally:
objc._updatingMetadata(False)
expressions = {}
# END OF FILE
| 40.65625
| 315
| 0.753267
|
281a54250f12daf2f62c8b4b1de93778cc97be51
| 21,938
|
py
|
Python
|
tf_agents/utils/eager_utils_test.py
|
ayansengupta17/agents
|
c5a2f1f57d4fd0070eb75204aa0b1663de3e2c0a
|
[
"Apache-2.0"
] | 2
|
2021-10-30T16:57:37.000Z
|
2021-11-17T10:21:17.000Z
|
tf_agents/utils/eager_utils_test.py
|
ayansengupta17/agents
|
c5a2f1f57d4fd0070eb75204aa0b1663de3e2c0a
|
[
"Apache-2.0"
] | null | null | null |
tf_agents/utils/eager_utils_test.py
|
ayansengupta17/agents
|
c5a2f1f57d4fd0070eb75204aa0b1663de3e2c0a
|
[
"Apache-2.0"
] | 2
|
2020-06-05T18:38:16.000Z
|
2020-07-08T14:41:42.000Z
|
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for tf_agents.utils.eager_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
from absl.testing import parameterized
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.utils import eager_utils
from tf_agents.utils import test_utils
from tensorflow.python.eager import context # TF internal
from tensorflow.python.framework import test_util # TF internal
def input_fn():
tf.compat.v1.set_random_seed(1)
inputs = tf.constant([[1, 2], [2, 3], [3, 4]], dtype=tf.float32)
labels = tf.constant([[0], [1], [2]])
return inputs, labels
class Network(tf.keras.layers.Layer):
def __init__(self, name=None):
super(Network, self).__init__(name=name)
self._layer = tf.keras.layers.Dense(
3, kernel_initializer=tf.compat.v1.initializers.ones(), name='logits')
def call(self, inputs):
return self._layer(inputs)
class Model(object):
def __init__(self, name, network):
self._name = name
self._network = network
def __call__(self, inputs):
return self._network(inputs)
@property
def variables(self):
return self._network.variables
@property
def trainable_variables(self):
return self._network.trainable_variables
@eager_utils.future_in_eager_mode
def loss_fn(self, inputs, labels):
logits = self._network(inputs)
return tf.compat.v1.losses.sparse_softmax_cross_entropy(labels, logits)
@eager_utils.future_in_eager_mode
def minimize_loss(loss, optimizer):
return optimizer.minimize(loss)
class Aux(object):
def __init__(self):
pass
def method(self, inputs, labels, param=0):
assert isinstance(self, Aux), self
return inputs, labels, tf.convert_to_tensor(value=param)
def aux_function(inputs, labels, param=0):
return inputs, labels, tf.convert_to_tensor(value=param)
@parameterized.named_parameters(
('.func_eager', aux_function, context.eager_mode),
('.func_graph', aux_function, context.graph_mode),
('.method_eager', Aux().method, context.eager_mode),
('.method_graph', Aux().method, context.graph_mode),
)
class FutureTest(test_utils.TestCase, parameterized.TestCase):
def testCreate(self, func_or_method, run_mode):
with run_mode():
future = eager_utils.Future(input_fn)
self.assertTrue(callable(future))
self.assertIsInstance(future, eager_utils.Future)
inputs, labels = future()
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
def testArgsAtInit(self, func_or_method, run_mode):
with run_mode():
inputs, labels = input_fn()
future = eager_utils.Future(func_or_method, inputs, labels)
inputs, labels, param = future()
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
self.assertEqual(self.evaluate(param), 0)
def testArgsAtCall(self, func_or_method, run_mode):
with run_mode():
inputs, labels = input_fn()
future = eager_utils.Future(func_or_method)
inputs, labels, param = future(inputs, labels)
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
self.assertEqual(self.evaluate(param), 0)
def testArgsAtCallOverwriteKwargsInit(self, func_or_method, run_mode):
with run_mode():
inputs, labels = input_fn()
future = eager_utils.Future(func_or_method, param=1)
inputs, labels, param = future(inputs, labels, 0)
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
self.assertEqual(self.evaluate(param), 0)
def testKWArgsAtInit(self, func_or_method, run_mode):
with run_mode():
inputs, labels = input_fn()
future = eager_utils.Future(
func_or_method, inputs=inputs, labels=labels, param=1)
inputs, labels, param = future()
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
self.assertEqual(self.evaluate(param), 1)
def testKWArgsAtCall(self, func_or_method, run_mode):
with run_mode():
inputs, labels = input_fn()
future = eager_utils.Future(func_or_method)
inputs, labels, param = future(inputs=inputs, labels=labels, param=1)
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
self.assertEqual(self.evaluate(param), 1)
def testArgsAtInitKWArgsAtInit(self, func_or_method, run_mode):
with run_mode():
inputs, labels = input_fn()
future = eager_utils.Future(func_or_method, inputs, labels=labels)
inputs, labels, param = future()
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
self.assertEqual(self.evaluate(param), 0)
def testArgsAtInitKWArgsAtCall(self, func_or_method, run_mode):
with run_mode():
inputs, labels = input_fn()
future = eager_utils.Future(func_or_method, inputs, param=1)
inputs, labels, param = future(labels=labels)
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
self.assertEqual(self.evaluate(param), 1)
def testOverwriteKWArgsAtCall(self, func_or_method, run_mode):
with run_mode():
inputs, labels = input_fn()
future = eager_utils.Future(func_or_method, param=-1)
inputs, labels, param = future(inputs, labels, param=1)
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
self.assertEqual(self.evaluate(param), 1)
def testArgsatInitOverwritedKWArgsAtCall(self, func_or_method, run_mode):
with run_mode():
inputs, labels = input_fn()
future = eager_utils.Future(func_or_method, inputs, param=-1)
inputs, labels, param = future(labels=labels, param=1)
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
self.assertEqual(self.evaluate(param), 1)
def testPartialArgsAtCallRaisesError(self, func_or_method, run_mode):
with run_mode():
inputs, labels = input_fn()
future = eager_utils.Future(func_or_method, inputs)
with self.assertRaisesRegexp(TypeError, 'argument'):
future(labels)
def testArgsAtInitArgsReplacedAtCall(self, func_or_method, run_mode):
with run_mode():
inputs, labels = input_fn()
future = eager_utils.Future(func_or_method, labels, inputs)
inputs, labels, param = future(inputs, labels)
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
self.assertEqual(self.evaluate(param), 0)
def testArgsAtCallKWArgsAtInit(self, func_or_method, run_mode):
with run_mode():
inputs, labels = input_fn()
future = eager_utils.Future(func_or_method, labels=labels)
inputs, labels, param = future(inputs)
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
self.assertEqual(self.evaluate(param), 0)
def testArgsAtCallKWArgsAtCall(self, func_or_method, run_mode):
with run_mode():
inputs, labels = input_fn()
future = eager_utils.Future(func_or_method)
inputs, labels, param = future(inputs, labels=labels)
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
self.assertEqual(self.evaluate(param), 0)
class FutureInEagerModeTest(test_utils.TestCase):
@test_util.run_in_graph_and_eager_modes()
def testCreate(self):
decorator = eager_utils.future_in_eager_mode(input_fn)
self.assertTrue(callable(decorator))
if context.executing_eagerly():
self.assertTrue(isinstance(decorator(), eager_utils.Future))
inputs, labels = decorator()()
else:
inputs, labels = decorator()
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
def testDecorator(self):
@eager_utils.future_in_eager_mode
def aux_fn(inputs, labels):
return inputs, labels
self.assertTrue(callable(aux_fn))
inputs, labels = input_fn()
outputs = aux_fn(inputs, labels)
if context.executing_eagerly():
self.assertTrue(isinstance(outputs, eager_utils.Future))
inputs, labels = outputs.__call__()
else:
inputs, labels = outputs
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
def testDelayedArgs(self):
@eager_utils.future_in_eager_mode
def aux_fn(inputs, labels):
return inputs, labels
self.assertTrue(callable(aux_fn))
inputs, labels = input_fn()
outputs = aux_fn(inputs, labels)
if context.executing_eagerly():
self.assertTrue(isinstance(outputs, eager_utils.Future))
inputs, labels = outputs.__call__()
else:
inputs, labels = outputs
self.assertAllEqual(self.evaluate(inputs), [[1, 2], [2, 3], [3, 4]])
self.assertAllEqual(self.evaluate(labels), [[0], [1], [2]])
class EagerUtilsTest(test_utils.TestCase):
@test_util.run_in_graph_and_eager_modes()
def testModel(self):
inputs, labels = input_fn()
model = Model('model', Network())
loss = model.loss_fn(inputs, labels)
expected_loss = 1.098612
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(self.evaluate(loss), expected_loss)
@test_util.run_in_graph_and_eager_modes()
def testLossDecreasesAfterTrainStep(self):
inputs, labels = input_fn()
model = Model('model', Network())
loss = model.loss_fn(inputs, labels)
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)
train_step = minimize_loss(loss, optimizer)
initial_loss = 1.098612
final_loss = 1.064379
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(self.evaluate(loss), initial_loss)
self.evaluate(train_step)
self.assertAllClose(self.evaluate(loss), final_loss)
class ClipGradsTest(test_utils.TestCase):
def testClipGrads(self):
xs = tf.Variable(0.0)
grads = tf.constant(4.0)
gradients_to_variables = [(grads, xs)]
clipped_gradients_to_variables = eager_utils.clip_gradient_norms(
gradients_to_variables, 3.0)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAlmostEqual(4.0, self.evaluate(gradients_to_variables[0][0]))
self.assertAlmostEqual(3.0,
self.evaluate(clipped_gradients_to_variables[0][0]))
def testClipGradsIndexedSlices(self):
xs = tf.Variable(0.0)
grads = tf.IndexedSlices(values=tf.constant(4.0),
indices=tf.constant([0]),
dense_shape=None)
gradients_to_variables = [(grads, xs)]
clipped_gradients_to_variables = eager_utils.clip_gradient_norms(
gradients_to_variables, 3.0)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAlmostEqual(
4.0, self.evaluate(gradients_to_variables[0][0].values))
self.assertAlmostEqual(
3.0, self.evaluate(clipped_gradients_to_variables[0][0].values))
def testClipGradsFn(self):
xs = tf.Variable(0.0)
grads = tf.constant(4.0)
gradients_to_variables = [(grads, xs)]
clipped_gradients_to_variables = eager_utils.clip_gradient_norms_fn(3.0)(
gradients_to_variables)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAlmostEqual(4.0, self.evaluate(gradients_to_variables[0][0]))
self.assertAlmostEqual(3.0,
self.evaluate(clipped_gradients_to_variables[0][0]))
class CreateTrainOpTest(test_utils.TestCase):
@test_util.run_in_graph_and_eager_modes()
def testLossDecreasesAfterTrainOp(self):
inputs, labels = input_fn()
model = Model('model', Network())
loss = model.loss_fn(inputs, labels)
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)
train_step = eager_utils.create_train_step(loss, optimizer)
initial_loss = 1.098612
final_loss = 1.064379
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(self.evaluate(train_step), initial_loss)
self.assertAllClose(self.evaluate(loss), final_loss)
@test_util.run_in_graph_and_eager_modes()
def testCreateTrainOpWithTotalLossFn(self):
inputs, labels = input_fn()
model = Model('model', Network())
loss = model.loss_fn(inputs, labels)
model_2 = Model('model_2', Network())
loss_2 = model_2.loss_fn(inputs, labels)
@eager_utils.future_in_eager_mode
def tuple_loss(loss, loss_2):
return (loss() if callable(loss) else loss,
loss_2() if callable(loss_2) else loss_2)
tuple_loss_value = tuple_loss(loss, loss_2)
def first_element(tuple_value):
return tuple_value[0]
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)
loss = eager_utils.create_train_step(
tuple_loss_value, optimizer, total_loss_fn=first_element)
expected_loss = 1.098612
self.evaluate(tf.compat.v1.global_variables_initializer())
train_step_model_0, train_step_model_1 = self.evaluate(loss)
self.assertAllClose(train_step_model_0, expected_loss)
self.assertAllClose(train_step_model_1, expected_loss)
@test_util.run_in_graph_and_eager_modes()
def testMultipleCallsTrainStep(self):
inputs, labels = input_fn()
model = Model('model', Network())
loss = model.loss_fn(inputs, labels)
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)
train_step = eager_utils.create_train_step(loss, optimizer)
initial_loss = 1.098612
final_loss = 1.033917
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(self.evaluate(train_step), initial_loss)
if context.executing_eagerly():
for _ in range(5):
train_step = eager_utils.create_train_step(loss, optimizer)
train_step = eager_utils.create_train_step(loss, optimizer)
self.assertAllClose(self.evaluate(train_step), final_loss)
else:
for _ in range(5):
self.evaluate(train_step)
self.assertAllClose(self.evaluate(train_step), final_loss)
@test_util.run_in_graph_and_eager_modes()
def testVariablesToTrain(self):
inputs, labels = input_fn()
model = Model('model', Network())
if context.executing_eagerly():
variables_to_train = lambda: model.trainable_variables
else:
model(inputs)
variables_to_train = model.trainable_variables
self.assertEqual(len(variables_to_train), 2)
loss = model.loss_fn(inputs, labels)
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)
train_step = eager_utils.create_train_step(
loss, optimizer, variables_to_train=variables_to_train)
expected_loss = 1.098612
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(self.evaluate(train_step), expected_loss)
self.assertEqual(len(model.trainable_variables), 2)
class HasSelfClsArgTest(test_utils.TestCase):
def testDirect(self):
def func():
pass
func2 = lambda: 0
class A(object):
def method(self):
pass
@classmethod
def class_method(cls):
pass
@staticmethod
def static_method():
pass
self.assertFalse(eager_utils.has_self_cls_arg(func))
self.assertFalse(eager_utils.has_self_cls_arg(func2))
self.assertFalse(eager_utils.has_self_cls_arg(A.static_method))
self.assertTrue(eager_utils.has_self_cls_arg(A.method))
self.assertTrue(eager_utils.has_self_cls_arg(A().method))
self.assertTrue(eager_utils.has_self_cls_arg(A.class_method))
self.assertTrue(eager_utils.has_self_cls_arg(A().class_method))
self.assertTrue(eager_utils.has_self_cls_arg(A.__dict__['method']))
self.assertTrue(eager_utils.has_self_cls_arg(A.__dict__['class_method']))
self.assertFalse(eager_utils.has_self_cls_arg(A.__dict__['static_method']))
def testDecorator(self):
def decorator(method):
@functools.wraps(method)
def _decorator(*args, **kwargs):
method(*args, **kwargs)
return _decorator
class A(object):
@decorator
def method(self):
pass
@staticmethod
@decorator
def static_method():
pass
@classmethod
@decorator
def class_method(cls):
pass
self.assertTrue(eager_utils.has_self_cls_arg(A.method))
self.assertTrue(eager_utils.has_self_cls_arg(A.class_method))
self.assertFalse(eager_utils.has_self_cls_arg(A.static_method))
@eager_utils.np_function
def meshgrid(low, high, nx=2, ny=3):
x = np.linspace(low, high, nx)
y = np.linspace(low, high, ny)
return np.meshgrid(x, y)
@eager_utils.np_function(output_dtypes=np.float32)
def mean(x):
return np.mean(x)
@eager_utils.np_function(output_dtypes=lambda x: (x, x))
def repeat(x):
return x, x
@eager_utils.np_function(output_dtypes=lambda x, y: {'x': x, 'y': y})
def dictionary(x, y):
return {'x': x, 'y': y}
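# Editor's note (sketch): np_function wraps a plain NumPy callable so it can be applied
# to Tensors; e.g. mean(tf.constant([1, 2, 3])) evaluates to 2.0 in the tests below.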
class NpFunctionTest(test_utils.TestCase):
@test_util.run_in_graph_and_eager_modes()
def testMeshGrid(self):
xv, yv = meshgrid(tf.constant(0), tf.constant(1))
self.assertAllEqual(self.evaluate(xv), [[0., 1.], [0., 1.], [0., 1.]])
self.assertAllEqual(self.evaluate(yv), [[0., 0.], [.5, .5], [1., 1.]])
xv, yv = meshgrid(tf.constant(0.), tf.constant(1.))
self.assertAllEqual(self.evaluate(xv), [[0., 1.], [0., 1.], [0., 1.]])
self.assertAllEqual(self.evaluate(yv), [[0., 0.], [.5, .5], [1., 1.]])
@test_util.run_in_graph_and_eager_modes()
def testMeshGridKwargs(self):
xv, yv = meshgrid(tf.constant(0), tf.constant(1), nx=2, ny=2)
self.assertAllEqual(self.evaluate(xv), [[0., 1.], [0., 1.]])
self.assertAllEqual(self.evaluate(yv), [[0., 0.], [1., 1.]])
@test_util.run_in_graph_and_eager_modes()
def testVariables(self):
a, b = tf.Variable(0), tf.Variable(1)
xv, yv = meshgrid(a, b, nx=2, ny=2)
self.evaluate(tf.compat.v1.initializers.global_variables())
self.assertAllEqual(self.evaluate(xv), [[0., 1.], [0., 1.]])
self.assertAllEqual(self.evaluate(yv), [[0., 0.], [1., 1.]])
@test_util.run_in_graph_and_eager_modes()
def testGetOutputDtypesInts2Floats(self):
x = tf.constant([1, 2, 3])
mean_x = mean(x)
self.assertEqual(self.evaluate(mean_x), 2.)
@test_util.run_in_graph_and_eager_modes()
def testGetOutputDtypesFloats2Floats(self):
x = tf.constant([1., 2., 3.])
mean_x = mean(x)
self.assertEqual(self.evaluate(mean_x), 2.)
@test_util.run_in_graph_and_eager_modes()
def testIdentityDtypes(self):
x = tf.constant([1])
self.assertAllEqual(self.evaluate(repeat(x)), ([1], [1]))
y = tf.constant([1.])
self.assertAllEqual(self.evaluate(repeat(y)), ([1.], [1.]))
@test_util.run_in_graph_and_eager_modes()
def testInline(self):
square = eager_utils.np_function(np.square)
x = tf.constant([1, 2, 3])
self.assertAllEqual([1, 4, 9], self.evaluate(square(x)))
y = tf.constant([1., 2., 3.])
self.assertAllEqual([1., 4., 9.], self.evaluate(square(y)))
@test_util.run_in_graph_and_eager_modes()
def testOutputDictionary(self):
x = tf.constant([1])
y = tf.constant([1.])
outputs = dictionary(x, y)
self.assertAllEqual([1], self.evaluate(outputs['x']))
self.assertAllEqual([1.], self.evaluate(outputs['y']))
@eager_utils.np_function(output_dtypes=np.float32)
def np_descent(x, d, mu, n_epochs):
n = len(x)
f = 2 / n
y = np.zeros(n)
err = np.zeros(n)
w = np.zeros(2)
grad = np.zeros(2)
for _ in itertools.repeat(None, n_epochs):
np.subtract(d, y, out=err)
grad[:] = [f * np.sum(err), f * np.dot(err, x)]
w = w + mu * grad
y = w[0] + w[1] * x
return w
class NpDescentTest(test_utils.TestCase):
def setUp(self):
np.random.seed(444)
n = 10000
sigma = 0.1
noise = sigma * np.random.randn(n)
self._x = np.linspace(0, 2, n)
self._d = 3 + 2 * self._x + noise
@test_util.run_in_graph_and_eager_modes()
def testSolve(self):
x, d = tf.constant(self._x), tf.constant(self._d)
w = np_descent(x, d, mu=0.001, n_epochs=10000)
self.assertAllClose([2.96, 2.03], self.evaluate(w), atol=0.01, rtol=0.01)
@test_util.run_all_in_graph_and_eager_modes
class DatasetIteratorTest(test_utils.TestCase):
def testIteration(self):
data = np.arange(100)
ds = tf.data.Dataset.from_tensor_slices(data)
itr = eager_utils.dataset_iterator(ds)
for d in data:
self.assertEqual(np.array([d]),
self.evaluate(eager_utils.get_next(itr)))
if __name__ == '__main__':
tf.test.main()
| 34.988836
| 79
| 0.689944
|
5968249a492308032463e35301e09e109ec5fca3
| 616
|
py
|
Python
|
PythonMPI/MandelbrotSeq.py
|
terasakisatoshi/MPI
|
d4064d8bee356c35a63702c5687726945286213c
|
[
"MIT"
] | 1
|
2022-02-07T10:51:53.000Z
|
2022-02-07T10:51:53.000Z
|
PythonMPI/MandelbrotSeq.py
|
terasakisatoshi/MPI
|
d4064d8bee356c35a63702c5687726945286213c
|
[
"MIT"
] | null | null | null |
PythonMPI/MandelbrotSeq.py
|
terasakisatoshi/MPI
|
d4064d8bee356c35a63702c5687726945286213c
|
[
"MIT"
] | 1
|
2022-02-07T10:51:33.000Z
|
2022-02-07T10:51:33.000Z
|
import numpy as np
import matplotlib.pyplot as plt
import time
def mandelbrot(x,y,maxit):
c = x + y*1j  # complex expression
z = 0 + 0j  # complex expression
it=0
while abs(z)<2 and it<maxit:
z=z**2+c
it+=1
return it
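# Worked examples (editor's sketch): the origin never escapes, so it exhausts maxit,
# while c = 2+2j leaves the |z| < 2 disc after a single iteration.
# mandelbrot(0.0, 0.0, 100)  # -> 100
# mandelbrot(2.0, 2.0, 100)  # -> 1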
#main
x1,x2=-2.0,1.0
y1,y2=-1.0,1.0
w,h=150,100
maxit=1025
C=np.zeros([h,w],dtype=int)
dx=(x2-x1)/w
dy=(y2-y1)/h
start=time.time()
for i in range(h):
y=y1+i*dy
for j in range(w):
x=x1+j*dx
C[i,j]=mandelbrot(x,y,maxit)
plt.imshow(C, aspect="equal")
plt.set_cmap("nipy_spectral")  # plt.spectral() was removed in newer Matplotlib; "nipy_spectral" is the renamed colormap
end=time.time()
elapsed_time=end-start
print("time= %s" % elapsed_time)
plt.show()
| 18.117647
| 36
| 0.623377
|
6773f7a2442e21a0af672b9805ba9d407085df63
| 4,999
|
py
|
Python
|
python_analyzer/commands/get_api/_model.py
|
lars-reimann/python-analyzer
|
6e0f12acee1c533ca9ef19bbfdc7cc8337e25696
|
[
"MIT"
] | null | null | null |
python_analyzer/commands/get_api/_model.py
|
lars-reimann/python-analyzer
|
6e0f12acee1c533ca9ef19bbfdc7cc8337e25696
|
[
"MIT"
] | 5
|
2021-10-12T10:39:35.000Z
|
2022-01-01T13:11:19.000Z
|
python_analyzer/commands/get_api/_model.py
|
lars-reimann/python-analyzer
|
6e0f12acee1c533ca9ef19bbfdc7cc8337e25696
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import Any, Optional
from python_analyzer.utils import declaration_name, parent_qname
class API:
@staticmethod
def from_json(json: Any) -> API:
result = API(
json["distribution"],
json["package"],
json["version"]
)
for class_json in json["classes"]:
result.add_class(Class.from_json(class_json))
for function_json in json["functions"]:
result.add_function(Function.from_json(function_json))
return result
def __init__(self, distribution: str, package: str, version: str) -> None:
self.distribution: str = distribution
self.package: str = package
self.version: str = version
self.classes: dict[str, Class] = dict()
self.functions: dict[str, Function] = dict()
def add_class(self, clazz: Class) -> None:
self.classes[clazz.qname] = clazz
def add_function(self, function: Function) -> None:
self.functions[function.qname] = function
def is_public_class(self, class_qname: str) -> bool:
return class_qname in self.classes and self.classes[class_qname].is_public
def is_public_function(self, function_qname: str) -> bool:
return function_qname in self.functions and self.functions[function_qname].is_public
def class_count(self) -> int:
return len(self.classes)
def public_class_count(self) -> int:
return len([it for it in self.classes.values() if it.is_public])
def function_count(self) -> int:
return len(self.functions)
def public_function_count(self) -> int:
return len([it for it in self.functions.values() if it.is_public])
def parameter_count(self) -> int:
return len(self.parameters())
def public_parameter_count(self) -> int:
return len([it for it in self.parameters().values() if it.is_public])
def parameters(self) -> dict[str, Parameter]:
result: dict[str, Parameter] = {}
for function in self.functions.values():
for parameter in function.parameters:
parameter_qname = f"{function.qname}.{parameter.name}"
result[parameter_qname] = parameter
return result
def get_default_value(self, parameter_qname: str) -> Optional[str]:
function_qname = parent_qname(parameter_qname)
parameter_name = declaration_name(parameter_qname)
if function_qname not in self.functions:
return None
for parameter in self.functions[function_qname].parameters:
if parameter.name == parameter_name:
return parameter.default_value
return None
def to_json(self) -> Any:
return {
"distribution": self.distribution,
"package": self.package,
"version": self.version,
"classes": [
clazz.to_json()
for clazz in sorted(self.classes.values(), key=lambda it: it.qname)
],
"functions": [
function.to_json()
for function in sorted(self.functions.values(), key=lambda it: it.qname)
]
}
class Class:
@staticmethod
def from_json(json: Any) -> Class:
return Class(
json["qname"],
json["is_public"]
)
def __init__(self, qname: str, is_public: bool) -> None:
self.qname: str = qname
self.is_public: bool = is_public
def to_json(self) -> Any:
return {
"qname": self.qname,
"is_public": self.is_public
}
class Function:
@staticmethod
def from_json(json: Any) -> Function:
return Function(
json["qname"],
[Parameter.from_json(parameter_json) for parameter_json in json["parameters"]],
json["is_public"]
)
def __init__(self, qname: str, parameters: list[Parameter], is_public: bool) -> None:
self.qname: str = qname
self.parameters: list[Parameter] = parameters
self.is_public: bool = is_public
def to_json(self) -> Any:
return {
"qname": self.qname,
"parameters": [
parameter.to_json()
for parameter in self.parameters
],
"is_public": self.is_public
}
class Parameter:
@staticmethod
def from_json(json: Any) -> Parameter:
return Parameter(
json["name"],
json["default_value"],
json["is_public"]
)
def __init__(self, name: str, default_value: Optional[str], is_public: bool) -> None:
self.name: str = name
self.default_value: Optional[str] = default_value
self.is_public: bool = is_public
def to_json(self) -> Any:
return {
"name": self.name,
"default_value": self.default_value,
"is_public": self.is_public
}
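# --- Illustrative round trip (editor's sketch; the qualified names below are
# --- hypothetical and not taken from any real package). It shows how the model
# --- classes above compose and that to_json/from_json invert each other:
# api = API("example-dist", "example_pkg", "1.0.0")
# api.add_class(Class("example_pkg.Thing", is_public=True))
# api.add_function(Function(
#     "example_pkg.make_thing",
#     [Parameter("size", default_value="1", is_public=True)],
#     is_public=True,
# ))
# api.class_count()                                        # -> 1
# api.parameter_count()                                    # -> 1
# api.is_public_function("example_pkg.make_thing")         # -> True
# API.from_json(api.to_json()).to_json() == api.to_json()  # -> True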
| 29.579882
| 92
| 0.597119
|
d12fa580a922544789cbc871aa190ae167dc357f
| 4,990
|
py
|
Python
|
jenkins_jobs/sphinx/yaml.py
|
pratikmallya/jenkins-job-builder
|
83114faba6b85bb0dae066ca3ec4f23871683fb5
|
[
"Apache-2.0"
] | 3
|
2018-02-20T19:11:48.000Z
|
2020-04-03T08:44:12.000Z
|
jenkins_jobs/sphinx/yaml.py
|
pratikmallya/jenkins-job-builder
|
83114faba6b85bb0dae066ca3ec4f23871683fb5
|
[
"Apache-2.0"
] | 12
|
2015-11-30T17:13:11.000Z
|
2022-03-18T10:58:07.000Z
|
jenkins_jobs/sphinx/yaml.py
|
pratikmallya/jenkins-job-builder
|
83114faba6b85bb0dae066ca3ec4f23871683fb5
|
[
"Apache-2.0"
] | 11
|
2016-10-25T12:03:32.000Z
|
2021-04-27T12:16:51.000Z
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Most of this code originated in sphinx.domains.python and
# sphinx.ext.autodoc and has been only slightly adapted for use in
# subclasses here.
# :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
# :license: BSD, see LICENSE for details.
import re
from sphinx import addnodes
from sphinx.domains.python import _pseudo_parse_arglist
from sphinx.domains.python import PyModulelevel
from sphinx.ext.autodoc import Documenter
from sphinx.ext.autodoc import FunctionDocumenter
from sphinx.locale import _
yaml_sig_re = re.compile(r'yaml:\s*(.*)')
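# Editor's sketch: the docstring shape this regex is matched against. A documented
# function would start its docstring with "yaml: <name>" (the signature of the
# hypothetical function below is illustrative only), and autodoc would pick it up
# via the directive derived from the documenter's objtype, i.e. autoyamlfunction:
# def example_component(registry, xml_parent, data):
#     """yaml: example-component
#     Description rendered below the extracted name.
#     """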
class PyYAMLFunction(PyModulelevel):
def handle_signature(self, sig, signode):
"""Transform a Python signature into RST nodes.
Return (fully qualified name of the thing, classname if any).
If inside a class, the current class name is handled intelligently:
* it is stripped from the displayed name if present
* it is added to the full name (return value) if not present
"""
name_prefix = None
name = sig
arglist = None
retann = None
# determine module and class name (if applicable), as well as full name
modname = self.options.get(
'module', self.env.temp_data.get('py:module'))
classname = self.env.temp_data.get('py:class')
fullname = name
signode['module'] = modname
signode['class'] = classname
signode['fullname'] = fullname
sig_prefix = self.get_signature_prefix(sig)
if sig_prefix:
signode += addnodes.desc_annotation(sig_prefix, sig_prefix)
if name_prefix:
signode += addnodes.desc_addname(name_prefix, name_prefix)
anno = self.options.get('annotation')
signode += addnodes.desc_name(name, name)
if not arglist:
if self.needs_arglist():
# for callables, add an empty parameter list
signode += addnodes.desc_parameterlist()
if retann:
signode += addnodes.desc_returns(retann, retann)
if anno:
signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
return fullname, name_prefix
_pseudo_parse_arglist(signode, arglist)
if retann:
signode += addnodes.desc_returns(retann, retann)
if anno:
signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
return fullname, name_prefix
def get_index_text(self, modname, name_cls):
return _('%s (in module %s)') % (name_cls[0], modname)
class YAMLFunctionDocumenter(FunctionDocumenter):
priority = FunctionDocumenter.priority + 10
objtype = 'yamlfunction'
directivetype = 'yamlfunction'
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
if not FunctionDocumenter.can_document_member(member, membername,
isattr, parent):
return False
if member.__doc__ is not None and yaml_sig_re.match(member.__doc__):
return True
return False
def _find_signature(self, encoding=None):
docstrings = Documenter.get_doc(self, encoding, 2)
if len(docstrings) != 1:
return
doclines = docstrings[0]
setattr(self, '__new_doclines', doclines)
if not doclines:
return
# match first line of docstring against signature RE
match = yaml_sig_re.match(doclines[0])
if not match:
return
name = match.group(1)
# ok, now jump over remaining empty lines and set the remaining
# lines as the new doclines
i = 1
while i < len(doclines) and not doclines[i].strip():
i += 1
setattr(self, '__new_doclines', doclines[i:])
return name
def get_doc(self, encoding=None, ignore=1):
lines = getattr(self, '__new_doclines', None)
if lines is not None:
return [lines]
return Documenter.get_doc(self, encoding, ignore)
def format_signature(self):
result = self._find_signature()
self._name = result
return ''
def format_name(self):
return self._name
def setup(app):
app.add_autodocumenter(YAMLFunctionDocumenter)
app.add_directive_to_domain('py', 'yamlfunction', PyYAMLFunction)
| 34.413793
| 79
| 0.649299
|
160e6071e1da7d7ab2a81e843be45cab88c92203
| 685
|
py
|
Python
|
app/core/migrations/0003_ingredient.py
|
Badhansen/recipe-app-api
|
69b7136ac0616314f354fa27eb6f8b137197d3f8
|
[
"MIT"
] | null | null | null |
app/core/migrations/0003_ingredient.py
|
Badhansen/recipe-app-api
|
69b7136ac0616314f354fa27eb6f8b137197d3f8
|
[
"MIT"
] | null | null | null |
app/core/migrations/0003_ingredient.py
|
Badhansen/recipe-app-api
|
69b7136ac0616314f354fa27eb6f8b137197d3f8
|
[
"MIT"
] | 1
|
2020-12-07T07:57:22.000Z
|
2020-12-07T07:57:22.000Z
|
# Generated by Django 3.1.2 on 2020-11-03 12:00
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0002_tag'),
]
operations = [
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 28.541667
| 118
| 0.617518
|
9de0ed824ebaabb289cf475e4ffcff88b32489fd
| 1,574
|
py
|
Python
|
tests/old_suite/interactive/test_pyqt4.py
|
soleil0-0/pyinstaller
|
4249a7347f6b81aba9825ded8addb92ee0f85ea9
|
[
"Apache-2.0"
] | 2
|
2020-09-13T09:15:02.000Z
|
2021-07-04T04:26:50.000Z
|
tests/old_suite/interactive/test_pyqt4.py
|
jeremysanders/pyinstaller
|
321b24f9a9a5978337735816b36ca6b4a90a2fb4
|
[
"Apache-2.0"
] | 3
|
2021-06-08T22:52:09.000Z
|
2021-09-08T02:48:20.000Z
|
tests/old_suite/interactive/test_pyqt4.py
|
jeremysanders/pyinstaller
|
321b24f9a9a5978337735816b36ca6b4a90a2fb4
|
[
"Apache-2.0"
] | 1
|
2021-09-15T08:52:26.000Z
|
2021-09-15T08:52:26.000Z
|
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
import sys
from PyQt4 import Qt
from PyQt4 import QtCore
from PyQt4 import QtGui
class MyDialog(QtGui.QDialog):
def __init__(self):
super(MyDialog, self).__init__()
self.label = Qt.QLabel(
u"Press <ESC> to exit. Some non-ascii chars: řčšěíáŘ",
self)
self.setWindowTitle("Hello World from PyQt4")
self.resize(400, 200)
self.show()
def sizeHint(self):
return self.label.sizeHint()
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Escape:
self.close()
def main():
app = Qt.QApplication(sys.argv)
read_formats = ', '.join([str(format).lower() \
for format in QtGui.QImageReader.supportedImageFormats()])
print("Qt4 plugin paths:", list(app.libraryPaths()))
print("Qt4 image read support:", read_formats)
print('Qt4 Libraries path:',
str(QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.LibrariesPath)))
ex = MyDialog()
app.exec_()
if __name__ == "__main__":
main()
| 29.148148
| 79
| 0.595934
|
38164f531acebbfc578e76147691281232892080
| 11,345
|
py
|
Python
|
sdk/python/pulumi_aws/ec2/peering_connection_options.py
|
johnktims/pulumi-aws
|
c838bc79043f5376c66fc66275a1e012edd3ab7d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/ec2/peering_connection_options.py
|
johnktims/pulumi-aws
|
c838bc79043f5376c66fc66275a1e012edd3ab7d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/ec2/peering_connection_options.py
|
johnktims/pulumi-aws
|
c838bc79043f5376c66fc66275a1e012edd3ab7d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class PeeringConnectionOptions(pulumi.CustomResource):
accepter: pulumi.Output[dict]
"""
An optional configuration block that allows for [VPC Peering Connection]
(http://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide) options to be set for the VPC that accepts
the peering connection (a maximum of one).
* `allowClassicLinkToRemoteVpc` (`bool`) - Allow a local linked EC2-Classic instance to communicate
with instances in a peer VPC. This enables an outbound communication from the local ClassicLink connection
to the remote VPC. This option is not supported for inter-region VPC peering.
* `allowRemoteVpcDnsResolution` (`bool`) - Allow a local VPC to resolve public DNS hostnames to
private IP addresses when queried from instances in the peer VPC.
* `allowVpcToRemoteClassicLink` (`bool`) - Allow a local VPC to communicate with a linked EC2-Classic
instance in a peer VPC. This enables an outbound communication from the local VPC to the remote ClassicLink
connection. This option is not supported for inter-region VPC peering.
"""
requester: pulumi.Output[dict]
"""
An optional configuration block that allows for [VPC Peering Connection]
(http://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide) options to be set for the VPC that requests
the peering connection (a maximum of one).
* `allowClassicLinkToRemoteVpc` (`bool`) - Allow a local linked EC2-Classic instance to communicate
with instances in a peer VPC. This enables an outbound communication from the local ClassicLink connection
to the remote VPC. This option is not supported for inter-region VPC peering.
* `allowRemoteVpcDnsResolution` (`bool`) - Allow a local VPC to resolve public DNS hostnames to
private IP addresses when queried from instances in the peer VPC.
* `allowVpcToRemoteClassicLink` (`bool`) - Allow a local VPC to communicate with a linked EC2-Classic
instance in a peer VPC. This enables an outbound communication from the local VPC to the remote ClassicLink
connection. This option is not supported for inter-region VPC peering.
"""
vpc_peering_connection_id: pulumi.Output[str]
"""
The ID of the requester VPC peering connection.
"""
def __init__(__self__, resource_name, opts=None, accepter=None, requester=None, vpc_peering_connection_id=None, __props__=None, __name__=None, __opts__=None):
"""
Provides a resource to manage VPC peering connection options.
> **NOTE on VPC Peering Connections and VPC Peering Connection Options:** This provider provides
both a standalone VPC Peering Connection Options and a VPC Peering Connection
resource with `accepter` and `requester` attributes. Do not manage options for the same VPC peering
connection in both a VPC Peering Connection resource and a VPC Peering Connection Options resource.
Doing so will cause a conflict of options and will overwrite the options.
Using a VPC Peering Connection Options resource decouples management of the connection options from
management of the VPC Peering Connection and allows options to be set correctly in cross-region and
cross-account scenarios.
Basic usage:
Basic cross-account usage:
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] accepter: An optional configuration block that allows for [VPC Peering Connection]
(http://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide) options to be set for the VPC that accepts
the peering connection (a maximum of one).
:param pulumi.Input[dict] requester: An optional configuration block that allows for [VPC Peering Connection]
(http://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide) options to be set for the VPC that requests
the peering connection (a maximum of one).
:param pulumi.Input[str] vpc_peering_connection_id: The ID of the requester VPC peering connection.
The **accepter** object supports the following:
* `allowClassicLinkToRemoteVpc` (`pulumi.Input[bool]`) - Allow a local linked EC2-Classic instance to communicate
with instances in a peer VPC. This enables an outbound communication from the local ClassicLink connection
to the remote VPC. This option is not supported for inter-region VPC peering.
* `allowRemoteVpcDnsResolution` (`pulumi.Input[bool]`) - Allow a local VPC to resolve public DNS hostnames to
private IP addresses when queried from instances in the peer VPC.
* `allowVpcToRemoteClassicLink` (`pulumi.Input[bool]`) - Allow a local VPC to communicate with a linked EC2-Classic
instance in a peer VPC. This enables an outbound communication from the local VPC to the remote ClassicLink
connection. This option is not supported for inter-region VPC peering.
The **requester** object supports the following:
* `allowClassicLinkToRemoteVpc` (`pulumi.Input[bool]`) - Allow a local linked EC2-Classic instance to communicate
with instances in a peer VPC. This enables an outbound communication from the local ClassicLink connection
to the remote VPC. This option is not supported for inter-region VPC peering.
* `allowRemoteVpcDnsResolution` (`pulumi.Input[bool]`) - Allow a local VPC to resolve public DNS hostnames to
private IP addresses when queried from instances in the peer VPC.
* `allowVpcToRemoteClassicLink` (`pulumi.Input[bool]`) - Allow a local VPC to communicate with a linked EC2-Classic
instance in a peer VPC. This enables an outbound communication from the local VPC to the remote ClassicLink
connection. This option is not supported for inter-region VPC peering.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['accepter'] = accepter
__props__['requester'] = requester
if vpc_peering_connection_id is None:
raise TypeError("Missing required property 'vpc_peering_connection_id'")
__props__['vpc_peering_connection_id'] = vpc_peering_connection_id
super(PeeringConnectionOptions, __self__).__init__(
'aws:ec2/peeringConnectionOptions:PeeringConnectionOptions',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, accepter=None, requester=None, vpc_peering_connection_id=None):
"""
Get an existing PeeringConnectionOptions resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] accepter: An optional configuration block that allows for [VPC Peering Connection]
(http://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide) options to be set for the VPC that accepts
the peering connection (a maximum of one).
:param pulumi.Input[dict] requester: An optional configuration block that allows for [VPC Peering Connection]
(http://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide) options to be set for the VPC that requests
the peering connection (a maximum of one).
:param pulumi.Input[str] vpc_peering_connection_id: The ID of the requester VPC peering connection.
The **accepter** object supports the following:
* `allowClassicLinkToRemoteVpc` (`pulumi.Input[bool]`) - Allow a local linked EC2-Classic instance to communicate
with instances in a peer VPC. This enables an outbound communication from the local ClassicLink connection
to the remote VPC. This option is not supported for inter-region VPC peering.
* `allowRemoteVpcDnsResolution` (`pulumi.Input[bool]`) - Allow a local VPC to resolve public DNS hostnames to
private IP addresses when queried from instances in the peer VPC.
* `allowVpcToRemoteClassicLink` (`pulumi.Input[bool]`) - Allow a local VPC to communicate with a linked EC2-Classic
instance in a peer VPC. This enables an outbound communication from the local VPC to the remote ClassicLink
connection. This option is not supported for inter-region VPC peering.
The **requester** object supports the following:
* `allowClassicLinkToRemoteVpc` (`pulumi.Input[bool]`) - Allow a local linked EC2-Classic instance to communicate
with instances in a peer VPC. This enables an outbound communication from the local ClassicLink connection
to the remote VPC. This option is not supported for inter-region VPC peering.
* `allowRemoteVpcDnsResolution` (`pulumi.Input[bool]`) - Allow a local VPC to resolve public DNS hostnames to
private IP addresses when queried from instances in the peer VPC.
* `allowVpcToRemoteClassicLink` (`pulumi.Input[bool]`) - Allow a local VPC to communicate with a linked EC2-Classic
instance in a peer VPC. This enables an outbound communication from the local VPC to the remote ClassicLink
connection. This option is not supported for inter-region VPC peering.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["accepter"] = accepter
__props__["requester"] = requester
__props__["vpc_peering_connection_id"] = vpc_peering_connection_id
return PeeringConnectionOptions(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
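# --- Illustrative usage (editor's sketch; the peering connection id is a placeholder).
# --- In a Pulumi program this resource would be declared roughly as follows, using the
# --- camelCase option keys documented above:
#
# example = PeeringConnectionOptions(
#     "exampleOptions",
#     vpc_peering_connection_id="pcx-0123456789abcdef0",
#     accepter={"allowRemoteVpcDnsResolution": True},
#     requester={"allowRemoteVpcDnsResolution": True},
# )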
| 63.735955
| 162
| 0.710621
|
9a239af58fbe2b0c80bc86a607e9aae74b4768b3
| 610
|
py
|
Python
|
project/scanner/helpers.py
|
Mozilla-GitHub-Standards/2c72a3dddbe69ad175e72f759ff33bc6dc22bec05d0e7c304b2fff865cac5390
|
59bbfbc9ce8fea695ac0cca038dc05033fb081ff
|
[
"BSD-3-Clause"
] | null | null | null |
project/scanner/helpers.py
|
Mozilla-GitHub-Standards/2c72a3dddbe69ad175e72f759ff33bc6dc22bec05d0e7c304b2fff865cac5390
|
59bbfbc9ce8fea695ac0cca038dc05033fb081ff
|
[
"BSD-3-Clause"
] | null | null | null |
project/scanner/helpers.py
|
Mozilla-GitHub-Standards/2c72a3dddbe69ad175e72f759ff33bc6dc22bec05d0e7c304b2fff865cac5390
|
59bbfbc9ce8fea695ac0cca038dc05033fb081ff
|
[
"BSD-3-Clause"
] | null | null | null |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.utils.html import escapejs
from jingo import register
#
# This is the place to register Django template filters with the jingo
# template engine. If you see a filter in the Django documentation [1]
# that is not included with Jingo then you can simply import and
# register it here as shown below.
#
# [1] https://docs.djangoproject.com/en/dev/ref/templates/builtins/
#
register.filter(escapejs)
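# For example, Django's "urlencode" filter could be exposed to Jinja templates the
# same way (editor's sketch, not currently registered here):
#
# from django.template.defaultfilters import urlencode
# register.filter(urlencode)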
| 33.888889
| 70
| 0.757377
|
8da071939c45ba8026f73d06055c1d73504c698f
| 7,569
|
py
|
Python
|
tests/core/tests/api.py
|
mjschultz/django-tastefulpy
|
c81c7b32da16f9b181589a0311d9819718fdc960
|
[
"BSD-3-Clause"
] | null | null | null |
tests/core/tests/api.py
|
mjschultz/django-tastefulpy
|
c81c7b32da16f9b181589a0311d9819718fdc960
|
[
"BSD-3-Clause"
] | null | null | null |
tests/core/tests/api.py
|
mjschultz/django-tastefulpy
|
c81c7b32da16f9b181589a0311d9819718fdc960
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib.auth.models import User
from django.http import HttpRequest
from django.test import TestCase
from tastefulpy.api import Api
from tastefulpy.exceptions import NotRegistered, BadRequest
from tastefulpy.resources import Resource, ModelResource
from tastefulpy.serializers import Serializer
from core.models import Note
class NoteResource(ModelResource):
class Meta:
resource_name = 'notes'
queryset = Note.objects.filter(is_active=True)
class UserResource(ModelResource):
class Meta:
resource_name = 'users'
queryset = User.objects.all()
class ApiTestCase(TestCase):
urls = 'core.tests.api_urls'
def test_register(self):
# NOTE: these have all been registered in core.tests.api_urls
api = Api()
self.assertEqual(len(api._registry), 0)
api.register(NoteResource())
self.assertEqual(len(api._registry), 1)
self.assertEqual(sorted(api._registry.keys()), ['notes'])
api.register(UserResource())
self.assertEqual(len(api._registry), 2)
self.assertEqual(sorted(api._registry.keys()), ['notes', 'users'])
api.register(UserResource())
self.assertEqual(len(api._registry), 2)
self.assertEqual(sorted(api._registry.keys()), ['notes', 'users'])
self.assertEqual(len(api._canonicals), 2)
api.register(UserResource(), canonical=False)
self.assertEqual(len(api._registry), 2)
self.assertEqual(sorted(api._registry.keys()), ['notes', 'users'])
self.assertEqual(len(api._canonicals), 2)
def test_global_registry(self):
api = Api()
self.assertEqual(len(api._registry), 0)
api.register(NoteResource())
self.assertEqual(len(api._registry), 1)
self.assertEqual(sorted(api._registry.keys()), ['notes'])
api.register(UserResource())
self.assertEqual(len(api._registry), 2)
self.assertEqual(sorted(api._registry.keys()), ['notes', 'users'])
api.register(UserResource())
self.assertEqual(len(api._registry), 2)
self.assertEqual(sorted(api._registry.keys()), ['notes', 'users'])
self.assertEqual(len(api._canonicals), 2)
api.register(UserResource(), canonical=False)
self.assertEqual(len(api._registry), 2)
self.assertEqual(sorted(api._registry.keys()), ['notes', 'users'])
self.assertEqual(len(api._canonicals), 2)
def test_unregister(self):
api = Api()
api.register(NoteResource())
api.register(UserResource(), canonical=False)
self.assertEqual(sorted(api._registry.keys()), ['notes', 'users'])
self.assertEqual(len(api._canonicals), 1)
api.unregister('users')
self.assertEqual(len(api._registry), 1)
self.assertEqual(sorted(api._registry.keys()), ['notes'])
self.assertEqual(len(api._canonicals), 1)
api.unregister('notes')
self.assertEqual(len(api._registry), 0)
self.assertEqual(sorted(api._registry.keys()), [])
api.unregister('users')
self.assertEqual(len(api._registry), 0)
self.assertEqual(sorted(api._registry.keys()), [])
def test_canonical_resource_for(self):
api = Api()
note_resource = NoteResource()
user_resource = UserResource()
api.register(note_resource)
api.register(user_resource)
self.assertEqual(len(api._canonicals), 2)
self.assertEqual(isinstance(api.canonical_resource_for('notes'), NoteResource), True)
api_2 = Api()
api.unregister(user_resource._meta.resource_name)
self.assertRaises(NotRegistered, api.canonical_resource_for, 'users')
def test_urls(self):
api = Api()
api.register(NoteResource())
api.register(UserResource())
patterns = api.urls
self.assertEqual(len(patterns), 3)
self.assertEqual(sorted([pattern.name for pattern in patterns if hasattr(pattern, 'name')]), ['api_v1_top_level'])
self.assertEqual([[pattern.name for pattern in include.url_patterns if hasattr(pattern, 'name')] for include in patterns if hasattr(include, 'reverse_dict')], [['api_dispatch_list', 'api_get_schema', 'api_get_multiple', 'api_dispatch_detail'], ['api_dispatch_list', 'api_get_schema', 'api_get_multiple', 'api_dispatch_detail']])
api = Api(api_name='v2')
api.register(NoteResource())
api.register(UserResource())
patterns = api.urls
self.assertEqual(len(patterns), 3)
self.assertEqual(sorted([pattern.name for pattern in patterns if hasattr(pattern, 'name')]), ['api_v2_top_level'])
self.assertEqual([[pattern.name for pattern in include.url_patterns if hasattr(pattern, 'name')] for include in patterns if hasattr(include, 'reverse_dict')], [['api_dispatch_list', 'api_get_schema', 'api_get_multiple', 'api_dispatch_detail'], ['api_dispatch_list', 'api_get_schema', 'api_get_multiple', 'api_dispatch_detail']])
def test_top_level(self):
api = Api()
api.register(NoteResource())
api.register(UserResource())
request = HttpRequest()
resp = api.top_level(request)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content.decode('utf-8'), '{"notes": {"list_endpoint": "/api/v1/notes/", "schema": "/api/v1/notes/schema/"}, "users": {"list_endpoint": "/api/v1/users/", "schema": "/api/v1/users/schema/"}}')
def test_top_level_jsonp(self):
api = Api()
api.register(NoteResource())
api.register(UserResource())
request = HttpRequest()
request.META = {'HTTP_ACCEPT': 'text/javascript'}
request.GET = {'callback': 'foo'}
resp = api.top_level(request)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp['content-type'].split(';')[0], 'text/javascript')
self.assertEqual(resp.content.decode('utf-8'), 'foo({"notes": {"list_endpoint": "/api/v1/notes/", "schema": "/api/v1/notes/schema/"}, "users": {"list_endpoint": "/api/v1/users/", "schema": "/api/v1/users/schema/"}})')
request = HttpRequest()
request.META = {'HTTP_ACCEPT': 'text/javascript'}
request.GET = {'callback': ''}
try:
resp = api.top_level(request)
self.fail("Broken callback didn't fail!")
except BadRequest:
# Regression: We expect this, which is fine, but this used to
# be an import error.
pass
def test_custom_api_serializer(self):
"""Confirm that an Api can use a custom serializer"""
# Origin: https://github.com/toastdriven/django-tastefulpy/pull/817
class JSONSerializer(Serializer):
formats = ('json', )
api = Api(serializer_class=JSONSerializer)
api.register(NoteResource())
request = HttpRequest()
request.META = {'HTTP_ACCEPT': 'text/javascript'}
resp = api.top_level(request)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp['content-type'], 'application/json',
msg="Expected application/json response but received %s" % resp['content-type'])
request = HttpRequest()
request.META = {'HTTP_ACCEPT': 'application/xml'}
resp = api.top_level(request)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp['content-type'], 'application/json',
msg="Expected application/json response but received %s" % resp['content-type'])
| 40.913514
| 336
| 0.648302
|
57417c0e0ac1ae7175df666ec7745c5b4c0f940a
| 5,511
|
py
|
Python
|
SNAKE.py
|
burleyinnersbm07/pythonSnakeGame
|
9b669dc3d919a99af053d644fd8151181e5e00d0
|
[
"MIT"
] | null | null | null |
SNAKE.py
|
burleyinnersbm07/pythonSnakeGame
|
9b669dc3d919a99af053d644fd8151181e5e00d0
|
[
"MIT"
] | null | null | null |
SNAKE.py
|
burleyinnersbm07/pythonSnakeGame
|
9b669dc3d919a99af053d644fd8151181e5e00d0
|
[
"MIT"
] | null | null | null |
import pygame
import time
import random
pygame.init()
# Define RGB color tuples for drawing
# (PyGame does not predefine these as module-level constants)
white = (255, 255, 255)
darkGray = (50, 50, 50)
lightGray = (150, 150, 150)
black = (0, 0, 0)
red = (245, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
display_width = 800
display_height = 600
gameDisplay = pygame.display.set_mode((display_width, display_height))
# the title of the window
pygame.display.set_caption('Snake')
# clock
clock = pygame.time.Clock()
FPS = 15
block_size = 20
font = pygame.font.SysFont(None, 25) # font-size 25
def snake(block_size, snakeList):
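    # Draw every segment of the snake body as a block_size x block_size rectangle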
for XnY in snakeList:
pygame.draw.rect(gameDisplay, lightGray, [XnY[0], XnY[1], block_size, block_size])
def message_to_screen(msg, color):
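    # Render the text and blit it starting at the window's center point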
screen_text = font.render(msg, True, color)
gameDisplay.blit(screen_text, [display_width/2, display_height/2])
# while gameExit is false
# the game loop
def gameLoop():
gameExit = False
gameOver = False
# leader/head of the snake starting coords
lead_x = display_width/2
lead_y = display_height/2
# track the change in position for constant motion
lead_x_change = 0
lead_y_change = 0
# randoms for apple and stuff
randAppleX = round(random.randrange(0, display_width - block_size))#/10.0) * 10.0
randAppleY = round(random.randrange(0, display_height - block_size))#/10.0) * 10.0
snakeList = []
snakeLength = 1
while not gameExit:
while gameOver == True:
gameDisplay.fill(darkGray)
message_to_screen("Game over, press C to play again, Q to quit", red)
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameOver = False
gameExit = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_q:
gameExit = True
gameOver = False
if event.key == pygame.K_c:
gameLoop()
for event in pygame.event.get():
if event.type == pygame.QUIT: # quit event
gameExit = True # stops the loop and makes the X work
# moving the snake
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT: # if left arrow
lead_x_change = -block_size
lead_y_change = 0
elif event.key == pygame.K_RIGHT: # if right arrow
lead_x_change = block_size
lead_y_change = 0
elif event.key == pygame.K_UP: # if up arrow
lead_y_change = -block_size
lead_x_change = 0
elif event.key == pygame.K_DOWN: # if down arrow
lead_y_change = block_size
lead_x_change = 0
if lead_x >= display_width or lead_x < 0 or lead_y >= display_height or lead_y < 0:
gameOver = True
#if event.type == pygame.KEYUP:
# if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
# lead_x_change = 0
lead_x += lead_x_change # sum the values
lead_y += lead_y_change # sum y values
gameDisplay.fill(darkGray)
AppleThickness = 30
# draw apple
pygame.draw.rect(gameDisplay, red, [randAppleX, randAppleY, AppleThickness, AppleThickness])
# the snakeList
snakeHead = []
snakeHead.append(lead_x)
snakeHead.append(lead_y)
snakeList.append(snakeHead)
if len(snakeList) > snakeLength:
del snakeList[0]
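        # End the game if the head overlaps any earlier body segment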
for eachSegment in snakeList[:-1]:
if eachSegment == snakeHead:
gameOver = True
# call snake function
# pass variables from the gameLoop()
snake(block_size, snakeList)
        # pygame.draw.rect(surface, color, [x, y, width, height])
pygame.draw.rect(gameDisplay, lightGray, [lead_x, lead_y, block_size, block_size])
# using fill to draw rectangles
# using fill lets you use graphics acceleration
#gameDisplay.fill(red, rect=[200, 200, 50, 50])
# draw everything first, then render it
pygame.display.update()
# logic for collision of snake and apple
# if lead_x >= randAppleX and lead_x <= randAppleX + AppleThickness:
# if lead_y >= randAppleY and lead_y <= randAppleY + AppleThickness:
# randAppleX = round(random.randrange(0, display_width - block_size))#/10.0) * 10.0
# randAppleY = round(random.randrange(0, display_height - block_size))#/10.0) * 10.0
# snakeLength += 1
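        # Eat the apple when either edge of the head lies inside the apple's bounding box,
        # then respawn the apple and grow the snake by one segment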
if lead_x > randAppleX and lead_x < randAppleX + AppleThickness or lead_x + block_size > randAppleX and lead_x + block_size < randAppleX + AppleThickness:
if lead_y > randAppleY and lead_y < randAppleY + AppleThickness or lead_y + block_size > randAppleY and lead_y + block_size < randAppleY + AppleThickness:
randAppleX = round(random.randrange(0, display_width - block_size))#/10.0) * 10.0
randAppleY = round(random.randrange(0, display_height - block_size))#/10.0) * 10.0
snakeLength += 1
# frames per second
# this is the speed at which the snake will move/refresh
clock.tick(FPS)
pygame.quit()
quit()
gameLoop()
| 34.44375
| 166
| 0.596625
|
3504dbf3bead03fe773dea13f1559b56efa25b6b
| 13,972
|
py
|
Python
|
tests/components/flux_led/test_init.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
tests/components/flux_led/test_init.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 24,710
|
2016-04-13T08:27:26.000Z
|
2020-03-02T12:59:13.000Z
|
tests/components/flux_led/test_init.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""Tests for the flux_led component."""
from __future__ import annotations
from datetime import timedelta
from unittest.mock import AsyncMock, patch
import pytest
from homeassistant import config_entries
from homeassistant.components import flux_led
from homeassistant.components.flux_led.const import (
CONF_REMOTE_ACCESS_ENABLED,
CONF_REMOTE_ACCESS_HOST,
CONF_REMOTE_ACCESS_PORT,
DOMAIN,
)
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import (
ATTR_FRIENDLY_NAME,
CONF_HOST,
CONF_NAME,
EVENT_HOMEASSISTANT_STARTED,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import utcnow
from . import (
DEFAULT_ENTRY_TITLE,
DHCP_DISCOVERY,
FLUX_DISCOVERY,
FLUX_DISCOVERY_PARTIAL,
IP_ADDRESS,
MAC_ADDRESS,
MAC_ADDRESS_ONE_OFF,
_mocked_bulb,
_patch_discovery,
_patch_wifibulb,
)
from tests.common import MockConfigEntry, async_fire_time_changed
@pytest.mark.usefixtures("mock_single_broadcast_address")
async def test_configuring_flux_led_causes_discovery(hass: HomeAssistant) -> None:
"""Test that specifying empty config does discovery."""
with patch(
"homeassistant.components.flux_led.discovery.AIOBulbScanner.async_scan"
) as scan, patch(
"homeassistant.components.flux_led.discovery.AIOBulbScanner.getBulbInfo"
) as discover:
discover.return_value = [FLUX_DISCOVERY]
await async_setup_component(hass, flux_led.DOMAIN, {flux_led.DOMAIN: {}})
await hass.async_block_till_done()
assert len(scan.mock_calls) == 1
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert len(scan.mock_calls) == 2
async_fire_time_changed(hass, utcnow() + flux_led.DISCOVERY_INTERVAL)
await hass.async_block_till_done()
assert len(scan.mock_calls) == 3
@pytest.mark.usefixtures("mock_multiple_broadcast_addresses")
async def test_configuring_flux_led_causes_discovery_multiple_addresses(
hass: HomeAssistant,
) -> None:
"""Test that specifying empty config does discovery."""
with patch(
"homeassistant.components.flux_led.discovery.AIOBulbScanner.async_scan"
) as scan, patch(
"homeassistant.components.flux_led.discovery.AIOBulbScanner.getBulbInfo"
) as discover:
discover.return_value = [FLUX_DISCOVERY]
await async_setup_component(hass, flux_led.DOMAIN, {flux_led.DOMAIN: {}})
await hass.async_block_till_done()
assert len(scan.mock_calls) == 2
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert len(scan.mock_calls) == 4
async_fire_time_changed(hass, utcnow() + flux_led.DISCOVERY_INTERVAL)
await hass.async_block_till_done()
assert len(scan.mock_calls) == 6
async def test_config_entry_reload(hass: HomeAssistant) -> None:
"""Test that a config entry can be reloaded."""
config_entry = MockConfigEntry(
domain=DOMAIN, data={CONF_HOST: IP_ADDRESS}, unique_id=MAC_ADDRESS
)
config_entry.add_to_hass(hass)
with _patch_discovery(), _patch_wifibulb():
await async_setup_component(hass, flux_led.DOMAIN, {flux_led.DOMAIN: {}})
await hass.async_block_till_done()
assert config_entry.state == ConfigEntryState.LOADED
await hass.config_entries.async_unload(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state == ConfigEntryState.NOT_LOADED
async def test_config_entry_retry(hass: HomeAssistant) -> None:
"""Test that a config entry can be retried."""
config_entry = MockConfigEntry(
domain=DOMAIN, data={CONF_HOST: IP_ADDRESS}, unique_id=MAC_ADDRESS
)
config_entry.add_to_hass(hass)
with _patch_discovery(no_device=True), _patch_wifibulb(no_device=True):
await async_setup_component(hass, flux_led.DOMAIN, {flux_led.DOMAIN: {}})
await hass.async_block_till_done()
assert config_entry.state == ConfigEntryState.SETUP_RETRY
async def test_config_entry_retry_right_away_on_discovery(hass: HomeAssistant) -> None:
"""Test discovery makes the config entry reload if its in a retry state."""
config_entry = MockConfigEntry(
domain=DOMAIN, data={CONF_HOST: IP_ADDRESS}, unique_id=MAC_ADDRESS
)
config_entry.add_to_hass(hass)
with _patch_discovery(no_device=True), _patch_wifibulb(no_device=True):
await async_setup_component(hass, flux_led.DOMAIN, {flux_led.DOMAIN: {}})
await hass.async_block_till_done()
assert config_entry.state == ConfigEntryState.SETUP_RETRY
with _patch_discovery(), _patch_wifibulb():
await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data=DHCP_DISCOVERY,
)
await hass.async_block_till_done()
assert config_entry.state == ConfigEntryState.LOADED
async def test_coordinator_retry_right_away_on_discovery_already_setup(
hass: HomeAssistant,
) -> None:
"""Test discovery makes the coordinator force poll if its already setup."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: IP_ADDRESS, CONF_NAME: DEFAULT_ENTRY_TITLE},
unique_id=MAC_ADDRESS,
)
config_entry.add_to_hass(hass)
bulb = _mocked_bulb()
with _patch_discovery(), _patch_wifibulb(device=bulb):
await async_setup_component(hass, flux_led.DOMAIN, {flux_led.DOMAIN: {}})
await hass.async_block_till_done()
assert config_entry.state == ConfigEntryState.LOADED
entity_id = "light.bulb_rgbcw_ddeeff"
entity_registry = er.async_get(hass)
assert entity_registry.async_get(entity_id).unique_id == MAC_ADDRESS
state = hass.states.get(entity_id)
assert state.state == STATE_ON
now = utcnow()
bulb.async_update = AsyncMock(side_effect=RuntimeError)
async_fire_time_changed(hass, now + timedelta(seconds=50))
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == STATE_UNAVAILABLE
bulb.async_update = AsyncMock()
with _patch_discovery(), _patch_wifibulb():
await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data=DHCP_DISCOVERY,
)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == STATE_ON
@pytest.mark.parametrize(
"discovery,title",
[
(FLUX_DISCOVERY, DEFAULT_ENTRY_TITLE),
(FLUX_DISCOVERY_PARTIAL, DEFAULT_ENTRY_TITLE),
],
)
async def test_config_entry_fills_unique_id_with_directed_discovery(
hass: HomeAssistant, discovery: dict[str, str], title: str
) -> None:
"""Test that the unique id is added if its missing via directed (not broadcast) discovery."""
config_entry = MockConfigEntry(
domain=DOMAIN, data={CONF_HOST: IP_ADDRESS}, unique_id=None, title=IP_ADDRESS
)
config_entry.add_to_hass(hass)
last_address = None
async def _discovery(self, *args, address=None, **kwargs):
# Only return discovery results when doing directed discovery
nonlocal last_address
last_address = address
return [discovery] if address == IP_ADDRESS else []
def _mock_getBulbInfo(*args, **kwargs):
nonlocal last_address
return [discovery] if last_address == IP_ADDRESS else []
with patch(
"homeassistant.components.flux_led.discovery.AIOBulbScanner.async_scan",
new=_discovery,
), patch(
"homeassistant.components.flux_led.discovery.AIOBulbScanner.getBulbInfo",
new=_mock_getBulbInfo,
), _patch_wifibulb():
await async_setup_component(hass, flux_led.DOMAIN, {flux_led.DOMAIN: {}})
await hass.async_block_till_done()
assert config_entry.state == ConfigEntryState.LOADED
assert config_entry.unique_id == MAC_ADDRESS
assert config_entry.title == title
async def test_time_sync_startup_and_next_day(hass: HomeAssistant) -> None:
"""Test that time is synced on startup and next day."""
config_entry = MockConfigEntry(
domain=DOMAIN, data={CONF_HOST: IP_ADDRESS}, unique_id=MAC_ADDRESS
)
config_entry.add_to_hass(hass)
bulb = _mocked_bulb()
with _patch_discovery(), _patch_wifibulb(device=bulb):
await async_setup_component(hass, flux_led.DOMAIN, {flux_led.DOMAIN: {}})
await hass.async_block_till_done()
assert config_entry.state == ConfigEntryState.LOADED
assert len(bulb.async_set_time.mock_calls) == 1
async_fire_time_changed(hass, utcnow() + timedelta(hours=24))
await hass.async_block_till_done()
assert len(bulb.async_set_time.mock_calls) == 2
async def test_unique_id_migrate_when_mac_discovered(hass: HomeAssistant) -> None:
"""Test unique id migrated when mac discovered."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={
CONF_REMOTE_ACCESS_HOST: "any",
CONF_REMOTE_ACCESS_ENABLED: True,
CONF_REMOTE_ACCESS_PORT: 1234,
CONF_HOST: IP_ADDRESS,
CONF_NAME: DEFAULT_ENTRY_TITLE,
},
)
config_entry.add_to_hass(hass)
bulb = _mocked_bulb()
with _patch_discovery(no_device=True), _patch_wifibulb(device=bulb):
await async_setup_component(hass, flux_led.DOMAIN, {flux_led.DOMAIN: {}})
await hass.async_block_till_done()
assert not config_entry.unique_id
entity_registry = er.async_get(hass)
assert (
entity_registry.async_get("light.bulb_rgbcw_ddeeff").unique_id
== config_entry.entry_id
)
assert (
entity_registry.async_get("switch.bulb_rgbcw_ddeeff_remote_access").unique_id
== f"{config_entry.entry_id}_remote_access"
)
with _patch_discovery(), _patch_wifibulb(device=bulb):
await hass.config_entries.async_reload(config_entry.entry_id)
await hass.async_block_till_done()
assert (
entity_registry.async_get("light.bulb_rgbcw_ddeeff").unique_id
== config_entry.unique_id
)
assert (
entity_registry.async_get("switch.bulb_rgbcw_ddeeff_remote_access").unique_id
== f"{config_entry.unique_id}_remote_access"
)
async def test_unique_id_migrate_when_mac_discovered_via_discovery(
hass: HomeAssistant,
) -> None:
"""Test unique id migrated when mac discovered via discovery and the mac address from dhcp was one off."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={
CONF_REMOTE_ACCESS_HOST: "any",
CONF_REMOTE_ACCESS_ENABLED: True,
CONF_REMOTE_ACCESS_PORT: 1234,
CONF_HOST: IP_ADDRESS,
CONF_NAME: DEFAULT_ENTRY_TITLE,
},
unique_id=MAC_ADDRESS_ONE_OFF,
)
config_entry.add_to_hass(hass)
bulb = _mocked_bulb()
with _patch_discovery(no_device=True), _patch_wifibulb(device=bulb):
await async_setup_component(hass, flux_led.DOMAIN, {flux_led.DOMAIN: {}})
await hass.async_block_till_done()
assert config_entry.unique_id == MAC_ADDRESS_ONE_OFF
entity_registry = er.async_get(hass)
assert (
entity_registry.async_get("light.bulb_rgbcw_ddeeff").unique_id
== MAC_ADDRESS_ONE_OFF
)
assert (
entity_registry.async_get("switch.bulb_rgbcw_ddeeff_remote_access").unique_id
== f"{MAC_ADDRESS_ONE_OFF}_remote_access"
)
for _ in range(2):
with _patch_discovery(), _patch_wifibulb(device=bulb):
await hass.config_entries.async_reload(config_entry.entry_id)
await hass.async_block_till_done()
assert (
entity_registry.async_get("light.bulb_rgbcw_ddeeff").unique_id
== config_entry.unique_id
)
assert (
entity_registry.async_get(
"switch.bulb_rgbcw_ddeeff_remote_access"
).unique_id
== f"{config_entry.unique_id}_remote_access"
)
async def test_name_removed_when_it_matches_entry_title(hass: HomeAssistant) -> None:
"""Test name is removed when it matches the entry title."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={
CONF_REMOTE_ACCESS_HOST: "any",
CONF_REMOTE_ACCESS_ENABLED: True,
CONF_REMOTE_ACCESS_PORT: 1234,
CONF_HOST: IP_ADDRESS,
CONF_NAME: DEFAULT_ENTRY_TITLE,
},
title=DEFAULT_ENTRY_TITLE,
)
config_entry.add_to_hass(hass)
with _patch_discovery(), _patch_wifibulb():
await async_setup_component(hass, flux_led.DOMAIN, {flux_led.DOMAIN: {}})
await hass.async_block_till_done()
assert CONF_NAME not in config_entry.data
async def test_entry_is_reloaded_when_title_changes(hass: HomeAssistant) -> None:
"""Test the entry gets reloaded when the title changes."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={
CONF_REMOTE_ACCESS_HOST: "any",
CONF_REMOTE_ACCESS_ENABLED: True,
CONF_REMOTE_ACCESS_PORT: 1234,
CONF_HOST: IP_ADDRESS,
},
title=DEFAULT_ENTRY_TITLE,
)
config_entry.add_to_hass(hass)
with _patch_discovery(), _patch_wifibulb():
await async_setup_component(hass, flux_led.DOMAIN, {flux_led.DOMAIN: {}})
await hass.async_block_till_done()
hass.config_entries.async_update_entry(config_entry, title="Shop Light")
assert config_entry.title == "Shop Light"
await hass.async_block_till_done()
assert (
hass.states.get("light.bulb_rgbcw_ddeeff").attributes[ATTR_FRIENDLY_NAME]
== "Shop Light"
)
| 36.671916
| 110
| 0.707272
|
12e3d0365cfd2ddb811d5f89a5c9a7ca0eec8f28
| 6,613
|
py
|
Python
|
custom_components/lovelace_gen/__init__.py
|
genestealer/Home-Assistant-Configuration
|
36995da36beddb5cdac46130ad1e3fcce350332b
|
[
"MIT"
] | 8
|
2019-01-04T08:10:29.000Z
|
2021-11-05T17:44:35.000Z
|
custom_components/lovelace_gen/__init__.py
|
genestealer/Home-Assistant-Configuration
|
36995da36beddb5cdac46130ad1e3fcce350332b
|
[
"MIT"
] | 1
|
2022-02-03T03:05:31.000Z
|
2022-02-05T19:01:34.000Z
|
custom_components/lovelace_gen/__init__.py
|
genestealer/Home-Assistant-Configuration
|
36995da36beddb5cdac46130ad1e3fcce350332b
|
[
"MIT"
] | 4
|
2020-05-24T18:04:21.000Z
|
2022-03-09T23:54:05.000Z
|
import os
import logging
import json
import io
import time
from collections import OrderedDict
import jinja2
from homeassistant.util.yaml import loader
from homeassistant.exceptions import HomeAssistantError
_LOGGER = logging.getLogger(__name__)
def fromjson(value):
return json.loads(value)
jinja = jinja2.Environment(loader=jinja2.FileSystemLoader("/"))
jinja.filters['fromjson'] = fromjson
llgen_config = {}
def load_yaml(fname, secrets = None, args={}):
try:
ll_gen = False
with open(fname, encoding="utf-8") as f:
if f.readline().lower().startswith("# lovelace_gen"):
ll_gen = True
if ll_gen:
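            # Render the file through Jinja2 first, then parse the rendered text as YAML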
stream = io.StringIO(jinja.get_template(fname).render({**args, "_global": llgen_config}))
stream.name = fname
return loader.yaml.load(stream, Loader=lambda _stream: loader.SafeLineLoader(_stream, secrets)) or OrderedDict()
else:
with open(fname, encoding="utf-8") as config_file:
return loader.yaml.load(config_file, Loader=lambda stream: loader.SafeLineLoader(stream, secrets)) or OrderedDict()
except loader.yaml.YAMLError as exc:
_LOGGER.error(str(exc))
raise HomeAssistantError(exc)
except UnicodeDecodeError as exc:
_LOGGER.error("Unable to read file %s: %s", fname, exc)
raise HomeAssistantError(exc)
def _include_yaml(ldr, node):
args = {}
if isinstance(node.value, str):
fn = node.value
else:
fn, args, *_ = ldr.construct_sequence(node)
fname = os.path.abspath(os.path.join(os.path.dirname(ldr.name), fn))
try:
return loader._add_reference(load_yaml(fname, ldr.secrets, args=args), ldr, node)
except FileNotFoundError as exc:
_LOGGER.error("Unable to include file %s: %s", fname, exc);
raise HomeAssistantError(exc)
def _uncache_file(ldr, node):
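    # Append a timestamp query parameter so the frontend re-fetches the file instead of using a cached copy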
path = node.value
timestamp = str(time.time())
if '?' in path:
return f"{path}&{timestamp}"
return f"{path}?{timestamp}"
loader.load_yaml = load_yaml
loader.SafeLineLoader.add_constructor("!include", _include_yaml)
loader.SafeLineLoader.add_constructor("!file", _uncache_file)
async def async_setup(hass, config):
llgen_config.update(config.get("lovelace_gen"));
return True
# Allow redefinition of node anchors
import yaml
def compose_node(self, parent, index):
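    # Patched copy of yaml's Composer.compose_node that drops the duplicate-anchor check so anchors can be redefined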
if self.check_event(yaml.events.AliasEvent):
event = self.get_event()
anchor = event.anchor
if anchor not in self.anchors:
raise yaml.composer.ComposerError(None, None, "found undefined alias %r"
% anchor, event.start_mark)
return self.anchors[anchor]
event = self.peek_event()
anchor = event.anchor
self.descend_resolver(parent, index)
if self.check_event(yaml.events.ScalarEvent):
node = self.compose_scalar_node(anchor)
elif self.check_event(yaml.events.SequenceStartEvent):
node = self.compose_sequence_node(anchor)
elif self.check_event(yaml.events.MappingStartEvent):
node = self.compose_mapping_node(anchor)
self.ascend_resolver()
return node
yaml.composer.Composer.compose_node = compose_node
| 33.39899
| 133
| 0.668683
|
8d46fc9c1136ab9aa0b90c8f8915fb4e900a44dd
| 166,351
|
py
|
Python
|
python/paddle/fluid/framework.py
|
sunhongwei123/Paddle
|
cadc6a97040965901adceb102c4946026abf2282
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/framework.py
|
sunhongwei123/Paddle
|
cadc6a97040965901adceb102c4946026abf2282
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/framework.py
|
sunhongwei123/Paddle
|
cadc6a97040965901adceb102c4946026abf2282
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import collections
from collections import defaultdict
from collections import Iterable
import contextlib
from .wrapped_decorator import signature_safe_contextmanager, wrap_decorator
import os
import re
import traceback
import six
import numpy as np
import subprocess
import multiprocessing
import sys
import logging
from .. import compat as cpt
from .proto import framework_pb2
from . import core
from . import unique_name
import paddle.version as fluid_version
import warnings
__all__ = [
'Program',
'default_startup_program',
'default_main_program',
'program_guard',
'name_scope',
'cuda_places',
'cpu_places',
'cuda_pinned_places',
'in_dygraph_mode',
'is_compiled_with_cuda',
'Variable',
'load_op_library',
'require_version',
]
EMPTY_VAR_NAME = core.kEmptyVarName()
TEMP_VAR_NAME = core.kTempVarName()
GRAD_VAR_SUFFIX = core.kGradVarSuffix()
ZERO_VAR_SUFFIX = core.kZeroVarSuffix()
CONTROL_DEP_VAR_PREFIX = core.kControlDepVarName()
_dygraph_tracer_ = None
_dygraph_current_expected_place_ = None
def require_version(min_version, max_version=None):
"""
    Check if the installed version of PaddlePaddle is in [min_version, max_version].
    If the installed version is lower than ``min_version`` or higher than ``max_version``,
    an exception is raised; nothing is returned when the installed version satisfies the requirement.
Args:
min_version (str): the minimum version required (like '1.4.0').
max_version (str, optional): the max version required (like '1.6.0'), default is None,
meaning any version equal or higher than ``min_version`` is acceptable.
Returns:
None.
Raises:
TypeError: if the type of ``min_version`` is not str.
TypeError: if the type of ``max_version`` is not str or type(None).
ValueError: if the value of ``min_version`` is not in version format.
ValueError: if the value of ``max_version`` is not in version format or None.
Exception: if the installed version is lower than ``min_version`` or higher than ``max_version``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# any version >= 0.1.0 is acceptable.
fluid.require_version('0.1.0')
# if 0.1.0 <= version <= 10.0.0, it is acceptable.
fluid.require_version(min_version='0.1.0', max_version='10.0.0')
"""
if not isinstance(min_version, str):
raise TypeError(
"The type of 'min_version' in require_version must be str, but received %s."
% (type(min_version)))
if not isinstance(max_version, (str, type(None))):
raise TypeError(
"The type of 'max_version' in require_version must be str or type(None), but received %s."
% (type(max_version)))
check_format = re.match(r'\d+(\.\d+){0,3}', min_version)
if check_format is None or check_format.group() != min_version:
raise ValueError(
"The value of 'min_version' in require_version must be in format '\\d+(\\.\\d+){0,3}', "
"like '1.5.2.0', but received %s" % min_version)
if max_version is not None:
check_format = re.match(r'\d+(\.\d+){0,3}', max_version)
if check_format is None or check_format.group() != max_version:
raise ValueError(
"The value of 'max_version' in require_version must be in format '\\d+(\\.\\d+){0,3}', "
"like '1.5.2.0', but received %s" % max_version)
version_installed = [
fluid_version.major, fluid_version.minor, fluid_version.patch,
fluid_version.rc
]
zero_version = ['0', '0', '0', '0']
def version_cmp(ver_a, ver_b):
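        # Compare two dotted versions element by element: 1 if ver_a is newer, -1 if older, 0 if equal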
for i in six.moves.range(len(ver_a)):
if int(ver_a[i]) > int(ver_b[i]):
return 1
elif int(ver_a[i]) < int(ver_b[i]):
return -1
return 0
if version_cmp(version_installed, zero_version) == 0:
if max_version is not None:
warnings.warn(
"PaddlePaddle version in [%s, %s] required, but %s installed. "
"Maybe you are using a develop version, "
"please make sure the version is good with your code." %
(min_version, max_version, fluid_version.full_version))
else:
warnings.warn(
"PaddlePaddle version %s or higher is required, but %s installed, "
"Maybe you are using a develop version, "
"please make sure the version is good with your code." %
(min_version, fluid_version.full_version))
return
min_version_split = min_version.split('.')
min_version_to_check = min_version_split + zero_version[len(
min_version_split):]
if max_version is not None:
max_version_split = max_version.split('.')
max_version_to_check = max_version_split + zero_version[len(
max_version_split):]
if version_cmp(version_installed,
max_version_to_check) > 0 or version_cmp(
version_installed, min_version_to_check) < 0:
raise Exception(
"VersionError: PaddlePaddle version in [%s, %s] required, but %s installed."
% (min_version, max_version, fluid_version.full_version))
else:
if version_cmp(version_installed, min_version_to_check) < 0:
raise Exception(
"VersionError: PaddlePaddle version %s or higher is required, but %s installed, "
"please upgrade your PaddlePaddle to %s or other higher version."
% (min_version, fluid_version.full_version, min_version))
def in_dygraph_mode():
"""
This function checks whether the program runs in dynamic graph mode or not.
You can turn on dynamic graph mode with :ref:`api_fluid_dygraph_guard` api.
Returns:
bool: Whether the program is running in dynamic graph mode.
Examples:
.. code-block:: python
import paddle.fluid as fluid
if fluid.in_dygraph_mode():
print('running in dygraph mode')
else:
print('not running in dygraph mode')
"""
return _dygraph_tracer_ is not None
def _dygraph_not_support_(func):
def __impl__(*args, **kwargs):
assert not in_dygraph_mode(
), "We don't support %s in Dygraph mode" % func.__name__
return func(*args, **kwargs)
return __impl__
def _dygraph_only_(func):
def __impl__(*args, **kwargs):
assert in_dygraph_mode(
), "We Only support %s in Dygraph mode, please use fluid.dygraph.guard() as context to run it in Dygraph Mode" % func.__name__
return func(*args, **kwargs)
return __impl__
dygraph_not_support = wrap_decorator(_dygraph_not_support_)
dygraph_only = wrap_decorator(_dygraph_only_)
def _dygraph_tracer():
return _dygraph_tracer_
def _current_expected_place():
return _dygraph_current_expected_place_
def _cpu_num():
if "CPU_NUM" not in os.environ.keys():
if multiprocessing.cpu_count() > 1:
sys.stderr.write(
                '!!! CPU_NUM is not specified; you should set CPU_NUM in the environment variable list.\n'
                'CPU_NUM indicates how many CPUPlace instances are used in the current task.\n'
                'If this parameter is set to N (equal to the number of physical CPU cores), the program may be faster.\n\n'
                'export CPU_NUM={} # for example, set CPU_NUM to the number of physical CPU cores, which is {}.\n\n'
'!!! The default number of CPU_NUM=1.\n'.format(
multiprocessing.cpu_count(), multiprocessing.cpu_count()))
os.environ['CPU_NUM'] = str(1)
cpu_num = os.environ.get('CPU_NUM')
return int(cpu_num)
def _cuda_ids():
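    # Prefer device ids from the FLAGS_selected_gpus environment variable; otherwise use every visible CUDA device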
gpus_env = os.getenv("FLAGS_selected_gpus")
if gpus_env:
device_ids = [int(s) for s in gpus_env.split(",")]
else:
device_ids = six.moves.range(core.get_cuda_device_count())
return device_ids
def is_compiled_with_cuda():
"""
Whether this whl package can be used to run the model on GPU.
Returns (bool): support gpu or not.
Examples:
.. code-block:: python
import paddle.fluid as fluid
support_gpu = fluid.is_compiled_with_cuda()
"""
return core.is_compiled_with_cuda()
def _var_base_to_np(var_base):
"""
    Convert a VarBase to a numpy array
Args:
var_base(VarBase) : the VarBase to convert
Returns (np.ndarray): the np.ndarray contain the value of VarBase
"""
var = var_base._copy_to(core.CPUPlace(), True)
return np.array(var.value().get_tensor())
def cuda_places(device_ids=None):
"""
**Note**:
For multi-card tasks, please use `FLAGS_selected_gpus` environment variable to set the visible GPU device.
The next version will fix the problem with `CUDA_VISIBLE_DEVICES` environment variable.
This function creates a list of :code:`fluid.CUDAPlace` objects.
If :code:`device_ids` is None, environment variable of
:code:`FLAGS_selected_gpus` would be checked first. For example, if
:code:`FLAGS_selected_gpus=0,1,2`, the returned list would
be [fluid.CUDAPlace(0), fluid.CUDAPlace(1), fluid.CUDAPlace(2)].
If :code:`FLAGS_selected_gpus` is not set, all visible
gpu places would be returned according to the :code:`CUDA_VISIBLE_DEVICES` environment variable.
If :code:`device_ids` is not None, it should be the device
ids of GPUs. For example, if :code:`device_ids=[0,1,2]`,
the returned list would be
[fluid.CUDAPlace(0), fluid.CUDAPlace(1), fluid.CUDAPlace(2)].
Parameters:
device_ids (list or tuple of int, optional): list of GPU device ids.
Returns:
list of fluid.CUDAPlace: Created GPU place list.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cuda_places = fluid.cuda_places()
"""
assert core.is_compiled_with_cuda(), \
"Not compiled with CUDA"
if device_ids is None:
device_ids = _cuda_ids()
elif not isinstance(device_ids, (list, tuple)):
device_ids = [device_ids]
return [core.CUDAPlace(dev_id) for dev_id in device_ids]
def cpu_places(device_count=None):
"""
This function creates a list of :code:`fluid.CPUPlace` objects, and returns the created list.
If :code:`device_count` is None, the device count would
be determined by environment variable :code:`CPU_NUM`.
If :code:`CPU_NUM` is not set, the default value is 1,
i.e. CPU_NUM=1.
:code:`CPU_NUM` indicates the number of devices used in the current task.
The running of the program can be accelerated if :code:`CPU_NUM` is the same as the number of physical cores.
Parameters:
device_count (int, optional): device number. Default: None.
Returns:
list of fluid.CPUPlace: Created list of CPU places.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cpu_places = fluid.cpu_places()
"""
if device_count is None:
device_count = _cpu_num()
return [core.CPUPlace()] * device_count
def cuda_pinned_places(device_count=None):
"""
This function creates a list of :code:`fluid.CUDAPinnedPlace` objects.
If :code:`device_count` is None, the device count would
be determined by environment variable :code:`CPU_NUM`.
If :code:`CPU_NUM` is not set, the default value is 1,
i.e. CPU_NUM=1.
:code:`CPU_NUM` indicates the number of devices used in the current task.
The running of the program can be accelerated if :code:`CPU_NUM` is the same as the number of physical cores.
Parameters:
device_count (int, optional): device number. Default: None.
Returns:
list of fluid.CUDAPinnedPlace: Created list of CUDA pinned places.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cuda_pinned_places_cpu_num = fluid.cuda_pinned_places()
# or
cuda_pinned_places = fluid.cuda_pinned_places(1)
"""
assert core.is_compiled_with_cuda(), \
"Not compiled with CUDA"
if device_count is None:
device_count = len(_cuda_ids())
return [core.CUDAPinnedPlace()] * device_count
class NameScope(object):
def __init__(self, name="", parent=None):
self._children = dict()
self._name = name
self._parent = parent
def child(self, prefix):
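        # The first child with a given prefix keeps the plain name; later children get an _N suffix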
if prefix not in self._children:
new_child = NameScope(prefix, self)
self._children[prefix] = [new_child]
else:
new_child = NameScope(prefix + "_%d" % len(self._children[prefix]),
self)
self._children[prefix].append(new_child)
return new_child
def parent(self):
return self._parent
def name(self):
return self._name
_name_scope = NameScope()
@signature_safe_contextmanager
def name_scope(prefix=None):
"""
Generate hierarchical name prefix for the operators.
Note:
        This should only be used for debugging and visualization purposes.
        Don't use it for serious analysis such as graph/program transformations.
Args:
prefix(str, optional): prefix. Default is none.
Examples:
.. code-block:: python
import paddle.fluid as fluid
with fluid.name_scope("s1"):
a = fluid.data(name='data', shape=[None, 1], dtype='int32')
b = a + 1
with fluid.name_scope("s2"):
c = b * 1
with fluid.name_scope("s3"):
d = c / 1
with fluid.name_scope("s1"):
f = fluid.layers.pow(d, 2.0)
with fluid.name_scope("s4"):
g = f - 1
# Op are created in the default main program.
for op in fluid.default_main_program().block(0).ops:
# elementwise_add is created in /s1/
if op.type == 'elementwise_add':
assert op.desc.attr("op_namescope") == '/s1/'
# elementwise_mul is created in '/s1/s2'
elif op.type == 'elementwise_mul':
assert op.desc.attr("op_namescope") == '/s1/s2/'
# elementwise_div is created in '/s1/s3'
elif op.type == 'elementwise_div':
assert op.desc.attr("op_namescope") == '/s1/s3/'
# elementwise_sum is created in '/s4'
elif op.type == 'elementwise_sub':
assert op.desc.attr("op_namescope") == '/s4/'
# pow is created in /s1_1/
elif op.type == 'pow':
assert op.desc.attr("op_namescope") == '/s1_1/'
"""
# TODO(panyx0718): Only [0-9a-z].
# in dygraph we don't need namescope since it will cause mem leak
if not in_dygraph_mode():
assert prefix, "namescope prefix cannot be empty."
global _name_scope
_name_scope = _name_scope.child(prefix)
yield
_name_scope = _name_scope.parent()
else:
yield
def _full_name_scope():
global _name_scope
scope = _name_scope
name = ""
while scope:
name = scope.name() + "/" + name
scope = scope.parent()
return name
def generate_control_dev_var_name():
import random
return CONTROL_DEP_VAR_PREFIX + "@" + str(random.random())
def grad_var_name(var_name):
"""
Returns:
str: gradient name for a certain var name
"""
return var_name + GRAD_VAR_SUFFIX
def convert_np_dtype_to_dtype_(np_dtype):
"""
Convert the data type in numpy to the data type in Paddle
Args:
np_dtype(np.dtype): the data type in numpy.
Returns:
core.VarDesc.VarType: the data type in Paddle.
"""
dtype = np.dtype(np_dtype)
if dtype == np.float32:
return core.VarDesc.VarType.FP32
elif dtype == np.float64:
return core.VarDesc.VarType.FP64
elif dtype == np.float16:
return core.VarDesc.VarType.FP16
elif dtype == np.int32:
return core.VarDesc.VarType.INT32
elif dtype == np.int16:
return core.VarDesc.VarType.INT16
elif dtype == np.int64:
return core.VarDesc.VarType.INT64
elif dtype == np.bool:
return core.VarDesc.VarType.BOOL
elif dtype == np.uint16:
return core.VarDesc.VarType.INT16
elif dtype == np.uint8:
return core.VarDesc.VarType.UINT8
elif dtype == np.int8:
return core.VarDesc.VarType.INT8
else:
raise ValueError("Not supported numpy dtype %s" % dtype)
def dtype_is_floating(dtype):
"""
Check the data type is floating or not.
Args:
dtype(np.dtype|core.VarDesc.VarType): data type.
Could be numpy format or Paddle format
Returns(bool): True if data type is a float value
"""
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
return dtype in [
core.VarDesc.VarType.FP16, core.VarDesc.VarType.FP32,
core.VarDesc.VarType.FP64
]
def _debug_string_(proto, throw_on_error=True):
"""
    Get the debug string of a protobuf message. The message may be uninitialized.
Args:
proto(google.protobuf.message.Message): The protobuf message
throw_on_error(bool): True if raise an error when the protobuf message
is not initialized.
Returns(str): The debug string of the protobuf message
"""
error_fields = list()
if not proto.IsInitialized(error_fields) and throw_on_error:
raise ValueError("{0} are not initialized.\nThe message is {1}:\n".
format(error_fields, proto))
return proto.__str__()
class Variable(object):
"""
**Notes**:
**The constructor of Variable should not be invoked directly.**
**In Static Graph Mode: Please use** `Block.create_var` **to create a Static variable which has no data until being feed.**
**In Dygraph Mode: Please use** :ref:`api_fluid_dygraph_to_variable` **to create a dygraph variable with real data**
In Fluid, every input and output of an OP is a variable. In most
cases, variables are used for holding different kinds of data or training
    labels. A variable belongs to a :ref:`api_guide_Block_en` . Every variable has its own name, and
two variables in different :ref:`api_guide_Block_en` could have the same name.
There are many kinds of variables. Each kind of them has its own attributes
and usages. Please refer to the `framework.proto <https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/framework.proto>`_ for details.
    Most of a Variable's member variables can be set to None. This means
    the member is not available or will be specified later.
Examples:
In Static Graph Mode:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
In `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ Mode:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
new_variable = fluid.dygraph.to_variable(np.arange(10))
"""
def __init__(self,
block,
type=core.VarDesc.VarType.LOD_TENSOR,
name=None,
shape=None,
dtype=None,
lod_level=None,
capacity=None,
persistable=None,
error_clip=None,
stop_gradient=False,
is_data=False,
need_check_feed=False,
belong_to_optimizer=False,
**kwargs):
self.block = block
if name is None:
name = unique_name.generate('_generated_var')
if dtype is not None:
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
self.belong_to_optimizer = belong_to_optimizer
if in_dygraph_mode():
# record vars in tracer rather than blocks
self._ivar = kwargs.get("ivar", None)
self.stop_gradient_ = kwargs.get("stop_gradient", True)
if not self._ivar:
self._ivar = core.VarBase(
name, type
if type else core.VarDesc.VarType.LOD_TENSOR, dtype
if dtype else core.VarDesc.VarType.FP32,
list(shape) if shape else [], True
if persistable else False)
if persistable:
_dygraph_tracer().trace_var(name, self)
self.op = None
else:
self.error_clip = error_clip
is_new_var = False
name = cpt.to_text(name)
self.desc = self.block.desc.find_var(cpt.to_bytes(name))
if self.desc is None:
self.desc = self.block.desc.var(cpt.to_bytes(name))
is_new_var = True
if is_new_var:
self.desc.set_type(type)
elif self.desc.type() != type:
raise ValueError(
"Variable {0} has been created before. The "
"previous type is {1}; the new type is {2}. They"
" are not matched".format(self.name, self.desc.type(),
type))
if shape is not None:
if is_new_var:
self.desc.set_shape(shape)
else:
old_shape = self.shape
shape = tuple(shape)
if shape != old_shape:
raise ValueError(
"Variable {0} has been created before. the previous "
"shape is {1}; the new shape is {2}. They are not "
"matched.".format(self.name, old_shape, shape))
if dtype is not None:
if is_new_var:
self.desc.set_dtype(dtype)
else:
old_dtype = self.dtype
if dtype != old_dtype:
raise ValueError(
"Variable {0} has been created before. "
"The previous data type is {1}; the new "
"data type is {2}. They are not "
"matched.".format(self.name, old_dtype, dtype))
if lod_level is not None:
if is_new_var:
self.desc.set_lod_level(lod_level)
else:
if lod_level != self.lod_level:
raise ValueError(
"Variable {0} has been created before. "
"The previous lod_level is {1}; the new "
"lod_level is {2}. They are not "
"matched".format(self.name, self.lod_level,
lod_level))
if persistable is not None:
if is_new_var:
self.desc.set_persistable(persistable)
else:
if persistable != self.persistable:
raise ValueError(
"Variable {0} has been created before."
"The previous persistable is {1}; the new "
"persistable is {2}. They are not matched".format(
self.name, self.persistable, persistable))
if need_check_feed and is_new_var:
self.desc.set_need_check_feed(need_check_feed)
if capacity is not None:
if is_new_var:
self.desc.set_capacity(capacity)
else:
# TODO(abhinavarora) : Compare with set capacity once,
# get_capacity is implemented
pass
self.block.vars[name] = self
self.op = None
self._stop_gradient = stop_gradient
self.is_data = is_data
@dygraph_only
def detach(self):
"""
**Notes**:
            **This API is ONLY available in Dygraph mode**
Returns a new Variable, detached from the current graph.
Returns:
( :ref:`api_guide_Variable_en` | dtype is same as current Variable): The detached Variable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph import FC
import numpy as np
data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32')
with fluid.dygraph.guard():
fc = FC("fc", 64, num_flatten_dims=2)
data = to_variable(data)
x = fc(data)
y = x.detach()
"""
if in_dygraph_mode():
new_var = self._cloneVar()
self.block.append_op(
type="assign",
inputs={'X': [self]},
outputs={'Out': [new_var]},
stop_gradient=True)
return new_var
else:
raise AttributeError("static graph model DO NOT supprt detach")
@dygraph_only
def numpy(self):
"""
**Notes**:
            **This API is ONLY available in Dygraph mode**
        Returns a numpy array showing the value of the current :ref:`api_guide_Variable_en`
Returns:
ndarray: The numpy value of current Variable.
Returns type:
ndarray: dtype is same as current Variable
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph import FC
import numpy as np
data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32')
with fluid.dygraph.guard():
fc = FC("fc", 64, num_flatten_dims=2)
data = to_variable(data)
x = fc(data)
print(x.numpy())
"""
if not self._ivar.value().get_tensor()._is_initialized():
raise ValueError("%s is Empty, Please check if it has no data in" %
self.name)
new_ivar = self._ivar._copy_to(core.CPUPlace(), True)
return np.array(new_ivar.value().get_tensor())
@dygraph_only
def set_value(self, value):
"""
**Notes**:
            **This API is ONLY available in Dygraph mode**
Set a new value for this Variable.
Args:
value (Variable|np.ndarray): the new value.
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph import FC
import numpy as np
data = np.ones([3, 32, 32], dtype='float32')
with fluid.dygraph.guard():
fc = fluid.dygraph.FC("fc", 4)
t = to_variable(data)
fc(t) # call with default weight
custom_weight = np.random.randn(1024, 4).astype("float32")
fc.weight.set_value(custom_weight) # change existing weight
out = fc(t) # call with different weight
"""
assert isinstance(value, (Variable, np.ndarray, core.VarBase)), \
"Variable set_value function, arguments type only support Variable, numpy, VarBase"
value_np = value
if isinstance(value, Variable):
value_np = value.numpy()
elif isinstance(value, core.VarBase):
value_np = _var_base_to_np(value)
self_tensor = self._ivar.value().get_tensor()
self_tensor_np = np.array(self_tensor)
assert self_tensor_np.shape == value_np.shape, \
"Variable Shape not match, Variable [ {} ] need tensor with shape {} but load set tensor with shape {}".format( self._ivar.name, self_tensor_np.shape, value_np.shape)
assert self_tensor_np.dtype == value_np.dtype, \
"Variable dtype not match, Variable [ {} ] need tensor with dtype {} but load tensor with dtype {}".format( self._ivar.name, self_tensor_np.dtype, value_np.dtype)
self_tensor.set(value_np, _current_expected_place())
@dygraph_only
def backward(self, backward_strategy=None):
"""
**Notes**:
            **This API is ONLY available in Dygraph mode**
        Run backward on the current graph, starting from the current Variable
Args:
backward_strategy( :ref:`api_fluid_dygraph_BackwardStrategy` ): The Backward Strategy to run backward
Returns:
NoneType: None
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
x = np.ones([2, 2], np.float32)
with fluid.dygraph.guard():
inputs2 = []
for _ in range(10):
tmp = fluid.dygraph.base.to_variable(x)
                    # If we don't set tmp's stop_gradient to False, no path to the loss
                    # will have a gradient, since nothing on the path requires one.
tmp.stop_gradient=False
inputs2.append(tmp)
ret2 = fluid.layers.sums(inputs2)
loss2 = fluid.layers.reduce_sum(ret2)
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = True
loss2.backward(backward_strategy)
"""
if in_dygraph_mode():
from .dygraph import BackwardStrategy
if backward_strategy is None:
backward_strategy = BackwardStrategy()
backward_strategy.sort_sum_gradient = False
self._ivar._run_backward(backward_strategy, _dygraph_tracer())
else:
raise ValueError(
"Variable.backward() is only avaliable in DyGraph mode")
@dygraph_only
def gradient(self):
"""
**Notes**:
            **This API is ONLY available in Dygraph mode**
Get the Gradient of Current Variable
Returns:
ndarray: Numpy value of the gradient of current Variable
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
x = np.ones([2, 2], np.float32)
with fluid.dygraph.guard():
inputs2 = []
for _ in range(10):
tmp = fluid.dygraph.base.to_variable(x)
tmp.stop_gradient=False
inputs2.append(tmp)
ret2 = fluid.layers.sums(inputs2)
loss2 = fluid.layers.reduce_sum(ret2)
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = True
loss2.backward(backward_strategy)
print(loss2.gradient())
"""
if self._ivar._grad_ivar() is None:
raise ValueError("%s has no grad, Please set Variable.stop_gradient=False, or " \
"check if this is the first and only variable need grad, if so, please set its pre-Variable's " \
"stop_gradient=False, to make sure it has gradient " % self.name)
if not self._ivar._grad_ivar().value().get_tensor()._is_initialized():
raise ValueError(
"%s's Grad is Empty, Please check if it has no data in" %
self.name)
new_ivar = self._ivar._grad_ivar()._copy_to(core.CPUPlace(), True)
return np.array(new_ivar.value().get_tensor())
@dygraph_only
def clear_gradient(self):
"""
**Notes**:
            **1. This API is ONLY available in Dygraph mode**
            **2. Use it only when the Variable has a gradient; normally this is used for Parameters, since other temporary Variables are deleted by Python's GC**
Clear (set to ``0`` ) the Gradient of Current Variable
Returns: None
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
x = np.ones([2, 2], np.float32)
with fluid.dygraph.guard():
inputs2 = []
for _ in range(10):
tmp = fluid.dygraph.base.to_variable(x)
tmp.stop_gradient=False
inputs2.append(tmp)
ret2 = fluid.layers.sums(inputs2)
loss2 = fluid.layers.reduce_sum(ret2)
backward_strategy = fluid.dygraph.BackwardStrategy()
backward_strategy.sort_sum_gradient = True
loss2.backward(backward_strategy)
print(loss2.gradient())
loss2.clear_gradient()
print("After clear {}".format(loss2.gradient()))
"""
self._ivar._clear_gradient()
def __str__(self):
return self.to_string(True)
def to_string(self, throw_on_error, with_details=False):
"""
Get debug string.
Args:
throw_on_error (bool): True if raise an exception when self is not initialized.
with_details (bool): more details about variables and parameters (e.g. trainable, optimize_attr, ...) will be printed when with_details is True. Default value is False;
Returns:
str: The debug string.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print(new_variable.to_string(True))
print("=============with detail===============")
print(new_variable.to_string(True, True))
"""
if in_dygraph_mode():
# TODO(panyx0718): add more dygraph debug info.
tensor = self._ivar.value().get_tensor()
if tensor._is_initialized():
return 'name %s, dtype: %s shape: %s %s' % (
self.name, self.dtype, self.shape, str(tensor))
else:
return 'name %s, shape: %s, not inited' % (self.name,
self.shape)
assert isinstance(throw_on_error, bool) and isinstance(with_details,
bool)
protostr = self.desc.serialize_to_string()
proto = framework_pb2.VarDesc.FromString(six.binary_type(protostr))
res_str = _debug_string_(proto, throw_on_error)
if with_details:
additional_attr = ("error_clip", "stop_gradient")
for attr_name in additional_attr:
res_str += "%s: %s\n" % (attr_name,
cpt.to_text(getattr(self, attr_name)))
return res_str
__repr__ = __str__
@property
def stop_gradient(self):
"""
        Indicating whether gradient computation stops at the current Variable
        **Notes: This Property defaults to** ``True`` **in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode, while a Parameter's default value is False. However, in Static Graph Mode every Variable's default stop_gradient value is** ``False``
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
value0 = np.arange(26).reshape(2, 13).astype("float32")
value1 = np.arange(6).reshape(2, 3).astype("float32")
value2 = np.arange(10).reshape(2, 5).astype("float32")
fc = fluid.FC("fc1", size=5, dtype="float32")
fc2 = fluid.FC("fc2", size=3, dtype="float32")
a = fluid.dygraph.to_variable(value0)
b = fluid.dygraph.to_variable(value1)
c = fluid.dygraph.to_variable(value2)
out1 = fc(a)
out2 = fc2(b)
out1.stop_gradient = True
out = fluid.layers.concat(input=[out1, out2, c], axis=1)
out.backward()
assert (fc._w.gradient() == 0).all()
assert (out1.gradient() == 0).all()
"""
if in_dygraph_mode():
return self._ivar.stop_gradient
else:
return self._stop_gradient
@stop_gradient.setter
def stop_gradient(self, s):
if in_dygraph_mode():
self._ivar.stop_gradient = s
else:
self._stop_gradient = s
@property
def persistable(self):
"""
        Indicating whether the current Variable should be kept alive long-term
        **Notes: This Property will be deprecated; this API is just to help users understand the concept**
**1. All Variable's persistable is** ``False`` **except Parameters.**
**2. In** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode, this property should not be changed**
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print("persistable of current Var is: {}".format(new_variable.persistable))
"""
if in_dygraph_mode():
return self._ivar.persistable
else:
return self.desc.persistable()
@persistable.setter
def persistable(self, p):
if in_dygraph_mode():
logging.warn(
"There will be no use to set persistable in Dygraph Mode, since "
"you can just do it by hold it as normal Python variable")
else:
self.desc.set_persistable(p)
@property
def name(self):
"""
Indicating name of current Variable
        **Notes: If two or more Variables share the same name in the same** :ref:`api_guide_Block_en` **, they will share content in non-** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode. This is how Parameter sharing is achieved**
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print("name of current Var is: {}".format(new_variable.name))
"""
if in_dygraph_mode():
return self._ivar.name
else:
return cpt.to_text(self.desc.name())
@name.setter
def name(self, new_name):
if in_dygraph_mode():
self._ivar.name = new_name
else:
self.desc.set_name(new_name)
@property
def shape(self):
"""
Indicating shape of current Variable
**Notes: This is a read-only property**
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print("shape of current Var is: {}".format(new_variable.shape))
"""
# convert to tuple, make it as same as numpy API.
if in_dygraph_mode():
return self._ivar.shape
else:
return tuple(self.desc.shape())
@property
def dtype(self):
"""
Indicating data type of current Variable
**Notes: This is a read-only property**
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print("Dtype of current Var is: {}".format(new_variable.dtype))
"""
if in_dygraph_mode():
return self._ivar.dtype
else:
return self.desc.dtype()
@property
@dygraph_not_support
def lod_level(self):
"""
Indicating ``LoD`` info of current Variable, please refer to :ref:`api_fluid_LoDTensor_en` to check the meaning
of ``LoD``
**Notes**:
**1. This is a read-only property**
            **2. This property is not supported in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode; its value should be** ``0(int)``
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print("LoD Level of current Var is: {}".format(new_variable.lod_level))
"""
# TODO(minqiyang): Support lod_level in dygraph mode
if in_dygraph_mode():
raise Exception("Dygraph model DO NOT supprt lod")
return self.desc.lod_level()
@property
def type(self):
"""
Indicating Type of current Variable
**Notes: This is a read-only property**
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print("Type of current Var is: {}".format(new_variable.type))
"""
if in_dygraph_mode():
return self._ivar.type
else:
return self.desc.type()
def _set_error_clip(self, error_clip):
"""
Set the error_clip.
Args:
error_clip(BaseErrorClipAttr) : The new error_clip.
Returns:
None
"""
self.error_clip = error_clip
def _slice_indices(self, slice, length):
"""
Reference implementation for the slice.indices method.
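        The concrete numbers below are an illustrative sketch only; they mirror
        Python's built-in ``slice.indices`` and assume a hypothetical Variable ``v``.
        Examples:
            .. code-block:: python
                # v._slice_indices(slice(1, 10, 2), 5)       -> (1, 5, 2)
                # v._slice_indices(slice(None, None, -1), 5) -> (4, -1, -1)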
"""
# Compute step and length as integers.
step = 1 if slice.step is None else slice.step
# Raise ValueError for negative length or zero step.
if length < 0:
raise ValueError("length should not be negative")
if step == 0:
raise ValueError("slice step cannot be zero")
# Find lower and upper bounds for start and stop.
lower = -1 if step < 0 else 0
upper = length - 1 if step < 0 else length
# Compute start.
if slice.start is None:
start = upper if step < 0 else lower
else:
start = slice.start
start = max(start + length, lower) if start < 0 else min(start,
upper)
# Compute stop.
if slice.stop is None:
stop = lower if step < 0 else upper
else:
stop = slice.stop
stop = max(stop + length, lower) if stop < 0 else min(stop, upper)
return start, stop, step
def _detectEllipsis(self, item):
has_ellipsis = False
start = 0
end = len(self.shape)
for index, o in enumerate(item):
if o is Ellipsis:
if has_ellipsis:
raise ValueError("Index can have one ellipsis only.")
has_ellipsis = True
start = index
else:
if has_ellipsis:
end = index
return has_ellipsis, start, end
def _reconstructSliceinfo(self, item):
has_ellipsis, start, end = self._detectEllipsis(item)
if has_ellipsis:
newitem = []
for i in range(start):
newitem.append(item[i])
for i in range(start, end):
newitem.append(slice(None, None, None))
for i in range(end, len(item)):
newitem.append(item[i])
return newitem
else:
return None
def _detectContinuesSlice(self, item):
starts = []
ends = []
for index, o in enumerate(item):
if isinstance(o, int):
start = int(o)
                # bounds-check the integer index value against this dimension's extent
                if (start > 0 and start >= self.shape[index]) \
                        or (start < 0 and (start + self.shape[index]) < 0):
raise IndexError("invalid index")
start = max(start + self.shape[index], 0) if start < 0 else min(
start, self.shape[index])
starts.append(start)
ends.append(start + 1)
elif isinstance(o, slice):
start, stop, step = self._slice_indices(o, self.shape[index])
if step == 1 or step == -1:
starts.append(start)
ends.append(stop)
else:
return False, None
else:
raise IndexError("Valid index accept int or slice or ellipsis")
return True, [starts, ends]
def _cloneVar(self, copy=False):
if not copy:
return self.block.create_var(
name=unique_name.generate_with_ignorable_key(self.name),
dtype=self.dtype)
else:
return self
def _sliceVar(self, axes, starts, ends):
new_var = self._cloneVar()
self.block.append_op(
type="slice",
inputs={'Input': [self]},
outputs={'Out': [new_var]},
attrs={'axes': axes,
'starts': starts,
'ends': ends})
return new_var
def _concatVar(self, inputs, axis):
new_var = self._cloneVar()
self.block.append_op(
type="concat",
inputs={'X': inputs},
outputs={'Out': [new_var]},
attrs={'axis': axis, })
return new_var
def _sliceAndConcatVar(self, item, axis):
if isinstance(item, slice):
if self.shape[axis] < 0:
return self._cloneVar(True)
start, stop, step = self._slice_indices(item, self.shape[axis])
if step == 1:
return self._sliceVar([axis], [start], [stop])
else:
vars = []
if step > 0:
while start < stop:
vars.append(
self._sliceVar([axis], [start], [start + 1]))
start += step
else:
while start > stop:
vars.append(
self._sliceVar([axis], [start], [start + 1]))
start += step
return self._concatVar(vars, axis)
elif isinstance(item, int):
if self.shape[axis] < 0:
return self._cloneVar(True)
index = int(item)
if (index > 0 and index >= self.shape[axis]) \
or (index < 0 and (index + self.shape[axis]) < 0):
raise IndexError("invalid index")
return self._sliceVar([axis], [index], [index + 1])
else:
raise IndexError("Valid index accept int or slice or tuple")
def __getitem__(self, item):
"""
Slice the variable.
Args:
item(int/slice/tuple) : the index.
Returns:
Sliced variable
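        The following sketch is illustrative only; the program, block and
        variable names are assumptions created just for this example.
        Examples:
            .. code-block:: python
                import paddle.fluid as fluid
                cur_program = fluid.Program()
                cur_block = cur_program.current_block()
                x = cur_block.create_var(name="X", shape=[3, 23, 48], dtype='float32')
                y = x[0, :, :]      # pick the first entry of the first dimension
                z = x[:, 1:10, :]   # slice a contiguous range of the second dimension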
"""
if not isinstance(item, tuple):
item = [item]
decrease_axis = []
slice_axis = []
slice_start = []
slice_end = []
reverse_axis = []
def fill_constant(shape, dtype, value, force_cpu=False, out=None):
self.block.append_op(
type='fill_constant',
inputs={},
outputs={'Out': [out]},
attrs={
'shape': shape,
'dtype': out.dtype,
'value': float(value),
'force_cpu': force_cpu or force_init_on_cpu()
},
stop_gradient=True)
out.stop_gradient = True
return out
for dim, slice_item in enumerate(item):
if isinstance(slice_item, slice):
start = slice_item.start
end = slice_item.stop
step = slice_item.step if slice_item.step else 1
assert (step == 1 or step == -1)
if step == -1:
reverse_axis.append(dim)
assert (start is None and end is None)
if start is None and end is None:
continue
if start is None:
start = 0
if end is None:
end = 10000000
slice_axis.append(dim)
slice_start.append(start)
slice_end.append(end)
else:
decrease_axis.append(dim)
slice_axis.append(dim)
slice_start.append(slice_item)
if isinstance(slice_item, Variable):
temp_1 = self.block.create_var(dtype='int32')
fill_constant([1], 'int32', 1, force_cpu=True, out=temp_1)
temp_end = self.block.create_var(dtype='int32')
self.block.append_op(
type='elementwise_add',
inputs={'X': slice_item,
'Y': temp_1},
outputs={'Out': temp_end},
attrs={'axis': -1})
slice_end.append(temp_end)
else:
slice_end.append(slice_item + 1
if slice_item != -1 else 10000000)
def contain_var(one_list):
for ele in one_list:
if isinstance(ele, Variable):
return True
return False
def get_new_list_tensor(old_list):
new_list_tensor = []
for dim in old_list:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_list_tensor.append(dim)
else:
assert (isinstance(dim, int))
temp_out = self.block.create_var(dtype='int32')
fill_constant(
[1], 'int32', dim, force_cpu=True, out=temp_out)
new_list_tensor.append(temp_out)
return new_list_tensor
inputs = {'Input': [self]}
attrs = {
'axes': slice_axis,
'starts': [],
'ends': [],
'decrease_axis': decrease_axis
}
infer_flags = list(1 for i in range(len(slice_axis)))
# starts
if not contain_var(slice_start):
attrs['starts'] = slice_start
else:
inputs['StartsTensorList'] = get_new_list_tensor(slice_start)
for i, dim in enumerate(slice_start):
if isinstance(dim, Variable):
attrs['starts'].append(-1)
infer_flags[i] = -1
else:
attrs['starts'].append(dim)
# ends
if not contain_var(slice_end):
attrs['ends'] = slice_end
else:
inputs['EndsTensorList'] = get_new_list_tensor(slice_end)
for i, dim in enumerate(slice_end):
if isinstance(dim, Variable):
attrs['ends'].append(-1)
infer_flags[i] = -1
else:
attrs['ends'].append(dim)
# infer_flags
attrs['infer_flags'] = infer_flags
out = self
if len(slice_axis) > 0:
# append slice_op here
slice_out_var = self.block.create_var(
name=unique_name.generate_with_ignorable_key(self.name +
"_slice"),
dtype=self.dtype)
self.block.append_op(
type="slice",
inputs=inputs,
outputs={'Out': [slice_out_var]},
attrs=attrs)
out = slice_out_var
if len(reverse_axis) > 0:
reverse_out_var = self.block.create_var(
name=unique_name.generate_with_ignorable_key(self.name +
"_slice_reverse"),
dtype=self.dtype)
self.block.append_op(
type="reverse",
inputs={'X': out},
outputs={'Out': [reverse_out_var]},
attrs={'axis': reverse_axis})
out = reverse_out_var
return out
def get_all_op_protos():
"""
Get all registered op proto from PaddlePaddle C++ end.
Returns:
list: list of OpProto.
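    The sketch below is illustrative; importing this helper directly from
    ``paddle.fluid.framework`` is an assumption for the example.
    Examples:
        .. code-block:: python
            from paddle.fluid.framework import get_all_op_protos
            # list the registered operator type names
            for proto in get_all_op_protos():
                print(proto.type)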
"""
protostrs = core.get_all_op_protos()
ret_values = []
for pbstr in protostrs:
op_proto = framework_pb2.OpProto.FromString(six.binary_type(pbstr))
ret_values.append(op_proto)
return ret_values
class OpProtoHolder(object):
"""
A global variable to hold all OpProtos from C++ as a map
"""
@classmethod
def instance(cls):
if not hasattr(cls, '_instance'):
cls._instance = cls()
return cls._instance
def __init__(self):
assert not hasattr(
self.__class__,
'_instance'), 'Please use `instance()` to get OpProtoHolder object!'
op_protos = get_all_op_protos()
self.op_proto_map = {}
for proto in op_protos:
self.op_proto_map[proto.type] = proto
def get_op_proto(self, type):
"""
Get OpProto by a type string.
Args:
            type(str): The type string under which the operator was registered in the C++ side.
        Returns:
            framework_pb2.OpProto: The OpProto of the given operator type.
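        Illustrative sketch; it assumes the "sum" operator is registered, which
        is the case for a standard PaddlePaddle build.
        Examples:
            .. code-block:: python
                from paddle.fluid.framework import OpProtoHolder
                proto = OpProtoHolder.instance().get_op_proto("sum")
                print(proto.type)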
"""
if type not in self.op_proto_map:
raise ValueError("Operator \"%s\" has not been registered." % type)
return self.op_proto_map[type]
def update_op_proto(self):
op_protos = get_all_op_protos()
for proto in op_protos:
if proto.type not in self.op_proto_map:
self.op_proto_map[proto.type] = proto
@staticmethod
def generated_op_attr_names():
return {
core.op_proto_and_checker_maker.kOpRoleAttrName(),
core.op_proto_and_checker_maker.kOpRoleVarAttrName(),
core.op_proto_and_checker_maker.kOpNameScopeAttrName(),
core.op_proto_and_checker_maker.kOpCreationCallstackAttrName()
}
class Operator(object):
"""
    In Fluid, all operations are represented by Operators, and an Operator
    is regarded as a built-in instruction of a Block. Users can use these
    built-in instructions to describe their neural networks.
Args:
block(Block): The block has the current operator.
desc(core.OpDesc): The protobuf description of Operator.
type(str): The type of operator. Default None.
        inputs(dict): The inputs of this Operator. It is a dictionary: for every
            element, the key is the input parameter name, and the value is a list of
            variables. Default None.
        outputs(dict): The outputs of this Operator. It is a dictionary: for
            every element, the key is the output parameter name, and the value is a list
            of variables. Default None.
        attrs(dict): The attributes of this Operator. It is a dictionary: for
            every element, the key is the attribute name, and the value is the attribute value.
            The attribute type should be the same as the type registered in the C++ side.
            Default None.
Returns:
Operator: The initialized Operator.
Raises:
        ValueError: If the passed inputs, outputs and attrs don't match those of
            the Operator registered in the C++ side.
Notes:
The constructor of operator should not be invoked directly. Use
Block.append_op or Block._prepend_op instead.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
# var1 += var2 + var3
cur_block.append_op(type="sum",
inputs={"X": [var1, var2, var3]},
outputs={"Out": [var1]})
"""
OP_WITHOUT_KERNEL_SET = {
'feed', 'fetch', 'recurrent', 'go', 'rnn_memory_helper_grad',
'conditional_block', 'while', 'send', 'recv', 'listen_and_serv',
'fl_listen_and_serv', 'ncclInit', 'select', 'checkpoint_notify',
'gen_nccl_id', 'c_gen_nccl_id', 'c_comm_init', 'c_sync_calc_stream',
'c_sync_comm_stream'
}
def __init__(self,
block,
desc,
type=None,
inputs=None,
outputs=None,
attrs=None):
if in_dygraph_mode():
if type is None:
raise ValueError(
"`type` to initialized an Operator can not be None.")
self._type = type
self.attrs = attrs if attrs else {}
else:
self.block = block
self.desc = desc
# note: not add self.attrs here:
# https://github.com/PaddlePaddle/Paddle/pull/12583#pullrequestreview-145093173
op_attrs = attrs
if op_attrs is None:
op_attrs = dict()
del attrs
op_maker = core.op_proto_and_checker_maker
if op_maker.kOpRoleAttrName() not in op_attrs:
op_attrs[op_maker.kOpRoleAttrName(
)] = self.block.program._op_role
role_var_name = op_maker.kOpRoleVarAttrName()
if len(self.block.program.
_op_role_var) != 0 and role_var_name not in op_attrs:
op_attrs[role_var_name] = self.block.program._op_role_var
if role_var_name in op_attrs and len(op_attrs[role_var_name]) == 0:
del op_attrs[role_var_name]
if len(self.desc.type()) != 0:
return
if type is None:
raise ValueError(
"`type` to initialized an Operator can not be None.")
else:
callstack_var_name = op_maker.kOpCreationCallstackAttrName()
op_attrs[callstack_var_name] = list(
reversed(traceback.format_stack()))[1:]
self.desc.set_type(type)
proto = OpProtoHolder.instance().get_op_proto(type)
namescope_var_name = op_maker.kOpNameScopeAttrName()
op_attrs[namescope_var_name] = _full_name_scope()
def find_name(var_list, name):
for var_name in var_list:
if var_list[var_name] is not None and var_name == name:
return True
return False
if inputs is not None:
for in_proto in proto.inputs:
found = find_name(inputs, in_proto.name)
assert found or in_proto.dispensable, "Input {} not found".format(
in_proto.name)
if found:
in_args = inputs[in_proto.name]
if not isinstance(in_args, list):
in_args = [in_args]
if not in_proto.duplicable and len(in_args) > 1:
raise ValueError(
"Input %s expects only one input, but %d are given."
% (in_proto.name, len(in_args)))
in_arg_names = []
for index, arg in enumerate(in_args):
if isinstance(arg, six.string_types):
in_arg_names.append(arg)
elif isinstance(arg, six.binary_type):
in_arg_names.append(arg.decode())
elif isinstance(arg, Variable):
in_arg_names.append(cpt.to_text(arg.name))
else:
raise ValueError(
"not suprt args type , should be[ string_type, binary_type, Varibale]"
)
self.desc.set_input(in_proto.name, in_arg_names)
else:
self.desc.set_input(in_proto.name, [])
if outputs is not None:
for m in proto.outputs:
if (m.name not in outputs) and m.dispensable:
continue
if not ((m.name in outputs) or m.dispensable):
raise ValueError(("Incorrect setting for output(s) of "
"operator \"%s\", should set: [%s].")
% (type, m.name))
for out_proto in proto.outputs:
if out_proto.name not in outputs:
continue
out_args = outputs[out_proto.name]
if not isinstance(out_args, list):
out_args = [out_args]
if not out_proto.duplicable and len(out_args) > 1:
raise ValueError(
"Output %s expects only one output, but %d are given."
% (out_proto.name, len(out_args)))
out_arg_names = []
for arg in out_args:
out_arg_names.append(cpt.to_text(arg.name))
# TODO(minqiyang): could we remove variable's op in static mode?
if not in_dygraph_mode():
arg.op = self
self.desc.set_output(out_proto.name, out_arg_names)
if op_attrs is not None:
if not isinstance(op_attrs, dict):
raise TypeError("'attrs' should be a dict.")
for attr in proto.attrs:
attr_name = attr.name
if (attr_name not in op_attrs) or (
op_attrs[attr_name] is None):
continue
attr_val = op_attrs[attr_name]
self._update_desc_attr(attr_name, attr_val)
self.desc.check_attrs()
if self._has_kernel(type):
self.desc.infer_var_type(self.block.desc)
self.desc.infer_shape(self.block.desc)
def _has_kernel(self, op_type):
return op_type not in self.OP_WITHOUT_KERNEL_SET
def to_string(self, throw_on_error):
"""
Get debug string.
Args:
throw_on_error(bool): Whether to raise exception if self is not
initialized.
Returns:
str: The debug string.
"""
protostr = self.desc.serialize_to_string()
proto = framework_pb2.OpDesc.FromString(six.binary_type(protostr))
return _debug_string_(proto, throw_on_error)
def __str__(self):
return self.to_string(True)
__repr__ = __str__
@property
def type(self):
if in_dygraph_mode():
return self._type
else:
return self.desc.type()
def input(self, name):
"""
Get the input arguments according to the input parameter name.
Args:
name(str): The input parameter name.
Returns:
            list: the list of argument names associated with \
                the specific parameter name.
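        Illustrative sketch; ``op`` below is an assumed, already constructed
        Operator that has an input parameter named "X".
        Examples:
            .. code-block:: python
                print(op.input("X"))  # e.g. ['x_var_name']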
"""
return self.desc.input(name)
def _rename_input(self, old_name, new_name):
"""
Rename the `old_name` to `new_name`.
Args:
old_name(str): The old name of the Operator's input.
new_name(str): The new name of the Operator's input.
Returns:
None
"""
self.desc._rename_input(old_name, new_name)
def _rename_output(self, old_name, new_name):
"""
Rename the `old_name` to `new_name`.
Args:
old_name(str): The old name of the Operator's output.
new_name(str): The new name of the Operator's output.
Returns:
None
"""
self.desc._rename_output(old_name, new_name)
@property
def input_names(self):
return self.desc.input_names()
@property
def input_arg_names(self):
return self.desc.input_arg_names()
@property
def output_arg_names(self):
return self.desc.output_arg_names()
def output(self, name):
"""
Get output arguments by the output parameter name.
Args:
name(str): The output parameter name.
Returns:
list: return the list of argument names associated with \
the specific parameter name.
"""
return self.desc.output(name)
@property
def output_names(self):
return self.desc.output_names()
@property
def idx(self):
for i, op in enumerate(self.block.ops):
if op == self:
return i
raise ValueError(
"Can't find op itself in it's block. It could be a bug of Paddle.")
def has_attr(self, name):
"""
Whether this Operator has the attribute with name or not.
Args:
name(str): the attribute name.
Returns:
bool: True if has this attribute.
"""
return self.desc.has_attr(name)
def attr_type(self, name):
"""
Get the type of attribute by attribute's name.
Args:
name(str): the attribute name.
Returns:
core.AttrType: the attribute type.
"""
return self.desc.attr_type(name)
def _set_attr(self, name, val):
"""
Set the value of attribute by attribute's name.
Args:
name(str): the attribute name.
val(bool|int|str|float|list): the value of the attribute.
Raises:
ValueError: If the type of value doesn't match with desc.attr_type(name).
"""
self._update_desc_attr(name, val)
def _remove_attr(self, name):
self.desc.remove_attr(name)
def _update_desc_attr(self, name, val):
"""
Update the value of desc's attribute by attribute's name.
Args:
name(str): the attribute name.
val(bool|int|str|float|list): the value of the attribute.
Raises:
ValueError: If the type of value doesn't match with desc.attr_type(name).
"""
if isinstance(val, Block):
self.desc.set_block_attr(name, val.desc)
elif isinstance(val, list) and val and all(
isinstance(v, Block) for v in val):
self.desc.set_blocks_attr(name, [v.desc for v in val])
elif isinstance(val, core.BlockDesc) or \
isinstance(val, core.ProgramDesc):
self.desc.set_serialized_attr(name, val.serialize_to_string())
else:
self.desc._set_attr(name, val)
@property
def attr_names(self):
return self.desc.attr_names()
def attr(self, name):
"""
Get the attribute by name.
Args:
name(str): the attribute name.
Returns:
bool|int|str|float|list: The attribute value. The return value
can be any valid attribute type.
"""
return self.desc.attr(name)
def _block_attr_id(self, name):
"""
Get the block attribute's id by name.
Args:
name(str): the attribute name.
Returns:
int: the block index.
"""
return self.desc._block_attr_id(name)
def _block_attr(self, name):
"""
Get the block attribute by name.
Args:
name(str): the attribute name.
Returns:
block: the block attribute.
"""
id = self._block_attr_id(name)
assert (id >= 0 and id < len(self.block.program.blocks))
return self.block.program.blocks[id]
def _blocks_attr(self, name):
"""
Get the blocks attribute by name.
Args:
name(str): the attribute name.
Returns:
list: list of the blocks attribute.
"""
attrs = []
for i in self._blocks_attr_ids(name):
assert (i >= 0 and i < len(self.block.program.blocks))
attrs.append(self.block.program.blocks[i])
return attrs
def _blocks_attr_ids(self, name):
"""
Get the blocks attribute's ids by name.
Args:
name(str): the attribute name.
Returns:
list: list of the blocks ids.
"""
return self.desc._blocks_attr_ids(name)
def all_attrs(self):
"""
Get the attribute dict.
Returns:
dict: The Operator's attribute dict, name->attr.
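        Illustrative sketch; ``op`` below is an assumed, already constructed
        Operator instance.
        Examples:
            .. code-block:: python
                for attr_name, attr_value in op.all_attrs().items():
                    print(attr_name, attr_value)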
"""
attr_names = self.attr_names
attr_map = {}
for n in attr_names:
attr_type = self.desc.attr_type(n)
if attr_type == core.AttrType.BLOCK:
attr_map[n] = self._block_attr(n)
continue
if attr_type == core.AttrType.BLOCKS:
attr_map[n] = self._blocks_attr(n)
continue
attr_map[n] = self.attr(n)
return attr_map
class Block(object):
"""
    In Fluid, a Program consists of multiple Blocks, and a Block stores
    VarDesc and OpDesc. In a specific Block, a VarDesc has a unique name.
    One block can have child blocks, and a child block's name scope
    inherits the parent's, so that an OpDesc in a child block can reference
    a VarDesc that is stored in the parent block.
Please reference the framework.proto for details.
Args:
program(Program): The Program that the Block belongs to.
idx(int): The block's id in the Program.
Notes:
The constructor of Block should not be invoked directly. Please
use `Program._create_block()` to create a block.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
var = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
cur_block.append_op(type="abs",
inputs={"X": [var]},
outputs={"Out": [var]})
"""
def __init__(self, program, idx):
self.desc = program.desc.block(idx)
self.vars = collections.OrderedDict() # var_name --> var
self.ops = list() # operator list
self.program = program
self.removed_vars = collections.OrderedDict()
def __str__(self):
return self.to_string(True)
def to_string(self, throw_on_error, with_details=False):
"""
Get debug string.
Args:
throw_on_error(bool): raise exception when self is not initialized
when throw_on_error is True.
with_details(bool): more details about variables and parameters
(e.g. trainable, optimize_attr, ...) will be printed when
with_details is True. Default False.
Returns:
str: The debug string.
"""
assert isinstance(throw_on_error, bool) and isinstance(with_details,
bool)
if with_details:
re_add_indent = re.compile(r"\n(.)")
res_str = "blocks {\n idx: %d\n parent_idx: %d" % (
self.idx, self.parent_idx)
for var in list(self.vars.values()):
res_str += "\n vars {\n %s }" % re_add_indent.sub(
r"\n \1", var.to_string(throw_on_error, with_details))
for op in self.ops:
res_str += "\n ops {\n %s }" % re_add_indent.sub(
r"\n \1", op.to_string(throw_on_error))
res_str += "\n}"
else:
protostr = self.desc.serialize_to_string()
proto = framework_pb2.BlockDesc.FromString(
six.binary_type(protostr))
res_str = _debug_string_(proto, throw_on_error)
return res_str
__repr__ = __str__
@property
def parent_idx(self):
return self.desc.parent
@property
def forward_block_idx(self):
return self.desc.get_forward_block_idx()
def _set_forward_block_idx(self, idx):
"""
Set the forward block Idx.
Args:
idx(int): the block index.
Returns:
None
"""
self.desc._set_forward_block_idx(idx)
@property
def idx(self):
return self.desc.id
def var(self, name):
"""
Get a Variable by name from this block.
Args:
name(str): the Variable's name.
Raises:
            ValueError: If the input's type is not str, or this block
                doesn't have a Variable with the given name.
        Returns:
            Variable: the Variable with the given name.
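        The sketch below is illustrative only; the program and variable names
        are assumptions created just for this example.
        Examples:
            .. code-block:: python
                import paddle.fluid as fluid
                cur_program = fluid.Program()
                cur_block = cur_program.current_block()
                cur_block.create_var(name="X", shape=[-1, 23, 48], dtype='float32')
                x = cur_block.var("X")
                print(x.name)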
"""
if not isinstance(name, six.string_types):
raise TypeError(
"var require string as parameter, but get %s instead." %
(type(name)))
v = self.vars.get(name, None)
if v is None:
raise ValueError("var %s not in this block" % name)
return v
def _find_var_recursive(self, name):
"""
Get a Variable by name from this block recursively.
Args:
name(str): the Variable's name.
Returns:
            Variable: the Variable with the given name, or None if not found.
"""
frontier = list()
visited = set()
frontier.append(self)
prog = self.program
while len(frontier) != 0: # BFS
cur = frontier[0]
frontier = frontier[1:]
if id(cur) in visited:
continue
if cur.has_var(name):
return cur.var(name)
if cur.parent_idx != -1:
frontier.append(prog.block(cur.parent_idx))
if cur.forward_block_idx != -1:
frontier.append(prog.block(cur.forward_block_idx))
visited.add(id(cur))
return None
def _var_recursive(self, name):
"""
Get a Variable by name from this block recursively.
Args:
name(str): the Variable's name.
Raises:
            ValueError: this block and its parent blocks don't
                have a Variable with the given name.
        Returns:
            Variable: the Variable with the given name.
"""
var = self._find_var_recursive(name)
if var:
return var
else:
raise ValueError("Var {0} is not found recursively".format(name))
def all_parameters(self):
return list(self.iter_parameters())
def iter_parameters(self):
return (item[1] for item in six.iteritems(self.vars)
if isinstance(item[1], Parameter))
def create_var(self, *args, **kwargs):
var = Variable(block=self, *args, **kwargs)
if 'initializer' in kwargs:
kwargs['initializer'](var, self)
return var
def has_var(self, name):
return name in self.vars
def _rename_var(self, name, new_name):
"""
Rename variable in vars and ops' inputs and outputs
Args:
            name(str): the name to be renamed.
            new_name(str): the new name to rename to.
Raises:
            ValueError: If this block doesn't have a var with the given name,
                or the type of the var with the given name is not Parameter
                or Variable.
        Returns:
            Variable: the renamed Variable.
"""
name = cpt.to_text(name)
new_name = cpt.to_text(new_name)
if not self.has_var(name):
raise ValueError("var %s is not in current block" % name)
v = self.var(name)
if type(v) == Parameter:
var_type = "Parameter"
stop_gradient = v.stop_gradient
trainable = v.trainable
optimize_attr = v.optimize_attr
regularizer = v.regularizer
gradient_clip_attr = v.gradient_clip_attr
error_clip = v.error_clip
elif type(v) == Variable:
var_type = "Variable"
error_clip = v.error_clip
stop_gradient = v.stop_gradient
else:
raise ValueError("unsupported var type: %s", type(v))
orig_var_type = v.type
self.desc._rename_var(cpt.to_bytes(name), cpt.to_bytes(new_name))
# NOTE: v is destroyed by C++ after calling _rename_var.
d = self.desc.find_var(cpt.to_bytes(new_name))
if var_type == "Parameter":
var = Parameter(
self,
d.shape(),
d.dtype(),
type=orig_var_type,
name=new_name,
stop_gradient=stop_gradient,
trainable=trainable,
optimize_attr=optimize_attr,
regularizer=regularizer,
gradient_clip_attr=gradient_clip_attr,
error_clip=error_clip)
elif var_type == "Variable":
var = Variable(
self,
type=orig_var_type,
name=new_name,
error_clip=error_clip,
stop_gradient=stop_gradient)
# rename the python side, _sync_with_cpp will only add
# new vars/ops to python side.
self.vars[new_name] = var
del self.vars[name]
self._sync_with_cpp()
return var
def _remove_var(self, name):
self._sync_with_cpp()
self.desc._remove_var(cpt.to_bytes(name))
del self.vars[name]
def create_parameter(self, *args, **kwargs):
global_block = self.program.global_block()
param = Parameter(global_block, *args, **kwargs)
if 'initializer' in kwargs:
def _is_inited_by(block, var):
init_ops = []
for op in block.ops:
if var.name in op.output_arg_names:
# In startup_program, "c_broadcast" and "c_sync_comm_stream"
# are treated as initialization ops that cause error.
# Think of "c_broadcast" and "c_sync_comm_stream" as a special case here.
if op.type in ["c_broadcast", "c_sync_comm_stream"]:
continue
init_ops.append(op)
return init_ops
initializer = kwargs['initializer']
init_ops = _is_inited_by(global_block, param)
init_ops_len = len(init_ops)
if init_ops_len > 1:
raise RuntimeError("param " + param.name +
" is inited by multiple init ops " + str(
init_ops))
elif init_ops_len == 1:
#TODO already inited, do nothing, should log a warning
pass
else:
initializer(param, self)
param.stop_gradient = False
return param
def append_op(self, *args, **kwargs):
"""
        Appends a new Operator according to the given arguments.
        Returns:
            Operator: the appended Operator.
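        Illustrative sketch; it mirrors the example in the Block class docstring
        and the names used are assumptions for this example only.
        Examples:
            .. code-block:: python
                import paddle.fluid as fluid
                cur_program = fluid.Program()
                cur_block = cur_program.current_block()
                var = cur_block.create_var(name="X", shape=[-1, 23, 48], dtype='float32')
                op = cur_block.append_op(type="abs",
                                         inputs={"X": [var]},
                                         outputs={"Out": [var]})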
"""
if in_dygraph_mode():
attrs = kwargs.get("attrs", {})
if _dygraph_tracer_._train_mode == False:
# eval mode
if ('trainable_statistics' not in attrs
) or not attrs['trainable_statistics']:
attrs['is_test'] = True
else:
attrs['is_test'] = False
type = kwargs.get("type", None)
op = Operator(
block=self,
desc=None,
type=type,
inputs=None,
outputs=None,
attrs=attrs)
# record ops in tracer rather than blocks
#
# TODO(minqiyang): add op stop_gradient support in static mode too.
# currently, we only support stop_gradient in dygraph mode.
_dygraph_tracer().trace_op(type,
kwargs.get("inputs", {}),
kwargs.get("outputs", {}), attrs
if attrs else {},
kwargs.get("stop_gradient", False))
else:
op_desc = self.desc.append_op()
op = Operator(
block=self,
desc=op_desc,
type=kwargs.get("type", None),
inputs=kwargs.get("inputs", None),
outputs=kwargs.get("outputs", None),
attrs=kwargs.get("attrs", None))
self.ops.append(op)
return op
def _insert_op(self, index, *args, **kwargs):
"""
        Insert an Operator according to the given arguments.
        Args:
            index(int): the index at which to insert the operator.
        Returns:
            Operator: the inserted Operator.
"""
self._sync_with_cpp()
op_desc = self.desc._insert_op(index)
op = Operator(block=self, desc=op_desc, *args, **kwargs)
self.ops.insert(index, op)
return op
def _remove_op(self, index):
"""
        Remove the operator at the specified position.
        Args:
            index(int): the position of the operator to remove.
Returns:
None
"""
self._sync_with_cpp()
self.desc._remove_op(index, index + 1)
del self.ops[index]
def _slice_ops(self, start, end):
"""
        Return the Operators between start and end.
Args:
start(int): the start position.
end(int): the end position.
Returns:
list: the Operators between start and end.
"""
return self.ops[start:end]
def _prepend_op(self, *args, **kwargs):
if in_dygraph_mode():
type = kwargs.get("type", None)
attrs = kwargs.get("attrs", {})
op = Operator(
self, None, type=type, inputs=None, outputs=None, attrs=attrs)
_dygraph_tracer().trace_op(type,
kwargs.get("inputs", {}),
kwargs.get("outputs", {}), attrs
if attrs else {},
kwargs.get("stop_gradient", False))
else:
op_desc = self.desc._prepend_op()
op = Operator(
self,
op_desc,
type=kwargs.get("type", None),
inputs=kwargs.get("inputs", None),
outputs=kwargs.get("outputs", None),
attrs=kwargs.get("attrs", None))
self.ops.insert(0, op)
return op
def _sync_with_cpp(self):
"""
Sync from the desc on the c++ end. This method is used to synchronize
the c++ desc instance generated by backward.
"""
# sync variables from cpp
for var in self.desc.all_vars():
if not self.has_var(var.name()):
self.create_var(name=var.name(), desc=var, type=var.type())
# sync variables removed from c++ end
for var in list(self.vars.keys()):
if not self.desc.find_var(cpt.to_bytes(var)):
self.vars.pop(var)
# sync operators from cpp
ops_in_cpp = []
for op_idx in range(0, self.desc.op_size()):
ops_in_cpp.append(self.desc.op(op_idx))
if len(self.ops) != 0:
first_op_in_python = self.ops[0].desc
last_op_in_python = self.ops[len(self.ops) - 1].desc
start_index = None
end_index = None
for index in range(len(ops_in_cpp)):
if first_op_in_python == ops_in_cpp[index]:
start_index = index
if last_op_in_python == ops_in_cpp[index]:
end_index = index
assert start_index is not None
assert end_index is not None
assert start_index <= end_index
else:
start_index = 0
end_index = -1
# sync ops append to the head of cpp_ops
for index in range((start_index - 1 - 1), -1, -1):
op_desc = ops_in_cpp[index]
op = Operator(self, op_desc)
self.ops.insert(0, op)
# sync ops append to the end of cpp_ops
for index in range((end_index + 1), len(ops_in_cpp)):
op_desc = ops_in_cpp[index]
op = Operator(self, op_desc)
self.ops.append(op)
# sync ops removed from c++ end
if end_index != -1 and end_index < len(self.ops):
ops_in_cpp_index = 0
ops_in_python_index = 0
while ops_in_python_index < len(
self.ops) and ops_in_cpp_index < len(ops_in_cpp):
if self.ops[ops_in_python_index].desc != ops_in_cpp[
ops_in_cpp_index]:
del self.ops[ops_in_python_index]
else:
ops_in_cpp_index += 1
ops_in_python_index += 1
assert len(self.ops) == len(ops_in_cpp)
for index in range(len(self.ops)):
assert self.ops[index].desc == ops_in_cpp[index]
def _copy_param_info_from(self, other):
"""
Copy the information of parameters from the other block.
Args:
other(Block): the other block.
Raises:
            ValueError: If the type of input is not Block, or the `other` and this
                block are not in the same topology.
Returns:
None
"""
if not isinstance(other, Block):
raise TypeError(
"_copy_param_info_from should be invoked with Block")
for p in other.iter_parameters():
assert isinstance(p, Parameter)
v = self.vars.get(p.name, None)
if v is None:
raise ValueError("_copy_param_info_from should be invoked with "
"same topology")
assert isinstance(v, Variable)
new_p = Parameter(
block=self,
shape=v.shape,
dtype=v.dtype,
type=v.type,
lod_level=v.lod_level,
stop_gradient=p.stop_gradient,
trainable=p.trainable,
optimize_attr=p.optimize_attr,
regularizer=p.regularizer,
gradient_clip_attr=p.gradient_clip_attr,
error_clip=p.error_clip,
name=v.name)
self.vars[new_p.name] = new_p
def _clone_variable(self, var, force_persistable=True):
"""
Clone a variable into current block.
Args:
var: the variable to be cloned.
            force_persistable(bool): True means the result variable is set to be persistable.
                                     False means the result variable keeps the persistable attribute of the input var.
                                     default: True.
Returns:
Variable: the new variable cloned from 'var' in current block.
"""
assert isinstance(var, Variable)
ret_var = None
# make STEP_SCOPES var can be safely cloned.
if var.type == core.VarDesc.VarType.STEP_SCOPES:
ret_var = self.create_var(
name=var.name, persistable=var.persistable, type=var.type)
elif var.type == core.VarDesc.VarType.RAW:
ret_var = self.create_var(
name=var.name, persistable=var.persistable, type=var.type)
elif var.type == core.VarDesc.VarType.SELECTED_ROWS:
ret_var = self.create_var(
name=var.name,
shape=var.shape,
dtype=var.dtype,
type=var.type,
persistable=True if force_persistable else var.persistable,
is_data=var.is_data,
need_check_feed=var.desc.need_check_feed())
else:
ret_var = self.create_var(
name=var.name,
shape=var.shape,
dtype=var.dtype,
type=var.type,
lod_level=var.lod_level,
persistable=True if force_persistable else var.persistable,
is_data=var.is_data,
need_check_feed=var.desc.need_check_feed())
return ret_var
class IrNode(object):
"""
Python IrNode. Beneath it is a core.Node, which is used for Ir Pass.
"""
def __init__(self, node):
"""
Construct an IrNode using core.Node.
Args:
node(core.Node): C++ Node.
"""
assert isinstance(node,
core.Node), 'node must be the instance of core.Node.'
self.node = node
def name(self):
"""
Return the node name.
Returns:
str: node name.
"""
return self.node.name()
def node_type(self):
"""
Return the node type.
Returns:
core.Node.Type: node type(core.Node.Type.Operation or core.Node.Type.Variable).
"""
return self.node.node_type()
def var(self):
"""
Return the node variable description.
Returns:
core.VarDesc: node variable description.
"""
return self.node.var()
def op(self):
"""
Return the node operator description.
Returns:
core.OpDesc: node operator description.
"""
return self.node.op()
def id(self):
"""
Return the node id.
Returns:
int: node id.
"""
return self.node.id()
def is_op(self):
"""
If the node is an operator, then return true.
Returns:
bool: indicate whether the node is an operator.
"""
return self.node.is_op()
def is_var(self):
"""
If the node is a variable, then return true.
Returns:
bool: indicate whether the node is a variable.
"""
return self.node.is_var()
def is_ctrl_var(self):
"""
If the node is a control dependence variable, then return true.
Returns:
bool: indicate whether the node is a control dependence variable.
"""
return self.node.is_ctrl_var()
def clear_inputs(self):
"""
Clear the node inputs. After executing the `clear_inputs` function,
the node inputs will be empty.
"""
self.node.clear_inputs()
def remove_input_by_id(self, node_id):
"""
Remove a node from inputs by the given node id.
Args:
node_id(int): the given node id.
"""
self.node.remove_input(node_id)
def remove_input(self, node):
"""
Remove a node from inputs.
Args:
node(IrNode): the node being removed.
"""
self.node.remove_input(node.node)
def append_input(self, node):
"""
Append a node in inputs.
Args:
node(IrNode): the node being appended.
"""
self.node.append_input(node.node)
def clear_outputs(self):
"""
Clear the node outputs. After executing the `clear_outputs` function,
the node outputs will be empty.
"""
self.node.clear_outputs()
def remove_output_by_id(self, node_id):
"""
Remove a node from outputs by the given node id.
Args:
node_id(int): the given node id.
"""
self.node.remove_output(node_id)
def remove_output(self, node):
"""
Remove a node from outputs.
Args:
node(IrNode): the node being removed.
"""
self.node.remove_output(node.node)
def append_output(self, node):
"""
Append a node in outputs.
Args:
node(IrNode): the node being appended.
"""
self.node.append_output(node.node)
@property
def inputs(self):
"""
Return the node inputs.
Returns:
list(IrNode): node inputs wrapped by IrNode.
"""
return [IrNode(n) for n in self.node.inputs]
@property
def outputs(self):
"""
Return the node outputs.
Returns:
list(IrNode): node outputs wrapped by IrNode.
"""
return [IrNode(n) for n in self.node.outputs]
class IrVarNode(IrNode):
"""
Python IrVarNode. Beneath it is a core.Node, it inherits from IrNode.
"""
def __init__(self, node):
"""
Construct an IrVarNode using core.Node.
Args:
node(core.Node): C++ Node.
"""
assert isinstance(node, core.Node) and node.is_var(), \
'node must be the instance of core.Node and it must be a variable node.'
super(IrVarNode, self).__init__(node)
self.node = node
def set_shape(self, shape):
"""
Set the node variable shape.
Args:
shape(list): shape to be set.
"""
assert self.node.var() is not None, \
"The node variable description cannot be None."
self.node.var().set_shape(shape)
def persistable(self):
"""
If the variable node is a persistable variable, then return true.
Returns:
bool: indicate whether the variable is persistable.
"""
assert self.node.var() is not None, \
"The node variable description cannot be None."
return self.node.var().persistable()
def type(self):
"""
Return the variable type.
Returns:
core.VarDesc.VarType: the variable type.
"""
assert self.node.var() is not None, \
"The node variable description cannot be None."
return self.node.var().type()
def dtype(self):
"""
Return the variable data type.
Returns:
core.VarDesc.VarType: the variable data type.
"""
assert self.node.var() is not None, \
"The node variable description cannot be None."
return self.node.var().dtype()
def shape(self):
"""
Return the variable shape.
Returns:
list: the variable shape.
"""
assert self.node.var() is not None, \
"The node variable description cannot be None."
return self.node.var().shape()
@property
def inputs(self):
"""
Return the node inputs.
Returns:
list(IrOpNode): node inputs wrapped by IrOpNode.
"""
return [IrOpNode(n) for n in self.node.inputs]
@property
def outputs(self):
"""
Return the node outputs.
Returns:
list(IrOpNode): node outputs wrapped by IrOpNode.
"""
return [IrOpNode(n) for n in self.node.outputs]
class IrOpNode(IrNode):
"""
Python IrOpNode. Beneath it is a core.Node, it inherits from IrNode.
"""
def __init__(self, node):
"""
Construct an IrOpNode using core.Node.
Args:
node(core.Node): C++ Node.
"""
assert isinstance(node, core.Node) and node.is_op(), \
            'node must be the instance of core.Node and it must be an operator node.'
super(IrOpNode, self).__init__(node)
self.node = node
def rename_input(self, old_input_name, new_input_name):
"""
Rename the input of this node.
Args:
old_input_name(str): the old input name.
new_input_name(str): the new input name.
"""
assert self.node.op() is not None, \
"The node operator description cannot be None."
self.node.op()._rename_input(old_input_name, new_input_name)
def rename_output(self, old_output_name, new_output_name):
"""
Rename the output of this node.
Args:
old_output_name(str): the old output name.
new_output_name(str): the new output name.
"""
assert self.node.op() is not None, \
"The node operator description cannot be None."
print("op: {}, old: {}, new: {}\n".format(self.node.op().type(
), old_output_name, new_output_name))
self.node.op()._rename_output(old_output_name, new_output_name)
def input(self, name):
"""
Get the argument name list by the parameter name for input.
Args:
name(str): the parameter name.
Returns:
list(str): the argument name list.
"""
assert self.node.op() is not None, \
"The node operator description cannot be None."
return self.node.op().input(name)
def output(self, name):
"""
Get the argument name list by the parameter name for output.
Args:
name(str): the parameter name.
Returns:
list(str): the argument name list.
"""
assert self.node.op() is not None, \
"The node operator description cannot be None."
return self.node.op().output(name)
def set_type(self, new_type):
"""
Change the operator type into new type.
Args:
new_type(str): new operator type to be set.
"""
assert self.node.op() is not None, \
"The node operator description cannot be None."
return self.node.op().set_type(new_type)
def set_attr(self, name, val):
"""
Set the value of attribute by attribute's name.
Args:
name(str): the attribute name.
val(bool|int|str|float|list): the value of the attribute.
"""
self._update_desc_attr(name, val)
def _update_desc_attr(self, name, val):
"""
Update the value of the op desc's attribute by attribute's name.
"""
assert self.node.op() is not None, \
"The node operator description cannot be None."
desc = self.node.op()
if isinstance(val, Block):
desc.set_block_attr(name, val.desc)
elif isinstance(val, list) and val and \
all(isinstance(v, Block) for v in val):
desc.set_blocks_attr(name, [v.desc for v in val])
elif isinstance(val, core.BlockDesc) or \
isinstance(val, core.ProgramDesc):
desc.set_serialized_attr(name, val.serialize_to_string())
else:
desc._set_attr(name, val)
def input_arg_names(self):
"""
Return input arguments' names of this op node.
Returns:
list(str): input arguments' names of this op node.
"""
assert self.node.op() is not None, \
"The node operator description cannot be None."
return self.node.op().input_arg_names()
def output_arg_names(self):
"""
Return output arguments' names of this op node.
Returns:
list(str): output arguments' names of this op node.
"""
assert self.node.op() is not None, \
"The node operator description cannot be None."
return self.node.op().output_arg_names()
@property
def inputs(self):
"""
Return the node inputs.
Returns:
list(IrVarNode): node inputs wrapped by IrVarNode.
"""
return [IrVarNode(n) for n in self.node.inputs]
@property
def outputs(self):
"""
Return the node outputs.
Returns:
list(IrVarNode): node outputs wrapped by IrVarNode.
"""
return [IrVarNode(n) for n in self.node.outputs]
class IrGraph(object):
"""
Python IrGraph. Beneath it is a core.Graph, which is used for
creating a c++ Ir Pass Graph. An IrGraph is just a graph view of
a Program. In an IrGraph, both Variables and Operators are graph
nodes.
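    The sketch below is illustrative only; building a graph view from the
    default main program is an assumption made just for this example.
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            from paddle.fluid import core
            from paddle.fluid.framework import IrGraph
            graph = IrGraph(core.Graph(fluid.default_main_program().desc),
                            for_test=False)
            print(len(graph.all_op_nodes()))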
"""
def __init__(self, graph, for_test=False):
"""
Construct an IrGraph using core.Graph.
Args:
graph(core.Graph): C++ Graph.
for_test(bool): True for the test graph and false for the train graph.
"""
assert isinstance(
graph, core.Graph), 'graph must be the instance of core.Graph.'
self.graph = graph
self._for_test = for_test
def clone(self):
"""
Create a new and duplicated IrGraph.
Warns:
The method only clones the graph structure, not its attributes.
Returns:
IrGraph: A new and duplicated graph.
"""
g = self.graph.clone()
return IrGraph(g, self._for_test)
def is_test(self):
"""
If the graph is used for testing, the function returns true. Otherwise, returns false.
"""
return self._for_test
def all_nodes(self):
"""
Return all nodes included in the graph as a set.
"""
return {IrNode(node) for node in self.graph.nodes()}
def all_var_nodes(self):
"""
Return all variable nodes included in the graph as a set.
"""
return {IrVarNode(node) for node in self.graph.nodes() if node.is_var()}
def all_persistable_nodes(self):
"""
Return all persistable variable nodes included in the graph as a set.
"""
persistable_nodes = set()
for node in self.graph.nodes():
if node.is_var() and node.var() is not None and node.var(
).persistable():
persistable_nodes.add(node)
return {IrVarNode(p) for p in persistable_nodes}
def all_op_nodes(self):
"""
Return all operator nodes included in the graph as a set.
"""
return {IrOpNode(node) for node in self.graph.nodes() if node.is_op()}
def create_persistable_node(self, name, var_type, shape, var_dtype):
"""
Create a persistable variable node in the graph. In IrGraph,
it can not distinguish between persistable variables and parameters.
Args:
name(str): the name of the persistable variable node.
            var_type(core.VarDesc.VarType): the type of the persistable variable node.
shape(list): the shape of the persistable variable node.
var_dtype(core.VarDesc.VarType): the data type of the persistable variable node.
Returns:
IrVarNode: the created persistable variable node.
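        Illustrative sketch; ``graph`` is an assumed existing IrGraph and the
        name/shape values below are placeholders.
        Examples:
            .. code-block:: python
                from paddle.fluid import core
                w_node = graph.create_persistable_node(
                    name='fc_w.new',
                    var_type=core.VarDesc.VarType.LOD_TENSOR,
                    shape=[784, 10],
                    var_dtype=core.VarDesc.VarType.FP32)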
"""
var_desc = core.VarDesc(name)
var_desc.set_type(var_type)
var_desc.set_shape(shape)
var_desc.set_dtype(var_dtype)
var_desc.set_persistable(True)
return IrVarNode(self.graph.create_var_node(var_desc))
def create_var_node(self, name, var_type, shape, var_dtype):
"""
Create a variable node in the graph. The created variable node is
not persistable.
Args:
name(str): the name of the variable node.
            var_type(core.VarDesc.VarType): the type of the variable node.
shape(list): the shape of the variable node.
var_dtype(core.VarDesc.VarType): the data type of the variable node.
Returns:
IrVarNode: the created variable node.
"""
var_desc = core.VarDesc(name)
var_desc.set_type(var_type)
var_desc.set_shape(shape)
var_desc.set_dtype(var_dtype)
return IrVarNode(self.graph.create_var_node(var_desc))
def create_var_node_from_desc(self, var_desc):
"""
Create a variable node by using an existing VarDesc in the graph.
        Depending on the given VarDesc, the created variable node may be persistable.
        Args:
            var_desc(core.VarDesc): the given variable description.
Returns:
IrVarNode: the created variable node.
"""
return IrVarNode(self.graph.create_var_node(var_desc))
def create_op_node(self, op_type, attrs, inputs, outputs):
"""
        Create an operator node in the graph.
Args:
op_type(str): the type of the operator node.
attrs(dict): the attributes of the operator node.
inputs(dict): the inputs of the operator node.
            outputs(dict): the outputs of the operator node.
Returns:
IrOpNode: the created operator node.
"""
op_desc = core.OpDesc()
op_desc.set_type(op_type)
for attr, value in six.iteritems(attrs):
self._update_desc_attr(op_desc, attr, value)
for input_name, var_nodes in six.iteritems(inputs):
if not isinstance(var_nodes, list):
var_nodes = [var_nodes]
op_desc.set_input(input_name,
[var_node.name() for var_node in var_nodes])
for output_name, var_nodes in six.iteritems(outputs):
if not isinstance(var_nodes, list):
var_nodes = [var_nodes]
op_desc.set_output(output_name,
[var_node.name() for var_node in var_nodes])
return IrOpNode(self.graph.create_op_node(op_desc))
def create_op_node_from_desc(self, op_desc):
"""
        Create an operator node by using an existing OpDesc in the graph.
        Args:
            op_desc(core.OpDesc): the given operator description.
Returns:
IrOpNode: the created operator node.
"""
return IrOpNode(self.graph.create_op_node(op_desc))
def update_input_link(self, old_input_node, new_input_node, op_node):
"""
        Update the input link of an operator node.
        Args:
            old_input_node(IrNode): the old input node of the given op_node.
            new_input_node(IrNode): the new input node of the given op_node.
            op_node(IrOpNode): the operator node whose input link needs to be updated.
"""
assert old_input_node.node in self.graph.nodes() and new_input_node.node in \
self.graph.nodes() and op_node.node in self.graph.nodes(), \
'The three arguments(old_input_node&new_input_node&op_node) must be in the graph nodes.'
old_input_node.remove_output(op_node)
op_node.remove_input(old_input_node)
new_input_node.append_output(op_node)
op_node.append_input(new_input_node)
op_node.rename_input(old_input_node.name(), new_input_node.name())
def update_output_link(self, old_output_node, new_output_node, op_node):
"""
        Update the output link of an operator node.
        Args:
            old_output_node(IrNode): the old output node of the given op_node.
            new_output_node(IrNode): the new output node of the given op_node.
            op_node(IrOpNode): the operator node whose output link needs to be updated.
"""
assert old_output_node.node in self.graph.nodes() and new_output_node.node in \
self.graph.nodes() and op_node.node in self.graph.nodes(), \
'The three arguments(old_output_node &new_output_node &op_node) must be in the graph nodes.'
old_output_node.remove_input(op_node)
op_node.remove_output(old_output_node)
new_output_node.append_input(op_node)
op_node.append_output(new_output_node)
op_node.rename_output(old_output_node.name(), new_output_node.name())
def link_to(self, node_in, node_out):
"""
Connect two nodes.
Args:
node_in(IrNode): the input node.
node_out(IrNode): the output node.
"""
assert node_in.node in self.graph.nodes() and node_out.node in self.graph.nodes(), \
'The two arguments(node_in&node_out) must be in the graph nodes.'
node_in.append_output(node_out)
node_out.append_input(node_in)
def safe_remove_nodes(self, remove_nodes):
"""
Remove nodes safely since links connected to these removed nodes are
also removed.
Args:
remove_nodes(set): the nodes prepared to be removed.
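        Illustrative sketch; ``graph`` is an assumed existing IrGraph that is
        taken, for this example only, to contain dropout op nodes.
        Examples:
            .. code-block:: python
                dropout_nodes = {n for n in graph.all_op_nodes()
                                 if n.name() == 'dropout'}
                graph.safe_remove_nodes(dropout_nodes)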
"""
if not isinstance(remove_nodes, set):
if isinstance(remove_nodes, Iterable):
remove_nodes = set(remove_nodes)
else:
remove_nodes = {remove_nodes}
original_nodes = {n.node for n in remove_nodes}
core.graph_safe_remove_nodes(self.graph, original_nodes)
def resolve_hazard(self):
ordered_nodes = core.topology_sort(self.graph)
var_nodes = dict()
for node in ordered_nodes:
if node.is_op() and node.op() is not None:
for each_var_name in node.op().input_arg_names():
if each_var_name not in var_nodes:
var_nodes[each_var_name] = [
self._find_node_by_name(node.inputs, each_var_name)
]
for each_var_name in node.op().output_arg_names():
if each_var_name not in var_nodes:
var_nodes[each_var_name] = [
self._find_node_by_name(node.outputs, each_var_name)
]
else:
var_nodes[each_var_name].append(
self._find_node_by_name(node.outputs,
each_var_name))
self.graph.resolve_hazard(var_nodes)
def has_circle(self):
"""
Check if the graph has a circle.
Returns:
bool: True if the graph has a circle else False.
"""
return core.has_circle(self.graph)
def graph_num(self):
"""
Count the number of unconnected graphs in this graph.
Returns:
int: the number of unconnected graphs.
"""
return core.graph_num(self.graph)
def topology_sort(self):
"""
Perform the topology sort operation on the graph.
Notes: the `graph` cannot contain a circle.
Returns:
list(IrNode): nodes in topology order.
"""
ordered_nodes = core.topology_sort(self.graph)
return [IrNode(n) for n in ordered_nodes]
def build_adjacency_list(self):
"""
Build an adjacency list of operations for the `graph`.
Returns:
dict{IrNode: set(IrNode)}: the adjacency list.
"""
adj_list = core.build_adjacency_list(self.graph)
wrapped_adj_list = dict()
for k, v in six.iteritems(adj_list):
wrapped_adj_list[IrNode(k)] = {IrNode(n) for n in v}
return wrapped_adj_list
def draw(self, save_path, name, marked_nodes=None, remove_ctr_var=True):
"""
Draw the graph. If `dot` command is installed, the drawn graph
will be saved as pdf file type, otherwise dot file type is used.
Args:
save_path(str): the save path of drawn graph.
name(str): the name of drawn graph.
marked_nodes(set(IrNode)): nodes that are needed to be marked.
Default value is None.
remove_ctr_var(bool): If it is set True, all control variable nodes
in the graph will be removed. Default value is True.
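        Illustrative sketch; ``graph`` is an assumed existing IrGraph and the
        `dot` command is assumed to be available for pdf output.
        Examples:
            .. code-block:: python
                graph.draw(save_path='./graph_viz', name='train_graph')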
"""
def _convert_to_pdf(dot_file_path):
pdf_save_path = os.path.splitext(dot_file_path)[0] + '.pdf'
exited_code = subprocess.call('dot -Tpdf ' + dot_file_path \
+ ' -o ' + pdf_save_path, shell=True)
if exited_code != 0:
print('The dot command is needed for creating pdf files.')
print('The {} is saved as the dot filetype.'.format(
dot_file_path))
remove_ctr_vars = set()
if remove_ctr_var:
for node in self.all_var_nodes():
if node.is_ctrl_var():
remove_ctr_vars.add(node)
self.safe_remove_nodes(remove_ctr_vars)
print('Total ops num = {}.'.format(len(self.all_op_nodes())))
if marked_nodes is not None:
if not isinstance(marked_nodes, set):
if isinstance(marked_nodes, Iterable):
marked_nodes = set(marked_nodes)
else:
marked_nodes = {marked_nodes}
marked_nodes = {n.node for n in marked_nodes}
remove_ctr_vars = {n.node for n in remove_ctr_vars}
marked_nodes = marked_nodes - remove_ctr_vars
if self.graph.has('__graphviz__marked_node__'):
self.graph.erase('__graphviz__marked_node__')
self.graph.set('__graphviz__marked_node__', marked_nodes)
if not os.path.exists(save_path):
os.makedirs(save_path)
viz_dot_path = os.path.join(save_path, name) + '.dot'
viz_pass = core.get_pass('graph_viz_pass')
viz_pass.set('graph_viz_path', viz_dot_path)
viz_pass.apply(self.graph)
_convert_to_pdf(viz_dot_path)
def to_program(self):
"""
Convert the graph into a Program.
        WARN: When the graph includes backward operator nodes, the
        conversion process may fail. Usually, this function is
        only used to convert a test graph.
Returns:
Program: a program converted from the graph.
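        Illustrative sketch; ``graph`` is assumed to be an IrGraph built from a
        test (inference) program.
        Examples:
            .. code-block:: python
                inference_program = graph.to_program()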
"""
convert_pass = core.get_pass('graph_to_program_pass')
desc = core.ProgramDesc()
convert_pass.set_not_owned('program', desc)
convert_pass.apply(self.graph)
program = Program._construct_from_desc(desc)
return program
def _find_node_by_name(self, nodes, node_name):
"""
        Find a node in the given node set by name.
"""
target_node = None
for n in nodes:
if n.name() == node_name:
target_node = n
        assert target_node is not None, "Cannot find the target node in the given set."
return target_node
def _update_desc_attr(self, desc, name, val):
"""
Update the value of desc's attribute by attribute's name.
"""
if isinstance(val, Block):
desc.set_block_attr(name, val.desc)
elif isinstance(val, list) and val and all(
isinstance(v, Block) for v in val):
desc.set_blocks_attr(name, [v.desc for v in val])
elif isinstance(val, core.BlockDesc) or \
isinstance(val, core.ProgramDesc):
desc.set_serialized_attr(name, val.serialize_to_string())
else:
desc._set_attr(name, val)
class Program(object):
"""
    Create a Python Program. It has at least one :ref:`api_guide_Block_en`; when a
    control flow op like conditional_block or :ref:`api_fluid_layers_While` is included,
    it will contain nested blocks.
    Please refer to
    `framework.proto <https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/framework.proto>`_
    for details.
    A set of Programs usually contains a startup program and a main program.
    A startup program holds some initial work, e.g. initializing the ``Parameter``, while the main
    program contains the network structure and variables used for training.
    A set of Programs can be used for testing or training. In a train program,
    Paddle keeps all the content needed to build a training network; in a test
    program, Paddle prunes content that is irrelevant to testing, e.g.
    backward ops and vars.
**Notes**:
        **we have** :ref:`api_fluid_default_startup_program` **and** :ref:`api_fluid_default_main_program`
        **by default, and the pair shares the parameters. The** :ref:`api_fluid_default_startup_program` **runs only once to initialize parameters,**
        :ref:`api_fluid_default_main_program` **runs in every mini batch and adjusts the weights.**
Returns:
Program: An empty Program.
Examples:
.. code-block:: python
import paddle.fluid as fluid
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program=main_program, startup_program=startup_program):
x = fluid.layers.data(name="x", shape=[-1, 784], dtype='float32')
y = fluid.layers.data(name="y", shape=[-1, 1], dtype='int32')
z = fluid.layers.fc(name="fc", input=x, size=10, act="relu")
print("main program is: {}".format(main_program))
print("start up program is: {}".format(startup_program))
"""
def __init__(self):
self.desc = core.ProgramDesc()
self.blocks = [Block(self, 0)]
self.current_block_idx = 0
self._seed = 0
self._current_role = core.op_proto_and_checker_maker.OpRole.Forward
self.__op_role_var = []
# for distribute training
# _is_distributed = True if under distributed training
self._is_distributed = False
# _is_chief = True if the trainer is the first one, usually No.0
self._is_chief = False
# _parameters_on_pservers records all the parameters distributed on parameter servers.
self._parameters_on_pservers = None
# _endpoints is a list about parameter servers ip:port, such as ["ip:port","ip:port"]
self._endpoints = []
# if current role is parameter server, the _ps_endpoint is its "ip:port"
self._ps_endpoint = None
# trainers_endpoints, it is used for distribution.
self._trainers_endpoints = []
# the distributed lookup table names
self._distributed_lookup_table = None
# use Deep gradient comrepssion or not
self._enable_dgc = False
self._use_lamb = False
self._nccl_comm_num = 1
self._use_hierarchical_allreduce = False
self._hierarchical_allreduce_inter_nranks = 0
# if this program has been optimized by distributed optimizer
# fleet_opt will be given a value
self._fleet_opt = None
self._program_config = None
# assigned if this program has been parsed by a pipeline optimizer
self._pipeline_opt = None
# appending gradients times
self._appending_grad_times = 0
@property
def _op_role(self):
"""
        The operator role. An enum in {Forward, Backward, Optimize}.
        Notes: this is a low level API. It is used only for ParallelExecutor to
        duplicate or schedule operators to devices.
For example, the forward operator should be executed on every device.
The backward operator should be executed on every device and the
parameter gradient of backward (use :code:`_op_role_var` to get this
variable) operator should be merged to one device. The optimization
operators should be executed on only one device and broadcast the
optimization result, i.e., the new parameter, to every other device.
"""
return self._current_role
@_op_role.setter
def _op_role(self, role):
self._current_role = role
@property
def _op_role_var(self):
"""
The auxiliary variables for :code:`_op_role` property.
See Also: :code:`Program._op_role`'s documentation for details.
Notes: This is a very low-level API. Users should not use it directly.
"""
return self.__op_role_var
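    # A minimal usage sketch of the role machinery above (kept as comments;
    # `prog` is only an illustrative local name). The guards defined below
    # temporarily flip these two fields around a block of layer calls:
    #
    #     prog = Program()
    #     with prog._optimized_guard([]):
    #         ...  # ops appended here are tagged with OpRole.Optimize
    #     # outside the guard, _op_role falls back to the previous role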
@contextlib.contextmanager
def _backward_role_guard(self):
tmp_role = self._current_role
OpRole = core.op_proto_and_checker_maker.OpRole
self._current_role = OpRole.Backward
yield
self._current_role = tmp_role
@signature_safe_contextmanager
def _optimized_guard(self, param_and_grads):
"""
A with guard to set :code:`Optimization` :code:`OpRole` and
:code:`OpRoleVar` automatically.
Notes: This is a very low level API. Users should not use it directly.
Args:
param_and_grads(list): The variables (names) to be optimized.
Examples:
>>> import paddle.fluid as fluid
>>> p, g = backward(...)
>>> with program._optimized_guard([p,g]):
>>> p = p - 0.001 * g
"""
tmp_role = self._current_role
tmp_var = self.__op_role_var
OpRole = core.op_proto_and_checker_maker.OpRole
self._current_role = OpRole.Optimize
self.__op_role_var = [
var.name if isinstance(var, Variable) else var
for var in param_and_grads
]
yield
self.__op_role_var = tmp_var
self._current_role = tmp_role
@signature_safe_contextmanager
def _lr_schedule_guard(self, is_with_opt=False):
"""
A with guard to set :code:`LRSched` :code:`OpRole` and
:code:`OpRoleVar` automatically. The :code:`OpRoleVar` is
set to the target learning rate.
Notes: This is a very low level API. Users should not use it directly.
Args:
            is_with_opt: Only set to true if these ops are in the middle
                of a bunch of optimize ops so that they can be treated
                correctly. For example, sgd->lr_op->sgd->lr_op->sgd.
Examples:
>>> import paddle.fluid as fluid
>>> p, g = backward(...)
>>> with program.lr_schedule_guard():
>>> lr = lr * decay
"""
tmp_role = self._current_role
tmp_var = self.__op_role_var
OpRole = core.op_proto_and_checker_maker.OpRole
self._current_role = OpRole.LRSched
if is_with_opt:
self._current_role = int(OpRole.LRSched) | int(OpRole.Optimize)
# TODO(typhoonzero): how to set target learning rate var
self.__op_role_var = []
yield
self.__op_role_var = tmp_var
self._current_role = tmp_role
def __str__(self):
"""
Get the protobuf debug string of this Program.
Returns:
(str): The protobuf debug string.
Raises:
ValueError: If any of required fields is not set.
"""
return self.to_string(True)
def to_string(self, throw_on_error, with_details=False):
"""
To debug string.
Args:
throw_on_error (bool): raise Value error when any of required fields is not set.
with_details (bool): True if more details about variables and parameters, e.g., :code:`trainable`, :code:`optimize_attr`, need to print.
Returns:
str: The debug string describe current Program.
Raises:
ValueError: If any of required fields is not set and throw_on_error is True.
Examples:
.. code-block:: python
import paddle.fluid as fluid
prog = fluid.default_main_program()
prog_string = prog.to_string(throw_on_error=True, with_details=False)
print("program string without detial: {}".format(prog_string))
prog_string_with_detail = prog.to_string(throw_on_error=True, with_details=True)
print("program string with detial: {}".format(prog_string_with_detail))
"""
assert isinstance(throw_on_error, bool) and isinstance(with_details,
bool)
if with_details:
res_str = ""
for block in self.blocks:
res_str += block.to_string(throw_on_error, with_details)
else:
protostr = self.desc.serialize_to_string()
proto = framework_pb2.ProgramDesc.FromString(
six.binary_type(protostr))
res_str = _debug_string_(proto, throw_on_error)
return res_str
def _get_desc(self):
"""
Get the C++ side of `ProgramDesc` object pointer. The C++ object is
exposed by :code:`pybind`.
Notes: This is a very low level API. Users should not use this API
directly.
"""
return self.desc
def _version(self):
return self.desc._version()
@dygraph_not_support
def clone(self, for_test=False):
"""
**Notes**:
        **1. The** :code:`Program.clone()` **method DOES NOT clone** :ref:`api_fluid_io_DataLoader` .
        **2. We recommend using** :code:`clone` **before** :code:`Optimizer.minimize` .
        **3. This API has no effect in Dygraph Mode**
        Create a new Program with only the forward content of the original one when ``for_test=True``.
        Create a new Program identical to the original one when ``for_test=False``.
        Some operators, e.g., :ref:`api_fluid_layers_batch_norm` , behave differently between
        training and testing. They have an attribute, :code:`is_test`, to
        control this behaviour. This method will change their :code:`is_test`
        attribute to :code:`True` when :code:`for_test=True`.
        * Set for_test to False when you want to clone the program for training.
        * Set for_test to True when you want to clone the program for testing.
        We will prune the backward and optimize parts of the program when you
        use :code:`clone` after :code:`Optimizer.minimize`, but we still
        recommend using :code:`clone` before :code:`Optimizer.minimize`.
For Example:
.. code-block:: python
test_program = fluid.default_main_program().clone(for_test=True)
# Here we use clone before Momentum
optimizer = fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9)
optimizer.minimize()
Args:
for_test (bool): True if change the :code:`is_test` attribute of operators to :code:`True`.
Returns:
            Program: A new Program with only the forward content of the original one when ``for_test=True``; a new Program identical to the original one when ``for_test=False``.
Examples:
            **Notes: The Program's order may be different after** :code:`clone` **and
            this will not affect your training or testing progress. In the following
            example we give you a simple method** :code:`print_prog(program)` **to
            print the Program Descs in order to make sure you get the same print result
            after** :code:`clone`:
.. code-block:: python
import paddle.fluid as fluid
import six
def print_prog(prog):
for name, value in sorted(six.iteritems(prog.block(0).vars)):
print(value)
for op in prog.block(0).ops:
print("op type is {}".format(op.type))
print("op inputs are {}".format(op.input_arg_names))
print("op outputs are {}".format(op.output_arg_names))
for key, value in sorted(six.iteritems(op.all_attrs())):
if key not in ['op_callstack', 'op_role_var']:
print(" [ attrs: {}: {} ]".format(key, value))
1. To clone a test program, the sample code is:
.. code-block:: python
import paddle.fluid as fluid
import six
def print_prog(prog):
for name, value in sorted(six.iteritems(prog.block(0).vars)):
print(value)
for op in prog.block(0).ops:
print("op type is {}".format(op.type))
print("op inputs are {}".format(op.input_arg_names))
print("op outputs are {}".format(op.output_arg_names))
for key, value in sorted(six.iteritems(op.all_attrs())):
if key not in ['op_callstack', 'op_role_var']:
print(" [ attrs: {}: {} ]".format(key, value))
train_program = fluid.Program()
startup_program = fluid.Program()
# startup_program is used to do some parameter init work,
# and main program is used to hold the network
with fluid.program_guard(train_program, startup_program):
with fluid.unique_name.guard():
img = fluid.layers.data(name='image', shape=[784])
hidden = fluid.layers.fc(input=img, size=200, act='relu')
hidden = fluid.layers.dropout(hidden, dropout_prob=0.5)
loss = fluid.layers.cross_entropy(
input=fluid.layers.fc(hidden, size=10, act='softmax'),
label=fluid.layers.data(name='label', shape=[1], dtype='int64'))
avg_loss = fluid.layers.mean(loss)
test_program = train_program.clone(for_test=False)
print_prog(test_program)
# Due to parameter sharing usage for train and test, so we need to use startup program of train
# instead of using test startup program, while nothing is in test's startup program
# In Paddle Fluid we will share weights by using the same Variable name. In train and test program
# all parameters will have the same name and this can make train and test program sharing parameters,
# that's why we need to use startup program of train. And for startup program of test, it has nothing,
# since it is a new program.
with fluid.program_guard(train_program, startup_program):
with fluid.unique_name.guard():
sgd = fluid.optimizer.SGD(learning_rate=1e-3)
sgd.minimize(avg_loss)
        2. The clone method can be avoided if you create the program for training and the program for testing individually.
.. code-block:: python
import paddle.fluid as fluid
import six
def print_prog(prog):
for name, value in sorted(six.iteritems(prog.block(0).vars)):
print(value)
for op in prog.block(0).ops:
print("op type is {}".format(op.type))
print("op inputs are {}".format(op.input_arg_names))
print("op outputs are {}".format(op.output_arg_names))
for key, value in sorted(six.iteritems(op.all_attrs())):
if key not in ['op_callstack', 'op_role_var']:
print(" [ attrs: {}: {} ]".format(key, value))
def network(is_test):
img = fluid.layers.data(name='image', shape=[784])
hidden = fluid.layers.fc(input=img, size=200, act='relu')
hidden = fluid.layers.dropout(hidden, dropout_prob=0.5)
loss = fluid.layers.cross_entropy(
input=fluid.layers.fc(hidden, size=10, act='softmax'),
label=fluid.layers.data(name='label', shape=[1], dtype='int64'))
avg_loss = fluid.layers.mean(loss)
return avg_loss
train_program_2 = fluid.Program()
startup_program_2 = fluid.Program()
test_program_2 = fluid.Program()
                with fluid.program_guard(train_program_2, startup_program_2):
                    with fluid.unique_name.guard():
                        avg_loss = network(is_test=False)
                        sgd = fluid.optimizer.SGD(learning_rate=1e-3)
                        sgd.minimize(avg_loss)
# the test startup program is not used.
with fluid.program_guard(test_program_2, fluid.Program()):
with fluid.unique_name.guard():
loss = network(is_test=True)
print(test_program_2)
        The two code snippets above will generate and print the same programs.
"""
if for_test:
if self._appending_grad_times > 0:
forward_prog = Program()
forward_prog.desc = core.prune_backward(self.desc)
forward_prog.blocks = [
Block(forward_prog, i)
for i in six.moves.range(forward_prog.desc.num_blocks())
]
forward_prog._sync_with_cpp()
p = forward_prog._inference_optimize(prune_read_op=False)
else:
p = self._inference_optimize(prune_read_op=False)
else:
p = Program()
p.current_block_idx = self.current_block_idx
p._seed = self._seed
p.desc = core.ProgramDesc(self.desc)
p.blocks = [
Block(p, i) for i in six.moves.range(self.desc.num_blocks())
]
p._current_role = self._current_role
p.__op_role_var = self.__op_role_var
p._appending_grad_times = self._appending_grad_times
p._sync_with_cpp()
p._copy_param_info_from(self)
p._copy_data_info_from(self)
p._copy_dist_param_info_from(self)
return p
def _prune(self, targets):
"""
Prune operators and variables which are not needed to generate
:code:`targets`.
Notes: This is a very low level API. Users should not use this API
directly. This API is in flux and not stable.
Args:
targets(list|Variable|Operator): A list of variables or operators
need to be pruned
Returns:
Program: A new, pruned program.
"""
if not isinstance(targets, list):
targets = [targets]
targets_idx = []
for t in targets:
if not isinstance(t, Operator):
if isinstance(t, Variable):
                    # After transpiler processing, the op that outputs this
                    # variable may have been changed, so t.op is not reliable
                    # and we need to find the current op that generates this
                    # variable here.
t.op = None
global_block = self.global_block()
for idx, op in enumerate(global_block.ops):
if t.name in op.output_arg_names:
t.op = op
break
t = t.op
if t is None:
raise ValueError(
"The target variable must have an "
"associated operator that generates it.")
else:
raise ValueError("All targets of prune() can only be "
"Variable or Operator.")
targets_idx.append([t.block.idx, t.idx])
res = Program()
res.desc = core.prune(self.desc, set(), targets_idx)
res.blocks = [
Block(res, i) for i in six.moves.range(res.desc.num_blocks())
]
res._sync_with_cpp()
return res
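    # A sketch of how _prune might be driven (comments only; the names are
    # illustrative and assume a fetch target variable `loss` built on this
    # program):
    #
    #     pruned = prog._prune(loss)          # keep only ops needed for `loss`
    #     pruned = prog._prune([loss, acc])   # multiple targets are accepted too
    #
    # Each target is mapped back to the op that generates it, and core.prune
    # keeps only the transitive dependencies of those ops.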
def _prune_with_input(self, feeded_var_names, targets):
"""
        Prune operators and variables which are not needed to generate
        :code:`targets`, and also prune the operators and variables that are
        only needed to generate the variables in :code:`feeded_var_names`.
Notes: This is a very low level API. Users should not use this API
directly. This API is in flux and not stable.
Args:
            feeded_var_names(list|str): A list of variable names from which
                pruning starts. If it is set to [], this API works just like _prune()
targets(list|Variable|Operator): A list of variables or operators
need to be pruned
Returns:
Program: A new, pruned program.
"""
if not isinstance(feeded_var_names, list):
feeded_var_names = [feeded_var_names]
if not isinstance(targets, list):
targets = [targets]
for var in feeded_var_names:
if not isinstance(var, six.string_types):
raise ValueError("All feeded_var_names of prune() can only be "
"str.")
targets_idx = []
for t in targets:
if not isinstance(t, Operator):
if isinstance(t, Variable):
                    # After transpiler processing, the op that outputs this
                    # variable may have been changed, so t.op is not reliable
                    # and we need to find the current op that generates this
                    # variable here.
t.op = None
global_block = self.global_block()
for idx, op in enumerate(global_block.ops):
if t.name in op.output_arg_names:
t.op = op
break
t = t.op
if t is None:
raise ValueError(
"The target variable must have an "
"associated operator that generates it.")
else:
raise ValueError("All targets of prune() can only be "
"Variable or Operator.")
targets_idx.append([t.block.idx, t.idx])
res = Program()
res.desc = core.prune(self.desc, set(feeded_var_names), targets_idx)
res.blocks = [
Block(res, i) for i in six.moves.range(res.desc.num_blocks())
]
res._sync_with_cpp()
return res
def _inference_optimize(self, prune_read_op=True):
"""
This method will create a new program and do following adjustments on it:
1. Remove all reader variables and their creator ops if exist.
2. Remove the :code:`read_op` if exists.
3. change the :code:`is_test`
attribute of operators to :code:`True`. All the :code:`Parameter`
information will be lost.
Args:
prune_read_op(bool): remove the read ops that are added by py_reader
for cpp inference library
Notes: This API is a very low level API. Use
:code:`Program.clone(for_test=True)` instead.
Returns:
Program: The new program.
"""
res = Program()
res.desc = core.ProgramDesc(self.desc)
# remove all readers and the read_op if exist
read_op_idx = 0
root_block = res.desc.block(0)
if prune_read_op:
while True:
if read_op_idx >= root_block.op_size() or root_block.op(
read_op_idx).type() == 'read':
break
read_op_idx += 1
if read_op_idx < root_block.op_size():
root_block._remove_op(0, read_op_idx + 1)
for var in root_block.all_vars():
if var.type() == core.VarDesc.VarType.READER:
root_block._remove_var(cpt.to_bytes(var.name()))
# change all `is_test` attributes to True
for i in six.moves.range(res.desc.num_blocks()):
block = res.desc.block(i)
for j in six.moves.range(block.op_size()):
op = block.op(j)
if op.has_attr('is_test'):
op._set_attr('is_test', True)
res.blocks = [
Block(res, i) for i in six.moves.range(res.desc.num_blocks())
]
res._sync_with_cpp()
return res
@staticmethod
def parse_from_string(binary_str):
"""
**Notes**:
**1. All information about parameters will be lost after serialization**
**2. This API has no effect in Dygraph mode**
Deserialize a Program from `protobuf <https://en.wikipedia.org/wiki/Protocol_Buffers>`_ binary string.
        This method is typically used when saving and loading models.
        Args:
            binary_str (str): the binary protobuf string.
Returns:
Program: A deserialized Program.
Examples:
.. code-block:: python
import paddle.fluid as fluid
startup_prog = fluid.Program()
main_prog = fluid.Program()
with fluid.program_guard(startup_prog, main_prog):
x = fluid.layers.data(
name='X', shape=[1000, 784], dtype='float32', append_batch_size=False)
y = fluid.layers.data(
name='Y', shape=[784, 100], dtype='float32', append_batch_size=False)
z = fluid.layers.mul(x=x, y=y)
binary_str = fluid.default_main_program().desc.serialize_to_string()
prog_restored = fluid.default_main_program().parse_from_string(binary_str)
print(fluid.default_main_program())
print(prog_restored)
"""
p = Program()
p.desc = core.ProgramDesc(binary_str)
p.blocks = [Block(p, i) for i in six.moves.range(p.desc.num_blocks())]
p._sync_with_cpp()
return p
@staticmethod
def _construct_from_desc(desc):
"""
Construct a program from program desc.
Args:
desc(core.ProgramDesc): The program desc for constructing.
Returns:
Program: A program.
"""
p = Program()
p.desc = desc
p.blocks = [Block(p, i) for i in six.moves.range(p.desc.num_blocks())]
p._sync_with_cpp()
return p
@property
def random_seed(self):
"""
The default random seed for random operators in Program. ``0`` means get
the random seed from random device.
**Notes: It must be set before the operators have been added.**
Returns:
int64: Random seed in current Program
Examples:
.. code-block:: python
import paddle.fluid as fluid
prog = fluid.default_main_program()
random_seed = prog.random_seed
x_var = fluid.layers.data(name="X", shape=[3,3], dtype="float32", append_batch_size=False)
# Here we need to set random seed before we use fluid.layers.dropout
print(random_seed)
prog.random_seed = 1
z_var = fluid.layers.dropout(x_var, 0.7)
print(prog.random_seed)
"""
return self._seed
@property
def num_blocks(self):
"""
The number of :ref:`api_guide_Block_en` in this Program.
**Notes: This API has no effect in Dygraph mode**
Returns:
int(Platform-dependent size): num of :ref:`api_guide_Block_en` in current Program
Examples:
.. code-block:: python
import paddle.fluid as fluid
prog = fluid.default_main_program()
num_blocks = prog.num_blocks
print(num_blocks)
"""
return self.desc.num_blocks()
@random_seed.setter
def random_seed(self, seed):
if not isinstance(seed, int):
raise ValueError("Seed must be a integer.")
self._seed = seed
def __repr__(self):
return self.__str__()
def global_block(self):
"""
**Notes**:
**This API has no effect in Dygraph mode**
Get the first :ref:`api_guide_Block_en` of this Program.
Returns:
:ref:`api_guide_Block_en`: The first :ref:`api_guide_Block_en` of this Program.
Examples:
.. code-block:: python
import paddle.fluid as fluid
prog = fluid.default_main_program()
gb_block = prog.global_block()
print(gb_block)
"""
return self.blocks[0]
def block(self, index):
"""
**Notes**:
**This API has no effect in Dygraph mode**
Get the :code:`index` :ref:`api_guide_Block_en` of this Program
Args:
index (int) - The index of :ref:`api_guide_Block_en` to get
Returns:
:ref:`api_guide_Block_en`: The :code:`index` block
Examples:
.. code-block:: python
import paddle.fluid as fluid
prog = fluid.default_main_program()
block_0 = prog.block(0)
print(block_0)
"""
return self.blocks[index]
def current_block(self):
"""
**Notes**:
**This API has no effect in Dygraph mode**
        Get the current :ref:`api_guide_Block_en` . The :code:`current` :ref:`api_guide_Block_en`
        is the :ref:`api_guide_Block_en` to which new operators are appended.
        Returns:
            :ref:`api_guide_Block_en`: The current :ref:`api_guide_Block_en`
Examples:
.. code-block:: python
import paddle.fluid as fluid
prog = fluid.default_main_program()
current_blk = prog.current_block()
print(current_blk)
"""
return self.blocks[self.current_block_idx]
def _create_block(self, parent_idx=None):
"""
Create a new block with the :code:`parent_idx` and change the current block
to new block.
Args:
parent_idx(int): The parent block index.
Returns:
Block: The new block.
"""
new_block_idx = len(self.blocks)
parent = self.current_block() if parent_idx is None else self.block(
parent_idx)
self.desc.append_block(parent.desc)
self.current_block_idx = new_block_idx
self.blocks.append(Block(self, self.current_block_idx))
return self.current_block()
def _rollback(self):
"""
Exit a code block, i.e., roll back to the parent block.
Returns:
None
"""
self.current_block_idx = self.current_block().parent_idx
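    # Sketch of the block push/pop pattern that control-flow ops rely on
    # (comments only; `prog` stands for a Program instance):
    #
    #     sub_block = prog._create_block()   # current block becomes the new child
    #     ...                                # append ops into the sub block
    #     prog._rollback()                   # current block is the parent again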
def _sync_with_cpp(self):
"""
Synchronize Python instance to its binding C++ object instance.
If the program is modified in C++ space, this method should be invoked.
Notes: This is a very low level API. Users should not invoke it
directly.
Returns:
None
"""
for block_idx in range(len(self.blocks), self.desc.num_blocks()):
self.blocks.append(Block(self, block_idx))
for block in self.blocks:
block._sync_with_cpp()
def _copy_param_info_from(self, other):
"""
Copy the information of parameters from other program.
Notes: This is a very low level API. Users should not invoke it
directly.
Args:
other(Program): Other program
Returns:
None
"""
if not isinstance(other, Program):
raise TypeError("_copy_param_info_from should be invoked with "
"Program")
if len(self.blocks) != len(other.blocks):
raise ValueError("_copy_param_info_from should be invoked with two "
"program, with represent the same topology")
self.global_block()._copy_param_info_from(other.global_block())
def _copy_dist_param_info_from(self, other):
"""
Copy the information of distributed information from other program.
Args:
other(Program): Other program
Returns:
None
"""
if not isinstance(other, Program):
raise TypeError("_copy_dist_param_info_from should be invoked with "
"Program")
self._is_distributed = other._is_distributed
self._is_chief = other._is_chief
self._parameters_on_pservers = other._parameters_on_pservers
self._endpoints = other._endpoints
self._ps_endpoint = other._ps_endpoint
self._distributed_lookup_table = other._distributed_lookup_table
def _copy_data_info_from(self, other):
"""
Copy the information of data variables from other program.
Notes: This is a very low level API. Users should not invoke it
directly.
Args:
other(Program): Other program
Returns:
None
"""
if not isinstance(other, Program):
raise TypeError("_copy_param_info_from should be invoked with "
"Program")
if len(self.blocks) != len(other.blocks):
raise ValueError("_copy_param_info_from should be invoked with two "
"program, with represent the same topology")
for var in list(other.global_block().vars.values()):
if var.is_data:
self.global_block().var(var.name).is_data = True
if var.desc.need_check_feed():
self.global_block().var(var.name).desc.set_need_check_feed(True)
@dygraph_not_support
def list_vars(self):
"""
        Get all :ref:`api_guide_Variable_en` from this Program. An iterable object is returned.
        Returns:
            iterable :ref:`api_guide_Variable_en`: The generator will yield every variable in this program.
Examples:
.. code-block:: python
import paddle.fluid as fluid
prog = fluid.default_main_program()
img = fluid.layers.data(name='img', shape=[1,28,28], dtype='float32')
label = fluid.layers.data(name='label', shape=[128,1], dtype='int64')
for var in prog.list_vars():
print(var)
"""
for each_block in self.blocks:
for each_var in list(each_block.vars.values()):
yield each_var
class Parameter(Variable):
"""
Parameter is derived from Variable. A parameter is a persistable
Variable, and will be updated by optimizers after each iteration.
The training of a neural network is essentially the updating of
its parameters.
    Relative to a general Variable, a Parameter has several member
    variables of its own:
    Args:
        trainable(bool): True if the parameter needs to be updated after
            iterations.
        optimize_attr(map): Parameter attributes related to optimization.
            Currently, it only contains 'learning_rate'.
            Default: {'learning_rate': 1.0}
        regularizer(WeightDecayRegularizer): The Regularizer which will
            be applied on the parameter. Default: None
        gradient_clip_attr(BaseGradientClipAttr): The gradient clip strategy
            which will be applied on the parameter. Default: None
do_model_average(bool): True if the model average strategy will
be applied on this parameter.
"""
def __init__(self, block, shape, dtype, **kwargs):
if shape is None:
raise ValueError("The shape of Parameter should not be None")
if dtype is None:
raise ValueError("The dtype of Parameter should not be None")
if len(shape) == 0:
raise ValueError(
"The dimensions of shape for Parameter must be greater than 0")
for each in shape:
if each < 0:
raise ValueError(
"Each dimension of shape for Parameter must be greater than 0, but received %s"
% list(shape))
Variable.__init__(
self, block, persistable=True, shape=shape, dtype=dtype, **kwargs)
self.trainable = kwargs.get('trainable', True)
self.optimize_attr = kwargs.get('optimize_attr', {'learning_rate': 1.0})
self.regularizer = kwargs.get('regularizer', None)
self.gradient_clip_attr = kwargs.get('gradient_clip_attr', None)
self.do_model_average = kwargs.get('do_model_average', None)
self.is_distributed = False
def __str__(self):
return self.to_string(True)
def to_string(self, throw_on_error, with_details=False):
"""
To debug string.
Args:
throw_on_error(bool): raise exception when self is not initialized
when throw_on_error is True
with_details(bool): more details about variables and parameters
(e.g. trainable, optimize_attr, ...) will be printed when with_details is True
Returns(str): The debug string.
Examples:
.. code-block:: python
import paddle.fluid as fluid
prog = fluid.default_main_program()
rlt = fluid.layers.data("fake_data", shape=[1,1], dtype='float32')
debug_str = prog.to_string(throw_on_error=True, with_details=False)
print(debug_str)
"""
assert isinstance(throw_on_error, bool) and isinstance(with_details,
bool)
if with_details:
res_str = Variable.to_string(self, throw_on_error, True)
additional_attr = ("trainable", "optimize_attr", "regularizer",
"gradient_clip_attr", "do_model_average")
for attr_name in additional_attr:
res_str += "%s: %s\n" % (attr_name,
cpt.to_text(getattr(self, attr_name)))
else:
res_str = Variable.to_string(self, throw_on_error, False)
return res_str
__repr__ = __str__
# program is a global instance.
_main_program_ = Program()
_startup_program_ = Program()
def default_startup_program():
"""
Get default/global startup program.
    The layer functions in :ref:`api_fluid_layers` create parameters, :ref:`api_paddle_data_reader_reader` ,
    and `NCCL <https://developer.nvidia.com/nccl>`_ handles as global variables. The :code:`startup_program`
    initializes them with the OPs in the startup :ref:`api_fluid_Program` , and the :ref:`api_fluid_layers` functions
    append these initialization operators to the startup program.
    This method returns the :code:`default` or the :code:`current` startup
    program. Users can use :ref:`api_fluid_program_guard` to switch to another :ref:`api_fluid_Program` .
Returns: current default startup :ref:`api_fluid_Program`
Returns type: :ref:`api_fluid_Program`
Examples:
.. code-block:: python
import paddle.fluid as fluid
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program=main_program, startup_program=startup_program):
x = fluid.layers.data(name="x", shape=[-1, 784], dtype='float32')
y = fluid.layers.data(name="y", shape=[-1, 1], dtype='int32')
z = fluid.layers.fc(name="fc", input=x, size=10, act="relu")
print("main program is: {}".format(fluid.default_main_program()))
print("start up program is: {}".format(fluid.default_startup_program()))
"""
return _startup_program_
def default_main_program():
"""
    This API can be used to get the ``default main program``, which stores the
    descriptions of the ``op`` s and ``variable`` s.
    For example, ``z = fluid.layers.elementwise_add(x, y)`` will create a new ``elementwise_add``
    ``op`` and a new ``z`` ``variable``, and they will be recorded in the ``default main program``.
    The ``default_main_program`` is the default value for the ``Program`` parameter in
    a lot of ``fluid`` APIs. For example, :code:`Executor.run()` will execute the
    :code:`default_main_program` when the program is not specified.
    If you want to replace the ``default main program``, you can use :ref:`api_fluid_program_guard` .
    Returns:
        :ref:`api_fluid_Program`: a ``Program`` that holds the descriptions of the ops and variables in the network.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# Sample Network:
data = fluid.data(name='image', shape=[None, 3, 224, 224], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
conv1 = fluid.layers.conv2d(data, 4, 5, 1, act=None)
bn1 = fluid.layers.batch_norm(conv1, act='relu')
pool1 = fluid.layers.pool2d(bn1, 2, 'max', 2)
conv2 = fluid.layers.conv2d(pool1, 16, 5, 1, act=None)
bn2 = fluid.layers.batch_norm(conv2, act='relu')
pool2 = fluid.layers.pool2d(bn2, 2, 'max', 2)
fc1 = fluid.layers.fc(pool2, size=50, act='relu')
fc2 = fluid.layers.fc(fc1, size=102, act='softmax')
loss = fluid.layers.cross_entropy(input=fc2, label=label)
loss = fluid.layers.mean(loss)
opt = fluid.optimizer.Momentum(
learning_rate=0.1,
momentum=0.9,
regularization=fluid.regularizer.L2Decay(1e-4))
opt.minimize(loss)
#print the number of blocks in the program, 1 in this case
print(fluid.default_main_program().num_blocks)
#print the description of variable 'image'
print(fluid.default_main_program().blocks[0].var('image'))
"""
return _main_program_
def switch_main_program(program):
"""
Switch the main program to a new program.
Args:
program(Program): The new main program
Returns:
Program: The previous main program
"""
global _main_program_
prev_program = _main_program_
_main_program_ = program
return prev_program
def switch_startup_program(program):
"""
Switch the startup program to a new program
Args:
program(Program): The new startup program
Returns:
Program: The previous startup program
"""
global _startup_program_
prev_program = _startup_program_
_startup_program_ = program
return prev_program
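# program_guard below is built on these two switch functions; a manual,
# sketch-only equivalent of the guard (names are illustrative) would be:
#
#     prev_main = switch_main_program(my_main)
#     prev_startup = switch_startup_program(my_startup)
#     try:
#         ...  # build the network here
#     finally:
#         switch_main_program(prev_main)
#         switch_startup_program(prev_startup)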
@signature_safe_contextmanager
def program_guard(main_program, startup_program=None):
"""
Change the global main program and startup program with `"with"` statement.
Layer functions in the Python `"with"` block will append operators and
variables to the new main programs.
Args:
main_program(Program): New main program inside `"with"` statement.
startup_program(Program, optional): New startup program inside `"with"`
statement. :code:`None` means not changing startup program,
default_startup_program is still used.
Default: None.
Examples:
.. code-block:: python
import paddle.fluid as fluid
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program, startup_program):
data = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10, act='relu')
    Notes: A temporary :code:`Program` can be passed in if the user does not need
    to construct both a startup program and a main program.
Examples:
.. code-block:: python
import paddle.fluid as fluid
main_program = fluid.Program()
# does not care about startup program. Just pass a temporary value.
with fluid.program_guard(main_program, fluid.Program()):
data = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
"""
if not isinstance(main_program, Program):
raise TypeError("main_program should be Program")
main_program = switch_main_program(main_program)
if startup_program is not None:
if not isinstance(startup_program, Program):
raise TypeError("startup_program should be Program")
startup_program = switch_startup_program(startup_program)
yield
switch_main_program(main_program)
if startup_program is not None:
switch_startup_program(startup_program)
def _get_var(name, program=None):
"""
Get a variable by name from the global block of a program.
Args:
name(str): name of the variable
program(Program|None): program object.
If None, default_global_program() will be used.
Returns:
Variable
"""
if program is None:
program = default_main_program()
assert isinstance(name, str)
assert isinstance(program, Program)
return program.global_block().var(name)
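# Usage sketch for _get_var (comments only; the variable name "image" is
# illustrative and assumes such a variable exists in the chosen program):
#
#     img_var = _get_var("image")                      # default main program
#     img_var = _get_var("image", program=my_program)  # or an explicit program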
@signature_safe_contextmanager
def _dygraph_guard(tracer):
global _dygraph_tracer_
tmp_trace = _dygraph_tracer_
_dygraph_tracer_ = tracer
yield
_dygraph_tracer_ = tmp_trace
@signature_safe_contextmanager
def _dygraph_place_guard(place):
global _dygraph_current_expected_place_
tmp_place = _dygraph_current_expected_place_
_dygraph_current_expected_place_ = place
yield
_dygraph_current_expected_place_ = tmp_place
def load_op_library(lib_filename):
"""
Load a dynamic library, including custom operators and kernels.
    When the library is loaded, the ops and kernels registered in it
    will be available in the PaddlePaddle main process.
    Please note that custom operators can't have the same type
    as existing operators in the framework.
Args:
lib_filename (str): name of dynamic library.
Examples:
.. code-block:: python
import paddle.fluid as fluid
#fluid.load_op_library('custom_op.so')
"""
core.load_op_library(lib_filename)
OpProtoHolder.instance().update_op_proto()
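# Usage sketch for load_op_library (the .so path is illustrative; it must be a
# dynamic library built against this PaddlePaddle version):
#
#     import paddle.fluid as fluid
#     fluid.load_op_library('/path/to/custom_op.so')
#     # ops and kernels registered in the library are now available in-process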
avg_line_length: 35.124789 | max_line_length: 272 | alphanum_fraction: 0.562407

hexsha: fe06211fe6dd8f63a49201f071fd5065fe0a89e2 | size: 5422 | ext: py | lang: Python
max_stars_repo_path: tests/test_call_wasm_contract/test.py | max_stars_repo_name: uuosio/python-contract-demos | max_stars_repo_head_hexsha: 7d56ba371f2115b0ab895fca3e71092c2523f25d | max_stars_repo_licenses: ["MIT"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2020-12-08T13:15:06.000Z | max_stars_repo_stars_event_max_datetime: 2020-12-29T10:06:44.000Z
max_issues_repo_path: tests/test_call_wasm_contract/test.py | max_issues_repo_name: uuosio/python-contract-demos | max_issues_repo_head_hexsha: 7d56ba371f2115b0ab895fca3e71092c2523f25d | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tests/test_call_wasm_contract/test.py | max_forks_repo_name: uuosio/python-contract-demos | max_forks_repo_head_hexsha: 7d56ba371f2115b0ab895fca3e71092c2523f25d | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
'''
Example of calling a wasm contract.
'''
from uuoskit import chainapi, config, wallet
from uuoskit import test_helper
wasm_abi = {
"version": "eosio::abi/1.0",
"types": [],
"structs": [],
"actions": [],
"tables": [],
"ricardian_clauses": [],
"error_messages": [],
"abi_extensions": []
}
'''c++ code
#include <eosio/eosio.hpp>
#include <eosio/action.hpp>
#include <eosio/print.hpp>
extern "C" {
__attribute__((eosio_wasm_import))
int call_contract_get_args(void* args, size_t size1);
__attribute__((eosio_wasm_import))
int call_contract_set_results(void* result, size_t size1);
void apply( uint64_t receiver, uint64_t code, uint64_t action ) {
if (receiver == code) {
uint64_t args[2];
int args_size = ::call_contract_get_args(&args, sizeof(args));
eosio::print("+++++++++++call: arg size:", args_size, "\n");
eosio::check(args_size == 16, "bad args size");
if (args[0] == eosio::name("calltest1").value) {
eosio::print("+++++++++++call: args[1]:", args[1], "\n");
args[1] += 1;
::call_contract_set_results(&args[1], sizeof(uint64_t));
}
}
}
}
'''
wasm_code = b'\x00asm\x01\x00\x00\x00\x01)\x08`\x02\x7f\x7f\x01\x7f`\x01\x7f\x00`\x01~\x00`\x02\x7f\x7f\x00`\x03\x7f\x7f\x7f\x01\x7f`\x00\x00`\x01\x7f\x01\x7f`\x03~~~\x00\x02\x86\x01\x07\x03env\x16call_contract_get_args\x00\x00\x03env\x06prints\x00\x01\x03env\x06printi\x00\x02\x03env\x0ceosio_assert\x00\x03\x03env\x07printui\x00\x02\x03env\x19call_contract_set_results\x00\x00\x03env\x06memset\x00\x04\x03\x07\x06\x05\x06\x05\x01\x07\x00\x04\x05\x01p\x01\x01\x01\x05\x03\x01\x00\x01\x06\x16\x03\x7f\x01A\x80\xc0\x00\x0b\x7f\x00A\xfc\xc1\x00\x0b\x7f\x00A\xfc\xc1\x00\x0b\x07\t\x01\x05apply\x00\x0b\n\xcc\x06\x06\x04\x00\x10\t\x0b\xad\x01\x01\x03\x7f \x00!\x01\x02@\x02@\x02@ \x00A\x03qE\r\x00 \x00-\x00\x00E\r\x01 \x00A\x01j!\x01\x03@ \x01A\x03qE\r\x01 \x01-\x00\x00!\x02 \x01A\x01j"\x03!\x01 \x02\r\x00\x0b \x03A\x7fj \x00k\x0f\x0b \x01A|j!\x01\x03@ \x01A\x04j"\x01(\x02\x00"\x02A\x7fs \x02A\xff\xfd\xfbwjqA\x80\x81\x82\x84xqE\r\x00\x0b \x02A\xff\x01qE\r\x01\x03@ \x01-\x00\x01!\x02 \x01A\x01j"\x03!\x01 \x02\r\x00\x0b \x03 \x00k\x0f\x0b \x00 \x00k\x0f\x0b \x01 \x00k\x0b6\x01\x01\x7f#\x00A\x10k"\x00A\x006\x02\x0cA\x00 \x00(\x02\x0c(\x02\x00A\x07jAxq"\x006\x02\x84@A\x00 \x006\x02\x80@A\x00?\x006\x02\x8c@\x0b\x02\x00\x0b\xc1\x01\x01\x02\x7f#\x00A0k"\x03$\x00\x10\x07\x02@ \x00 \x01R\r\x00 \x03A jA\x10\x10\x00!\x04A\x90\xc0\x00\x10\x01 \x04\xac\x10\x02A\xab\xc0\x00\x10\x01\x02@ \x04A\x10F\r\x00A\x00A\xad\xc0\x00\x10\x03\x0b \x03)\x03 !\x00 \x03A\xbb\xc0\x006\x02\x10 \x03A\xbb\xc0\x00\x10\x086\x02\x14 \x03 \x03)\x03\x107\x03\x08 \x03A\x18j \x03A\x08j\x10\x0c\x1a \x00B\x80\x80\xa0\xc8\xb1\x95\xc7\xd1\xc1\x00R\r\x00A\xc5\xc0\x00\x10\x01 \x03)\x03(\x10\x04A\xab\xc0\x00\x10\x01 \x03 \x03)\x03(B\x01|7\x03( \x03A jA\x08rA\x08\x10\x05\x1a\x0bA\x00\x10\n \x03A0j$\x00\x0b\x98\x03\x03\x02\x7f\x01~\x03\x7f \x00B\x007\x03\x00\x02@\x02@\x02@\x02@\x02@ \x01(\x02\x04"\x02A\x0eI\r\x00A\x00A\xdf\xc0\x00\x10\x03A\x0c!\x03\x0c\x01\x0b \x02E\r\x03 \x02A\x0c \x02A\x0cI\x1b"\x03E\r\x01\x0b \x00)\x03\x00!\x04 \x01(\x02\x00!\x05A\x00!\x06\x03@ \x00 \x04B\x05\x86"\x047\x03\x00\x02@\x02@ \x05 \x06j-\x00\x00"\x07A.G\r\x00A\x00!\x07\x0c\x01\x0b\x02@ \x07AOjA\xff\x01qA\x04K\r\x00 \x07APj!\x07\x0c\x01\x0b\x02@ \x07A\x9f\x7fjA\xff\x01qA\x19K\r\x00 \x07A\xa5\x7fj!\x07\x0c\x01\x0bA\x00!\x07A\x00A\xc8\xc1\x00\x10\x03 \x00)\x03\x00!\x04\x0b \x00 \x04 \x07\xadB\xff\x01\x83\x84"\x047\x03\x00 \x06A\x01j"\x06 \x03I\r\x00\x0c\x02\x0b\x0b \x00)\x03\x00!\x04A\x00!\x03\x0b \x00 \x04A\x0c \x03kA\x05lA\x04j\xad\x867\x03\x00 \x02A\rG\r\x00B\x00!\x04\x02@ \x01(\x02\x00-\x00\x0c"\x06A.F\r\x00\x02@ \x06AOjA\xff\x01qA\x04K\r\x00 \x06APj\xadB\xff\x01\x83!\x04\x0c\x01\x0b\x02@ \x06A\x9f\x7fjA\xff\x01qA\x1aO\r\x00 \x06A\xa5\x7fj"\x06\xadB\xff\x01\x83!\x04 \x06A\xff\x01qA\x10I\r\x01A\x00A\x85\xc1\x00\x10\x03\x0c\x01\x0bA\x00A\xc8\xc1\x00\x10\x03\x0b \x00 \x00)\x03\x00 \x04\x847\x03\x00\x0b \x00\x0b\x0b\xae\x02\t\x00A\x90\xc0\x00\x0b\x1b+++++++++++call: arg size:\x00\x00A\xab\xc0\x00\x0b\x02\n\x00\x00A\xad\xc0\x00\x0b\x0ebad args size\x00\x00A\xbb\xc0\x00\x0b\ncalltest1\x00\x00A\xc5\xc0\x00\x0b\x1a+++++++++++call: args[1]:\x00\x00A\xdf\xc0\x00\x0b&string is too long to be a valid name\x00\x00A\x85\xc1\x00\x0bCthirteenth character in name cannot be a letter that comes after j\x00\x00A\xc8\xc1\x00\x0b4character is not in allowed character set for names\x00\x00A\x00\x0b\x04\x00!\x00\x00'
src, abi = test_helper.load_code()
test_account1 = test_helper.test_account1
test_account2 = test_helper.test_account2
async def run_test():
uuosapi = chainapi.ChainApiAsync(config.network_url)
code = uuosapi.mp_compile(test_account1, src)
print(test_account1, test_account2)
try:
r = await uuosapi.deploy_contract(test_account2, wasm_code, wasm_abi, vm_type=0)
except chainapi.ChainException as e:
print('+++deploy error:', e.json['message'])
r = await uuosapi.deploy_python_contract(test_account1, code, abi)
args = 'hello,world'
try:
r = await uuosapi.push_action(test_account1, 'sayhello', args, {test_account1: 'active'})
test_helper.print_console(r)
except chainapi.ChainException as e:
print('+++test error:', e)
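# run_test() is an async coroutine; one way to drive it (illustrative only,
# not taken from the original test file) would be:
#
#     import asyncio
#     asyncio.get_event_loop().run_until_complete(run_test())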
avg_line_length: 74.273973 | max_line_length: 3373 | alphanum_fraction: 0.68554

hexsha: 6b871a800f1f4f7c0ee8a262f88f897afd71191c | size: 17532 | ext: py | lang: Python
max_stars_repo_path: code/mohdashboardapi3.py | max_stars_repo_name: yuval-harpaz/covid-19-israel-matlab | max_stars_repo_head_hexsha: cc8619a830bcc10053fc84f96befb869f5523f7b | max_stars_repo_licenses: ["CC0-1.0"] | max_stars_count: 5 | max_stars_repo_stars_event_min_datetime: 2021-04-16T15:12:45.000Z | max_stars_repo_stars_event_max_datetime: 2021-12-31T22:20:42.000Z
max_issues_repo_path: code/mohdashboardapi3.py | max_issues_repo_name: yuval-harpaz/covid-19-israel-matlab | max_issues_repo_head_hexsha: cc8619a830bcc10053fc84f96befb869f5523f7b | max_issues_repo_licenses: ["CC0-1.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: code/mohdashboardapi3.py | max_forks_repo_name: yuval-harpaz/covid-19-israel-matlab | max_forks_repo_head_hexsha: cc8619a830bcc10053fc84f96befb869f5523f7b | max_forks_repo_licenses: ["CC0-1.0"] | max_forks_count: 2 | max_forks_repo_forks_event_min_datetime: 2020-07-08T20:44:28.000Z | max_forks_repo_forks_event_max_datetime: 2020-08-28T17:41:37.000Z
import os
import requests
import pandas as pd
GIT_DIR = '/home/innereye/Repos/israel_moh_covid_dashboard_data'
if os.path.isdir(r'C:\Users\User\Documents\Corona'):
GIT_DIR = r'C:\Users\User\Documents\Corona'
api_query = {'requests': [
{'id': '0', 'queryName': 'lastUpdate', 'single': True, 'parameters': {}},
{'id': '1', 'queryName': 'patientsPerDate', 'single': False, 'parameters': {}},
{'id': '2', 'queryName': 'testResultsPerDate', 'single': False, 'parameters': {}},
{'id': '3', 'queryName': 'contagionDataPerCityPublic', 'single': False, 'parameters': {}},
{'id': '4', 'queryName': 'infectedByPeriodAndAgeAndGender',
'single': False, 'parameters': {}},
{'id': '5', 'queryName': 'hospitalStatus', 'single': False, 'parameters': {}},
{'id': '6', 'queryName': 'isolatedDoctorsAndNurses', 'single': False, 'parameters': {}},
{'id': '7', 'queryName': 'otherHospitalizedStaff', 'single': False, 'parameters': {}},
{'id': '8', 'queryName': 'infectedPerDate', 'single': False, 'parameters': {}},
{'id': '9', 'queryName': 'updatedPatientsOverallStatus', 'single': False, 'parameters': {}},
{'id': '10', 'queryName': 'sickPerDateTwoDays', 'single': False, 'parameters': {}},
{'id': '11', 'queryName': 'sickPerLocation', 'single': False, 'parameters': {}},
{'id': '12', 'queryName': 'deadPatientsPerDate', 'single': False, 'parameters': {}},
{'id': '13', 'queryName': 'recoveredPerDay', 'single': False, 'parameters': {}},
{'id': '14', 'queryName': 'doublingRate', 'single': False, 'parameters': {}},
{'id': '15', 'queryName': 'CalculatedVerified', 'single': False, 'parameters': {}},
{'id': '16',
'queryName': 'deadByPeriodAndAgeAndGender',
'single': False, 'parameters': {}},
{'id': '17',
'queryName': 'breatheByPeriodAndAgeAndGender',
'single': False, 'parameters': {}},
{'id': '18',
'queryName': 'severeByPeriodAndAgeAndGender',
'single': False, 'parameters': {}},
{'id': '19', 'queryName': 'spotlightLastupdate', 'single': False, 'parameters': {}},
{'id': '20', 'queryName': 'patientsStatus', 'single': False, 'parameters': {}},
{'id': '21', 'queryName': 'cumSeriusAndBreath', 'single': False, 'parameters': {}},
{'id': '22', 'queryName': 'LastWeekLabResults', 'single': False, 'parameters': {}},
{'id': '23', 'queryName': 'verifiedDoctorsAndNurses', 'single': False, 'parameters': {}},
{'id': '24', 'queryName': 'isolatedVerifiedDoctorsAndNurses', 'single': False, 'parameters': {}},
{'id': '25', 'queryName': 'spotlightPublic', 'single': False, 'parameters': {}},
{'id': '26', 'queryName': 'vaccinated', 'single': False, 'parameters': {}},
{'id': '27', 'queryName': 'vaccinationsPerAge', 'single': False, 'parameters': {}},
{'id': '28', 'queryName': 'testsPerDate', 'single': False, 'parameters': {}},
{'id': '29', 'queryName': 'averageInfectedPerWeek', 'single': False, 'parameters': {}},
{'id': '30', 'queryName': 'spotlightAggregatedPublic', 'single': True, 'parameters': {}},
{'id': '31', 'queryName': 'HospitalBedStatusSegmentation', 'single': False, 'parameters': {}},
{'id': '32', 'queryName': 'infectionFactor', 'single': False, 'parameters': {}},
{'id': '33', 'queryName': 'vaccinatedVerifiedDaily', 'single': False, 'parameters': {'days': 0}},
{'id': '34', 'queryName': 'vaccinatedVerifiedByAge', 'single': False, 'parameters': {}},
{'id': '35', 'queryName': 'researchGraph', 'single': False, 'parameters': {}},
]}
api_address = 'https://datadashboardapi.health.gov.il/api/queries/_batch'
def get_api_data():
data = requests.post(api_address, json=api_query).json()
data_dict = {r['queryName']:data[int(r['id'])]['data'] for r in api_query['requests']}
return data_dict
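# Sketch of the resulting structure (keys come from api_query above; the
# example values are illustrative, not real dashboard data):
#
#     data = get_api_data()
#     data['lastUpdate']          # e.g. {'lastUpdate': '2021-01-01T00:00:00.000Z'}
#     data['patientsPerDate']     # list of per-day dicts with hospitalization counts
#     data['vaccinated']          # list of per-day vaccination dicts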
data = get_api_data()
os.chdir(GIT_DIR)
DATA_FNAME = 'moh_dashboard_api_data.json'
COMMIT_HIST_FNAME = 'commit_history.json'
AGES_FNAME = 'ages_dists.csv'
ALL_AGES_FNAMES = {'infected':'ages_dists_v2.csv', 'dead':'deaths_ages_dists_v2.csv',
'severe':'severe_ages_dists_v2.csv', 'breathe':'ventilated_ages_dists_v2.csv'}
HOSP_FNAME = 'hospitalized_and_infected.csv'
VAC_FNAME = 'vaccinated.csv'
VAC_AGES_FNAME = 'vaccinated_by_age.csv'
VAC_CASES_DAILY = 'cases_by_vaccination_daily.csv'
VAC_CASES_AGES = 'cases_by_vaccination_ages.csv'
HOSPITALS_FNAME = 'hospital_occupancy.csv'
HOSP_HEB_FIELD_NAMES = [
'\xd7\xaa\xd7\xa4\xd7\x95\xd7\xa1\xd7\x94 \xd7\x9b\xd7\x9c\xd7\x9c\xd7\x99\xd7\xaa',
'\xd7\xaa\xd7\xa4\xd7\x95\xd7\xa1\xd7\xaa \xd7\xa7\xd7\x95\xd7\xa8\xd7\x95\xd7\xa0\xd7\x94',
'\xd7\xa6\xd7\x95\xd7\x95\xd7\xaa \xd7\x91\xd7\x91\xd7\x99\xd7\x93\xd7\x95\xd7\x93']
ISOLATED_FNAME = 'isolated_staff.csv'
names_trans = {
'doctors' : u'\u05e8\u05d5\u05e4\u05d0\u05d9\u05dd/\u05d5\u05ea',
'nurses' : u'\u05d0\u05d7\u05d9\u05dd/\u05d5\u05ea',
'others' : u'\u05de\u05e7\u05e6\u05d5\u05e2\u05d5\u05ea\n\u05d0\u05d7\u05e8\u05d9\u05dd'}
heb_map = {
u'\u05e6\u05d4\u05d5\u05d1': 'yellow',
u'\u05e6\u05d4\u05d5\u05d1 ': 'yellow',
u'\u05d0\u05d3\u05d5\u05dd': 'red',
u'\u05d0\u05d3\u05d5\u05dd ': 'red',
u'\u05db\u05ea\u05d5\u05dd': 'orange',
u'\u05db\u05ea\u05d5\u05dd ': 'orange',
u'\u05d9\u05e8\u05d5\u05e7': 'green',
u'\u05d9\u05e8\u05d5\u05e7 ': 'green',
u'\u05d0\u05e4\u05d5\u05e8': 'gray',
u'\u05d0\u05e4\u05d5\u05e8 ': 'gray',
u' \u05e7\u05d8\u05df \u05de-15 ': '<15'
}
heb_translit = {
u'\u05d0': 'a',
u'\u05d1': 'b',
u'\u05d2': 'g',
u'\u05d3': 'd',
u'\u05d4': 'h',
u'\u05d5': 'v',
u'\u05d6': 'z',
u'\u05d7': 'j',
u'\u05d8': 't',
u'\u05d9': 'y',
u'\u05da': 'C',
u'\u05db': 'c',
u'\u05dc': 'l',
u'\u05dd': 'M',
u'\u05de': 'm',
u'\u05df': 'N',
u'\u05e0': 'n',
u'\u05e1': 's',
u'\u05e2': 'e',
u'\u05e3': 'f',
u'\u05e4': 'p',
u'\u05e5': 'X',
u'\u05e6': 'x',
u'\u05e7': 'q',
u'\u05e8': 'r',
u'\u05e9': 'SH',
u'\u05ea': 'T',
'"' : '',
' ': '_'
}
def safe_str(s):
return '%s'%(heb_map.get(s, s))
def safe_int(x):
# converts possible None returned by API to 0
return x if x else 0
def add_line_to_file(fname, new_line):
    # append a single line (plus newline) to an existing csv file
    with open(fname, 'r') as opr:
        prev_file = opr.read()
    new_file = prev_file + new_line + '\n'
    with open(fname, 'w') as opf:
        opf.write(new_file)
def ages_csv_line(data, prefix='infected'):
date = data['lastUpdate']['lastUpdate']
ages_dicts = data[prefix + 'ByPeriodAndAgeAndGender']
period = u'\u05de\u05ea\u05d7\u05d9\u05dc\u05ea \u05e7\u05d5\u05e8\u05d5\u05e0\u05d4'
secs = [ent for ent in ages_dicts if ent['period'] == period]
## assert ''.join([s['section'][0] for s in secs]) == '0123456789'
assert [s['section'] for s in secs] == [
'0-9', '10-11', '12-15', '16-19', '20-29', '30-39',
'40-49', '50-59', '60-69', '70-74', '75+']
males = [safe_int(sec['male']['amount']) for sec in secs]
females = [safe_int(sec['female']['amount']) for sec in secs]
totals = [m+f for m,f in zip(males, females)]
    return ','.join([date] + list(map(str, totals)) + list(map(str, males)) + list(map(str, females)))
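# Usage sketch (comments only; `data` is the dict returned by get_api_data()):
#
#     line = ages_csv_line(data)              # default prefix: 'infected'
#     line = ages_csv_line(data, 'dead')      # other prefixes map via ALL_AGES_FNAMES
#     # each line is: date, 11 total counts, 11 male counts, 11 female counts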
def update_ages_csv(data):
ages_line = ages_csv_line(data)
add_line_to_file(AGES_FNAME, ages_line)
def update_specific_ages_csv(data, prefix):
fname = ALL_AGES_FNAMES[prefix]
ages_line = ages_csv_line(data, prefix)
add_line_to_file(fname, ages_line)
def update_all_ages_csvs(data):
for prefix in ALL_AGES_FNAMES.keys():
update_specific_ages_csv(data, prefix)
def update_age_vaccinations_csv(data):
vac_ages = data['vaccinationsPerAge']
# Check for surprising age group
assert len(vac_ages) == 10
new_line = data['lastUpdate']['lastUpdate']+',' + ','.join(['%d,%d,%d'%(
g['age_group_population'],g['vaccinated_first_dose'],g['vaccinated_second_dose'])
for g in vac_ages])
add_line_to_file(VAC_AGES_FNAME, new_line)
def patients_to_csv_line(pat):
keys = ['Counthospitalized', 'Counthospitalized_without_release',
'CountEasyStatus', 'CountMediumStatus', 'CountHardStatus',
'CountCriticalStatus' ,'CountBreath', 'CountDeath',
'CountSeriousCriticalCum', 'CountBreathCum', 'CountDeathCum',
'new_hospitalized', 'serious_critical_new',
'patients_hotel', 'patients_home',
]
return ','.join([pat['date'][:10]]+[str(pat[key]) for key in keys])
def create_patients_csv(data):
start_date = u'2020-03-02T00:00:00.000Z'
patients = data['patientsPerDate']
assert patients[0]['date'] == start_date
N = len(patients)
    # Sometimes the json contains multiple entries... argh
if len(set([p['date'] for p in patients])) != N:
rev_pat_dates = [p['date'] for p in patients[::-1]]
pat_dates_fil = sorted(set(rev_pat_dates))
patients = [patients[N-1-rev_pat_dates.index(date)] for date in pat_dates_fil]
N = len(patients)
pat_lines = map(patients_to_csv_line, patients)
recs = data['recoveredPerDay'][-N:]
inf = data['infectedPerDate'][-N:]
assert recs[0]['date'] == inf[0]['date'] == start_date
# assert recs[0]['date'] == start_date
tests = [t for t in data['testResultsPerDate'] if t['positiveAmount']!=-1][-N:]
tests2 = data['testsPerDate'][-N:]
assert tests[0]['date'] == tests2[0]['date'] == start_date
epi_lines = [','.join(map(str, [t['positiveAmount'], i['sum'],
i['amount'], r['amount'],
t['amount'], t['amountVirusDiagnosis'],
t['amountPersonTested'], t['amountMagen'],
t2['amountSurvey']])) for \
i, r, t, t2 in zip(inf, recs, tests, tests2)]
inff = data['infectionFactor']
def repr_if_not_none(x):
if x is None: return ''
return repr(x)
inff_dict = {i['day_date']:repr_if_not_none(i['R']) for i in inff}
inff_lines = [inff_dict.get(p['date'], '') for p in patients]
title_line = ','.join(['Date', 'Hospitalized', 'Hospitalized without release',
'Easy', 'Medium', 'Hard', 'Critical', 'Ventilated', 'New deaths',
'Serious (cumu)', 'Ventilated (cumu)', 'Dead (cumu)',
'New hosptialized', 'New serious', 'In hotels', 'At home',
'Positive results', 'Total infected', 'New infected',
'New receovered', 'Total tests', 'Tests for idenitifaction',
'People tested', 'Tests for Magen', 'Survey tests',
'Official R'])
csv_data = '\n'.join([title_line] + [
','.join([p,e,i]) for p,e,i in zip(pat_lines, epi_lines, inff_lines)])
    with open(HOSP_FNAME, 'w') as opf:
        opf.write(csv_data + '\n')
def create_vaccinated_csv(data):
vac = data['vaccinated']
title_line = ','.join([
'Date', 'Vaccinated (daily)','Vaccinated (cumu)','Vaccinated population percentage',
'Second dose (daily)','Second dose (cumu)','Second dose population precentage'])
data_lines = [','.join([d['Day_Date'][:10]]+list(map(str, [
d['vaccinated'], d['vaccinated_cum'], d['vaccinated_population_perc'],
d['vaccinated_seconde_dose'], d['vaccinated_seconde_dose_cum'],
d['vaccinated_seconde_dose_population_perc']]))) for d in vac]
csv_data = '\n'.join([title_line]+data_lines)
# file(VAC_FNAME, 'w').write(csv_data+'\n')
# assert os.system('git add '+VAC_FNAME) == 0
    with open(VAC_FNAME, 'w') as opf:
        opf.write(csv_data + '\n')
def extend_hospital_csv(data):
    csv_prev_lines = open(HOSPITALS_FNAME).read().splitlines()
keys = [k.split(':')[0] for k in csv_prev_lines[0].split(',')[1::3]]
    hosp_dict = dict([(z['name'].replace('"', '').replace("'", ""),
(z['normalOccupancy'], z['coronaOccupancy'], z['isolatedTeam']))
for z in data['hospitalStatus']])
    new_line = [data['lastUpdate']['lastUpdate']]
for k in keys:
if k in hosp_dict.keys():
no, co, it = hosp_dict[k]
if no is None:
no = 'None'
else:
no = '%.2f'%(no)
new_line.append('%s,%.2f,%d'%(no,co,it))
else:
new_line.append(',,')
a,b,c = HOSP_HEB_FIELD_NAMES
for k in sorted(list(set(hosp_dict.keys()).difference(set(keys)))):
csv_prev_lines[0] += ',%s: %s,%s: %s,%s :%s'%(k,a,k,b,k,c)
for j in range(1,len(csv_prev_lines)):
csv_prev_lines[j] += ',,,'
no, co, it = hosp_dict[k]
if no is None:
no = 'None'
else:
no = '%.2f'%(no)
new_line.append('%s,%.2f,%d'%(no,co,it))
csv_prev_lines.append(','.join(new_line))
    with open(HOSPITALS_FNAME, 'w') as opf:
        opf.write('\n'.join(csv_prev_lines))
    assert os.system('git add ' + HOSPITALS_FNAME) == 0
def update_isolated_csv(data):
    csv_lines = open(ISOLATED_FNAME).read().splitlines()
isols = {item['name'] : item['amount'] for item in data['isolatedDoctorsAndNurses']}
veris = {item['name'] : item['amount'] for item in data['verifiedDoctorsAndNurses']}
new_line = [data['lastUpdate']['lastUpdate']] + [str(dic[names_trans[k]]) for dic,k in
[(isols, 'doctors'),(veris, 'doctors'),
(isols, 'nurses'), (veris, 'nurses'),
(isols, 'others'), (veris, 'others')]]
if new_line[1:] == csv_lines[-1].split(',')[1:]: return
    with open(ISOLATED_FNAME, 'w') as opf:
        opf.write('\n'.join(csv_lines + [','.join(new_line)]))
    assert os.system('git add ' + ISOLATED_FNAME) == 0
city_title_line = ','.join(['Date']+[
'sickCount', 'actualSick', 'patientDiffPopulationForTenThousands', 'testLast7Days',
'verifiedLast7Days'] + [
u'activeSick', u'activeSickTo1000',u'sickTo10000', u'growthLastWeek', u'positiveTests',
u'score', u'color', u'governmentColor', u'firstDose', u'secondDose'
])
def create_city_line(cpp_ent, spp_ent, date):
cpp_keys = ['sickCount', 'actualSick', 'patientDiffPopulationForTenThousands', 'testLast7Days',
'verifiedLast7Days']
spp_keys = [u'activeSick', u'activeSickTo1000',u'sickTo10000', u'growthLastWeek', u'positiveTests',
u'score', u'color', u'governmentColor', u'firstDose', u'secondDose']
line = ','.join([date]+[safe_str(cpp_ent.get(key, '')) for key in cpp_keys] + \
[safe_str(spp_ent.get(key, '')) for key in spp_keys])
return line
def strip_name(name):
return ''.join([heb_translit.get(c,c) for c in name])
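# strip_name() transliterates Hebrew city names into ASCII file names using the
# heb_translit map above; small illustrative examples (alef, bet shown by code
# point so the sketch stays ASCII):
#
#     strip_name(u'\u05d0\u05d1')   # -> 'ab'
#     strip_name(u'Tel Aviv')       # non-Hebrew characters pass through -> 'Tel_Aviv'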
def update_cities(new_data):
date = new_data['lastUpdate']['lastUpdate']
cd_dict = {a['city'] : a for a in new_data['contagionDataPerCityPublic']}
sp_dict = {a['name'] : a for a in new_data['spotlightPublic']}
for n in set(sp_dict.keys())|set(cd_dict.keys()):
line = create_city_line(cd_dict.get(n, {}), sp_dict.get(n, {}), date)
fname = 'cities/%s.csv'%(strip_name(n))
try:
add_line_to_file(fname, line)
except IOError:
# file didn't exist - new city name encountered
            print('New city!')
            print(fname)
            with open(fname, 'w') as opf:
                opf.write(city_title_line + '\n' + line + '\n')
            assert os.system('git add ' + fname) == 0
            add_line_to_file('cities_transliteration.csv', '%s,%s' % (n, strip_name(n)))
def create_cases_by_vaccinations_daily(data):
## res = ',' + (','*9).join(['All ages', 'Above 60', 'Below 60']) + ','*8 + '\n'
res = ',' + ',,,'.join([pre+' - '+suf
for pre in ['All ages', 'Above 60', 'Below 60']
for suf in ['fully vaccinated', 'partially vaccinated', 'not vaccinated']
]) + ','*2 + '\n'
res += 'Date' + ',Total Amount,Daily verified,Total serious'*9 + '\n'
vvd = data['vaccinatedVerifiedDaily']
for i in range(0, len(vvd), 3):
s = sorted(vvd[i:i+3], key=lambda x: x['age_group'])
        assert s[0]['day_date'] == s[1]['day_date'] == s[2]['day_date']
line = s[0]['day_date']+','
line += ','.join([
str(ss[case_type%vacc_type])
for ss in s
for vacc_type in ['vaccinated', 'vaccinated_procces', 'not_vaccinated']
for case_type in ['%s_amount_cum', 'verified_amount_%s', 'Serious_amount_%s']])
res += line + '\n'
    # file(VAC_CASES_DAILY, 'w').write(res)
    with open(VAC_CASES_DAILY, 'w') as opf:
        opf.write(res + '\n')
data = get_api_data()
create_patients_csv(data)
create_vaccinated_csv(data)
create_cases_by_vaccinations_daily(data)
update_age_vaccinations_csv(data)
# vacc = pd.read_json('https://datadashboardapi.health.gov.il/api/queries/vaccinatedVerifiedDaily')
# vacc.to_csv('vaccinatedVerifiedDaily.csv')
research = pd.read_json('https://datadashboardapi.health.gov.il/api/queries/researchGraph')
research.to_csv('researchGraph.csv')
vacc = pd.read_json('https://datadashboardapi.health.gov.il/api/queries/vaccinationsPerAge')
vacc.to_csv('vaccinationsPerAge.csv')
avg_line_length: 44.953846 | max_line_length: 104 | alphanum_fraction: 0.597878