Dataset schema (one row per source file; ⌀ marks nullable columns):

| column | dtype | stats |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 4 – 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4–209 |
| max_stars_repo_name | string | lengths 5–121 |
| max_stars_repo_head_hexsha | string | lengths 40–40 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 ⌀ | 1 – 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | lengths 24–24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | lengths 24–24 |
| max_issues_repo_path | string | lengths 4–209 |
| max_issues_repo_name | string | lengths 5–121 |
| max_issues_repo_head_hexsha | string | lengths 40–40 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 ⌀ | 1 – 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | lengths 24–24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | lengths 24–24 |
| max_forks_repo_path | string | lengths 4–209 |
| max_forks_repo_name | string | lengths 5–121 |
| max_forks_repo_head_hexsha | string | lengths 40–40 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 ⌀ | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | lengths 24–24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | lengths 24–24 |
| content | string | lengths 4 – 1.02M |
| avg_line_length | float64 | 1.07 – 66.1k |
| max_line_length | int64 | 4 – 266k |
| alphanum_fraction | float64 | 0.01 – 1 |

hexsha: b505a0cc8f3a985629823c99bd91b07762dddac6 | size: 4,299 | ext: py | lang: Python
max_stars: ppci/arch/example.py | rakati/ppci-mirror | 8f5b0282fd1122d7c389b39c86fcf5d9352b7bb2 | ["BSD-2-Clause"] | count: null | null – null
max_issues: ppci/arch/example.py | rakati/ppci-mirror | 8f5b0282fd1122d7c389b39c86fcf5d9352b7bb2 | ["BSD-2-Clause"] | count: null | null – null
max_forks: ppci/arch/example.py | rakati/ppci-mirror | 8f5b0282fd1122d7c389b39c86fcf5d9352b7bb2 | ["BSD-2-Clause"] | count: 1 | 2021-11-23T14:23:04.000Z – 2021-11-23T14:23:04.000Z
content:
"""
This is an example target with some instructions. It is used in test-cases
and serves as a minimal example.
"""
from .arch import Architecture
from .arch_info import ArchInfo, TypeInfo
from .encoding import Instruction, Syntax, Operand
from .registers import Register, RegisterClass
from .. import ir
from .isa import Isa
class ExampleArch(Architecture):
""" Simple example architecture. This is intended as starting point
when creating a new backend """
name = "example"
def __init__(self, options=None):
super().__init__(options=options)
register_classes = [
RegisterClass(
"reg", [ir.i32, ir.ptr], ExampleRegister, [R0, R1, R2, R3, R10]
),
RegisterClass("hreg", [ir.i16], HalfExampleRegister, [R10l]),
]
self.gdb_registers = gdb_registers
self.gdb_pc = R0
self.isa = Isa()
self.info = ArchInfo(
type_infos={
ir.i8: TypeInfo(1, 1),
ir.u8: TypeInfo(1, 1),
ir.i16: TypeInfo(2, 2),
ir.u16: TypeInfo(2, 2),
ir.i32: TypeInfo(4, 4),
ir.u32: TypeInfo(4, 4),
ir.f32: TypeInfo(4, 4),
ir.f64: TypeInfo(8, 8),
"int": ir.i32,
"ptr": ir.u32,
},
register_classes=register_classes,
)
def gen_prologue(self, frame):
return []
def gen_epilogue(self, frame):
return []
def gen_call(self, label, args, rv):
return []
def gen_function_enter(self, args):
return []
def gen_function_exit(self, rv):
return []
def determine_arg_locations(self, arg_types):
""" Given a set of argument types, determine locations
"""
arg_locs = []
regs = [R0, R1, R2, R3]
for a in arg_types:
r = regs.pop(0)
arg_locs.append(r)
return arg_locs
def determine_rv_location(self, ret_type):
rv = R0
return rv
class ExampleRegister(Register):
""" Example register class """
bitsize = 32
class HalfExampleRegister(Register):
""" Example register class """
bitsize = 16
R0 = ExampleRegister("r0", 0)
R1 = ExampleRegister("r1", 1)
R2 = ExampleRegister("r2", 2)
R3 = ExampleRegister("r3", 3)
R4 = ExampleRegister("r4", 4)
R5 = ExampleRegister("r5", 5)
R6 = ExampleRegister("r6", 6)
# Two aliasing registers:
R10 = ExampleRegister("r10", 10)
R10l = HalfExampleRegister("r10l", 100, aliases=(R10,))
gdb_registers = (R0, R1, R2)
class ExampleInstruction(Instruction):
""" Base class for all example instructions """
tokens = []
class Def(ExampleInstruction):
rd = Operand("rd", ExampleRegister, write=True)
syntax = Syntax(["def", " ", rd])
class DefHalf(ExampleInstruction):
rd = Operand("rd", HalfExampleRegister, write=True)
syntax = Syntax(["def", " ", rd])
class Use(ExampleInstruction):
rn = Operand("rn", ExampleRegister, read=True)
syntax = Syntax(["use", " ", rn])
class UseHalf(ExampleInstruction):
rn = Operand("rn", HalfExampleRegister, read=True)
syntax = Syntax(["use", " ", rn])
class DefUse(ExampleInstruction):
rd = Operand("rd", ExampleRegister, write=True)
rn = Operand("rn", ExampleRegister, read=True)
syntax = Syntax(["cpy", " ", rd, ",", " ", rn])
class Add(ExampleInstruction):
rd = Operand("rd", ExampleRegister, write=True)
rm = Operand("rm", ExampleRegister, read=True)
rn = Operand("rn", ExampleRegister, read=True)
syntax = Syntax(["add", " ", rd, ",", " ", rm, ",", " ", rn])
class Cmp(ExampleInstruction):
rm = Operand("rm", ExampleRegister, read=True)
rn = Operand("rn", ExampleRegister, read=True)
syntax = Syntax(["cmp", " ", rm, ",", " ", rn])
class Use3(ExampleInstruction):
rm = Operand("rm", ExampleRegister, read=True)
rn = Operand("rn", ExampleRegister, read=True)
ro = Operand("ro", ExampleRegister, read=True)
syntax = Syntax(["use3", " ", rm, ",", " ", rn, ",", " ", ro])
class Mov(ExampleInstruction):
rd = Operand("rd", ExampleRegister, write=True)
rm = Operand("rm", ExampleRegister, read=True)
syntax = Syntax(["mov", " ", rd, ",", " ", rm])
avg_line_length: 26.86875 | max_line_length: 79 | alphanum_fraction: 0.592929

hexsha: e77b61c6ccaa67a120ee4b1d377264e2846813f6 | size: 27,563 | ext: py | lang: Python
max_stars: tests/sensors/test_base.py | JGoldman110/airflow | 93e2c945b1be5b7c9700e780d2aa67846503763b | ["Apache-2.0"] | count: 1 | 2022-03-25T23:49:03.000Z – 2022-03-25T23:49:03.000Z
max_issues: tests/sensors/test_base.py | JGoldman110/airflow | 93e2c945b1be5b7c9700e780d2aa67846503763b | ["Apache-2.0"] | count: null | null – null
max_forks: tests/sensors/test_base.py | JGoldman110/airflow | 93e2c945b1be5b7c9700e780d2aa67846503763b | ["Apache-2.0"] | count: null | null – null
content:
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import timedelta
from unittest.mock import Mock, patch
import pytest
from freezegun import freeze_time
from airflow.exceptions import AirflowException, AirflowRescheduleException, AirflowSensorTimeout
from airflow.models import TaskReschedule
from airflow.models.xcom import XCom
from airflow.operators.dummy import DummyOperator
from airflow.sensors.base import BaseSensorOperator, PokeReturnValue, poke_mode_only
from airflow.ti_deps.deps.ready_to_reschedule import ReadyToRescheduleDep
from airflow.utils import timezone
from airflow.utils.context import Context
from airflow.utils.state import State
from airflow.utils.timezone import datetime
from tests.test_utils import db
DEFAULT_DATE = datetime(2015, 1, 1)
TEST_DAG_ID = 'unit_test_dag'
DUMMY_OP = 'dummy_op'
SENSOR_OP = 'sensor_op'
DEV_NULL = 'dev/null'
class DummySensor(BaseSensorOperator):
def __init__(self, return_value=False, **kwargs):
super().__init__(**kwargs)
self.return_value = return_value
def poke(self, context: Context):
return self.return_value
class DummySensorWithXcomValue(BaseSensorOperator):
def __init__(self, return_value=False, xcom_value=None, **kwargs):
super().__init__(**kwargs)
self.xcom_value = xcom_value
self.return_value = return_value
def poke(self, context: Context):
return PokeReturnValue(self.return_value, self.xcom_value)
class TestBaseSensor:
@staticmethod
def clean_db():
db.clear_db_runs()
db.clear_db_task_reschedule()
db.clear_db_xcom()
@pytest.fixture(autouse=True)
def _auto_clean(self, dag_maker):
"""(auto use)"""
self.clean_db()
yield
self.clean_db()
@pytest.fixture
def make_sensor(self, dag_maker):
"""Create a DummySensor and associated DagRun"""
def _make_sensor(return_value, task_id=SENSOR_OP, **kwargs):
poke_interval = 'poke_interval'
timeout = 'timeout'
if poke_interval not in kwargs:
kwargs[poke_interval] = 0
if timeout not in kwargs:
kwargs[timeout] = 0
with dag_maker(TEST_DAG_ID):
if "xcom_value" in kwargs:
sensor = DummySensorWithXcomValue(task_id=task_id, return_value=return_value, **kwargs)
else:
sensor = DummySensor(task_id=task_id, return_value=return_value, **kwargs)
dummy_op = DummyOperator(task_id=DUMMY_OP)
sensor >> dummy_op
return sensor, dag_maker.create_dagrun()
return _make_sensor
@classmethod
def _run(cls, task, **kwargs):
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True, **kwargs)
def test_ok(self, make_sensor):
sensor, dr = make_sensor(True)
self._run(sensor)
tis = dr.get_task_instances()
assert len(tis) == 2
for ti in tis:
if ti.task_id == SENSOR_OP:
assert ti.state == State.SUCCESS
if ti.task_id == DUMMY_OP:
assert ti.state == State.NONE
def test_fail(self, make_sensor):
sensor, dr = make_sensor(False)
with pytest.raises(AirflowSensorTimeout):
self._run(sensor)
tis = dr.get_task_instances()
assert len(tis) == 2
for ti in tis:
if ti.task_id == SENSOR_OP:
assert ti.state == State.FAILED
if ti.task_id == DUMMY_OP:
assert ti.state == State.NONE
def test_soft_fail(self, make_sensor):
sensor, dr = make_sensor(False, soft_fail=True)
self._run(sensor)
tis = dr.get_task_instances()
assert len(tis) == 2
for ti in tis:
if ti.task_id == SENSOR_OP:
assert ti.state == State.SKIPPED
if ti.task_id == DUMMY_OP:
assert ti.state == State.NONE
def test_soft_fail_with_retries(self, make_sensor):
sensor, dr = make_sensor(
return_value=False, soft_fail=True, retries=1, retry_delay=timedelta(milliseconds=1)
)
# first run times out and task instance is skipped
self._run(sensor)
tis = dr.get_task_instances()
assert len(tis) == 2
for ti in tis:
if ti.task_id == SENSOR_OP:
assert ti.state == State.SKIPPED
if ti.task_id == DUMMY_OP:
assert ti.state == State.NONE
def test_ok_with_reschedule(self, make_sensor):
sensor, dr = make_sensor(return_value=None, poke_interval=10, timeout=25, mode='reschedule')
sensor.poke = Mock(side_effect=[False, False, True])
# first poke returns False and task is re-scheduled
date1 = timezone.utcnow()
with freeze_time(date1):
self._run(sensor)
tis = dr.get_task_instances()
assert len(tis) == 2
for ti in tis:
if ti.task_id == SENSOR_OP:
# verify task is re-scheduled, i.e. state set to UP_FOR_RESCHEDULE
assert ti.state == State.UP_FOR_RESCHEDULE
# verify task start date is the initial one
assert ti.start_date == date1
# verify one row in task_reschedule table
task_reschedules = TaskReschedule.find_for_task_instance(ti)
assert len(task_reschedules) == 1
assert task_reschedules[0].start_date == date1
assert task_reschedules[0].reschedule_date == date1 + timedelta(seconds=sensor.poke_interval)
if ti.task_id == DUMMY_OP:
assert ti.state == State.NONE
# second poke returns False and task is re-scheduled
date2 = date1 + timedelta(seconds=sensor.poke_interval)
with freeze_time(date2):
self._run(sensor)
tis = dr.get_task_instances()
assert len(tis) == 2
for ti in tis:
if ti.task_id == SENSOR_OP:
# verify task is re-scheduled, i.e. state set to UP_FOR_RESCHEDULE
assert ti.state == State.UP_FOR_RESCHEDULE
# verify task start date is the initial one
assert ti.start_date == date1
# verify two rows in task_reschedule table
task_reschedules = TaskReschedule.find_for_task_instance(ti)
assert len(task_reschedules) == 2
assert task_reschedules[1].start_date == date2
assert task_reschedules[1].reschedule_date == date2 + timedelta(seconds=sensor.poke_interval)
if ti.task_id == DUMMY_OP:
assert ti.state == State.NONE
# third poke returns True and task succeeds
date3 = date2 + timedelta(seconds=sensor.poke_interval)
with freeze_time(date3):
self._run(sensor)
tis = dr.get_task_instances()
assert len(tis) == 2
for ti in tis:
if ti.task_id == SENSOR_OP:
assert ti.state == State.SUCCESS
# verify task start date is the initial one
assert ti.start_date == date1
if ti.task_id == DUMMY_OP:
assert ti.state == State.NONE
def test_fail_with_reschedule(self, make_sensor):
sensor, dr = make_sensor(return_value=False, poke_interval=10, timeout=5, mode='reschedule')
# first poke returns False and task is re-scheduled
date1 = timezone.utcnow()
with freeze_time(date1):
self._run(sensor)
tis = dr.get_task_instances()
assert len(tis) == 2
for ti in tis:
if ti.task_id == SENSOR_OP:
assert ti.state == State.UP_FOR_RESCHEDULE
if ti.task_id == DUMMY_OP:
assert ti.state == State.NONE
# second poke returns False, timeout occurs
date2 = date1 + timedelta(seconds=sensor.poke_interval)
with freeze_time(date2):
with pytest.raises(AirflowSensorTimeout):
self._run(sensor)
tis = dr.get_task_instances()
assert len(tis) == 2
for ti in tis:
if ti.task_id == SENSOR_OP:
assert ti.state == State.FAILED
if ti.task_id == DUMMY_OP:
assert ti.state == State.NONE
def test_soft_fail_with_reschedule(self, make_sensor):
sensor, dr = make_sensor(
return_value=False, poke_interval=10, timeout=5, soft_fail=True, mode='reschedule'
)
# first poke returns False and task is re-scheduled
date1 = timezone.utcnow()
with freeze_time(date1):
self._run(sensor)
tis = dr.get_task_instances()
assert len(tis) == 2
for ti in tis:
if ti.task_id == SENSOR_OP:
assert ti.state == State.UP_FOR_RESCHEDULE
if ti.task_id == DUMMY_OP:
assert ti.state == State.NONE
# second poke returns False, timeout occurs
date2 = date1 + timedelta(seconds=sensor.poke_interval)
with freeze_time(date2):
self._run(sensor)
tis = dr.get_task_instances()
assert len(tis) == 2
for ti in tis:
if ti.task_id == SENSOR_OP:
assert ti.state == State.SKIPPED
if ti.task_id == DUMMY_OP:
assert ti.state == State.NONE
def test_ok_with_reschedule_and_retry(self, make_sensor):
sensor, dr = make_sensor(
return_value=None,
poke_interval=10,
timeout=5,
retries=1,
retry_delay=timedelta(seconds=10),
mode='reschedule',
)
sensor.poke = Mock(side_effect=[False, False, False, True])
# first poke returns False and task is re-scheduled
date1 = timezone.utcnow()
with freeze_time(date1):
self._run(sensor)
tis = dr.get_task_instances()
assert len(tis) == 2
for ti in tis:
if ti.task_id == SENSOR_OP:
assert ti.state == State.UP_FOR_RESCHEDULE
# verify one row in task_reschedule table
task_reschedules = TaskReschedule.find_for_task_instance(ti)
assert len(task_reschedules) == 1
assert task_reschedules[0].start_date == date1
assert task_reschedules[0].reschedule_date == date1 + timedelta(seconds=sensor.poke_interval)
assert task_reschedules[0].try_number == 1
if ti.task_id == DUMMY_OP:
assert ti.state == State.NONE
# second poke times out and the task instance fails
date2 = date1 + timedelta(seconds=sensor.poke_interval)
with freeze_time(date2):
with pytest.raises(AirflowSensorTimeout):
self._run(sensor)
tis = dr.get_task_instances()
assert len(tis) == 2
for ti in tis:
if ti.task_id == SENSOR_OP:
assert ti.state == State.FAILED
if ti.task_id == DUMMY_OP:
assert ti.state == State.NONE
# Task is cleared
sensor.clear()
# third poke returns False and task is rescheduled again
date3 = date2 + timedelta(seconds=sensor.poke_interval) + sensor.retry_delay
with freeze_time(date3):
self._run(sensor)
tis = dr.get_task_instances()
assert len(tis) == 2
for ti in tis:
if ti.task_id == SENSOR_OP:
assert ti.state == State.UP_FOR_RESCHEDULE
# verify one row in task_reschedule table
task_reschedules = TaskReschedule.find_for_task_instance(ti)
assert len(task_reschedules) == 1
assert task_reschedules[0].start_date == date3
assert task_reschedules[0].reschedule_date == date3 + timedelta(seconds=sensor.poke_interval)
assert task_reschedules[0].try_number == 2
if ti.task_id == DUMMY_OP:
assert ti.state == State.NONE
# fourth poke return True and task succeeds
date4 = date3 + timedelta(seconds=sensor.poke_interval)
with freeze_time(date4):
self._run(sensor)
tis = dr.get_task_instances()
assert len(tis) == 2
for ti in tis:
if ti.task_id == SENSOR_OP:
assert ti.state == State.SUCCESS
if ti.task_id == DUMMY_OP:
assert ti.state == State.NONE
@pytest.mark.parametrize("mode", ["poke", "reschedule"])
def test_should_include_ready_to_reschedule_dep(self, mode):
sensor = DummySensor(task_id='a', return_value=True, mode=mode)
deps = sensor.deps
assert ReadyToRescheduleDep() in deps
def test_invalid_mode(self):
with pytest.raises(AirflowException):
DummySensor(task_id='a', mode='foo')
def test_ok_with_custom_reschedule_exception(self, make_sensor):
sensor, dr = make_sensor(return_value=None, mode='reschedule')
date1 = timezone.utcnow()
date2 = date1 + timedelta(seconds=60)
date3 = date1 + timedelta(seconds=120)
sensor.poke = Mock(
side_effect=[AirflowRescheduleException(date2), AirflowRescheduleException(date3), True]
)
# first poke returns False and task is re-scheduled
with freeze_time(date1):
self._run(sensor)
tis = dr.get_task_instances()
assert len(tis) == 2
for ti in tis:
if ti.task_id == SENSOR_OP:
# verify task is re-scheduled, i.e. state set to UP_FOR_RESCHEDULE
assert ti.state == State.UP_FOR_RESCHEDULE
# verify one row in task_reschedule table
task_reschedules = TaskReschedule.find_for_task_instance(ti)
assert len(task_reschedules) == 1
assert task_reschedules[0].start_date == date1
assert task_reschedules[0].reschedule_date == date2
if ti.task_id == DUMMY_OP:
assert ti.state == State.NONE
# second poke returns False and task is re-scheduled
with freeze_time(date2):
self._run(sensor)
tis = dr.get_task_instances()
assert len(tis) == 2
for ti in tis:
if ti.task_id == SENSOR_OP:
# verify task is re-scheduled, i.e. state set to UP_FOR_RESCHEDULE
assert ti.state == State.UP_FOR_RESCHEDULE
# verify two rows in task_reschedule table
task_reschedules = TaskReschedule.find_for_task_instance(ti)
assert len(task_reschedules) == 2
assert task_reschedules[1].start_date == date2
assert task_reschedules[1].reschedule_date == date3
if ti.task_id == DUMMY_OP:
assert ti.state == State.NONE
# third poke returns True and task succeeds
with freeze_time(date3):
self._run(sensor)
tis = dr.get_task_instances()
assert len(tis) == 2
for ti in tis:
if ti.task_id == SENSOR_OP:
assert ti.state == State.SUCCESS
if ti.task_id == DUMMY_OP:
assert ti.state == State.NONE
def test_reschedule_with_test_mode(self, make_sensor):
sensor, dr = make_sensor(return_value=None, poke_interval=10, timeout=25, mode='reschedule')
sensor.poke = Mock(side_effect=[False])
# poke returns False and AirflowRescheduleException is raised
date1 = timezone.utcnow()
with freeze_time(date1):
self._run(sensor, test_mode=True)
tis = dr.get_task_instances()
assert len(tis) == 2
for ti in tis:
if ti.task_id == SENSOR_OP:
# in test mode state is not modified
assert ti.state == State.NONE
# in test mode no reschedule request is recorded
task_reschedules = TaskReschedule.find_for_task_instance(ti)
assert len(task_reschedules) == 0
if ti.task_id == DUMMY_OP:
assert ti.state == State.NONE
def test_sensor_with_invalid_poke_interval(self):
negative_poke_interval = -10
non_number_poke_interval = "abcd"
positive_poke_interval = 10
with pytest.raises(AirflowException):
DummySensor(
task_id='test_sensor_task_1',
return_value=None,
poke_interval=negative_poke_interval,
timeout=25,
)
with pytest.raises(AirflowException):
DummySensor(
task_id='test_sensor_task_2',
return_value=None,
poke_interval=non_number_poke_interval,
timeout=25,
)
DummySensor(
task_id='test_sensor_task_3', return_value=None, poke_interval=positive_poke_interval, timeout=25
)
def test_sensor_with_invalid_timeout(self):
negative_timeout = -25
non_number_timeout = "abcd"
positive_timeout = 25
with pytest.raises(AirflowException):
DummySensor(
task_id='test_sensor_task_1', return_value=None, poke_interval=10, timeout=negative_timeout
)
with pytest.raises(AirflowException):
DummySensor(
task_id='test_sensor_task_2', return_value=None, poke_interval=10, timeout=non_number_timeout
)
DummySensor(
task_id='test_sensor_task_3', return_value=None, poke_interval=10, timeout=positive_timeout
)
def test_sensor_with_exponential_backoff_off(self):
sensor = DummySensor(
task_id=SENSOR_OP, return_value=None, poke_interval=5, timeout=60, exponential_backoff=False
)
started_at = timezone.utcnow() - timedelta(seconds=10)
def run_duration():
return (timezone.utcnow() - started_at).total_seconds()
assert sensor._get_next_poke_interval(started_at, run_duration, 1) == sensor.poke_interval
assert sensor._get_next_poke_interval(started_at, run_duration, 2) == sensor.poke_interval
def test_sensor_with_exponential_backoff_on(self):
sensor = DummySensor(
task_id=SENSOR_OP, return_value=None, poke_interval=5, timeout=60, exponential_backoff=True
)
with patch('airflow.utils.timezone.utcnow') as mock_utctime:
mock_utctime.return_value = DEFAULT_DATE
started_at = timezone.utcnow() - timedelta(seconds=10)
def run_duration():
return (timezone.utcnow() - started_at).total_seconds()
interval1 = sensor._get_next_poke_interval(started_at, run_duration, 1)
interval2 = sensor._get_next_poke_interval(started_at, run_duration, 2)
assert interval1 >= 0
assert interval1 <= sensor.poke_interval
assert interval2 >= sensor.poke_interval
assert interval2 > interval1
@pytest.mark.backend("mysql")
def test_reschedule_poke_interval_too_long_on_mysql(self, make_sensor):
with pytest.raises(AirflowException) as ctx:
make_sensor(poke_interval=863998946, mode="reschedule", return_value="irrelevant")
assert str(ctx.value) == (
"Cannot set poke_interval to 863998946 seconds in reschedule mode "
"since it will take reschedule time over MySQL's TIMESTAMP limit."
)
@pytest.mark.backend("mysql")
def test_reschedule_date_too_late_on_mysql(self, make_sensor):
sensor, _ = make_sensor(poke_interval=60 * 60 * 24, mode="reschedule", return_value=False)
# A few hours until TIMESTAMP's limit, the next poke will take us over.
with freeze_time(datetime(2038, 1, 19, tzinfo=timezone.utc)):
with pytest.raises(AirflowSensorTimeout) as ctx:
self._run(sensor)
assert str(ctx.value) == (
"Cannot reschedule DAG unit_test_dag to 2038-01-20T00:00:00+00:00 "
"since it is over MySQL's TIMESTAMP storage limit."
)
def test_reschedule_and_retry_timeout(self, make_sensor):
"""
Test mode="reschedule", retries and timeout configurations interact correctly.
Given a sensor configured like this:
poke_interval=5
timeout=10
retries=2
retry_delay=timedelta(seconds=3)
If the second poke raises RuntimeError, all other pokes return False, this is how it should
behave:
00:00 Returns False try_number=1, max_tries=2, state=up_for_reschedule
00:05 Raises RuntimeError try_number=2, max_tries=2, state=up_for_retry
00:08 Returns False try_number=2, max_tries=2, state=up_for_reschedule
00:13 Raises AirflowSensorTimeout try_number=3, max_tries=2, state=failed
And then the sensor is cleared at 00:19. It should behave like this:
00:19 Returns False try_number=3, max_tries=4, state=up_for_reschedule
00:24 Returns False try_number=3, max_tries=4, state=up_for_reschedule
00:26 Returns False try_number=3, max_tries=4, state=up_for_reschedule
00:31 Raises AirflowSensorTimeout, try_number=4, max_tries=4, state=failed
"""
sensor, dr = make_sensor(
return_value=None,
poke_interval=5,
timeout=10,
retries=2,
retry_delay=timedelta(seconds=3),
mode='reschedule',
)
sensor.poke = Mock(side_effect=[False, RuntimeError, False, False, False, False, False, False])
def assert_ti_state(try_number, max_tries, state):
tis = dr.get_task_instances()
assert len(tis) == 2
for ti in tis:
if ti.task_id == SENSOR_OP:
assert ti.try_number == try_number
assert ti.max_tries == max_tries
assert ti.state == state
break
else:
self.fail("sensor not found")
# first poke returns False and task is re-scheduled
date1 = timezone.utcnow()
with freeze_time(date1):
self._run(sensor)
assert_ti_state(1, 2, State.UP_FOR_RESCHEDULE)
# second poke raises RuntimeError and task instance retries
date2 = date1 + timedelta(seconds=sensor.poke_interval)
with freeze_time(date2), pytest.raises(RuntimeError):
self._run(sensor)
assert_ti_state(2, 2, State.UP_FOR_RETRY)
# third poke returns False and task is rescheduled again
date3 = date2 + sensor.retry_delay + timedelta(seconds=1)
with freeze_time(date3):
self._run(sensor)
assert_ti_state(2, 2, State.UP_FOR_RESCHEDULE)
# fourth poke times out and raises AirflowSensorTimeout
date4 = date3 + timedelta(seconds=sensor.poke_interval)
with freeze_time(date4), pytest.raises(AirflowSensorTimeout):
self._run(sensor)
assert_ti_state(3, 2, State.FAILED)
# Clear the failed sensor
sensor.clear()
date_i = date4 + timedelta(seconds=20)
for _ in range(3):
date_i += timedelta(seconds=sensor.poke_interval)
with freeze_time(date_i):
self._run(sensor)
assert_ti_state(3, 4, State.UP_FOR_RESCHEDULE)
# Last poke times out and raises AirflowSensorTimeout
date8 = date_i + timedelta(seconds=sensor.poke_interval)
with freeze_time(date8), pytest.raises(AirflowSensorTimeout):
self._run(sensor)
assert_ti_state(4, 4, State.FAILED)
def test_sensor_with_xcom(self, make_sensor):
xcom_value = "TestValue"
sensor, dr = make_sensor(True, xcom_value=xcom_value)
self._run(sensor)
tis = dr.get_task_instances()
assert len(tis) == 2
for ti in tis:
if ti.task_id == SENSOR_OP:
assert ti.state == State.SUCCESS
if ti.task_id == DUMMY_OP:
assert ti.state == State.NONE
actual_xcom_value = XCom.get_one(
key="return_value", task_id=SENSOR_OP, dag_id=dr.dag_id, run_id=dr.run_id
)
assert actual_xcom_value == xcom_value
def test_sensor_with_xcom_fails(self, make_sensor):
xcom_value = "TestValue"
sensor, dr = make_sensor(False, xcom_value=xcom_value)
with pytest.raises(AirflowSensorTimeout):
self._run(sensor)
tis = dr.get_task_instances()
assert len(tis) == 2
for ti in tis:
if ti.task_id == SENSOR_OP:
assert ti.state == State.FAILED
if ti.task_id == DUMMY_OP:
assert ti.state == State.NONE
actual_xcom_value = XCom.get_one(
key="return_value", task_id=SENSOR_OP, dag_id=dr.dag_id, run_id=dr.run_id
)
assert actual_xcom_value is None
@poke_mode_only
class DummyPokeOnlySensor(BaseSensorOperator):
def __init__(self, poke_changes_mode=False, **kwargs):
self.mode = kwargs['mode']
super().__init__(**kwargs)
self.poke_changes_mode = poke_changes_mode
self.return_value = True
def poke(self, context: Context):
if self.poke_changes_mode:
self.change_mode('reschedule')
return self.return_value
def change_mode(self, mode):
self.mode = mode
class TestPokeModeOnly:
def test_poke_mode_only_allows_poke_mode(self):
try:
sensor = DummyPokeOnlySensor(task_id='foo', mode='poke', poke_changes_mode=False)
except ValueError:
self.fail("__init__ failed with mode='poke'.")
try:
sensor.poke({})
except ValueError:
self.fail("poke failed without changing mode from 'poke'.")
try:
sensor.change_mode('poke')
except ValueError:
self.fail("class method failed without changing mode from 'poke'.")
def test_poke_mode_only_bad_class_method(self):
sensor = DummyPokeOnlySensor(task_id='foo', mode='poke', poke_changes_mode=False)
with pytest.raises(ValueError):
sensor.change_mode('reschedule')
def test_poke_mode_only_bad_init(self):
with pytest.raises(ValueError):
DummyPokeOnlySensor(task_id='foo', mode='reschedule', poke_changes_mode=False)
def test_poke_mode_only_bad_poke(self):
sensor = DummyPokeOnlySensor(task_id='foo', mode='poke', poke_changes_mode=True)
with pytest.raises(ValueError):
sensor.poke({})
avg_line_length: 39.096454 | max_line_length: 109 | alphanum_fraction: 0.61949

hexsha: 703a292bcb677b4d069025242dc6e5a1528ffaf1 | size: 308 | ext: py | lang: Python
max_stars: torrents/admin.py | kevincornish/Genesis | 6bc424fe97be954776dec2bdc4c7d214992cc3e2 | ["MIT"] | count: null | null – null
max_issues: torrents/admin.py | kevincornish/Genesis | 6bc424fe97be954776dec2bdc4c7d214992cc3e2 | ["MIT"] | count: null | null – null
max_forks: torrents/admin.py | kevincornish/Genesis | 6bc424fe97be954776dec2bdc4c7d214992cc3e2 | ["MIT"] | count: null | null – null
content:
from django.contrib import admin
from .models import Torrent, Category
class CategoryAdmin(admin.ModelAdmin):
    list_display = ['title']
admin.site.register(Category, CategoryAdmin)
class TorrentAdmin(admin.ModelAdmin):
    list_display = ['name', 'category']
admin.site.register(Torrent, TorrentAdmin)
avg_line_length: 25.666667 | max_line_length: 45 | alphanum_fraction: 0.775974

hexsha: fc09683a6ffeec1817143fc921d6d2e7f3579578 | size: 3,598 | ext: py | lang: Python
max_stars: unit_tests/test_utils.py | openstack/charm-heat | d337e71a288ead3fb2dd335e22146e8a36d15f3b | ["ECL-2.0", "Apache-2.0"] | count: 16 | 2016-04-17T04:00:48.000Z – 2020-05-06T14:07:36.000Z
max_issues: unit_tests/test_utils.py | openstack/charm-heat | d337e71a288ead3fb2dd335e22146e8a36d15f3b | ["ECL-2.0", "Apache-2.0"] | count: null | null – null
max_forks: unit_tests/test_utils.py | openstack/charm-heat | d337e71a288ead3fb2dd335e22146e8a36d15f3b | ["ECL-2.0", "Apache-2.0"] | count: 10 | 2016-03-23T20:32:46.000Z – 2019-10-22T03:49:29.000Z
content:
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import unittest
import yaml
from contextlib import contextmanager
from unittest.mock import patch, MagicMock
def load_config():
"""Loads config.
Walk backwards from __file__ looking for config.yaml, load and return the
'options' section.
"""
config = None
f = __file__
while config is None:
d = os.path.dirname(f)
if os.path.isfile(os.path.join(d, 'config.yaml')):
config = os.path.join(d, 'config.yaml')
break
f = d
if not config:
logging.error('Could not find config.yaml in any parent directory '
'of %s. ' % __file__)
raise Exception
return yaml.safe_load(open(config).read())['options']
def get_default_config():
"""Load default charm config from config.yaml return as a dict.
If no default is set in config.yaml, its value is None.
"""
default_config = {}
config = load_config()
for k, v in config.items():
if 'default' in v:
default_config[k] = v['default']
else:
default_config[k] = None
return default_config
class CharmTestCase(unittest.TestCase):
def setUp(self, obj, patches):
super(CharmTestCase, self).setUp()
self.patches = patches
self.obj = obj
self.test_config = TestConfig()
self.test_relation = TestRelation()
self.patch_all()
def patch(self, method):
_m = patch.object(self.obj, method)
mock = _m.start()
self.addCleanup(_m.stop)
return mock
def patch_all(self):
for method in self.patches:
setattr(self, method, self.patch(method))
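# Editor's note (illustrative, not part of the original file): charm unit tests
# typically use CharmTestCase roughly as follows, where `hooks_module` and
# TO_PATCH are hypothetical names for the module under test and the attributes
# to patch on it:
#
#     TO_PATCH = ['config', 'log']
#
#     class MyHookTest(CharmTestCase):
#         def setUp(self):
#             super().setUp(hooks_module, TO_PATCH)
#             # route config() calls through the fake config defined below
#             self.config.side_effect = self.test_config.get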
class TestConfig(object):
def __init__(self):
self.config = get_default_config()
def get(self, attr=None):
if not attr:
return self.get_all()
try:
return self.config[attr]
except KeyError:
return None
def get_all(self):
return self.config
def set(self, attr, value):
if attr not in self.config:
raise KeyError
self.config[attr] = value
class TestRelation(object):
def __init__(self, relation_data={}):
self.relation_data = relation_data
def set(self, relation_data):
self.relation_data = relation_data
def get(self, attr=None, unit=None, rid=None):
if attr is None:
return self.relation_data
elif attr in self.relation_data:
return self.relation_data[attr]
return None
@contextmanager
def patch_open():
'''Patch open().
To allow mocking both open() itself and the file that is yielded.
Yields the mock for "open" and "file", respectively.
'''
mock_open = MagicMock(spec=open)
mock_file = MagicMock()
@contextmanager
def stub_open(*args, **kwargs):
mock_open(*args, **kwargs)
yield mock_file
with patch('builtins.open', stub_open):
yield mock_open, mock_file
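
For illustration (editor's addition): a sketch of how `patch_open` might be used in a test. `write_state` is a hypothetical function under test that opens a file and writes to it; it is not part of this repository.

```python
def test_write_state_writes_file():
    # Sketch only: write_state() is a hypothetical helper under test.
    with patch_open() as (mock_open, mock_file):
        write_state('/etc/heat/state', 'ok')
        mock_open.assert_called_once_with('/etc/heat/state', 'w')
        mock_file.write.assert_called_once_with('ok')
```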
avg_line_length: 26.455882 | max_line_length: 77 | alphanum_fraction: 0.637298

hexsha: 05fe9832afc309b298c1b81ed9f4cc58ec225546 | size: 225 | ext: py | lang: Python
max_stars: src/accounts/serializers.py | elephantatech/django-docker-template | 9e0cb87c717933b7cadf8d6ef206fb55317c5eeb | ["Apache-2.0"] | count: null | null – null
max_issues: src/accounts/serializers.py | elephantatech/django-docker-template | 9e0cb87c717933b7cadf8d6ef206fb55317c5eeb | ["Apache-2.0"] | count: 3 | 2021-06-04T23:48:24.000Z – 2021-06-10T20:00:48.000Z
max_forks: src/accounts/serializers.py | elephantatech/django-docker-template | 9e0cb87c717933b7cadf8d6ef206fb55317c5eeb | ["Apache-2.0"] | count: null | null – null
content:
from django.contrib.auth import get_user_model
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
fields = ('id', 'username',)
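
A brief usage sketch (editor's addition): wiring the serializer above into a minimal DRF view. The import path `accounts.serializers` is assumed from the file location `src/accounts/serializers.py`.

```python
from rest_framework.response import Response
from rest_framework.views import APIView

from accounts.serializers import UserSerializer


class CurrentUserView(APIView):
    """Return the logged-in user as {'id': ..., 'username': ...}."""

    def get(self, request):
        return Response(UserSerializer(request.user).data)
```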
avg_line_length: 28.125 | max_line_length: 50 | alphanum_fraction: 0.733333

hexsha: f3b31551dc4760b2a392277c6ce4adbbe9d40b21 | size: 32,141 | ext: py | lang: Python
max_stars: mne/preprocessing/tests/test_ica.py | christian-oreilly/mne-python | d925082e5b1e6f0f3649d8b909340e7b79a632ec | ["BSD-3-Clause"] | count: null | null – null
max_issues: mne/preprocessing/tests/test_ica.py | christian-oreilly/mne-python | d925082e5b1e6f0f3649d8b909340e7b79a632ec | ["BSD-3-Clause"] | count: null | null – null
max_forks: mne/preprocessing/tests/test_ica.py | christian-oreilly/mne-python | d925082e5b1e6f0f3649d8b909340e7b79a632ec | ["BSD-3-Clause"] | count: 1 | 2017-12-05T05:13:56.000Z – 2017-12-05T05:13:56.000Z
content:
from __future__ import print_function
# Author: Denis Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import os
import os.path as op
import warnings
from nose.tools import (assert_true, assert_raises, assert_equal, assert_false,
assert_not_equal, assert_is_none)
import pytest
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose)
from scipy import stats
from itertools import product
from mne import (Epochs, read_events, pick_types, create_info, EpochsArray,
EvokedArray, Annotations)
from mne.cov import read_cov
from mne.preprocessing import (ICA, ica_find_ecg_events, ica_find_eog_events,
read_ica, run_ica)
from mne.preprocessing.ica import (get_score_funcs, corrmap, _sort_components,
_ica_explained_variance)
from mne.io import read_raw_fif, Info, RawArray
from mne.io.meas_info import _kind_dict
from mne.io.pick import _DATA_CH_TYPES_SPLIT
from mne.tests.common import assert_naming
from mne.utils import (catch_logging, _TempDir, requires_sklearn,
run_tests_if_main)
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
test_cov_name = op.join(data_dir, 'test-cov.fif')
event_id, tmin, tmax = 1, -0.2, 0.2
# if stop is too small pca may fail in some cases, but we're okay on this file
start, stop = 0, 6
score_funcs_unsuited = ['pointbiserialr', 'ansari']
try:
from sklearn.utils.validation import NonBLASDotWarning
warnings.simplefilter('error', NonBLASDotWarning)
except Exception:
pass
@requires_sklearn
def test_ica_full_data_recovery():
"""Test recovery of full data when no source is rejected."""
# Most basic recovery
raw = read_raw_fif(raw_fname).crop(0.5, stop).load_data()
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')[:10]
with warnings.catch_warnings(record=True): # bad proj
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
evoked = epochs.average()
n_channels = 5
data = raw._data[:n_channels].copy()
data_epochs = epochs.get_data()
data_evoked = evoked.data
raw.annotations = Annotations([0.5], [0.5], ['BAD'])
for method in ['fastica']:
stuff = [(2, n_channels, True), (2, n_channels // 2, False)]
for n_components, n_pca_components, ok in stuff:
ica = ICA(n_components=n_components,
max_pca_components=n_pca_components,
n_pca_components=n_pca_components,
method=method, max_iter=1)
with warnings.catch_warnings(record=True):
ica.fit(raw, picks=list(range(n_channels)))
raw2 = ica.apply(raw.copy(), exclude=[])
if ok:
assert_allclose(data[:n_channels], raw2._data[:n_channels],
rtol=1e-10, atol=1e-15)
else:
diff = np.abs(data[:n_channels] - raw2._data[:n_channels])
assert_true(np.max(diff) > 1e-14)
ica = ICA(n_components=n_components,
max_pca_components=n_pca_components,
n_pca_components=n_pca_components)
with warnings.catch_warnings(record=True):
ica.fit(epochs, picks=list(range(n_channels)))
epochs2 = ica.apply(epochs.copy(), exclude=[])
data2 = epochs2.get_data()[:, :n_channels]
if ok:
assert_allclose(data_epochs[:, :n_channels], data2,
rtol=1e-10, atol=1e-15)
else:
diff = np.abs(data_epochs[:, :n_channels] - data2)
assert_true(np.max(diff) > 1e-14)
evoked2 = ica.apply(evoked.copy(), exclude=[])
data2 = evoked2.data[:n_channels]
if ok:
assert_allclose(data_evoked[:n_channels], data2,
rtol=1e-10, atol=1e-15)
else:
diff = np.abs(evoked.data[:n_channels] - data2)
assert_true(np.max(diff) > 1e-14)
assert_raises(ValueError, ICA, method='pizza-decomposision')
@requires_sklearn
def test_ica_rank_reduction():
"""Test recovery ICA rank reduction."""
# Most basic recovery
raw = read_raw_fif(raw_fname).crop(0.5, stop).load_data()
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')[:10]
n_components = 5
max_pca_components = len(picks)
for n_pca_components in [6, 10]:
with warnings.catch_warnings(record=True): # non-convergence
warnings.simplefilter('always')
ica = ICA(n_components=n_components,
max_pca_components=max_pca_components,
n_pca_components=n_pca_components,
method='fastica', max_iter=1).fit(raw, picks=picks)
rank_before = raw.estimate_rank(picks=picks)
assert_equal(rank_before, len(picks))
raw_clean = ica.apply(raw.copy())
rank_after = raw_clean.estimate_rank(picks=picks)
# interaction between ICA rejection and PCA components difficult
# to predict. Rank_after often seems to be 1 higher than
# n_pca_components
assert_true(n_components < n_pca_components <= rank_after <=
rank_before)
@requires_sklearn
def test_ica_reset():
"""Test ICA resetting."""
raw = read_raw_fif(raw_fname).crop(0.5, stop).load_data()
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')[:10]
run_time_attrs = (
'_pre_whitener',
'unmixing_matrix_',
'mixing_matrix_',
'n_components_',
'n_samples_',
'pca_components_',
'pca_explained_variance_',
'pca_mean_'
)
with warnings.catch_warnings(record=True): # convergence
ica = ICA(
n_components=3, max_pca_components=3, n_pca_components=3,
method='fastica', max_iter=1).fit(raw, picks=picks)
assert_true(all(hasattr(ica, attr) for attr in run_time_attrs))
assert_not_equal(ica.labels_, None)
ica._reset()
assert_true(not any(hasattr(ica, attr) for attr in run_time_attrs))
assert_not_equal(ica.labels_, None)
@requires_sklearn
def test_ica_core():
"""Test ICA on raw and epochs."""
raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
# XXX. The None cases helped revealing bugs but are time consuming.
test_cov = read_cov(test_cov_name)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
noise_cov = [None, test_cov]
# removed None cases to speed up...
n_components = [2, 1.0] # for future dbg add cases
max_pca_components = [3]
picks_ = [picks]
methods = ['fastica']
iter_ica_params = product(noise_cov, n_components, max_pca_components,
picks_, methods)
# # test init catchers
assert_raises(ValueError, ICA, n_components=3, max_pca_components=2)
assert_raises(ValueError, ICA, n_components=2.3, max_pca_components=2)
# test essential core functionality
for n_cov, n_comp, max_n, pcks, method in iter_ica_params:
# Test ICA raw
ica = ICA(noise_cov=n_cov, n_components=n_comp,
max_pca_components=max_n, n_pca_components=max_n,
random_state=0, method=method, max_iter=1)
assert_raises(ValueError, ica.__contains__, 'mag')
print(ica) # to test repr
# test fit checker
assert_raises(RuntimeError, ica.get_sources, raw)
assert_raises(RuntimeError, ica.get_sources, epochs)
# test decomposition
with warnings.catch_warnings(record=True): # convergence
ica.fit(raw, picks=pcks, start=start, stop=stop)
repr(ica) # to test repr
assert_true('mag' in ica) # should now work without error
# test re-fit
unmixing1 = ica.unmixing_matrix_
with warnings.catch_warnings(record=True):
ica.fit(raw, picks=pcks, start=start, stop=stop)
assert_array_almost_equal(unmixing1, ica.unmixing_matrix_)
raw_sources = ica.get_sources(raw)
# test for #3804
assert_equal(raw_sources._filenames, [None])
print(raw_sources)
sources = raw_sources[:, :][0]
assert_true(sources.shape[0] == ica.n_components_)
# test preload filter
raw3 = raw.copy()
raw3.preload = False
assert_raises(ValueError, ica.apply, raw3,
include=[1, 2])
#######################################################################
# test epochs decomposition
ica = ICA(noise_cov=n_cov, n_components=n_comp,
max_pca_components=max_n, n_pca_components=max_n,
random_state=0)
with warnings.catch_warnings(record=True):
ica.fit(epochs, picks=picks)
data = epochs.get_data()[:, 0, :]
n_samples = np.prod(data.shape)
assert_equal(ica.n_samples_, n_samples)
print(ica) # to test repr
sources = ica.get_sources(epochs).get_data()
assert_true(sources.shape[1] == ica.n_components_)
assert_raises(ValueError, ica.score_sources, epochs,
target=np.arange(1))
# test preload filter
epochs3 = epochs.copy()
epochs3.preload = False
assert_raises(ValueError, ica.apply, epochs3,
include=[1, 2])
# test for bug with whitener updating
_pre_whitener = ica._pre_whitener.copy()
epochs._data[:, 0, 10:15] *= 1e12
ica.apply(epochs.copy())
assert_array_equal(_pre_whitener, ica._pre_whitener)
# test expl. var threshold leading to empty sel
ica.n_components = 0.1
assert_raises(RuntimeError, ica.fit, epochs)
offender = 1, 2, 3,
assert_raises(ValueError, ica.get_sources, offender)
assert_raises(ValueError, ica.fit, offender)
assert_raises(ValueError, ica.apply, offender)
@pytest.mark.slowtest
@requires_sklearn
def test_ica_additional():
"""Test additional ICA functionality."""
import matplotlib.pyplot as plt
tempdir = _TempDir()
stop2 = 500
raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
raw.annotations = Annotations([0.5], [0.5], ['BAD'])
# XXX This breaks the tests :(
# raw.info['bads'] = [raw.ch_names[1]]
test_cov = read_cov(test_cov_name)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
# test if n_components=None works
with warnings.catch_warnings(record=True):
ica = ICA(n_components=None,
max_pca_components=None,
n_pca_components=None, random_state=0)
ica.fit(epochs, picks=picks, decim=3)
# for testing eog functionality
picks2 = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=True, exclude='bads')
epochs_eog = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks2,
baseline=(None, 0), preload=True)
test_cov2 = test_cov.copy()
ica = ICA(noise_cov=test_cov2, n_components=3, max_pca_components=4,
n_pca_components=4)
assert_true(ica.info is None)
with warnings.catch_warnings(record=True):
ica.fit(raw, picks[:5])
assert_true(isinstance(ica.info, Info))
assert_true(ica.n_components_ < 5)
ica = ICA(n_components=3, max_pca_components=4,
n_pca_components=4)
assert_raises(RuntimeError, ica.save, '')
with warnings.catch_warnings(record=True):
ica.fit(raw, picks=[1, 2, 3, 4, 5], start=start, stop=stop2)
# check passing a ch_name to find_bads_ecg
with warnings.catch_warnings(record=True): # filter length
_, scores_1 = ica.find_bads_ecg(raw)
_, scores_2 = ica.find_bads_ecg(raw, raw.ch_names[1])
assert_false(scores_1[0] == scores_2[0])
# test corrmap
ica2 = ica.copy()
ica3 = ica.copy()
corrmap([ica, ica2], (0, 0), threshold='auto', label='blinks', plot=True,
ch_type="mag")
corrmap([ica, ica2], (0, 0), threshold=2, plot=False, show=False)
assert_true(ica.labels_["blinks"] == ica2.labels_["blinks"])
assert_true(0 in ica.labels_["blinks"])
# test retrieval of component maps as arrays
components = ica.get_components()
template = components[:, 0]
EvokedArray(components, ica.info, tmin=0.).plot_topomap([0])
corrmap([ica, ica3], template, threshold='auto', label='blinks', plot=True,
ch_type="mag")
assert_true(ica2.labels_["blinks"] == ica3.labels_["blinks"])
plt.close('all')
# test warnings on bad filenames
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
ica_badname = op.join(op.dirname(tempdir), 'test-bad-name.fif.gz')
ica.save(ica_badname)
read_ica(ica_badname)
assert_naming(w, 'test_ica.py', 2)
# test decim
ica = ICA(n_components=3, max_pca_components=4,
n_pca_components=4)
raw_ = raw.copy()
for _ in range(3):
raw_.append(raw_)
n_samples = raw_._data.shape[1]
with warnings.catch_warnings(record=True):
ica.fit(raw, picks=None, decim=3)
assert_true(raw_._data.shape[1], n_samples)
# test expl var
ica = ICA(n_components=1.0, max_pca_components=4,
n_pca_components=4)
with warnings.catch_warnings(record=True):
ica.fit(raw, picks=None, decim=3)
assert_true(ica.n_components_ == 4)
ica_var = _ica_explained_variance(ica, raw, normalize=True)
assert_true(np.all(ica_var[:-1] >= ica_var[1:]))
# test ica sorting
ica.exclude = [0]
ica.labels_ = dict(blink=[0], think=[1])
ica_sorted = _sort_components(ica, [3, 2, 1, 0], copy=True)
assert_equal(ica_sorted.exclude, [3])
assert_equal(ica_sorted.labels_, dict(blink=[3], think=[2]))
# epochs extraction from raw fit
assert_raises(RuntimeError, ica.get_sources, epochs)
# test reading and writing
test_ica_fname = op.join(op.dirname(tempdir), 'test-ica.fif')
for cov in (None, test_cov):
ica = ICA(noise_cov=cov, n_components=2, max_pca_components=4,
n_pca_components=4)
with warnings.catch_warnings(record=True): # ICA does not converge
ica.fit(raw, picks=picks, start=start, stop=stop2)
sources = ica.get_sources(epochs).get_data()
assert_true(ica.mixing_matrix_.shape == (2, 2))
assert_true(ica.unmixing_matrix_.shape == (2, 2))
assert_true(ica.pca_components_.shape == (4, len(picks)))
assert_true(sources.shape[1] == ica.n_components_)
for exclude in [[], [0]]:
ica.exclude = exclude
ica.labels_ = {'foo': [0]}
ica.save(test_ica_fname)
ica_read = read_ica(test_ica_fname)
assert_true(ica.exclude == ica_read.exclude)
assert_equal(ica.labels_, ica_read.labels_)
ica.exclude = []
ica.apply(raw, exclude=[1])
assert_true(ica.exclude == [])
ica.exclude = [0, 1]
ica.apply(raw, exclude=[1])
assert_true(ica.exclude == [0, 1])
ica_raw = ica.get_sources(raw)
assert_true(ica.exclude == [ica_raw.ch_names.index(e) for e in
ica_raw.info['bads']])
# test filtering
d1 = ica_raw._data[0].copy()
ica_raw.filter(4, 20, fir_design='firwin2')
assert_equal(ica_raw.info['lowpass'], 20.)
assert_equal(ica_raw.info['highpass'], 4.)
assert_true((d1 != ica_raw._data[0]).any())
d1 = ica_raw._data[0].copy()
ica_raw.notch_filter([10], trans_bandwidth=10, fir_design='firwin')
assert_true((d1 != ica_raw._data[0]).any())
ica.n_pca_components = 2
ica.method = 'fake'
ica.save(test_ica_fname)
ica_read = read_ica(test_ica_fname)
assert_true(ica.n_pca_components == ica_read.n_pca_components)
assert_equal(ica.method, ica_read.method)
assert_equal(ica.labels_, ica_read.labels_)
# check type consistency
attrs = ('mixing_matrix_ unmixing_matrix_ pca_components_ '
'pca_explained_variance_ _pre_whitener')
def f(x, y):
return getattr(x, y).dtype
for attr in attrs.split():
assert_equal(f(ica_read, attr), f(ica, attr))
ica.n_pca_components = 4
ica_read.n_pca_components = 4
ica.exclude = []
ica.save(test_ica_fname)
ica_read = read_ica(test_ica_fname)
for attr in ['mixing_matrix_', 'unmixing_matrix_', 'pca_components_',
'pca_mean_', 'pca_explained_variance_',
'_pre_whitener']:
assert_array_almost_equal(getattr(ica, attr),
getattr(ica_read, attr))
assert_true(ica.ch_names == ica_read.ch_names)
assert_true(isinstance(ica_read.info, Info))
sources = ica.get_sources(raw)[:, :][0]
sources2 = ica_read.get_sources(raw)[:, :][0]
assert_array_almost_equal(sources, sources2)
_raw1 = ica.apply(raw, exclude=[1])
_raw2 = ica_read.apply(raw, exclude=[1])
assert_array_almost_equal(_raw1[:, :][0], _raw2[:, :][0])
os.remove(test_ica_fname)
# check score funcs
for name, func in get_score_funcs().items():
if name in score_funcs_unsuited:
continue
scores = ica.score_sources(raw, target='EOG 061', score_func=func,
start=0, stop=10)
assert_true(ica.n_components_ == len(scores))
# check univariate stats
scores = ica.score_sources(raw, score_func=stats.skew)
# check exception handling
assert_raises(ValueError, ica.score_sources, raw,
target=np.arange(1))
params = []
params += [(None, -1, slice(2), [0, 1])]  # variance, kurtosis idx params
params += [(None, 'MEG 1531')] # ECG / EOG channel params
for idx, ch_name in product(*params):
ica.detect_artifacts(raw, start_find=0, stop_find=50, ecg_ch=ch_name,
eog_ch=ch_name, skew_criterion=idx,
var_criterion=idx, kurt_criterion=idx)
evoked = epochs.average()
evoked_data = evoked.data.copy()
raw_data = raw[:][0].copy()
epochs_data = epochs.get_data().copy()
with warnings.catch_warnings(record=True):
idx, scores = ica.find_bads_ecg(raw, method='ctps')
assert_equal(len(scores), ica.n_components_)
idx, scores = ica.find_bads_ecg(raw, method='correlation')
assert_equal(len(scores), ica.n_components_)
idx, scores = ica.find_bads_eog(raw)
assert_equal(len(scores), ica.n_components_)
idx, scores = ica.find_bads_ecg(epochs, method='ctps')
assert_equal(len(scores), ica.n_components_)
assert_raises(ValueError, ica.find_bads_ecg, epochs.average(),
method='ctps')
assert_raises(ValueError, ica.find_bads_ecg, raw,
method='crazy-coupling')
idx, scores = ica.find_bads_eog(raw)
assert_equal(len(scores), ica.n_components_)
raw.info['chs'][raw.ch_names.index('EOG 061') - 1]['kind'] = 202
idx, scores = ica.find_bads_eog(raw)
assert_true(isinstance(scores, list))
assert_equal(len(scores[0]), ica.n_components_)
idx, scores = ica.find_bads_eog(evoked, ch_name='MEG 1441')
assert_equal(len(scores), ica.n_components_)
idx, scores = ica.find_bads_ecg(evoked, method='correlation')
assert_equal(len(scores), ica.n_components_)
assert_array_equal(raw_data, raw[:][0])
assert_array_equal(epochs_data, epochs.get_data())
assert_array_equal(evoked_data, evoked.data)
# check score funcs
for name, func in get_score_funcs().items():
if name in score_funcs_unsuited:
continue
scores = ica.score_sources(epochs_eog, target='EOG 061',
score_func=func)
assert_true(ica.n_components_ == len(scores))
# check univariate stats
scores = ica.score_sources(epochs, score_func=stats.skew)
# check exception handling
assert_raises(ValueError, ica.score_sources, epochs,
target=np.arange(1))
# ecg functionality
ecg_scores = ica.score_sources(raw, target='MEG 1531',
score_func='pearsonr')
with warnings.catch_warnings(record=True): # filter attenuation warning
ecg_events = ica_find_ecg_events(raw,
sources[np.abs(ecg_scores).argmax()])
assert_true(ecg_events.ndim == 2)
# eog functionality
eog_scores = ica.score_sources(raw, target='EOG 061',
score_func='pearsonr')
with warnings.catch_warnings(record=True): # filter attenuation warning
eog_events = ica_find_eog_events(raw,
sources[np.abs(eog_scores).argmax()])
assert_true(eog_events.ndim == 2)
# Test ica fiff export
ica_raw = ica.get_sources(raw, start=0, stop=100)
assert_true(ica_raw.last_samp - ica_raw.first_samp == 100)
assert_equal(len(ica_raw._filenames), 1) # API consistency
ica_chans = [ch for ch in ica_raw.ch_names if 'ICA' in ch]
assert_true(ica.n_components_ == len(ica_chans))
test_ica_fname = op.join(op.abspath(op.curdir), 'test-ica_raw.fif')
ica.n_components = np.int32(ica.n_components)
ica_raw.save(test_ica_fname, overwrite=True)
ica_raw2 = read_raw_fif(test_ica_fname, preload=True)
assert_allclose(ica_raw._data, ica_raw2._data, rtol=1e-5, atol=1e-4)
ica_raw2.close()
os.remove(test_ica_fname)
# Test ica epochs export
ica_epochs = ica.get_sources(epochs)
assert_true(ica_epochs.events.shape == epochs.events.shape)
ica_chans = [ch for ch in ica_epochs.ch_names if 'ICA' in ch]
assert_true(ica.n_components_ == len(ica_chans))
assert_true(ica.n_components_ == ica_epochs.get_data().shape[1])
assert_true(ica_epochs._raw is None)
assert_true(ica_epochs.preload is True)
# test float n pca components
ica.pca_explained_variance_ = np.array([0.2] * 5)
ica.n_components_ = 0
for ncomps, expected in [[0.3, 1], [0.9, 4], [1, 1]]:
ncomps_ = ica._check_n_pca_components(ncomps)
assert_true(ncomps_ == expected)
ica = ICA()
ica.fit(raw, picks=picks[:5])
with warnings.catch_warnings(record=True): # filter length
ica.find_bads_ecg(raw)
ica.find_bads_eog(epochs, ch_name='MEG 0121')
assert_array_equal(raw_data, raw[:][0])
raw.drop_channels(['MEG 0122'])
with warnings.catch_warnings(record=True): # filter length
assert_raises(RuntimeError, ica.find_bads_eog, raw)
assert_raises(RuntimeError, ica.find_bads_ecg, raw)
@requires_sklearn
def test_run_ica():
"""Test run_ica function."""
raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
params = []
params += [(None, -1, slice(2), [0, 1])]  # variance, kurtosis idx
params += [(None, 'MEG 1531')] # ECG / EOG channel params
for idx, ch_name in product(*params):
warnings.simplefilter('always')
with warnings.catch_warnings(record=True):
run_ica(raw, n_components=2, start=0, stop=6, start_find=0,
stop_find=5, ecg_ch=ch_name, eog_ch=ch_name,
skew_criterion=idx, var_criterion=idx, kurt_criterion=idx)
@requires_sklearn
def test_ica_reject_buffer():
"""Test ICA data raw buffer rejection."""
raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
ica = ICA(n_components=3, max_pca_components=4, n_pca_components=4)
raw._data[2, 1000:1005] = 5e-12
with catch_logging() as drop_log:
with warnings.catch_warnings(record=True):
ica.fit(raw, picks[:5], reject=dict(mag=2.5e-12), decim=2,
tstep=0.01, verbose=True, reject_by_annotation=False)
assert_true(raw._data[:5, ::2].shape[1] - 4 == ica.n_samples_)
log = [l for l in drop_log.getvalue().split('\n') if 'detected' in l]
assert_equal(len(log), 1)
@requires_sklearn
def test_ica_twice():
"""Test running ICA twice."""
raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
picks = pick_types(raw.info, meg='grad', exclude='bads')
n_components = 0.9
max_pca_components = None
n_pca_components = 1.1
with warnings.catch_warnings(record=True):
ica1 = ICA(n_components=n_components,
max_pca_components=max_pca_components,
n_pca_components=n_pca_components, random_state=0)
ica1.fit(raw, picks=picks, decim=3)
raw_new = ica1.apply(raw, n_pca_components=n_pca_components)
ica2 = ICA(n_components=n_components,
max_pca_components=max_pca_components,
n_pca_components=1.0, random_state=0)
ica2.fit(raw_new, picks=picks, decim=3)
assert_equal(ica1.n_components_, ica2.n_components_)
@requires_sklearn
def test_fit_params():
"""Test fit_params for ICA."""
assert_raises(ValueError, ICA, fit_params=dict(extended=True))
fit_params = {}
ICA(fit_params=fit_params) # test no side effects
assert_equal(fit_params, {})
@requires_sklearn
def test_bad_channels():
"""Test exception when unsupported channels are used."""
chs = [i for i in _kind_dict]
data_chs = _DATA_CH_TYPES_SPLIT + ['eog']
chs_bad = list(set(chs) - set(data_chs))
info = create_info(len(chs), 500, chs)
data = np.random.rand(len(chs), 50)
raw = RawArray(data, info)
data = np.random.rand(100, len(chs), 50)
epochs = EpochsArray(data, info)
n_components = 0.9
ica = ICA(n_components=n_components, method='fastica')
for inst in [raw, epochs]:
for ch in chs_bad:
# Test case for only bad channels
picks_bad1 = pick_types(inst.info, meg=False,
**{str(ch): True})
# Test case for good and bad channels
picks_bad2 = pick_types(inst.info, meg=True,
**{str(ch): True})
assert_raises(ValueError, ica.fit, inst, picks=picks_bad1)
assert_raises(ValueError, ica.fit, inst, picks=picks_bad2)
assert_raises(ValueError, ica.fit, inst, picks=[])
@requires_sklearn
def test_eog_channel():
"""Test that EOG channel is included when performing ICA."""
raw = read_raw_fif(raw_fname, preload=True)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=True, ecg=False,
eog=True, exclude='bads')
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
n_components = 0.9
ica = ICA(n_components=n_components, method='fastica')
# Test case for MEG and EOG data. Should have EOG channel
for inst in [raw, epochs]:
picks1a = pick_types(inst.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')[:4]
picks1b = pick_types(inst.info, meg=False, stim=False, ecg=False,
eog=True, exclude='bads')
picks1 = np.append(picks1a, picks1b)
ica.fit(inst, picks=picks1)
assert_true(any('EOG' in ch for ch in ica.ch_names))
# Test case for MEG data. Should have no EOG channel
for inst in [raw, epochs]:
picks1 = pick_types(inst.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')[:5]
ica.fit(inst, picks=picks1)
assert_false(any('EOG' in ch for ch in ica.ch_names))
@requires_sklearn
def test_max_pca_components_none():
"""Test max_pca_components=None."""
raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
events = read_events(event_name)
picks = pick_types(raw.info, eeg=True, meg=False)
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
max_pca_components = None
n_components = 10
random_state = 12345
tempdir = _TempDir()
output_fname = op.join(tempdir, 'test_ica-ica.fif')
ica = ICA(max_pca_components=max_pca_components,
n_components=n_components, random_state=random_state)
with warnings.catch_warnings(record=True): # convergence
ica.fit(epochs)
ica.save(output_fname)
ica = read_ica(output_fname)
# ICA.fit() replaced max_pca_components, which was previously None,
# with the appropriate integer value.
assert_equal(ica.max_pca_components, epochs.info['nchan'])
assert_equal(ica.n_components, 10)
@requires_sklearn
def test_n_components_none():
"""Test n_components=None."""
raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
events = read_events(event_name)
picks = pick_types(raw.info, eeg=True, meg=False)
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
max_pca_components = 10
n_components = None
random_state = 12345
tempdir = _TempDir()
output_fname = op.join(tempdir, 'test_ica-ica.fif')
ica = ICA(max_pca_components=max_pca_components,
n_components=n_components, random_state=random_state)
with warnings.catch_warnings(record=True): # convergence
ica.fit(epochs)
ica.save(output_fname)
ica = read_ica(output_fname)
    # Here max_pca_components was set explicitly, so ICA.fit() leaves it at 10,
    # while n_components remains None.
assert_equal(ica.max_pca_components, 10)
assert_is_none(ica.n_components)
@requires_sklearn
def test_n_components_and_max_pca_components_none():
"""Test n_components and max_pca_components=None."""
raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
events = read_events(event_name)
picks = pick_types(raw.info, eeg=True, meg=False)
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
max_pca_components = None
n_components = None
random_state = 12345
tempdir = _TempDir()
output_fname = op.join(tempdir, 'test_ica-ica.fif')
ica = ICA(max_pca_components=max_pca_components,
n_components=n_components, random_state=random_state)
with warnings.catch_warnings(record=True): # convergence
ica.fit(epochs)
ica.save(output_fname)
ica = read_ica(output_fname)
# ICA.fit() replaced max_pca_components, which was previously None,
# with the appropriate integer value.
assert_equal(ica.max_pca_components, epochs.info['nchan'])
assert_is_none(ica.n_components)
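# A minimal fit/apply sketch of the ICA workflow that the tests above exercise.
# It only reuses names already defined in this module (read_raw_fif, raw_fname,
# stop, pick_types, ICA); nothing is asserted here, it just shows the round trip.
def _example_ica_fit_apply():
    raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
    picks = pick_types(raw.info, meg='grad', exclude='bads')
    ica = ICA(n_components=0.9, random_state=0)
    ica.fit(raw, picks=picks, decim=3)
    # apply() removes the excluded components from a copy of the data
    return ica.apply(raw.copy())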
run_tests_if_main()
| 39.340269
| 79
| 0.636259
|
84a59366fb513734250aada0cc52440407bbef04
| 392
|
py
|
Python
|
app/wsgi.py
|
miraculixx/tastypie-async
|
6da1e92730954f1c3a4234f64c9a828dd4102ae9
|
[
"MIT"
] | 3
|
2016-03-29T18:17:51.000Z
|
2021-08-19T20:12:33.000Z
|
app/wsgi.py
|
miraculixx/tastypie-async
|
6da1e92730954f1c3a4234f64c9a828dd4102ae9
|
[
"MIT"
] | 2
|
2015-07-20T07:06:12.000Z
|
2016-10-03T06:44:52.000Z
|
app/wsgi.py
|
miraculixx/tastypie-async
|
6da1e92730954f1c3a4234f64c9a828dd4102ae9
|
[
"MIT"
] | 4
|
2015-07-21T11:27:00.000Z
|
2021-08-19T20:12:10.000Z
|
"""
WSGI config for tastypie-async project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "app.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
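# Deployment sketch: any WSGI server can serve the ``application`` object above.
# The server choice and command below are an assumption, not part of this repo:
#   gunicorn app.wsgi:application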
| 26.133333
| 78
| 0.788265
|
ea68dce7e4a8b0d1a9f2f46f756fecd373452448
| 378
|
py
|
Python
|
angr/procedures/posix/recvfrom.py
|
mariusmue/angr
|
f8304c4b1f0097a721a6692b02a45cabaae137c5
|
[
"BSD-2-Clause"
] | 1
|
2021-07-07T11:18:34.000Z
|
2021-07-07T11:18:34.000Z
|
angr/procedures/posix/recvfrom.py
|
mariusmue/angr
|
f8304c4b1f0097a721a6692b02a45cabaae137c5
|
[
"BSD-2-Clause"
] | null | null | null |
angr/procedures/posix/recvfrom.py
|
mariusmue/angr
|
f8304c4b1f0097a721a6692b02a45cabaae137c5
|
[
"BSD-2-Clause"
] | 1
|
2022-02-10T02:29:38.000Z
|
2022-02-10T02:29:38.000Z
|
import angr
######################################
# recvfrom
######################################
class recvfrom(angr.SimProcedure):
#pylint:disable=arguments-differ
def run(self, fd, dst, length, flags, src_addr, addrlen): #pylint:disable=unused-argument
bytes_recvd = self.state.posix.read(fd, dst, self.state.se.eval(length))
return bytes_recvd
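# Usage sketch (the binary path below is hypothetical): angr picks this
# SimProcedure up automatically for POSIX targets, but it can also be hooked
# explicitly on a project:
#
#   proj = angr.Project("/path/to/binary")
#   proj.hook_symbol("recvfrom", recvfrom())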
| 29.076923
| 93
| 0.568783
|
f2dd0435d6af445b6029cfcdbe5768ae0e406519
| 656
|
py
|
Python
|
manage.py
|
sling254/MitPitch
|
68b1a62f04aac3ef6cda0c1b575b65de2a887acb
|
[
"MIT"
] | null | null | null |
manage.py
|
sling254/MitPitch
|
68b1a62f04aac3ef6cda0c1b575b65de2a887acb
|
[
"MIT"
] | 1
|
2021-11-19T04:37:04.000Z
|
2021-11-19T04:37:04.000Z
|
manage.py
|
sling254/MitPitch
|
68b1a62f04aac3ef6cda0c1b575b65de2a887acb
|
[
"MIT"
] | 1
|
2021-11-17T12:16:04.000Z
|
2021-11-17T12:16:04.000Z
|
from flask_script import Manager, Server
from flask_migrate import Migrate, MigrateCommand
from app import create_app,db
from app.models import User
# Creating app instance
app = create_app('production')
manager = Manager(app)
manager.add_command('server',Server)
migrate = Migrate(app,db)
manager.add_command('db',MigrateCommand)
@manager.shell
def make_shell_context():
return dict(app = app,db = db,User = User)
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if __name__ == '__main__':
manager.run()
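# Typical invocations (sketch), based on the commands registered above:
#   python manage.py server                      # run the development server
#   python manage.py shell                       # shell with app, db and User preloaded
#   python manage.py db init/migrate/upgrade     # Flask-Migrate commands
#   python manage.py test                        # run the unit tests under ./tests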
| 21.16129
| 51
| 0.737805
|
470d26d3ab2a3352ee9cf216094b5cc46d6a662a
| 5,992
|
py
|
Python
|
qiskit_aqua/components/initial_states/custom.py
|
gitcyberian/aqua
|
617bafe8654b49e80f7593e4f634e8662e1d3796
|
[
"Apache-2.0"
] | 1
|
2019-01-03T20:06:19.000Z
|
2019-01-03T20:06:19.000Z
|
qiskit_aqua/components/initial_states/custom.py
|
jodyburksphd/qiskit-aqua
|
d1050e3362276894b0e3442717f0f2a774a177b0
|
[
"Apache-2.0"
] | null | null | null |
qiskit_aqua/components/initial_states/custom.py
|
jodyburksphd/qiskit-aqua
|
d1050e3362276894b0e3442717f0f2a774a177b0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from qiskit import QuantumRegister, QuantumCircuit
from qiskit.circuit import CompositeGate
from qiskit.extensions.standard.ry import RYGate
from qiskit.extensions.standard.rz import RZGate
from qiskit.extensions.standard.cx import CnotGate
from qiskit.extensions.standard.u1 import U1Gate
from qiskit.extensions.standard.u3 import U3Gate
import numpy as np
from qiskit_aqua.components.initial_states import InitialState
class Custom(InitialState):
"""A custom initial state."""
CONFIGURATION = {
'name': 'CUSTOM',
'description': 'Custom initial state',
'input_schema': {
'$schema': 'http://json-schema.org/schema#',
'id': 'custom_state_schema',
'type': 'object',
'properties': {
'state': {
'type': 'string',
'default': 'zero',
'oneOf': [
{'enum': ['zero', 'uniform', 'random']}
]
},
'state_vector': {
'type': ['array', 'null'],
"items": {
"type": "number"
},
'default': None
}
},
'additionalProperties': False
}
}
def __init__(self, num_qubits, state="zero", state_vector=None):
"""Constructor.
Args:
num_qubits (int): number of qubits
state (str): `zero`, `uniform` or `random`
state_vector: customized vector
"""
loc = locals().copy()
del loc['state_vector']
self.validate(loc)
super().__init__()
# since state_vector is a numpy array of complex numbers which aren't json valid,
# remove it from validation
self._num_qubits = num_qubits
self._state = state
size = np.power(2, self._num_qubits)
if state_vector is None:
if self._state == 'zero':
self._state_vector = np.array([1.0] + [0.0] * (size - 1))
elif self._state == 'uniform':
self._state_vector = np.array([1.0 / np.sqrt(size)] * size)
elif self._state == 'random':
self._state_vector = Custom._normalize(np.random.rand(size))
else:
raise ValueError('Unknown state {}'.format(self._state))
else:
if len(state_vector) != np.power(2, self._num_qubits):
raise ValueError('State vector length {} incompatible with num qubits {}'
.format(len(state_vector), self._num_qubits))
self._state_vector = Custom._normalize(state_vector)
self._state = None
@staticmethod
def _normalize(vector):
return vector / np.linalg.norm(vector)
@staticmethod
def _convert_to_basis_gates(gates):
if isinstance(gates, list):
return [Custom._convert_to_basis_gates(gate) for gate in gates]
elif isinstance(gates, CompositeGate):
gates_data = [Custom._convert_to_basis_gates(
gate) for gate in gates.data]
gates = CompositeGate(gates.name, gates.param,
gates.qargs, circuit=gates.circuit)
gates.data = gates_data
return gates
else:
if isinstance(gates, RYGate):
return U3Gate(gates.param[0], 0, 0, gates.qargs[0])
elif isinstance(gates, RZGate):
return U1Gate(gates.param[0], gates.qargs[0])
elif isinstance(gates, CnotGate):
return gates
else:
raise RuntimeError(
'Unexpected component {} from the initialization circuit.'.format(gates.qasm()))
def construct_circuit(self, mode, register=None):
"""
Construct the statevector of desired initial state.
Args:
            mode (string): `vector` or `circuit`. The `vector` mode returns the
                statevector, while the `circuit` mode constructs the quantum
                circuit corresponding to that vector.
register (QuantumRegister): register for circuit construction.
Returns:
QuantumCircuit or numpy.ndarray: statevector.
Raises:
ValueError: when mode is not 'vector' or 'circuit'.
"""
if mode == 'vector':
return self._state_vector
elif mode == 'circuit':
if register is None:
register = QuantumRegister(self._num_qubits, name='q')
circuit = QuantumCircuit(register)
if self._state is None or self._state == 'random':
circuit.initialize(self._state_vector, [
register[i] for i in range(self._num_qubits)])
circuit.data = Custom._convert_to_basis_gates(circuit.data)
elif self._state == 'zero':
pass
elif self._state == 'uniform':
for i in range(self._num_qubits):
circuit.u2(0.0, np.pi, register[i])
else:
pass
return circuit
else:
raise ValueError('Mode should be either "vector" or "circuit"')
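# Usage sketch for the class above: build a 2-qubit uniform-superposition initial
# state and ask for both representations. No backend execution is attempted here.
def _example_custom_uniform_state():
    init_state = Custom(2, state='uniform')
    vec = init_state.construct_circuit('vector')    # numpy statevector of length 4
    circ = init_state.construct_circuit('circuit')  # QuantumCircuit applying u2 gates
    return vec, circ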
| 38.165605
| 100
| 0.556409
|
a9a6dcf2bdd5fe96e42fe8473f0fd5246a76710f
| 1,453
|
py
|
Python
|
prompt_toolkit/contrib/shell/layout.py
|
mfussenegger/python-prompt-toolkit
|
193c1f528ee69f4cdbc2dc710ebbaee8a3655814
|
[
"BSD-3-Clause"
] | 1
|
2019-01-31T02:41:57.000Z
|
2019-01-31T02:41:57.000Z
|
prompt_toolkit/contrib/shell/layout.py
|
Carreau/python-prompt-toolkit
|
e95726b6a8325a44a753d68a49ceb7d657ba3930
|
[
"BSD-3-Clause"
] | null | null | null |
prompt_toolkit/contrib/shell/layout.py
|
Carreau/python-prompt-toolkit
|
e95726b6a8325a44a753d68a49ceb7d657ba3930
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import unicode_literals
from pygments.token import Token
from .rules import TokenStream
from .lexer import lex_document
class CompletionHint(object):
def __init__(self, grammar):
self.grammar = grammar
def write(self, cli, screen):
if not (cli.is_exiting or cli.is_aborting or cli.is_returning):
screen.write_highlighted(self.tokens(cli))
def tokens(self, cli):
def _():
document = cli.line.document
parts, last_part_token = lex_document(document, only_before_cursor=False)
# Don't show help when you're in the middle of typing a 'token'.
# (Show after having typed the space, or at the start of the line.)
if not last_part_token.unescaped_text:
# Parse grammar
stream = TokenStream(parts)
trees = list(self.grammar.parse(stream))
# print (trees) ### debug
if len(trees) > 1:
yield (Token.Placeholder.Bracket, '[')
first = True
for tree in trees:
if not first:
yield (Token.Placeholder.Separator, '|')
first = False
for t in tree.get_help_tokens():
yield t
if len(trees) > 1:
yield (Token.Placeholder.Bracket, ']')
return list(_())
| 30.914894
| 85
| 0.549209
|
ccffec93d0c86349835a6f5512536dd65637e582
| 2,843
|
py
|
Python
|
misc/config_tools/service_vm_config/serial_config.py
|
jackwhich/acrn-hypervisor-1
|
2ff11c2ef04a2668979b3e363e25f13cf48376ac
|
[
"BSD-3-Clause"
] | null | null | null |
misc/config_tools/service_vm_config/serial_config.py
|
jackwhich/acrn-hypervisor-1
|
2ff11c2ef04a2668979b3e363e25f13cf48376ac
|
[
"BSD-3-Clause"
] | null | null | null |
misc/config_tools/service_vm_config/serial_config.py
|
jackwhich/acrn-hypervisor-1
|
2ff11c2ef04a2668979b3e363e25f13cf48376ac
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright (C) 2021 Intel Corporation.
#
# SPDX-License-Identifier: BSD-3-Clause
#
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'library'))
import argparse
import lxml.etree
import common
# vuart device names are configured to start from /dev/ttyS8
START_VUART_DEV_NAME_NO = 8
VUART_DEV_NAME_NUM = 8
standard_uart_port = {'0x3F8', '0x2F8', '0x3E8', '0x2E8'}
UART_IRQ_BAUD = " irq 0 uart 16550A baud_base 115200"
def find_non_standard_uart(vm, scenario_etree, allocation_etree):
uart_list = []
vmname = common.get_node("./name/text()", vm)
connection_list0 = scenario_etree.xpath(f"//vuart_connection[endpoint/vm_name = '{vmname}']")
connection_list1 = allocation_etree.xpath(f"//vuart_connection[endpoint/vm_name = '{vmname}']")
for connection in (connection_list0 + connection_list1):
        vuart_type = common.get_node("./type/text()", connection)
        if vuart_type != "legacy":
            continue
port = common.get_node(f".//endpoint[vm_name = '{vmname}']/io_port/text()", connection)
        if port not in standard_uart_port:
target_vm_name = common.get_node(f".//endpoint[vm_name != '{vmname}']/vm_name/text()", connection)
target_vm_id = common.get_node(f"//vm[name = '{target_vm_name}']/@id", scenario_etree)
uart_list.append({"io_port" : port, "target_vm_id" : target_vm_id})
return uart_list
def main(args):
"""
Generate serial configuration file for service VM
:param args: command line args
"""
scenario_etree = lxml.etree.parse(args.scenario)
allocation_etree = lxml.etree.parse(args.allocation)
vuart_target_vmid = {}
vm_list = scenario_etree.xpath("//vm[load_order = 'SERVICE_VM']")
for vm in vm_list:
vuart_list = find_non_standard_uart(vm, scenario_etree, allocation_etree)
vmname = common.get_node("./name/text()", vm)
if len(vuart_list) != 0:
with open(args.out, "w+") as config_f:
for uart_start_num, vuart in enumerate(vuart_list, start=START_VUART_DEV_NAME_NO):
base = " port " + vuart["io_port"]
vm_id_note = "# User_VM_id: " + str(vuart["target_vm_id"])+ '\n'
config_f.write(vm_id_note)
conf = "/dev/ttyS" + str(uart_start_num) + base + UART_IRQ_BAUD + '\n'
config_f.write(conf)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--allocation", help="the XML file summarizing resource allocated by config tool")
parser.add_argument("--scenario", help="the XML file specifying the scenario to be set up")
parser.add_argument("--out", help="location of the output serial configuration file")
args = parser.parse_args()
main(args)
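# Example of the generated file (sketch; the io_port value 0x9000 and the VM id
# are made up): each non-standard legacy vuart becomes a ttyS entry from ttyS8 on:
#   # User_VM_id: 1
#   /dev/ttyS8 port 0x9000 irq 0 uart 16550A baud_base 115200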
| 42.432836
| 110
| 0.663384
|
e538f89361def811e56e17fd0b1bb6df348acb2e
| 818
|
py
|
Python
|
flexpart_alto/scratch/Untitled.py
|
daliagachc/flexpart-alto
|
2663fad47239989bb4aff0e2058bfd62e2489d07
|
[
"Xnet",
"X11"
] | null | null | null |
flexpart_alto/scratch/Untitled.py
|
daliagachc/flexpart-alto
|
2663fad47239989bb4aff0e2058bfd62e2489d07
|
[
"Xnet",
"X11"
] | null | null | null |
flexpart_alto/scratch/Untitled.py
|
daliagachc/flexpart-alto
|
2663fad47239989bb4aff0e2058bfd62e2489d07
|
[
"Xnet",
"X11"
] | null | null | null |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.7.0-rc1
# kernelspec:
# display_name: Python [conda env:b36]
# language: python
# name: conda-env-b36-py
# ---
# %%
from useful_scit.imps2.defs import *
# %%
path = '../resources/wrf-flexpart-required-fields.xlsx'
# %%
df = pd.read_excel(path)
# %%
b = df['used'] == 1
df1 = df[b]
# %%
df1.head()
# %%
wrf_path = '/Volumes/Transcend/Downloads/wrfout_d01_2017-12-21_20:00:00'
ds = xr.open_dataset(wrf_path)
# %%
WF = 'WRF variable'
va = df1[WF].values
# %%
for v in va:
    ds[v]
# %%
ds1 = ds[va]
# %%
za.compressed_netcdf_save(ds1,'/tmp/w0.nc',complevel=4)
za.compressed_netcdf_save(ds,'/tmp/wF.nc')
# %%
45/70
# %%
| 14.872727
| 72
| 0.601467
|
aaf6cf992782cf91f0848810b2fad985bfd2deb7
| 6,611
|
py
|
Python
|
hysds/user_rules_job.py
|
fgreg/hysds
|
74a1019665b02f0f475cc4e7fc0a993dd71d7a53
|
[
"Apache-2.0"
] | null | null | null |
hysds/user_rules_job.py
|
fgreg/hysds
|
74a1019665b02f0f475cc4e7fc0a993dd71d7a53
|
[
"Apache-2.0"
] | null | null | null |
hysds/user_rules_job.py
|
fgreg/hysds
|
74a1019665b02f0f475cc4e7fc0a993dd71d7a53
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
import os
import sys
import json
import requests
import time
import backoff
import socket
import traceback
import copy
import hysds
from hysds.celery import app
from hysds.log_utils import logger, backoff_max_tries, backoff_max_value
@backoff.on_exception(backoff.expo,
Exception,
max_tries=backoff_max_tries,
max_value=backoff_max_value)
def ensure_job_indexed(job_id, es_url, alias):
"""Ensure job is indexed."""
query = {
"query": {
"bool": {
"must": [
{'term': {'_id': job_id}}
]
}
},
"fields": [],
}
logger.info("ensure_job_indexed query: %s" % json.dumps(query, indent=2))
if es_url.endswith('/'):
search_url = '%s%s/_search' % (es_url, alias)
else:
search_url = '%s/%s/_search' % (es_url, alias)
logger.info("ensure_job_indexed url: %s" % search_url)
r = requests.post(search_url, data=json.dumps(query))
logger.info("ensure_job_indexed status: %s" % r.status_code)
r.raise_for_status()
result = r.json()
logger.info("ensure_job_indexed result: %s" % json.dumps(result, indent=2))
total = result['hits']['total']
if total == 0:
raise RuntimeError("Failed to find indexed job: {}".format(job_id))
def get_job(job_id, rule, result):
"""Return generic json job configuration."""
priority = rule.get('priority', 0)
return {
"job_type": "job:%s" % rule['job_type'],
"priority": priority,
"payload": {
"job_id": job_id,
"rule": rule,
"rule_hit": result,
}
}
def update_query(job_id, rule):
"""Update final query."""
# build query
query = rule['query']
# filters
filts = []
# query all?
if rule.get('query_all', False) is False:
filts.append({'ids': {'values': [job_id]}})
# build final query
if 'filtered' in query:
final_query = copy.deepcopy(query)
if 'and' in query['filtered']['filter']:
final_query['filtered']['filter']['and'].extend(filts)
else:
filts.append(final_query['filtered']['filter'])
final_query['filtered']['filter'] = {
'and': filts,
}
else:
final_query = {
'filtered': {
'query': query,
'filter': {
'and': filts,
}
}
}
final_query = {"query": final_query}
logger.info("Final query: %s" % json.dumps(final_query, indent=2))
rule['query'] = final_query
rule['query_string'] = json.dumps(final_query)
def evaluate_user_rules_job(job_id, es_url=app.conf.JOBS_ES_URL,
alias=app.conf.STATUS_ALIAS,
user_rules_idx=app.conf.USER_RULES_JOB_INDEX,
job_queue=app.conf.JOBS_PROCESSED_QUEUE):
"""Process all user rules in ES database and check if this job ID matches.
If so, submit jobs. Otherwise do nothing."""
# sleep 10 seconds to allow ES documents to be indexed
time.sleep(10)
# ensure job is indexed
ensure_job_indexed(job_id, es_url, alias)
# get all enabled user rules
query = {"query": {"term": {"enabled": True}}}
r = requests.post('%s/%s/.percolator/_search?search_type=scan&scroll=10m&size=100' %
(es_url, user_rules_idx), data=json.dumps(query))
r.raise_for_status()
scan_result = r.json()
count = scan_result['hits']['total']
scroll_id = scan_result['_scroll_id']
rules = []
while True:
r = requests.post('%s/_search/scroll?scroll=10m' %
es_url, data=scroll_id)
res = r.json()
scroll_id = res['_scroll_id']
if len(res['hits']['hits']) == 0:
break
for hit in res['hits']['hits']:
rules.append(hit['_source'])
logger.info("Got %d enabled rules to check." % len(rules))
# process rules
for rule in rules:
# sleep between queries
time.sleep(1)
# check for matching rules
update_query(job_id, rule)
final_qs = rule['query_string']
try:
r = requests.post('%s/job_status-current/job/_search' %
es_url, data=final_qs)
r.raise_for_status()
        except Exception:
logger.error("Failed to query ES. Got status code %d:\n%s" %
(r.status_code, traceback.format_exc()))
continue
result = r.json()
if result['hits']['total'] == 0:
logger.info("Rule '%s' didn't match for %s" %
(rule['rule_name'], job_id))
continue
else:
doc_res = result['hits']['hits'][0]
logger.info("Rule '%s' successfully matched for %s" %
(rule['rule_name'], job_id))
#logger.info("doc_res: %s" % json.dumps(doc_res, indent=2))
# submit trigger task
queue_job_trigger(doc_res, rule, es_url)
logger.info("Trigger task submitted for %s: %s" %
(job_id, rule['job_type']))
return True
@backoff.on_exception(backoff.expo,
socket.error,
max_tries=backoff_max_tries,
max_value=backoff_max_value)
def queue_finished_job(id):
"""Queue job id for user_rules_job evaluation."""
payload = {
'type': 'user_rules_job',
'function': 'hysds.user_rules_job.evaluate_user_rules_job',
'args': [id],
}
hysds.task_worker.run_task.apply_async((payload,),
queue=app.conf.USER_RULES_JOB_QUEUE)
@backoff.on_exception(backoff.expo,
socket.error,
max_tries=backoff_max_tries,
max_value=backoff_max_value)
def queue_job_trigger(doc_res, rule, es_url):
"""Trigger job rule execution."""
payload = {
'type': 'user_rules_trigger',
'function': 'hysds_commons.job_utils.submit_mozart_job',
'args': [doc_res, rule],
'kwargs': {'es_hysdsio_url': es_url},
}
hysds.task_worker.run_task.apply_async((payload,),
queue=app.conf.USER_RULES_TRIGGER_QUEUE)
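# Usage sketch: a component that has just indexed a finished job would normally
# enqueue the evaluation asynchronously rather than run it inline. The job id
# below is hypothetical.
#
#   queue_finished_job("some-job-id")
#   # or, synchronously (blocks while every enabled rule is checked):
#   # evaluate_user_rules_job("some-job-id")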
| 31.783654
| 88
| 0.566329
|
67e3a0a298ed87092953c78d658a1b0a70a6a974
| 3,412
|
py
|
Python
|
src/vbs_to_js/vbs_checker.py
|
ITh4cker/auto_tools
|
8e0e00cdf8bf60ee3f26fa5ae8f18c376298d0aa
|
[
"Apache-2.0"
] | null | null | null |
src/vbs_to_js/vbs_checker.py
|
ITh4cker/auto_tools
|
8e0e00cdf8bf60ee3f26fa5ae8f18c376298d0aa
|
[
"Apache-2.0"
] | null | null | null |
src/vbs_to_js/vbs_checker.py
|
ITh4cker/auto_tools
|
8e0e00cdf8bf60ee3f26fa5ae8f18c376298d0aa
|
[
"Apache-2.0"
] | 1
|
2021-06-16T07:32:17.000Z
|
2021-06-16T07:32:17.000Z
|
# author = Cheng Chang (SA)
# Date = 2016/12/16
import os
import sys
import shutil
from vbs2js import VBSConverter
sys.path.append("..\..")
import third_party.wrappers.SALineup_python.pysal as SA
def get_parent_path(path, grade):
if grade > 0 and path.count('\\') >= grade:
l = path.split('\\')
return '\\'.join(l[:0-grade])
else:
return path
class VBSChecker:
"""
"""
def __init__(self, salWrapper):
self.root_path_ = os.path.split(os.path.realpath(__file__))[0]
self.sal_ = salWrapper
self.sal_opt_args_ = '--productname=sc --script-malware=true --loglevel=all '
project_dir = get_parent_path(self.root_path_, 2)
self.sal_path_ = os.path.join(project_dir, 'third_party', 'wrappers', 'SALineup_python')
self.js_path_ = os.path.join(self.root_path_, 'js')
def check_env(self):
return True
def clear_env(self):
# clean up js folder
if os.path.exists('js'):
shutil.rmtree('js')
os.mkdir('js')
# clean up result folder
if os.path.exists('behavior'):
shutil.rmtree('behavior')
def process_dir(self, dir_path):
for root, dirs, files in os.walk(dir_path):
for name in files:
file_path = os.path.join(root, name)
if name.endswith('.vbs'):
js_file_path = os.path.join(self.js_path_, name + '.js')
converter = VBSConverter(file_path, js_file_path)
converter.convert()
else:
new_js_name = os.path.join(self.js_path_,name)
shutil.copy2(file_path, new_js_name)
self.process_internal(self.js_path_)
def process_file(self, file_path):
prefix_path, file_name = os.path.split(file_path)
file_name_without_ext, ext = os.path.splitext(file_name)
# vbs2js
if ext == '.vbs':
js_path = os.path.join(self.js_path_, file_name + '.js')
converter = VBSConverter(file_path, js_path)
converter.convert()
else:
js_path = file_path
self.process_internal(js_path)
def process_internal(self, js_path):
# sal
self.sal_.process(self.sal_opt_args_+ js_path)
# move behavior to the current path
behavior_path = os.path.join(self.sal_path_, 'result')
new_behavior_path = os.path.join(self.root_path_, 'behavior')
shutil.move(behavior_path, new_behavior_path)
self.dump_log()
def dump_log(self):
log_file = os.path.join(self.root_path_, 'log.txt')
log = open(log_file, 'w')
log_content = ''.join(self.sal_.output_)
log.write(log_content)
log.close()
def print_help():
print """
Usage:
python vbs_checker.py vbs_file/dir
"""
def main():
if len(sys.argv) != 2:
print_help()
exit(0)
salWrapper = SA.PySalHelper()
vbsChecker = VBSChecker(salWrapper)
if not vbsChecker.check_env():
exit(-1)
vbsChecker.clear_env()
if os.path.isfile(sys.argv[1]):
vbsChecker.process_file(sys.argv[1])
elif os.path.isdir(sys.argv[1]):
vbsChecker.process_dir(sys.argv[1])
if __name__ == '__main__':
main()
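# Example runs (sketch; the sample paths are hypothetical):
#   python vbs_checker.py sample.vbs        # convert one script and scan it
#   python vbs_checker.py C:\samples\vbs    # convert and scan a whole folder
# Behaviour reports are moved to .\behavior and SALineup output goes to log.txt.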
| 32.188679
| 97
| 0.580598
|
22514f28481b5364873fea007c702466c338d2d4
| 11,522
|
py
|
Python
|
greentest/test__pool.py
|
bkad/gevent
|
185b71cc472db413515059ab4a197207cdaf1f6c
|
[
"MIT"
] | 2
|
2015-12-19T01:34:43.000Z
|
2018-02-02T12:32:01.000Z
|
greentest/test__pool.py
|
alex/gevent
|
454a77ca561868854760b2d9cbfa3bf3bbd2e062
|
[
"MIT"
] | null | null | null |
greentest/test__pool.py
|
alex/gevent
|
454a77ca561868854760b2d9cbfa3bf3bbd2e062
|
[
"MIT"
] | 2
|
2019-11-24T12:11:50.000Z
|
2020-12-26T19:00:20.000Z
|
from __future__ import with_statement
from time import time
import gevent
from gevent import pool
from gevent.event import Event
import greentest
import random
from greentest import ExpectedException
import six
import unittest
class TestCoroutinePool(unittest.TestCase):
klass = pool.Pool
def test_apply_async(self):
done = Event()
def some_work(x):
done.set()
pool = self.klass(2)
pool.apply_async(some_work, ('x', ))
done.wait()
def test_apply(self):
value = 'return value'
def some_work():
return value
pool = self.klass(2)
result = pool.apply(some_work)
self.assertEqual(value, result)
def test_multiple_coros(self):
evt = Event()
results = []
def producer():
gevent.sleep(0.001)
results.append('prod')
evt.set()
def consumer():
results.append('cons1')
evt.wait()
results.append('cons2')
pool = self.klass(2)
done = pool.spawn(consumer)
pool.apply_async(producer)
done.get()
self.assertEqual(['cons1', 'prod', 'cons2'], results)
def dont_test_timer_cancel(self):
timer_fired = []
def fire_timer():
timer_fired.append(True)
def some_work():
gevent.timer(0, fire_timer)
pool = self.klass(2)
pool.apply(some_work)
gevent.sleep(0)
self.assertEqual(timer_fired, [])
def test_reentrant(self):
pool = self.klass(1)
result = pool.apply(pool.apply, (lambda a: a + 1, (5, )))
self.assertEqual(result, 6)
evt = Event()
pool.apply_async(evt.set)
evt.wait()
def test_stderr_raising(self):
# testing that really egregious errors in the error handling code
# (that prints tracebacks to stderr) don't cause the pool to lose
# any members
import sys
pool = self.klass(size=1)
# we're going to do this by causing the traceback.print_exc in
# safe_apply to raise an exception and thus exit _main_loop
normal_err = sys.stderr
try:
sys.stderr = FakeFile()
waiter = pool.spawn(crash)
with gevent.Timeout(2):
self.assertRaises(RuntimeError, waiter.get)
# the pool should have something free at this point since the
# waiter returned
# pool.Pool change: if an exception is raised during execution of a link,
# the rest of the links are scheduled to be executed on the next hub iteration
# this introduces a delay in updating pool.sem which makes pool.free_count() report 0
# therefore, sleep:
gevent.sleep(0)
self.assertEqual(pool.free_count(), 1)
# shouldn't block when trying to get
t = gevent.Timeout.start_new(0.1)
try:
pool.apply(gevent.sleep, (0, ))
finally:
t.cancel()
finally:
sys.stderr = normal_err
pool.join()
def crash(*args, **kw):
raise RuntimeError("Whoa")
class FakeFile(object):
def write(*args):
raise RuntimeError('Whaaa')
class PoolBasicTests(greentest.TestCase):
klass = pool.Pool
def test_execute_async(self):
p = self.klass(size=2)
self.assertEqual(p.free_count(), 2)
r = []
first = p.spawn(r.append, 1)
self.assertEqual(p.free_count(), 1)
first.get()
self.assertEqual(r, [1])
gevent.sleep(0)
self.assertEqual(p.free_count(), 2)
#Once the pool is exhausted, calling an execute forces a yield.
p.apply_async(r.append, (2, ))
self.assertEqual(1, p.free_count())
self.assertEqual(r, [1])
p.apply_async(r.append, (3, ))
self.assertEqual(0, p.free_count())
self.assertEqual(r, [1])
p.apply_async(r.append, (4, ))
self.assertEqual(r, [1])
gevent.sleep(0.01)
self.assertEqual(sorted(r), [1, 2, 3, 4])
def test_discard(self):
p = self.klass(size=1)
first = p.spawn(gevent.sleep, 1000)
p.discard(first)
first.kill()
assert not first, first
self.assertEqual(len(p), 0)
self.assertEqual(p._semaphore.counter, 1)
def test_add_method(self):
p = self.klass(size=1)
first = gevent.spawn(gevent.sleep, 1000)
try:
second = gevent.spawn(gevent.sleep, 1000)
try:
self.assertEqual(p.free_count(), 1)
self.assertEqual(len(p), 0)
p.add(first)
timeout = gevent.Timeout(0.1)
timeout.start()
try:
p.add(second)
except gevent.Timeout:
pass
else:
raise AssertionError('Expected timeout')
finally:
timeout.cancel()
self.assertEqual(p.free_count(), 0)
self.assertEqual(len(p), 1)
finally:
second.kill()
finally:
first.kill()
def test_apply(self):
p = self.klass()
result = p.apply(lambda a: ('foo', a), (1, ))
self.assertEqual(result, ('foo', 1))
def test_init_error(self):
self.switch_expected = False
self.assertRaises(ValueError, self.klass, -1)
#
# tests from standard library test/test_multiprocessing.py
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time() - t
def sqr(x, wait=0.0):
gevent.sleep(wait)
return x * x
def squared(x):
return x * x
def sqr_random_sleep(x):
gevent.sleep(random.random() * 0.1)
return x * x
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.082, 0.035, 0.14
class TestPool(greentest.TestCase):
__timeout__ = 5
size = 1
def setUp(self):
greentest.TestCase.setUp(self)
self.pool = pool.Pool(self.size)
def cleanup(self):
self.pool.join()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), 25)
self.assertEqual(papply(sqr, (), {'x': 3}), 9)
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, range(10)), list(map(squared, range(10))))
self.assertEqual(pmap(sqr, range(100)), list(map(squared, range(100))))
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertAlmostEqual(get.elapsed, TIMEOUT1, 1)
def test_async_callback(self):
result = []
res = self.pool.apply_async(sqr, (7, TIMEOUT1,), callback=lambda x: result.append(x))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertAlmostEqual(get.elapsed, TIMEOUT1, 1)
        gevent.sleep(0)  # lets the callback run
assert result == [49], result
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2))
get = TimingWrapper(res.get)
self.assertRaises(gevent.Timeout, get, timeout=TIMEOUT2)
self.assertAlmostEqual(get.elapsed, TIMEOUT2, 1)
self.pool.join()
def test_imap(self):
it = self.pool.imap(sqr, range(10))
self.assertEqual(list(it), list(map(squared, range(10))))
it = self.pool.imap(sqr, range(10))
for i in range(10):
self.assertEqual(six.advance_iterator(it), i * i)
self.assertRaises(StopIteration, lambda: six.advance_iterator(it))
it = self.pool.imap(sqr, range(1000))
for i in range(1000):
self.assertEqual(six.advance_iterator(it), i * i)
self.assertRaises(StopIteration, lambda: six.advance_iterator(it))
def test_imap_random(self):
it = self.pool.imap(sqr_random_sleep, range(10))
self.assertEqual(list(it), list(map(squared, range(10))))
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, range(1000))
self.assertEqual(sorted(it), list(map(squared, range(1000))))
it = self.pool.imap_unordered(sqr, range(1000))
self.assertEqual(sorted(it), list(map(squared, range(1000))))
def test_imap_unordered_random(self):
it = self.pool.imap_unordered(sqr_random_sleep, range(10))
self.assertEqual(sorted(it), list(map(squared, range(10))))
def test_empty(self):
it = self.pool.imap_unordered(sqr, [])
self.assertEqual(list(it), [])
it = self.pool.imap(sqr, [])
self.assertEqual(list(it), [])
self.assertEqual(self.pool.map(sqr, []), [])
def test_terminate(self):
result = self.pool.map_async(gevent.sleep, [0.1] * ((self.size or 10) * 2))
gevent.sleep(0.1)
kill = TimingWrapper(self.pool.kill)
kill()
assert kill.elapsed < 0.5, kill.elapsed
result.join()
def sleep(self, x):
gevent.sleep(float(x) / 10.)
return str(x)
def test_imap_unordered_sleep(self):
        # testing that imap_unordered returns items in completion order
result = list(self.pool.imap_unordered(self.sleep, [10, 1, 2]))
if self.pool.size == 1:
expected = ['10', '1', '2']
else:
expected = ['1', '2', '10']
self.assertEqual(result, expected)
class TestPool2(TestPool):
size = 2
class TestPool3(TestPool):
size = 3
class TestPool10(TestPool):
size = 10
class TestPoolUnlimit(TestPool):
size = None
class TestJoinSleep(greentest.GenericWaitTestCase):
def wait(self, timeout):
p = pool.Pool()
g = p.spawn(gevent.sleep, 10)
try:
p.join(timeout=timeout)
finally:
g.kill()
class TestJoinSleep_raise_error(greentest.GenericWaitTestCase):
def wait(self, timeout):
p = pool.Pool()
g = p.spawn(gevent.sleep, 10)
try:
p.join(timeout=timeout, raise_error=True)
finally:
g.kill()
class TestJoinEmpty(greentest.TestCase):
switch_expected = False
def test(self):
p = pool.Pool()
p.join()
class TestSpawn(greentest.TestCase):
switch_expected = True
def test(self):
p = pool.Pool(1)
self.assertEqual(len(p), 0)
p.spawn(gevent.sleep, 0.1)
self.assertEqual(len(p), 1)
p.spawn(gevent.sleep, 0.1) # this spawn blocks until the old one finishes
self.assertEqual(len(p), 1)
gevent.sleep(0.19)
self.assertEqual(len(p), 0)
def error_iter():
yield 1
yield 2
raise ExpectedException
class TestErrorInIterator(greentest.TestCase):
error_fatal = False
def test(self):
p = pool.Pool(3)
self.assertRaises(ExpectedException, p.map, lambda x: None, error_iter())
gevent.sleep(0.001)
def test_unordered(self):
p = pool.Pool(3)
def unordered():
return list(p.imap_unordered(lambda x: None, error_iter()))
self.assertRaises(ExpectedException, unordered)
gevent.sleep(0.001)
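# Minimal gevent.pool usage sketch mirroring what the tests above exercise:
# map() blocks until every greenlet in the pool has produced its result.
def _example_pool_map():
    p = pool.Pool(3)
    squares = p.map(sqr, range(5))
    p.join()
    return squares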
if __name__ == '__main__':
greentest.main()
| 27.368171
| 97
| 0.582104
|
7d6bcf098a527eb8f141ad2155193f3ead334ad7
| 23,788
|
py
|
Python
|
src/pitchly/pitch_control.py
|
opunsoars/pitchly
|
ad3bba4ab7ce1f2dc5cb6d184aac14a487d20056
|
[
"MIT"
] | 7
|
2021-04-12T18:28:44.000Z
|
2021-12-24T06:13:34.000Z
|
src/pitchly/pitch_control.py
|
opunsoars/pitchly
|
ad3bba4ab7ce1f2dc5cb6d184aac14a487d20056
|
[
"MIT"
] | 1
|
2021-08-11T13:58:38.000Z
|
2021-08-11T15:06:23.000Z
|
src/pitchly/pitch_control.py
|
opunsoars/pitchly
|
ad3bba4ab7ce1f2dc5cb6d184aac14a487d20056
|
[
"MIT"
] | 1
|
2021-08-11T13:21:37.000Z
|
2021-08-11T13:21:37.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# this file is modified from https://github.com/Friends-of-Tracking-Data-FoTD/LaurieOnTracking/blob/master/Metrica_PitchControl.py
# original author: Laurie Shaw (research work developed by William Spearman)
import numpy as np
"""
Created on Mon Apr 19 14:52:19 2020
Module for calculating a Pitch Control surface using MetricaSports's tracking & event data.
Pitch control (at a given location on the field) is the probability that a team will gain
possession if the ball is moved to that location on the field.
Methodology is described in "Off the ball scoring opportunities" by William Spearman:
http://www.sloansportsconference.com/wp-content/uploads/2018/02/2002.pdf
GitHub repo for this code can be found here:
https://github.com/Friends-of-Tracking-Data-FoTD/LaurieOnTracking
Data can be found at: https://github.com/metrica-sports/sample-data
Functions
----------
calculate_pitch_control_at_target(): calculate the pitch control probability for the attacking
and defending teams at a specified target position on the ball.
generate_pitch_control_for_event(): this function evaluates pitch control surface over the
entire field at the moment
of the given event (determined by the index of the event passed as an input)
Classes
---------
The 'player' class collects and stores trajectory information for each player required by the
pitch control calculations.
@author: Laurie Shaw (@EightyFivePoint)
Modified for pitchly by @author: Vinay Warrier (@opunsoars)
"""
def initialise_players(frame_data, params):
"""
    initialise_players(frame_data, params)
create a list of player objects that holds their positions and velocities from the tracking data dataframe
Parameters
-----------
    frame_data: row (i.e. instant) of the tracking DataFrame for the players of one team
params: Dictionary of model parameters (default model parameters can be generated using default_model_params() )
Returns
-----------
team_players: list of player objects for the team at at given instant
"""
# get player ids
player_ids = np.unique([x.split("_")[0] for x in frame_data.keys()])
# create list
team_players = []
for p in player_ids:
# create a player object for player_id 'p'
team_player = player(p, frame_data, params)
if team_player.inframe:
team_players.append(team_player)
return team_players
class player(object):
"""
player() class
Class defining a player object that stores position, velocity, time-to-intercept and pitch control contributions for a player
__init__ Parameters
-----------
pid: id (jersey number) of player
    frame_data: row of the tracking DataFrame holding this player's position and velocity
params: Dictionary of model parameters (default model parameters can be generated using default_model_params() )
methods include:
-----------
simple_time_to_intercept(r_final): time take for player to get to target position (r_final) given current position
probability_intercept_ball(T): probability player will have controlled ball at time T given their expected time_to_intercept
"""
# player object holds position, velocity, time-to-intercept and pitch control contributions for each player
def __init__(self, pid, frame_data, params):
self.id = pid
# player max speed in m/s. Could be individualised
self.vmax = params["max_player_speed"]
# player reaction time in 's'. Could be individualised
self.reaction_time = params["reaction_time"]
# standard deviation of sigmoid function (see Eq 4 in Spearman, 2018)
self.tti_sigma = params["tti_sigma"]
self.get_position(frame_data)
self.get_velocity(frame_data)
self.PPCF = 0.0 # initialise this for later
def get_position(self, frame_data):
self.position = np.array(
[frame_data[f"{self.id}_x"], frame_data[f"{self.id}_y"]]
)
self.inframe = not np.any(np.isnan(self.position))
def get_velocity(self, frame_data):
self.velocity = np.array(
[frame_data[f"{self.id}_vx"], frame_data[f"{self.id}_vy"]]
)
if np.any(np.isnan(self.velocity)):
self.velocity = np.array([0.0, 0.0])
def simple_time_to_intercept(self, r_final):
self.PPCF = 0.0 # initialise this for later
# Time to intercept assumes that the player continues moving at current velocity for 'reaction_time' seconds
# and then runs at full speed to the target position.
r_reaction = self.position + self.velocity * self.reaction_time
self.time_to_intercept = (
self.reaction_time
+ np.linalg.norm(r_final - r_reaction) / self.vmax
)
return self.time_to_intercept
def probability_intercept_ball(self, T):
# probability of a player arriving at target location at time 'T' given their expected
# time_to_intercept (time of arrival), as described in Spearman 2018
f = 1 / (
1.0
+ np.exp(
-np.pi
/ np.sqrt(3.0)
/ self.tti_sigma
* (T - self.time_to_intercept)
)
)
return f
""" Generate pitch control map """
def default_model_params(time_to_control_veto=3):
"""
default_model_params()
Returns the default parameters that define and evaluate the model.
See Spearman 2018 for more details.
Parameters
-----------
time_to_control_veto: If the probability that another team or player can get
to the ball and control it is less than 10^-time_to_control_veto,
ignore that player.
Returns
-----------
params: dictionary of parameters required to determine and calculate the model
"""
# key parameters for the model, as described in Spearman 2018
params = {}
# model parameters
params[
"max_player_accel"
] = 7.0 # maximum player acceleration m/s/s, not used in this implementation
params["max_player_speed"] = 5.0 # maximum player speed m/s
# seconds, time taken for player to react and change trajectory. Roughly
# determined as vmax/amax
params["reaction_time"] = 0.7
# Standard deviation of sigmoid function in Spearman 2018 ('s') that
# determines uncertainty in player arrival time
params["tti_sigma"] = 0.45
# kappa parameter in Spearman 2018 (=1.72 in the paper) that gives the
# advantage defending players to control ball, I have set to 1 so that
# home & away players have same ball control probability
params["kappa_def"] = 1.0
params["lambda_att"] = 4.3 # ball control parameter for attacking team
# ball control parameter for defending team
params["lambda_def"] = 4.3 * params["kappa_def"]
params["average_ball_speed"] = 15.0 # average ball travel speed in m/s
# numerical parameters for model evaluation
params["int_dt"] = 0.04 # integration timestep (dt)
params["max_int_time"] = 10 # upper limit on integral time
# assume convergence when PPCF>0.99 at a given location.
params["model_converge_tol"] = 0.01
# The following are 'short-cut' parameters. We do not need to calculated
# PPCF explicitly when a player has a sufficient head start.
# A sufficient head start is when the a player arrives at the target
# location at least 'time_to_control' seconds before the next player
params["time_to_control_att"] = (
time_to_control_veto
* np.log(10)
* (np.sqrt(3) * params["tti_sigma"] / np.pi + 1 / params["lambda_att"])
)
params["time_to_control_def"] = (
time_to_control_veto
* np.log(10)
* (np.sqrt(3) * params["tti_sigma"] / np.pi + 1 / params["lambda_def"])
)
return params
def generate_pitch_control_for_event(
event_id,
events,
tracking_home,
tracking_away,
params,
field_dimen=(106.0, 68.0),
n_grid_cells_x=50,
):
"""generate_pitch_control_for_event
Evaluates pitch control surface over the entire field at the moment of the
given event (determined by the index of the event passed as an input)
Parameters
-----------
event_id: Index (not row) of the event that describes the instant at
which the pitch control surface should be calculated
events: Dataframe containing the event data
tracking_home: tracking DataFrame for the Home team
tracking_away: tracking DataFrame for the Away team
params: Dictionary of model parameters (default model parameters can be
generated using default_model_params() )
field_dimen: tuple containing the length and width of the pitch in
meters. Default is (106,68)
n_grid_cells_x: Number of pixels in the grid (in the x-direction) that
covers the surface. Default is 50.
n_grid_cells_y will be calculated based on n_grid_cells_x
and the field dimensions
    Returns
-----------
PPCFa: Pitch control surface (dimen (n_grid_cells_x,n_grid_cells_y) )
            containing pitch control probability for the attacking team.
Surface for the defending team is just 1-PPCFa.
xgrid: Positions of the pixels in the x-direction (field length)
ygrid: Positions of the pixels in the y-direction (field width)
"""
# get the details of the event (frame, team in possession, ball_start_position)
pass_frame = events.loc[event_id]["Start Frame"]
pass_team = events.loc[event_id].Team
# print(pass_team)
ball_start_pos = np.array(
[events.loc[event_id]["Start X"], events.loc[event_id]["Start Y"]]
)
# break the pitch down into a grid
n_grid_cells_y = int(n_grid_cells_x * field_dimen[1] / field_dimen[0])
xgrid = np.linspace(
-field_dimen[0] / 2.0, field_dimen[0] / 2.0, n_grid_cells_x
)
ygrid = np.linspace(
-field_dimen[1] / 2.0, field_dimen[1] / 2.0, n_grid_cells_y
)
# initialise pitch control grids for attacking and defending teams
PPCFa = np.zeros(shape=(len(ygrid), len(xgrid)))
PPCFd = np.zeros(shape=(len(ygrid), len(xgrid)))
# initialise player positions and velocities for pitch control calc (so that
# we're not repeating this at each grid cell position)
if pass_team == "Home":
        # initialise_players() in this module only takes (frame_data, params)
        attacking_players = initialise_players(tracking_home.loc[pass_frame], params)
        defending_players = initialise_players(tracking_away.loc[pass_frame], params)
    elif pass_team == "Away":
        defending_players = initialise_players(tracking_home.loc[pass_frame], params)
        attacking_players = initialise_players(tracking_away.loc[pass_frame], params)
else:
assert False, "Team in possession must be either home or away"
    # calculate the pitch control model at each location on the pitch
for i in range(len(ygrid)):
for j in range(len(xgrid)):
target_position = np.array([xgrid[j], ygrid[i]])
PPCFa[i, j], PPCFd[i, j] = calculate_pitch_control_at_target(
target_position,
attacking_players,
defending_players,
ball_start_pos,
params,
)
    # check probability sums within convergence
checksum = np.sum(PPCFa + PPCFd) / float(n_grid_cells_y * n_grid_cells_x)
assert (
1 - checksum < params["model_converge_tol"]
), "Checksum failed: %1.3f" % (1 - checksum)
return PPCFa, xgrid, ygrid
def calculate_pitch_control_at_target(
target_position,
attacking_players,
defending_players,
ball_start_pos,
params=default_model_params(),
return_individual=False,
):
"""calculate_pitch_control_at_target
Calculates the pitch control probability for the attacking and defending
teams at a specified target position on the ball.
Parameters
-----------
target_position: size 2 numpy array containing the (x,y) position of the
position on the field to evaluate pitch control
attacking_players: list of 'player' objects (see player class above) for
the players on the attacking team (team in possession)
defending_players: list of 'player' objects (see player class above) for
the players on the defending team
ball_start_pos: Current position of the ball (start position for a pass).
If set to NaN, function will assume that the ball is
already at the target position.
params: Dictionary of model parameters (default model parameters can be
generated using default_model_params() )
    Returns
-----------
PPCFatt: Pitch control probability for the attacking team
PPCFdef: Pitch control probability for the defending team
( 1-PPCFatt-PPCFdef < params['model_converge_tol'] )
"""
# calculate ball travel time from start position to end position.
# assume that ball is already at location
if ball_start_pos is None or any(np.isnan(ball_start_pos)):
ball_travel_time = 0.0
else:
# ball travel time is distance to target position from current ball
# position divided assumed average ball speed
ball_travel_time = (
np.linalg.norm(target_position - ball_start_pos)
/ params["average_ball_speed"]
)
# first get arrival time of 'nearest' attacking player (nearest also
# dependent on current velocity)
tau_min_att = np.nanmin(
[p.simple_time_to_intercept(target_position) for p in attacking_players]
)
tau_min_def = np.nanmin(
[p.simple_time_to_intercept(target_position) for p in defending_players]
)
# check whether we actually need to solve equation 3
if (
tau_min_att - max(ball_travel_time, tau_min_def)
>= params["time_to_control_def"]
):
# if defending team can arrive significantly before attacking team,
# no need to solve pitch control model
return 0.0, 1.0
elif (
tau_min_def - max(ball_travel_time, tau_min_att)
>= params["time_to_control_att"]
):
# if attacking team can arrive significantly before defending team,
# no need to solve pitch control model
return 1.0, 0.0
else:
# solve pitch control model by integrating equation 3 in Spearman et al.
# first remove any player that is far (in time) from the target location
attacking_players = [
p
for p in attacking_players
if p.time_to_intercept - tau_min_att < params["time_to_control_att"]
]
defending_players = [
p
for p in defending_players
if p.time_to_intercept - tau_min_def < params["time_to_control_def"]
]
# set up integration arrays
dT_array = np.arange(
ball_travel_time - params["int_dt"],
ball_travel_time + params["max_int_time"],
params["int_dt"],
)
PPCFatt = np.zeros_like(dT_array)
PPCFdef = np.zeros_like(dT_array)
# integration equation 3 of Spearman 2018 until convergence or tolerance
# limit hit (see 'params')
ptot = 0.0
i = 1
Patt = {}
Pdef = {}
while 1 - ptot > params["model_converge_tol"] and i < dT_array.size:
T = dT_array[i]
for player in attacking_players:
                # calculate ball control probability for 'player' in time interval
# T+dt
dPPCFdT = (
(1 - PPCFatt[i - 1] - PPCFdef[i - 1])
* player.probability_intercept_ball(T)
* params["lambda_att"]
)
# make sure it's greater than zero
assert (
dPPCFdT >= 0
), "Invalid attacking player probability \
(calculate_pitch_control_at_target)"
# total contribution from individual player
player.PPCF += dPPCFdT * params["int_dt"]
# add to sum over players in the attacking team (remembering
# array element is zero at the start of each integration iteration)
PPCFatt[i] += player.PPCF
Patt[player.id] = player.PPCF
for player in defending_players:
                # calculate ball control probability for 'player' in time interval
# T+dt
dPPCFdT = (
(1 - PPCFatt[i - 1] - PPCFdef[i - 1])
* player.probability_intercept_ball(T)
* params["lambda_def"]
)
# make sure it's greater than zero
assert (
dPPCFdT >= 0
), "Invalid defending player probability \
(calculate_pitch_control_at_target)"
# total contribution from individual player
player.PPCF += dPPCFdT * params["int_dt"]
# add to sum over players in the defending team
PPCFdef[i] += player.PPCF
Pdef[player.id] = player.PPCF
ptot = PPCFdef[i] + PPCFatt[i] # total pitch control probability
i += 1
if i >= dT_array.size:
print("Integration failed to converge: %1.3f" % (ptot))
        if return_individual:
return PPCFatt[i - 1], PPCFdef[i - 1], Patt, Pdef
else:
return PPCFatt[i - 1], PPCFdef[i - 1]
def generate_pitch_control_for_frame(
frame_data,
home_cols,
away_cols,
params=default_model_params(),
attacking="Home",
field_dimen=(
106.0,
68.0,
),
n_grid_cells_x=50,
return_individual=False,
):
"""generate_pitch_control_for_frame
    Evaluates the pitch control surface over the entire field for a single frame
    of tracking data (positions and velocities at one instant).
    Parameters
    -----------
        frame_data: row of the tracking DataFrame holding player positions,
                    velocities and the ball location for the frame to evaluate
        home_cols: list of tracking columns belonging to the Home team players
        away_cols: list of tracking columns belonging to the Away team players
        attacking: team treated as being in possession, "Home" or "Away"
params: Dictionary of model parameters (default model parameters can be
generated using default_model_params() )
field_dimen: tuple containing the length and width of the pitch in meters.
Default is (106,68)
n_grid_cells_x: Number of pixels in the grid (in the x-direction) that
covers the surface. Default is 50.
n_grid_cells_y will be calculated based on n_grid_cells_x
and the field dimensions
    Returns
-----------
PPCFa: Pitch control surface (dimen (n_grid_cells_x,n_grid_cells_y) )
            containing pitch control probability for the attacking team.
Surface for the defending team is just 1-PPCFa.
xgrid: Positions of the pixels in the x-direction (field length)
ygrid: Positions of the pixels in the y-direction (field width)
"""
    # get the ball position for this frame
ball_start_pos = frame_data[["ball_x", "ball_y"]].to_list()
# break the pitch down into a grid
n_grid_cells_y = int(n_grid_cells_x * field_dimen[1] / field_dimen[0])
xgrid = np.linspace(
-field_dimen[0] / 2.0, field_dimen[0] / 2.0, n_grid_cells_x
)
ygrid = np.linspace(
-field_dimen[1] / 2.0, field_dimen[1] / 2.0, n_grid_cells_y
)
# initialise pitch control grids for attacking and defending teams
PPCFa = np.zeros(shape=(len(ygrid), len(xgrid)))
PPCFd = np.zeros(shape=(len(ygrid), len(xgrid)))
# pick only the columns representing players whose data is available for
# this match
# Basically playerIDs that have data for this row/frame
homeplayers = [
x.split("_")[0]
for x in frame_data[[c for c in home_cols if c.endswith("_x")]]
.dropna()
.keys()
]
awayplayers = [
x.split("_")[0]
for x in frame_data[[c for c in away_cols if c.endswith("_x")]]
.dropna()
.keys()
]
# initialise pitch control grids for individual players in attacking and
# defending teams
PPCFa_pax = {
pid: np.zeros(shape=(len(ygrid), len(xgrid))) for pid in homeplayers
}
PPCFd_pax = {
pid: np.zeros(shape=(len(ygrid), len(xgrid))) for pid in awayplayers
}
# initialise player positions and velocities for pitch control calc
# (so that we're not repeating this at each grid cell position)
if attacking == "Home":
attacking_players = initialise_players(frame_data[home_cols], params)
defending_players = initialise_players(frame_data[away_cols], params)
# opp = "Away"
elif attacking == "Away":
defending_players = initialise_players(frame_data[home_cols], params)
attacking_players = initialise_players(frame_data[away_cols], params)
# opp = "Home"
else:
assert False, "Team in possession must be either home or away"
    # calculate the pitch control model at each location on the pitch
for i in range(len(ygrid)):
for j in range(len(xgrid)):
target_position = np.array([xgrid[j], ygrid[i]])
            if return_individual:
# print (target_position)
out = calculate_pitch_control_at_target(
target_position,
attacking_players,
defending_players,
ball_start_pos,
params,
return_individual=True,
)
if len(out) < 4:
PPCFa[i, j], PPCFd[i, j] = out
else:
# print (target_position, out, [type(x) for x in out])
PPCFa[i, j], PPCFd[i, j], Patt, Pdef = out
for pid, ppcf_pax in Patt.items():
PPCFa_pax[pid][i, j] = ppcf_pax
for pid, ppcf_pax in Pdef.items():
PPCFd_pax[pid][i, j] = ppcf_pax
else:
PPCFa[i, j], PPCFd[i, j] = calculate_pitch_control_at_target(
target_position,
attacking_players,
defending_players,
ball_start_pos,
params,
)
    # check probability sums within convergence
checksum = np.sum(PPCFa + PPCFd) / float(n_grid_cells_y * n_grid_cells_x)
assert (
1 - checksum < params["model_converge_tol"]
), "Checksum failed: %1.3f" % (1 - checksum)
pitch_control_dict = dict()
    if return_individual:
pitch_control_dict["PPCFa"] = PPCFa
pitch_control_dict["xgrid"] = xgrid
pitch_control_dict["ygrid"] = ygrid
pitch_control_dict["PPCFa_pax"] = PPCFa_pax
return pitch_control_dict
else:
pitch_control_dict["PPCFa"] = PPCFa
pitch_control_dict["xgrid"] = xgrid
pitch_control_dict["ygrid"] = ygrid
return pitch_control_dict
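# Usage sketch (hypothetical data): `frame_row` is one row of a tracking DataFrame
# whose columns follow the `<player>_x/_y/_vx/_vy` plus `ball_x`/`ball_y` naming
# used above, and home_cols/away_cols split those columns by team.
def _example_pitch_control_for_frame(frame_row, home_cols, away_cols):
    params = default_model_params()
    surface = generate_pitch_control_for_frame(
        frame_row, home_cols, away_cols, params=params, attacking="Home"
    )
    # attacking-team control probability on the grid, plus the pixel positions
    return surface["PPCFa"], surface["xgrid"], surface["ygrid"]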
| 38.869281
| 130
| 0.630654
|
130b9c30414551712569888766316bbebf4fd066
| 135
|
py
|
Python
|
app_metrics/tests/__init__.py
|
pivotal-energy-solutions/django-app-metrics
|
d2d016595375a65414f9028b13a413fb740967b3
|
[
"BSD-3-Clause"
] | 1
|
2018-01-18T11:48:22.000Z
|
2018-01-18T11:48:22.000Z
|
app_metrics/tests/__init__.py
|
pivotal-energy-solutions/django-app-metrics
|
d2d016595375a65414f9028b13a413fb740967b3
|
[
"BSD-3-Clause"
] | null | null | null |
app_metrics/tests/__init__.py
|
pivotal-energy-solutions/django-app-metrics
|
d2d016595375a65414f9028b13a413fb740967b3
|
[
"BSD-3-Clause"
] | 1
|
2016-04-11T18:39:52.000Z
|
2016-04-11T18:39:52.000Z
|
# -*- coding: utf-8 -*-
from .base_tests import *
from .mixpanel_tests import *
from .statsd_tests import *
from .redis_tests import *
| 22.5
| 29
| 0.718519
|
9142a8af3c41e634916b50f3ba0995d823f3bc53
| 3,986
|
py
|
Python
|
Python/ShapeCohortGenPackage/ShapeCohortGen/CohortGenerator.py
|
SCIInstitute/shapeworks
|
cbd44fdeb83270179c2331f2ba8431cf7330a4ff
|
[
"MIT"
] | 3
|
2016-04-26T15:29:58.000Z
|
2018-10-05T18:39:12.000Z
|
Python/ShapeCohortGenPackage/ShapeCohortGen/CohortGenerator.py
|
ben2k/ShapeWorks
|
a61d2710c5592db1dc00b4fe11990e512220161f
|
[
"MIT"
] | 35
|
2015-05-22T18:26:16.000Z
|
2019-06-03T18:09:40.000Z
|
Python/ShapeCohortGenPackage/ShapeCohortGen/CohortGenerator.py
|
ben2k/ShapeWorks
|
a61d2710c5592db1dc00b4fe11990e512220161f
|
[
"MIT"
] | 7
|
2015-06-18T18:56:12.000Z
|
2019-06-17T19:15:06.000Z
|
from ShapeCohortGen import Supershapes,Ellipsoids,EllipsoidJoints,CohortGenUtils,Tori
class CohortGenerator():
def __init__(self,out_dir):
self.out_dir = out_dir
self.meshes = []
self.contours = []
self.segs = []
self.images = []
def generate_segmentations(self, randomize_size=True, spacing=[1.0,1.0,1.0], allow_on_boundary=True):
if not self.meshes:
print("Error: No meshes have been generated to get segmentations from.\n Call 'generate' first.")
return
self.segs = CohortGenUtils.generate_segmentations(self.meshes, self.out_dir, randomize_size, spacing, allow_on_boundary)
return self.segs
def generate_images(self, blur_factor=1, foreground_mean=180, foreground_var=30, background_mean=80, background_var=30):
if not self.segs:
print("Error: No segmentations have been generated to get images from.\n Call 'generate_segmentations' first.")
return
self.images = CohortGenUtils.generate_images(self.segs, self.out_dir, blur_factor, foreground_mean, foreground_var, background_mean, background_var)
return self.images
class EllipsoidCohortGenerator(CohortGenerator):
def __init__(self,out_dir):
super().__init__(out_dir)
def generate(self, num_samples=3, randomize_center=True, randomize_rotation=True, randomize_x_radius=True, randomize_y_radius=True, randomize_z_radius=True):
self.meshes = Ellipsoids.generate(num_samples, self.out_dir, randomize_center, randomize_rotation, randomize_x_radius, randomize_y_radius, randomize_z_radius)
return self.meshes
class SupershapesCohortGenerator(CohortGenerator):
def __init__(self, out_dir):
super().__init__(out_dir)
def generate(self, num_samples=3, randomize_center=True, randomize_rotation=True, m=3, start_id=0, size=20):
self.meshes = Supershapes.generate(num_samples, self.out_dir, randomize_center, randomize_rotation, m, start_id, size)
return self.meshes
class EllipsoidJointsCohortGenerator(CohortGenerator):
def __init__(self,out_dir):
super().__init__(out_dir)
def generate(self, num_samples=3, randomize_center=True, randomize_x_radius=True, randomize_y_radius=True, randomize_z_radius=True,mode_size=False,mode_rotation=True,separation=2):
self.meshes = EllipsoidJoints.generate(num_samples, self.out_dir, randomize_center, randomize_x_radius, randomize_y_radius, randomize_z_radius,mode_size,mode_rotation,separation)
return self.meshes
class Supershapes2DCohortGenerator(CohortGenerator):
def __init__(self, out_dir):
super().__init__(out_dir)
def generate(self, num_samples=3, m=3, n1_degree=4.0, n2_degree=None, n3_degree=None, default_n=5.0, seed=41):
self.contours = Supershapes.generate_2D(num_samples, 250, self.out_dir, m, n1_degree, n2_degree, n3_degree, default_n, seed)
return self.contours
def generate_segmentations(self, randomize_size=True, spacing=[1.0,1.0], allow_on_boundary=True):
if not self.contours:
print("Error: No contours have been generated to get segmentations from.\n Call 'generate' first.")
return
self.segs = CohortGenUtils.generate_2Dsegmentations(self.contours, self.out_dir, randomize_size, spacing, allow_on_boundary)
return self.segs
def generate_images(self, blur_factor=1, foreground_mean=180, foreground_var=30, background_mean=80, background_var=30):
if not self.segs:
print("Error: No segmentations have been generated to get images from.\n Call 'generate_segmentations' first.")
return
self.images = CohortGenUtils.generate_2Dimages(self.segs, self.out_dir, blur_factor, foreground_mean, foreground_var, background_mean, background_var)
return self.images
class ToriCohortGenerator(CohortGenerator):
def __init__(self,out_dir):
super().__init__(out_dir)
def generate(self, num_samples=3, randomize_center=True, randomize_rotation=True, randomize_ring_radius=True, randomize_cross_section_radius=True):
self.meshes = Tori.generate(num_samples, self.out_dir, randomize_center, randomize_rotation, randomize_ring_radius, randomize_cross_section_radius)
return self.meshes
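# Illustrative usage sketch (not part of the original file): a minimal end-to-end run
# of the EllipsoidCohortGenerator defined above. The output directory name is a
# made-up example value, and running it requires the ShapeCohortGen dependencies.
if __name__ == "__main__":
    generator = EllipsoidCohortGenerator("ellipsoid_cohort_output")
    meshes = generator.generate(num_samples=3)          # ellipsoid meshes written to out_dir
    segs = generator.generate_segmentations()           # voxelises the meshes generated above
    images = generator.generate_images(blur_factor=1)   # blurred images built from the segmentations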
| 56.942857
| 181
| 0.804315
|
dae1401ff06dc73ad6961384daaf70da9189d55a
| 2,413
|
py
|
Python
|
tests/test_client.py
|
loretoparisi/stanfordnlp
|
8e3af00671d92898ab0a2eaaba825694ccfb82ca
|
[
"Apache-2.0"
] | 2
|
2019-03-01T15:32:03.000Z
|
2020-01-08T21:46:16.000Z
|
tests/test_client.py
|
loretoparisi/stanfordnlp
|
8e3af00671d92898ab0a2eaaba825694ccfb82ca
|
[
"Apache-2.0"
] | null | null | null |
tests/test_client.py
|
loretoparisi/stanfordnlp
|
8e3af00671d92898ab0a2eaaba825694ccfb82ca
|
[
"Apache-2.0"
] | null | null | null |
"""
Tests that call a running CoreNLPClient.
"""
import stanfordnlp.server as corenlp
TEXT = "Chris wrote a simple sentence that he parsed with Stanford CoreNLP.\n"
def test_connect():
with corenlp.CoreNLPClient() as client:
client.ensure_alive()
assert client.is_active
assert client.is_alive()
def test_annotate():
with corenlp.CoreNLPClient(annotators="tokenize ssplit".split()) as client:
ann = client.annotate(TEXT)
assert corenlp.to_text(ann.sentence[0]) == TEXT[:-1]
def test_update():
with corenlp.CoreNLPClient(annotators="tokenize ssplit".split()) as client:
ann = client.annotate(TEXT)
ann = client.update(ann)
assert corenlp.to_text(ann.sentence[0]) == TEXT[:-1]
def test_tokensregex():
with corenlp.CoreNLPClient(annotators='tokenize ssplit ner depparse'.split(), timeout=60000) as client:
# Example pattern from: https://nlp.stanford.edu/software/tokensregex.shtml
pattern = '([ner: PERSON]+) /wrote/ /an?/ []{0,3} /sentence|article/'
matches = client.tokensregex(TEXT, pattern)
assert len(matches["sentences"]) == 1
assert matches["sentences"][0]["length"] == 1
assert matches == {
"sentences": [{
"0": {
"text": "Chris wrote a simple sentence",
"begin": 0,
"end": 5,
"1": {
"text": "Chris",
"begin": 0,
"end": 1
}},
"length": 1
},]}
def test_semgrex():
with corenlp.CoreNLPClient(annotators='tokenize ssplit pos lemma ner depparse'.split(), timeout=60000) as client:
pattern = '{word:wrote} >nsubj {}=subject >dobj {}=object'
matches = client.semgrex(TEXT, pattern, to_words=True)
assert matches == [
{
"text": "wrote",
"begin": 1,
"end": 2,
"$subject": {
"text": "Chris",
"begin": 0,
"end": 1
},
"$object": {
"text": "sentence",
"begin": 4,
"end": 5
},
"sentence": 0,}]
| 36.560606
| 117
| 0.491504
|
885188e7b914a5da05cce5c86ca5c3d0f8cfd48f
| 115
|
py
|
Python
|
Python/Uri 1930 - Tomadas.py
|
Gui25Reis/URI
|
3df11b4eb27513b336bdff1e56b7707568b249e3
|
[
"MIT"
] | null | null | null |
Python/Uri 1930 - Tomadas.py
|
Gui25Reis/URI
|
3df11b4eb27513b336bdff1e56b7707568b249e3
|
[
"MIT"
] | null | null | null |
Python/Uri 1930 - Tomadas.py
|
Gui25Reis/URI
|
3df11b4eb27513b336bdff1e56b7707568b249e3
|
[
"MIT"
] | null | null | null |
print(sum([int(x) for x in input().split()]) - 3)  # Reads the inputs, sums them and subtracts 3 (3 sockets were used)
| 115
| 115
| 0.66087
|
51885e5882cdfc8a16bd3a583a6dc38b6705e164
| 1,678
|
py
|
Python
|
mindsdb/integrations/trino_handler/tests/test_trino_handler.py
|
mindsdb/main
|
2c7c09a756c17a47f2ff4a38bf45203d706240ee
|
[
"MIT"
] | 261
|
2018-09-28T02:32:17.000Z
|
2018-12-10T06:30:54.000Z
|
mindsdb/integrations/trino_handler/tests/test_trino_handler.py
|
mindsdb/main
|
2c7c09a756c17a47f2ff4a38bf45203d706240ee
|
[
"MIT"
] | 27
|
2018-09-26T08:49:11.000Z
|
2018-12-10T14:42:52.000Z
|
mindsdb/integrations/trino_handler/tests/test_trino_handler.py
|
mindsdb/main
|
2c7c09a756c17a47f2ff4a38bf45203d706240ee
|
[
"MIT"
] | 46
|
2018-10-06T10:11:18.000Z
|
2018-12-10T04:02:17.000Z
|
import unittest
from mindsdb.api.mysql.mysql_proxy.mysql_proxy import RESPONSE_TYPE
from mindsdb.integrations.trino_handler.trino_handler import TrinoHandler
class TrinoHandlerTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.kwargs = {
"host": "qa.analytics.quantum.site.gs.com",
"port": "8090",
"user": "dqsvcuat",
"password": "",
"catalog": "gsam_dev2imddata_elastic",
"schema": "default",
"service_name": "HTTP/qa.analytics.quantum.site.gs.com",
"config_file_name": "test_trino_config.ini"
}
cls.handler = TrinoHandler('test_trino_handler', **cls.kwargs)
def test_0_canary(self):
print('Running canary test')
assert True
print('Canary test ran successfully')
def test_1_check_status(self):
conn_status = self.handler.check_status()
print('Trino connection status: ', conn_status)
assert conn_status.get('success')
def test_2_get_tables(self):
tables = self.handler.get_tables()
assert tables
def test_3_describe_table(self):
described = self.handler.describe_table("axioma_att_2021-12")
assert described['type'] is not RESPONSE_TYPE.ERROR
# TODO: complete tests implementation
# def test_3_get_views(self):
# views = self.handler.get_views()
# assert views['type'] is not RESPONSE_TYPE.ERROR
#
# def test_4_select_query(self):
# query = "SELECT * FROM data.test_mdb WHERE 'id'='1'"
# result = self.handler.query(query)
# assert result['type'] is RESPONSE_TYPE.TABLE
#
| 32.901961
| 73
| 0.64124
|
66d46ad775615b295a293f451ac53628a65f6cd7
| 2,365
|
py
|
Python
|
venv/Lib/site-packages/pyrogram/raw/types/input_user.py
|
iamgeorgiy/heroku-userbot
|
5a92417d16f8ead949d88cb38da213fc2da5d3a4
|
[
"Apache-2.0"
] | null | null | null |
venv/Lib/site-packages/pyrogram/raw/types/input_user.py
|
iamgeorgiy/heroku-userbot
|
5a92417d16f8ead949d88cb38da213fc2da5d3a4
|
[
"Apache-2.0"
] | null | null | null |
venv/Lib/site-packages/pyrogram/raw/types/input_user.py
|
iamgeorgiy/heroku-userbot
|
5a92417d16f8ead949d88cb38da213fc2da5d3a4
|
[
"Apache-2.0"
] | null | null | null |
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2020 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class InputUser(TLObject): # type: ignore
"""This object is a constructor of the base type :obj:`~pyrogram.raw.base.InputUser`.
Details:
- Layer: ``117``
- ID: ``0xd8292816``
Parameters:
user_id: ``int`` ``32-bit``
access_hash: ``int`` ``64-bit``
"""
__slots__: List[str] = ["user_id", "access_hash"]
ID = 0xd8292816
QUALNAME = "types.InputUser"
def __init__(self, *, user_id: int, access_hash: int) -> None:
self.user_id = user_id # int
self.access_hash = access_hash # long
@staticmethod
def read(data: BytesIO, *args: Any) -> "InputUser":
# No flags
user_id = Int.read(data)
access_hash = Long.read(data)
return InputUser(user_id=user_id, access_hash=access_hash)
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
# No flags
data.write(Int(self.user_id))
data.write(Long(self.access_hash))
return data.getvalue()
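# Illustrative usage sketch (not part of the generated file): round-tripping an
# InputUser. The numeric IDs are made-up example values; raw[4:] skips the 4-byte
# constructor ID that write() emits before the user_id/access_hash fields.
#   raw = InputUser(user_id=123456789, access_hash=987654321).write()
#   InputUser.read(BytesIO(raw[4:])).user_id   # -> 123456789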
| 31.533333
| 103
| 0.615645
|
12c1cdf222d0466ecc48e7e292558364bb5dac6c
| 561
|
py
|
Python
|
process_presentations.py
|
SmartPhoenix/Persistent-Kingdoms
|
772574471279825b43788c87df4eab44444912a3
|
[
"Unlicense"
] | 3
|
2018-04-13T15:45:21.000Z
|
2018-12-16T16:50:37.000Z
|
process_presentations.py
|
SmartPhoenix/Persistent-Kingdoms
|
772574471279825b43788c87df4eab44444912a3
|
[
"Unlicense"
] | 249
|
2018-03-26T14:04:11.000Z
|
2020-04-13T22:32:36.000Z
|
process_presentations.py
|
SmartPhoenix/Persistent-Kingdoms
|
772574471279825b43788c87df4eab44444912a3
|
[
"Unlicense"
] | 11
|
2018-04-10T15:56:01.000Z
|
2019-12-10T21:39:32.000Z
|
import process_operations as po
import module_presentations
def process_entry(processor, txt_file, entry, index):
output_list = ["prsnt_%s %d %d " % (entry[0], entry[1], processor.process_id(entry[2], "mesh"))]
output_list.extend(processor.process_triggers(entry[3], entry[0]))
output_list.append("\r\n\r\n")
txt_file.write("".join(output_list))
export = po.make_export(data=module_presentations.presentations, data_name="presentations", tag="prsnt",
header_format="presentationsfile version 1\r\n %d\r\n", process_entry=process_entry)
| 46.75
| 105
| 0.741533
|
17357b6569b9655f2f9bb985b2dd37f76d4fb8e8
| 303
|
gyp
|
Python
|
binding.gyp
|
Himujjal/tree-sitter-sveltealt
|
d16c4b74df8a8e8cfb13a94ca2eb75f4061dc93b
|
[
"MIT"
] | null | null | null |
binding.gyp
|
Himujjal/tree-sitter-sveltealt
|
d16c4b74df8a8e8cfb13a94ca2eb75f4061dc93b
|
[
"MIT"
] | null | null | null |
binding.gyp
|
Himujjal/tree-sitter-sveltealt
|
d16c4b74df8a8e8cfb13a94ca2eb75f4061dc93b
|
[
"MIT"
] | null | null | null |
{
"targets": [
{
"target_name": "tree_sitter_sveltealt_binding",
"include_dirs": [
"<!(node -e \"require('nan')\")",
"src"
],
"sources": [
"src/parser.c",
"src/binding.cc"
],
"cflags_c": [
"-std=c99",
]
}
]
}
| 15.947368
| 53
| 0.39604
|
f836c1ad8b37af676cfaa0df9ef6edd9953d2e7a
| 1,684
|
py
|
Python
|
bot/power_device.py
|
KapJI/moonraker-telegram-bot
|
fce9b572cd3b10bf3d445cf677d0ce8b83155d24
|
[
"CC0-1.0"
] | null | null | null |
bot/power_device.py
|
KapJI/moonraker-telegram-bot
|
fce9b572cd3b10bf3d445cf677d0ce8b83155d24
|
[
"CC0-1.0"
] | null | null | null |
bot/power_device.py
|
KapJI/moonraker-telegram-bot
|
fce9b572cd3b10bf3d445cf677d0ce8b83155d24
|
[
"CC0-1.0"
] | null | null | null |
import logging
import threading
import requests
logger = logging.getLogger(__name__)
class PowerDevice(object):
def __new__(cls, name: str, moonraker_host: str):
if name:
return super(PowerDevice, cls).__new__(cls)
else:
return None
def __init__(self, name: str, moonraker_host: str):
self.name: str = name
self._moonraker_host = moonraker_host
self._state_lock = threading.Lock()
self._device_on: bool = False
@property
def device_state(self) -> bool:
with self._state_lock:
return self._device_on
@device_state.setter
def device_state(self, state: bool):
with self._state_lock:
self._device_on = state
def toggle_device(self) -> bool:
return self.switch_device(not self.device_state)
# Fixme: add auth params
# Todo: return exception?
def switch_device(self, state: bool) -> bool:
with self._state_lock:
if state:
res = requests.post(f"http://{self._moonraker_host}/machine/device_power/device?device={self.name}&action=on")
if res.ok:
self._device_on = True
return True
else:
logger.error(f'Power device switch failed: {res.reason}')
else:
res = requests.post(f"http://{self._moonraker_host}/machine/device_power/device?device={self.name}&action=off")
if res.ok:
self._device_on = False
return False
else:
logger.error(f'Power device switch failed: {res.reason}')
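# Illustrative usage sketch (not part of the original module): switching a plug on,
# assuming a reachable Moonraker instance; the device name and host are made-up values.
if __name__ == "__main__":
    plug = PowerDevice("printer_plug", "localhost:7125")
    if plug is not None:              # __new__ returns None when the name is empty
        plug.switch_device(True)      # POSTs .../machine/device_power/device?device=printer_plug&action=on
        print(plug.device_state)      # True if the request succeeded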
| 31.773585
| 127
| 0.584917
|
c2a3ec07dfbfd8dcdb23a915c2fb51f470de3b18
| 2,239
|
py
|
Python
|
testing/watchdog/examplefiles/dylansawesome/app/views.py
|
zinglax/SPA-BoilerPlate2017
|
b3a22a828546d97589093a7e2abd2a37a7785959
|
[
"MIT"
] | 1
|
2017-03-18T15:34:26.000Z
|
2017-03-18T15:34:26.000Z
|
testing/watchdog/examplefiles/dylansawesome/app/views.py
|
zinglax/SPA-BoilerPlate2017
|
b3a22a828546d97589093a7e2abd2a37a7785959
|
[
"MIT"
] | null | null | null |
testing/watchdog/examplefiles/dylansawesome/app/views.py
|
zinglax/SPA-BoilerPlate2017
|
b3a22a828546d97589093a7e2abd2a37a7785959
|
[
"MIT"
] | null | null | null |
import os
from app import app
from flask import render_template, request, jsonify, url_for, flash, redirect
import json
import jinja2
script_args = {}
@app.route('/', methods=['GET', 'POST'])
def page_index():
page_args = script_args.copy()
# AJAX Action Occurs.
if request.method == 'POST' and 'action' in request.get_json():
return process_ajax_action(request, page_args=page_args)
return render_template('./index.html', **page_args)
def process_ajax_action(request, **kwargs):
"""AJAX Action Occurs. Process the specific action & return JSON response.
"""
print(request.get_json()['action'])
if 'page_args' in kwargs:
# Values common to the specific page.
page_args = kwargs['page_args']
# Actions
# ==========================================================================
if request.get_json()['action'] == "init":
'''init
'''
contents_html = render_html_from_action('init', {})
return json.dumps({'status': 'OK', "init": contents_html})
# No action found
return json.dumps({'status': 'OK',
'message':
'No action for ' + request.get_json()['action']})
def render_html_from_action(action, data):
"""Render HTML For an Action.
Args:
action (String): name of the action (for the template file name).
data (List): Data passed to the template.
Returns:
String: Rendered HTML.
"""
action_templates = os.path.join(app.config['TEMPLATES_DIR'], 'actions')
template_dirs = [x[0] for x in os.walk(action_templates)]
jinja_env = create_jinja2_env(template_dirs=template_dirs)
print(action_templates)
print(template_dirs)
# app.logger.info(data)
return jinja_env.get_template("%s.jinja" % action).render(data=data)
def create_jinja2_env(**kwargs):
"""A jinja2 Environment with templates loaded."""
print("JINJA2 ENV CREATED")
if "template_dirs" in kwargs:
print("TEMPLATE DIRS: " + str(kwargs["template_dirs"]))
template_loader = jinja2.FileSystemLoader(kwargs["template_dirs"])
template_env = jinja2.Environment(loader=template_loader)
return template_env
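# Illustrative usage sketch (not part of the original module): exercising the "init"
# action through Flask's test client, assuming the app package sets TEMPLATES_DIR and
# provides an actions/init.jinja template.
if __name__ == "__main__":
    with app.test_client() as client:
        resp = client.post("/", json={"action": "init"})
        print(resp.get_data(as_text=True))   # -> {"status": "OK", "init": "<rendered html>"}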
| 28.705128
| 80
| 0.629299
|
97c5287b9bbb567f4dfecf6d13579e5990aa82fc
| 3,649
|
py
|
Python
|
web_server/parsers/WIP/RIA.py
|
yutkin/News-Aggregator
|
b35b2cdd873121aab03cb14c191b2a3b4d3d5180
|
[
"MIT"
] | 17
|
2017-05-09T13:03:21.000Z
|
2022-01-08T18:32:01.000Z
|
web_server/parsers/WIP/RIA.py
|
uav-profile/News-Aggregator
|
b35b2cdd873121aab03cb14c191b2a3b4d3d5180
|
[
"MIT"
] | null | null | null |
web_server/parsers/WIP/RIA.py
|
uav-profile/News-Aggregator
|
b35b2cdd873121aab03cb14c191b2a3b4d3d5180
|
[
"MIT"
] | 6
|
2018-04-23T03:28:33.000Z
|
2021-04-02T06:29:23.000Z
|
import urllib
import urllib.request
import re
from datetime import datetime
from bs4 import BeautifulSoup
from pprint import pprint
from BaseParser import BaseParser
class RIA(BaseParser):
"""docstring for RIA"""
def __init__(self):
super(RIA, self).__init__(
'RIA', 'https://ria.ru',
'https://ria.ru/archive/more.html')
def get_news(self, start_time=datetime.now(),
until_time=datetime(2004, 1, 2),
news_count=None, topic_filter=None):
last_time = start_time
last_news_ID = ''
done = False
count_ = 0
while not done:
params = {'id': 0, 'date': self._time_to_str(last_time)}
for news in self.parse_page(params):
if ((news_count is not None and count_ > news_count) or
(until_time is not None and last_time < until_time)):
done = True
print("END OF PARSING " + str(start_time)
+ " -- " + str(last_time))
break
if (news is None): continue
if (topic_filter is None or news['topic'] in topic_filter):
last_time = news['date']
if last_news_ID == news['url']:
continue
last_news_ID = news['url']
count_ += 1
yield news
# print('--------------------------------------------------\
# END OF PAGE\
# --------------------------------------------------')
def parse_page(self, params):
params_str = urllib.parse.urlencode(params)
request_url = self.api_url + '?' + params_str
html = self._get_html(request_url)
if html is None:
yield None
for item in html.find_all('div', 'b-list__item'):
try:
title = item.a.find('span', 'b-list__item-title').text
url = item.a['href']
date = (item.find('div', 'b-list__item-time').text
+ ' ' + item.find('div', 'b-list__item-date').text)
popularity = item.find('span', 'b-statistic__item m-views').text
text = self.get_news_text(self.root_url + url)
topic = re.match(r'^\/([A-z0-9]+)\/', url).group(1)
if text is None:
continue
news = {
'site_id': self.id,
'title': title,
'url': self.root_url + url,
'date': self._str_to_time(date),
'topic': topic,
'text': text,
'popularity': popularity,
'tags': ''
}
yield news
except Exception as e:
print(e)
yield None
def get_news_text(self, news_url):
html = self._get_html(news_url)
if html is None:
return None
text_element = html.find('div', 'b-article__body')
if text_element is None:
return None
for div in text_element.find_all('div'):
div.decompose()
return text_element.text
def _time_to_str(self, time):
return time.strftime('%Y%m%dT%H%M%S')
def _str_to_time(self, time_str):
return datetime.strptime(time_str, '%H:%M %d.%m.%Y')
def main():
parser = RIA()
for news in parser.get_news():
print(news['title'], news['popularity'])
# pass
if __name__ == '__main__':
main()
| 36.49
| 80
| 0.479035
|
13ceaac1e1fa7562c7ca27aa2f3ed706d7a653c2
| 9,476
|
py
|
Python
|
colour/models/cie_lab.py
|
aurelienpierre/colour
|
3ac45c12fbc0493e49ba4d4b2cb253df9fe14c47
|
[
"BSD-3-Clause"
] | null | null | null |
colour/models/cie_lab.py
|
aurelienpierre/colour
|
3ac45c12fbc0493e49ba4d4b2cb253df9fe14c47
|
[
"BSD-3-Clause"
] | null | null | null |
colour/models/cie_lab.py
|
aurelienpierre/colour
|
3ac45c12fbc0493e49ba4d4b2cb253df9fe14c47
|
[
"BSD-3-Clause"
] | null | null | null |
"""
CIE L*a*b* Colourspace
======================
Defines the *CIE L\\*a\\*b\\** colourspace transformations:
- :func:`colour.XYZ_to_Lab`
- :func:`colour.Lab_to_XYZ`
- :func:`colour.Lab_to_LCHab`
- :func:`colour.LCHab_to_Lab`
References
----------
- :cite:`CIETC1-482004m` : CIE TC 1-48. (2004). CIE 1976 uniform colour
spaces. In CIE 015:2004 Colorimetry, 3rd Edition (p. 24).
ISBN:978-3-901906-33-6
"""
from __future__ import annotations
from colour.colorimetry import (
CCS_ILLUMINANTS,
intermediate_lightness_function_CIE1976,
intermediate_luminance_function_CIE1976,
)
from colour.hints import ArrayLike, NDArray
from colour.models import xy_to_xyY, xyY_to_XYZ, Jab_to_JCh, JCh_to_Jab
from colour.utilities import (
from_range_1,
from_range_100,
to_domain_1,
to_domain_100,
tsplit,
tstack,
)
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"XYZ_to_Lab",
"Lab_to_XYZ",
"Lab_to_LCHab",
"LCHab_to_Lab",
]
def XYZ_to_Lab(
XYZ: ArrayLike,
illuminant: ArrayLike = CCS_ILLUMINANTS[
"CIE 1931 2 Degree Standard Observer"
]["D65"],
) -> NDArray:
"""
Convert from *CIE XYZ* tristimulus values to *CIE L\\*a\\*b\\**
colourspace.
Parameters
----------
XYZ
*CIE XYZ* tristimulus values.
illuminant
Reference *illuminant* *CIE xy* chromaticity coordinates or *CIE xyY*
colourspace array.
Returns
-------
:class:`numpy.ndarray`
*CIE L\\*a\\*b\\** colourspace array.
Notes
-----
+----------------+-----------------------+-----------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+================+=======================+=================+
| ``XYZ`` | [0, 1] | [0, 1] |
+----------------+-----------------------+-----------------+
| ``illuminant`` | [0, 1] | [0, 1] |
+----------------+-----------------------+-----------------+
+----------------+-----------------------+-----------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+================+=======================+=================+
| ``Lab`` | ``L`` : [0, 100] | ``L`` : [0, 1] |
| | | |
| | ``a`` : [-100, 100] | ``a`` : [-1, 1] |
| | | |
| | ``b`` : [-100, 100] | ``b`` : [-1, 1] |
+----------------+-----------------------+-----------------+
References
----------
:cite:`CIETC1-482004m`
Examples
--------
>>> import numpy as np
>>> XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
>>> XYZ_to_Lab(XYZ) # doctest: +ELLIPSIS
array([ 41.5278752..., 52.6385830..., 26.9231792...])
"""
X, Y, Z = tsplit(to_domain_1(XYZ))
X_n, Y_n, Z_n = tsplit(xyY_to_XYZ(xy_to_xyY(illuminant)))
f_X_X_n = intermediate_lightness_function_CIE1976(X, X_n)
f_Y_Y_n = intermediate_lightness_function_CIE1976(Y, Y_n)
f_Z_Z_n = intermediate_lightness_function_CIE1976(Z, Z_n)
L = 116 * f_Y_Y_n - 16
a = 500 * (f_X_X_n - f_Y_Y_n)
b = 200 * (f_Y_Y_n - f_Z_Z_n)
Lab = tstack([L, a, b])
return from_range_100(Lab)
def Lab_to_XYZ(
Lab: ArrayLike,
illuminant: ArrayLike = CCS_ILLUMINANTS[
"CIE 1931 2 Degree Standard Observer"
]["D65"],
) -> NDArray:
"""
Convert from *CIE L\\*a\\*b\\** colourspace to *CIE XYZ* tristimulus
values.
Parameters
----------
Lab
*CIE L\\*a\\*b\\** colourspace array.
illuminant
Reference *illuminant* *CIE xy* chromaticity coordinates or *CIE xyY*
colourspace array.
Returns
-------
:class:`numpy.ndarray`
*CIE XYZ* tristimulus values.
Notes
-----
+----------------+-----------------------+-----------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+================+=======================+=================+
| ``Lab`` | ``L`` : [0, 100] | ``L`` : [0, 1] |
| | | |
| | ``a`` : [-100, 100] | ``a`` : [-1, 1] |
| | | |
| | ``b`` : [-100, 100] | ``b`` : [-1, 1] |
+----------------+-----------------------+-----------------+
| ``illuminant`` | [0, 1] | [0, 1] |
+----------------+-----------------------+-----------------+
+----------------+-----------------------+-----------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+================+=======================+=================+
| ``XYZ`` | [0, 1] | [0, 1] |
+----------------+-----------------------+-----------------+
References
----------
:cite:`CIETC1-482004m`
Examples
--------
>>> import numpy as np
>>> Lab = np.array([41.52787529, 52.63858304, 26.92317922])
>>> Lab_to_XYZ(Lab) # doctest: +ELLIPSIS
array([ 0.2065400..., 0.1219722..., 0.0513695...])
"""
L, a, b = tsplit(to_domain_100(Lab))
X_n, Y_n, Z_n = tsplit(xyY_to_XYZ(xy_to_xyY(illuminant)))
f_Y_Y_n = (L + 16) / 116
f_X_X_n = a / 500 + f_Y_Y_n
f_Z_Z_n = f_Y_Y_n - b / 200
X = intermediate_luminance_function_CIE1976(f_X_X_n, X_n)
Y = intermediate_luminance_function_CIE1976(f_Y_Y_n, Y_n)
Z = intermediate_luminance_function_CIE1976(f_Z_Z_n, Z_n)
XYZ = tstack([X, Y, Z])
return from_range_1(XYZ)
def Lab_to_LCHab(Lab: ArrayLike) -> NDArray:
"""
Convert from *CIE L\\*a\\*b\\** colourspace to *CIE L\\*C\\*Hab*
colourspace.
Parameters
----------
Lab
*CIE L\\*a\\*b\\** colourspace array.
Returns
-------
:class:`numpy.ndarray`
*CIE L\\*C\\*Hab* colourspace array.
Notes
-----
+------------+-----------------------+-----------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+=================+
| ``Lab`` | ``L`` : [0, 100] | ``L`` : [0, 1] |
| | | |
| | ``a`` : [-100, 100] | ``a`` : [-1, 1] |
| | | |
| | ``b`` : [-100, 100] | ``b`` : [-1, 1] |
+------------+-----------------------+-----------------+
+------------+-----------------------+------------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+==================+
| ``LCHab`` | ``L`` : [0, 100] | ``L`` : [0, 1] |
| | | |
| | ``C`` : [0, 100] | ``C`` : [0, 1] |
| | | |
| | ``Hab`` : [0, 360] | ``Hab`` : [0, 1] |
+------------+-----------------------+------------------+
References
----------
:cite:`CIETC1-482004m`
Examples
--------
>>> import numpy as np
>>> Lab = np.array([41.52787529, 52.63858304, 26.92317922])
>>> Lab_to_LCHab(Lab) # doctest: +ELLIPSIS
array([ 41.5278752..., 59.1242590..., 27.0884878...])
"""
return Jab_to_JCh(Lab)
def LCHab_to_Lab(LCHab: ArrayLike) -> NDArray:
"""
Convert from *CIE L\\*C\\*Hab* colourspace to *CIE L\\*a\\*b\\**
colourspace.
Parameters
----------
LCHab
*CIE L\\*C\\*Hab* colourspace array.
Returns
-------
:class:`numpy.ndarray`
*CIE L\\*a\\*b\\** colourspace array.
Notes
-----
+-------------+-----------------------+------------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+=============+=======================+==================+
| ``LCHab`` | ``L`` : [0, 100] | ``L`` : [0, 1] |
| | | |
| | ``C`` : [0, 100] | ``C`` : [0, 1] |
| | | |
| | ``Hab`` : [0, 360] | ``Hab`` : [0, 1] |
+-------------+-----------------------+------------------+
+-------------+-----------------------+-----------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+=============+=======================+=================+
| ``Lab`` | ``L`` : [0, 100] | ``L`` : [0, 1] |
| | | |
| | ``a`` : [-100, 100] | ``a`` : [-1, 1] |
| | | |
| | ``b`` : [-100, 100] | ``b`` : [-1, 1] |
+-------------+-----------------------+-----------------+
References
----------
:cite:`CIETC1-482004m`
Examples
--------
>>> import numpy as np
>>> LCHab = np.array([41.52787529, 59.12425901, 27.08848784])
>>> LCHab_to_Lab(LCHab) # doctest: +ELLIPSIS
array([ 41.5278752..., 52.6385830..., 26.9231792...])
"""
return JCh_to_Jab(LCHab)
| 31.905724
| 78
| 0.372837
|
1cfe0fad76ab6e7281946bb8830a350c7f9a2b33
| 1,047
|
py
|
Python
|
blast_parser_grep_high_evalues.py
|
chnops/code
|
cd48843ed7b7239fdb85bff66f02510a7c7899b5
|
[
"MIT"
] | null | null | null |
blast_parser_grep_high_evalues.py
|
chnops/code
|
cd48843ed7b7239fdb85bff66f02510a7c7899b5
|
[
"MIT"
] | null | null | null |
blast_parser_grep_high_evalues.py
|
chnops/code
|
cd48843ed7b7239fdb85bff66f02510a7c7899b5
|
[
"MIT"
] | 1
|
2020-10-19T02:22:06.000Z
|
2020-10-19T02:22:06.000Z
|
#! /usr/bin/env python
#Parser for blast -m 8 or blast+ -m 6 and 7 output file with description inserted
import sys
##Query id,Subject id,Description, % identity, alignment length,mismatches,gap openings,q. start,q. end,s. start,s. end,e-value,bit score
dict1={}
for line in open(sys.argv[1]):
data = line.split('\t')
# print(data)
query = data[0]
hit = data[1]
#print hit
hit_desc = data[2]
#print hit_desc
identity = float(data[3])
length = int(data[4])
q_start = int(data[7])
q_end = int(data[8])
e_value = float(data[-2])
bit_score = float(data[-1])
if e_value > 1e-5:
        if query in dict1:
dict1[query].append([hit, hit_desc, identity, length, q_start, q_end, e_value, bit_score])
else:
dict1[query] = [[hit, hit_desc, identity, length, q_start, q_end, e_value, bit_score]]
                print('%s\t%s\t%s' % (query, dict1[query][0][3], dict1[query][0][-2]))
| 36.103448
| 137
| 0.567335
|
41c13dc6e8dac3bf4c33b2f57ef32472bc8d8795
| 2,193
|
py
|
Python
|
Zhihu/dir.py
|
gongjunhuang/Spider
|
c683137dafac9c7f4afd359baf9d0717d1a127e2
|
[
"Apache-2.0"
] | 1
|
2018-02-26T15:45:17.000Z
|
2018-02-26T15:45:17.000Z
|
Zhihu/dir.py
|
gongjunhuang/Spider
|
c683137dafac9c7f4afd359baf9d0717d1a127e2
|
[
"Apache-2.0"
] | null | null | null |
Zhihu/dir.py
|
gongjunhuang/Spider
|
c683137dafac9c7f4afd359baf9d0717d1a127e2
|
[
"Apache-2.0"
] | null | null | null |
from numpy import *
import os
import operator
def img2vector(filename):
returnVect = zeros((1, 1024))
fr = open(filename)
for i in range(32):
lineStr = fr.readline()
for j in range(32):
returnVect[0, 32*i+j] = int(lineStr[j])
return returnVect
def classify(inX, dataSet, labels, k):
dataSetSize = dataSet.shape[0]
    diffMat = tile(inX, (dataSetSize, 1)) - dataSet  # tile the query vector to the training-set shape and take the difference
sqDiffMat = diffMat ** 2
sqDistance = sqDiffMat.sum(axis=1)
distances = sqDistance ** 0.5
sortedDistIndices = distances.argsort()
classCount = {}
for i in range(k):
voteIlabel = labels[sortedDistIndices[i]]
classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
return sortedClassCount[0][0]
def handwritingClassTest():
hwLabels = []
trainingFileList = os.listdir('E:\\BaiduNetdiskDownload\\源码\\第7周\\traindata')
m = len(trainingFileList)
trainingMat = zeros((m, 1024))
for i in range(m):
filenameStr = trainingFileList[i]
fileStr = filenameStr.split('.')[0]
classNumStr = int(fileStr.split('_')[0])
hwLabels.append(classNumStr)
trainingMat[i, :] = img2vector("E:\\BaiduNetdiskDownload\\源码\\第7周\\traindata\\%s" % filenameStr)
testFileList = os.listdir('E:\\BaiduNetdiskDownload\\源码\\第7周\\testdata')
errorCount = 0.0
mTest = len(testFileList)
for i in range(mTest):
filenameStr = testFileList[i]
fileStr = filenameStr.split('.')[0]
classNumStr = int(fileStr.split('_')[0])
vecUnderTest = img2vector("E:\\BaiduNetdiskDownload\\源码\\第7周\\testdata\\%s" % filenameStr)
classifierResult = classify(vecUnderTest, trainingMat, hwLabels, 4)
print('the classifier came back with: %d, the real answer is: %d' %(classifierResult, classNumStr))
if classifierResult != classNumStr:
errorCount += 1
print('\nthe total number of errors is: %d\n' % errorCount)
print('the total error rate is: %f'% (errorCount / float(mTest)))
if __name__ == '__main__':
handwritingClassTest()
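# Illustrative sketch (not part of the original script): classify() on a toy 2-D
# dataset, independent of the hard-coded handwriting data paths above.
#   classify(array([0.9, 0.9]), array([[0, 0], [1, 1], [1, 0]]), ['A', 'B', 'B'], 2)
# The two nearest neighbours of (0.9, 0.9) are (1, 1) and (1, 0), both labelled 'B',
# so the call returns 'B'.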
| 39.872727
| 107
| 0.653443
|
c22477c87be326981e07496b9a5b69b0c2e240c0
| 2,037
|
py
|
Python
|
tests/unit/python/__template__.py
|
DDC-NDRS/fledge-iot_fledge
|
27a5e66a55daaab1aca14ce6e66f9f1e6efaef51
|
[
"Apache-2.0"
] | 69
|
2019-12-03T17:54:33.000Z
|
2022-03-13T07:05:23.000Z
|
tests/unit/python/__template__.py
|
DDC-NDRS/fledge-iot_fledge
|
27a5e66a55daaab1aca14ce6e66f9f1e6efaef51
|
[
"Apache-2.0"
] | 125
|
2020-02-13T15:11:28.000Z
|
2022-03-29T14:42:36.000Z
|
tests/unit/python/__template__.py
|
DDC-NDRS/fledge-iot_fledge
|
27a5e66a55daaab1aca14ce6e66f9f1e6efaef51
|
[
"Apache-2.0"
] | 24
|
2019-12-27T07:48:45.000Z
|
2022-03-13T07:05:28.000Z
|
# -*- coding: utf-8 -*-
# FLEDGE_BEGIN
# See: http://fledge-iot.readthedocs.io/
# FLEDGE_END
"""Example of docstring of test purpose"""
# package imports, utilities that will be used for running this module., e.g:
import pytest
from unittest import mock
from unittest.mock import patch
# Fledge imports
# from fledge.common.storage_client.payload_builder import PayloadBuilder
__author__ = "${FULL_NAME}"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
@pytest.allure.feature("unit")
@pytest.allure.story("test_module")
class UnitTestTemplateClass:
"""
Example of docstring of Test Class. This class organises the unit tests of test_module
"""
@pytest.fixture(scope="", params="", autouse=False, ids=None, name=None)
def _module_fixture(self):
"""Test fixtures that is specific for this class. This fixture can be used with any test definition"""
pass
@pytest.mark.parametrize("input, expected", [
("input_data1", "expected_result_1"),
("input_data1", "expected_result_2")
])
def test_some_unit(self, _module_fixture, input, expected):
"""Purpose of the test, This test is called twice with different test inputs and expected values.
"""
# assertions to verify that the actual output after running a code block is equal to the expected output
# Use test doubles (like mocks and patch) to remove dependencies on the external services/code referred in your function under test
mock_dependency = mock.MagicMock()
with patch.object(mock_dependency, 'some_method', return_value='bla'):
actual = None
# actual = code_under_test(input)
assert expected == actual
def test_other_unit_component(self, _module_fixture):
"""Purpose of the test, This test is called once.
"""
# assertions to verify that the actual output of a component is equal to the expected output
assert "expected" == "actual"
| 36.375
| 139
| 0.694158
|
dd835a698819ae141c0b549bfa25908887906c01
| 11,008
|
py
|
Python
|
aea/cli/publish.py
|
bryanchriswhite/agents-aea
|
d3f177a963eb855d9528555167255bf2b478f4ba
|
[
"Apache-2.0"
] | 126
|
2019-09-07T09:32:44.000Z
|
2022-03-29T14:28:41.000Z
|
aea/cli/publish.py
|
salman6049/agents-aea
|
d3f177a963eb855d9528555167255bf2b478f4ba
|
[
"Apache-2.0"
] | 1,814
|
2019-08-24T10:08:07.000Z
|
2022-03-31T14:28:36.000Z
|
aea/cli/publish.py
|
salman6049/agents-aea
|
d3f177a963eb855d9528555167255bf2b478f4ba
|
[
"Apache-2.0"
] | 46
|
2019-09-03T22:13:58.000Z
|
2022-03-22T01:25:16.000Z
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Implementation of the 'aea publish' subcommand."""
import os
from abc import ABC, abstractmethod
from contextlib import suppress
from pathlib import Path
from shutil import copyfile
from typing import cast
import click
from aea.cli.push import _save_item_locally as _push_item_locally
from aea.cli.registry.publish import publish_agent
from aea.cli.registry.push import push_item as _push_item_remote
from aea.cli.registry.utils import get_package_meta
from aea.cli.utils.click_utils import registry_flag
from aea.cli.utils.config import validate_item_config
from aea.cli.utils.context import Context
from aea.cli.utils.decorators import check_aea_project
from aea.cli.utils.exceptions import AEAConfigException
from aea.cli.utils.package_utils import (
try_get_item_source_path,
try_get_item_target_path,
)
from aea.configurations.base import AgentConfig, CRUDCollection, PublicId
from aea.configurations.constants import (
AGENT,
AGENTS,
CONNECTIONS,
CONTRACTS,
DEFAULT_AEA_CONFIG_FILE,
ITEM_TYPE_PLURAL_TO_TYPE,
PROTOCOLS,
SKILLS,
)
PUSH_ITEMS_FLAG = "--push-missing"
@click.command(name="publish")
@registry_flag(
help_local="For publishing agent to local folder.",
help_remote="For publishing agent to remote registry.",
)
@click.option(
"--push-missing", is_flag=True, help="Push missing components to registry."
)
@click.pass_context
@check_aea_project
def publish(
click_context: click.Context, local: bool, remote: bool, push_missing: bool
) -> None: # pylint: disable=unused-argument
"""Publish the agent to the registry."""
ctx = cast(Context, click_context.obj)
_validate_pkp(ctx.agent_config.private_key_paths)
_validate_config(ctx)
if remote:
_publish_agent_remote(ctx, push_missing=push_missing)
else:
_save_agent_locally(
ctx, is_mixed=not local and not remote, push_missing=push_missing
)
def _validate_config(ctx: Context) -> None:
"""
Validate agent config.
:param ctx: Context object.
:raises ClickException: if validation is failed.
"""
try:
validate_item_config(AGENT, Path(ctx.cwd))
except AEAConfigException as e: # pragma: no cover
raise click.ClickException("Failed to validate agent config. {}".format(str(e)))
def _validate_pkp(private_key_paths: CRUDCollection) -> None:
"""
    Prevent publishing agents with non-empty private_key_paths.
:param private_key_paths: private_key_paths from agent config.
:raises ClickException: if private_key_paths is not empty.
"""
if private_key_paths.read_all() != []:
raise click.ClickException(
"You are not allowed to publish agents with non-empty private_key_paths. Use the `aea remove-key` command to remove key paths from `private_key_paths: {}` in `aea-config.yaml`."
)
class BaseRegistry(ABC):
"""Base registry class."""
@abstractmethod
def check_item_present(self, item_type_plural: str, public_id: PublicId) -> None:
"""
Check item present in registry.
Raise ClickException if not found.
:param item_type_plural: str, item type.
:param public_id: PublicId of the item to check.
:return: None
"""
@abstractmethod
def push_item(self, item_type_plural: str, public_id: PublicId) -> None:
"""
Push item to registry.
:param item_type_plural: str, item type.
:param public_id: PublicId of the item to check.
:return: None
"""
def check_item_present_and_push(
self, item_type_plural: str, public_id: PublicId
) -> None:
"""
Check item present in registry and push if needed.
Raise ClickException if not found.
:param item_type_plural: str, item type.
:param public_id: PublicId of the item to check.
:return: None
"""
with suppress(click.ClickException):
return self.check_item_present(item_type_plural, public_id)
try:
self.push_item(item_type_plural, public_id)
except Exception as e:
raise click.ClickException(
f"Failed to push missing item: {item_type_plural} {public_id}: {e}"
) from e
try:
self.check_item_present(item_type_plural, public_id)
except Exception as e:
raise click.ClickException(
f"Failed to find item after push: {item_type_plural} {public_id}: {e}"
) from e
class LocalRegistry(BaseRegistry):
"""Local directory registry."""
def __init__(self, ctx: Context):
"""Init registry."""
self.ctx = ctx
try:
self.registry_path = ctx.registry_path
except ValueError as e: # pragma: nocover
raise click.ClickException(str(e))
def check_item_present(self, item_type_plural: str, public_id: PublicId) -> None:
"""
Check item present in registry.
Raise ClickException if not found.
:param item_type_plural: str, item type.
:param public_id: PublicId of the item to check.
"""
try:
try_get_item_source_path(
self.registry_path, public_id.author, item_type_plural, public_id.name
)
except click.ClickException as e:
raise click.ClickException(
f"Dependency is missing. {str(e)}\nPlease push it first and then retry or use {PUSH_ITEMS_FLAG} flag to push automatically."
)
def push_item(self, item_type_plural: str, public_id: PublicId) -> None:
"""
Push item to registry.
:param item_type_plural: str, item type.
:param public_id: PublicId of the item to check.
"""
item_type = ITEM_TYPE_PLURAL_TO_TYPE[item_type_plural]
_push_item_locally(self.ctx, item_type, public_id)
class MixedRegistry(LocalRegistry):
"""Mixed remote and local component registry."""
def check_item_present(self, item_type_plural: str, public_id: PublicId) -> None:
"""
Check item present in registry.
Raise ClickException if not found.
:param item_type_plural: str, item type.
:param public_id: PublicId of the item to check.
"""
item_type = ITEM_TYPE_PLURAL_TO_TYPE[item_type_plural]
try:
LocalRegistry.check_item_present(self, item_type_plural, public_id)
except click.ClickException:
click.echo(
f"Can not find dependency locally: {item_type} {public_id}. Trying remote registry..."
)
try:
RemoteRegistry(self.ctx).check_item_present(item_type_plural, public_id)
except click.ClickException:
raise click.ClickException(
f"Can not find dependency locally or remotely: {item_type} {public_id}. Try to add flag `{PUSH_ITEMS_FLAG}` to push dependency package to the registry."
)
class RemoteRegistry(BaseRegistry):
"""Remote components registry."""
def __init__(self, ctx: Context) -> None:
"""Init registry."""
self.ctx = ctx
def check_item_present(self, item_type_plural: str, public_id: PublicId) -> None:
"""
Check item present in registry.
Raise ClickException if not found.
:param item_type_plural: str, item type.
:param public_id: PublicId of the item to check.
"""
item_type = ITEM_TYPE_PLURAL_TO_TYPE[item_type_plural]
try:
get_package_meta(item_type, public_id)
except click.ClickException as e:
raise click.ClickException(
f"Package not found in remote registry: {str(e)}. You can try to add {PUSH_ITEMS_FLAG} flag."
)
def push_item(self, item_type_plural: str, public_id: PublicId) -> None:
"""
Push item to registry.
:param item_type_plural: str, item type.
:param public_id: PublicId of the item to check.
"""
item_type = ITEM_TYPE_PLURAL_TO_TYPE[item_type_plural]
_push_item_remote(self.ctx, item_type, public_id)
def _check_dependencies_in_registry(
registry: BaseRegistry, agent_config: AgentConfig, push_missing: bool
) -> None:
"""Check all agent dependencies present in registry."""
for item_type_plural in (PROTOCOLS, CONTRACTS, CONNECTIONS, SKILLS):
dependencies = getattr(agent_config, item_type_plural)
for public_id in dependencies:
if push_missing:
registry.check_item_present_and_push(item_type_plural, public_id)
else:
registry.check_item_present(item_type_plural, public_id)
def _save_agent_locally(
ctx: Context, is_mixed: bool = False, push_missing: bool = False
) -> None:
"""
Save agent to local packages.
:param ctx: the context
    :param is_mixed: whether to check dependencies in mixed (local, then remote) mode
:param push_missing: bool. flag to push missing items
"""
try:
registry_path = ctx.registry_path
except ValueError as e: # pragma: nocover
raise click.ClickException(str(e))
registry = MixedRegistry(ctx) if is_mixed else LocalRegistry(ctx)
_check_dependencies_in_registry(registry, ctx.agent_config, push_missing)
item_type_plural = AGENTS
target_dir = try_get_item_target_path(
registry_path, ctx.agent_config.author, item_type_plural, ctx.agent_config.name,
)
if not os.path.exists(target_dir):
os.makedirs(target_dir, exist_ok=True)
source_path = os.path.join(ctx.cwd, DEFAULT_AEA_CONFIG_FILE)
target_path = os.path.join(target_dir, DEFAULT_AEA_CONFIG_FILE)
copyfile(source_path, target_path)
click.echo(
f'Agent "{ctx.agent_config.name}" successfully saved in packages folder.'
)
def _publish_agent_remote(ctx: Context, push_missing: bool) -> None:
"""
Push agent to remote registry.
:param ctx: the context
:param push_missing: bool. flag to push missing items
"""
registry = RemoteRegistry(ctx)
_check_dependencies_in_registry(registry, ctx.agent_config, push_missing)
publish_agent(ctx)
| 33.156627
| 189
| 0.667605
|
64eee8595ad2902c7c4925b3206048d3c627550e
| 1,966
|
py
|
Python
|
katie/SVD_Code_OLD/coviddataanalysis/nyhialfl.py
|
S-I-SVD/Randomized-SVD
|
82108238a53c70938af87417f98aadc7f74b2a87
|
[
"MIT"
] | 1
|
2021-12-09T13:34:44.000Z
|
2021-12-09T13:34:44.000Z
|
katie/SVD_Code_OLD/coviddataanalysis/nyhialfl.py
|
S-I-SVD/Randomized-SVD
|
82108238a53c70938af87417f98aadc7f74b2a87
|
[
"MIT"
] | null | null | null |
katie/SVD_Code_OLD/coviddataanalysis/nyhialfl.py
|
S-I-SVD/Randomized-SVD
|
82108238a53c70938af87417f98aadc7f74b2a87
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 6 19:03:01 2020
@author: katie
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 6 16:24:08 2020
@author: katie
"""
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import os
from mpl_toolkits.mplot3d import Axes3D
coviddata = pd.read_csv('/Users/katie/Downloads/nyhialfl_covid.csv')
coviddata = coviddata.drop(columns=['stateFIPS','countyFIPS'])
coviddata_meta = coviddata.loc[:, ['County Name','State']]
coviddata = coviddata.drop(columns=['County Name','State'])
coviddata_scaled = (coviddata-coviddata.mean())
u, s, v = np.linalg.svd(coviddata_scaled, full_matrices=True)
var_explained = np.round(s**2/np.sum(s**2), decimals=3)
var_explained = var_explained[:20]
sns.barplot(x=list(range(1,len(var_explained)+1)), y=var_explained, color="limegreen")
plt.xlabel('SVs', fontsize=16)
plt.ylabel('Percent Variance Explained', fontsize=16)
plt.savefig('svd_scree_plot.png',dpi=100)
plt.show()
labels= ['SV'+str(i) for i in range(1,5)]
svd_df = pd.DataFrame(u[:,0:4], index=coviddata_meta["County Name"].tolist(), columns=labels)
svd_df=svd_df.reset_index()
svd_df.rename(columns={'index':'County Name'}, inplace=True)
labels= ['SV'+str(i) for i in range(1,5)]
svd_df = pd.DataFrame(u[:,0:4], index=coviddata_meta["State"].tolist(), columns=labels)
svd_df=svd_df.reset_index()
svd_df.rename(columns={'index':'State'}, inplace=True)
color_dict = dict({'FL':'Black',
'AL': 'Red',
'HI':'Green',
'NY':'Yellow'})
# Scatter plot: SV1 and SV2
sns.scatterplot(x="SV1", y="SV2", hue="State",
palette=color_dict,
data=svd_df, s=100,
alpha=0.9)
plt.xlabel('Singular Value 1: {0}%'.format(var_explained[0]*100), fontsize=16)
plt.ylabel('Singular Value 2: {0}%'.format(var_explained[1]*100), fontsize=16)
plt.show()
| 28.492754
| 93
| 0.669888
|
841b830c3b1db7ca077549f44bb42d779b80653c
| 171,957
|
py
|
Python
|
src/transformers/__init__.py
|
dctelus/transformers
|
6786cbc4b14ebff0ac59c768cadd109391db9a08
|
[
"Apache-2.0"
] | 3
|
2022-01-15T08:06:07.000Z
|
2022-03-10T07:13:18.000Z
|
src/transformers/__init__.py
|
arron1227/transformers
|
b18dfd95e1f60ae65a959a7b255fc06522170d1b
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/__init__.py
|
arron1227/transformers
|
b18dfd95e1f60ae65a959a7b255fc06522170d1b
|
[
"Apache-2.0"
] | null | null | null |
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# When adding a new object to this init, remember to add it twice: once inside the `_import_structure` dictionary and
# once inside the `if TYPE_CHECKING` branch. The `TYPE_CHECKING` should have import statements as usual, but they are
# only there for type checking. The `_import_structure` is a dictionary submodule to list of object names, and is used
# to defer the actual importing for when the objects are requested. This way `import transformers` provides the names
# in the namespace without actually importing anything (and especially none of the backends).
__version__ = "4.18.0.dev0"
from typing import TYPE_CHECKING
# Check the dependencies satisfy the minimal versions required.
from . import dependency_versions_check
from .utils import (
_LazyModule,
is_flax_available,
is_scatter_available,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
logging,
)
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
# Base objects, independent of any specific backend
_import_structure = {
"benchmark": [],
"commands": [],
"configuration_utils": ["PretrainedConfig"],
"convert_graph_to_onnx": [],
"convert_slow_tokenizers_checkpoints_to_fast": [],
"convert_tf_hub_seq_to_seq_bert_to_pytorch": [],
"data": [
"DataProcessor",
"InputExample",
"InputFeatures",
"SingleSentenceClassificationProcessor",
"SquadExample",
"SquadFeatures",
"SquadV1Processor",
"SquadV2Processor",
"glue_compute_metrics",
"glue_convert_examples_to_features",
"glue_output_modes",
"glue_processors",
"glue_tasks_num_labels",
"squad_convert_examples_to_features",
"xnli_compute_metrics",
"xnli_output_modes",
"xnli_processors",
"xnli_tasks_num_labels",
],
"data.data_collator": [
"DataCollator",
"DataCollatorForLanguageModeling",
"DataCollatorForPermutationLanguageModeling",
"DataCollatorForSeq2Seq",
"DataCollatorForSOP",
"DataCollatorForTokenClassification",
"DataCollatorForWholeWordMask",
"DataCollatorWithPadding",
"DefaultDataCollator",
"default_data_collator",
],
"data.metrics": [],
"data.processors": [],
"debug_utils": [],
"dependency_versions_check": [],
"dependency_versions_table": [],
"dynamic_module_utils": [],
"feature_extraction_sequence_utils": ["SequenceFeatureExtractor"],
"feature_extraction_utils": ["BatchFeature", "FeatureExtractionMixin"],
"file_utils": [],
"hf_argparser": ["HfArgumentParser"],
"integrations": [
"is_comet_available",
"is_optuna_available",
"is_ray_available",
"is_ray_tune_available",
"is_sigopt_available",
"is_tensorboard_available",
"is_wandb_available",
],
"modelcard": ["ModelCard"],
"modeling_tf_pytorch_utils": [
"convert_tf_weight_name_to_pt_weight_name",
"load_pytorch_checkpoint_in_tf2_model",
"load_pytorch_model_in_tf2_model",
"load_pytorch_weights_in_tf2_model",
"load_tf2_checkpoint_in_pytorch_model",
"load_tf2_model_in_pytorch_model",
"load_tf2_weights_in_pytorch_model",
],
"models": [],
# Models
"models.albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig"],
"models.auto": [
"ALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CONFIG_MAPPING",
"FEATURE_EXTRACTOR_MAPPING",
"MODEL_NAMES_MAPPING",
"PROCESSOR_MAPPING",
"TOKENIZER_MAPPING",
"AutoConfig",
"AutoFeatureExtractor",
"AutoProcessor",
"AutoTokenizer",
],
"models.bart": ["BartConfig", "BartTokenizer"],
"models.barthez": [],
"models.bartpho": [],
"models.beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig"],
"models.bert": [
"BERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BasicTokenizer",
"BertConfig",
"BertTokenizer",
"WordpieceTokenizer",
],
"models.bert_generation": ["BertGenerationConfig"],
"models.bert_japanese": ["BertJapaneseTokenizer", "CharacterTokenizer", "MecabTokenizer"],
"models.bertweet": ["BertweetTokenizer"],
"models.big_bird": ["BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP", "BigBirdConfig"],
"models.bigbird_pegasus": [
"BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BigBirdPegasusConfig",
],
"models.blenderbot": ["BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlenderbotConfig", "BlenderbotTokenizer"],
"models.blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallTokenizer",
],
"models.bort": [],
"models.byt5": ["ByT5Tokenizer"],
"models.camembert": ["CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CamembertConfig"],
"models.canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig", "CanineTokenizer"],
"models.clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPTextConfig",
"CLIPTokenizer",
"CLIPVisionConfig",
],
"models.convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertTokenizer"],
"models.convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig"],
"models.cpm": ["CpmTokenizer"],
"models.ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig", "CTRLTokenizer"],
"models.data2vec": ["DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig", "Data2VecTextConfig"],
"models.deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaTokenizer"],
"models.deberta_v2": ["DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaV2Config"],
"models.decision_transformer": ["DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "DecisionTransformerConfig"],
"models.deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig"],
"models.detr": ["DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "DetrConfig"],
"models.dialogpt": [],
"models.distilbert": ["DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DistilBertConfig", "DistilBertTokenizer"],
"models.dit": [],
"models.dpr": [
"DPR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DPRConfig",
"DPRContextEncoderTokenizer",
"DPRQuestionEncoderTokenizer",
"DPRReaderOutput",
"DPRReaderTokenizer",
],
"models.dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"],
"models.electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraTokenizer"],
"models.encoder_decoder": ["EncoderDecoderConfig"],
"models.flaubert": ["FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "FlaubertConfig", "FlaubertTokenizer"],
"models.fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig", "FNetTokenizer"],
"models.fsmt": ["FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP", "FSMTConfig", "FSMTTokenizer"],
"models.funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig", "FunnelTokenizer"],
"models.glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"],
"models.gpt2": ["GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPT2Config", "GPT2Tokenizer"],
"models.gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig"],
"models.gptj": ["GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTJConfig"],
"models.herbert": ["HerbertTokenizer"],
"models.hubert": ["HUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "HubertConfig"],
"models.ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig"],
"models.imagegpt": ["IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ImageGPTConfig"],
"models.layoutlm": ["LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMConfig", "LayoutLMTokenizer"],
"models.layoutlmv2": [
"LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LayoutLMv2Config",
"LayoutLMv2FeatureExtractor",
"LayoutLMv2Processor",
"LayoutLMv2Tokenizer",
],
"models.layoutxlm": ["LayoutXLMProcessor"],
"models.led": ["LED_PRETRAINED_CONFIG_ARCHIVE_MAP", "LEDConfig", "LEDTokenizer"],
"models.longformer": ["LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongformerConfig", "LongformerTokenizer"],
"models.luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig", "LukeTokenizer"],
"models.lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig", "LxmertTokenizer"],
"models.m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config"],
"models.marian": ["MarianConfig"],
"models.maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
"models.mbart": ["MBartConfig"],
"models.mbart50": [],
"models.megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
"models.megatron_gpt2": [],
"models.mluke": [],
"models.mmbt": ["MMBTConfig"],
"models.mobilebert": ["MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileBertConfig", "MobileBertTokenizer"],
"models.mpnet": ["MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "MPNetConfig", "MPNetTokenizer"],
"models.mt5": ["MT5Config"],
"models.nystromformer": [
"NYSTROMFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NystromformerConfig",
],
"models.openai": ["OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OpenAIGPTConfig", "OpenAIGPTTokenizer"],
"models.pegasus": ["PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusConfig", "PegasusTokenizer"],
"models.perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverTokenizer"],
"models.phobert": ["PhobertTokenizer"],
"models.plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"],
"models.poolformer": ["POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PoolFormerConfig"],
"models.prophetnet": ["PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ProphetNetConfig", "ProphetNetTokenizer"],
"models.qdqbert": ["QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "QDQBertConfig"],
"models.rag": ["RagConfig", "RagRetriever", "RagTokenizer"],
"models.realm": ["REALM_PRETRAINED_CONFIG_ARCHIVE_MAP", "RealmConfig", "RealmTokenizer"],
"models.reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"],
"models.rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig"],
"models.resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig"],
"models.retribert": ["RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RetriBertConfig", "RetriBertTokenizer"],
"models.roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaTokenizer"],
"models.roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerTokenizer"],
"models.segformer": ["SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "SegformerConfig"],
"models.sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"],
"models.sew_d": ["SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWDConfig"],
"models.speech_encoder_decoder": ["SpeechEncoderDecoderConfig"],
"models.speech_to_text": [
"SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Speech2TextConfig",
],
"models.speech_to_text_2": [
"SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Speech2Text2Config",
"Speech2Text2Processor",
"Speech2Text2Tokenizer",
],
"models.splinter": ["SPLINTER_PRETRAINED_CONFIG_ARCHIVE_MAP", "SplinterConfig", "SplinterTokenizer"],
"models.squeezebert": ["SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "SqueezeBertConfig", "SqueezeBertTokenizer"],
"models.swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig"],
"models.t5": ["T5_PRETRAINED_CONFIG_ARCHIVE_MAP", "T5Config"],
"models.tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig", "TapasTokenizer"],
"models.transfo_xl": [
"TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TransfoXLConfig",
"TransfoXLCorpus",
"TransfoXLTokenizer",
],
"models.trocr": [
"TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrOCRConfig",
"TrOCRProcessor",
],
"models.unispeech": [
"UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP",
"UniSpeechConfig",
],
"models.unispeech_sat": [
"UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"UniSpeechSatConfig",
],
"models.van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"],
"models.vilt": ["VILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViltConfig", "ViltFeatureExtractor", "ViltProcessor"],
"models.vision_encoder_decoder": ["VisionEncoderDecoderConfig"],
"models.vision_text_dual_encoder": ["VisionTextDualEncoderConfig", "VisionTextDualEncoderProcessor"],
"models.visual_bert": ["VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VisualBertConfig"],
"models.vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig"],
"models.vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"],
"models.wav2vec2": [
"WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Wav2Vec2Config",
"Wav2Vec2CTCTokenizer",
"Wav2Vec2FeatureExtractor",
"Wav2Vec2Processor",
"Wav2Vec2Tokenizer",
],
"models.wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"],
"models.wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"],
"models.wavlm": [
"WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"WavLMConfig",
],
"models.xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"],
"models.xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMTokenizer"],
"models.xlm_prophetnet": ["XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMProphetNetConfig"],
"models.xlm_roberta": ["XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMRobertaConfig"],
"models.xlm_roberta_xl": ["XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMRobertaXLConfig"],
"models.xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"],
"models.yoso": ["YOSO_PRETRAINED_CONFIG_ARCHIVE_MAP", "YosoConfig"],
"onnx": [],
"pipelines": [
"AudioClassificationPipeline",
"AutomaticSpeechRecognitionPipeline",
"Conversation",
"ConversationalPipeline",
"CsvPipelineDataFormat",
"FeatureExtractionPipeline",
"FillMaskPipeline",
"ImageClassificationPipeline",
"ImageSegmentationPipeline",
"JsonPipelineDataFormat",
"NerPipeline",
"ObjectDetectionPipeline",
"PipedPipelineDataFormat",
"Pipeline",
"PipelineDataFormat",
"QuestionAnsweringPipeline",
"SummarizationPipeline",
"TableQuestionAnsweringPipeline",
"Text2TextGenerationPipeline",
"TextClassificationPipeline",
"TextGenerationPipeline",
"TokenClassificationPipeline",
"TranslationPipeline",
"ZeroShotClassificationPipeline",
"ZeroShotImageClassificationPipeline",
"pipeline",
],
"processing_utils": ["ProcessorMixin"],
"testing_utils": [],
"tokenization_utils": ["PreTrainedTokenizer"],
"tokenization_utils_base": [
"AddedToken",
"BatchEncoding",
"CharSpan",
"PreTrainedTokenizerBase",
"SpecialTokensMixin",
"TokenSpan",
],
"trainer_callback": [
"DefaultFlowCallback",
"EarlyStoppingCallback",
"PrinterCallback",
"ProgressCallback",
"TrainerCallback",
"TrainerControl",
"TrainerState",
],
"trainer_utils": ["EvalPrediction", "IntervalStrategy", "SchedulerType", "set_seed"],
"training_args": ["TrainingArguments"],
"training_args_seq2seq": ["Seq2SeqTrainingArguments"],
"training_args_tf": ["TFTrainingArguments"],
"utils": [
"CONFIG_NAME",
"MODEL_CARD_NAME",
"PYTORCH_PRETRAINED_BERT_CACHE",
"PYTORCH_TRANSFORMERS_CACHE",
"SPIECE_UNDERLINE",
"TF2_WEIGHTS_NAME",
"TF_WEIGHTS_NAME",
"TRANSFORMERS_CACHE",
"WEIGHTS_NAME",
"TensorType",
"add_end_docstrings",
"add_start_docstrings",
"cached_path",
"is_apex_available",
"is_datasets_available",
"is_faiss_available",
"is_flax_available",
"is_phonemizer_available",
"is_psutil_available",
"is_py3nvml_available",
"is_pyctcdecode_available",
"is_scipy_available",
"is_sentencepiece_available",
"is_sklearn_available",
"is_speech_available",
"is_tf_available",
"is_timm_available",
"is_tokenizers_available",
"is_torch_available",
"is_torch_tpu_available",
"is_vision_available",
"logging",
],
}
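# NOTE: the mapping above ties each submodule path to the public names it exposes. In the
# Transformers codebase this registry is consumed at the bottom of the file by a lazy-module
# wrapper (`_LazyModule`), so `from transformers import BertModel` only imports `models.bert`
# on first attribute access. That closing hook-up is outside this excerpt; the lines below are
# an illustrative sketch of the usual pattern (an assumption, not a verbatim copy of this file):
#
#     import sys
#     sys.modules[__name__] = _LazyModule(
#         __name__, globals()["__file__"], _import_structure, module_spec=__spec__
#     )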
# sentencepiece-backed objects
if is_sentencepiece_available():
_import_structure["models.albert"].append("AlbertTokenizer")
_import_structure["models.barthez"].append("BarthezTokenizer")
_import_structure["models.bartpho"].append("BartphoTokenizer")
_import_structure["models.bert_generation"].append("BertGenerationTokenizer")
_import_structure["models.big_bird"].append("BigBirdTokenizer")
_import_structure["models.camembert"].append("CamembertTokenizer")
_import_structure["models.deberta_v2"].append("DebertaV2Tokenizer")
_import_structure["models.layoutxlm"].append("LayoutXLMTokenizer")
_import_structure["models.m2m_100"].append("M2M100Tokenizer")
_import_structure["models.marian"].append("MarianTokenizer")
_import_structure["models.mbart"].append("MBartTokenizer")
_import_structure["models.mbart50"].append("MBart50Tokenizer")
_import_structure["models.mluke"].append("MLukeTokenizer")
_import_structure["models.mt5"].append("MT5Tokenizer")
_import_structure["models.pegasus"].append("PegasusTokenizer")
_import_structure["models.plbart"].append("PLBartTokenizer")
_import_structure["models.reformer"].append("ReformerTokenizer")
_import_structure["models.rembert"].append("RemBertTokenizer")
_import_structure["models.speech_to_text"].append("Speech2TextTokenizer")
_import_structure["models.t5"].append("T5Tokenizer")
_import_structure["models.xglm"].append("XGLMTokenizer")
_import_structure["models.xlm_prophetnet"].append("XLMProphetNetTokenizer")
_import_structure["models.xlm_roberta"].append("XLMRobertaTokenizer")
_import_structure["models.xlnet"].append("XLNetTokenizer")
else:
from .utils import dummy_sentencepiece_objects
_import_structure["utils.dummy_sentencepiece_objects"] = [
name for name in dir(dummy_sentencepiece_objects) if not name.startswith("_")
]
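# When an optional backend is missing, the matching `utils.dummy_*_objects` module is registered
# instead, so the names above still resolve at import time and only raise an informative
# "backend not available" error when actually used. This if/else pattern repeats below for every
# optional dependency (tokenizers, speech, vision, timm, scatter, torch, tensorflow, flax).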
# tokenizers-backed objects
if is_tokenizers_available():
# Fast tokenizers
_import_structure["models.realm"].append("RealmTokenizerFast")
_import_structure["models.xglm"].append("XGLMTokenizerFast")
_import_structure["models.fnet"].append("FNetTokenizerFast")
_import_structure["models.roformer"].append("RoFormerTokenizerFast")
_import_structure["models.clip"].append("CLIPTokenizerFast")
_import_structure["models.convbert"].append("ConvBertTokenizerFast")
_import_structure["models.blenderbot_small"].append("BlenderbotSmallTokenizerFast")
_import_structure["models.albert"].append("AlbertTokenizerFast")
_import_structure["models.bart"].append("BartTokenizerFast")
_import_structure["models.barthez"].append("BarthezTokenizerFast")
_import_structure["models.bert"].append("BertTokenizerFast")
_import_structure["models.big_bird"].append("BigBirdTokenizerFast")
_import_structure["models.blenderbot"].append("BlenderbotTokenizerFast")
_import_structure["models.camembert"].append("CamembertTokenizerFast")
_import_structure["models.deberta"].append("DebertaTokenizerFast")
_import_structure["models.distilbert"].append("DistilBertTokenizerFast")
_import_structure["models.dpr"].extend(
["DPRContextEncoderTokenizerFast", "DPRQuestionEncoderTokenizerFast", "DPRReaderTokenizerFast"]
)
_import_structure["models.electra"].append("ElectraTokenizerFast")
_import_structure["models.funnel"].append("FunnelTokenizerFast")
_import_structure["models.gpt2"].append("GPT2TokenizerFast")
_import_structure["models.herbert"].append("HerbertTokenizerFast")
_import_structure["models.layoutlm"].append("LayoutLMTokenizerFast")
_import_structure["models.layoutlmv2"].append("LayoutLMv2TokenizerFast")
_import_structure["models.layoutxlm"].append("LayoutXLMTokenizerFast")
_import_structure["models.led"].append("LEDTokenizerFast")
_import_structure["models.longformer"].append("LongformerTokenizerFast")
_import_structure["models.lxmert"].append("LxmertTokenizerFast")
_import_structure["models.mbart"].append("MBartTokenizerFast")
_import_structure["models.mbart50"].append("MBart50TokenizerFast")
_import_structure["models.mobilebert"].append("MobileBertTokenizerFast")
_import_structure["models.mpnet"].append("MPNetTokenizerFast")
_import_structure["models.mt5"].append("MT5TokenizerFast")
_import_structure["models.openai"].append("OpenAIGPTTokenizerFast")
_import_structure["models.pegasus"].append("PegasusTokenizerFast")
_import_structure["models.reformer"].append("ReformerTokenizerFast")
_import_structure["models.rembert"].append("RemBertTokenizerFast")
_import_structure["models.retribert"].append("RetriBertTokenizerFast")
_import_structure["models.roberta"].append("RobertaTokenizerFast")
_import_structure["models.splinter"].append("SplinterTokenizerFast")
_import_structure["models.squeezebert"].append("SqueezeBertTokenizerFast")
_import_structure["models.t5"].append("T5TokenizerFast")
_import_structure["models.xlm_roberta"].append("XLMRobertaTokenizerFast")
_import_structure["models.xlnet"].append("XLNetTokenizerFast")
_import_structure["tokenization_utils_fast"] = ["PreTrainedTokenizerFast"]
else:
from .utils import dummy_tokenizers_objects
_import_structure["utils.dummy_tokenizers_objects"] = [
name for name in dir(dummy_tokenizers_objects) if not name.startswith("_")
]
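# Objects that require both sentencepiece and tokenizers (the slow-to-fast tokenizer converters)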
if is_sentencepiece_available() and is_tokenizers_available():
_import_structure["convert_slow_tokenizer"] = ["SLOW_TO_FAST_CONVERTERS", "convert_slow_tokenizer"]
else:
from .utils import dummy_sentencepiece_and_tokenizers_objects
_import_structure["utils.dummy_sentencepiece_and_tokenizers_objects"] = [
name for name in dir(dummy_sentencepiece_and_tokenizers_objects) if not name.startswith("_")
]
# Speech-specific objects
if is_speech_available():
_import_structure["models.speech_to_text"].append("Speech2TextFeatureExtractor")
else:
from .utils import dummy_speech_objects
_import_structure["utils.dummy_speech_objects"] = [
name for name in dir(dummy_speech_objects) if not name.startswith("_")
]
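# Objects that require both sentencepiece and speech (Speech2TextProcessor pairs the
# sentencepiece tokenizer with the feature extractor)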
if is_sentencepiece_available() and is_speech_available():
_import_structure["models.speech_to_text"].append("Speech2TextProcessor")
else:
from .utils import dummy_sentencepiece_and_speech_objects
_import_structure["utils.dummy_sentencepiece_and_speech_objects"] = [
name for name in dir(dummy_sentencepiece_and_speech_objects) if not name.startswith("_")
]
# Vision-specific objects
if is_vision_available():
_import_structure["image_utils"] = ["ImageFeatureExtractionMixin"]
_import_structure["models.beit"].append("BeitFeatureExtractor")
_import_structure["models.clip"].append("CLIPFeatureExtractor")
_import_structure["models.clip"].append("CLIPProcessor")
_import_structure["models.convnext"].append("ConvNextFeatureExtractor")
_import_structure["models.deit"].append("DeiTFeatureExtractor")
_import_structure["models.detr"].append("DetrFeatureExtractor")
_import_structure["models.dpt"].append("DPTFeatureExtractor")
_import_structure["models.glpn"].append("GLPNFeatureExtractor")
_import_structure["models.imagegpt"].append("ImageGPTFeatureExtractor")
_import_structure["models.layoutlmv2"].append("LayoutLMv2FeatureExtractor")
_import_structure["models.layoutlmv2"].append("LayoutLMv2Processor")
_import_structure["models.layoutxlm"].append("LayoutXLMProcessor")
_import_structure["models.maskformer"].append("MaskFormerFeatureExtractor")
_import_structure["models.perceiver"].append("PerceiverFeatureExtractor")
_import_structure["models.poolformer"].append("PoolFormerFeatureExtractor")
_import_structure["models.segformer"].append("SegformerFeatureExtractor")
_import_structure["models.vilt"].append("ViltFeatureExtractor")
_import_structure["models.vilt"].append("ViltProcessor")
_import_structure["models.vit"].append("ViTFeatureExtractor")
else:
from .utils import dummy_vision_objects
_import_structure["utils.dummy_vision_objects"] = [
name for name in dir(dummy_vision_objects) if not name.startswith("_")
]
# Timm-backed objects
if is_timm_available() and is_vision_available():
_import_structure["models.detr"].extend(
[
"DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"DetrForObjectDetection",
"DetrForSegmentation",
"DetrModel",
"DetrPreTrainedModel",
]
)
else:
from .utils import dummy_timm_objects
_import_structure["utils.dummy_timm_objects"] = [
name for name in dir(dummy_timm_objects) if not name.startswith("_")
]
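# Scatter-backed objects (the TAPAS models registered below presumably depend on torch-scatter)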
if is_scatter_available():
_import_structure["models.tapas"].extend(
[
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
)
else:
from .utils import dummy_scatter_objects
_import_structure["utils.dummy_scatter_objects"] = [
name for name in dir(dummy_scatter_objects) if not name.startswith("_")
]
# PyTorch-backed objects
if is_torch_available():
_import_structure["activations"] = []
_import_structure["benchmark.benchmark"] = ["PyTorchBenchmark"]
_import_structure["benchmark.benchmark_args"] = ["PyTorchBenchmarkArguments"]
_import_structure["data.datasets"] = [
"GlueDataset",
"GlueDataTrainingArguments",
"LineByLineTextDataset",
"LineByLineWithRefDataset",
"LineByLineWithSOPTextDataset",
"SquadDataset",
"SquadDataTrainingArguments",
"TextDataset",
"TextDatasetForNextSentencePrediction",
]
_import_structure["deepspeed"] = []
_import_structure["generation_beam_constraints"] = [
"Constraint",
"ConstraintListState",
"DisjunctiveConstraint",
"PhrasalConstraint",
]
_import_structure["generation_beam_search"] = ["BeamScorer", "BeamSearchScorer", "ConstrainedBeamSearchScorer"]
_import_structure["generation_logits_process"] = [
"ForcedBOSTokenLogitsProcessor",
"ForcedEOSTokenLogitsProcessor",
"HammingDiversityLogitsProcessor",
"InfNanRemoveLogitsProcessor",
"LogitsProcessor",
"LogitsProcessorList",
"LogitsWarper",
"MinLengthLogitsProcessor",
"NoBadWordsLogitsProcessor",
"NoRepeatNGramLogitsProcessor",
"PrefixConstrainedLogitsProcessor",
"RepetitionPenaltyLogitsProcessor",
"TemperatureLogitsWarper",
"TopKLogitsWarper",
"TopPLogitsWarper",
]
_import_structure["generation_stopping_criteria"] = [
"MaxLengthCriteria",
"MaxTimeCriteria",
"StoppingCriteria",
"StoppingCriteriaList",
]
_import_structure["generation_utils"] = ["top_k_top_p_filtering"]
_import_structure["modeling_outputs"] = []
_import_structure["modeling_utils"] = ["Conv1D", "PreTrainedModel", "apply_chunking_to_forward", "prune_layer"]
# PyTorch models structure
_import_structure["models.albert"].extend(
[
"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"AlbertForMaskedLM",
"AlbertForMultipleChoice",
"AlbertForPreTraining",
"AlbertForQuestionAnswering",
"AlbertForSequenceClassification",
"AlbertForTokenClassification",
"AlbertModel",
"AlbertPreTrainedModel",
"load_tf_weights_in_albert",
]
)
_import_structure["models.auto"].extend(
[
"MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING",
"MODEL_FOR_AUDIO_XVECTOR_MAPPING",
"MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING",
"MODEL_FOR_CAUSAL_LM_MAPPING",
"MODEL_FOR_CTC_MAPPING",
"MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING",
"MODEL_FOR_IMAGE_SEGMENTATION_MAPPING",
"MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING",
"MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING",
"MODEL_FOR_MASKED_LM_MAPPING",
"MODEL_FOR_MULTIPLE_CHOICE_MAPPING",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING",
"MODEL_FOR_OBJECT_DETECTION_MAPPING",
"MODEL_FOR_PRETRAINING_MAPPING",
"MODEL_FOR_QUESTION_ANSWERING_MAPPING",
"MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING",
"MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING",
"MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING",
"MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING",
"MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING",
"MODEL_FOR_VISION_2_SEQ_MAPPING",
"MODEL_MAPPING",
"MODEL_WITH_LM_HEAD_MAPPING",
"AutoModel",
"AutoModelForAudioClassification",
"AutoModelForAudioFrameClassification",
"AutoModelForAudioXVector",
"AutoModelForCausalLM",
"AutoModelForCTC",
"AutoModelForImageClassification",
"AutoModelForImageSegmentation",
"AutoModelForInstanceSegmentation",
"AutoModelForMaskedImageModeling",
"AutoModelForMaskedLM",
"AutoModelForMultipleChoice",
"AutoModelForNextSentencePrediction",
"AutoModelForObjectDetection",
"AutoModelForPreTraining",
"AutoModelForQuestionAnswering",
"AutoModelForSemanticSegmentation",
"AutoModelForSeq2SeqLM",
"AutoModelForSequenceClassification",
"AutoModelForSpeechSeq2Seq",
"AutoModelForTableQuestionAnswering",
"AutoModelForTokenClassification",
"AutoModelForVision2Seq",
"AutoModelWithLMHead",
]
)
_import_structure["models.bart"].extend(
[
"BART_PRETRAINED_MODEL_ARCHIVE_LIST",
"BartForCausalLM",
"BartForConditionalGeneration",
"BartForQuestionAnswering",
"BartForSequenceClassification",
"BartModel",
"BartPretrainedModel",
"PretrainedBartModel",
]
)
_import_structure["models.beit"].extend(
[
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
)
_import_structure["models.bert"].extend(
[
"BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BertForMaskedLM",
"BertForMultipleChoice",
"BertForNextSentencePrediction",
"BertForPreTraining",
"BertForQuestionAnswering",
"BertForSequenceClassification",
"BertForTokenClassification",
"BertLayer",
"BertLMHeadModel",
"BertModel",
"BertPreTrainedModel",
"load_tf_weights_in_bert",
]
)
_import_structure["models.bert_generation"].extend(
[
"BertGenerationDecoder",
"BertGenerationEncoder",
"BertGenerationPreTrainedModel",
"load_tf_weights_in_bert_generation",
]
)
_import_structure["models.big_bird"].extend(
[
"BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST",
"BigBirdForCausalLM",
"BigBirdForMaskedLM",
"BigBirdForMultipleChoice",
"BigBirdForPreTraining",
"BigBirdForQuestionAnswering",
"BigBirdForSequenceClassification",
"BigBirdForTokenClassification",
"BigBirdLayer",
"BigBirdModel",
"BigBirdPreTrainedModel",
"load_tf_weights_in_big_bird",
]
)
_import_structure["models.bigbird_pegasus"].extend(
[
"BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
"BigBirdPegasusForCausalLM",
"BigBirdPegasusForConditionalGeneration",
"BigBirdPegasusForQuestionAnswering",
"BigBirdPegasusForSequenceClassification",
"BigBirdPegasusModel",
"BigBirdPegasusPreTrainedModel",
]
)
_import_structure["models.blenderbot"].extend(
[
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
)
_import_structure["models.blenderbot_small"].extend(
[
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
)
_import_structure["models.camembert"].extend(
[
"CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CamembertForCausalLM",
"CamembertForMaskedLM",
"CamembertForMultipleChoice",
"CamembertForQuestionAnswering",
"CamembertForSequenceClassification",
"CamembertForTokenClassification",
"CamembertModel",
]
)
_import_structure["models.canine"].extend(
[
"CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
"CanineForMultipleChoice",
"CanineForQuestionAnswering",
"CanineForSequenceClassification",
"CanineForTokenClassification",
"CanineLayer",
"CanineModel",
"CaninePreTrainedModel",
"load_tf_weights_in_canine",
]
)
_import_structure["models.clip"].extend(
[
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPVisionModel",
]
)
_import_structure["models.convbert"].extend(
[
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
)
_import_structure["models.convnext"].extend(
[
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
]
)
_import_structure["models.ctrl"].extend(
[
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
)
_import_structure["models.data2vec"].extend(
[
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
)
_import_structure["models.deberta"].extend(
[
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
)
_import_structure["models.deberta_v2"].extend(
[
"DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaV2ForMaskedLM",
"DebertaV2ForQuestionAnswering",
"DebertaV2ForSequenceClassification",
"DebertaV2ForTokenClassification",
"DebertaV2Model",
"DebertaV2PreTrainedModel",
]
)
_import_structure["models.decision_transformer"].extend(
[
"DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"DecisionTransformerGPT2Model",
"DecisionTransformerGPT2PreTrainedModel",
"DecisionTransformerModel",
"DecisionTransformerPreTrainedModel",
]
)
_import_structure["models.deit"].extend(
[
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
)
_import_structure["models.distilbert"].extend(
[
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
)
_import_structure["models.dpr"].extend(
[
"DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPRContextEncoder",
"DPRPretrainedContextEncoder",
"DPRPreTrainedModel",
"DPRPretrainedQuestionEncoder",
"DPRPretrainedReader",
"DPRQuestionEncoder",
"DPRReader",
]
)
_import_structure["models.dpt"].extend(
[
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
)
_import_structure["models.electra"].extend(
[
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
)
_import_structure["models.encoder_decoder"].append("EncoderDecoderModel")
_import_structure["models.flaubert"].extend(
[
"FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaubertForMultipleChoice",
"FlaubertForQuestionAnswering",
"FlaubertForQuestionAnsweringSimple",
"FlaubertForSequenceClassification",
"FlaubertForTokenClassification",
"FlaubertModel",
"FlaubertWithLMHeadModel",
]
)
_import_structure["models.fnet"].extend(
[
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
)
_import_structure["models.fsmt"].extend(["FSMTForConditionalGeneration", "FSMTModel", "PretrainedFSMTModel"])
_import_structure["models.funnel"].extend(
[
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
)
_import_structure["models.glpn"].extend(
[
"GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
"GLPNForDepthEstimation",
"GLPNModel",
"GLPNPreTrainedModel",
]
)
_import_structure["models.gpt2"].extend(
[
"GPT2_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPT2DoubleHeadsModel",
"GPT2ForSequenceClassification",
"GPT2ForTokenClassification",
"GPT2LMHeadModel",
"GPT2Model",
"GPT2PreTrainedModel",
"load_tf_weights_in_gpt2",
]
)
_import_structure["models.gpt_neo"].extend(
[
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForSequenceClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
)
_import_structure["models.gptj"].extend(
[
"GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTJForCausalLM",
"GPTJForQuestionAnswering",
"GPTJForSequenceClassification",
"GPTJModel",
"GPTJPreTrainedModel",
]
)
_import_structure["models.hubert"].extend(
[
"HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"HubertForCTC",
"HubertForSequenceClassification",
"HubertModel",
"HubertPreTrainedModel",
]
)
_import_structure["models.ibert"].extend(
[
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
)
_import_structure["models.imagegpt"].extend(
[
"IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ImageGPTForCausalImageModeling",
"ImageGPTForImageClassification",
"ImageGPTModel",
"ImageGPTPreTrainedModel",
"load_tf_weights_in_imagegpt",
]
)
_import_structure["models.layoutlm"].extend(
[
"LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMForMaskedLM",
"LayoutLMForSequenceClassification",
"LayoutLMForTokenClassification",
"LayoutLMModel",
"LayoutLMPreTrainedModel",
]
)
_import_structure["models.layoutlmv2"].extend(
[
"LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv2ForQuestionAnswering",
"LayoutLMv2ForSequenceClassification",
"LayoutLMv2ForTokenClassification",
"LayoutLMv2Model",
"LayoutLMv2PreTrainedModel",
]
)
_import_structure["models.led"].extend(
[
"LED_PRETRAINED_MODEL_ARCHIVE_LIST",
"LEDForConditionalGeneration",
"LEDForQuestionAnswering",
"LEDForSequenceClassification",
"LEDModel",
"LEDPreTrainedModel",
]
)
_import_structure["models.longformer"].extend(
[
"LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongformerForMaskedLM",
"LongformerForMultipleChoice",
"LongformerForQuestionAnswering",
"LongformerForSequenceClassification",
"LongformerForTokenClassification",
"LongformerModel",
"LongformerPreTrainedModel",
"LongformerSelfAttention",
]
)
_import_structure["models.luke"].extend(
[
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
)
_import_structure["models.lxmert"].extend(
[
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
)
_import_structure["models.m2m_100"].extend(
[
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
)
_import_structure["models.marian"].extend(["MarianForCausalLM", "MarianModel", "MarianMTModel"])
_import_structure["models.maskformer"].extend(
[
"MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"MaskFormerForInstanceSegmentation",
"MaskFormerModel",
"MaskFormerPreTrainedModel",
]
)
_import_structure["models.mbart"].extend(
[
"MBartForCausalLM",
"MBartForConditionalGeneration",
"MBartForQuestionAnswering",
"MBartForSequenceClassification",
"MBartModel",
"MBartPreTrainedModel",
]
)
_import_structure["models.megatron_bert"].extend(
[
"MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegatronBertForCausalLM",
"MegatronBertForMaskedLM",
"MegatronBertForMultipleChoice",
"MegatronBertForNextSentencePrediction",
"MegatronBertForPreTraining",
"MegatronBertForQuestionAnswering",
"MegatronBertForSequenceClassification",
"MegatronBertForTokenClassification",
"MegatronBertModel",
"MegatronBertPreTrainedModel",
]
)
_import_structure["models.mmbt"].extend(["MMBTForClassification", "MMBTModel", "ModalEmbeddings"])
_import_structure["models.mobilebert"].extend(
[
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
)
_import_structure["models.mpnet"].extend(
[
"MPNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"MPNetForMaskedLM",
"MPNetForMultipleChoice",
"MPNetForQuestionAnswering",
"MPNetForSequenceClassification",
"MPNetForTokenClassification",
"MPNetLayer",
"MPNetModel",
"MPNetPreTrainedModel",
]
)
_import_structure["models.mt5"].extend(["MT5EncoderModel", "MT5ForConditionalGeneration", "MT5Model"])
_import_structure["models.nystromformer"].extend(
[
"NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"NystromformerForMaskedLM",
"NystromformerForMultipleChoice",
"NystromformerForQuestionAnswering",
"NystromformerForSequenceClassification",
"NystromformerForTokenClassification",
"NystromformerLayer",
"NystromformerModel",
"NystromformerPreTrainedModel",
]
)
_import_structure["models.openai"].extend(
[
"OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OpenAIGPTDoubleHeadsModel",
"OpenAIGPTForSequenceClassification",
"OpenAIGPTLMHeadModel",
"OpenAIGPTModel",
"OpenAIGPTPreTrainedModel",
"load_tf_weights_in_openai_gpt",
]
)
_import_structure["models.pegasus"].extend(
["PegasusForCausalLM", "PegasusForConditionalGeneration", "PegasusModel", "PegasusPreTrainedModel"]
)
_import_structure["models.perceiver"].extend(
[
"PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PerceiverForImageClassificationConvProcessing",
"PerceiverForImageClassificationFourier",
"PerceiverForImageClassificationLearned",
"PerceiverForMaskedLM",
"PerceiverForMultimodalAutoencoding",
"PerceiverForOpticalFlow",
"PerceiverForSequenceClassification",
"PerceiverLayer",
"PerceiverModel",
"PerceiverPreTrainedModel",
]
)
_import_structure["models.plbart"].extend(
[
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
)
_import_structure["models.poolformer"].extend(
[
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
)
_import_structure["models.prophetnet"].extend(
[
"PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ProphetNetDecoder",
"ProphetNetEncoder",
"ProphetNetForCausalLM",
"ProphetNetForConditionalGeneration",
"ProphetNetModel",
"ProphetNetPreTrainedModel",
]
)
_import_structure["models.qdqbert"].extend(
[
"QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"QDQBertForMaskedLM",
"QDQBertForMultipleChoice",
"QDQBertForNextSentencePrediction",
"QDQBertForQuestionAnswering",
"QDQBertForSequenceClassification",
"QDQBertForTokenClassification",
"QDQBertLayer",
"QDQBertLMHeadModel",
"QDQBertModel",
"QDQBertPreTrainedModel",
"load_tf_weights_in_qdqbert",
]
)
_import_structure["models.rag"].extend(
["RagModel", "RagPreTrainedModel", "RagSequenceForGeneration", "RagTokenForGeneration"]
)
_import_structure["models.realm"].extend(
[
"REALM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RealmEmbedder",
"RealmForOpenQA",
"RealmKnowledgeAugEncoder",
"RealmPreTrainedModel",
"RealmReader",
"RealmRetriever",
"RealmScorer",
"load_tf_weights_in_realm",
]
)
_import_structure["models.reformer"].extend(
[
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
)
_import_structure["models.rembert"].extend(
[
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
)
_import_structure["models.resnet"].extend(
[
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
]
)
_import_structure["models.retribert"].extend(
["RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "RetriBertModel", "RetriBertPreTrainedModel"]
)
_import_structure["models.roberta"].extend(
[
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
)
_import_structure["models.roformer"].extend(
[
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
)
_import_structure["models.segformer"].extend(
[
"SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SegformerDecodeHead",
"SegformerForImageClassification",
"SegformerForSemanticSegmentation",
"SegformerLayer",
"SegformerModel",
"SegformerPreTrainedModel",
]
)
_import_structure["models.sew"].extend(
[
"SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
"SEWForCTC",
"SEWForSequenceClassification",
"SEWModel",
"SEWPreTrainedModel",
]
)
_import_structure["models.sew_d"].extend(
[
"SEW_D_PRETRAINED_MODEL_ARCHIVE_LIST",
"SEWDForCTC",
"SEWDForSequenceClassification",
"SEWDModel",
"SEWDPreTrainedModel",
]
)
_import_structure["models.speech_encoder_decoder"].extend(["SpeechEncoderDecoderModel"])
_import_structure["models.speech_to_text"].extend(
[
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
)
_import_structure["models.speech_to_text_2"].extend(["Speech2Text2ForCausalLM", "Speech2Text2PreTrainedModel"])
_import_structure["models.splinter"].extend(
[
"SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SplinterForQuestionAnswering",
"SplinterLayer",
"SplinterModel",
"SplinterPreTrainedModel",
]
)
_import_structure["models.squeezebert"].extend(
[
"SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"SqueezeBertForMaskedLM",
"SqueezeBertForMultipleChoice",
"SqueezeBertForQuestionAnswering",
"SqueezeBertForSequenceClassification",
"SqueezeBertForTokenClassification",
"SqueezeBertModel",
"SqueezeBertModule",
"SqueezeBertPreTrainedModel",
]
)
_import_structure["models.swin"].extend(
[
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
]
)
_import_structure["models.t5"].extend(
[
"T5_PRETRAINED_MODEL_ARCHIVE_LIST",
"T5EncoderModel",
"T5ForConditionalGeneration",
"T5Model",
"T5PreTrainedModel",
"load_tf_weights_in_t5",
]
)
_import_structure["models.transfo_xl"].extend(
[
"TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"AdaptiveEmbedding",
"TransfoXLForSequenceClassification",
"TransfoXLLMHeadModel",
"TransfoXLModel",
"TransfoXLPreTrainedModel",
"load_tf_weights_in_transfo_xl",
]
)
_import_structure["models.trocr"].extend(
["TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel"]
)
_import_structure["models.unispeech"].extend(
[
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
)
_import_structure["models.unispeech_sat"].extend(
[
"UNISPEECH_SAT_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechSatForAudioFrameClassification",
"UniSpeechSatForCTC",
"UniSpeechSatForPreTraining",
"UniSpeechSatForSequenceClassification",
"UniSpeechSatForXVector",
"UniSpeechSatModel",
"UniSpeechSatPreTrainedModel",
]
)
_import_structure["models.van"].extend(
[
"VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
"VanForImageClassification",
"VanModel",
"VanPreTrainedModel",
]
)
_import_structure["models.vilt"].extend(
[
"VILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViltForImageAndTextRetrieval",
"ViltForImagesAndTextClassification",
"ViltForMaskedLM",
"ViltForQuestionAnswering",
"ViltLayer",
"ViltModel",
"ViltPreTrainedModel",
]
)
_import_structure["models.vision_encoder_decoder"].extend(["VisionEncoderDecoderModel"])
_import_structure["models.vision_text_dual_encoder"].extend(["VisionTextDualEncoderModel"])
_import_structure["models.visual_bert"].extend(
[
"VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VisualBertForMultipleChoice",
"VisualBertForPreTraining",
"VisualBertForQuestionAnswering",
"VisualBertForRegionToPhraseAlignment",
"VisualBertForVisualReasoning",
"VisualBertLayer",
"VisualBertModel",
"VisualBertPreTrainedModel",
]
)
_import_structure["models.vit"].extend(
[
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
)
_import_structure["models.vit_mae"].extend(
[
"VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMAEForPreTraining",
"ViTMAELayer",
"ViTMAEModel",
"ViTMAEPreTrainedModel",
]
)
_import_structure["models.wav2vec2"].extend(
[
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
)
_import_structure["models.wavlm"].extend(
[
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
)
_import_structure["models.xglm"].extend(
[
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
)
_import_structure["models.xlm"].extend(
[
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
)
_import_structure["models.xlm_prophetnet"].extend(
[
"XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMProphetNetDecoder",
"XLMProphetNetEncoder",
"XLMProphetNetForCausalLM",
"XLMProphetNetForConditionalGeneration",
"XLMProphetNetModel",
]
)
_import_structure["models.xlm_roberta"].extend(
[
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
]
)
_import_structure["models.xlm_roberta_xl"].extend(
[
"XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaXLForCausalLM",
"XLMRobertaXLForMaskedLM",
"XLMRobertaXLForMultipleChoice",
"XLMRobertaXLForQuestionAnswering",
"XLMRobertaXLForSequenceClassification",
"XLMRobertaXLForTokenClassification",
"XLMRobertaXLModel",
"XLMRobertaXLPreTrainedModel",
]
)
_import_structure["models.xlnet"].extend(
[
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
)
_import_structure["models.yoso"].extend(
[
"YOSO_PRETRAINED_MODEL_ARCHIVE_LIST",
"YosoForMaskedLM",
"YosoForMultipleChoice",
"YosoForQuestionAnswering",
"YosoForSequenceClassification",
"YosoForTokenClassification",
"YosoLayer",
"YosoModel",
"YosoPreTrainedModel",
]
)
_import_structure["optimization"] = [
"Adafactor",
"AdamW",
"get_constant_schedule",
"get_constant_schedule_with_warmup",
"get_cosine_schedule_with_warmup",
"get_cosine_with_hard_restarts_schedule_with_warmup",
"get_linear_schedule_with_warmup",
"get_polynomial_decay_schedule_with_warmup",
"get_scheduler",
]
_import_structure["pytorch_utils"] = []
_import_structure["sagemaker"] = []
_import_structure["trainer"] = ["Trainer"]
_import_structure["trainer_pt_utils"] = ["torch_distributed_zero_first"]
_import_structure["trainer_seq2seq"] = ["Seq2SeqTrainer"]
else:
from .utils import dummy_pt_objects
_import_structure["utils.dummy_pt_objects"] = [name for name in dir(dummy_pt_objects) if not name.startswith("_")]
# TensorFlow-backed objects
if is_tf_available():
_import_structure["activations_tf"] = []
_import_structure["benchmark.benchmark_args_tf"] = ["TensorFlowBenchmarkArguments"]
_import_structure["benchmark.benchmark_tf"] = ["TensorFlowBenchmark"]
_import_structure["generation_tf_logits_process"] = [
"TFLogitsProcessor",
"TFLogitsProcessorList",
"TFLogitsWarper",
"TFMinLengthLogitsProcessor",
"TFNoBadWordsLogitsProcessor",
"TFNoRepeatNGramLogitsProcessor",
"TFRepetitionPenaltyLogitsProcessor",
"TFTemperatureLogitsWarper",
"TFTopKLogitsWarper",
"TFTopPLogitsWarper",
]
_import_structure["generation_tf_utils"] = ["tf_top_k_top_p_filtering"]
_import_structure["keras_callbacks"] = ["KerasMetricCallback", "PushToHubCallback"]
_import_structure["modeling_tf_outputs"] = []
_import_structure["modeling_tf_utils"] = [
"TFPreTrainedModel",
"TFSequenceSummary",
"TFSharedEmbeddings",
"shape_list",
]
# TensorFlow models structure
_import_structure["models.albert"].extend(
[
"TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAlbertForMaskedLM",
"TFAlbertForMultipleChoice",
"TFAlbertForPreTraining",
"TFAlbertForQuestionAnswering",
"TFAlbertForSequenceClassification",
"TFAlbertForTokenClassification",
"TFAlbertMainLayer",
"TFAlbertModel",
"TFAlbertPreTrainedModel",
]
)
_import_structure["models.auto"].extend(
[
"TF_MODEL_FOR_CAUSAL_LM_MAPPING",
"TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING",
"TF_MODEL_FOR_MASKED_LM_MAPPING",
"TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING",
"TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING",
"TF_MODEL_FOR_PRETRAINING_MAPPING",
"TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING",
"TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING",
"TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING",
"TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING",
"TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING",
"TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING",
"TF_MODEL_FOR_VISION_2_SEQ_MAPPING",
"TF_MODEL_MAPPING",
"TF_MODEL_WITH_LM_HEAD_MAPPING",
"TFAutoModel",
"TFAutoModelForCausalLM",
"TFAutoModelForImageClassification",
"TFAutoModelForMaskedLM",
"TFAutoModelForMultipleChoice",
"TFAutoModelForPreTraining",
"TFAutoModelForQuestionAnswering",
"TFAutoModelForSeq2SeqLM",
"TFAutoModelForSequenceClassification",
"TFAutoModelForSpeechSeq2Seq",
"TFAutoModelForTableQuestionAnswering",
"TFAutoModelForTokenClassification",
"TFAutoModelForVision2Seq",
"TFAutoModelWithLMHead",
]
)
_import_structure["models.bart"].extend(["TFBartForConditionalGeneration", "TFBartModel", "TFBartPretrainedModel"])
_import_structure["models.bert"].extend(
[
"TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBertEmbeddings",
"TFBertForMaskedLM",
"TFBertForMultipleChoice",
"TFBertForNextSentencePrediction",
"TFBertForPreTraining",
"TFBertForQuestionAnswering",
"TFBertForSequenceClassification",
"TFBertForTokenClassification",
"TFBertLMHeadModel",
"TFBertMainLayer",
"TFBertModel",
"TFBertPreTrainedModel",
]
)
_import_structure["models.blenderbot"].extend(
["TFBlenderbotForConditionalGeneration", "TFBlenderbotModel", "TFBlenderbotPreTrainedModel"]
)
_import_structure["models.blenderbot_small"].extend(
["TFBlenderbotSmallForConditionalGeneration", "TFBlenderbotSmallModel", "TFBlenderbotSmallPreTrainedModel"]
)
_import_structure["models.camembert"].extend(
[
"TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCamembertForCausalLM",
"TFCamembertForMaskedLM",
"TFCamembertForMultipleChoice",
"TFCamembertForQuestionAnswering",
"TFCamembertForSequenceClassification",
"TFCamembertForTokenClassification",
"TFCamembertModel",
]
)
_import_structure["models.clip"].extend(
[
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
)
_import_structure["models.convbert"].extend(
[
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
)
_import_structure["models.convnext"].extend(
[
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
)
_import_structure["models.ctrl"].extend(
[
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
)
_import_structure["models.deberta"].extend(
[
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
)
_import_structure["models.deberta_v2"].extend(
[
"TF_DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaV2ForMaskedLM",
"TFDebertaV2ForQuestionAnswering",
"TFDebertaV2ForSequenceClassification",
"TFDebertaV2ForTokenClassification",
"TFDebertaV2Model",
"TFDebertaV2PreTrainedModel",
]
)
_import_structure["models.distilbert"].extend(
[
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
)
_import_structure["models.dpr"].extend(
[
"TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDPRContextEncoder",
"TFDPRPretrainedContextEncoder",
"TFDPRPretrainedQuestionEncoder",
"TFDPRPretrainedReader",
"TFDPRQuestionEncoder",
"TFDPRReader",
]
)
_import_structure["models.electra"].extend(
[
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
)
_import_structure["models.encoder_decoder"].append("TFEncoderDecoderModel")
_import_structure["models.flaubert"].extend(
[
"TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFlaubertForMultipleChoice",
"TFFlaubertForQuestionAnsweringSimple",
"TFFlaubertForSequenceClassification",
"TFFlaubertForTokenClassification",
"TFFlaubertModel",
"TFFlaubertPreTrainedModel",
"TFFlaubertWithLMHeadModel",
]
)
_import_structure["models.funnel"].extend(
[
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
)
_import_structure["models.gpt2"].extend(
[
"TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGPT2DoubleHeadsModel",
"TFGPT2ForSequenceClassification",
"TFGPT2LMHeadModel",
"TFGPT2MainLayer",
"TFGPT2Model",
"TFGPT2PreTrainedModel",
]
)
_import_structure["models.gptj"].extend(
[
"TFGPTJForCausalLM",
"TFGPTJForQuestionAnswering",
"TFGPTJForSequenceClassification",
"TFGPTJModel",
"TFGPTJPreTrainedModel",
]
)
_import_structure["models.hubert"].extend(
[
"TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFHubertForCTC",
"TFHubertModel",
"TFHubertPreTrainedModel",
]
)
_import_structure["models.layoutlm"].extend(
[
"TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLayoutLMForMaskedLM",
"TFLayoutLMForSequenceClassification",
"TFLayoutLMForTokenClassification",
"TFLayoutLMMainLayer",
"TFLayoutLMModel",
"TFLayoutLMPreTrainedModel",
]
)
_import_structure["models.led"].extend(["TFLEDForConditionalGeneration", "TFLEDModel", "TFLEDPreTrainedModel"])
_import_structure["models.longformer"].extend(
[
"TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLongformerForMaskedLM",
"TFLongformerForMultipleChoice",
"TFLongformerForQuestionAnswering",
"TFLongformerForSequenceClassification",
"TFLongformerForTokenClassification",
"TFLongformerModel",
"TFLongformerPreTrainedModel",
"TFLongformerSelfAttention",
]
)
_import_structure["models.lxmert"].extend(
[
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
)
_import_structure["models.marian"].extend(["TFMarianModel", "TFMarianMTModel", "TFMarianPreTrainedModel"])
_import_structure["models.mbart"].extend(
["TFMBartForConditionalGeneration", "TFMBartModel", "TFMBartPreTrainedModel"]
)
_import_structure["models.mobilebert"].extend(
[
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
)
_import_structure["models.mpnet"].extend(
[
"TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMPNetForMaskedLM",
"TFMPNetForMultipleChoice",
"TFMPNetForQuestionAnswering",
"TFMPNetForSequenceClassification",
"TFMPNetForTokenClassification",
"TFMPNetMainLayer",
"TFMPNetModel",
"TFMPNetPreTrainedModel",
]
)
_import_structure["models.mt5"].extend(["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"])
_import_structure["models.openai"].extend(
[
"TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFOpenAIGPTDoubleHeadsModel",
"TFOpenAIGPTForSequenceClassification",
"TFOpenAIGPTLMHeadModel",
"TFOpenAIGPTMainLayer",
"TFOpenAIGPTModel",
"TFOpenAIGPTPreTrainedModel",
]
)
_import_structure["models.pegasus"].extend(
["TFPegasusForConditionalGeneration", "TFPegasusModel", "TFPegasusPreTrainedModel"]
)
_import_structure["models.rag"].extend(
[
"TFRagModel",
"TFRagPreTrainedModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
]
)
_import_structure["models.rembert"].extend(
[
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
)
_import_structure["models.roberta"].extend(
[
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
)
_import_structure["models.roformer"].extend(
[
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
)
_import_structure["models.speech_to_text"].extend(
[
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
)
_import_structure["models.t5"].extend(
[
"TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFT5EncoderModel",
"TFT5ForConditionalGeneration",
"TFT5Model",
"TFT5PreTrainedModel",
]
)
_import_structure["models.tapas"].extend(
[
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
)
_import_structure["models.transfo_xl"].extend(
[
"TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAdaptiveEmbedding",
"TFTransfoXLForSequenceClassification",
"TFTransfoXLLMHeadModel",
"TFTransfoXLMainLayer",
"TFTransfoXLModel",
"TFTransfoXLPreTrainedModel",
]
)
_import_structure["models.vision_encoder_decoder"].extend(["TFVisionEncoderDecoderModel"])
_import_structure["models.vit"].extend(
[
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
)
_import_structure["models.vit_mae"].extend(
[
"TFViTMAEForPreTraining",
"TFViTMAEModel",
"TFViTMAEPreTrainedModel",
]
)
_import_structure["models.wav2vec2"].extend(
[
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
]
)
_import_structure["models.xlm"].extend(
[
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
)
_import_structure["models.xlm_roberta"].extend(
[
"TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMRobertaForMaskedLM",
"TFXLMRobertaForMultipleChoice",
"TFXLMRobertaForQuestionAnswering",
"TFXLMRobertaForSequenceClassification",
"TFXLMRobertaForTokenClassification",
"TFXLMRobertaModel",
]
)
_import_structure["models.xlnet"].extend(
[
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
)
_import_structure["optimization_tf"] = ["AdamWeightDecay", "GradientAccumulator", "WarmUp", "create_optimizer"]
_import_structure["tf_utils"] = []
_import_structure["trainer_tf"] = ["TFTrainer"]
else:
from .utils import dummy_tf_objects
_import_structure["utils.dummy_tf_objects"] = [name for name in dir(dummy_tf_objects) if not name.startswith("_")]
# FLAX-backed objects
if is_flax_available():
_import_structure["generation_flax_logits_process"] = [
"FlaxForcedBOSTokenLogitsProcessor",
"FlaxForcedEOSTokenLogitsProcessor",
"FlaxLogitsProcessor",
"FlaxLogitsProcessorList",
"FlaxLogitsWarper",
"FlaxMinLengthLogitsProcessor",
"FlaxTemperatureLogitsWarper",
"FlaxTopKLogitsWarper",
"FlaxTopPLogitsWarper",
]
_import_structure["generation_flax_utils"] = []
_import_structure["modeling_flax_outputs"] = []
_import_structure["modeling_flax_utils"] = ["FlaxPreTrainedModel"]
_import_structure["models.albert"].extend(
[
"FlaxAlbertForMaskedLM",
"FlaxAlbertForMultipleChoice",
"FlaxAlbertForPreTraining",
"FlaxAlbertForQuestionAnswering",
"FlaxAlbertForSequenceClassification",
"FlaxAlbertForTokenClassification",
"FlaxAlbertModel",
"FlaxAlbertPreTrainedModel",
]
)
_import_structure["models.auto"].extend(
[
"FLAX_MODEL_FOR_CAUSAL_LM_MAPPING",
"FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING",
"FLAX_MODEL_FOR_MASKED_LM_MAPPING",
"FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING",
"FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING",
"FLAX_MODEL_FOR_PRETRAINING_MAPPING",
"FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING",
"FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING",
"FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING",
"FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING",
"FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING",
"FLAX_MODEL_MAPPING",
"FlaxAutoModel",
"FlaxAutoModelForCausalLM",
"FlaxAutoModelForImageClassification",
"FlaxAutoModelForMaskedLM",
"FlaxAutoModelForMultipleChoice",
"FlaxAutoModelForNextSentencePrediction",
"FlaxAutoModelForPreTraining",
"FlaxAutoModelForQuestionAnswering",
"FlaxAutoModelForSeq2SeqLM",
"FlaxAutoModelForSequenceClassification",
"FlaxAutoModelForTokenClassification",
"FlaxAutoModelForVision2Seq",
]
)
# Flax models structure
_import_structure["models.bart"].extend(
[
"FlaxBartDecoderPreTrainedModel",
"FlaxBartForCausalLM",
"FlaxBartForConditionalGeneration",
"FlaxBartForQuestionAnswering",
"FlaxBartForSequenceClassification",
"FlaxBartModel",
"FlaxBartPreTrainedModel",
]
)
_import_structure["models.beit"].extend(
[
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
)
_import_structure["models.bert"].extend(
[
"FlaxBertForMaskedLM",
"FlaxBertForMultipleChoice",
"FlaxBertForNextSentencePrediction",
"FlaxBertForPreTraining",
"FlaxBertForQuestionAnswering",
"FlaxBertForSequenceClassification",
"FlaxBertForTokenClassification",
"FlaxBertModel",
"FlaxBertPreTrainedModel",
]
)
_import_structure["models.big_bird"].extend(
[
"FlaxBigBirdForMaskedLM",
"FlaxBigBirdForMultipleChoice",
"FlaxBigBirdForPreTraining",
"FlaxBigBirdForQuestionAnswering",
"FlaxBigBirdForSequenceClassification",
"FlaxBigBirdForTokenClassification",
"FlaxBigBirdModel",
"FlaxBigBirdPreTrainedModel",
]
)
_import_structure["models.blenderbot"].extend(
["FlaxBlenderbotForConditionalGeneration", "FlaxBlenderbotModel", "FlaxBlenderbotPreTrainedModel"]
)
_import_structure["models.blenderbot_small"].extend(
[
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
)
_import_structure["models.clip"].extend(
[
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
)
_import_structure["models.distilbert"].extend(
[
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
)
_import_structure["models.electra"].extend(
[
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
)
_import_structure["models.encoder_decoder"].append("FlaxEncoderDecoderModel")
_import_structure["models.gpt2"].extend(["FlaxGPT2LMHeadModel", "FlaxGPT2Model", "FlaxGPT2PreTrainedModel"])
_import_structure["models.gpt_neo"].extend(
["FlaxGPTNeoForCausalLM", "FlaxGPTNeoModel", "FlaxGPTNeoPreTrainedModel"]
)
_import_structure["models.gptj"].extend(["FlaxGPTJForCausalLM", "FlaxGPTJModel", "FlaxGPTJPreTrainedModel"])
_import_structure["models.marian"].extend(
[
"FlaxMarianModel",
"FlaxMarianMTModel",
"FlaxMarianPreTrainedModel",
]
)
_import_structure["models.mbart"].extend(
[
"FlaxMBartForConditionalGeneration",
"FlaxMBartForQuestionAnswering",
"FlaxMBartForSequenceClassification",
"FlaxMBartModel",
"FlaxMBartPreTrainedModel",
]
)
_import_structure["models.mt5"].extend(["FlaxMT5ForConditionalGeneration", "FlaxMT5Model"])
_import_structure["models.pegasus"].extend(
[
"FlaxPegasusForConditionalGeneration",
"FlaxPegasusModel",
"FlaxPegasusPreTrainedModel",
]
)
_import_structure["models.roberta"].extend(
[
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
)
_import_structure["models.roformer"].extend(
[
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
)
_import_structure["models.speech_encoder_decoder"].append("FlaxSpeechEncoderDecoderModel")
_import_structure["models.t5"].extend(["FlaxT5ForConditionalGeneration", "FlaxT5Model", "FlaxT5PreTrainedModel"])
_import_structure["models.vision_encoder_decoder"].append("FlaxVisionEncoderDecoderModel")
_import_structure["models.vision_text_dual_encoder"].extend(["FlaxVisionTextDualEncoderModel"])
_import_structure["models.vit"].extend(["FlaxViTForImageClassification", "FlaxViTModel", "FlaxViTPreTrainedModel"])
_import_structure["models.wav2vec2"].extend(
["FlaxWav2Vec2ForCTC", "FlaxWav2Vec2ForPreTraining", "FlaxWav2Vec2Model", "FlaxWav2Vec2PreTrainedModel"]
)
_import_structure["models.xglm"].extend(
[
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
)
_import_structure["models.xlm_roberta"].extend(
[
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
]
)
else:
from .utils import dummy_flax_objects
_import_structure["utils.dummy_flax_objects"] = [
name for name in dir(dummy_flax_objects) if not name.startswith("_")
]
# Direct imports for type-checking
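# At runtime these names are resolved lazily from `_import_structure`; the direct imports below
# exist only so that static type checkers and IDEs can see the real objects.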
if TYPE_CHECKING:
# Configuration
from .configuration_utils import PretrainedConfig
# Data
from .data import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadV1Processor,
SquadV2Processor,
glue_compute_metrics,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_compute_metrics,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
from .data.data_collator import (
DataCollator,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .feature_extraction_sequence_utils import SequenceFeatureExtractor
# Feature Extractor
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .hf_argparser import HfArgumentParser
# Integrations
from .integrations import (
is_comet_available,
is_optuna_available,
is_ray_available,
is_ray_tune_available,
is_sigopt_available,
is_tensorboard_available,
is_wandb_available,
)
# Model Cards
from .modelcard import ModelCard
# TF 2.0 <=> PyTorch conversion utilities
from .modeling_tf_pytorch_utils import (
convert_tf_weight_name_to_pt_weight_name,
load_pytorch_checkpoint_in_tf2_model,
load_pytorch_model_in_tf2_model,
load_pytorch_weights_in_tf2_model,
load_tf2_checkpoint_in_pytorch_model,
load_tf2_model_in_pytorch_model,
load_tf2_weights_in_pytorch_model,
)
from .models.albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig
from .models.auto import (
ALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
MODEL_NAMES_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
)
from .models.bart import BartConfig, BartTokenizer
from .models.beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig
from .models.bert import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BasicTokenizer,
BertConfig,
BertTokenizer,
WordpieceTokenizer,
)
from .models.bert_generation import BertGenerationConfig
from .models.bert_japanese import BertJapaneseTokenizer, CharacterTokenizer, MecabTokenizer
from .models.bertweet import BertweetTokenizer
from .models.big_bird import BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdConfig
from .models.bigbird_pegasus import BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig
from .models.blenderbot import BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotTokenizer
from .models.blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallTokenizer,
)
from .models.byt5 import ByT5Tokenizer
from .models.camembert import CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CamembertConfig
from .models.canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig, CanineTokenizer
from .models.clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPTextConfig,
CLIPTokenizer,
CLIPVisionConfig,
)
from .models.convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertTokenizer
from .models.convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig
from .models.cpm import CpmTokenizer
from .models.ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig, CTRLTokenizer
from .models.data2vec import DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig, Data2VecTextConfig
from .models.deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaTokenizer
from .models.deberta_v2 import DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaV2Config
from .models.decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
DecisionTransformerConfig,
)
from .models.deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig
from .models.detr import DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, DetrConfig
from .models.distilbert import DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertTokenizer
from .models.dpr import (
DPR_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPRConfig,
DPRContextEncoderTokenizer,
DPRQuestionEncoderTokenizer,
DPRReaderOutput,
DPRReaderTokenizer,
)
from .models.dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
from .models.electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraTokenizer
from .models.encoder_decoder import EncoderDecoderConfig
from .models.flaubert import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FlaubertConfig, FlaubertTokenizer
from .models.fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig, FNetTokenizer
from .models.fsmt import FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP, FSMTConfig, FSMTTokenizer
from .models.funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig, FunnelTokenizer
from .models.glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
from .models.gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config, GPT2Tokenizer
from .models.gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig
from .models.gptj import GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTJConfig
from .models.herbert import HerbertTokenizer
from .models.hubert import HUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, HubertConfig
from .models.ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig
from .models.imagegpt import IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ImageGPTConfig
from .models.layoutlm import LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMConfig, LayoutLMTokenizer
from .models.layoutlmv2 import (
LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMv2Config,
LayoutLMv2FeatureExtractor,
LayoutLMv2Processor,
LayoutLMv2Tokenizer,
)
from .models.layoutxlm import LayoutXLMProcessor
from .models.led import LED_PRETRAINED_CONFIG_ARCHIVE_MAP, LEDConfig, LEDTokenizer
from .models.longformer import LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerTokenizer
from .models.luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig, LukeTokenizer
from .models.lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig, LxmertTokenizer
from .models.m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config
from .models.marian import MarianConfig
from .models.maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .models.mbart import MBartConfig
from .models.megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
from .models.mmbt import MMBTConfig
from .models.mobilebert import MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig, MobileBertTokenizer
from .models.mpnet import MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP, MPNetConfig, MPNetTokenizer
from .models.mt5 import MT5Config
from .models.nystromformer import NYSTROMFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, NystromformerConfig
from .models.openai import OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OpenAIGPTConfig, OpenAIGPTTokenizer
from .models.pegasus import PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusConfig, PegasusTokenizer
from .models.perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverTokenizer
from .models.phobert import PhobertTokenizer
from .models.plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
from .models.poolformer import POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, PoolFormerConfig
from .models.prophetnet import PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ProphetNetConfig, ProphetNetTokenizer
from .models.qdqbert import QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, QDQBertConfig
from .models.rag import RagConfig, RagRetriever, RagTokenizer
from .models.realm import REALM_PRETRAINED_CONFIG_ARCHIVE_MAP, RealmConfig, RealmTokenizer
from .models.reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
from .models.rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig
from .models.resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig
from .models.retribert import RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RetriBertConfig, RetriBertTokenizer
from .models.roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaTokenizer
from .models.roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerTokenizer
from .models.segformer import SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SegformerConfig
from .models.sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
from .models.sew_d import SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWDConfig
from .models.speech_encoder_decoder import SpeechEncoderDecoderConfig
from .models.speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
from .models.speech_to_text_2 import (
SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
Speech2Text2Config,
Speech2Text2Processor,
Speech2Text2Tokenizer,
)
from .models.splinter import SPLINTER_PRETRAINED_CONFIG_ARCHIVE_MAP, SplinterConfig, SplinterTokenizer
from .models.squeezebert import SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertTokenizer
from .models.swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig
from .models.t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5Config
from .models.tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig, TapasTokenizer
from .models.transfo_xl import (
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
TransfoXLConfig,
TransfoXLCorpus,
TransfoXLTokenizer,
)
from .models.trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig, TrOCRProcessor
from .models.unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
from .models.unispeech_sat import UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechSatConfig
from .models.van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
from .models.vilt import VILT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViltConfig, ViltFeatureExtractor, ViltProcessor
from .models.vision_encoder_decoder import VisionEncoderDecoderConfig
from .models.vision_text_dual_encoder import VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor
from .models.visual_bert import VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, VisualBertConfig
from .models.vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig
from .models.vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
from .models.wav2vec2 import (
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
Wav2Vec2Config,
Wav2Vec2CTCTokenizer,
Wav2Vec2FeatureExtractor,
Wav2Vec2Processor,
Wav2Vec2Tokenizer,
)
from .models.wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
from .models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
from .models.wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
from .models.xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
from .models.xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMTokenizer
from .models.xlm_prophetnet import XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMProphetNetConfig
from .models.xlm_roberta import XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig
from .models.xlm_roberta_xl import XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig
from .models.xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
from .models.yoso import YOSO_PRETRAINED_CONFIG_ARCHIVE_MAP, YosoConfig
# Pipelines
from .pipelines import (
AudioClassificationPipeline,
AutomaticSpeechRecognitionPipeline,
Conversation,
ConversationalPipeline,
CsvPipelineDataFormat,
FeatureExtractionPipeline,
FillMaskPipeline,
ImageClassificationPipeline,
ImageSegmentationPipeline,
JsonPipelineDataFormat,
NerPipeline,
ObjectDetectionPipeline,
PipedPipelineDataFormat,
Pipeline,
PipelineDataFormat,
QuestionAnsweringPipeline,
SummarizationPipeline,
TableQuestionAnsweringPipeline,
Text2TextGenerationPipeline,
TextClassificationPipeline,
TextGenerationPipeline,
TokenClassificationPipeline,
TranslationPipeline,
ZeroShotClassificationPipeline,
ZeroShotImageClassificationPipeline,
pipeline,
)
from .processing_utils import ProcessorMixin
# Tokenization
from .tokenization_utils import PreTrainedTokenizer
from .tokenization_utils_base import (
AddedToken,
BatchEncoding,
CharSpan,
PreTrainedTokenizerBase,
SpecialTokensMixin,
TokenSpan,
)
# Trainer
from .trainer_callback import (
DefaultFlowCallback,
EarlyStoppingCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from .trainer_utils import EvalPrediction, IntervalStrategy, SchedulerType, set_seed
from .training_args import TrainingArguments
from .training_args_seq2seq import Seq2SeqTrainingArguments
from .training_args_tf import TFTrainingArguments
# Files and general utilities
from .utils import (
CONFIG_NAME,
MODEL_CARD_NAME,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
TensorType,
add_end_docstrings,
add_start_docstrings,
cached_path,
is_apex_available,
is_datasets_available,
is_faiss_available,
is_flax_available,
is_phonemizer_available,
is_psutil_available,
is_py3nvml_available,
is_pyctcdecode_available,
        is_scatter_available,
        is_scipy_available,
is_sentencepiece_available,
is_sklearn_available,
is_speech_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_tpu_available,
is_vision_available,
logging,
)
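    # SentencePiece-based (slow) tokenizers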
if is_sentencepiece_available():
from .models.albert import AlbertTokenizer
from .models.barthez import BarthezTokenizer
from .models.bartpho import BartphoTokenizer
from .models.bert_generation import BertGenerationTokenizer
from .models.big_bird import BigBirdTokenizer
from .models.camembert import CamembertTokenizer
from .models.deberta_v2 import DebertaV2Tokenizer
from .models.layoutxlm import LayoutXLMTokenizer
from .models.m2m_100 import M2M100Tokenizer
from .models.marian import MarianTokenizer
from .models.mbart import MBart50Tokenizer, MBartTokenizer
from .models.mluke import MLukeTokenizer
from .models.mt5 import MT5Tokenizer
from .models.pegasus import PegasusTokenizer
from .models.plbart import PLBartTokenizer
from .models.reformer import ReformerTokenizer
from .models.rembert import RemBertTokenizer
from .models.speech_to_text import Speech2TextTokenizer
from .models.t5 import T5Tokenizer
from .models.xglm import XGLMTokenizer
from .models.xlm_prophetnet import XLMProphetNetTokenizer
from .models.xlm_roberta import XLMRobertaTokenizer
from .models.xlnet import XLNetTokenizer
else:
from .utils.dummy_sentencepiece_objects import *
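    # Fast tokenizers backed by the Rust `tokenizers` library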
if is_tokenizers_available():
from .models.albert import AlbertTokenizerFast
from .models.bart import BartTokenizerFast
from .models.barthez import BarthezTokenizerFast
from .models.bert import BertTokenizerFast
from .models.big_bird import BigBirdTokenizerFast
from .models.blenderbot import BlenderbotTokenizerFast
from .models.blenderbot_small import BlenderbotSmallTokenizerFast
from .models.camembert import CamembertTokenizerFast
from .models.clip import CLIPTokenizerFast
from .models.convbert import ConvBertTokenizerFast
from .models.deberta import DebertaTokenizerFast
from .models.distilbert import DistilBertTokenizerFast
from .models.dpr import DPRContextEncoderTokenizerFast, DPRQuestionEncoderTokenizerFast, DPRReaderTokenizerFast
from .models.electra import ElectraTokenizerFast
from .models.fnet import FNetTokenizerFast
from .models.funnel import FunnelTokenizerFast
from .models.gpt2 import GPT2TokenizerFast
from .models.herbert import HerbertTokenizerFast
from .models.layoutlm import LayoutLMTokenizerFast
from .models.layoutlmv2 import LayoutLMv2TokenizerFast
from .models.layoutxlm import LayoutXLMTokenizerFast
from .models.led import LEDTokenizerFast
from .models.longformer import LongformerTokenizerFast
from .models.lxmert import LxmertTokenizerFast
from .models.mbart import MBartTokenizerFast
from .models.mbart50 import MBart50TokenizerFast
from .models.mobilebert import MobileBertTokenizerFast
from .models.mpnet import MPNetTokenizerFast
from .models.mt5 import MT5TokenizerFast
from .models.openai import OpenAIGPTTokenizerFast
from .models.pegasus import PegasusTokenizerFast
from .models.realm import RealmTokenizerFast
from .models.reformer import ReformerTokenizerFast
from .models.rembert import RemBertTokenizerFast
from .models.retribert import RetriBertTokenizerFast
from .models.roberta import RobertaTokenizerFast
from .models.roformer import RoFormerTokenizerFast
from .models.splinter import SplinterTokenizerFast
from .models.squeezebert import SqueezeBertTokenizerFast
from .models.t5 import T5TokenizerFast
from .models.xglm import XGLMTokenizerFast
from .models.xlm_roberta import XLMRobertaTokenizerFast
from .models.xlnet import XLNetTokenizerFast
from .tokenization_utils_fast import PreTrainedTokenizerFast
else:
from .utils.dummy_tokenizers_objects import *
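    # Slow-to-fast tokenizer conversion needs both sentencepiece and tokenizers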
if is_sentencepiece_available() and is_tokenizers_available():
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS, convert_slow_tokenizer
else:
from .utils.dummy_sentencepiece_and_tokenizers_objects import *
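    # Speech feature extraction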
if is_speech_available():
from .models.speech_to_text import Speech2TextFeatureExtractor
else:
from .utils.dummy_speech_objects import *
if is_speech_available() and is_sentencepiece_available():
from .models.speech_to_text import Speech2TextProcessor
else:
from .utils.dummy_sentencepiece_and_speech_objects import *
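    # Vision feature extractors and processors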
if is_vision_available():
from .image_utils import ImageFeatureExtractionMixin
from .models.beit import BeitFeatureExtractor
from .models.clip import CLIPFeatureExtractor, CLIPProcessor
from .models.convnext import ConvNextFeatureExtractor
from .models.deit import DeiTFeatureExtractor
from .models.detr import DetrFeatureExtractor
from .models.dpt import DPTFeatureExtractor
from .models.glpn import GLPNFeatureExtractor
from .models.imagegpt import ImageGPTFeatureExtractor
from .models.layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2Processor
from .models.layoutxlm import LayoutXLMProcessor
from .models.maskformer import MaskFormerFeatureExtractor
from .models.perceiver import PerceiverFeatureExtractor
from .models.poolformer import PoolFormerFeatureExtractor
from .models.segformer import SegformerFeatureExtractor
from .models.vilt import ViltFeatureExtractor, ViltProcessor
from .models.vit import ViTFeatureExtractor
else:
from .utils.dummy_vision_objects import *
# Modeling
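    # DETR needs both timm (for its backbone) and vision support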
if is_timm_available() and is_vision_available():
from .models.detr import (
DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
DetrForObjectDetection,
DetrForSegmentation,
DetrModel,
DetrPreTrainedModel,
)
else:
from .utils.dummy_timm_objects import *
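    # TAPAS relies on torch-scatter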
if is_scatter_available():
from .models.tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
else:
from .utils.dummy_scatter_objects import *
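    # PyTorch-backed objects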
if is_torch_available():
# Benchmarks
from .benchmark.benchmark import PyTorchBenchmark
from .benchmark.benchmark_args import PyTorchBenchmarkArguments
from .data.datasets import (
GlueDataset,
GlueDataTrainingArguments,
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
SquadDataset,
SquadDataTrainingArguments,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .generation_beam_constraints import (
Constraint,
ConstraintListState,
DisjunctiveConstraint,
PhrasalConstraint,
)
from .generation_beam_search import BeamScorer, BeamSearchScorer, ConstrainedBeamSearchScorer
from .generation_logits_process import (
ForcedBOSTokenLogitsProcessor,
ForcedEOSTokenLogitsProcessor,
HammingDiversityLogitsProcessor,
InfNanRemoveLogitsProcessor,
LogitsProcessor,
LogitsProcessorList,
LogitsWarper,
MinLengthLogitsProcessor,
NoBadWordsLogitsProcessor,
NoRepeatNGramLogitsProcessor,
PrefixConstrainedLogitsProcessor,
RepetitionPenaltyLogitsProcessor,
TemperatureLogitsWarper,
TopKLogitsWarper,
TopPLogitsWarper,
)
from .generation_stopping_criteria import (
MaxLengthCriteria,
MaxTimeCriteria,
StoppingCriteria,
StoppingCriteriaList,
)
from .generation_utils import top_k_top_p_filtering
from .modeling_utils import Conv1D, PreTrainedModel, apply_chunking_to_forward, prune_layer
from .models.albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
from .models.auto import (
MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING,
MODEL_FOR_AUDIO_XVECTOR_MAPPING,
MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING,
MODEL_FOR_CAUSAL_LM_MAPPING,
MODEL_FOR_CTC_MAPPING,
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_IMAGE_SEGMENTATION_MAPPING,
MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
MODEL_FOR_OBJECT_DETECTION_MAPPING,
MODEL_FOR_PRETRAINING_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING,
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
MODEL_FOR_VISION_2_SEQ_MAPPING,
MODEL_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoModel,
AutoModelForAudioClassification,
AutoModelForAudioFrameClassification,
AutoModelForAudioXVector,
AutoModelForCausalLM,
AutoModelForCTC,
AutoModelForImageClassification,
AutoModelForImageSegmentation,
AutoModelForInstanceSegmentation,
AutoModelForMaskedImageModeling,
AutoModelForMaskedLM,
AutoModelForMultipleChoice,
AutoModelForNextSentencePrediction,
AutoModelForObjectDetection,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSemanticSegmentation,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForSpeechSeq2Seq,
AutoModelForTableQuestionAnswering,
AutoModelForTokenClassification,
AutoModelForVision2Seq,
AutoModelWithLMHead,
)
from .models.bart import (
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BartForCausalLM,
BartForConditionalGeneration,
BartForQuestionAnswering,
BartForSequenceClassification,
BartModel,
BartPretrainedModel,
PretrainedBartModel,
)
from .models.beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
from .models.bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
from .models.bert_generation import (
BertGenerationDecoder,
BertGenerationEncoder,
BertGenerationPreTrainedModel,
load_tf_weights_in_bert_generation,
)
from .models.big_bird import (
BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdForCausalLM,
BigBirdForMaskedLM,
BigBirdForMultipleChoice,
BigBirdForPreTraining,
BigBirdForQuestionAnswering,
BigBirdForSequenceClassification,
BigBirdForTokenClassification,
BigBirdLayer,
BigBirdModel,
BigBirdPreTrainedModel,
load_tf_weights_in_big_bird,
)
from .models.bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
from .models.blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
from .models.blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
from .models.camembert import (
CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
CamembertForCausalLM,
CamembertForMaskedLM,
CamembertForMultipleChoice,
CamembertForQuestionAnswering,
CamembertForSequenceClassification,
CamembertForTokenClassification,
CamembertModel,
)
from .models.canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
from .models.clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPVisionModel,
)
from .models.convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
from .models.convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
from .models.ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
from .models.data2vec import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
Data2VecAudioForAudioFrameClassification,
Data2VecAudioForCTC,
Data2VecAudioForSequenceClassification,
Data2VecAudioForXVector,
Data2VecAudioModel,
Data2VecAudioPreTrainedModel,
Data2VecTextForCausalLM,
Data2VecTextForMaskedLM,
Data2VecTextForMultipleChoice,
Data2VecTextForQuestionAnswering,
Data2VecTextForSequenceClassification,
Data2VecTextForTokenClassification,
Data2VecTextModel,
Data2VecTextPreTrainedModel,
)
from .models.deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
from .models.deberta_v2 import (
DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaV2ForMaskedLM,
DebertaV2ForQuestionAnswering,
DebertaV2ForSequenceClassification,
DebertaV2ForTokenClassification,
DebertaV2Model,
DebertaV2PreTrainedModel,
)
from .models.decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
DecisionTransformerGPT2Model,
DecisionTransformerGPT2PreTrainedModel,
DecisionTransformerModel,
DecisionTransformerPreTrainedModel,
)
from .models.deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
from .models.distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
from .models.dpr import (
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPRContextEncoder,
DPRPretrainedContextEncoder,
DPRPreTrainedModel,
DPRPretrainedQuestionEncoder,
DPRPretrainedReader,
DPRQuestionEncoder,
DPRReader,
)
from .models.dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
from .models.electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
from .models.encoder_decoder import EncoderDecoderModel
from .models.flaubert import (
FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from .models.fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
from .models.fsmt import FSMTForConditionalGeneration, FSMTModel, PretrainedFSMTModel
from .models.funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
from .models.glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNModel,
GLPNPreTrainedModel,
)
from .models.gpt2 import (
GPT2_PRETRAINED_MODEL_ARCHIVE_LIST,
GPT2DoubleHeadsModel,
GPT2ForSequenceClassification,
GPT2ForTokenClassification,
GPT2LMHeadModel,
GPT2Model,
GPT2PreTrainedModel,
load_tf_weights_in_gpt2,
)
from .models.gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForSequenceClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
from .models.gptj import (
GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTJForCausalLM,
GPTJForQuestionAnswering,
GPTJForSequenceClassification,
GPTJModel,
GPTJPreTrainedModel,
)
from .models.hubert import (
HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
HubertForCTC,
HubertForSequenceClassification,
HubertModel,
HubertPreTrainedModel,
)
from .models.ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
from .models.imagegpt import (
IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
ImageGPTForCausalImageModeling,
ImageGPTForImageClassification,
ImageGPTModel,
ImageGPTPreTrainedModel,
load_tf_weights_in_imagegpt,
)
from .models.layoutlm import (
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMForMaskedLM,
LayoutLMForSequenceClassification,
LayoutLMForTokenClassification,
LayoutLMModel,
LayoutLMPreTrainedModel,
)
from .models.layoutlmv2 import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMv2ForQuestionAnswering,
LayoutLMv2ForSequenceClassification,
LayoutLMv2ForTokenClassification,
LayoutLMv2Model,
LayoutLMv2PreTrainedModel,
)
from .models.led import (
LED_PRETRAINED_MODEL_ARCHIVE_LIST,
LEDForConditionalGeneration,
LEDForQuestionAnswering,
LEDForSequenceClassification,
LEDModel,
LEDPreTrainedModel,
)
from .models.longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
from .models.luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeModel,
LukePreTrainedModel,
)
from .models.lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
from .models.m2m_100 import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
M2M100ForConditionalGeneration,
M2M100Model,
M2M100PreTrainedModel,
)
from .models.marian import MarianForCausalLM, MarianModel, MarianMTModel
from .models.maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .models.mbart import (
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
from .models.megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
from .models.mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
from .models.mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
from .models.mpnet import (
MPNET_PRETRAINED_MODEL_ARCHIVE_LIST,
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetLayer,
MPNetModel,
MPNetPreTrainedModel,
)
from .models.mt5 import MT5EncoderModel, MT5ForConditionalGeneration, MT5Model
from .models.nystromformer import (
NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerLayer,
NystromformerModel,
NystromformerPreTrainedModel,
)
from .models.openai import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
OpenAIGPTPreTrainedModel,
load_tf_weights_in_openai_gpt,
)
from .models.pegasus import (
PegasusForCausalLM,
PegasusForConditionalGeneration,
PegasusModel,
PegasusPreTrainedModel,
)
from .models.perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
from .models.plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
from .models.poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
from .models.prophetnet import (
PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ProphetNetDecoder,
ProphetNetEncoder,
ProphetNetForCausalLM,
ProphetNetForConditionalGeneration,
ProphetNetModel,
ProphetNetPreTrainedModel,
)
from .models.qdqbert import (
QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
QDQBertForMaskedLM,
QDQBertForMultipleChoice,
QDQBertForNextSentencePrediction,
QDQBertForQuestionAnswering,
QDQBertForSequenceClassification,
QDQBertForTokenClassification,
QDQBertLayer,
QDQBertLMHeadModel,
QDQBertModel,
QDQBertPreTrainedModel,
load_tf_weights_in_qdqbert,
)
from .models.rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
from .models.realm import (
REALM_PRETRAINED_MODEL_ARCHIVE_LIST,
RealmEmbedder,
RealmForOpenQA,
RealmKnowledgeAugEncoder,
RealmPreTrainedModel,
RealmReader,
RealmRetriever,
RealmScorer,
load_tf_weights_in_realm,
)
from .models.reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
from .models.rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
from .models.resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
from .models.retribert import RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RetriBertModel, RetriBertPreTrainedModel
from .models.roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
from .models.roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
from .models.segformer import (
SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SegformerDecodeHead,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerLayer,
SegformerModel,
SegformerPreTrainedModel,
)
from .models.sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
from .models.sew_d import (
SEW_D_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWDForCTC,
SEWDForSequenceClassification,
SEWDModel,
SEWDPreTrainedModel,
)
from .models.speech_encoder_decoder import SpeechEncoderDecoderModel
from .models.speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
Speech2TextForConditionalGeneration,
Speech2TextModel,
Speech2TextPreTrainedModel,
)
from .models.speech_to_text_2 import Speech2Text2ForCausalLM, Speech2Text2PreTrainedModel
from .models.splinter import (
SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST,
SplinterForQuestionAnswering,
SplinterLayer,
SplinterModel,
SplinterPreTrainedModel,
)
from .models.squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
from .models.swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
from .models.t5 import (
T5_PRETRAINED_MODEL_ARCHIVE_LIST,
T5EncoderModel,
T5ForConditionalGeneration,
T5Model,
T5PreTrainedModel,
load_tf_weights_in_t5,
)
from .models.transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
from .models.trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
from .models.unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
from .models.unispeech_sat import (
UNISPEECH_SAT_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForCTC,
UniSpeechSatForPreTraining,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
UniSpeechSatModel,
UniSpeechSatPreTrainedModel,
)
from .models.van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
from .models.vilt import (
VILT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltLayer,
ViltModel,
ViltPreTrainedModel,
)
from .models.vision_encoder_decoder import VisionEncoderDecoderModel
from .models.vision_text_dual_encoder import VisionTextDualEncoderModel
from .models.visual_bert import (
VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForRegionToPhraseAlignment,
VisualBertForVisualReasoning,
VisualBertLayer,
VisualBertModel,
VisualBertPreTrainedModel,
)
from .models.vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
from .models.vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
from .models.wav2vec2 import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
Wav2Vec2ForAudioFrameClassification,
Wav2Vec2ForCTC,
Wav2Vec2ForMaskedLM,
Wav2Vec2ForPreTraining,
Wav2Vec2ForSequenceClassification,
Wav2Vec2ForXVector,
Wav2Vec2Model,
Wav2Vec2PreTrainedModel,
)
from .models.wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
from .models.xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
from .models.xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
from .models.xlm_prophetnet import (
XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMProphetNetDecoder,
XLMProphetNetEncoder,
XLMProphetNetForCausalLM,
XLMProphetNetForConditionalGeneration,
XLMProphetNetModel,
)
from .models.xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
)
from .models.xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
from .models.xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
from .models.yoso import (
YOSO_PRETRAINED_MODEL_ARCHIVE_LIST,
YosoForMaskedLM,
YosoForMultipleChoice,
YosoForQuestionAnswering,
YosoForSequenceClassification,
YosoForTokenClassification,
YosoLayer,
YosoModel,
YosoPreTrainedModel,
)
# Optimization
from .optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
# Trainer
from .trainer import Trainer
from .trainer_pt_utils import torch_distributed_zero_first
from .trainer_seq2seq import Seq2SeqTrainer
else:
from .utils.dummy_pt_objects import *
# TensorFlow
if is_tf_available():
from .benchmark.benchmark_args_tf import TensorFlowBenchmarkArguments
# Benchmarks
from .benchmark.benchmark_tf import TensorFlowBenchmark
from .generation_tf_logits_process import (
TFLogitsProcessor,
TFLogitsProcessorList,
TFLogitsWarper,
TFMinLengthLogitsProcessor,
TFNoBadWordsLogitsProcessor,
TFNoRepeatNGramLogitsProcessor,
TFRepetitionPenaltyLogitsProcessor,
TFTemperatureLogitsWarper,
TFTopKLogitsWarper,
TFTopPLogitsWarper,
)
from .generation_tf_utils import tf_top_k_top_p_filtering
from .keras_callbacks import KerasMetricCallback, PushToHubCallback
from .modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMMainLayer,
TFLayoutLMModel,
TFLayoutLMPreTrainedModel,
)
from .modeling_tf_utils import TFPreTrainedModel, TFSequenceSummary, TFSharedEmbeddings, shape_list
from .models.albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
from .models.auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_VISION_2_SEQ_MAPPING,
TF_MODEL_MAPPING,
TF_MODEL_WITH_LM_HEAD_MAPPING,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForImageClassification,
TFAutoModelForMaskedLM,
TFAutoModelForMultipleChoice,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForSpeechSeq2Seq,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelForVision2Seq,
TFAutoModelWithLMHead,
)
from .models.bart import TFBartForConditionalGeneration, TFBartModel, TFBartPretrainedModel
from .models.bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
from .models.blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
from .models.blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
from .models.camembert import (
TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCamembertForCausalLM,
TFCamembertForMaskedLM,
TFCamembertForMultipleChoice,
TFCamembertForQuestionAnswering,
TFCamembertForSequenceClassification,
TFCamembertForTokenClassification,
TFCamembertModel,
)
from .models.clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
from .models.convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
from .models.convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
from .models.ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
from .models.deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
from .models.deberta_v2 import (
TF_DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaV2ForMaskedLM,
TFDebertaV2ForQuestionAnswering,
TFDebertaV2ForSequenceClassification,
TFDebertaV2ForTokenClassification,
TFDebertaV2Model,
TFDebertaV2PreTrainedModel,
)
from .models.distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
from .models.dpr import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDPRContextEncoder,
TFDPRPretrainedContextEncoder,
TFDPRPretrainedQuestionEncoder,
TFDPRPretrainedReader,
TFDPRQuestionEncoder,
TFDPRReader,
)
from .models.electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
from .models.encoder_decoder import TFEncoderDecoderModel
from .models.flaubert import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertPreTrainedModel,
TFFlaubertWithLMHeadModel,
)
from .models.funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
from .models.gpt2 import (
TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGPT2DoubleHeadsModel,
TFGPT2ForSequenceClassification,
TFGPT2LMHeadModel,
TFGPT2MainLayer,
TFGPT2Model,
TFGPT2PreTrainedModel,
)
from .models.gptj import (
TFGPTJForCausalLM,
TFGPTJForQuestionAnswering,
TFGPTJForSequenceClassification,
TFGPTJModel,
TFGPTJPreTrainedModel,
)
from .models.hubert import (
TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFHubertForCTC,
TFHubertModel,
TFHubertPreTrainedModel,
)
from .models.led import TFLEDForConditionalGeneration, TFLEDModel, TFLEDPreTrainedModel
from .models.longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
from .models.lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
from .models.marian import TFMarianModel, TFMarianMTModel, TFMarianPreTrainedModel
from .models.mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
from .models.mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
from .models.mpnet import (
TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMPNetForMaskedLM,
TFMPNetForMultipleChoice,
TFMPNetForQuestionAnswering,
TFMPNetForSequenceClassification,
TFMPNetForTokenClassification,
TFMPNetMainLayer,
TFMPNetModel,
TFMPNetPreTrainedModel,
)
from .models.mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
from .models.openai import (
TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFOpenAIGPTDoubleHeadsModel,
TFOpenAIGPTForSequenceClassification,
TFOpenAIGPTLMHeadModel,
TFOpenAIGPTMainLayer,
TFOpenAIGPTModel,
TFOpenAIGPTPreTrainedModel,
)
from .models.pegasus import TFPegasusForConditionalGeneration, TFPegasusModel, TFPegasusPreTrainedModel
from .models.rag import TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration
from .models.rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
from .models.roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
from .models.roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
from .models.speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeech2TextForConditionalGeneration,
TFSpeech2TextModel,
TFSpeech2TextPreTrainedModel,
)
from .models.t5 import (
TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST,
TFT5EncoderModel,
TFT5ForConditionalGeneration,
TFT5Model,
TFT5PreTrainedModel,
)
from .models.tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
from .models.transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
from .models.vision_encoder_decoder import TFVisionEncoderDecoderModel
from .models.vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
from .models.vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
from .models.wav2vec2 import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWav2Vec2ForCTC,
TFWav2Vec2Model,
TFWav2Vec2PreTrainedModel,
)
from .models.xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
from .models.xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
)
from .models.xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
# Optimization
from .optimization_tf import AdamWeightDecay, GradientAccumulator, WarmUp, create_optimizer
# Trainer
from .trainer_tf import TFTrainer
else:
# Import the same objects as dummies to get them in the namespace.
# They will raise an import error if the user tries to instantiate / use them.
from .utils.dummy_tf_objects import *
if is_flax_available():
from .generation_flax_logits_process import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessor,
FlaxLogitsProcessorList,
FlaxLogitsWarper,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
from .modeling_flax_utils import FlaxPreTrainedModel
from .models.albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
from .models.auto import (
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
FLAX_MODEL_FOR_MASKED_LM_MAPPING,
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
FLAX_MODEL_FOR_PRETRAINING_MAPPING,
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING,
FLAX_MODEL_MAPPING,
FlaxAutoModel,
FlaxAutoModelForCausalLM,
FlaxAutoModelForImageClassification,
FlaxAutoModelForMaskedLM,
FlaxAutoModelForMultipleChoice,
FlaxAutoModelForNextSentencePrediction,
FlaxAutoModelForPreTraining,
FlaxAutoModelForQuestionAnswering,
FlaxAutoModelForSeq2SeqLM,
FlaxAutoModelForSequenceClassification,
FlaxAutoModelForTokenClassification,
FlaxAutoModelForVision2Seq,
)
from .models.bart import (
FlaxBartDecoderPreTrainedModel,
FlaxBartForCausalLM,
FlaxBartForConditionalGeneration,
FlaxBartForQuestionAnswering,
FlaxBartForSequenceClassification,
FlaxBartModel,
FlaxBartPreTrainedModel,
)
from .models.beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
from .models.bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
from .models.big_bird import (
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
FlaxBigBirdPreTrainedModel,
)
from .models.blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
from .models.blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
from .models.clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
from .models.distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
from .models.electra import (
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
from .models.encoder_decoder import FlaxEncoderDecoderModel
from .models.gpt2 import FlaxGPT2LMHeadModel, FlaxGPT2Model, FlaxGPT2PreTrainedModel
from .models.gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
from .models.gptj import FlaxGPTJForCausalLM, FlaxGPTJModel, FlaxGPTJPreTrainedModel
from .models.marian import FlaxMarianModel, FlaxMarianMTModel, FlaxMarianPreTrainedModel
from .models.mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
from .models.mt5 import FlaxMT5ForConditionalGeneration, FlaxMT5Model
from .models.pegasus import FlaxPegasusForConditionalGeneration, FlaxPegasusModel, FlaxPegasusPreTrainedModel
from .models.roberta import (
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
from .models.roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
from .models.speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
from .models.t5 import FlaxT5ForConditionalGeneration, FlaxT5Model, FlaxT5PreTrainedModel
from .models.vision_encoder_decoder import FlaxVisionEncoderDecoderModel
from .models.vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
from .models.vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
from .models.wav2vec2 import (
FlaxWav2Vec2ForCTC,
FlaxWav2Vec2ForPreTraining,
FlaxWav2Vec2Model,
FlaxWav2Vec2PreTrainedModel,
)
from .models.xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
from .models.xlm_roberta import (
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
)
else:
# Import the same objects as dummies to get them in the namespace.
# They will raise an import error if the user tries to instantiate / use them.
from .utils.dummy_flax_objects import *
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
extra_objects={"__version__": __version__},
)
if not is_tf_available() and not is_torch_available() and not is_flax_available():
logger.warning(
"None of PyTorch, TensorFlow >= 2.0, or Flax have been found. "
"Models won't be available and only tokenizers, configuration "
"and file/data utilities can be used."
)
| 38.834011
| 119
| 0.658921
|
73f1af0e66018590cf308cee1b23b3a0ad4f86b3
| 494
|
py
|
Python
|
tests/unit/test_nba_teams.py
|
JosephDErwin/sportsreference
|
f026366bec91fdf4bebef48e3a4bfd7c5bfab4bd
|
[
"MIT"
] | 1
|
2020-03-08T20:17:39.000Z
|
2020-03-08T20:17:39.000Z
|
tests/unit/test_nba_teams.py
|
JosephDErwin/sportsreference
|
f026366bec91fdf4bebef48e3a4bfd7c5bfab4bd
|
[
"MIT"
] | null | null | null |
tests/unit/test_nba_teams.py
|
JosephDErwin/sportsreference
|
f026366bec91fdf4bebef48e3a4bfd7c5bfab4bd
|
[
"MIT"
] | 1
|
2020-07-08T16:05:25.000Z
|
2020-07-08T16:05:25.000Z
|
from flexmock import flexmock
from sportsreference.nba.schedule import Schedule
from sportsreference.nba.teams import Team
class TestNBATeams:
def test_nba_schedule_returns_schedule(self, *args, **kwargs):
flexmock(Team) \
.should_receive('_parse_team_data') \
.and_return(None)
flexmock(Schedule) \
.should_receive('_pull_schedule') \
.and_return(None)
team = Team(None, 1)
assert len(team.schedule) == 0
| 27.444444
| 66
| 0.65587
|
5b0d01193f1da18faf4fdf6c077b1af43ff15dba
| 902
|
py
|
Python
|
glance/notifier/notify_noop.py
|
citrix-openstack-build/glance
|
5048ceea989d93c8819d2dc6377803fc74620d14
|
[
"Apache-2.0"
] | 1
|
2018-05-03T03:52:39.000Z
|
2018-05-03T03:52:39.000Z
|
glance/notifier/notify_noop.py
|
citrix-openstack-build/glance
|
5048ceea989d93c8819d2dc6377803fc74620d14
|
[
"Apache-2.0"
] | null | null | null |
glance/notifier/notify_noop.py
|
citrix-openstack-build/glance
|
5048ceea989d93c8819d2dc6377803fc74620d14
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011, OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glance.notifier import strategy
class NoopStrategy(strategy.Strategy):
"""A notifier that does nothing when called."""
def warn(self, msg):
pass
def info(self, msg):
pass
def error(self, msg):
pass
| 28.1875
| 78
| 0.697339
|
a5db863db4017738bee7ad885c75edeba4e779ea
| 2,783
|
py
|
Python
|
spidermon/contrib/stats/counters.py
|
zanachka/spidermon
|
d2840b6bbb6ba6d8a0ef633deac66588d243e615
|
[
"BSD-3-Clause"
] | 405
|
2019-01-10T13:06:09.000Z
|
2022-03-30T20:14:58.000Z
|
spidermon/contrib/stats/counters.py
|
zanachka/spidermon
|
d2840b6bbb6ba6d8a0ef633deac66588d243e615
|
[
"BSD-3-Clause"
] | 226
|
2019-01-04T13:31:17.000Z
|
2022-03-28T21:06:10.000Z
|
spidermon/contrib/stats/counters.py
|
zanachka/spidermon
|
d2840b6bbb6ba6d8a0ef633deac66588d243e615
|
[
"BSD-3-Clause"
] | 87
|
2019-01-07T10:23:26.000Z
|
2022-02-22T04:38:04.000Z
|
import collections
class PercentCounterBase:
def __init__(self, total=0):
self._total = total
@property
def count(self):
raise NotImplementedError
@property
def percent(self):
if self._total <= 0 or self.count <= 0:
return 0
else:
return float(self.count) / float(self._total)
def __str__(self):
return "(count=%d, percent=%.2f)" % (self.count, self.percent)
def __repr__(self):
return self.__str__()
class PercentCounter(PercentCounterBase):
def __init__(self, count=0, total=0):
super().__init__(total)
self._count = count
@property
def count(self):
return self._count
def inc_value(self, value):
self._count += value
class DictPercentCounter(PercentCounterBase, collections.abc.MutableMapping):
__items_class__ = PercentCounter
def __init__(self, total):
super().__init__(total)
self._dict = dict()
@property
def count(self):
return sum([e.count for e in self._dict.values()])
def add_value(self, key, value):
if key not in self._dict:
self._create_item(key)
self[key].inc_value(value)
def _create_item(self, key):
self._dict[key] = self.__items_class__(total=self._total)
def __getitem__(self, key):
if key not in self._dict:
return self.__items_class__(total=self._total)
else:
return self._dict[self.__keytransform__(key)]
def __iter__(self):
return iter(self._dict)
def __len__(self):
return len(self._dict)
def __keytransform__(self, key):
return key
def _immutable(self, *args, **kws):
raise TypeError
def __str__(self):
return "(count=%d, percent=%.2f, %s)" % (
self.count,
self.percent,
str(self._dict),
)
__setitem__ = _immutable
__delitem__ = _immutable
class AttributeDictPercentCounter(PercentCounterBase):
__attribute_dict_name__ = "dict"
def __init__(self, total):
super().__init__(total)
setattr(self, self.__attribute_dict_name__, DictPercentCounter(total))
@property
def attribute_dict(self):
return getattr(self, self.__attribute_dict_name__)
@property
def count(self):
return sum([e.count for e in self.attribute_dict.values()])
def add_value(self, key, value):
self.attribute_dict.add_value(key, value)
def __str__(self):
return "(count=%d, percent=%.2f, %s=%s)" % (
self.count,
self.percent,
self.__attribute_dict_name__,
str(self.attribute_dict),
)
def __repr__(self):
return self.__str__()
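# --- Illustrative usage sketch (added for clarity; not part of the original module,
# and the observation counts below are made-up numbers) ---
if __name__ == "__main__":
    stats = DictPercentCounter(total=10)
    stats.add_value("errors", 3)
    stats.add_value("warnings", 2)
    print(stats["errors"])  # (count=3, percent=0.30)
    print(stats)            # aggregated count=5, percent=0.50 plus per-key counters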
| 24.2
| 78
| 0.613008
|
af045923a7a4b90b07e016c00428e2fbfbba0468
| 339
|
py
|
Python
|
fireup/__init__.py
|
kashif/firedup
|
9e6605dbb6f564c8e1e35121681581e103c476b5
|
[
"MIT"
] | 121
|
2019-01-18T19:32:04.000Z
|
2022-02-28T13:07:40.000Z
|
fireup/__init__.py
|
arnaudvl/firedup
|
9629fffc8af7d9607025d82ad757a67baa805285
|
[
"MIT"
] | 11
|
2019-02-19T12:35:11.000Z
|
2020-08-03T04:08:05.000Z
|
fireup/__init__.py
|
arnaudvl/firedup
|
9629fffc8af7d9607025d82ad757a67baa805285
|
[
"MIT"
] | 27
|
2019-01-19T14:16:05.000Z
|
2021-12-30T22:19:09.000Z
|
# Algorithms
from fireup.algos.ddpg.ddpg import ddpg
from fireup.algos.ppo.ppo import ppo
from fireup.algos.sac.sac import sac
from fireup.algos.td3.td3 import td3
from fireup.algos.trpo.trpo import trpo
from fireup.algos.vpg.vpg import vpg
from fireup.algos.dqn.dqn import dqn
# Loggers
from fireup.utils.logx import Logger, EpochLogger
| 28.25
| 49
| 0.811209
|
9176abae1de424faba48ca68ef1e645c159c3607
| 8,760
|
py
|
Python
|
arjuna/core/error.py
|
bhargavkumar-65/arjuna
|
400dfd598096199e89d64eb6e8d2932892d37f6d
|
[
"Apache-2.0"
] | null | null | null |
arjuna/core/error.py
|
bhargavkumar-65/arjuna
|
400dfd598096199e89d64eb6e8d2932892d37f6d
|
[
"Apache-2.0"
] | null | null | null |
arjuna/core/error.py
|
bhargavkumar-65/arjuna
|
400dfd598096199e89d64eb6e8d2932892d37f6d
|
[
"Apache-2.0"
] | null | null | null |
# This file is a part of Arjuna
# Copyright 2015-2021 Rahul Verma
# Website: www.RahulVerma.net
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################
# Raised and consumed by internal implementation of Arjuna
##########################################################
from arjuna.interact.gui.auto.finder.wmd import GuiWidgetMetaData
class ArjunaException(Exception):
    def __init__(self, message, screenshots_path=None, child=None):
        # child is the wrapped child exception
        super().__init__(message)
        self.screenshot_path = screenshots_path
self.child_Exceptions = []
if child:
try:
raise child
except ArjunaException as e:
self.__extract_child_Exceptions(e);
# } catch (java.lang.reflect.Invocation_target_exception f) {
# if (ArjunaException.class.is_assignable_from(f.get_target_exception().get_class())) {
# ArjunaException z = (ArjunaException) f.get_target_exception();
# extract_child_Exceptions(z);
# } else {
# insert_child_Exception(f);
# }
except Exception as f:
self.__insert_child_Exception(f);
def contains_screenshot(self):
return self.screenshot_path is not None
def get_screenshot_path(self):
return self.screenshot_path
def set_screenshot_path(self, path):
self.screenshot_path = path
def __insert_child_Exception(self, e):
self.child_Exceptions.append(e)
def __extract_child_Exceptions(self, e):
child_Exceptions = e.get_child_Exceptions()
self.child_Exceptions.extend(child_Exceptions)
def get_child_Exceptions(self):
return self.child_Exceptions
def message_formatter(text, component=None, object_name=None, method_name=None, code=None):
    m = ""
    m += "{}::".format(component) if component else ""
    m += "{}::".format(object_name) if object_name else ""
    m += "{}::".format(method_name) if method_name else ""
    m += "{}::".format(code) if code else ""
    m += text
    return m
class Problem(ArjunaException):
def __init__(self, text, screenshot_path, exc, component=None, object_name=None, method_name=None, code=None):
        super().__init__(message_formatter(text, component, object_name, method_name, code), screenshot_path, exc)
        self.problem_component = component
        self.problem_object = object_name
        self.problem_method = method_name
        self.problem_code = code
def get_problem_component(self):
return self.problem_component
def get_problem_object(self):
return self.problem_object
def get_problem_method(self):
return self.problem_method
def get_problem_code(self):
return self.problem_code
def get_problem_text(self):
        return str(self)
class DirReaderFinishedException(Exception):
def __init__(self):
super().__init__("No more files.")
class UnsupportedRepresentationException(Exception):
def __init__(self, klass_user_friendly_name, method, str_source_value, target_value_type):
        super().__init__(
            "Value.{}(): Can not represent {} types containing >>{}<< as {}.".format(
                method, klass_user_friendly_name, str_source_value, target_value_type))
class IncompatibleInputForValueException(Exception):
def __init__(self, value, actual, value_type):
super().__init__(
"Incompatible input types >>{}<< (type: {}) supplied for creating {}.".format(value, actual, value_type))
class StringKeyValueContainerLookupException(Exception):
def __init__(self,key):
super().__init__("Invalid Key [{}] used for string key types container lookup.".format(key))
class WaitableError(Exception):
def __init__(self, message):
super().__init__(message)
def format_msg(msg):
return msg and "Error message: {}".format(msg) or ""
class _WidgetNotFoundError(WaitableError):
def __init__(self, elem_name, *locators, container=None, relations=None, filters=None, message=None):
container = container and " in {}".format(container) or ""
relations = relations and " with relations {}".format(relations) or ""
filters = filters and " and filters {}".format(filters) or ""
message = format_msg(message)
super().__init__("{} not found using any of the locators: {}{}{}{}.{}".format(elem_name, GuiWidgetMetaData.locators_as_str(locators), relations, filters, container, message))
class _WidgetPresentError(WaitableError):
def __init__(self, elem_type, *locators, message=None):
        message = format_msg(message)
super().__init__("{} expected to be absent but still present for one of the locators: {}.{}".format(elem_type, GuiWidgetMetaData.locators_as_str(locators), message))
class GuiWidgetNotFoundError(_WidgetNotFoundError):
def __init__(self, *locators, container=None, relations=None, filters=None, message=None):
super().__init__("GuiWidget", *locators, container=container, relations=relations, filters=filters, message=message)
class _GuiWidgetPresentError(_WidgetPresentError):
def __init__(self, *locators, message=None):
super().__init__("GuiWidget", *locators, message=message)
class GuiWidgetNotReadyError(WaitableError):
def __init__(self, message):
super().__init__("GuiWidget is NOT ready for interaction. Tool message: {}".format(message))
class GuiWidgetTextNotSetError(WaitableError):
def __init__(self, message):
super().__init__(". Tool message: {}".format(message))
class ChildWindowNotFoundError(_WidgetNotFoundError):
def __init__(self, *locators):
super().__init__("Child window", *locators)
class ChildFrameNotFoundError(_WidgetNotFoundError):
def __init__(self, *locators):
super().__init__("Frame", *locators)
class ArjunaTimeoutError(WaitableError):
def __init__(self, context, message):
super().__init__(". Timeout in {}. Error Message: {}".format(context, message))
class DataSourceFinished(StopIteration):
def __init__(self, msg=None):
super().__init__(msg is None and "Done" or msg)
class YamlError(Exception):
'''
Raised when there is an eror in Yaml structure or there is an error in its expected format in the context where it is used in Arjuna.
'''
def __init__(self, msg):
super().__init__(msg)
class YamlUndefinedSectionError(Exception):
'''
Raised when the YamlList does not have the provided section key.
'''
def __init__(self, msg):
super().__init__(msg)
class YamlListIndexError(Exception):
'''
Raised when the YamlList does not have the provided index.
'''
def __init__(self, msg):
super().__init__(msg)
class TestGroupsFinished(Exception):
pass
class ExclusionRuleMet(Exception):
def __init__(self, rule):
super().__init__("An exclusion rule was met for the object.")
self.__rule = rule
@property
def rule(self):
return self.__rule
class NoInclusionRuleMet(Exception):
def __init__(self):
super().__init__("None of the include rules were met.")
class InvalidSelectionRule(Exception):
pass
class RulePatternDoesNotMatchError(Exception):
def __init__(self, rule_str, pattern_class, expected_format):
super().__init__(f"{rule_str} is not a {pattern_class.__name__}. Expected format: {expected_format}")
class EmailBoxNotConnected(WaitableError):
def __init__(self, server, mailbox, msg):
super().__init__(f"Mailbox >{mailbox}< not selected for email IMAP server >{server.host}<. {msg}")
class EmailNotReceived(WaitableError):
def __init__(self, server, mailbox, msg, **kwargs):
super().__init__(f"Email not received in mailbox >{mailbox}< at email IMAP server >{server.host}< as per conditions: >>{kwargs}<<. {msg}")
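# --- Illustrative sketch (not part of the original module) ---
# WaitableError marks failures that polling code may retry; a minimal demo,
# assuming nothing beyond the classes defined above:
if __name__ == "__main__":
    try:
        raise GuiWidgetNotReadyError("element is still animating")
    except WaitableError as err:
        print(type(err).__name__, "->", err)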
| 35.465587
| 182
| 0.665525
|
64f76736343282fe38084973c26034acaadb4017
| 1,125
|
py
|
Python
|
examples/ex1_ODE.py
|
Foxp1/chebyspectral
|
4456e534304f2dafd74a252d74a9fd5f185fd1cf
|
[
"MIT"
] | null | null | null |
examples/ex1_ODE.py
|
Foxp1/chebyspectral
|
4456e534304f2dafd74a252d74a9fd5f185fd1cf
|
[
"MIT"
] | null | null | null |
examples/ex1_ODE.py
|
Foxp1/chebyspectral
|
4456e534304f2dafd74a252d74a9fd5f185fd1cf
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append('..')
import numpy as np
import matplotlib.pyplot as plt
from chebyspectral import *
deg = 16 # degree of Chebyshev polynomial
# Source term
C = -4*np.exp(1)/(1+np.exp(2))
f = lambda xq: np.exp(xq) + C
fHat = chebfit(f, deg)
# Boundary conditions
bc_value_1 = 0
bc_derivative_order_1 = 0 # Dirichlet (0th order derivative)
bc_position_1 = -1
bc_axis_1 = 0
bc_1 = [bc_value_1, bc_derivative_order_1, bc_position_1, bc_axis_1]
bc_value_2 = 0
bc_derivative_order_2 = 0 # Dirichlet
bc_position_2 = 1
bc_axis_2 = 0
bc_2 = [bc_value_2, bc_derivative_order_2, bc_position_2, bc_axis_2]
# Differentiation matrix
l_operator = [4, -4, 1]
L = chebdiff(l_operator, deg)
L, fHat = chebbc(L, fHat, [bc_1, bc_2])
# Compute solution
u = np.dot(np.linalg.pinv(L), fHat)
# Plot solution
N = 100
x = np.linspace(-1, 1, N)
fig, ax = plt.subplots()
ax.plot(x, chebeval(u, x), 'b', label='Approximation')
ax.plot(x, np.exp(x)-np.sinh(1)/np.sinh(2)*np.exp(2*x) + C/4, 'r--', label='Exact')
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels)
plt.xlabel('x')
plt.ylabel('f(x)')
plt.show()
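# Optional check (illustrative addition): quantify the spectral approximation error
# against the same exact solution used for the reference curve above.
exact = np.exp(x) - np.sinh(1)/np.sinh(2)*np.exp(2*x) + C/4
print('max |error| =', np.abs(chebeval(u, x) - exact).max())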
| 24.456522
| 83
| 0.705778
|
ba3e60697087f34c303a11ac200f6ef3d6d8d6db
| 2,523
|
py
|
Python
|
models/utils.py
|
kilsenp/person-multi-task-dataset
|
2f186cafa3db2c77d8c6c4309b2cadc13d4f92ab
|
[
"MIT"
] | 4
|
2020-10-08T03:31:36.000Z
|
2021-03-06T08:06:23.000Z
|
models/utils.py
|
kilianyp/person-multi-task-dataset
|
2f186cafa3db2c77d8c6c4309b2cadc13d4f92ab
|
[
"MIT"
] | 7
|
2021-06-08T20:55:10.000Z
|
2022-02-10T00:38:32.000Z
|
models/utils.py
|
kilsenp/person-multi-task-dataset
|
2f186cafa3db2c77d8c6c4309b2cadc13d4f92ab
|
[
"MIT"
] | null | null | null |
import torch
from builders import model_builder
import torch.nn as nn
class InferenceModel(object):
def __init__(self, model_path, augmentation, cuda=True):
raise NotImplementedError
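        # NOTE (added comment): this constructor is intentionally disabled -- helpers such as
        # load_args, augmentation_fn_builder, restore_transform and restore_model are not
        # imported in this module, so the remainder of the method body is unreachable.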
self.cuda = cuda
args = load_args(model_path)
augment_fn, num_augmentations = augmentation_fn_builder(augmentation)
self.transform = restore_transform(args, augment_fn)
model, endpoints = restore_model(args, model_path)
if self.cuda:
model = model.cuda()
self.model = model
self.model.eval()
self.endpoints = endpoints
self.num_augmentations = num_augmentations
def __call__(self, x):
with torch.no_grad():
self.endpoints = self.model(x, self.endpoints)
return self.endpoints
    def on_pil_list(self, images):
        """Forward pass on a list of images.
        Args:
            images: A list of PIL images.
        """
data = []
for image in images:
data.append(self.transform(image))
data = torch.cat(data)
if self.cuda:
data = data.cuda()
with torch.no_grad():
self.endpoints = self.model(data, self.endpoints)
#result = self.endpoints["emb"]
# mean over crops
# TODO this depends on the data augmentation
#self.endpoints["emb"] = result.mean(0)
# COPY otherwise a reference is passed that will be overwritten
# by the next forward pass
return self.endpoints.copy()
def weights_init_classifier(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.normal_(m.weight, std=0.001)
        if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
elif classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
elif classname.find('BatchNorm') != -1:
if m.affine:
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0.0)
class GradReverse(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output.neg()
def grad_reverse(x):
return GradReverse.apply(x)
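# --- Illustrative sketch (not part of the original module; the layer sizes are arbitrary) ---
# grad_reverse is typically placed in front of a domain classifier for
# domain-adversarial training: the forward pass is the identity, but the
# gradient flowing back into the feature extractor is negated.
if __name__ == "__main__":
    features = torch.randn(4, 128, requires_grad=True)
    domain_head = nn.Linear(128, 2)
    domain_head(grad_reverse(features)).sum().backward()
    print(features.grad.shape)  # gradients arrive at `features` with their sign flipped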
| 30.39759
| 77
| 0.615141
|
03b13e6b0f1876bac6f488a685395c59344ff576
| 14,682
|
py
|
Python
|
xslack/xslack.py
|
Lilleengen/xSlack
|
05fa204276816b707f970f893349d34ef1dd8f6c
|
[
"MIT"
] | 1
|
2016-11-26T12:57:46.000Z
|
2016-11-26T12:57:46.000Z
|
xslack/xslack.py
|
Lilleengen/xSlack
|
05fa204276816b707f970f893349d34ef1dd8f6c
|
[
"MIT"
] | null | null | null |
xslack/xslack.py
|
Lilleengen/xSlack
|
05fa204276816b707f970f893349d34ef1dd8f6c
|
[
"MIT"
] | null | null | null |
import json
import re
import threading
import time
import urllib
import requests
from slackclient import SlackClient
config = dict()
c = threading.Condition()
threads = dict()
shared_files = dict()
def run(token, other_tokens, channel_names):
print(other_tokens)
print(channel_names)
members = dict()
ts_dict = dict()
channels = dict()
clients = list()
clients_users = dict()
clients_channels = dict()
for other_token in other_tokens:
if other_token != token:
clients.append(SlackClient(other_token))
for client in clients:
clients_users[client.token] = dict()
ts_dict[client.token] = dict()
for user in client.api_call("users.list")["members"]:
clients_users[client.token][user["name"]] = user["id"]
for channel in client.api_call("channels.list")["channels"]:
clients_channels[channel["name"]] = channel["id"]
for group in client.api_call("groups.list")["groups"]:
clients_channels[group["name"]] = group["id"]
sc = SlackClient(token)
if sc.rtm_connect(): # connect to a Slack RTM websocket
team_name = sc.api_call("team.info")["team"]["name"]
for member in sc.api_call("users.list")["members"]:
members[member["id"]] = dict()
members[member["id"]]["name"] = member["name"] # member_info["name"]
members[member["id"]]["image"] = member["profile"]["image_32"]
for channel in sc.api_call("channels.list")["channels"]:
channels[channel["id"]] = channel["name"]
for group in sc.api_call("groups.list")["groups"]:
channels[group["id"]] = group["name"]
while True:
if token not in config:
break
if channel_names != config[token]:
channel_names = config[token]
if (len(channel_names) == 0):
break
print(channel_names)
if other_tokens != get_other_tokens(token):
other_tokens = get_other_tokens(token)
if (len(other_tokens) == 0):
break
clients = list()
for other_token in other_tokens:
if other_token != token:
clients.append(SlackClient(other_token))
for client in clients:
clients_users[client.token] = dict()
ts_dict[client.token] = dict()
for user in client.api_call("users.list")["members"]:
clients_users[client.token][user["name"]] = user["id"]
for channel in client.api_call("channels.list")["channels"]:
clients_channels[channel["name"]] = channel["id"]
for group in client.api_call("groups.list")["groups"]:
clients_channels[group["name"]] = group["id"]
for action in sc.rtm_read():
if "type" in action and (action["type"] == "team_join" or action["type"] == "user_change"):
members[action["user"]["id"]] = dict()
members[action["user"]["id"]]["name"] = action["user"]["name"]
members[action["user"]["id"]]["image"] = action["user"]["profile"]["image_32"]
elif "channel" in action and action["channel"] in channels and channels[
action["channel"]] in channel_names and action["type"] == "message" and "subtype" in action and \
action["subtype"] == "message_changed":
text = re.sub(r"<@U(?:\d|\w){8}>", lambda m: "-----@-----" + members[m.group(0)[2:-1]]["name"],
action["message"]["text"])
for client in clients:
if action["message"]["ts"] in ts_dict[client.token] and channels[action["channel"]] in config[
client.token]:
text = re.sub(r"(?:^| |\n)@(?:\d|[a-z]){1,23}(?:$| |\n)",
lambda m: re.sub(r"@(?:\d|[a-z]){1,23}",
lambda m2: m2.group(0) if m2.group(0)[1:] not in
clients_users[
client.token] else "<@" +
clients_users[
client.token][
m2.group(
0)[
1:]] + ">",
m.group(0)), text)
text = re.sub(r"-----@-----", "@", text)
print(client.api_call("chat.update", ts=ts_dict[client.token][action["message"]["ts"]],
channel=clients_channels[channels[action["channel"]]], text=text))
elif "channel" in action and action["channel"] in channels and channels[
action["channel"]] in channel_names and action["type"] == "message" and "subtype" in action and \
action["subtype"] == "group_leave":
response = client.api_call("chat.postMessage",
channel=clients_channels[channels[action["channel"]]],
text="@" + members[action["user"]][
"name"] + " was removed from the chat in " + team_name,
username="xSlack",
icon_emoji=":heavy_minus_sign:")
elif "channel" in action and action["channel"] in channels and channels[
action["channel"]] in channel_names and action["type"] == "message" and "subtype" in action and \
action["subtype"] == "group_join":
response = client.api_call("chat.postMessage",
channel=clients_channels[channels[action["channel"]]],
text="@" + members[action["user"]][
"name"] + " was added to the chat in " + team_name,
username="xSlack",
icon_emoji=":heavy_plus_sign:")
elif "ts" in action and "channel" in action and action["channel"] in channels and channels[
action["channel"]] in channel_names and "type" in action and action[
"type"] == "message" and "text" in action and "user" in action:
text = re.sub(r"<@U(?:\d|\w){8}>", lambda m: "-----@-----" + members[m.group(0)[2:-1]]["name"],
action["text"])
text = re.sub(r"<@U(?:\d|\w){8}\|(?:\d|[a-z]){1,23}>", lambda m: "-----@-----" + m.group(0)[12:-1],
text)
for client in clients:
if channels[action["channel"]] in config[client.token]:
text = re.sub(r"(?:^| |\n)@(?:\d|[a-z]){1,23}(?:$| |\n)",
                                          lambda m: re.sub(r"@(?:\d|[a-z]){1,23}",
lambda m2: m2.group(0) if m2.group(0)[1:] not in
clients_users[
client.token] else "<@" +
clients_users[
client.token][
m2.group(
0)[
1:]] + ">",
m.group(0)), text)
text = re.sub(r"-----@-----", "@", text)
if "subtype" in action and action["subtype"] == "file_share":
c.acquire()
file_info = {"ts": action["ts"], "name": action["file"]["name"],
"id": action["file"]["id"],
"child_ids": dict()}
file_is_old = False
for file in shared_files[client.token]:
if sc.token in file["child_ids"] and file["child_ids"][sc.token] == action["file"][
"id"] and float(file["ts"]) > time.time() - 2 * 60:
file_is_old = True
if not file_is_old:
req = urllib.request.Request(action["file"]["url_private_download"])
req.add_header('Authorization', 'Bearer ' + sc.token)
resp = urllib.request.urlopen(req)
files = dict()
files['file'] = resp.read()
get = dict()
get["filename"] = action["file"]["name"]
get["title"] = action["file"]["title"]
get["channels"] = clients_channels[channels[action["channel"]]]
get["filetype"] = action["file"]["filetype"]
get["token"] = client.token
get["username"] = members[action["user"]]["name"] + " @ " + team_name
get["icon_url"] = members[action["user"]]["image"]
if "initial_comment" in action["file"] and "comment" in action["file"][
"initial_comment"]:
get["initial_comment"] = "File uploaded by @" + members[action["user"]][
"name"] + " in " + team_name + "\n\n" + action["file"]["initial_comment"][
"comment"]
else:
get["initial_comment"] = "File uploaded by @" + members[action["user"]][
"name"] + " in " + team_name
upload_response = json.loads(
requests.post(
'https://slack.com/api/files.upload?' + urllib.parse.urlencode(get),
files=files).text)
file_info["child_ids"][client.token] = upload_response["file"]["id"]
shared_files[sc.token].append(file_info)
c.release()
else:
if "attachments" in action:
response = client.api_call("chat.postMessage",
channel=clients_channels[channels[action["channel"]]],
text=text,
username=members[action["user"]][
"name"] + " @ " + team_name,
icon_url=members[action["user"]]["image"],
attachments=action["attachments"])
else:
response = client.api_call("chat.postMessage",
channel=clients_channels[channels[action["channel"]]],
text=text,
username=members[action["user"]][
"name"] + " @ " + team_name,
icon_url=members[action["user"]]["image"])
if "ts" in response:
ts_dict[client.token][action["ts"]] = response["ts"]
else:
print(response)
time.sleep(0.1)
else:
print('Connection Failed, invalid token?')
def add_token_channel(token, channel):
if token not in config:
config[token] = list()
config[token].append(channel)
if token not in threads:
shared_files[token] = list()
threads[token] = threading.Thread(target=run, args=(token, get_other_tokens(token), config[token]))
threads[token].start()
def remove_token_channel(token, channel):
if token in config and channel in config[token]:
config[token].remove(channel)
if len(config[token]) == 0:
config.pop(token)
def get_other_tokens(token):
other_tokens = list()
for channel in config[token]:
for other_token, other_channels in config.items():
if channel in other_channels and other_token != token and other_token not in other_tokens:
other_tokens.append(other_token)
return other_tokens
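# --- Illustrative sketch (the tokens and channel name below are placeholders, not real credentials) ---
# Bridging a channel between two workspaces amounts to registering the same channel
# name under each workspace's token; add_token_channel() starts one relay thread per
# token the first time that token is seen.
if __name__ == "__main__":
    add_token_channel("xoxb-workspace-a-token", "shared-channel")
    add_token_channel("xoxb-workspace-b-token", "shared-channel")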
| 61.689076
| 126
| 0.387141
|
2839143a4def11458504d877324b5ef866e48d3e
| 5,036
|
py
|
Python
|
batchup/datasets/cifar100.py
|
Britefury/batchup
|
eba03676d9707133a67184200c10d9c5b575c6a5
|
[
"MIT"
] | 89
|
2017-04-19T12:13:32.000Z
|
2021-08-31T12:42:16.000Z
|
batchup/datasets/cifar100.py
|
Britefury/batchup
|
eba03676d9707133a67184200c10d9c5b575c6a5
|
[
"MIT"
] | 4
|
2017-04-28T03:11:43.000Z
|
2017-11-29T15:27:28.000Z
|
batchup/datasets/cifar100.py
|
Britefury/batchup
|
eba03676d9707133a67184200c10d9c5b575c6a5
|
[
"MIT"
] | 10
|
2017-04-19T18:56:55.000Z
|
2021-04-13T04:35:55.000Z
|
import os
import sys
import shutil
import tarfile
import pickle
import numpy as np
import tables
from .. import config
from ..image.utils import ImageArrayUInt8ToFloat32
from . import dataset
# Pickle encoding parameters depend on Python version
_PICKLE_ENC = {} if sys.version_info[0] == 2 else {'encoding': 'latin1'}
_CIFAR100_BASE_URL = 'http://www.cs.toronto.edu/~kriz/'
_SHA256_CIFAR100_TARBALL = \
'85cd44d02ba6437773c5bbd22e183051d648de2e7d6b014e1ef29b855ba677a7'
_H5_FILENAME = 'cifar100.h5'
_CIFAR100_SRC = dataset.DownloadSourceFile(
'cifar-100-python.tar.gz', base_url=_CIFAR100_BASE_URL,
sha256=_SHA256_CIFAR100_TARBALL)
@dataset.fetch_and_convert_dataset([_CIFAR100_SRC], _H5_FILENAME)
def fetch_cifar100(source_paths, target_path):
tarball_path = source_paths[0]
# Get the paths to the member files
download_dir = os.path.dirname(tarball_path)
train_path = os.path.join(download_dir,
'cifar-100-python', 'train')
test_path = os.path.join(download_dir,
'cifar-100-python', 'test')
meta_path = os.path.join(download_dir,
'cifar-100-python', 'meta')
# Unpack
print('Unpacking CIFAR-100 tarball {}'.format(tarball_path))
tarfile.open(name=tarball_path, mode='r:gz').extractall(
path=download_dir)
# Create HDF5 output file
f_out = tables.open_file(target_path, mode='w')
g_out = f_out.create_group(f_out.root, 'cifar100', 'CIFAR-100 data')
print('Converting CIFAR-100 training set to HDF5')
train_batch = pickle.load(open(train_path, 'rb'), **_PICKLE_ENC)
train_X_u8 = train_batch['data'].reshape((-1, 3, 32, 32))
train_y = np.array(train_batch['fine_labels'], dtype=np.int32)
train_y_coarse = np.array(train_batch['coarse_labels'], dtype=np.int32)
f_out.create_array(g_out, 'train_X_u8', train_X_u8)
f_out.create_array(g_out, 'train_y', train_y)
f_out.create_array(g_out, 'train_y_coarse', train_y_coarse)
print('Converting CIFAR-100 test set to HDF5')
tst_batch = pickle.load(open(test_path, 'rb'), **_PICKLE_ENC)
test_X_u8 = tst_batch['data'].reshape((-1, 3, 32, 32))
test_y = np.array(tst_batch['fine_labels'], dtype=np.int32)
test_y_coarse = np.array(tst_batch['coarse_labels'], dtype=np.int32)
f_out.create_array(g_out, 'test_X_u8', test_X_u8)
f_out.create_array(g_out, 'test_y', test_y)
f_out.create_array(g_out, 'test_y_coarse', test_y_coarse)
print('Converting CIFAR-100 metadata to HDF5')
meta = pickle.load(open(meta_path, 'rb'), **_PICKLE_ENC)
class_names = meta['fine_label_names']
class_names_coarse = meta['coarse_label_names']
f_out.create_array(g_out, 'class_names', class_names)
f_out.create_array(g_out, 'class_names_coarse', class_names_coarse)
f_out.close()
# Remove the contents unpacked from the tarball
shutil.rmtree(os.path.join(download_dir, 'cifar-100-python'))
return target_path
def delete_cache(): # pragma: no cover
dataset.delete_dataset_cache(_H5_FILENAME)
class CIFAR100 (object):
def __init__(self, n_val=10000, val_lower=0.0, val_upper=1.0):
h5_path = fetch_cifar100()
if h5_path is not None:
f = tables.open_file(h5_path, mode='r')
train_X_u8 = f.root.cifar100.train_X_u8
train_y = f.root.cifar100.train_y
train_y_coarse = f.root.cifar100.train_y_coarse
self.test_X_u8 = f.root.cifar100.test_X_u8
self.test_y = f.root.cifar100.test_y
self.test_y_coarse = f.root.cifar100.test_y_coarse
self.class_names = dataset.classnames_from_h5(
f.root.cifar100.class_names)
self.class_names_coarse = dataset.classnames_from_h5(
f.root.cifar100.class_names_coarse)
if n_val == 0 or n_val is None:
self.train_X_u8 = train_X_u8
self.train_y = train_y
self.train_y_coarse = train_y_coarse
self.val_X_u8 = np.zeros((0, 3, 32, 32), dtype=np.uint8)
self.val_y = np.zeros((0,), dtype=np.int32)
self.val_y_coarse = np.zeros((0,), dtype=np.int32)
else:
self.train_X_u8 = train_X_u8[:-n_val]
self.val_X_u8 = train_X_u8[-n_val:]
self.train_y, self.val_y = train_y[:-n_val], train_y[-n_val:]
self.train_y_coarse, self.val_y_coarse = \
(train_y_coarse[:-n_val], train_y_coarse[-n_val:])
else:
            raise RuntimeError('Could not load CIFAR-100 dataset')
self.train_X = ImageArrayUInt8ToFloat32(self.train_X_u8, val_lower,
val_upper)
self.val_X = ImageArrayUInt8ToFloat32(self.val_X_u8, val_lower,
val_upper)
self.test_X = ImageArrayUInt8ToFloat32(self.test_X_u8, val_lower,
val_upper)
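# --- Usage sketch (illustrative only; fetches and converts the dataset on first run) ---
if __name__ == '__main__':
    ds = CIFAR100(n_val=5000)
    print(ds.train_X_u8.shape, ds.val_X_u8.shape, ds.test_X_u8.shape)
    print(len(ds.class_names), 'fine classes,', len(ds.class_names_coarse), 'coarse classes')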
| 39.968254
| 77
| 0.654289
|
e232c3d0d6f4dd0d426df8504c6e9184882a2277
| 3,060
|
py
|
Python
|
pytorch_toolkit/face_antispoofing/demo_tools/ie_tools.py
|
abhatikar/training_extensions
|
1c96e0f5f39688f8b79735e8dfa90646afc3d5e6
|
[
"Apache-2.0"
] | 3
|
2020-12-29T02:47:32.000Z
|
2021-11-12T08:12:51.000Z
|
pytorch_toolkit/face_antispoofing/demo_tools/ie_tools.py
|
abhatikar/training_extensions
|
1c96e0f5f39688f8b79735e8dfa90646afc3d5e6
|
[
"Apache-2.0"
] | 9
|
2021-09-08T03:12:59.000Z
|
2022-03-12T00:57:19.000Z
|
pytorch_toolkit/face_antispoofing/demo_tools/ie_tools.py
|
abhatikar/training_extensions
|
1c96e0f5f39688f8b79735e8dfa90646afc3d5e6
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import os
import glog as log
import numpy as np
from openvino.inference_engine import IENetwork, IEPlugin
class IEModel:
"""Class for inference of models in the Inference Engine format"""
def __init__(self, exec_net, inputs_info, input_key, output_key):
self.net = exec_net
self.inputs_info = inputs_info
self.input_key = input_key
self.output_key = output_key
def forward(self, img):
"""Performs forward pass of the wrapped IE model"""
res = self.net.infer(inputs={self.input_key: np.expand_dims(img.transpose(2, 0, 1), axis=0)})
return np.copy(res[self.output_key])
def get_input_shape(self):
"""Returns an input shape of the wrapped IE model"""
return self.inputs_info[self.input_key]
def load_ie_model(model_xml, device, plugin_dir, cpu_extension=''):
"""Loads a model in the Inference Engine format"""
model_bin = os.path.splitext(model_xml)[0] + ".bin"
# Plugin initialization for specified device and load extensions library if specified
plugin = IEPlugin(device=device, plugin_dirs=plugin_dir)
if cpu_extension and 'CPU' in device:
plugin.add_cpu_extension(cpu_extension)
# Read IR
log.info("Loading network files:\n\t%s\n\t%s", model_xml, model_bin)
net = IENetwork(model=model_xml, weights=model_bin)
if "CPU" in plugin.device:
supported_layers = plugin.get_supported_layers(net)
not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
if not_supported_layers:
log.error("Following layers are not supported by the plugin for specified device %s:\n %s",
plugin.device, ', '.join(not_supported_layers))
log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
"or --cpu_extension command line argument")
sys.exit(1)
assert len(net.inputs.keys()) == 1, "Checker supports only single input topologies"
assert len(net.outputs) == 1, "Checker supports only single output topologies"
log.info("Preparing input blobs")
input_blob = next(iter(net.inputs))
out_blob = next(iter(net.outputs))
net.batch_size = 1
# Loading model to the plugin
log.info("Loading model to the plugin")
exec_net = plugin.load(network=net)
model = IEModel(exec_net, net.inputs, input_blob, out_blob)
del net
return model
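# --- Usage sketch (illustrative; the model path and the 128x128 input size are assumptions) ---
if __name__ == '__main__':
    net = load_ie_model('/path/to/model.xml', 'CPU', plugin_dir=None)
    dummy = np.zeros((128, 128, 3), dtype=np.float32)  # HWC crop; forward() transposes to NCHW
    print(net.forward(dummy).shape)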
| 41.351351
| 119
| 0.701961
|
8587e842d9dbc10835c15638ad7eb7457d62e91b
| 16,999
|
py
|
Python
|
tests/event_test.py
|
djotaku/matrix-nio
|
fa5d887c76e51034c81197523cd505d3b2df1b3e
|
[
"Apache-2.0"
] | 1
|
2021-01-05T19:45:40.000Z
|
2021-01-05T19:45:40.000Z
|
tests/event_test.py
|
djotaku/matrix-nio
|
fa5d887c76e51034c81197523cd505d3b2df1b3e
|
[
"Apache-2.0"
] | null | null | null |
tests/event_test.py
|
djotaku/matrix-nio
|
fa5d887c76e51034c81197523cd505d3b2df1b3e
|
[
"Apache-2.0"
] | 1
|
2020-06-24T01:41:44.000Z
|
2020-06-24T01:41:44.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import pdb
from nio.events import (
BadEvent,
OlmEvent,
PowerLevelsEvent,
RedactedEvent,
RedactionEvent,
RoomAliasEvent,
RoomCreateEvent,
RoomGuestAccessEvent,
RoomHistoryVisibilityEvent,
RoomJoinRulesEvent,
RoomMemberEvent,
RoomMessageEmote,
RoomMessageNotice,
RoomMessageText,
RoomNameEvent,
RoomTopicEvent,
RoomAvatarEvent,
ToDeviceEvent,
UnknownBadEvent,
Event,
RoomEncryptionEvent,
InviteEvent,
RoomKeyEvent,
ForwardedRoomKeyEvent,
MegolmEvent,
UnknownEncryptedEvent,
InviteMemberEvent,
InviteAliasEvent,
InviteNameEvent,
EphemeralEvent,
TypingNoticeEvent,
Receipt,
ReceiptEvent,
AccountDataEvent,
UnknownAccountDataEvent,
FullyReadEvent,
CallEvent,
CallAnswerEvent,
CallHangupEvent,
CallInviteEvent,
CallCandidatesEvent,
KeyVerificationStart,
KeyVerificationAccept,
KeyVerificationCancel,
RoomEncryptedImage,
KeyVerificationKey,
KeyVerificationMac,
TagEvent,
DummyEvent,
RoomKeyRequest,
RoomKeyRequestCancellation,
)
class TestClass:
@staticmethod
def _load_response(filename):
with open(filename) as f:
return json.loads(f.read())
def test_redacted_event(self):
parsed_dict = TestClass._load_response("tests/data/events/redacted.json")
response = RedactedEvent.from_dict(parsed_dict)
assert isinstance(response, RedactedEvent)
def test_malformed_event(self):
parsed_dict = TestClass._load_response(
"tests/data/events/redacted_invalid.json"
)
response = RedactedEvent.from_dict(parsed_dict)
assert isinstance(response, BadEvent)
def test_create_event(self):
parsed_dict = TestClass._load_response("tests/data/events/create.json")
event = RoomCreateEvent.from_dict(parsed_dict)
assert isinstance(event, RoomCreateEvent)
def test_guest_access_event(self):
parsed_dict = TestClass._load_response("tests/data/events/guest_access.json")
event = RoomGuestAccessEvent.from_dict(parsed_dict)
assert isinstance(event, RoomGuestAccessEvent)
def test_join_rules_event(self):
parsed_dict = TestClass._load_response("tests/data/events/join_rules.json")
event = RoomJoinRulesEvent.from_dict(parsed_dict)
assert isinstance(event, RoomJoinRulesEvent)
def test_history_visibility_event(self):
parsed_dict = TestClass._load_response(
"tests/data/events/history_visibility.json"
)
event = RoomHistoryVisibilityEvent.from_dict(parsed_dict)
assert isinstance(event, RoomHistoryVisibilityEvent)
def test_topic_event(self):
parsed_dict = TestClass._load_response("tests/data/events/topic.json")
event = RoomTopicEvent.from_dict(parsed_dict)
assert isinstance(event, RoomTopicEvent)
def test_room_avatar_event(self):
parsed_dict = TestClass._load_response("tests/data/events/room_avatar.json")
event = RoomAvatarEvent.from_dict(parsed_dict)
assert isinstance(event, RoomAvatarEvent)
def test_room_avatar_event_no_url(self):
parsed_dict = TestClass._load_response("tests/data/events/room_avatar.json")
parsed_dict["content"].pop("url")
event = RoomAvatarEvent.from_dict(parsed_dict)
assert isinstance(event, BadEvent)
def test_tag_event(self):
parsed_dict = TestClass._load_response("tests/data/events/tag.json")
event = AccountDataEvent.parse_event(parsed_dict)
assert isinstance(event, TagEvent)
def test_name_event(self):
parsed_dict = TestClass._load_response("tests/data/events/name.json")
event = RoomNameEvent.from_dict(parsed_dict)
assert isinstance(event, RoomNameEvent)
def test_alias_event(self):
parsed_dict = TestClass._load_response("tests/data/events/alias.json")
event = RoomAliasEvent.from_dict(parsed_dict)
assert isinstance(event, RoomAliasEvent)
def test_message_text(self):
parsed_dict = TestClass._load_response("tests/data/events/message_text.json")
event = RoomMessageText.from_dict(parsed_dict)
assert isinstance(event, RoomMessageText)
def test_message_emote(self):
parsed_dict = TestClass._load_response("tests/data/events/message_emote.json")
event = RoomMessageEmote.from_dict(parsed_dict)
assert isinstance(event, RoomMessageEmote)
def test_message_notice(self):
parsed_dict = TestClass._load_response("tests/data/events/message_notice.json")
event = RoomMessageNotice.from_dict(parsed_dict)
assert isinstance(event, RoomMessageNotice)
def test_power_levels(self):
parsed_dict = TestClass._load_response("tests/data/events/power_levels.json")
event = PowerLevelsEvent.from_dict(parsed_dict)
assert isinstance(event, PowerLevelsEvent)
levels = event.power_levels
admin = "@example:localhost"
mod = "@alice:localhost"
higher_user = "@carol:localhost"
user = "@bob:localhost"
assert levels.get_state_event_required_level("m.room.name") == 50
assert levels.get_state_event_required_level("m.room.undefined") == 50
assert levels.get_message_event_required_level("m.room.message") == 25
assert levels.get_message_event_required_level("m.room.undefined") == 0
assert levels.get_user_level(admin) == 100
assert levels.get_user_level(user) == 0
assert levels.can_user_send_state(admin, "m.room.name") is True
assert levels.can_user_send_state(user, "m.room.name") is False
assert levels.can_user_send_message(admin) is True
assert levels.can_user_send_message(user, "m.room.message") is False
assert levels.can_user_invite(admin) is True
assert levels.can_user_invite(user) is True
assert levels.can_user_kick(admin) is True
assert levels.can_user_kick(user) is False
assert levels.can_user_kick(admin, admin) is False
assert levels.can_user_kick(admin, mod) is True
assert levels.can_user_kick(mod, admin) is False
assert levels.can_user_kick(mod, higher_user) is True
assert levels.can_user_kick(higher_user, user) is False
assert levels.can_user_ban(admin) is True
assert levels.can_user_ban(user) is False
assert levels.can_user_ban(admin, admin) is False
assert levels.can_user_ban(admin, mod) is True
assert levels.can_user_ban(mod, admin) is False
assert levels.can_user_ban(mod, higher_user) is True
assert levels.can_user_ban(higher_user, user) is False
assert levels.can_user_redact(admin) is True
assert levels.can_user_redact(user) is False
def test_membership(self):
parsed_dict = TestClass._load_response("tests/data/events/member.json")
event = RoomMemberEvent.from_dict(parsed_dict)
assert isinstance(event, RoomMemberEvent)
def test_redaction(self):
parsed_dict = TestClass._load_response("tests/data/events/redaction.json")
event = RedactionEvent.from_dict(parsed_dict)
assert isinstance(event, RedactionEvent)
def test_empty_event(self):
parsed_dict = {}
response = RedactedEvent.from_dict(parsed_dict)
assert isinstance(response, UnknownBadEvent)
def test_room_encryption(self):
parsed_dict = TestClass._load_response("tests/data/events/room_encryption.json")
event = Event.parse_event(parsed_dict)
assert isinstance(event, RoomEncryptionEvent)
def test_room_key(self):
parsed_dict = TestClass._load_response("tests/data/events/room_key.json")
event = RoomKeyEvent.from_dict(parsed_dict, "@alice:example.org", "alice_key")
assert isinstance(event, RoomKeyEvent)
def test_forwarded_room_key(self):
parsed_dict = TestClass._load_response(
"tests/data/events/forwarded_room_key.json"
)
event = ForwardedRoomKeyEvent.from_dict(
parsed_dict, "@alice:example.org", "alice_key"
)
assert isinstance(event, ForwardedRoomKeyEvent)
def test_invalid_state_event(self):
for event_type, event_file in [
("m.room.create", "create.json"),
("m.room.guest_access", "guest_access.json"),
("m.room.join_rules", "join_rules.json"),
("m.room.history_visibility", "history_visibility.json"),
("m.room.member", "member.json"),
("m.room.canonical_alias", "alias.json"),
("m.room.name", "name.json"),
("m.room.topic", "topic.json"),
("m.room.avatar", "room_avatar.json"),
("m.room.power_levels", "power_levels.json"),
("m.room.encryption", "room_encryption.json"),
]:
parsed_dict = TestClass._load_response(
"tests/data/events/{}".format(event_file)
)
parsed_dict.pop("state_key")
event = Event.parse_event(parsed_dict)
assert isinstance(event, BadEvent)
assert event.source["type"] == event_type
def test_invalid_invite_state_events(self):
for event_type, event_file in [
("m.room.member", "member.json"),
("m.room.canonical_alias", "alias.json"),
("m.room.name", "name.json"),
]:
parsed_dict = TestClass._load_response(
"tests/data/events/{}".format(event_file)
)
parsed_dict.pop("state_key")
event = InviteEvent.parse_event(parsed_dict)
assert isinstance(event, BadEvent)
assert event.source["type"] == event_type
for event_type, event_file in [
("m.room.member", "member.json"),
("m.room.canonical_alias", "alias.json"),
("m.room.name", "name.json"),
]:
parsed_dict = TestClass._load_response(
"tests/data/events/{}".format(event_file)
)
parsed_dict.pop("type")
event = InviteEvent.parse_event(parsed_dict)
assert not event
def test_invite_events(self):
for event_type, event_file in [
(InviteMemberEvent, "member.json"),
(InviteAliasEvent, "alias.json"),
(InviteNameEvent, "name.json"),
]:
parsed_dict = TestClass._load_response(
"tests/data/events/{}".format(event_file)
)
event = InviteEvent.parse_event(parsed_dict)
assert isinstance(event, event_type)
def test_megolm_event(self):
parsed_dict = TestClass._load_response("tests/data/events/megolm.json")
event = Event.parse_event(parsed_dict)
assert isinstance(event, MegolmEvent)
parsed_dict["content"]["algorithm"] = "m.megolm.unknown"
event = Event.parse_event(parsed_dict)
assert isinstance(event, UnknownEncryptedEvent)
def test_olm_event(self):
parsed_dict = TestClass._load_response("tests/data/events/olm.json")
event = ToDeviceEvent.parse_event(parsed_dict)
assert isinstance(event, OlmEvent)
parsed_dict["content"]["algorithm"] = "m.megolm.unknown"
event = ToDeviceEvent.parse_event(parsed_dict)
assert not event
def test_ephemeral_event(self):
event = EphemeralEvent.parse_event({})
assert not event
event = EphemeralEvent.parse_event({"type": "m.unknown", "content": {}})
assert not event
def test_typing_event(self):
parsed_dict = TestClass._load_response("tests/data/events/typing.json")
event = EphemeralEvent.parse_event(parsed_dict)
assert isinstance(event, TypingNoticeEvent)
assert "@bob:example.com" in event.users
def test_read_receipt_event(self):
parsed_dict = TestClass._load_response("tests/data/events/receipt.json")
event = EphemeralEvent.parse_event(parsed_dict)
# Warning: this is directly tied to the above file; any changes below
# need to be reflected there too.
receipt = Receipt(
"$152037280074GZeOm:localhost",
"m.read",
"@bob:example.com",
1520372804619
)
assert isinstance(event, ReceiptEvent)
assert receipt in event.receipts
def test_account_data_event(self):
event = AccountDataEvent.parse_event({})
assert isinstance(event, UnknownBadEvent)
event = AccountDataEvent.parse_event({"type": "m.unknown", "content": {}})
assert isinstance(event, UnknownAccountDataEvent)
def test_fully_read_event(self):
parsed_dict = TestClass._load_response("tests/data/events/fully_read.json")
event = AccountDataEvent.parse_event(parsed_dict)
assert isinstance(event, FullyReadEvent)
def test_invalid_call_events(self):
for _, event_file in [
(CallInviteEvent, "call_invite.json"),
(CallAnswerEvent, "call_answer.json"),
(CallCandidatesEvent, "call_candidates.json"),
(CallHangupEvent, "call_hangup.json"),
]:
parsed_dict = TestClass._load_response(
"tests/data/events/{}".format(event_file)
)
parsed_dict["content"].pop("call_id")
event = CallEvent.parse_event(parsed_dict)
assert isinstance(event, BadEvent)
def test_call_events(self):
for event_type, event_file in [
(CallInviteEvent, "call_invite.json"),
(CallAnswerEvent, "call_answer.json"),
(CallCandidatesEvent, "call_candidates.json"),
(CallHangupEvent, "call_hangup.json"),
]:
parsed_dict = TestClass._load_response(
"tests/data/events/{}".format(event_file)
)
event = CallEvent.parse_event(parsed_dict)
assert isinstance(event, event_type)
def test_key_verification_events(self):
for event_type, event_file in [
(KeyVerificationStart, "key_start.json"),
(KeyVerificationAccept, "key_accept.json"),
(KeyVerificationKey, "key_key.json"),
(KeyVerificationMac, "key_mac.json"),
(KeyVerificationCancel, "key_cancel.json"),
]:
parsed_dict = TestClass._load_response(
"tests/data/events/{}".format(event_file)
)
event = ToDeviceEvent.parse_event(parsed_dict)
assert isinstance(event, event_type)
def test_invalid_key_verification(self):
for _, event_file in [
(KeyVerificationStart, "key_start.json"),
(KeyVerificationAccept, "key_accept.json"),
(KeyVerificationKey, "key_key.json"),
(KeyVerificationMac, "key_mac.json"),
(KeyVerificationCancel, "key_cancel.json"),
]:
parsed_dict = TestClass._load_response(
"tests/data/events/{}".format(event_file)
)
parsed_dict["content"].pop("transaction_id")
event = ToDeviceEvent.parse_event(parsed_dict)
assert isinstance(event, UnknownBadEvent)
def test_invalid_room_event(self):
event = Event.parse_event({"type": "m.unknown"})
assert isinstance(event, UnknownBadEvent)
def test_redacted_state_event(self):
parsed_dict = TestClass._load_response("tests/data/events/redacted_state.json")
event = Event.parse_event(parsed_dict)
assert isinstance(event, RedactedEvent)
def test_dummy_event(self):
parsed_dict = TestClass._load_response("tests/data/events/dummy.json")
event = DummyEvent.from_dict(parsed_dict, "@alice:example.org", "alice_key")
assert isinstance(event, DummyEvent)
def test_room_key_request(self):
parsed_dict = TestClass._load_response(
"tests/data/events/room_key_request.json"
)
event = ToDeviceEvent.parse_event(parsed_dict)
assert isinstance(event, RoomKeyRequest)
assert event.room_id is not None
parsed_dict = TestClass._load_response(
"tests/data/events/room_key_request_cancel.json"
)
event = ToDeviceEvent.parse_event(parsed_dict)
assert isinstance(event, RoomKeyRequestCancellation)
def test_encrypted_media_thumbnails(self):
parsed_dict = TestClass._load_response(
"tests/data/events/room_encrypted_image.json"
)
event = Event.parse_decrypted_event(parsed_dict)
assert isinstance(event, RoomEncryptedImage)
assert event.thumbnail_url
assert event.thumbnail_key
assert event.thumbnail_hashes
assert event.thumbnail_iv
| 36.556989
| 88
| 0.664921
|
15e432bd32fe0dc38e5923de5bcf6095c7d93732
| 2,706
|
py
|
Python
|
respx/transports.py
|
shadchin/respx
|
3981b5b4e3301bb6099bed8b45ecbdd8c5aec89f
|
[
"BSD-3-Clause"
] | 297
|
2019-11-18T09:56:16.000Z
|
2022-03-31T12:33:56.000Z
|
respx/transports.py
|
shadchin/respx
|
3981b5b4e3301bb6099bed8b45ecbdd8c5aec89f
|
[
"BSD-3-Clause"
] | 161
|
2019-11-16T11:39:58.000Z
|
2022-03-16T15:07:30.000Z
|
respx/transports.py
|
shadchin/respx
|
3981b5b4e3301bb6099bed8b45ecbdd8c5aec89f
|
[
"BSD-3-Clause"
] | 30
|
2019-11-18T20:45:57.000Z
|
2021-12-29T13:34:37.000Z
|
from types import TracebackType
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
List,
Optional,
Type,
Union,
cast,
)
from warnings import warn
import httpx
from httpx import AsyncBaseTransport, BaseTransport
from .models import PassThrough
if TYPE_CHECKING:
from .router import Router # pragma: nocover
RequestHandler = Callable[[httpx.Request], httpx.Response]
AsyncRequestHandler = Callable[[httpx.Request], Awaitable[httpx.Response]]
class MockTransport(httpx.MockTransport):
_router: Optional["Router"]
def __init__(
self,
*,
handler: Optional[RequestHandler] = None,
async_handler: Optional[AsyncRequestHandler] = None,
router: Optional["Router"] = None,
):
if router:
super().__init__(router.handler)
self._router = router
elif handler:
super().__init__(handler)
self._router = None
elif async_handler:
super().__init__(async_handler)
self._router = None
else:
raise RuntimeError(
"Missing a MockTransport required handler or router argument"
)
warn(
"MockTransport is deprecated. "
"Please use `httpx.MockTransport(respx_router.handler)`.",
category=DeprecationWarning,
)
def __exit__(
self,
exc_type: Type[BaseException] = None,
exc_value: BaseException = None,
traceback: TracebackType = None,
) -> None:
if not exc_type and self._router and self._router._assert_all_called:
self._router.assert_all_called()
async def __aexit__(self, *args: Any) -> None:
self.__exit__(*args)
class TryTransport(BaseTransport, AsyncBaseTransport):
def __init__(
self, transports: List[Union[BaseTransport, AsyncBaseTransport]]
) -> None:
self.transports = transports
def handle_request(self, request: httpx.Request) -> httpx.Response:
for transport in self.transports:
try:
transport = cast(BaseTransport, transport)
return transport.handle_request(request)
except PassThrough:
continue
raise RuntimeError() # pragma: nocover
async def handle_async_request(self, request: httpx.Request) -> httpx.Response:
for transport in self.transports:
try:
transport = cast(AsyncBaseTransport, transport)
return await transport.handle_async_request(request)
except PassThrough:
continue
raise RuntimeError() # pragma: nocover
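# --- Editor's usage sketch (not part of the original module) ---
# Illustrates how TryTransport chains transports: each transport is tried in
# order, and a PassThrough raised by a respx-backed transport hands the request
# to the next one. The handler and URL below are made up for illustration only.
if __name__ == "__main__":  # pragma: no cover
    def _handler(request: httpx.Request) -> httpx.Response:
        return httpx.Response(200, text="mocked response")

    mocked = httpx.MockTransport(_handler)
    fallback = httpx.HTTPTransport()
    client = httpx.Client(transport=TryTransport([mocked, fallback]))
    print(client.get("https://example.org").text)  # served by the mock transport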
| 28.787234
| 83
| 0.624908
|
4c78bac5ffa8cf5a5803a39f789c86aaab0278d3
| 2,653
|
py
|
Python
|
pspnet.py
|
KruskalLin/Segmentation
|
ba097d1d20eeba6f8d1851709aa4daad78865c1b
|
[
"MIT"
] | 1
|
2021-04-27T05:22:31.000Z
|
2021-04-27T05:22:31.000Z
|
pspnet.py
|
KruskalLin/Segmentation
|
ba097d1d20eeba6f8d1851709aa4daad78865c1b
|
[
"MIT"
] | null | null | null |
pspnet.py
|
KruskalLin/Segmentation
|
ba097d1d20eeba6f8d1851709aa4daad78865c1b
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
from torch.nn import functional as F
import extractors
class PSPModule(nn.Module):
def __init__(self, features, out_features=1024, sizes=(1, 2, 3, 6)):
super().__init__()
self.stages = []
self.stages = nn.ModuleList([self._make_stage(features, size) for size in sizes])
self.bottleneck = nn.Conv2d(features * (len(sizes) + 1), out_features, kernel_size=1)
self.relu = nn.ReLU()
def _make_stage(self, features, size):
prior = nn.AdaptiveAvgPool2d(output_size=(size, size))
conv = nn.Conv2d(features, features, kernel_size=1, bias=False)
return nn.Sequential(prior, conv)
def forward(self, feats):
h, w = feats.size(2), feats.size(3)
priors = [F.upsample(input=stage(feats), size=(h, w), mode='bilinear') for stage in self.stages] + [feats]
bottle = self.bottleneck(torch.cat(priors, 1))
return self.relu(bottle)
class PSPUpsample(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 3, padding=1),
nn.BatchNorm2d(out_channels),
nn.PReLU()
)
def forward(self, x):
h, w = 2 * x.size(2), 2 * x.size(3)
p = F.upsample(input=x, size=(h, w), mode='bilinear')
return self.conv(p)
class PSPNet(nn.Module):
def __init__(self, n_classes=18, sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend='resnet34',
pretrained=True):
super().__init__()
self.feats = getattr(extractors, backend)(pretrained)
self.psp = PSPModule(psp_size, 1024, sizes)
self.drop_1 = nn.Dropout2d(p=0.3)
self.up_1 = PSPUpsample(1024, 256)
self.up_2 = PSPUpsample(256, 64)
self.up_3 = PSPUpsample(64, 64)
self.drop_2 = nn.Dropout2d(p=0.15)
self.final = nn.Sequential(
nn.Conv2d(64, n_classes, kernel_size=1),
nn.LogSoftmax()
)
self.classifier = nn.Sequential(
nn.Linear(deep_features_size, 256),
nn.ReLU(),
nn.Linear(256, n_classes)
)
def forward(self, x):
f, class_f = self.feats(x)
p = self.psp(f)
p = self.drop_1(p)
p = self.up_1(p)
p = self.drop_2(p)
p = self.up_2(p)
p = self.drop_2(p)
p = self.up_3(p)
p = self.drop_2(p)
auxiliary = F.adaptive_max_pool2d(input=class_f, output_size=(1, 1)).view(-1, class_f.size(1))
return self.final(p), self.classifier(auxiliary)
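# --- Editor's usage sketch (not part of the original file) ---
# Builds the network and runs a dummy forward pass. This assumes the local
# `extractors` module provides a `resnet34(pretrained)` callable returning
# (features, class_features), as the constructor above expects. Note that
# F.upsample used above is deprecated in favor of F.interpolate.
if __name__ == "__main__":
    model = PSPNet(n_classes=18, backend='resnet34', pretrained=False)
    dummy = torch.randn(2, 3, 224, 224)
    seg_log_probs, aux_logits = model(dummy)
    print(seg_log_probs.shape, aux_logits.shape)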
| 31.963855
| 114
| 0.592914
|
adf31351884d9c28889566105f35ce88772cc56c
| 2,991
|
py
|
Python
|
tardis/plasma/properties/continuum_processes.py
|
subhayu99/tardis
|
50f13ba63499d5371af790025c659483ef52bfd1
|
[
"BSD-3-Clause"
] | 1
|
2021-06-24T17:16:53.000Z
|
2021-06-24T17:16:53.000Z
|
tardis/plasma/properties/continuum_processes.py
|
dhiganthrao/tardis
|
7792e722f929d661244a819332a031cee7d1b3bf
|
[
"BSD-3-Clause"
] | null | null | null |
tardis/plasma/properties/continuum_processes.py
|
dhiganthrao/tardis
|
7792e722f929d661244a819332a031cee7d1b3bf
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
import numpy as np
import pandas as pd
from numba import prange, njit
from astropy import constants as const
from tardis.plasma.properties.base import ProcessingPlasmaProperty
__all__ = ["SpontRecombRateCoeff"]
logger = logging.getLogger(__name__)
njit_dict = {"fastmath": False, "parallel": False}
@njit(**njit_dict)
def integrate_array_by_blocks(f, x, block_references):
"""
Integrates a function f defined at locations x over blocks
given in block_references.
Parameters
----------
f : Two-dimensional Numpy Array, dtype float
x : One-dimensional Numpy Array, dtype float
block_references : One-dimensional Numpy Array, dtype int
Returns
-------
integrated : Two-dimensional Numpy Array, dtype float
"""
integrated = np.zeros((len(block_references) - 1, f.shape[1]))
for i in prange(f.shape[1]): # columns
for j in prange(len(integrated)): # rows
start = block_references[j]
stop = block_references[j + 1]
integrated[j, i] = np.trapz(f[start:stop, i], x[start:stop])
return integrated
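# Editor's note (illustrative, not part of the original file) - a small worked
# example of the block integration above:
#   x = np.linspace(0.0, 1.0, 5); f = np.ones((5, 2))
#   integrate_array_by_blocks(f, x, np.array([0, 3, 5]))
#   -> [[0.5, 0.5], [0.25, 0.25]]   (trapezoids over x[0:3] and x[3:5])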
def get_ion_multi_index(multi_index_full, next_higher=True):
"""
    Return the (atomic_number, ion_number) MultiIndex corresponding to a
    full (atomic_number, ion_number, level_number) MultiIndex, optionally
    shifted to the next higher ion.
Parameters
----------
multi_index_full : Pandas MultiIndex (atomic_number, ion_number,
level_number)
next_higher : bool
If true use ion number of next higher ion, else use ion_number from
multi_index_full.
Returns
-------
multi_index : Pandas MultiIndex (atomic_number, ion_number)
"""
atomic_number = multi_index_full.get_level_values(0)
ion_number = multi_index_full.get_level_values(1)
if next_higher is True:
ion_number += 1
return pd.MultiIndex.from_arrays([atomic_number, ion_number])
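# Editor's note (illustrative, not part of the original file) - example of the
# helper above with made-up index values:
#   get_ion_multi_index(pd.MultiIndex.from_tuples([(1, 0, 0), (2, 1, 5)]))
#   -> MultiIndex of (atomic_number, ion_number) pairs (1, 1) and (2, 2)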
class SpontRecombRateCoeff(ProcessingPlasmaProperty):
"""
Attributes
----------
alpha_sp : Pandas DataFrame, dtype float
The rate coefficient for spontaneous recombination.
"""
outputs = ("alpha_sp",)
latex_name = (r"\alpha^{\textrm{sp}}",)
def calculate(
self,
photo_ion_cross_sections,
t_electrons,
photo_ion_block_references,
photo_ion_index,
phi_ik,
):
x_sect = photo_ion_cross_sections["x_sect"].values
nu = photo_ion_cross_sections["nu"].values
alpha_sp = 8 * np.pi * x_sect * nu ** 2 / (const.c.cgs.value) ** 2
alpha_sp = alpha_sp[:, np.newaxis]
boltzmann_factor = np.exp(
-nu[np.newaxis].T
/ t_electrons
* (const.h.cgs.value / const.k_B.cgs.value)
)
alpha_sp = alpha_sp * boltzmann_factor
alpha_sp = integrate_array_by_blocks(
alpha_sp, nu, photo_ion_block_references
)
alpha_sp = pd.DataFrame(alpha_sp, index=photo_ion_index)
return alpha_sp * phi_ik
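# Editor's note: up to the multiplicative factor phi_ik supplied by the plasma
# state, the quantity assembled in `calculate` corresponds to
#   alpha_sp = (8 * pi / c**2) * integral over nu of
#              [ sigma(nu) * nu**2 * exp(-h * nu / (k_B * T_e)) ]
# evaluated separately for each photoionization continuum block.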
| 29.323529
| 75
| 0.645938
|
0755941a21294dd9c82fe44dd3a95b18ddebc264
| 4,868
|
py
|
Python
|
allennlp/data/dataset_readers/dataset_reader.py
|
CuriousKomodo/allennlp
|
38f74bfb56326137a67e9442346425e639614605
|
[
"Apache-2.0"
] | null | null | null |
allennlp/data/dataset_readers/dataset_reader.py
|
CuriousKomodo/allennlp
|
38f74bfb56326137a67e9442346425e639614605
|
[
"Apache-2.0"
] | null | null | null |
allennlp/data/dataset_readers/dataset_reader.py
|
CuriousKomodo/allennlp
|
38f74bfb56326137a67e9442346425e639614605
|
[
"Apache-2.0"
] | null | null | null |
from typing import Iterable, Iterator, Callable
import logging
from allennlp.data.instance import Instance
from allennlp.common import Tqdm
from allennlp.common.checks import ConfigurationError
from allennlp.common.registrable import Registrable
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class _LazyInstances(Iterable):
"""
An ``Iterable`` that just wraps a thunk for generating instances and calls it for
each call to ``__iter__``.
"""
def __init__(self, instance_generator: Callable[[], Iterator[Instance]]) -> None:
super().__init__()
self.instance_generator = instance_generator
def __iter__(self) -> Iterator[Instance]:
instances = self.instance_generator()
if isinstance(instances, list):
raise ConfigurationError("For a lazy dataset reader, _read() must return a generator")
return instances
class DatasetReader(Registrable):
"""
A ``DatasetReader`` knows how to turn a file containing a dataset into a collection
of ``Instance`` s. To implement your own, just override the `_read(file_path)` method
to return an ``Iterable`` of the instances. This could be a list containing the instances
or a lazy generator that returns them one at a time.
All parameters necessary to _read the data apart from the filepath should be passed
to the constructor of the ``DatasetReader``.
Parameters
----------
lazy : ``bool``, optional (default=False)
If this is true, ``instances()`` will return an object whose ``__iter__`` method
reloads the dataset each time it's called. Otherwise, ``instances()`` returns a list.
"""
def __init__(self, lazy: bool = False) -> None:
self.lazy = lazy
def read(self, file_path: str) -> Iterable[Instance]:
"""
Returns an ``Iterable`` containing all the instances
in the specified dataset.
If ``self.lazy`` is False, this calls ``self._read()``,
ensures that the result is a list, then returns the resulting list.
If ``self.lazy`` is True, this returns an object whose
``__iter__`` method calls ``self._read()`` each iteration.
In this case your implementation of ``_read()`` must also be lazy
(that is, not load all instances into memory at once), otherwise
you will get a ``ConfigurationError``.
In either case, the returned ``Iterable`` can be iterated
over multiple times. It's unlikely you want to override this function,
but if you do your result should likewise be repeatedly iterable.
"""
lazy = getattr(self, 'lazy', None)
if lazy is None:
logger.warning("DatasetReader.lazy is not set, "
"did you forget to call the superclass constructor?")
if lazy:
return _LazyInstances(lambda: iter(self._read(file_path)))
else:
instances = self._read(file_path)
if not isinstance(instances, list):
instances = [instance for instance in Tqdm.tqdm(instances)]
if not instances:
raise ConfigurationError("No instances were read from the given filepath {}. "
"Is the path correct?".format(file_path))
return instances
def _read(self, file_path: str) -> Iterable[Instance]:
"""
Reads the instances from the given file_path and returns them as an
`Iterable` (which could be a list or could be a generator).
You are strongly encouraged to use a generator, so that users can
read a dataset in a lazy way, if they so choose.
"""
        raise NotImplementedError
def text_to_instance(self, *inputs) -> Instance:
"""
Does whatever tokenization or processing is necessary to go from textual input to an
``Instance``. The primary intended use for this is with a
:class:`~allennlp.service.predictors.predictor.Predictor`, which gets text input as a JSON
object and needs to process it to be input to a model.
The intent here is to share code between :func:`_read` and what happens at
model serving time, or any other time you want to make a prediction from new data. We need
to process the data in the same way it was done at training time. Allowing the
``DatasetReader`` to process new text lets us accomplish this, as we can just call
``DatasetReader.text_to_instance`` when serving predictions.
The input type here is rather vaguely specified, unfortunately. The ``Predictor`` will
have to make some assumptions about the kind of ``DatasetReader`` that it's using, in order
to pass it the right information.
"""
raise NotImplementedError
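# --- Editor's sketch: a minimal subclass (not part of the original file) ---
# Shows the intended override points: `_read` yields instances lazily and
# `text_to_instance` builds a single Instance. The field and tokenizer imports
# assume the AllenNLP 0.x API and are left as comments so this module's import
# graph is unchanged.
#
#     from allennlp.data.fields import TextField
#     from allennlp.data.token_indexers import SingleIdTokenIndexer
#     from allennlp.data.tokenizers import WordTokenizer
#
#     class LinePerInstanceReader(DatasetReader):
#         def __init__(self, lazy: bool = False) -> None:
#             super().__init__(lazy)
#             self._tokenizer = WordTokenizer()
#             self._token_indexers = {"tokens": SingleIdTokenIndexer()}
#
#         def _read(self, file_path: str) -> Iterable[Instance]:
#             with open(file_path) as data_file:
#                 for line in data_file:
#                     line = line.strip()
#                     if line:
#                         yield self.text_to_instance(line)
#
#         def text_to_instance(self, text: str) -> Instance:
#             tokens = self._tokenizer.tokenize(text)
#             return Instance({"tokens": TextField(tokens, self._token_indexers)})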
| 45.495327
| 99
| 0.661463
|
76e5af815fc87cf5b4d549b43a2d7049aa7b0cf3
| 2,400
|
py
|
Python
|
flask_admin/contrib/mongoengine/fields.py
|
UUDigitalHumanitieslab/flask-admin
|
8d7b0a50f2547b6549dd2a3ee6fdab9de3798b76
|
[
"BSD-3-Clause"
] | 2
|
2015-01-04T15:56:55.000Z
|
2015-06-23T19:55:07.000Z
|
flask_admin/contrib/mongoengine/fields.py
|
xelez/flask-admin
|
a671952f498d9a355d15ec332d4e01e621bf1e6d
|
[
"BSD-3-Clause"
] | null | null | null |
flask_admin/contrib/mongoengine/fields.py
|
xelez/flask-admin
|
a671952f498d9a355d15ec332d4e01e621bf1e6d
|
[
"BSD-3-Clause"
] | null | null | null |
from werkzeug.datastructures import FileStorage
from wtforms import fields
try:
from wtforms.fields.core import _unset_value as unset_value
except ImportError:
from wtforms.utils import unset_value
from . import widgets
from flask_admin.model.fields import InlineFormField
def is_empty(file_object):
file_object.seek(0)
first_char = file_object.read(1)
file_object.seek(0)
return not bool(first_char)
class ModelFormField(InlineFormField):
"""
Customized ModelFormField for MongoEngine EmbeddedDocuments.
"""
def __init__(self, model, view, form_class, form_opts=None, **kwargs):
super(ModelFormField, self).__init__(form_class, **kwargs)
self.model = model
self.view = view
self.form_opts = form_opts
def populate_obj(self, obj, name):
candidate = getattr(obj, name, None)
if candidate is None:
candidate = self.model()
setattr(obj, name, candidate)
self.form.populate_obj(candidate)
self.view.on_model_change(self.form, candidate)
class MongoFileField(fields.FileField):
"""
GridFS file field.
"""
widget = widgets.MongoFileInput()
def __init__(self, label=None, validators=None, **kwargs):
super(MongoFileField, self).__init__(label, validators, **kwargs)
self._should_delete = False
def process(self, formdata, data=unset_value):
if formdata:
marker = '_%s-delete' % self.name
if marker in formdata:
self._should_delete = True
return super(MongoFileField, self).process(formdata, data)
def populate_obj(self, obj, name):
field = getattr(obj, name, None)
if field is not None:
# If field should be deleted, clean it up
if self._should_delete:
field.delete()
return
if isinstance(self.data, FileStorage) and not is_empty(self.data.stream):
if not field.grid_id:
func = field.put
else:
func = field.replace
func(self.data.stream,
filename=self.data.filename,
content_type=self.data.content_type)
class MongoImageField(MongoFileField):
"""
GridFS image field.
"""
widget = widgets.MongoImageInput()
| 27.586207
| 85
| 0.625833
|
2e8b7f8c147b5ca8886eb1d27fbcad7fdffb3ec9
| 8,648
|
py
|
Python
|
test/unit/test_mvt_manager.py
|
neogeo-technologies/djangorestframework-mvt
|
138a09f092c04247f3f9dfc4546e5dad5d04ccc6
|
[
"BSD-3-Clause"
] | 43
|
2019-06-17T19:18:24.000Z
|
2022-03-09T19:52:07.000Z
|
test/unit/test_mvt_manager.py
|
neogeo-technologies/djangorestframework-mvt
|
138a09f092c04247f3f9dfc4546e5dad5d04ccc6
|
[
"BSD-3-Clause"
] | 15
|
2019-12-18T14:27:43.000Z
|
2021-05-05T13:01:11.000Z
|
test/unit/test_mvt_manager.py
|
neogeo-technologies/djangorestframework-mvt
|
138a09f092c04247f3f9dfc4546e5dad5d04ccc6
|
[
"BSD-3-Clause"
] | 10
|
2020-01-13T11:37:17.000Z
|
2022-02-07T12:58:51.000Z
|
from django.core.exceptions import FieldError
from rest_framework_mvt.managers import MVTManager
from rest_framework.serializers import ValidationError
from mock import patch, MagicMock
import pytest
@pytest.fixture
def mvt_manager():
mvt_manager = MVTManager(geo_col="jazzy_geo")
meta = MagicMock(db_table="test_table")
fields = [
MagicMock(
get_attname_column=MagicMock(return_value=("other_column", "other_column"))
),
MagicMock(
get_attname_column=MagicMock(return_value=("jazzy_geo", "jazzy_geo"))
),
MagicMock(get_attname_column=MagicMock(return_value=("city", "city"))),
]
meta.get_fields.return_value = fields
mvt_manager.model = MagicMock(_meta=meta)
return mvt_manager
@pytest.fixture
def mvt_manager_no_col():
mvt_manager_no_col = MVTManager()
meta = MagicMock(db_table="test_table")
fields = [
MagicMock(
get_attname_column=MagicMock(return_value=("other_column", "other_column"))
),
MagicMock(
get_attname_column=MagicMock(return_value=("jazzy_geo", "jazzy_geo"))
),
MagicMock(get_attname_column=MagicMock(return_value=("city", "city"))),
MagicMock(
get_attname_column=MagicMock(return_value=("generic_relation", None))
),
]
meta.get_fields.return_value = fields
mvt_manager_no_col.model = MagicMock(_meta=meta)
return mvt_manager_no_col
@patch("rest_framework_mvt.managers.MVTManager._get_connection")
def test_mvt_manager_intersect__calls__build_query(get_conn, mvt_manager):
mvt_manager._build_query = MagicMock()
mvt_manager._build_query.return_value = ("foo", ["bar"])
mvt_manager.intersect(bbox="", limit=10, offset=7)
mvt_manager._build_query.assert_called_once_with(filters={})
@patch("rest_framework_mvt.managers.MVTManager.only")
@patch("rest_framework_mvt.managers.MVTManager._get_connection")
def test_mvt_manager_build_query__all(get_conn, only, mvt_manager):
query = MagicMock()
query.sql_with_params.return_value = ("SELECT other_column, city FROM table", [])
only.return_value = MagicMock(query=query)
expected_query = """
SELECT NULL AS id, ST_AsMVT(q, 'default', 4096, 'mvt_geom')
FROM (SELECT other_column, city,
ST_AsMVTGeom(ST_Transform(test_table.jazzy_geo, 3857),
ST_Transform(ST_SetSRID(ST_GeomFromText(%s), 4326), 3857), 4096, 0, false) AS mvt_geom
FROM test_table
WHERE ST_Intersects(ST_Transform(test_table.jazzy_geo, 4326), ST_SetSRID(ST_GeomFromText(%s), 4326))
LIMIT %s
OFFSET %s) AS q;
""".strip()
expected_parameters = []
query, parameters = mvt_manager._build_query()
assert expected_query == query
assert expected_parameters == parameters
@patch("rest_framework_mvt.managers.MVTManager.only")
@patch("rest_framework_mvt.managers.MVTManager._get_connection")
def test_mvt_manager_build_query__no_geo_col(get_conn, only, mvt_manager_no_col):
query = MagicMock()
query.sql_with_params.return_value = ("SELECT other_column, city FROM table", [])
only.return_value = MagicMock(query=query)
expected_query = """
SELECT NULL AS id, ST_AsMVT(q, 'default', 4096, 'mvt_geom')
FROM (SELECT other_column, city,
ST_AsMVTGeom(ST_Transform(test_table.geom, 3857),
ST_Transform(ST_SetSRID(ST_GeomFromText(%s), 4326), 3857), 4096, 0, false) AS mvt_geom
FROM test_table
WHERE ST_Intersects(ST_Transform(test_table.geom, 4326), ST_SetSRID(ST_GeomFromText(%s), 4326))
LIMIT %s
OFFSET %s) AS q;
""".strip()
expected_parameters = []
query, parameters = mvt_manager_no_col._build_query()
assert expected_query == query
assert expected_parameters == parameters
only.assert_called_once_with("other_column", "jazzy_geo", "city")
@patch("rest_framework_mvt.managers.MVTManager.filter")
@patch("rest_framework_mvt.managers.MVTManager.only")
@patch("rest_framework_mvt.managers.MVTManager._get_connection")
def test_mvt_manager_build_query__filter(get_conn, only, orm_filter, mvt_manager):
query = MagicMock()
query.sql_with_params.return_value = (
"SELECT other_column, city FROM table WHERE (city = %s)",
["johnston"],
)
only.return_value = MagicMock(query=query)
orm_filter.return_value = MagicMock(query=query)
expected_query = """
SELECT NULL AS id, ST_AsMVT(q, 'default', 4096, 'mvt_geom')
FROM (SELECT other_column, city,
ST_AsMVTGeom(ST_Transform(test_table.jazzy_geo, 3857),
ST_Transform(ST_SetSRID(ST_GeomFromText(%s), 4326), 3857), 4096, 0, false) AS mvt_geom
FROM test_table
WHERE ST_Intersects(ST_Transform(test_table.jazzy_geo, 4326), ST_SetSRID(ST_GeomFromText(%s), 4326)) AND (city = %s)
LIMIT %s
OFFSET %s) AS q;
""".strip()
expected_parameters = ["johnston"]
query, parameters = mvt_manager._build_query(filters={"city": "johnston"})
assert expected_query == query
assert expected_parameters == parameters
@patch("rest_framework_mvt.managers.MVTManager.filter")
@patch("rest_framework_mvt.managers.MVTManager.only")
@patch("rest_framework_mvt.managers.MVTManager._get_connection")
def test_mvt_manager_build_query__multiple_filters(
get_conn, only, orm_filter, mvt_manager
):
query = MagicMock()
query.sql_with_params.return_value = (
"SELECT other_column, city FROM table WHERE (city = %s AND other_column = %s)",
["johnston", "IA"],
)
only.return_value = MagicMock(query=query)
orm_filter.return_value = MagicMock(query=query)
expected_query = """
SELECT NULL AS id, ST_AsMVT(q, 'default', 4096, 'mvt_geom')
FROM (SELECT other_column, city,
ST_AsMVTGeom(ST_Transform(test_table.jazzy_geo, 3857),
ST_Transform(ST_SetSRID(ST_GeomFromText(%s), 4326), 3857), 4096, 0, false) AS mvt_geom
FROM test_table
WHERE ST_Intersects(ST_Transform(test_table.jazzy_geo, 4326), ST_SetSRID(ST_GeomFromText(%s), 4326)) AND (city = %s AND other_column = %s)
LIMIT %s
OFFSET %s) AS q;
""".strip()
expected_parameters = ["johnston", "IA"]
query, parameters = mvt_manager._build_query(
filters={"city": "johnston", "other_column": "IA"}
)
assert expected_query == query
assert expected_parameters == parameters
@patch("rest_framework_mvt.managers.MVTManager.filter")
@patch("rest_framework_mvt.managers.MVTManager.only")
@patch("rest_framework_mvt.managers.MVTManager._get_connection")
def test_mvt_manager_build_query__validation_error(
get_conn, only, orm_filter, mvt_manager
):
query = MagicMock()
query.sql_with_params.return_value = (
"SELECT other_column, city FROM table WHERE (city = %s AND other_column = %s)",
["johnston", "IA"],
)
only.return_value = MagicMock(query=query)
orm_filter.side_effect = FieldError
with pytest.raises(ValidationError) as e:
query = mvt_manager._build_query(filters={"not_a_filter": "oops"})
@patch("rest_framework_mvt.managers.MVTManager.filter")
@patch("rest_framework_mvt.managers.MVTManager._get_connection")
def test_mvt_manager_create_where_clause_with_params(get_conn, orm_filter, mvt_manager):
query_filter = MagicMock()
query_filter.sql_with_params.return_value = (
(
'SELECT "my_schema"."my_table"."id", "my_schema"."my_table"."foreign_key_id", '
'"my_schema"."my_table"."col_1", "my_schema"."my_table"."geom"::bytea FROM '
'"my_schema"."my_table" WHERE ("my_schema"."my_table"."col_1" = %s AND '
'"my_schema"."my_table"."foreign_key_id" = %s)'
),
("filter_1", 1),
)
orm_filter.return_value = MagicMock(query=query_filter)
(
parameterized_where_clause,
where_clause_parameters,
) = mvt_manager._create_where_clause_with_params(
"my_schema.my_table", {"col_1": "filter_1", "foreign_key": 1}
)
orm_filter.assert_called_once_with(col_1="filter_1", foreign_key=1)
query_filter.sql_with_params.assert_called_once()
assert parameterized_where_clause == (
"ST_Intersects(ST_Transform(my_schema.my_table.jazzy_geo, 4326), ST_SetSRID(ST_GeomFromText(%s), 4326)) "
'AND ("my_schema"."my_table"."col_1" = %s AND "my_schema"."my_table"."foreign_key_id" = %s)'
)
assert where_clause_parameters == ["filter_1", 1]
| 39.852535
| 150
| 0.690796
|
7a3755e53a9f13b6314a8dca1b28613ad81236a0
| 24,813
|
py
|
Python
|
zerver/webhooks/bitbucket2/tests.py
|
shreyanshdwivedi/zulip
|
fe39ad04e191c4d0d4a4b54fd94529a9df9f72ae
|
[
"Apache-2.0"
] | null | null | null |
zerver/webhooks/bitbucket2/tests.py
|
shreyanshdwivedi/zulip
|
fe39ad04e191c4d0d4a4b54fd94529a9df9f72ae
|
[
"Apache-2.0"
] | 7
|
2020-09-06T14:54:30.000Z
|
2022-02-10T18:51:14.000Z
|
zerver/webhooks/bitbucket2/tests.py
|
b-randall/SpecialEdd
|
014fd2b220f52762848592cab90c493d1c77682d
|
[
"Apache-2.0"
] | 9
|
2019-11-04T18:59:29.000Z
|
2022-03-22T17:46:37.000Z
|
# -*- coding: utf-8 -*-
from typing import Optional
from mock import MagicMock, patch
from zerver.lib.test_classes import WebhookTestCase
class Bitbucket2HookTests(WebhookTestCase):
STREAM_NAME = 'bitbucket2'
URL_TEMPLATE = "/api/v1/external/bitbucket2?stream={stream}&api_key={api_key}"
FIXTURE_DIR_NAME = 'bitbucket2'
EXPECTED_SUBJECT = u"Repository name"
EXPECTED_SUBJECT_PR_EVENTS = u"Repository name / PR #1 new commit"
EXPECTED_SUBJECT_ISSUE_EVENTS = u"Repository name / Issue #1 Bug"
EXPECTED_SUBJECT_BRANCH_EVENTS = u"Repository name / master"
def test_bitbucket2_on_push_event(self) -> None:
commit_info = u'* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))'
expected_message = u"kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 1 commit to branch master.\n\n{}".format(commit_info)
self.send_and_test_stream_message('push', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message)
def test_bitbucket2_on_push_commits_multiple_committers(self) -> None:
commit_info = u'* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))\n'
expected_message = u"""kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 3 commits to branch master. Commits by zbenjamin (2) and kolaszek (1).\n\n{}* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))""".format(commit_info*2)
self.send_and_test_stream_message('push_multiple_committers', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message)
def test_bitbucket2_on_push_commits_multiple_committers_with_others(self) -> None:
commit_info = u'* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))\n'
expected_message = u"""kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 10 commits to branch master. Commits by james (3), Brendon (2), Tomasz (2) and others (3).\n\n{}* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))""".format(commit_info*9)
self.send_and_test_stream_message('push_multiple_committers_with_others', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message)
def test_bitbucket2_on_push_commits_multiple_committers_filtered_by_branches(self) -> None:
self.url = self.build_webhook_url(branches='master,development')
commit_info = u'* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))\n'
expected_message = u"""kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 3 commits to branch master. Commits by zbenjamin (2) and kolaszek (1).\n\n{}* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))""".format(commit_info*2)
self.send_and_test_stream_message('push_multiple_committers', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message)
def test_bitbucket2_on_push_commits_multiple_committers_with_others_filtered_by_branches(self) -> None:
self.url = self.build_webhook_url(branches='master,development')
commit_info = u'* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))\n'
expected_message = u"""kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 10 commits to branch master. Commits by james (3), Brendon (2), Tomasz (2) and others (3).\n\n{}* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))""".format(commit_info*9)
self.send_and_test_stream_message('push_multiple_committers_with_others', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message)
def test_bitbucket2_on_push_event_filtered_by_branches(self) -> None:
self.url = self.build_webhook_url(branches='master,development')
commit_info = u'* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))'
expected_message = u"kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 1 commit to branch master.\n\n{}".format(commit_info)
self.send_and_test_stream_message('push', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message)
def test_bitbucket2_on_push_commits_above_limit_event(self) -> None:
commit_info = '* a ([6f161a7](https://bitbucket.org/kolaszek/repository-name/commits/6f161a7bced94430ac8947d87dbf45c6deee3fb0))\n'
expected_message = u"kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branches/compare/6f161a7bced94430ac8947d87dbf45c6deee3fb0..1221f2fda6f1e3654b09f1f3a08390e4cb25bb48) 5 commits to branch master. Commits by Tomasz (5).\n\n{}[and more commit(s)]".format(
(commit_info * 5),
)
self.send_and_test_stream_message('push_commits_above_limit', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message)
def test_bitbucket2_on_push_commits_above_limit_filtered_by_branches(self) -> None:
self.url = self.build_webhook_url(branches='master,development')
commit_info = '* a ([6f161a7](https://bitbucket.org/kolaszek/repository-name/commits/6f161a7bced94430ac8947d87dbf45c6deee3fb0))\n'
expected_message = u"kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branches/compare/6f161a7bced94430ac8947d87dbf45c6deee3fb0..1221f2fda6f1e3654b09f1f3a08390e4cb25bb48) 5 commits to branch master. Commits by Tomasz (5).\n\n{}[and more commit(s)]".format(
(commit_info * 5),
)
self.send_and_test_stream_message('push_commits_above_limit', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message)
def test_bitbucket2_on_force_push_event(self) -> None:
expected_message = u"kolaszek [force pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) to branch master. Head is now 25f93d22b719e2d678a7ad5ee0ef0d1fcdf39c12"
self.send_and_test_stream_message('force_push', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message)
def test_bitbucket2_on_force_push_event_filtered_by_branches(self) -> None:
self.url = self.build_webhook_url(branches='master,development')
expected_message = u"kolaszek [force pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) to branch master. Head is now 25f93d22b719e2d678a7ad5ee0ef0d1fcdf39c12"
self.send_and_test_stream_message('force_push', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message)
def test_bitbucket2_on_remove_branch_event(self) -> None:
expected_message = u"kolaszek deleted branch master"
self.send_and_test_stream_message('remove_branch', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message)
def test_bitbucket2_on_fork_event(self) -> None:
expected_message = u"User Tomasz(login: kolaszek) forked the repository into [kolaszek/repository-name2](https://bitbucket.org/kolaszek/repository-name2)."
self.send_and_test_stream_message('fork', self.EXPECTED_SUBJECT, expected_message)
def test_bitbucket2_on_commit_comment_created_event(self) -> None:
expected_message = u"kolaszek [commented](https://bitbucket.org/kolaszek/repository-name/commits/32c4ea19aa3af10acd08e419e2c354941a365d74#comment-3354963) on [32c4ea1](https://bitbucket.org/kolaszek/repository-name/commits/32c4ea19aa3af10acd08e419e2c354941a365d74)\n~~~ quote\nNice fix!\n~~~"
self.send_and_test_stream_message('commit_comment_created', self.EXPECTED_SUBJECT, expected_message)
def test_bitbucket2_on_commit_status_changed_event(self) -> None:
expected_message = u"[System mybuildtool](https://my-build-tool.com/builds/MY-PROJECT/BUILD-777) changed status of https://bitbucket.org/kolaszek/repository-name/9fec847784abb10b2fa567ee63b85bd238955d0e to SUCCESSFUL."
self.send_and_test_stream_message('commit_status_changed', self.EXPECTED_SUBJECT, expected_message)
def test_bitbucket2_on_issue_created_event(self) -> None:
expected_message = u"kolaszek created [Issue #1](https://bitbucket.org/kolaszek/repository-name/issues/2/bug)(assigned to kolaszek)\n\n~~~ quote\nSuch a bug\n~~~"
self.send_and_test_stream_message('issue_created', self.EXPECTED_SUBJECT_ISSUE_EVENTS, expected_message)
def test_bitbucket2_on_issue_created_with_custom_topic_in_url(self) -> None:
self.url = self.build_webhook_url(topic="notifications")
expected_subject = u"notifications"
expected_message = u"kolaszek created [Issue #1 Bug](https://bitbucket.org/kolaszek/repository-name/issues/2/bug)(assigned to kolaszek)\n\n~~~ quote\nSuch a bug\n~~~"
self.send_and_test_stream_message('issue_created', expected_subject, expected_message)
def test_bitbucket2_on_issue_updated_event(self) -> None:
expected_message = u"kolaszek updated [Issue #1](https://bitbucket.org/kolaszek/repository-name/issues/2/bug)"
self.send_and_test_stream_message('issue_updated', self.EXPECTED_SUBJECT_ISSUE_EVENTS, expected_message)
def test_bitbucket2_on_issue_commented_event(self) -> None:
expected_message = u"kolaszek [commented](https://bitbucket.org/kolaszek/repository-name/issues/2#comment-28973596) on [Issue #1](https://bitbucket.org/kolaszek/repository-name/issues/2/bug)"
self.send_and_test_stream_message('issue_commented', self.EXPECTED_SUBJECT_ISSUE_EVENTS, expected_message)
def test_bitbucket2_on_issue_commented_with_custom_topic_in_url(self) -> None:
self.url = self.build_webhook_url(topic="notifications")
expected_subject = u"notifications"
expected_message = u"kolaszek [commented](https://bitbucket.org/kolaszek/repository-name/issues/2#comment-28973596) on [Issue #1 Bug](https://bitbucket.org/kolaszek/repository-name/issues/2/bug)"
self.send_and_test_stream_message('issue_commented', expected_subject, expected_message)
def test_bitbucket2_on_pull_request_created_event(self) -> None:
expected_message = u"kolaszek created [PR #1](https://bitbucket.org/kolaszek/repository-name/pull-requests/1)(assigned to tkolek)\nfrom `new-branch` to `master`\n\n~~~ quote\ndescription\n~~~"
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:created'
}
self.send_and_test_stream_message('pull_request_created_or_updated', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, **kwargs)
def test_bitbucket2_on_pull_request_created_with_custom_topic_in_url(self) -> None:
self.url = self.build_webhook_url(topic="notifications")
expected_subject = u"notifications"
expected_message = u"kolaszek created [PR #1 new commit](https://bitbucket.org/kolaszek/repository-name/pull-requests/1)(assigned to tkolek)\nfrom `new-branch` to `master`\n\n~~~ quote\ndescription\n~~~"
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:created'
}
self.send_and_test_stream_message('pull_request_created_or_updated', expected_subject, expected_message, **kwargs)
def test_bitbucket2_on_pull_request_updated_event(self) -> None:
expected_message = u"kolaszek updated [PR #1](https://bitbucket.org/kolaszek/repository-name/pull-requests/1)(assigned to tkolek)\nfrom `new-branch` to `master`\n\n~~~ quote\ndescription\n~~~"
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:updated'
}
self.send_and_test_stream_message('pull_request_created_or_updated', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, **kwargs)
def test_bitbucket2_on_pull_request_approved_event(self) -> None:
expected_message = u"kolaszek approved [PR #1](https://bitbucket.org/kolaszek/repository-name/pull-requests/1)"
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:approved'
}
self.send_and_test_stream_message('pull_request_approved_or_unapproved', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, **kwargs)
def test_bitbucket2_on_pull_request_approved_with_custom_topic_in_url(self) -> None:
self.url = self.build_webhook_url(topic="notifications")
expected_subject = u"notifications"
expected_message = u"kolaszek approved [PR #1 new commit](https://bitbucket.org/kolaszek/repository-name/pull-requests/1)"
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:approved'
}
self.send_and_test_stream_message('pull_request_approved_or_unapproved', expected_subject, expected_message, **kwargs)
def test_bitbucket2_on_pull_request_unapproved_event(self) -> None:
expected_message = u"kolaszek unapproved [PR #1](https://bitbucket.org/kolaszek/repository-name/pull-requests/1)"
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:unapproved'
}
self.send_and_test_stream_message('pull_request_approved_or_unapproved', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, **kwargs)
def test_bitbucket2_on_pull_request_declined_event(self) -> None:
expected_message = u"kolaszek rejected [PR #1](https://bitbucket.org/kolaszek/repository-name/pull-requests/1)"
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:rejected'
}
self.send_and_test_stream_message('pull_request_fulfilled_or_rejected', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, **kwargs)
def test_bitbucket2_on_pull_request_fulfilled_event(self) -> None:
expected_message = u"kolaszek merged [PR #1](https://bitbucket.org/kolaszek/repository-name/pull-requests/1)"
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:fulfilled'
}
self.send_and_test_stream_message('pull_request_fulfilled_or_rejected', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, **kwargs)
def test_bitbucket2_on_pull_request_comment_created_event(self) -> None:
expected_message = u"kolaszek [commented](https://bitbucket.org/kolaszek/repository-name/pull-requests/3/_/diff#comment-20576503) on [PR #1](https://bitbucket.org/kolaszek/repository-name/pull-requests/3)\n\n~~~ quote\nComment1\n~~~"
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:comment_created'
}
self.send_and_test_stream_message('pull_request_comment_action', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, **kwargs)
def test_bitbucket2_on_pull_request_comment_created_with_custom_topic_in_url(self) -> None:
self.url = self.build_webhook_url(topic="notifications")
expected_subject = u"notifications"
expected_message = u"kolaszek [commented](https://bitbucket.org/kolaszek/repository-name/pull-requests/3/_/diff#comment-20576503) on [PR #1 new commit](https://bitbucket.org/kolaszek/repository-name/pull-requests/3)\n\n~~~ quote\nComment1\n~~~"
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:comment_created'
}
self.send_and_test_stream_message('pull_request_comment_action', expected_subject, expected_message, **kwargs)
def test_bitbucket2_on_pull_request_comment_updated_event(self) -> None:
expected_message = u"kolaszek updated a [comment](https://bitbucket.org/kolaszek/repository-name/pull-requests/3/_/diff#comment-20576503) on [PR #1](https://bitbucket.org/kolaszek/repository-name/pull-requests/3)\n\n~~~ quote\nComment1\n~~~"
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:comment_updated'
}
self.send_and_test_stream_message('pull_request_comment_action', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, **kwargs)
def test_bitbucket2_on_pull_request_comment_updated_with_custom_topic_in_url(self) -> None:
self.url = self.build_webhook_url(topic="notifications")
expected_subject = u"notifications"
expected_message = u"kolaszek updated a [comment](https://bitbucket.org/kolaszek/repository-name/pull-requests/3/_/diff#comment-20576503) on [PR #1 new commit](https://bitbucket.org/kolaszek/repository-name/pull-requests/3)\n\n~~~ quote\nComment1\n~~~"
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:comment_updated'
}
self.send_and_test_stream_message('pull_request_comment_action', expected_subject, expected_message, **kwargs)
def test_bitbucket2_on_pull_request_comment_deleted_event(self) -> None:
expected_message = u"kolaszek deleted a [comment](https://bitbucket.org/kolaszek/repository-name/pull-requests/3/_/diff#comment-20576503) on [PR #1](https://bitbucket.org/kolaszek/repository-name/pull-requests/3)\n\n~~~ quote\nComment1\n~~~"
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:comment_deleted'
}
self.send_and_test_stream_message('pull_request_comment_action', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, **kwargs)
def test_bitbucket2_on_repo_updated_event(self) -> None:
expected_message = u"eeshangarg changed the website of the **new-name** repo to **http://zulipchat.com**\neeshangarg changed the name of the **new-name** repo from **test-repo** to **new-name**\neeshangarg changed the language of the **new-name** repo to **python**\neeshangarg changed the full name of the **new-name** repo from **webhooktest/test-repo** to **webhooktest/new-name**\neeshangarg changed the description of the **new-name** repo to **Random description.**"
expected_subject = u"new-name"
kwargs = {"HTTP_X_EVENT_KEY": 'repo:updated'}
self.send_and_test_stream_message('repo_updated', expected_subject,
expected_message, **kwargs)
def test_bitbucket2_on_push_one_tag_event(self) -> None:
expected_message = u"kolaszek pushed tag [a](https://bitbucket.org/kolaszek/repository-name/commits/tag/a)"
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:push'
}
self.send_and_test_stream_message('push_one_tag', self.EXPECTED_SUBJECT, expected_message, **kwargs)
def test_bitbucket2_on_push_remove_tag_event(self) -> None:
expected_message = u"kolaszek removed tag [a](https://bitbucket.org/kolaszek/repository-name/commits/tag/a)"
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:push'
}
self.send_and_test_stream_message('push_remove_tag', self.EXPECTED_SUBJECT, expected_message, **kwargs)
def test_bitbucket2_on_push_more_than_one_tag_event(self) -> None:
expected_message = u"kolaszek pushed tag [{name}](https://bitbucket.org/kolaszek/repository-name/commits/tag/{name})"
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:push'
}
self.send_and_test_stream_message('push_more_than_one_tag', **kwargs)
msg = self.get_last_message()
self.do_test_subject(msg, self.EXPECTED_SUBJECT)
self.do_test_message(msg, expected_message.format(name='b'))
msg = self.get_second_to_last_message()
self.do_test_subject(msg, self.EXPECTED_SUBJECT)
self.do_test_message(msg, expected_message.format(name='a'))
def test_bitbucket2_on_more_than_one_push_event(self) -> None:
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:push'
}
self.send_and_test_stream_message('more_than_one_push_event', **kwargs)
msg = self.get_second_to_last_message()
self.do_test_message(msg, 'kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 1 commit to branch master.\n\n* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))')
self.do_test_subject(msg, self.EXPECTED_SUBJECT_BRANCH_EVENTS)
msg = self.get_last_message()
self.do_test_message(msg, 'kolaszek pushed tag [a](https://bitbucket.org/kolaszek/repository-name/commits/tag/a)')
self.do_test_subject(msg, self.EXPECTED_SUBJECT)
def test_bitbucket2_on_more_than_one_push_event_filtered_by_branches(self) -> None:
self.url = self.build_webhook_url(branches='master,development')
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:push'
}
self.send_and_test_stream_message('more_than_one_push_event', **kwargs)
msg = self.get_second_to_last_message()
self.do_test_message(msg, 'kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 1 commit to branch master.\n\n* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))')
self.do_test_subject(msg, self.EXPECTED_SUBJECT_BRANCH_EVENTS)
msg = self.get_last_message()
self.do_test_message(msg, 'kolaszek pushed tag [a](https://bitbucket.org/kolaszek/repository-name/commits/tag/a)')
self.do_test_subject(msg, self.EXPECTED_SUBJECT)
def test_bitbucket2_on_more_than_one_push_event_filtered_by_branches_ignore(self) -> None:
self.url = self.build_webhook_url(branches='changes,development')
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:push'
}
expected_message = u"kolaszek pushed tag [a](https://bitbucket.org/kolaszek/repository-name/commits/tag/a)"
self.send_and_test_stream_message('more_than_one_push_event',
self.EXPECTED_SUBJECT,
expected_message, **kwargs)
@patch('zerver.webhooks.bitbucket2.view.check_send_webhook_message')
def test_bitbucket2_on_push_event_filtered_by_branches_ignore(
self, check_send_webhook_message_mock: MagicMock) -> None:
        self.url = self.build_webhook_url(branches='changes,development')
payload = self.get_body('push')
result = self.client_post(self.url, payload, content_type="application/json")
self.assertFalse(check_send_webhook_message_mock.called)
self.assert_json_success(result)
@patch('zerver.webhooks.bitbucket2.view.check_send_webhook_message')
def test_bitbucket2_on_push_commits_above_limit_filtered_by_branches_ignore(
self, check_send_webhook_message_mock: MagicMock) -> None:
        self.url = self.build_webhook_url(branches='changes,development')
payload = self.get_body('push_commits_above_limit')
result = self.client_post(self.url, payload, content_type="application/json")
self.assertFalse(check_send_webhook_message_mock.called)
self.assert_json_success(result)
@patch('zerver.webhooks.bitbucket2.view.check_send_webhook_message')
def test_bitbucket2_on_force_push_event_filtered_by_branches_ignore(
self, check_send_webhook_message_mock: MagicMock) -> None:
        self.url = self.build_webhook_url(branches='changes,development')
payload = self.get_body('force_push')
result = self.client_post(self.url, payload, content_type="application/json")
self.assertFalse(check_send_webhook_message_mock.called)
self.assert_json_success(result)
@patch('zerver.webhooks.bitbucket2.view.check_send_webhook_message')
def test_bitbucket2_on_push_multiple_committers_filtered_by_branches_ignore(
self, check_send_webhook_message_mock: MagicMock) -> None:
        self.url = self.build_webhook_url(branches='changes,development')
payload = self.get_body('push_multiple_committers')
result = self.client_post(self.url, payload, content_type="application/json")
self.assertFalse(check_send_webhook_message_mock.called)
self.assert_json_success(result)
@patch('zerver.webhooks.bitbucket2.view.check_send_webhook_message')
def test_bitbucket2_on_push_multiple_committers_with_others_filtered_by_branches_ignore(
self, check_send_webhook_message_mock: MagicMock) -> None:
        self.url = self.build_webhook_url(branches='changes,development')
payload = self.get_body('push_multiple_committers_with_others')
result = self.client_post(self.url, payload, content_type="application/json")
self.assertFalse(check_send_webhook_message_mock.called)
self.assert_json_success(result)
@patch('zerver.webhooks.bitbucket2.view.check_send_webhook_message')
def test_bitbucket2_on_push_without_changes_ignore(
self, check_send_webhook_message_mock: MagicMock) -> None:
payload = self.get_body('push_without_changes')
result = self.client_post(self.url, payload, content_type="application/json")
self.assertFalse(check_send_webhook_message_mock.called)
self.assert_json_success(result)
| 74.290419
| 480
| 0.750615
|
ad9a4929e056025f566448d9ef7de8f698786173
| 4,891
|
py
|
Python
|
misc/hybrid_CNN_legacy.py
|
genglinliu/Hybrid-Convolution-Shortfuse
|
a3d691d64c6bf879e25f509c773e4d9ab9392da1
|
[
"MIT"
] | 1
|
2021-12-21T22:32:02.000Z
|
2021-12-21T22:32:02.000Z
|
misc/hybrid_CNN_legacy.py
|
genglinliu/celebA
|
a3d691d64c6bf879e25f509c773e4d9ab9392da1
|
[
"MIT"
] | null | null | null |
misc/hybrid_CNN_legacy.py
|
genglinliu/celebA
|
a3d691d64c6bf879e25f509c773e4d9ab9392da1
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
[deprecated - only processes the covariate as a scalar value and processes images one by one]
You get a d-dim covariate vector as input
K_l = W_0 + W_1 * S_l
S_0 = male (1)
S_1 = female (0)
for i, (images, labels) in enumerate(train_loader):
labels = labels[:, 2] # attractiveness label
The rest is just tensor computation
on every iteration, only one image sample passes through the hybrid layer
so we pass in this one scalar (1 or -1) to the hybrid layer as a parameter
We define s_l as a scalar, so the '1' samples will make w_1 activate
K_l = W_0 + W_1 * S_l
When S_l is a batch input, kernel param k_l still needs to be updated per data point (image)
So we need to handle that
"""
###################
# Our hybrid layer
###################
class Hybrid_Conv2d(nn.Module):
"""
(self, channel_in, channel_out, kernel_size, stride=1, padding=0, cov=0)
    kernel_size is the full 4D weight shape: (out_channel, in_channel, height, width)
"""
def __init__(self, channel_in, channel_out, kernel_size, stride=1, padding=0, cov=0):
super(Hybrid_Conv2d, self).__init__()
self.kernel_size = kernel_size # 4D weight (out_channel, in_channel, height, width)
self.channel_in = channel_in
self.channel_out = channel_out
self.stride = stride
self.padding = padding
self.cov = cov # currently a scalar; cov vector of shape = (minibatch,)
# initialization: gaussian random
self.W_0 = nn.Parameter(torch.randn(kernel_size), requires_grad=True)
self.W_1 = nn.Parameter(torch.randn(kernel_size), requires_grad=True)
# N = cov.shape[0] # length of covariate vector is the batchsize
# weights = []
# for _ in range(N):
# weight = nn.Parameter(torch.randn(15, 3, 5, 5))
# weights.append(weight)
self._initialize_weights()
# weight initialization
def _initialize_weights(self):
nn.init.kaiming_normal_(self.W_0, mode='fan_out', nonlinearity='relu')
nn.init.kaiming_normal_(self.W_1, mode='fan_out', nonlinearity='relu')
def forward(self, x):
# input x is of shape = (minibatch, channel=3, width, height) e.g. (32, 3, 224, 224)
cov = self.cov # (minibatch,)
W_0 = self.W_0
W_1 = self.W_1
kernel = W_0 + torch.mul(W_1, cov)
out = F.conv2d(x, kernel, stride=self.stride, padding=self.padding)
return out
# experiment with two layer CNN
class HybridConvNet(nn.Module):
"""
Simple two-layer CNN with hybrid layer
"""
def __init__(self):
super(HybridConvNet, self).__init__()
self.hybrid_conv1 = Hybrid_Conv2d(3, 16, kernel_size=(16, 3, 3, 3), cov=0)
self.hybrid_conv2 = Hybrid_Conv2d(3, 16, kernel_size=(16, 3, 3, 3), cov=1)
self.conv2 = nn.Conv2d(16, 32, 3)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
self.fc1 = nn.Linear(387200, 128)
self.fc2 = nn.Linear(128, 2) # binary classification
def forward(self, x, cov):
if cov==0:
x = F.relu(self.hybrid_conv1(x))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(1, -1) # flatten
x = F.relu(self.fc1(x))
x = self.fc2(x)
elif cov==1:
x = F.relu(self.hybrid_conv2(x))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(1, -1) # flatten
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
class ConvNet_v1(nn.Module):
"""
Simple two-layer CNN with sequential container
"""
def __init__(self):
super(ConvNet_v1, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(3, 16, 3),
nn.ReLU(),
nn.Conv2d(16, 32, 3),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Flatten()
)
self.layer2 = nn.Sequential(
nn.Linear(387200, 128),
nn.ReLU(),
nn.Linear(128, 2)
)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
return out
class ConvNet_v2(nn.Module):
"""
Simple two-layer CNN with no Sequential container
"""
def __init__(self):
super(ConvNet_v2, self).__init__()
self.conv1 = nn.Conv2d(3, 16, 3)
self.conv2 = nn.Conv2d(16, 32, 3)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
self.fc1 = nn.Linear(387200, 128)
self.fc2 = nn.Linear(128, 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
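# --- Editor's sketch (not part of the original file): quick shape check ---
# Instantiates the hybrid layer (whose effective kernel is K_l = W_0 + W_1 * S_l,
# per the module docstring) with an arbitrary covariate value and checks the
# output shape of a dummy forward pass.
if __name__ == "__main__":
    layer = Hybrid_Conv2d(3, 16, kernel_size=(16, 3, 3, 3), cov=1)
    x = torch.randn(1, 3, 32, 32)
    print(layer(x).shape)  # expected: torch.Size([1, 16, 30, 30])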
| 31.75974
| 92
| 0.579841
|
b66dbfb826dabbbfcd7bef668f8ef8a24a00eb19
| 2,538
|
py
|
Python
|
Program/thread_database.py
|
edgecomp/academic-ranking
|
2cb0b9117e3e7abcb42f1ae30349e80169898535
|
[
"MIT"
] | null | null | null |
Program/thread_database.py
|
edgecomp/academic-ranking
|
2cb0b9117e3e7abcb42f1ae30349e80169898535
|
[
"MIT"
] | null | null | null |
Program/thread_database.py
|
edgecomp/academic-ranking
|
2cb0b9117e3e7abcb42f1ae30349e80169898535
|
[
"MIT"
] | 1
|
2020-02-23T02:32:28.000Z
|
2020-02-23T02:32:28.000Z
|
import psycopg2
from psycopg2.pool import ThreadedConnectionPool
link_base_dblp = "https://dblp.org/search/venue/api?q="
DSN = "host='localhost' dbname='rankedcategories' user='lucas'"
tcp = ThreadedConnectionPool(1, 18, DSN)
# Threaded version of the db class
class ThreadDb:
def __init__(self):
self.conn = tcp.getconn()
self.c = self.conn.cursor()
def put_connection(self):
tcp.putconn(self.conn)
def close_db(self):
self.c.close()
self.conn.close()
def add_paper(self, title, conference_id):
# Adds paper to the database
try:
self.c.execute("INSERT INTO papers(title, conference_id) VALUES (%s,%s)", (title, conference_id))
self.conn.commit()
self.c.execute("SELECT id FROM papers WHERE title=%s", (title,))
return self.c.fetchone()
except psycopg2.IntegrityError:
# print("Value " + title + " already exists! Try again.")
self.conn.rollback()
self.c.execute("SELECT id FROM papers WHERE title=%s", (title,))
return self.c.fetchone()
def add_affiliation(self, aff_id, affiliation, country):
# Adds affiliation to the database
try:
print(affiliation)
self.c.execute('''INSERT INTO affiliations(id, affiliation, country) VALUES (%s, %s, %s)''',
(aff_id, affiliation, country))
self.conn.commit()
except psycopg2.IntegrityError:
self.conn.rollback()
print("AFFILIATION ALREADY EXISTS")
def add_author(self, user_id, first_name, last_name, url, aff_id):
# Adds author to the database
try:
self.c.execute('''INSERT INTO authors(user_id, first_name, last_name, url, affiliation_id)
VALUES (%s,%s,%s,%s,%s)''', (user_id, first_name, last_name, url, aff_id))
self.conn.commit()
# print("Added author: " + first_name + " " + last_name)
except psycopg2.IntegrityError:
self.conn.rollback()
print("Record already exists! Try again.")
def add_author_paper(self, author_id, paper_id):
# Adds author paper records to the database
try:
self.c.execute("INSERT INTO authors_papers(author_id, paper_id) VALUES (%s,%s)", (author_id, paper_id))
self.conn.commit()
except psycopg2.IntegrityError:
self.conn.rollback()
print("Record already exists! Try again.")
| 37.323529
| 115
| 0.604807
|
8d92a3098dcc6e026a15b6b0af7dc11b04e7b14f
| 9,426
|
py
|
Python
|
paddleseg/models/losses/cross_entropy_loss.py
|
simuler/ESPNet
|
618768f494edbd7fa82e51b0b3d88244e19370ee
|
[
"Apache-2.0"
] | 1
|
2022-03-28T14:10:39.000Z
|
2022-03-28T14:10:39.000Z
|
paddleseg/models/losses/cross_entropy_loss.py
|
simuler/ESPNet
|
618768f494edbd7fa82e51b0b3d88244e19370ee
|
[
"Apache-2.0"
] | null | null | null |
paddleseg/models/losses/cross_entropy_loss.py
|
simuler/ESPNet
|
618768f494edbd7fa82e51b0b3d88244e19370ee
|
[
"Apache-2.0"
] | 1
|
2021-12-03T08:55:13.000Z
|
2021-12-03T08:55:13.000Z
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle import nn
import paddle.nn.functional as F
from paddleseg.cvlibs import manager
@manager.LOSSES.add_component
class CrossEntropyLoss(nn.Layer):
"""
Implements the cross entropy loss function.
Args:
weight (tuple|list|ndarray|Tensor, optional): A manual rescaling weight
given to each class. Its length must be equal to the number of classes.
Default ``None``.
ignore_index (int64, optional): Specifies a target value that is ignored
and does not contribute to the input gradient. Default ``255``.
top_k_percent_pixels (float, optional): the value lies in [0.0, 1.0].
When its value < 1.0, only compute the loss for the top k percent pixels
(e.g., the top 20% pixels). This is useful for hard pixel mining. Default ``1.0``.
data_format (str, optional): The tensor format to use, 'NCHW' or 'NHWC'. Default ``'NCHW'``.
"""
def __init__(self,
weight=None,
ignore_index=255,
top_k_percent_pixels=1.0,
data_format='NCHW'):
super(CrossEntropyLoss, self).__init__()
self.ignore_index = ignore_index
self.top_k_percent_pixels = top_k_percent_pixels
self.EPS = 1e-8
self.data_format = data_format
if weight is not None:
self.weight = paddle.to_tensor(weight, dtype='float32')
long_weight = weight #+ [0] * (256 - len(weight))
self.long_weight = paddle.to_tensor(long_weight, dtype='float32')
else:
self.weight = None
self.long_weight = None
def forward(self, logit, label, semantic_weights=None):
"""
Forward computation.
Args:
logit (Tensor): Logit tensor, the data type is float32, float64. Shape is
(N, C), where C is number of classes, and if shape is more than 2D, this
is (N, C, D1, D2,..., Dk), k >= 1.
label (Tensor): Label tensor, the data type is int64. Shape is (N), where each
value is 0 <= label[i] <= C-1, and if shape is more than 2D, this is
(N, D1, D2,..., Dk), k >= 1.
semantic_weights (Tensor, optional): Weights about loss for each pixels,
shape is the same as label. Default: None.
Returns:
(Tensor): The average loss.
"""
channel_axis = 1 if self.data_format == 'NCHW' else -1
if self.weight is not None and logit.shape[channel_axis] != len(
self.weight):
raise ValueError(
'The number of weights = {} must be the same as the number of classes = {}.'
.format(len(self.weight), logit.shape[channel_axis]))
if channel_axis == 1:
logit = paddle.transpose(logit, [0, 2, 3, 1])
label = label.astype('int64')
        # Note: ignore_index is not fully honored by F.cross_entropy here and
        # still needs fixing; ignored pixels are masked out again in _post_process_loss.
loss = F.cross_entropy(
logit,
label,
ignore_index=self.ignore_index,
reduction='none',
weight=self.long_weight)
return self._post_process_loss(logit, label, semantic_weights, loss)
def _post_process_loss(self, logit, label, semantic_weights, loss):
"""
Consider mask and top_k to calculate the final loss.
Args:
logit (Tensor): Logit tensor, the data type is float32, float64. Shape is
(N, C), where C is number of classes, and if shape is more than 2D, this
is (N, C, D1, D2,..., Dk), k >= 1.
label (Tensor): Label tensor, the data type is int64. Shape is (N), where each
value is 0 <= label[i] <= C-1, and if shape is more than 2D, this is
(N, D1, D2,..., Dk), k >= 1.
semantic_weights (Tensor, optional): Weights about loss for each pixels,
shape is the same as label.
loss (Tensor): Loss tensor which is the output of cross_entropy. If soft_label
is False in cross_entropy, the shape of loss should be the same as the label.
If soft_label is True in cross_entropy, the shape of loss should be
(N, D1, D2,..., Dk, 1).
Returns:
(Tensor): The average loss.
"""
mask = label != self.ignore_index
mask = paddle.cast(mask, 'float32')
label.stop_gradient = True
mask.stop_gradient = True
if loss.ndim > mask.ndim:
loss = paddle.squeeze(loss, axis=-1)
loss = loss * mask
if semantic_weights is not None:
loss = loss * semantic_weights
if self.weight is not None:
_one_hot = F.one_hot(label, logit.shape[-1])
coef = paddle.sum(_one_hot * self.weight, axis=-1)
else:
coef = paddle.ones_like(label)
if self.top_k_percent_pixels == 1.0:
avg_loss = paddle.mean(loss) / (paddle.mean(mask * coef) + self.EPS)
else:
loss = loss.reshape((-1, ))
top_k_pixels = int(self.top_k_percent_pixels * loss.numel())
loss, indices = paddle.topk(loss, top_k_pixels)
coef = coef.reshape((-1, ))
coef = paddle.gather(coef, indices)
coef.stop_gradient = True
coef = coef.astype('float32')
avg_loss = loss.mean() / (paddle.mean(coef) + self.EPS)
return avg_loss
@manager.LOSSES.add_component
class DistillCrossEntropyLoss(CrossEntropyLoss):
"""
The implementation of distill cross entropy loss.
Args:
weight (tuple|list|ndarray|Tensor, optional): A manual rescaling weight
given to each class. Its length must be equal to the number of classes.
Default ``None``.
ignore_index (int64, optional): Specifies a target value that is ignored
and does not contribute to the input gradient. Default ``255``.
top_k_percent_pixels (float, optional): the value lies in [0.0, 1.0].
When its value < 1.0, only compute the loss for the top k percent pixels
(e.g., the top 20% pixels). This is useful for hard pixel mining.
Default ``1.0``.
data_format (str, optional): The tensor format to use, 'NCHW' or 'NHWC'.
Default ``'NCHW'``.
"""
def __init__(self,
weight=None,
ignore_index=255,
top_k_percent_pixels=1.0,
data_format='NCHW'):
super().__init__(weight, ignore_index, top_k_percent_pixels,
data_format)
def forward(self,
student_logit,
teacher_logit,
label,
semantic_weights=None):
"""
Forward computation.
Args:
student_logit (Tensor): Logit tensor, the data type is float32, float64. Shape is
(N, C), where C is number of classes, and if shape is more than 2D, this
is (N, C, D1, D2,..., Dk), k >= 1.
teacher_logit (Tensor): Logit tensor, the data type is float32, float64. The shape
is the same as the student_logit.
label (Tensor): Label tensor, the data type is int64. Shape is (N), where each
value is 0 <= label[i] <= C-1, and if shape is more than 2D, this is
(N, D1, D2,..., Dk), k >= 1.
semantic_weights (Tensor, optional): Weights about loss for each pixels,
shape is the same as label. Default: None.
"""
if student_logit.shape != teacher_logit.shape:
raise ValueError(
'The shape of student_logit = {} must be the same as the shape of teacher_logit = {}.'
.format(student_logit.shape, teacher_logit.shape))
channel_axis = 1 if self.data_format == 'NCHW' else -1
if self.weight is not None and student_logit.shape[channel_axis] != len(
self.weight):
raise ValueError(
'The number of weights = {} must be the same as the number of classes = {}.'
.format(len(self.weight), student_logit.shape[channel_axis]))
if channel_axis == 1:
student_logit = paddle.transpose(student_logit, [0, 2, 3, 1])
teacher_logit = paddle.transpose(teacher_logit, [0, 2, 3, 1])
teacher_logit = F.softmax(teacher_logit)
loss = F.cross_entropy(
student_logit,
teacher_logit,
weight=self.weight,
reduction='none',
soft_label=True)
return self._post_process_loss(student_logit, label, semantic_weights,
loss)
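# Hedged usage sketch (assumes PaddlePaddle >= 2.0; shapes and class count are
# arbitrary). With top_k_percent_pixels=0.25, only the hardest 25% of pixel
# losses contribute, i.e. the hard pixel mining described in the docstring.
if __name__ == "__main__":
    logits = paddle.randn([2, 3, 8, 8])        # N, C, H, W with 3 classes
    labels = paddle.randint(0, 3, [2, 8, 8])   # integer class map per pixel
    loss_fn = CrossEntropyLoss(top_k_percent_pixels=0.25)
    print(float(loss_fn(logits, labels)))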
| 42.651584
| 102
| 0.58763
|
3e3d918e901cc7bdbc33ce5061a27e495a1de415
| 672
|
py
|
Python
|
bh_modules/foldbracket.py
|
jfcherng-sublime/ST-BracketHighlighter
|
223ffd4ceafd58686503e3328934c039e959a88c
|
[
"Unlicense",
"MIT"
] | 1,047
|
2015-01-01T16:11:42.000Z
|
2022-03-12T08:29:13.000Z
|
bh_modules/foldbracket.py
|
jfcherng-sublime/ST-BracketHighlighter
|
223ffd4ceafd58686503e3328934c039e959a88c
|
[
"Unlicense",
"MIT"
] | 374
|
2015-01-07T02:47:55.000Z
|
2022-03-24T12:59:09.000Z
|
bh_modules/foldbracket.py
|
jfcherng-sublime/ST-BracketHighlighter
|
223ffd4ceafd58686503e3328934c039e959a88c
|
[
"Unlicense",
"MIT"
] | 223
|
2015-01-11T04:21:06.000Z
|
2021-10-05T15:00:32.000Z
|
"""
BracketHighlighter.
Copyright (c) 2013 - 2016 Isaac Muse <isaacmuse@gmail.com>
License: MIT
"""
from BracketHighlighter import bh_plugin
import sublime
class FoldBrackets(bh_plugin.BracketPluginCommand):
"""Fold bracket plugin."""
def run(self, edit, name):
"""Fold the content between the bracket."""
content = sublime.Region(self.left.end, self.right.begin)
new_content = [content]
if content.size() > 0:
if self.view.fold(content) is False:
new_content = self.view.unfold(content)
self.selection = new_content
def plugin():
"""Make plugin available."""
return FoldBrackets
| 23.172414
| 65
| 0.65625
|
6c7191dbf910cb9131843f77b6efb0a3f1b90fbd
| 1,376
|
py
|
Python
|
app/jobs.py
|
averycrespi/statice
|
bd6158595106df90fcabd8ac16e899bf58db1a3b
|
[
"MIT"
] | null | null | null |
app/jobs.py
|
averycrespi/statice
|
bd6158595106df90fcabd8ac16e899bf58db1a3b
|
[
"MIT"
] | 40
|
2020-01-23T01:45:20.000Z
|
2020-03-24T18:48:25.000Z
|
app/jobs.py
|
averycrespi/statice
|
bd6158595106df90fcabd8ac16e899bf58db1a3b
|
[
"MIT"
] | null | null | null |
from datetime import datetime
import http.client
import requests
from requests.exceptions import (
ConnectionError,
RequestException,
Timeout,
TooManyRedirects,
)
from app import rq
from app.models import Response, Status
@rq.job
def send_request(check, timeout):
"""Send an HTTP GET request for a check."""
start_time = datetime.utcnow()
try:
r = requests.get(check.url, timeout=timeout)
status = Status.SUCCESS if r.status_code < 400 else Status.FAILURE
description = "HTTP {code}: {msg}".format(
code=r.status_code, msg=http.client.responses[r.status_code]
)
except ConnectionError:
status = Status.FAILURE
description = "Error: connection failed"
except Timeout:
status = Status.FAILURE
description = "Error: request timed out"
except TooManyRedirects:
status = Status.FAILURE
description = "Error: too many redirects"
except RequestException as e:
status = Status.FAILURE
description = "Unknown error: {}".format(str(e))
finally:
elapsed_ms = int((datetime.utcnow() - start_time).total_seconds() * 1000)
return Response(
check_id=check.id,
start_time=start_time,
elapsed_ms=elapsed_ms,
status=status,
description=description,
)
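# Hedged call sketch: assumes the Flask app, its models and an RQ worker are
# configured elsewhere in this project; the object below merely stands in for
# the real Check model. Flask-RQ2's send_request.queue(...) would enqueue it instead.
if __name__ == "__main__":
    from types import SimpleNamespace

    dummy_check = SimpleNamespace(id=1, url="https://example.com")
    resp = send_request(dummy_check, timeout=5)  # synchronous, for illustration
    print(resp.status, resp.description)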
| 29.913043
| 81
| 0.645349
|
10a7fbbacfdccf1bb5d3a1bb98258f8eb2f1268a
| 549
|
py
|
Python
|
gssetting/model/gssetting.py
|
deresmos/gssetting
|
6c6a30bc048194930337b3253b14aba65144e06a
|
[
"MIT"
] | null | null | null |
gssetting/model/gssetting.py
|
deresmos/gssetting
|
6c6a30bc048194930337b3253b14aba65144e06a
|
[
"MIT"
] | null | null | null |
gssetting/model/gssetting.py
|
deresmos/gssetting
|
6c6a30bc048194930337b3253b14aba65144e06a
|
[
"MIT"
] | null | null | null |
from sys import exit
from typing import List
class GSSetting:
headers: List[str] = []
indices: List[int] = []
def __init__(self) -> None:
if not (len(self.headers) or len(self.indices)):
print("Must set headers or indices of model.")
exit(1)
@classmethod
def is_cls_headers(cls) -> bool:
if len(cls.headers):
return True
return False
@classmethod
def is_cls_indices(cls) -> bool:
if len(cls.indices):
return True
return False
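# Hedged subclass sketch: a hypothetical model showing how class-level headers
# are meant to be declared.
class ExampleSetting(GSSetting):
    headers = ["name", "email", "score"]


if __name__ == "__main__":
    ExampleSetting()                        # passes the headers/indices check
    print(ExampleSetting.is_cls_headers())  # True
    print(ExampleSetting.is_cls_indices())  # False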
| 20.333333
| 58
| 0.57377
|
b8850cfdfe3bba3a99e8f18cb6ca76a687a0f004
| 8,045
|
py
|
Python
|
scraper.py
|
amikar/flickr-scraper
|
a63787a0ee2b4bfa7add191526c9ff7f01038dc7
|
[
"Unlicense"
] | 2
|
2018-05-30T13:17:45.000Z
|
2019-02-04T17:19:17.000Z
|
scraper.py
|
amikar/flickr-scraper
|
a63787a0ee2b4bfa7add191526c9ff7f01038dc7
|
[
"Unlicense"
] | null | null | null |
scraper.py
|
amikar/flickr-scraper
|
a63787a0ee2b4bfa7add191526c9ff7f01038dc7
|
[
"Unlicense"
] | null | null | null |
import requests
import re
from bs4 import BeautifulSoup
import urllib2
import string
import json
import time
import threading
import os
import sqlite3
'''
Get the html using beautifulsoup, not a needed wrapper since its being used only once at the moment. can be erased in the future
'''
def get_soup(url,header):
return BeautifulSoup(urllib2.urlopen(urllib2.Request(url,headers=header)),"html.parser")
'''
find the particular element from requests as json
'''
def find(key, dictionary):
for key_item, vert in dictionary.iteritems():
if key_item == key:
yield vert
elif isinstance(vert, dict):
for result in find(key, vert):
yield result
elif isinstance(vert, list):
for dicto in vert:
for result in find(key, dicto):
yield result
'''
save image by takings its final name, file will be saved according to its name
'''
def save_image(url, fname):
    r = requests.get(url, stream=True)
    if r.status_code != 200:
        return False
    # write in binary mode; text append ('a') mode would corrupt the image data
    with open(fname, 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024):
            f.write(chunk)
    return True
'''
get all the images present in the particular page, takes the number of pages as argument to decide how many pages
you want to be quried and have the images extracted from
link is designed to load 500 images per page, does not go higher
'''
def get_image_list(clean_query,urls_api_key,urls_reqID,total_pages):
original_name = clean_query.replace('%20', '-')
    if not os.path.isdir(IMG_DIR % original_name):
        os.makedirs(IMG_DIR % original_name)
for i in range(1,9): #change range(1,total_pages) for all images , flickr loads only 4000 images on its server for any given search so ideal is range (1,9)
page_number = str(i)
#hard coding the ajax call , can be extracted from search_url
response = requests.get('https://api.flickr.com/services/rest?sort=relevance&parse_tags=1&content_type=7&extras=can_comment%2Ccount_comments%2Ccount_faves%2Cdescription%2Cisfavorite%2Clicense%2Cmedia%2Cneeds_interstitial%2Cowner_name%2Cpath_alias%2Crealname%2Crotation%2Curl_c%2Curl_l%2Curl_m%2Curl_n%2Curl_q%2Curl_s%2Curl_sq%2Curl_t%2Curl_z&per_page=500&page='+page_number+'&lang=en-US&text='+clean_query+'&viewerNSID=&method=flickr.photos.search&csrf=&api_key='+urls_api_key+'&format=json&hermes=1&hermesClient=1&reqId='+urls_reqID+'&nojsoncallback=1').json()
#photos_data = (response['photos']['photo'][0]['url_sq'])
#important piece of code to make the directories for image (remember to add rewriting images)
photos_data = (list(find('url_l', response))) #replace url_sq with url_l for high def images
for img_file in photos_data:
'''
take image link to extract the name
for flickr images https:\/\/farm8.staticflickr.com\/7101\/26278960313_64709fce17_b.jpg
26278960313 is its photo_id
64709fce17_b is its name _b is the quality of image (_b is high)
_s is for small sized images
'''
img_name_regex = '_(.*?)_b.jpg' #change _s to _b if using url_l in photos_data
img_name = re.search(img_name_regex, str(img_file)).group(1)
img_fname = IMG_FNAME % (original_name,img_name)
photo_id_regex = '(.*?)_'
photo_fid = re.search(photo_id_regex, str(img_file.split("/")[-1])).group(1)
get_location_with_name(photo_fid,urls_api_key,urls_reqID)
save_image(img_file,img_fname)
'''
#code where multithreading can be implemented to be tried with a better db than sqlite3
p = threading.Thread(target =get_location_with_name, args=(photo_fid,urls_api_key,urls_reqID,) )
p.start()
s = threading.Thread(target = save_image, args = (img_file,img_fname))
s.start()
'''
'''
#if one wants to search a term through console
#str(raw_input('Enter a search term: ')).replace(' ', '_')
'''
'''
the url to find the location of items, not every image would have this particular data.
can be automated to extract data or use geocoder to find using latitude and longitude
loc_response['photo']['location']['latitude']
loc_response['photo']['location']['longitude']
-(makes things slower upon test)
Thread deadlock can occur, better implementation needed
check_same_thread=False temperory implementation
'''
def get_location_with_name(photo_id,urls_api_key,urls_reqID):
img_title = ''
region = ''
#hard coding the ajax call , can be extracted from response in get_image_list
loc_response = requests.get('https://api.flickr.com/services/rest?datecreate=1&extras=sizes%2Cicon_urls%2Cignored%2Crev_ignored%2Crev_contacts%2Cvenue%2Cdatecreate%2Ccan_addmeta%2Ccan_comment%2Ccan_download%2Ccan_share%2Ccontact%2Ccount_comments%2Ccount_faves%2Ccount_views%2Cdate_taken%2Cdate_upload%2Cdescription%2Cicon_urls_deep%2Cisfavorite%2Cispro%2Clicense%2Cmedia%2Cneeds_interstitial%2Cowner_name%2Cowner_datecreate%2Cpath_alias%2Crealname%2Crotation%2Csafety_level%2Csecret_k%2Csecret_h%2Curl_c%2Curl_f%2Curl_h%2Curl_k%2Curl_l%2Curl_m%2Curl_n%2Curl_o%2Curl_q%2Curl_s%2Curl_sq%2Curl_t%2Curl_z%2Cvisibility%2Cvisibility_source%2Co_dims%2Cis_marketplace_printable%2Cis_marketplace_licensable%2Cpubliceditability%2Cstatic_maps&photo_id='+photo_id+'&static_map_zoom=3%2C6%2C14&static_map_width=245&static_map_height=100&viewerNSID=&method=flickr.photos.getInfo&csrf=&api_key='+urls_api_key+'&format=json&hermes=1&hermesClient=1&reqId='+urls_reqID+'&nojsoncallback=1').json()
if 'photo' in loc_response:
img_title = loc_response['photo']['title']['_content']
if 'location' in loc_response['photo']:
region = loc_response['photo']['location']['region']['_content']
db = sqlite3.connect('./my.db', check_same_thread=False)
cursor2 = db.cursor()
cursor2.execute('''INSERT INTO users(Title,Region) VALUES(?,?)''',(img_title,region))
db.commit()
else:
print 'No Image information'
else:
print 'No data available'
# key and reqid, key expries so better to take key from page everytime
#print urls_api_key
#print urls_reqID
#get_image_list(search_term,urls_api_key,urls_reqID,total_pages)
'''
root.YUI_config.flickr.api.site_key = stores the api key to be used (usually expires after a mentioned time)
root.reqId = also important to get from site
total_pages can be really high but flickr loads only 4000 images per item and starts repeating images once we have
got 4000
tried with advanced search and got the same result
keep total_pages in code to scale further if possible
'''
def get_page_info(keywords):
header={'User-Agent':"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"}
clean_query = keywords.replace(' ', '%20')
search_url = 'https://www.flickr.com/search?q='+clean_query
soup = get_soup(search_url,header)
total_imgs_unclean = soup.find(class_='view-more-link').text
api_key_regex = 'root.YUI_config.flickr.api.site_key = "(.*?)";'
urls_api_key = re.search(api_key_regex, str(soup)).group(1)
api_reqID_regex = 'root.reqId = "(.*?)";'
urls_reqID = re.search(api_reqID_regex, str(soup)).group(1)
total_imgs = int(string.split(total_imgs_unclean," ")[-1].replace(",", ""))
total_pages = total_imgs/500
get_image_list(clean_query,urls_api_key,urls_reqID,total_pages)
'''
take each term from keyword.txt and initiate a thread of each term
'''
def keywords_search(keywords):
for i,search_term in enumerate(keywords):
t = threading.Thread(target=get_page_info, args=(search_term,))
t.start()
'''
keeps adding data to the same table if table exists, might not be a good thing based on needs
console -
-> sqlite3 my.db
-> .tables
-> select * from users
'''
if __name__ == '__main__':
photos_data = []
IMG_FNAME = './images/%s/%s.jpg'
IMG_DIR = './images/%s'
keywords = []
db = sqlite3.connect('./my.db')
cursor = db.cursor()
cursor.execute('''CREATE TABLE IF NOT EXISTS users(id INTEGER PRIMARY KEY, Title TEXT,Region TEXT)''')
db.commit()
cursor.close()
db.close()
with open('keywords.txt') as f:
keywords = [e.strip() for e in f.readlines()]
keywords_search(keywords)
| 36.90367
| 980
| 0.740211
|
0d6a05505e29b0e524d1716eabed99ec27d0421f
| 409
|
py
|
Python
|
gunicorn_config.py
|
geocompass/robosat.pink
|
44d95183322ba68c0728de44a66e50f510bfd919
|
[
"MIT"
] | 31
|
2019-10-19T04:07:51.000Z
|
2022-02-25T11:14:04.000Z
|
gunicorn_config.py
|
geocompass/robosat.pink
|
44d95183322ba68c0728de44a66e50f510bfd919
|
[
"MIT"
] | 2
|
2020-03-13T01:26:15.000Z
|
2020-10-30T01:59:29.000Z
|
gunicorn_config.py
|
geocompass/robosat.pink
|
44d95183322ba68c0728de44a66e50f510bfd919
|
[
"MIT"
] | 19
|
2019-10-31T04:52:08.000Z
|
2022-03-04T06:24:56.000Z
|
# config.py tutorial: https://www.jianshu.com/p/fecf15ad0c9a
import os
import gevent.monkey
gevent.monkey.patch_all()
import multiprocessing
# debug = True
loglevel = 'debug'
bind = "0.0.0.0:5000"
pidfile = "log/gunicorn.pid"
accesslog = "log/access.log"
errorlog = "log/debug.log"
daemon = True
# number of worker processes to start
workers = multiprocessing.cpu_count()*2+1
worker_class = 'gevent'
x_forwarded_for_header = 'X-FORWARDED-FOR'
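# Typical launch sketch (assumptions: the WSGI entry point is app:app and the
# log/ directory for the pid and log files above already exists):
#   gunicorn -c gunicorn_config.py app:app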
| 21.526316
| 53
| 0.748166
|
9cb0fa365ff9b97d483fa9e80f7d301466d69d27
| 1,607
|
py
|
Python
|
library/r1soft_volume.py
|
vexxhost/ansible-role-r1soft
|
1cd17f5f3bb37280e790b90b061a3cae5b881a72
|
[
"Apache-1.1"
] | 1
|
2017-11-02T16:02:34.000Z
|
2017-11-02T16:02:34.000Z
|
library/r1soft_volume.py
|
vexxhost/ansible-role-r1soft
|
1cd17f5f3bb37280e790b90b061a3cae5b881a72
|
[
"Apache-1.1"
] | null | null | null |
library/r1soft_volume.py
|
vexxhost/ansible-role-r1soft
|
1cd17f5f3bb37280e790b90b061a3cae5b881a72
|
[
"Apache-1.1"
] | 2
|
2021-01-30T04:48:07.000Z
|
2021-01-31T20:10:35.000Z
|
#!/usr/bin/python
from ansible.module_utils.basic import AnsibleModule
from requests.auth import HTTPBasicAuth
from zeep import Client
from zeep.exceptions import Fault
from zeep.transports import Transport
def _volume_exists(client, path):
volumes = client.service.getVolumes()
for vol in volumes:
if path == vol.path:
return True
return False
def main():
module = AnsibleModule(
argument_spec=dict(
username=dict(required=True),
password=dict(required=True),
name=dict(required=True),
description=dict(),
path=dict(required=True, type='path')
),
supports_check_mode=True
)
name = module.params.get('name')
description = module.params.get('description')
path = module.params.get('path')
username = module.params.get('username')
password = module.params.get('password')
http_auth = HTTPBasicAuth(username, password)
transport = Transport(http_auth=http_auth)
client = Client('http://localhost:9080/Volume?wsdl', transport=transport)
# Volume already exists
if _volume_exists(client, path):
module.exit_json(changed=False)
# Not check mode, create the volume
if not module.check_mode:
try:
client.service.createVolume(name=name, description=description,
path=path, quotaType='NONE')
except Fault as ex:
module.fail_json(msg=ex.message)
# New volume created!
module.exit_json(changed=True)
if __name__ == '__main__':
main()
| 27.237288
| 77
| 0.647169
|
9a401cac9127daed78da8eda43d279990f8a37ae
| 6,664
|
py
|
Python
|
drf_spectacular/views.py
|
AlexChalk/drf-spectacular
|
abff8b315718c380c3a26abef3b8a74a639f9a36
|
[
"BSD-3-Clause"
] | null | null | null |
drf_spectacular/views.py
|
AlexChalk/drf-spectacular
|
abff8b315718c380c3a26abef3b8a74a639f9a36
|
[
"BSD-3-Clause"
] | null | null | null |
drf_spectacular/views.py
|
AlexChalk/drf-spectacular
|
abff8b315718c380c3a26abef3b8a74a639f9a36
|
[
"BSD-3-Clause"
] | null | null | null |
import json
from collections import namedtuple
from typing import Any, Dict
from django.conf import settings
from django.utils import translation
from django.utils.translation import gettext_lazy as _
from rest_framework.renderers import TemplateHTMLRenderer
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.settings import api_settings
from rest_framework.views import APIView
from drf_spectacular.plumbing import get_relative_url, set_query_parameters
from drf_spectacular.renderers import (
OpenApiJsonRenderer, OpenApiJsonRenderer2, OpenApiYamlRenderer, OpenApiYamlRenderer2,
)
from drf_spectacular.settings import spectacular_settings
from drf_spectacular.types import OpenApiTypes
from drf_spectacular.utils import OpenApiParameter, extend_schema
if spectacular_settings.SERVE_INCLUDE_SCHEMA:
SCHEMA_KWARGS: Dict[str, Any] = {'responses': {200: OpenApiTypes.OBJECT}}
if settings.USE_I18N:
SCHEMA_KWARGS['parameters'] = [
OpenApiParameter(
'lang', str, OpenApiParameter.QUERY, enum=list(dict(settings.LANGUAGES).keys())
)
]
else:
SCHEMA_KWARGS = {'exclude': True}
if spectacular_settings.SERVE_AUTHENTICATION is not None:
AUTHENTICATION_CLASSES = spectacular_settings.SERVE_AUTHENTICATION
else:
AUTHENTICATION_CLASSES = api_settings.DEFAULT_AUTHENTICATION_CLASSES
class SpectacularAPIView(APIView):
__doc__ = _("""
OpenApi3 schema for this API. Format can be selected via content negotiation.
- YAML: application/vnd.oai.openapi
- JSON: application/vnd.oai.openapi+json
""")
renderer_classes = [
OpenApiYamlRenderer, OpenApiYamlRenderer2, OpenApiJsonRenderer, OpenApiJsonRenderer2
]
permission_classes = spectacular_settings.SERVE_PERMISSIONS
authentication_classes = AUTHENTICATION_CLASSES
generator_class = spectacular_settings.DEFAULT_GENERATOR_CLASS
serve_public = spectacular_settings.SERVE_PUBLIC
urlconf = spectacular_settings.SERVE_URLCONF
api_version = None
@extend_schema(**SCHEMA_KWARGS)
def get(self, request, *args, **kwargs):
if isinstance(self.urlconf, list) or isinstance(self.urlconf, tuple):
ModuleWrapper = namedtuple('ModuleWrapper', ['urlpatterns'])
self.urlconf = ModuleWrapper(tuple(self.urlconf))
if settings.USE_I18N and request.GET.get('lang'):
with translation.override(request.GET.get('lang')):
return self._get_schema_response(request)
else:
return self._get_schema_response(request)
def _get_schema_response(self, request):
# version specified as parameter to the view always takes precedence. after
# that we try to source version through the schema view's own versioning_class.
version = self.api_version or request.version
generator = self.generator_class(urlconf=self.urlconf, api_version=version)
return Response(generator.get_schema(request=request, public=self.serve_public))
class SpectacularYAMLAPIView(SpectacularAPIView):
renderer_classes = [OpenApiYamlRenderer, OpenApiYamlRenderer2]
class SpectacularJSONAPIView(SpectacularAPIView):
renderer_classes = [OpenApiJsonRenderer, OpenApiJsonRenderer2]
class SpectacularSwaggerView(APIView):
renderer_classes = [TemplateHTMLRenderer]
permission_classes = spectacular_settings.SERVE_PERMISSIONS
authentication_classes = AUTHENTICATION_CLASSES
url_name = 'schema'
url = None
template_name = 'drf_spectacular/swagger_ui.html'
template_name_js = 'drf_spectacular/swagger_ui.js'
@extend_schema(exclude=True)
def get(self, request, *args, **kwargs):
schema_url = self.url or get_relative_url(reverse(self.url_name, request=request))
return Response(
data={
'dist': spectacular_settings.SWAGGER_UI_DIST,
'favicon_href': spectacular_settings.SWAGGER_UI_FAVICON_HREF,
'schema_url': set_query_parameters(
url=schema_url,
lang=request.GET.get('lang')
),
'settings': json.dumps(spectacular_settings.SWAGGER_UI_SETTINGS),
'template_name_js': self.template_name_js
},
template_name=self.template_name,
)
class SpectacularSwaggerSplitView(SpectacularSwaggerView):
"""
Alternate Swagger UI implementation that separates the html request from the
javascript request to cater to web servers with stricter CSP policies.
"""
url_self = None
@extend_schema(exclude=True)
def get(self, request, *args, **kwargs):
if request.GET.get('script') is not None:
schema_url = self.url or get_relative_url(reverse(self.url_name, request=request))
return Response(
data={
'schema_url': set_query_parameters(
url=schema_url,
lang=request.GET.get('lang')
),
'settings': json.dumps(spectacular_settings.SWAGGER_UI_SETTINGS),
},
template_name=self.template_name_js,
content_type='application/javascript',
)
else:
script_url = self.url_self or request.get_full_path()
return Response(
data={
'dist': spectacular_settings.SWAGGER_UI_DIST,
'favicon_href': spectacular_settings.SWAGGER_UI_FAVICON_HREF,
'script_url': set_query_parameters(
url=script_url,
lang=request.GET.get('lang'),
script='' # signal to deliver init script
)
},
template_name=self.template_name,
)
class SpectacularRedocView(APIView):
renderer_classes = [TemplateHTMLRenderer]
permission_classes = spectacular_settings.SERVE_PERMISSIONS
authentication_classes = AUTHENTICATION_CLASSES
url_name = 'schema'
url = None
template_name = 'drf_spectacular/redoc.html'
@extend_schema(exclude=True)
def get(self, request, *args, **kwargs):
schema_url = self.url or get_relative_url(reverse(self.url_name, request=request))
schema_url = set_query_parameters(schema_url, lang=request.GET.get('lang'))
return Response(
data={
'dist': spectacular_settings.REDOC_DIST,
'schema_url': schema_url,
},
template_name=self.template_name
)
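# Hedged wiring sketch: a urls.py fragment following the usual drf-spectacular
# conventions; the route paths and names below are illustrative only.
#
#   from django.urls import path
#   from drf_spectacular.views import (
#       SpectacularAPIView, SpectacularRedocView, SpectacularSwaggerView,
#   )
#
#   urlpatterns = [
#       path('api/schema/', SpectacularAPIView.as_view(), name='schema'),
#       path('api/schema/swagger-ui/', SpectacularSwaggerView.as_view(url_name='schema'), name='swagger-ui'),
#       path('api/schema/redoc/', SpectacularRedocView.as_view(url_name='schema'), name='redoc'),
#   ]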
| 39.431953
| 95
| 0.684424
|
76a55c569d1b3f55cbc291a008deb58ff8e1e5ec
| 3,705
|
py
|
Python
|
test/functional/wallet_keypool_topup.py
|
pbitmonkey/bitmonkey-debug
|
f48bab02e88b1fcf445c59380e6fda018d86f462
|
[
"MIT"
] | null | null | null |
test/functional/wallet_keypool_topup.py
|
pbitmonkey/bitmonkey-debug
|
f48bab02e88b1fcf445c59380e6fda018d86f462
|
[
"MIT"
] | null | null | null |
test/functional/wallet_keypool_topup.py
|
pbitmonkey/bitmonkey-debug
|
f48bab02e88b1fcf445c59380e6fda018d86f462
|
[
"MIT"
] | 1
|
2020-11-04T06:59:13.000Z
|
2020-11-04T06:59:13.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The bitmonkey Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test HD Wallet keypool restore function.
Two nodes. Node1 is under test. Node0 is providing transactions and generating blocks.
- Start node1, shutdown and backup wallet.
- Generate 110 keys (enough to drain the keypool). Store key 90 (in the initial keypool) and key 110 (beyond the initial keypool). Send funds to key 90 and key 110.
- Stop node1, clear the datadir, move wallet file back into the datadir and restart node1.
- connect node1 to node0. Verify that they sync and node1 receives its funds."""
import os
import shutil
from test_framework.test_framework import bitmonkeyTestFramework
from test_framework.util import (
assert_equal,
connect_nodes_bi,
)
class KeypoolRestoreTest(bitmonkeyTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 4
self.extra_args = [[], ['-keypool=100'], ['-keypool=100'], ['-keypool=100']]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
wallet_path = os.path.join(self.nodes[1].datadir, "regtest", "wallets", "wallet.dat")
wallet_backup_path = os.path.join(self.nodes[1].datadir, "wallet.bak")
self.nodes[0].generate(101)
self.log.info("Make backup of wallet")
self.stop_node(1)
shutil.copyfile(wallet_path, wallet_backup_path)
self.start_node(1, self.extra_args[1])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 0, 3)
for i, output_type in enumerate(["legacy", "p2sh-segwit", "bech32"]):
self.log.info("Generate keys for wallet with address type: {}".format(output_type))
idx = i+1
for _ in range(90):
addr_oldpool = self.nodes[idx].getnewaddress(address_type=output_type)
for _ in range(20):
addr_extpool = self.nodes[idx].getnewaddress(address_type=output_type)
# Make sure we're creating the outputs we expect
address_details = self.nodes[idx].validateaddress(addr_extpool)
if i == 0:
assert not address_details["isscript"] and not address_details["iswitness"]
elif i == 1:
assert address_details["isscript"] and not address_details["iswitness"]
else:
assert not address_details["isscript"] and address_details["iswitness"]
self.log.info("Send funds to wallet")
self.nodes[0].sendtoaddress(addr_oldpool, 10)
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(addr_extpool, 5)
self.nodes[0].generate(1)
self.sync_blocks()
self.log.info("Restart node with wallet backup")
self.stop_node(idx)
shutil.copyfile(wallet_backup_path, wallet_path)
self.start_node(idx, self.extra_args[idx])
connect_nodes_bi(self.nodes, 0, idx)
self.sync_all()
self.log.info("Verify keypool is restored and balance is correct")
assert_equal(self.nodes[idx].getbalance(), 15)
assert_equal(self.nodes[idx].listtransactions()[0]['category'], "receive")
# Check that we have marked all keys up to the used keypool key as used
assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress())['hdkeypath'], "m/0'/0'/110'")
if __name__ == '__main__':
KeypoolRestoreTest().main()
| 42.586207
| 164
| 0.659919
|
91ecdc915ac6137d69216770f5a5dcc36a462c2d
| 4,106
|
py
|
Python
|
cluster/juju/charms/trusty/kubernetes-master/unit_tests/test_install.py
|
dnwake/kubernetes_trust
|
96a313825a7ef88ac37133bd3f7e7523aceae385
|
[
"Apache-2.0"
] | 6
|
2021-05-01T14:35:57.000Z
|
2022-03-09T13:31:26.000Z
|
cluster/juju/charms/trusty/kubernetes-master/unit_tests/test_install.py
|
dnwake/kubernetes_trust
|
96a313825a7ef88ac37133bd3f7e7523aceae385
|
[
"Apache-2.0"
] | 1
|
2016-08-05T22:00:45.000Z
|
2016-08-05T22:00:45.000Z
|
cluster/juju/charms/trusty/kubernetes-master/unit_tests/test_install.py
|
dnwake/kubernetes_trust
|
96a313825a7ef88ac37133bd3f7e7523aceae385
|
[
"Apache-2.0"
] | 3
|
2021-05-01T14:36:03.000Z
|
2022-03-09T13:30:54.000Z
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import patch, Mock, MagicMock
from path import Path
import pytest
import sys
# Munge the python path so we can find our hook code
d = Path('__file__').parent.abspath() / 'hooks'
sys.path.insert(0, d.abspath())
# Import the modules from the hook
import install
class TestInstallHook():
@patch('install.path')
def test_update_rc_files(self, pmock):
"""
Test happy path on updating env files. Assuming everything
exists and is in place.
"""
pmock.return_value.lines.return_value = ['line1', 'line2']
install.update_rc_files(['test1', 'test2'])
pmock.return_value.write_lines.assert_called_with(['line1', 'line2',
'test1', 'test2'])
def test_update_rc_files_with_nonexistent_path(self):
"""
Test an unhappy path if the bashrc/users do not exist.
"""
with pytest.raises(OSError) as exinfo:
install.update_rc_files(['test1','test2'])
@patch('install.fetch')
@patch('install.hookenv')
def test_package_installation(self, hemock, ftmock):
"""
Verify we are calling the known essentials to build and syndicate
kubes.
"""
pkgs = ['build-essential', 'git',
'make', 'nginx', 'python-pip']
install.install_packages()
hemock.log.assert_called_with('Installing Debian packages')
ftmock.filter_installed_packages.assert_called_with(pkgs)
@patch('install.archiveurl.ArchiveUrlFetchHandler')
def test_go_download(self, aumock):
"""
Test that we are actually handing off to charm-helpers to
download a specific archive of Go. This is non-configurable so
its reasonably safe to assume we're going to always do this,
and when it changes we shall curse the brittleness of this test.
"""
ins_mock = aumock.return_value.install
install.download_go()
url = 'https://storage.googleapis.com/golang/go1.4.2.linux-amd64.tar.gz'
sha1='5020af94b52b65cc9b6f11d50a67e4bae07b0aff'
ins_mock.assert_called_with(url, '/usr/local', sha1, 'sha1')
@patch('install.subprocess')
def test_clone_repository(self, spmock):
"""
We're not using a unit-tested git library - so ensure our subprocess
call is consistent. If we change this, we want to know we've broken it.
"""
install.clone_repository()
repo = 'https://github.com/kubernetes/kubernetes.git'
direct = '/opt/kubernetes'
spmock.check_output.assert_called_with(['git', 'clone', repo, direct])
@patch('install.install_packages')
@patch('install.download_go')
@patch('install.clone_repository')
@patch('install.update_rc_files')
@patch('install.hookenv')
def test_install_main(self, hemock, urmock, crmock, dgmock, ipmock):
"""
Ensure the driver/main method is calling all the supporting methods.
"""
strings = [
'export GOROOT=/usr/local/go\n',
'export PATH=$PATH:$GOROOT/bin\n',
'export KUBE_MASTER_IP=0.0.0.0\n',
'export KUBERNETES_MASTER=http://$KUBE_MASTER_IP\n',
]
install.install()
crmock.assert_called_once()
dgmock.assert_called_once()
crmock.assert_called_once()
urmock.assert_called_with(strings)
hemock.open_port.assert_called_with(8080)
| 37.669725
| 80
| 0.657574
|
37b6fce6f6d467b70763f4341b50930db92a50f4
| 1,809
|
py
|
Python
|
src/scripts/extract_scored.py
|
Clinical-Genomics/CADD-scripts
|
d5a53004da6f6c8b597174d9ba45ef73ae635855
|
[
"Unlicense"
] | 1
|
2021-05-11T09:44:29.000Z
|
2021-05-11T09:44:29.000Z
|
src/scripts/extract_scored.py
|
Clinical-Genomics/CADD-scripts
|
d5a53004da6f6c8b597174d9ba45ef73ae635855
|
[
"Unlicense"
] | null | null | null |
src/scripts/extract_scored.py
|
Clinical-Genomics/CADD-scripts
|
d5a53004da6f6c8b597174d9ba45ef73ae635855
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: ASCII -*-
import sys, os
import pysam
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-p","--path", dest="path", help="Path to scored variants.")
parser.add_option("--found_out", dest="found_out", help="Write found variants to file (default 'output.tsv')",default=None)
parser.add_option("--header", dest="header", help="Write full header to output (default none)",default=False, action="store_true")
(options, args) = parser.parse_args()
if options.found_out:
found_out = open(options.found_out,'w')
else:
found_out = sys.stdout
fref,falt = 2,3
if os.path.exists(options.path) and os.path.exists(options.path+".tbi"):
filename = options.path
sys.stderr.write("Opening %s...\n"%(filename))
regionTabix = pysam.Tabixfile(filename,'r')
header = list(regionTabix.header)
for line in header:
if options.header:
found_out.write(line+"\n")
try:
fref = line.split('\t').index('Ref')
falt = line.split('\t').index('Alt')
except ValueError:
pass
else:
raise IOError("No valid file with pre-scored variants.\n")
for line in sys.stdin:
if line.startswith('#'):
sys.stdout.write(line + '\n')
continue
try:
fields = line.rstrip().split('\t')
found = False
chrom = fields[0]
pos = int(fields[1])
lref,allele = fields[-2],fields[-1]
for regionHit in regionTabix.fetch(chrom, pos-1, pos):
vfields = regionHit.rstrip().split('\t')
if (vfields[fref] == lref) and (vfields[falt] == allele):
found_out.write(regionHit+"\n")
found = True
if not found:
sys.stdout.write(line)
except ValueError:
sys.stderr.write('Encountered uncovered chromosome\n')
sys.stdout.write(line)
if options.found_out:
found_out.close()
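# Typical pipeline sketch (assumptions: variants arrive on stdin as
# tab-separated lines and the pre-scored file is bgzipped with a .tbi index):
#   zcat variants.tsv.gz | python extract_scored.py -p prescored.tsv.gz --found_out found.tsv > novel.tsv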
| 29.177419
| 130
| 0.66335
|
fccc3faa81d4bae5846f7c485f3deb52d37d42e3
| 1,885
|
py
|
Python
|
aiida/backends/profile.py
|
iriberri/aiida_core
|
c4a1ec5dac92ee62c59d39ca580bde449f3abf73
|
[
"BSD-2-Clause"
] | null | null | null |
aiida/backends/profile.py
|
iriberri/aiida_core
|
c4a1ec5dac92ee62c59d39ca580bde449f3abf73
|
[
"BSD-2-Clause"
] | null | null | null |
aiida/backends/profile.py
|
iriberri/aiida_core
|
c4a1ec5dac92ee62c59d39ca580bde449f3abf73
|
[
"BSD-2-Clause"
] | 1
|
2018-12-21T11:10:09.000Z
|
2018-12-21T11:10:09.000Z
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
from aiida.backends import settings
from aiida.common.exceptions import InvalidOperation
from aiida.common.setup import get_default_profile, get_profile_config
# Possible choices for backend
BACKEND_DJANGO = "django"
BACKEND_SQLA = "sqlalchemy"
def load_profile(profile=None):
"""
Load the profile. This function is called by load_dbenv and SHOULD NOT
be called by the user by hand.
"""
if settings.LOAD_PROFILE_CALLED:
        raise InvalidOperation('You cannot call load_profile multiple times!')
settings.LOAD_PROFILE_CALLED = True
if settings.AIIDADB_PROFILE is not None:
if profile is not None and profile != settings.AIIDADB_PROFILE:
raise ValueError('Error in profile loading')
else:
if profile is None:
profile = get_default_profile()
settings.AIIDADB_PROFILE = profile
config = get_profile_config(settings.AIIDADB_PROFILE)
# Check if AIIDADB_BACKEND is set and if not error (with message)
# Migration script should put it in profile (config.json)
settings.BACKEND = config.get('AIIDADB_BACKEND', BACKEND_DJANGO)
def is_profile_loaded():
"""
Return True if the profile has already been loaded
"""
return settings.LOAD_PROFILE_CALLED
| 36.25
| 75
| 0.606897
|
97eff68a111e8a04c968e50b3ac86b890adddde9
| 1,885
|
py
|
Python
|
art/tests.py
|
Fahari/museum
|
c3368464ea04e426a88b8d2ca4337eb7a773ca31
|
[
"MIT"
] | null | null | null |
art/tests.py
|
Fahari/museum
|
c3368464ea04e426a88b8d2ca4337eb7a773ca31
|
[
"MIT"
] | null | null | null |
art/tests.py
|
Fahari/museum
|
c3368464ea04e426a88b8d2ca4337eb7a773ca31
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from .models import Photo,Location,Category
# Create your tests here.
class CategoryTestClass(TestCase):
def setUp(self):
self.cat= Category(name = 'birds')
def test_instance(self):
self.assertTrue(isinstance(self.cat,Category))
# Testing save method
def test_save_category(self):
self.cat.save_category()
categories=Category.objects.all()
self.assertTrue(len(categories)>0)
def test_delete_category(self):
self.cat.save_category()
self.cat.delete_category()
categories=Category.objects.all()
self.assertTrue(len(categories)==0)
class LocationTestClass(TestCase):
def setUp(self):
self.loc= Location(name = 'booth')
def test_instance(self):
self.assertTrue(isinstance(self.loc,Location))
# Testing save method
def test_save_location(self):
self.loc.save_location()
locations = Location.objects.all()
self.assertTrue(len(locations) > 0)
def test_delete_location(self):
self.loc.save_location()
self.loc.delete_location()
locations=Location.objects.all()
self.assertTrue(len(locations)==0)
class PhotoTestClass(TestCase):
def setUp(self):
self.loc=Location(name='Miami')
self.loc.save_location()
self.cat=Category(name='Food')
self.cat.save_category()
self.image= Photo(photo_name = 'black',description ='image of a black man',category=self.cat,location=self.loc)
def test_instance(self):
self.assertTrue(isinstance(self.image,Photo))
def test_save_image(self):
self.image.save_image()
images = Photo.objects.all()
self.assertTrue(len(images) > 0)
def tearDown(self):
Location.objects.all().delete()
Category.objects.all().delete()
Photo.objects.all().delete()
| 33.660714
| 119
| 0.666313
|
a788c76cfa2fa3e309f44ce0760fbc6d84210ca8
| 81
|
py
|
Python
|
src/babysitter/models/__init__.py
|
alexm92/babysitter
|
30f610fe1686c23b2261a9cd7d2f41eaade615b9
|
[
"MIT"
] | null | null | null |
src/babysitter/models/__init__.py
|
alexm92/babysitter
|
30f610fe1686c23b2261a9cd7d2f41eaade615b9
|
[
"MIT"
] | null | null | null |
src/babysitter/models/__init__.py
|
alexm92/babysitter
|
30f610fe1686c23b2261a9cd7d2f41eaade615b9
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from .user import *
from .issue import *
| 16.2
| 38
| 0.790123
|
e32e8d1314c097c07aa1c0f72461d55d40e2a4b7
| 215
|
py
|
Python
|
src/CallOnce/Enums.py
|
RobertOlechowski/RR_Utils_Python
|
e25375638ba765c5f7bab545e63d1fdc9743d4c4
|
[
"MIT"
] | null | null | null |
src/CallOnce/Enums.py
|
RobertOlechowski/RR_Utils_Python
|
e25375638ba765c5f7bab545e63d1fdc9743d4c4
|
[
"MIT"
] | null | null | null |
src/CallOnce/Enums.py
|
RobertOlechowski/RR_Utils_Python
|
e25375638ba765c5f7bab545e63d1fdc9743d4c4
|
[
"MIT"
] | null | null | null |
from enum import IntEnum
class ArgumentsMode(IntEnum):
IGNORE = 1
SERIALIZE_AND_HASH = 2
class HashFunction(IntEnum):
PYTHON = 1
MD5 = 2
class StorageMode(IntEnum):
Memory = 1
Disk = 2
| 12.647059
| 29
| 0.660465
|
45522705c4a00e5d752d69bf224a09299fdc263e
| 1,991
|
py
|
Python
|
source/machine_learning/trainModel.py
|
HuyTu7/face_rating
|
a7d689c6cab3cee9f445c40c0267525641090c5a
|
[
"MIT"
] | 70
|
2017-09-22T09:28:33.000Z
|
2022-03-06T12:06:39.000Z
|
source/machine_learning/trainModel.py
|
HuyTu7/face_rating
|
a7d689c6cab3cee9f445c40c0267525641090c5a
|
[
"MIT"
] | 1
|
2017-07-08T08:11:57.000Z
|
2018-07-16T03:04:38.000Z
|
source/machine_learning/trainModel.py
|
HuyTu7/face_rating
|
a7d689c6cab3cee9f445c40c0267525641090c5a
|
[
"MIT"
] | 18
|
2018-07-11T05:16:08.000Z
|
2021-09-06T14:41:19.000Z
|
import argparse
import numpy as np
import matplotlib.pyplot as plt
from sklearn import decomposition
from sklearn import linear_model
from sklearn.ensemble import RandomForestRegressor
from sklearn import svm
from sklearn import gaussian_process
parser = argparse.ArgumentParser()
parser.add_argument('-model', type=str, default='linear_model')
parser.add_argument('-featuredim', type=int, default=20)
parser.add_argument('-inputfeatures', type=str, default='../data/features_ALL.txt')
parser.add_argument('-labels', type=str, default='../data/ratings.txt')
args = parser.parse_args()
features = np.loadtxt(args.inputfeatures, delimiter=',')
#features = preprocessing.scale(features)
features_train = features[0:-51]
features_test = features[-51:-1]
test = features[-1]
pca = decomposition.PCA(n_components=args.featuredim)
pca.fit(features_train)
features_train = pca.transform(features_train)
features_test = pca.transform(features_test)
test = pca.transform(test)
ratings = np.loadtxt(args.labels, delimiter=',')
#ratings = preprocessing.scale(ratings)
ratings_train = ratings[0:-50]
ratings_test = ratings[-50:]
if args.model == 'linear_model':
regr = linear_model.LinearRegression()
elif args.model == 'svm':
regr = svm.SVR()
elif args.model == 'rf':
regr = RandomForestRegressor(n_estimators=50, random_state=0)
elif args.model == 'gpr':
regr = gaussian_process.GaussianProcess(theta0=1e-2, thetaL=1e-4, thetaU=1e-1)
else:
raise NameError('Unknown machine learning model. Please us one of: rf, svm, linear_model, gpr')
regr.fit(features_train, ratings_train)
ratings_predict = regr.predict(features_test)
corr = np.corrcoef(ratings_predict, ratings_test)[0, 1]
print 'Correlation:', corr
residue = np.mean((ratings_predict - ratings_test) ** 2)
print 'Residue:', residue
print 'Test: %d' % regr.predict(test)
truth, = plt.plot(ratings_test, 'r')
prediction, = plt.plot(ratings_predict, 'b')
plt.legend([truth, prediction], ["Ground Truth", "Prediction"])
plt.show()
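# Typical invocation sketch (assumptions: the default feature and rating files
# exist under ../data/ and the legacy Python 2 / scikit-learn environment this
# script was written for is in use):
#   python trainModel.py -model rf -featuredim 20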
| 31.603175
| 96
| 0.762933
|
11c4a48caf4308268d328747a41e1c796973493e
| 260
|
py
|
Python
|
test_haystack/core/custom_identifier.py
|
amir-khakshour/django-haystack
|
622921f55cda896868b284ff4c7f0413cd2c52ba
|
[
"BSD-3-Clause"
] | null | null | null |
test_haystack/core/custom_identifier.py
|
amir-khakshour/django-haystack
|
622921f55cda896868b284ff4c7f0413cd2c52ba
|
[
"BSD-3-Clause"
] | 4
|
2015-09-25T13:14:49.000Z
|
2020-04-24T19:56:19.000Z
|
test_haystack/core/custom_identifier.py
|
amir-khakshour/django-haystack
|
622921f55cda896868b284ff4c7f0413cd2c52ba
|
[
"BSD-3-Clause"
] | 1
|
2016-09-01T14:09:13.000Z
|
2016-09-01T14:09:13.000Z
|
# encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
def get_identifier_method(key):
"""
Custom get_identifier method used for testing the
setting HAYSTACK_IDENTIFIER_MODULE
"""
return key
| 21.666667
| 82
| 0.757692
|
a56793a8e01a3bae33e9c397e66c4e623f009482
| 16,463
|
py
|
Python
|
frappe/desk/desktop.py
|
alijasim/frappe
|
17803d5408d0a0257ab6968acb1a847a582b07ce
|
[
"MIT"
] | null | null | null |
frappe/desk/desktop.py
|
alijasim/frappe
|
17803d5408d0a0257ab6968acb1a847a582b07ce
|
[
"MIT"
] | null | null | null |
frappe/desk/desktop.py
|
alijasim/frappe
|
17803d5408d0a0257ab6968acb1a847a582b07ce
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2020, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# Author - Shivam Mishra <shivam@frappe.io>
from __future__ import unicode_literals
import frappe
from json import loads, dumps
from frappe import _, DoesNotExistError, ValidationError, _dict
from frappe.boot import get_allowed_pages, get_allowed_reports
from six import string_types
from functools import wraps
from frappe.cache_manager import (
build_domain_restriced_doctype_cache,
build_domain_restriced_page_cache,
build_table_count_cache
)
def handle_not_exist(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
try:
return fn(*args, **kwargs)
except DoesNotExistError:
if frappe.message_log:
frappe.message_log.pop()
return []
return wrapper
class Workspace:
def __init__(self, page_name, minimal=False):
self.page_name = page_name
self.extended_links = []
self.extended_charts = []
self.extended_shortcuts = []
self.user = frappe.get_user()
self.allowed_modules = self.get_cached('user_allowed_modules', self.get_allowed_modules)
self.doc = self.get_page_for_user()
if self.doc.module and self.doc.module not in self.allowed_modules:
raise frappe.PermissionError
self.can_read = self.get_cached('user_perm_can_read', self.get_can_read_items)
self.allowed_pages = get_allowed_pages(cache=True)
self.allowed_reports = get_allowed_reports(cache=True)
if not minimal:
self.onboarding_doc = self.get_onboarding_doc()
self.onboarding = None
self.table_counts = get_table_with_counts()
self.restricted_doctypes = frappe.cache().get_value("domain_restricted_doctypes") or build_domain_restriced_doctype_cache()
self.restricted_pages = frappe.cache().get_value("domain_restricted_pages") or build_domain_restriced_page_cache()
def is_page_allowed(self):
cards = self.doc.get_link_groups() + get_custom_reports_and_doctypes(self.doc.module) + self.extended_links
shortcuts = self.doc.shortcuts + self.extended_shortcuts
for section in cards:
links = loads(section.get('links')) if isinstance(section.get('links'), string_types) else section.get('links')
for item in links:
if self.is_item_allowed(item.get('link_to'), item.get('link_type')):
return True
def _in_active_domains(item):
if not item.restrict_to_domain:
return True
else:
return item.restrict_to_domain in frappe.get_active_domains()
for item in shortcuts:
if self.is_item_allowed(item.link_to, item.type) and _in_active_domains(item):
return True
return False
def get_cached(self, cache_key, fallback_fn):
_cache = frappe.cache()
value = _cache.get_value(cache_key, user=frappe.session.user)
if value:
return value
value = fallback_fn()
# Expire every six hour
_cache.set_value(cache_key, value, frappe.session.user, 21600)
return value
def get_can_read_items(self):
if not self.user.can_read:
self.user.build_permissions()
return self.user.can_read
def get_allowed_modules(self):
if not self.user.allow_modules:
self.user.build_permissions()
return self.user.allow_modules
def get_page_for_user(self):
filters = {
'extends': self.page_name,
'for_user': frappe.session.user
}
user_pages = frappe.get_all("Workspace", filters=filters, limit=1)
if user_pages:
return frappe.get_cached_doc("Workspace", user_pages[0])
filters = {
'extends_another_page': 1,
'extends': self.page_name,
'is_default': 1
}
default_page = frappe.get_all("Workspace", filters=filters, limit=1)
if default_page:
return frappe.get_cached_doc("Workspace", default_page[0])
self.get_pages_to_extend()
return frappe.get_cached_doc("Workspace", self.page_name)
def get_onboarding_doc(self):
# Check if onboarding is enabled
if not frappe.get_system_settings("enable_onboarding"):
return None
if not self.doc.onboarding:
return None
if frappe.db.get_value("Module Onboarding", self.doc.onboarding, "is_complete"):
return None
doc = frappe.get_doc("Module Onboarding", self.doc.onboarding)
# Check if user is allowed
allowed_roles = set(doc.get_allowed_roles())
user_roles = set(frappe.get_roles())
if not allowed_roles & user_roles:
return None
# Check if already complete
if doc.check_completion():
return None
return doc
def get_pages_to_extend(self):
pages = frappe.get_all("Workspace", filters={
"extends": self.page_name,
'restrict_to_domain': ['in', frappe.get_active_domains()],
'for_user': '',
'module': ['in', self.allowed_modules]
})
pages = [frappe.get_cached_doc("Workspace", page['name']) for page in pages]
for page in pages:
self.extended_links = self.extended_links + page.get_link_groups()
self.extended_charts = self.extended_charts + page.charts
self.extended_shortcuts = self.extended_shortcuts + page.shortcuts
def is_item_allowed(self, name, item_type):
if frappe.session.user == "Administrator":
return True
item_type = item_type.lower()
if item_type == "doctype":
			return (name in (self.can_read or []) and name in (self.restricted_doctypes or []))
if item_type == "page":
return (name in self.allowed_pages and name in self.restricted_pages)
if item_type == "report":
return name in self.allowed_reports
if item_type == "help":
return True
if item_type == "dashboard":
return True
return False
def build_workspace(self):
self.cards = {
'label': _(self.doc.cards_label),
'items': self.get_links()
}
self.charts = {
'label': _(self.doc.charts_label),
'items': self.get_charts()
}
self.shortcuts = {
'label': _(self.doc.shortcuts_label),
'items': self.get_shortcuts()
}
if self.onboarding_doc:
self.onboarding = {
'label': _(self.onboarding_doc.title),
'subtitle': _(self.onboarding_doc.subtitle),
'success': _(self.onboarding_doc.success_message),
'docs_url': self.onboarding_doc.documentation_url,
'items': self.get_onboarding_steps()
}
def _doctype_contains_a_record(self, name):
exists = self.table_counts.get(name, False)
if not exists and frappe.db.exists(name):
if not frappe.db.get_value('DocType', name, 'issingle'):
exists = bool(frappe.db.get_all(name, limit=1))
else:
exists = True
self.table_counts[name] = exists
return exists
def _prepare_item(self, item):
if item.dependencies:
dependencies = [dep.strip() for dep in item.dependencies.split(",")]
incomplete_dependencies = [d for d in dependencies if not self._doctype_contains_a_record(d)]
if len(incomplete_dependencies):
item.incomplete_dependencies = incomplete_dependencies
else:
item.incomplete_dependencies = ""
if item.onboard:
# Mark Spotlights for initial
if item.get("type") == "doctype":
name = item.get("name")
count = self._doctype_contains_a_record(name)
item["count"] = count
# Translate label
item["label"] = _(item.label) if item.label else _(item.name)
return item
@handle_not_exist
def get_links(self):
cards = self.doc.get_link_groups()
if not self.doc.hide_custom:
cards = cards + get_custom_reports_and_doctypes(self.doc.module)
if len(self.extended_links):
cards = merge_cards_based_on_label(cards + self.extended_links)
default_country = frappe.db.get_default("country")
new_data = []
for card in cards:
new_items = []
card = _dict(card)
links = card.get('links', [])
for item in links:
item = _dict(item)
# Condition: based on country
if item.country and item.country != default_country:
continue
# Check if user is allowed to view
if self.is_item_allowed(item.link_to, item.link_type):
prepared_item = self._prepare_item(item)
new_items.append(prepared_item)
if new_items:
if isinstance(card, _dict):
new_card = card.copy()
else:
new_card = card.as_dict().copy()
new_card["links"] = new_items
new_card["label"] = _(new_card["label"])
new_data.append(new_card)
return new_data
@handle_not_exist
def get_charts(self):
all_charts = []
if frappe.has_permission("Dashboard Chart", throw=False):
charts = self.doc.charts
if len(self.extended_charts):
charts = charts + self.extended_charts
for chart in charts:
if frappe.has_permission('Dashboard Chart', doc=chart.chart_name):
# Translate label
chart.label = _(chart.label) if chart.label else _(chart.chart_name)
all_charts.append(chart)
return all_charts
@handle_not_exist
def get_shortcuts(self):
def _in_active_domains(item):
if not item.restrict_to_domain:
return True
else:
return item.restrict_to_domain in frappe.get_active_domains()
items = []
shortcuts = self.doc.shortcuts
if len(self.extended_shortcuts):
shortcuts = shortcuts + self.extended_shortcuts
for item in shortcuts:
new_item = item.as_dict().copy()
if self.is_item_allowed(item.link_to, item.type) and _in_active_domains(item):
if item.type == "Report":
report = self.allowed_reports.get(item.link_to, {})
if report.get("report_type") in ["Query Report", "Script Report", "Custom Report"]:
new_item['is_query_report'] = 1
else:
new_item['ref_doctype'] = report.get('ref_doctype')
# Translate label
new_item["label"] = _(item.label) if item.label else _(item.link_to)
items.append(new_item)
return items
@handle_not_exist
def get_onboarding_steps(self):
steps = []
for doc in self.onboarding_doc.get_steps():
step = doc.as_dict().copy()
step.label = _(doc.title)
if step.action == "Create Entry":
step.is_submittable = frappe.db.get_value("DocType", step.reference_document, 'is_submittable', cache=True)
steps.append(step)
return steps
@frappe.whitelist()
@frappe.read_only()
def get_desktop_page(page):
"""Applies permissions, customizations and returns the configruration for a page
on desk.
Args:
page (string): page name
Returns:
dict: dictionary of cards, charts and shortcuts to be displayed on the desk page
"""
try:
wspace = Workspace(page)
wspace.build_workspace()
return {
'charts': wspace.charts,
'shortcuts': wspace.shortcuts,
'cards': wspace.cards,
'onboarding': wspace.onboarding,
'allow_customization': not wspace.doc.disable_user_customization
}
except DoesNotExistError:
frappe.log_error(frappe.get_traceback())
return {}
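# ---------------------------------------------------------------------------
# Illustrative usage (added sketch, not part of the original module).  It
# assumes a running Frappe site with a Workspace named "Home"; the workspace
# name and the helper below are example values only.
def _example_fetch_desktop_page():
    # get_desktop_page is the whitelisted endpoint defined above; calling it
    # in-process returns the same dict the desk client receives over RPC.
    layout = get_desktop_page("Home")
    return layout.get("cards"), layout.get("charts"), layout.get("shortcuts")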
@frappe.whitelist()
def get_desk_sidebar_items():
"""Get list of sidebar items for desk"""
# only include pages for active domains and modules not blocked for the user
blocked_modules = frappe.get_doc('User', frappe.session.user).get_blocked_modules()
filters = {
'restrict_to_domain': ['in', frappe.get_active_domains()],
'extends_another_page': 0,
'for_user': '',
'module': ['not in', blocked_modules]
}
if not frappe.local.conf.developer_mode:
filters['developer_mode_only'] = '0'
# pages sorted with pinned-to-top first, pinned-to-bottom last, then by name
order_by = "pin_to_top desc, pin_to_bottom asc, name asc"
all_pages = frappe.get_all("Workspace", fields=["name", "category", "icon", "module"],
filters=filters, order_by=order_by, ignore_permissions=True)
pages = []
# Filter Page based on Permission
for page in all_pages:
try:
wspace = Workspace(page.get('name'), True)
if wspace.is_page_allowed():
pages.append(page)
page['label'] = _(page.get('name'))
except frappe.PermissionError:
pass
return pages
def get_table_with_counts():
counts = frappe.cache().get_value("information_schema:counts")
if not counts:
counts = build_table_count_cache()
return counts
def get_custom_reports_and_doctypes(module):
return [
_dict({
"label": _("Custom Documents"),
"links": get_custom_doctype_list(module)
}),
_dict({
"label": _("Custom Reports"),
"links": get_custom_report_list(module)
}),
]
def get_custom_doctype_list(module):
doctypes = frappe.get_all("DocType", fields=["name"], filters={"custom": 1, "istable": 0, "module": module}, order_by="name")
out = []
for d in doctypes:
out.append({
"type": "Link",
"link_type": "doctype",
"link_to": d.name,
"label": _(d.name)
})
return out
def get_custom_report_list(module):
"""Returns list on new style reports for modules."""
reports = frappe.get_all("Report", fields=["name", "ref_doctype", "report_type"], filters=
{"is_standard": "No", "disabled": 0, "module": module},
order_by="name")
out = []
for r in reports:
out.append({
"type": "Link",
"link_type": "report",
"doctype": r.ref_doctype,
"dependencies": r.ref_doctype,
"is_query_report": 1 if r.report_type in ("Query Report", "Script Report", "Custom Report") else 0,
"label": _(r.name),
"link_to": r.name,
})
return out
def get_custom_workspace_for_user(page):
"""Get custom page from workspace if exists or create one
Args:
page (stirng): Page name
Returns:
Object: Document object
"""
filters = {
'extends': page,
'for_user': frappe.session.user
}
pages = frappe.get_list("Workspace", filters=filters)
if pages:
return frappe.get_doc("Workspace", pages[0])
doc = frappe.new_doc("Workspace")
doc.extends = page
doc.for_user = frappe.session.user
return doc
@frappe.whitelist()
def save_customization(page, config):
"""Save customizations as a separate doctype in Workspace per user
Args:
page (string): Name of the page to be edited
config (string): JSON-encoded dictionary config of all widgets
Returns:
Boolean: Customization saving status
"""
original_page = frappe.get_doc("Workspace", page)
page_doc = get_custom_workspace_for_user(page)
# Update field values
page_doc.update({
"icon": original_page.icon,
"charts_label": original_page.charts_label,
"cards_label": original_page.cards_label,
"shortcuts_label": original_page.shortcuts_label,
"module": original_page.module,
"onboarding": original_page.onboarding,
"developer_mode_only": original_page.developer_mode_only,
"category": original_page.category
})
config = _dict(loads(config))
if config.charts:
page_doc.charts = prepare_widget(config.charts, "Workspace Chart", "charts")
if config.shortcuts:
page_doc.shortcuts = prepare_widget(config.shortcuts, "Workspace Shortcut", "shortcuts")
if config.cards:
page_doc.build_links_table_from_cards(config.cards)
# Set label
page_doc.label = page + '-' + frappe.session.user
try:
if page_doc.is_new():
page_doc.insert(ignore_permissions=True)
else:
page_doc.save(ignore_permissions=True)
except (ValidationError, TypeError) as e:
# Create a json string to log
json_config = dumps(config, sort_keys=True, indent=4)
# Error log body
log = \
"""
page: {0}
config: {1}
exception: {2}
""".format(page, json_config, e)
frappe.log_error(log, _("Could not save customization"))
return False
return True
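# ---------------------------------------------------------------------------
# Illustrative payload for save_customization (added sketch, not original
# code).  The shortcut definition is a made-up example; real payloads come
# from the desk UI.  ``dumps`` is the json serializer this module already uses.
def _example_save_customization():
    example_config = dumps({
        "shortcuts": {
            "order": ["ToDo"],
            "widgets": {"ToDo": {"label": "ToDo", "link_to": "ToDo", "type": "DocType"}},
        },
    })
    # Returns True on success, False if the document could not be saved.
    return save_customization("Home", example_config)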
def prepare_widget(config, doctype, parentfield):
"""Create widget child table entries with parent details
Args:
config (dict): Dictionary containing widget config
doctype (string): Doctype name of the child table
parentfield (string): Parent field for the child table
Returns:
list: List of widget child Document objects
"""
if not config:
return []
order = config.get('order')
widgets = config.get('widgets')
prepare_widget_list = []
for idx, name in enumerate(order):
wid_config = widgets[name].copy()
# Some cleanup
wid_config.pop("name", None)
# New Doc
doc = frappe.new_doc(doctype)
doc.update(wid_config)
# Manually Set IDX
doc.idx = idx + 1
# Set Parent Field
doc.parentfield = parentfield
prepare_widget_list.append(doc)
return prepare_widget_list
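# ---------------------------------------------------------------------------
# Illustrative call to prepare_widget (added sketch, not original code).  The
# widget fields shown are assumptions about the "Workspace Shortcut" child
# doctype and may differ on a given site.
def _example_prepare_widget():
    config = {
        "order": ["ToDo"],
        "widgets": {"ToDo": {"name": "ToDo", "label": "ToDo", "link_to": "ToDo", "type": "DocType"}},
    }
    docs = prepare_widget(config, "Workspace Shortcut", "shortcuts")
    # Each returned child doc carries its 1-based position and parent field.
    return [(d.idx, d.parentfield) for d in docs]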
@frappe.whitelist()
def update_onboarding_step(name, field, value):
"""Update status of onboaridng step
Args:
name (string): Name of the doc
field (string): field to be updated
value: Value to be updated
"""
frappe.db.set_value("Onboarding Step", name, field, value)
@frappe.whitelist()
def reset_customization(page):
"""Reset workspace customizations for a user
Args:
page (string): Name of the page to be reset
"""
page_doc = get_custom_workspace_for_user(page)
page_doc.delete()
def merge_cards_based_on_label(cards):
"""Merge cards with common label."""
cards_dict = {}
for card in cards:
label = card.get('label')
if label in cards_dict:
links = cards_dict[label].links + card.links
cards_dict[label].update(dict(links=links))
cards_dict[label] = cards_dict.pop(label)
else:
cards_dict[label] = card
return list(cards_dict.values())
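# ---------------------------------------------------------------------------
# Illustrative behaviour of merge_cards_based_on_label (added sketch): two
# cards sharing the label "Reports" collapse into one card whose links are
# concatenated in order.
def _example_merge_cards():
    first = _dict({"label": "Reports", "links": [_dict({"label": "Report A"})]})
    second = _dict({"label": "Reports", "links": [_dict({"label": "Report B"})]})
    merged = merge_cards_based_on_label([first, second])
    assert len(merged) == 1 and len(merged[0].links) == 2
    return merged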
| 26.682334 | 127 | 0.719371 |
5f0e8dfc04b8946ba753102fcac5bf7542486a1d | 9,842 | py | Python | preprocessing/LibriSpeech.py | amari97/sfc-audio | 7e919079f163696775cfae049cadcc5f33746c5e | ["MIT"] | null | null | null | preprocessing/LibriSpeech.py | amari97/sfc-audio | 7e919079f163696775cfae049cadcc5f33746c5e | ["MIT"] | null | null | null | preprocessing/LibriSpeech.py | amari97/sfc-audio | 7e919079f163696775cfae049cadcc5f33746c5e | ["MIT"] | null | null | null |
import os
from typing import List, Tuple, Union
from torch import Tensor
import torch
from torch.utils.data import Dataset
import soundfile as sf
import h5py
from glob import glob
from tqdm import tqdm
import json
from nltk.tokenize import RegexpTokenizer
SAMPLING_RATE_LIBRISPEECH = 16000
LENGTH_LIBRISPEECH = SAMPLING_RATE_LIBRISPEECH  # 1 second
class LibriSpeechWord(Dataset):
"""
A Dataset that loads word-level segments from the LibriSpeech corpus.
"""
def __init__(self, path: str, class_names: List[str] = None, split: str = "train-clean-100",
preprocess=None, sr: int = SAMPLING_RATE_LIBRISPEECH,length:int=LENGTH_LIBRISPEECH, most_freq=1000,KFold=None,
subset_Kfold=['train-clean-360','dev-clean','test-clean'], **kwargs):
"""
Args:
path (str): the path of the root directory
class_names (list(str)): a (optional) list of the class names to use
split (str): the split used. Either
|____ test-clean
|____ test-other
|____ dev-clean
|____ dev-other
|____ train-clean-100
|____ train-clean-360
|____ train-other-500
preprocess (callable): optional; takes (data, labels, class_names, **kwargs) and must return (input, label) pairs
sr (int): sampling rate
length (int): the length of a sample (i.e. duration=length/sr)
most_freq (int): choose between the 1000/3000/6000 most frequent words
KFold (KFold object): specifies folds
subset_Kfold (list): the list of the splits where the samples are taken when computing the K folds
"""
assert most_freq in [1000,3000,6000]
self.most_freq=most_freq
assert split in [
"dev-clean",
"dev-other",
"test-clean",
"test-other",
"train-clean-100",
"train-clean-360",
"train-other-500",
]
# find the path (from base folder ./path)
self._path = os.path.join(path, "LibriSpeech")
# splits
split_dir = os.path.join(self._path, "split")
# find all the most frequent names
dict_file = os.path.join(self._path, 'word_labels','{}-mostfreq'.format(self.most_freq))
dictionary = open(dict_file, "r").read().split('\n')
# remove empty line
if dictionary[-1] == '': del dictionary[-1]
self.preprocessing = preprocess
self.sr = sr
self.length=length
# select the classes
if class_names is None:
self.class_names = dictionary
else:
# check that class_names is a subset
assert all([name in dictionary for name in class_names])
self.class_names = class_names
# sort class by alphabetical order
self.class_names.sort()
# store preprocessed files
folder_name="preprocess"
dest_path=os.path.join(self._path,folder_name)
if KFold is None:
self._check_folder(dest_path,split,split_dir)
else:
for s in subset_Kfold:
self._check_folder(dest_path,s,split_dir)
# store kwargs for preprocess function
self.kwargs = kwargs
# define the type of split (compatibility with the other datasets)
if split.startswith("dev"):
self.dataset_type = "validation"
elif split.startswith("train"):
self.dataset_type = "training"
elif split.startswith("test"):
self.dataset_type = "testing"
else:
print("Should not happen")
self._Walker=[]
self._walker=[]
self.KFold=KFold
if KFold is not None:
for s in subset_Kfold:
self._walker.extend(glob(os.path.join(dest_path,s) + '/*.h5'))
# compute the splits and sets self._Walker
self.KFold.setup(self)
else:
# find all .h5 file in the destination path
self._Walker = glob(os.path.join(dest_path,split) + '/*.h5')
def _check_folder(self,dest_path,split,split_dir):
dest_path_split=os.path.join(dest_path,split)
# Create output directories if don't exist
if os.path.exists(dest_path_split):
# check that the class_names coincide
with open(os.path.join(dest_path_split,"class_names.txt"),'r') as classes:
name_class_in_folder = json.load(classes)
if set(self.class_names) == set(name_class_in_folder["class_names"]):
print("Folder {} already exists and classes are the SAME. Not recomputed.".format(dest_path_split))
else:
print("Folder {} already exists, but classes are DIFFERENT. Must be first deleted before recomputed.".format(dest_path_split))
else:
self._extract_word(split,split_dir,dest_path_split)
def _extract_word(self, split:str, split_dir:str, dest_path_split:str)->None:
os.makedirs(dest_path_split)
print('Pre-processing: {}'.format(split))
# Get file names
word_file = os.path.join(self._path,'word_labels', split+ '-selected-' + str(self.most_freq) + '.txt')
current_file_name = ''
current_file_trans=''
transf=None
audio = 0
# write the name of the classes in the destination folder
with open(os.path.join(dest_path_split,"class_names.txt"),'w') as f:
json.dump({"class_names":self.class_names},f)
def find_line(string, fp,previous_pos):
fp.seek(previous_pos)
for i,line in enumerate(fp):
if string in line:
return line,i
raise ValueError("not found")
with open(word_file) as wf:
segment_num = 0
# loop over all examples
for line in tqdm(wf.readlines()):
# remove endline if present
line = line[:line.find('\n')]
segment_name, _, time_beg, time_len, word, _ = line.split(' ')
folders=segment_name.split("-")[:-1]
# check the word can be found in the transcript
trans = os.path.join(split_dir, split, *folders,"-".join(folders)+".trans.txt")
if current_file_trans != trans:
# file is still open
if transf is not None:
transf.close()
# close it and open the new one
previous_pos=0
transf=open(trans,"r")
current_file_trans=trans
# read from the previous position
line,previous_pos=find_line(segment_name,transf,previous_pos)
if word.upper() not in RegexpTokenizer(r'\w+').tokenize(line):
continue
file_name = os.path.join(split_dir, split, *segment_name.split("-")[:-1], segment_name + '.flac')
if word.lower() not in self.class_names:
continue
# if audio comes from the same file_name
if file_name != current_file_name:
audio,sr = sf.read(file_name)
current_file_name = file_name
segment_num = 0
start = int(float(time_beg) * self.sr)
end = int((float(time_beg) + float(time_len)) * self.sr)
# extract word
audio_=audio[start:min(end,len(audio))]
# store audio,label in a h5 file
h5f = h5py.File(os.path.join(dest_path_split,segment_name + '_' + str(segment_num) + '.h5'), 'w')
h5f.create_dataset('label', data=[word.lower().encode("utf-8")])
h5f.create_dataset('audio', data=audio_)
h5f.close()
segment_num = segment_num + 1
# close the last file
transf.close()
def update_folds(self):
self._Walker=self.KFold.subset(self._walker,self.dataset_type)
def __len__(self):
return len(self._Walker)
def __getitem__(self, n: int) -> Tuple[Tensor, Union[str, List[str]]]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
tuple: ``(preprocessed waveform, label)``
"""
fileid = self._Walker[n]
if not isinstance(fileid, list):
fileid = [fileid]
return self.load_item(fileid, self._path)
def load_item(self, filepaths: List[str], path: str) -> Tuple[Tensor, Union[str, List[str]]]:
# store data
waves_padded = torch.zeros(len(filepaths),self.length)
labels = []
# loop over all files
for i, filepath in enumerate(filepaths):
# load file
h5f = h5py.File(filepath, 'r')
label=h5f["label"][:][0].decode("utf-8")
audio=h5f["audio"][:]
h5f.close()
# convert to tensor
waveform = torch.from_numpy(audio)
waveform = waveform.squeeze()
# fixed length
duration = min(waveform.size(0), self.length)
waves_padded[i, :duration] = waveform[:duration]
labels.append(label)
if len(labels) == 1:
labels = labels[0]
# preprocess input
if self.preprocessing is not None:
return self.preprocessing(waves_padded, labels, self.class_names, subset=self.dataset_type, sr=self.sr,length=self.length, **self.kwargs)
return waves_padded, labels
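# ---------------------------------------------------------------------------
# Illustrative usage (added sketch, not part of the original file).  The root
# path is an example value and assumes LibriSpeech plus its word-label files
# are already extracted under ./data/LibriSpeech as described above.
if __name__ == "__main__":
    dataset = LibriSpeechWord(path="./data", split="dev-clean", most_freq=1000)
    waveform, label = dataset[0]
    # waveform is zero-padded/truncated to `length` samples (1 second at 16 kHz)
    print(waveform.shape, label)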
| 38.901186 | 149 | 0.567669 |
4bac92b33d0e8d5c248cf9b70f30fb2c648842c8 | 2,620 | py | Python | tests/test_error.py | timgates42/rest_toolkit | e7f9682dbbdc1ce50b9e2d7d30e80cae68110ec7 | ["BSD-2-Clause"] | 27 | 2015-01-02T23:52:44.000Z | 2018-07-10T04:20:40.000Z | tests/test_error.py | timgates42/rest_toolkit | e7f9682dbbdc1ce50b9e2d7d30e80cae68110ec7 | ["BSD-2-Clause"] | 9 | 2015-02-06T04:14:20.000Z | 2022-03-21T22:18:04.000Z | tests/test_error.py | timgates42/rest_toolkit | e7f9682dbbdc1ce50b9e2d7d30e80cae68110ec7 | ["BSD-2-Clause"] | 8 | 2015-07-31T13:21:16.000Z | 2020-03-29T10:03:04.000Z |
from webtest import TestApp
from pyramid.config import Configurator
def make_app(config):
return TestApp(config.make_wsgi_app())
def test_resource_constructor_exception():
config = Configurator()
config.include('rest_toolkit')
config.scan('resource_error')
app = make_app(config)
r = app.get('/keyerror', status=500)
assert r.content_type == 'application/json'
assert set(r.json) == {'message'}
assert r.json['message'] == 'BOOM!'
assert 'traceback' not in r.json
def test_add_traceback_in_debug_mode():
config = Configurator()
config.include('rest_toolkit')
config.scan('resource_error')
config.registry.settings['rest_toolkit.debug'] = True
app = make_app(config)
r = app.get('/keyerror', status=500)
assert 'traceback' in r.json
def test_resource_constructor_http_exception():
config = Configurator()
config.include('rest_toolkit')
config.scan('resource_error')
app = make_app(config)
r = app.get('/http-error', status=402)
assert r.content_type == 'application/json'
assert set(r.json) == {'message'}
assert r.json['message'] == 'BOOM!'
def test_resource_constructor_raises_notfound():
config = Configurator()
config.include('rest_toolkit')
config.include('pyramid_tm')
app = make_app(config)
r = app.get('/http-not-found', status=404)
assert r.content_type == 'application/json'
assert set(r.json) == {'message'}
def test_preserve_custom_json_response():
config = Configurator()
config.include('rest_toolkit')
config.scan('resource_error')
app = make_app(config)
r = app.get('/custom-json-exception', status=400)
assert r.content_type == 'application/json'
assert r.json == {'foo': 'bar'}
def test_notfound_response():
config = Configurator()
config.include('rest_toolkit')
app = make_app(config)
r = app.get('/', status=404)
assert r.content_type == 'application/json'
assert set(r.json) == {'message'}
def test_found_exception():
config = Configurator()
config.include('rest_toolkit')
config.scan('resource_error')
app = make_app(config)
r = app.get('/http-found', status=302)
assert r.headers['Location'] == 'http://www.wiggy.net'
assert r.content_type == 'application/json'
assert set(r.json) == {'message'}
def test_method_not_allowed():
config = Configurator()
config.include('rest_toolkit')
config.scan('resource_get')
app = make_app(config)
r = app.put('/', status=405)
assert r.content_type == 'application/json'
assert set(r.json) == {'message'}
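# ---------------------------------------------------------------------------
# Illustrative sketch (added, not part of the original test module) of the
# kind of view the scanned ``resource_error`` fixture package could define for
# the '/keyerror' route.  It is kept commented out so it does not register a
# duplicate route; the decorator usage follows rest_toolkit's documented
# ``resource`` class decorator, and the real fixture (including how the
# 'BOOM!' message is produced) may differ.
#
# from rest_toolkit import resource
#
# @resource('/keyerror')
# class KeyErrorResource(object):
#     def __init__(self, request):
#         raise KeyError('BOOM!')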
| 28.791209 | 58 | 0.675191 |
f58cf307a0de146f2b4a54c2774a6f38dad5bc01 | 109,674 | py | Python | numpy/core/tests/test_datetime.py | mbkumar/numpy | 0645461254a2110438b6df63ef193c1138c306ec | ["BSD-3-Clause"] | 3 | 2021-02-06T06:47:30.000Z | 2021-08-11T10:05:27.000Z | numpy/core/tests/test_datetime.py | mbkumar/numpy | 0645461254a2110438b6df63ef193c1138c306ec | ["BSD-3-Clause"] | null | null | null | numpy/core/tests/test_datetime.py | mbkumar/numpy | 0645461254a2110438b6df63ef193c1138c306ec | ["BSD-3-Clause"] | null | null | null |
import numpy
import numpy as np
import datetime
import pytest
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_warns, suppress_warnings,
assert_raises_regex,
)
from numpy.compat import pickle
# Use pytz to test out various time zones if available
try:
from pytz import timezone as tz
_has_pytz = True
except ImportError:
_has_pytz = False
try:
RecursionError
except NameError:
RecursionError = RuntimeError # python < 3.5
class TestDateTime:
def test_datetime_dtype_creation(self):
for unit in ['Y', 'M', 'W', 'D',
'h', 'm', 's', 'ms', 'us',
'μs', # alias for us
'ns', 'ps', 'fs', 'as']:
dt1 = np.dtype('M8[750%s]' % unit)
assert_(dt1 == np.dtype('datetime64[750%s]' % unit))
dt2 = np.dtype('m8[%s]' % unit)
assert_(dt2 == np.dtype('timedelta64[%s]' % unit))
# Generic units shouldn't add [] to the end
assert_equal(str(np.dtype("M8")), "datetime64")
# Should be possible to specify the endianness
assert_equal(np.dtype("=M8"), np.dtype("M8"))
assert_equal(np.dtype("=M8[s]"), np.dtype("M8[s]"))
assert_(np.dtype(">M8") == np.dtype("M8") or
np.dtype("<M8") == np.dtype("M8"))
assert_(np.dtype(">M8[D]") == np.dtype("M8[D]") or
np.dtype("<M8[D]") == np.dtype("M8[D]"))
assert_(np.dtype(">M8") != np.dtype("<M8"))
assert_equal(np.dtype("=m8"), np.dtype("m8"))
assert_equal(np.dtype("=m8[s]"), np.dtype("m8[s]"))
assert_(np.dtype(">m8") == np.dtype("m8") or
np.dtype("<m8") == np.dtype("m8"))
assert_(np.dtype(">m8[D]") == np.dtype("m8[D]") or
np.dtype("<m8[D]") == np.dtype("m8[D]"))
assert_(np.dtype(">m8") != np.dtype("<m8"))
# Check that the parser rejects bad datetime types
assert_raises(TypeError, np.dtype, 'M8[badunit]')
assert_raises(TypeError, np.dtype, 'm8[badunit]')
assert_raises(TypeError, np.dtype, 'M8[YY]')
assert_raises(TypeError, np.dtype, 'm8[YY]')
assert_raises(TypeError, np.dtype, 'm4')
assert_raises(TypeError, np.dtype, 'M7')
assert_raises(TypeError, np.dtype, 'm7')
assert_raises(TypeError, np.dtype, 'M16')
assert_raises(TypeError, np.dtype, 'm16')
def test_datetime_casting_rules(self):
# Cannot cast safely/same_kind between timedelta and datetime
assert_(not np.can_cast('m8', 'M8', casting='same_kind'))
assert_(not np.can_cast('M8', 'm8', casting='same_kind'))
assert_(not np.can_cast('m8', 'M8', casting='safe'))
assert_(not np.can_cast('M8', 'm8', casting='safe'))
# Can cast safely/same_kind from integer to timedelta
assert_(np.can_cast('i8', 'm8', casting='same_kind'))
assert_(np.can_cast('i8', 'm8', casting='safe'))
assert_(np.can_cast('i4', 'm8', casting='same_kind'))
assert_(np.can_cast('i4', 'm8', casting='safe'))
assert_(np.can_cast('u4', 'm8', casting='same_kind'))
assert_(np.can_cast('u4', 'm8', casting='safe'))
# Cannot cast safely from unsigned integer of the same size, which
# could overflow
assert_(np.can_cast('u8', 'm8', casting='same_kind'))
assert_(not np.can_cast('u8', 'm8', casting='safe'))
# Cannot cast safely/same_kind from float to timedelta
assert_(not np.can_cast('f4', 'm8', casting='same_kind'))
assert_(not np.can_cast('f4', 'm8', casting='safe'))
# Cannot cast safely/same_kind from integer to datetime
assert_(not np.can_cast('i8', 'M8', casting='same_kind'))
assert_(not np.can_cast('i8', 'M8', casting='safe'))
# Cannot cast safely/same_kind from bool to datetime
assert_(not np.can_cast('b1', 'M8', casting='same_kind'))
assert_(not np.can_cast('b1', 'M8', casting='safe'))
# Can cast safely/same_kind from bool to timedelta
assert_(np.can_cast('b1', 'm8', casting='same_kind'))
assert_(np.can_cast('b1', 'm8', casting='safe'))
# Can cast datetime safely from months/years to days
assert_(np.can_cast('M8[M]', 'M8[D]', casting='safe'))
assert_(np.can_cast('M8[Y]', 'M8[D]', casting='safe'))
# Cannot cast timedelta safely from months/years to days
assert_(not np.can_cast('m8[M]', 'm8[D]', casting='safe'))
assert_(not np.can_cast('m8[Y]', 'm8[D]', casting='safe'))
# Can cast datetime same_kind from months/years to days
assert_(np.can_cast('M8[M]', 'M8[D]', casting='same_kind'))
assert_(np.can_cast('M8[Y]', 'M8[D]', casting='same_kind'))
# Can't cast timedelta same_kind from months/years to days
assert_(not np.can_cast('m8[M]', 'm8[D]', casting='same_kind'))
assert_(not np.can_cast('m8[Y]', 'm8[D]', casting='same_kind'))
# Can cast datetime same_kind across the date/time boundary
assert_(np.can_cast('M8[D]', 'M8[h]', casting='same_kind'))
# Can cast timedelta same_kind across the date/time boundary
assert_(np.can_cast('m8[D]', 'm8[h]', casting='same_kind'))
assert_(np.can_cast('m8[h]', 'm8[D]', casting='same_kind'))
# Cannot cast safely if the integer multiplier doesn't divide
assert_(not np.can_cast('M8[7h]', 'M8[3h]', casting='safe'))
assert_(not np.can_cast('M8[3h]', 'M8[6h]', casting='safe'))
# But can cast same_kind
assert_(np.can_cast('M8[7h]', 'M8[3h]', casting='same_kind'))
# Can cast safely if the integer multiplier does divide
assert_(np.can_cast('M8[6h]', 'M8[3h]', casting='safe'))
# We can always cast types with generic units (corresponding to NaT) to
# more specific types
assert_(np.can_cast('m8', 'm8[h]', casting='same_kind'))
assert_(np.can_cast('m8', 'm8[h]', casting='safe'))
assert_(np.can_cast('M8', 'M8[h]', casting='same_kind'))
assert_(np.can_cast('M8', 'M8[h]', casting='safe'))
# but not the other way around
assert_(not np.can_cast('m8[h]', 'm8', casting='same_kind'))
assert_(not np.can_cast('m8[h]', 'm8', casting='safe'))
assert_(not np.can_cast('M8[h]', 'M8', casting='same_kind'))
assert_(not np.can_cast('M8[h]', 'M8', casting='safe'))
def test_compare_generic_nat(self):
# regression tests for gh-6452
assert_(np.datetime64('NaT') !=
np.datetime64('2000') + np.timedelta64('NaT'))
assert_(np.datetime64('NaT') != np.datetime64('NaT', 'us'))
assert_(np.datetime64('NaT', 'us') != np.datetime64('NaT'))
@pytest.mark.parametrize("size", [
3, 21, 217, 1000])
def test_datetime_nat_argsort_stability(self, size):
# NaT < NaT should be False internally for
# sort stability
expected = np.arange(size)
arr = np.tile(np.datetime64('NaT'), size)
assert_equal(np.argsort(arr, kind='mergesort'), expected)
@pytest.mark.parametrize("size", [
3, 21, 217, 1000])
def test_timedelta_nat_argsort_stability(self, size):
# NaT < NaT should be False internally for
# sort stability
expected = np.arange(size)
arr = np.tile(np.timedelta64('NaT'), size)
assert_equal(np.argsort(arr, kind='mergesort'), expected)
@pytest.mark.parametrize("arr, expected", [
# the example provided in gh-12629
(['NaT', 1, 2, 3],
[1, 2, 3, 'NaT']),
# multiple NaTs
(['NaT', 9, 'NaT', -707],
[-707, 9, 'NaT', 'NaT']),
# this sort explores another code path for NaT
([1, -2, 3, 'NaT'],
[-2, 1, 3, 'NaT']),
# 2-D array
([[51, -220, 'NaT'],
[-17, 'NaT', -90]],
[[-220, 51, 'NaT'],
[-90, -17, 'NaT']]),
])
@pytest.mark.parametrize("dtype", [
'M8[ns]', 'M8[us]',
'm8[ns]', 'm8[us]'])
def test_datetime_timedelta_sort_nat(self, arr, expected, dtype):
# fix for gh-12629 and gh-15063; NaT sorting to end of array
arr = np.array(arr, dtype=dtype)
expected = np.array(expected, dtype=dtype)
arr.sort()
assert_equal(arr, expected)
def test_datetime_scalar_construction(self):
# Construct with different units
assert_equal(np.datetime64('1950-03-12', 'D'),
np.datetime64('1950-03-12'))
assert_equal(np.datetime64('1950-03-12T13', 's'),
np.datetime64('1950-03-12T13', 'm'))
# Default construction means NaT
assert_equal(np.datetime64(), np.datetime64('NaT'))
# Some basic strings and repr
assert_equal(str(np.datetime64('NaT')), 'NaT')
assert_equal(repr(np.datetime64('NaT')),
"numpy.datetime64('NaT')")
assert_equal(str(np.datetime64('2011-02')), '2011-02')
assert_equal(repr(np.datetime64('2011-02')),
"numpy.datetime64('2011-02')")
# None gets constructed as NaT
assert_equal(np.datetime64(None), np.datetime64('NaT'))
# Default construction of NaT is in generic units
assert_equal(np.datetime64().dtype, np.dtype('M8'))
assert_equal(np.datetime64('NaT').dtype, np.dtype('M8'))
# Construction from integers requires a specified unit
assert_raises(ValueError, np.datetime64, 17)
# When constructing from a scalar or zero-dimensional array,
# it either keeps the units or you can override them.
a = np.datetime64('2000-03-18T16', 'h')
b = np.array('2000-03-18T16', dtype='M8[h]')
assert_equal(a.dtype, np.dtype('M8[h]'))
assert_equal(b.dtype, np.dtype('M8[h]'))
assert_equal(np.datetime64(a), a)
assert_equal(np.datetime64(a).dtype, np.dtype('M8[h]'))
assert_equal(np.datetime64(b), a)
assert_equal(np.datetime64(b).dtype, np.dtype('M8[h]'))
assert_equal(np.datetime64(a, 's'), a)
assert_equal(np.datetime64(a, 's').dtype, np.dtype('M8[s]'))
assert_equal(np.datetime64(b, 's'), a)
assert_equal(np.datetime64(b, 's').dtype, np.dtype('M8[s]'))
# Construction from datetime.date
assert_equal(np.datetime64('1945-03-25'),
np.datetime64(datetime.date(1945, 3, 25)))
assert_equal(np.datetime64('2045-03-25', 'D'),
np.datetime64(datetime.date(2045, 3, 25), 'D'))
# Construction from datetime.datetime
assert_equal(np.datetime64('1980-01-25T14:36:22.5'),
np.datetime64(datetime.datetime(1980, 1, 25,
14, 36, 22, 500000)))
# Construction with time units from a date is okay
assert_equal(np.datetime64('1920-03-13', 'h'),
np.datetime64('1920-03-13T00'))
assert_equal(np.datetime64('1920-03', 'm'),
np.datetime64('1920-03-01T00:00'))
assert_equal(np.datetime64('1920', 's'),
np.datetime64('1920-01-01T00:00:00'))
assert_equal(np.datetime64(datetime.date(2045, 3, 25), 'ms'),
np.datetime64('2045-03-25T00:00:00.000'))
# Construction with date units from a datetime is also okay
assert_equal(np.datetime64('1920-03-13T18', 'D'),
np.datetime64('1920-03-13'))
assert_equal(np.datetime64('1920-03-13T18:33:12', 'M'),
np.datetime64('1920-03'))
assert_equal(np.datetime64('1920-03-13T18:33:12.5', 'Y'),
np.datetime64('1920'))
def test_datetime_scalar_construction_timezone(self):
# verify that supplying an explicit timezone works, but is deprecated
with assert_warns(DeprecationWarning):
assert_equal(np.datetime64('2000-01-01T00Z'),
np.datetime64('2000-01-01T00'))
with assert_warns(DeprecationWarning):
assert_equal(np.datetime64('2000-01-01T00-08'),
np.datetime64('2000-01-01T08'))
def test_datetime_array_find_type(self):
dt = np.datetime64('1970-01-01', 'M')
arr = np.array([dt])
assert_equal(arr.dtype, np.dtype('M8[M]'))
# at the moment, we don't automatically convert these to datetime64
dt = datetime.date(1970, 1, 1)
arr = np.array([dt])
assert_equal(arr.dtype, np.dtype('O'))
dt = datetime.datetime(1970, 1, 1, 12, 30, 40)
arr = np.array([dt])
assert_equal(arr.dtype, np.dtype('O'))
# find "supertype" for non-dates and dates
b = np.bool_(True)
dm = np.datetime64('1970-01-01', 'M')
d = datetime.date(1970, 1, 1)
dt = datetime.datetime(1970, 1, 1, 12, 30, 40)
arr = np.array([b, dm])
assert_equal(arr.dtype, np.dtype('O'))
arr = np.array([b, d])
assert_equal(arr.dtype, np.dtype('O'))
arr = np.array([b, dt])
assert_equal(arr.dtype, np.dtype('O'))
arr = np.array([d, d]).astype('datetime64')
assert_equal(arr.dtype, np.dtype('M8[D]'))
arr = np.array([dt, dt]).astype('datetime64')
assert_equal(arr.dtype, np.dtype('M8[us]'))
@pytest.mark.parametrize("unit", [
# test all date / time units and use
# "generic" to select generic unit
("Y"), ("M"), ("W"), ("D"), ("h"), ("m"),
("s"), ("ms"), ("us"), ("ns"), ("ps"),
("fs"), ("as"), ("generic") ])
def test_timedelta_np_int_construction(self, unit):
# regression test for gh-7617
if unit != "generic":
assert_equal(np.timedelta64(np.int64(123), unit),
np.timedelta64(123, unit))
else:
assert_equal(np.timedelta64(np.int64(123)),
np.timedelta64(123))
def test_timedelta_scalar_construction(self):
# Construct with different units
assert_equal(np.timedelta64(7, 'D'),
np.timedelta64(1, 'W'))
assert_equal(np.timedelta64(120, 's'),
np.timedelta64(2, 'm'))
# Default construction means 0
assert_equal(np.timedelta64(), np.timedelta64(0))
# None gets constructed as NaT
assert_equal(np.timedelta64(None), np.timedelta64('NaT'))
# Some basic strings and repr
assert_equal(str(np.timedelta64('NaT')), 'NaT')
assert_equal(repr(np.timedelta64('NaT')),
"numpy.timedelta64('NaT')")
assert_equal(str(np.timedelta64(3, 's')), '3 seconds')
assert_equal(repr(np.timedelta64(-3, 's')),
"numpy.timedelta64(-3,'s')")
assert_equal(repr(np.timedelta64(12)),
"numpy.timedelta64(12)")
# Construction from an integer produces generic units
assert_equal(np.timedelta64(12).dtype, np.dtype('m8'))
# When constructing from a scalar or zero-dimensional array,
# it either keeps the units or you can override them.
a = np.timedelta64(2, 'h')
b = np.array(2, dtype='m8[h]')
assert_equal(a.dtype, np.dtype('m8[h]'))
assert_equal(b.dtype, np.dtype('m8[h]'))
assert_equal(np.timedelta64(a), a)
assert_equal(np.timedelta64(a).dtype, np.dtype('m8[h]'))
assert_equal(np.timedelta64(b), a)
assert_equal(np.timedelta64(b).dtype, np.dtype('m8[h]'))
assert_equal(np.timedelta64(a, 's'), a)
assert_equal(np.timedelta64(a, 's').dtype, np.dtype('m8[s]'))
assert_equal(np.timedelta64(b, 's'), a)
assert_equal(np.timedelta64(b, 's').dtype, np.dtype('m8[s]'))
# Construction from datetime.timedelta
assert_equal(np.timedelta64(5, 'D'),
np.timedelta64(datetime.timedelta(days=5)))
assert_equal(np.timedelta64(102347621, 's'),
np.timedelta64(datetime.timedelta(seconds=102347621)))
assert_equal(np.timedelta64(-10234760000, 'us'),
np.timedelta64(datetime.timedelta(
microseconds=-10234760000)))
assert_equal(np.timedelta64(10234760000, 'us'),
np.timedelta64(datetime.timedelta(
microseconds=10234760000)))
assert_equal(np.timedelta64(1023476, 'ms'),
np.timedelta64(datetime.timedelta(milliseconds=1023476)))
assert_equal(np.timedelta64(10, 'm'),
np.timedelta64(datetime.timedelta(minutes=10)))
assert_equal(np.timedelta64(281, 'h'),
np.timedelta64(datetime.timedelta(hours=281)))
assert_equal(np.timedelta64(28, 'W'),
np.timedelta64(datetime.timedelta(weeks=28)))
# Cannot construct across nonlinear time unit boundaries
a = np.timedelta64(3, 's')
assert_raises(TypeError, np.timedelta64, a, 'M')
assert_raises(TypeError, np.timedelta64, a, 'Y')
a = np.timedelta64(6, 'M')
assert_raises(TypeError, np.timedelta64, a, 'D')
assert_raises(TypeError, np.timedelta64, a, 'h')
a = np.timedelta64(1, 'Y')
assert_raises(TypeError, np.timedelta64, a, 'D')
assert_raises(TypeError, np.timedelta64, a, 'm')
a = datetime.timedelta(seconds=3)
assert_raises(TypeError, np.timedelta64, a, 'M')
assert_raises(TypeError, np.timedelta64, a, 'Y')
a = datetime.timedelta(weeks=3)
assert_raises(TypeError, np.timedelta64, a, 'M')
assert_raises(TypeError, np.timedelta64, a, 'Y')
a = datetime.timedelta()
assert_raises(TypeError, np.timedelta64, a, 'M')
assert_raises(TypeError, np.timedelta64, a, 'Y')
def test_timedelta_object_array_conversion(self):
# Regression test for gh-11096
inputs = [datetime.timedelta(28),
datetime.timedelta(30),
datetime.timedelta(31)]
expected = np.array([28, 30, 31], dtype='timedelta64[D]')
actual = np.array(inputs, dtype='timedelta64[D]')
assert_equal(expected, actual)
def test_timedelta_0_dim_object_array_conversion(self):
# Regression test for gh-11151
test = np.array(datetime.timedelta(seconds=20))
actual = test.astype(np.timedelta64)
# expected value from the array constructor workaround
# described in above issue
expected = np.array(datetime.timedelta(seconds=20),
np.timedelta64)
assert_equal(actual, expected)
def test_timedelta_scalar_construction_units(self):
# String construction detecting units
assert_equal(np.datetime64('2010').dtype,
np.dtype('M8[Y]'))
assert_equal(np.datetime64('2010-03').dtype,
np.dtype('M8[M]'))
assert_equal(np.datetime64('2010-03-12').dtype,
np.dtype('M8[D]'))
assert_equal(np.datetime64('2010-03-12T17').dtype,
np.dtype('M8[h]'))
assert_equal(np.datetime64('2010-03-12T17:15').dtype,
np.dtype('M8[m]'))
assert_equal(np.datetime64('2010-03-12T17:15:08').dtype,
np.dtype('M8[s]'))
assert_equal(np.datetime64('2010-03-12T17:15:08.1').dtype,
np.dtype('M8[ms]'))
assert_equal(np.datetime64('2010-03-12T17:15:08.12').dtype,
np.dtype('M8[ms]'))
assert_equal(np.datetime64('2010-03-12T17:15:08.123').dtype,
np.dtype('M8[ms]'))
assert_equal(np.datetime64('2010-03-12T17:15:08.1234').dtype,
np.dtype('M8[us]'))
assert_equal(np.datetime64('2010-03-12T17:15:08.12345').dtype,
np.dtype('M8[us]'))
assert_equal(np.datetime64('2010-03-12T17:15:08.123456').dtype,
np.dtype('M8[us]'))
assert_equal(np.datetime64('1970-01-01T00:00:02.1234567').dtype,
np.dtype('M8[ns]'))
assert_equal(np.datetime64('1970-01-01T00:00:02.12345678').dtype,
np.dtype('M8[ns]'))
assert_equal(np.datetime64('1970-01-01T00:00:02.123456789').dtype,
np.dtype('M8[ns]'))
assert_equal(np.datetime64('1970-01-01T00:00:02.1234567890').dtype,
np.dtype('M8[ps]'))
assert_equal(np.datetime64('1970-01-01T00:00:02.12345678901').dtype,
np.dtype('M8[ps]'))
assert_equal(np.datetime64('1970-01-01T00:00:02.123456789012').dtype,
np.dtype('M8[ps]'))
assert_equal(np.datetime64(
'1970-01-01T00:00:02.1234567890123').dtype,
np.dtype('M8[fs]'))
assert_equal(np.datetime64(
'1970-01-01T00:00:02.12345678901234').dtype,
np.dtype('M8[fs]'))
assert_equal(np.datetime64(
'1970-01-01T00:00:02.123456789012345').dtype,
np.dtype('M8[fs]'))
assert_equal(np.datetime64(
'1970-01-01T00:00:02.1234567890123456').dtype,
np.dtype('M8[as]'))
assert_equal(np.datetime64(
'1970-01-01T00:00:02.12345678901234567').dtype,
np.dtype('M8[as]'))
assert_equal(np.datetime64(
'1970-01-01T00:00:02.123456789012345678').dtype,
np.dtype('M8[as]'))
# Python date object
assert_equal(np.datetime64(datetime.date(2010, 4, 16)).dtype,
np.dtype('M8[D]'))
# Python datetime object
assert_equal(np.datetime64(
datetime.datetime(2010, 4, 16, 13, 45, 18)).dtype,
np.dtype('M8[us]'))
# 'today' special value
assert_equal(np.datetime64('today').dtype,
np.dtype('M8[D]'))
# 'now' special value
assert_equal(np.datetime64('now').dtype,
np.dtype('M8[s]'))
def test_datetime_nat_casting(self):
a = np.array('NaT', dtype='M8[D]')
b = np.datetime64('NaT', '[D]')
# Arrays
assert_equal(a.astype('M8[s]'), np.array('NaT', dtype='M8[s]'))
assert_equal(a.astype('M8[ms]'), np.array('NaT', dtype='M8[ms]'))
assert_equal(a.astype('M8[M]'), np.array('NaT', dtype='M8[M]'))
assert_equal(a.astype('M8[Y]'), np.array('NaT', dtype='M8[Y]'))
assert_equal(a.astype('M8[W]'), np.array('NaT', dtype='M8[W]'))
# Scalars -> Scalars
assert_equal(np.datetime64(b, '[s]'), np.datetime64('NaT', '[s]'))
assert_equal(np.datetime64(b, '[ms]'), np.datetime64('NaT', '[ms]'))
assert_equal(np.datetime64(b, '[M]'), np.datetime64('NaT', '[M]'))
assert_equal(np.datetime64(b, '[Y]'), np.datetime64('NaT', '[Y]'))
assert_equal(np.datetime64(b, '[W]'), np.datetime64('NaT', '[W]'))
# Arrays -> Scalars
assert_equal(np.datetime64(a, '[s]'), np.datetime64('NaT', '[s]'))
assert_equal(np.datetime64(a, '[ms]'), np.datetime64('NaT', '[ms]'))
assert_equal(np.datetime64(a, '[M]'), np.datetime64('NaT', '[M]'))
assert_equal(np.datetime64(a, '[Y]'), np.datetime64('NaT', '[Y]'))
assert_equal(np.datetime64(a, '[W]'), np.datetime64('NaT', '[W]'))
# NaN -> NaT
nan = np.array([np.nan] * 8)
fnan = nan.astype('f')
lnan = nan.astype('g')
cnan = nan.astype('D')
cfnan = nan.astype('F')
clnan = nan.astype('G')
nat = np.array([np.datetime64('NaT')] * 8)
assert_equal(nan.astype('M8[ns]'), nat)
assert_equal(fnan.astype('M8[ns]'), nat)
assert_equal(lnan.astype('M8[ns]'), nat)
assert_equal(cnan.astype('M8[ns]'), nat)
assert_equal(cfnan.astype('M8[ns]'), nat)
assert_equal(clnan.astype('M8[ns]'), nat)
nat = np.array([np.timedelta64('NaT')] * 8)
assert_equal(nan.astype('timedelta64[ns]'), nat)
assert_equal(fnan.astype('timedelta64[ns]'), nat)
assert_equal(lnan.astype('timedelta64[ns]'), nat)
assert_equal(cnan.astype('timedelta64[ns]'), nat)
assert_equal(cfnan.astype('timedelta64[ns]'), nat)
assert_equal(clnan.astype('timedelta64[ns]'), nat)
def test_days_creation(self):
assert_equal(np.array('1599', dtype='M8[D]').astype('i8'),
(1600-1970)*365 - (1972-1600)/4 + 3 - 365)
assert_equal(np.array('1600', dtype='M8[D]').astype('i8'),
(1600-1970)*365 - (1972-1600)/4 + 3)
assert_equal(np.array('1601', dtype='M8[D]').astype('i8'),
(1600-1970)*365 - (1972-1600)/4 + 3 + 366)
assert_equal(np.array('1900', dtype='M8[D]').astype('i8'),
(1900-1970)*365 - (1970-1900)//4)
assert_equal(np.array('1901', dtype='M8[D]').astype('i8'),
(1900-1970)*365 - (1970-1900)//4 + 365)
assert_equal(np.array('1967', dtype='M8[D]').astype('i8'), -3*365 - 1)
assert_equal(np.array('1968', dtype='M8[D]').astype('i8'), -2*365 - 1)
assert_equal(np.array('1969', dtype='M8[D]').astype('i8'), -1*365)
assert_equal(np.array('1970', dtype='M8[D]').astype('i8'), 0*365)
assert_equal(np.array('1971', dtype='M8[D]').astype('i8'), 1*365)
assert_equal(np.array('1972', dtype='M8[D]').astype('i8'), 2*365)
assert_equal(np.array('1973', dtype='M8[D]').astype('i8'), 3*365 + 1)
assert_equal(np.array('1974', dtype='M8[D]').astype('i8'), 4*365 + 1)
assert_equal(np.array('2000', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4)
assert_equal(np.array('2001', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4 + 366)
assert_equal(np.array('2400', dtype='M8[D]').astype('i8'),
(2400 - 1970)*365 + (2400 - 1972)//4 - 3)
assert_equal(np.array('2401', dtype='M8[D]').astype('i8'),
(2400 - 1970)*365 + (2400 - 1972)//4 - 3 + 366)
assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('i8'),
(1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 28)
assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('i8'),
(1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 29)
assert_equal(np.array('2000-02-29', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 28)
assert_equal(np.array('2000-03-01', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 29)
assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4 + 366 + 31 + 28 + 21)
def test_days_to_pydate(self):
assert_equal(np.array('1599', dtype='M8[D]').astype('O'),
datetime.date(1599, 1, 1))
assert_equal(np.array('1600', dtype='M8[D]').astype('O'),
datetime.date(1600, 1, 1))
assert_equal(np.array('1601', dtype='M8[D]').astype('O'),
datetime.date(1601, 1, 1))
assert_equal(np.array('1900', dtype='M8[D]').astype('O'),
datetime.date(1900, 1, 1))
assert_equal(np.array('1901', dtype='M8[D]').astype('O'),
datetime.date(1901, 1, 1))
assert_equal(np.array('2000', dtype='M8[D]').astype('O'),
datetime.date(2000, 1, 1))
assert_equal(np.array('2001', dtype='M8[D]').astype('O'),
datetime.date(2001, 1, 1))
assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('O'),
datetime.date(1600, 2, 29))
assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('O'),
datetime.date(1600, 3, 1))
assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('O'),
datetime.date(2001, 3, 22))
def test_dtype_comparison(self):
assert_(not (np.dtype('M8[us]') == np.dtype('M8[ms]')))
assert_(np.dtype('M8[us]') != np.dtype('M8[ms]'))
assert_(np.dtype('M8[2D]') != np.dtype('M8[D]'))
assert_(np.dtype('M8[D]') != np.dtype('M8[2D]'))
def test_pydatetime_creation(self):
a = np.array(['1960-03-12', datetime.date(1960, 3, 12)], dtype='M8[D]')
assert_equal(a[0], a[1])
a = np.array(['1999-12-31', datetime.date(1999, 12, 31)], dtype='M8[D]')
assert_equal(a[0], a[1])
a = np.array(['2000-01-01', datetime.date(2000, 1, 1)], dtype='M8[D]')
assert_equal(a[0], a[1])
# Will fail if the date changes at the exact moment this runs
a = np.array(['today', datetime.date.today()], dtype='M8[D]')
assert_equal(a[0], a[1])
# datetime.datetime.now() returns local time, not UTC
#a = np.array(['now', datetime.datetime.now()], dtype='M8[s]')
#assert_equal(a[0], a[1])
# we can give a datetime.date time units
assert_equal(np.array(datetime.date(1960, 3, 12), dtype='M8[s]'),
np.array(np.datetime64('1960-03-12T00:00:00')))
def test_datetime_string_conversion(self):
a = ['2011-03-16', '1920-01-01', '2013-05-19']
str_a = np.array(a, dtype='S')
uni_a = np.array(a, dtype='U')
dt_a = np.array(a, dtype='M')
# String to datetime
assert_equal(dt_a, str_a.astype('M'))
assert_equal(dt_a.dtype, str_a.astype('M').dtype)
dt_b = np.empty_like(dt_a)
dt_b[...] = str_a
assert_equal(dt_a, dt_b)
# Datetime to string
assert_equal(str_a, dt_a.astype('S0'))
str_b = np.empty_like(str_a)
str_b[...] = dt_a
assert_equal(str_a, str_b)
# Unicode to datetime
assert_equal(dt_a, uni_a.astype('M'))
assert_equal(dt_a.dtype, uni_a.astype('M').dtype)
dt_b = np.empty_like(dt_a)
dt_b[...] = uni_a
assert_equal(dt_a, dt_b)
# Datetime to unicode
assert_equal(uni_a, dt_a.astype('U'))
uni_b = np.empty_like(uni_a)
uni_b[...] = dt_a
assert_equal(uni_a, uni_b)
# Datetime to long string - gh-9712
assert_equal(str_a, dt_a.astype((np.string_, 128)))
str_b = np.empty(str_a.shape, dtype=(np.string_, 128))
str_b[...] = dt_a
assert_equal(str_a, str_b)
def test_datetime_array_str(self):
a = np.array(['2011-03-16', '1920-01-01', '2013-05-19'], dtype='M')
assert_equal(str(a), "['2011-03-16' '1920-01-01' '2013-05-19']")
a = np.array(['2011-03-16T13:55', '1920-01-01T03:12'], dtype='M')
assert_equal(np.array2string(a, separator=', ',
formatter={'datetime': lambda x:
"'%s'" % np.datetime_as_string(x, timezone='UTC')}),
"['2011-03-16T13:55Z', '1920-01-01T03:12Z']")
# Check that one NaT doesn't corrupt subsequent entries
a = np.array(['2010', 'NaT', '2030']).astype('M')
assert_equal(str(a), "['2010' 'NaT' '2030']")
def test_timedelta_array_str(self):
a = np.array([-1, 0, 100], dtype='m')
assert_equal(str(a), "[ -1 0 100]")
a = np.array(['NaT', 'NaT'], dtype='m')
assert_equal(str(a), "['NaT' 'NaT']")
# Check right-alignment with NaTs
a = np.array([-1, 'NaT', 0], dtype='m')
assert_equal(str(a), "[ -1 'NaT' 0]")
a = np.array([-1, 'NaT', 1234567], dtype='m')
assert_equal(str(a), "[ -1 'NaT' 1234567]")
# Test with other byteorder:
a = np.array([-1, 'NaT', 1234567], dtype='>m')
assert_equal(str(a), "[ -1 'NaT' 1234567]")
a = np.array([-1, 'NaT', 1234567], dtype='<m')
assert_equal(str(a), "[ -1 'NaT' 1234567]")
def test_pickle(self):
# Check that pickle roundtripping works
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
dt = np.dtype('M8[7D]')
assert_equal(pickle.loads(pickle.dumps(dt, protocol=proto)), dt)
dt = np.dtype('M8[W]')
assert_equal(pickle.loads(pickle.dumps(dt, protocol=proto)), dt)
scalar = np.datetime64('2016-01-01T00:00:00.000000000')
assert_equal(pickle.loads(pickle.dumps(scalar, protocol=proto)),
scalar)
delta = scalar - np.datetime64('2015-01-01T00:00:00.000000000')
assert_equal(pickle.loads(pickle.dumps(delta, protocol=proto)),
delta)
# Check that loading pickles from 1.6 works
pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'D'\np6\n" + \
b"I7\nI1\nI1\ntp7\ntp8\ntp9\nb."
assert_equal(pickle.loads(pkl), np.dtype('<M8[7D]'))
pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'W'\np6\n" + \
b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb."
assert_equal(pickle.loads(pkl), np.dtype('<M8[W]'))
pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
b"(I4\nS'>'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n" + \
b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb."
assert_equal(pickle.loads(pkl), np.dtype('>M8[us]'))
def test_setstate(self):
"Verify that datetime dtype __setstate__ can handle bad arguments"
dt = np.dtype('>M8[us]')
assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1))
assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx')))
assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
def test_dtype_promotion(self):
# datetime <op> datetime computes the metadata gcd
# timedelta <op> timedelta computes the metadata gcd
for mM in ['m', 'M']:
assert_equal(
np.promote_types(np.dtype(mM+'8[2Y]'), np.dtype(mM+'8[2Y]')),
np.dtype(mM+'8[2Y]'))
assert_equal(
np.promote_types(np.dtype(mM+'8[12Y]'), np.dtype(mM+'8[15Y]')),
np.dtype(mM+'8[3Y]'))
assert_equal(
np.promote_types(np.dtype(mM+'8[62M]'), np.dtype(mM+'8[24M]')),
np.dtype(mM+'8[2M]'))
assert_equal(
np.promote_types(np.dtype(mM+'8[1W]'), np.dtype(mM+'8[2D]')),
np.dtype(mM+'8[1D]'))
assert_equal(
np.promote_types(np.dtype(mM+'8[W]'), np.dtype(mM+'8[13s]')),
np.dtype(mM+'8[s]'))
assert_equal(
np.promote_types(np.dtype(mM+'8[13W]'), np.dtype(mM+'8[49s]')),
np.dtype(mM+'8[7s]'))
# timedelta <op> timedelta raises when there is no reasonable gcd
assert_raises(TypeError, np.promote_types,
np.dtype('m8[Y]'), np.dtype('m8[D]'))
assert_raises(TypeError, np.promote_types,
np.dtype('m8[M]'), np.dtype('m8[W]'))
# timedelta and float cannot be safely cast with each other
assert_raises(TypeError, np.promote_types, "float32", "m8")
assert_raises(TypeError, np.promote_types, "m8", "float32")
assert_raises(TypeError, np.promote_types, "uint64", "m8")
assert_raises(TypeError, np.promote_types, "m8", "uint64")
# timedelta <op> timedelta may overflow with big unit ranges
assert_raises(OverflowError, np.promote_types,
np.dtype('m8[W]'), np.dtype('m8[fs]'))
assert_raises(OverflowError, np.promote_types,
np.dtype('m8[s]'), np.dtype('m8[as]'))
def test_cast_overflow(self):
# gh-4486
def cast():
numpy.datetime64("1971-01-01 00:00:00.000000000000000").astype("<M8[D]")
assert_raises(OverflowError, cast)
def cast2():
numpy.datetime64("2014").astype("<M8[fs]")
assert_raises(OverflowError, cast2)
def test_pyobject_roundtrip(self):
# All datetime types should be able to roundtrip through object
a = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0,
-1020040340, -2942398, -1, 0, 1, 234523453, 1199164176],
dtype=np.int64)
# With date units
for unit in ['M8[D]', 'M8[W]', 'M8[M]', 'M8[Y]']:
b = a.copy().view(dtype=unit)
b[0] = '-0001-01-01'
b[1] = '-0001-12-31'
b[2] = '0000-01-01'
b[3] = '0001-01-01'
b[4] = '1969-12-31'
b[5] = '1970-01-01'
b[6] = '9999-12-31'
b[7] = '10000-01-01'
b[8] = 'NaT'
assert_equal(b.astype(object).astype(unit), b,
"Error roundtripping unit %s" % unit)
# With time units
for unit in ['M8[as]', 'M8[16fs]', 'M8[ps]', 'M8[us]',
'M8[300as]', 'M8[20us]']:
b = a.copy().view(dtype=unit)
b[0] = '-0001-01-01T00'
b[1] = '-0001-12-31T00'
b[2] = '0000-01-01T00'
b[3] = '0001-01-01T00'
b[4] = '1969-12-31T23:59:59.999999'
b[5] = '1970-01-01T00'
b[6] = '9999-12-31T23:59:59.999999'
b[7] = '10000-01-01T00'
b[8] = 'NaT'
assert_equal(b.astype(object).astype(unit), b,
"Error roundtripping unit %s" % unit)
def test_month_truncation(self):
# Make sure that months are truncating correctly
assert_equal(np.array('1945-03-01', dtype='M8[M]'),
np.array('1945-03-31', dtype='M8[M]'))
assert_equal(np.array('1969-11-01', dtype='M8[M]'),
np.array('1969-11-30T23:59:59.99999', dtype='M').astype('M8[M]'))
assert_equal(np.array('1969-12-01', dtype='M8[M]'),
np.array('1969-12-31T23:59:59.99999', dtype='M').astype('M8[M]'))
assert_equal(np.array('1970-01-01', dtype='M8[M]'),
np.array('1970-01-31T23:59:59.99999', dtype='M').astype('M8[M]'))
assert_equal(np.array('1980-02-01', dtype='M8[M]'),
np.array('1980-02-29T23:59:59.99999', dtype='M').astype('M8[M]'))
def test_different_unit_comparison(self):
# Check some years with date units
for unit1 in ['Y', 'M', 'D']:
dt1 = np.dtype('M8[%s]' % unit1)
for unit2 in ['Y', 'M', 'D']:
dt2 = np.dtype('M8[%s]' % unit2)
assert_equal(np.array('1945', dtype=dt1),
np.array('1945', dtype=dt2))
assert_equal(np.array('1970', dtype=dt1),
np.array('1970', dtype=dt2))
assert_equal(np.array('9999', dtype=dt1),
np.array('9999', dtype=dt2))
assert_equal(np.array('10000', dtype=dt1),
np.array('10000-01-01', dtype=dt2))
assert_equal(np.datetime64('1945', unit1),
np.datetime64('1945', unit2))
assert_equal(np.datetime64('1970', unit1),
np.datetime64('1970', unit2))
assert_equal(np.datetime64('9999', unit1),
np.datetime64('9999', unit2))
assert_equal(np.datetime64('10000', unit1),
np.datetime64('10000-01-01', unit2))
# Check some datetimes with time units
for unit1 in ['6h', 'h', 'm', 's', '10ms', 'ms', 'us']:
dt1 = np.dtype('M8[%s]' % unit1)
for unit2 in ['h', 'm', 's', 'ms', 'us']:
dt2 = np.dtype('M8[%s]' % unit2)
assert_equal(np.array('1945-03-12T18', dtype=dt1),
np.array('1945-03-12T18', dtype=dt2))
assert_equal(np.array('1970-03-12T18', dtype=dt1),
np.array('1970-03-12T18', dtype=dt2))
assert_equal(np.array('9999-03-12T18', dtype=dt1),
np.array('9999-03-12T18', dtype=dt2))
assert_equal(np.array('10000-01-01T00', dtype=dt1),
np.array('10000-01-01T00', dtype=dt2))
assert_equal(np.datetime64('1945-03-12T18', unit1),
np.datetime64('1945-03-12T18', unit2))
assert_equal(np.datetime64('1970-03-12T18', unit1),
np.datetime64('1970-03-12T18', unit2))
assert_equal(np.datetime64('9999-03-12T18', unit1),
np.datetime64('9999-03-12T18', unit2))
assert_equal(np.datetime64('10000-01-01T00', unit1),
np.datetime64('10000-01-01T00', unit2))
# Check some days with units that won't overflow
for unit1 in ['D', '12h', 'h', 'm', 's', '4s', 'ms', 'us']:
dt1 = np.dtype('M8[%s]' % unit1)
for unit2 in ['D', 'h', 'm', 's', 'ms', 'us']:
dt2 = np.dtype('M8[%s]' % unit2)
assert_(np.equal(np.array('1932-02-17', dtype='M').astype(dt1),
np.array('1932-02-17T00:00:00', dtype='M').astype(dt2),
casting='unsafe'))
assert_(np.equal(np.array('10000-04-27', dtype='M').astype(dt1),
np.array('10000-04-27T00:00:00', dtype='M').astype(dt2),
casting='unsafe'))
# Shouldn't be able to compare datetime and timedelta
# TODO: Changing to 'same_kind' or 'safe' casting in the ufuncs by
# default is needed to properly catch this kind of thing...
a = np.array('2012-12-21', dtype='M8[D]')
b = np.array(3, dtype='m8[D]')
#assert_raises(TypeError, np.less, a, b)
assert_raises(TypeError, np.less, a, b, casting='same_kind')
def test_datetime_like(self):
a = np.array([3], dtype='m8[4D]')
b = np.array(['2012-12-21'], dtype='M8[D]')
assert_equal(np.ones_like(a).dtype, a.dtype)
assert_equal(np.zeros_like(a).dtype, a.dtype)
assert_equal(np.empty_like(a).dtype, a.dtype)
assert_equal(np.ones_like(b).dtype, b.dtype)
assert_equal(np.zeros_like(b).dtype, b.dtype)
assert_equal(np.empty_like(b).dtype, b.dtype)
def test_datetime_unary(self):
for tda, tdb, tdzero, tdone, tdmone in \
[
# One-dimensional arrays
(np.array([3], dtype='m8[D]'),
np.array([-3], dtype='m8[D]'),
np.array([0], dtype='m8[D]'),
np.array([1], dtype='m8[D]'),
np.array([-1], dtype='m8[D]')),
# NumPy scalars
(np.timedelta64(3, '[D]'),
np.timedelta64(-3, '[D]'),
np.timedelta64(0, '[D]'),
np.timedelta64(1, '[D]'),
np.timedelta64(-1, '[D]'))]:
# negative ufunc
assert_equal(-tdb, tda)
assert_equal((-tdb).dtype, tda.dtype)
assert_equal(np.negative(tdb), tda)
assert_equal(np.negative(tdb).dtype, tda.dtype)
# positive ufunc
assert_equal(np.positive(tda), tda)
assert_equal(np.positive(tda).dtype, tda.dtype)
assert_equal(np.positive(tdb), tdb)
assert_equal(np.positive(tdb).dtype, tdb.dtype)
# absolute ufunc
assert_equal(np.absolute(tdb), tda)
assert_equal(np.absolute(tdb).dtype, tda.dtype)
# sign ufunc
assert_equal(np.sign(tda), tdone)
assert_equal(np.sign(tdb), tdmone)
assert_equal(np.sign(tdzero), tdzero)
assert_equal(np.sign(tda).dtype, tda.dtype)
# The ufuncs always produce native-endian results
assert_(np.negative(tdb).dtype.isnative)
def test_datetime_add(self):
for dta, dtb, dtc, dtnat, tda, tdb, tdc in \
[
# One-dimensional arrays
(np.array(['2012-12-21'], dtype='M8[D]'),
np.array(['2012-12-24'], dtype='M8[D]'),
np.array(['2012-12-21T11'], dtype='M8[h]'),
np.array(['NaT'], dtype='M8[D]'),
np.array([3], dtype='m8[D]'),
np.array([11], dtype='m8[h]'),
np.array([3*24 + 11], dtype='m8[h]')),
# NumPy scalars
(np.datetime64('2012-12-21', '[D]'),
np.datetime64('2012-12-24', '[D]'),
np.datetime64('2012-12-21T11', '[h]'),
np.datetime64('NaT', '[D]'),
np.timedelta64(3, '[D]'),
np.timedelta64(11, '[h]'),
np.timedelta64(3*24 + 11, '[h]'))]:
# m8 + m8
assert_equal(tda + tdb, tdc)
assert_equal((tda + tdb).dtype, np.dtype('m8[h]'))
# m8 + bool
assert_equal(tdb + True, tdb + 1)
assert_equal((tdb + True).dtype, np.dtype('m8[h]'))
# m8 + int
assert_equal(tdb + 3*24, tdc)
assert_equal((tdb + 3*24).dtype, np.dtype('m8[h]'))
# bool + m8
assert_equal(False + tdb, tdb)
assert_equal((False + tdb).dtype, np.dtype('m8[h]'))
# int + m8
assert_equal(3*24 + tdb, tdc)
assert_equal((3*24 + tdb).dtype, np.dtype('m8[h]'))
# M8 + bool
assert_equal(dta + True, dta + 1)
assert_equal(dtnat + True, dtnat)
assert_equal((dta + True).dtype, np.dtype('M8[D]'))
# M8 + int
assert_equal(dta + 3, dtb)
assert_equal(dtnat + 3, dtnat)
assert_equal((dta + 3).dtype, np.dtype('M8[D]'))
# bool + M8
assert_equal(False + dta, dta)
assert_equal(False + dtnat, dtnat)
assert_equal((False + dta).dtype, np.dtype('M8[D]'))
# int + M8
assert_equal(3 + dta, dtb)
assert_equal(3 + dtnat, dtnat)
assert_equal((3 + dta).dtype, np.dtype('M8[D]'))
# M8 + m8
assert_equal(dta + tda, dtb)
assert_equal(dtnat + tda, dtnat)
assert_equal((dta + tda).dtype, np.dtype('M8[D]'))
# m8 + M8
assert_equal(tda + dta, dtb)
assert_equal(tda + dtnat, dtnat)
assert_equal((tda + dta).dtype, np.dtype('M8[D]'))
# In M8 + m8, the result goes to higher precision
assert_equal(np.add(dta, tdb, casting='unsafe'), dtc)
assert_equal(np.add(dta, tdb, casting='unsafe').dtype,
np.dtype('M8[h]'))
assert_equal(np.add(tdb, dta, casting='unsafe'), dtc)
assert_equal(np.add(tdb, dta, casting='unsafe').dtype,
np.dtype('M8[h]'))
# M8 + M8
assert_raises(TypeError, np.add, dta, dtb)
def test_datetime_subtract(self):
for dta, dtb, dtc, dtd, dte, dtnat, tda, tdb, tdc in \
[
# One-dimensional arrays
(np.array(['2012-12-21'], dtype='M8[D]'),
np.array(['2012-12-24'], dtype='M8[D]'),
np.array(['1940-12-24'], dtype='M8[D]'),
np.array(['1940-12-24T00'], dtype='M8[h]'),
np.array(['1940-12-23T13'], dtype='M8[h]'),
np.array(['NaT'], dtype='M8[D]'),
np.array([3], dtype='m8[D]'),
np.array([11], dtype='m8[h]'),
np.array([3*24 - 11], dtype='m8[h]')),
# NumPy scalars
(np.datetime64('2012-12-21', '[D]'),
np.datetime64('2012-12-24', '[D]'),
np.datetime64('1940-12-24', '[D]'),
np.datetime64('1940-12-24T00', '[h]'),
np.datetime64('1940-12-23T13', '[h]'),
np.datetime64('NaT', '[D]'),
np.timedelta64(3, '[D]'),
np.timedelta64(11, '[h]'),
np.timedelta64(3*24 - 11, '[h]'))]:
# m8 - m8
assert_equal(tda - tdb, tdc)
assert_equal((tda - tdb).dtype, np.dtype('m8[h]'))
assert_equal(tdb - tda, -tdc)
assert_equal((tdb - tda).dtype, np.dtype('m8[h]'))
# m8 - bool
assert_equal(tdc - True, tdc - 1)
assert_equal((tdc - True).dtype, np.dtype('m8[h]'))
# m8 - int
assert_equal(tdc - 3*24, -tdb)
assert_equal((tdc - 3*24).dtype, np.dtype('m8[h]'))
# int - m8
assert_equal(False - tdb, -tdb)
assert_equal((False - tdb).dtype, np.dtype('m8[h]'))
# int - m8
assert_equal(3*24 - tdb, tdc)
assert_equal((3*24 - tdb).dtype, np.dtype('m8[h]'))
# M8 - bool
assert_equal(dtb - True, dtb - 1)
assert_equal(dtnat - True, dtnat)
assert_equal((dtb - True).dtype, np.dtype('M8[D]'))
# M8 - int
assert_equal(dtb - 3, dta)
assert_equal(dtnat - 3, dtnat)
assert_equal((dtb - 3).dtype, np.dtype('M8[D]'))
# M8 - m8
assert_equal(dtb - tda, dta)
assert_equal(dtnat - tda, dtnat)
assert_equal((dtb - tda).dtype, np.dtype('M8[D]'))
# In M8 - m8, the result goes to higher precision
assert_equal(np.subtract(dtc, tdb, casting='unsafe'), dte)
assert_equal(np.subtract(dtc, tdb, casting='unsafe').dtype,
np.dtype('M8[h]'))
# M8 - M8 with different units goes to higher precision
assert_equal(np.subtract(dtc, dtd, casting='unsafe'),
np.timedelta64(0, 'h'))
assert_equal(np.subtract(dtc, dtd, casting='unsafe').dtype,
np.dtype('m8[h]'))
assert_equal(np.subtract(dtd, dtc, casting='unsafe'),
np.timedelta64(0, 'h'))
assert_equal(np.subtract(dtd, dtc, casting='unsafe').dtype,
np.dtype('m8[h]'))
# m8 - M8
assert_raises(TypeError, np.subtract, tda, dta)
# bool - M8
assert_raises(TypeError, np.subtract, False, dta)
# int - M8
assert_raises(TypeError, np.subtract, 3, dta)
def test_datetime_multiply(self):
for dta, tda, tdb, tdc in \
[
# One-dimensional arrays
(np.array(['2012-12-21'], dtype='M8[D]'),
np.array([6], dtype='m8[h]'),
np.array([9], dtype='m8[h]'),
np.array([12], dtype='m8[h]')),
# NumPy scalars
(np.datetime64('2012-12-21', '[D]'),
np.timedelta64(6, '[h]'),
np.timedelta64(9, '[h]'),
np.timedelta64(12, '[h]'))]:
# m8 * int
assert_equal(tda * 2, tdc)
assert_equal((tda * 2).dtype, np.dtype('m8[h]'))
# int * m8
assert_equal(2 * tda, tdc)
assert_equal((2 * tda).dtype, np.dtype('m8[h]'))
# m8 * float
assert_equal(tda * 1.5, tdb)
assert_equal((tda * 1.5).dtype, np.dtype('m8[h]'))
# float * m8
assert_equal(1.5 * tda, tdb)
assert_equal((1.5 * tda).dtype, np.dtype('m8[h]'))
# m8 * m8
assert_raises(TypeError, np.multiply, tda, tdb)
# m8 * M8
assert_raises(TypeError, np.multiply, dta, tda)
# M8 * m8
assert_raises(TypeError, np.multiply, tda, dta)
# M8 * int
assert_raises(TypeError, np.multiply, dta, 2)
# int * M8
assert_raises(TypeError, np.multiply, 2, dta)
# M8 * float
assert_raises(TypeError, np.multiply, dta, 1.5)
# float * M8
assert_raises(TypeError, np.multiply, 1.5, dta)
# NaTs
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in multiply")
nat = np.timedelta64('NaT')
def check(a, b, res):
assert_equal(a * b, res)
assert_equal(b * a, res)
for tp in (int, float):
check(nat, tp(2), nat)
check(nat, tp(0), nat)
for f in (float('inf'), float('nan')):
check(np.timedelta64(1), f, nat)
check(np.timedelta64(0), f, nat)
check(nat, f, nat)
@pytest.mark.parametrize("op1, op2, exp", [
# m8 same units round down
(np.timedelta64(7, 's'),
np.timedelta64(4, 's'),
1),
# m8 same units round down with negative
(np.timedelta64(7, 's'),
np.timedelta64(-4, 's'),
-2),
# m8 same units negative no round down
(np.timedelta64(8, 's'),
np.timedelta64(-4, 's'),
-2),
# m8 different units
(np.timedelta64(1, 'm'),
np.timedelta64(31, 's'),
1),
# m8 generic units
(np.timedelta64(1890),
np.timedelta64(31),
60),
# Y // M works
(np.timedelta64(2, 'Y'),
np.timedelta64('13', 'M'),
1),
# handle 1D arrays
(np.array([1, 2, 3], dtype='m8'),
np.array([2], dtype='m8'),
np.array([0, 1, 1], dtype=np.int64)),
])
def test_timedelta_floor_divide(self, op1, op2, exp):
assert_equal(op1 // op2, exp)
@pytest.mark.parametrize("op1, op2", [
# div by 0
(np.timedelta64(10, 'us'),
np.timedelta64(0, 'us')),
# div with NaT
(np.timedelta64('NaT'),
np.timedelta64(50, 'us')),
# special case for int64 min
# in integer floor division
(np.timedelta64(np.iinfo(np.int64).min),
np.timedelta64(-1)),
])
def test_timedelta_floor_div_warnings(self, op1, op2):
with assert_warns(RuntimeWarning):
actual = op1 // op2
assert_equal(actual, 0)
assert_equal(actual.dtype, np.int64)
@pytest.mark.parametrize("val1, val2", [
# the smallest integer that can't be represented
# exactly in a double should be preserved if we avoid
# casting to double in floordiv operation
(9007199254740993, 1),
# stress the alternate floordiv code path where
# operand signs don't match and remainder isn't 0
(9007199254740999, -2),
])
def test_timedelta_floor_div_precision(self, val1, val2):
op1 = np.timedelta64(val1)
op2 = np.timedelta64(val2)
actual = op1 // op2
# Python reference integer floor
expected = val1 // val2
assert_equal(actual, expected)
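# Note: 9007199254740993 == 2**53 + 1, the first integer a float64 cannot
# represent exactly, so an implementation that round-tripped the operands
# through double would lose the low bit checked above.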
@pytest.mark.parametrize("val1, val2", [
# years and months sometimes can't be unambiguously
# divided for floor division operation
(np.timedelta64(7, 'Y'),
np.timedelta64(3, 's')),
(np.timedelta64(7, 'M'),
np.timedelta64(1, 'D')),
])
def test_timedelta_floor_div_error(self, val1, val2):
with assert_raises_regex(TypeError, "common metadata divisor"):
val1 // val2
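# (Years and months have no fixed length in days or seconds, so NumPy cannot
# reduce 'Y'/'M' and 'D'/'s' operands to a common unit; hence the TypeError
# asserted above.)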
@pytest.mark.parametrize("op1, op2", [
# reuse the test cases from floordiv
(np.timedelta64(7, 's'),
np.timedelta64(4, 's')),
# m8 same units round down with negative
(np.timedelta64(7, 's'),
np.timedelta64(-4, 's')),
# m8 same units negative no round down
(np.timedelta64(8, 's'),
np.timedelta64(-4, 's')),
# m8 different units
(np.timedelta64(1, 'm'),
np.timedelta64(31, 's')),
# m8 generic units
(np.timedelta64(1890),
np.timedelta64(31)),
# Y // M works
(np.timedelta64(2, 'Y'),
np.timedelta64('13', 'M')),
# handle 1D arrays
(np.array([1, 2, 3], dtype='m8'),
np.array([2], dtype='m8')),
])
def test_timedelta_divmod(self, op1, op2):
expected = (op1 // op2, op1 % op2)
assert_equal(divmod(op1, op2), expected)
@pytest.mark.parametrize("op1, op2", [
# reuse cases from floordiv
# div by 0
(np.timedelta64(10, 'us'),
np.timedelta64(0, 'us')),
# div with NaT
(np.timedelta64('NaT'),
np.timedelta64(50, 'us')),
# special case for int64 min
# in integer floor division
(np.timedelta64(np.iinfo(np.int64).min),
np.timedelta64(-1)),
])
def test_timedelta_divmod_warnings(self, op1, op2):
with assert_warns(RuntimeWarning):
expected = (op1 // op2, op1 % op2)
with assert_warns(RuntimeWarning):
actual = divmod(op1, op2)
assert_equal(actual, expected)
def test_datetime_divide(self):
for dta, tda, tdb, tdc, tdd in \
[
# One-dimensional arrays
(np.array(['2012-12-21'], dtype='M8[D]'),
np.array([6], dtype='m8[h]'),
np.array([9], dtype='m8[h]'),
np.array([12], dtype='m8[h]'),
np.array([6], dtype='m8[m]')),
# NumPy scalars
(np.datetime64('2012-12-21', '[D]'),
np.timedelta64(6, '[h]'),
np.timedelta64(9, '[h]'),
np.timedelta64(12, '[h]'),
np.timedelta64(6, '[m]'))]:
# m8 / int
assert_equal(tdc / 2, tda)
assert_equal((tdc / 2).dtype, np.dtype('m8[h]'))
# m8 / float
assert_equal(tda / 0.5, tdc)
assert_equal((tda / 0.5).dtype, np.dtype('m8[h]'))
# m8 / m8
assert_equal(tda / tdb, 6.0 / 9.0)
assert_equal(np.divide(tda, tdb), 6.0 / 9.0)
assert_equal(np.true_divide(tda, tdb), 6.0 / 9.0)
assert_equal(tdb / tda, 9.0 / 6.0)
assert_equal((tda / tdb).dtype, np.dtype('f8'))
assert_equal(tda / tdd, 60.0)
assert_equal(tdd / tda, 1.0 / 60.0)
# int / m8
assert_raises(TypeError, np.divide, 2, tdb)
# float / m8
assert_raises(TypeError, np.divide, 0.5, tdb)
# m8 / M8
assert_raises(TypeError, np.divide, dta, tda)
# M8 / m8
assert_raises(TypeError, np.divide, tda, dta)
# M8 / int
assert_raises(TypeError, np.divide, dta, 2)
# int / M8
assert_raises(TypeError, np.divide, 2, dta)
# M8 / float
assert_raises(TypeError, np.divide, dta, 1.5)
# float / M8
assert_raises(TypeError, np.divide, 1.5, dta)
# NaTs
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, r".*encountered in true\_divide")
nat = np.timedelta64('NaT')
for tp in (int, float):
assert_equal(np.timedelta64(1) / tp(0), nat)
assert_equal(np.timedelta64(0) / tp(0), nat)
assert_equal(nat / tp(0), nat)
assert_equal(nat / tp(2), nat)
# Division by inf
assert_equal(np.timedelta64(1) / float('inf'), np.timedelta64(0))
assert_equal(np.timedelta64(0) / float('inf'), np.timedelta64(0))
assert_equal(nat / float('inf'), nat)
# Division by nan
assert_equal(np.timedelta64(1) / float('nan'), nat)
assert_equal(np.timedelta64(0) / float('nan'), nat)
assert_equal(nat / float('nan'), nat)
def test_datetime_compare(self):
# Test all the comparison operators
a = np.datetime64('2000-03-12T18:00:00.000000')
b = np.array(['2000-03-12T18:00:00.000000',
'2000-03-12T17:59:59.999999',
'2000-03-12T18:00:00.000001',
'1970-01-11T12:00:00.909090',
'2016-01-11T12:00:00.909090'],
dtype='datetime64[us]')
assert_equal(np.equal(a, b), [1, 0, 0, 0, 0])
assert_equal(np.not_equal(a, b), [0, 1, 1, 1, 1])
assert_equal(np.less(a, b), [0, 0, 1, 0, 1])
assert_equal(np.less_equal(a, b), [1, 0, 1, 0, 1])
assert_equal(np.greater(a, b), [0, 1, 0, 1, 0])
assert_equal(np.greater_equal(a, b), [1, 1, 0, 1, 0])
def test_datetime_compare_nat(self):
dt_nat = np.datetime64('NaT', 'D')
dt_other = np.datetime64('2000-01-01')
td_nat = np.timedelta64('NaT', 'h')
td_other = np.timedelta64(1, 'h')
for op in [np.equal, np.less, np.less_equal,
np.greater, np.greater_equal]:
assert_(not op(dt_nat, dt_nat))
assert_(not op(dt_nat, dt_other))
assert_(not op(dt_other, dt_nat))
assert_(not op(td_nat, td_nat))
assert_(not op(td_nat, td_other))
assert_(not op(td_other, td_nat))
assert_(np.not_equal(dt_nat, dt_nat))
assert_(np.not_equal(dt_nat, dt_other))
assert_(np.not_equal(dt_other, dt_nat))
assert_(np.not_equal(td_nat, td_nat))
assert_(np.not_equal(td_nat, td_other))
assert_(np.not_equal(td_other, td_nat))
def test_datetime_minmax(self):
# The metadata of the result should become the GCD
# of the operand metadata
a = np.array('1999-03-12T13', dtype='M8[2m]')
b = np.array('1999-03-12T12', dtype='M8[s]')
assert_equal(np.minimum(a, b), b)
assert_equal(np.minimum(a, b).dtype, np.dtype('M8[s]'))
assert_equal(np.fmin(a, b), b)
assert_equal(np.fmin(a, b).dtype, np.dtype('M8[s]'))
assert_equal(np.maximum(a, b), a)
assert_equal(np.maximum(a, b).dtype, np.dtype('M8[s]'))
assert_equal(np.fmax(a, b), a)
assert_equal(np.fmax(a, b).dtype, np.dtype('M8[s]'))
# Viewed as integers, the comparison is opposite because
# of the units chosen
assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8'))
# Interaction with NaT
a = np.array('1999-03-12T13', dtype='M8[2m]')
dtnat = np.array('NaT', dtype='M8[h]')
assert_equal(np.minimum(a, dtnat), dtnat)
assert_equal(np.minimum(dtnat, a), dtnat)
assert_equal(np.maximum(a, dtnat), dtnat)
assert_equal(np.maximum(dtnat, a), dtnat)
assert_equal(np.fmin(dtnat, a), a)
assert_equal(np.fmin(a, dtnat), a)
assert_equal(np.fmax(dtnat, a), a)
assert_equal(np.fmax(a, dtnat), a)
# Also do timedelta
a = np.array(3, dtype='m8[h]')
b = np.array(3*3600 - 3, dtype='m8[s]')
assert_equal(np.minimum(a, b), b)
assert_equal(np.minimum(a, b).dtype, np.dtype('m8[s]'))
assert_equal(np.fmin(a, b), b)
assert_equal(np.fmin(a, b).dtype, np.dtype('m8[s]'))
assert_equal(np.maximum(a, b), a)
assert_equal(np.maximum(a, b).dtype, np.dtype('m8[s]'))
assert_equal(np.fmax(a, b), a)
assert_equal(np.fmax(a, b).dtype, np.dtype('m8[s]'))
# Viewed as integers, the comparison is opposite because
# of the units chosen
assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8'))
# should raise between datetime and timedelta
#
# TODO: Allowing unsafe casting by
# default in ufuncs strikes again... :(
a = np.array(3, dtype='m8[h]')
b = np.array('1999-03-12T12', dtype='M8[s]')
#assert_raises(TypeError, np.minimum, a, b)
#assert_raises(TypeError, np.maximum, a, b)
#assert_raises(TypeError, np.fmin, a, b)
#assert_raises(TypeError, np.fmax, a, b)
assert_raises(TypeError, np.minimum, a, b, casting='same_kind')
assert_raises(TypeError, np.maximum, a, b, casting='same_kind')
assert_raises(TypeError, np.fmin, a, b, casting='same_kind')
assert_raises(TypeError, np.fmax, a, b, casting='same_kind')
def test_hours(self):
t = np.ones(3, dtype='M8[s]')
t[0] = 60*60*24 + 60*60*10
assert_(t[0].item().hour == 10)
def test_divisor_conversion_year(self):
assert_(np.dtype('M8[Y/4]') == np.dtype('M8[3M]'))
assert_(np.dtype('M8[Y/13]') == np.dtype('M8[4W]'))
assert_(np.dtype('M8[3Y/73]') == np.dtype('M8[15D]'))
def test_divisor_conversion_month(self):
assert_(np.dtype('M8[M/2]') == np.dtype('M8[2W]'))
assert_(np.dtype('M8[M/15]') == np.dtype('M8[2D]'))
assert_(np.dtype('M8[3M/40]') == np.dtype('M8[54h]'))
def test_divisor_conversion_week(self):
assert_(np.dtype('m8[W/7]') == np.dtype('m8[D]'))
assert_(np.dtype('m8[3W/14]') == np.dtype('m8[36h]'))
assert_(np.dtype('m8[5W/140]') == np.dtype('m8[360m]'))
def test_divisor_conversion_day(self):
assert_(np.dtype('M8[D/12]') == np.dtype('M8[2h]'))
assert_(np.dtype('M8[D/120]') == np.dtype('M8[12m]'))
assert_(np.dtype('M8[3D/960]') == np.dtype('M8[270s]'))
def test_divisor_conversion_hour(self):
assert_(np.dtype('m8[h/30]') == np.dtype('m8[2m]'))
assert_(np.dtype('m8[3h/300]') == np.dtype('m8[36s]'))
def test_divisor_conversion_minute(self):
assert_(np.dtype('m8[m/30]') == np.dtype('m8[2s]'))
assert_(np.dtype('m8[3m/300]') == np.dtype('m8[600ms]'))
def test_divisor_conversion_second(self):
assert_(np.dtype('m8[s/100]') == np.dtype('m8[10ms]'))
assert_(np.dtype('m8[3s/10000]') == np.dtype('m8[300us]'))
def test_divisor_conversion_fs(self):
assert_(np.dtype('M8[fs/100]') == np.dtype('M8[10as]'))
assert_raises(ValueError, lambda: np.dtype('M8[3fs/10000]'))
def test_divisor_conversion_as(self):
assert_raises(ValueError, lambda: np.dtype('M8[as/10]'))
def test_string_parser_variants(self):
# Allow space instead of 'T' between date and time
assert_equal(np.array(['1980-02-29T01:02:03'], np.dtype('M8[s]')),
np.array(['1980-02-29 01:02:03'], np.dtype('M8[s]')))
# Allow positive years
assert_equal(np.array(['+1980-02-29T01:02:03'], np.dtype('M8[s]')),
np.array(['+1980-02-29 01:02:03'], np.dtype('M8[s]')))
# Allow negative years
assert_equal(np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')),
np.array(['-1980-02-29 01:02:03'], np.dtype('M8[s]')))
# UTC specifier
with assert_warns(DeprecationWarning):
assert_equal(
np.array(['+1980-02-29T01:02:03'], np.dtype('M8[s]')),
np.array(['+1980-02-29 01:02:03Z'], np.dtype('M8[s]')))
with assert_warns(DeprecationWarning):
assert_equal(
np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')),
np.array(['-1980-02-29 01:02:03Z'], np.dtype('M8[s]')))
# Time zone offset
with assert_warns(DeprecationWarning):
assert_equal(
np.array(['1980-02-29T02:02:03'], np.dtype('M8[s]')),
np.array(['1980-02-29 00:32:03-0130'], np.dtype('M8[s]')))
with assert_warns(DeprecationWarning):
assert_equal(
np.array(['1980-02-28T22:32:03'], np.dtype('M8[s]')),
np.array(['1980-02-29 00:02:03+01:30'], np.dtype('M8[s]')))
with assert_warns(DeprecationWarning):
assert_equal(
np.array(['1980-02-29T02:32:03.506'], np.dtype('M8[s]')),
np.array(['1980-02-29 00:32:03.506-02'], np.dtype('M8[s]')))
with assert_warns(DeprecationWarning):
assert_equal(np.datetime64('1977-03-02T12:30-0230'),
np.datetime64('1977-03-02T15:00'))
def test_string_parser_error_check(self):
# Arbitrary bad string
assert_raises(ValueError, np.array, ['badvalue'], np.dtype('M8[us]'))
# Character after year must be '-'
assert_raises(ValueError, np.array, ['1980X'], np.dtype('M8[us]'))
# Cannot have trailing '-'
assert_raises(ValueError, np.array, ['1980-'], np.dtype('M8[us]'))
# Month must be in range [1,12]
assert_raises(ValueError, np.array, ['1980-00'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-13'], np.dtype('M8[us]'))
# Month must have two digits
assert_raises(ValueError, np.array, ['1980-1'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-1-02'], np.dtype('M8[us]'))
# 'Mor' is not a valid month
assert_raises(ValueError, np.array, ['1980-Mor'], np.dtype('M8[us]'))
# Cannot have trailing '-'
assert_raises(ValueError, np.array, ['1980-01-'], np.dtype('M8[us]'))
# Day must be in range [1,len(month)]
assert_raises(ValueError, np.array, ['1980-01-0'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-01-00'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-01-32'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1979-02-29'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-30'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-03-32'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-04-31'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-05-32'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-06-31'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-07-32'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-08-32'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-09-31'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-10-32'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-11-31'], np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-12-32'], np.dtype('M8[us]'))
# Cannot have trailing characters
assert_raises(ValueError, np.array, ['1980-02-03%'],
np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-03 q'],
np.dtype('M8[us]'))
# Hours must be in range [0, 23]
assert_raises(ValueError, np.array, ['1980-02-03 25'],
np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-03T25'],
np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-03 24:01'],
np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-03T24:01'],
np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-03 -1'],
np.dtype('M8[us]'))
# No trailing ':'
assert_raises(ValueError, np.array, ['1980-02-03 01:'],
np.dtype('M8[us]'))
# Minutes must be in range [0, 59]
assert_raises(ValueError, np.array, ['1980-02-03 01:-1'],
np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-03 01:60'],
np.dtype('M8[us]'))
# No trailing ':'
assert_raises(ValueError, np.array, ['1980-02-03 01:60:'],
np.dtype('M8[us]'))
# Seconds must be in range [0, 59]
assert_raises(ValueError, np.array, ['1980-02-03 01:10:-1'],
np.dtype('M8[us]'))
assert_raises(ValueError, np.array, ['1980-02-03 01:01:60'],
np.dtype('M8[us]'))
# Timezone offset must be within a reasonable range
with assert_warns(DeprecationWarning):
assert_raises(ValueError, np.array, ['1980-02-03 01:01:00+0661'],
np.dtype('M8[us]'))
with assert_warns(DeprecationWarning):
assert_raises(ValueError, np.array, ['1980-02-03 01:01:00+2500'],
np.dtype('M8[us]'))
with assert_warns(DeprecationWarning):
assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-0070'],
np.dtype('M8[us]'))
with assert_warns(DeprecationWarning):
assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-3000'],
np.dtype('M8[us]'))
with assert_warns(DeprecationWarning):
assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-25:00'],
np.dtype('M8[us]'))
def test_creation_overflow(self):
date = '1980-03-23 20:00:00'
timesteps = np.array([date], dtype='datetime64[s]')[0].astype(np.int64)
for unit in ['ms', 'us', 'ns']:
timesteps *= 1000
x = np.array([date], dtype='datetime64[%s]' % unit)
assert_equal(timesteps, x[0].astype(np.int64),
err_msg='Datetime conversion error for unit %s' % unit)
assert_equal(x[0].astype(np.int64), 322689600000000000)
# gh-13062
with pytest.raises(OverflowError):
np.datetime64(2**64, 'D')
with pytest.raises(OverflowError):
np.timedelta64(2**64, 'D')
def test_datetime_as_string(self):
# Check all the units with default string conversion
date = '1959-10-13'
datetime = '1959-10-13T12:34:56.789012345678901234'
assert_equal(np.datetime_as_string(np.datetime64(date, 'Y')),
'1959')
assert_equal(np.datetime_as_string(np.datetime64(date, 'M')),
'1959-10')
assert_equal(np.datetime_as_string(np.datetime64(date, 'D')),
'1959-10-13')
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'h')),
'1959-10-13T12')
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'm')),
'1959-10-13T12:34')
assert_equal(np.datetime_as_string(np.datetime64(datetime, 's')),
'1959-10-13T12:34:56')
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ms')),
'1959-10-13T12:34:56.789')
for us in ['us', 'μs', b'us']: # check non-ascii and bytes too
assert_equal(np.datetime_as_string(np.datetime64(datetime, us)),
'1959-10-13T12:34:56.789012')
datetime = '1969-12-31T23:34:56.789012345678901234'
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ns')),
'1969-12-31T23:34:56.789012345')
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ps')),
'1969-12-31T23:34:56.789012345678')
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'fs')),
'1969-12-31T23:34:56.789012345678901')
datetime = '1969-12-31T23:59:57.789012345678901234'
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'as')),
datetime)
datetime = '1970-01-01T00:34:56.789012345678901234'
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ns')),
'1970-01-01T00:34:56.789012345')
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ps')),
'1970-01-01T00:34:56.789012345678')
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'fs')),
'1970-01-01T00:34:56.789012345678901')
datetime = '1970-01-01T00:00:05.789012345678901234'
assert_equal(np.datetime_as_string(np.datetime64(datetime, 'as')),
datetime)
# String conversion with the unit= parameter
a = np.datetime64('2032-07-18T12:23:34.123456', 'us')
assert_equal(np.datetime_as_string(a, unit='Y', casting='unsafe'),
'2032')
assert_equal(np.datetime_as_string(a, unit='M', casting='unsafe'),
'2032-07')
assert_equal(np.datetime_as_string(a, unit='W', casting='unsafe'),
'2032-07-18')
assert_equal(np.datetime_as_string(a, unit='D', casting='unsafe'),
'2032-07-18')
assert_equal(np.datetime_as_string(a, unit='h'), '2032-07-18T12')
assert_equal(np.datetime_as_string(a, unit='m'),
'2032-07-18T12:23')
assert_equal(np.datetime_as_string(a, unit='s'),
'2032-07-18T12:23:34')
assert_equal(np.datetime_as_string(a, unit='ms'),
'2032-07-18T12:23:34.123')
assert_equal(np.datetime_as_string(a, unit='us'),
'2032-07-18T12:23:34.123456')
assert_equal(np.datetime_as_string(a, unit='ns'),
'2032-07-18T12:23:34.123456000')
assert_equal(np.datetime_as_string(a, unit='ps'),
'2032-07-18T12:23:34.123456000000')
assert_equal(np.datetime_as_string(a, unit='fs'),
'2032-07-18T12:23:34.123456000000000')
assert_equal(np.datetime_as_string(a, unit='as'),
'2032-07-18T12:23:34.123456000000000000')
# unit='auto' parameter
assert_equal(np.datetime_as_string(
np.datetime64('2032-07-18T12:23:34.123456', 'us'), unit='auto'),
'2032-07-18T12:23:34.123456')
assert_equal(np.datetime_as_string(
np.datetime64('2032-07-18T12:23:34.12', 'us'), unit='auto'),
'2032-07-18T12:23:34.120')
assert_equal(np.datetime_as_string(
np.datetime64('2032-07-18T12:23:34', 'us'), unit='auto'),
'2032-07-18T12:23:34')
assert_equal(np.datetime_as_string(
np.datetime64('2032-07-18T12:23:00', 'us'), unit='auto'),
'2032-07-18T12:23')
# 'auto' doesn't split up hour and minute
assert_equal(np.datetime_as_string(
np.datetime64('2032-07-18T12:00:00', 'us'), unit='auto'),
'2032-07-18T12:00')
assert_equal(np.datetime_as_string(
np.datetime64('2032-07-18T00:00:00', 'us'), unit='auto'),
'2032-07-18')
# 'auto' doesn't split up the date
assert_equal(np.datetime_as_string(
np.datetime64('2032-07-01T00:00:00', 'us'), unit='auto'),
'2032-07-01')
assert_equal(np.datetime_as_string(
np.datetime64('2032-01-01T00:00:00', 'us'), unit='auto'),
'2032-01-01')
@pytest.mark.skipif(not _has_pytz, reason="The pytz module is not available.")
def test_datetime_as_string_timezone(self):
# timezone='local' vs 'UTC'
a = np.datetime64('2010-03-15T06:30', 'm')
assert_equal(np.datetime_as_string(a),
'2010-03-15T06:30')
assert_equal(np.datetime_as_string(a, timezone='naive'),
'2010-03-15T06:30')
assert_equal(np.datetime_as_string(a, timezone='UTC'),
'2010-03-15T06:30Z')
assert_(np.datetime_as_string(a, timezone='local') !=
'2010-03-15T06:30')
b = np.datetime64('2010-02-15T06:30', 'm')
assert_equal(np.datetime_as_string(a, timezone=tz('US/Central')),
'2010-03-15T01:30-0500')
assert_equal(np.datetime_as_string(a, timezone=tz('US/Eastern')),
'2010-03-15T02:30-0400')
assert_equal(np.datetime_as_string(a, timezone=tz('US/Pacific')),
'2010-03-14T23:30-0700')
assert_equal(np.datetime_as_string(b, timezone=tz('US/Central')),
'2010-02-15T00:30-0600')
assert_equal(np.datetime_as_string(b, timezone=tz('US/Eastern')),
'2010-02-15T01:30-0500')
assert_equal(np.datetime_as_string(b, timezone=tz('US/Pacific')),
'2010-02-14T22:30-0800')
# Dates to strings with a timezone attached is disabled by default
assert_raises(TypeError, np.datetime_as_string, a, unit='D',
timezone=tz('US/Pacific'))
# Check that we can print out the date in the specified time zone
assert_equal(np.datetime_as_string(a, unit='D',
timezone=tz('US/Pacific'), casting='unsafe'),
'2010-03-14')
assert_equal(np.datetime_as_string(b, unit='D',
timezone=tz('US/Central'), casting='unsafe'),
'2010-02-15')
def test_datetime_arange(self):
# With two datetimes provided as strings
a = np.arange('2010-01-05', '2010-01-10', dtype='M8[D]')
assert_equal(a.dtype, np.dtype('M8[D]'))
assert_equal(a,
np.array(['2010-01-05', '2010-01-06', '2010-01-07',
'2010-01-08', '2010-01-09'], dtype='M8[D]'))
a = np.arange('1950-02-10', '1950-02-06', -1, dtype='M8[D]')
assert_equal(a.dtype, np.dtype('M8[D]'))
assert_equal(a,
np.array(['1950-02-10', '1950-02-09', '1950-02-08',
'1950-02-07'], dtype='M8[D]'))
# Unit should be detected as months here
a = np.arange('1969-05', '1970-05', 2, dtype='M8')
assert_equal(a.dtype, np.dtype('M8[M]'))
assert_equal(a,
np.datetime64('1969-05') + np.arange(12, step=2))
# datetime, integer|timedelta works as well
# produces arange (start, start + stop) in this case
a = np.arange('1969', 18, 3, dtype='M8')
assert_equal(a.dtype, np.dtype('M8[Y]'))
assert_equal(a,
np.datetime64('1969') + np.arange(18, step=3))
a = np.arange('1969-12-19', 22, np.timedelta64(2), dtype='M8')
assert_equal(a.dtype, np.dtype('M8[D]'))
assert_equal(a,
np.datetime64('1969-12-19') + np.arange(22, step=2))
# Step of 0 is disallowed
assert_raises(ValueError, np.arange, np.datetime64('today'),
np.datetime64('today') + 3, 0)
# Promotion across nonlinear unit boundaries is disallowed
assert_raises(TypeError, np.arange, np.datetime64('2011-03-01', 'D'),
np.timedelta64(5, 'M'))
assert_raises(TypeError, np.arange,
np.datetime64('2012-02-03T14', 's'),
np.timedelta64(5, 'Y'))
def test_datetime_arange_no_dtype(self):
d = np.array('2010-01-04', dtype="M8[D]")
assert_equal(np.arange(d, d + 1), d)
assert_raises(ValueError, np.arange, d)
def test_timedelta_arange(self):
a = np.arange(3, 10, dtype='m8')
assert_equal(a.dtype, np.dtype('m8'))
assert_equal(a, np.timedelta64(0) + np.arange(3, 10))
a = np.arange(np.timedelta64(3, 's'), 10, 2, dtype='m8')
assert_equal(a.dtype, np.dtype('m8[s]'))
assert_equal(a, np.timedelta64(0, 's') + np.arange(3, 10, 2))
# Step of 0 is disallowed
assert_raises(ValueError, np.arange, np.timedelta64(0),
np.timedelta64(5), 0)
# Promotion across nonlinear unit boundaries is disallowed
assert_raises(TypeError, np.arange, np.timedelta64(0, 'D'),
np.timedelta64(5, 'M'))
assert_raises(TypeError, np.arange, np.timedelta64(0, 'Y'),
np.timedelta64(5, 'D'))
@pytest.mark.parametrize("val1, val2, expected", [
# case from gh-12092
(np.timedelta64(7, 's'),
np.timedelta64(3, 's'),
np.timedelta64(1, 's')),
# negative value cases
(np.timedelta64(3, 's'),
np.timedelta64(-2, 's'),
np.timedelta64(-1, 's')),
(np.timedelta64(-3, 's'),
np.timedelta64(2, 's'),
np.timedelta64(1, 's')),
# larger value cases
(np.timedelta64(17, 's'),
np.timedelta64(22, 's'),
np.timedelta64(17, 's')),
(np.timedelta64(22, 's'),
np.timedelta64(17, 's'),
np.timedelta64(5, 's')),
# different units
(np.timedelta64(1, 'm'),
np.timedelta64(57, 's'),
np.timedelta64(3, 's')),
(np.timedelta64(1, 'us'),
np.timedelta64(727, 'ns'),
np.timedelta64(273, 'ns')),
# NaT is propagated
(np.timedelta64('NaT'),
np.timedelta64(50, 'ns'),
np.timedelta64('NaT')),
# Y % M works
(np.timedelta64(2, 'Y'),
np.timedelta64(22, 'M'),
np.timedelta64(2, 'M')),
])
def test_timedelta_modulus(self, val1, val2, expected):
assert_equal(val1 % val2, expected)
@pytest.mark.parametrize("val1, val2", [
# years and months sometimes can't be unambiguously
# divided for modulus operation
(np.timedelta64(7, 'Y'),
np.timedelta64(3, 's')),
(np.timedelta64(7, 'M'),
np.timedelta64(1, 'D')),
])
def test_timedelta_modulus_error(self, val1, val2):
with assert_raises_regex(TypeError, "common metadata divisor"):
val1 % val2
def test_timedelta_modulus_div_by_zero(self):
with assert_warns(RuntimeWarning):
actual = np.timedelta64(10, 's') % np.timedelta64(0, 's')
assert_equal(actual, np.timedelta64('NaT'))
@pytest.mark.parametrize("val1, val2", [
# cases where one operand is not
# timedelta64
(np.timedelta64(7, 'Y'),
15,),
(7.5,
np.timedelta64(1, 'D')),
])
def test_timedelta_modulus_type_resolution(self, val1, val2):
# NOTE: some of the operations may be supported
# in the future
with assert_raises_regex(TypeError,
"'remainder' cannot use operands with types"):
val1 % val2
def test_timedelta_arange_no_dtype(self):
d = np.array(5, dtype="m8[D]")
assert_equal(np.arange(d, d + 1), d)
assert_equal(np.arange(d), np.arange(0, d))
def test_datetime_maximum_reduce(self):
a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='M8[D]')
assert_equal(np.maximum.reduce(a).dtype, np.dtype('M8[D]'))
assert_equal(np.maximum.reduce(a),
np.datetime64('2010-01-02'))
a = np.array([1, 4, 0, 7, 2], dtype='m8[s]')
assert_equal(np.maximum.reduce(a).dtype, np.dtype('m8[s]'))
assert_equal(np.maximum.reduce(a),
np.timedelta64(7, 's'))
def test_datetime_busday_offset(self):
# First Monday in June
assert_equal(
np.busday_offset('2011-06', 0, roll='forward', weekmask='Mon'),
np.datetime64('2011-06-06'))
# Last Monday in June
assert_equal(
np.busday_offset('2011-07', -1, roll='forward', weekmask='Mon'),
np.datetime64('2011-06-27'))
assert_equal(
np.busday_offset('2011-07', -1, roll='forward', weekmask='Mon'),
np.datetime64('2011-06-27'))
# Default M-F business days, different roll modes
assert_equal(np.busday_offset('2010-08', 0, roll='backward'),
np.datetime64('2010-07-30'))
assert_equal(np.busday_offset('2010-08', 0, roll='preceding'),
np.datetime64('2010-07-30'))
assert_equal(np.busday_offset('2010-08', 0, roll='modifiedpreceding'),
np.datetime64('2010-08-02'))
assert_equal(np.busday_offset('2010-08', 0, roll='modifiedfollowing'),
np.datetime64('2010-08-02'))
assert_equal(np.busday_offset('2010-08', 0, roll='forward'),
np.datetime64('2010-08-02'))
assert_equal(np.busday_offset('2010-08', 0, roll='following'),
np.datetime64('2010-08-02'))
assert_equal(np.busday_offset('2010-10-30', 0, roll='following'),
np.datetime64('2010-11-01'))
assert_equal(
np.busday_offset('2010-10-30', 0, roll='modifiedfollowing'),
np.datetime64('2010-10-29'))
assert_equal(
np.busday_offset('2010-10-30', 0, roll='modifiedpreceding'),
np.datetime64('2010-10-29'))
assert_equal(
np.busday_offset('2010-10-16', 0, roll='modifiedfollowing'),
np.datetime64('2010-10-18'))
assert_equal(
np.busday_offset('2010-10-16', 0, roll='modifiedpreceding'),
np.datetime64('2010-10-15'))
# roll='raise' by default
assert_raises(ValueError, np.busday_offset, '2011-06-04', 0)
# Bigger offset values
assert_equal(np.busday_offset('2006-02-01', 25),
np.datetime64('2006-03-08'))
assert_equal(np.busday_offset('2006-03-08', -25),
np.datetime64('2006-02-01'))
assert_equal(np.busday_offset('2007-02-25', 11, weekmask='SatSun'),
np.datetime64('2007-04-07'))
assert_equal(np.busday_offset('2007-04-07', -11, weekmask='SatSun'),
np.datetime64('2007-02-25'))
# NaT values when roll is not raise
assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='nat'),
np.datetime64('NaT'))
assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='following'),
np.datetime64('NaT'))
assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='preceding'),
np.datetime64('NaT'))
def test_datetime_busdaycalendar(self):
# Check that it removes NaT, duplicates, and weekends
# and sorts the result.
bdd = np.busdaycalendar(
holidays=['NaT', '2011-01-17', '2011-03-06', 'NaT',
'2011-12-26', '2011-05-30', '2011-01-17'])
assert_equal(bdd.holidays,
np.array(['2011-01-17', '2011-05-30', '2011-12-26'], dtype='M8'))
# Default M-F weekmask
assert_equal(bdd.weekmask, np.array([1, 1, 1, 1, 1, 0, 0], dtype='?'))
# Check string weekmask with varying whitespace.
bdd = np.busdaycalendar(weekmask="Sun TueWed Thu\tFri")
assert_equal(bdd.weekmask, np.array([0, 1, 1, 1, 1, 0, 1], dtype='?'))
# Check length 7 0/1 string
bdd = np.busdaycalendar(weekmask="0011001")
assert_equal(bdd.weekmask, np.array([0, 0, 1, 1, 0, 0, 1], dtype='?'))
# Check length 7 string weekmask.
bdd = np.busdaycalendar(weekmask="Mon Tue")
assert_equal(bdd.weekmask, np.array([1, 1, 0, 0, 0, 0, 0], dtype='?'))
# All-zeros weekmask should raise
assert_raises(ValueError, np.busdaycalendar, weekmask=[0, 0, 0, 0, 0, 0, 0])
# weekday names must be correct case
assert_raises(ValueError, np.busdaycalendar, weekmask="satsun")
# Empty weekmask should raise
assert_raises(ValueError, np.busdaycalendar, weekmask="")
# Invalid weekday name codes should raise
assert_raises(ValueError, np.busdaycalendar, weekmask="Mon Tue We")
assert_raises(ValueError, np.busdaycalendar, weekmask="Max")
assert_raises(ValueError, np.busdaycalendar, weekmask="Monday Tue")
def test_datetime_busday_holidays_offset(self):
# With exactly one holiday
assert_equal(
np.busday_offset('2011-11-10', 1, holidays=['2011-11-11']),
np.datetime64('2011-11-14'))
assert_equal(
np.busday_offset('2011-11-04', 5, holidays=['2011-11-11']),
np.datetime64('2011-11-14'))
assert_equal(
np.busday_offset('2011-11-10', 5, holidays=['2011-11-11']),
np.datetime64('2011-11-18'))
assert_equal(
np.busday_offset('2011-11-14', -1, holidays=['2011-11-11']),
np.datetime64('2011-11-10'))
assert_equal(
np.busday_offset('2011-11-18', -5, holidays=['2011-11-11']),
np.datetime64('2011-11-10'))
assert_equal(
np.busday_offset('2011-11-14', -5, holidays=['2011-11-11']),
np.datetime64('2011-11-04'))
# With the holiday appearing twice
assert_equal(
np.busday_offset('2011-11-10', 1,
holidays=['2011-11-11', '2011-11-11']),
np.datetime64('2011-11-14'))
assert_equal(
np.busday_offset('2011-11-14', -1,
holidays=['2011-11-11', '2011-11-11']),
np.datetime64('2011-11-10'))
# With a NaT holiday
assert_equal(
np.busday_offset('2011-11-10', 1,
holidays=['2011-11-11', 'NaT']),
np.datetime64('2011-11-14'))
assert_equal(
np.busday_offset('2011-11-14', -1,
holidays=['NaT', '2011-11-11']),
np.datetime64('2011-11-10'))
# With another holiday after
assert_equal(
np.busday_offset('2011-11-10', 1,
holidays=['2011-11-11', '2011-11-24']),
np.datetime64('2011-11-14'))
assert_equal(
np.busday_offset('2011-11-14', -1,
holidays=['2011-11-11', '2011-11-24']),
np.datetime64('2011-11-10'))
# With another holiday before
assert_equal(
np.busday_offset('2011-11-10', 1,
holidays=['2011-10-10', '2011-11-11']),
np.datetime64('2011-11-14'))
assert_equal(
np.busday_offset('2011-11-14', -1,
holidays=['2011-10-10', '2011-11-11']),
np.datetime64('2011-11-10'))
# With another holiday before and after
assert_equal(
np.busday_offset('2011-11-10', 1,
holidays=['2011-10-10', '2011-11-11', '2011-11-24']),
np.datetime64('2011-11-14'))
assert_equal(
np.busday_offset('2011-11-14', -1,
holidays=['2011-10-10', '2011-11-11', '2011-11-24']),
np.datetime64('2011-11-10'))
# A bigger forward jump across more than one week/holiday
holidays = ['2011-10-10', '2011-11-11', '2011-11-24',
'2011-12-25', '2011-05-30', '2011-02-21',
'2011-12-26', '2012-01-02']
bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)
assert_equal(
np.busday_offset('2011-10-03', 4, holidays=holidays),
np.busday_offset('2011-10-03', 4))
assert_equal(
np.busday_offset('2011-10-03', 5, holidays=holidays),
np.busday_offset('2011-10-03', 5 + 1))
assert_equal(
np.busday_offset('2011-10-03', 27, holidays=holidays),
np.busday_offset('2011-10-03', 27 + 1))
assert_equal(
np.busday_offset('2011-10-03', 28, holidays=holidays),
np.busday_offset('2011-10-03', 28 + 2))
assert_equal(
np.busday_offset('2011-10-03', 35, holidays=holidays),
np.busday_offset('2011-10-03', 35 + 2))
assert_equal(
np.busday_offset('2011-10-03', 36, holidays=holidays),
np.busday_offset('2011-10-03', 36 + 3))
assert_equal(
np.busday_offset('2011-10-03', 56, holidays=holidays),
np.busday_offset('2011-10-03', 56 + 3))
assert_equal(
np.busday_offset('2011-10-03', 57, holidays=holidays),
np.busday_offset('2011-10-03', 57 + 4))
assert_equal(
np.busday_offset('2011-10-03', 60, holidays=holidays),
np.busday_offset('2011-10-03', 60 + 4))
assert_equal(
np.busday_offset('2011-10-03', 61, holidays=holidays),
np.busday_offset('2011-10-03', 61 + 5))
assert_equal(
np.busday_offset('2011-10-03', 61, busdaycal=bdd),
np.busday_offset('2011-10-03', 61 + 5))
# A bigger backward jump across more than one week/holiday
assert_equal(
np.busday_offset('2012-01-03', -1, holidays=holidays),
np.busday_offset('2012-01-03', -1 - 1))
assert_equal(
np.busday_offset('2012-01-03', -4, holidays=holidays),
np.busday_offset('2012-01-03', -4 - 1))
assert_equal(
np.busday_offset('2012-01-03', -5, holidays=holidays),
np.busday_offset('2012-01-03', -5 - 2))
assert_equal(
np.busday_offset('2012-01-03', -25, holidays=holidays),
np.busday_offset('2012-01-03', -25 - 2))
assert_equal(
np.busday_offset('2012-01-03', -26, holidays=holidays),
np.busday_offset('2012-01-03', -26 - 3))
assert_equal(
np.busday_offset('2012-01-03', -33, holidays=holidays),
np.busday_offset('2012-01-03', -33 - 3))
assert_equal(
np.busday_offset('2012-01-03', -34, holidays=holidays),
np.busday_offset('2012-01-03', -34 - 4))
assert_equal(
np.busday_offset('2012-01-03', -56, holidays=holidays),
np.busday_offset('2012-01-03', -56 - 4))
assert_equal(
np.busday_offset('2012-01-03', -57, holidays=holidays),
np.busday_offset('2012-01-03', -57 - 5))
assert_equal(
np.busday_offset('2012-01-03', -57, busdaycal=bdd),
np.busday_offset('2012-01-03', -57 - 5))
# Can't supply both a weekmask/holidays and busdaycal
assert_raises(ValueError, np.busday_offset, '2012-01-03', -15,
weekmask='1111100', busdaycal=bdd)
assert_raises(ValueError, np.busday_offset, '2012-01-03', -15,
holidays=holidays, busdaycal=bdd)
# Roll with the holidays
assert_equal(
np.busday_offset('2011-12-25', 0,
roll='forward', holidays=holidays),
np.datetime64('2011-12-27'))
assert_equal(
np.busday_offset('2011-12-26', 0,
roll='forward', holidays=holidays),
np.datetime64('2011-12-27'))
assert_equal(
np.busday_offset('2011-12-26', 0,
roll='backward', holidays=holidays),
np.datetime64('2011-12-23'))
assert_equal(
np.busday_offset('2012-02-27', 0,
roll='modifiedfollowing',
holidays=['2012-02-27', '2012-02-26', '2012-02-28',
'2012-03-01', '2012-02-29']),
np.datetime64('2012-02-24'))
assert_equal(
np.busday_offset('2012-03-06', 0,
roll='modifiedpreceding',
holidays=['2012-03-02', '2012-03-03', '2012-03-01',
'2012-03-05', '2012-03-07', '2012-03-06']),
np.datetime64('2012-03-08'))
def test_datetime_busday_holidays_count(self):
holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24',
'2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17',
'2011-12-26', '2012-01-02', '2011-02-21', '2011-05-30',
'2011-07-01', '2011-07-04', '2011-09-05', '2011-10-10']
bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)
# Validate against busday_offset broadcast against
# a range of offsets
dates = np.busday_offset('2011-01-01', np.arange(366),
roll='forward', busdaycal=bdd)
assert_equal(np.busday_count('2011-01-01', dates, busdaycal=bdd),
np.arange(366))
# Returns negative value when reversed
assert_equal(np.busday_count(dates, '2011-01-01', busdaycal=bdd),
-np.arange(366))
dates = np.busday_offset('2011-12-31', -np.arange(366),
roll='forward', busdaycal=bdd)
assert_equal(np.busday_count(dates, '2011-12-31', busdaycal=bdd),
np.arange(366))
# Returns negative value when reversed
assert_equal(np.busday_count('2011-12-31', dates, busdaycal=bdd),
-np.arange(366))
# Can't supply both a weekmask/holidays and busdaycal
assert_raises(ValueError, np.busday_offset, '2012-01-03', '2012-02-03',
weekmask='1111100', busdaycal=bdd)
assert_raises(ValueError, np.busday_offset, '2012-01-03', '2012-02-03',
holidays=holidays, busdaycal=bdd)
# Number of Mondays in March 2011
assert_equal(np.busday_count('2011-03', '2011-04', weekmask='Mon'), 4)
# Returns negative value when reversed
assert_equal(np.busday_count('2011-04', '2011-03', weekmask='Mon'), -4)
def test_datetime_is_busday(self):
holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24',
'2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17',
'2011-12-26', '2012-01-02', '2011-02-21', '2011-05-30',
'2011-07-01', '2011-07-04', '2011-09-05', '2011-10-10',
'NaT']
bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)
# Weekend/weekday tests
assert_equal(np.is_busday('2011-01-01'), False)
assert_equal(np.is_busday('2011-01-02'), False)
assert_equal(np.is_busday('2011-01-03'), True)
# All the holidays are not business days
assert_equal(np.is_busday(holidays, busdaycal=bdd),
np.zeros(len(holidays), dtype='?'))
def test_datetime_y2038(self):
# Test parsing on either side of the Y2038 boundary
a = np.datetime64('2038-01-19T03:14:07')
assert_equal(a.view(np.int64), 2**31 - 1)
a = np.datetime64('2038-01-19T03:14:08')
assert_equal(a.view(np.int64), 2**31)
# Test parsing on either side of the Y2038 boundary with
# a manually specified timezone offset
with assert_warns(DeprecationWarning):
a = np.datetime64('2038-01-19T04:14:07+0100')
assert_equal(a.view(np.int64), 2**31 - 1)
with assert_warns(DeprecationWarning):
a = np.datetime64('2038-01-19T04:14:08+0100')
assert_equal(a.view(np.int64), 2**31)
# Test parsing a date after Y2038
a = np.datetime64('2038-01-20T13:21:14')
assert_equal(str(a), '2038-01-20T13:21:14')
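# (2**31 - 1 seconds after the 1970-01-01 epoch is 2038-01-19T03:14:07, the
# classic 32-bit time_t rollover point; datetime64 stores int64 internally,
# so it simply keeps counting past it.)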
def test_isnat(self):
assert_(np.isnat(np.datetime64('NaT', 'ms')))
assert_(np.isnat(np.datetime64('NaT', 'ns')))
assert_(not np.isnat(np.datetime64('2038-01-19T03:14:07')))
assert_(np.isnat(np.timedelta64('NaT', "ms")))
assert_(not np.isnat(np.timedelta64(34, "ms")))
res = np.array([False, False, True])
for unit in ['Y', 'M', 'W', 'D',
'h', 'm', 's', 'ms', 'us',
'ns', 'ps', 'fs', 'as']:
arr = np.array([123, -321, "NaT"], dtype='<datetime64[%s]' % unit)
assert_equal(np.isnat(arr), res)
arr = np.array([123, -321, "NaT"], dtype='>datetime64[%s]' % unit)
assert_equal(np.isnat(arr), res)
arr = np.array([123, -321, "NaT"], dtype='<timedelta64[%s]' % unit)
assert_equal(np.isnat(arr), res)
arr = np.array([123, -321, "NaT"], dtype='>timedelta64[%s]' % unit)
assert_equal(np.isnat(arr), res)
def test_isnat_error(self):
# Test that only datetime dtype arrays are accepted
for t in np.typecodes["All"]:
if t in np.typecodes["Datetime"]:
continue
assert_raises(TypeError, np.isnat, np.zeros(10, t))
def test_isfinite_scalar(self):
assert_(not np.isfinite(np.datetime64('NaT', 'ms')))
assert_(not np.isfinite(np.datetime64('NaT', 'ns')))
assert_(np.isfinite(np.datetime64('2038-01-19T03:14:07')))
assert_(not np.isfinite(np.timedelta64('NaT', "ms")))
assert_(np.isfinite(np.timedelta64(34, "ms")))
@pytest.mark.parametrize('unit', ['Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',
'us', 'ns', 'ps', 'fs', 'as'])
@pytest.mark.parametrize('dstr', ['<datetime64[%s]', '>datetime64[%s]',
'<timedelta64[%s]', '>timedelta64[%s]'])
def test_isfinite_isinf_isnan_units(self, unit, dstr):
'''check isfinite, isinf, isnan for all units of <M, >M, <m, >m dtypes
'''
arr_val = [123, -321, "NaT"]
arr = np.array(arr_val, dtype= dstr % unit)
pos = np.array([True, True, False])
neg = np.array([False, False, True])
false = np.array([False, False, False])
assert_equal(np.isfinite(arr), pos)
assert_equal(np.isinf(arr), false)
assert_equal(np.isnan(arr), neg)
def test_assert_equal(self):
assert_raises(AssertionError, assert_equal,
np.datetime64('nat'), np.timedelta64('nat'))
def test_corecursive_input(self):
# construct a co-recursive list
a, b = [], []
a.append(b)
b.append(a)
obj_arr = np.array([None])
obj_arr[0] = a
# At some point this caused a stack overflow (gh-11154). Now raises
# ValueError since the nested list cannot be converted to a datetime.
assert_raises(ValueError, obj_arr.astype, 'M8')
assert_raises(ValueError, obj_arr.astype, 'm8')
@pytest.mark.parametrize("shape", [(), (1,)])
def test_discovery_from_object_array(self, shape):
arr = np.array("2020-10-10", dtype=object).reshape(shape)
res = np.array("2020-10-10", dtype="M8").reshape(shape)
assert res.dtype == np.dtype("M8[D]")
assert_equal(arr.astype("M8"), res)
arr[...] = np.bytes_("2020-10-10") # try a numpy string type
assert_equal(arr.astype("M8"), res)
arr = arr.astype("S")
assert_equal(arr.astype("S").astype("M8"), res)
@pytest.mark.parametrize("time_unit", [
"Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as",
# compound units
"10D", "2M",
])
def test_limit_symmetry(self, time_unit):
"""
Dates should have symmetric limits around the unix epoch at +/-np.int64
"""
epoch = np.datetime64(0, time_unit)
latest = np.datetime64(np.iinfo(np.int64).max, time_unit)
earliest = np.datetime64(-np.iinfo(np.int64).max, time_unit)
# above should not have overflowed
assert earliest < epoch < latest
@pytest.mark.parametrize("time_unit", [
"Y", "M",
pytest.param("W", marks=pytest.mark.xfail(reason="gh-13197")),
"D", "h", "m",
"s", "ms", "us", "ns", "ps", "fs", "as",
pytest.param("10D", marks=pytest.mark.xfail(reason="similar to gh-13197")),
])
@pytest.mark.parametrize("sign", [-1, 1])
def test_limit_str_roundtrip(self, time_unit, sign):
"""
Limits should roundtrip when converted to strings.
This tests the conversion to and from npy_datetimestruct.
"""
# TODO: add absolute (gold standard) time span limit strings
limit = np.datetime64(np.iinfo(np.int64).max * sign, time_unit)
# Convert to string and back. Explicit unit needed since the day and
# week reprs are not distinguishable.
limit_via_str = np.datetime64(str(limit), time_unit)
assert limit_via_str == limit
class TestDateTimeData:
def test_basic(self):
a = np.array(['1980-03-23'], dtype=np.datetime64)
assert_equal(np.datetime_data(a.dtype), ('D', 1))
def test_bytes(self):
# byte units are converted to unicode
dt = np.datetime64('2000', (b'ms', 5))
assert np.datetime_data(dt.dtype) == ('ms', 5)
dt = np.datetime64('2000', b'5ms')
assert np.datetime_data(dt.dtype) == ('ms', 5)
def test_non_ascii(self):
# μs is normalized to us
dt = np.datetime64('2000', ('μs', 5))
assert np.datetime_data(dt.dtype) == ('us', 5)
dt = np.datetime64('2000', '5μs')
assert np.datetime_data(dt.dtype) == ('us', 5)
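The test file above exercises NumPy's datetime64/timedelta64 arithmetic, unit promotion and business-day helpers. A minimal standalone sketch of the behaviours it asserts (values chosen for illustration, not taken from the test data):

```python
import numpy as np

d = np.datetime64('2012-12-21', 'D')        # calendar date with day unit
t = np.timedelta64(3, 'D')                  # three-day offset

print(d + t)                                # 2012-12-24, stays datetime64[D]
print((d + t) - d)                          # 3 days: M8 - M8 yields m8
print(np.timedelta64(7, 's') // np.timedelta64(4, 's'))  # 1, floor division
print(np.timedelta64(7, 's') % np.timedelta64(3, 's'))   # 1 seconds, modulus
print(np.busday_offset('2011-06', 0, roll='forward', weekmask='Mon'))  # 2011-06-06
print(np.datetime_as_string(np.datetime64('2032-07-18T12:00:00', 'us'),
                            unit='auto'))   # '2032-07-18T12:00', trailing zero fields dropped
```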
avg_line_length: 45.507884 | max_line_length: 101 | alphanum_fraction: 0.538414

hexsha: a93fc680bf9d8f9b05ed7862ae2d1435b39a9e8e | size: 376 | ext: py | lang: Python
max_stars_repo_path: ocpmodels/trainers/__init__.py | max_stars_repo_name: Irlirion/ocp | max_stars_repo_head_hexsha: 6fb3e794eef31559db990300198eca20f41d8f37 | max_stars_repo_licenses: ["MIT", "BSD-3-Clause"] | max_stars_count: 242 | max_stars_repo_stars_event_min_datetime: 2020-10-14T11:10:43.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-29T07:50:18.000Z
max_issues_repo_path: ocpmodels/trainers/__init__.py | max_issues_repo_name: Irlirion/ocp | max_issues_repo_head_hexsha: 6fb3e794eef31559db990300198eca20f41d8f37 | max_issues_repo_licenses: ["MIT", "BSD-3-Clause"] | max_issues_count: 100 | max_issues_repo_issues_event_min_datetime: 2020-10-13T23:27:04.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-23T16:50:26.000Z
max_forks_repo_path: ocpmodels/trainers/__init__.py | max_forks_repo_name: Irlirion/ocp | max_forks_repo_head_hexsha: 6fb3e794eef31559db990300198eca20f41d8f37 | max_forks_repo_licenses: ["MIT", "BSD-3-Clause"] | max_forks_count: 86 | max_forks_repo_forks_event_min_datetime: 2020-10-15T05:56:28.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-16T16:11:45.000Z
content:
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
__all__ = [
"BaseTrainer",
"ForcesTrainer",
"EnergyTrainer",
]
from .base_trainer import BaseTrainer
from .energy_trainer import EnergyTrainer
from .forces_trainer import ForcesTrainer
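The `__init__.py` above only re-exports the three trainer classes listed in `__all__`. A hypothetical import sketch, assuming the ocpmodels package is importable in the current environment:

```python
# Hypothetical usage sketch; only the names re-exported via __all__ are used.
from ocpmodels.trainers import BaseTrainer, EnergyTrainer, ForcesTrainer

print([cls.__name__ for cls in (BaseTrainer, EnergyTrainer, ForcesTrainer)])
```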
avg_line_length: 25.066667 | max_line_length: 65 | alphanum_fraction: 0.757979

hexsha: 8005cfb1ab0caaeec62ec6984bec758a2a69ca11 | size: 756 | ext: py | lang: Python
max_stars_repo_path: simulation/sim.py | max_stars_repo_name: raad1masum/ParrotNAV | max_stars_repo_head_hexsha: 09c1a31453da5aa4c6a572bdaa5e2b06b6dc5f5c | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2020-06-08T20:10:13.000Z | max_stars_repo_stars_event_max_datetime: 2020-06-08T20:10:13.000Z
max_issues_repo_path: simulation/sim.py | max_issues_repo_name: raad1masum/ParrotNAV | max_issues_repo_head_hexsha: 09c1a31453da5aa4c6a572bdaa5e2b06b6dc5f5c | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: simulation/sim.py | max_forks_repo_name: raad1masum/ParrotNAV | max_forks_repo_head_hexsha: 09c1a31453da5aa4c6a572bdaa5e2b06b6dc5f5c | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
from selenium import webdriver
from time import sleep
from controls import controls
DRIVER_LOCATION = './drivers/edgedriver_win64/msedgedriver.exe'
driver = webdriver.Edge(DRIVER_LOCATION)
driver.get('https://iss-sim.spacex.com/')
begin_button = "//*[@id='begin-button']"
# get info from HUD
def get_info(info):
return driver.find_element_by_xpath(info).text
# control vehicle
def control(control):
driver.find_element_by_xpath(control).click()
print('ParrotNAV: Starting Simulation')
sleep(20)
print('ParrotNAV: Entering Simulation')
# begin simulation
driver.find_element_by_xpath(begin_button).click()
sleep(10)
# enable speed boost
control(controls.speed_boost)
print('ParrotNAV: Starting Controller')
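sim.py drives the page with Selenium 3's `find_element_by_xpath` helpers, which were deprecated and later removed in Selenium 4. A sketch of the equivalent lookups on Selenium 4, assuming the same `driver` object and xpath strings as above:

```python
# Selenium 4 equivalents of get_info()/control() above (sketch only).
from selenium.webdriver.common.by import By

def get_info_v4(xpath):
    return driver.find_element(By.XPATH, xpath).text

def control_v4(xpath):
    driver.find_element(By.XPATH, xpath).click()
```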
avg_line_length: 22.909091 | max_line_length: 64 | alphanum_fraction: 0.746032

hexsha: 32bf3976675381c9f116e4719e534ba7fc7c09b0 | size: 726 | ext: py | lang: Python
max_stars_repo_path: docs/demos/multi-page-example1/app.py | max_stars_repo_name: johnkangw/dash-labs | max_stars_repo_head_hexsha: 6c34eba81faf1cb0cfd79961e54673326639d13a | max_stars_repo_licenses: ["MIT"] | max_stars_count: 110 | max_stars_repo_stars_event_min_datetime: 2021-04-16T14:41:54.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-24T22:29:41.000Z
max_issues_repo_path: docs/demos/multi-page-example1/app.py | max_issues_repo_name: johnkangw/dash-labs | max_issues_repo_head_hexsha: 6c34eba81faf1cb0cfd79961e54673326639d13a | max_issues_repo_licenses: ["MIT"] | max_issues_count: 59 | max_issues_repo_issues_event_min_datetime: 2021-04-16T10:42:34.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-21T18:43:25.000Z
max_forks_repo_path: docs/demos/multi-page-example1/app.py | max_forks_repo_name: johnkangw/dash-labs | max_forks_repo_head_hexsha: 6c34eba81faf1cb0cfd79961e54673326639d13a | max_forks_repo_licenses: ["MIT"] | max_forks_count: 28 | max_forks_repo_forks_event_min_datetime: 2021-04-16T16:26:32.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-28T17:32:42.000Z
content:
import dash
import dash_labs as dl
import dash_bootstrap_components as dbc
app = dash.Dash(
__name__, plugins=[dl.plugins.pages], external_stylesheets=[dbc.themes.BOOTSTRAP]
)
navbar = dbc.NavbarSimple(
dbc.DropdownMenu(
[
dbc.DropdownMenuItem(page["name"], href=page["path"])
for page in dash.page_registry.values()
if page["module"] != "pages.not_found_404"
],
nav=True,
label="More Pages",
),
brand="Multi Page App Plugin Demo",
color="primary",
dark=True,
className="mb-2",
)
app.layout = dbc.Container(
[navbar, dl.plugins.page_container],
fluid=True,
)
if __name__ == "__main__":
app.run_server(debug=True)
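The navbar above is built from `dash.page_registry`, which the `dl.plugins.pages` plugin populates from modules in a `pages/` folder. A sketch of what such a page module might look like; the `register_page` call follows the pattern shown in the dash-labs pages documentation, but treat the exact name and signature as an assumption and check it against the installed version:

```python
# pages/home.py -- hypothetical page module for the app above.
import dash
from dash import html

# Assumed helper exposed while the pages plugin is active.
dash.register_page(__name__, path="/", name="Home")

layout = html.Div("Hello from the home page")
```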
avg_line_length: 22 | max_line_length: 85 | alphanum_fraction: 0.632231

hexsha: 68ec6ab1b86308958861c497b2233ee300fd9f62 | size: 1,476 | ext: py | lang: Python
max_stars_repo_path: venv/lib/python3.8/site-packages/vsts/task_agent/v4_1/models/task_agent_queue.py | max_stars_repo_name: amcclead7336/Enterprise_Data_Science_Final | max_stars_repo_head_hexsha: ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | max_stars_repo_licenses: ["Unlicense", "MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: venv/lib/python3.8/site-packages/vsts/task_agent/v4_1/models/task_agent_queue.py | max_issues_repo_name: amcclead7336/Enterprise_Data_Science_Final | max_issues_repo_head_hexsha: ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | max_issues_repo_licenses: ["Unlicense", "MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: venv/lib/python3.8/site-packages/vsts/task_agent/v4_1/models/task_agent_queue.py | max_forks_repo_name: amcclead7336/Enterprise_Data_Science_Final | max_forks_repo_head_hexsha: ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | max_forks_repo_licenses: ["Unlicense", "MIT"] | max_forks_count: 2 | max_forks_repo_forks_event_min_datetime: 2021-05-23T16:46:31.000Z | max_forks_repo_forks_event_max_datetime: 2021-05-26T23:51:09.000Z
content:
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class TaskAgentQueue(Model):
"""TaskAgentQueue.
:param id: Id of the queue
:type id: int
:param name: Name of the queue
:type name: str
:param pool: Pool reference for this queue
:type pool: :class:`TaskAgentPoolReference <task-agent.v4_1.models.TaskAgentPoolReference>`
:param project_id: Project Id
:type project_id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'},
'pool': {'key': 'pool', 'type': 'TaskAgentPoolReference'},
'project_id': {'key': 'projectId', 'type': 'str'}
}
def __init__(self, id=None, name=None, pool=None, project_id=None):
super(TaskAgentQueue, self).__init__()
self.id = id
self.name = name
self.pool = pool
self.project_id = project_id
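A minimal construction sketch for the model above; the field values are placeholders, not real Azure DevOps identifiers:

```python
# Placeholder values; pool is omitted and defaults to None.
queue = TaskAgentQueue(id=42, name="Default",
                       project_id="00000000-0000-0000-0000-000000000000")
print(queue.id, queue.name, queue.pool, queue.project_id)
```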
avg_line_length: 38.842105 | max_line_length: 96 | alphanum_fraction: 0.506098

hexsha: 5e113ae8641c3d2b266c50232cea695ddae35f25 | size: 4,630 | ext: py | lang: Python
max_stars_repo_path: nucanvas/nucanvas.py | max_stars_repo_name: JacoboSpain/symbolator | max_stars_repo_head_hexsha: a9b24e4d6cf2a5369cd2a4a9604025f1c9e2ecdb | max_stars_repo_licenses: ["MIT"] | max_stars_count: 124 | max_stars_repo_stars_event_min_datetime: 2017-06-07T15:08:26.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-21T06:10:32.000Z
max_issues_repo_path: nucanvas/nucanvas.py | max_issues_repo_name: JacoboSpain/symbolator | max_issues_repo_head_hexsha: a9b24e4d6cf2a5369cd2a4a9604025f1c9e2ecdb | max_issues_repo_licenses: ["MIT"] | max_issues_count: 13 | max_issues_repo_issues_event_min_datetime: 2017-12-21T07:18:07.000Z | max_issues_repo_issues_event_max_datetime: 2022-01-06T15:42:39.000Z
max_forks_repo_path: nucanvas/nucanvas.py | max_forks_repo_name: JacoboSpain/symbolator | max_forks_repo_head_hexsha: a9b24e4d6cf2a5369cd2a4a9604025f1c9e2ecdb | max_forks_repo_licenses: ["MIT"] | max_forks_count: 33 | max_forks_repo_forks_event_min_datetime: 2018-06-25T07:28:59.000Z | max_forks_repo_forks_event_max_datetime: 2022-02-16T11:59:39.000Z
content:
# -*- coding: utf-8 -*-
# Copyright © 2017 Kevin Thibedeau
# Distributed under the terms of the MIT license
from __future__ import print_function
from shapes import GroupShape, DrawStyle
class NuCanvas(GroupShape):
'''This is a clone of the Tk canvas subset used by the original Tcl implementation.
It implements an abstracted canvas that can render objects to different
backends other than just a Tk canvas widget.
'''
def __init__(self, surf):
GroupShape.__init__(self, surf, 0, 0, {})
self.markers = {}
def set_surface(self, surf):
self.surf = surf
def clear_shapes(self):
self.shapes = []
def _get_shapes(self, item=None):
# Filter shapes
if item is None or item == 'all':
shapes = self.shapes
else:
shapes = [s for s in self.shapes if s.is_tagged(item)]
return shapes
def render(self):
self.surf.render(self)
def add_marker(self, name, shape, ref=(0,0), orient='auto', units='stroke'):
self.markers[name] = (shape, ref, orient, units)
def bbox(self, item=None):
bx0 = 0
bx1 = 0
by0 = 0
by1 = 0
boxes = [s.bbox for s in self._get_shapes(item)]
boxes = list(zip(*boxes))
if len(boxes) > 0:
bx0 = min(boxes[0])
by0 = min(boxes[1])
bx1 = max(boxes[2])
by1 = max(boxes[3])
return [bx0, by0, bx1, by1]
def move(self, item, dx, dy):
for s in self._get_shapes(item):
s.move(dx, dy)
def tag_raise(self, item):
to_raise = self._get_shapes(item)
for s in to_raise:
self.shapes.remove(s)
self.shapes.extend(to_raise)
def addtag_withtag(self, tag, item):
for s in self._get_shapes(item):
s.addtag(tag)
def dtag(self, item, tag=None):
for s in self._get_shapes(item):
s.dtag(tag)
def draw(self, c):
'''Draw all shapes on the canvas'''
for s in self.shapes:
tk_draw_shape(s, c)
def delete(self, item):
for s in self._get_shapes(item):
self.shapes.remove(s)
if __name__ == '__main__':
from svg_backend import SvgSurface
from cairo_backend import CairoSurface
from shapes import PathShape
#surf = CairoSurface('nc.png', DrawStyle(), padding=5, scale=2)
surf = SvgSurface('nc.svg', DrawStyle(), padding=5, scale=2)
#surf.add_shape_class(DoubleRectShape, cairo_draw_DoubleRectShape)
nc = NuCanvas(surf)
nc.add_marker('arrow_fwd',
PathShape(((0,-4), (2,-1, 2,1, 0,4), (8,0), 'z'), fill=(0,0,0, 120), width=0),
(3.2,0), 'auto')
nc.add_marker('arrow_back',
PathShape(((0,-4), (-2,-1, -2,1, 0,4), (-8,0), 'z'), fill=(0,0,0, 120), width=0),
(-3.2,0), 'auto')
#
# nc.create_rectangle(5,5, 20,20, fill=(255,0,0,127))
# nc.create_rectangle(35,2, 40,60, width=2, fill=(255,0,0))
#
# nc.create_rectangle(65,35, 150,50, width=0, fill=(255,0,0))
# nc.create_oval(65,35, 150,50, width=2, fill=(0,100,255))
##
## nc.create_shape(DoubleRectShape, 10,80, 40,140, width=3, fill=(0,255,255))
# g = nc.create_group(30,20, angle=-30, scale=2)
# g.create_rectangle(0,0,40,40, fill=(100,100,200, 150), width=3, line_color=(0,0,0))
# g.create_rectangle(0,50,40,70, fill=(100,200,100), width=3)
#
# gbb = g.bbox
# nc.create_rectangle(*gbb, width=1)
# abox = (60,60, 120,100)
# nc.create_rectangle(*abox, line_color=(10,255,10), width=4)
# arc = nc.create_arc(*abox, start=-45, extent=170, width=4, line_color=(255,0,0), fill=(0,255,0,127))
# abb = arc.bbox
# nc.create_rectangle(*abb, width=1)
#
# nc.create_path([(14,14), (30,4), (150,-60, 190,110, 140,110), (20,120), 'z'], fill=(255,100,0,127), width=2)
#
# nc.create_path([(20,40), (30,70), (40,120, 60,50, 10), (60, 50, 80,90, 10), (80, 90, 150,89, 15),
# (150, 89), (130,20), 'z'], width=1)
nc.create_line(30,50, 200,100, width=5, line_color=(200,100,50,100), marker_start='arrow_back',
marker_end='arrow_fwd')
nc.create_rectangle(30,85, 60,105, width=1, line_color=(255,0,0))
nc.create_line(30,90, 60,90, width=2, marker_start='arrow_back',
marker_end='arrow_fwd')
nc.create_line(30,100, 60,100, width=2, marker_start='arrow_back',
marker_end='arrow_fwd', marker_adjust=1.0)
# ls.options['marker_start'] = 'arrow_back'
# ls.options['marker_end'] = 'arrow_fwd'
# ls.options['marker_adjust'] = 0.8
nc.create_oval(50-2,80-2, 50+2,80+2, width=0, fill=(255,0,0))
nc.create_text(50,80, text='Hello world', anchor='nw', font=('Helvetica', 14, 'normal'), text_color=(0,0,0), spacing=-8)
nc.create_oval(50-2,100-2, 50+2,100+2, width=0, fill=(255,0,0))
nc.create_text(50,100, text='Hello world', anchor='ne')
surf.draw_bbox = True
nc.render()
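A short usage sketch of NuCanvas outside the `__main__` demo, reusing only calls that already appear in the file above and assuming the SVG backend and flat module layout used there:

```python
# Sketch: build a canvas, add one shape, shift everything, render to SVG.
from nucanvas import NuCanvas        # assumed importable alongside the backends
from svg_backend import SvgSurface
from shapes import DrawStyle

surf = SvgSurface('demo.svg', DrawStyle(), padding=5, scale=2)
nc = NuCanvas(surf)
nc.create_rectangle(5, 5, 60, 40, fill=(200, 200, 200), width=1)
nc.move('all', 10, 5)    # shift every shape on the canvas
print(nc.bbox())         # overall bounding box [x0, y0, x1, y1]
nc.render()              # hand the shape list to the SVG backend
```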
avg_line_length: 29.490446 | max_line_length: 122 | alphanum_fraction: 0.631749

hexsha: af42272b02097c2b990c3d943801b0344d68dcc0 | size: 5,219 | ext: py | lang: Python
max_stars_repo_path: litex_boards/targets/linsn_rv901t.py | max_stars_repo_name: chmousset/litex-boards | max_stars_repo_head_hexsha: c081177d77f37a4ea6cff150d42a69bd6f0abbc2 | max_stars_repo_licenses: ["BSD-2-Clause"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: litex_boards/targets/linsn_rv901t.py | max_issues_repo_name: chmousset/litex-boards | max_issues_repo_head_hexsha: c081177d77f37a4ea6cff150d42a69bd6f0abbc2 | max_issues_repo_licenses: ["BSD-2-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: litex_boards/targets/linsn_rv901t.py | max_forks_repo_name: chmousset/litex-boards | max_forks_repo_head_hexsha: c081177d77f37a4ea6cff150d42a69bd6f0abbc2 | max_forks_repo_licenses: ["BSD-2-Clause"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
#!/usr/bin/env python3
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2019-2020 Florent Kermarrec <florent@enjoy-digital.fr>
# SPDX-License-Identifier: BSD-2-Clause
from migen import *
from litex.build.io import DDROutput
from litex_boards.platforms import linsn_rv901t
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from litex.soc.cores.clock import S6PLL
from litex.soc.cores.led import LedChaser
from litedram.modules import M12L64322A
from litedram.phy import GENSDRPHY
from liteeth.phy.s6rgmii import LiteEthPHYRGMII
# CRG ----------------------------------------------------------------------------------------------
class _CRG(Module):
def __init__(self, platform, sys_clk_freq):
self.rst = Signal()
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_sys_ps = ClockDomain(reset_less=True)
# # #
clk25 = platform.request("clk25")
self.submodules.pll = pll = S6PLL(speedgrade=-2)
self.comb += pll.reset.eq(self.rst)
pll.register_clkin(clk25, 25e6)
pll.create_clkout(self.cd_sys, sys_clk_freq)
pll.create_clkout(self.cd_sys_ps, sys_clk_freq, phase=90)
# SDRAM clock
self.specials += DDROutput(1, 0, platform.request("sdram_clock"), ClockSignal("sys_ps"))
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCCore):
def __init__(self, sys_clk_freq=int(75e6), with_ethernet=False, with_etherbone=False, eth_phy=0, with_led_chaser=True, **kwargs):
platform = linsn_rv901t.Platform()
# SoCCore ----------------------------------------------------------------------------------
SoCCore.__init__(self, platform, sys_clk_freq,
ident = "LiteX SoC on Linsn RV901T",
**kwargs)
# CRG --------------------------------------------------------------------------------------
self.submodules.crg = _CRG(platform, sys_clk_freq)
# SDR SDRAM --------------------------------------------------------------------------------
if not self.integrated_main_ram_size:
self.submodules.sdrphy = GENSDRPHY(platform.request("sdram"), sys_clk_freq)
self.add_sdram("sdram",
phy = self.sdrphy,
module = M12L64322A(sys_clk_freq, "1:1"),
l2_cache_size = kwargs.get("l2_size", 8192)
)
# Ethernet / Etherbone ---------------------------------------------------------------------
if with_ethernet or with_etherbone:
self.submodules.ethphy = LiteEthPHYRGMII(
clock_pads = self.platform.request("eth_clocks", eth_phy),
pads = self.platform.request("eth", eth_phy),
tx_delay = 0e-9)
if with_ethernet:
self.add_ethernet(phy=self.ethphy, with_timing_constraints=False)
if with_etherbone:
self.add_etherbone(phy=self.ethphy, with_timing_constraints=False)
# Timing Constraints.
platform.add_period_constraint(platform.lookup_request("eth_clocks", eth_phy).rx, 1e9/125e6)
platform.add_false_path_constraints(self.crg.cd_sys.clk, platform.lookup_request("eth_clocks", eth_phy).rx)
# Leds -------------------------------------------------------------------------------------
if with_led_chaser:
self.submodules.leds = LedChaser(
pads = platform.request_all("user_led"),
sys_clk_freq = sys_clk_freq)
# Build --------------------------------------------------------------------------------------------
def main():
from litex.soc.integration.soc import LiteXSoCArgumentParser
parser = LiteXSoCArgumentParser(description="LiteX SoC on Linsn RV901T")
target_group = parser.add_argument_group(title="Target options")
target_group.add_argument("--build", action="store_true", help="Build bitstream.")
target_group.add_argument("--load", action="store_true", help="Load bitstream.")
target_group.add_argument("--sys-clk-freq", default=75e6, help="System clock frequency.")
ethopts = target_group.add_mutually_exclusive_group()
ethopts.add_argument("--with-ethernet", action="store_true", help="Enable Ethernet support.")
ethopts.add_argument("--with-etherbone", action="store_true", help="Enable Etherbone support.")
target_group.add_argument("--eth-phy", default=0, type=int, help="Ethernet PHY (0 or 1).")
builder_args(parser)
soc_core_args(parser)
args = parser.parse_args()
soc = BaseSoC(
sys_clk_freq = int(float(args.sys_clk_freq)),
with_ethernet = args.with_ethernet,
with_etherbone = args.with_etherbone,
eth_phy = int(args.eth_phy),
**soc_core_argdict(args)
)
builder = Builder(soc, **builder_argdict(args))
builder.build(run=args.build)
if args.load:
prog = soc.platform.create_programmer()
prog.load_bitstream(builder.get_bitstream_filename(mode="sram"))
if __name__ == "__main__":
main()
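# Illustrative invocations (not from the original target; the flags are the
# ones defined by the argument parser above):
#   ./linsn_rv901t.py --build
#   ./linsn_rv901t.py --with-ethernet --eth-phy=1 --build --load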
| 42.778689
| 133
| 0.572907
|
d85bd687c8e2f1e02956de17e531058bd0966fab
| 204
|
py
|
Python
|
odoo-13.0/addons/l10n_pt/__init__.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | 1
|
2019-12-19T01:53:13.000Z
|
2019-12-19T01:53:13.000Z
|
odoo-13.0/addons/l10n_pt/__init__.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | null | null | null |
odoo-13.0/addons/l10n_pt/__init__.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (C) 2012 Thinkopen Solutions, Lda. All Rights Reserved
# http://www.thinkopensolutions.com.
| 34
| 74
| 0.730392
|
0f5b81b7ff27faad8e883183428f4b59597f4e25
| 858
|
py
|
Python
|
referral_system/users/tests/test_drf_views.py
|
todd-sudo/referral_system
|
a2220a780e7705f3f235ac5cb99f0b8802925300
|
[
"MIT"
] | null | null | null |
referral_system/users/tests/test_drf_views.py
|
todd-sudo/referral_system
|
a2220a780e7705f3f235ac5cb99f0b8802925300
|
[
"MIT"
] | 1
|
2022-03-24T17:24:11.000Z
|
2022-03-24T17:24:11.000Z
|
referral_system/users/tests/test_drf_views.py
|
todd-sudo/referral_system
|
a2220a780e7705f3f235ac5cb99f0b8802925300
|
[
"MIT"
] | null | null | null |
import pytest
from django.test import RequestFactory
from referral_system.users.api.views import UserViewSet
from referral_system.users.models import User
pytestmark = pytest.mark.django_db
class TestUserViewSet:
def test_get_queryset(self, user: User, rf: RequestFactory):
view = UserViewSet()
request = rf.get("/fake-url/")
request.user = user
view.request = request
assert user in view.get_queryset()
def test_me(self, user: User, rf: RequestFactory):
view = UserViewSet()
request = rf.get("/fake-url/")
request.user = user
view.request = request
response = view.me(request)
assert response.data == {
"username": user.username,
"name": user.name,
"url": f"http://testserver/api/users/{user.username}/",
}
| 25.235294
| 67
| 0.630536
|
ab08fa13ba5c2a327065b7ab4beedeed8c7dd107
| 293
|
py
|
Python
|
cart/urls.py
|
mmeerrccyy/eltexua_async
|
a5587317b790796bd072fa116ff7bba268643948
|
[
"CC0-1.0"
] | null | null | null |
cart/urls.py
|
mmeerrccyy/eltexua_async
|
a5587317b790796bd072fa116ff7bba268643948
|
[
"CC0-1.0"
] | 1
|
2021-05-31T22:14:41.000Z
|
2021-05-31T22:14:41.000Z
|
cart/urls.py
|
mmeerrccyy/eltexua_async
|
a5587317b790796bd072fa116ff7bba268643948
|
[
"CC0-1.0"
] | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
path('', views.CartDetail.as_view(), name='cart_detail'),
path('add/<int:product_id>/', views.CartAdd.as_view(), name='cart_add'),
path('remove/<int:product_id>/', views.CartRemove.as_view(), name='cart_remove'),
]
| 32.555556
| 85
| 0.68942
|
cdbb83abf68faae652de7d81c5417098baa1e4fb
| 19,935
|
py
|
Python
|
install/app_store/tk-framework-qtwidgets/v2.6.6/python/shotgun_fields/shotgun_field_delegate.py
|
JoanAzpeitia/lp_sg
|
e0ee79555e419dd2ae3a5f31e5515b3f40b22a62
|
[
"MIT"
] | null | null | null |
install/app_store/tk-framework-qtwidgets/v2.6.6/python/shotgun_fields/shotgun_field_delegate.py
|
JoanAzpeitia/lp_sg
|
e0ee79555e419dd2ae3a5f31e5515b3f40b22a62
|
[
"MIT"
] | null | null | null |
install/app_store/tk-framework-qtwidgets/v2.6.6/python/shotgun_fields/shotgun_field_delegate.py
|
JoanAzpeitia/lp_sg
|
e0ee79555e419dd2ae3a5f31e5515b3f40b22a62
|
[
"MIT"
] | 1
|
2020-02-15T10:42:56.000Z
|
2020-02-15T10:42:56.000Z
|
# Copyright (c) 2016 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import sgtk
from sgtk.platform.qt import QtCore, QtGui
views = sgtk.platform.current_bundle().import_module("views")
shotgun_globals = sgtk.platform.import_framework(
"tk-framework-shotgunutils", "shotgun_globals"
)
shotgun_model = sgtk.platform.import_framework(
"tk-framework-shotgunutils", "shotgun_model"
)
class ShotgunFieldDelegateGeneric(views.WidgetDelegate):
"""
A generic, model-agnostic, shotgun field widget delegate.
This class is designed to be used with any model that represents data that
can be stored in Shotgun fields.
The included subclass, ``ShotgunFieldDelegate``, is designed to work
specifically with ``ShotgunModel`` instances. For other model types use this
class and supply a ``field_data_role`` to this class constructor. The
default is ``QtCore.Qt.EditRole``.
"""
def __init__(self, sg_entity_type, field_name, display_class, editor_class,
view, bg_task_manager=None,
field_data_role=QtCore.Qt.EditRole):
"""
Constructor
:param sg_entity_type: Shotgun entity type
:type sg_entity_type: String
:param field_name: Shotgun field name
:type field_name: String
:param display_class: A shotgun field :class:`~PySide.QtGui.QWidget` to
display the field info
:param editor_class: A shotgun field :class:`~PySide.QtGui.QWidget` to
edit the field info
:param view: The parent view for this delegate
:type view: :class:`~PySide.QtGui.QWidget`
:param bg_task_manager: Optional Task manager. If this is not passed in
one will be created when the delegate widget is created.
:type bg_task_manager: :class:`~task_manager.BackgroundTaskManager`
:param int field_data_role: The data role that stores SG field data in
the model where this delegate is to be used.
"""
views.WidgetDelegate.__init__(self, view)
# The model role used to get/set values for editing the field widget
self._field_data_role = field_data_role
self._entity_type = sg_entity_type
self._field_name = field_name
self._display_class = display_class
self._editor_class = editor_class
self._bg_task_manager = bg_task_manager
@property
def field_data_role(self):
"""
The item role used to get and set data associated with the fields being
represented by this delegate.
"""
return self._field_data_role
def paint(self, painter, style_options, model_index):
"""
Paint method to handle all cells that are not being currently edited.
:param painter: The painter instance to use when painting
:param style_options: The style options to use when painting
:param model_index: The index in the data model that needs to be painted
"""
# let the base class do all the heavy lifting
super(ShotgunFieldDelegateGeneric, self).paint(
painter,
style_options,
model_index
)
# clear out the paint widget's contents to prevent it from showing in
# other places in the view (since the widget is shared)
widget = self._get_painter_widget(model_index, self.view)
widget.set_value(None)
def _create_widget(self, parent):
"""
Creates a widget to use for the delegate.
:param parent: QWidget to parent the widget to
:type parent: :class:`~PySide.QtGui.QWidget`
:returns: QWidget that will be used to paint grid cells in the view.
:rtype: :class:`~PySide.QtGui.QWidget`
"""
widget = self._display_class(
parent=parent,
entity_type=self._entity_type,
field_name=self._field_name,
bg_task_manager=self._bg_task_manager,
delegate=True,
)
if self._display_class == self._editor_class:
# display and edit classes are the same. we need to make sure
# we disable the editing so that the delegate isn't drawn in its
# edit state.
widget.enable_editing(False)
return widget
def sizeHint(self, style_options, model_index):
"""
Returns the size needed by the delegate to display the item specified by
``model_index``, taking into account the style information provided by
``style_options``.
Reimplemented from ``QStyledItemDelegate.sizeHint``
:param style_options: Style information for the item.
:type style_options: :class:`~PySide.QtGui.QStyleOptionViewItem`
:param model_index: The index of the item to return the size of.
:type model_index: :class:`~PySide.QtCore.QModelIndex`
:returns: size required by the delegate
:rtype: :class:`~PySide.QtCore.QSize`
"""
if not model_index.isValid():
return QtCore.QSize()
size_hint = QtCore.QSize()
painter_widget = self._get_painter_widget(model_index, self.view)
if painter_widget:
size_hint = painter_widget.size()
return size_hint
def _create_editor_widget(self, model_index, style_options, parent):
"""
Create an editor widget for the supplied model index.
:param model_index: The index of the item in the model to return a
widget for
:type model_index: :class:`~PySide.QtCore.QModelIndex`
:param style_options: Specifies the current Qt style options for this
index
:type style_options: :class:`~PySide.QtGui.QStyleOptionViewItem`
:param parent: The parent view that the widget should be parented to
:type parent: :class:`~PySide.QtGui.QWidget`
:returns: A QWidget to be used for editing the current index
:rtype: :class:`~PySide.QtGui.QWidget`
"""
# ensure the field is editable
if not shotgun_globals.field_is_editable(self._entity_type,
self._field_name):
return None
if not model_index.isValid():
return None
if not self._editor_class:
return None
widget = self._editor_class(
parent=parent,
entity_type=self._entity_type,
field_name=self._field_name,
bg_task_manager=self._bg_task_manager,
delegate=True,
)
if self._display_class == self._editor_class:
# display and edit classes are the same. we need to make sure
# we enable the editing
widget.enable_editing(True)
# auto fill the background color so that the display widget doesn't show
# behind.
widget.setAutoFillBackground(True)
return widget
def _on_before_paint(self, widget, model_index, style_options):
"""
Update the display widget with the value stored in the supplied model
index. The value is retrieved for the role supplied to the
``field_data_role`` argument supplied to the constructor.
:param widget: The QWidget (constructed in _create_widget()) which will
be used to paint the cell.
:param model_index: object representing the data of the object that is
about to be drawn.
:type model_index: :class:`~PySide.QtCore.QModelIndex`
:param style_options: Object containing specifics about the
view related state of the cell.
:type style_options: :class:`~PySide.QtGui.QStyleOptionViewItem`
"""
# make sure the display widget is populated with the correct data
self._set_widget_value(widget, model_index)
def setEditorData(self, editor, model_index):
"""
Sets the data to be displayed and edited by the editor from the data
model item specified by the model index.
:param editor: The editor widget.
:type editor: :class:`~PySide.QtGui.QWidget`
:param model_index: The index of the model to be edited.
:type model_index: :class:`~PySide.QtCore.QModelIndex`
"""
# make sure the editor widget is populated with the correct data
self._set_widget_value(editor, model_index)
def setModelData(self, editor, model, index):
"""
Gets data from the editor widget and stores it in the specified model at
the item index.
:param editor: The editor widget.
:type editor: :class:`~PySide.QtGui.QWidget`
:param model: The SG model where the data lives.
:type model: :class:`~PySide.QtCore.QAbstractItemModel`
:param index: The index of the model to be edited.
:type index: :class:`~PySide.QtCore.QModelIndex`
"""
src_index = _map_to_source(index)
if not src_index or not src_index.isValid():
# invalid index, do nothing
return
# compare the new/old values to see if there is a change
new_value = editor.get_value()
cur_value = src_index.data(self.field_data_role)
if cur_value == new_value:
# value didn't change. nothing to do here.
return
# attempt to set the new value in the model
successful = src_index.model().setData(
src_index, new_value, self.field_data_role)
if not successful:
bundle = sgtk.platform.current_bundle()
bundle.log_error(
"Unable to set model data for widget delegate: %s, %s" %
(self._entity_type, self._field_name)
)
def editorEvent(self, event, model, option, index):
"""
Handles mouse events on the editor.
:param event: The event that occurred.
:type event: :class:`~PySide.QtCore.QEvent`
:param model: The SG model where the data lives.
:type model: :class:`~PySide.QtCore.QAbstractItemModel`
:param option: Options for rendering the item.
:type option: :class:`~PySide.QtQui.QStyleOptionViewItem`
:param index: The index of the model to be edited.
:type index: :class:`~PySide.QtCore.QModelIndex`
:return: ``True``, if the event was handled, ``False`` otherwise.
:rtype: ``bool``
"""
# The primary use for this is labels displaying clickable links (entity,
# multi-entity, etc). By default, they're painted into the view via the
# delegate, so you can't interact with them. There were some suggestions
# online how to work around this that seemed really complex. This is a
# solution rob suggested which I tried and it seems to work... and is
# much simpler! Basically, detect a mouse click (release is all we have
# really) in the delegate, populate the underlying widget with the data
# from the index, then forward the event to the widget. The result is a
# simulation of clicking on the actual widget.
# Forward mouse clicks to the underlying display widget. This only kicks
# in if the editor widget isn't displayed or doesn't process a mouse
# event for some reason. If you're having trouble with editors
# disappearing, it may be because they can't receive focus or aren't
# handling a mouse click.
if event.type() == QtCore.QEvent.MouseButtonRelease:
self._forward_mouse_event(event, index)
return True
return False
def _forward_mouse_event(self, mouse_event, index):
"""
Forward the mouse event to the display widget to simulate
interacting with the widget. This is necessary since the delegate only
paints the widget in the view rather than being an actual widget
instance.
        :param mouse_event: The event that occurred on the delegate.
:type mouse_event: :class:`~PySide.QtCore.QEvent`
:param index: The model index that was acted on.
:type index: :class:`~PySide.QtCore.QModelIndex`
"""
# get the widget used to paint this index, populate it with the
# value for this index
widget = self._get_painter_widget(index, self.view)
self._set_widget_value(widget, index)
item_rect = self.view.visualRect(index)
# get the rect of the item in the view
widget.resize(item_rect.size())
# move the widget to 0, 0 so we know exactly where it is
widget.move(0, 0)
# map global mouse position to within item_rect
view_pos = self.view.viewport().mapFromGlobal(QtGui.QCursor.pos())
# calculate the offset from the item rect
widget_x = view_pos.x() - item_rect.x()
widget_y = view_pos.y() - item_rect.y()
# forward the mouse event to the display widget
forward_event = QtGui.QMouseEvent(
mouse_event.type(),
QtCore.QPoint(widget_x, widget_y),
mouse_event.button(),
mouse_event.buttons(),
mouse_event.modifiers(),
)
QtGui.QApplication.sendEvent(widget, forward_event)
def _set_widget_value(self, widget, model_index):
"""
Updates the supplied widget with data from the supplied model index.
:param widget: The widget to set the value for
:param model_index: The index of the model where the data comes from
:type model_index: :class:`~PySide.QtCore.QModelIndex`
"""
src_index = _map_to_source(model_index)
if not src_index or not src_index.isValid():
# invalid index, do nothing
return
value = src_index.data(self.field_data_role)
widget.set_value(shotgun_model.sanitize_qt(value))
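# Illustrative usage sketch (not part of the original module). It assumes a
# running Toolkit engine and two hypothetical shotgun field widget classes,
# MyDisplayWidget and MyEditorWidget, supplied by the caller.
def _attach_generic_delegate(view, column=0):
    """
    Attach a generic field delegate to one column of a plain Qt item view,
    reading and writing field data through the default QtCore.Qt.EditRole.
    """
    delegate = ShotgunFieldDelegateGeneric(
        "Shot",                # sg_entity_type
        "code",                # field_name
        MyDisplayWidget,       # hypothetical display widget class
        MyEditorWidget,        # hypothetical editor widget class
        view,
        field_data_role=QtCore.Qt.EditRole,
    )
    view.setItemDelegateForColumn(column, delegate)
    return delegate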
class ShotgunFieldDelegate(ShotgunFieldDelegateGeneric):
"""
A delegate for a given type of Shotgun field. This delegate is designed to
work with indexes from a ``ShotgunModel`` where the value of the field is
stored in the ``SG_ASSOCIATED_FIELD_ROLE`` role.
"""
def __init__(self, sg_entity_type, field_name, display_class, editor_class,
view, bg_task_manager=None):
"""
Constructor
:param sg_entity_type: Shotgun entity type
:type sg_entity_type: String
:param field_name: Shotgun field name
:type field_name: String
:param display_class: A shotgun field :class:`~PySide.QtGui.QWidget` to
display the field info
:param editor_class: A shotgun field :class:`~PySide.QtGui.QWidget` to
edit the field info
:param view: The parent view for this delegate
:type view: :class:`~PySide.QtGui.QWidget`
:param bg_task_manager: Optional Task manager. If this is not passed in
one will be created when the delegate widget is created.
:type bg_task_manager: :class:`~task_manager.BackgroundTaskManager`
"""
field_data_role = shotgun_model.ShotgunModel.SG_ASSOCIATED_FIELD_ROLE
super(ShotgunFieldDelegate, self).__init__(
sg_entity_type, field_name, display_class, editor_class, view,
bg_task_manager=bg_task_manager, field_data_role=field_data_role
)
def setModelData(self, editor, model, index):
"""
Gets data from the editor widget and stores it in the specified model at
the item index.
:param editor: The editor widget.
:type editor: :class:`~PySide.QtGui.QWidget`
:param model: The SG model where the data lives.
:type model: :class:`~PySide.QtCore.QAbstractItemModel`
:param index: The index of the model to be edited.
:type index: :class:`~PySide.QtCore.QModelIndex`
"""
src_index = _map_to_source(index)
if not src_index or not src_index.isValid():
# invalid index, do nothing
return
# compare the new/old values to see if there is a change
new_value = editor.get_value()
cur_value = src_index.data(self.field_data_role)
if cur_value == new_value:
# value didn't change. nothing to do here.
return
bundle = sgtk.platform.current_bundle()
# special case for image fields in the ShotgunModel. The SG model stores
# the image field in the first column. If the value has changed, set the
# icon value there.
if editor.get_field_name() == "image":
primary_item = src_index.model().item(src_index.row(), 0)
try:
if new_value:
# update the value locally in the model
primary_item.setIcon(QtGui.QIcon(new_value))
else:
primary_item.setIcon(QtGui.QIcon())
            except Exception as e:
bundle.log_error(
"Unable to set icon for widget delegate: %s" % (e,))
return
successful = src_index.model().setData(
src_index,
new_value,
self.field_data_role
)
if not successful:
bundle.log_error(
"Unable to set model data for widget delegate: %s, %s" %
(self._entity_type, self._field_name)
)
def _set_widget_value(self, widget, model_index):
"""
Updates the supplied widget with data from the supplied model index.
:param widget: The widget to set the value for
:param model_index: The index of the model where the data comes from
:type model_index: :class:`~PySide.QtCore.QModelIndex`
"""
src_index = _map_to_source(model_index)
if not src_index or not src_index.isValid():
# invalid index, do nothing
return
# special case for image fields in the ShotgunModel. The SG model has
# the ability to pre-query thumbnails for entities for efficiency. If
# this is the image field for an entity in the SG model, we can make use
# of the potentially pre-queried image available in the first column.
if widget.get_field_name() == "image":
primary_item = src_index.model().item(src_index.row(), 0)
icon = primary_item.icon()
if icon:
widget.set_value(icon.pixmap(QtCore.QSize(256, 256)))
return
value = src_index.data(self.field_data_role)
widget.set_value(shotgun_model.sanitize_qt(value))
def _map_to_source(idx, recursive=True):
"""
    Map the specified index to its source model. This can be done recursively
to map back through a chain of proxy models to the source model at the
beginning of the chain
:param idx: The index to map from
:param recursive: If true then the function will recurse up the model chain
until it finds an index belonging to a model that doesn't derive from
QAbstractProxyModel. If false then it will just return the index from
        the immediate parent model.
:returns: QModelIndex in the source model or the first model in the chain
that isn't a proxy model if recursive is True.
"""
src_idx = idx
while src_idx.isValid() and isinstance(
src_idx.model(), QtGui.QAbstractProxyModel):
src_idx = src_idx.model().mapToSource(src_idx)
if not recursive:
break
return src_idx
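# Illustrative sketch (not part of the original module): mapping an index back
# through a single proxy layer with plain Qt model classes, assuming the same
# Qt4-style class locations the module above relies on.
def _demo_map_to_source():
    source = QtGui.QStandardItemModel()
    source.appendRow(QtGui.QStandardItem("value"))
    proxy = QtGui.QSortFilterProxyModel()
    proxy.setSourceModel(source)
    # Recursing up the proxy chain should land on the original source model.
    return _map_to_source(proxy.index(0, 0)).model() is source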
| 38.116635
| 80
| 0.644545
|
b1b5061b51a56520eb8a3d66dfcfc605aefc7abd
| 5,750
|
py
|
Python
|
tests/test_change.py
|
mattj23/m-notes
|
1e5b039cd70d87c1665afc4e73c743a79fe7fd4f
|
[
"MIT"
] | null | null | null |
tests/test_change.py
|
mattj23/m-notes
|
1e5b039cd70d87c1665afc4e73c743a79fe7fd4f
|
[
"MIT"
] | 1
|
2021-02-11T06:02:15.000Z
|
2021-02-11T06:02:15.000Z
|
tests/test_change.py
|
mattj23/m-notes
|
1e5b039cd70d87c1665afc4e73c743a79fe7fd4f
|
[
"MIT"
] | null | null | null |
from copy import deepcopy
from typing import Dict
import pytest
from datetime import datetime as DateTime
from mnotes.notes.index import IndexBuilder, GlobalIndices
from mnotes.notes.markdown_notes import Note, ID_TIME_FORMAT, NoteBuilder
from tests.tools.file_system_mocks import TestFileSystemProvider
from tests.test_index import local_tz
import tests.tools.sample_data as sample
@pytest.fixture
def transact_fixture():
d1 = deepcopy(sample.INDEX_FOR_FIXERS)
d2 = deepcopy(sample.INDEX_WITH_MISSING_ATTRS)
d1.update(d2)
provider = TestFileSystemProvider(d1)
note_builder = NoteBuilder(provider, local_tz)
index_builder = IndexBuilder(provider, note_builder)
directory = {"alpha": {"path": "/alpha"}, "fix": {"path": "/fix"}}
master = GlobalIndices(index_builder, directory=directory)
return provider, index_builder, master
def but_for(provider: TestFileSystemProvider, *args) -> Dict:
d = deepcopy(provider.internal)
for a in args:
del d[a]
return d
def test_simple_transaction_doesnt_change_data_in_index(transact_fixture):
provider, index_builder, master = transact_fixture
copy = deepcopy(provider)
master.load_all()
t = master.create_empty_transaction()
f0 = "/alpha/note-00.md"
note = t.get_note_state(f0)
note.info.author = "Replaced Author"
t.add_change(f0, note)
assert master.by_id["20240102080135"].author == "Alice Allison"
assert index_builder.note_builder.load_note(f0).info.author == "Alice Allison"
assert but_for(provider) == but_for(copy)
def test_simple_transaction_fetches_updated_data(transact_fixture):
provider, index_builder, master = transact_fixture
master.load_all()
t = master.create_empty_transaction()
f0 = "/alpha/note-00.md"
note = t.get_note_state(f0)
note.info.author = "Replaced Author"
t.add_change(f0, note)
note_info2 = t.get_note_info_state(f0)
note2 = t.get_note_state(f0)
assert note_info2.author == "Replaced Author"
assert note2.info.author == "Replaced Author"
def test_simple_transaction_changing_fetched_does_not_update(transact_fixture):
provider, index_builder, master = transact_fixture
master.load_all()
t = master.create_empty_transaction()
f0 = "/alpha/note-00.md"
note = t.get_note_state(f0)
note.info.author = "Replaced Author"
t.add_change(f0, note)
note_info2 = t.get_note_info_state(f0)
note2 = t.get_note_state(f0)
note_info2.author = "changed"
note2.info.author = "changed"
assert t.get_note_info_state(f0).author == "Replaced Author"
assert t.get_note_state(f0).info.author == "Replaced Author"
def test_simple_transaction_applies(transact_fixture):
provider, index_builder, master = transact_fixture
copy = deepcopy(provider)
master.load_all()
t = master.create_empty_transaction()
f0 = "/alpha/note-00.md"
note = t.get_note_state(f0)
note.info.author = "Replaced Author"
t.add_change(f0, note)
master.apply_transaction(t)
master.load_all()
assert master.by_id["20240102080135"].author == "Replaced Author"
assert but_for(copy, f0) == but_for(provider, f0)
def test_staged_transaction_applies(transact_fixture):
provider, index_builder, master = transact_fixture
copy = deepcopy(provider)
master.load_all()
t = master.create_empty_transaction()
f0 = "/alpha/note-00.md"
note = t.get_note_state(f0)
note.info.author = "Replaced Author"
t.add_change(f0, note)
f1 = "/alpha/note-01.md"
note = t.get_note_state(f1)
note.info.id = "12345678901234"
note.info.title = "Replaced Title"
t.add_change(f1, note)
master.apply_transaction(t)
master.load_all()
assert master.by_id["20240102080135"].author == "Replaced Author"
assert master.by_id["12345678901234"].title == "Replaced Title"
assert "19990907012114" not in master.all_ids
assert but_for(copy, f0, f1) == but_for(provider, f0, f1)
def test_transaction_build_errors_on_conflict(transact_fixture):
provider, index_builder, master = transact_fixture
master.load_all()
t = master.create_empty_transaction()
f0 = "/alpha/note-00.md"
note = t.get_note_state(f0)
note.info.id = "12345678901234"
t.add_change(f0, note)
f1 = "/alpha/note-01.md"
note = t.get_note_state(f1)
note.info.id = "12345678901234"
note.info.title = "Replaced Title"
assert not t.verify(f1, note)
with pytest.raises(ValueError):
t.add_change(f1, note)
def test_heterogeneous_transaction_with_move(transact_fixture):
provider, index_builder, master = transact_fixture
copy = deepcopy(provider)
master.load_all()
t = master.create_empty_transaction()
f0 = "/fix/timestamp-none.md"
note = t.get_note_state(f0)
note.info.created = provider.file_c_time(f0)[0]
note.info.author = "Replaced Author"
fr = "/alpha/note-renamed.md"
note.info.file_path = fr
t.add_change(f0, note)
f1 = "/alpha/note-01.md"
note = t.get_note_state(f1)
note.info.id = "12345678901234"
note.info.title = "Replaced Title"
t.add_change(f1, note)
note = t.get_note_state(f0)
note.info.id = note.info.created.strftime(ID_TIME_FORMAT)
t.add_change(f0, note)
master.apply_transaction(t)
master.load_all()
assert master.by_id["12345678901234"].title == "Replaced Title"
assert "19990907012114" not in master.all_ids
assert f0 not in master.by_path
assert master.by_path[fr].author == "Replaced Author"
assert master.by_path[fr].id == "20150430174927"
assert master.by_path[fr].created == DateTime(2015, 4, 30, 17, 49, 27)
assert but_for(copy, f0, f1) == but_for(provider, f1, fr)
| 31.593407
| 82
| 0.713565
|
c29faba1e2b96a2628ea3b6d0cf31355fc0010cf
| 2,780
|
py
|
Python
|
rfd.py
|
ebraminio/DeltaBot
|
14d427ca644c4e842f72802a0e07adcaecda7097
|
[
"CC0-1.0"
] | null | null | null |
rfd.py
|
ebraminio/DeltaBot
|
14d427ca644c4e842f72802a0e07adcaecda7097
|
[
"CC0-1.0"
] | null | null | null |
rfd.py
|
ebraminio/DeltaBot
|
14d427ca644c4e842f72802a0e07adcaecda7097
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# licensed under CC-Zero: https://creativecommons.org/publicdomain/zero/1.0
import pywikibot
import re
site = pywikibot.Site('wikidata', 'wikidata')
repo = site.data_repository()
page = pywikibot.Page(site, 'Wikidata:Requests for deletions')
cntDone = 0
cntNotDone = 0
force = False
content = re.findall(r'(?:(?<!=)==([^=]+)==(?!=))?([\s\S]+?(?=$|(?<!=)==[^=]+==(?!=)))', page.get())
for i in range(len(content)):
try:
content[i] = list(map(str.strip, list(content[i])))
res = re.search(r'(Q\d+)', content[i][0])
if res:
entity = pywikibot.ItemPage(repo, res.group(1))
else:
res = re.search(r'(Lexeme:L\d+)', content[i][0])
if res:
entity = pywikibot.Page(repo, res.group(1)) # T189321
if res:
if any(x in content[i][1] for x in ('{{done', '{{deleted', '{{not done', '{{notdone', '{{not deleted', '{{merged')):
continue
if not entity.exists() and not entity.isRedirectPage():
for m in site.logevents(logtype='delete', page=entity, total=1):
content[i][1] += u'\n: {{{{deleted|admin={}}}}} --~~~~'.format(m.user())
cntDone += 1
elif entity.isRedirectPage() and entity.getRedirectTarget().exists():
content[i][1] += (u'\n: {{{{done}}}} Redirect created by [[User:{}]], you can do it ' +
u'[[Special:MyLanguage/Help:Merge|yourself]] next time. --~~~~').format(entity.userName())
cntDone += 1
else:
if '{{on hold' not in content[i][1]:
refs = list(entity.backlinks(followRedirects=False, filterRedirects=False, namespaces=[0, 120], total=12))
numberOfRefs = len(refs)
if entity in refs:
numberOfRefs -= 1
if numberOfRefs > 0:
force = True
content[i][1] += u'\n: {{{{on hold}}}} This item is linked from {}{} other{}. --~~~~'.format(
min(numberOfRefs, 10), '+' if numberOfRefs > 10 else '', 's' if numberOfRefs > 1 else '')
cntNotDone += 1
except:
pass
text = ''
for section in content:
if section[0] != '':
text += u'== {} ==\n\n'.format(section[0])
text += section[1]+'\n\n'
if cntDone > 0 or force:
comment = 'Bot: marking {} requests as done ({} unactioned requests)'.format(cntDone, cntNotDone)
page.put(text, comment=comment, minorEdit=False)
statspage = pywikibot.Page(site, 'User:BeneBot*/RfD-stats')
statspage.put(cntNotDone, comment='Updating stats: '+str(cntNotDone), minorEdit=False)
| 42.769231
| 128
| 0.526619
|
0e5ed14801e703a45e564f269c28a1f8b34016dc
| 935
|
py
|
Python
|
scripts/rewards/rewards_utils.py
|
lookfirst/badger-system
|
8362bcbf00abf51d24e0ba67b1b30c301ca34e40
|
[
"MIT"
] | null | null | null |
scripts/rewards/rewards_utils.py
|
lookfirst/badger-system
|
8362bcbf00abf51d24e0ba67b1b30c301ca34e40
|
[
"MIT"
] | null | null | null |
scripts/rewards/rewards_utils.py
|
lookfirst/badger-system
|
8362bcbf00abf51d24e0ba67b1b30c301ca34e40
|
[
"MIT"
] | null | null | null |
import time
from brownie import *
from config.badger_config import badger_config
from rich.console import Console
from scripts.systems.badger_system import connect_badger
from config.rewards_config import rewards_config
from assistant.rewards.rewards_assistant import fetch_current_rewards_tree, run_action
console = Console()
def fetch_rewards_preconditions(badger):
print("Run at", int(time.time()))
# Fetch the appropriate file
currentRewards = fetch_current_rewards_tree(badger)
lastClaimEnd = int(currentRewards["endBlock"])
startBlock = lastClaimEnd + 1
# Claim at current block
endBlock = chain.height
# Sanity check: Ensure start block is not too far in the past
assert startBlock > endBlock - rewards_config.maxStartBlockAge
# Sanity check: Ensure start block is not too close to end block
print("Claim Section", startBlock, endBlock)
return (startBlock, endBlock)
| 30.16129
| 86
| 0.768984
|
4c7670376346521392937121b74e940ea309a90e
| 364
|
py
|
Python
|
tele_weather_bot/weather/configuration.py
|
herzog0/TeleWeatherBot
|
f6b351930704f50cce129275a9f904e339c7cbfd
|
[
"MIT"
] | null | null | null |
tele_weather_bot/weather/configuration.py
|
herzog0/TeleWeatherBot
|
f6b351930704f50cce129275a9f904e339c7cbfd
|
[
"MIT"
] | null | null | null |
tele_weather_bot/weather/configuration.py
|
herzog0/TeleWeatherBot
|
f6b351930704f50cce129275a9f904e339c7cbfd
|
[
"MIT"
] | null | null | null |
"""
Some small changes to the configuration
"""
from pyowm.caches.lrucache import LRUCache
from pyowm.weatherapi25.weathercoderegistry import WeatherCodeRegistry
from pyowm.weatherapi25.configuration25 import *
# use a simple LRU cache to try to reduce the number of requests to OWM
cache = LRUCache(cache_max_size=50)
# translation to Portuguese
language = 'pt'
| 22.75
| 70
| 0.807692
|
fd93a8bd1edc592a2d8dabb4163927723903d439
| 3,317
|
py
|
Python
|
agents/pathfinder.py
|
AnupNair08/Genetic-Algorithms
|
b24534beee99b841655388e7a74e582a02ff895b
|
[
"MIT"
] | null | null | null |
agents/pathfinder.py
|
AnupNair08/Genetic-Algorithms
|
b24534beee99b841655388e7a74e582a02ff895b
|
[
"MIT"
] | null | null | null |
agents/pathfinder.py
|
AnupNair08/Genetic-Algorithms
|
b24534beee99b841655388e7a74e582a02ff895b
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
populationSize = int(input('Enter population size:\n'))
generations = 200
# Hyperparameters for mutation and crossover
CROSS_RATE = 0.7
MUTATE_RATE = 0.001
# The state space consisting of a 2D grid with start point end point and obstacle
class environment():
def __init__(self, start, end, obs):
self.start = start
self.end = end
self.obs = obs
def plotenv(self):
plt.scatter(*self.start)
plt.scatter(*self.end)
plt.plot([self.obs[0][0], self.obs[1][0]],
[self.obs[0][1], self.obs[1][1]])
plt.ion()
# plt.show()
# Normalise the population: map the 0/1 genes to steps of -0.25/+0.25 and accumulate them into x/y paths from the start point
def normPop(population, start, region):
population = (population - 0.5) / 2
population[:, 0], population[:, region] = start[0], start[1]
x = np.cumsum(population[:, region:], axis=1)
y = np.cumsum(population[:, :region], axis=1)
return x, y
def fitness(x, y, goal, obs):
    # Euclidean distance from each path's endpoint to the goal, normalised into (0, 1]
dist = ((x[:, -1] - goal[0])**2 + (y[:, -1] - goal[1])**2)**0.5
score = np.power(1/(1+dist), 2)
points = (x > obs[0][0] - 0.5) & (x < obs[1][0] + 0.5)
y_values = np.where(points, y, np.zeros_like(y) - 100)
# Avoiding obstacles and assigning low value to crossing population
bad_lines = ((y_values > obs[0][1]) & (y_values < obs[1][1])).max(axis=1)
score[bad_lines] = 1e-6
return score
# Randomly choose fit individuals from the population
def select(population, fitness):
ind = np.random.choice(np.arange(
populationSize), size=populationSize, replace=True, p=fitness/fitness.sum())
return population[ind]
# Crossover with a parent and a random fit individual
def crossover(parent, population, size):
if np.random.rand() < CROSS_RATE:
i = np.random.randint(0, populationSize, size=1)
        cross_points = np.random.randint(0, 2, size).astype(bool)
parent[cross_points] = population[i, cross_points]
return parent
# Mutate the gene with a random bit
def mutate(child, size):
for i in range(size):
if np.random.rand() < MUTATE_RATE:
child[i] = np.random.randint(2)
return child
if __name__ == "__main__":
print('Path Finder simulation')
    region = 100  # Region length to be covered by the lines
start = [0, 0]
goal = [1, 8]
obs = [[1.5, -2.5], [1.5, 5]]
# Initialize the population and the environment
env = environment(start, goal, obs)
population = np.random.randint(2, size=(populationSize, region*2))
# Fitness -> Selection -> crossover -> mutate
for i in range(generations):
x, y = normPop(population, [0, 0], region)
fitnessVal = fitness(x, y, goal, obs)
population = select(population, fitnessVal)
for individual in population:
child = crossover(individual, population, region*2)
child = mutate(child, region*2)
individual[:] = child
print("Generation : {} | Fitness : {}".format(i, np.argmax(fitnessVal)))
plt.cla()
env.plotenv()
plt.plot(x.T, y.T, c="r")
plt.xlim((-5, 10))
plt.ylim((-5, 10))
plt.pause(0.001)
plt.ioff()
plt.show()
| 30.712963
| 84
| 0.614712
|
36adc320365ff3ce80837f057d3eca4cb3e4ec82
| 1,120
|
py
|
Python
|
azure-mgmt-datalake-store/azure/mgmt/datalake/store/models/update_encryption_config.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 4
|
2016-06-17T23:25:29.000Z
|
2022-03-30T22:37:45.000Z
|
azure-mgmt-datalake-store/azure/mgmt/datalake/store/models/update_encryption_config.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 54
|
2016-03-25T17:25:01.000Z
|
2018-10-22T17:27:54.000Z
|
azure-mgmt-datalake-store/azure/mgmt/datalake/store/models/update_encryption_config.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 3
|
2016-05-03T20:49:46.000Z
|
2017-10-05T21:05:27.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class UpdateEncryptionConfig(Model):
"""The encryption configuration used to update a user managed Key Vault key.
:param key_vault_meta_info: The updated Key Vault key to use in user
managed key rotation.
:type key_vault_meta_info:
~azure.mgmt.datalake.store.models.UpdateKeyVaultMetaInfo
"""
_attribute_map = {
'key_vault_meta_info': {'key': 'keyVaultMetaInfo', 'type': 'UpdateKeyVaultMetaInfo'},
}
def __init__(self, key_vault_meta_info=None):
super(UpdateEncryptionConfig, self).__init__()
self.key_vault_meta_info = key_vault_meta_info
| 36.129032
| 93
| 0.640179
|
53a15bc66c71182bc2a76a13d69af186bf3512ae
| 5,556
|
py
|
Python
|
jupyterlab_translate/api.py
|
goanpeca/jupyterlab-language
|
5db3231ce3f47fb7cd6a6f7ad5d49acffe5c0173
|
[
"BSD-3-Clause"
] | null | null | null |
jupyterlab_translate/api.py
|
goanpeca/jupyterlab-language
|
5db3231ce3f47fb7cd6a6f7ad5d49acffe5c0173
|
[
"BSD-3-Clause"
] | null | null | null |
jupyterlab_translate/api.py
|
goanpeca/jupyterlab-language
|
5db3231ce3f47fb7cd6a6f7ad5d49acffe5c0173
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""
API interface.
"""
import os
import shutil
from pathlib import Path
from typing import List
from typing import Union
from .constants import EXTENSIONS_FOLDER
from .constants import JUPYTERLAB
from .constants import LANG_PACKS_FOLDER
from .constants import LC_MESSAGES
from .constants import LOCALE_FOLDER
from .converters import convert_catalog_to_json
from .utils import check_locale
from .utils import compile_to_mo
from .utils import compile_translations
from .utils import create_new_language_pack
from .utils import extract_translations
from .utils import update_translations
def check_locales(locales: List[str]):
"""
Check if a given list of locale values is valid.
Raises an exception if an invalid locale value is found.
Args:
locales: List of locales
Raises:
ValueError: if the local is not valid.
"""
for locale in locales:
if not check_locale(locale):
raise ValueError(f"Invalid locale '{locale}'".format(locale=locale))
def normalize_project(project: str) -> str:
"""
Normalize a project name
Args:
project: project name
Returns:
The normalized project name
"""
return project.lower().replace("-", "_")
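# Example (illustrative): normalize_project("JupyterLab-Git") returns "jupyterlab_git".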
def extract_package(package_repo_dir, project, merge: bool = True):
"""
    Extract translatable strings from a package repository into ``<package_repo_dir>/<project>``.
"""
project = normalize_project(project)
output_dir = package_repo_dir / project
if not os.path.isdir(output_dir):
raise Exception(
"Output dir `{output_dir}` not found!".format(output_dir=output_dir)
)
extract_translations(package_repo_dir, output_dir, project, merge)
def update_package(package_repo_dir, project, locales):
"""
    Update the existing translation catalogs of a package for the given locales.
"""
if locales:
check_locales(locales)
project = normalize_project(project)
output_dir = package_repo_dir / project
if not os.path.isdir(output_dir):
raise Exception(
"Output dir `{output_dir}` not found!".format(output_dir=output_dir)
)
update_translations(package_repo_dir, output_dir, project, locales)
def compile_package(package_repo_dir, project, locales):
"""
    Compile the package's translation catalogs to JSON and MO files.
"""
if locales:
check_locales(locales)
project = normalize_project(project)
output_dir = package_repo_dir / project
po_paths = compile_translations(output_dir, project, locales)
for _, po_path in po_paths.items():
output_path = po_path.parent
convert_catalog_to_json(po_path, output_path, project)
compile_to_mo(po_path)
def extract_language_pack(
package_repo_dir, language_packs_repo_dir, project, merge: bool = True
) -> None:
"""
Args:
package_repo_dir: Package folder to extract translation from
language_packs_repo_dir: Directory for POT files
project: project name
merge: Merge with existing POT file
"""
project = normalize_project(project)
if project == JUPYTERLAB:
output_dir = os.path.join(language_packs_repo_dir, project)
else:
output_dir = os.path.join(language_packs_repo_dir, EXTENSIONS_FOLDER, project)
os.makedirs(output_dir, exist_ok=True)
extract_translations(package_repo_dir, output_dir, project, merge)
def update_language_pack(package_repo_dir, language_packs_repo_dir, project, locales):
"""
    Update the language pack translation catalogs for a project for the given locales.
"""
if locales:
check_locales(locales)
project = normalize_project(project)
if project == JUPYTERLAB:
output_dir = os.path.join(language_packs_repo_dir, project)
else:
output_dir = os.path.join(
language_packs_repo_dir, "jupyterlab_extensions", project
)
os.makedirs(output_dir, exist_ok=True)
update_translations(package_repo_dir, output_dir, project, locales)
def compile_language_pack(
language_packs_repo_dir: Union[Path, str], project: str, locales: List[str]
) -> None:
"""
    Compile a project's catalogs and move the resulting JSON and MO files into the corresponding language packs.
"""
language_packs_repo_dir = Path(language_packs_repo_dir)
if locales:
check_locales(locales)
project = normalize_project(project)
if project == JUPYTERLAB:
output_dir = language_packs_repo_dir / project
else:
output_dir = language_packs_repo_dir / EXTENSIONS_FOLDER / project
po_paths = compile_translations(output_dir, project, locales)
for locale, po_path in po_paths.items():
output_path = po_path.parent
json_path = convert_catalog_to_json(po_path, output_path, project)
mo_path = compile_to_mo(po_path)
# Move to language pack folder
language_packs_dir = language_packs_repo_dir / LANG_PACKS_FOLDER
pkg_name = f"jupyterlab-language-pack-{locale}".replace("_", "-")
locale_language_pack_dir = (
language_packs_dir / pkg_name / pkg_name.replace("-", "_")
)
# Check if it exists, otherwise create it
if not locale_language_pack_dir.is_dir():
create_new_language_pack(language_packs_dir, locale)
output_dir = (
locale_language_pack_dir
/ LOCALE_FOLDER
/ locale.replace("-", "_")
/ LC_MESSAGES
)
target_mo = output_dir / mo_path.name
if target_mo.exists():
target_mo.unlink()
mo_path.rename(target_mo)
target_json = output_dir / json_path.name
if target_json.exists():
target_json.unlink()
json_path.rename(target_json)
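# Illustrative end-to-end sketch (not part of the original module); the two
# repository paths below are placeholders.
def _example_workflow():
    package_repo = Path("/path/to/my-extension")
    language_packs_repo = Path("/path/to/language-packs")
    locales = ["fr_FR", "es_ES"]
    extract_language_pack(package_repo, language_packs_repo, "my-extension")
    update_language_pack(package_repo, language_packs_repo, "my-extension", locales)
    compile_language_pack(language_packs_repo, "my-extension", locales)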
| 28.346939
| 86
| 0.685565
|
c76bd6f2d3f1b43b74a64ba82121f547732bd895
| 6,574
|
py
|
Python
|
qiling/examples/doogie_8086_crack.py
|
mrTavas/owasp-fstm-auto
|
6e9ff36e46d885701c7419db3eca15f12063a7f3
|
[
"CC0-1.0"
] | 2
|
2021-05-05T12:03:01.000Z
|
2021-06-04T14:27:15.000Z
|
qiling/examples/doogie_8086_crack.py
|
mrTavas/owasp-fstm-auto
|
6e9ff36e46d885701c7419db3eca15f12063a7f3
|
[
"CC0-1.0"
] | null | null | null |
qiling/examples/doogie_8086_crack.py
|
mrTavas/owasp-fstm-auto
|
6e9ff36e46d885701c7419db3eca15f12063a7f3
|
[
"CC0-1.0"
] | 2
|
2021-05-05T12:03:09.000Z
|
2021-06-04T14:27:21.000Z
|
#!/usr/bin/env python3
#
# Cross Platform and Multi Architecture Advanced Binary Emulation Framework
#
import sys, curses, math, struct, string, time
sys.path.append("..")
from qiling import *
from qiling.const import *
from qiling.os.disk import QlDisk
from qiling.os.dos.utils import BIN2BCD
from struct import pack
# https://stackoverflow.com/questions/9829578/fast-way-of-counting-non-zero-bits-in-positive-integer
def CountBits(n):
n = (n & 0x5555555555555555) + ((n & 0xAAAAAAAAAAAAAAAA) >> 1)
n = (n & 0x3333333333333333) + ((n & 0xCCCCCCCCCCCCCCCC) >> 2)
n = (n & 0x0F0F0F0F0F0F0F0F) + ((n & 0xF0F0F0F0F0F0F0F0) >> 4)
n = (n & 0x00FF00FF00FF00FF) + ((n & 0xFF00FF00FF00FF00) >> 8)
n = (n & 0x0000FFFF0000FFFF) + ((n & 0xFFFF0000FFFF0000) >> 16)
n = (n & 0x00000000FFFFFFFF) + ((n & 0xFFFFFFFF00000000) >> 32) # This last & isn't strictly necessary.
return n
def ham(lhs: int, rhs: int):
return CountBits(lhs^rhs)
def calavghd(bs: bytes, sz: int):
groups = len(bs) // sz
hdsum = 0
seqs = [ bs[i*sz:(i+1)*sz] for i in range(groups)]
for i in range(groups-1):
seq1 = seqs[i]
seq2 = seqs[(i+1)%groups]
lc = 0
for j in range(sz):
lc += ham(seq1[j], seq2[j])
hdsum += ham(seq1[j], seq2[j])
return hdsum / groups, hdsum / groups / sz
def calavghdall(bs: bytes, maxsz: int):
r = []
for i in range(1, maxsz):
r.append((i, *calavghd(bs, i)))
r.sort(key=lambda x: x[2])
return r
# Implementation for https://trustedsignal.blogspot.com/2015/06/xord-play-normalized-hamming-distance.html
def guess_key_size(orig: bytes, maxsz=20):
avghd = calavghdall(orig, maxsz)
gcd12 = math.gcd(avghd[0][0], avghd[1][0])
gcd13 = math.gcd(avghd[0][0], avghd[2][0])
gcd23 = math.gcd(avghd[1][0], avghd[2][0])
if gcd12 != 1:
if gcd12 == gcd13 and gcd12 == gcd23:
if gcd12 in [t[0] for t in avghd[:5]]:
if gcd12 == avghd[0][0] or gcd12 == avghd[0][1]:
return gcd12
return avghd[0][0]
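# Illustrative self-test (not part of the original script): build a toy
# repeating-key XOR ciphertext and check that the heuristic above recovers
# the key length; it should ideally return 6 for this input.
def _demo_guess_key_size():
    plaintext = b"the quick brown fox jumps over the lazy dog " * 8
    key = b"secret"
    cipher = bytes(p ^ key[i % len(key)] for i, p in enumerate(plaintext))
    return guess_key_size(cipher)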
def is_all_printable(bs: bytes):
for b in bs:
if chr(b) not in string.printable:
return False
return True
def countchar(bs: bytes):
d = {}
for ch in bs:
if ch not in d:
d[ch] = 0
d[ch] += 1
r = [(chr(k), v) for k, v in d.items()]
r.sort(key=lambda x: x[1], reverse=True)
return r
def cal_count_for_seqs(seqs: dict):
seqs_keys={}
for seq in seqs:
seqs_keys[seq] = {}
for ch in range(0x20, 0x7E+1):
xored = bytes([b^ch for b in seq])
if not is_all_printable(xored):
continue
count = countchar(xored)
seqs_keys[seq][ch] = count
return seqs_keys
def search_possible_key(seqs: dict, seqs_keys: dict, max_occur=3):
keys = set()
cached = {}
def _impl(seq_idx: bytes, repeated: int, key: str):
if seq_idx == len(seqs):
keys.add(key)
return
if repeated not in cached[seq_idx]:
return
for ch in cached[seq_idx][repeated]:
_impl(seq_idx + 1, repeated, key + bytes([ch]))
return
for idx, seq in enumerate(seqs):
cached[idx] = {}
for ch, count in seqs_keys[seq].items():
for tp in count[:max_occur]:
if ord(tp[0]) not in cached[idx]:
cached[idx][ord(tp[0])] = []
cached[idx][ord(tp[0])].append(ch)
for i in range(0x20, 0x7E+1):
_impl(0, i, b"")
return keys
def echo_key(ql: Qiling, key):
# Note: In most cases, users are not supposed to use `ql.os.stdscr`
# directly. The hack here is to show the corresponding key.
stdscr = ql.os.stdscr
y, _ = stdscr.getmaxyx()
stdscr.addstr(y-2, 0, f"Current key: {key}")
stdscr.refresh()
def show_once(ql: Qiling, key):
klen = len(key)
ql.reg.ax = klen
ql.mem.write(0x87F4, key)
    # Partial execution to skip input reading
ql.run(begin=0x801B, end=0x803d)
echo_key(ql, key)
time.sleep(1)
# In this stage, we show every key.
def third_stage(keys):
# To setup terminal again, we have to restart the whole program.
ql = Qiling(["rootfs/8086/doogie/doogie.DOS_MBR"],
"rootfs/8086",
console=False)
ql.add_fs_mapper(0x80, QlDisk("rootfs/8086/doogie/doogie.DOS_MBR", 0x80))
ql.set_api((0x1a, 4), set_required_datetime, QL_INTERCEPT.EXIT)
hk = ql.hook_code(stop, begin=0x8018, end=0x8018)
ql.run()
ql.hook_del(hk)
# Snapshot API.
ctx = ql.save()
for key in keys:
show_once(ql, key)
ql.restore(ctx)
# In this stage, we crack the encrypted buffer.
def second_stage(ql: Qiling):
data = bytes(read_until_zero(ql, 0x8809))
key_size = guess_key_size(data) # Should be 17
seqs = []
for i in range(key_size):
seq = b""
j = i
while j < len(data):
seq += bytes([data[j]])
j += key_size
seqs.append(seq)
seqs_keys = cal_count_for_seqs(seqs)
keys = search_possible_key(seqs, seqs_keys)
return keys
def read_until_zero(ql: Qiling, addr):
buf = b""
ch = -1
while ch != 0:
ch = ql.mem.read(addr, 1)[0]
buf += pack("B", ch)
addr += 1
return buf
def set_required_datetime(ql: Qiling):
ql.log.info("Setting Feburary 06, 1990")
ql.reg.ch = BIN2BCD(19)
ql.reg.cl = BIN2BCD(1990%100)
ql.reg.dh = BIN2BCD(2)
ql.reg.dl = BIN2BCD(6)
def stop(ql, addr, data):
ql.emu_stop()
# In this stage, we get the encrypted data which xored with the specific date.
def first_stage():
ql = Qiling(["rootfs/8086/doogie/doogie.DOS_MBR"],
"rootfs/8086",
console=False)
ql.add_fs_mapper(0x80, QlDisk("rootfs/8086/doogie/doogie.DOS_MBR", 0x80))
# Doogie suggests that the datetime should be 1990-02-06.
ql.set_api((0x1a, 4), set_required_datetime, QL_INTERCEPT.EXIT)
# A workaround to stop the program.
hk = ql.hook_code(stop, begin=0x8018, end=0x8018)
ql.run()
ql.hook_del(hk)
return ql
if __name__ == "__main__":
ql = first_stage()
# resume terminal
curses.endwin()
keys = second_stage(ql)
for key in keys:
print(f"Possible key: {key}")
# The key of this challenge is not unique. The real
# result depends on the last ascii art.
print("Going to try every key.")
time.sleep(3)
third_stage(keys)
# resume terminal
curses.endwin()
| 31.304762
| 107
| 0.596897
|
fea0ab12a344493c6c8f48a36d83ad166b39ad18
| 856
|
py
|
Python
|
tests/__init__.py
|
JhonJYJ/Marvel
|
0f367a0ba00d76090cb6d766b61b0c9c364ff501
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
JhonJYJ/Marvel
|
0f367a0ba00d76090cb6d766b61b0c9c364ff501
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
JhonJYJ/Marvel
|
0f367a0ba00d76090cb6d766b61b0c9c364ff501
|
[
"MIT"
] | null | null | null |
from doctest import DocTestSuite
from unittest import TestSuite
from unittest import TextTestRunner
import arcade
def load_tests(loader=None, tests=None, pattern=None):
suite = TestSuite()
suite.addTests(DocTestSuite('arcade.draw_commands'))
suite.addTests(DocTestSuite('arcade.buffered_draw_commands'))
suite.addTests(DocTestSuite('arcade.window_commands'))
suite.addTests(DocTestSuite('arcade.geometry'))
suite.addTests(DocTestSuite('arcade.sprite'))
suite.addTests(DocTestSuite('arcade.sprite_list'))
suite.addTests(DocTestSuite('arcade.application'))
suite.addTests(DocTestSuite('arcade.sound'))
suite.addTests(DocTestSuite('arcade.physics_engines'))
suite.addTests(DocTestSuite('arcade.decorator_support'))
return suite
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(load_tests())
| 32.923077
| 65
| 0.766355
|
ca34f008acf0929efc271561b48547022559811e
| 391
|
py
|
Python
|
AB_site/wsgi.py
|
john-fitz/Address_Book
|
405f21adee592d6fbe6096fbc20403a561fc5be4
|
[
"Unlicense"
] | null | null | null |
AB_site/wsgi.py
|
john-fitz/Address_Book
|
405f21adee592d6fbe6096fbc20403a561fc5be4
|
[
"Unlicense"
] | null | null | null |
AB_site/wsgi.py
|
john-fitz/Address_Book
|
405f21adee592d6fbe6096fbc20403a561fc5be4
|
[
"Unlicense"
] | null | null | null |
"""
WSGI config for AB_site project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'AB_site.settings')
application = get_wsgi_application()
| 23
| 78
| 0.785166
|
d4513950fe59aa77e90faa1a93fa6a84219f5b19
| 1,263
|
py
|
Python
|
test_project/frac/app/models.py
|
jmichalicek/django-fractions
|
693be84e14ff7ebdf2165692e4a98f2723e9070f
|
[
"BSD-3-Clause"
] | 4
|
2016-02-25T05:20:23.000Z
|
2019-06-25T00:34:40.000Z
|
test_project/frac/app/models.py
|
jmichalicek/django-fractions
|
693be84e14ff7ebdf2165692e4a98f2723e9070f
|
[
"BSD-3-Clause"
] | 7
|
2016-12-28T10:06:30.000Z
|
2021-12-02T00:44:06.000Z
|
test_project/frac/app/models.py
|
jmichalicek/django-fractions
|
693be84e14ff7ebdf2165692e4a98f2723e9070f
|
[
"BSD-3-Clause"
] | 3
|
2020-03-12T14:17:59.000Z
|
2021-11-24T20:31:09.000Z
|
from django.db import models
from djfractions.models import DecimalFractionField
class TestModel(models.Model):
"""
A test model to use for testing custom fields. This technique is based on core django code
such as at https://github.com/django/django/blob/stable/1.8.x/tests/field_subclassing/models.py
and https://github.com/django/django/blob/stable/1.8.x/tests/field_subclassing/tests.py
although for now this model is just right in the test
"""
defaults = DecimalFractionField(max_digits=10, decimal_places=5)
denominator_limited_to_ten = DecimalFractionField(
limit_denominator=10, decimal_places=10, max_digits=15, null=True, default=None,
)
coerce_thirds_true = DecimalFractionField(
coerce_thirds=True, decimal_places=10, max_digits=15, null=True, default=None,
)
    # DecimalFractionField mirrors Django's DecimalField in many aspects
# and that does some fiddling which when doing proper float division
# will result in exceptions being raised if a fraction such as 1/3
# is saved to the database. Setting a max decimal_places fixes this.
decimal_places_limited = DecimalFractionField(
decimal_places=10, max_digits=15, coerce_thirds=False, null=True, default=None,
)
| 46.777778
| 99
| 0.752177
|
b61e2319643def1f582c3876da23d17c610b24ef
| 3,096
|
py
|
Python
|
test/functional/mining_getblocktemplate_longpoll.py
|
MoneyTreeXE/MoneyTreeXE
|
86966233c9a2172c5400c9ff17326c82b6e7c8f1
|
[
"MIT"
] | null | null | null |
test/functional/mining_getblocktemplate_longpoll.py
|
MoneyTreeXE/MoneyTreeXE
|
86966233c9a2172c5400c9ff17326c82b6e7c8f1
|
[
"MIT"
] | null | null | null |
test/functional/mining_getblocktemplate_longpoll.py
|
MoneyTreeXE/MoneyTreeXE
|
86966233c9a2172c5400c9ff17326c82b6e7c8f1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test longpolling with getblocktemplate."""

from test_framework.test_framework import MoneyTreeXETestFramework
from test_framework.util import *

import threading


class LongpollThread(threading.Thread):
    def __init__(self, node):
        threading.Thread.__init__(self)
        # query current longpollid
        templat = node.getblocktemplate()
        self.longpollid = templat['longpollid']
        # create a new connection to the node, we can't use the same
        # connection from two threads
        self.node = get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)

    def run(self):
        self.node.getblocktemplate({'longpollid': self.longpollid})


class GetBlockTemplateLPTest(MoneyTreeXETestFramework):
    def set_test_params(self):
        self.num_nodes = 2

    def run_test(self):
        self.log.info("Warning: this test will take about 70 seconds in the best case. Be patient.")
        self.nodes[0].generate(10)
        templat = self.nodes[0].getblocktemplate()
        longpollid = templat['longpollid']
        # longpollid should not change between successive invocations if nothing else happens
        templat2 = self.nodes[0].getblocktemplate()
        assert(templat2['longpollid'] == longpollid)

        # Test 1: test that the longpoll will wait if we do nothing
        thr = LongpollThread(self.nodes[0])
        thr.start()
        # check that thread still lives
        thr.join(5)  # wait 5 seconds or until thread exits
        assert(thr.is_alive())

        # Test 2: test that longpoll will terminate if another node generates a block
        self.nodes[1].generate(1)  # generate a block on another node
        # check that the thread will exit now that another node has found a block
        thr.join(5)  # wait 5 seconds or until thread exits
        assert(not thr.is_alive())

        # Test 3: test that longpoll will terminate if we generate a block ourselves
        thr = LongpollThread(self.nodes[0])
        thr.start()
        self.nodes[0].generate(1)  # generate a block on our own node
        thr.join(5)  # wait 5 seconds or until thread exits
        assert(not thr.is_alive())

        # Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
        thr = LongpollThread(self.nodes[0])
        thr.start()
        # generate a random transaction and submit it
        min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]
        # min_relay_fee is fee per 1000 bytes, which should be more than enough.
        (txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), min_relay_fee, Decimal("0.001"), 20)
        # after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
        thr.join(60 + 20)
        assert(not thr.is_alive())
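# Hedged illustration (not part of the test above): a client could consume
# getblocktemplate longpolling in a loop. `rpc` stands for any proxy object
# exposing getblocktemplate(), such as the one LongpollThread uses above.
def longpoll_templates(rpc):
    """Yield a block template each time the longpoll call returns."""
    template = rpc.getblocktemplate()
    while True:
        yield template
        # The call blocks server-side until the current template is invalidated
        # by a new block or new mempool transactions, then returns a fresh one.
        template = rpc.getblocktemplate({'longpollid': template['longpollid']})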
if __name__ == '__main__':
    GetBlockTemplateLPTest().main()
| 43
| 112
| 0.68314
|
71ef05aab284f800cb717d997fc31df218f296a8
| 1,124
|
py
|
Python
|
meraki/http/http_call_back.py
|
bossypants22/python-sdk-test
|
37701d62dc18c2abb910eb790ab978913adcaf7b
|
[
"MIT"
] | 37
|
2019-04-24T14:01:33.000Z
|
2022-01-28T01:37:21.000Z
|
meraki/http/http_call_back.py
|
ankita66666666/meraki-python-sdk
|
9894089eb013318243ae48869cc5130eb37f80c0
|
[
"MIT"
] | 10
|
2019-07-09T16:35:11.000Z
|
2021-12-07T03:47:53.000Z
|
meraki/http/http_call_back.py
|
ankita66666666/meraki-python-sdk
|
9894089eb013318243ae48869cc5130eb37f80c0
|
[
"MIT"
] | 17
|
2019-04-30T23:53:21.000Z
|
2022-02-07T22:57:44.000Z
|
# -*- coding: utf-8 -*-
"""
meraki

This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""


class HttpCallBack(object):
    """An interface for the callback to be called before and after the
    HTTP call for an endpoint is made.

    This class should not be instantiated but should be used as a base class
    for HttpCallBack classes.
    """

    def on_before_request(self, request):
        """The controller will call this method before making the HttpRequest.

        Args:
            request (HttpRequest): The request object which will be sent
                to the HttpClient to be executed.
        """
        raise NotImplementedError("This method has not been implemented.")

    def on_after_response(self, context):
        """The controller will call this method after making the HttpRequest.

        Args:
            context (HttpContext): The HttpContext of the API call.
        """
        raise NotImplementedError("This method has not been implemented.")
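# Hedged sketch (not part of the generated file): per the docstring above,
# HttpCallBack is meant to be subclassed. A minimal logging implementation,
# relying only on the two documented method signatures, might look like this.
import logging


class LoggingHttpCallBack(HttpCallBack):
    """Logs each request before it is sent and each response context after."""

    def on_before_request(self, request):
        logging.info("About to send request: %r", request)

    def on_after_response(self, context):
        logging.info("Finished call, response context: %r", context)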
| 30.378378
| 95
| 0.606762
|
f073cf94c27abd32d986f8b86eb1de04f89898e7
| 2,282
|
py
|
Python
|
PyQt5/Widget/ClipboardDemo.py
|
zhaokai0402/PyQt5-Study
|
e6280fdc615e47c947c0902836350db49441e6de
|
[
"MIT"
] | null | null | null |
PyQt5/Widget/ClipboardDemo.py
|
zhaokai0402/PyQt5-Study
|
e6280fdc615e47c947c0902836350db49441e6de
|
[
"MIT"
] | null | null | null |
PyQt5/Widget/ClipboardDemo.py
|
zhaokai0402/PyQt5-Study
|
e6280fdc615e47c947c0902836350db49441e6de
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys, os, math
#pylint: disable=E0602
sys.path.append(os.getcwd())
from importQt import *


class Form(QDialog):

    def __init__(self, parent=None):
        super(Form, self).__init__(parent)
        textCopyButton = QPushButton("&Copy Text")
        textPasteButton = QPushButton("Paste &Text")
        htmlCopyButton = QPushButton("C&opy HTML")
        htmlPasteButton = QPushButton("Paste &HTML")
        imageCopyButton = QPushButton("Co&py Image")
        imagePasteButton = QPushButton("Paste &Image")
        self.textLabel = QLabel("Original text")
        self.imageLabel = QLabel()
        self.imageLabel.setPixmap(QPixmap(os.path.join(os.path.dirname(__file__), "../images/python.png")))

        layout = QGridLayout()
        layout.addWidget(textCopyButton, 0, 0)
        layout.addWidget(imageCopyButton, 0, 1)
        layout.addWidget(htmlCopyButton, 0, 2)
        layout.addWidget(textPasteButton, 1, 0)
        layout.addWidget(imagePasteButton, 1, 1)
        layout.addWidget(htmlPasteButton, 1, 2)
        layout.addWidget(self.textLabel, 2, 0, 1, 2)
        layout.addWidget(self.imageLabel, 2, 2)
        self.setLayout(layout)

        textCopyButton.clicked.connect(self.copyText)
        textPasteButton.clicked.connect(self.pasteText)
        htmlCopyButton.clicked.connect(self.copyHtml)
        htmlPasteButton.clicked.connect(self.pasteHtml)
        imageCopyButton.clicked.connect(self.copyImage)
        imagePasteButton.clicked.connect(self.pasteImage)

        self.setWindowTitle("Clipboard 例子")

    def copyText(self):
        clipboard = QApplication.clipboard()
        clipboard.setText("I've been clipped!")

    def pasteText(self):
        clipboard = QApplication.clipboard()
        self.textLabel.setText(clipboard.text())

    def copyImage(self):
        clipboard = QApplication.clipboard()
        clipboard.setPixmap(self.imageLabel.pixmap())

    def pasteImage(self):
        clipboard = QApplication.clipboard()
        self.imageLabel.setPixmap(clipboard.pixmap())

    def copyHtml(self):
        mimeData = QMimeData()
        mimeData.setHtml("<b>Bold and <font color=red>Red</font></b>")
        clipboard = QApplication.clipboard()
        clipboard.setMimeData(mimeData)

    def pasteHtml(self):
        clipboard = QApplication.clipboard()
        mimeData = clipboard.mimeData()
        if mimeData.hasHtml():
            self.textLabel.setText(mimeData.html())
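    # Hedged extension (not part of the original demo): QClipboard also emits a
    # dataChanged signal, so the dialog could track clipboard changes made by
    # other applications. Reusing the existing pasteText slot is illustrative;
    # call this once (e.g. from __init__) to enable it.
    def watchClipboard(self):
        clipboard = QApplication.clipboard()
        clipboard.dataChanged.connect(self.pasteText)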
if __name__ == "__main__":
    app = QApplication(sys.argv)
    form = Form()
    form.show()
    sys.exit(app.exec_())
| 32.140845
| 102
| 0.745399
|