Column schema:

| column | type | observed values |
|---|---|---|
| hexsha | string | lengths 40-40 |
| size | int64 | 3-1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3-972 |
| max_stars_repo_name | string | lengths 6-130 |
| max_stars_repo_head_hexsha | string | lengths 40-78 |
| max_stars_repo_licenses | list | lengths 1-10 |
| max_stars_count | int64 | 1-191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24-24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24-24 ⌀ |
| max_issues_repo_path | string | lengths 3-972 |
| max_issues_repo_name | string | lengths 6-130 |
| max_issues_repo_head_hexsha | string | lengths 40-78 |
| max_issues_repo_licenses | list | lengths 1-10 |
| max_issues_count | int64 | 1-116k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24-24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24-24 ⌀ |
| max_forks_repo_path | string | lengths 3-972 |
| max_forks_repo_name | string | lengths 6-130 |
| max_forks_repo_head_hexsha | string | lengths 40-78 |
| max_forks_repo_licenses | list | lengths 1-10 |
| max_forks_count | int64 | 1-105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24-24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24-24 ⌀ |
| content | string | lengths 3-1.03M |
| avg_line_length | float64 | 1.13-941k |
| max_line_length | int64 | 2-941k |
| alphanum_fraction | float64 | 0-1 |

Each data row below lists these fields in this order, separated by `|`; the `content` field is shown expanded as raw source, followed by the three trailing statistics.
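As a quick orientation aid, here is a minimal sketch of how rows with this schema could be inspected once the dump is loaded into pandas; the file name `python_files.parquet` and the filter thresholds are illustrative assumptions, not part of the dataset.

```python
# Hypothetical loading sketch: assumes the rows below were saved to
# "python_files.parquet" (the file name is an assumption for illustration).
import pandas as pd

df = pd.read_parquet("python_files.parquet")

# Keep rows whose license list contains at least one permissive license.
permissive = {"MIT", "Apache-2.0", "BSD-3-Clause", "Unlicense", "0BSD"}
has_permissive = df["max_stars_repo_licenses"].apply(
    lambda licenses: any(lic in permissive for lic in licenses)
)

# Use the precomputed statistics columns to drop very large or low-signal
# files; the thresholds are arbitrary examples.
subset = df[has_permissive & (df["size"] < 100_000) & (df["alphanum_fraction"] > 0.25)]
print(subset[["max_stars_repo_name", "max_stars_repo_path", "size", "max_stars_count"]].head())
```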
4e7d0285d7f4e01703e7b30700e97bc01d069afe | 73 | py | Python | tests/validators/__init__.py | hiroaki-yamamoto/WTF-OTP | e7c87839cbe8683dd1f691912156ac348dc98e67 | ["Unlicense", "MIT"] | 1 | 2019-06-27T06:47:25.000Z | 2019-06-27T06:47:25.000Z | tests/validators/__init__.py | hiroaki-yamamoto/WTF-OTP | e7c87839cbe8683dd1f691912156ac348dc98e67 | ["Unlicense", "MIT"] | 2 | 2021-08-25T05:11:22.000Z | 2022-01-29T02:51:57.000Z | tests/validators/__init__.py | hiroaki-yamamoto/WTF-OTP | e7c87839cbe8683dd1f691912156ac348dc98e67 | ["Unlicense", "MIT"] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
"""OTP Number Validation tests."""
| 14.6 | 34 | 0.671233 |
4a5bb5d897f3509175df9a2a64e86dfc17e82843 | 1,028 | py | Python | new_api/urls.py | AlessandroSFreitas/rest_api | d6564e8b899c82312ef5d9830f7e81eb2476e0ad | ["MIT"] | null | null | null | new_api/urls.py | AlessandroSFreitas/rest_api | d6564e8b899c82312ef5d9830f7e81eb2476e0ad | ["MIT"] | null | null | null | new_api/urls.py | AlessandroSFreitas/rest_api | d6564e8b899c82312ef5d9830f7e81eb2476e0ad | ["MIT"] | null | null | null |
from django.contrib import admin
from django.urls import path, include
from apps.person.api.views import PersonViewSet
from apps.family.api.views import FamilyViewSet
urlpatterns = [
path('', include('rest_framework.urls')),
path('admin/', admin.site.urls),
path('person/', PersonViewSet.as_view({"get": "list", "post": "create"}), name="person-add"),
path('person/<int:person_id>/', PersonViewSet.as_view(
{
"get": "retrieve",
"put": "update",
"patch": "partial_update",
"delete": "destroy"
}
),
name="person-detail"
),
path('family/', FamilyViewSet.as_view({"get": "list", "post": "create"}), name="family-add"),
path('family/<int:family_id>/', FamilyViewSet.as_view(
{
"get": "retrieve",
"put": "update",
"patch": "partial_update",
"delete": "destroy"
}
),
name="family-detail"
),
]
| 31.151515 | 97 | 0.52821 |
df213235e601902428c2c6c2fb0856beebdb1318 | 19,495 | py | Python | airflow/models/dagrun.py | mebelousov/airflow | d99833c9b5be9eafc0c7851343ee86b6c20aed40 | ["Apache-2.0"] | 3 | 2019-12-11T15:54:13.000Z | 2021-05-24T20:21:08.000Z | airflow/models/dagrun.py | mebelousov/airflow | d99833c9b5be9eafc0c7851343ee86b6c20aed40 | ["Apache-2.0"] | 8 | 2021-02-08T20:40:47.000Z | 2022-03-29T22:27:53.000Z | airflow/models/dagrun.py | mebelousov/airflow | d99833c9b5be9eafc0c7851343ee86b6c20aed40 | ["Apache-2.0"] | 2 | 2021-01-11T13:53:03.000Z | 2021-10-02T05:06:34.000Z |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from typing import List, Optional, Tuple, Union, cast
from sqlalchemy import (
Boolean, Column, DateTime, Index, Integer, PickleType, String, UniqueConstraint, and_, func, or_,
)
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import synonym
from sqlalchemy.orm.session import Session
from airflow.exceptions import AirflowException
from airflow.models.base import ID_LEN, Base
from airflow.models.taskinstance import TaskInstance as TI
from airflow.stats import Stats
from airflow.ti_deps.dep_context import DepContext
from airflow.ti_deps.dependencies import SCHEDULEABLE_STATES
from airflow.utils import timezone
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import provide_session
from airflow.utils.sqlalchemy import UtcDateTime
from airflow.utils.state import State
from airflow.utils.types import DagRunType
class DagRun(Base, LoggingMixin):
"""
DagRun describes an instance of a Dag. It can be created
by the scheduler (for regular runs) or by an external trigger
"""
__tablename__ = "dag_run"
id = Column(Integer, primary_key=True)
dag_id = Column(String(ID_LEN))
execution_date = Column(UtcDateTime, default=timezone.utcnow)
start_date = Column(UtcDateTime, default=timezone.utcnow)
end_date = Column(UtcDateTime)
_state = Column('state', String(50), default=State.RUNNING)
run_id = Column(String(ID_LEN))
external_trigger = Column(Boolean, default=True)
conf = Column(PickleType)
dag = None
__table_args__ = (
Index('dag_id_state', dag_id, _state),
UniqueConstraint('dag_id', 'execution_date'),
UniqueConstraint('dag_id', 'run_id'),
)
def __init__(self, dag_id=None, run_id=None, execution_date=None, start_date=None, external_trigger=None,
conf=None, state=None):
self.dag_id = dag_id
self.run_id = run_id
self.execution_date = execution_date
self.start_date = start_date
self.external_trigger = external_trigger
self.conf = conf
self.state = state
super().__init__()
def __repr__(self):
return (
'<DagRun {dag_id} @ {execution_date}: {run_id}, '
'externally triggered: {external_trigger}>'
).format(
dag_id=self.dag_id,
execution_date=self.execution_date,
run_id=self.run_id,
external_trigger=self.external_trigger)
def get_state(self):
return self._state
def set_state(self, state):
if self._state != state:
self._state = state
self.end_date = timezone.utcnow() if self._state in State.finished() else None
@declared_attr
def state(self):
return synonym('_state', descriptor=property(self.get_state, self.set_state))
@provide_session
def refresh_from_db(self, session=None):
"""
Reloads the current dagrun from the database
:param session: database session
"""
DR = DagRun
exec_date = func.cast(self.execution_date, DateTime)
dr = session.query(DR).filter(
DR.dag_id == self.dag_id,
func.cast(DR.execution_date, DateTime) == exec_date,
DR.run_id == self.run_id
).one()
self.id = dr.id
self.state = dr.state
@staticmethod
@provide_session
def find(
dag_id: Optional[Union[str, List[str]]] = None,
run_id: Optional[str] = None,
execution_date: Optional[datetime] = None,
state: Optional[str] = None,
external_trigger: Optional[bool] = None,
no_backfills: Optional[bool] = False,
session: Session = None,
execution_start_date=None, execution_end_date=None
):
"""
Returns a set of dag runs for the given search criteria.
:param dag_id: the dag_id or list of dag_id to find dag runs for
:type dag_id: str or list[str]
:param run_id: defines the run id for this dag run
:type run_id: str
:param execution_date: the execution date
:type execution_date: datetime.datetime or list[datetime.datetime]
:param state: the state of the dag run
:type state: str
:param external_trigger: whether this dag run is externally triggered
:type external_trigger: bool
:param no_backfills: return no backfills (True), return all (False).
Defaults to False
:type no_backfills: bool
:param session: database session
:type session: sqlalchemy.orm.session.Session
:param execution_start_date: dag run that was executed from this date
:type execution_start_date: datetime.datetime
:param execution_end_date: dag run that was executed until this date
:type execution_end_date: datetime.datetime
"""
DR = DagRun
qry = session.query(DR)
dag_ids = [dag_id] if isinstance(dag_id, str) else dag_id
if dag_ids:
qry = qry.filter(DR.dag_id.in_(dag_ids))
if run_id:
qry = qry.filter(DR.run_id == run_id)
if execution_date:
if isinstance(execution_date, list):
qry = qry.filter(DR.execution_date.in_(execution_date))
else:
qry = qry.filter(DR.execution_date == execution_date)
if execution_start_date and execution_end_date:
qry = qry.filter(DR.execution_date.between(execution_start_date, execution_end_date))
elif execution_start_date:
qry = qry.filter(DR.execution_date >= execution_start_date)
elif execution_end_date:
qry = qry.filter(DR.execution_date <= execution_end_date)
if state:
qry = qry.filter(DR.state == state)
if external_trigger is not None:
qry = qry.filter(DR.external_trigger == external_trigger)
if no_backfills:
# in order to prevent a circular dependency
qry = qry.filter(DR.run_id.notlike(f"{DagRunType.BACKFILL_JOB.value}__%"))
dr = qry.order_by(DR.execution_date).all()
return dr
@provide_session
def get_task_instances(self, state=None, session=None):
"""
Returns the task instances for this dag run
"""
tis = session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.execution_date == self.execution_date,
)
if state:
if isinstance(state, str):
tis = tis.filter(TI.state == state)
else:
# this is required to deal with NULL values
if None in state:
if all(x is None for x in state):
tis = tis.filter(TI.state.is_(None))
else:
not_none_state = [s for s in state if s]
tis = tis.filter(
or_(TI.state.in_(not_none_state),
TI.state.is_(None))
)
else:
tis = tis.filter(TI.state.in_(state))
if self.dag and self.dag.partial:
tis = tis.filter(TI.task_id.in_(self.dag.task_ids))
return tis.all()
@provide_session
def get_task_instance(self, task_id, session=None):
"""
Returns the task instance specified by task_id for this dag run
:param task_id: the task id
"""
ti = session.query(TI).filter(
TI.dag_id == self.dag_id,
TI.execution_date == self.execution_date,
TI.task_id == task_id
).first()
return ti
def get_dag(self):
"""
Returns the Dag associated with this DagRun.
:return: DAG
"""
if not self.dag:
raise AirflowException("The DAG (.dag) for {} needs to be set"
.format(self))
return self.dag
@provide_session
def get_previous_dagrun(self, state: Optional[str] = None, session: Session = None) -> Optional['DagRun']:
"""The previous DagRun, if there is one"""
session = cast(Session, session) # mypy
filters = [
DagRun.dag_id == self.dag_id,
DagRun.execution_date < self.execution_date,
]
if state is not None:
filters.append(DagRun.state == state)
return session.query(DagRun).filter(
*filters
).order_by(
DagRun.execution_date.desc()
).first()
@provide_session
def get_previous_scheduled_dagrun(self, session=None):
"""The previous, SCHEDULED DagRun, if there is one"""
dag = self.get_dag()
return session.query(DagRun).filter(
DagRun.dag_id == self.dag_id,
DagRun.execution_date == dag.previous_schedule(self.execution_date)
).first()
@provide_session
def update_state(self, session=None):
"""
Determines the overall state of the DagRun based on the state
of its TaskInstances.
:return: ready_tis: the tis that can be scheduled in the current loop
:rtype ready_tis: list[airflow.models.TaskInstance]
"""
dag = self.get_dag()
ready_tis = []
tis = [ti for ti in self.get_task_instances(session=session,
state=State.task_states + (State.SHUTDOWN,))]
self.log.debug("number of tis tasks for %s: %s task(s)", self, len(tis))
for ti in tis:
ti.task = dag.get_task(ti.task_id)
start_dttm = timezone.utcnow()
unfinished_tasks = [t for t in tis if t.state in State.unfinished()]
finished_tasks = [t for t in tis if t.state in State.finished() + [State.UPSTREAM_FAILED]]
none_depends_on_past = all(not t.task.depends_on_past for t in unfinished_tasks)
none_task_concurrency = all(t.task.task_concurrency is None
for t in unfinished_tasks)
if unfinished_tasks:
scheduleable_tasks = [ut for ut in unfinished_tasks if ut.state in SCHEDULEABLE_STATES]
if none_depends_on_past and none_task_concurrency:
# small speed up
self.log.debug(
"number of scheduleable tasks for %s: %s task(s)",
self, len(scheduleable_tasks))
ready_tis, changed_tis = self._get_ready_tis(scheduleable_tasks, finished_tasks, session)
self.log.debug("ready tis length for %s: %s task(s)", self, len(ready_tis))
are_runnable_tasks = ready_tis or self._are_premature_tis(
unfinished_tasks, finished_tasks, session) or changed_tis
else:
# slow path
for ti in scheduleable_tasks:
if ti.are_dependencies_met(
dep_context=DepContext(flag_upstream_failed=True),
session=session
):
self.log.debug('Queuing task: %s', ti)
ready_tis.append(ti)
duration = (timezone.utcnow() - start_dttm)
Stats.timing("dagrun.dependency-check.{}".format(self.dag_id), duration)
leaf_tis = [ti for ti in tis if ti.task_id in {t.task_id for t in dag.leaves}]
# if all roots finished and at least one failed, the run failed
if not unfinished_tasks and any(
leaf_ti.state in {State.FAILED, State.UPSTREAM_FAILED} for leaf_ti in leaf_tis
):
self.log.info('Marking run %s failed', self)
self.set_state(State.FAILED)
dag.handle_callback(self, success=False, reason='task_failure',
session=session)
# if all leafs succeeded and no unfinished tasks, the run succeeded
elif not unfinished_tasks and all(
leaf_ti.state in {State.SUCCESS, State.SKIPPED} for leaf_ti in leaf_tis
):
self.log.info('Marking run %s successful', self)
self.set_state(State.SUCCESS)
dag.handle_callback(self, success=True, reason='success', session=session)
# if *all tasks* are deadlocked, the run failed
elif (unfinished_tasks and none_depends_on_past and
none_task_concurrency and not are_runnable_tasks):
self.log.info('Deadlock; marking run %s failed', self)
self.set_state(State.FAILED)
dag.handle_callback(self, success=False, reason='all_tasks_deadlocked',
session=session)
# finally, if the roots aren't done, the dag is still running
else:
self.set_state(State.RUNNING)
self._emit_duration_stats_for_finished_state()
        # todo: determine whether we want to use with_for_update to make sure to lock the run
session.merge(self)
session.commit()
return ready_tis
def _get_ready_tis(
self,
scheduleable_tasks: List[TI],
finished_tasks: List[TI],
session: Session,
) -> Tuple[List[TI], bool]:
old_states = {}
ready_tis: List[TI] = []
changed_tis = False
if not scheduleable_tasks:
return ready_tis, changed_tis
# Check dependencies
for st in scheduleable_tasks:
old_state = st.state
if st.are_dependencies_met(
dep_context=DepContext(
flag_upstream_failed=True,
finished_tasks=finished_tasks),
session=session):
ready_tis.append(st)
else:
old_states[st.key] = old_state
# Check if any ti changed state
tis_filter = TI.filter_for_tis(old_states.keys())
if tis_filter is not None:
fresh_tis = session.query(TI).filter(tis_filter).all()
changed_tis = any(ti.state != old_states[ti.key] for ti in fresh_tis)
return ready_tis, changed_tis
def _are_premature_tis(
self,
unfinished_tasks: List[TI],
finished_tasks: List[TI],
session: Session,
) -> bool:
# there might be runnable tasks that are up for retry and for some reason(retry delay, etc) are
# not ready yet so we set the flags to count them in
for ut in unfinished_tasks:
if ut.are_dependencies_met(
dep_context=DepContext(
flag_upstream_failed=True,
ignore_in_retry_period=True,
ignore_in_reschedule_period=True,
finished_tasks=finished_tasks),
session=session):
return True
return False
def _emit_duration_stats_for_finished_state(self):
if self.state == State.RUNNING:
return
duration = (self.end_date - self.start_date)
if self.state is State.SUCCESS:
Stats.timing('dagrun.duration.success.{}'.format(self.dag_id), duration)
elif self.state == State.FAILED:
Stats.timing('dagrun.duration.failed.{}'.format(self.dag_id), duration)
@provide_session
def verify_integrity(self, session=None):
"""
Verifies the DagRun by checking for removed tasks or tasks that are not in the
database yet. It will set state to removed or add the task if required.
"""
dag = self.get_dag()
tis = self.get_task_instances(session=session)
# check for removed or restored tasks
task_ids = []
for ti in tis:
task_ids.append(ti.task_id)
task = None
try:
task = dag.get_task(ti.task_id)
except AirflowException:
if ti.state == State.REMOVED:
pass # ti has already been removed, just ignore it
elif self.state is not State.RUNNING and not dag.partial:
self.log.warning("Failed to get task '{}' for dag '{}'. "
"Marking it as removed.".format(ti, dag))
Stats.incr(
"task_removed_from_dag.{}".format(dag.dag_id), 1, 1)
ti.state = State.REMOVED
should_restore_task = (task is not None) and ti.state == State.REMOVED
if should_restore_task:
self.log.info("Restoring task '{}' which was previously "
"removed from DAG '{}'".format(ti, dag))
Stats.incr("task_restored_to_dag.{}".format(dag.dag_id), 1, 1)
ti.state = State.NONE
# check for missing tasks
for task in dag.task_dict.values():
if task.start_date > self.execution_date and not self.is_backfill:
continue
if task.task_id not in task_ids:
Stats.incr(
"task_instance_created-{}".format(task.__class__.__name__),
1, 1)
ti = TI(task, self.execution_date)
session.add(ti)
session.commit()
@staticmethod
def get_run(session, dag_id, execution_date):
"""
:param dag_id: DAG ID
:type dag_id: unicode
:param execution_date: execution date
:type execution_date: datetime
:return: DagRun corresponding to the given dag_id and execution date
if one exists. None otherwise.
:rtype: airflow.models.DagRun
"""
qry = session.query(DagRun).filter(
DagRun.dag_id == dag_id,
DagRun.external_trigger == False, # noqa pylint: disable=singleton-comparison
DagRun.execution_date == execution_date,
)
return qry.first()
@property
def is_backfill(self):
return (
self.run_id is not None and
self.run_id.startswith(f"{DagRunType.BACKFILL_JOB.value}")
)
@classmethod
@provide_session
def get_latest_runs(cls, session):
"""Returns the latest DagRun for each DAG. """
subquery = (
session
.query(
cls.dag_id,
func.max(cls.execution_date).label('execution_date'))
.group_by(cls.dag_id)
.subquery()
)
dagruns = (
session
.query(cls)
.join(subquery,
and_(cls.dag_id == subquery.c.dag_id,
cls.execution_date == subquery.c.execution_date))
.all()
)
return dagruns
| 37.928016 | 110 | 0.602257 |
b76da0316e7a291fa9219c005e1905d16780dd3e | 1,433 | py | Python | __Training__/Python - HackerRank/16. Numpy/Polynomials.py | JUD210/Study-Note | 2add9db3f11d99370f49878f0c19e9caa60d2d02 | ["MIT"] | null | null | null | __Training__/Python - HackerRank/16. Numpy/Polynomials.py | JUD210/Study-Note | 2add9db3f11d99370f49878f0c19e9caa60d2d02 | ["MIT"] | null | null | null | __Training__/Python - HackerRank/16. Numpy/Polynomials.py | JUD210/Study-Note | 2add9db3f11d99370f49878f0c19e9caa60d2d02 | ["MIT"] | null | null | null |
# https://www.hackerrank.com/challenges/np-polynomials/problem
import numpy
# Inputs
standard_input = """1.1 2 3
0"""
p = [float(s) for s in input().split()]
# 1.1 2 3
x = int(input())
# 0
print(numpy.polyval(p, x))
# 3.0
""" Reference
[poly]
The poly tool returns the coefficients of a polynomial with the given sequence of roots.
print(numpy.poly([-1, 1, 1, 10]))
#Output : [ 1 -11 9 11 -10]
[roots]
The roots tool returns the roots of a polynomial with the given coefficients.
print(numpy.roots([1, 0, -1]))
#Output : [-1. 1.]
[polyint]
The polyint tool returns an antiderivative (indefinite integral) of a polynomial.
print(numpy.polyint([1, 1, 1]))
#Output : [ 0.33333333 0.5 1. 0. ]
[polyder]
The polyder tool returns the derivative of the specified order of a polynomial.
print(numpy.polyder([1, 1, 1, 1]))
#Output : [3 2 1]
[polyval]
The polyval tool evaluates the polynomial at specific value.
print(numpy.polyval([1, -2, 0, 2], 4))
Output : 34
[polyfit]
The polyfit tool fits a polynomial of a specified order to a set of data using a least-squares approach.
print(numpy.polyfit([0,1,-1, 2, -2], [0,1,1, 4, 4], 2))
#Output : [ 1.00000000e+00 0.00000000e+00 -3.97205465e-16]
-----------
The functions polyadd, polysub, polymul, and polydiv also handle proper addition, subtraction, multiplication, and division of polynomial coefficients, respectively.
"""
| 19.364865 | 165 | 0.672715 |
50d900294828e2311282c6f5e84b2c9f7a33cca7 | 2,272 | py | Python | setup.py | akruszewski/kedro-mlflow | 330cab52642a0993e957740726e7d453282f1588 | ["Apache-2.0"] | null | null | null | setup.py | akruszewski/kedro-mlflow | 330cab52642a0993e957740726e7d453282f1588 | ["Apache-2.0"] | null | null | null | setup.py | akruszewski/kedro-mlflow | 330cab52642a0993e957740726e7d453282f1588 | ["Apache-2.0"] | null | null | null |
import pathlib
import re
from setuptools import find_packages, setup
NAME = "kedro_mlflow"
HERE = pathlib.Path(__file__).parent
# get package version
with open((HERE / NAME / "__init__.py").as_posix(), encoding="utf-8") as file_handler:
result = re.search(r'__version__ *= *["\']([^"\']+)', file_handler.read())
if not result:
raise ValueError("Can't find the version in kedro/__init__.py")
VERSION = result.group(1)
def _parse_requirements(path, encoding="utf-8"):
with open(path, mode="r", encoding=encoding) as file_handler:
requirements = [
x.strip() for x in file_handler if x.strip() and not x.startswith("-r")
]
return requirements
# get the dependencies and installs
base_requirements = _parse_requirements("requirements/requirements.txt")
test_requirements = _parse_requirements("requirements/test_requirements.txt")
# Get the long description from the README file
with open((HERE / "README.md").as_posix(), encoding="utf-8") as file_handler:
README = file_handler.read()
setup(
name=NAME,
version=VERSION,
description="A kedro-plugin to use mlflow in your kedro projects",
license="Apache Software License (Apache 2.0)",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/Galileo-Galilei/kedro-mlflow",
python_requires=">=3.6, <3.9",
packages=find_packages(exclude=["docs*", "tests*"]),
setup_requires=["setuptools_scm"],
include_package_data=True,
tests_require=test_requirements,
install_requires=base_requirements,
author="Galileo-Galilei",
entry_points={
"kedro.project_commands": [
"kedro_mlflow = kedro_mlflow.framework.cli.cli:commands"
],
"kedro.global_commands": [
"kedro_mlflow = kedro_mlflow.framework.cli.cli:commands"
],
},
zip_safe=False,
keywords="kedro plugin, mlflow, model versioning, model packaging, pipelines, machine learning, data pipelines, data science, data engineering",
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
| 32.927536 | 148 | 0.680018 |
343f09834fd783225f8b469fb02773a6e6b949d6 | 2,774 | py | Python | tensorflow/python/profiler/profiler_client_test.py | nicolas-harraudeau-sonarsource/tensorflow | f42f57b814b82a217943f621967036a08bb95e88 | ["Apache-2.0"] | 27 | 2019-01-02T09:36:57.000Z | 2022-02-21T06:41:51.000Z | tensorflow/python/profiler/profiler_client_test.py | whoozle/tensorflow | a2725aa0153d52260f602c6604756840629d1569 | ["Apache-2.0"] | 3 | 2019-01-23T11:01:22.000Z | 2022-02-24T02:53:31.000Z | tensorflow/python/profiler/profiler_client_test.py | whoozle/tensorflow | a2725aa0153d52260f602c6604756840629d1569 | ["Apache-2.0"] | 11 | 2019-03-02T12:42:23.000Z | 2021-02-04T12:20:10.000Z |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for profiler_client."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import portpicker
from tensorflow.python.eager import test
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.profiler import profiler_client
from tensorflow.python.profiler import profiler_v2 as profiler
class ProfilerClientTest(test_util.TensorFlowTestCase):
def testTrace_ProfileIdleServer(self):
test_port = portpicker.pick_unused_port()
profiler.start_server(test_port)
# Test the profilers are successfully started and connected to profiler
# service on the worker. Since there is no op running, it is expected to
# return UnavailableError with no trace events collected string.
with self.assertRaises(errors.UnavailableError) as error:
profiler_client.trace(
'localhost:' + str(test_port), self.get_temp_dir(), duration_ms=10)
self.assertEqual('No trace event is collected', str(error.exception))
def testTrace_ProfileIdleServerWithOptions(self):
test_port = portpicker.pick_unused_port()
profiler.start_server(test_port)
# Test the profilers are successfully started and connected to profiler
# service on the worker. Since there is no op running, it is expected to
# return UnavailableError with no trace events collected string.
with self.assertRaises(errors.UnavailableError) as error:
options = profiler.ProfilerOptions(
host_tracer_level=3, device_tracer_level=0)
profiler_client.trace(
'localhost:' + str(test_port),
self.get_temp_dir(),
duration_ms=10,
options=options)
self.assertEqual('No trace event is collected', str(error.exception))
def testMonitor_ProcessInvalidAddress(self):
# Monitor is only supported in cloud TPU. Test invalid address instead.
with self.assertRaises(errors.UnavailableError):
profiler_client.monitor('localhost:6006', 2000)
if __name__ == '__main__':
test.main()
| 41.402985 | 80 | 0.740807 |
1576e8e3a1fb52c674972866919edaf55178271f | 934 | py | Python | day01/t01/migrations/0002_cart_siji.py | lin8979/newrep01 | d0d7e157d522c2e83d1976a35d6a815c9e7e4257 | ["Apache-2.0"] | null | null | null | day01/t01/migrations/0002_cart_siji.py | lin8979/newrep01 | d0d7e157d522c2e83d1976a35d6a815c9e7e4257 | ["Apache-2.0"] | null | null | null | day01/t01/migrations/0002_cart_siji.py | lin8979/newrep01 | d0d7e157d522c2e83d1976a35d6a815c9e7e4257 | ["Apache-2.0"] | null | null | null |
# Generated by Django 2.2.24 on 2021-06-09 15:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('t01', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Cart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('color', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='Siji',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('cart', models.ForeignKey(on_delete='CASCADE', to='t01.Cart', verbose_name='车')),
],
),
]
| 31.133333 | 114 | 0.549251 |
28202b8e169c48b2bfdb0c1d8efded224aec059c | 778 | py | Python | src/cardiac_geometries/_import_checks.py | ComputationalPhysiology/cardiac_geometries | ce4a621f5f067554fca58460ddb1892189aadd99 | ["MIT"] | 1 | 2022-01-31T11:14:08.000Z | 2022-01-31T11:14:08.000Z | src/cardiac_geometries/_import_checks.py | ComputationalPhysiology/cardiac_geometries | ce4a621f5f067554fca58460ddb1892189aadd99 | ["MIT"] | null | null | null | src/cardiac_geometries/_import_checks.py | ComputationalPhysiology/cardiac_geometries | ce4a621f5f067554fca58460ddb1892189aadd99 | ["MIT"] | null | null | null |
try:
import meshio # noqa: F401
_has_meshio = True
except ImportError:
_has_meshio = False
try:
import dolfin # noqa: F401
_has_dolfin = True
except ImportError:
_has_dolfin = False
try:
import gmsh # noqa: F401
_has_gmsh = True
except (ImportError, OSError):
_has_gmsh = False
try:
import mshr # noqa: F401
_has_mshr = True
except (ImportError, OSError):
_has_mshr = False
try:
import ldrb # noqa: F401
_has_ldrb = True
except ImportError:
_has_ldrb = False
def has_meshio() -> bool:
return _has_meshio
def has_dolfin() -> bool:
return _has_dolfin
def has_gmsh() -> bool:
return _has_gmsh
def has_ldrb() -> bool:
return _has_ldrb
def has_mshr() -> bool:
return _has_mshr
| 13.649123 | 31 | 0.654242 |
fa5beac2a607feca87ec71aba99b471a60a849ae | 3,203 | py | Python | core/tests/test_apps.nem.namespace.py | Kayuii/trezor-crypto | 6556616681a4e2d7e18817e8692d4f6e041dee01 | ["MIT"] | null | null | null | core/tests/test_apps.nem.namespace.py | Kayuii/trezor-crypto | 6556616681a4e2d7e18817e8692d4f6e041dee01 | ["MIT"] | 1 | 2019-02-08T00:22:42.000Z | 2019-02-13T09:41:54.000Z | core/tests/test_apps.nem.namespace.py | Kayuii/trezor-crypto | 6556616681a4e2d7e18817e8692d4f6e041dee01 | ["MIT"] | 2 | 2019-02-07T23:57:09.000Z | 2020-10-21T07:07:27.000Z |
from common import *
from apps.nem.helpers import *
from apps.nem.namespace import *
from apps.nem.namespace.serialize import *
from trezor.crypto import hashlib
from trezor.messages.NEMProvisionNamespace import NEMProvisionNamespace
from trezor.messages.NEMSignTx import NEMSignTx
class TestNemNamespace(unittest.TestCase):
def test_create_provision_namespace(self):
# http://bob.nem.ninja:8765/#/transfer/0acbf8df91e6a65dc56c56c43d65f31ff2a6a48d06fc66e78c7f3436faf3e74f
m = _create_msg(NEM_NETWORK_TESTNET,
56999445,
20000000,
57003045,
'gimre',
'',
'TAMESPACEWH4MKFMBCVFERDPOOP4FK7MTDJEYP35',
5000000000)
t = serialize_provision_namespace(m.transaction, m.provision_namespace, unhexlify('84afa1bbc993b7f5536344914dde86141e61f8cbecaf8c9cefc07391f3287cf5'))
self.assertEqual(hashlib.sha3_256(t, keccak=True).digest(), unhexlify('f7cab28da57204d01a907c697836577a4ae755e6c9bac60dcc318494a22debb3'))
# http://bob.nem.ninja:8765/#/namespace/7ddd5fe607e1bfb5606e0ac576024c318c8300d237273117d4db32a60c49524d
m = _create_msg(NEM_NETWORK_TESTNET,
21496797,
108000000,
21500397,
'misc',
'alice',
'TAMESPACEWH4MKFMBCVFERDPOOP4FK7MTDJEYP35',
5000000000)
t = serialize_provision_namespace(m.transaction, m.provision_namespace, unhexlify('244fa194e2509ac0d2fbc18779c2618d8c2ebb61c16a3bcbebcf448c661ba8dc'))
self.assertEqual(hashlib.sha3_256(t, keccak=True).digest(), unhexlify('7ddd5fe607e1bfb5606e0ac576024c318c8300d237273117d4db32a60c49524d'))
# http://chain.nem.ninja/#/namespace/57071aad93ca125dc231dc02c07ad8610cd243d35068f9b36a7d231383907569
m = _create_msg(NEM_NETWORK_MAINNET,
26699717,
108000000,
26703317,
'sex',
'',
'NAMESPACEWH4MKFMBCVFERDPOOP4FK7MTBXDPZZA',
50000000000)
t = serialize_provision_namespace(m.transaction, m.provision_namespace, unhexlify('9f3c14f304309c8b72b2821339c4428793b1518bea72d58dd01f19d523518614'))
self.assertEqual(hashlib.sha3_256(t, keccak=True).digest(), unhexlify('57071aad93ca125dc231dc02c07ad8610cd243d35068f9b36a7d231383907569'))
def _create_msg(network: int, timestamp: int, fee: int, deadline: int,
name: str, parent: str, sink: str, rental_fee: int):
m = NEMSignTx()
m.transaction = NEMTransactionCommon()
m.transaction.network = network
m.transaction.timestamp = timestamp
m.transaction.fee = fee
m.transaction.deadline = deadline
m.provision_namespace = NEMProvisionNamespace()
m.provision_namespace.namespace = name
m.provision_namespace.parent = parent
m.provision_namespace.sink = sink
m.provision_namespace.fee = rental_fee
return m
if __name__ == '__main__':
unittest.main()
| 43.876712 | 158 | 0.662816 |
1e5fbcb0616041dd9d158d6753fcfb63096ea6af | 6,733 | py | Python | statsmodels/stats/tests/test_dist_dependant_measures.py | timgates42/statsmodels | ab8ff09e3eb8c385214bd1575aa47b81bf53d584 | ["BSD-3-Clause"] | 2 | 2020-04-13T15:45:38.000Z | 2020-06-01T14:41:04.000Z | statsmodels/stats/tests/test_dist_dependant_measures.py | timgates42/statsmodels | ab8ff09e3eb8c385214bd1575aa47b81bf53d584 | ["BSD-3-Clause"] | 1 | 2020-04-21T02:42:32.000Z | 2020-04-21T02:42:32.000Z | statsmodels/stats/tests/test_dist_dependant_measures.py | timgates42/statsmodels | ab8ff09e3eb8c385214bd1575aa47b81bf53d584 | ["BSD-3-Clause"] | 1 | 2020-03-20T00:36:16.000Z | 2020-03-20T00:36:16.000Z |
import numpy as np
from numpy.testing import assert_almost_equal
from pytest import raises as assert_raises, warns as assert_warns
import statsmodels.stats.dist_dependence_measures as ddm
from statsmodels.datasets import get_rdataset
class TestDistDependenceMeasures(object):
@classmethod
def setup_class(cls):
"""
Values were obtained via the R `energy` package.
R code:
------
> dcov.test(x, y, R=200)
dCov independence test (permutation test)
data: index 1, replicates 200
nV^2 = 45829, p-value = 0.004975
sample estimates:
dCov
47.86925
> DCOR(x, y)
$dCov
[1] 47.86925
$dCor
[1] 0.9999704
$dVarX
[1] 47.28702
$dVarY
[1] 48.46151
"""
np.random.seed(3)
cls.x = np.array(range(1, 101)).reshape((20, 5))
cls.y = cls.x + np.log(cls.x)
cls.dcor_exp = 0.9999704
cls.dcov_exp = 47.86925
cls.dvar_x_exp = 47.28702
cls.dvar_y_exp = 48.46151
cls.pval_emp_exp = 0.004975
cls.test_stat_emp_exp = 45829
# The values above are functions of the following values, and
# therefore when the above group of variables is computed correctly
# it means this group of variables was also correctly calculated.
cls.S_exp = 5686.03162
cls.test_stat_asym_exp = 2.8390102
cls.pval_asym_exp = 0.00452
def test_input_validation_nobs(self):
with assert_raises(ValueError, match="same number of observations"):
ddm.distance_covariance_test(self.x[:2, :], self.y)
def test_input_validation_unknown_method(self):
with assert_raises(ValueError, match="Unknown 'method' parameter"):
ddm.distance_covariance_test(self.x, self.y, method="wrong_name")
def test_statistic_value_asym_method(self):
statistic, pval, method = ddm.distance_covariance_test(
self.x, self.y, method="asym")
assert method == "asym"
assert_almost_equal(statistic, self.test_stat_asym_exp, 4)
assert_almost_equal(pval, self.pval_asym_exp, 3)
def test_statistic_value_emp_method(self):
statistic, pval, method = ddm.distance_covariance_test(
self.x, self.y, method="emp")
assert method == "emp"
assert_almost_equal(statistic, self.test_stat_emp_exp, 0)
assert_almost_equal(pval, self.pval_emp_exp, 3)
def test_fallback_to_asym_method(self):
match_text = "The asymptotic approximation will be used"
with assert_warns(UserWarning, match=match_text):
statistic, pval, _ = ddm.distance_covariance_test(
self.x, self.y, method="emp", B=200
)
assert_almost_equal(statistic, self.test_stat_emp_exp, 0)
assert_almost_equal(pval, self.pval_asym_exp, 3)
def test_statistics_for_2d_input(self):
stats = ddm.distance_statistics(self.x, self.y)
assert_almost_equal(stats.test_statistic, self.test_stat_emp_exp, 0)
assert_almost_equal(stats.distance_correlation, self.dcor_exp, 4)
assert_almost_equal(stats.distance_covariance, self.dcov_exp, 4)
assert_almost_equal(stats.dvar_x, self.dvar_x_exp, 4)
assert_almost_equal(stats.dvar_y, self.dvar_y_exp, 4)
assert_almost_equal(stats.S, self.S_exp, 4)
def test_statistics_for_1d_input(self):
x = np.array(range(1, 21))
y = x + np.log(x)
stats = ddm.distance_statistics(x, y)
# Values were obtained using the R `energy` package
assert_almost_equal(stats.test_statistic, 398.94623, 5)
assert_almost_equal(stats.distance_correlation, 0.9996107, 4)
assert_almost_equal(stats.distance_covariance, 4.4662414, 4)
assert_almost_equal(stats.dvar_x, 4.2294799, 4)
assert_almost_equal(stats.dvar_y, 4.7199304, 4)
assert_almost_equal(stats.S, 49.880200, 4)
def test_results_on_the_iris_dataset(self):
"""
R code example from the `energy` package documentation for
`energy::distance_covariance.test`:
> x <- iris[1:50, 1:4]
> y <- iris[51:100, 1:4]
> set.seed(1)
> dcov.test(x, y, R=200)
dCov independence test (permutation test)
data: index 1, replicates 200
nV^2 = 0.5254, p-value = 0.9552
sample estimates:
dCov
0.1025087
"""
iris = get_rdataset("iris").data.values[:, :4]
x = iris[:50]
y = iris[50:100]
stats = ddm.distance_statistics(x, y)
assert_almost_equal(stats.test_statistic, 0.5254, 4)
assert_almost_equal(stats.distance_correlation, 0.3060479, 4)
assert_almost_equal(stats.distance_covariance, 0.1025087, 4)
assert_almost_equal(stats.dvar_x, 0.2712927, 4)
assert_almost_equal(stats.dvar_y, 0.4135274, 4)
assert_almost_equal(stats.S, 0.667456, 4)
test_statistic, _, method = ddm.distance_covariance_test(x, y, B=199)
assert_almost_equal(test_statistic, 0.5254, 4)
assert method == "emp"
def test_results_on_the_quakes_dataset(self):
"""
R code:
------
> data("quakes")
> x = quakes[1:50, 1:3]
> y = quakes[51:100, 1:3]
> dcov.test(x, y, R=200)
dCov independence test (permutation test)
data: index 1, replicates 200
nV^2 = 45046, p-value = 0.4577
sample estimates:
dCov
30.01526
"""
quakes = get_rdataset("quakes").data.values[:, :3]
x = quakes[:50]
y = quakes[50:100]
stats = ddm.distance_statistics(x, y)
assert_almost_equal(np.round(stats.test_statistic), 45046, 0)
assert_almost_equal(stats.distance_correlation, 0.1894193, 4)
assert_almost_equal(stats.distance_covariance, 30.01526, 4)
assert_almost_equal(stats.dvar_x, 170.1702, 4)
assert_almost_equal(stats.dvar_y, 147.5545, 4)
assert_almost_equal(stats.S, 52265, 0)
test_statistic, _, method = ddm.distance_covariance_test(x, y, B=199)
assert_almost_equal(np.round(test_statistic), 45046, 0)
assert method == "emp"
def test_dcor(self):
assert_almost_equal(ddm.distance_correlation(self.x, self.y),
self.dcor_exp, 4)
def test_dcov(self):
assert_almost_equal(ddm.distance_covariance(self.x, self.y),
self.dcov_exp, 4)
def test_dvar(self):
assert_almost_equal(ddm.distance_variance(self.x),
self.dvar_x_exp, 4)
| 33.834171 | 77 | 0.630031 |
783b4d2ad3b24294c3e9abe21cc19418f0575aac | 560 | py | Python | BiLSTM/remove_symbols_from_dataset.py | cetinsamet/named-entity-recognition-turkish | 63cf2ccaa92fdeeb3177f5359464efd098adaa28 | ["MIT"] | null | null | null | BiLSTM/remove_symbols_from_dataset.py | cetinsamet/named-entity-recognition-turkish | 63cf2ccaa92fdeeb3177f5359464efd098adaa28 | ["MIT"] | 1 | 2019-09-09T11:50:57.000Z | 2019-09-10T15:02:46.000Z | BiLSTM/remove_symbols_from_dataset.py | cetinsamet/named-entity-recognition-turkish | 63cf2ccaa92fdeeb3177f5359464efd098adaa28 | ["MIT"] | null | null | null |
import sys
import numpy as np
# data loading
black_list = ['"', '.', ',', '/', '\\', ':', ';', "'", '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-', '(', ')', '%']
raw_txt_file = open('NER_Dataset/train_bio.txt', 'r', encoding="utf8").readlines()
file = open('NER_Dataset/train_bio_new.txt', "w", encoding="utf8")
for line in raw_txt_file:
flag = 0
for item in black_list:
segment = line.split('\t')[0]
if item in segment:
flag = 1
break
if flag == 0:
file.write(line )
file.close()
| 26.666667 | 124 | 0.501786 |
a6229355bfff56279f1b216fd50dbe3f3476d67d | 1,945 | py | Python | doc/02_extract_and_concat.py | alleyway/gamma-ray | 8ed388b295d3bb37a1c190a406967d344d215456 | ["0BSD"] | null | null | null | doc/02_extract_and_concat.py | alleyway/gamma-ray | 8ed388b295d3bb37a1c190a406967d344d215456 | ["0BSD"] | null | null | null | doc/02_extract_and_concat.py | alleyway/gamma-ray | 8ed388b295d3bb37a1c190a406967d344d215456 | ["0BSD"] | 1 | 2021-04-23T15:23:58.000Z | 2021-04-23T15:23:58.000Z |
#!/usr/bin/env python3
# Description:
# After downloading the raw data from BitMex, this script
# separates the data into separate files (one for each symbol) and removes the "symbol" column from the data
# By nature, it performs two sanity checks:
# 1) that the data downloaded is continuous (there isn't a missing file)
# 2) that the csv data can actually be read (by Pandas)
import glob
import re
import datetime
import pandas as pd
from datetime import timedelta, date
DATE_FORMAT = "%Y-%m-%dD%H:%M:%S.%f000"
def dateparse(time_in_str: str):
return datetime.datetime.strptime(time_in_str, DATE_FORMAT)
def process(file: str, quote_or_trade: str):
print(quote_or_trade)
mode = "a"
header = False
if x == 0:
mode = "w"
header = True
df = pd.read_csv(f'data/{quote_or_trade}/{file}', index_col='timestamp', parse_dates=True, date_parser=dateparse)
symbols = df.symbol.unique()
# df.drop(columns=["symbol"], inplace=True)
for symbol in symbols:
# print(f'Extracting {symbol}')
df[df['symbol'] == symbol].drop(columns=["symbol"]).to_csv(f'data/{symbol}_{quote_or_trade}.csv', mode=mode,
header=header, date_format=DATE_FORMAT)
file_list = sorted(glob.glob("data/*/*.csv.gz"))
first = re.search(f'\d+', file_list[0]).group() # gives us something like "20210208" to be parsed
last = re.search(f'\d+', file_list[len(file_list) - 1]).group()
start_date = datetime.datetime.strptime(first, '%Y%m%d')
end_date = datetime.datetime.strptime(last, '%Y%m%d')
days_diff = (end_date - start_date).days
print(f'Extracting and concat .gz: {first} to {last}')
for x in range(days_diff + 1):
print(f'{x + 1} of {days_diff + 1}')
target_date = start_date + timedelta(days=x)
filename = target_date.strftime('%Y%m%d') + ".csv.gz"
process(filename, "quote")
process(filename, "trade")
| 34.122807 | 117 | 0.660154 |
de5806abf443c74c662a66b7b6984c7cd0cb068e | 379 | py | Python | t_10_brief_tour_of_the_standard_library/t_10_6_mathematics/main.py | naokiur/Python-tutorial | 7b03dc8fd2e5992859fde00bfe2873b4fb7ca5e5 | ["Apache-2.0"] | null | null | null | t_10_brief_tour_of_the_standard_library/t_10_6_mathematics/main.py | naokiur/Python-tutorial | 7b03dc8fd2e5992859fde00bfe2873b4fb7ca5e5 | ["Apache-2.0"] | null | null | null | t_10_brief_tour_of_the_standard_library/t_10_6_mathematics/main.py | naokiur/Python-tutorial | 7b03dc8fd2e5992859fde00bfe2873b4fb7ca5e5 | ["Apache-2.0"] | null | null | null |
import math
import random
import statistics
print(math.cos(math.pi / 4))
print(math.log(1024, 2))
print(random.choice(['apple', 'pear', 'banana']))
print(random.sample(range(100), 10))
print(random.random())
print(random.randrange(6))
data = [2.75, 1.75, 1.25, 0.25, 0.5, 1.25, 3.5]
print(statistics.mean(data))
print(statistics.median(data))
print(statistics.variance(data))
| 22.294118 | 49 | 0.709763 |
d82c02cc5ca5f89c26dfdb53a8acd035dad962a6 | 4,353 | py | Python | ch09_WebCS/crawl.py | Nickhool/core-python | 324d6aabff5ccadb490d228c6437a203612d93e2 | ["MIT"] | 1 | 2019-07-25T02:36:11.000Z | 2019-07-25T02:36:11.000Z | ch09_WebCS/crawl.py | Nickhool/core-python | 324d6aabff5ccadb490d228c6437a203612d93e2 | ["MIT"] | null | null | null | ch09_WebCS/crawl.py | Nickhool/core-python | 324d6aabff5ccadb490d228c6437a203612d93e2 | ["MIT"] | null | null | null |
__Author__ = "noduez"
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/7/9 3:13 PM
# @File : crawl.py  Web crawler
# @Software: PyCharm
from io import StringIO
from formatter import AbstractFormatter, DumbWriter
import html
from html.parser import HTMLParser
import http.client
import os
import sys
import urllib.request
import urllib.parse
class MyHTMLParser(HTMLParser):
def handle_starttag(self, tag, attrs):
print("Encountered a start tag:", tag)
def handle_endtag(self, tag):
print("Encountered an end tag :", tag)
def handle_data(self, data):
print("Encountered some data :", data)
class Retriever(object):
__slots__ = ('url', 'file')
def __init__(self, url):
self.url, self.file = self.getfile(url)
def getfile(self, url, default='index.html'):
'Create usable local filename from URL'
parsed = urllib.parse.urlparse(url)
host = parsed.netloc.split('@')[-1].split(':')[0]
filepath = '%s%s' % (host, parsed.path)
if not os.path.splitext(parsed.path)[1]:
filepath = os.path.join(filepath, default)
linkdir = os.path.dirname(filepath)
if not os.path.isdir(linkdir):
if os.path.exists(linkdir):
os.unlink(linkdir)
os.makedirs(linkdir)
return url, filepath
def download(self):
        'Download URL to specific named file'
try:
retval = urllib.request.urlretrieve(self.url, self.file)
except (IOError, http.client.InvalidURL) as e:
retval = (('*** ERROR: bad URL "%s":%s' % (self.url, e)),)
return retval
def parse_links(self):
'Parse out the links found in downloaded HTML file'
f = open(self.file, 'r')
data = f.read()
# print(data)
f.close()
# pa = HTMLParser(AbstractFormatter(DumbWriter(StringIO())))
pa = HTMLParser()
pa.feed(data)
pa.close()
return pa.rawdata
class Crawler(object):
count = 0
def __init__(self, url):
self.q = [url]
self.seen = set()
parsed = urllib.parse.urlparse(url)
host = parsed.netloc.split('@')[-1].split(':')[0]
self.dom = '.'.join(host.split('.')[:-2])
def get_page(self, url, media=False):
'Download page & parse links, add to queue if nec'
r = Retriever(url)
fname = r.download()[0]
if fname[0] == '*':
print(fname, '...skipping parse')
return
Crawler.count += 1
print('\n(', Crawler.count, ')')
print('URL:', url)
print('FILE:', fname)
self.seen.add(url)
ftype = os.path.splitext(fname)[1]
if ftype not in ('.htm', '.html'):
return
for link in r.parse_links():
if link.startswith('mailto:'):
print('... discarded, mailto link')
continue
if not media:
ftype = os.path.splitext(link)[1]
if ftype not in ('.mp3','.mp4','.m4v','.wav'):
print('... discarded, media file')
continue
if not link.startswith('http://'):
link = urllib.parse.urljoin(url, link)
print('*', link)
if link not in self.seen:
if self.dom not in link:
print('...discarded, not in domain')
else:
if link not in self.q:
self.q.append(link)
print('... new, added to Q')
else:
print('... discarded, already processed')
else:
print('... discarded, already processed')
def go(self, media=False):
'Process next page in queue (if any)'
while self.q:
url = self.q.pop()
self.get_page(url, media)
def main():
if len(sys.argv) > 1:
url = sys.argv[1]
else:
try:
            url = input('Enter starting URL: ')
except (KeyboardInterrupt, EOFError):
url = ''
if not url:
return
if not url.startswith('http://') and\
not url.startswith('ftp://'):
url = 'http://%s/' % url
robot = Crawler(url)
robot.go()
if __name__ == '__main__':
main()
| 31.092857 | 70 | 0.52952 |
abee35a9f0d0c0a25133d096d1bf4b6abc050c20 | 469 | py | Python | 30. Else and If/ex30.py | vishalnarnaware/Learn-PYTHON-the-HARD-WAY | 392bae04c686c4a1076144f5dd295c7533e71163 | ["MIT"] | null | null | null | 30. Else and If/ex30.py | vishalnarnaware/Learn-PYTHON-the-HARD-WAY | 392bae04c686c4a1076144f5dd295c7533e71163 | ["MIT"] | null | null | null | 30. Else and If/ex30.py | vishalnarnaware/Learn-PYTHON-the-HARD-WAY | 392bae04c686c4a1076144f5dd295c7533e71163 | ["MIT"] | null | null | null |
people = 30
cars = 40
buses = 15
if cars > people:
print("We should take the cars.")
elif cars < people:
print("We should not take the cars.")
else:
print("We can't decide.")
if buses > cars:
print("That's too many buses.")
elif buses < cars:
print("Maybe we could take the buses.")
else:
print("We still can't decide.")
if people > buses:
print("Alright, let's just take the buses.")
else:
print("Fine, let's just stay home then.")
| 19.541667 | 48 | 0.635394 |
09c0be87c9de528eb7deb99df1acbf30429209cf | 23,169 | py | Python | mavsdk/action_server_pb2_grpc.py | thomas-watters-skydio/MAVSDK-Python | e0f9db072e802a06a792a4ed6c64ce75f900167f | ["BSD-3-Clause"] | null | null | null | mavsdk/action_server_pb2_grpc.py | thomas-watters-skydio/MAVSDK-Python | e0f9db072e802a06a792a4ed6c64ce75f900167f | ["BSD-3-Clause"] | null | null | null | mavsdk/action_server_pb2_grpc.py | thomas-watters-skydio/MAVSDK-Python | e0f9db072e802a06a792a4ed6c64ce75f900167f | ["BSD-3-Clause"] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from . import action_server_pb2 as action__server_dot_action__server__pb2
class ActionServerServiceStub(object):
"""Provide vehicle actions (as a server) such as arming, taking off, and landing.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.SubscribeArmDisarm = channel.unary_stream(
"/mavsdk.rpc.action_server.ActionServerService/SubscribeArmDisarm",
request_serializer=action__server_dot_action__server__pb2.SubscribeArmDisarmRequest.SerializeToString,
response_deserializer=action__server_dot_action__server__pb2.ArmDisarmResponse.FromString,
)
self.SubscribeFlightModeChange = channel.unary_stream(
"/mavsdk.rpc.action_server.ActionServerService/SubscribeFlightModeChange",
request_serializer=action__server_dot_action__server__pb2.SubscribeFlightModeChangeRequest.SerializeToString,
response_deserializer=action__server_dot_action__server__pb2.FlightModeChangeResponse.FromString,
)
self.SubscribeTakeoff = channel.unary_stream(
"/mavsdk.rpc.action_server.ActionServerService/SubscribeTakeoff",
request_serializer=action__server_dot_action__server__pb2.SubscribeTakeoffRequest.SerializeToString,
response_deserializer=action__server_dot_action__server__pb2.TakeoffResponse.FromString,
)
self.SubscribeLand = channel.unary_stream(
"/mavsdk.rpc.action_server.ActionServerService/SubscribeLand",
request_serializer=action__server_dot_action__server__pb2.SubscribeLandRequest.SerializeToString,
response_deserializer=action__server_dot_action__server__pb2.LandResponse.FromString,
)
self.SubscribeReboot = channel.unary_stream(
"/mavsdk.rpc.action_server.ActionServerService/SubscribeReboot",
request_serializer=action__server_dot_action__server__pb2.SubscribeRebootRequest.SerializeToString,
response_deserializer=action__server_dot_action__server__pb2.RebootResponse.FromString,
)
self.SubscribeShutdown = channel.unary_stream(
"/mavsdk.rpc.action_server.ActionServerService/SubscribeShutdown",
request_serializer=action__server_dot_action__server__pb2.SubscribeShutdownRequest.SerializeToString,
response_deserializer=action__server_dot_action__server__pb2.ShutdownResponse.FromString,
)
self.SubscribeTerminate = channel.unary_stream(
"/mavsdk.rpc.action_server.ActionServerService/SubscribeTerminate",
request_serializer=action__server_dot_action__server__pb2.SubscribeTerminateRequest.SerializeToString,
response_deserializer=action__server_dot_action__server__pb2.TerminateResponse.FromString,
)
self.SetAllowTakeoff = channel.unary_unary(
"/mavsdk.rpc.action_server.ActionServerService/SetAllowTakeoff",
request_serializer=action__server_dot_action__server__pb2.SetAllowTakeoffRequest.SerializeToString,
response_deserializer=action__server_dot_action__server__pb2.SetAllowTakeoffResponse.FromString,
)
self.SetArmable = channel.unary_unary(
"/mavsdk.rpc.action_server.ActionServerService/SetArmable",
request_serializer=action__server_dot_action__server__pb2.SetArmableRequest.SerializeToString,
response_deserializer=action__server_dot_action__server__pb2.SetArmableResponse.FromString,
)
self.SetDisarmable = channel.unary_unary(
"/mavsdk.rpc.action_server.ActionServerService/SetDisarmable",
request_serializer=action__server_dot_action__server__pb2.SetDisarmableRequest.SerializeToString,
response_deserializer=action__server_dot_action__server__pb2.SetDisarmableResponse.FromString,
)
self.SetAllowableFlightModes = channel.unary_unary(
"/mavsdk.rpc.action_server.ActionServerService/SetAllowableFlightModes",
request_serializer=action__server_dot_action__server__pb2.SetAllowableFlightModesRequest.SerializeToString,
response_deserializer=action__server_dot_action__server__pb2.SetAllowableFlightModesResponse.FromString,
)
self.GetAllowableFlightModes = channel.unary_unary(
"/mavsdk.rpc.action_server.ActionServerService/GetAllowableFlightModes",
request_serializer=action__server_dot_action__server__pb2.GetAllowableFlightModesRequest.SerializeToString,
response_deserializer=action__server_dot_action__server__pb2.GetAllowableFlightModesResponse.FromString,
)
class ActionServerServiceServicer(object):
"""Provide vehicle actions (as a server) such as arming, taking off, and landing.
"""
def SubscribeArmDisarm(self, request, context):
"""Subscribe to ARM/DISARM commands
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SubscribeFlightModeChange(self, request, context):
"""Subscribe to DO_SET_MODE
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SubscribeTakeoff(self, request, context):
"""Subscribe to takeoff command
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SubscribeLand(self, request, context):
"""Subscribe to land command
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SubscribeReboot(self, request, context):
"""Subscribe to reboot command
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SubscribeShutdown(self, request, context):
"""Subscribe to shutdown command
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SubscribeTerminate(self, request, context):
"""Subscribe to terminate command
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetAllowTakeoff(self, request, context):
"""Can the vehicle takeoff
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetArmable(self, request, context):
"""Can the vehicle arm when requested
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetDisarmable(self, request, context):
"""Can the vehicle disarm when requested
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def SetAllowableFlightModes(self, request, context):
"""Set which modes the vehicle can transition to (Manual always allowed)
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetAllowableFlightModes(self, request, context):
"""Get which modes the vehicle can transition to (Manual always allowed)
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_ActionServerServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
"SubscribeArmDisarm": grpc.unary_stream_rpc_method_handler(
servicer.SubscribeArmDisarm,
request_deserializer=action__server_dot_action__server__pb2.SubscribeArmDisarmRequest.FromString,
response_serializer=action__server_dot_action__server__pb2.ArmDisarmResponse.SerializeToString,
),
"SubscribeFlightModeChange": grpc.unary_stream_rpc_method_handler(
servicer.SubscribeFlightModeChange,
request_deserializer=action__server_dot_action__server__pb2.SubscribeFlightModeChangeRequest.FromString,
response_serializer=action__server_dot_action__server__pb2.FlightModeChangeResponse.SerializeToString,
),
"SubscribeTakeoff": grpc.unary_stream_rpc_method_handler(
servicer.SubscribeTakeoff,
request_deserializer=action__server_dot_action__server__pb2.SubscribeTakeoffRequest.FromString,
response_serializer=action__server_dot_action__server__pb2.TakeoffResponse.SerializeToString,
),
"SubscribeLand": grpc.unary_stream_rpc_method_handler(
servicer.SubscribeLand,
request_deserializer=action__server_dot_action__server__pb2.SubscribeLandRequest.FromString,
response_serializer=action__server_dot_action__server__pb2.LandResponse.SerializeToString,
),
"SubscribeReboot": grpc.unary_stream_rpc_method_handler(
servicer.SubscribeReboot,
request_deserializer=action__server_dot_action__server__pb2.SubscribeRebootRequest.FromString,
response_serializer=action__server_dot_action__server__pb2.RebootResponse.SerializeToString,
),
"SubscribeShutdown": grpc.unary_stream_rpc_method_handler(
servicer.SubscribeShutdown,
request_deserializer=action__server_dot_action__server__pb2.SubscribeShutdownRequest.FromString,
response_serializer=action__server_dot_action__server__pb2.ShutdownResponse.SerializeToString,
),
"SubscribeTerminate": grpc.unary_stream_rpc_method_handler(
servicer.SubscribeTerminate,
request_deserializer=action__server_dot_action__server__pb2.SubscribeTerminateRequest.FromString,
response_serializer=action__server_dot_action__server__pb2.TerminateResponse.SerializeToString,
),
"SetAllowTakeoff": grpc.unary_unary_rpc_method_handler(
servicer.SetAllowTakeoff,
request_deserializer=action__server_dot_action__server__pb2.SetAllowTakeoffRequest.FromString,
response_serializer=action__server_dot_action__server__pb2.SetAllowTakeoffResponse.SerializeToString,
),
"SetArmable": grpc.unary_unary_rpc_method_handler(
servicer.SetArmable,
request_deserializer=action__server_dot_action__server__pb2.SetArmableRequest.FromString,
response_serializer=action__server_dot_action__server__pb2.SetArmableResponse.SerializeToString,
),
"SetDisarmable": grpc.unary_unary_rpc_method_handler(
servicer.SetDisarmable,
request_deserializer=action__server_dot_action__server__pb2.SetDisarmableRequest.FromString,
response_serializer=action__server_dot_action__server__pb2.SetDisarmableResponse.SerializeToString,
),
"SetAllowableFlightModes": grpc.unary_unary_rpc_method_handler(
servicer.SetAllowableFlightModes,
request_deserializer=action__server_dot_action__server__pb2.SetAllowableFlightModesRequest.FromString,
response_serializer=action__server_dot_action__server__pb2.SetAllowableFlightModesResponse.SerializeToString,
),
"GetAllowableFlightModes": grpc.unary_unary_rpc_method_handler(
servicer.GetAllowableFlightModes,
request_deserializer=action__server_dot_action__server__pb2.GetAllowableFlightModesRequest.FromString,
response_serializer=action__server_dot_action__server__pb2.GetAllowableFlightModesResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"mavsdk.rpc.action_server.ActionServerService", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class ActionServerService(object):
"""Provide vehicle actions (as a server) such as arming, taking off, and landing.
"""
@staticmethod
def SubscribeArmDisarm(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_stream(
request,
target,
"/mavsdk.rpc.action_server.ActionServerService/SubscribeArmDisarm",
action__server_dot_action__server__pb2.SubscribeArmDisarmRequest.SerializeToString,
action__server_dot_action__server__pb2.ArmDisarmResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def SubscribeFlightModeChange(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_stream(
request,
target,
"/mavsdk.rpc.action_server.ActionServerService/SubscribeFlightModeChange",
action__server_dot_action__server__pb2.SubscribeFlightModeChangeRequest.SerializeToString,
action__server_dot_action__server__pb2.FlightModeChangeResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def SubscribeTakeoff(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_stream(
request,
target,
"/mavsdk.rpc.action_server.ActionServerService/SubscribeTakeoff",
action__server_dot_action__server__pb2.SubscribeTakeoffRequest.SerializeToString,
action__server_dot_action__server__pb2.TakeoffResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def SubscribeLand(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_stream(
request,
target,
"/mavsdk.rpc.action_server.ActionServerService/SubscribeLand",
action__server_dot_action__server__pb2.SubscribeLandRequest.SerializeToString,
action__server_dot_action__server__pb2.LandResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def SubscribeReboot(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_stream(
request,
target,
"/mavsdk.rpc.action_server.ActionServerService/SubscribeReboot",
action__server_dot_action__server__pb2.SubscribeRebootRequest.SerializeToString,
action__server_dot_action__server__pb2.RebootResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def SubscribeShutdown(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_stream(
request,
target,
"/mavsdk.rpc.action_server.ActionServerService/SubscribeShutdown",
action__server_dot_action__server__pb2.SubscribeShutdownRequest.SerializeToString,
action__server_dot_action__server__pb2.ShutdownResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def SubscribeTerminate(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_stream(
request,
target,
"/mavsdk.rpc.action_server.ActionServerService/SubscribeTerminate",
action__server_dot_action__server__pb2.SubscribeTerminateRequest.SerializeToString,
action__server_dot_action__server__pb2.TerminateResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def SetAllowTakeoff(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/mavsdk.rpc.action_server.ActionServerService/SetAllowTakeoff",
action__server_dot_action__server__pb2.SetAllowTakeoffRequest.SerializeToString,
action__server_dot_action__server__pb2.SetAllowTakeoffResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def SetArmable(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/mavsdk.rpc.action_server.ActionServerService/SetArmable",
action__server_dot_action__server__pb2.SetArmableRequest.SerializeToString,
action__server_dot_action__server__pb2.SetArmableResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def SetDisarmable(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/mavsdk.rpc.action_server.ActionServerService/SetDisarmable",
action__server_dot_action__server__pb2.SetDisarmableRequest.SerializeToString,
action__server_dot_action__server__pb2.SetDisarmableResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def SetAllowableFlightModes(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/mavsdk.rpc.action_server.ActionServerService/SetAllowableFlightModes",
action__server_dot_action__server__pb2.SetAllowableFlightModesRequest.SerializeToString,
action__server_dot_action__server__pb2.SetAllowableFlightModesResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def GetAllowableFlightModes(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/mavsdk.rpc.action_server.ActionServerService/GetAllowableFlightModes",
action__server_dot_action__server__pb2.GetAllowableFlightModesRequest.SerializeToString,
action__server_dot_action__server__pb2.GetAllowableFlightModesResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
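# Usage sketch (not produced by protoc): how the registration helper above is
# typically wired into a grpc server. The port is an arbitrary example value;
# subclass ActionServerServiceServicer and override the RPCs you actually need,
# since the base servicer answers every call with UNIMPLEMENTED.
def _example_serve(port=50051):
    from concurrent import futures
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    # Registering the base servicer is enough to smoke-test the wiring.
    add_ActionServerServiceServicer_to_server(ActionServerServiceServicer(), server)
    server.add_insecure_port("[::]:%d" % port)
    server.start()
    server.wait_for_termination()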
| 39.269492
| 121
| 0.679788
|
697d16cfc3d589f67c94d0f64e6fd80f088f75cc
| 474
|
py
|
Python
|
example.py
|
bahiamartins/infobip-api-python-client
|
88f044901af5690d01fcb7bce90d0ed2ab1ba4d1
|
[
"Apache-2.0"
] | null | null | null |
example.py
|
bahiamartins/infobip-api-python-client
|
88f044901af5690d01fcb7bce90d0ed2ab1ba4d1
|
[
"Apache-2.0"
] | null | null | null |
example.py
|
bahiamartins/infobip-api-python-client
|
88f044901af5690d01fcb7bce90d0ed2ab1ba4d1
|
[
"Apache-2.0"
] | 1
|
2020-04-08T19:03:26.000Z
|
2020-04-08T19:03:26.000Z
|
#!/usr/bin/env python
__author__ = 'nmaric'
import sys
import os
import glob
myfolder = os.path.dirname(__file__)
if sys.argv.__len__() < 2:
print 'Please choose example to run:'
files = glob.glob("examples/*.py")
for file_ in files:
if not file_.startswith("examples/__"):
print "* " + file_.split("/")[1].rstrip(".py")
print
exit(1)
sys.path.append(myfolder)
example = __import__("examples", globals(), locals(), [sys.argv[1]])
| 22.571429
| 68
| 0.637131
|
b80084789dc63a522f73c61018224b6d573f8e4c
| 2,304
|
py
|
Python
|
LeetCode/Python3/DFS&BFS/339. Nested List Weight Sum.py
|
WatsonWangZh/CodingPractice
|
dc057dd6ea2fc2034e14fd73e07e73e6364be2ae
|
[
"MIT"
] | 11
|
2019-09-01T22:36:00.000Z
|
2021-11-08T08:57:20.000Z
|
LeetCode/Python3/DFS&BFS/339. Nested List Weight Sum.py
|
WatsonWangZh/LeetCodePractice
|
dc057dd6ea2fc2034e14fd73e07e73e6364be2ae
|
[
"MIT"
] | null | null | null |
LeetCode/Python3/DFS&BFS/339. Nested List Weight Sum.py
|
WatsonWangZh/LeetCodePractice
|
dc057dd6ea2fc2034e14fd73e07e73e6364be2ae
|
[
"MIT"
] | 2
|
2020-05-27T14:58:52.000Z
|
2020-05-27T15:04:17.000Z
|
# Given a nested list of integers, return the sum of all integers in the list weighted by their depth.
# Each element is either an integer, or a list -- whose elements may also be integers or other lists.
# Example 1:
# Input: [[1,1],2,[1,1]]
# Output: 10
# Explanation: Four 1's at depth 2, one 2 at depth 1.
# Example 2:
# Input: [1,[4,[6]]]
# Output: 27
# Explanation: One 1 at depth 1, one 4 at depth 2, and one 6 at depth 3; 1 + 4*2 + 6*3 = 27.
# """
# This is the interface that allows for creating nested lists.
# You should not implement it, or speculate about its implementation
# """
#class NestedInteger(object):
# def __init__(self, value=None):
# """
# If value is not specified, initializes an empty list.
# Otherwise initializes a single integer equal to value.
# """
#
# def isInteger(self):
# """
# @return True if this NestedInteger holds a single integer, rather than a nested list.
# :rtype bool
# """
#
# def add(self, elem):
# """
# Set this NestedInteger to hold a nested list and adds a nested integer elem to it.
# :rtype void
# """
#
# def setInteger(self, value):
# """
# Set this NestedInteger to hold a single integer equal to value.
# :rtype void
# """
#
# def getInteger(self):
# """
# @return the single integer that this NestedInteger holds, if it holds a single integer
# Return None if this NestedInteger holds a nested list
# :rtype int
# """
#
# def getList(self):
# """
# @return the nested list that this NestedInteger holds, if it holds a nested list
# Return None if this NestedInteger holds a single integer
# :rtype List[NestedInteger]
# """
class Solution(object):
def depthSum(self, nestedList):
"""
:type nestedList: List[NestedInteger]
:rtype: int
"""
# DFS O(n)
def helper(nestedList,depth):
res = 0
for ele in nestedList:
if ele.isInteger():
res += ele.getInteger() * depth
else:
res += helper(ele.getList(), depth + 1)
return res
return helper(nestedList, 1)
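# Local self-check (not part of the solution). LeetCode supplies the real
# NestedInteger class; the small stand-in below only mimics the three methods
# depthSum relies on, so the example from the problem statement can be run.
class _FakeNestedInteger(object):
    def __init__(self, value=None):
        self._value = value
        self._list = []
    def isInteger(self):
        return self._value is not None
    def getInteger(self):
        return self._value
    def getList(self):
        return self._list
    def add(self, elem):
        self._list.append(elem)
def _build(data):
    # Wrap a plain nested python list such as [1, 1] into _FakeNestedIntegers.
    node = _FakeNestedInteger()
    for item in data:
        node.add(_FakeNestedInteger(item) if isinstance(item, int) else _build(item))
    return node
if __name__ == "__main__":
    nested = [_FakeNestedInteger(x) if isinstance(x, int) else _build(x)
              for x in [[1, 1], 2, [1, 1]]]
    print(Solution().depthSum(nested))  # expected: 10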
| 31.135135
| 102
| 0.577257
|
c5ca87d0823e3dd1caa1f8bfa7aea9e4d04b7c4d
| 1,504
|
py
|
Python
|
nlpsc/vocabulary.py
|
BSlience/nlpsc
|
3a62ed81cacc4b71e8d4d30225ccf99bba318858
|
[
"MIT"
] | 4
|
2019-05-09T10:18:46.000Z
|
2021-02-27T17:10:12.000Z
|
nlpsc/vocabulary.py
|
BSlience/nlpsc
|
3a62ed81cacc4b71e8d4d30225ccf99bba318858
|
[
"MIT"
] | null | null | null |
nlpsc/vocabulary.py
|
BSlience/nlpsc
|
3a62ed81cacc4b71e8d4d30225ccf99bba318858
|
[
"MIT"
] | null | null | null |
# encoding:utf-8
import collections
from .util.file import get_default_path
from .util.python import convert_to_unicode
class Vocabulary(object):
"""词典对象"""
def __init__(self):
        # structure: {'token': id}
self.vocab = None
        # structure: {'id': token}
self.inv_vocab = None
def load_vocab(self, vocab_file):
"""根据文件生成字典"""
vocab = collections.OrderedDict()
fin = open(vocab_file, encoding='utf-8')
for num, line in enumerate(fin):
items = convert_to_unicode(line.strip()).split("\t")
if len(items) > 2:
break
token = items[0]
index = items[1] if len(items) == 2 else num
token = token.strip()
vocab[token] = int(index)
self.vocab = vocab
self.inv_vocab = {v: k for k, v in vocab.items()}
return self
def auto_from_dataset(self, dataset: object) -> object:
"""根据当前的数据集生成"""
pass
def tokens2ids(self, tokens):
"""将token换成id"""
output = []
for token in tokens:
output.append(self.vocab[token])
return output
def ids2tokens(self, ids):
"""将id转换成token"""
output = []
for i in ids:
output.append(self.inv_vocab[i])
return output
def items(self):
for k, v in self.vocab.items():
yield k, v
def get_default_vocabulary():
return Vocabulary().load_vocab(get_default_path('ernie/vocab.txt'))
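# Usage sketch (not part of the original module): round-trip a few tokens
# through the default vocabulary. Assumes the default ERNIE vocab file exists
# and that the tokens below actually occur in it (both are assumptions).
if __name__ == '__main__':
    voc = get_default_vocabulary()
    ids = voc.tokens2ids(['[CLS]', '[SEP]'])
    print(ids)
    print(voc.ids2tokens(ids))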
| 25.491525
| 71
| 0.559176
|
79174bc3c8bac7682aed56972c57302e34fe105c
| 238
|
py
|
Python
|
one_hot_demo.py
|
keithyin/tensorflow-demos
|
e716eb1469cf8985018dc913cff91fc07fb073e9
|
[
"Apache-2.0"
] | null | null | null |
one_hot_demo.py
|
keithyin/tensorflow-demos
|
e716eb1469cf8985018dc913cff91fc07fb073e9
|
[
"Apache-2.0"
] | null | null | null |
one_hot_demo.py
|
keithyin/tensorflow-demos
|
e716eb1469cf8985018dc913cff91fc07fb073e9
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
if __name__ == '__main__':
a = tf.constant([[1], [2], [3]], dtype=tf.int64)
one_hotted = tf.one_hot(a, depth=4)
print(one_hotted.shape)
with tf.Session() as sess:
print(sess.run(one_hotted))
| 34
| 52
| 0.634454
|
010be6a36581ec412ec06080e507ed14b004832b
| 1,664
|
py
|
Python
|
profiles_api/models.py
|
Amakson/profiles-rest-api
|
a02071a1eb6588c9a964066eb40fbccfd4dc4550
|
[
"MIT"
] | null | null | null |
profiles_api/models.py
|
Amakson/profiles-rest-api
|
a02071a1eb6588c9a964066eb40fbccfd4dc4550
|
[
"MIT"
] | null | null | null |
profiles_api/models.py
|
Amakson/profiles-rest-api
|
a02071a1eb6588c9a964066eb40fbccfd4dc4550
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
class UserProfileManager(BaseUserManager):
"""Manager For user profiles"""
def create_user(self, email, name, password=None):
"""Create a new user profile"""
if not email:
raise ValueError('User must have an email address')
email = self.normalize_email(email)
user = self.model(email=email, name=name)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, name, password):
"""Create and save a new superuser with given details"""
user = self.create_user(email, name, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
"""Database model for users in the system"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserProfileManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['name']
def get_full_name(self):
"""Retrieve full name of user"""
return self.name
def get_short_name(self):
"""Retrieve short name of user"""
return self.name
def __str__(self):
""""Return string representation of our user"""
return self.email
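# Usage sketch (not part of this module): the custom manager is exercised from
# a Django shell or a test case, for example
#     user = UserProfile.objects.create_user(
#         email='test@example.com', name='Test User', password='change-me')
#     user.check_password('change-me')        # True
#     UserProfile.objects.create_superuser('admin@example.com', 'Admin', 's3cret')
# The email addresses and passwords above are placeholders.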
| 28.689655
| 68
| 0.668269
|
e116cc941769e15d470723831988ea7780b7c202
| 238
|
py
|
Python
|
{{cookiecutter.project_slug}}/tests/lib/test_resources.py
|
thecoderstudio/cookiecutter-pyramid-api
|
b3122c0d98be7179bcf726c6527096c0327d7bb7
|
[
"MIT"
] | null | null | null |
{{cookiecutter.project_slug}}/tests/lib/test_resources.py
|
thecoderstudio/cookiecutter-pyramid-api
|
b3122c0d98be7179bcf726c6527096c0327d7bb7
|
[
"MIT"
] | 1
|
2021-12-17T15:10:21.000Z
|
2021-12-17T15:10:21.000Z
|
{{cookiecutter.project_slug}}/tests/lib/test_resources.py
|
thecoderstudio/cookiecutter-pyramid-api
|
b3122c0d98be7179bcf726c6527096c0327d7bb7
|
[
"MIT"
] | 1
|
2021-03-01T14:27:10.000Z
|
2021-03-01T14:27:10.000Z
|
from {{cookiecutter.project_slug}}.lib.resources import LocationAwareResource
def test_location_aware_resource_name():
class Sample(LocationAwareResource):
pass
sample = Sample()
assert sample.__name__ == 'Sample'
| 21.636364
| 77
| 0.743697
|
a38ca6bff4a91432a3d76b1e07d625b2429f4259
| 2,994
|
py
|
Python
|
Gds/src/fprime_gds/common/loaders/ch_py_loader.py
|
hunterpaulson/fprime
|
70560897b56dc3037dc966c99751b708b1cc8a05
|
[
"Apache-2.0"
] | null | null | null |
Gds/src/fprime_gds/common/loaders/ch_py_loader.py
|
hunterpaulson/fprime
|
70560897b56dc3037dc966c99751b708b1cc8a05
|
[
"Apache-2.0"
] | 5
|
2020-07-13T16:56:33.000Z
|
2020-07-23T20:38:13.000Z
|
Gds/src/fprime_gds/common/loaders/ch_py_loader.py
|
hunterpaulson/lgtm-fprime
|
9eeda383c263ecba8da8188a45e1d020107ff323
|
[
"Apache-2.0"
] | null | null | null |
"""
@brief Loader class for importing python based channel dictionaries
@date Created July 11, 2018
@author R. Joseph Paetz
@bug No known bugs
"""
from __future__ import absolute_import
# Custom Python Modules
from .python_loader import PythonLoader
from fprime_gds.common.templates.ch_template import ChTemplate
class ChPyLoader(PythonLoader):
"""Class to load python based telemetry channel dictionaries"""
# Field names in the python module files (used to construct dictionaries)
ID_FIELD = "ID"
NAME_FIELD = "NAME"
COMP_FIELD = "COMPONENT"
DESC_FIELD = "CHANNEL_DESCRIPTION"
TYPE_FIELD = "TYPE"
FMT_STR_FIELD = "FORMAT_STRING"
LOW_R_FIELD = "LOW_RED"
LOW_O_FIELD = "LOW_ORANGE"
LOW_Y_FIELD = "LOW_YELLOW"
HIGH_Y_FIELD = "HIGH_YELLOW"
HIGH_O_FIELD = "HIGH_ORANGE"
HIGH_R_FIELD = "HIGH_RED"
def __init__(self):
"""
Constructor
Returns:
An initialized loader object
"""
super(ChPyLoader, self).__init__()
def construct_dicts(self, path):
"""
Constructs and returns python dictionaries keyed on id and name
This function should not be called directly, instead, use
get_id_dict(path) and get_name_dict(path)
Args:
path: Path to the python module file dictionary to convert. This
should be a directory. If using a regular fprime deployment,
                this should be a path to the channels dictionary in your
generated folder:
${GENERATED_FOLDER_LOCATION}/generated/${DEPLOYMENT}/channels
Returns:
A tuple with two channel dictionaries (python type dict):
(id_dict, name_dict). The keys should be the channels' id and
name fields respectively and the values should be ChTemplate
objects.
"""
# TODO currently, we are always using the superpkg when importing, is this OK?
# We do need it sometimes, so if we don't always set it to true, we will need to pass an arg
module_dicts = self.read_dict(path, use_superpkg=True)
id_dict = dict()
name_dict = dict()
for ch_dict in module_dicts:
# Create a channel template object
ch_temp = ChTemplate(
ch_dict[self.ID_FIELD],
ch_dict[self.NAME_FIELD],
ch_dict[self.COMP_FIELD],
ch_dict[self.TYPE_FIELD],
ch_dict[self.FMT_STR_FIELD],
ch_dict[self.DESC_FIELD],
ch_dict[self.LOW_R_FIELD],
ch_dict[self.LOW_O_FIELD],
ch_dict[self.LOW_Y_FIELD],
ch_dict[self.HIGH_Y_FIELD],
ch_dict[self.HIGH_O_FIELD],
ch_dict[self.HIGH_R_FIELD],
)
id_dict[ch_dict[self.ID_FIELD]] = ch_temp
name_dict[ch_dict[self.NAME_FIELD]] = ch_temp
return (id_dict, name_dict)
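# Usage sketch (not part of the original file): get_id_dict/get_name_dict are
# the entry points the docstring above recommends; the dictionary path below is
# a placeholder for a generated deployment.
if __name__ == "__main__":
    loader = ChPyLoader()
    ch_path = "generated/MyDeployment/channels"  # hypothetical path
    id_dict = loader.get_id_dict(ch_path)
    name_dict = loader.get_name_dict(ch_path)
    print("loaded %d channels" % len(id_dict))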
| 33.266667
| 100
| 0.625251
|
de4e2cbe2e0bf2f561464ccf027b2894a0d451bd
| 1,331
|
py
|
Python
|
lib_perspective.py
|
nguyenrobot/lane_detection_advanced_sliding_windows
|
c20076b42511294c9913c5baa041c90b1595b85b
|
[
"MIT"
] | 7
|
2020-11-17T17:36:26.000Z
|
2021-04-03T15:30:25.000Z
|
lib_perspective.py
|
v-thiennp12/lane_detection_advanced_sliding_windows
|
c20076b42511294c9913c5baa041c90b1595b85b
|
[
"MIT"
] | 1
|
2021-10-06T02:19:41.000Z
|
2022-03-16T08:03:01.000Z
|
lib_perspective.py
|
v-thiennp12/lane_detection_advanced_sliding_windows
|
c20076b42511294c9913c5baa041c90b1595b85b
|
[
"MIT"
] | 3
|
2020-11-17T17:36:28.000Z
|
2021-04-03T15:30:26.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 2 13:58:05 2020
@author:
Author : nguyenrobot
Copyright : nguyenrobot
https://github.com/nguyenrobot
https://www.nguyenrobot.com
"""
import cv2
import numpy
class birdseye:
def __init__(self, points):
self.source_points = points['source_points']
self.destination_points = points['destination_points']
self.birdeye_matrix = cv2.getPerspectiveTransform(self.source_points, self.destination_points)
self.inv_birdeye_matrix = cv2.getPerspectiveTransform(self.destination_points, self.source_points)
def apply_skyview(self, frame_camview):
"""apply birdseye view transform"""
shape = (frame_camview.shape[1], frame_camview.shape[0])
frame_skyview = cv2.warpPerspective(frame_camview, self.birdeye_matrix, (shape)) #, flags = cv2.INTER_LINEAR
self.frame_skyview = frame_skyview
return frame_skyview
def apply_vehicleview(self, frame_skyview):
"""apply reversed birdseye view transform to get back to camera view"""
shape = (frame_skyview.shape[1], frame_skyview.shape[0])
frame_camview = cv2.warpPerspective(frame_skyview, self.inv_birdeye_matrix, shape)
self.frame_camview = frame_camview
return frame_camview
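# Usage sketch (not part of the original module). The source/destination points
# are made-up values for a 1280x720 frame (real projects calibrate them for
# their own camera), and 'road.jpg' is a placeholder input image.
if __name__ == "__main__":
    points = {'source_points': numpy.float32([[580, 460], [700, 460], [1040, 680], [240, 680]]),
              'destination_points': numpy.float32([[260, 0], [1020, 0], [1020, 720], [260, 720]])}
    view = birdseye(points)
    frame_camview = cv2.imread('road.jpg')
    frame_skyview = view.apply_skyview(frame_camview)
    frame_restored = view.apply_vehicleview(frame_skyview)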
| 39.147059
| 118
| 0.694966
|
81cff1c53fdc83c55b489f249b76c56287b682ec
| 6,741
|
py
|
Python
|
bindings/python/ensmallen_graph/datasets/string/desulfitobacteriumhafniense.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/desulfitobacteriumhafniense.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/desulfitobacteriumhafniense.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
"""
This file offers the methods to automatically retrieve the graph Desulfitobacterium hafniense.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 19:50:50.244418
The undirected graph Desulfitobacterium hafniense has 5004 nodes and 608632
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.04862 and has 14 connected components, where the component
with most nodes has 4974 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 217, the mean node degree is 243.26,
and the node degree mode is 1. The top 5 most central nodes are 138119.DSY0115
(degree 1662), 138119.DSY4888 (degree 1657), 138119.DSY1340 (degree 1636),
138119.DSY2291 (degree 1468) and 138119.DSY2685 (degree 1397).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import DesulfitobacteriumHafniense
# Then load the graph
graph = DesulfitobacteriumHafniense()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
        # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def DesulfitobacteriumHafniense(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Desulfitobacterium hafniense graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
        Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
    Instance of Desulfitobacterium hafniense graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 19:50:50.244418
The undirected graph Desulfitobacterium hafniense has 5004 nodes and 608632
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.04862 and has 14 connected components, where the component
with most nodes has 4974 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 217, the mean node degree is 243.26,
and the node degree mode is 1. The top 5 most central nodes are 138119.DSY0115
(degree 1662), 138119.DSY4888 (degree 1657), 138119.DSY1340 (degree 1636),
138119.DSY2291 (degree 1468) and 138119.DSY2685 (degree 1397).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import DesulfitobacteriumHafniense
# Then load the graph
graph = DesulfitobacteriumHafniense()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
        # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Wether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="DesulfitobacteriumHafniense",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 35.666667
| 223
| 0.706127
|
3d9cb5615c2106e647c4a225efa19c2b0156f924
| 9,475
|
py
|
Python
|
source/tests/py_tests/constexpr_functions_errors_test.py
|
Panzerschrek/U-00DC-Sprache
|
eb677a66d178985433a62eb6b8a50ce2cdb14b1a
|
[
"BSD-3-Clause"
] | 45
|
2016-06-21T22:28:43.000Z
|
2022-03-26T12:21:46.000Z
|
source/tests/py_tests/constexpr_functions_errors_test.py
|
Panzerschrek/U-00DC-Sprache
|
eb677a66d178985433a62eb6b8a50ce2cdb14b1a
|
[
"BSD-3-Clause"
] | 6
|
2020-07-12T18:00:10.000Z
|
2021-11-30T11:20:14.000Z
|
source/tests/py_tests/constexpr_functions_errors_test.py
|
Panzerschrek/U-00DC-Sprache
|
eb677a66d178985433a62eb6b8a50ce2cdb14b1a
|
[
"BSD-3-Clause"
] | 5
|
2019-09-03T17:20:34.000Z
|
2022-01-30T15:10:21.000Z
|
from py_tests_common import *
def ConstexprHalt_Test0():
c_program_text= """
fn constexpr Foo( i32 x ) : i32
{
halt if( (x&1) == 0 );
return x | 0xFF;
}
fn Baz(){ Foo( 84 ); }
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "ConstexprFunctionEvaluationError" )
assert( errors_list[0].src_loc.line == 8 )
def ConstexprFunctionEvaluationError_Test1():
c_program_text= """
type fn_ptr= fn();
struct S{ fn_ptr ptr; }
fn constexpr Foo( S s ) : i32
{
return 0;
}
fn Bar(){}
var S constexpr s{ .ptr(Bar) };
fn Baz(){ Foo( s ); } // Passing function pointer to constexpr function.
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "ConstexprFunctionEvaluationError" )
assert( errors_list[0].src_loc.line == 11 )
def ConstexprFunctionEvaluationError_Test3():
c_program_text= """
fn constexpr Count( u32 x ) : u32
{
if( x == 0u ) { return 0u; }
return 1u + Count( x - 1u );
}
fn Foo()
{
Count(16u * 65536u); // Recursive call depth here is too big.
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "ConstexprFunctionEvaluationError" )
assert( errors_list[0].src_loc.line == 9 )
def ConstexprFunctionEvaluationError_Test4():
c_program_text= """
fn constexpr Bar( u32 x ) : u32
{
var [ u8, 1024u * 1024u * 80u ] imut bytes= zero_init; // Allocating too big chunk of memory on stack.
return x;
}
fn Foo(){ Bar(0u); }
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "ConstexprFunctionEvaluationError" )
assert( errors_list[0].src_loc.line == 7 )
def ConstexprFunctionsMustHaveBody_Test0():
c_program_text= """
fn constexpr Foo();
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "ConstexprFunctionsMustHaveBody" )
assert( errors_list[0].src_loc.line == 2 )
def ConstexprFunctionsMustHaveBody_Test1():
c_program_text= """
struct S
{
fn constexpr Foo();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "ConstexprFunctionsMustHaveBody" )
assert( errors_list[0].src_loc.line == 4 )
def ConstexprFunctionCanNotBeVirtual_Test0():
c_program_text= """
class S polymorph
{
fn virtual constexpr Foo(){}
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( HaveError( errors_list, "ConstexprFunctionCanNotBeVirtual", 4 ) )
def ConstexprFunctionCanNotBeVirtual_Test1():
c_program_text= """
class S polymorph
{
op virtual constexpr []( this, u32 x ){}
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( HaveError( errors_list, "ConstexprFunctionCanNotBeVirtual", 4 ) )
def InvalidTypeForConstexprFunction_Test1():
c_program_text= """
struct S{ i32& mut r; } // Struct is not constexpr.
// Argument type is not constexpr.
fn constexpr Foo( S& x ){}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "InvalidTypeForConstexprFunction" )
assert( errors_list[0].src_loc.line == 4 )
def InvalidTypeForConstexprFunction_Test2():
c_program_text= """
// Unsafe function can not be constexpr.
fn constexpr Foo() unsafe {}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "InvalidTypeForConstexprFunction" )
assert( errors_list[0].src_loc.line == 3 )
def InvalidTypeForConstexprFunction_Test3():
c_program_text= """
// Function with function pointer in signature can not be constexpr.
type fn_ptr= (fn());
fn constexpr Foo( fn_ptr ptr ) {}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "InvalidTypeForConstexprFunction" )
assert( errors_list[0].src_loc.line == 4 )
def InvalidTypeForConstexprFunction_Test5():
c_program_text= """
struct S
{
fn destructor(){}
        } // Struct is not constexpr, because it has an explicit destructor.
        // A function returning a non-constexpr result can not be constexpr.
fn constexpr Foo() : S { return S(); }
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "InvalidTypeForConstexprFunction" )
assert( errors_list[0].src_loc.line == 7 )
def ConstexprFunctionContainsUnallowedOperations_Test0():
c_program_text= """
// Unsafe blocks not allowed in constexpr functions.
fn constexpr Foo() { unsafe{} }
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "ConstexprFunctionContainsUnallowedOperations" )
assert( errors_list[0].src_loc.line == 3 )
def ConstexprFunctionContainsUnallowedOperations_Test1():
c_program_text= """
fn NonConstexprFunction(){}
// Calling non-constexpr functions in constexpr function not allowed.
fn constexpr Foo() { NonConstexprFunction(); }
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "ConstexprFunctionContainsUnallowedOperations" )
assert( errors_list[0].src_loc.line == 4 )
def ConstexprFunctionContainsUnallowedOperations_Test2():
c_program_text= """
fn constexpr Foo()
{
var (fn()) ptr= Foo;
ptr(); // Calling function pointers not allowed in constexpr functions.
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "ConstexprFunctionContainsUnallowedOperations" )
assert( errors_list[0].src_loc.line == 2 )
def ConstexprFunctionContainsUnallowedOperations_Test3():
c_program_text= """
struct S{ fn destructor(){} }
fn constexpr Foo()
{
            var S s{}; // Declaring a variable with a non-constexpr type is not allowed in constexpr functions.
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "ConstexprFunctionContainsUnallowedOperations" )
assert( errors_list[0].src_loc.line == 3 )
def ConstexprFunctionContainsUnallowedOperations_Test4():
c_program_text= """
struct S{ fn destructor(){} }
fn constexpr Foo()
{
            auto s= S(); // Declaring an auto variable with a non-constexpr type is not allowed in constexpr functions.
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "ConstexprFunctionContainsUnallowedOperations" )
assert( errors_list[0].src_loc.line == 3 )
def ConstexprFunctionContainsUnallowedOperations_Test5():
c_program_text= """
struct S
{
op++( mut this ){}
}
fn constexpr Foo()
{
var S mut s{};
++s; // Calling non-constexpr overloaded operator in constexpr functions not allowed.
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "ConstexprFunctionContainsUnallowedOperations" )
assert( errors_list[0].src_loc.line == 6 )
def ConstexprFunctionContainsUnallowedOperations_Test6():
c_program_text= """
struct S
{
op+=( mut this, S& other ){}
}
fn constexpr Foo()
{
var S mut a{}, mut b{};
a+= b; // Calling non-constexpr overloaded operator in constexpr functions not allowed.
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "ConstexprFunctionContainsUnallowedOperations" )
assert( errors_list[0].src_loc.line == 6 )
def ConstexprFunctionContainsUnallowedOperations_Test7():
c_program_text= """
struct S
{
op()( this ){}
}
fn constexpr Foo()
{
var S s{};
s(); // Calling non-constexpr overloaded postfix operator in constexpr functions not allowed.
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "ConstexprFunctionContainsUnallowedOperations" )
assert( errors_list[0].src_loc.line == 6 )
def ConstexprFunctionContainsUnallowedOperations_Test8():
c_program_text= """
struct S
{
op~( S a ) : S { return S(); }
}
fn constexpr Foo()
{
var S s{};
~s; // Calling non-constexpr overloaded unary prefix operator in constexpr functions not allowed.
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "ConstexprFunctionContainsUnallowedOperations" )
assert( errors_list[0].src_loc.line == 6 )
| 31.374172
| 107
| 0.726332
|
894fc9d0623fe21647f9686d18093bd98a7f387d
| 650
|
py
|
Python
|
src/B2/__main__.py
|
Sidesplitter/Informatica-Olympiade-2016-2017
|
83704415c6c2febdcbb71116f86619950c8c1e6c
|
[
"MIT"
] | null | null | null |
src/B2/__main__.py
|
Sidesplitter/Informatica-Olympiade-2016-2017
|
83704415c6c2febdcbb71116f86619950c8c1e6c
|
[
"MIT"
] | null | null | null |
src/B2/__main__.py
|
Sidesplitter/Informatica-Olympiade-2016-2017
|
83704415c6c2febdcbb71116f86619950c8c1e6c
|
[
"MIT"
] | null | null | null |
from B2.pathfinder import PathFinder
from B2.world import World
def main():
world = World('world.yml')
path_finder = PathFinder((0, 0), (world.height - 1, world.width - 1), world)
path = path_finder.get_path()
total = sum(map(lambda coordinates: world.get_value(coordinates), path))
readable_coordinates = list(map(lambda coordinates: (coordinates[1], coordinates[0]), path))
print("Path that was taken (X, Y): {}".format(readable_coordinates))
print("Total amount of streets passed: {}".format(int((len(path) - 1) / 2)))
print("Maximum amount of houses: {}".format(total))
if __name__ == '__main__':
main()
| 32.5
| 96
| 0.675385
|
9bae6be35815f160677cfff480da8fcc83bde5b4
| 6,827
|
py
|
Python
|
nlp/nlp_utils.py
|
vghost2008/wml
|
d0c5a1da6c228e321ae59a563e9ac84aa66266ff
|
[
"MIT"
] | 6
|
2019-12-10T17:18:56.000Z
|
2022-03-01T01:00:35.000Z
|
nlp/nlp_utils.py
|
vghost2008/wml
|
d0c5a1da6c228e321ae59a563e9ac84aa66266ff
|
[
"MIT"
] | 2
|
2021-08-25T16:16:01.000Z
|
2022-02-10T05:21:19.000Z
|
nlp/nlp_utils.py
|
vghost2008/wml
|
d0c5a1da6c228e321ae59a563e9ac84aa66266ff
|
[
"MIT"
] | 2
|
2019-12-07T09:57:35.000Z
|
2021-09-06T04:58:10.000Z
|
#coding=utf-8
import numpy as np
import codecs
import os
import sys
import time
from functools import reduce
'''
load dict data which was generated by trans_fastText
'''
def load_fastText_dict(dict_path):
dict = np.load(dict_path)
return dict
'''
load word embedding data which was generated by trans_fastText
'''
def load_fastText_word_embeadding(path,index=None):
we = np.load(path)
if index is not None:
d = we[index]
we = np.concatenate((we,d,d,d,np.zeros(shape=[1,300])))
return we
'''
load the dict file and the word embedding data file, which were generated by trans_fastText
'''
def load_fastTextByFile(dict_path,word_embeadding_path):
dict,index = load_fastText_dict(dict_path)
we = load_fastText_word_embeadding(word_embeadding_path,index)
assert np.shape(dict)[0]==np.shape(we)[0]
return dict,we
'''
load the dict file and the word embedding data file, which were generated by trans_fastText
'''
def load_fastTextByDir(dir_path):
return load_fastTextByFile(os.path.join(dir_path,"dict.bin.npy"),os.path.join(dir_path,"wordembeadding.bin.npy"))
'''
Convert fastText word embedding data into two binary files: word embedding data and dict data
'''
def trans_fastText(file_path,save_dir="./"):
file = codecs.open(file_path, "r", "utf-8")
dict = []
file.readline()
we = []
nr = 332647
    tmp = [0.0] * 300  # pre-allocated row buffer (a list, so item assignment also works on Python 3)
count = 0
begin_t = time.time()
while True:
line = file.readline()
if not line:
break
data = line.split(u" ")
if len(data) == 300:
data = [' '] + data
elif len(data) < 300:
continue
dict.append(data[0])
if count == 73144:
print("A")
for i in range(300):
tmp[i] = (float(data[i + 1]))
we.append(np.array([tmp], dtype=np.float32))
if count % 100 == 0:
sys.stdout.write('\r>> Converting image %d/%d' % (len(dict), nr))
sys.stdout.flush()
count = count + 1
print("\n")
print("total time=%f" % (time.time() - begin_t))
# index = dict.index(u"甲肝")
# index = dict.index(u"乙肝")
# index = dict.index(u"丙炎")
# index = dict.index(u"")
we = np.concatenate(we)
# we = np.concatenate([we,[we[0]],[we[0]],[we[0]]])
np_dict = np.array(dict)
np.save(os.path.join(save_dir,"wordembeadding.bin"), we)
np.save(os.path.join(save_dir,"dict.bin"), np_dict)
'''
将文本进行分词并返回在词典中的索引
'''
def tokenize(text,thul,dict):
text = text.encode("utf-8")
thul_token = thul.cut(text)
res = []
token=[]
for t in thul_token:
word = t[0]
u_word = word.decode("utf-8")
index = np.where(dict == u_word)
shape = np.shape(index[0])
if shape[0] == 0:
words = tokenize_word(u_word,dict)
token.extend(words)
res.extend(indexs_of_words(words,dict))
else:
res.append(index[0][0])
token.append(u_word)
return res,token
def tokenize_word(word,dict):
if len(word)<=1:
return [word]
if len(word)==2:
return [word[0],word[1]]
begin_word = word[:2]
index = np.where(dict==begin_word)
if np.shape(index[0])[0] ==0:
return [begin_word[0],begin_word[1]]+tokenize_word(word[2:],dict)
else:
return [begin_word]+tokenize_word(word[2:],dict)
def indexs_of_words(words,dict):
res = []
for word in words:
index = np.where(dict == word)
shape = np.shape(index[0])
if shape[0] == 0:
res.append(0)
else:
res.append(index[0][0])
return res
'''
Word/character vocabulary table
'''
class VocabTable(object):
def __init__(self,vocab,default_word=None,default_index=None):
'''
        vocab: a list of words
'''
self.size = len(vocab)
x = range(len(vocab))
self.vocab_to_id = dict(zip(vocab,x))
self.vocab = vocab
if default_word is not None:
self.default_word = default_word
self.default_index = self.vocab_to_id[default_word]
elif default_index is not None:
self.default_index = default_index
self.default_word = self.vocab[default_index]
else:
self.default_word = "UNK"
self.default_index = 0
def get_id(self,word):
'''
get the index(id) of a word
'''
return self.vocab_to_id.get(word,self.default_index)
def get_word(self,id):
'''
:param id: index(id) of a word
:return: word string
'''
if id<0 or id>=len(self.vocab):
return self.default_word
return self.vocab[id]
def get_id_of_string(self,string):
'''
        :param string: a word string split by ' '
:return: id list of words
'''
words = string.strip().split(' ')
res = []
for w in words:
res.append(self.get_id(w))
return res
def get_string_of_ids(self,ids,spliter=" "):
'''
:param ids: a list of ids
        :return: a word string split by ' '
'''
words = ""
for id in ids:
words+= spliter+self.get_word(id)
return words
def get_vocab(self):
return self.vocab
def vocab_size(self):
return self.size
def load_glove_data(dir_path):
embedding_path = os.path.join(dir_path,"glove_embd.bin"+".npy")
vocab_path = os.path.join(dir_path,"glove_vocab.bin")
file = open(vocab_path,'r')
vocab = []
for s in file.readlines():
vocab.append(s.strip())
file.close()
embedding = np.load(embedding_path)
return embedding,VocabTable(vocab)
def load_default_dict(filepath):
file = open(filepath,'r')
vocab = []
for s in file.readlines():
vocab.append(s.strip())
file.close()
return vocab
def load_default_embedding_data(dir_path,embedding_name="word_embedding.bin"+".npy",vocab_name="vocab.bin"):
embedding_path = os.path.join(dir_path,embedding_name)
vocab_path = os.path.join(dir_path,vocab_name)
vocab = load_default_dict(vocab_path)
embedding = np.load(embedding_path)
return embedding,VocabTable(vocab)
def merge_words(words: list, user_voc, max_merge_nr=4):
res_words = []
def words_to_word(v:list):
return reduce(lambda lhv,rhv: lhv+rhv,v)
i = 0
while i<len(words):
is_in = False
for j in range(max_merge_nr,1,-1):
if i+j>len(words):
continue
word = words_to_word(words[i:i+j])
if word in user_voc:
res_words.append(word)
i += j
is_in = True
break
if not is_in:
res_words.append(words[i])
i += 1
return res_words
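# Usage sketch (not part of the original module): VocabTable round trip with a
# made-up three-word vocabulary.
if __name__ == '__main__':
    table = VocabTable(['UNK', 'hello', 'world'], default_word='UNK')
    ids = table.get_id_of_string('hello there world')
    print(ids)                                   # [1, 0, 2]; 'there' falls back to UNK
    print(table.get_string_of_ids(ids).strip())  # hello UNK world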
| 27.865306
| 117
| 0.587081
|
5768a22515f654bcf16433a173633dddd0610f13
| 2,602
|
py
|
Python
|
ymir/schema/validators.py
|
mattvonrocketstein/ymir
|
a16117ec64c60ec52b0daa3b19eb711ec42b5070
|
[
"MIT"
] | 3
|
2015-10-14T04:07:28.000Z
|
2017-09-09T11:12:51.000Z
|
ymir/schema/validators.py
|
mattvonrocketstein/ymir
|
a16117ec64c60ec52b0daa3b19eb711ec42b5070
|
[
"MIT"
] | 36
|
2015-05-07T11:46:32.000Z
|
2021-09-23T23:20:56.000Z
|
ymir/schema/validators.py
|
mattvonrocketstein/ymir
|
a16117ec64c60ec52b0daa3b19eb711ec42b5070
|
[
"MIT"
] | 3
|
2016-01-05T17:04:07.000Z
|
2016-03-14T09:08:22.000Z
|
# -*- coding: utf-8 -*-
""" ymir.schema.validators
"""
import os
from voluptuous import Invalid
def nested_vagrant_validator(dct):
""" """
if not isinstance(dct, dict):
err = ("expected hash for key @ `vagrant`")
raise Invalid(err)
for key in 'name boot_timeout box box_check_update sync_disabled ram cpus'.split():
if key not in dct:
            err = 'key at `vagrant` should contain sub-key "{0}"'
raise Invalid(err.format(key))
def filepath_validator(string, key='unknown'):
""" """
if not isinstance(string, basestring):
raise Invalid("expected string for key @ `{0}`".format(
key))
string = string.strip()
if string.startswith("~"):
string = os.path.expanduser(string)
if not os.path.isabs(string):
string = os.path.abspath(string)
if not os.path.exists(string):
err = "filepath '{0}' at `{1}` does not exist"
raise Invalid(err.format(string, key))
if not os.path.isfile(string):
err = "filepath '{0}' at `{1}` exists, but is not a file"
raise Invalid(err.format(string, key))
_validate_extends_field = lambda val: filepath_validator(val, key="extends")
def list_of_dicts(lst, key=None):
""" """
if not isinstance(lst, list):
err = ("expected list of strings for key @ `{0}`")
err = err.format(key or 'unknown')
raise Invalid(err)
for i, x in enumerate(lst):
if not isinstance(x, dict):
err = ('expected JSON but top[{0}][{1}] is {2}')
err = err.format(key, i, type(x))
raise Invalid(err)
def list_of_strings(lst, key=None):
if not isinstance(lst, list):
err = ("expected list of strings for key @ `{0}`, got {1}")
err = err.format(key or 'unknown', str(list))
raise Invalid(err)
for i, x in enumerate(lst):
if not isinstance(x, basestring):
print lst
err = (
'expected string for key@`{0}`, but index {1} is "{3}" of type {2}')
err = err.format(
key, i, type(x).__name__, x)
raise Invalid(err)
string_or_int = lambda x: isinstance(x, (unicode, int))
_validate_sl_field = lambda lst: list_of_strings(lst, key='setup_list')
_validate_sg_field = lambda lst: list_of_strings(lst, key='security_groups')
_validate_pl_field = lambda lst: list_of_strings(lst, key='provision_list')
def _validate_puppet_parser(x):
""" """
if x != 'future':
err = "puppet_parser has only one acceptable value: 'future'"
raise Invalid(err)
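# Usage sketch (not part of the original module): plugging the validators above
# into a voluptuous Schema. The key names mirror the ones the validators already
# reference, but the schema itself is illustrative only.
def _example_schema():
    from voluptuous import Schema, Required
    return Schema({
        Required('provision_list'): _validate_pl_field,
        Required('security_groups'): _validate_sg_field,
        'extends': _validate_extends_field,
    })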
| 33.792208
| 87
| 0.600307
|
c9212adc3550b1e240197cf393685d408a9d0224
| 152
|
py
|
Python
|
init5/src/articles/apps.py
|
iYoQ/init5
|
be67fc501376f17f2f6a258c5f8c2002defbc415
|
[
"Apache-2.0"
] | null | null | null |
init5/src/articles/apps.py
|
iYoQ/init5
|
be67fc501376f17f2f6a258c5f8c2002defbc415
|
[
"Apache-2.0"
] | null | null | null |
init5/src/articles/apps.py
|
iYoQ/init5
|
be67fc501376f17f2f6a258c5f8c2002defbc415
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig
class ArticlesConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'src.articles'
| 21.714286
| 56
| 0.763158
|
d37a14e6962e168bd5d9c25097209ace0dc1d696
| 1,300
|
py
|
Python
|
ESN/EchoTorch-master/echotorch/models/HNilsNet.py
|
RogerFu18/drunken-monkey
|
2f8498a83105481d0d189b20407f6e3f658b1053
|
[
"MIT"
] | null | null | null |
ESN/EchoTorch-master/echotorch/models/HNilsNet.py
|
RogerFu18/drunken-monkey
|
2f8498a83105481d0d189b20407f6e3f658b1053
|
[
"MIT"
] | null | null | null |
ESN/EchoTorch-master/echotorch/models/HNilsNet.py
|
RogerFu18/drunken-monkey
|
2f8498a83105481d0d189b20407f6e3f658b1053
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# File : echotorch/models/NilsNet.py
# Description : A Hierarchical NilsNet module.
# Date : 09th of April, 2018
#
# This file is part of EchoTorch. EchoTorch is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Nils Schaetti, University of Neuchâtel <nils.schaetti@unine.ch>
# Imports
import torchvision
import torch.nn as nn
# A Hierarchical NilsNet
class HNilsNet(nn.Module):
"""
A Hierarchical NilsNet
"""
# Constructor
def __init__(self):
"""
Constructor
"""
pass
# end __init__
# Forward
def forward(self):
"""
Forward
:return:
"""
pass
# end forward
# end HNilsNet
| 25.490196
| 79
| 0.682308
|
e8ecce912f9cdde55c7d7b1c35ac1d809ba8ee2d
| 16,681
|
py
|
Python
|
catalogue/application.py
|
Aqueum/UFS-ItemCatalogue
|
75f1f0413343b8c23f8088378f4f13342456dbdf
|
[
"BSD-Source-Code"
] | null | null | null |
catalogue/application.py
|
Aqueum/UFS-ItemCatalogue
|
75f1f0413343b8c23f8088378f4f13342456dbdf
|
[
"BSD-Source-Code"
] | null | null | null |
catalogue/application.py
|
Aqueum/UFS-ItemCatalogue
|
75f1f0413343b8c23f8088378f4f13342456dbdf
|
[
"BSD-Source-Code"
] | null | null | null |
from flask import (Flask,
render_template,
request,
flash,
redirect,
url_for,
jsonify,
make_response)
from flask import session as login_session
from sqlalchemy import (create_engine,
asc)
from sqlalchemy.orm import sessionmaker
from catalogue_setup import (Base,
Category,
Item,
User)
import random
import string
from oauth2client.client import (flow_from_clientsecrets,
FlowExchangeError)
import json
import httplib2
import requests
# initialise flask application
app = Flask(__name__)
# read in authentication client secrets
CLIENT_ID = json.loads(
open('catalogue/client_secrets.json', 'r').read())['web']['client_id']
# connect to database & create database session
engine = create_engine('sqlite:///catalogue/catalogue.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# show list of categories
@app.route("/")
@app.route("/categories/")
def show_categories():
categories = session.query(Category).order_by(asc(Category.name))
loggedin = 'username' in login_session
return render_template('categories.html',
categories=categories,
loggedin=loggedin)
@app.route("/categories/JSON/")
def show_categories_json():
categories = session.query(Category).order_by(asc(Category.name))
return jsonify(categories=[c.serialise for c in categories])
# add a category
@app.route("/categories/new/", methods=['GET', 'POST'])
def add_category():
if 'username' in login_session:
if request.method == 'POST':
new_category = Category(name=request.form['name'],
description=request.form['description'],
user_id=login_session['user_id']
)
session.add(new_category)
flash('New category %s added successfully' % new_category.name)
session.commit()
return redirect(url_for('show_categories'))
else:
return render_template('newCategory.html')
else:
return redirect(url_for('login_page'))
# edit a category
@app.route("/categories/<int:category_id>/edit/", methods=['GET', 'POST'])
def edit_category(category_id):
if 'username' in login_session:
edited_category = session.query(Category).\
filter_by(id=category_id).one()
if edited_category.user_id != login_session['user_id']:
return "<script>function authorised() {alert('Only " \
"the author of a category may edit that category.');}" \
"</script><body onload='authorised()''>"
if request.method == 'POST':
if request.form['name'] == edited_category.name \
and request.form['description'] == \
edited_category.description:
return redirect(url_for('show_categories'))
else:
if request.form['name']:
edited_category.name = request.form['name']
if request.form['description']:
edited_category.description = request.form['description']
session.add(edited_category)
session.commit()
flash('Category %s edited' % edited_category.name)
return redirect(url_for('show_categories'))
else:
return render_template('editCategory.html',
category=edited_category)
else:
return redirect(url_for('login_page'))
# delete a category
@app.route("/categories/<int:category_id>/delete/", methods=['GET', 'POST'])
def delete_category(category_id):
if 'username' in login_session:
deleted_category = session.query(Category).\
filter_by(id=category_id).one()
if deleted_category.user_id != login_session['user_id']:
return "<script>function authorised() {alert('Only " \
"the author of a category may delete that category.');}" \
"</script><body onload='authorised()''>"
if request.method == 'POST':
session.delete(deleted_category)
session.commit()
return redirect(url_for('show_categories'))
else:
return render_template('deleteCategory.html',
category=deleted_category)
else:
return redirect(url_for('login_page'))
# show category page and its item list
@app.route("/categories/<int:category_id>")
def show_category(category_id):
category = session.query(Category).filter_by(id=category_id).one()
items = session.query(Item).filter_by(category_id=category_id)\
.order_by(asc(Item.name))
loggedin = 'username' in login_session
author = category.user_id == login_session.get('user_id')
return render_template('category.html',
category=category,
items=items,
loggedin=loggedin,
author=author)
@app.route("/categories/<int:category_id>/JSON/")
def show_category_json(category_id):
category = session.query(Category).filter_by(id=category_id).one()
items = session.query(Item).\
filter_by(category_id=category_id).\
order_by(asc(Item.name))
return jsonify(category=category.serialise,
items=[i.serialise for i in items])
# add an item
@app.route("/<int:category_id>/new_item/", methods=['GET', 'POST'])
def add_item(category_id):
if 'username' in login_session:
if request.method == 'POST':
new_item = Item(name=request.form['name'],
description=request.form['description'],
image=request.form['image'],
credit=request.form['credit'],
category_id=category_id,
user_id=login_session['user_id'])
session.add(new_item)
flash('New item %s added successfully' % new_item.name)
session.commit()
return redirect(url_for('show_category', category_id=category_id))
else:
return render_template('newItem.html')
else:
return redirect(url_for('login_page'))
# edit an item
@app.route("/categories/<int:category_id>/<int:item_id>/edit/",
methods=['GET', 'POST'])
def edit_item(category_id, item_id):
if 'username' in login_session:
category = session.query(Category).filter_by(id=category_id).one()
edited_item = session.query(Item).filter_by(id=item_id).one()
        if edited_item.user_id != login_session['user_id']:
return "<script>function authorised() {alert(" \
"'Only the author of an item may edit that item.');}" \
"</script><body onload='authorised()''>"
if request.method == 'POST':
if request.form['name'] == edited_item.name and request.form[
'description'] == edited_item.description and request.form[
'image'] == edited_item.image:
return redirect(url_for('show_category',
category_id=category_id))
else:
if request.form['name']:
edited_item.name = request.form['name']
if request.form['description']:
edited_item.description = request.form['description']
if request.form['image']:
edited_item.image = request.form['image']
session.add(edited_item)
session.commit()
flash('Item %s edited' % edited_item.name)
return redirect(url_for('show_category',
category_id=category_id))
else:
return render_template('editItem.html',
category=category,
item=edited_item)
else:
return redirect(url_for('login_page'))
# delete an item
@app.route("/categories/<int:category_id>/<int:item_id>/delete/",
methods=['GET', 'POST'])
def delete_item(category_id, item_id):
if 'username' in login_session:
category = session.query(Category).filter_by(id=category_id).one()
deleted_item = session.query(Item).filter_by(id=item_id).one()
        if deleted_item.user_id != login_session['user_id']:
return "<script>function authorised() {alert('Only the author " \
"of an item may delete that item.');}" \
"</script><body onload='authorised()''>"
if request.method == 'POST':
            session.delete(deleted_item)
            session.commit()
return redirect(url_for('show_category',
category_id=category_id))
else:
return render_template('deleteItem.html',
category=category,
item=deleted_item)
else:
return redirect(url_for('login_page'))
# show item page
@app.route("/categories/<int:category_id>/<int:item_id>")
def show_item(category_id, item_id):
category = session.query(Category).filter_by(id=category_id).one()
item = session.query(Item).filter_by(id=item_id).one()
author = item.user_id == login_session.get('user_id')
return render_template('item.html',
category=category,
item=item,
author=author)
@app.route("/categories/<int:category_id>/<int:item_id>/JSON/")
def show_item_json(category_id, item_id):
category = session.query(Category).filter_by(id=category_id).one()
item = session.query(Item).filter_by(id=item_id).one()
return jsonify(category=category.name, item=item.serialise)
# launch login page after generating anti-forgery state token
@app.route("/login/")
def login_page():
state = ''.join(random.choice(string.ascii_uppercase + string.digits)
for _ in range(32))
login_session['state'] = state
return render_template('login.html', STATE=state, CLIENT_ID=CLIENT_ID)
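# Illustrative note (an assumption about the templates, which are not shown
# here): login.html is expected to send this same state value back when it
# POSTs the one-time Google auth code to '/gconnect?state=<STATE>', so that
# gconnect() below can compare it against login_session['state'] and reject
# forged (cross-site) requests.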
# Google authentication
@app.route('/gconnect', methods=['POST'])
def gconnect():
# Validate state token
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps('Invalid state parameter.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Obtain authorization code
code = request.data
try:
# Upgrade the authorization code into a credentials object
oauth_flow = flow_from_clientsecrets('catalogue/client_secrets.json',
scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(
json.dumps('Failed to upgrade the authorization code.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Check that the access token is valid.
access_token = credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
% access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# If there was an error in the access token info, abort.
if result.get('error') is not None:
response = make_response(json.dumps(result.get('error')), 500)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is used for the intended user.
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
response = make_response(
json.dumps("Token's user ID doesn't match given user ID."), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is valid for this app.
if result['issued_to'] != CLIENT_ID:
response = make_response(
json.dumps("Token's client ID does not match app's."), 401)
print "Token's client ID does not match app's."
response.headers['Content-Type'] = 'application/json'
return response
    stored_access_token = login_session.get('access_token')
    stored_gplus_id = login_session.get('gplus_id')
    if stored_access_token is not None and gplus_id == stored_gplus_id:
response = make_response(json.dumps('Current user is '
'already connected.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
# Store the access token in the session for later use.
login_session['access_token'] = credentials.access_token
login_session['gplus_id'] = gplus_id
# Get user info
userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {'access_token': credentials.access_token, 'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
login_session['username'] = data['name']
login_session['picture'] = data['picture']
login_session['email'] = data['email']
# ADD PROVIDER TO LOGIN SESSION
login_session['provider'] = 'google'
# see if user exists, if it doesn't make a new one
user_id = get_user_id(data["email"])
if not user_id:
user_id = create_user(login_session)
login_session['user_id'] = user_id
output = ''
output += '<h1>Welcome, '
output += login_session['username']
output += '!</h1>'
output += '<img src="'
output += login_session['picture']
output += ' " style = "width: 300px; height: 300px;border-radius: 150px;'
output += '-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '
flash("you are now logged in as %s" % login_session['username'])
print "done!"
return output
# User Helper Functions
def create_user(auth_session):
new_user = User(name=auth_session['username'], email=auth_session[
'email'], picture=auth_session['picture'])
session.add(new_user)
session.commit()
user = session.query(User).filter_by(email=auth_session['email']).one()
return user.id
def get_user_info(user_id):
user = session.query(User).filter_by(id=user_id).one()
return user
def get_user_id(email):
try:
user = session.query(User).filter_by(email=email).one()
return user.id
except Exception as e:
print e
return None
# DISCONNECT - Revoke a current user's token and reset their login_session
@app.route('/gdisconnect')
def gdisconnect():
# Only disconnect a connected user.
    access_token = login_session.get('access_token')
    if access_token is None:
        response = make_response(
            json.dumps('Current user not connected.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token
h = httplib2.Http()
result = h.request(url, 'GET')[0]
if result['status'] != '200':
# For whatever reason, the given token was invalid.
response = make_response(
json.dumps('Failed to revoke token for given user.'), 400)
response.headers['Content-Type'] = 'application/json'
return response
# Disconnect based on provider
@app.route('/disconnect')
def disconnect():
if 'provider' in login_session:
if login_session['provider'] == 'google':
gdisconnect()
del login_session['gplus_id']
del login_session['access_token']
# if login_session['provider'] == 'facebook':
# fbdisconnect()
# del login_session['facebook_id']
del login_session['username']
del login_session['email']
del login_session['picture']
del login_session['user_id']
del login_session['provider']
flash("You have successfully been logged out.")
return redirect(url_for('show_categories'))
# run flask development server
if __name__ == '__main__':
app.secret_key = 'aHr^8jH29Ne%k)puVr34Gj&wsh'
app.debug = True
app.run(host='0.0.0.0', port=8000)
| 38.88345
| 79
| 0.607218
|
c432a3a020fed576c35cc37a97b5f25ccb9fd1f2
| 13,977
|
py
|
Python
|
tfx/dsl/component/experimental/decorators.py
|
epona-science/tfx
|
2081eee1e94d97de399514e02079a233309d648d
|
[
"Apache-2.0"
] | null | null | null |
tfx/dsl/component/experimental/decorators.py
|
epona-science/tfx
|
2081eee1e94d97de399514e02079a233309d648d
|
[
"Apache-2.0"
] | null | null | null |
tfx/dsl/component/experimental/decorators.py
|
epona-science/tfx
|
2081eee1e94d97de399514e02079a233309d648d
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python2, python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decorators for defining components via Python functions.
Experimental: no backwards compatibility guarantees.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import types
from typing import Any, Callable, Dict, List, Text
# Standard Imports
import six
from tfx import types as tfx_types
from tfx.dsl.component.experimental import function_parser
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import base_executor
from tfx.dsl.components.base import executor_spec
from tfx.types import channel_utils
from tfx.types import component_spec
class _SimpleComponent(base_component.BaseComponent):
"""Component whose constructor generates spec instance from arguments."""
def __init__(self, *unused_args, **kwargs):
if unused_args:
raise ValueError(('%s expects arguments to be passed as keyword '
'arguments') % (self.__class__.__name__,))
spec_kwargs = {}
unseen_args = set(kwargs.keys())
for key, channel_parameter in self.SPEC_CLASS.INPUTS.items():
if key not in kwargs and not channel_parameter.optional:
raise ValueError('%s expects input %r to be a Channel of type %s.' %
(self.__class__.__name__, key, channel_parameter.type))
if key in kwargs:
spec_kwargs[key] = kwargs[key]
unseen_args.remove(key)
for key, parameter in self.SPEC_CLASS.PARAMETERS.items():
if key not in kwargs and not parameter.optional:
raise ValueError('%s expects parameter %r of type %s.' %
(self.__class__.__name__, key, parameter.type))
if key in kwargs:
spec_kwargs[key] = kwargs[key]
unseen_args.remove(key)
instance_name = kwargs.get('instance_name', None)
unseen_args.discard('instance_name')
if unseen_args:
raise ValueError(
'Unknown arguments to %r: %s.' %
(self.__class__.__name__, ', '.join(sorted(unseen_args))))
for key, channel_parameter in self.SPEC_CLASS.OUTPUTS.items():
spec_kwargs[key] = channel_utils.as_channel([channel_parameter.type()])
spec = self.SPEC_CLASS(**spec_kwargs)
super(_SimpleComponent, self).__init__(spec, instance_name=instance_name)
class _FunctionExecutor(base_executor.BaseExecutor):
"""Base class for function-based executors."""
# Properties that should be overridden by subclass. Defaults are provided to
# allow pytype to properly type check these properties.
# Describes the format of each argument passed to the component function, as
# a dictionary from name to a `function_parser.ArgFormats` enum value.
_ARG_FORMATS = {}
# Map from names of optional arguments to their default argument values.
_ARG_DEFAULTS = {}
# User-defined component function. Should be wrapped in staticmethod() to
# avoid being interpreted as a bound method (i.e. one taking `self` as its
  # first argument).
_FUNCTION = staticmethod(lambda: None)
# Set of output names that are primitive type values returned from the user
# function.
_RETURNED_VALUES = set()
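  # For illustration only (hypothetical names, not part of the original
  # module): the `component` decorator below generates a subclass that
  # overrides these attributes, conceptually along the lines of
  #
  #   class MyComponent_Executor(_FunctionExecutor):
  #     _ARG_FORMATS = {'examples': function_parser.ArgFormats.INPUT_ARTIFACT}
  #     _ARG_DEFAULTS = {'num_iterations': 10}
  #     _FUNCTION = staticmethod(my_component_fn)
  #     _RETURNED_VALUES = {'loss'}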
def Do(self, input_dict: Dict[Text, List[tfx_types.Artifact]],
output_dict: Dict[Text, List[tfx_types.Artifact]],
exec_properties: Dict[Text, Any]) -> None:
function_args = {}
for name, arg_format in self._ARG_FORMATS.items():
if arg_format == function_parser.ArgFormats.INPUT_ARTIFACT:
input_list = input_dict.get(name, [])
if len(input_list) == 1:
function_args[name] = input_list[0]
elif not input_list and name in self._ARG_DEFAULTS:
# Do not pass the missing optional input.
pass
else:
raise ValueError((
'Expected input %r to %s to be a singleton ValueArtifact channel '
'(got %s instead).') % (name, self, input_list))
elif arg_format == function_parser.ArgFormats.OUTPUT_ARTIFACT:
output_list = output_dict.get(name, [])
if len(output_list) == 1:
function_args[name] = output_list[0]
else:
raise ValueError((
'Expected output %r to %s to be a singleton ValueArtifact channel '
'(got %s instead).') % (name, self, output_list))
elif arg_format == function_parser.ArgFormats.ARTIFACT_VALUE:
input_list = input_dict.get(name, [])
if len(input_list) == 1:
function_args[name] = input_list[0].value
elif not input_list and name in self._ARG_DEFAULTS:
# Do not pass the missing optional input.
pass
else:
raise ValueError((
'Expected input %r to %s to be a singleton ValueArtifact channel '
'(got %s instead).') % (name, self, input_list))
elif arg_format == function_parser.ArgFormats.PARAMETER:
if name in exec_properties:
function_args[name] = exec_properties[name]
elif name in self._ARG_DEFAULTS:
# Do not pass the missing optional input.
pass
else:
raise ValueError((
'Expected non-optional parameter %r of %s to be provided, but no '
'value was passed.') % (name, self))
else:
raise ValueError('Unknown argument format: %r' % (arg_format,))
# Call function and check returned values.
outputs = self._FUNCTION(**function_args)
outputs = outputs or {}
if not isinstance(outputs, dict):
raise ValueError(
('Expected component executor function %s to return a dict of '
'outputs (got %r instead).') % (self._FUNCTION, outputs))
# Assign returned ValueArtifact values.
for name in self._RETURNED_VALUES:
if name not in outputs:
raise ValueError(
'Did not receive expected output %r as return value from '
'component executor function %s.' % (name, self._FUNCTION))
try:
output_dict[name][0].value = outputs[name]
except TypeError:
raise TypeError(
('Return value %r for output %r is incompatible with output type '
'%r.') % (outputs[name], name, output_dict[name][0].__class__))
def component(func: types.FunctionType) -> Callable[..., Any]:
"""Decorator: creates a component from a typehint-annotated Python function.
This decorator creates a component based on typehint annotations specified for
the arguments and return value for a Python function. Specifically, function
arguments can be annotated with the following types and associated semantics:
* `Parameter[T]` where `T` is `int`, `float`, `str`, or `bytes`: indicates
that a primitive type execution parameter, whose value is known at pipeline
construction time, will be passed for this argument. These parameters will
be recorded in ML Metadata as part of the component's execution record. Can
be an optional argument.
* `int`, `float`, `str`, `bytes`: indicates that a primitive type value will
be passed for this argument. This value is tracked as an `Integer`, `Float`
`String` or `Bytes` artifact (see `tfx.types.standard_artifacts`) whose
value is read and passed into the given Python component function. Can be
an optional argument.
* `InputArtifact[ArtifactType]`: indicates that an input artifact object of
type `ArtifactType` (deriving from `tfx.types.Artifact`) will be passed for
this argument. This artifact is intended to be consumed as an input by this
component (possibly reading from the path specified by its `.uri`). Can be
an optional argument by specifying a default value of `None`.
* `OutputArtifact[ArtifactType]`: indicates that an output artifact object of
type `ArtifactType` (deriving from `tfx.types.Artifact`) will be passed for
this argument. This artifact is intended to be emitted as an output by this
component (and written to the path specified by its `.uri`). Cannot be an
optional argument.
The return value typehint should be either empty or `None`, in the case of a
component function that has no return values, or an instance of
`OutputDict(key_1=type_1, ...)`, where each key maps to a given type (each
type is a primitive value type, i.e. `int`, `float`, `str` or `bytes`), to
indicate that the return value is a dictionary with specified keys and value
types.
Note that output artifacts should not be included in the return value
typehint; they should be included as `OutputArtifact` annotations in the
function inputs, as described above.
The function to which this decorator is applied must be at the top level of
its Python module (it may not be defined within nested classes or function
closures).
This is example usage of component definition using this decorator:
from tfx.dsl.components.base.annotations import OutputDict
  from tfx.dsl.components.base.annotations import InputArtifact
  from tfx.dsl.components.base.annotations import OutputArtifact
  from tfx.dsl.components.base.annotations import Parameter
from tfx.dsl.components.base.decorators import component
from tfx.types.standard_artifacts import Examples
from tfx.types.standard_artifacts import Model
@component
def MyTrainerComponent(
training_data: InputArtifact[Examples],
model: OutputArtifact[Model],
dropout_hyperparameter: float,
num_iterations: Parameter[int] = 10
) -> OutputDict(loss=float, accuracy=float):
'''My simple trainer component.'''
records = read_examples(training_data.uri)
model_obj = train_model(records, num_iterations, dropout_hyperparameter)
model_obj.write_to(model.uri)
return {
'loss': model_obj.loss,
'accuracy': model_obj.accuracy
}
# Example usage in a pipeline graph definition:
# ...
trainer = MyTrainerComponent(
examples=example_gen.outputs['examples'],
dropout_hyperparameter=other_component.outputs['dropout'],
num_iterations=1000)
pusher = Pusher(model=trainer.outputs['model'])
# ...
Experimental: no backwards compatibility guarantees.
Args:
func: Typehint-annotated component executor function.
Returns:
`base_component.BaseComponent` subclass for the given component executor
function.
Raises:
EnvironmentError: if the current Python interpreter is not Python 3.
"""
if six.PY2:
raise EnvironmentError('`@component` is only supported in Python 3.')
# Defining a component within a nested class or function closure causes
# problems because in this case, the generated component classes can't be
# referenced via their qualified module path.
#
# See https://www.python.org/dev/peps/pep-3155/ for details about the special
# '<locals>' namespace marker.
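  # For illustration (an assumed example, not from the original comment): a
  # function defined inside another function has a qualified name such as
  # 'make_pipeline.<locals>.my_component', which the check below rejects.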
if '<locals>' in func.__qualname__.split('.'):
raise ValueError(
'The @component decorator can only be applied to a function defined '
'at the module level. It cannot be used to construct a component for a '
'function defined in a nested class or function closure.')
inputs, outputs, parameters, arg_formats, arg_defaults, returned_values = (
function_parser.parse_typehint_component_function(func))
spec_inputs = {}
spec_outputs = {}
spec_parameters = {}
for key, artifact_type in inputs.items():
spec_inputs[key] = component_spec.ChannelParameter(
type=artifact_type, optional=(key in arg_defaults))
for key, artifact_type in outputs.items():
assert key not in arg_defaults, 'Optional outputs are not supported.'
spec_outputs[key] = component_spec.ChannelParameter(type=artifact_type)
for key, primitive_type in parameters.items():
spec_parameters[key] = component_spec.ExecutionParameter(
type=primitive_type, optional=(key in arg_defaults))
component_spec_class = type(
'%s_Spec' % func.__name__, (tfx_types.ComponentSpec,), {
'INPUTS': spec_inputs,
'OUTPUTS': spec_outputs,
'PARAMETERS': spec_parameters,
})
executor_class = type(
'%s_Executor' % func.__name__,
(_FunctionExecutor,),
{
'_ARG_FORMATS': arg_formats,
'_ARG_DEFAULTS': arg_defaults,
# The function needs to be marked with `staticmethod` so that later
# references of `self._FUNCTION` do not result in a bound method (i.e.
# one with `self` as its first parameter).
'_FUNCTION': staticmethod(func),
'_RETURNED_VALUES': returned_values,
'__module__': func.__module__,
})
# Expose the generated executor class in the same module as the decorated
# function. This is needed so that the executor class can be accessed at the
# proper module path. One place this is needed is in the Dill pickler used by
# Apache Beam serialization.
module = sys.modules[func.__module__]
setattr(module, '%s_Executor' % func.__name__, executor_class)
executor_spec_instance = executor_spec.ExecutorClassSpec(
executor_class=executor_class)
return type(
func.__name__, (_SimpleComponent,), {
'SPEC_CLASS': component_spec_class,
'EXECUTOR_SPEC': executor_spec_instance,
'__module__': func.__module__,
})
| 42.874233
| 81
| 0.694856
|
c397c8d6cbfdcce3ea9ddb40215c3f5c4e3a8afe
| 11,277
|
py
|
Python
|
autotest/ogr/ogr_vfk.py
|
joa-quim/gdal
|
4864590da00e0ff439159e378bdfeb25b4be48d4
|
[
"MIT"
] | null | null | null |
autotest/ogr/ogr_vfk.py
|
joa-quim/gdal
|
4864590da00e0ff439159e378bdfeb25b4be48d4
|
[
"MIT"
] | null | null | null |
autotest/ogr/ogr_vfk.py
|
joa-quim/gdal
|
4864590da00e0ff439159e378bdfeb25b4be48d4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test OGR VFK driver functionality.
# Author: Martin Landa <landa.martin gmail.com>
#
###############################################################################
# Copyright (c) 2009-2018 Martin Landa <landa.martin gmail.com>
# Copyright (c) 2010-2012, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
sys.path.append('../pymod')
import gdaltest
from osgeo import gdal
from osgeo import ogr
###############################################################################
# Open file, check number of layers, get first layer,
# check number of fields and features
def ogr_vfk_1():
gdaltest.vfk_drv = ogr.GetDriverByName('VFK')
if gdaltest.vfk_drv is None:
return 'skip'
try:
os.remove('data/bylany.vfk.db')
except OSError:
pass
gdaltest.vfk_ds = ogr.Open('data/bylany.vfk')
if gdaltest.vfk_ds is None:
return 'fail'
if gdaltest.vfk_ds.GetLayerCount() != 61:
gdaltest.post_reason('expected exactly 61 layers!')
return 'fail'
gdaltest.vfk_layer_par = gdaltest.vfk_ds.GetLayer(0)
if gdaltest.vfk_layer_par is None:
gdaltest.post_reason('cannot get first layer')
return 'fail'
if gdaltest.vfk_layer_par.GetName() != 'PAR':
gdaltest.post_reason('did not get expected layer name "PAR"')
return 'fail'
defn = gdaltest.vfk_layer_par.GetLayerDefn()
if defn.GetFieldCount() != 28:
gdaltest.post_reason('did not get expected number of fields, got %d' % defn.GetFieldCount())
return 'fail'
fc = gdaltest.vfk_layer_par.GetFeatureCount()
if fc != 1:
gdaltest.post_reason('did not get expected feature count, got %d' % fc)
return 'fail'
return 'success'
###############################################################################
# Read the first feature from layer 'PAR', check envelope
def ogr_vfk_2():
if gdaltest.vfk_drv is None:
return 'skip'
gdaltest.vfk_layer_par.ResetReading()
feat = gdaltest.vfk_layer_par.GetNextFeature()
if feat.GetFID() != 1:
gdaltest.post_reason('did not get expected fid for feature 1')
return 'fail'
geom = feat.GetGeometryRef()
if geom.GetGeometryType() != ogr.wkbPolygon:
gdaltest.post_reason('did not get expected geometry type.')
return 'fail'
envelope = geom.GetEnvelope()
area = (envelope[1] - envelope[0]) * (envelope[3] - envelope[2])
exp_area = 2010.5
if area < exp_area - 0.5 or area > exp_area + 0.5:
gdaltest.post_reason('envelope area not as expected, got %g.' % area)
return 'fail'
return 'success'
###############################################################################
# Read features from layer 'SOBR', test attribute query
def ogr_vfk_3():
if gdaltest.vfk_drv is None:
return 'skip'
gdaltest.vfk_layer_sobr = gdaltest.vfk_ds.GetLayer(43)
if gdaltest.vfk_layer_sobr.GetName() != 'SOBR':
gdaltest.post_reason('did not get expected layer name "SOBR"')
return 'fail'
gdaltest.vfk_layer_sobr.SetAttributeFilter("CISLO_BODU = '55'")
gdaltest.vfk_layer_sobr.ResetReading()
feat = gdaltest.vfk_layer_sobr.GetNextFeature()
count = 0
while feat:
feat = gdaltest.vfk_layer_sobr.GetNextFeature()
count += 1
if count != 1:
gdaltest.post_reason('did not get expected number of features, got %d' % count)
return 'fail'
return 'success'
###############################################################################
# Read features from layer 'SBP', test random access, check length
def ogr_vfk_4():
if gdaltest.vfk_drv is None:
return 'skip'
gdaltest.vfk_layer_sbp = gdaltest.vfk_ds.GetLayerByName('SBP')
if not gdaltest.vfk_layer_sbp:
gdaltest.post_reason('did not get expected layer name "SBP"')
return 'fail'
feat = gdaltest.vfk_layer_sbp.GetFeature(5)
length = int(feat.geometry().Length())
if length != 10:
gdaltest.post_reason('did not get expected length, got %d' % length)
return 'fail'
return 'success'
###############################################################################
# Read features from layer 'HP', check geometry type
def ogr_vfk_5():
if gdaltest.vfk_drv is None:
return 'skip'
gdaltest.vfk_layer_hp = gdaltest.vfk_ds.GetLayerByName('HP')
    if not gdaltest.vfk_layer_hp:
gdaltest.post_reason('did not get expected layer name "HP"')
return 'fail'
geom_type = gdaltest.vfk_layer_hp.GetGeomType()
if geom_type != ogr.wkbLineString:
gdaltest.post_reason('did not get expected geometry type, got %d' % geom_type)
return 'fail'
return 'success'
###############################################################################
# Re-Open file (test .db persistence)
def ogr_vfk_6():
if gdaltest.vfk_drv is None:
return 'skip'
gdaltest.vfk_layer_par = None
gdaltest.vfk_layer_sobr = None
gdaltest.vfk_ds = None
gdaltest.vfk_ds = ogr.Open('data/bylany.vfk')
if gdaltest.vfk_ds is None:
return 'fail'
if gdaltest.vfk_ds.GetLayerCount() != 61:
gdaltest.post_reason('expected exactly 61 layers!')
return 'fail'
gdaltest.vfk_layer_par = gdaltest.vfk_ds.GetLayer(0)
if gdaltest.vfk_layer_par is None:
gdaltest.post_reason('cannot get first layer')
return 'fail'
if gdaltest.vfk_layer_par.GetName() != 'PAR':
gdaltest.post_reason('did not get expected layer name "PAR"')
return 'fail'
defn = gdaltest.vfk_layer_par.GetLayerDefn()
if defn.GetFieldCount() != 28:
gdaltest.post_reason('did not get expected number of fields, got %d' % defn.GetFieldCount())
return 'fail'
fc = gdaltest.vfk_layer_par.GetFeatureCount()
if fc != 1:
gdaltest.post_reason('did not get expected feature count, got %d' % fc)
return 'fail'
return 'success'
###############################################################################
# Read PAR layer, check data types (Integer64 new in GDAL 2.2)
def ogr_vfk_7():
if gdaltest.vfk_drv is None:
return 'skip'
defn = gdaltest.vfk_layer_par.GetLayerDefn()
for idx, name, ctype in ((0, "ID", ogr.OFTInteger64),
(1, "STAV_DAT", ogr.OFTInteger),
(2, "DATUM_VZNIKU", ogr.OFTString),
(22, "CENA_NEMOVITOSTI", ogr.OFTReal)):
col = defn.GetFieldDefn(idx)
if col.GetName() != name or col.GetType() != ctype:
gdaltest.post_reason("PAR: '{}' column name/type mismatch".format(name))
return 'fail'
return 'success'
###############################################################################
# Open DB file as datasource (new in GDAL 2.2)
def ogr_vfk_8():
if gdaltest.vfk_drv is None:
return 'skip'
# open by SQLite driver first
vfk_ds_db = ogr.Open('data/bylany.db')
count1 = vfk_ds_db.GetLayerCount()
vfk_ds_db = None
# then open by VFK driver
os.environ['OGR_VFK_DB_READ'] = 'YES'
vfk_ds_db = ogr.Open('data/bylany.db')
count2 = vfk_ds_db.GetLayerCount()
vfk_ds_db = None
if count1 != count2:
gdaltest.post_reason('layer count differs when opening DB by SQLite and VFK drivers')
return 'fail'
del os.environ['OGR_VFK_DB_READ']
return 'success'
###############################################################################
# Open datasource with SUPPRESS_GEOMETRY open option (new in GDAL 2.3)
def ogr_vfk_9():
if gdaltest.vfk_drv is None:
return 'skip'
# open with suppressing geometry
vfk_ds = None
vfk_ds = gdal.OpenEx('data/bylany.vfk', open_options=['SUPPRESS_GEOMETRY=YES'])
vfk_layer_par = vfk_ds.GetLayerByName('PAR')
    if not vfk_layer_par:
gdaltest.post_reason('did not get expected layer name "PAR"')
return 'fail'
geom_type = vfk_layer_par.GetGeomType()
vfk_layer_par = None
vfk_ds = None
if geom_type != ogr.wkbNone:
gdaltest.post_reason('did not get expected geometry type, got %d' % geom_type)
return 'fail'
return 'success'
###############################################################################
# Open datasource with FILE_FIELD open option (new in GDAL 2.4)
def ogr_vfk_10():
if gdaltest.vfk_drv is None:
return 'skip'
    # open with the FILE_FIELD open option
vfk_ds = None
vfk_ds = gdal.OpenEx('data/bylany.vfk', open_options=['FILE_FIELD=YES'])
vfk_layer_par = vfk_ds.GetLayerByName('PAR')
    if not vfk_layer_par:
gdaltest.post_reason('did not get expected layer name "PAR"')
return 'fail'
vfk_layer_par.ResetReading()
feat = vfk_layer_par.GetNextFeature()
file_field = feat.GetField('VFK_FILENAME')
vfk_layer_par = None
vfk_ds = None
if file_field != 'bylany.vfk':
gdaltest.post_reason('did not get expected file field value')
return 'fail'
return 'success'
###############################################################################
# cleanup
def ogr_vfk_cleanup():
if gdaltest.vfk_drv is None:
return 'skip'
gdaltest.vfk_layer_par = None
gdaltest.vfk_layer_hp = None
gdaltest.vfk_layer_sobr = None
gdaltest.vfk_ds = None
try:
os.remove('data/bylany.db')
except OSError:
pass
return 'success'
###############################################################################
#
gdaltest_list = [
ogr_vfk_1,
ogr_vfk_2,
ogr_vfk_3,
ogr_vfk_4,
ogr_vfk_5,
ogr_vfk_6,
ogr_vfk_7,
ogr_vfk_8,
ogr_vfk_9,
ogr_vfk_10,
ogr_vfk_cleanup]
if __name__ == '__main__':
gdaltest.setup_run('ogr_vfk')
gdaltest.run_tests(gdaltest_list)
gdaltest.summarize()
| 28.477273
| 100
| 0.593952
|
1caebd2893bd48e2199e840ba4e384679d162c3f
| 23,539
|
py
|
Python
|
intake/catalog/tests/test_local.py
|
raybellwaves/intake
|
8acc70d9adb19344ca15dee948315828b61e87b2
|
[
"BSD-2-Clause"
] | 3
|
2020-04-14T17:47:53.000Z
|
2021-07-09T05:35:12.000Z
|
intake/catalog/tests/test_local.py
|
raybellwaves/intake
|
8acc70d9adb19344ca15dee948315828b61e87b2
|
[
"BSD-2-Clause"
] | 2
|
2019-05-15T20:42:24.000Z
|
2019-07-25T00:35:24.000Z
|
intake/catalog/tests/test_local.py
|
raybellwaves/intake
|
8acc70d9adb19344ca15dee948315828b61e87b2
|
[
"BSD-2-Clause"
] | 1
|
2019-07-24T22:26:58.000Z
|
2019-07-24T22:26:58.000Z
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. and Intake contributors
# All rights reserved.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
import datetime
import os.path
import shutil
import tempfile
import time
import pytest
import pandas
from .util import assert_items_equal
from intake import open_catalog
from intake.catalog import exceptions, local
from intake.catalog.local import get_dir, UserParameter, LocalCatalogEntry
from intake.utils import make_path_posix
def abspath(filename):
return make_path_posix(
os.path.join(os.path.dirname(__file__), filename))
def test_local_catalog(catalog1):
assert_items_equal(list(catalog1),
['use_example1', 'nested', 'entry1', 'entry1_part',
'remote_env', 'local_env', 'text', 'arr', 'datetime'])
assert len(catalog1) == 9
assert catalog1['entry1'].describe() == {
'name': 'entry1',
'container': 'dataframe',
'direct_access': 'forbid',
'user_parameters': [],
'description': 'entry1 full',
'args': {'urlpath': '{{ CATALOG_DIR }}/entry1_*.csv'},
'metadata': {'bar': [1, 2, 3], 'foo': 'bar'},
'plugin': ['csv'],
'driver': ['csv']
}
assert catalog1['entry1_part'].describe() == {
'name': 'entry1_part',
'container': 'dataframe',
'user_parameters': [
{
'name': 'part',
'description': 'part of filename',
'default': '1',
'type': 'str',
'allowed': ['1', '2'],
}
],
'description': 'entry1 part',
'direct_access': 'allow',
'args': {'urlpath': '{{ CATALOG_DIR }}/entry1_{{ part }}.csv'},
'metadata': {'foo': 'baz', 'bar': [2, 4, 6]},
'plugin': ['csv'],
'driver': ['csv']
}
assert catalog1['entry1'].container == 'dataframe'
md = catalog1['entry1'].metadata
md.pop('catalog_dir')
assert md['foo'] == 'bar'
assert md['bar'] == [1, 2, 3]
# Use default parameters
assert catalog1['entry1_part'].container == 'dataframe'
# Specify parameters
assert catalog1['entry1_part'].configure_new(part='2').container == 'dataframe'
def test_get_items(catalog1):
for key, entry in catalog1.items():
assert catalog1[key].describe() == entry.describe()
def test_nested(catalog1):
assert 'nested' in catalog1
assert 'entry1' in catalog1.nested.nested()
assert catalog1.entry1.read().equals(catalog1.nested.nested.entry1.read())
assert 'nested.nested' not in catalog1.walk(depth=1)
assert 'nested.nested' in catalog1.walk(depth=2)
assert catalog1.nested.cat == catalog1
assert catalog1.nested.nested.nested.cat.cat.cat is catalog1
def test_nested_gets_name_from_super(catalog1):
assert catalog1.name == 'name_in_cat'
assert 'nested' in catalog1
nested = catalog1.nested
assert nested.name == 'nested'
assert nested().name == 'nested'
def test_hash(catalog1):
assert catalog1.nested() == catalog1.nested.nested()
def test_getitem(catalog1):
assert list(catalog1) == list(catalog1['nested']())
assert list(catalog1) == list(catalog1['nested.nested']())
assert list(catalog1) == list(catalog1['nested', 'nested']())
def test_source_plugin_config(catalog1):
from intake import registry
assert 'example1' in registry
assert 'example2' in registry
def test_metadata(catalog1):
assert hasattr(catalog1, 'metadata')
assert catalog1.metadata['test'] is True
def test_use_source_plugin_from_config(catalog1):
catalog1['use_example1']
def test_get_dir():
assert get_dir('file:///path/catalog.yml') == 'file:///path'
assert get_dir('https://example.com/catalog.yml') == 'https://example.com'
path = 'example/catalog.yml'
out = get_dir(path)
assert os.path.isabs(out)
assert out.endswith('/example/')
path = '/example/catalog.yml'
out = get_dir(path)
# it's ok if the first two chars indicate drive for win (C:)
assert '/example/' in [out, out[2:]]
path = 'example'
out = get_dir(path)
assert os.path.isabs(out)
assert not out.endswith('/example')
assert out.endswith('/')
def test_entry_dir_function(catalog1):
assert 'nested' in dir(catalog1.nested)
@pytest.mark.parametrize("dtype,expected", [
("bool", False),
("datetime", pandas.Timestamp(1970, 1, 1, 0, 0, 0)),
("float", 0.0),
("int", 0),
("list", []),
("str", ""),
("unicode", u""),
])
def test_user_parameter_default_value(dtype, expected):
p = local.UserParameter('a', 'a desc', dtype)
assert p.validate(None) == expected
def test_user_parameter_repr():
p = local.UserParameter('a', 'a desc', 'str')
expected = "<UserParameter 'a'>"
assert repr(p) == str(p) == expected
@pytest.mark.parametrize("dtype,given,expected", [
("bool", "true", True),
("bool", 0, False),
("datetime", datetime.datetime(2018, 1, 1, 0, 34, 0), pandas.Timestamp(2018, 1, 1, 0, 34, 0)),
("datetime", "2018-01-01 12:34AM", pandas.Timestamp(2018, 1, 1, 0, 34, 0)),
("datetime", 1234567890000000000, pandas.Timestamp(2009, 2, 13, 23, 31, 30)),
("float", "3.14", 3.14),
("int", "1", 1),
("list", (3, 4), [3, 4]),
("str", 1, "1"),
("unicode", "foo", u"foo"),
])
def test_user_parameter_coerce_value(dtype, given, expected):
p = local.UserParameter('a', 'a desc', dtype, given)
assert p.validate(given) == expected
@pytest.mark.parametrize("given", ["now", "today"])
def test_user_parameter_coerce_special_datetime(given):
p = local.UserParameter('a', 'a desc', 'datetime', given)
assert type(p.validate(given)) == pandas.Timestamp
@pytest.mark.parametrize("dtype,given,expected", [
("float", "100.0", 100.0),
("int", "20", 20),
("int", 20.0, 20),
])
def test_user_parameter_coerce_min(dtype, given, expected):
p = local.UserParameter('a', 'a desc', dtype, expected, min=given)
assert p.min == expected
@pytest.mark.parametrize("dtype,given,expected", [
("float", "100.0", 100.0),
("int", "20", 20),
("int", 20.0, 20),
])
def test_user_parameter_coerce_max(dtype, given, expected):
p = local.UserParameter('a', 'a desc', dtype, expected, max=given)
assert p.max == expected
@pytest.mark.parametrize("dtype,given,expected", [
("float", [50, "100.0", 150.0], [50.0, 100.0, 150.0]),
("int", [1, "2", 3.0], [1, 2, 3]),
])
def test_user_parameter_coerce_allowed(dtype, given, expected):
p = local.UserParameter('a', 'a desc', dtype, expected[0], allowed=given)
assert p.allowed == expected
def test_user_parameter_validation_range():
p = local.UserParameter('a', 'a desc', 'int', 1, min=0, max=3)
with pytest.raises(ValueError) as except_info:
p.validate(-1)
assert 'less than' in str(except_info.value)
assert p.validate(0) == 0
assert p.validate(1) == 1
assert p.validate(2) == 2
assert p.validate(3) == 3
with pytest.raises(ValueError) as except_info:
p.validate(4)
assert 'greater than' in str(except_info.value)
def test_user_parameter_validation_allowed():
p = local.UserParameter('a', 'a desc', 'int', 1, allowed=[1, 2])
with pytest.raises(ValueError) as except_info:
p.validate(0)
assert 'allowed' in str(except_info.value)
assert p.validate(1) == 1
assert p.validate(2) == 2
with pytest.raises(ValueError) as except_info:
p.validate(3)
assert 'allowed' in str(except_info.value)
@pytest.mark.parametrize("filename", [
"catalog_non_dict",
"data_source_missing",
"data_source_name_non_string",
"data_source_non_dict",
"data_source_value_non_dict",
"params_missing_required",
"params_name_non_string",
"params_non_dict",
"params_value_bad_choice",
"params_value_bad_type",
"params_value_non_dict",
"plugins_non_dict",
"plugins_source_missing",
"plugins_source_missing_key",
"plugins_source_non_dict",
"plugins_source_non_list",
])
def test_parser_validation_error(filename):
with pytest.raises(exceptions.ValidationError):
list(open_catalog(abspath(filename + ".yml")))
@pytest.mark.parametrize("filename", [
"obsolete_data_source_list",
"obsolete_params_list",
])
def test_parser_obsolete_error(filename):
with pytest.raises(exceptions.ObsoleteError):
open_catalog(abspath(filename + ".yml"))
def test_union_catalog():
path = os.path.dirname(__file__)
uri1 = os.path.join(path, 'catalog_union_1.yml')
uri2 = os.path.join(path, 'catalog_union_2.yml')
union_cat = open_catalog([uri1, uri2])
assert_items_equal(list(union_cat), ['entry1', 'entry1_part', 'use_example1'])
expected = {
'name': 'entry1_part',
'container': 'dataframe',
'user_parameters': [
{
'name': 'part',
'description': 'part of filename',
'default': '1',
'type': 'str',
'allowed': ['1', '2'],
}
],
'description': 'entry1 part',
'direct_access': 'allow'
}
for k in expected:
assert union_cat.entry1_part.describe()[k] == expected[k]
# Implied creation of data source
assert union_cat.entry1.container == 'dataframe'
md = union_cat.entry1.describe()['metadata']
assert md == dict(foo='bar', bar=[1, 2, 3])
    # Use default parameters in explicit creation of data source
assert union_cat.entry1_part().container == 'dataframe'
# Specify parameters in creation of data source
assert union_cat.entry1_part(part='2').container == 'dataframe'
def test_persist_local_cat(temp_cache):
# when persisted, multiple cat become one
from intake.catalog.local import YAMLFileCatalog
path = os.path.dirname(__file__)
uri1 = os.path.join(path, 'catalog_union_1.yml')
uri2 = os.path.join(path, 'catalog_union_2.yml')
s = open_catalog([uri1, uri2])
s2 = s.persist()
assert isinstance(s2, YAMLFileCatalog)
assert set(s) == set(s2)
def test_empty_catalog():
cat = open_catalog()
assert list(cat) == []
def test_nonexistent_error():
with pytest.raises(IOError):
local.YAMLFileCatalog('nonexistent')
def test_duplicate_data_sources():
path = os.path.dirname(__file__)
uri = os.path.join(path, 'catalog_dup_sources.yml')
with pytest.raises(exceptions.DuplicateKeyError):
open_catalog(uri)
def test_duplicate_parameters():
path = os.path.dirname(__file__)
uri = os.path.join(path, 'catalog_dup_parameters.yml')
with pytest.raises(exceptions.DuplicateKeyError):
open_catalog(uri)
@pytest.fixture
def temp_catalog_file():
path = tempfile.mkdtemp()
catalog_file = os.path.join(path, 'catalog.yaml')
with open(catalog_file, 'w') as f:
f.write('''
sources:
a:
driver: csv
args:
urlpath: /not/a/file
b:
driver: csv
args:
urlpath: /not/a/file
''')
yield catalog_file
shutil.rmtree(path)
def test_catalog_file_removal(temp_catalog_file):
cat_dir = os.path.dirname(temp_catalog_file)
cat = open_catalog(cat_dir + '/*', ttl=0.1)
assert set(cat) == {'a', 'b'}
os.remove(temp_catalog_file)
time.sleep(0.5) # wait for catalog refresh
assert set(cat) == set()
def test_flatten_duplicate_error():
path = tempfile.mkdtemp()
f1 = os.path.join(path, 'catalog.yaml')
path = tempfile.mkdtemp()
f2 = os.path.join(path, 'catalog.yaml')
for f in [f1, f2]:
with open(f, 'w') as fo:
fo.write("""
sources:
a:
driver: csv
args:
urlpath: /not/a/file
""")
with pytest.raises(ValueError):
open_catalog([f1, f2])
def test_multi_cat_names():
fn = abspath("catalog_union*.yml")
cat = open_catalog(fn)
assert cat.name == fn
assert fn in repr(cat)
fn1 = abspath("catalog_union_1.yml")
fn2 = abspath("catalog_union_2.yml")
cat = open_catalog([fn1, fn2])
assert cat.name == '2 files'
assert cat.description == 'Catalog generated from 2 files'
cat = open_catalog([fn1, fn2], name='special_name',
description='Special description')
assert cat.name == 'special_name'
assert cat.description == 'Special description'
def test_name_of_builtin():
import intake
assert intake.cat.name == 'builtin'
assert intake.cat.description == 'Generated from data packages found on your intake search path'
def test_cat_with_declared_name():
fn = abspath("catalog_named.yml")
description = 'Description declared in the open function'
cat = open_catalog(fn, name='name_in_func', description=description)
assert cat.name == 'name_in_func'
assert cat.description == description
cat._load() # we don't get metadata until load/list/getitem
assert cat.metadata.get('some') == 'thing'
cat = open_catalog(fn)
assert cat.name == 'name_in_spec'
assert cat.description == 'This is a catalog with a description in the yaml'
def test_cat_with_no_declared_name_gets_name_from_dir_if_file_named_catalog():
fn = abspath("catalog.yml")
cat = open_catalog(fn, name='name_in_func', description='Description in func')
assert cat.name == 'name_in_func'
assert cat.description == 'Description in func'
cat = open_catalog(fn)
assert cat.name == 'tests'
assert cat.description == None
def test_default_expansions():
try:
os.environ['INTAKE_INT_TEST'] = '1'
par = UserParameter('', '', 'int', default='env(INTAKE_INT_TEST)')
par.expand_defaults()
assert par.expanded_default == 1
finally:
del os.environ['INTAKE_INT_TEST']
par = UserParameter('', '', 'str', default='env(USER)')
par.expand_defaults(getenv=False)
assert par.expanded_default == 'env(USER)'
par.expand_defaults()
assert par.expanded_default == os.getenv('USER', '')
par = UserParameter('', '', 'str', default='client_env(USER)')
par.expand_defaults()
assert par.expanded_default == 'client_env(USER)'
par.expand_defaults(client=True)
assert par.expanded_default == os.getenv('USER', '')
par = UserParameter('', '', 'str', default='shell(echo success)')
par.expand_defaults(getshell=False)
assert par.expanded_default == 'shell(echo success)'
par.expand_defaults()
assert par.expanded_default == 'success'
par = UserParameter('', '', 'str', default='client_shell(echo success)')
par.expand_defaults(client=True)
assert par.expanded_default == 'success'
par = UserParameter('', '', 'int', default=1)
par.expand_defaults() # no error from string ops
def test_remote_cat(http_server):
url = http_server + 'catalog1.yml'
cat = open_catalog(url)
assert 'entry1' in cat
assert cat.entry1.describe()
def test_multi_plugins():
from intake.source.csv import CSVSource
fn = abspath('multi_plugins.yaml')
cat = open_catalog(fn)
s = cat.tables0()
assert isinstance(s, CSVSource)
s = cat.tables1()
assert isinstance(s, CSVSource)
s = cat.tables2()
assert isinstance(s, CSVSource)
s = cat.tables3()
assert isinstance(s, CSVSource)
assert s._csv_kwargs == {}
s = cat.tables3(plugin='myplug')
assert isinstance(s, CSVSource)
assert s._csv_kwargs == {}
s = cat.tables3(plugin='myplug2')
assert isinstance(s, CSVSource)
assert s._csv_kwargs is True
with pytest.raises(ValueError):
cat.tables4()
with pytest.raises(ValueError):
cat.tables4(plugin='myplug')
with pytest.raises(ValueError):
cat.tables4(plugin='myplug2')
s = cat.tables5()
assert isinstance(s, CSVSource)
with pytest.raises(ValueError):
cat.tables5(plugin='myplug')
fn = abspath('multi_plugins2.yaml')
with pytest.raises(ValueError):
open_catalog(fn)
def test_no_plugins():
fn = abspath('multi_plugins.yaml')
cat = open_catalog(fn)
with pytest.raises(ValueError) as e:
cat.tables6
assert 'doesnotexist' in str(e.value)
assert 'plugin-directory' in str(e.value)
with pytest.raises(ValueError) as e:
cat.tables7
assert 'doesnotexist' in str(e.value)
def test_explicit_entry_driver():
from intake.source.textfiles import TextFilesSource
e = LocalCatalogEntry('test', 'desc', TextFilesSource,
args={'urlpath': None})
assert e.describe()['container'] == 'python'
assert isinstance(e(), TextFilesSource)
with pytest.raises(TypeError):
LocalCatalogEntry('test', 'desc', None)
def test_getitem_and_getattr():
fn = abspath('multi_plugins.yaml')
catalog = open_catalog(fn)
catalog['tables0']
with pytest.raises(KeyError):
catalog['doesnotexist']
with pytest.raises(KeyError):
catalog['_doesnotexist']
with pytest.raises(KeyError):
# This exists as an *attribute* but not as an item.
catalog['metadata']
catalog.tables0 # alias to catalog['tables0']
catalog.metadata # a normal attribute
with pytest.raises(AttributeError):
catalog.doesnotexit
with pytest.raises(AttributeError):
catalog._doesnotexit
assert catalog.tables0 == catalog['tables0']
assert isinstance(catalog.metadata, (dict, type(None)))
def test_dot_names():
fn = abspath('dot-nest.yaml')
cat = open_catalog(fn)
assert cat.self.leaf.description == 'leaf'
assert cat.self['leafdot.dot'].description == 'leaf-dot'
assert cat['selfdot.dot', 'leafdot.dot'].description == 'leaf-dot'
assert cat['self.selfdot.dot', 'leafdot.dot'].description == 'leaf-dot'
assert cat['self.self.dot', 'leafdot.dot'].description == 'leaf-dot'
assert cat['self.self.dot', 'leaf'].description == 'leaf'
assert cat['self.self.dot', 'leaf.dot'].description == 'leaf-dot'
assert cat['self.self.dot.leaf.dot'].description == 'leaf-dot'
def test_listing(catalog1):
assert list(catalog1) == list(catalog1.nested)
with pytest.raises(TypeError):
list(catalog1.arr)
def test_dict_save():
from intake.catalog.base import Catalog
fn = os.path.join(tempfile.mkdtemp(), 'mycat.yaml')
entry = LocalCatalogEntry(name='trial', description='get this back',
driver='csv', args=dict(urlpath=""))
cat = Catalog.from_dict({'trial': entry}, name='mycat')
cat.save(fn)
cat2 = open_catalog(fn)
assert 'trial' in cat2
assert cat2.name == 'mycat'
assert "CSV" in cat2.trial.classname
def test_dict_save_complex():
from intake.catalog.base import Catalog
fn = os.path.join(tempfile.mkdtemp(), 'mycat.yaml')
cat = Catalog()
entry = LocalCatalogEntry(name='trial', description='get this back',
driver='csv', cache=[], catalog=cat,
parameters=[UserParameter(name='par1', description='desc', type='int')],
args={'urlpath': 'none'})
cat._entries = {'trial': entry}
cat.save(fn)
cat2 = open_catalog(fn)
assert 'trial' in cat2
assert cat2.name == 'mycat'
assert cat2.trial.describe()['plugin'][0] == 'csv'
def test_dict_adddel():
from intake.catalog.base import Catalog
entry = LocalCatalogEntry(name='trial', description='get this back',
driver='csv', args=dict(urlpath=""))
cat = Catalog.from_dict({'trial': entry}, name='mycat')
assert 'trial' in cat
cat['trial2'] = entry
assert list(cat) == ['trial', 'trial2']
cat.pop('trial')
assert list(cat) == ['trial2']
assert cat['trial2'].describe() == entry.describe()
def test_filter():
from intake.catalog.base import Catalog
entry1 = LocalCatalogEntry(name='trial', description='get this back',
driver='csv', args=dict(urlpath=""))
entry2 = LocalCatalogEntry(name='trial', description='pass this through',
driver='csv', args=dict(urlpath=""))
cat = Catalog.from_dict({'trial1': entry1,
'trial2': entry2}, name='mycat')
cat2 = cat.filter(lambda e: 'pass' in e._description)
assert list(cat2) == ['trial2']
assert cat2.trial2 == entry2()
def test_from_dict_with_data_source():
"Check that Catalog.from_dict accepts DataSources not wrapped in Entry."
from intake.catalog.base import Catalog
fn = os.path.join(tempfile.mkdtemp(), 'mycat.yaml')
entry = LocalCatalogEntry(name='trial', description='get this back',
driver='csv', args=dict(urlpath=""))
ds = entry()
cat = Catalog.from_dict({'trial': ds}, name='mycat')
def test_no_instance():
from intake.catalog.local import LocalCatalogEntry
e0 = LocalCatalogEntry('foo', '', 'fake')
e1 = LocalCatalogEntry('foo0', '', 'fake')
# this would error on instantiation with driver not found
assert e0 != e1
def test_fsspec_integration():
import fsspec
import pandas as pd
mem = fsspec.filesystem('memory')
with mem.open('cat.yaml', 'wt') as f:
f.write("""
sources:
implicit:
driver: csv
description: o
args:
urlpath: "{{CATALOG_DIR}}/file.csv"
explicit:
driver: csv
description: o
args:
urlpath: "memory:///file.csv"
extra:
driver: csv
description: o
args:
urlpath: "{{CATALOG_DIR}}/file.csv"
storage_options: {other: option}"""
)
with mem.open('/file.csv', 'wt') as f:
f.write("a,b\n0,1")
expected = pd.DataFrame({'a': [0], 'b': [1]})
cat = open_catalog("memory://cat.yaml")
assert list(cat) == ['implicit', 'explicit', 'extra']
assert cat.implicit.read().equals(expected)
assert cat.explicit.read().equals(expected)
s = cat.extra()
assert s._storage_options['other']
def test_cat_add(tmpdir):
tmpdir = str(tmpdir)
fn = os.path.join(tmpdir, 'cat.yaml')
with open(fn, 'w') as f:
f.write('sources: {}')
cat = open_catalog(fn)
assert list(cat) == []
# was added in memory
cat.add(cat)
cat._load() # this would happen automatically, but not immediately
assert list(cat) == ['cat']
# was added to the file
cat = open_catalog(fn)
assert list(cat) == ['cat']
def test_no_entries_items(catalog1):
from intake.catalog.entry import CatalogEntry
from intake.source.base import DataSource
for k, v in catalog1.items():
assert not isinstance(v, CatalogEntry)
assert isinstance(v, DataSource)
for k in catalog1:
v = catalog1[k]
assert not isinstance(v, CatalogEntry)
assert isinstance(v, DataSource)
for k in catalog1:
# we can't do attribute access on "text" because it
# collides with a property
if k == 'text':
continue
v = getattr(catalog1, k)
assert not isinstance(v, CatalogEntry)
assert isinstance(v, DataSource)
def test_cat_dictlike(catalog1):
assert list(catalog1) == list(catalog1.keys())
assert len(list(catalog1)) == len(catalog1)
assert list(catalog1.items()) == list(zip(catalog1.keys(), catalog1.values()))
| 30.6897
| 102
| 0.633587
|
457fbbb50ba5d857145b62a7880ae4125a5ec4e2
| 267
|
py
|
Python
|
tests/artificial/transf_Anscombe/trend_PolyTrend/cycle_30/ar_12/test_artificial_1024_Anscombe_PolyTrend_30_12_100.py
|
shaido987/pyaf
|
b9afd089557bed6b90b246d3712c481ae26a1957
|
[
"BSD-3-Clause"
] | 377
|
2016-10-13T20:52:44.000Z
|
2022-03-29T18:04:14.000Z
|
tests/artificial/transf_Anscombe/trend_PolyTrend/cycle_30/ar_12/test_artificial_1024_Anscombe_PolyTrend_30_12_100.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 160
|
2016-10-13T16:11:53.000Z
|
2022-03-28T04:21:34.000Z
|
tests/artificial/transf_Anscombe/trend_PolyTrend/cycle_30/ar_12/test_artificial_1024_Anscombe_PolyTrend_30_12_100.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 63
|
2017-03-09T14:51:18.000Z
|
2022-03-27T20:52:57.000Z
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 30, transform = "Anscombe", sigma = 0.0, exog_count = 100, ar_order = 12);
| 38.142857
| 167
| 0.734082
|
78a5139ebe32861f3af06324d1f043b355891a1a
| 271
|
py
|
Python
|
projects/simple-email/app.py
|
iam-jjintta/python-tutorial
|
7465c4ff9cf0a766fd016ccf0fdb48fd01d2677a
|
[
"Apache-2.0"
] | null | null | null |
projects/simple-email/app.py
|
iam-jjintta/python-tutorial
|
7465c4ff9cf0a766fd016ccf0fdb48fd01d2677a
|
[
"Apache-2.0"
] | null | null | null |
projects/simple-email/app.py
|
iam-jjintta/python-tutorial
|
7465c4ff9cf0a766fd016ccf0fdb48fd01d2677a
|
[
"Apache-2.0"
] | null | null | null |
import sys
import tkinter as tk
from pyemail.mailbox import MailBox
title = '흔한 찐따의 이메일 프로그램'
icon = 'icon.ico'
if __name__ == '__main__':
root = tk.Tk()
mailbox = MailBox(root, title=title, icon=icon)
mailbox.mainloop()
sys.exit(0)
| 15.941176
| 52
| 0.627306
|
1fc236735a63bb0e01e610bcbd792bd50b8479e4
| 7,902
|
py
|
Python
|
server_pool.py
|
xiaobailong24-shadowsocks/SSR-manyuser
|
0b94af0b7d721cb14d1c1a16a2e8a54e559fb616
|
[
"Apache-2.0"
] | 6
|
2018-01-06T12:10:30.000Z
|
2020-09-01T16:53:42.000Z
|
server_pool.py
|
xiaobailong24-shadowsocks/SSR-manyuser
|
0b94af0b7d721cb14d1c1a16a2e8a54e559fb616
|
[
"Apache-2.0"
] | null | null | null |
server_pool.py
|
xiaobailong24-shadowsocks/SSR-manyuser
|
0b94af0b7d721cb14d1c1a16a2e8a54e559fb616
|
[
"Apache-2.0"
] | 6
|
2017-10-18T09:30:19.000Z
|
2021-08-04T01:19:28.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import logging
import time
from shadowsocks import shell, eventloop, tcprelay, udprelay, asyncdns, common
import threading
import sys
from socket import *
from configloader import load_config, get_config
class MainThread(threading.Thread):
def __init__(self, params):
threading.Thread.__init__(self)
self.params = params
def run(self):
ServerPool._loop(*self.params)
class ServerPool(object):
instance = None
def __init__(self):
shell.check_python()
self.config = shell.get_config(False)
self.dns_resolver = asyncdns.DNSResolver()
if not self.config.get('dns_ipv6', False):
asyncdns.IPV6_CONNECTION_SUPPORT = False
self.mgr = None #asyncmgr.ServerMgr()
self.tcp_servers_pool = {}
self.tcp_ipv6_servers_pool = {}
self.udp_servers_pool = {}
self.udp_ipv6_servers_pool = {}
self.stat_counter = {}
self.loop = eventloop.EventLoop()
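        # the event loop runs in a background thread (MainThread above), so servers
        # can be added to or removed from the pool while the loop keeps running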
self.thread = MainThread( (self.loop, self.dns_resolver, self.mgr) )
self.thread.start()
@staticmethod
def get_instance():
if ServerPool.instance is None:
ServerPool.instance = ServerPool()
return ServerPool.instance
def stop(self):
self.loop.stop()
@staticmethod
def _loop(loop, dns_resolver, mgr):
try:
if mgr is not None:
mgr.add_to_loop(loop)
dns_resolver.add_to_loop(loop)
loop.run()
except (KeyboardInterrupt, IOError, OSError) as e:
logging.error(e)
import traceback
traceback.print_exc()
            sys.exit(0)
except Exception as e:
logging.error(e)
import traceback
traceback.print_exc()
def server_is_run(self, port):
port = int(port)
ret = 0
if port in self.tcp_servers_pool:
ret = 1
if port in self.tcp_ipv6_servers_pool:
ret |= 2
return ret
def server_run_status(self, port):
if 'server' in self.config:
if port not in self.tcp_servers_pool:
return False
if 'server_ipv6' in self.config:
if port not in self.tcp_ipv6_servers_pool:
return False
return True
def new_server(self, port, user_config):
ret = True
port = int(port)
ipv6_ok = False
if 'server_ipv6' in self.config:
if port in self.tcp_ipv6_servers_pool:
logging.info("server already at %s:%d" % (self.config['server_ipv6'], port))
return 'this port server is already running'
else:
a_config = self.config.copy()
a_config.update(user_config)
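                # strip the surrounding brackets from a literal IPv6 address such as "[::1]"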
if len(a_config['server_ipv6']) > 2 and a_config['server_ipv6'][0] == "[" and a_config['server_ipv6'][-1] == "]":
a_config['server_ipv6'] = a_config['server_ipv6'][1:-1]
a_config['server'] = a_config['server_ipv6']
a_config['server_port'] = port
a_config['max_connect'] = 128
a_config['method'] = common.to_str(a_config['method'])
try:
logging.info("starting server at [%s]:%d" % (common.to_str(a_config['server']), port))
tcp_server = tcprelay.TCPRelay(a_config, self.dns_resolver, False, stat_counter=self.stat_counter)
tcp_server.add_to_loop(self.loop)
self.tcp_ipv6_servers_pool.update({port: tcp_server})
udp_server = udprelay.UDPRelay(a_config, self.dns_resolver, False, stat_counter=self.stat_counter)
udp_server.add_to_loop(self.loop)
self.udp_ipv6_servers_pool.update({port: udp_server})
if common.to_str(a_config['server_ipv6']) == "::":
ipv6_ok = True
except Exception as e:
logging.warn("IPV6 %s " % (e,))
if 'server' in self.config:
if port in self.tcp_servers_pool:
logging.info("server already at %s:%d" % (common.to_str(self.config['server']), port))
return 'this port server is already running'
else:
a_config = self.config.copy()
a_config.update(user_config)
a_config['server_port'] = port
a_config['max_connect'] = 128
a_config['method'] = common.to_str(a_config['method'])
try:
logging.info("starting server at %s:%d" % (common.to_str(a_config['server']), port))
tcp_server = tcprelay.TCPRelay(a_config, self.dns_resolver, False)
tcp_server.add_to_loop(self.loop)
self.tcp_servers_pool.update({port: tcp_server})
udp_server = udprelay.UDPRelay(a_config, self.dns_resolver, False)
udp_server.add_to_loop(self.loop)
self.udp_servers_pool.update({port: udp_server})
except Exception as e:
if not ipv6_ok:
logging.warn("IPV4 %s " % (e,))
return True
def del_server(self, port):
port = int(port)
logging.info("del server at %d" % port)
try:
udpsock = socket(AF_INET, SOCK_DGRAM)
udpsock.sendto('%s:%s:0:0' % (get_config().MANAGE_PASS, port), (get_config().MANAGE_BIND_IP, get_config().MANAGE_PORT))
udpsock.close()
except Exception as e:
logging.warn(e)
return True
def cb_del_server(self, port):
port = int(port)
if port not in self.tcp_servers_pool:
logging.info("stopped server at %s:%d already stop" % (self.config['server'], port))
else:
logging.info("stopped server at %s:%d" % (self.config['server'], port))
try:
self.tcp_servers_pool[port].close(True)
del self.tcp_servers_pool[port]
except Exception as e:
logging.warn(e)
try:
self.udp_servers_pool[port].close(True)
del self.udp_servers_pool[port]
except Exception as e:
logging.warn(e)
if 'server_ipv6' in self.config:
if port not in self.tcp_ipv6_servers_pool:
logging.info("stopped server at [%s]:%d already stop" % (self.config['server_ipv6'], port))
else:
logging.info("stopped server at [%s]:%d" % (self.config['server_ipv6'], port))
try:
self.tcp_ipv6_servers_pool[port].close(True)
del self.tcp_ipv6_servers_pool[port]
except Exception as e:
logging.warn(e)
try:
self.udp_ipv6_servers_pool[port].close(True)
del self.udp_ipv6_servers_pool[port]
except Exception as e:
logging.warn(e)
return True
def get_server_transfer(self, port):
port = int(port)
ret = [0, 0]
if port in self.tcp_servers_pool:
ret[0] = self.tcp_servers_pool[port].server_transfer_ul
ret[1] = self.tcp_servers_pool[port].server_transfer_dl
if port in self.udp_servers_pool:
ret[0] += self.udp_servers_pool[port].server_transfer_ul
ret[1] += self.udp_servers_pool[port].server_transfer_dl
if port in self.tcp_ipv6_servers_pool:
ret[0] += self.tcp_ipv6_servers_pool[port].server_transfer_ul
ret[1] += self.tcp_ipv6_servers_pool[port].server_transfer_dl
if port in self.udp_ipv6_servers_pool:
ret[0] += self.udp_ipv6_servers_pool[port].server_transfer_ul
ret[1] += self.udp_ipv6_servers_pool[port].server_transfer_dl
return ret
def get_servers_transfer(self):
servers = self.tcp_servers_pool.copy()
servers.update(self.tcp_ipv6_servers_pool)
servers.update(self.udp_servers_pool)
servers.update(self.udp_ipv6_servers_pool)
ret = {}
for port in servers.keys():
ret[port] = self.get_server_transfer(port)
return ret
| 32.518519
| 122
| 0.717287
|
56606bfa1b54b7724552476858d006d3a983eff8
| 4,083
|
py
|
Python
|
test/testscenarios.py
|
VITObelgium/geodynamix
|
6d3323bc4cae1b85e26afdceab2ecf3686b11369
|
[
"MIT"
] | null | null | null |
test/testscenarios.py
|
VITObelgium/geodynamix
|
6d3323bc4cae1b85e26afdceab2ecf3686b11369
|
[
"MIT"
] | null | null | null |
test/testscenarios.py
|
VITObelgium/geodynamix
|
6d3323bc4cae1b85e26afdceab2ecf3686b11369
|
[
"MIT"
] | 1
|
2021-06-16T11:55:27.000Z
|
2021-06-16T11:55:27.000Z
|
import os
import unittest
import geodynamix as gdx
import numpy as np
class GdxIntegrationTest(unittest.TestCase):
script_dir = os.path.dirname(os.path.abspath(__file__))
data_dir = os.path.join(script_dir, 'mapdata')
reference_dir = os.path.join(script_dir, 'referencedata')
def tab(self, tab_file):
return os.path.join(self.data_dir, tab_file)
def map(self, map_file):
return gdx.read(os.path.join(self.data_dir, map_file))
def ref(self, map_file):
return gdx.read(os.path.join(self.reference_dir, map_file))
def test_inhabitants_green_areas(self):
lu = self.map('landuse.tif')
green = gdx.reclass(self.tab('lu2green.tab'), lu)
green_clusters = gdx.cluster_size(green)
sel_greenclusters = green_clusters >= 30
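        # keep only green clusters of at least 30 cells; the next line checks reachability within a 1600-unit buffer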
green_in_reach = gdx.sum_in_buffer(sel_greenclusters, 1600) > 0
inw = gdx.read_as('float32', os.path.join(self.data_dir, 'inhabitants.tif'))
inhabitants_green_areas = inw * green_in_reach
expected = gdx.read_as('float32', os.path.join(self.reference_dir, 'inhabitants_green_areas_reference.asc'))
self.assertTrue(gdx.allclose(expected, inhabitants_green_areas, 1e-3))
def test_food_production(self):
gewas = self.map('gewas.tif').astype('float32')
eerste_bod = self.map('eerste_bod.tif')
bofek_2012 = self.map('bofek_2012.tif')
lceu = self.map('lceu.tif')
textuur = gdx.reclass(self.tab('textuur.tab'), bofek_2012)
profiel = gdx.reclass(self.tab('profiel.tab'), eerste_bod)
teelt = gdx.raster(gewas.metadata, fill=1, dtype=gewas.dtype)
bodemgeschiktheid1 = gdx.reclassi(self.tab('bodemgeschiktheid_a.tab'), textuur, teelt, index=1)
geschikt = bodemgeschiktheid1 != 0
bodemgeschiktheid2 = bodemgeschiktheid1 + gdx.reclassi(self.tab('bodemgeschiktheid_b.tab'), profiel, teelt, index=1)
bodemgeschiktheid2 = geschikt * gdx.clip(bodemgeschiktheid2, 1, 5)
        # STEP 4: Correct the agricultural suitability by taking flood sensitivity into account
rendement = gdx.reclass(self.tab('rendement.tab'), bodemgeschiktheid2.astype('float32'))
correctiefactor2 = gdx.reclassi(self.tab('opbrengstverlies_overstroming.tab'), teelt, gdx.reclass(self.tab('uiterwaarden.tab'), lceu).astype('float32'), index=1)
        # STEP 5: Determine the biophysical suitability and the potential food production
potentiele_voedselproductie = rendement - correctiefactor2
potentiele_voedselproductie = gdx.clip_low(potentiele_voedselproductie, 0)
fysische_geschiktheid = gdx.normalise(potentiele_voedselproductie)
self.assertTrue(gdx.allclose(self.ref('fysische_geschiktheid_akker.tif'), fysische_geschiktheid, 1e-5))
self.assertTrue(gdx.allclose(self.ref('potentiele_voedselproductie_akker.tif'), potentiele_voedselproductie, 1e-5))
        # STEP 6: Determine the actual food production
teelt_type_binair = gdx.reclass(self.tab('teelt.tab'), gewas) == teelt
opbrengstpercentage = gdx.reclass(self.tab('opbrengstverlies_beheer.tab'), gdx.reclass(self.tab('landbouwmilieumaatregelen.tab'), gewas))
actuele_voedselproductie = opbrengstpercentage * teelt_type_binair * potentiele_voedselproductie
        # STEP 7: Determine the value of the actual food production
reference = self.ref('waarde_actuele_voedselproductie_met_subsidie_akker.tif')
actual = gdx.reclassi(self.tab('boekhoudkundige_resultaten.tab'), gdx.nreclass(self.tab('rendementsklasse.tab'), actuele_voedselproductie), teelt, index=1).astype("float32")
self.assertTrue(gdx.allclose(reference, actual, 1e-5))
reference = self.ref('waarde_actuele_voedselproductie_zonder_subsidie_akker.tif')
actual = gdx.reclassi(self.tab('boekhoudkundige_resultaten.tab'), gdx.nreclass(self.tab('rendementsklasse.tab'), actuele_voedselproductie), teelt, index=2).astype("float32")
self.assertTrue(gdx.allclose(reference, actual, 1e-5))
if __name__ == '__main__':
unittest.main()
| 52.346154
| 181
| 0.722753
|
6bbc49b6ca82dd6db1bf501f2e0252fbcddf49a3
| 2,166
|
py
|
Python
|
02. Chapter_2/julia1_lineprofiler3.py
|
Mikma03/High-performance-Python
|
b7720377bc967e856e16678ae91b37c2503b49e0
|
[
"MIT"
] | null | null | null |
02. Chapter_2/julia1_lineprofiler3.py
|
Mikma03/High-performance-Python
|
b7720377bc967e856e16678ae91b37c2503b49e0
|
[
"MIT"
] | null | null | null |
02. Chapter_2/julia1_lineprofiler3.py
|
Mikma03/High-performance-Python
|
b7720377bc967e856e16678ae91b37c2503b49e0
|
[
"MIT"
] | null | null | null |
"""Julia set generator without optional PIL-based image drawing"""
import time
# area of complex space to investigate
x1, x2, y1, y2 = -1.8, 1.8, -1.8, 1.8
c_real, c_imag = -0.62772, -.42193
@profile
def calculate_z_serial_purepython(maxiter, zs, cs):
"""Calculate output list using Julia update rule"""
output = [0] * len(zs)
for i in range(len(zs)):
n = 0
z = zs[i]
c = cs[i]
while n < maxiter and abs(z) < 2:
z = z * z + c
n += 1
output[i] = n
return output
@profile
def calc_pure_python(draw_output, desired_width, max_iterations):
"""Create a list of complex co-ordinates (zs) and complex parameters (cs), build Julia set and display"""
x_step = (x2 - x1) / desired_width
y_step = (y1 - y2) / desired_width
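    # y_step is negative (y1 < y2), so the loop below walks ycoord from y2 down to y1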
x = []
y = []
ycoord = y2
while ycoord > y1:
y.append(ycoord)
ycoord += y_step
xcoord = x1
while xcoord < x2:
x.append(xcoord)
xcoord += x_step
# set width and height to the generated pixel counts, rather than the
# pre-rounding desired width and height
# build a list of co-ordinates and the initial condition for each cell.
# Note that our initial condition is a constant and could easily be removed,
# we use it to simulate a real-world scenario with several inputs to our function
zs = []
cs = []
for ycoord in y:
for xcoord in x:
zs.append(complex(xcoord, ycoord))
cs.append(complex(c_real, c_imag))
print("Length of x:", len(x))
print("Total elements:", len(zs))
start_time = time.time()
output = calculate_z_serial_purepython(max_iterations, zs, cs)
end_time = time.time()
secs = end_time - start_time
print(calculate_z_serial_purepython.__name__ + " took", secs, "seconds")
assert sum(output) == 33219980 # this sum is expected for 1000^2 grid with 300 iterations
# Calculate the Julia set using a pure Python solution with
# reasonable defaults for a laptop
# set draw_output to True to use PIL to draw an image
calc_pure_python(draw_output=False, desired_width=1000, max_iterations=300)
| 31.391304
| 109
| 0.645429
|
41492cda1d59787189845f7c6236bbb2909f22fd
| 42
|
py
|
Python
|
sabersql/__init__.py
|
dhuber34/sabersql
|
27a4824d56dda9b7a8cd9b418fc06b3cc5dee935
|
[
"MIT"
] | 1
|
2022-01-19T23:21:43.000Z
|
2022-01-19T23:21:43.000Z
|
sabersql/__init__.py
|
dhuber34/sabersql
|
27a4824d56dda9b7a8cd9b418fc06b3cc5dee935
|
[
"MIT"
] | 2
|
2020-12-08T00:37:23.000Z
|
2022-01-19T21:34:54.000Z
|
sabersql/__init__.py
|
dhuber34/sabersql
|
27a4824d56dda9b7a8cd9b418fc06b3cc5dee935
|
[
"MIT"
] | 1
|
2021-09-05T14:56:02.000Z
|
2021-09-05T14:56:02.000Z
|
#!/usr/bin/env python3
name = "sabersql"
| 10.5
| 22
| 0.666667
|
9ad24b1b8198ac53d5567c1e1f83b3dfa9f4a09f
| 4,153
|
py
|
Python
|
my_account/custom_dns_api.py
|
bobzz-zone/saas_my_account
|
0349bf14714bd070ec003dd96b3f60878af1b9b1
|
[
"MIT"
] | null | null | null |
my_account/custom_dns_api.py
|
bobzz-zone/saas_my_account
|
0349bf14714bd070ec003dd96b3f60878af1b9b1
|
[
"MIT"
] | null | null | null |
my_account/custom_dns_api.py
|
bobzz-zone/saas_my_account
|
0349bf14714bd070ec003dd96b3f60878af1b9b1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Myme and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
import os
import requests
import json
import subprocess
from frappe.utils.background_jobs import enqueue
from frappe.frappeclient import FrappeClient
from frappe.core.doctype.data_import.data_import import import_doc, export_json
from frappe.utils.nestedset import rebuild_tree
# import paramiko
class custom_dns_api(Document):
pass
@frappe.whitelist()
def rebuild_tree_error():
rebuild_tree("Account", "parent_account")
@frappe.whitelist()
def api_call_create_dns(new_site_name):
url = "https://api.cloudflare.com/client/v4/zones/757307a566e97c7d08935340b281f925/dns_records"
# payload = "------WebKitFormBoundary7MA4YWxkTrZu0gW\r\nContent-Disposition: form-data; name=\"type\"\r\n\r\nA\r\n------WebKitFormBoundary7MA4YWxkTrZu0gW\r\nContent-Disposition: form-data; name=\"name\"\r\n\r\nnewsite.crativate.com\r\n------WebKitFormBoundary7MA4YWxkTrZu0gW\r\nContent-Disposition: form-data; name=\"content\"\r\n\r\n35.197.133.195\r\n------WebKitFormBoundary7MA4YWxkTrZu0gW--"
payload = {
'type' : "A",
'name' : new_site_name,
'content': "139.162.21.199",
'proxied' : True
}
headers = {
'content-type': "multipart/form-data; boundary=----WebKitFormBoundary7MA4YWxkTrZu0gW",
'Content-Type': "application/json",
'X-Auth-Email': "bobby@solubis.id",
# api key crativate / rectios : 7eb0d91566ac6409d1957961abac095ec405c
# antusias : 2a7fc7cab52ed7d244db75641d75ca8bf4b93
'X-Auth-Key': "2a7fc7cab52ed7d244db75641d75ca8bf4b93",
'Cache-Control': "no-cache",
'Postman-Token': "b8f18408-ab53-00b4-3931-90536b6d5371"
}
response = requests.request("POST", url, data=json.dumps(payload), headers=headers)
print(response.text)
#added by bobby
@frappe.whitelist()
def create_new_user(newsitename,sitesubdomain, subdomuser, subdompass, fullname_user):
os.chdir("/home/frappe/frappe-bench")
os.system(""" bench --site {} execute my_account.custom_dns_api.create_user_baru --args "['{}','{}','{}']" """.format(newsitename,fullname_user,subdomuser,subdompass))
#end of add
@frappe.whitelist()
def create_new_site_subprocess(newsitename,sitesubdomain, subdomuser, fullname_user):
os.chdir("/home/frappe/frappe-bench")
print("create_new_site subprocess")
new_site_name = newsitename
site_sub_domain = sitesubdomain
#api_call_create_dns(new_site_name)
subdompass = frappe.db.get_value("Purchase User", subdomuser, 'current_password')
plan = frappe.db.get_value("Master Subdomain",sitesubdomain,"active_plan")
frappe.db.sql("""update `tabMaster Subdomain` set is_created = 1 where name = '{}' """.format(sitesubdomain))
frappe.db.commit()
os.chdir("/home/frappe/frappe-bench")
os.system("sudo su frappe")
os.system("bench new-site {} --db-name db_{} --mariadb-root-username root --mariadb-root-password majuterus234@ --admin-password majuterus234@ --install-app erpnext --install-app solubis_brand".format(new_site_name,site_sub_domain))
#os.system("bench setup nginx --yes")
#os.system("sudo service nginx reload")
#os.system("bench --site {} execute solubis_brand.custom_function.disable_signup_website".format(new_site_name))
#os.system("bench --site {} execute solubis_brand.custom_function.import_fixtures".format(new_site_name))
#os.system(""" bench --site {} execute solubis_brand.custom_function.disable_other_roles --args "['{}']" """.format(plan))
#os.system("""bench --site {} execute solubis_brand.custom_function.create_user_baru --args "['{}','{}','{}','{}']"
# """.format(newsitename,fullname_user,subdomuser,subdompass,plan))
@frappe.whitelist(allow_guest=True)
def send_mail_site_created(subdomuser, fullname, newsitename):
setting = frappe.get_single("Additional Settings")
subject = "Welcome to {}".format(setting.url)
args = {"full_name":fullname,"site_link":newsitename}
frappe.sendmail(recipients=subdomuser, sender=setting.email_sender, subject=subject,
template="site_created", header=[subject, "green"], args=args,delayed=False)
| 43.715789
| 395
| 0.756321
|
554876438bf9923db06aa4c745b53c05d81df286
| 14,555
|
py
|
Python
|
src/huntsman/drp/collection/collection.py
|
AstroHuntsman/huntsman-drp
|
00f045ccccc1f7545da491457a2b17b9aabea89a
|
[
"MIT"
] | 1
|
2022-01-03T05:24:33.000Z
|
2022-01-03T05:24:33.000Z
|
src/huntsman/drp/collection/collection.py
|
fergusL/huntsman-drp
|
7f370079e347e4ef5500678808ea9a7952c04e7e
|
[
"MIT"
] | 139
|
2020-10-02T01:49:29.000Z
|
2021-09-07T04:58:51.000Z
|
src/huntsman/drp/collection/collection.py
|
fergusL/huntsman-drp
|
7f370079e347e4ef5500678808ea9a7952c04e7e
|
[
"MIT"
] | 3
|
2020-09-03T03:31:30.000Z
|
2020-09-07T05:22:23.000Z
|
from contextlib import suppress
from datetime import timedelta
from urllib.parse import quote_plus
import pymongo
from pymongo.errors import ServerSelectionTimeoutError
from huntsman.drp.base import HuntsmanBase
from huntsman.drp.utils.date import current_date, make_mongo_date_constraint
from huntsman.drp.document import Document
from huntsman.drp.utils.mongo import mongo_logical_and
class Collection(HuntsmanBase):
""" This class is used to interface with the mongodb. It is responsible for performing queries
and inserting/updating/deleting documents, as well as validating new documents.
"""
_index_fields = None
_required_fields = None
_DocumentClass = None
def __init__(self, db_name=None, collection_name=None, **kwargs):
super().__init__(**kwargs)
# Get the name of the mongo database
self._db_name = self.config["mongodb"]["db_name"] if db_name is None else db_name
# Get the name of the collection in the mongo database
if not collection_name:
try:
collection_name = self.config["collections"][self.__class__.__name__]["name"]
except KeyError:
raise ValueError("Unable to determine collection name.")
self.collection_name = collection_name
# Get the fields required for new documents
with suppress(KeyError):
self._required_fields = self.config["collections"][self.__class__.__name__
]["required_fields"]
# Get the fields used to create a lookup index
# The combination of field values must be unique for each document
with suppress(KeyError):
self._index_fields = self.config["collections"][self.__class__.__name__
]["index_fields"]
# Connect to the DB and initialise the collection
self._connect()
def __str__(self):
return f"{self.__class__.__name__} ({self.collection_name})"
# Public methods
def find(self, document_filter=None, key=None, quality_filter=False, limit=None, sort_by=None,
sort_direction=pymongo.ASCENDING, **kwargs):
"""Get data for one or more matches in the table.
Args:
document_filter (dict, optional): A dictionary containing key, value pairs to be
matched against other documents, by default None
key (str, optional):
Specify a specific key to be returned from the query (e.g. filename), by default
None.
quality_filter (bool, optional): If True, only return documents that satisfy quality
cuts. Default: False.
limit (int): Limit the number of returned documents to this amount.
sort_by (str, optional): If provided, sort results by this key. Default: None.
sort_direction (int, optional): The sorting direction. Use pymongo.ASCENDING or
pymongo.DESCENDING. Default: pymongo.ASCENDING.
**kwargs: Parsed to make_mongo_date_constraint.
Returns:
result (list): List of DataIds or key values if key is specified.
"""
document_filter = Document(document_filter, copy=True)
with suppress(KeyError):
del document_filter["date_modified"] # This might change so don't match with it
# Add date range to criteria if provided
date_constraint = make_mongo_date_constraint(**kwargs)
if date_constraint:
document_filter.update({self._date_key: date_constraint})
mongo_filter = document_filter.to_mongo(flatten=True)
# Add quality cuts to document filter
if quality_filter:
mongo_quality_filter = self._get_quality_filter()
if mongo_quality_filter:
mongo_filter = mongo_logical_and([mongo_filter, mongo_quality_filter])
self.logger.debug(f"Performing mongo find operation with filter: {mongo_filter}.")
# Do the mongo query
cursor = self._collection.find(mongo_filter, {"_id": False})
# Sort results
if sort_by is not None:
self.logger.debug(f"Sorting results by {sort_by}")
cursor.sort(sort_by, sort_direction)
# Limit results
if limit is not None:
self.logger.debug(f"Limiting results to {limit} matches")
cursor = cursor.limit(limit)
# Retrieve the documents
documents = list(cursor)
self.logger.debug(f"Find operation returned {len(documents)} results.")
if key is not None:
return [d[key] for d in documents]
# Skip validation to speed up - inserted documents should already be valid
return [self._DocumentClass(d, validate=False, config=self.config) for d in documents]
def find_one(self, *args, **kwargs):
""" Find a single matching document. If multiple matches, raise a RuntimeError.
Args:
*args, **kwargs: Parsed to self.find.
Returns:
Document or None: If there is a match return the document, else None.
"""
documents = self.find(*args, **kwargs)
if not documents:
return None
if len(documents) > 1:
raise RuntimeError("Matched with more than one document.")
return documents[0]
def insert_one(self, document):
""" Insert a new document into the table after ensuring it is valid and unique.
Args:
document (dict): The document to be inserted into the table.
"""
# Check the required columns exist in the new document
doc = self._prepare_doc_for_insert(document)
# Insert the document
# Uniqueness is verified implicitly
self.logger.debug(f"Inserting document into {self}: {doc}.")
self._collection.insert_one(doc.to_mongo())
def replace_one(self, document_filter, replacement, **kwargs):
""" Replace a matching document with a new one.
Args:
document_filter (Document): dictionary containing key, value pairs used to identify
the document to replace.
replacement (Document): The document to replace with.
**kwargs: Parsed to pymongo replace_one.
Raises:
RuntimeError: If document filter matches with more than one document.
"""
document_filter = Document(document_filter)
# Make sure the filter matches with at most one doc
if self.count_documents(document_filter) > 1:
raise RuntimeError(f"Document filter {document_filter} matches with multiple documents"
f" in {self}.")
mongo_filter = document_filter.to_mongo()
mongo_doc = self._prepare_doc_for_insert(replacement).to_mongo() # Implicit validation
self.logger.debug(f"Replacing {mongo_filter} with {mongo_doc}")
self._collection.replace_one(mongo_filter, mongo_doc, **kwargs)
def update_one(self, document_filter, to_update, upsert=False):
""" Update a single document in the table.
See: https://docs.mongodb.com/manual/reference/operator/update/set/#up._S_set
Args:
document_filter (dict): A dictionary containing key, value pairs used to identify
the document to update, by default None.
to_update (dict): The key, value pairs to update within the matched document.
upsert (bool, optional): If True perform the insert even if no matching documents
are found, by default False.
"""
document_filter = Document(document_filter, copy=True)
with suppress(KeyError):
del document_filter["date_modified"] # This might change so don't match with it
count = self.count_documents(document_filter)
if count > 1:
raise RuntimeError(f"Multiple matches found for document in {self}: {document_filter}.")
elif count == 0:
if upsert:
self.insert_one(to_update)
return
else:
raise RuntimeError(f"No matches found for document {document_filter} in {self}. Use"
" upsert=True to upsert.")
to_update = Document(to_update)
to_update["date_modified"] = current_date()
# Use flattened version (dot notation) for nested updates to work properly
mongo_update = to_update.to_mongo(flatten=True)
self.logger.debug(f"Updating document with: {mongo_update}")
self._collection.update_one(document_filter, {'$set': mongo_update}, upsert=False)
def delete_one(self, document_filter, force=False):
"""Delete one document from the table.
Args:
document_filter (dict, optional): A dictionary containing key, value pairs used to
identify the document to delete, by default None
force (bool, optional): If True, ignore checks and delete all matching documents.
Default False.
"""
document_filter = Document(document_filter, validate=False)
mongo_filter = document_filter.to_mongo()
if not force:
count = self.count_documents(document_filter)
if count > 1:
raise RuntimeError(f"Multiple matches found for document in {self}:"
f" {document_filter}.")
elif (count == 0):
raise RuntimeError(f"No matches found for document in {self}: {document_filter}.")
self.logger.debug(f"Deleting {document_filter} from {self}.")
self._collection.delete_one(mongo_filter)
def insert_many(self, documents, **kwargs):
"""Insert a new document into the table.
Args:
documents (list): List of dictionaries that specify documents to be inserted in the
table.
"""
for d in documents:
self.insert_one(d, **kwargs)
def delete_many(self, documents, **kwargs):
""" Delete one document from the table.
Args:
documents (list): List of dictionaries that specify documents to be deleted from the
table.
"""
self.logger.debug(f"Deleting {len(documents)} documents from {self}.")
for d in documents:
self.delete_one(d, **kwargs)
def find_latest(self, days=0, hours=0, seconds=0, **kwargs):
""" Convenience function to query the latest files in the db.
Args:
days (int): default 0.
hours (int): default 0.
seconds (int): default 0.
Returns:
list: Query result.
"""
date_now = current_date()
date_min = date_now - timedelta(days=days, hours=hours, seconds=seconds)
return self.find(date_min=date_min, **kwargs)
def delete_all(self, really=False, **kwargs):
""" Delete all documents from the collection. """
if not really:
raise RuntimeError("If you really want to do this, parse really=True.")
docs = self.find()
self.delete_many(docs, **kwargs)
def count_documents(self, *args, **kwargs):
""" Count the number of matching documents in the collection.
Args:
*args, **kwargs: Parsed to self.find.
Returns:
int: The number of matching documents in the collection.
"""
return len(self.find(*args, **kwargs))
# Private methods
def _connect(self):
""" Initialise the database.
Args:
db_name (str): The name of the (mongo) database.
collection_name (str): The name of the table (mongo collection).
"""
# Connect to the mongodb
hostname = self.config["mongodb"]["hostname"]
port = self.config["mongodb"]["port"]
if "username" in self.config["mongodb"].keys():
username = quote_plus(self.config["mongodb"]["username"])
password = quote_plus(self.config["mongodb"]["password"])
uri = f"mongodb://{username}:{password}@{hostname}/{self._db_name}?ssl=true"
self._client = pymongo.MongoClient(uri)
else:
self._client = pymongo.MongoClient(hostname, port)
try:
self._client.server_info()
self.logger.info(f"{self} connected to mongodb at {hostname}:{port}.")
except ServerSelectionTimeoutError as err:
self.logger.error(f"Unable to connect {self} to mongodb at {hostname}:{port}.")
raise err
self._db = self._client[self._db_name]
self._collection = self._db[self.collection_name]
# Create unique index
        # This leverages mongodb's server-side locking mechanism for thread-safety on inserts
if self._index_fields is not None:
self._collection.create_index([(k, pymongo.ASCENDING) for k in self._index_fields],
unique=True)
def _prepare_doc_for_insert(self, document):
""" Prepare a document to be inserted into the database.
Args:
document (Document or dict): The document to prepare.
Returns:
Document: The prepared document of the appropriate type for this collection.
"""
# Create and validate document
doc = self._DocumentClass(document, copy=True, unflatten=True)
self._validate_document(doc)
# Add date records
doc["date_created"] = current_date()
doc["date_modified"] = current_date()
return doc
def _validate_document(self, document, required_fields=None):
""" Validate a document for insersion.
Args:
document (Document): The document to validate.
required_fields (iterable of str, optional): Fields required to exist in document.
If not provided, use class attribute.
Raises:
ValueError: If the document is invalid.
"""
if required_fields is None:
required_fields = self._required_fields
if not required_fields:
return
for field in required_fields:
if field not in document:
raise ValueError(f"Field {field} not in document. Cannot insert.")
def _get_quality_filter(self):
""" Return the Query object corresponding to quality cuts. """
raise NotImplementedError
| 42.066474
| 100
| 0.62652
|
eea9c00ed019d5e2eb93b46aea9540dd9417759a
| 2,634
|
py
|
Python
|
bin/Python27/Lib/site-packages/numpy/fft/tests/test_helper.py
|
lefevre-fraser/openmeta-mms
|
08f3115e76498df1f8d70641d71f5c52cab4ce5f
|
[
"MIT"
] | null | null | null |
bin/Python27/Lib/site-packages/numpy/fft/tests/test_helper.py
|
lefevre-fraser/openmeta-mms
|
08f3115e76498df1f8d70641d71f5c52cab4ce5f
|
[
"MIT"
] | null | null | null |
bin/Python27/Lib/site-packages/numpy/fft/tests/test_helper.py
|
lefevre-fraser/openmeta-mms
|
08f3115e76498df1f8d70641d71f5c52cab4ce5f
|
[
"MIT"
] | 1
|
2020-08-08T12:44:48.000Z
|
2020-08-08T12:44:48.000Z
|
#!/usr/bin/env python
"""Test functions for fftpack.helper module
Copied from fftpack.helper by Pearu Peterson, October 2005
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import TestCase, run_module_suite, assert_array_almost_equal
from numpy import fft
from numpy import pi
class TestFFTShift(TestCase):
def test_definition(self):
x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
assert_array_almost_equal(fft.fftshift(x), y)
assert_array_almost_equal(fft.ifftshift(y), x)
x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
assert_array_almost_equal(fft.fftshift(x), y)
assert_array_almost_equal(fft.ifftshift(y), x)
def test_inverse(self):
for n in [1, 4, 9, 100, 211]:
x = np.random.random((n,))
assert_array_almost_equal(fft.ifftshift(fft.fftshift(x)), x)
def test_axes_keyword(self):
freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]
shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]
assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shifted)
assert_array_almost_equal(fft.fftshift(freqs, axes=0),
fft.fftshift(freqs, axes=(0,)))
assert_array_almost_equal(fft.ifftshift(shifted, axes=(0, 1)), freqs)
assert_array_almost_equal(fft.ifftshift(shifted, axes=0),
fft.ifftshift(shifted, axes=(0,)))
class TestFFTFreq(TestCase):
def test_definition(self):
x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
assert_array_almost_equal(9*fft.fftfreq(9), x)
assert_array_almost_equal(9*pi*fft.fftfreq(9, pi), x)
x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
assert_array_almost_equal(10*fft.fftfreq(10), x)
assert_array_almost_equal(10*pi*fft.fftfreq(10, pi), x)
class TestRFFTFreq(TestCase):
def test_definition(self):
x = [0, 1, 2, 3, 4]
assert_array_almost_equal(9*fft.rfftfreq(9), x)
assert_array_almost_equal(9*pi*fft.rfftfreq(9, pi), x)
x = [0, 1, 2, 3, 4, 5]
assert_array_almost_equal(10*fft.rfftfreq(10), x)
assert_array_almost_equal(10*pi*fft.rfftfreq(10, pi), x)
class TestIRFFTN(TestCase):
def test_not_last_axis_success(self):
ar, ai = np.random.random((2, 16, 8, 32))
a = ar + 1j*ai
axes = (-2,)
# Should not raise error
fft.irfftn(a, axes=axes)
if __name__ == "__main__":
run_module_suite()
| 33.341772
| 80
| 0.58656
|
6093f0fba93055c5a308b5bd55568d008d793252
| 21,557
|
py
|
Python
|
test.py
|
lvca/graph-databases-testsuite
|
217cd37d1037bbbfd53b89aab0391b1abcca1590
|
[
"MIT"
] | 29
|
2017-06-21T10:15:10.000Z
|
2022-01-17T00:15:18.000Z
|
test.py
|
lvca/graph-databases-testsuite
|
217cd37d1037bbbfd53b89aab0391b1abcca1590
|
[
"MIT"
] | 34
|
2018-07-05T11:54:08.000Z
|
2022-02-11T16:26:35.000Z
|
test.py
|
lvca/graph-databases-testsuite
|
217cd37d1037bbbfd53b89aab0391b1abcca1590
|
[
"MIT"
] | 9
|
2019-03-17T10:49:35.000Z
|
2022-01-17T00:15:22.000Z
|
#!/usr/bin/env python
"""
Assumptions:
- The entry script (CMD) of every docker-image will eventually execute
`/runtime/{tp2,tp3}/execute.sh`, after database bootstrap operation
have been completed.
- `runtime` directory structure
Provided guarantees:
 - Queries are executed in alphabetical order
 - Files whose names start with '.' are ignored.
Maintainer: Brugnara <mb@disi.unitn.eu>
"""
import re
import sys
import json
import hashlib
import logging
import argparse
import itertools
import subprocess32 as subprocess
from os import listdir
from base64 import b64encode
from itertools import product
from os.path import join, exists, isdir, abspath, basename
# -----------------------------------------------------------------------------
# BASE EXPERIMENT SETTINGS:
ITERATIONS = None # overrided in main
DEBUG = False
PRINT_ONLY = False
# Per query timeout (in seconds) [2 hours]
# 2602253683670
TIMEOUT = 2 * 60 * 60 # 2 hours
# When a query timeout, skip testing with other meta values?
TIMEOUT_MAX_RETRY = 7
# SKIP = False --> 0
# SKIP = True --> 1
DATA_SUFFIX = ''
COMMIT_SUFFIX = ''
# -----------------------------------------------------------------------------
# LOGGING
logger, to_log = None, None
SUBPROCESS_LOGF_NAME = 'docker.log'
SUBPROCESS_LOGF = None
# -----------------------------------------------------------------------------
# Docker images
DATABASES = [
'neo4j',
'orientdb', # NOTE: it uses its own loader (@see README).
'sparksee',
'arangodb', # NOTE: it uses its own loader (@see README).
'titan',
'blazegraph', # Uses Tp3
'neo4j-tp3', # Uses Tp3
'titan-tp3', # Uses Tp3
'janus-tp3', # Uses Tp3
'pg', # NOTE: it uses its own loader (@see README).
# leave as last
'2to3', # Only for conversion
]
IMAGES = [
'dbtrento/gremlin-neo4j',
'dbtrento/gremlin-orientdb',
'dbtrento/gremlin-sparksee',
'dbtrento/gremlin-arangodb',
'dbtrento/gremlin-titan',
'dbtrento/gremlin-blazegraph',
'dbtrento/gremlin-neo4j-tp3',
'dbtrento/gremlin-titan-tp3',
'dbtrento/gremlin-janus-tp3',
'dbtrento/gremlin-pg',
'dbtrento/gremlin-2to3',
]
assert len(DATABASES) == len(IMAGES)
# Changing this requires rebuilding all images with updated paths
RUNTIME_DIR = '/runtime'
# -----------------------------------------------------------------------------
# Global variables
ENV_EXT = [
# Extra env variables,
# @see -e option.
"-e", "RUNTIME_DIR=" + RUNTIME_DIR,
]
CMD_EXT = [
# Command extra arguments,
# @see -v option for volumes
"--cap-add=IPC_LOCK",
"--ulimit",
"memlock=-1:-1",
]
SETTINGS_FNAME = None
DEFAULT_SETTINGS_FILE = "./settings.json"
# Support for 'batch only mode'
BATCH_VAR = ('SID', 'INDEX')
BATCH_ONLY = False
# Flags
LOAD_ONLY = False
FORCE_LOAD = False
# -----------------------------------------------------------------------------
def main(root):
""" Main function
root: absolute path to 'runtime/'
+ for each database
| + for each dataset
| | + if not loaded
| | | load $dataset in $database
| | | commit $database as $data_iamge
| | -
| |
| | + for each query
| | | execute $query on $data_image
- - -
"""
logger.debug('Root {}'.format(root))
datasets, queries, meta_dir = get_test_settings(root, SETTINGS_FNAME)
logger.info("Main testing loop (for each database): ")
logger.info(zip(DATABASES, IMAGES))
if len(IMAGES) == 0:
logger.error("NOTHING TO PROCESS")
for db_name, image in zip(DATABASES, IMAGES):
logger.info("[CURRENT] DB: {} w/image {}".format(db_name, image))
for dataset in datasets:
logger.info("[CURRENT] DATASET: " + dataset)
# Setting up common environment
dataset_path = join(RUNTIME_DIR, 'data', dataset)
data_image = '{}_{}'.format(basename(image), dataset)\
.replace(' ', '-')
samples_file = join(RUNTIME_DIR, 'presampled/',
'samples_' + dataset)
lids_file = join(RUNTIME_DIR, 'presampled/',
'lids_' + '_'.join([dataset, db_name,
b64encode(image)]))
base_environ = ENV_EXT + [
"-e", "DATABASE_NAME=" + db_name, # neo4j
"-e", "DATABASE=" + basename(image), # gremlin-neo4j
"-e", "DATASET=" + dataset_path,
"-e", "SAMPLES_FILE=" + samples_file,
"-e", "LIDS_FILE=" + lids_file,
]
logger.debug('base_environ: ' + ' '.join(base_environ))
try:
load_env = base_environ + [
"-e", 'QUERY=loader.groovy',
"-e", 'ITERATION=0',
]
exec_query(root, image, load_env,
commit_name=data_image + DATA_SUFFIX)
except subprocess.TimeoutExpired:
# should never happen
logger.error('Timeout while loading. How (no timeout set)?.')
sys.exit(42)
except subprocess.CalledProcessError as e:
# error loading this database stop all
logger.fatal('Failed loading {} into {}'.format(db_name,
image))
sys.exit(3)
if LOAD_ONLY:
logger.info("Load only flag is set, skipping tests.")
continue
logger.info("Starting benchmark")
for query in queries:
TIMEOUT_COUNTER = 0
logger.info("[CURRENT] query: {} REPEAT {}"
.format(query, ITERATIONS))
# Query meta variables parsing and range generation
meta_names, meta_values, contains_batch =\
read_meta(meta_dir, query, ITERATIONS)
if BATCH_ONLY and not contains_batch:
logger.info("BATCH ONLY mode: skipping " + query)
continue
logger.info("Query {} on {} using {} (image: {})"
.format(query, dataset, db_name, image))
query_env = base_environ + ["-e", "QUERY=" + query]
logger.info("({}) meta parameters: {}"
.format(len(meta_names), meta_names))
for values in meta_values:
# Express meta parameters as ENV variables
meta_env = list(itertools.chain.from_iterable(
("-e", "{}={}".format(n, v)) for n, v in
zip(meta_names, values)))
try:
test_env = []
cn = None
to = TIMEOUT
if COMMIT_SUFFIX:
cn = data_image + DATA_SUFFIX + COMMIT_SUFFIX
to = None
else:
test_env += ['--rm']
test_env += query_env + meta_env
exec_query(root, data_image + DATA_SUFFIX, test_env,
timeout=to, commit_name=cn)
TIMEOUT_COUNTER = 0
except subprocess.TimeoutExpired:
to_log.error(','.join([
basename(image), dataset, query,
str(TIMEOUT), str(zip(meta_names, values))]))
TIMEOUT_COUNTER += 1
if (TIMEOUT_MAX_RETRY != 0 and
TIMEOUT_COUNTER >= TIMEOUT_MAX_RETRY):
logger.warn('SKIP_ON_TIMEOUT giving up on {}'
.format(query))
break
except subprocess.CalledProcessError, e:
logger.error('Executing query {}'.format(query))
logger.error(e)
logger.info("Done for now")
def exec_query(root, docker_image, env, timeout=None, commit_name=None):
""" Create a container from the $docker_image,
append the $env list (of `-e` option) to the docker command.
Waits for the container to exit.
- timeout: enforce maximum running time.
- commit_name: if specified and the container returns successfully (0),
the container will be committed as $commit_name
Globals:
DEBUG, PRINT_ONLY, FORCE_LOAD, CMD_EXT, SUBPROCESS_LOGF, logger
Throws:
subprocess.TimeoutExpired: the timeout fired
subprocess.CalledProcessError: there was an error in the container
"""
# Check if what we are going to commit (if any) already exists
if commit_name:
command = ["docker", "images", "-q", commit_name]
if len(subprocess.check_output(command).strip('\n ')):
if not FORCE_LOAD:
logger.info("Loading (or committing): use existing {}"
.format(commit_name))
return
logger.info("Loading (or committing): overriding {}"
.format(commit_name))
# Build command
command = ["docker", "run", "-v", root + ':' + RUNTIME_DIR] + CMD_EXT + env
container_name = 'CNT_' + docker_image.replace('/', '-') + hashlib.sha256(
''.join(command + [docker_image]).encode()).hexdigest()
command += ['--name', container_name, docker_image]
try:
# Execute the query
logger.debug('Command: ' + ' '.join(command))
if PRINT_ONLY:
print(' '.join(command))
return
subprocess.check_call(command, stdout=SUBPROCESS_LOGF,
stderr=SUBPROCESS_LOGF, timeout=timeout)
# Commit if we should
if commit_name:
logger.info('Committing {} as {}'
.format(container_name, commit_name))
command = ["docker", "commit", container_name, commit_name]
subprocess.check_call(command, stdout=SUBPROCESS_LOGF,
stderr=SUBPROCESS_LOGF)
finally:
rmf(container_name)
def rmf(container_name):
""" Force the removal of a container
"""
command = ["docker", "rm", "-f", container_name]
try:
error = None
proc = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
outs, errs = proc.communicate(timeout=TIMEOUT)
except subprocess.TimeoutExpired, e:
proc.kill()
error = e
outs, errs = proc.communicate()
except Exception, e:
error = e
outs, errs = proc.communicate()
finally:
proc.kill()
if error and (not ("No such container" in str(errs))):
logger.error(error)
def read_meta(meta_dir, query_name, iterations):
"""
Parse metadata.
Format:
*first line of the query file only*
- Starts with '#META:'
- variables are encoded as 'name=value'
- value can be range as '[start-end]'
[0-3] == {0,1,2,3}
- value can be set as {1,2,3,stella}
- set values can not have space in front or tail;
if needed use ' eg: {1,' ciao mondo '}
- space between value will be stripped:
{1,2,3} == {1 , 2, 3}
- different variables should be separated by ;
        * these chars are not allowed as parts of set values: []-{},;=
#META:first=[1-4];second={1,2,3,stella}
-> 'first' in (1,2,3,4)
-> 'second' in set{1,2,3,stella}
"""
with open(join(meta_dir, query_name), 'r') as query:
first_line = query.readline()
signature = '#META:'
meta_names = ['ITERATION']
meta_values = [xrange(0, iterations)]
contains_batch = False
first_line = first_line.strip(' ,;')
if first_line.startswith(signature):
line = first_line[len(signature):]
if len(line.strip()):
for (name, value) in (x.split('=') for x in line.split(';')):
name, value = name.strip(), value.strip()
meta_names.append(name)
if value[0] == '[':
start, end = value.strip('[]').split('-')
if BATCH_ONLY and name in BATCH_VAR:
meta_values.append([int(end)])
contains_batch = True
else:
meta_values.append(xrange(int(start), int(end) + 1))
else:
meta_values.append(map(lambda v: v.strip(),
value.strip('{}').split(',')))
return meta_names, product(*meta_values), contains_batch
# -----------------------------------------------------------------------------
# Support functions
def dir_iter(directory):
return (f for f in sorted(listdir(directory))
if not isdir(f) and not f.startswith('.'))
def check_paths(root):
dataset_dir = join(root, 'data')
query_tp2 = join(root, 'tp2/queries')
query_tp3 = join(root, 'tp3/queries')
meta = join(root, 'meta')
assert exists(dataset_dir), "dataset directory should exists inside root"
assert exists(query_tp2), "tp2 query directory should exists inside root"
assert exists(query_tp3), "tp3 query directory should exists inside root"
assert exists(meta), "meta directory should exists inside root"
return dataset_dir, query_tp2, query_tp3, meta
# -----------------------------------------------------------------------------
# Logging
def init_loggers():
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Script logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.FileHandler('test.log')
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
logger.addHandler(handler)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG if DEBUG else logging.INFO)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.info('Logger configuration completed')
# Timeout tracker
to_log = logging.getLogger('timeout')
to_log.setLevel(logging.INFO)
to_handler = logging.FileHandler('timeout.log')
to_handler.setLevel(logging.INFO)
to_handler.setFormatter(formatter)
to_log.addHandler(to_handler)
logger.info('Timeout logger configuration completed')
return logger, to_log
# -----------------------------------------------------------------------------
# Command line arguments parsing
def parse_arguments():
parser = argparse.ArgumentParser(
description='dbTrento "graph databases comparison" script.')
parser.add_argument('-d', '--debug', action='store_true', default=False,
help='enable debug information')
parser.add_argument('-e', '--env', action='append', metavar='ENV',
help='Set this ENV variables in the executed docker')
parser.add_argument('-v', '--volume', action='append', metavar='VOLUME',
help='Mount volumes (use for resources under ln -s)')
parser.add_argument('-l', '--load_only', action='store_true',
default=False,
help='prepare the images but do not run tests')
parser.add_argument('-f', '--force_load', action='store_true',
default=False,
help='recreate data image even if it exists')
parser.add_argument('-b', '--batch_only', action='store_true',
default=False,
help='run only "batch" tests')
parser.add_argument('-s', '--settings',
default=None,
help='JSON file with dataset and queries fnames')
parser.add_argument('-r', '--repetitions', default=5,
help='number of repetitions for each query')
parser.add_argument('-i', '--image', action='append', metavar='IMAGE_TAG',
help='run only on those images/databases')
parser.add_argument('-p', '--print-only', dest='print_only',
action='store_true', default=False,
help='only print the docker command')
parser.add_argument('-x', '--suffix', default='', help="data img suffix")
parser.add_argument('-c', '--commit', default='', help="commit img suffix. Also disables TIMEOUT")
args = parser.parse_args(sys.argv[1:])
global DEBUG
DEBUG = DEBUG or args.debug
global PRINT_ONLY
PRINT_ONLY = PRINT_ONLY or args.print_only
    global LOAD_ONLY, FORCE_LOAD, BATCH_ONLY
LOAD_ONLY = args.load_only
FORCE_LOAD = args.force_load
BATCH_ONLY = args.batch_only
global SETTINGS_FNAME
SETTINGS_FNAME = args.settings
global ITERATIONS
ITERATIONS = int(args.repetitions)
global DATA_SUFFIX, COMMIT_SUFFIX
DATA_SUFFIX = args.suffix
COMMIT_SUFFIX = args.commit
parse_images(args.image)
parse_volumes(args.volume)
parse_env(args.env)
if DEBUG:
ENV_EXT.extend(['-e', 'DEBUG=true'])
def parse_images(i):
global DATABASES, IMAGES
if not(i and len(i)):
DATABASES = DATABASES[:-1]
IMAGES = IMAGES[:-1]
return
new_db, new_img = [], []
for index, name in enumerate(IMAGES):
if name in i:
new_db.append(DATABASES[index])
new_img.append(IMAGES[index])
DATABASES, IMAGES = new_db, new_img
def parse_volumes(vols):
if not(vols and len(vols)):
return
CMD_EXT.extend(itertools.chain(
*map(lambda x: ['-v', '{0}:{0}'.format(abspath(x)) if ':' not in x else x], vols)))
def parse_env(env):
if env and len(env):
ENV_EXT.extend(itertools.chain.from_iterable(
map(lambda v: ['-e', v], env)))
# Consider if enforce add of ' only if not '
# v.split('=',1)[0]+ "='"+v.split('=',1)[1].strip("'")+"'"], env)))
logger.info('Additional ENV from prompt:')
logger.info(ENV_EXT)
else:
logger.info('You may want to specify, via -e,' +
'(JAVA_TOOL_OPTIONS, JAVA_OPTIONS, JAVA_OPTS)')
logger.info("Example: -e JAVA_OPTIONS='-Xms1G -Xmn128M -Xmx120G'")
# -----------------------------------------------------------------------------
def comment_remover(text):
def replacer(match):
s = match.group(0)
if s.startswith('/'):
return " " # note: a space and not an empty string
else:
return s
pattern = re.compile(
r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"',
re.DOTALL | re.MULTILINE
)
return re.sub(pattern, replacer, text)
# Test settings
def get_test_settings(root, fname=None):
""" Return the list of datasets and queries onto run the tests.
fname: name of the JSON file from which read the settings.
{
'datasets': [],
'queries': []
}
If no file is provided the settings are inferred from the directories
content; i.e. ls runtime/dataset ls runtime/tp2/query
NOTE: All queries (begin from settings.queries or ls)
must exists in both runtime/tp2/queries, runtime/tp3/queries
"""
if fname is None and exists(DEFAULT_SETTINGS_FILE):
fname = DEFAULT_SETTINGS_FILE
datasets = queries = None
if fname:
if not exists(fname):
logger.fatal('Settings file {} does not exists'.format(fname))
sys.exit(4)
try:
with open(fname) as f:
settings = json.loads(comment_remover(f.read()))
except ValueError:
logger.fatal('Settings file {} is not valid JSON'.format(fname))
sys.exit(5)
datasets, queries = settings['datasets'], settings['queries']
logger.info("From settings: {} Datasets and {} Queries".format(
len(datasets), len(queries)))
dataset_dir, query_tp2, query_tp3, meta_dir = check_paths(root)
tp2_queries = set(dir_iter(query_tp2))
tp3_queries = set(dir_iter(query_tp3))
metas = set(dir_iter(meta_dir))
if datasets is None:
datasets = sorted(list(dir_iter(dataset_dir)))
datasets = [x for x in datasets if x]
if queries is None:
queries = sorted(list(tp2_queries | tp3_queries))
queries = [x for x in queries if x]
common_queries = tp2_queries & tp3_queries & metas
missing_queries = set(queries) - common_queries
assert not len(missing_queries),\
'Missing implementation of {} in tp2, {} in tp3, {} in meta'.format(
missing_queries - tp2_queries, missing_queries - tp3_queries,
missing_queries - metas)
return datasets, queries, meta_dir
# =============================================================================
if __name__ == '__main__':
logger, to_log = init_loggers()
parse_arguments()
with open(SUBPROCESS_LOGF_NAME, 'a') as SUBPROCESS_LOGF:
main(abspath("./runtime"))
| 34.657556
| 102
| 0.543443
|
2e809ff144681afcb0869bb6b43e4298dfaab6f5
| 6,783
|
py
|
Python
|
accelbyte_py_sdk/api/platform/operations/item/get_item_dynamic_data.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | null | null | null |
accelbyte_py_sdk/api/platform/operations/item/get_item_dynamic_data.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | 1
|
2021-10-13T03:46:58.000Z
|
2021-10-13T03:46:58.000Z
|
accelbyte_py_sdk/api/platform/operations/item/get_item_dynamic_data.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
# justice-platform-service (4.10.0)
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HeaderStr
from .....core import HttpResponse
from ...models import ErrorEntity
from ...models import ItemDynamicDataInfo
class GetItemDynamicData(Operation):
"""Get item dynamic data (getItemDynamicData)
Get item dynamic data for published item.
Other detail info:
* Required permission : resource="ADMIN:NAMESPACE:{namespace}:ITEM", action=2 (READ)
* Returns : item dynamic data
Required Permission(s):
- ADMIN:NAMESPACE:{namespace}:ITEM [READ]
Properties:
url: /platform/admin/namespaces/{namespace}/items/{itemId}/dynamic
method: GET
tags: ["Item"]
consumes: []
produces: ["application/json"]
securities: [BEARER_AUTH] or [BEARER_AUTH]
item_id: (itemId) REQUIRED str in path
namespace: (namespace) REQUIRED str in path
Responses:
200: OK - ItemDynamicDataInfo (successful operation)
404: Not Found - ErrorEntity (30341: Item [{itemId}] does not exist in namespace [{namespace}])
"""
# region fields
_url: str = "/platform/admin/namespaces/{namespace}/items/{itemId}/dynamic"
_method: str = "GET"
_consumes: List[str] = []
_produces: List[str] = ["application/json"]
_securities: List[List[str]] = [["BEARER_AUTH"], ["BEARER_AUTH"]]
_location_query: str = None
item_id: str # REQUIRED in [path]
namespace: str # REQUIRED in [path]
# endregion fields
# region properties
@property
def url(self) -> str:
return self._url
@property
def method(self) -> str:
return self._method
@property
def consumes(self) -> List[str]:
return self._consumes
@property
def produces(self) -> List[str]:
return self._produces
@property
def securities(self) -> List[List[str]]:
return self._securities
@property
def location_query(self) -> str:
return self._location_query
# endregion properties
# region get methods
# endregion get methods
# region get_x_params methods
def get_all_params(self) -> dict:
return {
"path": self.get_path_params(),
}
def get_path_params(self) -> dict:
result = {}
if hasattr(self, "item_id"):
result["itemId"] = self.item_id
if hasattr(self, "namespace"):
result["namespace"] = self.namespace
return result
# endregion get_x_params methods
# region is/has methods
# endregion is/has methods
# region with_x methods
def with_item_id(self, value: str) -> GetItemDynamicData:
self.item_id = value
return self
def with_namespace(self, value: str) -> GetItemDynamicData:
self.namespace = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "item_id") and self.item_id:
result["itemId"] = str(self.item_id)
elif include_empty:
result["itemId"] = ""
if hasattr(self, "namespace") and self.namespace:
result["namespace"] = str(self.namespace)
elif include_empty:
result["namespace"] = ""
return result
# endregion to methods
# region response methods
# noinspection PyMethodMayBeStatic
def parse_response(self, code: int, content_type: str, content: Any) -> Tuple[Union[None, ItemDynamicDataInfo], Union[None, ErrorEntity, HttpResponse]]:
"""Parse the given response.
200: OK - ItemDynamicDataInfo (successful operation)
404: Not Found - ErrorEntity (30341: Item [{itemId}] does not exist in namespace [{namespace}])
---: HttpResponse (Undocumented Response)
---: HttpResponse (Unexpected Content-Type Error)
---: HttpResponse (Unhandled Error)
"""
pre_processed_response, error = self.pre_process_response(code=code, content_type=content_type, content=content)
if error is not None:
return None, None if error.is_no_content() else error
code, content_type, content = pre_processed_response
if code == 200:
return ItemDynamicDataInfo.create_from_dict(content), None
if code == 404:
return None, ErrorEntity.create_from_dict(content)
return None, self.handle_undocumented_response(code=code, content_type=content_type, content=content)
# endregion response methods
# region static methods
@classmethod
def create(
cls,
item_id: str,
namespace: str,
) -> GetItemDynamicData:
instance = cls()
instance.item_id = item_id
instance.namespace = namespace
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> GetItemDynamicData:
instance = cls()
if "itemId" in dict_ and dict_["itemId"] is not None:
instance.item_id = str(dict_["itemId"])
elif include_empty:
instance.item_id = ""
if "namespace" in dict_ and dict_["namespace"] is not None:
instance.namespace = str(dict_["namespace"])
elif include_empty:
instance.namespace = ""
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"itemId": "item_id",
"namespace": "namespace",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"itemId": True,
"namespace": True,
}
# endregion static methods
| 28.620253
| 156
| 0.628336
|
9a56a11d0acfbff4358e71d090602b33ae043bad
| 4,570
|
py
|
Python
|
circus/tests/test_client.py
|
asteven/circus
|
278ff49c1f941177465056350512282b233e8ccf
|
[
"Apache-2.0"
] | null | null | null |
circus/tests/test_client.py
|
asteven/circus
|
278ff49c1f941177465056350512282b233e8ccf
|
[
"Apache-2.0"
] | null | null | null |
circus/tests/test_client.py
|
asteven/circus
|
278ff49c1f941177465056350512282b233e8ccf
|
[
"Apache-2.0"
] | 2
|
2019-01-15T10:38:19.000Z
|
2020-09-14T14:07:50.000Z
|
import os
import tempfile
from tornado.testing import gen_test
from tornado.gen import coroutine, Return
from circus.util import tornado_sleep
from circus.tests.support import TestCircus, EasyTestSuite, IS_WINDOWS
from circus.client import make_message
from circus.stream import QueueStream
class TestClient(TestCircus):
@coroutine
def status(self, cmd, **props):
resp = yield self.call(cmd, **props)
raise Return(resp.get('status'))
@coroutine
def numprocesses(self, cmd, **props):
resp = yield self.call(cmd, waiting=True, **props)
raise Return(resp.get('numprocesses'))
@coroutine
def numwatchers(self, cmd, **props):
resp = yield self.call(cmd, **props)
raise Return(resp.get('numwatchers'))
@coroutine
def set(self, name, **opts):
resp = yield self.status("set", name=name, waiting=True, options=opts)
raise Return(resp)
@gen_test
def test_client(self):
# playing around with the watcher
yield self.start_arbiter()
msg = make_message("numwatchers")
resp = yield self.cli.call(msg)
self.assertEqual(resp.get("numwatchers"), 1)
self.assertEqual((yield self.numprocesses("numprocesses")), 1)
self.assertEqual((yield self.set("test", numprocesses=2)), 'ok')
self.assertEqual((yield self.numprocesses("numprocesses")), 2)
self.assertEqual((yield self.set("test", numprocesses=1)), 'ok')
self.assertEqual((yield self.numprocesses("numprocesses")), 1)
self.assertEqual((yield self.numwatchers("numwatchers")), 1)
self.assertEqual((yield self.call("list")).get('watchers'), ['test'])
self.assertEqual((yield self.numprocesses("incr", name="test")), 2)
self.assertEqual((yield self.numprocesses("numprocesses")), 2)
self.assertEqual((yield self.numprocesses("incr", name="test", nb=2)),
4)
self.assertEqual((yield self.numprocesses("decr", name="test", nb=3)),
1)
self.assertEqual((yield self.numprocesses("numprocesses")), 1)
if IS_WINDOWS:
# On Windows we can't set an env to a process without some keys
env = dict(os.environ)
else:
env = {}
env['test'] = 2
self.assertEqual((yield self.set("test", env=env)), 'error')
env['test'] = '2'
self.assertEqual((yield self.set("test", env=env)), 'ok')
resp = yield self.call('get', name='test', keys=['env'])
options = resp.get('options', {})
self.assertEqual(options.get('env', {}), env)
resp = yield self.call('stats', name='test')
self.assertEqual(resp['status'], 'ok')
resp = yield self.call('globaloptions', name='test')
self.assertEqual(resp['options']['pubsub_endpoint'],
self.arbiter.pubsub_endpoint)
yield self.stop_arbiter()
_, tmp_filename = tempfile.mkstemp(prefix='test_hook')
def long_hook(*args, **kw):
os.unlink(tmp_filename)
class TestWithHook(TestCircus):
def run_with_hooks(self, hooks):
self.stream = QueueStream()
self.errstream = QueueStream()
dummy_process = 'circus.tests.support.run_process'
        # note: "async" became a reserved keyword in Python 3.7+, so this kwarg
        # (from the old circus API used here) only parses on older interpreters.
        return self._create_circus(dummy_process, async=True,
stdout_stream={'stream': self.stream},
stderr_stream={'stream': self.errstream},
hooks=hooks)
@gen_test
def test_message_id(self):
hooks = {'before_stop': ('circus.tests.test_client.long_hook', False)}
testfile, arbiter = self.run_with_hooks(hooks)
yield arbiter.start()
try:
self.assertTrue(os.path.exists(tmp_filename))
msg = make_message("numwatchers")
resp = yield self.cli.call(msg)
self.assertEqual(resp.get("numwatchers"), 1)
# this should timeout
resp = yield self.cli.call(make_message("stop"))
self.assertEqual(resp.get('status'), 'ok')
while arbiter.watchers[0].status() != 'stopped':
yield tornado_sleep(.1)
resp = yield self.cli.call(make_message("numwatchers"))
self.assertEqual(resp.get("numwatchers"), 1)
self.assertFalse(os.path.exists(tmp_filename))
finally:
if os.path.exists(tmp_filename):
os.unlink(tmp_filename)
arbiter.stop()
test_suite = EasyTestSuite(__name__)
| 35.703125
| 78
| 0.607659
|
2f36a6b6131790d94581ad07517b53019722b5d5
| 2,074
|
py
|
Python
|
solum/tests/common/test_context.py
|
ed-/solum
|
2d23edb7fb53e1bdeff510710824658575d166c4
|
[
"Apache-2.0"
] | null | null | null |
solum/tests/common/test_context.py
|
ed-/solum
|
2d23edb7fb53e1bdeff510710824658575d166c4
|
[
"Apache-2.0"
] | null | null | null |
solum/tests/common/test_context.py
|
ed-/solum
|
2d23edb7fb53e1bdeff510710824658575d166c4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 - Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from solum.common import context
from solum.tests import base
class TestContext(base.BaseTestCase):
def test_context_to_dict(self):
ctx = context.RequestContext('_token_', '_user_', '_tenant_',
'_domain_', '_user_domain_',
'_project_domain_', False, False,
'_request_id_', '_user_name_',
['admin', 'member'], 'fake_auth_url',
trust_id='fake_trust_id')
ctx_dict = ctx.to_dict()
self.assertEqual(ctx_dict['auth_token'], '_token_')
self.assertEqual(ctx_dict['user'], '_user_')
self.assertEqual(ctx_dict['tenant'], '_tenant_')
self.assertEqual(ctx_dict['domain'], '_domain_')
self.assertEqual(ctx_dict['user_domain'], '_user_domain_')
self.assertEqual(ctx_dict['project_domain'], '_project_domain_')
self.assertEqual(ctx_dict['is_admin'], False)
self.assertEqual(ctx_dict['read_only'], False)
self.assertEqual(ctx_dict['show_deleted'], False)
self.assertEqual(ctx_dict['auth_token'], '_token_')
        self.assertIsNone(ctx_dict['instance_uuid'])
self.assertEqual(ctx_dict['user_name'], '_user_name_')
self.assertEqual(ctx_dict['roles'], ['admin', 'member'])
self.assertEqual(ctx_dict['auth_url'], 'fake_auth_url')
self.assertEqual(ctx_dict['trust_id'], 'fake_trust_id')
| 47.136364
| 75
| 0.645612
|
d0ea331599fc20800335cf9ade1533bb340e700e
| 2,257
|
py
|
Python
|
app/data_info.py
|
chuusan/DSCI_532_Group114_SKEC
|
1ee8f15497ad96eb9faa523db18e3d219ed7aabb
|
[
"MIT"
] | null | null | null |
app/data_info.py
|
chuusan/DSCI_532_Group114_SKEC
|
1ee8f15497ad96eb9faa523db18e3d219ed7aabb
|
[
"MIT"
] | null | null | null |
app/data_info.py
|
chuusan/DSCI_532_Group114_SKEC
|
1ee8f15497ad96eb9faa523db18e3d219ed7aabb
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import altair as alt
import os as os
# need to pip install pyproj
from pyproj import Proj
# Creating Path to read data from the repo
x = os.getcwd()
x = x[:len(x)-3]  # strip the trailing "app" folder so paths resolve from the repo root
path = x + "/Data/crimedata_csv_all_years.csv"
# Reading Vancouver City Population from 2001 to 2018
path_pop=x + "/Data/Population_trend.csv"
pop_yr = pd.read_csv(path_pop)
pop_yr = pop_yr[['YEAR', 'Population']]
# Reading each neighborhood's proportion of population to overall city's population
path_prop=x + "/Data/population_proportion.csv"
pop_prop = pd.read_csv(path_prop)
def clean_data():
#alt.renderers.enable('notebook')
alt.data_transformers.disable_max_rows()
#mydata = pd.read_csv('~/MDS/DSCI_532_Group114_SKEC/Data/crimedata_csv_all_years.csv')
mydata = pd.read_csv(path)
mydata = mydata[~((mydata['X']==0) | (mydata['Y']==0) | (mydata['NEIGHBOURHOOD'].isna()))]
mydata = mydata.drop(columns=['DAY', 'MINUTE', 'HUNDRED_BLOCK'])
# >>>>
# Excluding Year 2019 because the data is till Oct only whereas other years have full data
mydata = mydata[mydata['YEAR']!=2019]
# Relacing Stanley Park with West End because its a subset
mydata = mydata.replace({'NEIGHBOURHOOD': 'Stanley Park'}, value = 'West End')
# Relacing Musqueam with Marpole because its a subset
mydata = mydata.replace({'NEIGHBOURHOOD': 'Musqueam'}, value = 'Marpole')
mydata = mydata.replace({'NEIGHBOURHOOD': 'Central Business District'}, value = 'Downtown')
mydata = mydata.replace({'NEIGHBOURHOOD': 'Arbutus Ridge'}, value = 'Arbutus-Ridge')
mydata = mydata.replace({'TYPE': 'Vehicle Collision or Pedestrian Struck (with Fatality)'}, value = 'Vehicle Collision or Pedestrian Struck')
mydata = mydata.replace({'TYPE': 'Vehicle Collision or Pedestrian Struck (with Injury)'}, value = 'Vehicle Collision or Pedestrian Struck')
# >>>>
# Converting XY UTM coordinate system to Latitude & Longitude
p = Proj(proj='utm',zone=10,ellps='WGS84', preserve_units=False)
lon, lat = p(mydata['X'].values, mydata['Y'].values, inverse=True)
latlon = pd.DataFrame(np.c_[lon, lat], columns=['Lon', 'Lat'])
mydata['Lon']=latlon['Lon']
mydata['Lat']=latlon['Lat']
return mydata
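# --- Hedged usage sketch (not part of the original dashboard code) ---
# Shows the same UTM zone 10 -> lat/lon conversion used inside clean_data() on a
# single made-up coordinate pair, so the pyproj call can be sanity-checked
# without downloading the crime CSV. The easting/northing values below are
# arbitrary placeholders, not real incident locations.
if __name__ == "__main__":
    p_demo = Proj(proj='utm', zone=10, ellps='WGS84', preserve_units=False)
    demo_lon, demo_lat = p_demo(491000.0, 5459000.0, inverse=True)  # roughly central Vancouver
    print("lon:", demo_lon, "lat:", demo_lat)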
| 40.303571
| 145
| 0.699158
|
a4a871c163f304ac2d19e7a0ca9cdeb7996ae268
| 641
|
py
|
Python
|
AlgoBox/Users/forms.py
|
AbhiY98/AlgoBox
|
86de7a7e7081c0f0c5df39f5530fa67140417d62
|
[
"Apache-2.0"
] | null | null | null |
AlgoBox/Users/forms.py
|
AbhiY98/AlgoBox
|
86de7a7e7081c0f0c5df39f5530fa67140417d62
|
[
"Apache-2.0"
] | null | null | null |
AlgoBox/Users/forms.py
|
AbhiY98/AlgoBox
|
86de7a7e7081c0f0c5df39f5530fa67140417d62
|
[
"Apache-2.0"
] | null | null | null |
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.db.models import CharField
class SignUpForm(UserCreationForm):
bio = forms.CharField(max_length = 500, help_text = 'Something that describes you in brief?')
cc_handle = forms.CharField(max_length = 40)
cf_handle = forms.CharField(max_length=40)
sp_handle = forms.CharField(max_length=40)
ac_handle = forms.CharField(max_length=40)
class Meta:
model = User
fields = ('username', 'bio', 'cc_handle', 'cf_handle', 'sp_handle', 'ac_handle', 'password1', 'password2',)
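# --- Hedged usage sketch (not part of the original app code) ---
# Illustrates binding the form to POST-style data and validating it, e.g. from a
# Django shell. The handles and passwords below are placeholders; in the real
# app the data would come from request.POST inside a signup view.
def _example_signup_form():
    form = SignUpForm(data={
        "username": "demo_user",
        "bio": "Competitive programmer.",
        "cc_handle": "demo_cc",
        "cf_handle": "demo_cf",
        "sp_handle": "demo_sp",
        "ac_handle": "demo_ac",
        "password1": "a-strong-example-pass-123",
        "password2": "a-strong-example-pass-123",
    })
    if form.is_valid():
        # creates the User; the extra handle/bio fields need a profile model to persist
        return form.save()
    return form.errors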
| 42.733333
| 115
| 0.733229
|
47d3b03f8385465596780cedad5e3fcdeb33c2e0
| 1,572
|
py
|
Python
|
data/showtimes.py
|
rjkerrison/allocine-python
|
35167d9b7b6e2e05285907ce3bdefc094ca14f0c
|
[
"MIT"
] | null | null | null |
data/showtimes.py
|
rjkerrison/allocine-python
|
35167d9b7b6e2e05285907ce3bdefc094ca14f0c
|
[
"MIT"
] | null | null | null |
data/showtimes.py
|
rjkerrison/allocine-python
|
35167d9b7b6e2e05285907ce3bdefc094ca14f0c
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
from datetime import date
from typing import List
from helpers.schedules import Schedule, build_weekly_schedule_str
from .movies import MovieVersion
@dataclass
class Showtime(Schedule):
movie: MovieVersion
def __str__(self):
return f"{self.date_str} : {self.movie}"
def toJSON(self):
raise Exception('Showtimes should not be converted directly to JSON')
def get_showtimes_of_a_day(showtimes: List[Showtime], *, date: date):
return [showtime for showtime in showtimes if showtime.date == date]
# == Utils ==
def get_available_dates(showtimes: List[Showtime]):
dates = [s.date for s in showtimes]
return sorted(list(set(dates)))
def group_showtimes_per_schedule(showtimes: List[Showtime]):
showtimes_per_date = {}
available_dates = get_available_dates(showtimes=showtimes)
for available_date in available_dates:
showtimes_per_date[available_date] = get_showtimes_of_a_day(
showtimes=showtimes, date=available_date
)
grouped_showtimes = {}
for available_date in available_dates:
hours = [s.hour_short_str for s in showtimes_per_date[available_date]]
hours_str = ", ".join(hours)
if grouped_showtimes.get(hours_str) is None:
grouped_showtimes[hours_str] = []
grouped_showtimes[hours_str].append(available_date)
return grouped_showtimes
def build_program_str(showtimes: List[Showtime]):
schedules = [Schedule(s.date_time) for s in showtimes]
return build_weekly_schedule_str(schedules)
| 31.44
| 78
| 0.732188
|
ce6617b04d462a5830c3c745815e67befe4bf032
| 6,400
|
py
|
Python
|
frappe/defaults.py
|
erpnext-tm/frappe
|
7b470f28e1cf00b0659c01e06a2d0a4693b28d98
|
[
"MIT"
] | null | null | null |
frappe/defaults.py
|
erpnext-tm/frappe
|
7b470f28e1cf00b0659c01e06a2d0a4693b28d98
|
[
"MIT"
] | null | null | null |
frappe/defaults.py
|
erpnext-tm/frappe
|
7b470f28e1cf00b0659c01e06a2d0a4693b28d98
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.cache_manager import clear_defaults_cache, common_default_keys
from frappe.desk.notifications import clear_notifications
# Note: DefaultValue records are identified by parenttype
# __default, __global or 'User Permission'
def set_user_default(key, value, user=None, parenttype=None):
set_default(key, value, user or frappe.session.user, parenttype)
def add_user_default(key, value, user=None, parenttype=None):
add_default(key, value, user or frappe.session.user, parenttype)
def get_user_default(key, user=None):
user_defaults = get_defaults(user or frappe.session.user)
d = user_defaults.get(key, None)
if is_a_user_permission_key(key):
if d and isinstance(d, (list, tuple)) and len(d) == 1:
# Use User Permission value when only when it has a single value
d = d[0]
else:
d = user_defaults.get(frappe.scrub(key), None)
value = isinstance(d, (list, tuple)) and d[0] or d
if not_in_user_permission(key, value, user):
return
return value
def get_user_default_as_list(key, user=None):
user_defaults = get_defaults(user or frappe.session.user)
d = user_defaults.get(key, None)
if is_a_user_permission_key(key):
if d and isinstance(d, (list, tuple)) and len(d) == 1:
# Use User Permission value when only when it has a single value
d = [d[0]]
else:
d = user_defaults.get(frappe.scrub(key), None)
d = list(filter(None, (not isinstance(d, (list, tuple))) and [d] or d))
# filter default values if not found in user permission
values = [value for value in d if not not_in_user_permission(key, value)]
return values
def is_a_user_permission_key(key):
return ":" not in key and key != frappe.scrub(key)
def not_in_user_permission(key, value, user=None):
# returns true or false based on if value exist in user permission
user = user or frappe.session.user
user_permission = get_user_permissions(user).get(frappe.unscrub(key)) or []
for perm in user_permission:
# doc found in user permission
if perm.get("doc") == value:
return False
# return true only if user_permission exists
return True if user_permission else False
def get_user_permissions(user=None):
	"""Return frappe.core.doctype.user_permission.user_permission.get_user_permissions (kept for backward compatibility)."""
	from frappe.core.doctype.user_permission.user_permission import (
		get_user_permissions as _get_user_permissions,
	)

	return _get_user_permissions(user)
def get_defaults(user=None):
globald = get_defaults_for()
if not user:
user = frappe.session.user if frappe.session else "Guest"
if user:
userd = {}
userd.update(get_defaults_for(user))
userd.update({"user": user, "owner": user})
globald.update(userd)
return globald
def clear_user_default(key, user=None):
clear_default(key, parent=user or frappe.session.user)
# Global
def set_global_default(key, value):
set_default(key, value, "__default")
def add_global_default(key, value):
add_default(key, value, "__default")
def get_global_default(key):
d = get_defaults().get(key, None)
value = isinstance(d, (list, tuple)) and d[0] or d
if not_in_user_permission(key, value):
return
return value
# Common
def set_default(key, value, parent, parenttype="__default"):
"""Override or add a default value.
Adds default value in table `tabDefaultValue`.
:param key: Default key.
:param value: Default value.
:param parent: Usually, **User** to whom the default belongs.
:param parenttype: [optional] default is `__default`."""
if frappe.db.sql(
"""
select
defkey
from
`tabDefaultValue`
where
defkey=%s and parent=%s
for update""",
(key, parent),
):
frappe.db.sql(
"""
delete from
`tabDefaultValue`
where
defkey=%s and parent=%s""",
(key, parent),
)
if value != None:
add_default(key, value, parent)
else:
_clear_cache(parent)
def add_default(key, value, parent, parenttype=None):
d = frappe.get_doc(
{
"doctype": "DefaultValue",
"parent": parent,
"parenttype": parenttype or "__default",
"parentfield": "system_defaults",
"defkey": key,
"defvalue": value,
}
)
d.insert(ignore_permissions=True)
_clear_cache(parent)
def clear_default(key=None, value=None, parent=None, name=None, parenttype=None):
"""Clear a default value by any of the given parameters and delete caches.
:param key: Default key.
:param value: Default value.
:param parent: User name, or `__global`, `__default`.
:param name: Default ID.
:param parenttype: Clear defaults table for a particular type e.g. **User**.
"""
conditions = []
values = []
if name:
conditions.append("name=%s")
values.append(name)
else:
if key:
conditions.append("defkey=%s")
values.append(key)
if value:
conditions.append("defvalue=%s")
values.append(value)
if parent:
conditions.append("parent=%s")
values.append(parent)
if parenttype:
conditions.append("parenttype=%s")
values.append(parenttype)
if parent:
clear_defaults_cache(parent)
else:
clear_defaults_cache("__default")
clear_defaults_cache("__global")
if not conditions:
raise Exception("[clear_default] No key specified.")
frappe.db.sql(
"""delete from tabDefaultValue where {0}""".format(" and ".join(conditions)), tuple(values)
)
_clear_cache(parent)
def get_defaults_for(parent="__default"):
"""get all defaults"""
defaults = frappe.cache().hget("defaults", parent)
if defaults == None:
# sort descending because first default must get precedence
res = frappe.db.sql(
"""select defkey, defvalue from `tabDefaultValue`
where parent = %s order by creation""",
(parent,),
as_dict=1,
)
defaults = frappe._dict({})
for d in res:
if d.defkey in defaults:
# listify
if not isinstance(defaults[d.defkey], list) and defaults[d.defkey] != d.defvalue:
defaults[d.defkey] = [defaults[d.defkey]]
if d.defvalue not in defaults[d.defkey]:
defaults[d.defkey].append(d.defvalue)
elif d.defvalue is not None:
defaults[d.defkey] = d.defvalue
frappe.cache().hset("defaults", parent, defaults)
return defaults
def _clear_cache(parent):
if parent in common_default_keys:
frappe.clear_cache()
else:
clear_notifications(user=parent)
frappe.clear_cache(user=parent)
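# --- Hedged usage sketch (not part of the upstream module) ---
# Typical calls into this module from server-side code running inside an
# initialised Frappe site (after frappe.init/frappe.connect or in a request);
# the key names and values below are illustrative placeholders only.
def _example_defaults_roundtrip():
	set_user_default("default_company", "Example Co")  # per-user default
	company = get_user_default("default_company")  # -> "Example Co" (unless filtered out by user permissions)
	set_global_default("demo_mode", "1")  # shared __default value
	clear_user_default("default_company")
	return company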
| 24.150943
| 123
| 0.719688
|
5ae6ed402ce2ef07ef1eae69c0cb485c92f4d098
| 5,106
|
py
|
Python
|
samples/oci-load-file-into-adw-python/func.py
|
ojasvajain/oracle-functions-samples
|
268fe177e50349fa4d826be6370493b3d678506d
|
[
"Apache-2.0"
] | 1
|
2022-01-18T05:45:13.000Z
|
2022-01-18T05:45:13.000Z
|
samples/oci-load-file-into-adw-python/func.py
|
ojasvajain/oracle-functions-samples
|
268fe177e50349fa4d826be6370493b3d678506d
|
[
"Apache-2.0"
] | null | null | null |
samples/oci-load-file-into-adw-python/func.py
|
ojasvajain/oracle-functions-samples
|
268fe177e50349fa4d826be6370493b3d678506d
|
[
"Apache-2.0"
] | null | null | null |
#
# oci-load-file-into-adw-python version 1.0.
#
# Copyright (c) 2020 Oracle, Inc.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
#
import io
import json
import oci
import csv
import requests
from fdk import response
def soda_insert(ordsbaseurl, schema, dbuser, dbpwd, document):
auth=(dbuser, dbpwd)
sodaurl = ordsbaseurl + schema + '/soda/latest/'
collectionurl = sodaurl + "regionsnumbers"
headers = {'Content-Type': 'application/json'}
r = requests.post(collectionurl, auth=auth, headers=headers, data=json.dumps(document))
r_json = {}
try:
r_json = json.loads(r.text)
except ValueError as e:
print(r.text, flush=True)
raise
return r_json
def load_data(signer, namespace, bucket_name, object_name, ordsbaseurl, schema, dbuser, dbpwd):
client = oci.object_storage.ObjectStorageClient(config={}, signer=signer)
try:
print("INFO - About to read object {0} in bucket {1}...".format(object_name, bucket_name), flush=True)
# we assume the file can fit in memory, otherwise we have to use the "range" argument and loop through the file
csvdata = client.get_object(namespace, bucket_name, object_name)
if csvdata.status == 200:
print("INFO - Object {0} is read".format(object_name), flush=True)
input_csv_text = str(csvdata.data.text)
reader = csv.DictReader(input_csv_text.split('\n'), delimiter=',')
for row in reader:
print("INFO - inserting:")
print("INFO - " + json.dumps(row), flush=True)
insert_status = soda_insert(ordsbaseurl, schema, dbuser, dbpwd, row)
if "id" in insert_status["items"][0]:
print("INFO - Successfully inserted document ID " + insert_status["items"][0]["id"], flush=True)
else:
raise SystemExit("Error while inserting: " + insert_status)
else:
raise SystemExit("cannot retrieve the object" + str(object_name))
except Exception as e:
raise SystemExit(str(e))
print("INFO - All documents are successfully loaded into the database", flush=True)
def move_object(signer, namespace, source_bucket, destination_bucket, object_name):
objstore = oci.object_storage.ObjectStorageClient(config={}, signer=signer)
objstore_composite_ops = oci.object_storage.ObjectStorageClientCompositeOperations(objstore)
resp = objstore_composite_ops.copy_object_and_wait_for_state(
namespace,
source_bucket,
oci.object_storage.models.CopyObjectDetails(
destination_bucket=destination_bucket,
destination_namespace=namespace,
destination_object_name=object_name,
destination_region=signer.region,
source_object_name=object_name
),
wait_for_states=[
oci.object_storage.models.WorkRequest.STATUS_COMPLETED,
oci.object_storage.models.WorkRequest.STATUS_FAILED])
if resp.data.status != "COMPLETED":
raise Exception("cannot copy object {0} to bucket {1}".format(object_name,destination_bucket))
else:
resp = objstore.delete_object(namespace, source_bucket, object_name)
print("INFO - Object {0} moved to Bucket {1}".format(object_name,destination_bucket), flush=True)
def handler(ctx, data: io.BytesIO=None):
signer = oci.auth.signers.get_resource_principals_signer()
object_name = bucket_name = namespace = ordsbaseurl = schema = dbuser = dbpwd = ""
try:
cfg = ctx.Config()
input_bucket = cfg["input-bucket"]
processed_bucket = cfg["processed-bucket"]
ordsbaseurl = cfg["ords-base-url"]
schema = cfg["db-schema"]
dbuser = cfg["db-user"]
dbpwd = cfg["dbpwd-cipher"]
except Exception as e:
print('Missing function parameters: bucket_name, ordsbaseurl, schema, dbuser, dbpwd', flush=True)
raise
try:
body = json.loads(data.getvalue())
print("INFO - Event ID {} received".format(body["eventID"]), flush=True)
print("INFO - Object name: " + body["data"]["resourceName"], flush=True)
object_name = body["data"]["resourceName"]
print("INFO - Bucket name: " + body["data"]["additionalDetails"]["bucketName"], flush=True)
if body["data"]["additionalDetails"]["bucketName"] != input_bucket:
raise ValueError("Event Bucket name error")
print("INFO - Namespace: " + body["data"]["additionalDetails"]["namespace"], flush=True)
namespace = body["data"]["additionalDetails"]["namespace"]
except Exception as e:
print('ERROR: bad Event!', flush=True)
raise
load_data(signer, namespace, input_bucket, object_name, ordsbaseurl, schema, dbuser, dbpwd)
move_object(signer, namespace, input_bucket, processed_bucket, object_name)
return response.Response(
ctx,
response_data=json.dumps({"status": "Success"}),
headers={"Content-Type": "application/json"}
)
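# --- Hedged usage sketch (not part of the deployed function) ---
# soda_insert() can be exercised on its own against an ADW ORDS endpoint; the
# URL, schema and credentials below are placeholders, and the document is a
# made-up row mirroring what csv.DictReader yields inside load_data().
if __name__ == "__main__":
    demo_doc = {"region": "emea", "value": "42"}
    status = soda_insert(
        ordsbaseurl="https://example.adb.region.oraclecloudapps.com/ords/",  # placeholder ORDS base URL
        schema="demo_schema",
        dbuser="demo_user",
        dbpwd="demo_password",
        document=demo_doc,
    )
    print(status)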
| 44.4
| 119
| 0.660595
|
c5ce122d17b6579b81290b29923a957c689b36c7
| 5,399
|
py
|
Python
|
attic/tests/post-deployment/resources/test_support/controls_low.py
|
ska-telescope/skampi
|
cd2f95bd56594888c8d0c3476824b438dfcfcf71
|
[
"BSD-3-Clause"
] | null | null | null |
attic/tests/post-deployment/resources/test_support/controls_low.py
|
ska-telescope/skampi
|
cd2f95bd56594888c8d0c3476824b438dfcfcf71
|
[
"BSD-3-Clause"
] | 3
|
2019-10-25T13:38:56.000Z
|
2022-03-30T09:13:33.000Z
|
attic/tests/post-deployment/resources/test_support/controls_low.py
|
ska-telescope/skampi
|
cd2f95bd56594888c8d0c3476824b438dfcfcf71
|
[
"BSD-3-Clause"
] | 2
|
2019-11-04T09:59:06.000Z
|
2020-05-07T11:05:42.000Z
|
import pytest
from datetime import date,datetime
import os
import logging
from tango import DeviceProxy
##SUT imports
from ska.scripting.domain import Telescope, SubArray
from resources.test_support.helpers_low import subarray_devices,resource,ResourceGroup,waiter,watch
from resources.test_support.sync_decorators_low import sync_assign_resources, sync_configure, sync_reset_sa, sync_end_sb
import resources.test_support.tmc_helpers_low as tmc
from resources.test_support.mappings import device_to_subarrays
from resources.test_support.mappings_low import device_to_subarray
LOGGER = logging.getLogger(__name__)
def take_subarray(id):
return pilot(id)
class pilot():
def __init__(self, id):
self.SubArray = SubArray(id)
self.logs = ""
self.agents = ResourceGroup(resource_names=subarray_devices)
self.state = "Empty"
self.rollback_order = {
'IDLE': self.reset_when_aborted
#'Ready':self.and_end_sb_when_ready,
# 'Configuring':restart_subarray,
# 'Scanning':restart_subarray
}
def and_display_state(self):
print("state at {} is:\n{}".format(datetime.now(),self.agents.get('State')))
return self
def and_display_obsState(self):
print("state at {} is:\n{}".format(datetime.now(),self.agents.get('obsState')))
return self
def reset_when_aborted(self):
@sync_reset_sa
def reset():
self.SubArray.reset()
reset()
self.state = "IDLE"
return self
def and_end_sb_when_ready(self):
@sync_end_sb
def end_sb():
self.SubArray.end()
end_sb()
self.state = "Composed"
return self
def telescope_is_in_standby():
LOGGER.info('resource("ska_low/tm_subarray_node/1").get("State")'+ str(resource('ska_low/tm_subarray_node/1').get("State")))
LOGGER.info('resource("ska_low/tm_leaf_node/mccs_master").get("State")' +
str(resource('ska_low/tm_leaf_node/mccs_master').get("State")))
return [resource('ska_low/tm_subarray_node/1').get("State"),
resource('ska_low/tm_leaf_node/mccs_master').get("State")] == \
['OFF','OFF']
def set_telescope_to_running(disable_waiting = False):
resource('ska_low/tm_subarray_node/1').assert_attribute('State').equals('OFF')
the_waiter = waiter()
the_waiter.set_wait_for_starting_up()
Telescope().start_up()
if not disable_waiting:
the_waiter.wait(100)
if the_waiter.timed_out:
pytest.fail("timed out whilst starting up telescope:\n {}".format(the_waiter.logs))
def set_telescope_to_standby():
resource('ska_low/tm_subarray_node/1').assert_attribute('State').equals('ON')
the_waiter = waiter()
the_waiter.set_wait_for_going_to_standby()
Telescope().standby()
#It is observed that CSP and CBF subarrays sometimes take more than 8 sec to change the State to DISABLE
#therefore timeout is given as 12 sec
the_waiter.wait(100)
if the_waiter.timed_out:
pytest.fail("timed out whilst setting telescope to standby:\n {}".format(the_waiter.logs))
@sync_assign_resources(100)
def to_be_composed_out_of():
resource('ska_low/tm_subarray_node/1').assert_attribute('State').equals('ON')
resource('ska_low/tm_subarray_node/1').assert_attribute('obsState').equals('EMPTY')
assign_resources_file = 'resources/test_data/OET_integration/mccs_assign_resources.json'
subarray = SubArray(1)
LOGGER.info('Subarray has been created.')
subarray.allocate_from_file(cdm_file=assign_resources_file, with_processing=False)
LOGGER.info('Invoked AssignResources on CentralNodeLow')
@sync_configure
def configure_by_file():
configure_file = 'resources/test_data/OET_integration/mccs_configure.json'
SubarrayNodeLow = DeviceProxy('ska_low/tm_subarray_node/1')
subarray = SubArray(1)
LOGGER.info('Subarray has been created.')
subarray.configure_from_file(configure_file, 10, with_processing = False)
LOGGER.info("Subarray obsState is: " + str(SubarrayNodeLow.obsState))
LOGGER.info('Invoked Configure on Subarray')
def restart_subarray(id):
devices = device_to_subarrays.keys()
filtered_devices = [device for device in devices if device_to_subarrays[device] == id ]
the_waiter = waiter()
the_waiter.set_wait_for_going_to_standby()
exceptions_raised = ""
for device in filtered_devices:
try:
resource(device).restart()
except Exception as e:
exceptions_raised += f'\nException raised on reseting {device}:{e}'
if exceptions_raised != "":
raise Exception(f'Error in initialising devices:{exceptions_raised}')
the_waiter.wait()
def restart_subarray_low(id):
devices = device_to_subarray.keys()
filtered_devices = [device for device in devices if device_to_subarray[device] == id ]
the_waiter = waiter()
the_waiter.set_wait_for_going_to_standby()
exceptions_raised = ""
for device in filtered_devices:
try:
resource(device).restart()
except Exception as e:
exceptions_raised += f'\nException raised on reseting {device}:{e}'
if exceptions_raised != "":
raise Exception(f'Error in initialising devices:{exceptions_raised}')
the_waiter.wait()
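# --- Hedged usage sketch (not part of the test support module) ---
# The helpers above are normally called from BDD test steps roughly in this
# order; running it for real requires a deployed SKA low telescope and the
# Tango devices these proxies point at, so this is only an illustration.
def _example_observation_flow():
    if telescope_is_in_standby():
        set_telescope_to_running()
    to_be_composed_out_of()   # assign resources to subarray 1
    configure_by_file()       # configure the scan from the bundled JSON
    take_subarray(1).and_display_obsState()
    set_telescope_to_standby()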
| 38.29078
| 128
| 0.704575
|
e0a7e488ad0bc4668fa2480347756f64b3082414
| 4,838
|
py
|
Python
|
scripts/fighting.py
|
itsabugnotafeature/Raiders
|
72c20a83c253538a3a41658a78cbc0fe5eca346b
|
[
"MIT"
] | 1
|
2022-03-27T05:36:44.000Z
|
2022-03-27T05:36:44.000Z
|
scripts/fighting.py
|
itsabugnotafeature/Raiders
|
72c20a83c253538a3a41658a78cbc0fe5eca346b
|
[
"MIT"
] | null | null | null |
scripts/fighting.py
|
itsabugnotafeature/Raiders
|
72c20a83c253538a3a41658a78cbc0fe5eca346b
|
[
"MIT"
] | null | null | null |
from scripts.variables.localvars import *
class FightManager:
def __init__(self, system):
self.attacker = None
self.defender = None
self.player = None
self.monster = None
self.GameLogic = system
self.Engine = self.GameLogic.Engine
self.turn_counter = 1
self.AbilityAnalyzer = AbilityAnalyzer()
def set_engine(self, engine):
self.Engine = engine
def handle_event(self, event):
if event.subtype == FIGHT_BEGIN:
self.player = event.player
self.monster = event.monster
self.attacker = self.GameLogic.get_active_sprite()
self.defender = self.GameLogic.get_active_target()
self.attacker.prepare_for_fight()
self.defender.prepare_for_fight()
self.turn_counter = 1
if event.subtype == ACTION:
self.attacker.prepare_for_turn()
self.defender.prepare_for_turn()
player_attack = self.player.get_attack(self.monster, event.num, self.GameLogic.grid)
monster_attack = self.monster.get_attack(self.player, self.turn_counter, self.GameLogic.grid)
print("FIGHT: {} used {}, {} used {}".format(self.player, player_attack, self.monster, monster_attack))
player_outcome, monster_outcome = self.AbilityAnalyzer.get_outcome(self.player, player_attack,
self.monster, monster_attack)
# TODO: Is this redundant given that the results are calculated order agnostic in the AbilityAnalyzer?
if self.attacker == self.player:
self.player.use(player_attack, self.monster, player_outcome, self.Engine)
self.monster.use(monster_attack, self.player, monster_outcome, self.Engine)
else:
self.monster.use(monster_attack, self.player, monster_outcome, self.Engine)
self.player.use(player_attack, self.monster, player_outcome, self.Engine)
if not self.player.fightable or not self.monster.fightable:
make_event(FIGHT_EVENT, subtype=FIGHT_END)
return
self.turn_counter += 1
if not self.can_continue(self.player, self.monster, self.Engine.Logic.grid):
make_event(FIGHT_EVENT, subtype=FIGHT_END)
if event.subtype == FIGHT_END:
self.Engine.game_vars[GAME_STATE] = TURN_RESET
self.turn_counter = 1
self.monster = None
self.player = None
self.attacker = None
self.defender = None
def can_continue(self, player, monster, grid):
if self.turn_counter > 3:
return False
if not self.player.can_make_attack(monster, grid) and not monster.can_make_attack(player, grid):
print("FIGHT: Exiting early because {} and {} have no more available abilities.".format(self.player, self.monster))
return False
return True
class AbilityAnalyzer:
def __init__(self):
pass
def get_outcome(self, player, player_ability, monster, monster_ability):
"""
Generates a dict of possible results that the various scripts involved in the attacking process can use to
decide what animations/sounds/attacks to do
"blocked" signifies that the respective attack was blocked
"blocking" signifies that the respective block successfully blocked an attack
"""
player_outcome = {"blocked": False, "blocking": False, "counter": False, "death_blocked": False,
"opposite_ability": monster_ability, "out_of_range": False}
monster_outcome = {"blocked": False, "blocking": False, "counter": False, "death_blocked": False,
"opposite_ability": player_ability, "out_of_range": False}
if not player.fightable:
player_outcome["death_blocked"] = True
if monster_ability.type == "block":
is_player_blocked = self.resolve_block(player_ability, monster_ability)
player_outcome["blocked"] = is_player_blocked
monster_outcome["blocking"] = is_player_blocked
if not monster.fightable:
monster_outcome["death_blocked"] = True
if player_ability.type == "block":
is_monster_blocked = self.resolve_block(monster_ability, player_ability)
monster_outcome["blocked"] = is_monster_blocked
player_outcome["blocking"] = is_monster_blocked
return player_outcome, monster_outcome
@staticmethod
def resolve_block(base_ability, blocking_ability):
if base_ability.type == blocking_ability.type:
return False
return blocking_ability.can_block(base_ability)
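# --- Hedged usage sketch (not part of the game scripts) ---
# Demonstrates AbilityAnalyzer.resolve_block() with small stand-in ability
# objects; the real ability classes live elsewhere in the project, so the stub
# below only mimics the two members the analyzer relies on (a .type string and
# a .can_block() method).
class _StubAbility:
    def __init__(self, type_, blocks=()):
        self.type = type_
        self._blocks = blocks

    def can_block(self, other):
        return other.type in self._blocks


if __name__ == "__main__":
    analyzer = AbilityAnalyzer()
    slash = _StubAbility("attack")
    shield = _StubAbility("block", blocks=("attack",))
    print(analyzer.resolve_block(slash, shield))   # True: the block covers attacks
    print(analyzer.resolve_block(shield, shield))  # False: same type never blocks itself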
| 39.655738
| 127
| 0.637247
|
4209d5f15abae8052e16f12612044d4a0d62e938
| 5,366
|
py
|
Python
|
model/attention/HaloAttention.py
|
lyp2333/External-Attention-pytorch
|
08be6baff82bf22d48b1746fd668446280365b9b
|
[
"MIT"
] | 1
|
2022-03-15T11:25:34.000Z
|
2022-03-15T11:25:34.000Z
|
model/attention/HaloAttention.py
|
lyp2333/External-Attention-pytorch
|
08be6baff82bf22d48b1746fd668446280365b9b
|
[
"MIT"
] | null | null | null |
model/attention/HaloAttention.py
|
lyp2333/External-Attention-pytorch
|
08be6baff82bf22d48b1746fd668446280365b9b
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
# relative positional embedding
def to(x):
return {'device': x.device, 'dtype': x.dtype}
def pair(x):
return (x, x) if not isinstance(x, tuple) else x
def expand_dim(t, dim, k):
t = t.unsqueeze(dim = dim)
expand_shape = [-1] * len(t.shape)
expand_shape[dim] = k
return t.expand(*expand_shape)
def rel_to_abs(x):
b, l, m = x.shape
r = (m + 1) // 2
col_pad = torch.zeros((b, l, 1), **to(x))
x = torch.cat((x, col_pad), dim = 2)
flat_x = rearrange(x, 'b l c -> b (l c)')
flat_pad = torch.zeros((b, m - l), **to(x))
flat_x_padded = torch.cat((flat_x, flat_pad), dim = 1)
final_x = flat_x_padded.reshape(b, l + 1, m)
final_x = final_x[:, :l, -r:]
return final_x
def relative_logits_1d(q, rel_k):
b, h, w, _ = q.shape
r = (rel_k.shape[0] + 1) // 2
logits = einsum('b x y d, r d -> b x y r', q, rel_k)
logits = rearrange(logits, 'b x y r -> (b x) y r')
logits = rel_to_abs(logits)
logits = logits.reshape(b, h, w, r)
logits = expand_dim(logits, dim = 2, k = r)
return logits
class RelPosEmb(nn.Module):
def __init__(
self,
block_size,
rel_size,
dim_head
):
super().__init__()
height = width = rel_size
scale = dim_head ** -0.5
self.block_size = block_size
self.rel_height = nn.Parameter(torch.randn(height * 2 - 1, dim_head) * scale)
self.rel_width = nn.Parameter(torch.randn(width * 2 - 1, dim_head) * scale)
def forward(self, q):
block = self.block_size
q = rearrange(q, 'b (x y) c -> b x y c', x = block)
rel_logits_w = relative_logits_1d(q, self.rel_width)
rel_logits_w = rearrange(rel_logits_w, 'b x i y j-> b (x y) (i j)')
q = rearrange(q, 'b x y d -> b y x d')
rel_logits_h = relative_logits_1d(q, self.rel_height)
rel_logits_h = rearrange(rel_logits_h, 'b x i y j -> b (y x) (j i)')
return rel_logits_w + rel_logits_h
# classes
class HaloAttention(nn.Module):
def __init__(
self,
*,
dim,
block_size,
halo_size,
dim_head = 64,
heads = 8
):
super().__init__()
assert halo_size > 0, 'halo size must be greater than 0'
self.dim = dim
self.heads = heads
self.scale = dim_head ** -0.5
self.block_size = block_size
self.halo_size = halo_size
inner_dim = dim_head * heads
self.rel_pos_emb = RelPosEmb(
block_size = block_size,
rel_size = block_size + (halo_size * 2),
dim_head = dim_head
)
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
def forward(self, x):
b, c, h, w, block, halo, heads, device = *x.shape, self.block_size, self.halo_size, self.heads, x.device
assert h % block == 0 and w % block == 0, 'fmap dimensions must be divisible by the block size'
assert c == self.dim, f'channels for input ({c}) does not equal to the correct dimension ({self.dim})'
# get block neighborhoods, and prepare a halo-ed version (blocks with padding) for deriving key values
q_inp = rearrange(x, 'b c (h p1) (w p2) -> (b h w) (p1 p2) c', p1 = block, p2 = block)
kv_inp = F.unfold(x, kernel_size = block + halo * 2, stride = block, padding = halo)
kv_inp = rearrange(kv_inp, 'b (c j) i -> (b i) j c', c = c)
# derive queries, keys, values
q = self.to_q(q_inp)
k, v = self.to_kv(kv_inp).chunk(2, dim = -1)
# split heads
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = heads), (q, k, v))
# scale
q *= self.scale
# attention
sim = einsum('b i d, b j d -> b i j', q, k)
# add relative positional bias
sim += self.rel_pos_emb(q)
        # mask out padding (in the paper, they claim to not need masks, but what about padding?)
        # after unfolding a tensor of ones, valid pixels are 1 and zero-padded halo pixels are 0,
        # so the attention logits must be filled with -inf where the unfolded mask is 0 (i.e. at ~mask)
        mask = torch.ones(1, 1, h, w, device = device)
        mask = F.unfold(mask, kernel_size = block + (halo * 2), stride = block, padding = halo)
        mask = repeat(mask, '() j i -> (b i h) () j', b = b, h = heads)
        mask = mask.bool()
        max_neg_value = -torch.finfo(sim.dtype).max
        sim.masked_fill_(~mask, max_neg_value)
# attention
attn = sim.softmax(dim = -1)
# aggregate
out = einsum('b i j, b j d -> b i d', attn, v)
# merge and combine heads
out = rearrange(out, '(b h) n d -> b n (h d)', h = heads)
out = self.to_out(out)
# merge blocks back to original feature map
out = rearrange(out, '(b h w) (p1 p2) c -> b c (h p1) (w p2)', b = b, h = (h // block), w = (w // block), p1 = block, p2 = block)
return out
if __name__ == '__main__':
input=torch.randn(1,512,8,8)
halo = HaloAttention(dim=512,
block_size=2,
halo_size=1,)
output=halo(input)
print(output.shape)
| 30.83908
| 138
| 0.544353
|
936e4b0accf0d2b889f4ba8d69763f717a5a56e5
| 1,949
|
py
|
Python
|
package/spack-diamond/package.py
|
ctuning/ck-spack
|
307934efce1be2d4f104251275c82fbc70127105
|
[
"BSD-3-Clause"
] | 1
|
2018-07-17T07:45:09.000Z
|
2018-07-17T07:45:09.000Z
|
package/spack-diamond/package.py
|
ctuning/ck-spack
|
307934efce1be2d4f104251275c82fbc70127105
|
[
"BSD-3-Clause"
] | null | null | null |
package/spack-diamond/package.py
|
ctuning/ck-spack
|
307934efce1be2d4f104251275c82fbc70127105
|
[
"BSD-3-Clause"
] | null | null | null |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Diamond(CMakePackage):
"""DIAMOND is a sequence aligner for protein and translated DNA searches,
designed for high performance analysis of big sequence data."""
homepage = "https://ab.inf.uni-tuebingen.de/software/diamond"
url = "https://github.com/bbuchfink/diamond/archive/v0.9.14.tar.gz"
version('0.9.21', '6f3c53520f3dad37dfa3183d61f21dd5')
version('0.9.20', 'd73f4955909d16456d83b30d9c294b2b')
version('0.9.19', '8565d2d3bfe407ee778eeabe7c6a7fde')
version('0.9.14', 'b9e1d0bc57f07afa05dbfbb53c31aae2')
version('0.8.38', 'd4719c8a7947ba9f743446ac95cfe644')
version('0.8.26', '0d86305ab25cc9b3bb3564188d30fff2')
depends_on('zlib')
| 45.325581
| 78
| 0.691637
|
8d07be0b5f3a87621f9a283dad7204d5d5e8b50e
| 566
|
py
|
Python
|
src/browserist/browser/mouse/hover.py
|
jakob-bagterp/browserist
|
76bd916dd217b7da3759fd6ec3374191002dc091
|
[
"Apache-2.0"
] | 2
|
2022-02-20T10:03:19.000Z
|
2022-03-22T11:17:10.000Z
|
src/browserist/browser/mouse/hover.py
|
jakob-bagterp/browserist
|
76bd916dd217b7da3759fd6ec3374191002dc091
|
[
"Apache-2.0"
] | null | null | null |
src/browserist/browser/mouse/hover.py
|
jakob-bagterp/browserist
|
76bd916dd217b7da3759fd6ec3374191002dc091
|
[
"Apache-2.0"
] | null | null | null |
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from ...constant import timeout
from ...model.type.xpath import XPath
from ..wait.for_element import wait_for_element
def mouse_hover(driver: object, xpath: str, timeout: int = timeout.DEFAULT) -> None:
xpath = XPath(xpath)
wait_for_element(driver, xpath, timeout)
element = driver.find_element(By.XPATH, xpath) # type: ignore
actions = ActionChains(driver) # type: ignore
actions.move_to_element(element).perform() # type: ignore
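# --- Hedged usage sketch (not part of the library module) ---
# mouse_hover() expects a Selenium WebDriver and an XPath string; the driver
# setup and the XPath below are illustrative only (any WebDriver instance and
# any valid element XPath would do).
if __name__ == "__main__":
    from selenium import webdriver

    driver = webdriver.Chrome()  # assumes chromedriver is available on PATH
    try:
        driver.get("https://example.com")
        mouse_hover(driver, "//h1")  # hover over the page heading
    finally:
        driver.quit()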
| 37.733333
| 84
| 0.757951
|
67245e20cb1ed74e4ad166a1bb00ad9591296fd3
| 3,042
|
py
|
Python
|
python13 (writing files with python).py
|
Ahammmad-Shawki8/AS8-repository
|
fb0d1663c888c0bec0b1013b49778861117b6398
|
[
"MIT"
] | 11
|
2020-09-08T12:18:50.000Z
|
2021-11-08T11:22:44.000Z
|
python13 (writing files with python).py
|
Ahammmad-Shawki8/AS8-repository
|
fb0d1663c888c0bec0b1013b49778861117b6398
|
[
"MIT"
] | 5
|
2021-03-02T13:18:38.000Z
|
2021-04-24T03:30:01.000Z
|
python13 (writing files with python).py
|
Ahammmad-Shawki8/AS8-repository
|
fb0d1663c888c0bec0b1013b49778861117b6398
|
[
"MIT"
] | 4
|
2020-08-17T12:23:12.000Z
|
2021-03-08T17:52:33.000Z
|
# working with files
# how do we write a file with code?
#1.use the open function to create and open a file.
# myFile= open(fileName,accessMode)
#2.we must specify
#-file name
#-access mode
# what is the file name?
#= the file name is the name of our file including the extension
#-data.txt, mytimes.csv
# the file will be created in the same folder as your program.
# if we aren't sure which directory our project is using, we can right-click
# on the tab for our code window and select "Open Containing Folder" to see the folder in File Explorer.
# what is the access mode?
#= the access mode specifies what we will do with the file after we open it.
# we can specify any of them following:
# Access Mode - Actions
# r - Read a file
# w - Write to the file. It will overwrite the file.
#   a           -   Append to the existing file content.
# b - Open a binary file
# r+/w+ - Read and write.
# x - Creates a new file. If file already exists, the operation fails.
# t - This is the default mode. It opens in text mode.
filename="Guestlist.txt"
accessmode="w"
myfile=open(filename, accessmode)
#or myfile=open(filename, mode=accessmode)
#or myfile=open(filename, mode="w")
#or myfile=open("Guestlist.txt", "w")
# Writing to files
# how do we write to a function?
# we can use write function.
filename="Guestlist.txt"
accessmode="w"
myfile=open(filename, accessmode)
myfile.write("I am a coder!\n")
myfile.write("I read in class 9.")
# when we are finished we should always close the file.
# we can use close method.
filename="Guestlist.txt"
accessmode="w"
myfile=open(filename, accessmode)
myfile.write("I am a coder!\n")
myfile.write("I read in class 9.")
myfile.close()
# we can print the name of the file.
print(myfile.name)
# we can also print the mode of the file.
print(myfile.mode)
# we can see if the file is closed or not by using closed method.
print(myfile.closed)
# but opening a file and remembering to close it every time is tedious.
# so we are going to use a context manager.
# programs should always open a file, and close it when they are done.
# a context manager automatically opens and closes the file for us.
with open(filename, accessmode) as myfile:
    pass
# here we aren't doing anything with the file yet, so we skip it with the pass keyword.
# while using a context manager, we need to indent the lines that work on the file,
# otherwise the code won't work.
# using the seek method when writing files.
with open("overwrite.txt","w") as d:
    d.write("test")
    d.seek(0)
    d.write("test")
# it writes "test", seeks back to the start, and writes "test" again, so the file still just contains "test".
# seeking can be a little confusing when writing files,
# because it doesn't overwrite everything; it only overwrites as many characters as we write.
with open("overwrite.txt","w") as d:
    d.write("test")
    d.seek(0)
    d.write("r")
# only the first character is replaced, so the file now contains "rest".
# user input
filename="UserGuest.txt"
accessmode="w"
data= input("Please enter file info: ")
with open(filename, accessmode) as myfile:
myfile.write(data)
print("File written successfully.")
| 32.361702
| 102
| 0.70217
|
42d0cb99fe3cce0b8dd6aab785b15feda71f83f4
| 1,568
|
py
|
Python
|
pipeline_plugins/components/collections/sites/open/cc/transfer_host_resource/legacy.py
|
wkma/bk-sops
|
8fb5609c0c4495c28d588fbafa9d9f5f2976929b
|
[
"Apache-2.0"
] | 881
|
2019-03-25T02:45:42.000Z
|
2022-03-30T09:10:49.000Z
|
pipeline_plugins/components/collections/sites/open/cc/transfer_host_resource/legacy.py
|
wkma/bk-sops
|
8fb5609c0c4495c28d588fbafa9d9f5f2976929b
|
[
"Apache-2.0"
] | 3,303
|
2019-03-25T04:18:03.000Z
|
2022-03-31T11:52:03.000Z
|
pipeline_plugins/components/collections/sites/open/cc/transfer_host_resource/legacy.py
|
wkma/bk-sops
|
8fb5609c0c4495c28d588fbafa9d9f5f2976929b
|
[
"Apache-2.0"
] | 395
|
2019-03-25T02:53:36.000Z
|
2022-03-31T08:37:28.000Z
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
from django.utils.translation import ugettext_lazy as _
from pipeline.component_framework.component import Component
from gcloud.conf import settings
from pipeline_plugins.components.collections.sites.open.cc.base import BaseTransferHostToModuleService
logger = logging.getLogger("celery")
__group_name__ = _("配置平台(CMDB)")
class CmdbTransferHostResourceModuleService(BaseTransferHostToModuleService):
def execute(self, data, parent_data):
return self.exec_transfer_host_module(data, parent_data, "transfer_host_to_resourcemodule")
class CmdbTransferHostResourceModuleComponent(Component):
name = _("上交主机至资源池")
code = "cmdb_transfer_host_resource"
bound_service = CmdbTransferHostResourceModuleService
form = "%scomponents/atoms/cc/cmdb_transfer_host_resource.js" % settings.STATIC_URL
| 43.555556
| 115
| 0.806122
|
410d3de51cfbb6bb4c055c98d1f682f94157bea2
| 9,111
|
py
|
Python
|
via_cms/model/static/geoloc_dao.py
|
jeanjacquesp/via-cms
|
12b212f8005e3d667c23ffc4da831e4d3e653999
|
[
"MIT"
] | null | null | null |
via_cms/model/static/geoloc_dao.py
|
jeanjacquesp/via-cms
|
12b212f8005e3d667c23ffc4da831e4d3e653999
|
[
"MIT"
] | null | null | null |
via_cms/model/static/geoloc_dao.py
|
jeanjacquesp/via-cms
|
12b212f8005e3d667c23ffc4da831e4d3e653999
|
[
"MIT"
] | null | null | null |
# Copyright 2020 Pax Syriana Foundation. Licensed under the Apache License, Version 2.0
#
from collections import namedtuple
from via_cms.extension import db
from via_cms.model._database import Model
from via_cms.util.helper import Separator
CATEGORY_COUNTRY = 0
CATEGORY_PROVINCE = 1
CATEGORY_DISTRICT = 2
CATEGORY_SUBDISTRICT = 3
CATEGORY_NEIGHBORHOOD = 4
class Geoloc(Model):
__tablename__ = 'geoloc_tbl'
id = db.Column(db.Integer, primary_key=True, autoincrement=False)
category = db.Column(db.Integer, nullable=False)
label_ar = db.Column(db.Unicode(128), nullable=False) # not necessarily unique (for different parents)
label_en = db.Column(db.Unicode(128), nullable=False) # not necessarily unique (for different parents)
lat = db.Column(db.Numeric(precision=10, scale=6), nullable=False)
lon = db.Column(db.Numeric(precision=10, scale=6), nullable=False)
un_code = db.Column(db.Unicode(128), nullable=False, unique=True)
un_parent_code = db.Column(db.Unicode(128))
valid_on = db.Column(db.Unicode(128), nullable=False)
main_town = db.Column(db.Integer, nullable=False)
# price_list = db.relationship('Price', back_populates='geoloc')
parent_id = db.Column(db.Integer, db.ForeignKey('geoloc_tbl.id'))
child_list = db.relationship("Geoloc", backref=db.backref('parent', remote_side=[id]))
post_list = db.relationship("FeedPost", secondary='geoloc_post_tbl', back_populates="geoloc_list")
__table_args__ = (db.UniqueConstraint('label_ar', 'parent_id'),
db.UniqueConstraint('label_en', 'parent_id'))
@staticmethod
def import_from_csv(file_path):
import csv
with open(file_path, encoding='utf-8') as csv_file:
reader = csv.reader(csv_file, quoting=csv.QUOTE_NONE)
nb_updates = 0
first = True
for row in reader:
if first:
first = False
else:
id = row[0]
category = row[1]
lat = row[2]
lon = row[3]
label_ar = row[4]
label_en = row[5]
parent_id = row[6]
un_code = row[7]
un_parent_code = row[8]
valid_on = row[9]
main_town = 1 if 'TRUE' == row[10] else 0 if 'FALSE' == row[10] else -1
parent = None
if parent_id:
try:
parent = Geoloc.query.get(parent_id)
except:
raise RuntimeError("Entries must be in revert order: parents first")
# end if parent_id is not None
if parent:
geoloc = Geoloc(id=id, category=category, label_ar=label_ar, label_en=label_en, lat=lat, lon=lon,
un_code=un_code, un_parent_code=un_parent_code, valid_on=valid_on, main_town=main_town,
parent_id=parent.id)
else:
geoloc = Geoloc(id=id, category=category, label_ar=label_ar, label_en=label_en, lat=lat, lon=lon,
un_code=un_code, un_parent_code=un_parent_code, valid_on=valid_on, main_town=main_town)
# end if parent
nb_updates += 1
geoloc.save()
# end if first
# end for row in reader
# set the link between parent and children
geoloc_all = Geoloc.query
for geoloc in geoloc_all:
child_list = Geoloc.query.filter_by(parent_id=int(geoloc.id))
if child_list:
geoloc.child_list.extend(child_list)
geoloc.save(commit=False)
# end for geoloc in geoloc_all
if nb_updates > 0:
db.session.commit()
# end with open(file_path, encoding='utf-8') as csv_file
@staticmethod
def export_to_csv(file_path):
# TODO Unused....
# syria = Geoloc.query.filter_by(id=1000).all()
# level = []
# Geoloc.print_all(syria[0], level)
res = Geoloc.get_tree()
print(res)
@staticmethod
def _get_sub_tree(node):
child_list = node.child_list
e = []
for child in child_list:
e.append(Geoloc._get_sub_tree(child))
        # noinspection PyRedundantParentheses
        return (node.id, node.label_en, e)  # the model defines label_en (there is no geoloc_name_en column)
@staticmethod
def get_tree():
syria = Geoloc.query.filter_by(id=1000).all()
return Geoloc._get_sub_tree(syria[0])
@staticmethod
def _build_html_subtree(node):
# html = ''
child_list = node.child_list
if child_list:
            html = '<optgroup id="{}" label="{}">\n'.format(node.id, node.label_en)
        else:
            return '<option id="{}" value="{}">{}</option>\n'.format(node.id, node.label_en, node.label_en)
# end if child_list
for child in child_list:
html += Geoloc._build_html_subtree(child)
# end for child in child_list
html += '</optgroup>\n'
return html
@staticmethod
def build_html_tree():
syria = Geoloc.query.filter_by(id=1000).all()
res = Geoloc._build_html_subtree(syria[0])
return res
@staticmethod
def get_name_by_category(category, language):
res = Geoloc.query.filter_by(category=category)
        if language == 'ar':
            res = [x.label_ar for x in res]  # model column is label_ar
        elif language == 'en':
            res = [x.label_en for x in res]  # model column is label_en
        return res
@staticmethod
def get_name_by_parent(parent_id, language):
res = Geoloc.query.filter_by(parent_id=parent_id)
        if language == 'ar':
            res = [x.label_ar for x in res]  # model column is label_ar
        elif language == 'en':
            res = [x.label_en for x in res]  # model column is label_en
        return res
@staticmethod
def _build_fancytree_json_subtree(node, lang):
title = node.label_en if lang == 'en' else node.label_ar
title = '{}|{}'.format(node.id, title)
tooltip = '{} | {:3.5f}:{:3.5f} | {}' \
.format(node.id, node.lat, node.lon, node.label_en if lang == 'ar' else node.label_ar)
json = '{{"title": "{0}", "id": {1}, "tooltip": "{2}", "expanded": true, "folder": true, "children": [' \
.format(title, node.id, tooltip)
child_list = node.child_list
first = True
for child in child_list:
if first:
first = False
else:
json += ','
# end if first
json += Geoloc._build_fancytree_json_subtree(child, lang)
json += ']}\n'
return json
@staticmethod
def build_fancytree_json_tree(lang, geoloc_list):
result = []
if not geoloc_list:
geoloc_list = ['1000'] # TODO magic number
# end if not geoloc_list
for geoloc_id in geoloc_list:
geoloc = Geoloc.query.get(geoloc_id)
res = Geoloc._build_fancytree_json_subtree(geoloc, lang)
result.append(res)
# end for geoloc_id in geoloc_list
result = '[{}]'.format(','.join(result))
return result
def to_dict(self):
Separator.SUB_TAG = '†' # alt 0134 #TODO move from here
neighborhood = namedtuple('X', 'id')(-1) # TODO neighborhood not managed (yet?)
subdistrict = namedtuple('X', 'id')(-1)
district = namedtuple('X', 'id')(-1)
province = namedtuple('X', 'id')(-1)
if self.category == CATEGORY_NEIGHBORHOOD: # TODO magic number
neighborhood = self
subdistrict = self.parent
district = subdistrict.parent
province = district.parent
country = province.parent
elif self.category == CATEGORY_SUBDISTRICT: # TODO magic number
subdistrict = self
district = self.parent
province = district.parent
country = province.parent
elif self.category == CATEGORY_DISTRICT:
district = self
province = district.parent
country = province.parent
elif self.category == CATEGORY_PROVINCE:
province = self
country = self.parent
elif self.category == CATEGORY_COUNTRY:
country = self
return {'id': self.id,
'category': self.category,
'parent_id': self.parent_id,
'label_ar': self.label_ar,
'label_en': self.label_en,
'neighborhood': neighborhood.id,
'subdistrict': subdistrict.id,
'district': district.id,
'province': province.id,
'country': country.id,
'lat': round(float(self.lat.canonical()), 6),
'lon': round(float(self.lon.canonical()), 6)}
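# --- Hedged usage sketch (not part of the original DAO) ---
# These calls need a Flask app context with the via_cms database configured;
# the CSV path below is a placeholder, and the file must contain the columns
# read in the import loop above (id, category, lat, lon, labels, parent, UN codes...).
def _example_geoloc_usage():
    Geoloc.import_from_csv("/tmp/geoloc_export.csv")  # placeholder path
    tree = Geoloc.get_tree()  # nested (id, name, children) tuples rooted at Syria
    json_tree = Geoloc.build_fancytree_json_tree("en", ["1000"])  # 1000 = root id used above
    return tree, json_tree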
| 37.804979
| 127
| 0.564922
|
0ee77a53e2d398f1b2e6dfd8a0596ae20c928fd5
| 2,578
|
py
|
Python
|
examples/broadcast_exec/server.py
|
TrigonDev/hikari-clusters
|
d94cce2ee880c35109f3663e904a64c5e4c0ae2a
|
[
"MIT"
] | 4
|
2022-01-16T20:02:12.000Z
|
2022-03-23T01:36:11.000Z
|
examples/broadcast_exec/server.py
|
TrigonDev/hikari-clusters
|
d94cce2ee880c35109f3663e904a64c5e4c0ae2a
|
[
"MIT"
] | 15
|
2021-12-15T17:50:54.000Z
|
2022-03-25T06:24:09.000Z
|
examples/broadcast_exec/server.py
|
TrigonDev/hikari-clusters
|
d94cce2ee880c35109f3663e904a64c5e4c0ae2a
|
[
"MIT"
] | null | null | null |
# MIT License
#
# Copyright (c) 2021 TrigonDev
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# When you send `!exec <code>`, the code will be sent to all clusters. Try
# running `print(1)` with multiple servers.
from typing import Any
from hikari import GatewayBot, GuildMessageCreateEvent
from hikari_clusters import Cluster, ClusterLauncher, Server, payload
from hikari_clusters.commands import CommandGroup
class MyBot(GatewayBot):
cluster: Cluster
def __init__(self) -> None:
super().__init__(token="discord token")
self.listen(GuildMessageCreateEvent)(self.on_message)
async def on_message(self, event: GuildMessageCreateEvent) -> None:
if not event.content:
return
if event.content.startswith("!exec"):
await self.cluster.ipc.send_command(
self.cluster.ipc.cluster_uids,
"exec_code",
{"code": event.content[6:]},
)
async def start(self, *args: Any, **kwargs: Any) -> None:
# we include commands inside start() because self.cluster is not
# defined inside __init__()
self.cluster.ipc.commands.include(COMMANDS)
await super().start(*args, **kwargs)
COMMANDS = CommandGroup()
@COMMANDS.add("exec_code")
async def exec_code(pl: payload.COMMAND) -> None:
assert pl.data.data is not None
exec(pl.data.data["code"])
def run() -> None:
Server(
host="localhost",
port=8765,
token="ipc token",
cluster_launcher=ClusterLauncher(MyBot),
).run()
| 34.373333
| 79
| 0.703646
|
f602b82d5a48a1efb63d72451cebd81ded20d845
| 4,915
|
py
|
Python
|
docs/conf.py
|
rankoliang/investment_tracker
|
5345409fdddc6a39da0d812eb035b2408e6ca959
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
rankoliang/investment_tracker
|
5345409fdddc6a39da0d812eb035b2408e6ca959
|
[
"MIT"
] | 29
|
2020-02-12T22:08:05.000Z
|
2022-03-21T14:28:09.000Z
|
docs/conf.py
|
rankoliang/investment_tracker
|
5345409fdddc6a39da0d812eb035b2408e6ca959
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# investment_tracker documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import investment_tracker
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Investment Tracker'
copyright = "2020, Ranko Liang"
author = "Ranko Liang"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = investment_tracker.__version__
# The full version, including alpha/beta/rc tags.
release = investment_tracker.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'investment_trackerdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'investment_tracker.tex',
'investment_tracker Documentation',
'Ranko Liang', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'investment_tracker',
'investment_tracker Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'investment_tracker',
'investment_tracker Documentation',
author,
'investment_tracker',
'One line description of project.',
'Miscellaneous'),
]
| 30.71875
| 77
| 0.693184
|
cca45396a936ac4b7c4f6f8fea644be08dffe62f
| 2,728
|
py
|
Python
|
Source/Tools/TrainList_Kitti_2015.py
|
NPU-IAP/NLCA-Net
|
a5e03dd8370a8fa41552155aabaf8fb531b0ac22
|
[
"MIT"
] | 9
|
2020-07-31T10:00:48.000Z
|
2022-03-23T06:14:14.000Z
|
Source/Tools/TrainList_Kitti_2015.py
|
NPU-IAP/NLCA-Net
|
a5e03dd8370a8fa41552155aabaf8fb531b0ac22
|
[
"MIT"
] | 4
|
2020-10-17T12:58:24.000Z
|
2022-02-10T03:34:40.000Z
|
Source/Tools/TrainList_Kitti_2015.py
|
NPU-IAP/NLCA-Net
|
a5e03dd8370a8fa41552155aabaf8fb531b0ac22
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
# define sone struct
RootPath = '/home1/Documents/Database/' # root path
RawDataFolder = 'Kitti/training/%s/'
LeftFolder = 'image_2'
RightFolder = 'image_3'
LableFolder = 'disp_occ_0'
FileName = '%06d_10'
RawDataType = '.png'
LabelType = '.png'
TrainListPath = './Dataset/trainlist_kitti_2015.txt'
LabelListPath = './Dataset/labellist_kitti_2015.txt'
ValTrainListPath = './Dataset/val_trainlist_kitti_2015.txt'
ValLabelListPath = './Dataset/val_labellist_kitti_2015.txt'
ImgNum = 200
Times = 5
def GenRawPath(fileFolder, num):
path = RootPath + RawDataFolder % fileFolder + FileName % num + \
RawDataType
return path
def OpenFile():
if os.path.exists(TrainListPath):
os.remove(TrainListPath)
if os.path.exists(LabelListPath):
os.remove(LabelListPath)
if os.path.exists(ValTrainListPath):
os.remove(ValTrainListPath)
if os.path.exists(ValLabelListPath):
os.remove(ValLabelListPath)
fd_train_list = open(TrainListPath, 'a')
fd_label_list = open(LabelListPath, 'a')
fd_val_train_list = open(ValTrainListPath, 'a')
fd_val_label_list = open(ValLabelListPath, 'a')
return fd_train_list, fd_label_list, fd_val_train_list, fd_val_label_list
def OutputData(outputFile, data):
outputFile.write(str(data) + '\n')
outputFile.flush()
def GenList(fd_train_list, fd_label_list, fd_val_train_list, fd_val_label_list):
total = 0
    for num in range(ImgNum):
rawLeftPath = GenRawPath(LeftFolder, num)
rawRightPath = GenRawPath(RightFolder, num)
lablePath = GenRawPath(LableFolder, num)
rawLeftPathisExists = os.path.exists(rawLeftPath)
rawRightPathisExists = os.path.exists(rawRightPath)
lablePathisExists = os.path.exists(lablePath)
if (not rawLeftPathisExists) and \
(not lablePathisExists) and (not rawRightPathisExists):
break
if num % Times == 0:
OutputData(fd_val_train_list, rawLeftPath)
OutputData(fd_val_train_list, rawRightPath)
OutputData(fd_val_label_list, lablePath)
else:
OutputData(fd_train_list, rawLeftPath)
OutputData(fd_train_list, rawRightPath)
OutputData(fd_label_list, lablePath)
total = total + 1
return total
if __name__ == '__main__':
fd_train_list, fd_label_list, fd_val_train_list, fd_val_label_list = OpenFile()
total = GenList(fd_train_list, fd_label_list, fd_val_train_list, fd_val_label_list)
    print(total)
#folderId = ConvertNumToChar(0)
#folderNum = 0
#num = 6
#rawLeftPath = GenRawPath(folderId, folderNum, LeftFolder, num)
# print rawLeftPath
| 29.978022
| 87
| 0.696114
|
971c422abc83e0bf380f5071a39b1333fe5e67aa
| 3,210
|
py
|
Python
|
fundamentals/tries/trie_v1.py
|
sassafras13/coding-interview
|
9e23027e535fbae90a874076a18cdfa42e362b7c
|
[
"MIT"
] | null | null | null |
fundamentals/tries/trie_v1.py
|
sassafras13/coding-interview
|
9e23027e535fbae90a874076a18cdfa42e362b7c
|
[
"MIT"
] | null | null | null |
fundamentals/tries/trie_v1.py
|
sassafras13/coding-interview
|
9e23027e535fbae90a874076a18cdfa42e362b7c
|
[
"MIT"
] | null | null | null |
# 01-11-2021
# Emma Benjaminson
# Trie Implementation
# Source: https://towardsdatascience.com/implementing-a-trie-data-structure-in-python-in-less-than-100-lines-of-code-a877ea23c1a1
# a tuple is a built-in Python data type that stores an ordered,
# unchangeable collection of values
# written with round brackets
from typing import Tuple
# object is an identifier that refers to a builtin type
class TrieNode(object):
"""
Basic trie node implementation.
"""
def __init__(self, char: str):
self.char = char
self.children = []
# check if it is the last char in word
self.word_finished = False
# how many times has this char appeared
self.counter = 1
def add(root, word: str):
"""
Adding a word in the trie structure
"""
node = root
for char in word:
found_in_child = False
# search for the char in the children of the current node
for child in node.children:
if child.char == char:
# we found the char, increase counter by 1 to record that another word shares this char
child.counter += 1
# point the node to the child that contains this char
node = child
found_in_child = True
break
# otherwise we did not find the char so we add a new child node
if not found_in_child:
new_node = TrieNode(char)
node.children.append(new_node)
# then point the node to the new child
node = new_node
# finished running through the word
node.word_finished = True
# the annotation "prefix: str" hints that the input, prefix, should be a string
# similarly "-> Tuple[bool, int]" hints that the function returns a (bool, int) tuple
def find_prefix(root, prefix: str) -> Tuple[bool, int]:
"""
Check and return:
1. If the prefix exists in any of the words we have already added
2. If yes then how many words have the prefix
"""
node = root
# if the root node has no children, then return False
# because this means we are trying to search an empty trie
if not root.children:
return False, 0
for char in prefix:
char_not_found = True
# search through all the children of the present node
for child in node.children:
if child.char == char:
# we found the char among the children
char_not_found = False
# assign node as the child containing the char
node = child
break
# return false when we do not find the char
if char_not_found:
return False, 0
# if we are still in the function at this line then we have found the prefix
# we return the counter to indicate how many words have this prefix
return True, node.counter
if __name__ == "__main__":
root = TrieNode('*')
add(root, "hackathon")
add(root, "hack")
print(find_prefix(root, "hac"))
print(find_prefix(root, "hack"))
print(find_prefix(root, "hackathon"))
print(find_prefix(root, "ha"))
print(find_prefix(root, "hammer"))
| 29.722222
| 130
| 0.619626
|
85c6a276eed151ad6e2ca2a5f618f87d760540de
| 11,323
|
py
|
Python
|
release/ansible/ansible_collections/kubernetes/core/plugins/modules/k8s_scale.py
|
jmrodri/ocp-release-operator-sdk
|
fe08de5bb8fe56f3ea2868da7f467dae03cd3829
|
[
"Apache-2.0"
] | 7
|
2019-06-17T21:46:31.000Z
|
2022-03-03T07:43:59.000Z
|
release/ansible/ansible_collections/kubernetes/core/plugins/modules/k8s_scale.py
|
openshift/ocp-release-operator-sdk
|
6466b01d2a1ed29cbd8cb80d687dbf42d706a1a3
|
[
"Apache-2.0"
] | 223
|
2019-01-17T19:22:40.000Z
|
2022-03-31T16:12:49.000Z
|
release/ansible/ansible_collections/kubernetes/core/plugins/modules/k8s_scale.py
|
jmrodri/ocp-release-operator-sdk
|
fe08de5bb8fe56f3ea2868da7f467dae03cd3829
|
[
"Apache-2.0"
] | 24
|
2019-01-17T19:19:49.000Z
|
2022-02-03T13:34:08.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Chris Houseknecht <@chouseknecht>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
module: k8s_scale
short_description: Set a new size for a Deployment, ReplicaSet, Replication Controller, or Job.
author:
- "Chris Houseknecht (@chouseknecht)"
- "Fabian von Feilitzsch (@fabianvf)"
description:
- Similar to the kubectl scale command. Use to set the number of replicas for a Deployment, ReplicaSet,
or Replication Controller, or the parallelism attribute of a Job. Supports check mode.
- C(wait) parameter is not supported for Jobs.
extends_documentation_fragment:
- kubernetes.core.k8s_name_options
- kubernetes.core.k8s_auth_options
- kubernetes.core.k8s_resource_options
- kubernetes.core.k8s_scale_options
options:
label_selectors:
description: List of label selectors to use to filter results.
type: list
elements: str
version_added: 2.0.0
continue_on_error:
description:
- Whether to continue on errors when multiple resources are defined.
type: bool
default: False
version_added: 2.0.0
requirements:
- "python >= 3.6"
- "kubernetes >= 12.0.0"
- "PyYAML >= 3.11"
'''
EXAMPLES = r'''
- name: Scale deployment up, and extend timeout
kubernetes.core.k8s_scale:
api_version: v1
kind: Deployment
name: elastic
namespace: myproject
replicas: 3
wait_timeout: 60
- name: Scale deployment down when current replicas match
kubernetes.core.k8s_scale:
api_version: v1
kind: Deployment
name: elastic
namespace: myproject
current_replicas: 3
replicas: 2
- name: Increase job parallelism
kubernetes.core.k8s_scale:
api_version: batch/v1
kind: job
name: pi-with-timeout
namespace: testing
replicas: 2
# Match object using local file or inline definition
- name: Scale deployment based on a file from the local filesystem
kubernetes.core.k8s_scale:
src: /myproject/elastic_deployment.yml
replicas: 3
wait: no
- name: Scale deployment based on a template output
kubernetes.core.k8s_scale:
resource_definition: "{{ lookup('template', '/myproject/elastic_deployment.yml') | from_yaml }}"
replicas: 3
wait: no
- name: Scale deployment based on a file from the Ansible controller filesystem
kubernetes.core.k8s_scale:
resource_definition: "{{ lookup('file', '/myproject/elastic_deployment.yml') | from_yaml }}"
replicas: 3
wait: no
- name: Scale deployment using label selectors (continue operation in case an error occurred on one resource)
kubernetes.core.k8s_scale:
replicas: 3
kind: Deployment
namespace: test
label_selectors:
- app=test
continue_on_error: true
'''
RETURN = r'''
result:
description:
- If a change was made, will return the patched object, otherwise returns the existing object.
returned: success
type: complex
contains:
api_version:
description: The versioned schema of this representation of an object.
returned: success
type: str
kind:
description: Represents the REST resource this object represents.
returned: success
type: str
metadata:
description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
returned: success
type: complex
spec:
description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
returned: success
type: complex
status:
description: Current status details for the object.
returned: success
type: complex
duration:
description: elapsed time of task in seconds
returned: when C(wait) is true
type: int
sample: 48
'''
import copy
from ansible_collections.kubernetes.core.plugins.module_utils.ansiblemodule import AnsibleModule
from ansible_collections.kubernetes.core.plugins.module_utils.args_common import (
AUTH_ARG_SPEC, RESOURCE_ARG_SPEC, NAME_ARG_SPEC)
SCALE_ARG_SPEC = {
'replicas': {'type': 'int', 'required': True},
'current_replicas': {'type': 'int'},
'resource_version': {},
'wait': {'type': 'bool', 'default': True},
'wait_timeout': {'type': 'int', 'default': 20},
'wait_sleep': {'type': 'int', 'default': 5},
}
def execute_module(module, k8s_ansible_mixin,):
k8s_ansible_mixin.set_resource_definitions(module)
definition = k8s_ansible_mixin.resource_definitions[0]
name = definition['metadata']['name']
namespace = definition['metadata'].get('namespace')
api_version = definition['apiVersion']
kind = definition['kind']
current_replicas = module.params.get('current_replicas')
replicas = module.params.get('replicas')
resource_version = module.params.get('resource_version')
label_selectors = module.params.get('label_selectors')
if not label_selectors:
label_selectors = []
continue_on_error = module.params.get('continue_on_error')
wait = module.params.get('wait')
wait_time = module.params.get('wait_timeout')
wait_sleep = module.params.get('wait_sleep')
existing = None
existing_count = None
return_attributes = dict(result=dict(), diff=dict())
if wait:
return_attributes['duration'] = 0
resource = k8s_ansible_mixin.find_resource(kind, api_version, fail=True)
from ansible_collections.kubernetes.core.plugins.module_utils.common import NotFoundError
multiple_scale = False
try:
existing = resource.get(name=name, namespace=namespace, label_selector=','.join(label_selectors))
if existing.kind.endswith('List'):
existing_items = existing.items
multiple_scale = len(existing_items) > 1
else:
existing_items = [existing]
except NotFoundError as exc:
module.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc),
error=exc.value.get('status'))
if multiple_scale:
# when scaling multiple resource, the 'result' is changed to 'results' and is a list
return_attributes = {'results': []}
changed = False
def _continue_or_fail(error):
if multiple_scale and continue_on_error:
if "errors" not in return_attributes:
return_attributes['errors'] = []
return_attributes['errors'].append({'error': error, 'failed': True})
else:
module.fail_json(msg=error, **return_attributes)
def _continue_or_exit(warn):
if multiple_scale:
return_attributes['results'].append({'warning': warn, 'changed': False})
else:
module.exit_json(warning=warn, **return_attributes)
for existing in existing_items:
if module.params['kind'] == 'job':
existing_count = existing.spec.parallelism
elif hasattr(existing.spec, 'replicas'):
existing_count = existing.spec.replicas
if existing_count is None:
error = 'Failed to retrieve the available count for object kind={0} name={1} namespace={2}.'.format(
existing.kind, existing.metadata.name, existing.metadata.namespace)
_continue_or_fail(error)
continue
if resource_version and resource_version != existing.metadata.resourceVersion:
warn = 'expected resource version {0} does not match with actual {1} for object kind={2} name={3} namespace={4}.'.format(
resource_version, existing.metadata.resourceVersion, existing.kind, existing.metadata.name, existing.metadata.namespace)
_continue_or_exit(warn)
continue
if current_replicas is not None and existing_count != current_replicas:
warn = 'current replicas {0} does not match with actual {1} for object kind={2} name={3} namespace={4}.'.format(
current_replicas, existing_count, existing.kind, existing.metadata.name, existing.metadata.namespace)
_continue_or_exit(warn)
continue
if existing_count != replicas:
if not module.check_mode:
if module.params['kind'] == 'job':
existing.spec.parallelism = replicas
result = resource.patch(existing.to_dict()).to_dict()
else:
result = scale(module, k8s_ansible_mixin, resource, existing, replicas, wait, wait_time, wait_sleep)
changed = changed or result['changed']
else:
name = existing.metadata.name
namespace = existing.metadata.namespace
existing = resource.get(name=name, namespace=namespace)
result = {'changed': False, 'result': existing.to_dict(), 'diff': {}}
if wait:
result['duration'] = 0
# append result to the return attribute
if multiple_scale:
return_attributes['results'].append(result)
else:
module.exit_json(**result)
module.exit_json(changed=changed, **return_attributes)
def argspec():
args = copy.deepcopy(SCALE_ARG_SPEC)
args.update(RESOURCE_ARG_SPEC)
args.update(NAME_ARG_SPEC)
args.update(AUTH_ARG_SPEC)
args.update({'label_selectors': {'type': 'list', 'elements': 'str', 'default': []}})
args.update(({'continue_on_error': {'type': 'bool', 'default': False}}))
return args
def scale(module, k8s_ansible_mixin, resource, existing_object, replicas, wait, wait_time, wait_sleep):
name = existing_object.metadata.name
namespace = existing_object.metadata.namespace
kind = existing_object.kind
if not hasattr(resource, 'scale'):
module.fail_json(
msg="Cannot perform scale on resource of kind {0}".format(resource.kind)
)
scale_obj = {'kind': kind, 'metadata': {'name': name, 'namespace': namespace}, 'spec': {'replicas': replicas}}
existing = resource.get(name=name, namespace=namespace)
try:
resource.scale.patch(body=scale_obj)
except Exception as exc:
module.fail_json(msg="Scale request failed: {0}".format(exc))
k8s_obj = resource.get(name=name, namespace=namespace).to_dict()
match, diffs = k8s_ansible_mixin.diff_objects(existing.to_dict(), k8s_obj)
result = dict()
result['result'] = k8s_obj
result['changed'] = not match
result['diff'] = diffs
if wait:
success, result['result'], result['duration'] = k8s_ansible_mixin.wait(resource, scale_obj, wait_sleep, wait_time)
if not success:
module.fail_json(msg="Resource scaling timed out", **result)
return result
def main():
mutually_exclusive = [
('resource_definition', 'src'),
]
module = AnsibleModule(argument_spec=argspec(), mutually_exclusive=mutually_exclusive, supports_check_mode=True)
from ansible_collections.kubernetes.core.plugins.module_utils.common import (
K8sAnsibleMixin, get_api_client)
k8s_ansible_mixin = K8sAnsibleMixin(module)
k8s_ansible_mixin.client = get_api_client(module=module)
execute_module(module, k8s_ansible_mixin)
if __name__ == '__main__':
main()
| 34.521341
| 139
| 0.675528
|
ae05774ffcd6bcfd9587bf32d55e43134f45be43
| 2,313
|
py
|
Python
|
ddmin/ddmin.py
|
hydraseq/ddmin
|
44d5ab3d6d8905c4025198c9630ed8672b95febb
|
[
"MIT"
] | 1
|
2020-03-21T17:03:41.000Z
|
2020-03-21T17:03:41.000Z
|
ddmin/ddmin.py
|
hydraseq/ddmin
|
44d5ab3d6d8905c4025198c9630ed8672b95febb
|
[
"MIT"
] | 1
|
2020-12-09T11:31:39.000Z
|
2020-12-09T11:31:39.000Z
|
ddmin/ddmin.py
|
hydraseq/ddmin
|
44d5ab3d6d8905c4025198c9630ed8672b95febb
|
[
"MIT"
] | null | null | null |
class DDMin:
def __init__(self, circumstances, test_func, default_partition=2):
self.circumstances = circumstances
self.test = test_func
self.part = default_partition
def listminus(self, circ1, circ2):
"""Return all elements of C1 that are not in C2. Assumes elementes of C1
are hashable."""
# The hash map S2 has an entry for each element in C2
s2 = {delta:1 for delta in circ2}
return [delta for delta in circ1 if not s2.get(delta, None)]
def split(self, circumstances, partition):
"""Split a configuration of CIRCUMSTANCES into N subsets; return the list
of subsets."""
subsets= [] # Result
start = 0 # Start of the next subset
len_subset = 0
for idx in range(0, partition):
len_subset = int((len(circumstances) - start) / float(partition - idx) + 0.5)
subset = circumstances[start:start + len_subset]
subsets.append(subset)
start += len(subset)
assert len(subsets) == partition
assert not any([len(subset) == 0 for subset in subsets])
return subsets
def execute(self):
"""Return a sublist of CIRCUMSTANCES that is a relevant configuration with
respect to TEST."""
assert self.test([]) == True # Hard condition, empty must not trigger
assert self.test(self.circumstances) == False # The circumstances must trigger
partition = self.part # Usually start with binary, 2, but...
while len(self.circumstances) >= 2:
subsets = self.split(self.circumstances, partition)
some_complement_is_failing = False
for subset in subsets:
complement = self.listminus(self.circumstances, subset)
if self.test(complement) == False:
self.circumstances = complement
partition = max(partition - 1, 2)
some_complement_is_failing = True
break
if not some_complement_is_failing:
if partition == len(self.circumstances):
break
partition = min(partition*2, len(self.circumstances))
return self.circumstances
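# --- Usage sketch (added example, not part of the original module) ---
# A minimal illustration of driving DDMin with a toy test function. The
# predicate below is hypothetical: the "failure" is triggered whenever both
# 3 and 7 are present, so delta debugging should shrink the input to [3, 7].
if __name__ == '__main__':
    def toy_test(circumstances):
        # True means the configuration passes; False means it still fails.
        return not ({3, 7} <= set(circumstances))

    minimal = DDMin(list(range(10)), toy_test).execute()
    print(minimal)  # expected: [3, 7]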
| 37.918033
| 94
| 0.58971
|
8ed03064c5b5efa81b21b1a4333711cc450a7162
| 23,931
|
py
|
Python
|
ssd_utils.py
|
PrimadonnaGit/tpgr_ssd_detectors
|
de45dea112af67d6e8ff7bebcdde64a8665e2ff5
|
[
"MIT"
] | 3
|
2019-09-27T01:30:59.000Z
|
2020-09-02T06:55:24.000Z
|
ssd_utils.py
|
PrimadonnaGit/tpgr_ssd_detectors
|
de45dea112af67d6e8ff7bebcdde64a8665e2ff5
|
[
"MIT"
] | null | null | null |
ssd_utils.py
|
PrimadonnaGit/tpgr_ssd_detectors
|
de45dea112af67d6e8ff7bebcdde64a8665e2ff5
|
[
"MIT"
] | null | null | null |
"""Some utils for SSD."""
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import keras.backend as K
import h5py
import cv2
import os
from tqdm import tqdm  # used below in compute_class_weights
#from utils.model import load_weights, calc_memory_usage, count_parameters, plot_parameter_statistic, calc_receptive_field
from utils.vis import to_rec  # used below in plot_assignment / plot_results
def iou(box, priors):
"""Compute intersection over union for the box with all priors.
# Arguments
box: Box, numpy tensor of shape (4,).
(x1 + y1 + x2 + y2)
priors:
# Return
iou: Intersection over union,
numpy tensor of shape (num_priors).
"""
# compute intersection
inter_upleft = np.maximum(priors[:, :2], box[:2])
inter_botright = np.minimum(priors[:, 2:4], box[2:])
inter_wh = inter_botright - inter_upleft
inter_wh = np.maximum(inter_wh, 0)
inter = inter_wh[:, 0] * inter_wh[:, 1]
# compute union
area_pred = (box[2] - box[0]) * (box[3] - box[1])
area_gt = (priors[:, 2] - priors[:, 0]) * (priors[:, 3] - priors[:, 1])
union = area_pred + area_gt - inter
# compute iou
iou = inter / union
return iou
def non_maximum_suppression_slow(boxes, confs, iou_threshold, top_k):
"""Does None-Maximum Suppresion on detection results.
Intuitive but slow as hell!!!
# Agruments
boxes: Array of bounding boxes (boxes, xmin + ymin + xmax + ymax).
confs: Array of corresponding confidenc values.
iou_threshold: Intersection over union threshold used for comparing
overlapping boxes.
top_k: Maximum number of returned indices.
# Return
List of remaining indices.
"""
idxs = np.argsort(-confs)
selected = []
for idx in idxs:
if np.any(iou(boxes[idx], boxes[selected]) >= iou_threshold):
continue
selected.append(idx)
if len(selected) >= top_k:
break
return selected
def non_maximum_suppression(boxes, confs, overlap_threshold, top_k):
"""Does None-Maximum Suppresion on detection results.
# Agruments
boxes: Array of bounding boxes (boxes, xmin + ymin + xmax + ymax).
confs: Array of corresponding confidenc values.
overlap_threshold:
top_k: Maximum number of returned indices.
# Return
List of remaining indices.
# References
- Girshick, R. B. and Felzenszwalb, P. F. and McAllester, D.
[Discriminatively Trained Deformable Part Models, Release 5](http://people.cs.uchicago.edu/~rbg/latent-release5/)
"""
eps = 1e-15
boxes = boxes.astype(np.float64)
pick = []
x1, y1, x2, y2 = boxes.T
idxs = np.argsort(confs)
area = (x2 - x1) * (y2 - y1)
while len(idxs) > 0:
i = idxs[-1]
pick.append(i)
if len(pick) >= top_k:
break
idxs = idxs[:-1]
xx1 = np.maximum(x1[i], x1[idxs])
yy1 = np.maximum(y1[i], y1[idxs])
xx2 = np.minimum(x2[i], x2[idxs])
yy2 = np.minimum(y2[i], y2[idxs])
w = np.maximum(0, xx2 - xx1)
h = np.maximum(0, yy2 - yy1)
I = w * h
overlap = I / (area[idxs] + eps)
# as in Girshick et. al.
#U = area[idxs] + area[i] - I
#overlap = I / (U + eps)
idxs = idxs[overlap <= overlap_threshold]
return pick
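# --- Usage sketch (added example, not part of the original module) ---
# Tiny illustrative call of non_maximum_suppression: two heavily overlapping
# boxes and one far-away box. With overlap_threshold=0.5 the lower-scoring
# overlapping box is suppressed, so the surviving indices should be [0, 2].
def _nms_example():
    boxes = np.array([[0, 0, 10, 10], [1, 1, 11, 11], [50, 50, 60, 60]], dtype=np.float64)
    confs = np.array([0.9, 0.8, 0.7])
    return non_maximum_suppression(boxes, confs, 0.5, 200)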
class PriorMap(object):
"""Handles prior boxes for a given feature map.
# Arguments / Attributes
source_layer_name
image_size: Tuple with spatial size of model input.
map_size
variances
aspect_ratios: List of aspect ratios for the prior boxes at each
location.
shift: List of tuples for the displacement of the prior boxes
            relative to their location. Each tuple contains a value between
-1.0 and 1.0 for x and y direction.
        clip: Boolean, whether the boxes should be clipped so they do not exceed
the borders of the input image.
step
minmax_size: List of tuples with s_min and s_max values (see paper).
        special_ssd_box: Boolean, whether or not the extra box for aspect
ratio 1 is used.
# Notes
        The compute_priors method has to be called to get usable prior boxes.
"""
def __init__(self, source_layer_name, image_size, map_size,
minmax_size=None, variances=[0.1, 0.1, 0.2, 0.2],
aspect_ratios=[1], shift=None,
clip=False, step=None, special_ssd_box=False):
self.__dict__.update(locals())
#self.compute_priors()
def __str__(self):
s = ''
for a in ['source_layer_name',
'map_size',
'aspect_ratios',
'shift',
'clip',
'minmax_size',
'special_ssd_box',
'num_locations',
'num_boxes',
'num_boxes_per_location',
]:
s += '%-24s %s\n' % (a, getattr(self, a))
return s
@property
def num_boxes_per_location(self):
return len(self.box_wh)
@property
def num_locations(self):
return len(self.box_xy)
@property
def num_boxes(self):
return len(self.box_xy) * len(self.box_wh) # len(self.priors)
def compute_priors(self):
image_h, image_w = image_size = self.image_size
map_h, map_w = map_size = self.map_size
min_size, max_size = self.minmax_size
# define centers of prior boxes
if self.step is None:
step_x = image_w / map_w
step_y = image_h / map_h
            assert step_x % 1 == 0 and step_y % 1 == 0, 'map size %s not consistent with input size %s' % (map_size, image_size)
else:
step_x = step_y = self.step
linx = np.array([(0.5 + i) for i in range(map_w)]) * step_x
liny = np.array([(0.5 + i) for i in range(map_h)]) * step_y
box_xy = np.array(np.meshgrid(linx, liny)).reshape(2,-1).T
if self.shift is None:
shift = [(0.0,0.0)] * len(self.aspect_ratios)
else:
shift = self.shift
box_wh = []
box_shift = []
for i in range(len(self.aspect_ratios)):
ar = self.aspect_ratios[i]
box_wh.append([min_size * np.sqrt(ar), min_size / np.sqrt(ar)])
box_shift.append(shift[i])
if ar == 1 and self.special_ssd_box: # special SSD box
box_wh.append([np.sqrt(min_size * max_size), np.sqrt(min_size * max_size)])
box_shift.append((0.0,0.0))
box_wh = np.asarray(box_wh)
box_shift = np.asarray(box_shift)
box_shift = np.clip(box_shift, -1.0, 1.0)
box_shift = box_shift * 0.5 * np.array([step_x, step_y]) # percent to pixels
# values for individual prior boxes
priors_shift = np.tile(box_shift, (len(box_xy),1))
priors_xy = np.repeat(box_xy, len(box_wh), axis=0) + priors_shift
priors_wh = np.tile(box_wh, (len(box_xy),1))
priors_min_xy = priors_xy - priors_wh / 2.
priors_max_xy = priors_xy + priors_wh / 2.
if self.clip:
priors_min_xy[:,0] = np.clip(priors_min_xy[:,0], 0, image_w)
priors_min_xy[:,1] = np.clip(priors_min_xy[:,1], 0, image_h)
priors_max_xy[:,0] = np.clip(priors_max_xy[:,0], 0, image_w)
priors_max_xy[:,1] = np.clip(priors_max_xy[:,1], 0, image_h)
priors_variances = np.tile(self.variances, (len(priors_xy),1))
self.box_xy = box_xy
self.box_wh = box_wh
        self.box_shift = box_shift
self.priors_xy = priors_xy
self.priors_wh = priors_wh
self.priors_min_xy = priors_min_xy
self.priors_max_xy = priors_max_xy
self.priors_variances = priors_variances
self.priors = np.concatenate([priors_min_xy, priors_max_xy, priors_variances], axis=1)
def plot_locations(self, color='r'):
xy = self.box_xy
plt.plot(xy[:,0], xy[:,1], '.', color=color, markersize=6)
def plot_boxes(self, location_idxs=[]):
colors = 'rgbcmy'
ax = plt.gca()
n = self.num_boxes_per_location
for i in location_idxs:
for j in range(n):
idx = i*n+j
if idx >= self.num_boxes:
break
x1, y1, x2, y2 = self.priors[idx, :4]
ax.add_patch(plt.Rectangle((x1, y1), x2-x1, y2-y1,
fill=False, edgecolor=colors[j%len(colors)], linewidth=2))
ax.autoscale_view()
class PriorUtil(object):
"""Utility for SSD prior boxes.
"""
def __init__(self, model, aspect_ratios=None, shifts=None,
minmax_sizes=None, steps=None, scale=None, clips=None,
special_ssd_boxes=None, ssd_assignment=None):
source_layers_names = [l.name.split('/')[0] for l in model.source_layers]
self.source_layers_names = source_layers_names
self.model = model
self.image_size = model.input_shape[1:3]
num_maps = len(source_layers_names)
# take parameters from model definition if they exist there
if aspect_ratios is None:
if hasattr(model, 'aspect_ratios'):
aspect_ratios = model.aspect_ratios
else:
aspect_ratios = [[1]] * num_maps
if shifts is None:
if hasattr(model, 'shifts'):
shifts = model.shifts
else:
shifts = [None] * num_maps
if minmax_sizes is None:
if hasattr(model, 'minmax_sizes'):
minmax_sizes = model.minmax_sizes
else:
# as in equation (4)
min_dim = np.min(self.image_size)
min_ratio = 10 # 15
max_ratio = 100 # 90
s = np.linspace(min_ratio, max_ratio, num_maps+1) * min_dim / 100.
minmax_sizes = [(round(s[i]), round(s[i+1])) for i in range(len(s)-1)]
if scale is None:
if hasattr(model, 'scale'):
scale = model.scale
else:
scale = 1.0
minmax_sizes = np.array(minmax_sizes) * scale
if steps is None:
if hasattr(model, 'steps'):
steps = model.steps
else:
steps = [None] * num_maps
if clips is None:
if hasattr(model, 'clips'):
clips = model.clips
else:
clips = False
if type(clips) == bool:
clips = [clips] * num_maps
if special_ssd_boxes is None:
if hasattr(model, 'special_ssd_boxes'):
special_ssd_boxes = model.special_ssd_boxes
else:
special_ssd_boxes = False
if type(special_ssd_boxes) == bool:
special_ssd_boxes = [special_ssd_boxes] * num_maps
if ssd_assignment is None:
if hasattr(model, 'ssd_assignment'):
ssd_assignment = model.ssd_assignment
else:
ssd_assignment = True
self.ssd_assignment = ssd_assignment
self.prior_maps = []
for i in range(num_maps):
layer = model.get_layer(source_layers_names[i])
map_h, map_w = map_size = layer.output_shape[1:3]
m = PriorMap(source_layer_name=source_layers_names[i],
image_size=self.image_size,
map_size=map_size,
minmax_size=minmax_sizes[i],
variances=[0.1, 0.1, 0.2, 0.2],
aspect_ratios=aspect_ratios[i],
shift=shifts[i],
step=steps[i],
special_ssd_box=special_ssd_boxes[i],
clip=clips[i])
self.prior_maps.append(m)
self.update_priors()
self.nms_top_k = 400
self.nms_thresh = 0.45
@property
def num_maps(self):
return len(self.prior_maps)
def update_priors(self):
priors_xy = []
priors_wh = []
priors_min_xy = []
priors_max_xy = []
priors_variances = []
priors = []
map_offsets = [0]
for i in range(len(self.prior_maps)):
m = self.prior_maps[i]
# compute prior boxes
m.compute_priors()
# collect prior data
priors_xy.append(m.priors_xy)
priors_wh.append(m.priors_wh)
priors_min_xy.append(m.priors_min_xy)
priors_max_xy.append(m.priors_max_xy)
priors_variances.append(m.priors_variances)
priors.append(m.priors)
map_offsets.append(map_offsets[-1]+len(m.priors))
self.priors_xy = np.concatenate(priors_xy, axis=0)
self.priors_wh = np.concatenate(priors_wh, axis=0)
self.priors_min_xy = np.concatenate(priors_min_xy, axis=0)
self.priors_max_xy = np.concatenate(priors_max_xy, axis=0)
self.priors_variances = np.concatenate(priors_variances, axis=0)
self.priors = np.concatenate(priors, axis=0)
self.map_offsets = map_offsets
# normalized prior boxes
image_h, image_w = self.image_size
self.priors_xy_norm = self.priors_xy / (image_w, image_h)
self.priors_wh_norm = self.priors_wh / (image_w, image_h)
self.priors_min_xy_norm = self.priors_min_xy / (image_w, image_h)
self.priors_max_xy_norm = self.priors_max_xy / (image_w, image_h)
self.priors_norm = np.concatenate([self.priors_min_xy_norm, self.priors_max_xy_norm, self.priors_variances], axis=1)
def encode(self, gt_data, overlap_threshold=0.5, debug=False):
# calculation is done with normalized sizes
# TODO: empty ground truth
if gt_data.shape[0] == 0:
print('gt_data', type(gt_data), gt_data.shape)
num_classes = self.model.num_classes
num_priors = self.priors.shape[0]
gt_boxes = self.gt_boxes = np.copy(gt_data[:,:4]) # normalized xmin, ymin, xmax, ymax
        gt_class_idx = np.asarray(gt_data[:,-1]+0.5, dtype=int)
gt_one_hot = np.zeros([len(gt_class_idx),num_classes])
gt_one_hot[range(len(gt_one_hot)),gt_class_idx] = 1 # one_hot classes including background
gt_min_xy = gt_boxes[:,0:2]
gt_max_xy = gt_boxes[:,2:4]
gt_xy = (gt_boxes[:,2:4] + gt_boxes[:,0:2]) / 2.
gt_wh = gt_boxes[:,2:4] - gt_boxes[:,0:2]
gt_iou = np.array([iou(b, self.priors_norm) for b in gt_boxes]).T
max_idxs = np.argmax(gt_iou, axis=1)
priors_xy = self.priors_xy_norm
priors_wh = self.priors_wh_norm
# assign ground truth to priors
if self.ssd_assignment:
# original ssd assignment rule
max_idxs = np.argmax(gt_iou, axis=1)
max_val = gt_iou[np.arange(num_priors), max_idxs]
prior_mask = max_val > overlap_threshold
match_indices = max_idxs[prior_mask]
else:
prior_area = np.product(priors_wh, axis=-1)[:,None]
gt_area = np.product(gt_wh, axis=-1)[:,None]
priors_ar = priors_wh[:,0] / priors_wh[:,1]
gt_ar = gt_wh[:,0] / gt_wh[:,1]
match_mask = np.array([np.concatenate([
priors_xy >= gt_min_xy[i],
priors_xy <= gt_max_xy[i],
#priors_wh >= 0.5 * gt_wh[i],
#priors_wh <= 2.0 * gt_wh[i],
#prior_area >= 0.25 * gt_area[i],
#prior_area <= 4.0 * gt_area[i],
prior_area >= 0.0625 * gt_area[i],
prior_area <= 1.0 * gt_area[i],
#((priors_ar < 1.0) == (gt_ar[i] < 1.0))[:,None],
(np.abs(priors_ar - gt_ar[i]) < 0.5)[:,None],
max_idxs[:,None] == i
], axis=-1) for i in range(len(gt_boxes))])
self.match_mask = match_mask
match_mask = np.array([np.all(m, axis=-1) for m in match_mask]).T
prior_mask = np.any(match_mask, axis=-1)
match_indices = np.argmax(match_mask[prior_mask,:], axis=-1)
self.match_indices = dict(zip(list(np.ix_(prior_mask)[0]), list(match_indices)))
# prior labels
confidence = np.zeros((num_priors, num_classes))
confidence[:,0] = 1
confidence[prior_mask] = gt_one_hot[match_indices]
# compute local offsets from ground truth boxes
gt_xy = gt_xy[match_indices]
gt_wh = gt_wh[match_indices]
priors_xy = priors_xy[prior_mask]
priors_wh = priors_wh[prior_mask]
priors_variances = self.priors_variances[prior_mask,:]
offsets = np.zeros((num_priors, 4))
offsets[prior_mask,0:2] = (gt_xy - priors_xy) / priors_wh
offsets[prior_mask,2:4] = np.log(gt_wh / priors_wh)
offsets[prior_mask,0:4] /= priors_variances
return np.concatenate([offsets, confidence], axis=1)
def decode(self, model_output, confidence_threshold=0.01, keep_top_k=200, fast_nms=True, sparse=True):
# calculation is done with normalized sizes
prior_mask = model_output[:,4:] > confidence_threshold
if sparse:
# compute boxes only if the confidence is high enough and the class is not background
mask = np.any(prior_mask[:,1:], axis=1)
prior_mask = prior_mask[mask]
mask = np.ix_(mask)[0]
model_output = model_output[mask]
priors_xy = self.priors_xy[mask] / self.image_size
priors_wh = self.priors_wh[mask] / self.image_size
priors_variances = self.priors_variances[mask,:]
else:
priors_xy = self.priors_xy / self.image_size
priors_wh = self.priors_wh / self.image_size
priors_variances = self.priors_variances
offsets = model_output[:,:4]
confidence = model_output[:,4:]
num_priors = offsets.shape[0]
num_classes = confidence.shape[1]
# compute bounding boxes from local offsets
boxes = np.empty((num_priors, 4))
offsets = offsets * priors_variances
boxes_xy = priors_xy + offsets[:,0:2] * priors_wh
boxes_wh = priors_wh * np.exp(offsets[:,2:4])
boxes[:,0:2] = boxes_xy - boxes_wh / 2. # xmin, ymin
boxes[:,2:4] = boxes_xy + boxes_wh / 2. # xmax, ymax
boxes = np.clip(boxes, 0.0, 1.0)
# do non maximum suppression
results = []
for c in range(1, num_classes):
mask = prior_mask[:,c]
boxes_to_process = boxes[mask]
if len(boxes_to_process) > 0:
confs_to_process = confidence[mask, c]
if fast_nms:
idx = non_maximum_suppression(
boxes_to_process, confs_to_process,
self.nms_thresh, self.nms_top_k)
else:
idx = non_maximum_suppression_slow(
boxes_to_process, confs_to_process,
self.nms_thresh, self.nms_top_k)
good_boxes = boxes_to_process[idx]
good_confs = confs_to_process[idx][:, None]
labels = np.ones((len(idx),1)) * c
c_pred = np.concatenate((good_boxes, good_confs, labels), axis=1)
results.extend(c_pred)
if len(results) > 0:
results = np.array(results)
order = np.argsort(-results[:, 4])
results = results[order]
results = results[:keep_top_k]
else:
results = np.empty((0,6))
self.results = results
return results
def compute_class_weights(self, gt_util, num_samples=np.inf):
"""Computes weighting factors for the classification loss by considering
the inverse frequency of class instance in local ground truth.
"""
s = np.zeros(gt_util.num_classes)
for i in tqdm(range(min(gt_util.num_samples, num_samples))):
egt = self.encode(gt_util.data[i])
s += np.sum(egt[:,-gt_util.num_classes:], axis=0)
si = 1/s
return si/np.sum(si) * len(s)
def show_image(self, img):
"""Resizes an image to the network input size and shows it in the current figure.
"""
        image_size = self.image_size # width, height
        img = cv2.resize(img, image_size, interpolation=cv2.INTER_LINEAR)
img = img[:, :, (2,1,0)] # BGR to RGB
img = img / 256.
plt.imshow(img)
def plot_assignment(self, map_idx):
ax = plt.gca()
im = plt.gci()
img_h, img_w = image_size = im.get_size()
# ground truth
boxes = self.gt_boxes
boxes_x = (boxes[:,0] + boxes[:,2]) / 2. * img_h
boxes_y = (boxes[:,1] + boxes[:,3]) / 2. * img_w
for box in boxes:
xy_rec = to_rec(box[:4], image_size)
ax.add_patch(plt.Polygon(xy_rec, fill=False, edgecolor='b', linewidth=2))
plt.plot(boxes_x, boxes_y, 'bo', markersize=6)
# prior boxes
for idx, box_idx in self.match_indices.items():
if idx >= self.map_offsets[map_idx] and idx < self.map_offsets[map_idx+1]:
x, y = self.priors_xy[idx]
w, h = self.priors_wh[idx]
plt.plot(x, y, 'ro', markersize=4)
plt.plot([x, boxes_x[box_idx]], [y, boxes_y[box_idx]], '-r', linewidth=1)
ax.add_patch(plt.Rectangle((x-w/2, y-h/2), w+1, h+1,
fill=False, edgecolor='y', linewidth=2))
def plot_results(self, results=None, classes=None, show_labels=True, gt_data=None, confidence_threshold=None):
if results is None:
results = self.results
if confidence_threshold is not None:
mask = results[:, 4] > confidence_threshold
results = results[mask]
if classes is not None:
colors = plt.cm.hsv(np.linspace(0, 1, len(classes)+1)).tolist()
ax = plt.gca()
im = plt.gci()
image_size = im.get_size()
# draw ground truth
if gt_data is not None:
for box in gt_data:
label = np.nonzero(box[4:])[0][0]+1
color = 'g' if classes == None else colors[label]
xy_rec = to_rec(box[:4], image_size)
ax.add_patch(plt.Polygon(xy_rec, fill=True, color=color, linewidth=1, alpha=0.3))
# draw prediction
for r in results:
label = int(r[5])
confidence = r[4]
color = 'r' if classes == None else colors[label]
xy_rec = to_rec(r[:4], image_size)
ax.add_patch(plt.Polygon(xy_rec, fill=False, edgecolor=color, linewidth=2))
if show_labels:
label_name = label if classes == None else classes[label]
xmin, ymin = xy_rec[0]
display_txt = '%0.2f, %s' % (confidence, label_name)
ax.text(xmin, ymin, display_txt, bbox={'facecolor':color, 'alpha':0.5})
def print_gt_stats(self):
# TODO
pass
| 37.805687
| 129
| 0.554093
|
9cb5a4952715645e130326dc514c1c46ad24ed89
| 3,310
|
py
|
Python
|
o/soft_robot/derivation_of_dynamics/derived/mac2/eqs/numpy_style/Theta0.py
|
YoshimitsuMatsutaIe/ctrlab2021_soudan
|
7841c981e6804cc92d34715a00e7c3efce41d1d0
|
[
"MIT"
] | null | null | null |
o/soft_robot/derivation_of_dynamics/derived/mac2/eqs/numpy_style/Theta0.py
|
YoshimitsuMatsutaIe/ctrlab2021_soudan
|
7841c981e6804cc92d34715a00e7c3efce41d1d0
|
[
"MIT"
] | null | null | null |
o/soft_robot/derivation_of_dynamics/derived/mac2/eqs/numpy_style/Theta0.py
|
YoshimitsuMatsutaIe/ctrlab2021_soudan
|
7841c981e6804cc92d34715a00e7c3efce41d1d0
|
[
"MIT"
] | null | null | null |
import numpy
def f(q, q_dot, xi):
l1, l2, l3 = q[0,0], q[1,0], q[2,0]
l1_dot, l2_dot, l3_dot = q_dot[0,0], q_dot[1,0], q_dot[2,0]
return numpy.array([[8.8412689977681e-5*xi**2*(0.5*l1 - 5.0e-5)*(l2 - 0.002) + 3.31547587416304e-5*xi**2*(0.5*l1 - 5.0e-5)*(l3 - 0.0015) - 3.03918621798279e-5*xi**2*(l1 - 0.0001)**2 - 2.41125518120948e-5*xi**2*(0.5*l2 - 0.001)*(l3 - 0.0015) - 1.60750345413965e-5*xi**2*(l2 - 0.002)**2 + 0.5*(l3 - 0.0015)**2*(-4.52110346476778e-6*xi**2 - 1.04773789644241e-9) + 1.0, -2.32023137992695e-5*xi**2*(0.5*l1 - 5.0e-5)*(l2 - 0.002) - 8.70086767472609e-6*xi**2*(0.5*l1 - 5.0e-5)*(l3 - 0.0015) + 7.97579536849891e-6*xi**2*(l1 - 0.0001)**2 + 6.32790376343716e-6*xi**2*(0.5*l2 - 0.001)*(l3 - 0.0015) + 4.2186025089581e-6*xi**2*(l2 - 0.002)**2 + 5.93240977822234e-7*xi**2*(l3 - 0.0015)**2, 2.96701739469302*xi*(0.5*l1 - 5.0e-5)*(l2 - 0.002) - 4.79372586530627*xi*(0.5*l1 - 5.0e-5)*(l3 - 0.0015) + 0.456677117653311*xi*(l1 - 0.0001)**2 - 0.00779639175257732*xi*(l1 - 0.0001) - 5.26092039536614*xi*(0.5*l2 - 0.001)*(l3 - 0.0015) + 0.57347575016828*xi*(l2 - 0.002)**2 + 0.00567010309278351*xi*(l2 - 0.002) + 2.5136615651681*xi*(l3 - 0.0015)**2 + 0.00212628865979382*xi*(l3 - 0.0015)], [8.8412689977681e-5*xi**2*(0.5*l1 - 5.0e-5)*(l2 - 0.002) + 3.31547587416304e-5*xi**2*(0.5*l1 - 5.0e-5)*(l3 - 0.0015) - 3.03918621798279e-5*xi**2*(l1 - 0.0001)**2 - 2.41125518120948e-5*xi**2*(0.5*l2 - 0.001)*(l3 - 0.0015) - 1.60750345413965e-5*xi**2*(l2 - 0.002)**2 + 0.5*(l3 - 0.0015)**2*(-4.52110346476778e-6*xi**2 - 1.04773789644241e-9) + 1.0, -2.32023137992695e-5*xi**2*(0.5*l1 - 5.0e-5)*(l2 - 0.002) - 8.70086767472609e-6*xi**2*(0.5*l1 - 5.0e-5)*(l3 - 0.0015) + 7.97579536849891e-6*xi**2*(l1 - 0.0001)**2 + 6.32790376343716e-6*xi**2*(0.5*l2 - 0.001)*(l3 - 0.0015) + 4.2186025089581e-6*xi**2*(l2 - 0.002)**2 + 5.93240977822234e-7*xi**2*(l3 - 0.0015)**2, 2.96701739469302*xi*(0.5*l1 - 5.0e-5)*(l2 - 0.002) - 4.79372586530627*xi*(0.5*l1 - 5.0e-5)*(l3 - 0.0015) + 0.456677117653311*xi*(l1 - 0.0001)**2 - 0.00779639175257732*xi*(l1 - 0.0001) - 5.26092039536614*xi*(0.5*l2 - 0.001)*(l3 - 0.0015) + 0.57347575016828*xi*(l2 - 0.002)**2 + 0.00567010309278351*xi*(l2 - 0.002) + 2.5136615651681*xi*(l3 - 0.0015)**2 + 0.00212628865979382*xi*(l3 - 0.0015)], [8.8412689977681e-5*xi**2*(0.5*l1 - 5.0e-5)*(l2 - 0.002) + 3.31547587416304e-5*xi**2*(0.5*l1 - 5.0e-5)*(l3 - 0.0015) - 3.03918621798279e-5*xi**2*(l1 - 0.0001)**2 - 2.41125518120948e-5*xi**2*(0.5*l2 - 0.001)*(l3 - 0.0015) - 1.60750345413965e-5*xi**2*(l2 - 0.002)**2 + 0.5*(l3 - 0.0015)**2*(-4.52110346476778e-6*xi**2 - 1.04773789644241e-9) + 1.0, -2.32023137992695e-5*xi**2*(0.5*l1 - 5.0e-5)*(l2 - 0.002) - 8.70086767472609e-6*xi**2*(0.5*l1 - 5.0e-5)*(l3 - 0.0015) + 7.97579536849891e-6*xi**2*(l1 - 0.0001)**2 + 6.32790376343716e-6*xi**2*(0.5*l2 - 0.001)*(l3 - 0.0015) + 4.2186025089581e-6*xi**2*(l2 - 0.002)**2 + 5.93240977822234e-7*xi**2*(l3 - 0.0015)**2, 2.96701739469302*xi*(0.5*l1 - 5.0e-5)*(l2 - 0.002) - 4.79372586530627*xi*(0.5*l1 - 5.0e-5)*(l3 - 0.0015) + 0.456677117653311*xi*(l1 - 0.0001)**2 - 0.00779639175257732*xi*(l1 - 0.0001) - 5.26092039536614*xi*(0.5*l2 - 0.001)*(l3 - 0.0015) + 0.57347575016828*xi*(l2 - 0.002)**2 + 0.00567010309278351*xi*(l2 - 0.002) + 2.5136615651681*xi*(l3 - 0.0015)**2 + 0.00212628865979382*xi*(l3 - 0.0015)]])
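# --- Usage sketch (added example; the values below are arbitrary placeholders) ---
# f() indexes q and q_dot as 2-D column vectors (q[0,0], q[1,0], q[2,0]), so both
# must be shaped (3, 1). xi is a scalar parameter of the generated expression;
# its physical meaning is defined elsewhere in the repository.
if __name__ == '__main__':
    q = numpy.array([[0.001], [0.002], [0.0015]])
    q_dot = numpy.zeros((3, 1))
    xi = 1.0
    print(f(q, q_dot, xi).shape)  # (3, 3)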
| 551.666667
| 3,171
| 0.60997
|
12f2511f6f80ac8c9e70e0fe21ae83dcf8eb0edf
| 1,218
|
py
|
Python
|
chat/views.py
|
damiso15/chat_bot
|
d3c08252c4bd728e5c604f498e25c866213f5174
|
[
"MIT"
] | null | null | null |
chat/views.py
|
damiso15/chat_bot
|
d3c08252c4bd728e5c604f498e25c866213f5174
|
[
"MIT"
] | 6
|
2020-06-21T10:12:02.000Z
|
2021-09-22T19:16:45.000Z
|
chat/views.py
|
damiso15/chat_bot
|
d3c08252c4bd728e5c604f498e25c866213f5174
|
[
"MIT"
] | 1
|
2020-07-29T10:54:57.000Z
|
2020-07-29T10:54:57.000Z
|
from django.shortcuts import render
from django.http import HttpResponse
import json
from django.views.decorators.csrf import csrf_exempt
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
# Create your views here.
# chatbot = ChatBot(
# 'Ron Obvious',
# trainer='chatterbot.trainers.ChatterBotCorpusTrainer'
# )
# chatbot.train("chatterbot.corpus.english")
chatbot = ChatBot('Ron Obvious')
# chatbot.storage.drop()
# trainer = ChatterBotCorpusTrainer(chatbot)
# trainer.train('chatterbot.corpus.english')
@csrf_exempt
def get_response(request):
response = {'status': None}
if request.method == 'POST':
data = json.loads(request.body.decode('utf-8'))
message = data['message']
chat_response = chatbot.get_response(message).text
response['message'] = {'text': chat_response, 'user': False, 'chat_bot': True}
response['status'] = 'ok'
else:
response['error'] = 'no post data found'
return HttpResponse(json.dumps(response), content_type='application/json')
def home(request, template_name='home.html'):
context = {'title': 'Chatbot Version 1.0'}
return render(request, template_name, context)
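# --- Usage sketch (illustrative only; the endpoint URL below is an assumption,
# the actual route is defined in the project's urls.py, which is not shown) ---
#
# import requests
# resp = requests.post('http://localhost:8000/get-response/', json={'message': 'Hello'})
# resp.json()  # e.g. {'status': 'ok', 'message': {'text': '...', 'user': False, 'chat_bot': True}}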
| 28.325581
| 86
| 0.711823
|
6b4e83770131798fa5ecd44529b901b4e54d7f9f
| 7,239
|
py
|
Python
|
huey/contrib/sql_huey.py
|
qyqx233/huey
|
b1275c9fff5d72683979fc3503b174544ce10a8e
|
[
"MIT"
] | 3,676
|
2015-01-02T22:43:04.000Z
|
2022-03-31T05:29:02.000Z
|
huey/contrib/sql_huey.py
|
qyqx233/huey
|
b1275c9fff5d72683979fc3503b174544ce10a8e
|
[
"MIT"
] | 542
|
2015-01-15T17:18:52.000Z
|
2022-03-16T19:18:55.000Z
|
huey/contrib/sql_huey.py
|
qyqx233/huey
|
b1275c9fff5d72683979fc3503b174544ce10a8e
|
[
"MIT"
] | 385
|
2015-01-08T05:02:58.000Z
|
2022-03-17T12:27:38.000Z
|
from functools import partial
import operator
from peewee import *
from playhouse.db_url import connect as db_url_connect
from huey.api import Huey
from huey.constants import EmptyData
from huey.exceptions import ConfigurationError
from huey.storage import BaseStorage
class BytesBlobField(BlobField):
def python_value(self, value):
return value if isinstance(value, bytes) else bytes(value)
class SqlStorage(BaseStorage):
def __init__(self, name='huey', database=None, **kwargs):
super(SqlStorage, self).__init__(name)
if database is None:
raise ConfigurationError('Use of SqlStorage requires a '
'database= argument, which should be a '
'peewee database or a connection string.')
if isinstance(database, Database):
self.database = database
else:
# Treat database argument as a URL connection string.
self.database = db_url_connect(database)
self.KV, self.Schedule, self.Task = self.create_models()
self.create_tables()
def create_models(self):
class Base(Model):
class Meta:
database = self.database
class KV(Base):
queue = CharField()
key = CharField()
value = BytesBlobField()
class Meta:
primary_key = CompositeKey('queue', 'key')
class Schedule(Base):
queue = CharField()
data = BytesBlobField()
timestamp = TimestampField(resolution=1000)
class Meta:
indexes = ((('queue', 'timestamp'), False),)
class Task(Base):
queue = CharField()
data = BytesBlobField()
priority = FloatField(default=0.0)
Task.add_index(Task.priority.desc(), Task.id)
return (KV, Schedule, Task)
def create_tables(self):
with self.database:
self.database.create_tables([self.KV, self.Schedule, self.Task])
def drop_tables(self):
with self.database:
self.database.drop_tables([self.KV, self.Schedule, self.Task])
def close(self):
return self.database.close()
def tasks(self, *columns):
return self.Task.select(*columns).where(self.Task.queue == self.name)
def schedule(self, *columns):
return (self.Schedule.select(*columns)
.where(self.Schedule.queue == self.name))
def kv(self, *columns):
return self.KV.select(*columns).where(self.KV.queue == self.name)
def check_conn(self):
if not self.database.is_connection_usable():
self.database.close()
self.database.connect()
def enqueue(self, data, priority=None):
self.check_conn()
self.Task.create(queue=self.name, data=data, priority=priority or 0)
def dequeue(self):
self.check_conn()
query = (self.tasks(self.Task.id, self.Task.data)
.order_by(self.Task.priority.desc(), self.Task.id)
.limit(1))
if self.database.for_update:
query = query.for_update()
with self.database.atomic():
try:
task = query.get()
except self.Task.DoesNotExist:
return
nrows = self.Task.delete().where(self.Task.id == task.id).execute()
if nrows == 1:
return task.data
def queue_size(self):
return self.tasks().count()
def enqueued_items(self, limit=None):
query = self.tasks(self.Task.data).order_by(self.Task.priority.desc(),
self.Task.id)
if limit is not None:
query = query.limit(limit)
return list(map(operator.itemgetter(0), query.tuples()))
def flush_queue(self):
self.Task.delete().where(self.Task.queue == self.name).execute()
def add_to_schedule(self, data, timestamp, utc):
self.check_conn()
self.Schedule.create(queue=self.name, data=data, timestamp=timestamp)
def read_schedule(self, timestamp):
self.check_conn()
query = (self.schedule(self.Schedule.id, self.Schedule.data)
.where(self.Schedule.timestamp <= timestamp)
.tuples())
if self.database.for_update:
query = query.for_update()
with self.database.atomic():
results = list(query)
if not results:
return []
id_list, data = zip(*results)
(self.Schedule
.delete()
.where(self.Schedule.id.in_(id_list))
.execute())
return list(data)
def schedule_size(self):
return self.schedule().count()
def scheduled_items(self):
tasks = (self.schedule(self.Schedule.data)
.order_by(self.Schedule.timestamp)
.tuples())
return list(map(operator.itemgetter(0), tasks))
def flush_schedule(self):
(self.Schedule
.delete()
.where(self.Schedule.queue == self.name)
.execute())
def put_data(self, key, value, is_result=False):
self.check_conn()
if isinstance(self.database, PostgresqlDatabase):
(self.KV
.insert(queue=self.name, key=key, value=value)
.on_conflict(conflict_target=[self.KV.queue, self.KV.key],
preserve=[self.KV.value])
.execute())
else:
self.KV.replace(queue=self.name, key=key, value=value).execute()
def peek_data(self, key):
self.check_conn()
try:
kv = self.kv(self.KV.value).where(self.KV.key == key).get()
except self.KV.DoesNotExist:
return EmptyData
else:
return kv.value
def pop_data(self, key):
self.check_conn()
query = self.kv().where(self.KV.key == key)
if self.database.for_update:
query = query.for_update()
with self.database.atomic():
try:
kv = query.get()
except self.KV.DoesNotExist:
return EmptyData
else:
dq = self.KV.delete().where(
(self.KV.queue == self.name) &
(self.KV.key == key))
return kv.value if dq.execute() == 1 else EmptyData
def has_data_for_key(self, key):
self.check_conn()
return self.kv().where(self.KV.key == key).exists()
def put_if_empty(self, key, value):
self.check_conn()
try:
with self.database.atomic():
self.KV.insert(queue=self.name, key=key, value=value).execute()
except IntegrityError:
return False
else:
return True
def result_store_size(self):
return self.kv().count()
def result_items(self):
query = self.kv(self.KV.key, self.KV.value).tuples()
return dict((k, v) for k, v in query.iterator())
def flush_results(self):
self.KV.delete().where(self.KV.queue == self.name).execute()
SqlHuey = partial(Huey, storage_class=SqlStorage)
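# --- Usage sketch (added example, not part of the upstream module) ---
# SqlHuey forwards extra keyword arguments to SqlStorage, so a peewee Database
# instance or a db-url connection string can be passed straight through. The
# sqlite URL and task below are placeholders chosen only for illustration.
if __name__ == '__main__':
    demo_huey = SqlHuey('demo-app', database='sqlite:///demo-huey.db')

    @demo_huey.task()
    def add(a, b):
        return a + b

    add(1, 2)  # enqueues the task in the SQL-backed queue
    print(demo_huey.storage.queue_size())  # -> 1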
| 31.75
| 79
| 0.570521
|
4d072e8d40ee7162553c37709ac9be2f2f407748
| 59
|
py
|
Python
|
project/apps/smanager/__init__.py
|
bhs-contests/barberscore-api
|
7bd06b074c99903f031220f41b15a22474724044
|
[
"BSD-2-Clause"
] | null | null | null |
project/apps/smanager/__init__.py
|
bhs-contests/barberscore-api
|
7bd06b074c99903f031220f41b15a22474724044
|
[
"BSD-2-Clause"
] | 9
|
2020-06-05T22:17:17.000Z
|
2022-03-12T00:04:00.000Z
|
project/apps/smanager/__init__.py
|
bhs-contests/barberscore-api
|
7bd06b074c99903f031220f41b15a22474724044
|
[
"BSD-2-Clause"
] | null | null | null |
default_app_config = 'apps.smanager.config.SmanagerConfig'
| 29.5
| 58
| 0.847458
|
f1cf5f854b2c3af21eaecf8348ae4839e96fc14a
| 15,058
|
py
|
Python
|
testproject/tests.py
|
nim65s/cagnotte_solidaire
|
60df468093dadfab67a6a44d77549d13a2b79616
|
[
"BSD-2-Clause"
] | null | null | null |
testproject/tests.py
|
nim65s/cagnotte_solidaire
|
60df468093dadfab67a6a44d77549d13a2b79616
|
[
"BSD-2-Clause"
] | 15
|
2019-10-22T21:36:18.000Z
|
2022-03-11T23:27:27.000Z
|
testproject/tests.py
|
nim65s/cagnotte_solidaire
|
60df468093dadfab67a6a44d77549d13a2b79616
|
[
"BSD-2-Clause"
] | null | null | null |
"""Main test module for Cagnotte Solidaire."""
from datetime import date
from cagnottesolidaire.models import Cagnotte, Demande, Offre, Proposition
from django.contrib.auth.models import User
from django.core import mail
from django.test import TestCase
from django.urls import reverse
def strpdate(s: str) -> date:
"""Parse a date. Nobody did that before."""
d, m, y = [int(i) for i in s.split('/')]
return date(y, m, d)
class TestCagnotte(TestCase):
"""Mait test class for Cagnotte Solidaire."""
def setUp(self):
"""Create 4 guys for all tests."""
for guy in 'abcs':
User.objects.create_user(guy, email=f'{guy}@example.org', password=guy, is_staff=guy == 's')
def test_cagnotte(self):
"""Perform tests on the Cagnotte model."""
self.assertEqual(Cagnotte.objects.count(), 0)
self.assertEqual(
self.client.get(reverse('cagnottesolidaire:cagnotte', kwargs={'slug': 'first'})).status_code, 404)
self.assertEqual(self.client.get(reverse('cagnottesolidaire:cagnotte_list')).status_code, 200)
self.assertEqual(self.client.get(reverse('cagnottesolidaire:cagnotte_create')).status_code, 302)
self.client.login(username='a', password='a')
self.assertEqual(self.client.get(reverse('cagnottesolidaire:cagnotte_create')).status_code, 200)
cagnotte_data = {
'name': 'first',
'objectif': 'nothing',
'finances': 42,
'fin_depot': '31/12/2016',
'fin_achat': '30/12/2017'
}
# fin_depot < today
self.assertLess(strpdate(cagnotte_data['fin_depot']), date.today())
r = self.client.post(reverse('cagnottesolidaire:cagnotte_create'), cagnotte_data)
self.assertEqual(Cagnotte.objects.count(), 0)
self.assertEqual(r.status_code, 200)
# fin_achat < fin_depot
cagnotte_data['fin_depot'] = '31/12/2021'
self.assertLess(strpdate(cagnotte_data['fin_achat']), strpdate(cagnotte_data['fin_depot']))
r = self.client.post(reverse('cagnottesolidaire:cagnotte_create'), cagnotte_data)
self.assertEqual(Cagnotte.objects.count(), 0)
self.assertEqual(r.status_code, 200)
# OK
cagnotte_data['fin_achat'] = '31/12/2022'
self.assertLess(date.today(), strpdate(cagnotte_data['fin_depot']))
self.assertLess(strpdate(cagnotte_data['fin_depot']), strpdate(cagnotte_data['fin_achat']))
r = self.client.post(reverse('cagnottesolidaire:cagnotte_create'), cagnotte_data)
self.assertEqual(Cagnotte.objects.count(), 1)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.url, reverse('cagnottesolidaire:cagnotte', kwargs={'slug': 'first'}))
self.assertEqual(self.client.get(reverse('cagnottesolidaire:cagnotte_list')).status_code, 200)
self.assertEqual(
self.client.get(reverse('cagnottesolidaire:cagnotte', kwargs={'slug': 'first'})).status_code, 200)
def test_proposition(self):
"""Perform tests on the Proposition model."""
guy = User.objects.first()
self.assertEqual(Proposition.objects.count(), 0)
self.assertEqual(Cagnotte.objects.count(), 0)
proj = Cagnotte.objects.create(name='second',
responsable=guy,
objectif='nothing',
finances=43,
fin_depot=date(2017, 12, 31),
fin_achat=date(2018, 12, 31))
projd = {'slug': proj.slug}
propd = {'p_slug': proj.slug, 'slug': 'propo'}
self.assertEqual(self.client.get(reverse('cagnottesolidaire:proposition', kwargs=propd)).status_code, 404)
self.assertEqual(self.client.get(reverse('cagnottesolidaire:cagnotte', kwargs=projd)).status_code, 200)
self.assertEqual(
self.client.get(reverse('cagnottesolidaire:proposition_create', kwargs=projd)).status_code, 302)
self.client.login(username='a', password='a')
self.assertEqual(
self.client.get(reverse('cagnottesolidaire:proposition_create', kwargs=projd)).status_code, 200)
proposition_data = {'name': 'Propo', 'description': 'blah blah', 'prix': '-42', 'beneficiaires': '1'}
# prix < 0
r = self.client.post(reverse('cagnottesolidaire:proposition_create', kwargs=projd), proposition_data)
self.assertEqual(Proposition.objects.count(), 0)
self.assertEqual(r.status_code, 200)
proposition_data['prix'] = '42'
r = self.client.post(reverse('cagnottesolidaire:proposition_create', kwargs=projd), proposition_data)
self.assertEqual(Proposition.objects.count(), 1)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.url, reverse('cagnottesolidaire:proposition', kwargs=propd))
self.assertEqual(self.client.get(reverse('cagnottesolidaire:cagnotte', kwargs=projd)).status_code, 200)
self.assertEqual(self.client.get(reverse('cagnottesolidaire:proposition', kwargs=propd)).status_code, 200)
def test_offre(self):
"""Perform tests on the Offre model."""
guy = User.objects.first()
proj = Cagnotte.objects.create(name='third',
responsable=guy,
objectif='nothing',
finances=43,
fin_depot=date(2017, 12, 31),
fin_achat=date(2021, 12, 31))
prop = Proposition.objects.create(name='Pipo', description='nope', prix=20, cagnotte=proj, responsable=guy)
propd = {'p_slug': proj.slug, 'slug': prop.slug}
self.assertEqual(self.client.get(reverse('cagnottesolidaire:proposition', kwargs=propd)).status_code, 200)
self.assertEqual(Offre.objects.count(), 0)
self.assertEqual(self.client.get(reverse('cagnottesolidaire:offre_create', kwargs=propd)).status_code, 302)
self.client.login(username='a', password='a')
self.assertEqual(self.client.get(reverse('cagnottesolidaire:offre_create', kwargs=propd)).status_code, 200)
# min price is 20, so trying 18 should return an error
r = self.client.post(reverse('cagnottesolidaire:offre_create', kwargs=propd), {'prix': '18'})
self.assertEqual(Offre.objects.count(), 0)
self.assertEqual(r.status_code, 200)
self.assertEqual(len(mail.outbox), 0)
r = self.client.post(reverse('cagnottesolidaire:offre_create', kwargs=propd), {'prix': '22'})
self.assertEqual(Offre.objects.count(), 1)
self.assertEqual(r.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(r.url, reverse('cagnottesolidaire:proposition', kwargs=propd))
self.assertEqual(self.client.get(reverse('cagnottesolidaire:proposition', kwargs=propd)).status_code, 200)
# offre_detail
url = reverse('cagnottesolidaire:offre', kwargs={'pk': Offre.objects.first().pk})
self.assertEqual(self.client.get(url).status_code, 200)
self.client.login(username='s', password='s')
self.assertEqual(self.client.get(url).status_code, 200)
self.client.login(username='b', password='b')
self.assertIn(self.client.get(url).status_code, [302, 403])
self.client.logout()
self.assertEqual(self.client.get(url).status_code, 302)
def test_lists(self):
"""Check list views."""
self.assertEqual(self.client.get(reverse('cagnottesolidaire:offre_list')).status_code, 302)
self.assertEqual(self.client.get(reverse('cagnottesolidaire:proposition_list')).status_code, 302)
self.client.login(username='a', password='a')
self.assertEqual(self.client.get(reverse('cagnottesolidaire:offre_list')).status_code, 200)
self.assertEqual(self.client.get(reverse('cagnottesolidaire:proposition_list')).status_code, 200)
guy = User.objects.first()
proj = Cagnotte.objects.create(name='quatre',
responsable=guy,
objectif='nothing',
finances=43,
fin_depot=date(2017, 12, 31),
fin_achat=date(2018, 12, 31))
prop = Proposition.objects.create(name='cinq', description='nope', prix=20, cagnotte=proj, responsable=guy)
offr = Offre.objects.create(proposition=prop, beneficiaire=guy, prix=3)
self.assertEqual(self.client.get(reverse('cagnottesolidaire:offre_list')).status_code, 200)
self.assertEqual(self.client.get(reverse('cagnottesolidaire:proposition_list')).status_code, 200)
self.assertEqual(str(offr), 'offre de a sur cinq (cagnotte quatre)')
def test_fbv(self):
"""Test the fuction based views in Cagnotte Solidaire."""
a, b, c, s = User.objects.all()
proj = Cagnotte.objects.create(name='fourth',
responsable=a,
objectif='nothing',
finances=43,
fin_depot=date(2017, 12, 31),
fin_achat=date(2018, 12, 31))
prop = Proposition.objects.create(name='Pipo', description='nope', prix=20, cagnotte=proj, responsable=b)
offr = Offre.objects.create(proposition=prop, prix=22, beneficiaire=c)
ok, ko, paye = [
reverse(f'cagnottesolidaire:offre_{view}', kwargs={'pk': offr.pk}) for view in ['ok', 'ko', 'paye']
]
# Must be logged in
self.assertEqual(self.client.get(ok).url.split('?')[0], reverse('login'))
self.assertEqual(self.client.get(ko).url.split('?')[0], reverse('login'))
self.assertEqual(self.client.get(paye).url.split('?')[0], reverse('login'))
# An offer that has not been validated cannot be paid
self.client.login(username='a', password='a')
self.assertEqual(self.client.get(paye).status_code, 403)
# Only b can accept or refuse
self.assertEqual(self.client.get(ok).status_code, 403)
self.assertEqual(self.client.get(ko).status_code, 403)
self.assertEqual(Offre.objects.first().valide, None)
self.client.login(username='b', password='b')
self.assertEqual(self.client.get(ko).status_code, 302)
self.assertEqual(Offre.objects.first().valide, False)
self.assertEqual(self.client.get(ok).status_code, 302)
self.assertEqual(Offre.objects.first().valide, True)
# Once the offer is accepted, only a can collect the payment
self.assertEqual(self.client.get(paye).status_code, 403)
self.assertEqual(Offre.objects.first().paye, False)
self.client.login(username='a', password='a')
self.assertEqual(self.client.get(paye).status_code, 302)
self.assertEqual(Offre.objects.first().paye, True)
def test_offrable(self):
"""Test something, I don't know what right now."""
a, b, c, s = User.objects.all()
proj = Cagnotte.objects.create(name='fifth',
responsable=a,
objectif='nothing',
finances=43,
fin_depot=date(2017, 12, 31),
fin_achat=date(2021, 12, 31))
prop = Proposition.objects.create(name='Pipo',
description='nope',
prix=20,
cagnotte=proj,
responsable=b,
beneficiaires=2)
self.client.login(username='c', password='c')
self.assertEqual(Offre.objects.count(), 0)
url = reverse('cagnottesolidaire:offre_create', kwargs={'p_slug': proj.slug, 'slug': prop.slug})
self.assertEqual(self.client.post(url, {'prix': '21'}).status_code, 302)
self.assertEqual(Offre.objects.count(), 1)
self.assertEqual(self.client.post(url, {'prix': '21'}).status_code, 302)
self.assertEqual(Offre.objects.count(), 2)
self.assertEqual(self.client.post(url, {'prix': '21'}).status_code, 302)
self.assertEqual(Offre.objects.count(), 3)
# old
proj = Cagnotte.objects.create(name='sixth',
responsable=a,
objectif='nothing',
finances=43,
fin_depot=date(2014, 12, 31),
fin_achat=date(2015, 12, 31))
prop = Proposition.objects.create(name='popo',
description='nope',
prix=20,
cagnotte=proj,
responsable=b,
beneficiaires=2)
self.client.login(username='c', password='c')
self.assertEqual(Offre.objects.count(), 3)
url = reverse('cagnottesolidaire:offre_create', kwargs={'p_slug': proj.slug, 'slug': prop.slug})
self.assertEqual(self.client.post(url, {'prix': '21'}).status_code, 403)
self.assertEqual(Offre.objects.count(), 3)
def test_demande(self):
"""Perform tests on the Demande model."""
guy = User.objects.first()
self.assertEqual(Demande.objects.count(), 0)
proj = Cagnotte.objects.create(name='last',
responsable=guy,
objectif='nothing',
finances=43,
fin_depot=date(2017, 12, 31),
fin_achat=date(2018, 12, 31))
data = {'slug': proj.slug}
# Not logged in
self.assertEqual(self.client.get(reverse('cagnottesolidaire:demande_create', kwargs=data)).status_code, 302)
self.client.login(username='c', password='c')
self.assertEqual(self.client.get(reverse('cagnottesolidaire:demande_create', kwargs=data)).status_code, 200)
demande_data = {'description': 'cours de massage'}
r = self.client.post(reverse('cagnottesolidaire:demande_create', kwargs=data), demande_data)
self.assertEqual(Demande.objects.count(), 1)
self.assertEqual(r.status_code, 302)
self.assertEqual(self.client.get(reverse('cagnottesolidaire:cagnotte', kwargs=data)).status_code, 200)
delete_url = reverse('cagnottesolidaire:demande_delete', kwargs={'pk': Demande.objects.first().pk})
self.assertEqual(self.client.get(delete_url).status_code, 200)
self.client.post(delete_url)
self.assertEqual(Demande.objects.count(), 0)
| 57.254753
| 116
| 0.596162
|
fb7787e2fb06d1815f81aedc052cb891c6a1bdfd
| 2,130
|
py
|
Python
|
Exscript/protocols/drivers/one_os.py
|
timgates42/exscript
|
37f4f632352b934a2ff8fdf94ee26663e8181ead
|
[
"MIT"
] | 226
|
2015-01-20T19:59:06.000Z
|
2022-01-02T11:13:01.000Z
|
Exscript/protocols/drivers/one_os.py
|
timgates42/exscript
|
37f4f632352b934a2ff8fdf94ee26663e8181ead
|
[
"MIT"
] | 155
|
2015-01-02T07:56:27.000Z
|
2022-01-09T20:56:19.000Z
|
Exscript/protocols/drivers/one_os.py
|
timgates42/exscript
|
37f4f632352b934a2ff8fdf94ee26663e8181ead
|
[
"MIT"
] | 114
|
2015-01-03T11:48:17.000Z
|
2022-01-26T02:50:43.000Z
|
#
# Copyright (C) 2010-2017 Samuel Abels
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
A driver for OneOS (OneAccess).
"""
import re
from Exscript.protocols.drivers.driver import Driver
_user_re = [re.compile(r'[\r\n]Username: ?$')]
_password_re = [re.compile(r'[\r\n]Password: ?$')]
_first_prompt_re = re.compile(r'\r?\n\r?\n[\-\w+\.]+[>#]$')
_prompt_re = [re.compile(r'[\r\n][\-\w+\.]+(?:\([^\)]+\))?[>#] ?$')]
class OneOSDriver(Driver):
def __init__(self):
Driver.__init__(self, 'one_os')
self.user_re = _user_re
self.password_re = _password_re
self.prompt_re = _prompt_re
def check_head_for_os(self, string):
if _first_prompt_re.search(string):
return 40
return 0
def init_terminal(self, conn):
conn.execute('term len 0')
# Set a wide terminal on OneOS 5; ignore failures if stty is not available.
try:
conn.execute('stty columns 255')
except Exception:
pass
def auto_authorize(self, conn, account, flush, bailout):
conn.send('enable\r')
conn.app_authorize(account, flush, bailout)
| 36.101695
| 72
| 0.69108
|
3bdad9e4c71b18ad3bcb160a0b3845a60e46a0db
| 3,248
|
py
|
Python
|
Leetcode-cn/74.搜索二维矩阵.py
|
joey66666/Codeyard
|
08fc599baf1d99e39f878386124af854006a3602
|
[
"MIT"
] | null | null | null |
Leetcode-cn/74.搜索二维矩阵.py
|
joey66666/Codeyard
|
08fc599baf1d99e39f878386124af854006a3602
|
[
"MIT"
] | 3
|
2020-08-11T10:18:23.000Z
|
2021-05-18T15:25:42.000Z
|
Leetcode-cn/74.搜索二维矩阵.py
|
joey66666/Codeyard
|
08fc599baf1d99e39f878386124af854006a3602
|
[
"MIT"
] | null | null | null |
#
# @lc app=leetcode.cn id=74 lang=python3
#
# [74] Search a 2D Matrix
#
# https://leetcode-cn.com/problems/search-a-2d-matrix/description/
#
# algorithms
# Medium (41.18%)
# Likes: 401
# Dislikes: 0
# Total Accepted: 123.9K
# Total Submissions: 281.6K
# Testcase Example: '[[1,3,5,7],[10,11,16,20],[23,30,34,60]]\n3'
#
# Write an efficient algorithm that determines whether a target value exists in an m x n matrix. The matrix has the following properties:
#
#
# Integers in each row are sorted in ascending order from left to right.
# The first integer of each row is greater than the last integer of the previous row.
#
#
#
#
# Example 1:
#
#
# Input: matrix = [[1,3,5,7],[10,11,16,20],[23,30,34,60]], target = 3
# Output: true
#
#
# Example 2:
#
#
# Input: matrix = [[1,3,5,7],[10,11,16,20],[23,30,34,60]], target = 13
# Output: false
#
#
#
#
# Constraints:
#
#
# m == matrix.length
# n == matrix[i].length
# 1 <= m, n <= 100
# -10^4 <= matrix[i][j], target <= 10^4
#
#
#
# @lc code=start
from typing import List
# Solution 1: two-step lookup - first check whether the target falls within a row's range, then scan that row from left to right. Time: O(m + n), Space: O(1), Runtime: 83%
# - Searching from the top-right corner towards the bottom-left is more straightforward. Runtime: 83%
# class Solution:
# def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
# rows, cols = len(matrix), len(matrix[0])
# if rows == 0 or cols == 0:
# return False
# for i in range(rows):
# left, right = 0, cols - 1
# if (target >= matrix[i][left]) and (target <= matrix[i][right]):
# while right >= left:
# if target == matrix[i][right]:
# return True
# else:
# right -= 1
# return False
# Solution 2: one binary search - replace the linear row scan of Solution 1 with a binary search inside the row. Time: O(n + logM), Space: O(1), Runtime: 94%
# class Solution:
# def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
# rows, cols = len(matrix), len(matrix[0])
# if rows == 0 or cols == 0:
# return False
# for i in range(rows):
# if (target >= matrix[i][0]) and (target <= matrix[i][-1]):
# left, right = 0, cols - 1
# while left <= right:
# mid = int(left + (right - left) / 2)
# if target < matrix[i][mid]:
# right = mid - 1
# elif target > matrix[i][mid]:
# left = mid + 1
# else:
# return True
# return False
# return False
# Solution 3: two binary searches - also replace the row-selection scan of Solution 2 with a binary search. Time: O(logN + logM), Space: O(1), Runtime: 94%
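# The first binary search locates the last row whose first element is <= target; the second searches within that row.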
class Solution:
def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
rows, cols = len(matrix), len(matrix[0])
if rows == 0 or cols == 0:
return False
up, down, middle = -1, rows - 1, 0
while up < down:
middle = int(up + (down - up + 1) / 2)
if target >= matrix[middle][0]:
up = middle
else:
down = middle - 1
if (target >= matrix[up][0]) and (target <= matrix[up][-1]):
left, right = 0, cols - 1
while left <= right:
mid = int(left + (right - left) / 2)
if target < matrix[up][mid]:
right = mid - 1
elif target > matrix[up][mid]:
left = mid + 1
else:
return True
return False
return False
# @lc code=end
| 27.294118
| 91
| 0.47814
|
ca56dff3a343009c12c245791b2ce1fdecc1b392
| 1,029
|
py
|
Python
|
tasks/password_validator/05_Viktoriya_Vasileva/password.py
|
valentinvarbanov/software_engineering_2021
|
33ece7d1e4889840621626e30f975d6cfd370b38
|
[
"MIT"
] | 7
|
2021-10-05T14:54:55.000Z
|
2022-02-16T06:07:12.000Z
|
tasks/password_validator/05_Viktoriya_Vasileva/password.py
|
valentinvarbanov/software_engineering_2021
|
33ece7d1e4889840621626e30f975d6cfd370b38
|
[
"MIT"
] | 2
|
2021-12-04T10:49:46.000Z
|
2022-02-28T06:09:06.000Z
|
tasks/password_validator/05_Viktoriya_Vasileva/password.py
|
valentinvarbanov/software_engineering_2021
|
33ece7d1e4889840621626e30f975d6cfd370b38
|
[
"MIT"
] | null | null | null |
import requests
def weak_passwords(password):
weak = ['12345', 'qwerty', 'password', 'asdf']
if password in weak:
return True
return False
def letters_password(password):
return any(c.isalpha() for c in password)
def special_letters_password(password):
special_characters = "?!*%$@" #""!@#$%^&*()-+?_=,<>/""
if any(c in special_characters for c in password):
return True
return False
def size_password(password):
if len(password) > 8:
return True
return False
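# Overall score: 0.0 for known-weak passwords, otherwise +0.25 for containing letters,
# +0.25 for special characters and +0.5 for length greater than 8 (maximum 1.0).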
def check_password(password):
if weak_passwords(password):
return 0.0
complexity = 0.0
if letters_password(password):
complexity += 0.25
if special_letters_password(password):
complexity += 0.25
if size_password(password):
complexity += 0.5
return complexity
response = requests.get('https://passwordinator.herokuapp.com/generate').json()
# print(response)
password = response['data']
print(password)
print(check_password(password))
| 19.415094
| 79
| 0.656948
|
128b652efdd4f9a115ef80c22578f784dcf1ebfe
| 5,520
|
py
|
Python
|
Code/automation/SimTest.py
|
oahul14/ShockNet
|
cbc72d1a4f2d5000b9374a4e929282034171491a
|
[
"MIT"
] | 4
|
2020-10-03T07:58:10.000Z
|
2021-12-13T13:43:02.000Z
|
Code/automation/SimTest.py
|
oahul14/ShockNet
|
cbc72d1a4f2d5000b9374a4e929282034171491a
|
[
"MIT"
] | 4
|
2021-06-08T22:20:50.000Z
|
2022-03-12T00:46:57.000Z
|
Code/automation/SimTest.py
|
oahul14/ShockNet
|
cbc72d1a4f2d5000b9374a4e929282034171491a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import time
import random
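# Note: Model, Quantity, ExtAPI and SelectionTypeEnum are not imported here; they are assumed
# to be globals injected by the ANSYS Mechanical scripting environment this script runs in.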
class GeoDomain(object):
def __init__(self):
"""
Initialise a GeoDomain wrapping the first body of the active ANSYS model.
The constructor takes no parameters: it grabs the body from Model.Geometry,
stores its wrapper, edges and edge lengths, and sets the node count to zero.
Returns
-------
None.
"""
self.body = Model.Geometry.Children[0].Children[0]
self.bodyWrapper = self.body.GetGeoBody()
self.edges = self.bodyWrapper.Edges
self.edge_lengths = [edge.Length for edge in self.edges]
self.nodes = 0
super(GeoDomain, self).__init__()
def meshing(self, size):
"""
Define mesh size and generate meshed domain
Parameters
----------
size :
Indicating the element size
Returns
-------
None.
"""
Model.Mesh.ElementSize = Quantity(str(size)+"[m]")
Model.Mesh.GenerateMesh()
self.nodes = Model.Mesh.Nodes
class PlaneDomain(GeoDomain):
def __init__(self):
"""
2D domain
Returns
-------
None.
"""
super(PlaneDomain, self).__init__()
class SolidDomain(GeoDomain):
def __init__(self):
"""
3D domain
Returns
-------
None.
"""
super(SolidDomain, self).__init__()
self.faces = self.bodyWrapper.Faces
class SimTest(object):
def __init__(self, geo_domain):
"""
Parameters
----------
geo_domain : GeoDomain
The geometry domain (2D or 3D) on which the timing tests are run.
Returns
-------
None.
"""
self.geo = geo_domain
self.body = geo_domain.body
self.bodyWrapper = geo_domain.bodyWrapper
self.nodes = None
self.curr_timing = 0
self.timings = []
self.nodelist = []
super(SimTest, self).__init__()
class Test2d(SimTest):
def __init__(self, geo_domain, sizes):
self.sizes = sizes
super(Test2d, self).__init__(geo_domain)
def random_2D_simple_simulation(self):
"""
Run a simple 2D simulation by creating a pressure (randomised) and a fixed support
Returns
-------
None.
"""
self.body.Thickness = Quantity("0.00000000001 [m]")
p_edge = self.geo.edges[1]
s_edge = self.geo.edges[3]
sln = ExtAPI.SelectionManager.CreateSelectionInfo(SelectionTypeEnum.GeometryEntities)
p_sln = sln
p_sln.Entities = [p_edge]
pressure = Model.Analyses[0].AddPressure()
pressure.Location = p_sln
# pressure random from 0 to 10 (float -> string)
rand_p = str(random.random() * random.randint(1, 10))
pressure.Magnitude.Output.DiscreteValues = [Quantity(rand_p+"[Pa]")]
support = Model.Analyses[0].AddFixedSupport()
s_sln = sln
s_sln.Entities = [s_edge]
support.Location = s_sln
Model.Analyses[0].Solution.AddEquivalentStress()
Model.Solve()
def time_2d(self):
for size in self.sizes:
self.geo.meshing(size)
start = time.time()
for i in range(10):
self.random_2D_simple_simulation()
end = time.time()
self.curr_timing = (end - start)/5
print("Node NO.: %s, Timing: %s" % (self.geo.nodes, self.curr_timing))
self.timings.append(self.curr_timing)
self.nodelist.append(self.geo.nodes)
class Test3d(SimTest):
def __init__(self, geo_domain, sizes):
self.sizes = sizes
super(Test3d, self).__init__(geo_domain)
def random_3D_simple_simulation(self):
"""
Run a simple 3D simulation by creating a pressure (randomised) and a fixed support
Returns
-------
None.
"""
p_face = self.geo.faces[1]
s_face = self.geo.faces[3]
sln = ExtAPI.SelectionManager.CreateSelectionInfo(SelectionTypeEnum.GeometryEntities)
p_sln = sln
p_sln.Entities = [p_face]
pressure = Model.Analyses[0].AddPressure()
pressure.Location = p_sln
# pressure random from 0 to 10 (float -> string)
rand_p = str(random.random() * random.randint(1, 10))
pressure.Magnitude.Output.DiscreteValues = [Quantity(rand_p+"[Pa]")]
support = Model.Analyses[0].AddFixedSupport()
s_sln = sln
s_sln.Entities = [s_face]
support.Location = s_sln
Model.Analyses[0].Solution.AddEquivalentStress()
Model.Solve()
def time_3d(self):
for size in self.sizes:
self.geo.meshing(size)
start = time.time()
for i in range(1):
self.random_3D_simple_simulation()
end = time.time()
self.curr_timing = (end - start)/5
print("Node NO.: %s, Timing: %s" % (self.geo.nodes, self.curr_timing))
self.timings.append(self.curr_timing)
self.nodelist.append(self.geo.nodes)
solid = SolidDomain()
sizes = [4e-3]#, 2e-3, 1e-3, 8e-4, 6e-4]
test = Test3d(solid, sizes)
test.time_3d()
| 30
| 93
| 0.555254
|
0d9be5504af9280485e42f63af5103fb494c3364
| 3,611
|
py
|
Python
|
src/server/lis.py
|
eugeneai/hw-cardio-nec
|
f145c4f9a7eda0854e1ae6dd6a1cf19e2b8d2431
|
[
"MIT"
] | null | null | null |
src/server/lis.py
|
eugeneai/hw-cardio-nec
|
f145c4f9a7eda0854e1ae6dd6a1cf19e2b8d2431
|
[
"MIT"
] | null | null | null |
src/server/lis.py
|
eugeneai/hw-cardio-nec
|
f145c4f9a7eda0854e1ae6dd6a1cf19e2b8d2431
|
[
"MIT"
] | 1
|
2021-12-15T23:06:28.000Z
|
2021-12-15T23:06:28.000Z
|
import requests as req
import pics
# SERVER = "http://192.168.22.4:5000/"
SERVER = "http://192.168.0.114:5000/"
H3 = SERVER+'h3/'
SPI = H3+'spi'
FORTH = H3+'forth'
SCREND = 9
class Colors():
pass
class Base():
def __init__(self, endpoint):
self.endpoint = endpoint
def post(self, abytesarray=None, typ='str', json=None):
if abytesarray is not None:
json = {'type': typ, 'message': abytesarray}
elif json is not None:
pass
else:
raise ValueError('wrong parameter combination')
resp = req.post(self.endpoint, json=json)
if resp.ok:
return resp.json()
else:
resp.raise_for_status()
class CBuf(Base):
def sendstr(self, msg):
self.post(msg)
def sendts(self, abytesarray):
self.post(abytesarray, typ='bytes')
CBUF = CBuf(SPI)
CYAN = (0, 255, 255)
RED = (255, 0, 0)
YELLOW = (255, 255, 0)
GREEN = (0, 255, 0)
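# TFTInterface drives the remote TFT display by posting Forth commands to the FORTH endpoint;
# note that setfg/setbg send the colour components inverted (255 - value) and in B, G, R order.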
class TFTInterface(Base):
def __init__(self, cbuf, vno=6):
self.vno = vno
self.cbuf = cbuf
super().__init__(FORTH)
# R, G, B
self.bg = (0, 0, 0)
fg = self.fg = Colors()
fg.basic = YELLOW
fg.patient = CYAN
# fg.condition = (0, 255, 255)
fg.condition = (0, 0, 255)
fg.lead = GREEN
def setfg(self, color):
(r, g, b) = color
self.cmd('{} {} {} tft-f!'.format(255-b, 255-g, 255-r))
def setbg(self, color):
(r, g, b) = color
self.cmd('{} {} {} tft-b!'.format(255-b, 255-g, 255-r))
def cmd(self, cmd):
assert (self.post(json={'command': cmd})['rc'] == 'OK')
def locate(self, x, y):
self.cmd('{} {} tft-locate'.format(y, x))
def type(self, s):
n = len(s)
self.cbuf.sendstr(s)
self.cmd('CBUF {} tft-type'.format(n))
def patient(self, name, id):
self.locate(0, 0)
self.setfg(self.fg.basic)
self.type('Patient:')
self.setfg(self.fg.patient)
self.type(name + ' ')
self.setfg(self.fg.basic)
self.type('ID:')
self.setfg(self.fg.patient)
self.type(str(id))
def condition(self, date, time, doctor):
self.locate(0, SCREND)
self.setfg(self.fg.basic)
self.type('Date:')
self.setfg(self.fg.condition)
self.type(str(date)+' ')
self.setfg(self.fg.basic)
self.type('Doctor:')
self.setfg(self.fg.condition)
self.type(doctor)
self.locate(0, SCREND-1)
self.setfg(self.fg.basic)
self.type('Time:')
self.setfg(self.fg.condition)
self.type(str(time))
def lead(self, n):
self.locate(0, n+1)
self.setfg(self.fg.basic)
self.type('V{}:'.format(n+1))
self.setfg(self.fg.lead)
self.graphics(list(pics.as_binary(n)))
def graphics(self, abytesarray):
self.cbuf.sendts(abytesarray)
self.cmd('CBUF {} tft-buf-draw'.format(len(abytesarray) // 4))
def draw(self, name, id, date, time, doctor):
self.setbg(self.bg)
self.patient(name, id)
self.condition(date, time, doctor)
for n in range(self.vno):
self.lead(n)
self.setfg(self.fg.basic)
def clear(self):
self.setbg(self.bg)
self.cmd('tft-clear')
if __name__ == '__main__':
# CBUF.sendstr('Linux & Forth Rule!')
# arr = pics.as_binary(0)
# CBUF.sendts(list(arr))
scr = TFTInterface(CBUF, 6)
# scr.clear()
scr.draw('Ivanov', 'IKT-181221', '2021-05-16', '13:15 GMT+8', 'Petrov')
| 24.903448
| 76
| 0.544448
|
332db8df4876467266d5f5df0381f8cd61d37fb5
| 955
|
py
|
Python
|
setup.py
|
yabirgb/simobility
|
aca8245d582a94a16c6bd0eed98344974f6dafb4
|
[
"MIT"
] | null | null | null |
setup.py
|
yabirgb/simobility
|
aca8245d582a94a16c6bd0eed98344974f6dafb4
|
[
"MIT"
] | null | null | null |
setup.py
|
yabirgb/simobility
|
aca8245d582a94a16c6bd0eed98344974f6dafb4
|
[
"MIT"
] | null | null | null |
from setuptools import setup
from simobility import __version__
setup(
name="simobility",
description="Lightweight mobility simulation for quick algorithm prototyping",
author="Oleksandr Lysenko",
author_email="sashkolysenko@gmail.com",
version=__version__,
packages=[
"simobility",
"simobility.core",
"simobility.routers",
"simobility.simulator",
],
python_requires=">=3.7.*",
install_requires=[
"pandas>=0.24.1",
"scipy>=1.2.1",
"haversine",
"geojson",
"transitions",
"geopandas",
"shapely",
"requests",
"pyarrow",
"pyyaml",
"h3"
],
extras_require={
"dev": [
"ipdb",
"jupyter",
"jupyterlab",
"flake8",
"nose",
"coverage",
"pytest",
"pip-tools",
]
},
zip_safe=False,
)
| 21.704545
| 82
| 0.506806
|
5118380035e938189b7eb20f0658afb95a85a391
| 1,390
|
py
|
Python
|
samples/use_cnbc_client.py
|
areed1192/finance-news-aggregator
|
6d66f44292565cf4ccfeab0983fce1dab90fd1ae
|
[
"MIT"
] | 49
|
2020-07-19T17:53:41.000Z
|
2022-03-27T10:36:36.000Z
|
samples/use_cnbc_client.py
|
webclinic017/finance-news-aggregator
|
6d66f44292565cf4ccfeab0983fce1dab90fd1ae
|
[
"MIT"
] | 1
|
2021-09-12T15:06:29.000Z
|
2021-09-12T15:06:29.000Z
|
samples/use_cnbc_client.py
|
webclinic017/finance-news-aggregator
|
6d66f44292565cf4ccfeab0983fce1dab90fd1ae
|
[
"MIT"
] | 18
|
2020-08-06T07:39:17.000Z
|
2022-01-16T14:59:28.000Z
|
from pprint import pprint
from finnews.client import News
from finnews.news_enum import cnbc_top_news
from finnews.news_enum import cnbc_tv_programs_asia
# Create a new instance of the News Client.
news_client = News()
# Grab the CNBC News Client.
cnbc_news_client = news_client.cnbc
# Grab the top news.
cbnc_top_news = cnbc_news_client.news_feed(
topic='top_news'
)
# Grab the top news, using enums.
cnbc_real_estate_news = cnbc_news_client.news_feed(
topic=cnbc_top_news.REAL_ESTATE
)
# Grab the investing news, from the Investment Feed.
cnbc_investing_news = cnbc_news_client.investing_feeds(
topic='investing'
)
# Grab the blog news, from the Blog Feed.
cnbc_blog_news = cnbc_news_client.blogs(
topic='charting_asia'
)
# Grab the video and tv news, from the Video & TV Feed.
cnbc_tv_and_video_news = cnbc_news_client.videos_and_tv(
topic='top_video'
)
# Grab the video and tv news, from the Europe News Feed.
cnbc_tv_europe_news = cnbc_news_client.tv_programs_europe(
topic='capital_connection'
)
# Grab the video and tv news, from the Asia News Feed.
cnbc_tv_asia_news = cnbc_news_client.tv_programs_asia(
topic=cnbc_tv_programs_asia.SQUAWK_BOX_ASIA
)
# Grab all the news feeds.
cnbc_all_news_feeds = cnbc_news_client.all_feeds()
# Save the data.
news_client.save_to_file(
content=cnbc_all_news_feeds,
file_name='cnbc_all_news_feeds'
)
| 24.821429
| 58
| 0.784173
|
fa1405bc4069e2aa4abb6a94195ea2230f27586a
| 91
|
py
|
Python
|
dnsQuery/apps.py
|
valarpirai/docker-demo
|
26e1238a4fdf9086cc518ac99d38821075dd2aa5
|
[
"MIT"
] | 1
|
2019-11-16T10:32:20.000Z
|
2019-11-16T10:32:20.000Z
|
dnsQuery/apps.py
|
valarpirai/docker-demo
|
26e1238a4fdf9086cc518ac99d38821075dd2aa5
|
[
"MIT"
] | 6
|
2020-03-24T15:30:02.000Z
|
2021-06-10T18:38:20.000Z
|
dnsQuery/apps.py
|
valarpirai/docker-demo
|
26e1238a4fdf9086cc518ac99d38821075dd2aa5
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class DnsqueryConfig(AppConfig):
name = 'dnsQuery'
| 15.166667
| 33
| 0.758242
|
7d82acc2b408bd1be4320c40b23cf2b8cf78a9c4
| 699
|
py
|
Python
|
oops_fhir/r4/value_set/v3_query_parameter_value.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
oops_fhir/r4/value_set/v3_query_parameter_value.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
oops_fhir/r4/value_set/v3_query_parameter_value.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
from oops_fhir.r4.code_system.v3_query_parameter_value import (
v3QueryParameterValue as v3QueryParameterValue_,
)
__all__ = ["v3QueryParameterValue"]
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class v3QueryParameterValue(v3QueryParameterValue_):
"""
v3 Code System QueryParameterValue
The domain of coded values used as parameters within QueryByParameter
queries.
Status: active - Version: 2018-08-12
http://terminology.hl7.org/ValueSet/v3-QueryParameterValue
"""
class Meta:
resource = _resource
| 21.84375
| 74
| 0.76681
|
f7e701a9e7fc983a8db743b79b3c408007f82241
| 2,865
|
py
|
Python
|
vision_transform_codes/tests/sparse_coding_2.py
|
spencerkent/vision-transform-codes
|
63258ce698e436ee3ce29def75c89337759fb98b
|
[
"BSD-3-Clause"
] | null | null | null |
vision_transform_codes/tests/sparse_coding_2.py
|
spencerkent/vision-transform-codes
|
63258ce698e436ee3ce29def75c89337759fb98b
|
[
"BSD-3-Clause"
] | null | null | null |
vision_transform_codes/tests/sparse_coding_2.py
|
spencerkent/vision-transform-codes
|
63258ce698e436ee3ce29def75c89337759fb98b
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Test: Sparse coding, fully connected, fista, cheap quadratic descent
"""
import _set_the_path
import math
import pickle
import torch
from training.sparse_coding import train_dictionary
from utils.dataset_generation import OneOutputDset
from utils import defaults
RUN_IDENTIFIER = '_testing_sc_2'
LOGS_STORED_HERE = defaults.logging_directory
TRAINING_SET_SIZE = 10000
VALIDATION_SET_SIZE = 5000
BATCH_SIZE = 1000
PATCH_HEIGHT = 16
PATCH_WIDTH = 16
CODE_SIZE = 1 * PATCH_HEIGHT*PATCH_WIDTH # critically sampled
NUM_EPOCHS = 1
iters_per_epoch = int(math.ceil(TRAINING_SET_SIZE / BATCH_SIZE))
trn_val_dsets = pickle.load(open(defaults.dataset_directory /
'vtc_testing/field_white_16x16.p', 'rb'))
SC_PARAMS = {
'mode': 'fully-connected',
'num_epochs': NUM_EPOCHS,
'code_inference_algorithm': 'fista',
'inference_param_schedule': {
0: {'sparsity_weight': 0.008, 'num_iters': 5}},
'dictionary_update_algorithm': 'sc_cheap_quadratic_descent',
'dict_update_param_schedule': {
0: {'stepsize': 0.1, 'num_iters': 1},
5*iters_per_epoch: {'stepsize': 0.05, 'num_iters': 1}},
# write various tensorboard logs on the following schedule:
'training_visualization_schedule': set([0, 10, 500]),
'reshaped_kernel_size': (PATCH_HEIGHT, PATCH_WIDTH),
# actually store all logs here:
'logging_folder_fullpath': LOGS_STORED_HERE / RUN_IDENTIFIER,
# checkpoint the dictionary on this interval
'checkpoint_schedule': set([iters_per_epoch,
(NUM_EPOCHS*iters_per_epoch)-1])}
SC_PARAMS['training_visualization_schedule'].update(set(
[iters_per_epoch*x for x in range(1, NUM_EPOCHS)]))
# Now initialize model and begin training
torch_device = torch.device('cuda:1')
# otherwise can put on 'cuda:0' or 'cpu'
# send ALL image patches to the GPU and wrap in a simple dataloader
image_patches_gpu_training = torch.utils.data.DataLoader(
OneOutputDset(torch.from_numpy(
trn_val_dsets['training']['patches']).to(torch_device)),
batch_size=BATCH_SIZE, shuffle=True)
image_patches_gpu_validation = torch.utils.data.DataLoader(
OneOutputDset(torch.from_numpy(
trn_val_dsets['validation']['patches']).to(torch_device)),
batch_size=BATCH_SIZE*10) # larger batches for validation data
# if data is too big to all fit on GPU, just omit .to(torch_device) above.
# Can also add num_workers=x to the DataLoader constructor
# create the dictionary Tensor on the GPU
sparse_coding_dictionary = torch.randn((CODE_SIZE, PATCH_HEIGHT*PATCH_WIDTH),
device=torch_device)
# start out the dictionaries with norm 1
sparse_coding_dictionary.div_(
sparse_coding_dictionary.norm(p=2, dim=1)[:, None])
train_dictionary(image_patches_gpu_training, image_patches_gpu_validation,
sparse_coding_dictionary, SC_PARAMS)
| 37.207792
| 77
| 0.742408
|
ad333a4eaea44bdaa6fd0f3b9687f2e04a567329
| 4,516
|
py
|
Python
|
src/api-service/__app__/proxy/__init__.py
|
CuteCutePanda/onefuzz
|
c71ce580bd6ef37e6e8f8ee7c9413a13c0abd695
|
[
"MIT"
] | 1
|
2021-12-20T14:48:40.000Z
|
2021-12-20T14:48:40.000Z
|
src/api-service/__app__/proxy/__init__.py
|
CuteCutePanda/onefuzz
|
c71ce580bd6ef37e6e8f8ee7c9413a13c0abd695
|
[
"MIT"
] | null | null | null |
src/api-service/__app__/proxy/__init__.py
|
CuteCutePanda/onefuzz
|
c71ce580bd6ef37e6e8f8ee7c9413a13c0abd695
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from typing import Optional
import azure.functions as func
from onefuzztypes.enums import ErrorCode, VmState
from onefuzztypes.models import Error
from onefuzztypes.requests import ProxyCreate, ProxyDelete, ProxyGet, ProxyReset
from onefuzztypes.responses import BoolResult, ProxyGetResult, ProxyInfo, ProxyList
from ..onefuzzlib.endpoint_authorization import call_if_user
from ..onefuzzlib.events import get_events
from ..onefuzzlib.proxy import Proxy
from ..onefuzzlib.proxy_forward import ProxyForward
from ..onefuzzlib.request import not_ok, ok, parse_request
from ..onefuzzlib.workers.scalesets import Scaleset
def get_result(proxy_forward: ProxyForward, proxy: Optional[Proxy]) -> ProxyGetResult:
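# Report the proxy IP only when the proxy VM is up and the forward appears in its heartbeat.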
forward = proxy_forward.to_forward()
if (
proxy is None
or proxy.state not in [VmState.running, VmState.extensions_launch]
or proxy.heartbeat is None
or forward not in proxy.heartbeat.forwards
):
return ProxyGetResult(forward=forward)
return ProxyGetResult(ip=proxy.ip, forward=forward)
def get(req: func.HttpRequest) -> func.HttpResponse:
request = parse_request(ProxyGet, req)
if isinstance(request, Error):
return not_ok(request, context="ProxyGet")
if (
request.scaleset_id is not None
and request.machine_id is not None
and request.dst_port is not None
):
scaleset = Scaleset.get_by_id(request.scaleset_id)
if isinstance(scaleset, Error):
return not_ok(scaleset, context="ProxyGet")
proxy = Proxy.get_or_create(scaleset.region)
forwards = ProxyForward.search_forward(
scaleset_id=request.scaleset_id,
machine_id=request.machine_id,
dst_port=request.dst_port,
)
if not forwards:
return not_ok(
Error(
code=ErrorCode.INVALID_REQUEST,
errors=["no forwards for scaleset and node"],
),
context="debug_proxy get",
)
return ok(get_result(forwards[0], proxy))
else:
proxies = [
ProxyInfo(region=x.region, proxy_id=x.proxy_id, state=x.state)
for x in Proxy.search()
]
return ok(ProxyList(proxies=proxies))
def post(req: func.HttpRequest) -> func.HttpResponse:
request = parse_request(ProxyCreate, req)
if isinstance(request, Error):
return not_ok(request, context="ProxyCreate")
scaleset = Scaleset.get_by_id(request.scaleset_id)
if isinstance(scaleset, Error):
return not_ok(scaleset, context="debug_proxy create")
forward = ProxyForward.update_or_create(
region=scaleset.region,
scaleset_id=scaleset.scaleset_id,
machine_id=request.machine_id,
dst_port=request.dst_port,
duration=request.duration,
)
if isinstance(forward, Error):
return not_ok(forward, context="debug_proxy create")
proxy = Proxy.get_or_create(scaleset.region)
if proxy:
forward.proxy_id = proxy.proxy_id
forward.save()
proxy.save_proxy_config()
return ok(get_result(forward, proxy))
def patch(req: func.HttpRequest) -> func.HttpResponse:
request = parse_request(ProxyReset, req)
if isinstance(request, Error):
return not_ok(request, context="ProxyReset")
proxy = Proxy.get(request.region)
if proxy is not None:
proxy.state = VmState.stopping
proxy.save()
return ok(BoolResult(result=True))
return ok(BoolResult(result=False))
def delete(req: func.HttpRequest) -> func.HttpResponse:
request = parse_request(ProxyDelete, req)
if isinstance(request, Error):
return not_ok(request, context="debug_proxy delete")
regions = ProxyForward.remove_forward(
scaleset_id=request.scaleset_id,
machine_id=request.machine_id,
dst_port=request.dst_port,
)
for region in regions:
proxy = Proxy.get_or_create(region)
if proxy:
proxy.save_proxy_config()
return ok(BoolResult(result=True))
def main(req: func.HttpRequest, dashboard: func.Out[str]) -> func.HttpResponse:
methods = {"GET": get, "POST": post, "DELETE": delete, "PATCH": patch}
method = methods[req.method]
result = call_if_user(req, method)
events = get_events()
if events:
dashboard.set(events)
return result
| 32.028369
| 86
| 0.678255
|
9f5083a51e0f3e0de9a672079f151e9b84efabeb
| 35,740
|
py
|
Python
|
external/devlib/devlib/utils/android.py
|
douglas-raillard-arm/lisa
|
147906063550728400aa7914c1fe568228dcf3bf
|
[
"Apache-2.0"
] | 159
|
2016-01-25T11:08:39.000Z
|
2022-03-28T05:20:41.000Z
|
external/devlib/devlib/utils/android.py
|
douglas-raillard-arm/lisa
|
147906063550728400aa7914c1fe568228dcf3bf
|
[
"Apache-2.0"
] | 656
|
2016-01-25T11:16:56.000Z
|
2022-03-23T16:03:28.000Z
|
external/devlib/devlib/utils/android.py
|
ARM-software/lisa
|
3c39388f22d2531b5b3f2159e2f7641dee99bd46
|
[
"Apache-2.0"
] | 116
|
2016-01-25T12:06:31.000Z
|
2022-03-28T08:43:28.000Z
|
# Copyright 2013-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Utility functions for working with Android devices through adb.
"""
# pylint: disable=E1103
import glob
import logging
import os
import pexpect
import re
import subprocess
import sys
import tempfile
import time
import uuid
import zipfile
from collections import defaultdict
from io import StringIO
from lxml import etree
try:
from shlex import quote
except ImportError:
from pipes import quote
from devlib.exception import TargetTransientError, TargetStableError, HostError, TargetTransientCalledProcessError, TargetStableCalledProcessError
from devlib.utils.misc import check_output, which, ABI_MAP, redirect_streams, get_subprocess
from devlib.connection import ConnectionBase, AdbBackgroundCommand, PopenBackgroundCommand, PopenTransferManager
logger = logging.getLogger('android')
MAX_ATTEMPTS = 5
AM_START_ERROR = re.compile(r"Error: Activity.*")
AAPT_BADGING_OUTPUT = re.compile(r"no dump ((file)|(apk)) specified", re.IGNORECASE)
# See:
# http://developer.android.com/guide/topics/manifest/uses-sdk-element.html#ApiLevels
ANDROID_VERSION_MAP = {
29: 'Q',
28: 'PIE',
27: 'OREO_MR1',
26: 'OREO',
25: 'NOUGAT_MR1',
24: 'NOUGAT',
23: 'MARSHMALLOW',
22: 'LOLLYPOP_MR1',
21: 'LOLLYPOP',
20: 'KITKAT_WATCH',
19: 'KITKAT',
18: 'JELLY_BEAN_MR2',
17: 'JELLY_BEAN_MR1',
16: 'JELLY_BEAN',
15: 'ICE_CREAM_SANDWICH_MR1',
14: 'ICE_CREAM_SANDWICH',
13: 'HONEYCOMB_MR2',
12: 'HONEYCOMB_MR1',
11: 'HONEYCOMB',
10: 'GINGERBREAD_MR1',
9: 'GINGERBREAD',
8: 'FROYO',
7: 'ECLAIR_MR1',
6: 'ECLAIR_0_1',
5: 'ECLAIR',
4: 'DONUT',
3: 'CUPCAKE',
2: 'BASE_1_1',
1: 'BASE',
}
# See https://developer.android.com/reference/android/content/Intent.html#setFlags(int)
INTENT_FLAGS = {
'ACTIVITY_NEW_TASK' : 0x10000000,
'ACTIVITY_CLEAR_TASK' : 0x00008000
}
# Initialized in functions near the bottom of the file
android_home = None
platform_tools = None
adb = None
aapt = None
aapt_version = None
fastboot = None
class AndroidProperties(object):
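"""Parse an Android property dump of '[key]: [value]' lines into dict/attribute-style lookups."""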
def __init__(self, text):
self._properties = {}
self.parse(text)
def parse(self, text):
self._properties = dict(re.findall(r'\[(.*?)\]:\s+\[(.*?)\]', text))
def iteritems(self):
return iter(self._properties.items())
def __iter__(self):
return iter(self._properties)
def __getattr__(self, name):
return self._properties.get(name)
__getitem__ = __getattr__
class AdbDevice(object):
def __init__(self, name, status):
self.name = name
self.status = status
# pylint: disable=undefined-variable
def __cmp__(self, other):
if isinstance(other, AdbDevice):
return cmp(self.name, other.name)
else:
return cmp(self.name, other)
def __str__(self):
return 'AdbDevice({}, {})'.format(self.name, self.status)
__repr__ = __str__
class ApkInfo(object):
version_regex = re.compile(r"name='(?P<name>[^']+)' versionCode='(?P<vcode>[^']+)' versionName='(?P<vname>[^']+)'")
name_regex = re.compile(r"name='(?P<name>[^']+)'")
permission_regex = re.compile(r"name='(?P<permission>[^']+)'")
activity_regex = re.compile(r'\s*A:\s*android:name\(0x\d+\)=".(?P<name>\w+)"')
def __init__(self, path=None):
self.path = path
self.package = None
self.activity = None
self.label = None
self.version_name = None
self.version_code = None
self.native_code = None
self.permissions = []
self._apk_path = None
self._activities = None
self._methods = None
if path:
self.parse(path)
# pylint: disable=too-many-branches
def parse(self, apk_path):
_check_env()
output = self._run([aapt, 'dump', 'badging', apk_path])
for line in output.split('\n'):
if line.startswith('application-label:'):
self.label = line.split(':')[1].strip().replace('\'', '')
elif line.startswith('package:'):
match = self.version_regex.search(line)
if match:
self.package = match.group('name')
self.version_code = match.group('vcode')
self.version_name = match.group('vname')
elif line.startswith('launchable-activity:'):
match = self.name_regex.search(line)
self.activity = match.group('name')
elif line.startswith('native-code'):
apk_abis = [entry.strip() for entry in line.split(':')[1].split("'") if entry.strip()]
mapped_abis = []
for apk_abi in apk_abis:
found = False
for abi, architectures in ABI_MAP.items():
if apk_abi in architectures:
mapped_abis.append(abi)
found = True
break
if not found:
mapped_abis.append(apk_abi)
self.native_code = mapped_abis
elif line.startswith('uses-permission:'):
match = self.permission_regex.search(line)
if match:
self.permissions.append(match.group('permission'))
else:
pass # not interested
self._apk_path = apk_path
self._activities = None
self._methods = None
@property
def activities(self):
if self._activities is None:
cmd = [aapt, 'dump', 'xmltree', self._apk_path]
if aapt_version == 2:
cmd += ['--file']
cmd += ['AndroidManifest.xml']
matched_activities = self.activity_regex.finditer(self._run(cmd))
self._activities = [m.group('name') for m in matched_activities]
return self._activities
@property
def methods(self):
if self._methods is None:
# Only try to extract once
self._methods = []
with tempfile.TemporaryDirectory() as tmp_dir:
with zipfile.ZipFile(self._apk_path, 'r') as z:
try:
extracted = z.extract('classes.dex', tmp_dir)
except KeyError:
return []
dexdump = os.path.join(os.path.dirname(aapt), 'dexdump')
command = [dexdump, '-l', 'xml', extracted]
dump = self._run(command)
# Dexdump from build tools v30.0.X does not seem to produce
# valid xml from certain APKs so ignore errors and attempt to recover.
parser = etree.XMLParser(encoding='utf-8', recover=True)
xml_tree = etree.parse(StringIO(dump), parser)
package = next((i for i in xml_tree.iter('package')
if i.attrib['name'] == self.package), None)
self._methods = [(meth.attrib['name'], klass.attrib['name'])
for klass in package.iter('class')
for meth in klass.iter('method')] if package else []
return self._methods
def _run(self, command):
logger.debug(' '.join(command))
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if sys.version_info[0] == 3:
output = output.decode(sys.stdout.encoding or 'utf-8', 'replace')
except subprocess.CalledProcessError as e:
raise HostError('Error while running "{}":\n{}'
.format(command, e.output))
return output
class AdbConnection(ConnectionBase):
# maintains the count of parallel active connections to a device, so that
# adb disconnect is not invoked until all connections are closed
active_connections = defaultdict(int)
# Track connected as root status per device
_connected_as_root = defaultdict(lambda: None)
default_timeout = 10
ls_command = 'ls'
su_cmd = 'su -c {}'
@property
def name(self):
return self.device
@property
def connected_as_root(self):
if self._connected_as_root[self.device] is None:
result = self.execute('id')
self._connected_as_root[self.device] = 'uid=0(' in result
return self._connected_as_root[self.device]
@connected_as_root.setter
def connected_as_root(self, state):
self._connected_as_root[self.device] = state
# pylint: disable=unused-argument
def __init__(self, device=None, timeout=None, platform=None, adb_server=None,
adb_as_root=False, connection_attempts=MAX_ATTEMPTS,
poll_transfers=False,
start_transfer_poll_delay=30,
total_transfer_timeout=3600,
transfer_poll_period=30,):
super().__init__()
self.timeout = timeout if timeout is not None else self.default_timeout
if device is None:
device = adb_get_device(timeout=timeout, adb_server=adb_server)
self.device = device
self.adb_server = adb_server
self.adb_as_root = adb_as_root
self.poll_transfers = poll_transfers
if poll_transfers:
transfer_opts = {'start_transfer_poll_delay': start_transfer_poll_delay,
'total_timeout': total_transfer_timeout,
'poll_period': transfer_poll_period,
}
self.transfer_mgr = PopenTransferManager(self, **transfer_opts) if poll_transfers else None
if self.adb_as_root:
self.adb_root(enable=True)
adb_connect(self.device, adb_server=self.adb_server, attempts=connection_attempts)
AdbConnection.active_connections[self.device] += 1
self._setup_ls()
self._setup_su()
def push(self, sources, dest, timeout=None):
return self._push_pull('push', sources, dest, timeout)
def pull(self, sources, dest, timeout=None):
return self._push_pull('pull', sources, dest, timeout)
def _push_pull(self, action, sources, dest, timeout):
sources = list(sources)
paths = sources + [dest]
# Quote twice to avoid expansion by host shell, then ADB globbing
do_quote = lambda x: quote(glob.escape(x))
paths = ' '.join(map(do_quote, paths))
command = "{} {}".format(action, paths)
if timeout or not self.poll_transfers:
adb_command(self.device, command, timeout=timeout, adb_server=self.adb_server)
else:
with self.transfer_mgr.manage(sources, dest, action):
bg_cmd = adb_command_background(self.device, command, adb_server=self.adb_server)
self.transfer_mgr.set_transfer_and_wait(bg_cmd)
# pylint: disable=unused-argument
def execute(self, command, timeout=None, check_exit_code=False,
as_root=False, strip_colors=True, will_succeed=False):
try:
return adb_shell(self.device, command, timeout, check_exit_code,
as_root, adb_server=self.adb_server, su_cmd=self.su_cmd)
except subprocess.CalledProcessError as e:
cls = TargetTransientCalledProcessError if will_succeed else TargetStableCalledProcessError
raise cls(
e.returncode,
command,
e.output,
e.stderr,
)
except TargetStableError as e:
if will_succeed:
raise TargetTransientError(e)
else:
raise
def background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False):
bg_cmd = self._background(command, stdout, stderr, as_root)
self._current_bg_cmds.add(bg_cmd)
return bg_cmd
def _background(self, command, stdout, stderr, as_root):
adb_shell, pid = adb_background_shell(self, command, stdout, stderr, as_root)
bg_cmd = AdbBackgroundCommand(
conn=self,
adb_popen=adb_shell,
pid=pid,
as_root=as_root
)
return bg_cmd
def _close(self):
AdbConnection.active_connections[self.device] -= 1
if AdbConnection.active_connections[self.device] <= 0:
if self.adb_as_root:
self.adb_root(enable=False)
adb_disconnect(self.device, self.adb_server)
del AdbConnection.active_connections[self.device]
def cancel_running_command(self):
# adbd multiplexes commands so that they don't interfere with each
# other, so there is no need to explicitly cancel a running command
# before the next one can be issued.
pass
def adb_root(self, enable=True):
cmd = 'root' if enable else 'unroot'
output = adb_command(self.device, cmd, timeout=30, adb_server=self.adb_server)
if 'cannot run as root in production builds' in output:
raise TargetStableError(output)
AdbConnection._connected_as_root[self.device] = enable
def wait_for_device(self, timeout=30):
adb_command(self.device, 'wait-for-device', timeout, self.adb_server)
def reboot_bootloader(self, timeout=30):
adb_command(self.device, 'reboot-bootloader', timeout, self.adb_server)
# Again, we need to handle boards where the default output format from ls is
# single column *and* boards where the default output is multi-column.
# We need to do this purely because the '-1' option causes errors on older
# versions of the ls tool in Android pre-v7.
def _setup_ls(self):
command = "shell '(ls -1); echo \"\n$?\"'"
try:
output = adb_command(self.device, command, timeout=self.timeout, adb_server=self.adb_server)
except subprocess.CalledProcessError as e:
raise HostError(
'Failed to set up ls command on Android device. Output:\n'
+ e.output)
lines = output.splitlines()
retval = lines[-1].strip()
if int(retval) == 0:
self.ls_command = 'ls -1'
else:
self.ls_command = 'ls'
logger.debug("ls command is set to {}".format(self.ls_command))
def _setup_su(self):
try:
# Try the new style of invoking `su`
self.execute('ls', timeout=self.timeout, as_root=True,
check_exit_code=True)
# If that fails, assume either the old style of su invocation or an unrooted device.
# Here we assume the old style; root status will be verified later.
except (TargetStableError, TargetTransientError, TimeoutError):
self.su_cmd = 'echo {} | su'
logger.debug("su command is set to {}".format(quote(self.su_cmd)))
def fastboot_command(command, timeout=None, device=None):
_check_env()
target = '-s {}'.format(quote(device)) if device else ''
full_command = 'fastboot {} {}'.format(target, command)
logger.debug(full_command)
output, _ = check_output(full_command, timeout, shell=True)
return output
def fastboot_flash_partition(partition, path_to_image):
command = 'flash {} {}'.format(quote(partition), quote(path_to_image))
fastboot_command(command)
def adb_get_device(timeout=None, adb_server=None):
"""
Returns the serial number of a connected android device.
If there are more than one device connected to the machine, or it could not
find any device connected, :class:`devlib.exceptions.HostError` is raised.
"""
# TODO this is a hacky way to issue an adb command to all listed devices
# Ensure server is started so the 'daemon started successfully' message
# doesn't confuse the parsing below
adb_command(None, 'start-server', adb_server=adb_server)
# The output of calling adb devices consists of a heading line then
# a list of the devices separated by newlines.
# The last line is a blank line. In other words, if there is a device found
# then the output length is 2 + (1 for each device)
start = time.time()
while True:
output = adb_command(None, "devices", adb_server=adb_server).splitlines() # pylint: disable=E1103
output_length = len(output)
if output_length == 3:
# output[1] is the 2nd line in the output which has the device name
# Splitting the line by '\t' gives a list of two indexes, which has
# device serial in 0 number and device type in 1.
return output[1].split('\t')[0]
elif output_length > 3:
message = '{} Android devices found; either explicitly specify ' +\
'the device you want, or make sure only one is connected.'
raise HostError(message.format(output_length - 2))
else:
if timeout < time.time() - start:
raise HostError('No device is connected and available')
time.sleep(1)
def adb_connect(device, timeout=None, attempts=MAX_ATTEMPTS, adb_server=None):
_check_env()
tries = 0
output = None
while tries <= attempts:
tries += 1
if device:
if "." in device: # Connect is required only for ADB-over-IP
# ADB does not automatically remove a network device from its
# devices list when the connection is broken by the remote, so the
# adb connection may have gone "stale", resulting in adb blocking
# indefinitely when making calls to the device. To avoid this,
# always disconnect first.
adb_disconnect(device, adb_server)
adb_cmd = get_adb_command(None, 'connect', adb_server)
command = '{} {}'.format(adb_cmd, quote(device))
logger.debug(command)
output, _ = check_output(command, shell=True, timeout=timeout)
if _ping(device, adb_server):
break
time.sleep(10)
else: # did not connect to the device
message = 'Could not connect to {}'.format(device or 'a device')
if output:
message += '; got: "{}"'.format(output)
raise HostError(message)
def adb_disconnect(device, adb_server=None):
_check_env()
if not device:
return
if ":" in device and device in adb_list_devices(adb_server):
adb_cmd = get_adb_command(None, 'disconnect', adb_server)
command = "{} {}".format(adb_cmd, device)
logger.debug(command)
retval = subprocess.call(command, stdout=open(os.devnull, 'wb'), shell=True)
if retval:
raise TargetTransientError('"{}" returned {}'.format(command, retval))
def _ping(device, adb_server=None):
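# Probe the device with a trivial shell command; returns True if it responds.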
_check_env()
adb_cmd = get_adb_command(device, 'shell', adb_server)
command = "{} {}".format(adb_cmd, quote('ls /data/local/tmp > /dev/null'))
logger.debug(command)
result = subprocess.call(command, stderr=subprocess.PIPE, shell=True)
if not result: # pylint: disable=simplifiable-if-statement
return True
else:
return False
# pylint: disable=too-many-locals
def adb_shell(device, command, timeout=None, check_exit_code=False,
as_root=False, adb_server=None, su_cmd='su -c {}'): # NOQA
_check_env()
# On older combinations of ADB/Android versions, the adb host command always
# exits with 0 if it was able to run the command on the target, even if the
# command failed (https://code.google.com/p/android/issues/detail?id=3254).
# Homogenise this behaviour by running the command then echoing the exit
# code of the executed command itself.
command = r'({}); echo "\n$?"'.format(command)
parts = ['adb']
if adb_server is not None:
parts += ['-H', adb_server]
if device is not None:
parts += ['-s', device]
parts += ['shell',
command if not as_root else su_cmd.format(quote(command))]
logger.debug(' '.join(quote(part) for part in parts))
try:
raw_output, error = check_output(parts, timeout, shell=False)
except subprocess.CalledProcessError as e:
raise TargetStableError(str(e))
if raw_output:
try:
output, exit_code, _ = raw_output.replace('\r\n', '\n').replace('\r', '\n').rsplit('\n', 2)
except ValueError:
exit_code, _ = raw_output.replace('\r\n', '\n').replace('\r', '\n').rsplit('\n', 1)
output = ''
else: # raw_output is empty
exit_code = '969696' # just because
output = ''
if check_exit_code:
exit_code = exit_code.strip()
re_search = AM_START_ERROR.findall(output)
if exit_code.isdigit():
exit_code = int(exit_code)
if exit_code:
raise subprocess.CalledProcessError(
exit_code,
command,
output,
error,
)
elif re_search:
message = 'Could not start activity; got the following:\n{}'
raise TargetStableError(message.format(re_search[0]))
else: # not all digits
if re_search:
message = 'Could not start activity; got the following:\n{}'
raise TargetStableError(message.format(re_search[0]))
else:
message = 'adb has returned early; did not get an exit code. '\
'Was kill-server invoked?\nOUTPUT:\n-----\n{}\n'\
'-----\nSTDERR:\n-----\n{}\n-----'
raise TargetTransientError(message.format(raw_output, error))
return '\n'.join(x for x in (output, error) if x)
def adb_background_shell(conn, command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
as_root=False):
"""Runs the specified command in a subprocess, returning the the Popen object."""
device = conn.device
adb_server = conn.adb_server
_check_env()
stdout, stderr, command = redirect_streams(stdout, stderr, command)
if as_root:
command = 'echo {} | su'.format(quote(command))
# Attach a unique UUID to the command line so it can be looked for without
# any ambiguity with ps
uuid_ = uuid.uuid4().hex
uuid_var = 'BACKGROUND_COMMAND_UUID={}'.format(uuid_)
command = "{} sh -c {}".format(uuid_var, quote(command))
adb_cmd = get_adb_command(device, 'shell', adb_server)
full_command = '{} {}'.format(adb_cmd, quote(command))
logger.debug(full_command)
p = subprocess.Popen(full_command, stdout=stdout, stderr=stderr, stdin=subprocess.PIPE, shell=True)
# Out of band PID lookup, to avoid conflicting needs with stdout redirection
find_pid = '{} ps -A -o pid,args | grep {}'.format(conn.busybox, quote(uuid_var))
ps_out = conn.execute(find_pid)
pids = [
int(line.strip().split(' ', 1)[0])
for line in ps_out.splitlines()
]
# The line we are looking for is the first one, since it was started before
# any look up command
pid = sorted(pids)[0]
return (p, pid)
def adb_kill_server(timeout=30, adb_server=None):
adb_command(None, 'kill-server', timeout, adb_server)
def adb_list_devices(adb_server=None):
output = adb_command(None, 'devices', adb_server=adb_server)
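# Illustrative 'adb devices' output being parsed here (serial is hypothetical);
# the header line does not split into exactly two fields and is skipped below:
#   List of devices attached
#   0123456789ABCDEF    device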
devices = []
for line in output.splitlines():
parts = [p.strip() for p in line.split()]
if len(parts) == 2:
devices.append(AdbDevice(*parts))
return devices
def get_adb_command(device, command, adb_server=None):
_check_env()
device_string = ""
if adb_server is not None:
device_string = ' -H {}'.format(adb_server)
device_string += ' -s {}'.format(device) if device else ''
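# Illustrative result (hypothetical serial and server): for
# device='0123456789ABCDEF', command='shell', adb_server='127.0.0.1' this
# returns "adb -H 127.0.0.1 -s 0123456789ABCDEF shell".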
return "adb{} {}".format(device_string, command)
def adb_command(device, command, timeout=None, adb_server=None):
full_command = get_adb_command(device, command, adb_server)
logger.debug(full_command)
output, _ = check_output(full_command, timeout, shell=True)
return output
def adb_command_background(device, command, adb_server=None):
full_command = get_adb_command(device, command, adb_server)
logger.debug(full_command)
proc = get_subprocess(full_command, shell=True)
cmd = PopenBackgroundCommand(proc)
return cmd
def grant_app_permissions(target, package):
"""
Grant an app all the permissions it may ask for
"""
dumpsys = target.execute('dumpsys package {}'.format(package))
permissions = re.search(
r'requested permissions:\s*(?P<permissions>(android.permission.+\s*)+)', dumpsys
)
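# Illustrative excerpt of the dumpsys output the regular expression above is
# expected to match (permission names are just examples):
#   requested permissions:
#     android.permission.INTERNET
#     android.permission.WAKE_LOCK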
if permissions is None:
return
permissions = permissions.group('permissions').replace(" ", "").splitlines()
for permission in permissions:
try:
target.execute('pm grant {} {}'.format(package, permission))
except TargetStableError:
logger.debug('Cannot grant {}'.format(permission))
# Messy environment initialisation stuff...
class _AndroidEnvironment(object):
def __init__(self):
self.android_home = None
self.platform_tools = None
self.build_tools = None
self.adb = None
self.aapt = None
self.aapt_version = None
self.fastboot = None
def _initialize_with_android_home(env):
logger.debug('Using ANDROID_HOME from the environment.')
env.android_home = android_home
env.platform_tools = os.path.join(android_home, 'platform-tools')
os.environ['PATH'] = env.platform_tools + os.pathsep + os.environ['PATH']
_init_common(env)
return env
def _initialize_without_android_home(env):
adb_full_path = which('adb')
if adb_full_path:
env.adb = 'adb'
else:
raise HostError('ANDROID_HOME is not set and adb is not in PATH. '
'Have you installed Android SDK?')
logger.debug('Discovering ANDROID_HOME from adb path.')
env.platform_tools = os.path.dirname(adb_full_path)
env.android_home = os.path.dirname(env.platform_tools)
_init_common(env)
return env
def _init_common(env):
_discover_build_tools(env)
_discover_aapt(env)
def _discover_build_tools(env):
logger.debug('ANDROID_HOME: {}'.format(env.android_home))
build_tools_directory = os.path.join(env.android_home, 'build-tools')
if os.path.isdir(build_tools_directory):
env.build_tools = build_tools_directory
def _check_supported_aapt2(binary):
# At time of writing the version argument of aapt2 is not helpful as
# the output is only a placeholder that does not distinguish between versions
# with and without support for badging. Unfortunately aapt has been
# deprecated and fails to parse some valid apks so we will try to favour
# aapt2 if possible else will fall back to aapt.
# Try to execute the badging command and check if we get an expected error
# message as opposed to an unknown command error to determine if we have a
# suitable version.
cmd = '{} dump badging'.format(binary)
result = subprocess.run(cmd.encode('utf-8'), shell=True, stderr=subprocess.PIPE)
supported = bool(AAPT_BADGING_OUTPUT.search(result.stderr.decode('utf-8')))
msg = 'Found a {} aapt2 binary at: {}'
logger.debug(msg.format('supported' if supported else 'unsupported', binary))
return supported
def _discover_aapt(env):
if env.build_tools:
aapt_path = ''
aapt2_path = ''
versions = os.listdir(env.build_tools)
for version in reversed(sorted(versions)):
if not os.path.isfile(aapt2_path):
aapt2_path = os.path.join(env.build_tools, version, 'aapt2')
if not os.path.isfile(aapt_path):
aapt_path = os.path.join(env.build_tools, version, 'aapt')
aapt_version = 1
# Use latest available version for aapt/aapt2 but ensure at least one is valid.
if os.path.isfile(aapt2_path) or os.path.isfile(aapt_path):
break
# Use aapt2 only if present and we have a suitable version
if aapt2_path and _check_supported_aapt2(aapt2_path):
aapt_path = aapt2_path
aapt_version = 2
# Use the aapt version discovered from build tools.
if aapt_path:
logger.debug('Using {} for version {}'.format(aapt_path, version))
env.aapt = aapt_path
env.aapt_version = aapt_version
return
# Try detecting aapt2 and aapt from PATH
if not env.aapt:
aapt2_path = which('aapt2')
if _check_supported_aapt2(aapt2_path):
env.aapt = aapt2_path
env.aapt_version = 2
else:
env.aapt = which('aapt')
env.aapt_version = 1
if not env.aapt:
raise HostError('aapt/aapt2 not found. Please make sure it is available in PATH'
' or at least one Android platform is installed')
def _check_env():
global android_home, platform_tools, adb, aapt, aapt_version # pylint: disable=W0603
if not android_home:
android_home = os.getenv('ANDROID_HOME')
if android_home:
_env = _initialize_with_android_home(_AndroidEnvironment())
else:
_env = _initialize_without_android_home(_AndroidEnvironment())
android_home = _env.android_home
platform_tools = _env.platform_tools
adb = _env.adb
aapt = _env.aapt
aapt_version = _env.aapt_version
class LogcatMonitor(object):
"""
Helper class for monitoring Android's logcat
:param target: Android target to monitor
:type target: :class:`AndroidTarget`
:param regexps: List of uncompiled regular expressions to filter on the
device. Logcat entries that don't match any will not be
seen. If omitted, all entries will be sent to host.
:type regexps: list(str)
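Minimal usage sketch (``target`` is assumed to be an already connected
Android target; the file name and regular expressions below are purely
illustrative)::
monitor = LogcatMonitor(target, regexps=[r'ActivityManager'])
monitor.start('logcat.txt')
# ... run the workload of interest ...
matches = monitor.wait_for(r'Displayed', timeout=60)
monitor.stop()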
"""
@property
def logfile(self):
return self._logfile
def __init__(self, target, regexps=None, logcat_format=None):
super(LogcatMonitor, self).__init__()
self.target = target
self._regexps = regexps
self._logcat_format = logcat_format
self._logcat = None
self._logfile = None
def start(self, outfile=None):
"""
Start logcat and begin monitoring
:param outfile: Optional path to file to store all logcat entries
:type outfile: str
"""
if outfile:
self._logfile = open(outfile, 'w')
else:
self._logfile = tempfile.NamedTemporaryFile(mode='w')
self.target.clear_logcat()
logcat_cmd = 'logcat'
# Join all requested regexps with an 'or'
if self._regexps:
regexp = '{}'.format('|'.join(self._regexps))
if len(self._regexps) > 1:
regexp = '({})'.format(regexp)
# Logcat on older versions of Android does not support the -e argument
# so fall back to using grep.
if self.target.get_sdk_version() > 23:
logcat_cmd = '{} -e {}'.format(logcat_cmd, quote(regexp))
else:
logcat_cmd = '{} | grep {}'.format(logcat_cmd, quote(regexp))
if self._logcat_format:
logcat_cmd = "{} -v {}".format(logcat_cmd, quote(self._logcat_format))
logcat_cmd = get_adb_command(self.target.conn.device, logcat_cmd, self.target.adb_server)
logger.debug('logcat command ="{}"'.format(logcat_cmd))
self._logcat = pexpect.spawn(logcat_cmd, logfile=self._logfile, encoding='utf-8')
def stop(self):
self.flush_log()
self._logcat.terminate()
self._logfile.close()
def get_log(self):
"""
Return the list of lines found by the monitor
"""
self.flush_log()
with open(self._logfile.name) as fh:
return [line for line in fh]
def flush_log(self):
# Unless we tell pexpect to 'expect' something, it won't read from
# logcat's buffer or write into our logfile. We'll need to force it to
# read any pending logcat output.
while True:
try:
read_size = 1024 * 8
# This will read up to read_size bytes, but only those that are
# already ready (i.e. it won't block). If there aren't any bytes
# already available it raises pexpect.TIMEOUT.
buf = self._logcat.read_nonblocking(read_size, timeout=0)
# We can't just keep calling read_nonblocking until we get a
# pexpect.TIMEOUT (i.e. until we don't find any available
# bytes), because logcat might be writing bytes the whole time -
# in that case we might never return from this function. In
# fact, we only care about bytes that were written before we
# entered this function. So, if we read read_size bytes (as many
# as we were allowed to), then we'll assume there are more bytes
# that have already been sitting in the output buffer of the
# logcat command. If not, we'll assume we read everything that
# had already been written.
if len(buf) == read_size:
continue
else:
break
except pexpect.TIMEOUT:
# No available bytes to read. No prob, logcat just hasn't
# printed anything since pexpect last read from its buffer.
break
def clear_log(self):
with open(self._logfile.name, 'w') as _:
pass
def search(self, regexp):
"""
Search a line that matches a regexp in the logcat log
Return immediately
"""
return [line for line in self.get_log() if re.match(regexp, line)]
def wait_for(self, regexp, timeout=30):
"""
Search a line that matches a regexp in the logcat log
Wait for it to appear if it's not found
:param regexp: regexp to search
:type regexp: str
:param timeout: Timeout in seconds, before raising RuntimeError.
``None`` means wait indefinitely
:type timeout: number
:returns: List of matched strings
"""
log = self.get_log()
res = [line for line in log if re.match(regexp, line)]
# Found some matches, return them
if res:
return res
# Store the number of lines we've searched already, so we don't have to
# re-grep them after 'expect' returns
next_line_num = len(log)
try:
self._logcat.expect(regexp, timeout=timeout)
except pexpect.TIMEOUT:
raise RuntimeError('Logcat monitor timeout ({}s)'.format(timeout))
return [line for line in self.get_log()[next_line_num:]
if re.match(regexp, line)]
| 37.424084
| 146
| 0.618942
|
aacde24fbe2ea42d01d4c7ca74eaecebf8427acc
| 3,371
|
py
|
Python
|
12 - The Python Programmer Bootcamp/5_Conditionals/7_Practice challenge (15:11)/6.7_Conditionals_questions_UNSOLVED.py
|
olayinka04/365-data-science-courses
|
7d71215432f0ef07fd3def559d793a6f1938d108
|
[
"Apache-2.0"
] | null | null | null |
12 - The Python Programmer Bootcamp/5_Conditionals/7_Practice challenge (15:11)/6.7_Conditionals_questions_UNSOLVED.py
|
olayinka04/365-data-science-courses
|
7d71215432f0ef07fd3def559d793a6f1938d108
|
[
"Apache-2.0"
] | null | null | null |
12 - The Python Programmer Bootcamp/5_Conditionals/7_Practice challenge (15:11)/6.7_Conditionals_questions_UNSOLVED.py
|
olayinka04/365-data-science-courses
|
7d71215432f0ef07fd3def559d793a6f1938d108
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 17 13:04:18 2019
@author: Giles
"""
'''
Question 1
Write code that asks the user to input a number between 1 and 5 inclusive.
The code will take the integer value and print out the string value. So for
example if the user inputs 2 the code will print two. Reject any input that
is not a number in that range
'''
#user_input = int(input('Please enter an integer between 1-5:> '))
#if user_input == 1:
# print('one')
#elif user_input == 2:
# print('two')
#elif user_input == 3:
# print('three')
#elif user_input == 4:
# print('four')
#elif user_input == 5:
# print('five')
#else:
# print('Out of range')
'''
Question 2
Repeat the previous task but this time the user will input a string and the
code will output the integer value. Convert the string to lowercase first.
'''
#user_input = input('Please enter an string between One and five:> ')
#user_input = user_input.lower()
#if user_input == 'one':
# print(1)
#elif user_input == 'two':
# print(2)
#elif user_input == 'three':
# print(3)
#elif user_input == 'four':
# print(4)
#elif user_input == 'five':
# print(5)
#else:
# print('Out of range')
'''
Question 3
Create a variable containing an integer between 1 and 10 inclusive. Ask the
user to guess the number. If they guess too high or too low, tell them they
have not won. Tell them they win if they guess the correct number.
'''
#secret_number = 3
#guess = input('Guess the number between 1-10:> ')
#if guess.isdigit():
# guess = int(guess)
# if guess == secret_number:
# print('You guessed the correct number! You win!')
# elif guess > secret_number and guess <= 10:
# print('You guessed too high. Sorry you lose!')
# elif guess < secret_number and guess >= 1:
# print('You guessed too low. Sorry you lose!')
# else:
# print('Out of range')
#else:
# print('That\'s not even an integer! What are you playing at?!')
'''
Question 4
Ask the user to input their name. Check the length of the name. If it is
greater than 5 characters long, write a message telling them how many
characters it contains; otherwise write a message saying the length of their
name is a secret.
'''
#name = input('Please enter your name:>')
#name_len = len(name)
#if name_len > 5:
# print('Your name contains',name_len,'characters.')
#else:
# print('I\'m not telling you the length of your name.')
'''
Question 5
Ask the user for two integers between 1 and 20. If they are both greater than
15 return their product. If only one is greater than 15 return their sum, if
neither are greater than 15 return zero
'''
#int_1 = int(input('Please enter an integer between 1-20:> '))
#int_2 = int(input('Please enter another integer between 1-20:> '))
#
#if int_1 >= 15 and int_2 >= 15:
# print(int_1 * int_2)
#elif int_1 >= 15 or int_2 >= 15:
# print(int_1 + int_2)
#else:
# print(0)
'''
Question 6
Ask the user for two integers, then swap the contents of the variables. So if
var_1 = 1 and var_2 = 2 initially, once the code has run var_1 should equal 2
and var_2 should equal 1.
'''
#int_1 = int(input('Please enter first integer:> '))
#int_2 = int(input('Please enter second integer:> '))
#print('Before swapping int_1 =',int_1,'and int_2 =',int_2)
#int_1,int_2 = int_2,int_1
#print('After swapping int_1 =', int_1,'and int_2 = ',int_2)
| 30.369369
| 80
| 0.679917
|
5ab5c4a57cfe60464e662003349d560a3cd16f42
| 23,242
|
py
|
Python
|
tests/fnet/test_modeling_fnet.py
|
dctelus/transformers
|
6786cbc4b14ebff0ac59c768cadd109391db9a08
|
[
"Apache-2.0"
] | 3
|
2022-01-15T08:06:07.000Z
|
2022-03-10T07:13:18.000Z
|
tests/fnet/test_modeling_fnet.py
|
arron1227/transformers
|
b18dfd95e1f60ae65a959a7b255fc06522170d1b
|
[
"Apache-2.0"
] | 2
|
2022-03-14T10:13:16.000Z
|
2022-03-14T11:50:27.000Z
|
tests/fnet/test_modeling_fnet.py
|
arron1227/transformers
|
b18dfd95e1f60ae65a959a7b255fc06522170d1b
|
[
"Apache-2.0"
] | 2
|
2022-03-21T04:32:39.000Z
|
2022-03-22T01:02:49.000Z
|
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch FNet model. """
import unittest
from typing import Dict, List, Tuple
from transformers import FNetConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tokenizers, require_torch, slow, torch_device
from ..test_configuration_common import ConfigTester
from ..test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetModel,
FNetTokenizerFast,
)
from transformers.models.fnet.modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetBasicFourierTransform,
is_scipy_available,
)
# Override ConfigTester
class FNetConfigTester(ConfigTester):
def create_and_test_config_common_properties(self):
config = self.config_class(**self.inputs_dict)
if self.has_text_modality:
self.parent.assertTrue(hasattr(config, "vocab_size"))
self.parent.assertTrue(hasattr(config, "hidden_size"))
self.parent.assertTrue(hasattr(config, "num_hidden_layers"))
class FNetModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels
def get_config(self):
return FNetConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
tpu_short_seq_length=self.seq_length,
)
@require_torch
def create_and_check_fourier_transform(self, config):
hidden_states = floats_tensor([self.batch_size, self.seq_length, config.hidden_size])
transform = FNetBasicFourierTransform(config)
fftn_output = transform(hidden_states)
config.use_tpu_fourier_optimizations = True
if is_scipy_available():
transform = FNetBasicFourierTransform(config)
dft_output = transform(hidden_states)
config.max_position_embeddings = 4097
transform = FNetBasicFourierTransform(config)
fft_output = transform(hidden_states)
if is_scipy_available():
self.parent.assertTrue(torch.allclose(fftn_output[0][0], dft_output[0][0], atol=1e-4))
self.parent.assertTrue(torch.allclose(fft_output[0][0], dft_output[0][0], atol=1e-4))
self.parent.assertTrue(torch.allclose(fftn_output[0][0], fft_output[0][0], atol=1e-4))
def create_and_check_model(self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels):
model = FNetModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_pretraining(
self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels
):
model = FNetForPreTraining(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
token_type_ids=token_type_ids,
labels=token_labels,
next_sentence_label=sequence_labels,
)
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
def create_and_check_for_masked_lm(
self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels
):
model = FNetForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_next_sentence_prediction(
self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels
):
model = FNetForNextSentencePrediction(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
token_type_ids=token_type_ids,
next_sentence_label=sequence_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
def create_and_check_for_question_answering(
self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels
):
model = FNetForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_for_sequence_classification(
self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = FNetForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = FNetForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_multiple_choice(
self, config, input_ids, token_type_ids, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = FNetForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
result = model(
multiple_choice_inputs_ids,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids}
return config, inputs_dict
@require_torch
class FNetModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
FNetModel,
FNetForPreTraining,
FNetForMaskedLM,
FNetForNextSentencePrediction,
FNetForMultipleChoice,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
)
if is_torch_available()
else ()
)
# Skip Tests
test_pruning = False
test_torchscript = False
test_head_masking = False
# special case for ForPreTraining model
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
inputs_dict["next_sentence_label"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
return inputs_dict
# Overridden Tests
def test_attention_outputs(self):
pass
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t):
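# NaN is the only value that is not equal to itself, so `t != t` builds a
# boolean mask selecting exactly the NaN entries, which are zeroed in place.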
t[t != t] = 0
return t
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
with torch.no_grad():
tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif isinstance(tuple_object, Dict):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values(), dict_object.values()
):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
),
msg=f"Tuple and dict output are not equal. Difference: {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`: {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}.",
)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs)
# tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
# dict_inputs = self._prepare_for_class(inputs_dict, model_class)
# check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
def test_retain_grad_hidden_states_attentions(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**inputs)
output = outputs[0]
hidden_states = outputs.hidden_states[0]
hidden_states.retain_grad()
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(hidden_states.grad)
def setUp(self):
self.model_tester = FNetModelTester(self)
self.config_tester = FNetConfigTester(self, config_class=FNetConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_pretraining(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in FNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = FNetModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
class FNetModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_for_masked_lm(self):
"""
For comparison:
1. Modify the pre-training model `__call__` to skip computing metrics and return masked_lm_output like so:
```
...
sequence_output, pooled_output = EncoderModel(
self.config, random_seed=self.random_seed, name="encoder")(
input_ids, input_mask, type_ids, deterministic=deterministic)
masked_lm_output = nn.Dense(
self.config.d_emb,
kernel_init=default_kernel_init,
name="predictions_dense")(
sequence_output)
masked_lm_output = nn.gelu(masked_lm_output)
masked_lm_output = nn.LayerNorm(
epsilon=LAYER_NORM_EPSILON, name="predictions_layer_norm")(
masked_lm_output)
masked_lm_logits = layers.OutputProjection(
kernel=self._get_embedding_table(), name="predictions_output")(
masked_lm_output)
next_sentence_logits = layers.OutputProjection(
n_out=2, kernel_init=default_kernel_init, name="classification")(
pooled_output)
return masked_lm_logits
...
```
2. Run the following:
>>> import jax.numpy as jnp
>>> import sentencepiece as spm
>>> from flax.training import checkpoints
>>> from f_net.models import PreTrainingModel
>>> from f_net.configs.pretraining import get_config, ModelArchitecture
>>> pretrained_params = checkpoints.restore_checkpoint('./f_net/f_net_checkpoint', None) # Location of original checkpoint
>>> pretrained_config = get_config()
>>> pretrained_config.model_arch = ModelArchitecture.F_NET
>>> vocab_filepath = "./f_net/c4_bpe_sentencepiece.model" # Location of the sentence piece model
>>> tokenizer = spm.SentencePieceProcessor()
>>> tokenizer.Load(vocab_filepath)
>>> with pretrained_config.unlocked():
>>> pretrained_config.vocab_size = tokenizer.GetPieceSize()
>>> tokens = jnp.array([[0, 1, 2, 3, 4, 5]])
>>> type_ids = jnp.zeros_like(tokens, dtype="i4")
>>> attention_mask = jnp.ones_like(tokens) # Dummy. This gets deleted inside the model.
>>> flax_pretraining_model = PreTrainingModel(pretrained_config)
>>> pretrained_model_params = freeze(pretrained_params['target'])
>>> flax_model_outputs = flax_pretraining_model.apply({"params": pretrained_model_params}, tokens, attention_mask, type_ids, None, None, None, None, deterministic=True)
>>> masked_lm_logits[:, :3, :3]
"""
model = FNetForMaskedLM.from_pretrained("google/fnet-base")
model.to(torch_device)
input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]], device=torch_device)
output = model(input_ids)[0]
vocab_size = 32000
expected_shape = torch.Size((1, 6, vocab_size))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[[-1.7819, -7.7384, -7.5002], [-3.4746, -8.5943, -7.7762], [-3.2052, -9.0771, -8.3468]]],
device=torch_device,
)
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
@require_tokenizers
def test_inference_long_sentence(self):
model = FNetForMaskedLM.from_pretrained("google/fnet-base")
model.to(torch_device)
tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base")
inputs = tokenizer(
"the man worked as a [MASK].",
"this is his [MASK].",
return_tensors="pt",
padding="max_length",
max_length=512,
)
inputs = {k: v.to(torch_device) for k, v in inputs.items()}
logits = model(**inputs).logits
predictions_mask_1 = tokenizer.decode(logits[0, 6].topk(5).indices)
predictions_mask_2 = tokenizer.decode(logits[0, 12].topk(5).indices)
self.assertEqual(predictions_mask_1.split(" "), ["man", "child", "teacher", "woman", "model"])
self.assertEqual(predictions_mask_2.split(" "), ["work", "wife", "job", "story", "name"])
@slow
def test_inference_for_next_sentence_prediction(self):
model = FNetForNextSentencePrediction.from_pretrained("google/fnet-base")
model.to(torch_device)
input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]], device=torch_device)
output = model(input_ids)[0]
expected_shape = torch.Size((1, 2))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor([[-0.2234, -0.0226]], device=torch_device)
self.assertTrue(torch.allclose(output, expected_slice, atol=1e-4))
@slow
def test_inference_model(self):
model = FNetModel.from_pretrained("google/fnet-base")
model.to(torch_device)
input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]], device=torch_device)
output = model(input_ids)[0]
expected_shape = torch.Size((1, 6, model.config.hidden_size))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[[4.1541, -0.1051, -0.1667], [-0.9144, 0.2939, -0.0086], [-0.8472, -0.7281, 0.0256]]], device=torch_device
)
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 41.503571
| 315
| 0.669779
|
f3db4bb27de7b8ef2802f6105c2abac117657036
| 1,106
|
py
|
Python
|
plotly/validators/volume/stream/__init__.py
|
piyush1301/plotly.py
|
50cd5c4cd4732042422751c7760acbab8dd8a50d
|
[
"MIT"
] | 6
|
2019-05-03T02:12:04.000Z
|
2020-03-01T06:33:21.000Z
|
plotly/validators/volume/stream/__init__.py
|
piyush1301/plotly.py
|
50cd5c4cd4732042422751c7760acbab8dd8a50d
|
[
"MIT"
] | null | null | null |
plotly/validators/volume/stream/__init__.py
|
piyush1301/plotly.py
|
50cd5c4cd4732042422751c7760acbab8dd8a50d
|
[
"MIT"
] | 5
|
2019-05-18T16:50:11.000Z
|
2021-07-06T21:14:36.000Z
|
import _plotly_utils.basevalidators
class TokenValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name='token', parent_name='volume.stream', **kwargs
):
super(TokenValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'calc'),
no_blank=kwargs.pop('no_blank', True),
role=kwargs.pop('role', 'info'),
strict=kwargs.pop('strict', True),
**kwargs
)
import _plotly_utils.basevalidators
class MaxpointsValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name='maxpoints', parent_name='volume.stream', **kwargs
):
super(MaxpointsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'calc'),
max=kwargs.pop('max', 10000),
min=kwargs.pop('min', 0),
role=kwargs.pop('role', 'info'),
**kwargs
)
| 28.358974
| 76
| 0.611212
|
a244718746e1c913b01ee81eb90eff1ac0f91f31
| 9,047
|
py
|
Python
|
pyleecan/GUI/Dialog/DMachineSetup/SWSlot/SWSlot.py
|
Kelos-Zhu/pyleecan
|
368f8379688e31a6c26d2c1cd426f21dfbceff2a
|
[
"Apache-2.0"
] | null | null | null |
pyleecan/GUI/Dialog/DMachineSetup/SWSlot/SWSlot.py
|
Kelos-Zhu/pyleecan
|
368f8379688e31a6c26d2c1cd426f21dfbceff2a
|
[
"Apache-2.0"
] | null | null | null |
pyleecan/GUI/Dialog/DMachineSetup/SWSlot/SWSlot.py
|
Kelos-Zhu/pyleecan
|
368f8379688e31a6c26d2c1cd426f21dfbceff2a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from numpy import pi
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QMessageBox, QWidget
from .....Classes.LamSlotWind import LamSlotWind
from .....Classes.Slot import Slot
from .....Classes.SlotW10 import SlotW10
from .....Classes.SlotWind import SlotWind
from .....GUI.Dialog.DMachineSetup.SWSlot.Gen_SWSlot import Gen_SWSlot
from .....GUI.Dialog.DMachineSetup.SWSlot.PWSlot10.PWSlot10 import PWSlot10
from .....GUI.Dialog.DMachineSetup.SWSlot.PWSlot11.PWSlot11 import PWSlot11
from .....GUI.Dialog.DMachineSetup.SWSlot.PWSlot12.PWSlot12 import PWSlot12
from .....GUI.Dialog.DMachineSetup.SWSlot.PWSlot13.PWSlot13 import PWSlot13
from .....GUI.Dialog.DMachineSetup.SWSlot.PWSlot14.PWSlot14 import PWSlot14
from .....GUI.Dialog.DMachineSetup.SWSlot.PWSlot15.PWSlot15 import PWSlot15
from .....GUI.Dialog.DMachineSetup.SWSlot.PWSlot16.PWSlot16 import PWSlot16
from .....GUI.Dialog.DMachineSetup.SWSlot.PWSlot21.PWSlot21 import PWSlot21
from .....GUI.Dialog.DMachineSetup.SWSlot.PWSlot22.PWSlot22 import PWSlot22
from .....GUI.Dialog.DMachineSetup.SWSlot.PWSlot23.PWSlot23 import PWSlot23
from .....GUI.Dialog.DMachineSetup.SWSlot.PWSlot24.PWSlot24 import PWSlot24
from .....GUI.Dialog.DMachineSetup.SWSlot.PWSlot25.PWSlot25 import PWSlot25
from .....GUI.Dialog.DMachineSetup.SWSlot.PWSlot26.PWSlot26 import PWSlot26
from .....GUI.Dialog.DMachineSetup.SWSlot.PWSlot27.PWSlot27 import PWSlot27
from .....GUI.Dialog.DMachineSetup.SWSlot.PWSlot28.PWSlot28 import PWSlot28
from .....GUI.Dialog.DMachineSetup.SWSlot.PWSlot29.PWSlot29 import PWSlot29
# List to convert index of combobox to slot type
WIDGET_LIST = [
PWSlot10,
PWSlot11,
PWSlot12,
PWSlot13,
PWSlot14,
PWSlot15,
PWSlot16,
PWSlot21,
PWSlot22,
PWSlot23,
PWSlot24,
PWSlot25,
PWSlot26,
PWSlot27,
PWSlot28,
PWSlot29,
]
INIT_INDEX = [wid.slot_type for wid in WIDGET_LIST]
SLOT_NAME = [wid.slot_name for wid in WIDGET_LIST]
class SWSlot(Gen_SWSlot, QWidget):
"""Step to set the slot with winding
"""
# Signal to DMachineSetup to know that the save popup is needed
saveNeeded = pyqtSignal()
# Information for DMachineSetup nav
step_name = "Slot"
def __init__(self, machine, matlib, is_stator=False):
"""Initialize the GUI according to machine
Parameters
----------
self : SWSlot
A SWSlot widget
machine : Machine
current machine to edit
matlib : MatLib
Material Library
is_stator : bool
To adapt the GUI to set either the stator or the rotor
"""
# Build the interface according to the .ui file
QWidget.__init__(self)
self.setupUi(self)
# Saving arguments
self.machine = machine
self.matlib = matlib
self.is_stator = is_stator
self.b_help.url = "https://eomys.com/produits/manatee/howtos/article/"
self.b_help.url += "how-to-set-up-the-slots"
# Fill the combobox with the available slot
self.c_slot_type.clear()
for slot in SLOT_NAME:
self.c_slot_type.addItem(slot)
# Avoid erasing all the parameters when navigating through the slots
self.previous_slot = dict()
for slot_type in INIT_INDEX:
self.previous_slot[slot_type] = None
if self.is_stator:
self.obj = machine.stator
else:
self.obj = machine.rotor
# If the Slot is not set, initialize it with a 1_0
if self.obj.slot is None or type(self.obj.slot) in [SlotWind, Slot]:
self.obj.slot = SlotW10()
self.obj.slot._set_None()
if self.obj.slot.Zs is None:
self.si_Zs.clear()
else:
self.si_Zs.setValue(self.obj.slot.Zs)
self.set_slot_pitch(self.obj.slot.Zs)
# Set the correct index for the type checkbox and display the object
index = INIT_INDEX.index(type(self.obj.slot))
self.c_slot_type.setCurrentIndex(index)
# Update the slot widget
self.s_update_slot()
# Connect the slot
self.c_slot_type.currentIndexChanged.connect(self.s_change_slot)
self.si_Zs.editingFinished.connect(self.set_Zs)
self.b_plot.clicked.connect(self.s_plot)
def emit_save(self):
"""Send a saveNeeded signal to the DMachineSetup
"""
self.saveNeeded.emit()
def set_slot_type(self, index):
"""Initialize self.obj with the slot corresponding to index
Parameters
----------
self : SWSlot
A SWSlot object
index : int
Index of the selected slot type in the list
"""
# Save the slot
self.previous_slot[type(self.obj.slot)] = self.obj.slot
# Call the corresponding constructor
Zs = self.obj.slot.Zs
if self.previous_slot[INIT_INDEX[index]] is None:
# No previous slot of this type
self.obj.slot = INIT_INDEX[index]()
self.obj.slot._set_None() # No default value
self.obj.slot.Zs = Zs
else: # Load the previous slot of this type
self.obj.slot = self.previous_slot[INIT_INDEX[index]]
if self.obj.slot.Zs is not None:
# Update Zs without trying to compute output
self.si_Zs.blockSignals(True)
self.si_Zs.setValue(self.obj.slot.Zs)
self.si_Zs.blockSignals(False)
self.set_slot_pitch(self.obj.slot.Zs)
# Notify the machine GUI that the machine has changed
self.saveNeeded.emit()
def set_Zs(self):
"""Signal to update the value of Zs according to the spinbox
Parameters
----------
self : SWSlot
A SWSlot object
"""
value = self.si_Zs.value()
self.obj.slot.Zs = value
self.set_slot_pitch(value)
self.w_slot.w_out.comp_output()
# Notify the machine GUI that the machine has changed
self.saveNeeded.emit()
def set_slot_pitch(self, Zs):
"""Update out_slot_pitch with the correct value
Parameters
----------
self : SWSlot
A SWSlot object
Zs : int
The current value of Zs
"""
sp_txt = self.tr("Slot pitch = 360 / Zs = ")
if Zs in [None, 0]:
self.out_Slot_pitch.setText(sp_txt + "?")
else:
Slot_pitch = 360.0 / Zs
Slot_pitch_rad = Slot_pitch * pi / 180
self.out_Slot_pitch.setText(
sp_txt
+ "%.4g" % (Slot_pitch)
+ u" ° ("
+ "%.4g" % (Slot_pitch_rad)
+ " rad)"
)
def s_update_slot(self):
"""Update the slot widget
Parameters
----------
self : SWSlot
A SWSlot object
"""
# Regenerate the pages with the new values
self.w_slot.setParent(None)
self.w_slot = WIDGET_LIST[self.c_slot_type.currentIndex()](self.obj)
self.w_slot.saveNeeded.connect(self.emit_save)
# Refresh the GUI
self.main_layout.removeWidget(self.w_slot)
self.main_layout.insertWidget(1, self.w_slot)
def s_change_slot(self, index):
"""Signal to update the slot object and widget
Parameters
----------
self : SWSlot
A SWSlot object
index : int
Current index of the combobox
"""
# Current slot is removed and replaced by the new one
self.set_slot_type(index)
self.s_update_slot()
def s_plot(self):
"""Try to plot the lamination
Parameters
----------
self : SWSlot
A SWSlot object
"""
# We have to make sure the slot is right before trying to plot it
error = self.check(self.obj)
if error: # Error => Display it
QMessageBox().critical(self, self.tr("Error"), error)
else: # No error => Plot the slot (No winding for LamSquirrelCage)
if self.machine.type_machine == 10:
# For SRM, this is the last step => Plot the complete machine
self.machine.plot()
else:
self.obj.plot(is_lam_only=not (type(self.obj) is LamSlotWind))
@staticmethod
def check(lam):
"""Check that the current lamination have all the needed field set
Parameters
----------
lam: LamSlotWind
Lamination to check
Returns
-------
error: str
Error message (return None if no error)
"""
# Check that everything is set
if lam.slot.Zs is None:
return "You must set Zs !"
# Call the check method of the slot (every slot type has a
# different check method)
index = INIT_INDEX.index(type(lam.slot))
return WIDGET_LIST[index].check(lam)
| 32.19573
| 78
| 0.612579
|
29deacb419ca0cd59c9101f1b097455aa82027f9
| 579
|
py
|
Python
|
flavio/__init__.py
|
AlexandreCarvunis/flavio
|
c21936a5ff004283b08cab543e2c880e35526bb6
|
[
"MIT"
] | null | null | null |
flavio/__init__.py
|
AlexandreCarvunis/flavio
|
c21936a5ff004283b08cab543e2c880e35526bb6
|
[
"MIT"
] | null | null | null |
flavio/__init__.py
|
AlexandreCarvunis/flavio
|
c21936a5ff004283b08cab543e2c880e35526bb6
|
[
"MIT"
] | null | null | null |
from ._version import __version__
from . import physics
from . import statistics
from . import io
from . import parameters
from . import measurements
from . import classes
from .classes import Measurement, Parameter, ParameterConstraints, Observable, NamedInstanceClass
from .config import config
from . import citations
from flavio.physics.eft import WilsonCoefficients
from flavio.parameters import default_parameters
from flavio.functions import sm_prediction, sm_uncertainty, np_uncertainty, sm_error_budget, np_prediction, sm_covariance, combine_measurements, np_covariance
| 41.357143
| 158
| 0.848014
|
e3245390de455fdff455f201fd1a529ab260178a
| 12,275
|
py
|
Python
|
app/main/views/index.py
|
TechforgoodCAST/notifications-admin
|
0a9e06aafd79d0fbe50c26a85bf757aaeaa59340
|
[
"MIT"
] | null | null | null |
app/main/views/index.py
|
TechforgoodCAST/notifications-admin
|
0a9e06aafd79d0fbe50c26a85bf757aaeaa59340
|
[
"MIT"
] | 1
|
2021-10-19T13:34:15.000Z
|
2021-10-19T13:34:15.000Z
|
app/main/views/index.py
|
TechforgoodCAST/notifications-admin
|
0a9e06aafd79d0fbe50c26a85bf757aaeaa59340
|
[
"MIT"
] | 1
|
2021-03-05T13:18:44.000Z
|
2021-03-05T13:18:44.000Z
|
from flask import (
abort,
make_response,
redirect,
render_template,
request,
url_for,
)
from flask_login import current_user
from notifications_utils.international_billing_rates import (
INTERNATIONAL_BILLING_RATES,
)
from notifications_utils.template import HTMLEmailTemplate, LetterImageTemplate
from app import email_branding_client, letter_branding_client, status_api_client
from app.main import main
from app.main.forms import FieldWithNoneOption, SearchByNameForm
from app.main.views.sub_navigation_dictionaries import (
features_nav,
pricing_nav,
using_notify_nav,
)
from app.models.feedback import QUESTION_TICKET_TYPE
from app.utils import get_logo_cdn_domain, hide_from_search_engines
@main.route('/')
def index():
if current_user and current_user.is_authenticated:
return redirect(url_for('main.choose_account'))
return render_template(
'views/signedout.html',
counts=status_api_client.get_count_of_live_services_and_organisations(),
)
@main.route('/error/<int:status_code>')
def error(status_code):
if status_code >= 500:
abort(404)
abort(status_code)
@main.route('/cookies')
def cookies():
return render_template('views/cookies.html')
@main.route('/privacy')
def privacy():
return render_template('views/privacy.html')
@main.route('/accessibility-statement')
def accessibility_statement():
return render_template('views/accessibility_statement.html')
@main.route('/pricing')
def pricing():
return render_template(
'views/pricing/index.html',
sms_rate=0.0158,
international_sms_rates=sorted([
(cc, country['names'], country['billable_units'])
for cc, country in INTERNATIONAL_BILLING_RATES.items()
], key=lambda x: x[0]),
search_form=SearchByNameForm(),
navigation_links=pricing_nav(),
)
@main.route('/pricing/how-to-pay')
def how_to_pay():
return render_template(
'views/pricing/how-to-pay.html',
support_link=url_for('main.feedback', ticket_type=QUESTION_TICKET_TYPE),
navigation_links=pricing_nav(),
)
@main.route('/delivery-and-failure')
@main.route('/features/messages-status')
def delivery_and_failure():
return redirect(url_for('.message_status'), 301)
@main.route('/design-patterns-content-guidance')
def design_content():
return redirect('https://www.gov.uk/service-manual/design/sending-emails-and-text-messages', 301)
@main.route('/_email')
def email_template():
branding_type = 'govuk'
branding_style = request.args.get('branding_style', None)
if branding_style == FieldWithNoneOption.NONE_OPTION_VALUE:
branding_style = None
if branding_style is not None:
email_branding = email_branding_client.get_email_branding(branding_style)['email_branding']
branding_type = email_branding['brand_type']
if branding_type == 'govuk':
brand_text = None
brand_colour = None
brand_logo = None
govuk_banner = True
brand_banner = False
brand_name = None
else:
colour = email_branding['colour']
brand_text = email_branding['text']
brand_colour = colour
brand_logo = ('https://{}/{}'.format(get_logo_cdn_domain(), email_branding['logo'])
if email_branding['logo'] else None)
govuk_banner = branding_type in ['govuk', 'both']
brand_banner = branding_type == 'org_banner'
brand_name = email_branding['name']
template = {
'template_type': 'email',
'subject': 'Email branding preview',
'content': (
'Lorem Ipsum is simply dummy text of the printing and typesetting '
'industry.\n\nLorem Ipsum has been the industry’s standard dummy '
'text ever since the 1500s, when an unknown printer took a galley '
'of type and scrambled it to make a type specimen book. '
'\n\n'
'# History'
'\n\n'
'It has '
'survived not only'
'\n\n'
'* five centuries'
'\n'
'* but also the leap into electronic typesetting'
'\n\n'
'It was '
'popularised in the 1960s with the release of Letraset sheets '
'containing Lorem Ipsum passages, and more recently with desktop '
'publishing software like Aldus PageMaker including versions of '
'Lorem Ipsum.'
'\n\n'
'^ It is a long established fact that a reader will be distracted '
'by the readable content of a page when looking at its layout.'
'\n\n'
'The point of using Lorem Ipsum is that it has a more-or-less '
'normal distribution of letters, as opposed to using ‘Content '
'here, content here’, making it look like readable English.'
'\n\n\n'
'1. One'
'\n'
'2. Two'
'\n'
'10. Three'
'\n\n'
'This is an example of an email sent using Catalyst Notify.'
'\n\n'
'https://www.notifications.service.gov.uk'
)
}
if not bool(request.args):
resp = make_response(str(HTMLEmailTemplate(template)))
else:
resp = make_response(str(HTMLEmailTemplate(
template,
govuk_banner=govuk_banner,
brand_text=brand_text,
brand_colour=brand_colour,
brand_logo=brand_logo,
brand_banner=brand_banner,
brand_name=brand_name,
)))
resp.headers['X-Frame-Options'] = 'SAMEORIGIN'
return resp
@main.route('/_letter')
def letter_template():
branding_style = request.args.get('branding_style')
if branding_style == FieldWithNoneOption.NONE_OPTION_VALUE:
branding_style = None
if branding_style:
filename = letter_branding_client.get_letter_branding(branding_style)['filename']
else:
filename = 'no-branding'
template = {'subject': '', 'content': '', 'template_type': 'letter'}
image_url = url_for('no_cookie.letter_branding_preview_image', filename=filename)
template_image = str(LetterImageTemplate(
template,
image_url=image_url,
page_count=1,
))
resp = make_response(
render_template('views/service-settings/letter-preview.html', template=template_image)
)
resp.headers['X-Frame-Options'] = 'SAMEORIGIN'
return resp
@main.route('/documentation')
def documentation():
return render_template(
'views/documentation.html',
navigation_links=using_notify_nav(),
)
@main.route('/integration-testing')
def integration_testing():
return render_template('views/integration-testing.html'), 410
@main.route('/callbacks')
def callbacks():
return redirect(url_for('main.documentation'), 301)
# --- Features page set --- #
@main.route('/features')
def features():
return render_template(
'views/features.html',
navigation_links=features_nav()
)
@main.route('/features/roadmap', endpoint='roadmap')
def roadmap():
return render_template(
'views/roadmap.html',
navigation_links=features_nav()
)
@main.route('/features/email')
@hide_from_search_engines
def features_email():
return render_template(
'views/features/emails.html',
navigation_links=features_nav()
)
@main.route('/features/sms')
def features_sms():
return render_template(
'views/features/text-messages.html',
navigation_links=features_nav()
)
@main.route('/features/letters')
def features_letters():
return render_template(
'views/features/letters.html',
navigation_links=features_nav()
)
@main.route('/features/security', endpoint='security')
def security():
return render_template(
'views/security.html',
navigation_links=features_nav()
)
@main.route('/features/terms', endpoint='terms')
def terms():
return render_template(
'views/terms-of-use.html',
navigation_links=features_nav(),
)
@main.route('/features/using-notify')
def using_notify():
return render_template(
'views/using-notify.html',
navigation_links=features_nav()
), 410
@main.route('/using-notify/delivery-status')
def message_status():
return render_template(
'views/message-status.html',
navigation_links=using_notify_nav(),
)
@main.route('/features/get-started')
def get_started_old():
return redirect(url_for('.get_started'), 301)
@main.route('/using-notify/get-started')
def get_started():
return render_template(
'views/get-started.html',
navigation_links=using_notify_nav(),
)
@main.route('/using-notify/who-its-for')
def who_its_for():
return redirect(url_for('.who_can_use_notify'), 301)
@main.route('/using-notify/who-can-use-notify')
def who_can_use_notify():
return render_template(
'views/guidance/who-can-use-notify.html',
navigation_links=features_nav(),
)
@main.route('/trial-mode')
@main.route('/features/trial-mode')
def trial_mode():
return redirect(url_for('.trial_mode_new'), 301)
@main.route('/using-notify/trial-mode')
def trial_mode_new():
return render_template(
'views/trial-mode.html',
navigation_links=using_notify_nav(),
)
@main.route('/using-notify/guidance')
def guidance_index():
return render_template(
'views/guidance/index.html',
navigation_links=using_notify_nav(),
)
@main.route('/using-notify/guidance/branding-and-customisation')
def branding_and_customisation():
return render_template(
'views/guidance/branding-and-customisation.html',
navigation_links=using_notify_nav(),
)
@main.route('/using-notify/guidance/create-and-send-messages')
def create_and_send_messages():
return render_template(
'views/guidance/create-and-send-messages.html',
navigation_links=using_notify_nav(),
)
@main.route('/using-notify/guidance/sending-bulk-messages')
def sending_bulk_messages():
return render_template(
'views/guidance/sending-bulk-messages.html',
navigation_links=using_notify_nav(),
)
@main.route('/using-notify/guidance/edit-and-format-messages')
def edit_and_format_messages():
return render_template(
'views/guidance/edit-and-format-messages.html',
navigation_links=using_notify_nav(),
)
@main.route('/using-notify/guidance/send-files-by-email')
def send_files_by_email():
return render_template(
'views/guidance/send-files-by-email.html',
navigation_links=using_notify_nav(),
)
@main.route('/using-notify/guidance/upload-a-letter')
def upload_a_letter():
return render_template(
'views/guidance/upload-a-letter.html',
navigation_links=using_notify_nav(),
)
@main.route('/using-notify/guidance/letter-specification')
def letter_specification():
return render_template(
'views/guidance/letter-specification.html',
navigation_links=using_notify_nav(),
)
# --- Redirects --- #
@main.route('/roadmap', endpoint='old_roadmap')
@main.route('/terms', endpoint='old_terms')
@main.route('/information-security', endpoint='information_security')
@main.route('/using_notify', endpoint='old_using_notify')
@main.route('/information-risk-management', endpoint='information_risk_management')
@main.route('/integration_testing', endpoint='old_integration_testing')
def old_page_redirects():
redirects = {
'main.old_roadmap': 'main.roadmap',
'main.old_terms': 'main.terms',
'main.information_security': 'main.using_notify',
'main.old_using_notify': 'main.using_notify',
'main.information_risk_management': 'main.security',
'main.old_integration_testing': 'main.integration_testing',
}
return redirect(url_for(redirects[request.endpoint]), code=301)
@main.route('/docs/notify-pdf-letter-spec-latest.pdf')
def letter_spec():
return redirect(
'https://docs.notifications.service.gov.uk'
'/documentation/images/notify-pdf-letter-spec-v2.4.pdf'
)
| 28.15367
| 101
| 0.667862
|
c6da4ac44de5e5e7a924afdc7328f27d694abf25
| 700
|
py
|
Python
|
PBO_8081/tugas PBO 1.py
|
ifanamirudin/PBO
|
76ab2b1ca2a4a4e05aa5fae48bfeb45f20018911
|
[
"MIT"
] | null | null | null |
PBO_8081/tugas PBO 1.py
|
ifanamirudin/PBO
|
76ab2b1ca2a4a4e05aa5fae48bfeb45f20018911
|
[
"MIT"
] | null | null | null |
PBO_8081/tugas PBO 1.py
|
ifanamirudin/PBO
|
76ab2b1ca2a4a4e05aa5fae48bfeb45f20018911
|
[
"MIT"
] | null | null | null |
Python 3.8.1 (tags/v3.8.1:1b293b6, Dec 18 2019, 22:39:24) [MSC v.1916 32 bit (Intel)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> x1=input("masukkan nama")
masukkan namaifan hi amirudin
>>> x2=input("masukkan nmp")
masukkan nmp8081
>>> a="masukkan nama"
>>> b="masukkan nmp"
>>> c=a+b
>>> c
'masukkan namamasukkan nmp'
>>> x1=input("masukkan nilai")
masukkan nilai :23
>>> x2=input("masukkan nilai x2 :")
masukkan nilai x2 :11
>>> x3=x1+x2
>>> x3
' :2311'
>>> a=input("masukkan nama depan :")
masukkan nama depan :ifan
>>> b=input("masukkan nama belakan :")
masukkan nama belakan :hiamirudin
>>> c=a+b
>>> c
'ifan hi amirudin'
>>>
| 25.925926
| 95
| 0.638571
|
f920654e0a8fc604bf49f4d312297cc0efcc4a56
| 845
|
py
|
Python
|
src/klasser.py
|
Leopold2020/code_repo
|
24b1a932d77ba1456f8df4978f029dd841c7b177
|
[
"MIT"
] | null | null | null |
src/klasser.py
|
Leopold2020/code_repo
|
24b1a932d77ba1456f8df4978f029dd841c7b177
|
[
"MIT"
] | null | null | null |
src/klasser.py
|
Leopold2020/code_repo
|
24b1a932d77ba1456f8df4978f029dd841c7b177
|
[
"MIT"
] | null | null | null |
class NTI_Elev:
def __init__(self, name, age, sex):
self.name = name
self.age = age
self.sex = sex
def __str__(self):
return f"NTI eleven heter {self.name}, de är {self.age} år gammal och är en {self.sex}."
def return_name(self):
return self.name
class ITaren(NTI_Elev):
def __init__(self, name, age, sex, cleanness):
super().__init__(name, age, sex)
self.cleanness = cleanness
def __str__(self):
return super().__str__() + f" {self.name} är {self.cleanness} ren"
def main():
Person1 = ITaren("Armand", "17", "Man", "Helt ok")
Person2 = ITaren("Bryan", "18", "Man", "inte")
    # print(Person1)
    # print(Person2)
    ITare = [Person1, Person2]
    for person in ITare:
        print(person)
if __name__ == "__main__":
main()
| 20.119048
| 96
| 0.577515
|
b1049a4f01ddf9fa0444d1c852627e5141fbba8a
| 9,333
|
py
|
Python
|
smdebug/tensorflow/utils.py
|
vandanavk/sagemaker-debugger
|
5246cda198295aa1dd1656ad32b30c4bb1e2aec4
|
[
"Apache-2.0"
] | 1
|
2020-08-14T16:10:04.000Z
|
2020-08-14T16:10:04.000Z
|
smdebug/tensorflow/utils.py
|
vandanavk/sagemaker-debugger
|
5246cda198295aa1dd1656ad32b30c4bb1e2aec4
|
[
"Apache-2.0"
] | null | null | null |
smdebug/tensorflow/utils.py
|
vandanavk/sagemaker-debugger
|
5246cda198295aa1dd1656ad32b30c4bb1e2aec4
|
[
"Apache-2.0"
] | null | null | null |
# Standard Library
import collections
import json
from enum import Enum
# Third Party
from tensorflow.contrib.distribute import MirroredStrategy as ContribMirroredStrategy
from tensorflow.python.distribute import values
# First Party
from smdebug.core.modes import ModeKeys
try:
import tensorflow.compat.v1 as tf
except ImportError:
# For TF 1.13
import tensorflow as tf
class TFDistributionStrategy(Enum):
NONE = 0
HOROVOD = 1
MIRRORED = 2
PARAMETER_SERVER = 3
UNSUPPORTED = 100
def node_name(n):
if n.startswith("^"):
return n[1:]
else:
return n.split(":")[0]
def extract_graph_summary(graph_def):
"""Extracts useful information from the graph and returns them."""
name_to_input_name = {} # Keyed by the dest node name.
name_to_node = {} # Keyed by node name.
# Keeps track of node sequences. It is important to still output the
# operations in the original order.
name_to_seq_num = {} # Keyed by node name.
seq = 0
for node in graph_def.node:
n = node_name(node.name)
name_to_node[n] = node
name_to_input_name[n] = [node_name(x) for x in node.input]
name_to_seq_num[n] = seq
seq += 1
return name_to_input_name, name_to_node, name_to_seq_num
def tensor_can_be_saved(root_tensor, subgraph_nodes, unfilled_placeholders):
"""
If a tensor x depends on an unfilled placeholder, then it can't be saved and should be skipped.
This 4th step is done by performing BFS from this tensor x, and going up
its inputs for any node which is not in the subgraph.
If a node reached through this BFS is not in the subgraph
and is an unfilled placeholder, then the tensor x can't be saved.
:param root_tensor: the tensor from which to start BFS
:param subgraph_nodes: the subgraph which can reach the current fetches
:param unfilled_placeholders: placeholders which were not assigned values
:return:
"""
seen, queue = {root_tensor}, collections.deque([root_tensor])
while queue:
tensor = queue.popleft()
if tensor.op.name not in subgraph_nodes:
if len(tensor.op.inputs) == 0 and tensor in unfilled_placeholders:
# current tensor is not in the subgraph,
# but it also has no inputs which might be in the subgraph
# this means tf_tensor is not connected the fetches through the subgraph
return False
for ti in tensor.op.inputs:
if ti not in seen:
seen.add(ti)
queue.append(ti)
return True
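# An illustrative sketch of the rule above (the names x and y are hypothetical,
# not from this module): if x = tf.placeholder(...) is never fed and y = x + 1
# lies outside the subgraph reachable from the fetches, then
# tensor_can_be_saved(y, subgraph_nodes, {x}) returns False, because the walk
# up from y reaches the unfed placeholder x.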
def build_fetches_tuple(fetches):
if (
not isinstance(fetches, list)
and not isinstance(fetches, tuple)
and not isinstance(fetches, dict)
):
fetches = [fetches]
original_fetch_ops = get_original_fetch_ops(fetches)
# sorting to create a unique tuple for lists of all orders
original_fetch_ops.sort(key=lambda x: x.name)
# creating a tuple as we need a immutable var for it to server
# as key into a dictionary
original_fetch_ops_tuple = tuple(original_fetch_ops)
return original_fetch_ops_tuple
def get_original_fetch_ops(fetches):
if isinstance(fetches, tf.Tensor) or isinstance(fetches, tf.Variable):
return [fetches.op]
elif isinstance(fetches, tf.Operation):
return [fetches]
elif isinstance(fetches, values.Mirrored):
return [x.op for x in fetches.values]
elif isinstance(fetches, list):
rval = []
for f in fetches:
rval.extend(get_original_fetch_ops(f))
return rval
elif isinstance(fetches, dict):
rval = []
for key in fetches:
rval += get_original_fetch_ops(fetches[key])
return rval
elif fetches is None:
return []
else:
raise RuntimeError("Invalid fetches")
""""
The TF_CONFIG environment variable is the standard way to specify the cluster configuration
to each worker that is part of the cluster.
Some examples of TF_CONFIG are given below:
Example of `TF_CONFIG` for chief training worker (must have one and only one):
Note that the chief worker also does the model training job, similar to other
non-chief training workers (see next paragraph). In addition to the model
training, it manages some extra work, e.g., checkpoint saving and restoring,
writing summaries, etc.
TF_CONFIG='{
"cluster": {
"chief": ["host0:2222"],
"worker": ["host1:2222", "host2:2222", "host3:2222"],
"ps": ["host4:2222", "host5:2222"]
},
"task": {"type": "chief", "index": 0}
}'
Example of `TF_CONFIG` for non-chief training worker (optional, could be
multiple):
TF_CONFIG='{
"cluster": {
"chief": ["host0:2222"],
"worker": ["host1:2222", "host2:2222", "host3:2222"],
"ps": ["host4:2222", "host5:2222"]
},
"task": {"type": "worker", "index": 0}
}'
where the `task.index` should be set as 0, 1, 2, in this example, respectively
for non-chief training workers.
Example of `TF_CONFIG` for parameter server, aka ps (could be multiple):
TF_CONFIG='{
"cluster": {
"chief": ["host0:2222"],
"worker": ["host1:2222", "host2:2222", "host3:2222"],
"ps": ["host4:2222", "host5:2222"]
},
"task": {"type": "ps", "index": 0}
}'
where the `task.index` should be set as 0 and 1, in this example, respectively
for parameter servers.
Example of `TF_CONFIG` for evaluator task. Evaluator is a special task that is
not part of the training cluster. There could be only one. It is used for
model evaluation.
TF_CONFIG='{
"cluster": {
"chief": ["host0:2222"],
"worker": ["host1:2222", "host2:2222", "host3:2222"],
"ps": ["host4:2222", "host5:2222"]
},
"task": {"type": "evaluator", "index": 0}
}'
NOTE: If the "chief" is missing in TF_CONFIG["cluster"], the worker with index 0 assumes this role.
See https://www.tensorflow.org/guide/distributed_training#setting_up_tf_config_environment_variable
"""
def load_tf_config_json(tf_config: str):
try:
return json.loads(tf_config)
except (json.JSONDecodeError, TypeError):
# if tf_config is None throws TypeError, so return None from next line
return None
def is_parameter_server_strategy(tf_config_json: dict) -> bool:
try:
return "cluster" in tf_config_json and "ps" in tf_config_json["cluster"]
except TypeError:
# when json is None
return False
def get_worker_id_from_tf_config(tf_config_json: dict) -> str:
"""Valid roles in a cluster is "chief", "worker", "ps" and "evaluator"."""
task = tf_config_json["task"]
worker_type = task["type"]
worker_index = task["index"]
return f"{worker_type}_{worker_index}"
def get_num_workers_from_tf_config(tf_config_json: dict) -> int:
workers = tf_config_json["cluster"]["worker"]
if "chief" in tf_config_json["cluster"]:
workers.extend(tf_config_json["cluster"]["chief"])
return len(workers)
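# A minimal usage sketch of the helpers above (the TF_CONFIG string is
# illustrative, not taken from a real cluster):
#
#   tf_config_json = load_tf_config_json(
#       '{"cluster": {"chief": ["host0:2222"], "worker": ["host1:2222"]},'
#       ' "task": {"type": "worker", "index": 0}}'
#   )
#   is_parameter_server_strategy(tf_config_json)    # False: no "ps" entry
#   get_worker_id_from_tf_config(tf_config_json)    # "worker_0"
#   get_num_workers_from_tf_config(tf_config_json)  # 2 (worker + chief)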
def get_chief_worker_from_tf_config(tf_config_json: dict):
if "chief" in tf_config_json["cluster"]:
return "chief_0"
else:
raise NotImplementedError
# todo
def is_mirrored_strategy(strat):
return isinstance(strat, (tf.distribute.MirroredStrategy, ContribMirroredStrategy))
def is_keras_optimizer(obj):
for cls in obj.__class__.__mro__:
if ".".join([cls.__module__, cls.__name__]) == "keras.optimizers.Optimizer":
return True
return False
def get_export_name_for_keras(layer, tensor_type, tensor):
if tensor_type in ["input", "output", "weight"]:
return f"{layer.name}/{tensor_type}s/{tensor.name}"
else:
return None
def get_keras_layer_inputs(layer):
# will throw an exception if _inbound_nodes is not loaded
layer.get_input_at(0)
input_tensors = []
for idx in range(len(layer._inbound_nodes)):
inputs = layer.get_input_at(idx)
if not isinstance(inputs, list):
inputs = [inputs]
for input_index, inp in enumerate(inputs):
input_tensors.append(inp)
return input_tensors
def get_non_device_tensors(tensor_refs):
non_dev_tensors = []
for tensor_ref in tensor_refs:
if not tensor_ref.tf_obj.device:
non_dev_tensors.append(tensor_ref)
return non_dev_tensors
def get_keras_layer_outputs(layer):
# will throw an exception if _inbound_nodes is not loaded
layer.get_output_at(0)
output_tensors = []
for idx in range(len(layer._inbound_nodes)):
outputs = layer.get_output_at(idx)
if not isinstance(outputs, list):
outputs = [outputs]
for output_index, outp in enumerate(outputs):
output_tensors.append(outp)
return output_tensors
def get_keras_mode(mode):
# Should never be called in TF 1.13 where this is not available
from tensorflow.python.keras.utils.mode_keys import ModeKeys as KerasModeKeys
if mode == ModeKeys.TRAIN:
return KerasModeKeys.TRAIN
elif mode == ModeKeys.EVAL:
return KerasModeKeys.TEST
elif mode == ModeKeys.PREDICT:
return KerasModeKeys.PREDICT
| 31.318792
| 101
| 0.663238
|
03bba9823c8aa6959b6e2ebc6b72ef1ff459b7ab
| 1,740
|
py
|
Python
|
astroquery/nist/tests/test_nist.py
|
hdevillepoix/astroquery
|
ce8c500c28424fe841e04741d4230b8f695ee194
|
[
"BSD-3-Clause"
] | 577
|
2015-02-12T18:23:49.000Z
|
2022-03-22T21:38:58.000Z
|
astroquery/nist/tests/test_nist.py
|
hdevillepoix/astroquery
|
ce8c500c28424fe841e04741d4230b8f695ee194
|
[
"BSD-3-Clause"
] | 1,812
|
2015-01-01T08:02:20.000Z
|
2022-03-31T13:03:52.000Z
|
astroquery/nist/tests/test_nist.py
|
hdevillepoix/astroquery
|
ce8c500c28424fe841e04741d4230b8f695ee194
|
[
"BSD-3-Clause"
] | 322
|
2015-02-23T19:31:29.000Z
|
2022-03-25T18:51:30.000Z
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import numpy.testing as npt
import pytest
from astropy.table import Table
import astropy.units as u
from ...utils.testing_tools import MockResponse
from ... import nist
DATA_FILES = {'lines': 'nist_out.html'}
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, filename)
@pytest.fixture
def patch_get(request):
try:
mp = request.getfixturevalue("monkeypatch")
except AttributeError: # pytest < 3
mp = request.getfuncargvalue("monkeypatch")
mp.setattr(nist.Nist, '_request', get_mockreturn)
return mp
def get_mockreturn(method, url, params=None, timeout=10, **kwargs):
filename = data_path(DATA_FILES['lines'])
    with open(filename, 'rb') as f:
        content = f.read()
    return MockResponse(content, **kwargs)
def test_parse_wavelength():
minwav, maxwav, unit = nist.core._parse_wavelength(4000 * u.AA,
7000 * u.AA)
npt.assert_approx_equal(minwav, 4000, significant=4)
npt.assert_approx_equal(maxwav, 7000, significant=4)
assert unit == nist.core.Nist.unit_code['Angstrom']
def test_query_async(patch_get):
response = nist.core.Nist.query_async(4000 * u.nm, 7000 * u.nm,
"H I", get_query_payload=True)
assert response['spectra'] == "H I"
assert response['unit'] == nist.core.Nist.unit_code['nm']
response = nist.core.Nist.query_async(4000 * u.nm, 7000 * u.nm, "H I")
assert response is not None
def test_query(patch_get):
result = nist.core.Nist.query(4000 * u.nm, 7000 * u.nm, "H I")
assert isinstance(result, Table)
| 31.071429
| 74
| 0.665517
|
0fea8cb76a628c99707f98ef43eb7949e526fc49
| 281
|
py
|
Python
|
emukit/model_wrappers/__init__.py
|
DavidJanz/emukit
|
7421cb7f4ed831b6581f3686806521ff7fb97e74
|
[
"Apache-2.0"
] | 6
|
2019-06-02T21:23:27.000Z
|
2020-02-17T09:46:30.000Z
|
emukit/model_wrappers/__init__.py
|
DavidJanz/emukit
|
7421cb7f4ed831b6581f3686806521ff7fb97e74
|
[
"Apache-2.0"
] | 4
|
2019-05-17T13:30:21.000Z
|
2019-06-21T13:49:19.000Z
|
emukit/model_wrappers/__init__.py
|
DavidJanz/emukit
|
7421cb7f4ed831b6581f3686806521ff7fb97e74
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from .gpy_model_wrappers import GPyModelWrapper, GPyMultiOutputWrapper # noqa: F401
from .gpy_quadrature_wrappers import BaseGaussianProcessGPy, RBFGPy # noqa: F401
| 40.142857
| 84
| 0.807829
|
dec618ffb1476317c02a49ee40afd094eb8e256e
| 3,535
|
py
|
Python
|
lib_fr/layer_utils/proposal_layer.py
|
chang010453/GRP-HAI
|
60f7c7633e33dbdd852f5df3e0a3d1017b6b2a22
|
[
"MIT"
] | null | null | null |
lib_fr/layer_utils/proposal_layer.py
|
chang010453/GRP-HAI
|
60f7c7633e33dbdd852f5df3e0a3d1017b6b2a22
|
[
"MIT"
] | null | null | null |
lib_fr/layer_utils/proposal_layer.py
|
chang010453/GRP-HAI
|
60f7c7633e33dbdd852f5df3e0a3d1017b6b2a22
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------
# Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
from model.config import cfg
from model.bbox_transform import bbox_transform_inv, clip_boxes, bbox_transform_inv_tf, clip_boxes_tf
from model.nms_wrapper import nms
def proposal_layer(rpn_cls_prob, rpn_bbox_pred, im_info, cfg_key, anchors, num_anchors):
"""A simplified version compared to fast/er RCNN
For details please see the technical report
changed:
rpn_cls_prob: [-1,2]
rpn_bbox_pred: [-1,4]
"""
if type(cfg_key) == bytes:
cfg_key = cfg_key.decode('utf-8')
pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
# Get the scores and bounding boxes
scores = rpn_cls_prob[:, :, :, num_anchors:]
    # this branch works on numpy arrays (ravel/argsort/np.hstack below),
    # so reshape with numpy rather than tf
    scores = scores.reshape((-1, 1))
    rpn_bbox_pred = rpn_bbox_pred.reshape((-1, 4))
# scores = rpn_cls_prob[:, 1]
proposals = bbox_transform_inv(anchors, rpn_bbox_pred)
proposals = clip_boxes(proposals, im_info[:2])
# Pick the top region proposals
order = scores.ravel().argsort()[::-1]
if pre_nms_topN > 0:
order = order[:pre_nms_topN]
proposals = proposals[order, :]
scores = scores[order]
# Non-maximal suppression
keep = nms(np.hstack((proposals, scores)), nms_thresh)
    # Pick the top region proposals after NMS
if post_nms_topN > 0:
keep = keep[:post_nms_topN]
proposals = proposals[keep, :]
scores = scores[keep]
# Only support single image as input
batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
return blob, scores
def proposal_layer_tf(rpn_cls_prob, rpn_bbox_pred, im_info, cfg_key, anchors, num_anchors):
if type(cfg_key) == bytes:
cfg_key = cfg_key.decode('utf-8')
pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
# Get the scores and bounding boxes
scores = rpn_cls_prob[:, :, :, num_anchors:]
scores = tf.reshape(scores, shape=(-1,))
# scores = rpn_cls_prob[:, -1]
rpn_bbox_pred = tf.reshape(rpn_bbox_pred, shape=(-1, 4))
# scores = rpn_cls_prob
proposals = bbox_transform_inv_tf(anchors, rpn_bbox_pred)
proposals = clip_boxes_tf(proposals, im_info[:2])
# 3. get top N to NMS
# if pre_nms_topN > 0:
# pre_nms_topN = tf.minimum(pre_nms_topN, tf.shape(proposals)[0], name='avoid_unenough_boxes')
# scores, top_k_indices = tf.nn.top_k(scores, k=pre_nms_topN)
# proposals = tf.gather(proposals, top_k_indices)
# Non-maximal suppression
indices = tf.image.non_max_suppression(proposals, scores, max_output_size=post_nms_topN, iou_threshold=nms_thresh)
boxes = tf.gather(proposals, indices)
boxes = tf.to_float(boxes)
scores = tf.gather(scores, indices)
scores = tf.reshape(scores, shape=(-1, 1))
# Only support single image as input
batch_inds = tf.zeros((tf.shape(indices)[0], 1), dtype=tf.float32)
blob = tf.concat([batch_inds, boxes], 1)
return blob, scores
| 35.707071
| 118
| 0.676945
|
3d61b62e248d294ddc7be762a7e667fad0d95bb1
| 4,129
|
py
|
Python
|
htdfsdk/web3/providers/websocket.py
|
youngqqcn/htdfsdk
|
c22f213a967c8233bb6ccfb01bf148112efd44db
|
[
"MIT"
] | 2
|
2021-01-21T01:46:29.000Z
|
2021-03-12T05:59:19.000Z
|
htdfsdk/web3/providers/websocket.py
|
youngqqcn/htdfsdk
|
c22f213a967c8233bb6ccfb01bf148112efd44db
|
[
"MIT"
] | null | null | null |
htdfsdk/web3/providers/websocket.py
|
youngqqcn/htdfsdk
|
c22f213a967c8233bb6ccfb01bf148112efd44db
|
[
"MIT"
] | null | null | null |
import asyncio
import json
import logging
import os
from threading import (
Thread,
)
from types import (
TracebackType,
)
from typing import (
Any,
Optional,
Type,
Union,
)
from eth_typing import (
URI,
)
import websockets
from htdfsdk.web3.exceptions import (
ValidationError,
)
from htdfsdk.web3.providers.base import (
JSONBaseProvider,
)
from htdfsdk.web3.types import (
RPCEndpoint,
RPCResponse,
)
RESTRICTED_WEBSOCKET_KWARGS = {'uri', 'loop'}
DEFAULT_WEBSOCKET_TIMEOUT = 10
def _start_event_loop(loop: asyncio.AbstractEventLoop) -> None:
asyncio.set_event_loop(loop)
loop.run_forever()
loop.close()
def _get_threaded_loop() -> asyncio.AbstractEventLoop:
new_loop = asyncio.new_event_loop()
thread_loop = Thread(target=_start_event_loop, args=(new_loop,), daemon=True)
thread_loop.start()
return new_loop
def get_default_endpoint() -> URI:
return URI(os.environ.get('WEB3_WS_PROVIDER_URI', 'ws://127.0.0.1:8546'))
class PersistentWebSocket:
def __init__(
self, endpoint_uri: URI, loop: asyncio.AbstractEventLoop, websocket_kwargs: Any
) -> None:
self.ws: websockets.WebSocketClientProtocol = None
self.endpoint_uri = endpoint_uri
self.loop = loop
self.websocket_kwargs = websocket_kwargs
async def __aenter__(self) -> websockets.WebSocketClientProtocol:
if self.ws is None:
self.ws = await websockets.connect(
uri=self.endpoint_uri, loop=self.loop, **self.websocket_kwargs
)
return self.ws
async def __aexit__(
self, exc_type: Type[BaseException], exc_val: BaseException, exc_tb: TracebackType
) -> None:
if exc_val is not None:
try:
await self.ws.close()
except Exception:
pass
self.ws = None
class WebsocketProvider(JSONBaseProvider):
logger = logging.getLogger("web3.providers.WebsocketProvider")
_loop = None
def __init__(
self,
endpoint_uri: Optional[Union[URI, str]] = None,
websocket_kwargs: Optional[Any] = None,
websocket_timeout: int = DEFAULT_WEBSOCKET_TIMEOUT,
) -> None:
self.endpoint_uri = URI(endpoint_uri)
self.websocket_timeout = websocket_timeout
if self.endpoint_uri is None:
self.endpoint_uri = get_default_endpoint()
if WebsocketProvider._loop is None:
WebsocketProvider._loop = _get_threaded_loop()
if websocket_kwargs is None:
websocket_kwargs = {}
else:
found_restricted_keys = set(websocket_kwargs.keys()).intersection(
RESTRICTED_WEBSOCKET_KWARGS
)
if found_restricted_keys:
raise ValidationError(
'{0} are not allowed in websocket_kwargs, '
'found: {1}'.format(RESTRICTED_WEBSOCKET_KWARGS, found_restricted_keys)
)
self.conn = PersistentWebSocket(
self.endpoint_uri, WebsocketProvider._loop, websocket_kwargs
)
super().__init__()
def __str__(self) -> str:
return "WS connection {0}".format(self.endpoint_uri)
async def coro_make_request(self, request_data: bytes) -> RPCResponse:
async with self.conn as conn:
await asyncio.wait_for(
conn.send(request_data),
timeout=self.websocket_timeout
)
return json.loads(
await asyncio.wait_for(
conn.recv(),
timeout=self.websocket_timeout
)
)
def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:
self.logger.debug("Making request WebSocket. URI: %s, "
"Method: %s", self.endpoint_uri, method)
request_data = self.encode_rpc_request(method, params)
future = asyncio.run_coroutine_threadsafe(
self.coro_make_request(request_data),
WebsocketProvider._loop
)
return future.result()
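# A minimal usage sketch, assuming a node is listening on the given endpoint
# (the URI and RPC method below are illustrative):
#
#   provider = WebsocketProvider("ws://127.0.0.1:8546")
#   provider.make_request(RPCEndpoint("web3_clientVersion"), [])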
| 29.492857
| 91
| 0.630903
|
60aff374220c9cf3d3d6f604a8747bf3cfbdf8c4
| 1,460
|
py
|
Python
|
setup.py
|
koustuvsinha/bert_score
|
29a20b7393d5bbff8c5830c19670bfb263a011ba
|
[
"MIT"
] | null | null | null |
setup.py
|
koustuvsinha/bert_score
|
29a20b7393d5bbff8c5830c19670bfb263a011ba
|
[
"MIT"
] | null | null | null |
setup.py
|
koustuvsinha/bert_score
|
29a20b7393d5bbff8c5830c19670bfb263a011ba
|
[
"MIT"
] | null | null | null |
from io import open
from setuptools import find_packages, setup
setup(
name="bert_score",
version='0.3.8',
author="Tianyi Zhang*, Varsha Kishore*, Felix Wu*, Kilian Q. Weinberger, and Yoav Artzi",
author_email="tzhang@asapp.com",
description="PyTorch implementation of BERT score",
long_description=open("README.md", "r", encoding='utf-8').read(),
long_description_content_type="text/markdown",
keywords='BERT NLP deep learning google metric',
license='MIT',
url="https://github.com/Tiiiger/bert_score",
packages=find_packages(exclude=["*.tests", "*.tests.*",
"tests.*", "tests"]),
install_requires=['torch>=1.0.0',
'numpy',
'pandas>=1.0.1',
'requests',
'tqdm>=4.31.1',
'matplotlib',
'transformers>=3.0.0'
],
entry_points={
'console_scripts': [
"bert-score=bert_score_cli.score:main",
"bert-score-show=bert_score_cli.visualize:main",
]
},
include_package_data=True,
python_requires='>=3.6',
tests_require=['pytest'],
classifiers=[
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
)
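# Note: the console_scripts entries above mean that, once the package is
# installed (for example with `pip install .`), `bert-score` and
# `bert-score-show` become available as commands; their flags are defined in
# bert_score_cli.score and bert_score_cli.visualize, which are not shown here.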
| 34.761905
| 93
| 0.559589
|
ffcc250a6da76622d23bcf520543ce2a2b346ce2
| 4,407
|
py
|
Python
|
calc.py
|
mourice-oduor/Python-Calculator
|
4550b272c281d95c6be6efacc0988d4e459eb0f7
|
[
"MIT"
] | null | null | null |
calc.py
|
mourice-oduor/Python-Calculator
|
4550b272c281d95c6be6efacc0988d4e459eb0f7
|
[
"MIT"
] | null | null | null |
calc.py
|
mourice-oduor/Python-Calculator
|
4550b272c281d95c6be6efacc0988d4e459eb0f7
|
[
"MIT"
] | null | null | null |
from tkinter import *
import tkinter as tk
import os
import re #(Regular expression)
class Calculator:
def __init__(self, cal):
self.cal = cal
cal.title('Simple Python Calculator')
#cal.iconbitmap(r'/home/net/MORYSO/PYTHON/Game-Dev/games-todo/16. Calculator/calculator.icon')
#cal.iconbitmap('calculator.icon')
dark_grey = '#141414'
med_grey = '#212121'
cus_red = '#c41212'
self.screen = Text(cal, background=dark_grey, font=('Helvetica', 32), height=1, state='disabled',
foreground='white', bd=0, pady=50, padx=5, selectbackground=dark_grey, inactiveselectbackground=dark_grey)
for x in range(1,5):
self.cal.columnconfigure(x, weight=1)
self.cal.rowconfigure(x, weight=1)
self.screen.grid(row=0, column=0, columnspan=5, sticky=W+E+N+S)
self.screen.configure(state='normal')
self.equation = ''
self.cal.geometry('500x600') #Default size to open into
btn1 = self.createButton(7)
btn2 = self.createButton(8)
btn3 = self.createButton(9)
btn4 = self.createButton(u"\u00F7", bg=med_grey)
btn5 = self.createButton(4)
btn6 = self.createButton(5)
btn7 = self.createButton(6)
btn8 = self.createButton(u"\u00D7", bg=med_grey)
btn9 = self.createButton(1)
btn10 = self.createButton(2)
btn11 = self.createButton(3)
btn12 = self.createButton('-', bg=med_grey)
btn13 = self.createButton(',')
btn14 = self.createButton(0)
btn15 = self.createButton(None)
btn16 = self.createButton('+', bg=med_grey)
btn17 = self.createButton('DEL', None, bg=med_grey)
btn18 = self.createButton('CLR', None, bg=med_grey)
btn19 = self.createButton('=', None, bg=cus_red)
btn15.config(state='disabled')
buttons = [btn1,btn2,btn3,btn4,btn5,btn6,btn7,btn8,btn9,btn10,btn11,btn12,btn13,btn14,btn15,btn16,btn17,btn18,btn19]
count = 0
for row in range(1,5):
for col in range(4):
buttons[count].grid(row=row, column=col,sticky=W+E+N+S)
count+=1
buttons[16].grid(row=1, column=4, rowspan=1, sticky=W+E+N+S)
buttons[17].grid(row=2, column=4, rowspan=2, sticky=W+E+N+S)
buttons[18].grid(row=4, column=4, rowspan=1, sticky=W+E+N+S)
def createButton(self, val, write=True, width=5, bg='black'):
return Button(self.cal, text=val, command=lambda:self.click(val, write), width=width, bg=bg, bd=0, fg='white',
font=('Helvetica',24))
def click(self, text, write):
if write == None:
if text == '=' and self.equation:
self.equation = re.sub(u'\u00F7', '/', self.equation)
self.equation = re.sub(u'\u00D7', '*', self.equation)
print(self.equation)
answer = str(eval(self.equation))
self.clear_screen()
self.insert_screen(answer,newline=True)
elif text == "CLR":
self.clear_screen()
elif text == 'DEL':
self.del_screen()
else:
# add text to screen
self.insert_screen(text)
def clear_screen(self):
#to clear screen
#set equation to empty before deleting screen
self.equation = ''
self.screen.configure(state='normal')
self.screen.delete(1.0, END)
self.screen.configure(state='disabled')
def del_screen(self):
#to clear screen
#set equation to empty before deleting screen
self.equation = self.equation[:-1]
self.screen.configure(state='normal')
text = self.screen.get("1.0",END)[:-2]
self.screen.tag_config('val',justify=RIGHT)
self.screen.delete(1.0, END)
self.screen.insert(END,text, 'val')
self.screen.configure(state='disabled')
def insert_screen(self, value,newline=False):
self.screen.configure(state='normal')
self.screen.tag_config('val',justify=RIGHT)
self.screen.insert(END,str(value),'val')
# record every value inserted in screen
self.equation += str(value)
self.screen.configure(state ='disabled')
if __name__ == '__main__':
    print('My Calculator')
    calc = Tk()
    my_gui = Calculator(calc)
    calc.mainloop()
| 39.348214
| 133
| 0.597005
|