Dataset schema (one row per source file; "nullable" corresponds to the ⌀ marker in the original header):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64, nullable | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64, nullable | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64, nullable | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
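Rows with this schema are easiest to inspect programmatically rather than in the flattened dump below. A minimal sketch using the Hugging Face `datasets` library follows; the dataset identifier is a placeholder, since this page does not name the dataset:

```python
# Minimal sketch: stream rows that follow the schema above.
# DATASET_NAME is a hypothetical placeholder, not taken from this page.
from datasets import load_dataset

DATASET_NAME = "some-org/python-source-files"

ds = load_dataset(DATASET_NAME, split="train", streaming=True)
for row in ds:
    # Each row carries repo metadata plus the raw file text in `content`.
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
    if (row["max_stars_count"] or 0) > 1000:
        print(row["content"][:200])  # preview well-starred files
    break
```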

| field | value |
|---|---|
| hexsha | 80e527eafecf513622481ed24659be5b09078b98 |
| size | 29 |
| ext | py |
| lang | Python |
| repo_path (stars / issues / forks) | python-lib/automat/__init__.py |
| repo_name | soccasys/automat-tests |
| repo_head_hexsha | 9ab4cfbd79e362f49e2a352f1610d2786b8e2985 |
| repo_licenses | ["BSD-3-Clause"] |
| max_stars_count | null (event datetimes: null) |
| max_issues_count | null (event datetimes: null) |
| max_forks_count | null (event datetimes: null) |

content:

from automat.client import *

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 14.5 | 28 | 0.793103 |

| field | value |
|---|---|
| hexsha | 8cfffd0ce80e018c1f55fe577f643b1ff43f7654 |
| size | 229 |
| ext | py |
| lang | Python |
| repo_path (stars / issues / forks) | applications/splitThePhoneNumbers.py |
| repo_name | silvioedu/HackerRank-Regex-Practice |
| repo_head_hexsha | dc2f2719385626375eb719b38baf6315ff23cad4 |
| repo_licenses | ["MIT"] |
| max_stars_count | null (event datetimes: null) |
| max_issues_count | null (event datetimes: null) |
| max_forks_count | null (event datetimes: null) |

content:

import re
if __name__ == '__main__':
regex = r"\d+"
for _ in range(int(input())):
match = re.findall(regex, input())
print("CountryCode={},LocalAreaCode={},Number={}".format(match[0], match[1], match[2]))

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 32.714286 | 95 | 0.58952 |
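The solution above pulls the three digit groups out of each line with `re.findall(r"\d+", ...)`. As an alternative sketch (not taken from the repository above), a single anchored pattern with named groups also validates that exactly three groups separated by a space or hyphen are present:

```python
import re

# Hypothetical variant of the snippet above; separators may be spaces or hyphens.
PHONE = re.compile(r"^(?P<country>\d{1,3})[ -](?P<area>\d{1,10})[ -](?P<number>\d{4,10})$")

for line in ["1 877 2638277", "91-011-23413627"]:
    m = PHONE.match(line)
    if m:
        print("CountryCode={country},LocalAreaCode={area},Number={number}".format(**m.groupdict()))
```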

| field | value |
|---|---|
| hexsha | a45eb10d1cdba0b74551aa0508b1943fddb1578a |
| size | 30,414 |
| ext | py |
| lang | Python |
| repo_path (stars / issues / forks) | airflow/sensors/smart_sensor.py |
| repo_name | npodewitz/airflow |
| repo_head_hexsha | 511ea702d5f732582d018dad79754b54d5e53f9d |
| repo_licenses | ["Apache-2.0"] |
| max_stars_count | 8,092 (events 2016-04-27T20:32:29.000Z to 2019-01-05T07:39:33.000Z) |
| max_issues_count | 2,961 (events 2016-05-05T07:16:16.000Z to 2019-01-05T08:47:59.000Z) |
| max_forks_count | 3,546 (events 2016-05-04T20:33:16.000Z to 2019-01-05T05:14:26.000Z) |

content:

#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import json
import logging
import traceback
from logging.config import DictConfigurator # type: ignore
from time import sleep
from sqlalchemy import and_, or_, tuple_
from airflow.compat.functools import cached_property
from airflow.exceptions import AirflowException, AirflowTaskTimeout
from airflow.models import BaseOperator, DagRun, SensorInstance, SkipMixin, TaskInstance
from airflow.settings import LOGGING_CLASS_PATH
from airflow.stats import Stats
from airflow.utils import helpers, timezone
from airflow.utils.context import Context
from airflow.utils.email import send_email
from airflow.utils.log.logging_mixin import set_context
from airflow.utils.module_loading import import_string
from airflow.utils.net import get_hostname
from airflow.utils.session import provide_session
from airflow.utils.state import PokeState, State
from airflow.utils.timeout import timeout
config = import_string(LOGGING_CLASS_PATH)
handler_config = config['handlers']['task']
try:
formatter_config = config['formatters'][handler_config['formatter']]
except Exception as err:
formatter_config = None
print(err)
dictConfigurator = DictConfigurator(config)
class SensorWork:
"""
This class stores a sensor work with decoded context value. It is only used
inside of smart sensor. Create a sensor work based on sensor instance record.
A sensor work object has the following attributes:
`dag_id`: sensor_instance dag_id.
`task_id`: sensor_instance task_id.
`execution_date`: sensor_instance execution_date.
`try_number`: sensor_instance try_number
`poke_context`: Decoded poke_context for the sensor task.
`execution_context`: Decoded execution_context.
`hashcode`: This is the signature of poking job.
`operator`: The sensor operator class.
`op_classpath`: The sensor operator class path
`encoded_poke_context`: The raw data from sensor_instance poke_context column.
`log`: The sensor work logger which will mock the corresponding task instance log.
:param si: The sensor_instance ORM object.
"""
def __init__(self, si):
self.dag_id = si.dag_id
self.task_id = si.task_id
self.execution_date = si.execution_date
self.try_number = si.try_number
self.poke_context = json.loads(si.poke_context) if si.poke_context else {}
self.execution_context = json.loads(si.execution_context) if si.execution_context else {}
self.hashcode = si.hashcode
self.start_date = si.start_date
self.operator = si.operator
self.op_classpath = si.op_classpath
self.encoded_poke_context = si.poke_context
self.si = si
def __eq__(self, other):
if not isinstance(other, SensorWork):
return NotImplemented
return (
self.dag_id == other.dag_id
and self.task_id == other.task_id
and self.execution_date == other.execution_date
and self.try_number == other.try_number
)
@staticmethod
def create_new_task_handler():
"""
Create task log handler for a sensor work.
:return: log handler
"""
from airflow.utils.log.secrets_masker import _secrets_masker
handler_config_copy = {k: handler_config[k] for k in handler_config}
del handler_config_copy['filters']
formatter_config_copy = {k: formatter_config[k] for k in formatter_config}
handler = dictConfigurator.configure_handler(handler_config_copy)
formatter = dictConfigurator.configure_formatter(formatter_config_copy)
handler.setFormatter(formatter)
# We want to share the _global_ filterer instance, not create a new one
handler.addFilter(_secrets_masker())
return handler
@cached_property
def log(self):
"""Return logger for a sensor instance object."""
# The created log_id is used inside of smart sensor as the key to fetch
# the corresponding in memory log handler.
si = self.si
si.raw = False # Otherwise set_context will fail
log_id = "-".join(
[si.dag_id, si.task_id, si.execution_date.strftime("%Y_%m_%dT%H_%M_%S_%f"), str(si.try_number)]
)
logger = logging.getLogger(f'airflow.task.{log_id}')
if len(logger.handlers) == 0:
handler = self.create_new_task_handler()
logger.addHandler(handler)
set_context(logger, si)
line_break = "-" * 120
logger.info(line_break)
logger.info(
"Processing sensor task %s in smart sensor service on host: %s", self.ti_key, get_hostname()
)
logger.info(line_break)
return logger
def close_sensor_logger(self):
"""Close log handler for a sensor work."""
for handler in self.log.handlers:
try:
handler.close()
except Exception as e:
print(e)
@property
def ti_key(self):
"""Key for the task instance that maps to the sensor work."""
return self.dag_id, self.task_id, self.execution_date
@property
def cache_key(self):
"""Key used to query in smart sensor for cached sensor work."""
return self.operator, self.encoded_poke_context
class CachedPokeWork:
"""
Wrapper class for the poke work inside smart sensor. It saves
the sensor_task used to poke and recent poke result state.
state: poke state.
sensor_task: The cached object for executing the poke function.
last_poke_time: The latest time this cached work was called.
to_flush: If we should flush the cached work.
"""
def __init__(self):
self.state = None
self.sensor_task = None
self.last_poke_time = None
self.to_flush = False
def set_state(self, state):
"""
Set state for cached poke work.
:param state: The sensor_instance state.
"""
self.state = state
self.last_poke_time = timezone.utcnow()
def clear_state(self):
"""Clear state for cached poke work."""
self.state = None
def set_to_flush(self):
"""Mark this poke work to be popped from cached dict after current loop."""
self.to_flush = True
def is_expired(self):
"""
The cached task object expires if there is no poke for 20 minutes.
:return: Boolean
"""
return self.to_flush or (timezone.utcnow() - self.last_poke_time).total_seconds() > 1200
class SensorExceptionInfo:
"""
Hold sensor exception information and the type of exception. For a possible transient
infra failure, give the task more chances to retry before failing it.
"""
def __init__(
self,
exception_info,
is_infra_failure=False,
infra_failure_retry_window=datetime.timedelta(minutes=130),
):
self._exception_info = exception_info
self._is_infra_failure = is_infra_failure
self._infra_failure_retry_window = infra_failure_retry_window
self._infra_failure_timeout = None
self.set_infra_failure_timeout()
self.fail_current_run = self.should_fail_current_run()
def set_latest_exception(self, exception_info, is_infra_failure=False):
"""
Set the latest exception information for this sensor exception. If the exception
implies an infra failure, check the infra failure timeout that was recorded when the
first infra failure exception arrived; within that retry window the task keeps
retrying without failing the current run.
:param exception_info: Details of the exception information.
:param is_infra_failure: If the current exception was caused by a transient infra failure.
There is a retry window _infra_failure_retry_window during which the smart sensor will
retry the poke function without failing the current task run.
"""
self._exception_info = exception_info
self._is_infra_failure = is_infra_failure
self.set_infra_failure_timeout()
self.fail_current_run = self.should_fail_current_run()
def set_infra_failure_timeout(self):
"""
Set the point in time at which the sensor should be failed if it keeps getting
infra failures.
"""
# Only set the infra_failure_timeout if there is no existing one
if not self._is_infra_failure:
self._infra_failure_timeout = None
elif self._infra_failure_timeout is None:
self._infra_failure_timeout = timezone.utcnow() + self._infra_failure_retry_window
def should_fail_current_run(self):
""":return: Should the sensor fail"""
return not self.is_infra_failure or timezone.utcnow() > self._infra_failure_timeout
@property
def exception_info(self):
""":return: exception msg."""
return self._exception_info
@property
def is_infra_failure(self):
""":return: If the exception is an infra failure"""
return self._is_infra_failure
def is_expired(self):
""":return: If current exception need to be kept."""
if not self._is_infra_failure:
return True
return timezone.utcnow() > self._infra_failure_timeout + datetime.timedelta(minutes=30)
class SmartSensorOperator(BaseOperator, SkipMixin):
"""
Smart sensor operators are derived from this class.
A smart sensor operator keeps a dictionary of qualified active sensor tasks refreshed
from the DB. Unlike a regular sensor operator, it pokes all sensor tasks in the
dictionary at a time interval. When a criterion is met, or a task fails by timing out,
it updates the state of all matching sensor tasks in the task_instance table.
:param soft_fail: Set to true to mark the task as SKIPPED on failure
:param poke_interval: Time in seconds that the job should wait in
between each try.
:param smart_sensor_timeout: Time, in seconds before the internal sensor
job times out if poke_timeout is not defined.
:param shard_min: shard code lower bound (inclusive)
:param shard_max: shard code upper bound (exclusive)
:param poke_timeout: Time, in seconds before the task times out and fails.
"""
ui_color = '#e6f1f2'
def __init__(
self,
poke_interval=180,
smart_sensor_timeout=60 * 60 * 24 * 7,
soft_fail=False,
shard_min=0,
shard_max=100000,
poke_timeout=6.0,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
# super(SmartSensorOperator, self).__init__(*args, **kwargs)
self.poke_interval = poke_interval
self.soft_fail = soft_fail
self.timeout = smart_sensor_timeout
self._validate_input_values()
self.hostname = ""
self.sensor_works = []
self.cached_dedup_works = {}
self.cached_sensor_exceptions = {}
self.max_tis_per_query = 50
self.shard_min = shard_min
self.shard_max = shard_max
self.poke_timeout = poke_timeout
def _validate_input_values(self):
if not isinstance(self.poke_interval, (int, float)) or self.poke_interval < 0:
raise AirflowException("The poke_interval must be a non-negative number")
if not isinstance(self.timeout, (int, float)) or self.timeout < 0:
raise AirflowException("The timeout must be a non-negative number")
@provide_session
def _load_sensor_works(self, session=None):
"""
Refresh the sensor instances that need to be handled by this operator. Create smart
sensor internal objects based on the information persisted in the sensor_instance table.
"""
SI = SensorInstance
with Stats.timer() as timer:
query = (
session.query(SI)
.filter(SI.state == State.SENSING)
.filter(SI.shardcode < self.shard_max, SI.shardcode >= self.shard_min)
)
tis = query.all()
self.log.info("Performance query %s tis, time: %.3f", len(tis), timer.duration)
# Querying without checking the dagrun state might keep some failed dag_run tasks alive.
# Joining with the DagRun table would be very slow given the number of sensor tasks we
# need to handle, so we query all smart tasks in this operator
# and expect the scheduler to correct the states in _change_state_for_tis_without_dagrun()
sensor_works = []
for ti in tis:
try:
sensor_works.append(SensorWork(ti))
except Exception:
self.log.exception("Exception at creating sensor work for ti %s", ti.key)
self.log.info("%d tasks detected.", len(sensor_works))
new_sensor_works = [x for x in sensor_works if x not in self.sensor_works]
self._update_ti_hostname(new_sensor_works)
self.sensor_works = sensor_works
@provide_session
def _update_ti_hostname(self, sensor_works, session=None):
"""
Update task instance hostname for new sensor works.
:param sensor_works: Smart sensor internal object for a sensor task.
:param session: The sqlalchemy session.
"""
DR = DagRun
TI = TaskInstance
def update_ti_hostname_with_count(count, sensor_works):
# Using or_ instead of in_ here to prevent from full table scan.
if session.bind.dialect.name == 'mssql':
ti_filter = or_(
and_(
TI.dag_id == ti_key.dag_id,
TI.task_id == ti_key.task_id,
DR.execution_date == ti_key.execution_date,
)
for ti_key in sensor_works
)
else:
ti_keys = [(x.dag_id, x.task_id, x.execution_date) for x in sensor_works]
ti_filter = or_(
tuple_(TI.dag_id, TI.task_id, DR.execution_date) == ti_key for ti_key in ti_keys
)
for ti in session.query(TI).join(TI.dag_run).filter(ti_filter):
ti.hostname = self.hostname
session.commit()
return count + len(sensor_works)
count = helpers.reduce_in_chunks(
update_ti_hostname_with_count, sensor_works, 0, self.max_tis_per_query
)
if count:
self.log.info("Updated hostname on %s tis.", count)
@provide_session
def _mark_multi_state(self, operator, poke_hash, encoded_poke_context, state, session=None):
"""
Mark state for multiple tasks in the task_instance table to a new state if they have
the same signature as the poke_hash.
:param operator: The sensor's operator class name.
:param poke_hash: The hash code generated from sensor's poke context.
:param encoded_poke_context: The raw encoded poke_context.
:param state: Set multiple sensor tasks to this state.
:param session: The sqlalchemy session.
"""
def mark_state(ti, sensor_instance):
ti.state = state
sensor_instance.state = state
if state in State.finished:
ti.end_date = end_date
ti.set_duration()
SI = SensorInstance
TI = TaskInstance
count_marked = 0
query_result = []
try:
query_result = (
session.query(TI, SI)
.join(
TI,
and_(
TI.dag_id == SI.dag_id,
TI.task_id == SI.task_id,
TI.execution_date == SI.execution_date,
),
)
.filter(SI.state == State.SENSING)
.filter(SI.hashcode == poke_hash)
.filter(SI.operator == operator)
.with_for_update()
.all()
)
end_date = timezone.utcnow()
for ti, sensor_instance in query_result:
if sensor_instance.poke_context != encoded_poke_context:
continue
ti.hostname = self.hostname
if ti.state == State.SENSING:
mark_state(ti=ti, sensor_instance=sensor_instance)
count_marked += 1
else:
# ti.state != State.SENSING
sensor_instance.state = ti.state
session.commit()
except Exception:
self.log.warning(
"Exception _mark_multi_state in smart sensor for hashcode %s",
str(poke_hash), # cast to str in advance for highlighting
exc_info=True,
)
self.log.info("Marked %s tasks out of %s to state %s", count_marked, len(query_result), state)
@provide_session
def _retry_or_fail_task(self, sensor_work, error, session=None):
"""
Change the state of a single sensor task. For a final state, also set the end_date.
Since the smart sensor handles all retries in one process, a failed sensor task has
logically experienced all retries, so its try_number should be set to max_tries.
:param sensor_work: The sensor_work with exception.
:param error: The error message for this sensor_work.
:param session: The sqlalchemy session.
"""
def email_alert(task_instance, error_info):
try:
subject, html_content, _ = task_instance.get_email_subject_content(error_info)
email = sensor_work.execution_context.get('email')
send_email(email, subject, html_content)
except Exception:
sensor_work.log.warning("Exception alerting email.", exc_info=True)
def handle_failure(sensor_work, ti):
if sensor_work.execution_context.get('retries') and ti.try_number <= ti.max_tries:
# retry
ti.state = State.UP_FOR_RETRY
if sensor_work.execution_context.get('email_on_retry') and sensor_work.execution_context.get(
'email'
):
sensor_work.log.info("%s sending email alert for retry", sensor_work.ti_key)
email_alert(ti, error)
else:
ti.state = State.FAILED
if sensor_work.execution_context.get(
'email_on_failure'
) and sensor_work.execution_context.get('email'):
sensor_work.log.info("%s sending email alert for failure", sensor_work.ti_key)
email_alert(ti, error)
try:
dag_id, task_id, execution_date = sensor_work.ti_key
TI = TaskInstance
SI = SensorInstance
sensor_instance = (
session.query(SI)
.filter(SI.dag_id == dag_id, SI.task_id == task_id, SI.execution_date == execution_date)
.with_for_update()
.first()
)
if sensor_instance.hashcode != sensor_work.hashcode:
# Return without setting state
return
ti = (
session.query(TI)
.filter(TI.dag_id == dag_id, TI.task_id == task_id, TI.execution_date == execution_date)
.with_for_update()
.first()
)
if ti:
if ti.state == State.SENSING:
ti.hostname = self.hostname
handle_failure(sensor_work, ti)
sensor_instance.state = State.FAILED
ti.end_date = timezone.utcnow()
ti.set_duration()
else:
sensor_instance.state = ti.state
session.merge(sensor_instance)
session.merge(ti)
session.commit()
sensor_work.log.info(
"Task %s got an error: %s. Set the state to failed. Exit.", str(sensor_work.ti_key), error
)
sensor_work.close_sensor_logger()
except AirflowException:
sensor_work.log.warning("Exception on failing %s", sensor_work.ti_key, exc_info=True)
def _check_and_handle_ti_timeout(self, sensor_work):
"""
Check whether a sensor task in the smart sensor has timed out. This could be either
the sensor operator timeout or the general operator execution_timeout.
:param sensor_work: SensorWork
"""
task_timeout = sensor_work.execution_context.get('timeout', self.timeout)
task_execution_timeout = sensor_work.execution_context.get('execution_timeout')
if task_execution_timeout:
task_timeout = min(task_timeout, task_execution_timeout)
if (timezone.utcnow() - sensor_work.start_date).total_seconds() > task_timeout:
error = "Sensor Timeout"
sensor_work.log.exception(error)
self._retry_or_fail_task(sensor_work, error)
def _handle_poke_exception(self, sensor_work):
"""
Fail the task if the accumulated exceptions exceed its retries.
:param sensor_work: SensorWork
"""
sensor_exception = self.cached_sensor_exceptions.get(sensor_work.cache_key)
error = sensor_exception.exception_info
sensor_work.log.exception("Handling poke exception: %s", error)
if sensor_exception.fail_current_run:
if sensor_exception.is_infra_failure:
sensor_work.log.exception(
"Task %s failed by infra failure in smart sensor.", sensor_work.ti_key
)
# There is a risk for sensor object cached in smart sensor keep throwing
# exception and cause an infra failure. To make sure the sensor tasks after
# retry will not fall into same object and have endless infra failure,
# we mark the sensor task after an infra failure so that it can be popped
# before next poke loop.
cache_key = sensor_work.cache_key
self.cached_dedup_works[cache_key].set_to_flush()
else:
sensor_work.log.exception("Task %s failed by exceptions.", sensor_work.ti_key)
self._retry_or_fail_task(sensor_work, error)
else:
sensor_work.log.info("Exception detected, retrying without failing current run.")
self._check_and_handle_ti_timeout(sensor_work)
def _process_sensor_work_with_cached_state(self, sensor_work, state):
if state == PokeState.LANDED:
sensor_work.log.info("Task %s succeeded", str(sensor_work.ti_key))
sensor_work.close_sensor_logger()
if state == PokeState.NOT_LANDED:
# Handle timeout if connection valid but not landed yet
self._check_and_handle_ti_timeout(sensor_work)
elif state == PokeState.POKE_EXCEPTION:
self._handle_poke_exception(sensor_work)
def _execute_sensor_work(self, sensor_work):
ti_key = sensor_work.ti_key
log = sensor_work.log or self.log
log.info("Sensing ti: %s", str(ti_key))
log.info("Poking with arguments: %s", sensor_work.encoded_poke_context)
cache_key = sensor_work.cache_key
if cache_key not in self.cached_dedup_works:
# create an empty cached_work for a new cache_key
self.cached_dedup_works[cache_key] = CachedPokeWork()
cached_work = self.cached_dedup_works[cache_key]
if cached_work.state is not None:
# Have a valid cached state, don't poke twice in certain time interval
self._process_sensor_work_with_cached_state(sensor_work, cached_work.state)
return
try:
with timeout(seconds=self.poke_timeout):
if self.poke(sensor_work):
# Got a landed signal, mark all tasks waiting for this partition
cached_work.set_state(PokeState.LANDED)
self._mark_multi_state(
sensor_work.operator,
sensor_work.hashcode,
sensor_work.encoded_poke_context,
State.SUCCESS,
)
log.info("Task %s succeeded", str(ti_key))
sensor_work.close_sensor_logger()
else:
# Not landed yet. Handle possible timeout
cached_work.set_state(PokeState.NOT_LANDED)
self._check_and_handle_ti_timeout(sensor_work)
self.cached_sensor_exceptions.pop(cache_key, None)
except Exception as e:
# The retry_infra_failure decorator inside hive_hooks will raise exception with
# is_infra_failure == True. Long poking timeout here is also considered an infra
# failure. Other exceptions should fail.
is_infra_failure = getattr(e, 'is_infra_failure', False) or isinstance(e, AirflowTaskTimeout)
exception_info = traceback.format_exc()
cached_work.set_state(PokeState.POKE_EXCEPTION)
if cache_key in self.cached_sensor_exceptions:
self.cached_sensor_exceptions[cache_key].set_latest_exception(
exception_info, is_infra_failure=is_infra_failure
)
else:
self.cached_sensor_exceptions[cache_key] = SensorExceptionInfo(
exception_info, is_infra_failure=is_infra_failure
)
self._handle_poke_exception(sensor_work)
def flush_cached_sensor_poke_results(self):
"""Flush outdated cached sensor states saved in previous loop."""
for key, cached_work in self.cached_dedup_works.copy().items():
if cached_work.is_expired():
self.cached_dedup_works.pop(key, None)
else:
cached_work.state = None
for ti_key, sensor_exception in self.cached_sensor_exceptions.copy().items():
if sensor_exception.fail_current_run or sensor_exception.is_expired():
self.cached_sensor_exceptions.pop(ti_key, None)
def poke(self, sensor_work):
"""
Execute the poke for a sensor work by instantiating (and caching) the underlying
sensor operator and calling its poke() with the decoded poke_context.
"""
cached_work = self.cached_dedup_works[sensor_work.cache_key]
if not cached_work.sensor_task:
init_args = dict(list(sensor_work.poke_context.items()) + [('task_id', sensor_work.task_id)])
operator_class = import_string(sensor_work.op_classpath)
cached_work.sensor_task = operator_class(**init_args)
return cached_work.sensor_task.poke(sensor_work.poke_context)
def _emit_loop_stats(self):
try:
count_poke = 0
count_poke_success = 0
count_poke_exception = 0
count_exception_failures = 0
count_infra_failure = 0
for cached_work in self.cached_dedup_works.values():
if cached_work.state is None:
continue
count_poke += 1
if cached_work.state == PokeState.LANDED:
count_poke_success += 1
elif cached_work.state == PokeState.POKE_EXCEPTION:
count_poke_exception += 1
for cached_exception in self.cached_sensor_exceptions.values():
if cached_exception.is_infra_failure and cached_exception.fail_current_run:
count_infra_failure += 1
if cached_exception.fail_current_run:
count_exception_failures += 1
Stats.gauge("smart_sensor_operator.poked_tasks", count_poke)
Stats.gauge("smart_sensor_operator.poked_success", count_poke_success)
Stats.gauge("smart_sensor_operator.poked_exception", count_poke_exception)
Stats.gauge("smart_sensor_operator.exception_failures", count_exception_failures)
Stats.gauge("smart_sensor_operator.infra_failures", count_infra_failure)
except Exception:
self.log.exception("Exception at getting loop stats")
def execute(self, context: Context):
started_at = timezone.utcnow()
self.hostname = get_hostname()
while True:
poke_start_time = timezone.utcnow()
self.flush_cached_sensor_poke_results()
self._load_sensor_works()
self.log.info("Loaded %s sensor_works", len(self.sensor_works))
Stats.gauge("smart_sensor_operator.loaded_tasks", len(self.sensor_works))
for sensor_work in self.sensor_works:
self._execute_sensor_work(sensor_work)
duration = (timezone.utcnow() - poke_start_time).total_seconds()
self.log.info("Taking %s to execute %s tasks.", duration, len(self.sensor_works))
Stats.timing("smart_sensor_operator.loop_duration", duration)
Stats.gauge("smart_sensor_operator.executed_tasks", len(self.sensor_works))
self._emit_loop_stats()
if duration < self.poke_interval:
sleep(self.poke_interval - duration)
if (timezone.utcnow() - started_at).total_seconds() > self.timeout:
self.log.info("Time is out for smart sensor.")
return
def on_kill(self):
pass

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 40.177015 | 110 | 0.635102 |
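The file above is Airflow's smart sensor service. `SmartSensorOperator` periodically loads every task instance in the `SENSING` state and deduplicates the work by `cache_key = (operator, encoded_poke_context)`, so a single poke can serve every task waiting on the same condition. Below is a minimal, Airflow-free sketch of that deduplication idea; all names are illustrative:

```python
# Illustrative sketch of the dedup used by SmartSensorOperator (not Airflow code):
# tasks whose (operator, poke_context) match share one poke result per loop.
import json

def poke_partition(poke_context):
    # Stand-in for an expensive check, e.g. "has this Hive partition landed?"
    print("poking", poke_context)
    return poke_context.get("partition") == "ds=2022-01-01"

def run_loop(sensor_tasks):
    cache = {}    # cache_key -> poke result for this loop
    results = {}  # task_id -> result
    for task_id, operator, poke_context in sensor_tasks:
        cache_key = (operator, json.dumps(poke_context, sort_keys=True))
        if cache_key not in cache:
            cache[cache_key] = poke_partition(poke_context)
        results[task_id] = cache[cache_key]
    return results

tasks = [
    ("dag_a.wait_t1", "NamedHivePartitionSensor", {"partition": "ds=2022-01-01"}),
    ("dag_b.wait_t1", "NamedHivePartitionSensor", {"partition": "ds=2022-01-01"}),
    ("dag_c.wait_t2", "NamedHivePartitionSensor", {"partition": "ds=2022-01-02"}),
]
print(run_loop(tasks))  # three tasks answered with only two pokes
```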

| field | value |
|---|---|
| hexsha | 6f820b9982903ee83c2d6ec98f30ce9e18207a92 |
| size | 60 |
| ext | py |
| lang | Python |
| repo_path (stars / issues / forks) | samt/__init__.py |
| repo_name | Killerhaschen/marvin-telegram-bot |
| repo_head_hexsha | c65e890a00450ed6ed4312d93e259db0c080ef6d |
| repo_licenses | ["MIT"] |
| max_stars_count | 1 (events 2020-01-16T08:40:00.000Z to 2020-01-16T08:40:00.000Z) |
| max_issues_count | null (event datetimes: null) |
| max_forks_count | 1 (events 2019-10-16T08:11:51.000Z to 2019-10-16T08:11:51.000Z) |

content:

from .samt import Bot, Answer, logger
from .helper import *

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 20 | 37 | 0.75 |

| field | value |
|---|---|
| hexsha | 0df08b31fff9b1421b7a7673466a6344a9eaa43b |
| size | 1,563 |
| ext | py |
| lang | Python |
| repo_path (stars / issues / forks) | telethon/errors/__init__.py |
| repo_name | fx2hero/Telethon |
| repo_head_hexsha | 2c9d43d60027a0ae4bd37bb80cd5e1020b703cbc |
| repo_licenses | ["MIT"] |
| max_stars_count | 141 (events 2018-10-04T10:02:15.000Z to 2022-03-18T08:47:01.000Z) |
| max_issues_count | 34 (events 2018-10-04T08:28:01.000Z to 2020-11-02T09:36:02.000Z) |
| max_forks_count | 110 (events 2018-10-04T04:28:11.000Z to 2022-03-22T05:49:02.000Z) |

content:

"""
This module holds all the base and automatically generated errors that the
Telegram API has. See telethon_generator/errors.json for more.
"""
import re
from .common import (
ReadCancelledError, TypeNotFoundError, InvalidChecksumError,
InvalidBufferError, SecurityError, CdnFileTamperedError,
AlreadyInConversationError, BadMessageError, MultiError
)
# This imports the base errors too, as they're imported there
from .rpcbaseerrors import *
from .rpcerrorlist import *
def rpc_message_to_error(rpc_error, request):
"""
Converts a Telegram's RPC Error to a Python error.
:param rpc_error: the RpcError instance.
:param request: the request that caused this error.
:return: the RPCError as a Python exception that represents this error.
"""
# Try to get the error by direct look-up, otherwise regex
cls = rpc_errors_dict.get(rpc_error.error_message, None)
if cls:
return cls(request=request)
for msg_regex, cls in rpc_errors_re:
m = re.match(msg_regex, rpc_error.error_message)
if m:
capture = int(m.group(1)) if m.groups() else None
return cls(request=request, capture=capture)
# Some errors are negative:
# * -500 for "No workers running",
# * -503 for "Timeout"
#
# We treat them as if they were positive, so -500 will be treated
# as a `ServerError`, etc.
cls = base_errors.get(abs(rpc_error.error_code), RPCError)
return cls(request=request, message=rpc_error.error_message,
code=rpc_error.error_code)

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 33.978261 | 75 | 0.707614 |
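`rpc_message_to_error` above resolves an error class by exact lookup in `rpc_errors_dict`, then falls back to regex patterns whose capture group (if any) is passed to the matching class. A standalone sketch of that dispatch pattern follows; the error classes and tables here are hypothetical stand-ins, not Telethon's generated lists:

```python
import re

# Hypothetical error classes and tables; only the dispatch pattern mirrors the code above.
class RPCError(Exception): ...

class PhoneCodeInvalidError(RPCError): ...

class FloodWaitError(RPCError):
    def __init__(self, seconds):
        super().__init__(f"wait {seconds}s")
        self.seconds = seconds

errors_dict = {"PHONE_CODE_INVALID": PhoneCodeInvalidError}
errors_re = [(r"FLOOD_WAIT_(\d+)", FloodWaitError)]

def message_to_error(message):
    cls = errors_dict.get(message)          # 1) direct look-up
    if cls:
        return cls()
    for pattern, cls in errors_re:          # 2) regex fallback with capture
        m = re.match(pattern, message)
        if m:
            return cls(int(m.group(1)))
    return RPCError(message)                # 3) generic fallback

print(type(message_to_error("FLOOD_WAIT_420")).__name__)  # FloodWaitError
```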

| field | value |
|---|---|
| hexsha | 62a0b10e83ae58e938283a833f2ed37c407a04fb |
| size | 48,330 |
| ext | py |
| lang | Python |
| repo_path (stars / issues / forks) | venv/Lib/site-packages/sklearn/preprocessing/tests/test_encoders.py |
| repo_name | arnoyu-hub/COMP0016miemie |
| repo_head_hexsha | 59af664dcf190eab4f93cefb8471908717415fea |
| repo_licenses | ["MIT"] |
| max_stars_count | null (event datetimes: null) |
| max_issues_count | null (event datetimes: null) |
| max_forks_count | null (event datetimes: null) |

content:

# -*- coding: utf-8 -*-
import re
import numpy as np
from scipy import sparse
import pytest
from sklearn.exceptions import NotFittedError
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import _convert_container
from sklearn.utils import is_scalar_nan
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
def test_one_hot_encoder_sparse_dense():
# check that sparse and dense will give the same results
X = np.array([[3, 2, 1], [0, 1, 1]])
enc_sparse = OneHotEncoder()
enc_dense = OneHotEncoder(sparse=False)
X_trans_sparse = enc_sparse.fit_transform(X)
X_trans_dense = enc_dense.fit_transform(X)
assert X_trans_sparse.shape == (2, 5)
assert X_trans_dense.shape == (2, 5)
assert sparse.issparse(X_trans_sparse)
assert not sparse.issparse(X_trans_dense)
# check outcome
assert_array_equal(
X_trans_sparse.toarray(), [[0.0, 1.0, 0.0, 1.0, 1.0], [1.0, 0.0, 1.0, 0.0, 1.0]]
)
assert_array_equal(X_trans_sparse.toarray(), X_trans_dense)
def test_one_hot_encoder_handle_unknown():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
X2 = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown="error")
oh.fit(X)
with pytest.raises(ValueError, match="Found unknown categories"):
oh.transform(X2)
# Test the ignore option, ignores unknown features (giving all 0's)
oh = OneHotEncoder(handle_unknown="ignore")
oh.fit(X)
X2_passed = X2.copy()
assert_array_equal(
oh.transform(X2_passed).toarray(),
np.array([[0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0]]),
)
# ensure transformed data was not modified in place
assert_allclose(X2, X2_passed)
# Raise error if handle_unknown is neither ignore or error.
oh = OneHotEncoder(handle_unknown="42")
with pytest.raises(ValueError, match="handle_unknown should be either"):
oh.fit(X)
def test_one_hot_encoder_not_fitted():
X = np.array([["a"], ["b"]])
enc = OneHotEncoder(categories=["a", "b"])
msg = (
"This OneHotEncoder instance is not fitted yet. "
"Call 'fit' with appropriate arguments before using this "
"estimator."
)
with pytest.raises(NotFittedError, match=msg):
enc.transform(X)
def test_one_hot_encoder_handle_unknown_strings():
X = np.array(["11111111", "22", "333", "4444"]).reshape((-1, 1))
X2 = np.array(["55555", "22"]).reshape((-1, 1))
# Non Regression test for the issue #12470
# Test the ignore option, when categories are numpy string dtype
# particularly when the known category strings are larger
# than the unknown category strings
oh = OneHotEncoder(handle_unknown="ignore")
oh.fit(X)
X2_passed = X2.copy()
assert_array_equal(
oh.transform(X2_passed).toarray(),
np.array([[0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]]),
)
# ensure transformed data was not modified in place
assert_array_equal(X2, X2_passed)
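# Illustrative note (not part of the original scikit-learn test module): the two
# tests above pin down the handle_unknown="ignore" contract, e.g.
#
#   enc = OneHotEncoder(handle_unknown="ignore").fit([["a"], ["b"]])
#   enc.transform([["c"]]).toarray()   # -> array([[0., 0.]])
#
# Unknown categories encode as an all-zero row instead of raising.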
@pytest.mark.parametrize("output_dtype", [np.int32, np.float32, np.float64])
@pytest.mark.parametrize("input_dtype", [np.int32, np.float32, np.float64])
def test_one_hot_encoder_dtype(input_dtype, output_dtype):
X = np.asarray([[0, 1]], dtype=input_dtype).T
X_expected = np.asarray([[1, 0], [0, 1]], dtype=output_dtype)
oh = OneHotEncoder(categories="auto", dtype=output_dtype)
assert_array_equal(oh.fit_transform(X).toarray(), X_expected)
assert_array_equal(oh.fit(X).transform(X).toarray(), X_expected)
oh = OneHotEncoder(categories="auto", dtype=output_dtype, sparse=False)
assert_array_equal(oh.fit_transform(X), X_expected)
assert_array_equal(oh.fit(X).transform(X), X_expected)
@pytest.mark.parametrize("output_dtype", [np.int32, np.float32, np.float64])
def test_one_hot_encoder_dtype_pandas(output_dtype):
pd = pytest.importorskip("pandas")
X_df = pd.DataFrame({"A": ["a", "b"], "B": [1, 2]})
X_expected = np.array([[1, 0, 1, 0], [0, 1, 0, 1]], dtype=output_dtype)
oh = OneHotEncoder(dtype=output_dtype)
assert_array_equal(oh.fit_transform(X_df).toarray(), X_expected)
assert_array_equal(oh.fit(X_df).transform(X_df).toarray(), X_expected)
oh = OneHotEncoder(dtype=output_dtype, sparse=False)
assert_array_equal(oh.fit_transform(X_df), X_expected)
assert_array_equal(oh.fit(X_df).transform(X_df), X_expected)
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
def test_one_hot_encoder_feature_names(get_names):
enc = OneHotEncoder()
X = [
["Male", 1, "girl", 2, 3],
["Female", 41, "girl", 1, 10],
["Male", 51, "boy", 12, 3],
["Male", 91, "girl", 21, 30],
]
enc.fit(X)
feature_names = getattr(enc, get_names)()
if get_names == "get_feature_names":
assert isinstance(feature_names, np.ndarray)
assert_array_equal(
[
"x0_Female",
"x0_Male",
"x1_1",
"x1_41",
"x1_51",
"x1_91",
"x2_boy",
"x2_girl",
"x3_1",
"x3_2",
"x3_12",
"x3_21",
"x4_3",
"x4_10",
"x4_30",
],
feature_names,
)
feature_names2 = enc.get_feature_names(["one", "two", "three", "four", "five"])
feature_names2 = getattr(enc, get_names)(["one", "two", "three", "four", "five"])
assert_array_equal(
[
"one_Female",
"one_Male",
"two_1",
"two_41",
"two_51",
"two_91",
"three_boy",
"three_girl",
"four_1",
"four_2",
"four_12",
"four_21",
"five_3",
"five_10",
"five_30",
],
feature_names2,
)
with pytest.raises(ValueError, match="input_features should have length"):
getattr(enc, get_names)(["one", "two"])
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
def test_one_hot_encoder_feature_names_unicode(get_names):
enc = OneHotEncoder()
X = np.array([["c❤t1", "dat2"]], dtype=object).T
enc.fit(X)
feature_names = getattr(enc, get_names)()
assert_array_equal(["x0_c❤t1", "x0_dat2"], feature_names)
feature_names = getattr(enc, get_names)(input_features=["n👍me"])
assert_array_equal(["n👍me_c❤t1", "n👍me_dat2"], feature_names)
def test_one_hot_encoder_set_params():
X = np.array([[1, 2]]).T
oh = OneHotEncoder()
# set params on not yet fitted object
oh.set_params(categories=[[0, 1, 2, 3]])
assert oh.get_params()["categories"] == [[0, 1, 2, 3]]
assert oh.fit_transform(X).toarray().shape == (2, 4)
# set params on already fitted object
oh.set_params(categories=[[0, 1, 2, 3, 4]])
assert oh.fit_transform(X).toarray().shape == (2, 5)
def check_categorical_onehot(X):
enc = OneHotEncoder(categories="auto")
Xtr1 = enc.fit_transform(X)
enc = OneHotEncoder(categories="auto", sparse=False)
Xtr2 = enc.fit_transform(X)
assert_allclose(Xtr1.toarray(), Xtr2)
assert sparse.isspmatrix_csr(Xtr1)
return Xtr1.toarray()
@pytest.mark.parametrize(
"X",
[
[["def", 1, 55], ["abc", 2, 55]],
np.array([[10, 1, 55], [5, 2, 55]]),
np.array([["b", "A", "cat"], ["a", "B", "cat"]], dtype=object),
np.array([["b", 1, "cat"], ["a", np.nan, "cat"]], dtype=object),
np.array([["b", 1, "cat"], ["a", float("nan"), "cat"]], dtype=object),
np.array([[None, 1, "cat"], ["a", 2, "cat"]], dtype=object),
np.array([[None, 1, None], ["a", np.nan, None]], dtype=object),
np.array([[None, 1, None], ["a", float("nan"), None]], dtype=object),
],
ids=[
"mixed",
"numeric",
"object",
"mixed-nan",
"mixed-float-nan",
"mixed-None",
"mixed-None-nan",
"mixed-None-float-nan",
],
)
def test_one_hot_encoder(X):
Xtr = check_categorical_onehot(np.array(X)[:, [0]])
assert_allclose(Xtr, [[0, 1], [1, 0]])
Xtr = check_categorical_onehot(np.array(X)[:, [0, 1]])
assert_allclose(Xtr, [[0, 1, 1, 0], [1, 0, 0, 1]])
Xtr = OneHotEncoder(categories="auto").fit_transform(X)
assert_allclose(Xtr.toarray(), [[0, 1, 1, 0, 1], [1, 0, 0, 1, 1]])
@pytest.mark.parametrize("sparse_", [False, True])
@pytest.mark.parametrize("drop", [None, "first"])
def test_one_hot_encoder_inverse(sparse_, drop):
X = [["abc", 2, 55], ["def", 1, 55], ["abc", 3, 55]]
enc = OneHotEncoder(sparse=sparse_, drop=drop)
X_tr = enc.fit_transform(X)
exp = np.array(X, dtype=object)
assert_array_equal(enc.inverse_transform(X_tr), exp)
X = [[2, 55], [1, 55], [3, 55]]
enc = OneHotEncoder(sparse=sparse_, categories="auto", drop=drop)
X_tr = enc.fit_transform(X)
exp = np.array(X)
assert_array_equal(enc.inverse_transform(X_tr), exp)
if drop is None:
# with unknown categories
# drop is incompatible with handle_unknown=ignore
X = [["abc", 2, 55], ["def", 1, 55], ["abc", 3, 55]]
enc = OneHotEncoder(
sparse=sparse_,
handle_unknown="ignore",
categories=[["abc", "def"], [1, 2], [54, 55, 56]],
)
X_tr = enc.fit_transform(X)
exp = np.array(X, dtype=object)
exp[2, 1] = None
assert_array_equal(enc.inverse_transform(X_tr), exp)
# with an otherwise numerical output, still object if unknown
X = [[2, 55], [1, 55], [3, 55]]
enc = OneHotEncoder(
sparse=sparse_, categories=[[1, 2], [54, 56]], handle_unknown="ignore"
)
X_tr = enc.fit_transform(X)
exp = np.array(X, dtype=object)
exp[2, 0] = None
exp[:, 1] = None
assert_array_equal(enc.inverse_transform(X_tr), exp)
# incorrect shape raises
X_tr = np.array([[0, 1, 1], [1, 0, 1]])
msg = re.escape("Shape of the passed X data is not correct")
with pytest.raises(ValueError, match=msg):
enc.inverse_transform(X_tr)
@pytest.mark.parametrize("sparse_", [False, True])
@pytest.mark.parametrize(
"X, X_trans",
[
([[2, 55], [1, 55], [2, 55]], [[0, 1, 1], [0, 0, 0], [0, 1, 1]]),
(
[["one", "a"], ["two", "a"], ["three", "b"], ["two", "a"]],
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 1], [0, 1, 0, 0, 0]],
),
],
)
def test_one_hot_encoder_inverse_transform_raise_error_with_unknown(
X, X_trans, sparse_
):
"""Check that `inverse_transform` raise an error with unknown samples, no
dropped feature, and `handle_unknow="error`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/14934
"""
enc = OneHotEncoder(sparse=sparse_).fit(X)
msg = (
r"Samples \[(\d )*\d\] can not be inverted when drop=None and "
r"handle_unknown='error' because they contain all zeros"
)
if sparse_:
# emulate sparse data transform by a one-hot encoder sparse.
X_trans = _convert_container(X_trans, "sparse")
with pytest.raises(ValueError, match=msg):
enc.inverse_transform(X_trans)
def test_one_hot_encoder_inverse_if_binary():
X = np.array([["Male", 1], ["Female", 3], ["Female", 2]], dtype=object)
ohe = OneHotEncoder(drop="if_binary", sparse=False)
X_tr = ohe.fit_transform(X)
assert_array_equal(ohe.inverse_transform(X_tr), X)
# check that resetting drop option without refitting does not throw an error
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
@pytest.mark.parametrize("drop", ["if_binary", "first", None])
@pytest.mark.parametrize("reset_drop", ["if_binary", "first", None])
def test_one_hot_encoder_drop_reset(get_names, drop, reset_drop):
X = np.array([["Male", 1], ["Female", 3], ["Female", 2]], dtype=object)
ohe = OneHotEncoder(drop=drop, sparse=False)
ohe.fit(X)
X_tr = ohe.transform(X)
feature_names = getattr(ohe, get_names)()
ohe.set_params(drop=reset_drop)
assert_array_equal(ohe.inverse_transform(X_tr), X)
assert_allclose(ohe.transform(X), X_tr)
assert_array_equal(getattr(ohe, get_names)(), feature_names)
@pytest.mark.parametrize("method", ["fit", "fit_transform"])
@pytest.mark.parametrize("X", [[1, 2], np.array([3.0, 4.0])])
def test_X_is_not_1D(X, method):
oh = OneHotEncoder()
msg = "Expected 2D array, got 1D array instead"
with pytest.raises(ValueError, match=msg):
getattr(oh, method)(X)
@pytest.mark.parametrize("method", ["fit", "fit_transform"])
def test_X_is_not_1D_pandas(method):
pd = pytest.importorskip("pandas")
X = pd.Series([6, 3, 4, 6])
oh = OneHotEncoder()
msg = "Expected 2D array, got 1D array instead"
with pytest.raises(ValueError, match=msg):
getattr(oh, method)(X)
@pytest.mark.parametrize(
"X, cat_exp, cat_dtype",
[
([["abc", 55], ["def", 55]], [["abc", "def"], [55]], np.object_),
(np.array([[1, 2], [3, 2]]), [[1, 3], [2]], np.integer),
(
np.array([["A", "cat"], ["B", "cat"]], dtype=object),
[["A", "B"], ["cat"]],
np.object_,
),
(np.array([["A", "cat"], ["B", "cat"]]), [["A", "B"], ["cat"]], np.str_),
(np.array([[1, 2], [np.nan, 2]]), [[1, np.nan], [2]], np.float_),
(
np.array([["A", np.nan], [None, np.nan]], dtype=object),
[["A", None], [np.nan]],
np.object_,
),
(
np.array([["A", float("nan")], [None, float("nan")]], dtype=object),
[["A", None], [float("nan")]],
np.object_,
),
],
ids=[
"mixed",
"numeric",
"object",
"string",
"missing-float",
"missing-np.nan-object",
"missing-float-nan-object",
],
)
def test_one_hot_encoder_categories(X, cat_exp, cat_dtype):
# order of categories should not depend on order of samples
for Xi in [X, X[::-1]]:
enc = OneHotEncoder(categories="auto")
enc.fit(Xi)
# assert enc.categories == 'auto'
assert isinstance(enc.categories_, list)
for res, exp in zip(enc.categories_, cat_exp):
res_list = res.tolist()
if is_scalar_nan(exp[-1]):
assert is_scalar_nan(res_list[-1])
assert res_list[:-1] == exp[:-1]
else:
assert res.tolist() == exp
assert np.issubdtype(res.dtype, cat_dtype)
@pytest.mark.parametrize(
"X, X2, cats, cat_dtype",
[
(
np.array([["a", "b"]], dtype=object).T,
np.array([["a", "d"]], dtype=object).T,
[["a", "b", "c"]],
np.object_,
),
(
np.array([[1, 2]], dtype="int64").T,
np.array([[1, 4]], dtype="int64").T,
[[1, 2, 3]],
np.int64,
),
(
np.array([["a", "b"]], dtype=object).T,
np.array([["a", "d"]], dtype=object).T,
[np.array(["a", "b", "c"])],
np.object_,
),
(
np.array([[None, "a"]], dtype=object).T,
np.array([[None, "b"]], dtype=object).T,
[[None, "a", "z"]],
object,
),
(
np.array([["a", "b"]], dtype=object).T,
np.array([["a", np.nan]], dtype=object).T,
[["a", "b", "z"]],
object,
),
(
np.array([["a", None]], dtype=object).T,
np.array([["a", np.nan]], dtype=object).T,
[["a", None, "z"]],
object,
),
(
np.array([["a", np.nan]], dtype=object).T,
np.array([["a", None]], dtype=object).T,
[["a", np.nan, "z"]],
object,
),
],
ids=[
"object",
"numeric",
"object-string",
"object-string-none",
"object-string-nan",
"object-None-and-nan",
"object-nan-and-None",
],
)
def test_one_hot_encoder_specified_categories(X, X2, cats, cat_dtype):
enc = OneHotEncoder(categories=cats)
exp = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
assert_array_equal(enc.fit_transform(X).toarray(), exp)
assert list(enc.categories[0]) == list(cats[0])
assert enc.categories_[0].tolist() == list(cats[0])
# manually specified categories should have same dtype as
# the data when coerced from lists
assert enc.categories_[0].dtype == cat_dtype
# when specifying categories manually, unknown categories should already
# raise when fitting
enc = OneHotEncoder(categories=cats)
with pytest.raises(ValueError, match="Found unknown categories"):
enc.fit(X2)
enc = OneHotEncoder(categories=cats, handle_unknown="ignore")
exp = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
assert_array_equal(enc.fit(X2).transform(X2).toarray(), exp)
def test_one_hot_encoder_unsorted_categories():
X = np.array([["a", "b"]], dtype=object).T
enc = OneHotEncoder(categories=[["b", "a", "c"]])
exp = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0]])
assert_array_equal(enc.fit(X).transform(X).toarray(), exp)
assert_array_equal(enc.fit_transform(X).toarray(), exp)
assert enc.categories_[0].tolist() == ["b", "a", "c"]
assert np.issubdtype(enc.categories_[0].dtype, np.object_)
# unsorted passed categories still raise for numerical values
X = np.array([[1, 2]]).T
enc = OneHotEncoder(categories=[[2, 1, 3]])
msg = "Unsorted categories are not supported"
with pytest.raises(ValueError, match=msg):
enc.fit_transform(X)
# np.nan must be the last category in categories[0] to be considered sorted
X = np.array([[1, 2, np.nan]]).T
enc = OneHotEncoder(categories=[[1, np.nan, 2]])
with pytest.raises(ValueError, match=msg):
enc.fit_transform(X)
def test_one_hot_encoder_specified_categories_mixed_columns():
# multiple columns
X = np.array([["a", "b"], [0, 2]], dtype=object).T
enc = OneHotEncoder(categories=[["a", "b", "c"], [0, 1, 2]])
exp = np.array([[1.0, 0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 1.0]])
assert_array_equal(enc.fit_transform(X).toarray(), exp)
assert enc.categories_[0].tolist() == ["a", "b", "c"]
assert np.issubdtype(enc.categories_[0].dtype, np.object_)
assert enc.categories_[1].tolist() == [0, 1, 2]
# integer categories but from object dtype data
assert np.issubdtype(enc.categories_[1].dtype, np.object_)
def test_one_hot_encoder_pandas():
pd = pytest.importorskip("pandas")
X_df = pd.DataFrame({"A": ["a", "b"], "B": [1, 2]})
Xtr = check_categorical_onehot(X_df)
assert_allclose(Xtr, [[1, 0, 1, 0], [0, 1, 0, 1]])
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
@pytest.mark.parametrize(
"drop, expected_names",
[
("first", ["x0_c", "x2_b"]),
("if_binary", ["x0_c", "x1_2", "x2_b"]),
(["c", 2, "b"], ["x0_b", "x2_a"]),
],
ids=["first", "binary", "manual"],
)
def test_one_hot_encoder_feature_names_drop(get_names, drop, expected_names):
X = [["c", 2, "a"], ["b", 2, "b"]]
ohe = OneHotEncoder(drop=drop)
ohe.fit(X)
feature_names = getattr(ohe, get_names)()
if get_names == "get_feature_names":
assert isinstance(feature_names, np.ndarray)
assert_array_equal(expected_names, feature_names)
def test_one_hot_encoder_drop_equals_if_binary():
# Canonical case
X = [[10, "yes"], [20, "no"], [30, "yes"]]
expected = np.array(
[[1.0, 0.0, 0.0, 1.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0]]
)
expected_drop_idx = np.array([None, 0])
ohe = OneHotEncoder(drop="if_binary", sparse=False)
result = ohe.fit_transform(X)
assert_array_equal(ohe.drop_idx_, expected_drop_idx)
assert_allclose(result, expected)
# with only one cat, the behaviour is equivalent to drop=None
X = [["true", "a"], ["false", "a"], ["false", "a"]]
expected = np.array([[1.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
expected_drop_idx = np.array([0, None])
ohe = OneHotEncoder(drop="if_binary", sparse=False)
result = ohe.fit_transform(X)
assert_array_equal(ohe.drop_idx_, expected_drop_idx)
assert_allclose(result, expected)
@pytest.mark.parametrize(
"X",
[
[["abc", 2, 55], ["def", 1, 55]],
np.array([[10, 2, 55], [20, 1, 55]]),
np.array([["a", "B", "cat"], ["b", "A", "cat"]], dtype=object),
],
ids=["mixed", "numeric", "object"],
)
def test_ordinal_encoder(X):
enc = OrdinalEncoder()
exp = np.array([[0, 1, 0], [1, 0, 0]], dtype="int64")
assert_array_equal(enc.fit_transform(X), exp.astype("float64"))
enc = OrdinalEncoder(dtype="int64")
assert_array_equal(enc.fit_transform(X), exp)
@pytest.mark.parametrize(
"X, X2, cats, cat_dtype",
[
(
np.array([["a", "b"]], dtype=object).T,
np.array([["a", "d"]], dtype=object).T,
[["a", "b", "c"]],
np.object_,
),
(
np.array([[1, 2]], dtype="int64").T,
np.array([[1, 4]], dtype="int64").T,
[[1, 2, 3]],
np.int64,
),
(
np.array([["a", "b"]], dtype=object).T,
np.array([["a", "d"]], dtype=object).T,
[np.array(["a", "b", "c"])],
np.object_,
),
],
ids=["object", "numeric", "object-string-cat"],
)
def test_ordinal_encoder_specified_categories(X, X2, cats, cat_dtype):
enc = OrdinalEncoder(categories=cats)
exp = np.array([[0.0], [1.0]])
assert_array_equal(enc.fit_transform(X), exp)
assert list(enc.categories[0]) == list(cats[0])
assert enc.categories_[0].tolist() == list(cats[0])
# manually specified categories should have same dtype as
# the data when coerced from lists
assert enc.categories_[0].dtype == cat_dtype
# when specifying categories manually, unknown categories should already
# raise when fitting
enc = OrdinalEncoder(categories=cats)
with pytest.raises(ValueError, match="Found unknown categories"):
enc.fit(X2)
def test_ordinal_encoder_inverse():
X = [["abc", 2, 55], ["def", 1, 55]]
enc = OrdinalEncoder()
X_tr = enc.fit_transform(X)
exp = np.array(X, dtype=object)
assert_array_equal(enc.inverse_transform(X_tr), exp)
# incorrect shape raises
X_tr = np.array([[0, 1, 1, 2], [1, 0, 1, 0]])
msg = re.escape("Shape of the passed X data is not correct")
with pytest.raises(ValueError, match=msg):
enc.inverse_transform(X_tr)
def test_ordinal_encoder_handle_unknowns_string():
enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-2)
X_fit = np.array([["a", "x"], ["b", "y"], ["c", "z"]], dtype=object)
X_trans = np.array([["c", "xy"], ["bla", "y"], ["a", "x"]], dtype=object)
enc.fit(X_fit)
X_trans_enc = enc.transform(X_trans)
exp = np.array([[2, -2], [-2, 1], [0, 0]], dtype="int64")
assert_array_equal(X_trans_enc, exp)
X_trans_inv = enc.inverse_transform(X_trans_enc)
inv_exp = np.array([["c", None], [None, "y"], ["a", "x"]], dtype=object)
assert_array_equal(X_trans_inv, inv_exp)
@pytest.mark.parametrize("dtype", [float, int])
def test_ordinal_encoder_handle_unknowns_numeric(dtype):
enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-999)
X_fit = np.array([[1, 7], [2, 8], [3, 9]], dtype=dtype)
X_trans = np.array([[3, 12], [23, 8], [1, 7]], dtype=dtype)
enc.fit(X_fit)
X_trans_enc = enc.transform(X_trans)
exp = np.array([[2, -999], [-999, 1], [0, 0]], dtype="int64")
assert_array_equal(X_trans_enc, exp)
X_trans_inv = enc.inverse_transform(X_trans_enc)
inv_exp = np.array([[3, None], [None, 8], [1, 7]], dtype=object)
assert_array_equal(X_trans_inv, inv_exp)
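# Illustrative note (not part of the original scikit-learn test module): with
# handle_unknown="use_encoded_value", unseen categories map to `unknown_value`
# at transform time and invert back to None, e.g.
#
#   enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1).fit([["a"], ["b"]])
#   enc.transform([["c"]])           # -> array([[-1.]])
#   enc.inverse_transform([[-1.]])   # -> array([[None]], dtype=object)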
@pytest.mark.parametrize(
"params, err_type, err_msg",
[
(
{"handle_unknown": "use_encoded_value"},
TypeError,
"unknown_value should be an integer or np.nan when handle_unknown "
"is 'use_encoded_value', got None.",
),
(
{"unknown_value": -2},
TypeError,
"unknown_value should only be set when handle_unknown is "
"'use_encoded_value', got -2.",
),
(
{"handle_unknown": "use_encoded_value", "unknown_value": "bla"},
TypeError,
"unknown_value should be an integer or np.nan when handle_unknown "
"is 'use_encoded_value', got bla.",
),
(
{"handle_unknown": "use_encoded_value", "unknown_value": 1},
ValueError,
"The used value for unknown_value (1) is one of the values "
"already used for encoding the seen categories.",
),
(
{"handle_unknown": "ignore"},
ValueError,
"handle_unknown should be either 'error' or 'use_encoded_value', "
"got ignore.",
),
],
)
def test_ordinal_encoder_handle_unknowns_raise(params, err_type, err_msg):
# Check error message when validating input parameters
X = np.array([["a", "x"], ["b", "y"]], dtype=object)
encoder = OrdinalEncoder(**params)
with pytest.raises(err_type, match=err_msg):
encoder.fit(X)
def test_ordinal_encoder_handle_unknowns_nan():
# Make sure unknown_value=np.nan properly works
enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=np.nan)
X_fit = np.array([[1], [2], [3]])
enc.fit(X_fit)
X_trans = enc.transform([[1], [2], [4]])
assert_array_equal(X_trans, [[0], [1], [np.nan]])
def test_ordinal_encoder_handle_unknowns_nan_non_float_dtype():
# Make sure an error is raised when unknown_value=np.nan and the dtype
# isn't a float dtype
enc = OrdinalEncoder(
handle_unknown="use_encoded_value", unknown_value=np.nan, dtype=int
)
X_fit = np.array([[1], [2], [3]])
with pytest.raises(ValueError, match="dtype parameter should be a float dtype"):
enc.fit(X_fit)
def test_ordinal_encoder_raise_categories_shape():
X = np.array([["Low", "Medium", "High", "Medium", "Low"]], dtype=object).T
cats = ["Low", "Medium", "High"]
enc = OrdinalEncoder(categories=cats)
msg = "Shape mismatch: if categories is an array,"
with pytest.raises(ValueError, match=msg):
enc.fit(X)
def test_encoder_dtypes():
# check that dtypes are preserved when determining categories
enc = OneHotEncoder(categories="auto")
exp = np.array([[1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0]], dtype="float64")
for X in [
np.array([[1, 2], [3, 4]], dtype="int64"),
np.array([[1, 2], [3, 4]], dtype="float64"),
np.array([["a", "b"], ["c", "d"]]), # str dtype
np.array([[b"a", b"b"], [b"c", b"d"]]), # bytes dtype
np.array([[1, "a"], [3, "b"]], dtype="object"),
]:
enc.fit(X)
assert all([enc.categories_[i].dtype == X.dtype for i in range(2)])
assert_array_equal(enc.transform(X).toarray(), exp)
X = [[1, 2], [3, 4]]
enc.fit(X)
assert all([np.issubdtype(enc.categories_[i].dtype, np.integer) for i in range(2)])
assert_array_equal(enc.transform(X).toarray(), exp)
X = [[1, "a"], [3, "b"]]
enc.fit(X)
assert all([enc.categories_[i].dtype == "object" for i in range(2)])
assert_array_equal(enc.transform(X).toarray(), exp)
def test_encoder_dtypes_pandas():
# check dtype (similar to test_categorical_encoder_dtypes for dataframes)
pd = pytest.importorskip("pandas")
enc = OneHotEncoder(categories="auto")
exp = np.array(
[[1.0, 0.0, 1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 0.0, 1.0]],
dtype="float64",
)
X = pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]}, dtype="int64")
enc.fit(X)
assert all([enc.categories_[i].dtype == "int64" for i in range(2)])
assert_array_equal(enc.transform(X).toarray(), exp)
X = pd.DataFrame({"A": [1, 2], "B": ["a", "b"], "C": [3.0, 4.0]})
X_type = [X["A"].dtype, X["B"].dtype, X["C"].dtype]
enc.fit(X)
assert all([enc.categories_[i].dtype == X_type[i] for i in range(3)])
assert_array_equal(enc.transform(X).toarray(), exp)
def test_one_hot_encoder_warning():
enc = OneHotEncoder()
X = [["Male", 1], ["Female", 3]]
np.testing.assert_no_warnings(enc.fit_transform, X)
@pytest.mark.parametrize("missing_value", [np.nan, None, float("nan")])
def test_one_hot_encoder_drop_manual(missing_value):
cats_to_drop = ["def", 12, 3, 56, missing_value]
enc = OneHotEncoder(drop=cats_to_drop)
X = [
["abc", 12, 2, 55, "a"],
["def", 12, 1, 55, "a"],
["def", 12, 3, 56, missing_value],
]
trans = enc.fit_transform(X).toarray()
exp = [[1, 0, 1, 1, 1], [0, 1, 0, 1, 1], [0, 0, 0, 0, 0]]
assert_array_equal(trans, exp)
assert enc.drop is cats_to_drop
dropped_cats = [
cat[feature] for cat, feature in zip(enc.categories_, enc.drop_idx_)
]
X_inv_trans = enc.inverse_transform(trans)
X_array = np.array(X, dtype=object)
# last value is np.nan
if is_scalar_nan(cats_to_drop[-1]):
assert_array_equal(dropped_cats[:-1], cats_to_drop[:-1])
assert is_scalar_nan(dropped_cats[-1])
assert is_scalar_nan(cats_to_drop[-1])
# do not include the last column which includes missing values
assert_array_equal(X_array[:, :-1], X_inv_trans[:, :-1])
# check last column is the missing value
assert_array_equal(X_array[-1, :-1], X_inv_trans[-1, :-1])
assert is_scalar_nan(X_array[-1, -1])
assert is_scalar_nan(X_inv_trans[-1, -1])
else:
assert_array_equal(dropped_cats, cats_to_drop)
assert_array_equal(X_array, X_inv_trans)
@pytest.mark.parametrize(
"X_fit, params, err_msg",
[
(
[["Male"], ["Female"]],
{"drop": "second"},
"Wrong input for parameter `drop`",
),
(
[["abc", 2, 55], ["def", 1, 55], ["def", 3, 59]],
{"drop": np.asarray("b", dtype=object)},
"Wrong input for parameter `drop`",
),
(
[["abc", 2, 55], ["def", 1, 55], ["def", 3, 59]],
{"drop": ["ghi", 3, 59]},
"The following categories were supposed",
),
],
)
def test_one_hot_encoder_invalid_params(X_fit, params, err_msg):
enc = OneHotEncoder(**params)
with pytest.raises(ValueError, match=err_msg):
enc.fit(X_fit)
@pytest.mark.parametrize("drop", [["abc", 3], ["abc", 3, 41, "a"]])
def test_invalid_drop_length(drop):
enc = OneHotEncoder(drop=drop)
err_msg = "`drop` should have length equal to the number"
with pytest.raises(ValueError, match=err_msg):
enc.fit([["abc", 2, 55], ["def", 1, 55], ["def", 3, 59]])
@pytest.mark.parametrize("density", [True, False], ids=["sparse", "dense"])
@pytest.mark.parametrize("drop", ["first", ["a", 2, "b"]], ids=["first", "manual"])
def test_categories(density, drop):
ohe_base = OneHotEncoder(sparse=density)
ohe_test = OneHotEncoder(sparse=density, drop=drop)
X = [["c", 1, "a"], ["a", 2, "b"]]
ohe_base.fit(X)
ohe_test.fit(X)
assert_array_equal(ohe_base.categories_, ohe_test.categories_)
if drop == "first":
assert_array_equal(ohe_test.drop_idx_, 0)
else:
for drop_cat, drop_idx, cat_list in zip(
drop, ohe_test.drop_idx_, ohe_test.categories_
):
assert cat_list[int(drop_idx)] == drop_cat
assert isinstance(ohe_test.drop_idx_, np.ndarray)
assert ohe_test.drop_idx_.dtype == object
@pytest.mark.parametrize("Encoder", [OneHotEncoder, OrdinalEncoder])
def test_encoders_has_categorical_tags(Encoder):
assert "categorical" in Encoder()._get_tags()["X_types"]
# TODO: Remove in 1.2 when get_feature_names is removed
def test_one_hot_encoder_get_feature_names_deprecated():
X = np.array([["cat", "dog"]], dtype=object).T
enc = OneHotEncoder().fit(X)
msg = "get_feature_names is deprecated in 1.0"
with pytest.warns(FutureWarning, match=msg):
enc.get_feature_names()
# deliberately omit 'OS' as an invalid combo
@pytest.mark.parametrize(
"input_dtype, category_dtype", ["OO", "OU", "UO", "UU", "US", "SO", "SU", "SS"]
)
@pytest.mark.parametrize("array_type", ["list", "array", "dataframe"])
def test_encoders_string_categories(input_dtype, category_dtype, array_type):
"""Check that encoding work with object, unicode, and byte string dtypes.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/15616
https://github.com/scikit-learn/scikit-learn/issues/15726
https://github.com/scikit-learn/scikit-learn/issues/19677
"""
X = np.array([["b"], ["a"]], dtype=input_dtype)
categories = [np.array(["b", "a"], dtype=category_dtype)]
ohe = OneHotEncoder(categories=categories, sparse=False).fit(X)
X_test = _convert_container(
[["a"], ["a"], ["b"], ["a"]], array_type, dtype=input_dtype
)
X_trans = ohe.transform(X_test)
expected = np.array([[0, 1], [0, 1], [1, 0], [0, 1]])
assert_allclose(X_trans, expected)
oe = OrdinalEncoder(categories=categories).fit(X)
X_trans = oe.transform(X_test)
expected = np.array([[1], [1], [0], [1]])
assert_array_equal(X_trans, expected)
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
@pytest.mark.parametrize("missing_value", [np.nan, None])
def test_ohe_missing_values_get_feature_names(get_names, missing_value):
# encoder with missing values with object dtypes
X = np.array([["a", "b", missing_value, "a", missing_value]], dtype=object).T
ohe = OneHotEncoder(sparse=False, handle_unknown="ignore").fit(X)
names = getattr(ohe, get_names)()
assert_array_equal(names, ["x0_a", "x0_b", f"x0_{missing_value}"])
def test_ohe_missing_value_support_pandas():
# check support for pandas with mixed dtypes and missing values
pd = pytest.importorskip("pandas")
df = pd.DataFrame(
{
"col1": ["dog", "cat", None, "cat"],
"col2": np.array([3, 0, 4, np.nan], dtype=float),
},
columns=["col1", "col2"],
)
expected_df_trans = np.array(
[
[0, 1, 0, 0, 1, 0, 0],
[1, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 0, 1],
]
)
Xtr = check_categorical_onehot(df)
assert_allclose(Xtr, expected_df_trans)
@pytest.mark.parametrize("pd_nan_type", ["pd.NA", "np.nan"])
def test_ohe_missing_value_support_pandas_categorical(pd_nan_type):
# checks pandas dataframe with categorical features
if pd_nan_type == "pd.NA":
# pd.NA is in pandas 1.0
pd = pytest.importorskip("pandas", minversion="1.0")
pd_missing_value = pd.NA
else: # np.nan
pd = pytest.importorskip("pandas")
pd_missing_value = np.nan
df = pd.DataFrame(
{
"col1": pd.Series(["c", "a", pd_missing_value, "b", "a"], dtype="category"),
}
)
expected_df_trans = np.array(
[
[0, 0, 1, 0],
[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0],
]
)
ohe = OneHotEncoder(sparse=False, handle_unknown="ignore")
df_trans = ohe.fit_transform(df)
assert_allclose(expected_df_trans, df_trans)
assert len(ohe.categories_) == 1
assert_array_equal(ohe.categories_[0][:-1], ["a", "b", "c"])
assert np.isnan(ohe.categories_[0][-1])
def test_ohe_drop_first_handle_unknown_ignore_warns():
"""Check drop='first' and handle_unknown='ignore' during transform."""
X = [["a", 0], ["b", 2], ["b", 1]]
ohe = OneHotEncoder(drop="first", sparse=False, handle_unknown="ignore")
X_trans = ohe.fit_transform(X)
X_expected = np.array(
[
[0, 0, 0],
[1, 0, 1],
[1, 1, 0],
]
)
assert_allclose(X_trans, X_expected)
# Both categories are unknown
X_test = [["c", 3]]
X_expected = np.array([[0, 0, 0]])
warn_msg = (
r"Found unknown categories in columns \[0, 1\] during "
"transform. These unknown categories will be encoded as all "
"zeros"
)
with pytest.warns(UserWarning, match=warn_msg):
X_trans = ohe.transform(X_test)
assert_allclose(X_trans, X_expected)
# inverse_transform maps to None
X_inv = ohe.inverse_transform(X_expected)
assert_array_equal(X_inv, np.array([["a", 0]], dtype=object))
def test_ohe_drop_if_binary_handle_unknown_ignore_warns():
"""Check drop='if_binary' and handle_unknown='ignore' during transform."""
X = [["a", 0], ["b", 2], ["b", 1]]
ohe = OneHotEncoder(drop="if_binary", sparse=False, handle_unknown="ignore")
X_trans = ohe.fit_transform(X)
X_expected = np.array(
[
[0, 1, 0, 0],
[1, 0, 0, 1],
[1, 0, 1, 0],
]
)
assert_allclose(X_trans, X_expected)
# Both categories are unknown
X_test = [["c", 3]]
X_expected = np.array([[0, 0, 0, 0]])
warn_msg = (
r"Found unknown categories in columns \[0, 1\] during "
"transform. These unknown categories will be encoded as all "
"zeros"
)
with pytest.warns(UserWarning, match=warn_msg):
X_trans = ohe.transform(X_test)
assert_allclose(X_trans, X_expected)
# inverse_transform maps to None
X_inv = ohe.inverse_transform(X_expected)
assert_array_equal(X_inv, np.array([["a", None]], dtype=object))
def test_ohe_drop_first_explicit_categories():
"""Check drop='first' and handle_unknown='ignore' during fit with
categories passed in."""
X = [["a", 0], ["b", 2], ["b", 1]]
ohe = OneHotEncoder(
drop="first",
sparse=False,
handle_unknown="ignore",
categories=[["b", "a"], [1, 2]],
)
ohe.fit(X)
X_test = [["c", 1]]
X_expected = np.array([[0, 0]])
warn_msg = (
r"Found unknown categories in columns \[0\] during transform. "
r"These unknown categories will be encoded as all zeros"
)
with pytest.warns(UserWarning, match=warn_msg):
X_trans = ohe.transform(X_test)
assert_allclose(X_trans, X_expected)
def test_ordinal_encoder_passthrough_missing_values_float_errors_dtype():
"""Test ordinal encoder with nan passthrough fails when dtype=np.int32."""
X = np.array([[np.nan, 3.0, 1.0, 3.0]]).T
oe = OrdinalEncoder(dtype=np.int32)
msg = (
r"There are missing values in features \[0\]. For OrdinalEncoder "
"to passthrough missing values, the dtype parameter must be a "
"float"
)
with pytest.raises(ValueError, match=msg):
oe.fit(X)
def test_ordinal_encoder_passthrough_missing_values_float():
"""Test ordinal encoder with nan on float dtypes."""
X = np.array([[np.nan, 3.0, 1.0, 3.0]], dtype=np.float64).T
oe = OrdinalEncoder().fit(X)
assert len(oe.categories_) == 1
assert_allclose(oe.categories_[0], [1.0, 3.0, np.nan])
X_trans = oe.transform(X)
assert_allclose(X_trans, [[np.nan], [1.0], [0.0], [1.0]])
X_inverse = oe.inverse_transform(X_trans)
assert_allclose(X_inverse, X)
@pytest.mark.parametrize("pd_nan_type", ["pd.NA", "np.nan"])
def test_ordinal_encoder_missing_value_support_pandas_categorical(pd_nan_type):
"""Check ordinal encoder is compatible with pandas."""
# checks pandas dataframe with categorical features
if pd_nan_type == "pd.NA":
# pd.NA is in pandas 1.0
pd = pytest.importorskip("pandas", minversion="1.0")
pd_missing_value = pd.NA
else: # np.nan
pd = pytest.importorskip("pandas")
pd_missing_value = np.nan
df = pd.DataFrame(
{
"col1": pd.Series(["c", "a", pd_missing_value, "b", "a"], dtype="category"),
}
)
oe = OrdinalEncoder().fit(df)
assert len(oe.categories_) == 1
assert_array_equal(oe.categories_[0][:3], ["a", "b", "c"])
assert np.isnan(oe.categories_[0][-1])
df_trans = oe.transform(df)
assert_allclose(df_trans, [[2.0], [0.0], [np.nan], [1.0], [0.0]])
X_inverse = oe.inverse_transform(df_trans)
assert X_inverse.shape == (5, 1)
assert_array_equal(X_inverse[:2, 0], ["c", "a"])
assert_array_equal(X_inverse[3:, 0], ["b", "a"])
assert np.isnan(X_inverse[2, 0])
@pytest.mark.parametrize(
"X, X2, cats, cat_dtype",
[
(
(
np.array([["a", np.nan]], dtype=object).T,
np.array([["a", "b"]], dtype=object).T,
[np.array(["a", np.nan, "d"], dtype=object)],
np.object_,
)
),
(
(
np.array([["a", np.nan]], dtype=object).T,
np.array([["a", "b"]], dtype=object).T,
[np.array(["a", np.nan, "d"], dtype=object)],
np.object_,
)
),
(
(
np.array([[2.0, np.nan]], dtype=np.float64).T,
np.array([[3.0]], dtype=np.float64).T,
[np.array([2.0, 4.0, np.nan])],
np.float64,
)
),
],
ids=[
"object-None-missing-value",
"object-nan-missing_value",
"numeric-missing-value",
],
)
def test_ordinal_encoder_specified_categories_missing_passthrough(
X, X2, cats, cat_dtype
):
"""Test ordinal encoder for specified categories."""
oe = OrdinalEncoder(categories=cats)
exp = np.array([[0.0], [np.nan]])
assert_array_equal(oe.fit_transform(X), exp)
# manually specified categories should have same dtype as
# the data when coerced from lists
assert oe.categories_[0].dtype == cat_dtype
# when specifying categories manually, unknown categories should already
# raise when fitting
oe = OrdinalEncoder(categories=cats)
with pytest.raises(ValueError, match="Found unknown categories"):
oe.fit(X2)
@pytest.mark.parametrize(
"X, expected_X_trans, X_test",
[
(
np.array([[1.0, np.nan, 3.0]]).T,
np.array([[0.0, np.nan, 1.0]]).T,
np.array([[4.0]]),
),
(
np.array([[1.0, 4.0, 3.0]]).T,
np.array([[0.0, 2.0, 1.0]]).T,
np.array([[np.nan]]),
),
(
np.array([["c", np.nan, "b"]], dtype=object).T,
np.array([[1.0, np.nan, 0.0]]).T,
np.array([["d"]], dtype=object),
),
(
np.array([["c", "a", "b"]], dtype=object).T,
np.array([[2.0, 0.0, 1.0]]).T,
np.array([[np.nan]], dtype=object),
),
],
)
def test_ordinal_encoder_handle_missing_and_unknown(X, expected_X_trans, X_test):
"""Test the interaction between missing values and handle_unknown"""
oe = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1)
X_trans = oe.fit_transform(X)
assert_allclose(X_trans, expected_X_trans)
assert_allclose(oe.transform(X_test), [[-1.0]])
def test_ordinal_encoder_sparse():
"""Check that we raise proper error with sparse input in OrdinalEncoder.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/19878
"""
X = np.array([[3, 2, 1], [0, 1, 1]])
X_sparse = sparse.csr_matrix(X)
encoder = OrdinalEncoder()
err_msg = "A sparse matrix was passed, but dense data is required"
with pytest.raises(TypeError, match=err_msg):
encoder.fit(X_sparse)
with pytest.raises(TypeError, match=err_msg):
encoder.fit_transform(X_sparse)
X_trans = encoder.fit_transform(X)
X_trans_sparse = sparse.csr_matrix(X_trans)
with pytest.raises(TypeError, match=err_msg):
encoder.inverse_transform(X_trans_sparse)
def test_ordinal_encoder_fit_with_unseen_category():
"""Check OrdinalEncoder.fit works with unseen category when
`handle_unknown="use_encoded_value"`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/19872
"""
X = np.array([0, 0, 1, 0, 2, 5])[:, np.newaxis]
oe = OrdinalEncoder(
categories=[[-1, 0, 1]], handle_unknown="use_encoded_value", unknown_value=-999
)
oe.fit(X)
oe = OrdinalEncoder(categories=[[-1, 0, 1]], handle_unknown="error")
with pytest.raises(ValueError, match="Found unknown categories"):
oe.fit(X)
@pytest.mark.parametrize(
"X_train",
[
[["AA", "B"]],
np.array([["AA", "B"]], dtype="O"),
np.array([["AA", "B"]], dtype="U"),
],
)
@pytest.mark.parametrize(
"X_test",
[
[["A", "B"]],
np.array([["A", "B"]], dtype="O"),
np.array([["A", "B"]], dtype="U"),
],
)
def test_ordinal_encoder_handle_unknown_string_dtypes(X_train, X_test):
"""Checks that `OrdinalEncoder` transforms string dtypes.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/19872
"""
enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-9)
enc.fit(X_train)
X_trans = enc.transform(X_test)
assert_allclose(X_trans, [[-9, 0]])
def test_ordinal_encoder_python_integer():
"""Check that `OrdinalEncoder` accepts Python integers that are potentially
larger than 64 bits.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/20721
"""
X = np.array(
[
44253463435747313673,
9867966753463435747313673,
44253462342215747313673,
442534634357764313673,
]
).reshape(-1, 1)
encoder = OrdinalEncoder().fit(X)
assert_array_equal(encoder.categories_, np.sort(X, axis=0).T)
X_trans = encoder.transform(X)
assert_array_equal(X_trans, [[0], [3], [2], [1]])
| 34.769784
| 89
| 0.578605
|
4952e8e475178894137417b67fc87f8d5367cb6d
| 17,264
|
py
|
Python
|
onpolicy/envs/mpe/environment.py
|
LUMO666/Highway
|
05e1ad318bd14d405bd78d612e5706f7db2b3266
|
[
"MIT"
] | 5
|
2021-06-15T05:06:10.000Z
|
2021-12-01T05:11:49.000Z
|
onpolicy/envs/mpe/environment.py
|
ReinholdM/on-policy
|
121044954756b5317230e30d47e409802991458b
|
[
"MIT"
] | null | null | null |
onpolicy/envs/mpe/environment.py
|
ReinholdM/on-policy
|
121044954756b5317230e30d47e409802991458b
|
[
"MIT"
] | null | null | null |
import gym
from gym import spaces
from gym.envs.registration import EnvSpec
import numpy as np
from .multi_discrete import MultiDiscrete
# update bounds to center around agent
cam_range = 2
# environment for all agents in the multiagent world
# currently code assumes that no agents will be created/destroyed at runtime!
class MultiAgentEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array']
}
def __init__(self, world, reset_callback=None, reward_callback=None,
observation_callback=None, info_callback=None,
done_callback=None, post_step_callback=None,
shared_viewer=True, discrete_action=True):
self.world = world
self.world_length = self.world.world_length
self.current_step = 0
self.agents = self.world.policy_agents
# set required vectorized gym env property
self.n = len(world.policy_agents)
# scenario callbacks
self.reset_callback = reset_callback
self.reward_callback = reward_callback
self.observation_callback = observation_callback
self.info_callback = info_callback
self.done_callback = done_callback
self.post_step_callback = post_step_callback
# environment parameters
# self.discrete_action_space = True
self.discrete_action_space = discrete_action
# if true, action is a number 0...N, otherwise action is a one-hot N-dimensional vector
self.discrete_action_input = False
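        # e.g. with world.dim_p == 2 the discrete physical action space below is
        # Discrete(world.dim_p * 2 + 1) == Discrete(5) (no-op plus +/- along each axis);
        # with discrete_action_input an action is a single index such as 3, otherwise
        # it is a 5-dim vector such as [0, 0, 0, 1, 0] (illustrative values only; the
        # exact mapping is applied in _set_action)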
        # if true, the action will be performed discretely even if the action space is continuous
self.force_discrete_action = world.discrete_action if hasattr(
world, 'discrete_action') else False
        # in this env, force_discrete_action == False, because the world does not have a discrete_action attribute
# if true, every agent has the same reward
self.shared_reward = world.collaborative if hasattr(
world, 'collaborative') else False
#self.shared_reward = False
self.time = 0
# configure spaces
self.action_space = []
self.observation_space = []
self.share_observation_space = []
share_obs_dim = 0
for agent in self.agents:
total_action_space = []
# physical action space
if self.discrete_action_space:
u_action_space = spaces.Discrete(world.dim_p * 2 + 1)
else:
u_action_space = spaces.Box(
low=-agent.u_range, high=+agent.u_range, shape=(world.dim_p,), dtype=np.float32) # [-1,1]
if agent.movable:
total_action_space.append(u_action_space)
# communication action space
if self.discrete_action_space:
c_action_space = spaces.Discrete(world.dim_c)
else:
c_action_space = spaces.Box(low=0.0, high=1.0, shape=(
world.dim_c,), dtype=np.float32) # [0,1]
#c_action_space = spaces.Discrete(world.dim_c)
if not agent.silent:
total_action_space.append(c_action_space)
# total action space
if len(total_action_space) > 1:
# all action spaces are discrete, so simplify to MultiDiscrete action space
if all([isinstance(act_space, spaces.Discrete) for act_space in total_action_space]):
act_space = MultiDiscrete(
[[0, act_space.n-1] for act_space in total_action_space])
else:
act_space = spaces.Tuple(total_action_space)
self.action_space.append(act_space)
else:
self.action_space.append(total_action_space[0])
# observation space
obs_dim = len(observation_callback(agent, self.world))
share_obs_dim += obs_dim
self.observation_space.append(spaces.Box(
low=-np.inf, high=+np.inf, shape=(obs_dim,), dtype=np.float32)) # [-inf,inf]
agent.action.c = np.zeros(self.world.dim_c)
self.share_observation_space = [spaces.Box(
low=-np.inf, high=+np.inf, shape=(share_obs_dim,), dtype=np.float32)] * self.n
# rendering
self.shared_viewer = shared_viewer
if self.shared_viewer:
self.viewers = [None]
else:
self.viewers = [None] * self.n
self._reset_render()
def seed(self, seed=None):
if seed is None:
np.random.seed(1)
else:
np.random.seed(seed)
# step this is env.step()
def step(self, action_n):
self.current_step += 1
obs_n = []
reward_n = []
done_n = []
info_n = []
self.agents = self.world.policy_agents
# set action for each agent
for i, agent in enumerate(self.agents):
self._set_action(action_n[i], agent, self.action_space[i])
# advance world state
self.world.step() # core.step()
# record observation for each agent
for i, agent in enumerate(self.agents):
obs_n.append(self._get_obs(agent))
reward_n.append([self._get_reward(agent)])
done_n.append(self._get_done(agent))
info = {'individual_reward': self._get_reward(agent)}
env_info = self._get_info(agent)
if 'fail' in env_info.keys():
info['fail'] = env_info['fail']
info_n.append(info)
        # in the cooperative case all agents get the total reward; when shared_reward is set,
        # every agent receives the same summed reward
reward = np.sum(reward_n)
if self.shared_reward:
reward_n = [[reward]] * self.n
if self.post_step_callback is not None:
self.post_step_callback(self.world)
return obs_n, reward_n, done_n, info_n
def reset(self):
self.current_step = 0
# reset world
self.reset_callback(self.world)
# reset renderer
self._reset_render()
# record observations for each agent
obs_n = []
self.agents = self.world.policy_agents
for agent in self.agents:
obs_n.append(self._get_obs(agent))
return obs_n
# get info used for benchmarking
def _get_info(self, agent):
if self.info_callback is None:
return {}
return self.info_callback(agent, self.world)
# get observation for a particular agent
def _get_obs(self, agent):
if self.observation_callback is None:
return np.zeros(0)
return self.observation_callback(agent, self.world)
# get dones for a particular agent
# unused right now -- agents are allowed to go beyond the viewing screen
def _get_done(self, agent):
if self.done_callback is None:
if self.current_step >= self.world_length:
return True
else:
return False
return self.done_callback(agent, self.world)
# get reward for a particular agent
def _get_reward(self, agent):
if self.reward_callback is None:
return 0.0
return self.reward_callback(agent, self.world)
# set env action for a particular agent
def _set_action(self, action, agent, action_space, time=None):
agent.action.u = np.zeros(self.world.dim_p)
agent.action.c = np.zeros(self.world.dim_c)
# process action
if isinstance(action_space, MultiDiscrete):
act = []
size = action_space.high - action_space.low + 1
index = 0
for s in size:
act.append(action[index:(index+s)])
index += s
action = act
else:
action = [action]
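        # e.g. with world.dim_p == 2, a movable, speaking agent with discrete spaces has a
        # MultiDiscrete space built from Discrete(5) and Discrete(dim_c), so size == [5, dim_c]
        # above and a length-(5 + dim_c) action vector is split into
        # [action[0:5], action[5:5 + dim_c]]; any other space is simply wrapped as [action]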
if agent.movable:
# physical action
if self.discrete_action_input:
agent.action.u = np.zeros(self.world.dim_p)
# process discrete action
if action[0] == 1:
agent.action.u[0] = -1.0
if action[0] == 2:
agent.action.u[0] = +1.0
if action[0] == 3:
agent.action.u[1] = -1.0
if action[0] == 4:
agent.action.u[1] = +1.0
d = self.world.dim_p
else:
if self.discrete_action_space:
agent.action.u[0] += action[0][1] - action[0][2]
agent.action.u[1] += action[0][3] - action[0][4]
d = 5
else:
if self.force_discrete_action:
p = np.argmax(action[0][0:self.world.dim_p])
action[0][:] = 0.0
action[0][p] = 1.0
agent.action.u = action[0][0:self.world.dim_p]
d = self.world.dim_p
sensitivity = 5.0
if agent.accel is not None:
sensitivity = agent.accel
agent.action.u *= sensitivity
if (not agent.silent) and (not isinstance(action_space, MultiDiscrete)):
action[0] = action[0][d:]
else:
action = action[1:]
if not agent.silent:
# communication action
if self.discrete_action_input:
agent.action.c = np.zeros(self.world.dim_c)
agent.action.c[action[0]] = 1.0
else:
agent.action.c = action[0]
action = action[1:]
# make sure we used all elements of action
assert len(action) == 0
# reset rendering assets
def _reset_render(self):
self.render_geoms = None
self.render_geoms_xform = None
def render(self, mode='human', close=True):
if close:
            # close any existing renderers
for i, viewer in enumerate(self.viewers):
if viewer is not None:
viewer.close()
self.viewers[i] = None
return []
if mode == 'human':
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
message = ''
for agent in self.world.agents:
comm = []
for other in self.world.agents:
if other is agent:
continue
if np.all(other.state.c == 0):
word = '_'
else:
word = alphabet[np.argmax(other.state.c)]
message += (other.name + ' to ' +
agent.name + ': ' + word + ' ')
print(message)
for i in range(len(self.viewers)):
# create viewers (if necessary)
if self.viewers[i] is None:
# import rendering only if we need it (and don't import for headless machines)
#from gym.envs.classic_control import rendering
from . import rendering
self.viewers[i] = rendering.Viewer(700, 700)
# create rendering geometry
if self.render_geoms is None:
# import rendering only if we need it (and don't import for headless machines)
#from gym.envs.classic_control import rendering
from . import rendering
self.render_geoms = []
self.render_geoms_xform = []
self.comm_geoms = []
for entity in self.world.entities:
geom = rendering.make_circle(entity.size)
xform = rendering.Transform()
entity_comm_geoms = []
if 'agent' in entity.name:
geom.set_color(*entity.color, alpha=0.5)
if not entity.silent:
dim_c = self.world.dim_c
# make circles to represent communication
for ci in range(dim_c):
comm = rendering.make_circle(entity.size / dim_c)
comm.set_color(1, 1, 1)
comm.add_attr(xform)
offset = rendering.Transform()
comm_size = (entity.size / dim_c)
offset.set_translation(ci * comm_size * 2 -
entity.size + comm_size, 0)
comm.add_attr(offset)
entity_comm_geoms.append(comm)
else:
geom.set_color(*entity.color)
if entity.channel is not None:
dim_c = self.world.dim_c
# make circles to represent communication
for ci in range(dim_c):
comm = rendering.make_circle(entity.size / dim_c)
comm.set_color(1, 1, 1)
comm.add_attr(xform)
offset = rendering.Transform()
comm_size = (entity.size / dim_c)
offset.set_translation(ci * comm_size * 2 -
entity.size + comm_size, 0)
comm.add_attr(offset)
entity_comm_geoms.append(comm)
geom.add_attr(xform)
self.render_geoms.append(geom)
self.render_geoms_xform.append(xform)
self.comm_geoms.append(entity_comm_geoms)
for wall in self.world.walls:
corners = ((wall.axis_pos - 0.5 * wall.width, wall.endpoints[0]),
(wall.axis_pos - 0.5 *
wall.width, wall.endpoints[1]),
(wall.axis_pos + 0.5 *
wall.width, wall.endpoints[1]),
(wall.axis_pos + 0.5 * wall.width, wall.endpoints[0]))
if wall.orient == 'H':
corners = tuple(c[::-1] for c in corners)
geom = rendering.make_polygon(corners)
if wall.hard:
geom.set_color(*wall.color)
else:
geom.set_color(*wall.color, alpha=0.5)
self.render_geoms.append(geom)
# add geoms to viewer
# for viewer in self.viewers:
# viewer.geoms = []
# for geom in self.render_geoms:
# viewer.add_geom(geom)
for viewer in self.viewers:
viewer.geoms = []
for geom in self.render_geoms:
viewer.add_geom(geom)
for entity_comm_geoms in self.comm_geoms:
for geom in entity_comm_geoms:
viewer.add_geom(geom)
results = []
for i in range(len(self.viewers)):
from . import rendering
if self.shared_viewer:
pos = np.zeros(self.world.dim_p)
else:
pos = self.agents[i].state.p_pos
self.viewers[i].set_bounds(
pos[0]-cam_range, pos[0]+cam_range, pos[1]-cam_range, pos[1]+cam_range)
# update geometry positions
for e, entity in enumerate(self.world.entities):
self.render_geoms_xform[e].set_translation(*entity.state.p_pos)
if 'agent' in entity.name:
self.render_geoms[e].set_color(*entity.color, alpha=0.5)
if not entity.silent:
for ci in range(self.world.dim_c):
color = 1 - entity.state.c[ci]
self.comm_geoms[e][ci].set_color(
color, color, color)
else:
self.render_geoms[e].set_color(*entity.color)
if entity.channel is not None:
for ci in range(self.world.dim_c):
color = 1 - entity.channel[ci]
self.comm_geoms[e][ci].set_color(
color, color, color)
# render to display or array
results.append(self.viewers[i].render(
return_rgb_array=mode == 'rgb_array'))
return results
# create receptor field locations in local coordinate frame
def _make_receptor_locations(self, agent):
receptor_type = 'polar'
range_min = 0.05 * 2.0
range_max = 1.00
dx = []
# circular receptive field
if receptor_type == 'polar':
for angle in np.linspace(-np.pi, +np.pi, 8, endpoint=False):
for distance in np.linspace(range_min, range_max, 3):
dx.append(
distance * np.array([np.cos(angle), np.sin(angle)]))
# add origin
dx.append(np.array([0.0, 0.0]))
# grid receptive field
if receptor_type == 'grid':
for x in np.linspace(-range_max, +range_max, 5):
for y in np.linspace(-range_max, +range_max, 5):
dx.append(np.array([x, y]))
return dx
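        # e.g. with the defaults above the 'polar' field yields
        # 8 angles * 3 distances + 1 origin = 25 receptor offsets,
        # while the 'grid' variant gives a 5 x 5 = 25-point lattice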
| 40.055684
| 127
| 0.530178
|
768cf41dae9f5ab0c097a8fce414da43030768e4
| 941
|
py
|
Python
|
main.py
|
amitport/A2C
|
b9db1587656679bfadf8c409ceebbb7ac4034a22
|
[
"Apache-2.0"
] | 181
|
2018-01-05T18:09:24.000Z
|
2022-03-27T06:16:56.000Z
|
main.py
|
yo-yrl/A2C
|
b9db1587656679bfadf8c409ceebbb7ac4034a22
|
[
"Apache-2.0"
] | 14
|
2018-02-05T01:03:07.000Z
|
2021-03-19T09:32:50.000Z
|
main.py
|
yo-yrl/A2C
|
b9db1587656679bfadf8c409ceebbb7ac4034a22
|
[
"Apache-2.0"
] | 49
|
2018-01-12T05:02:59.000Z
|
2022-03-05T05:07:18.000Z
|
import tensorflow as tf
from utils.utils import create_experiment_dirs
from utils.utils import parse_args
from A2C import A2C
def main():
# Parse the JSON arguments
config_args = parse_args()
tf.reset_default_graph()
config = tf.ConfigProto(allow_soft_placement=True,
intra_op_parallelism_threads=config_args.num_envs,
inter_op_parallelism_threads=config_args.num_envs)
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
# Prepare Directories
config_args.experiment_dir, config_args.summary_dir, config_args.checkpoint_dir, config_args.output_dir, config_args.test_dir = \
create_experiment_dirs(config_args.experiment_dir)
a2c = A2C(sess, config_args)
if config_args.to_train:
a2c.train()
if config_args.to_test:
a2c.test(total_timesteps=10000000)
if __name__ == '__main__':
main()
| 28.515152
| 133
| 0.714134
|
89ffd0bd732422b99ce801096ba364f6454e1c03
| 15,208
|
py
|
Python
|
acq4/devices/MockClamp/MockClamp.py
|
campagnola/acq4
|
09699c07d8949950f6df149cf17892aaa3a37402
|
[
"MIT"
] | null | null | null |
acq4/devices/MockClamp/MockClamp.py
|
campagnola/acq4
|
09699c07d8949950f6df149cf17892aaa3a37402
|
[
"MIT"
] | 3
|
2016-03-29T15:32:27.000Z
|
2017-01-13T20:03:50.000Z
|
acq4/devices/MockClamp/MockClamp.py
|
campagnola/acq4
|
09699c07d8949950f6df149cf17892aaa3a37402
|
[
"MIT"
] | 2
|
2016-08-27T17:22:07.000Z
|
2016-10-19T21:51:20.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, with_statement
import os
import pyqtgraph.multiprocess as mp
from acq4.devices.AxoPatch200 import CancelException
from acq4.devices.DAQGeneric import DAQGeneric, DAQGenericTask, DAQGenericTaskGui
from acq4.devices.PatchClamp import PatchClamp
from pyqtgraph.WidgetGroup import WidgetGroup
from acq4.util import Qt
from acq4.util.Mutex import Mutex
from acq4.util.debug import printExc
Ui_MockClampDevGui = Qt.importTemplate('.devTemplate')
ivModes = {'I=0': 'IC', 'VC': 'VC', 'IC': 'IC'}
modeNames = ['VC', 'I=0', 'IC']
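# note: ivModes maps each clamp mode to the holding-value slot it uses;
# e.g. ivModes['I=0'] == 'IC', so the I=0 mode reads and writes the
# current-clamp holding value in MockClamp.holding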
class MockClamp(PatchClamp):
def __init__(self, dm, config, name):
PatchClamp.__init__(self, dm, config, name)
# Generate config to use for DAQ
self.devLock = Mutex(Mutex.Recursive)
daqConfig = {
'command': config['Command'],
'primary': config['ScaledSignal'],
}
self.holding = {
'VC': config.get('vcHolding', -0.05),
'IC': config.get('icHolding', 0.0)
}
self.mode = 'I=0'
self.config = config
# create a daq device under the hood
self.daqDev = DAQGeneric(dm, daqConfig, '{}Daq'.format(name))
try:
self.setHolding()
except:
printExc("Error while setting holding value:")
# Start a remote process to run the simulation.
self.process = mp.Process()
rsys = self.process._import('sys')
rsys._setProxyOptions(returnType='proxy') # need to access remote path by proxy, not by value
rsys.path.append(os.path.abspath(os.path.dirname(__file__)))
if config['simulator'] == 'builtin':
self.simulator = self.process._import('hhSim')
elif config['simulator'] == 'neuron':
self.simulator = self.process._import('neuronSim')
dm.declareInterface(name, ['clamp'], self)
def createTask(self, cmd, parentTask):
return MockClampTask(self, cmd, parentTask)
def taskInterface(self, taskRunner):
return MockClampTaskGui(self, taskRunner)
def deviceInterface(self, win):
return MockClampDevGui(self)
def setHolding(self, mode=None, value=None, force=False):
global ivModes
with self.devLock:
currentMode = self.getMode()
if mode is None:
mode = currentMode
ivMode = ivModes[mode] ## determine vc/ic
if value is None:
value = self.holding[ivMode]
else:
self.holding[ivMode] = value
if ivMode == ivModes[currentMode] or force:
# gain = self.getCmdGain(mode)
## override the scale since getChanScale won't necessarily give the correct value
## (we may be about to switch modes)
# DAQGeneric.setChanHolding(self, 'command', value, scale=gain)
pass
self.sigHoldingChanged.emit('primary', self.holding.copy())
def setChanHolding(self, chan, value=None):
if chan == 'command':
self.setHolding(value=value)
else:
self.daqDev.setChanHolding(self, chan, value)
def getChanHolding(self, chan):
if chan == 'command':
return self.getHolding()
else:
return self.daqDev.getChanHolding(chan)
def getHolding(self, mode=None):
global ivModes
with self.devLock:
if mode is None:
mode = self.getMode()
ivMode = ivModes[mode] ## determine vc/ic
return self.holding[ivMode]
def getState(self):
return {
'mode': self.getMode(),
}
def listModes(self):
global modeNames
return modeNames
def setMode(self, mode):
"""Set the mode of the AxoPatch (by requesting user intervention). Takes care of switching holding levels in I=0 mode if needed."""
mode = mode.upper()
startMode = self.getMode()
if startMode == mode:
return
startIvMode = ivModes[startMode]
ivMode = ivModes[mode]
if (startIvMode == 'VC' and ivMode == 'IC') or (startIvMode == 'IC' and ivMode == 'VC'):
## switch to I=0 first
# self.requestModeSwitch('I=0')
self.mode = 'I=0'
self.setHolding(ivMode, force=True) ## we're in I=0 mode now, so it's ok to force the holding value.
### TODO:
### If mode switches back the wrong direction, we need to reset the holding value and cancel.
self.mode = ivMode
self.sigStateChanged.emit(self.getState())
def getMode(self):
return self.mode
def getChanUnits(self, chan):
global ivModes
iv = ivModes[self.getMode()]
if iv == 'VC':
units = ['V', 'A']
else:
units = ['A', 'V']
if chan == 'command':
return units[0]
elif chan == 'secondary':
return units[0]
elif chan == 'primary':
return units[1]
def readChannel(self, ch):
pass
def quit(self):
# self.process.send(None)
self.process.close()
self.daqDev.quit()
def getDAQName(self):
"""Return the DAQ name used by this device. (assumes there is only one DAQ for now)"""
return self.config['Command']['device']
def autoPipetteOffset(self):
"""Automatically set the pipette offset.
"""
pass
def autoBridgeBalance(self):
"""Automatically set the bridge balance.
"""
pass
def autoCapComp(self):
"""Automatically configure capacitance compensation.
"""
pass
class MockClampTask(DAQGenericTask):
def __init__(self, dev, cmd, parentTask):
## make a few changes for compatibility with multiclamp
if 'daqProtocol' not in cmd:
cmd['daqProtocol'] = {}
daqP = cmd['daqProtocol']
if 'command' in cmd:
if 'holding' in cmd:
daqP['command'] = {'command': cmd['command'], 'holding': cmd['holding']}
else:
daqP['command'] = {'command': cmd['command']}
daqP['command']['lowLevelConf'] = {'mockFunc': self.write}
cmd['daqProtocol']['primary'] = {'record': True, 'lowLevelConf': {'mockFunc': self.read}}
DAQGenericTask.__init__(self, dev.daqDev, cmd['daqProtocol'], parentTask)
self.cmd = cmd
self.clampDev = dev
modPath = os.path.abspath(os.path.split(__file__)[0])
def configure(self):
### Record initial state or set initial value
##if 'holding' in self.cmd:
## self.dev.setHolding(self.cmd['mode'], self.cmd['holding'])
if 'mode' in self.cmd:
self.clampDev.setMode(self.cmd['mode'])
mode = self.clampDev.getMode()
self.ampState = {
'mode': mode,
'primaryUnits': 'A' if mode == 'VC' else 'V',
# copying multiclamp format here, but should eventually pick something more universal
'ClampParams': ({
'BridgeBalResist': 0,
'BridgeBalEnable': True,
} if mode == 'IC' else {}),
}
### Do not configure daq until mode is set. Otherwise, holding values may be incorrect.
DAQGenericTask.configure(self)
def read(self):
## Called by DAQGeneric to simulate a read-from-DAQ
res = self.job.result(timeout=30)._getValue()
return res
def write(self, data, dt):
## Called by DAQGeneric to simulate a write-to-DAQ
self.job = self.clampDev.simulator.run({'data': data, 'dt': dt, 'mode': self.cmd['mode']}, _callSync='async')
def isDone(self):
## check on neuron process
# return self.process.poll() is not None
return True
def stop(self, abort=False):
DAQGenericTask.stop(self, abort)
def getResult(self):
result = DAQGenericTask.getResult(self)
result._info[-1]['startTime'] = next(iter(result._info[-1][self.clampDev.getDAQName()].values()))['startTime']
result._info[-1]['ClampState'] = self.ampState
return result
class MockClampTaskGui(DAQGenericTaskGui):
def __init__(self, dev, taskRunner):
DAQGenericTaskGui.__init__(self, dev.daqDev, taskRunner, ownUi=False)
self.clampDev = dev
self.layout = Qt.QGridLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.layout)
self.splitter1 = Qt.QSplitter()
self.splitter1.setOrientation(Qt.Qt.Horizontal)
self.layout.addWidget(self.splitter1)
self.splitter2 = Qt.QSplitter()
self.splitter2.setOrientation(Qt.Qt.Vertical)
self.modeCombo = Qt.QComboBox()
self.splitter2.addWidget(self.modeCombo)
self.modeCombo.addItems(self.clampDev.listModes())
self.splitter3 = Qt.QSplitter()
self.splitter3.setOrientation(Qt.Qt.Vertical)
(w1, p1) = self.createChannelWidget('primary')
(w2, p2) = self.createChannelWidget('command')
self.cmdWidget = w2
self.inputWidget = w1
self.cmdPlot = p2
self.inputPlot = p1
self.cmdWidget.setMeta('x', siPrefix=True, suffix='s', dec=True)
self.cmdWidget.setMeta('y', siPrefix=True, dec=True)
self.splitter1.addWidget(self.splitter2)
self.splitter1.addWidget(self.splitter3)
self.splitter2.addWidget(w1)
self.splitter2.addWidget(w2)
self.splitter3.addWidget(p1)
self.splitter3.addWidget(p2)
self.splitter1.setSizes([100, 500])
self.stateGroup = WidgetGroup([
(self.splitter1, 'splitter1'),
(self.splitter2, 'splitter2'),
(self.splitter3, 'splitter3'),
])
self.modeCombo.currentIndexChanged.connect(self.modeChanged)
self.modeChanged()
def saveState(self):
"""Return a dictionary representing the current state of the widget."""
state = {}
state['daqState'] = DAQGenericTaskGui.saveState(self)
state['mode'] = self.getMode()
# state['holdingEnabled'] = self.ctrl.holdingCheck.isChecked()
# state['holding'] = self.ctrl.holdingSpin.value()
return state
def restoreState(self, state):
"""Restore the state of the widget from a dictionary previously generated using saveState"""
# print 'state: ', state
# print 'DaqGeneric : ', dir(DAQGenericTaskGui)
if 'mode' in state:
self.modeCombo.setCurrentIndex(self.modeCombo.findText(state['mode']))
# self.ctrl.holdingCheck.setChecked(state['holdingEnabled'])
# if state['holdingEnabled']:
# self.ctrl.holdingSpin.setValue(state['holding'])
if 'daqState' in state:
return DAQGenericTaskGui.restoreState(self, state['daqState'])
else:
return None
def generateTask(self, params=None):
daqTask = DAQGenericTaskGui.generateTask(self, params)
task = {
'mode': self.getMode(),
'daqProtocol': daqTask
}
return task
def modeChanged(self):
global ivModes
ivm = ivModes[self.getMode()]
w = self.cmdWidget
if ivm == 'VC':
scale = 1e-3
cmdUnits = 'V'
inpUnits = 'A'
else:
scale = 1e-12
cmdUnits = 'A'
inpUnits = 'V'
self.inputWidget.setUnits(inpUnits)
self.cmdWidget.setUnits(cmdUnits)
self.cmdWidget.setMeta('y', minStep=scale, step=scale * 10, value=0.)
self.inputPlot.setLabel('left', units=inpUnits)
self.cmdPlot.setLabel('left', units=cmdUnits)
# w.setScale(scale)
# for s in w.getSpins():
# s.setOpts(minStep=scale)
self.cmdWidget.updateHolding()
def getMode(self):
return str(self.modeCombo.currentText())
def sequenceChanged(self):
self.sigSequenceChanged.emit(self.clampDev.name())
def getChanHolding(self, chan):
if chan == 'command':
return self.clampDev.getHolding(self.getMode())
else:
raise Exception("Can't get holding value for channel %s" % chan)
class MockClampDevGui(Qt.QWidget):
def __init__(self, dev):
Qt.QWidget.__init__(self)
self.dev = dev
self.ui = Ui_MockClampDevGui()
self.ui.setupUi(self)
self.ui.vcHoldingSpin.setOpts(step=1, minStep=1e-3, dec=True, suffix='V', siPrefix=True)
self.ui.icHoldingSpin.setOpts(step=1, minStep=1e-12, dec=True, suffix='A', siPrefix=True)
# self.ui.modeCombo.currentIndexChanged.connect(self.modeComboChanged)
self.modeRadios = {
'VC': self.ui.vcModeRadio,
'IC': self.ui.icModeRadio,
'I=0': self.ui.i0ModeRadio,
}
self.updateStatus()
for v in self.modeRadios.values():
v.toggled.connect(self.modeRadioChanged)
self.ui.vcHoldingSpin.valueChanged.connect(self.vcHoldingChanged)
self.ui.icHoldingSpin.valueChanged.connect(self.icHoldingChanged)
self.dev.sigHoldingChanged.connect(self.devHoldingChanged)
self.dev.sigStateChanged.connect(self.devStateChanged)
def updateStatus(self):
global modeNames
mode = self.dev.getMode()
if mode is None:
return
vcHold = self.dev.getHolding('VC')
icHold = self.dev.getHolding('IC')
self.modeRadios[mode].setChecked(True)
# self.ui.modeCombo.setCurrentIndex(self.ui.modeCombo.findText(mode))
self.ui.vcHoldingSpin.setValue(vcHold)
self.ui.icHoldingSpin.setValue(icHold)
def devHoldingChanged(self, chan, hval):
if isinstance(hval, dict):
self.ui.vcHoldingSpin.blockSignals(True)
self.ui.icHoldingSpin.blockSignals(True)
self.ui.vcHoldingSpin.setValue(hval['VC'])
self.ui.icHoldingSpin.setValue(hval['IC'])
self.ui.vcHoldingSpin.blockSignals(False)
self.ui.icHoldingSpin.blockSignals(False)
def devStateChanged(self):
mode = self.dev.getMode()
for r in self.modeRadios.values():
r.blockSignals(True)
# self.ui.modeCombo.blockSignals(True)
# self.ui.modeCombo.setCurrentIndex(self.ui.modeCombo.findText(mode))
self.modeRadios[mode].setChecked(True)
# self.ui.modeCombo.blockSignals(False)
for r in self.modeRadios.values():
r.blockSignals(False)
def vcHoldingChanged(self):
self.dev.setHolding('VC', self.ui.vcHoldingSpin.value())
def icHoldingChanged(self):
self.dev.setHolding('IC', self.ui.icHoldingSpin.value())
def modeRadioChanged(self, m):
try:
if not m:
return
for mode, r in self.modeRadios.items():
if r.isChecked():
self.dev.setMode(mode)
except CancelException:
self.updateStatus()
| 33.946429
| 139
| 0.598698
|
05709aae01fd4368494d31f610638b5fcc5ddad7
| 603
|
py
|
Python
|
app/__init__.py
|
nicholas-oduor/News-API
|
ee52f775b20816edd61667ab2a08db23977708f2
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
nicholas-oduor/News-API
|
ee52f775b20816edd61667ab2a08db23977708f2
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
nicholas-oduor/News-API
|
ee52f775b20816edd61667ab2a08db23977708f2
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_bootstrap import Bootstrap
from config import config_options
bootstrap = Bootstrap()
def create_app(config_name):
app = Flask(__name__)
# Creating the app configurations
app.config.from_object(config_options[config_name])
# Initializing flask extensions
bootstrap.init_app(app)
# Will add the views and forms
# Registering the blueprint
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
# setting config
from .request import configure_request
configure_request(app)
return app
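# Example usage (a minimal sketch; 'development' is assumed to be one of the
# keys defined in config_options, which is not shown here):
#
#   app = create_app('development')
#   app.run()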
| 22.333333
| 55
| 0.751244
|
81f0348c6f36bf839409016f641e8cf98edac51c
| 9,045
|
py
|
Python
|
pybamm/solvers/algebraic_solver.py
|
dion-w/PyBaMM
|
aeb9bcc82bb5dc3fba4fa045c4cad9d2d41b6359
|
[
"BSD-3-Clause"
] | 1
|
2022-03-24T01:23:16.000Z
|
2022-03-24T01:23:16.000Z
|
pybamm/solvers/algebraic_solver.py
|
sxwangxiang/PyBaMM
|
23157aebce218444edc83b525dfb2c7fc8637598
|
[
"BSD-3-Clause"
] | null | null | null |
pybamm/solvers/algebraic_solver.py
|
sxwangxiang/PyBaMM
|
23157aebce218444edc83b525dfb2c7fc8637598
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Algebraic solver class
#
import casadi
import pybamm
import numpy as np
from scipy import optimize
from scipy.sparse import issparse
class AlgebraicSolver(pybamm.BaseSolver):
"""Solve a discretised model which contains only (time independent) algebraic
equations using a root finding algorithm.
Uses scipy.optimize.root.
Note: this solver could be extended for quasi-static models, or models in
which the time derivative is manually discretised and results in a (possibly
    nonlinear) algebraic system at each time level.
Parameters
----------
method : str, optional
The method to use to solve the system (default is "lm"). If it starts with
"lsq", least-squares minimization is used. The method for least-squares can be
        specified in the form "lsq__methodname" (e.g. "lsq__trf")
tol : float, optional
The tolerance for the solver (default is 1e-6).
extra_options : dict, optional
Any options to pass to the rootfinder. Vary depending on which method is chosen.
Please consult `SciPy documentation <https://tinyurl.com/ybr6cfqs>`_ for
details.
"""
def __init__(self, method="lm", tol=1e-6, extra_options=None):
super().__init__(method=method)
self.tol = tol
self.extra_options = extra_options or {}
self.name = "Algebraic solver ({})".format(method)
self.algebraic_solver = True
pybamm.citations.register("Virtanen2020")
@property
def tol(self):
return self._tol
@tol.setter
def tol(self, value):
self._tol = value
def _integrate(self, model, t_eval, inputs_dict=None):
"""
Calculate the solution of the algebraic equations through root-finding
Parameters
----------
model : :class:`pybamm.BaseModel`
The model whose solution to calculate.
t_eval : :class:`numpy.array`, size (k,)
The times at which to compute the solution
inputs_dict : dict, optional
Any input parameters to pass to the model when solving
"""
inputs_dict = inputs_dict or {}
if model.convert_to_format == "casadi":
inputs = casadi.vertcat(*[x for x in inputs_dict.values()])
else:
inputs = inputs_dict
y0 = model.y0
if isinstance(y0, casadi.DM):
y0 = y0.full()
y0 = y0.flatten()
# The casadi algebraic solver can read rhs equations, but leaves them unchanged
# i.e. the part of the solution vector that corresponds to the differential
# equations will be equal to the initial condition provided. This allows this
# solver to be used for initialising the DAE solvers
# Split y0 into differential and algebraic
if model.rhs == {}:
len_rhs = 0
else:
len_rhs = model.rhs_eval(t_eval[0], y0, inputs).shape[0]
y0_diff, y0_alg = np.split(y0, [len_rhs])
test_result = model.algebraic_eval(0, y0, inputs)
if isinstance(test_result, casadi.DM):
def algebraic(t, y):
result = model.algebraic_eval(t, y, inputs)
return result.full().flatten()
else:
def algebraic(t, y):
result = model.algebraic_eval(t, y, inputs)
return result.flatten()
y_alg = np.empty((len(y0_alg), len(t_eval)))
timer = pybamm.Timer()
integration_time = 0
for idx, t in enumerate(t_eval):
def root_fun(y_alg):
"Evaluates algebraic using y"
y = np.concatenate([y0_diff, y_alg])
out = algebraic(t, y)
pybamm.logger.debug(
"Evaluating algebraic equations at t={}, L2-norm is {}".format(
t * model.timescale_eval, np.linalg.norm(out)
)
)
return out
jac = model.jac_algebraic_eval
if jac:
if issparse(jac(t_eval[0], y0, inputs)):
def jac_fn(y_alg):
"""
Evaluates jacobian using y0_diff (fixed) and y_alg (varying)
"""
y = np.concatenate([y0_diff, y_alg])
return jac(0, y, inputs)[:, len_rhs:].toarray()
else:
def jac_fn(y_alg):
"""
Evaluates jacobian using y0_diff (fixed) and y_alg (varying)
"""
y = np.concatenate([y0_diff, y_alg])
return jac(0, y, inputs)[:, len_rhs:]
else:
jac_fn = None
itr = 0
maxiter = 2
success = False
while not success:
# Methods which use least-squares are specified as either "lsq",
# which uses the default method, or with "lsq__methodname"
if self.method.startswith("lsq"):
if self.method == "lsq":
method = "trf"
else:
method = self.method[5:]
if jac_fn is None:
jac_fn = "2-point"
timer.reset()
sol = optimize.least_squares(
root_fun,
y0_alg,
method=method,
ftol=self.tol,
jac=jac_fn,
bounds=model.bounds,
**self.extra_options,
)
integration_time += timer.time()
# Methods which use minimize are specified as either "minimize",
# which uses the default method, or with "minimize__methodname"
elif self.method.startswith("minimize"):
# Adapt the root function for minimize
def root_norm(y):
return np.sum(root_fun(y) ** 2)
if jac_fn is None:
jac_norm = None
else:
def jac_norm(y):
return np.sum(2 * root_fun(y) * jac_fn(y), 0)
if self.method == "minimize":
method = None
else:
method = self.method[10:]
extra_options = self.extra_options
if np.any(model.bounds[0] != -np.inf) or np.any(
model.bounds[1] != np.inf
):
bounds = [
(lb, ub) for lb, ub in zip(model.bounds[0], model.bounds[1])
]
extra_options["bounds"] = bounds
timer.reset()
sol = optimize.minimize(
root_norm,
y0_alg,
method=method,
tol=self.tol,
jac=jac_norm,
**extra_options,
)
integration_time += timer.time()
else:
timer.reset()
sol = optimize.root(
root_fun,
y0_alg,
method=self.method,
tol=self.tol,
jac=jac_fn,
options=self.extra_options,
)
integration_time += timer.time()
if sol.success and np.all(abs(sol.fun) < self.tol):
# update initial guess for the next iteration
y0_alg = sol.x
# update solution array
y_alg[:, idx] = y0_alg
success = True
elif not sol.success:
raise pybamm.SolverError(
"Could not find acceptable solution: {}".format(sol.message)
)
else:
y0_alg = sol.x
if itr > maxiter:
raise pybamm.SolverError(
"Could not find acceptable solution: solver terminated "
"successfully, but maximum solution error "
"({}) above tolerance ({})".format(
np.max(abs(sol.fun)), self.tol
)
)
itr += 1
# Concatenate differential part
y_diff = np.r_[[y0_diff] * len(t_eval)].T
y_sol = np.r_[y_diff, y_alg]
# Return solution object (no events, so pass None to t_event, y_event)
sol = pybamm.Solution(t_eval, y_sol, model, inputs_dict, termination="success")
sol.integration_time = integration_time
return sol
| 38.164557
| 88
| 0.487341
|
425c38c1b2293f035d02b0ea3a933f65fdd0c526
| 6,278
|
py
|
Python
|
train.py
|
orris27/nus_cs5242_project_group27
|
2aa5722e8d9d722ff1a3a37f36cb35c1ece481f8
|
[
"MIT"
] | null | null | null |
train.py
|
orris27/nus_cs5242_project_group27
|
2aa5722e8d9d722ff1a3a37f36cb35c1ece481f8
|
[
"MIT"
] | null | null | null |
train.py
|
orris27/nus_cs5242_project_group27
|
2aa5722e8d9d722ff1a3a37f36cb35c1ece481f8
|
[
"MIT"
] | null | null | null |
import json
import os
import csv
import numpy as np
import misc.utils as utils
import opts
import torch
import torch.optim as optim
from torch.nn.utils import clip_grad_value_
from dataloader import VideoDataset
from models import DecoderRNN, EncoderRNN, S2VTAttModel, S2VTModel
from torch import nn
from torch.utils.data import DataLoader
def train(loader, model, crit, optimizer, lr_scheduler, opt, testloader):
#object2idx = json.load(open('mydata/object1_object2.json', 'r'))
#relationship2idx = json.load(open('mydata/relationship.json', 'r'))
object2idx = json.load(open(opt['object1_object2_json']))
relationship2idx = json.load(open(opt['relationship_json']))
idx2object = {idx:obj for obj, idx in object2idx.items()}
idx2relationship = {idx:rel for rel, idx in relationship2idx.items()}
model.train()
for epoch in range(opt["epochs"]):
lr_scheduler.step()
iteration = 0
# If start self crit training
for data in loader:
torch.cuda.synchronize()
fc_feats = data['fc_feats'].cuda()
labels = data['labels'].cuda()
#masks = data['masks'].cuda()
optimizer.zero_grad()
seq_probs, _ = model(fc_feats, labels, 'train')
loss = crit(seq_probs, labels)
loss.backward()
clip_grad_value_(model.parameters(), opt['grad_clip'])
optimizer.step()
train_loss = loss.item()
torch.cuda.synchronize()
iteration += 1
print("iter %d (epoch %d), train_loss = %.6f" %
(iteration, epoch, train_loss))
if epoch % opt["save_checkpoint_every"] == 0:
csvfile_digit_test = open(os.path.join(opt["checkpoint_path"], 'test_%d_digit.csv'%epoch), 'w', newline='')
writer_digit_test = csv.DictWriter(csvfile_digit_test, fieldnames=['ID', 'label'], delimiter=',')
writer_digit_test.writeheader()
csvfile_word_test = open(os.path.join(opt["checkpoint_path"], 'test_%d_word.csv'%epoch), 'w', newline='')
writer_word_test = csv.DictWriter(csvfile_word_test, fieldnames=['ID', 'label'], delimiter=',')
writer_word_test.writeheader()
model.eval()
iteration_test = 0
for data in testloader:
if iteration_test >= 1:
assert False
torch.cuda.synchronize()
fc_feats = data['fc_feats'].cuda()
with torch.no_grad():
seq_logprobs, preds5_list = model(fc_feats, None, 'test')
torch.cuda.synchronize()
iteration_test += 1
model.train()
preds = torch.stack(preds5_list, 1) # (119, 3, 5)
torch.cuda.synchronize()
idx = 0
for vi in range(preds.shape[0]):
for caption_id in range(preds.shape[1]):
indices = preds[vi][caption_id].cpu().numpy()
digits = ' '.join([str(elm) for elm in indices])
if caption_id == 1: # relationship
words = ' '.join([idx2relationship[elm] for elm in indices])
else: # object
words = ' '.join([idx2object[elm] for elm in indices])
writer_digit_test.writerow({'ID': idx, 'label': digits})
writer_word_test.writerow({'ID': idx, 'label': words})
idx += 1
csvfile_digit_test.close()
csvfile_word_test.close()
print('%d is saved successfully!!'%(epoch))
model_path = os.path.join(opt["checkpoint_path"], 'model_final.pth' )
torch.save(model.state_dict(), model_path)
print("model saved to %s" % (model_path))
def main(opt):
dataset = VideoDataset(opt, 'train')
dataset_test = VideoDataset(opt, 'test')
dataloader = DataLoader(dataset, batch_size=opt["batch_size"], shuffle=True)
dataloader_test = DataLoader(dataset_test, batch_size=opt["batch_size"], shuffle=False)
opt["obj_vocab_size"] = dataset.get_obj_vocab_size()
opt["rel_vocab_size"] = dataset.get_rel_vocab_size()
if opt["model"] == 'S2VTModel':
model = S2VTModel(
opt["vocab_size"],
opt["max_len"],
opt["dim_hidden"],
opt["dim_word"],
opt['dim_vid'],
rnn_cell=opt['rnn_type'],
n_layers=opt['num_layers'],
rnn_dropout_p=opt["rnn_dropout_p"])
elif opt["model"] == "S2VTAttModel":
encoder = EncoderRNN(
opt["dim_vid"],
opt["dim_hidden"],
bidirectional=opt["bidirectional"],
input_dropout_p=opt["input_dropout_p"],
rnn_cell=opt['rnn_type'],
rnn_dropout_p=opt["rnn_dropout_p"])
decoder = DecoderRNN(
opt["obj_vocab_size"],
opt["rel_vocab_size"],
opt["max_len"],
opt["dim_hidden"],
opt["dim_word"],
input_dropout_p=opt["input_dropout_p"],
rnn_cell=opt['rnn_type'],
rnn_dropout_p=opt["rnn_dropout_p"],
bidirectional=opt["bidirectional"])
model = S2VTAttModel(encoder, decoder)
model = model.cuda()
crit = utils.ObjRelCriterion()
#rl_crit = utils.RewardCriterion()
optimizer = optim.Adam(
model.parameters(),
lr=opt["learning_rate"],
weight_decay=opt["weight_decay"])
exp_lr_scheduler = optim.lr_scheduler.StepLR(
optimizer,
step_size=opt["learning_rate_decay_every"],
gamma=opt["learning_rate_decay_rate"])
train(dataloader, model, crit, optimizer, exp_lr_scheduler, opt, dataloader_test)
if __name__ == '__main__':
opt = opts.parse_opt()
opt = vars(opt)
if not os.path.isdir(opt["checkpoint_path"]):
os.mkdir(opt["checkpoint_path"])
opt["checkpoint_path"] = os.path.join(opt["checkpoint_path"], '%s'%utils.get_timestamp())
os.makedirs(opt["checkpoint_path"], exist_ok=False)
opt_json = os.path.join(opt["checkpoint_path"], 'opt_info.json')
with open(opt_json, 'w') as f:
json.dump(opt, f)
print('save opt details to %s' % (opt_json))
main(opt)
| 37.369048
| 119
| 0.592386
|
83859ff6cd23ae768dfb7ad1de6da818f55659c7
| 1,558
|
py
|
Python
|
nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py
|
moloney/nipype
|
a7a9c85c79cb1412ba03406074f83200447ef50b
|
[
"Apache-2.0"
] | 7
|
2017-02-17T08:54:26.000Z
|
2022-03-10T20:57:23.000Z
|
nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py
|
moloney/nipype
|
a7a9c85c79cb1412ba03406074f83200447ef50b
|
[
"Apache-2.0"
] | 1
|
2016-04-25T15:07:09.000Z
|
2016-04-25T15:07:09.000Z
|
nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py
|
moloney/nipype
|
a7a9c85c79cb1412ba03406074f83200447ef50b
|
[
"Apache-2.0"
] | 2
|
2017-09-23T16:22:00.000Z
|
2019-08-01T14:18:52.000Z
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..dti import ComputeMeanDiffusivity
def test_ComputeMeanDiffusivity_inputs():
input_map = dict(
args=dict(argstr='%s', ),
environ=dict(
nohash=True,
usedefault=True,
),
ignore_exception=dict(
deprecated='1.0.0',
nohash=True,
usedefault=True,
),
in_file=dict(
argstr='< %s',
mandatory=True,
position=1,
),
inputdatatype=dict(argstr='-inputdatatype %s', ),
inputmodel=dict(argstr='-inputmodel %s', ),
out_file=dict(
argstr='> %s',
genfile=True,
position=-1,
),
outputdatatype=dict(argstr='-outputdatatype %s', ),
scheme_file=dict(
argstr='%s',
position=2,
),
terminal_output=dict(
deprecated='1.0.0',
nohash=True,
),
)
inputs = ComputeMeanDiffusivity.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_ComputeMeanDiffusivity_outputs():
output_map = dict(md=dict(), )
outputs = ComputeMeanDiffusivity.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| 29.961538
| 67
| 0.56611
|
92b248bfaa9d8433be2c2db26b6344fff3a2bfc7
| 1,502
|
py
|
Python
|
docker/src/clawpack-5.3.1/pyclaw/development/DMPFOR/dmpfor_test.py
|
ian-r-rose/visualization
|
ed6d9fab95eb125e7340ab3fad3ed114ed3214af
|
[
"CC-BY-4.0"
] | 11
|
2017-01-04T18:19:48.000Z
|
2021-02-21T01:46:33.000Z
|
docker/src/clawpack-5.3.1/pyclaw/development/DMPFOR/dmpfor_test.py
|
ian-r-rose/visualization
|
ed6d9fab95eb125e7340ab3fad3ed114ed3214af
|
[
"CC-BY-4.0"
] | 8
|
2016-09-22T20:49:51.000Z
|
2019-09-06T23:28:13.000Z
|
docker/src/clawpack-5.3.1/pyclaw/development/DMPFOR/dmpfor_test.py
|
ian-r-rose/visualization
|
ed6d9fab95eb125e7340ab3fad3ed114ed3214af
|
[
"CC-BY-4.0"
] | 13
|
2016-09-22T20:20:06.000Z
|
2020-07-13T14:48:32.000Z
|
from petsc4py import PETSc
import numpy as np
import DMPFOR
global_nx =3
global_ny =2
dof=4
da = PETSc.DA().create(dim=2,
dof=dof,
sizes=[global_nx, global_ny],
#periodic_type = PETSc.DA.PeriodicType.GHOSTED_XYZ,
#stencil_type=self.STENCIL,
#stencil_width=2,
comm=PETSc.COMM_WORLD)
gVec = da.createGlobalVector()
lVec = da.createLocalVector()
ranges = da.getRanges()
nx_start = ranges[0][0]
nx_end = ranges[0][1]
ny_start = ranges[1][0]
ny_end = ranges[1][1]
nx = nx_end - nx_start
ny = ny_end - ny_start
q = np.empty((dof, nx, ny), order='F')
for i in range(0,nx):
for j in range(0,ny):
for k in range(0,dof):
q[k,i,j] = k+10*i+100*j
gVec.array = q
q = gVec.array.reshape((dof, nx, ny), order='F')
print "da array from python"
print q
print "da array from fortran"
DMPFOR.dmpfor(q,dof,nx,ny)
print "da array from python after rolling axises using rollaxis"
rolled_q_1 = np.rollaxis(q,0,3)
rolled_q_1 = np.reshape(rolled_q_1,(nx,ny,dof),order='F')
print rolled_q_1
print "da array from fortran after rolling axises using rollaxis"
DMPFOR.dmpfor(rolled_q_1,nx,ny,dof)
print "da array from python after rolling axises using element by element copy"
rolled_q_2 = np.empty((nx,ny,dof),order='F')
for i in range(0,nx):
for j in range(0,ny):
for k in range(0,dof):
rolled_q_2[i,j,k] = q[k,i,j]
print(rolled_q_2)
print("da array from fortran after rolling axes using element by element copy")
DMPFOR.dmpfor(rolled_q_2,nx,ny,dof)
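# Illustrative sketch (helper name is hypothetical; not part of the original test script):
# when arrays cross into Fortran via f2py, shape alone is not the whole story -- strides
# and Fortran-contiguity decide whether a copy is made. This helper just prints that
# information for any of the arrays built above so the two "rolled" variants can be compared.
def _show_layout(name, arr):
    # Report shape, strides (in bytes) and whether the buffer is already in
    # Fortran (column-major) order.
    print(name, arr.shape, arr.strides,
          "F_CONTIGUOUS" if arr.flags['F_CONTIGUOUS'] else "not F-contiguous")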
| 18.775
| 80
| 0.695739
|
89f4e19917d007b2e6a7fa7d129c98923f44ee4f
| 34,453
|
py
|
Python
|
other_layer/transferleraning_v4(mix7)/retrain.py
|
yuwei998/Deep_Transfer
|
41ebb3bb096f5fc417a900bd539650fd65189554
|
[
"MIT"
] | 4
|
2018-07-31T01:49:14.000Z
|
2020-07-21T11:30:41.000Z
|
other_layer/transferleraning_v4(mix7)/retrain.py
|
yuwei998/Deep_Transfer
|
41ebb3bb096f5fc417a900bd539650fd65189554
|
[
"MIT"
] | null | null | null |
other_layer/transferleraning_v4(mix7)/retrain.py
|
yuwei998/Deep_Transfer
|
41ebb3bb096f5fc417a900bd539650fd65189554
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from datetime import datetime
import hashlib
import os.path
import random
import re
import struct
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
import tensorflow.contrib.slim as slim
FLAGS = None
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
BOTTLENECK_TENSOR_NAME = 'mixed_6/join:0'
BATCH_SIZE=5
BOTTLENECK_TENSOR_SIZE=2048
# Shape of the Inception v3 bottleneck output used by this script
BOTTLENECK_TENSOR_SHAPE = [None,17,17,768]
MODEL_INPUT_WIDTH = 299
MODEL_INPUT_HEIGHT = 299
MODEL_INPUT_DEPTH = 3
JPEG_DATA_TENSOR_NAME = 'DecodeJpeg/contents:0'
RESIZED_INPUT_TENSOR_NAME = 'ResizeBilinear:0'
MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M
def create_image_lists(image_dir, testing_percentage, validation_percentage):
if not gfile.Exists(image_dir):
print("Image directory '" + image_dir + "' not found.")
return None
result = {}
sub_dirs = [x[0] for x in gfile.Walk(image_dir)]
# The root directory comes first, so skip it.
is_root_dir = True
for sub_dir in sub_dirs:
if is_root_dir:
is_root_dir = False
continue
extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']
file_list = []
dir_name = os.path.basename(sub_dir)
if dir_name == image_dir:
continue
print("Looking for images in '" + dir_name + "'")
for extension in extensions:
file_glob = os.path.join(image_dir, dir_name, '*.' + extension)
file_list.extend(gfile.Glob(file_glob))
if not file_list:
print('No files found')
continue
if len(file_list) < 20:
print('WARNING: Folder has less than 20 images, which may cause issues.')
elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:
print('WARNING: Folder {} has more than {} images. Some images will '
'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))
label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())
training_images = []
testing_images = []
validation_images = []
for file_name in file_list:
base_name = os.path.basename(file_name)
hash_name = re.sub(r'_nohash_.*$', '', file_name)
hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()
percentage_hash = ((int(hash_name_hashed, 16) %
(MAX_NUM_IMAGES_PER_CLASS + 1)) *
(100.0 / MAX_NUM_IMAGES_PER_CLASS))
if percentage_hash < validation_percentage:
validation_images.append(base_name)
elif percentage_hash < (testing_percentage + validation_percentage):
testing_images.append(base_name)
else:
training_images.append(base_name)
result[label_name] = {
'dir': dir_name,
'training': training_images,
'testing': testing_images,
'validation': validation_images,
}
return result
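# Illustrative sketch (helper name is hypothetical; not part of the original script): the
# split above is deterministic -- a file's partition depends only on a hash of its name
# (ignoring any _nohash_ suffix), so re-running the script keeps every image in the same
# set. The same rule applied to a single base name:
def _partition_for(base_name, testing_percentage=10, validation_percentage=10):
    hash_name = re.sub(r'_nohash_.*$', '', base_name)
    hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()
    percentage_hash = ((int(hashed, 16) % (MAX_NUM_IMAGES_PER_CLASS + 1)) *
                       (100.0 / MAX_NUM_IMAGES_PER_CLASS))
    if percentage_hash < validation_percentage:
        return 'validation'
    if percentage_hash < (testing_percentage + validation_percentage):
        return 'testing'
    return 'training'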
def get_image_path(image_lists, label_name, index, image_dir, category):
if label_name not in image_lists:
tf.logging.fatal('Label does not exist %s.', label_name)
label_lists = image_lists[label_name]
if category not in label_lists:
tf.logging.fatal('Category does not exist %s.', category)
category_list = label_lists[category]
if not category_list:
tf.logging.fatal('Label %s has no images in the category %s.',
label_name, category)
mod_index = index % len(category_list)
base_name = category_list[mod_index]
sub_dir = label_lists['dir']
full_path = os.path.join(image_dir, sub_dir, base_name)
return full_path
def get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,
category):
return get_image_path(image_lists, label_name, index, bottleneck_dir,
category) + '.txt'
def create_inception_graph():
with tf.Session() as sess:
model_filename = os.path.join(
FLAGS.model_dir, 'classify_image_graph_def.pb')
with gfile.FastGFile(model_filename, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
tf.import_graph_def(graph_def, name='', return_elements=[
BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
RESIZED_INPUT_TENSOR_NAME]))
return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
def run_bottleneck_on_image(sess, image_data, image_data_tensor,
bottleneck_tensor):
bottleneck_values = sess.run(
bottleneck_tensor,
{image_data_tensor: image_data})
# bottleneck_values = np.squeeze(bottleneck_values)
    # Flatten bottleneck_values into a 1-D vector so it is easy to save as text
num=bottleneck_values.shape[0]*bottleneck_values.shape[1]*bottleneck_values.shape[2]*bottleneck_values.shape[3]
bottleneck_values=np.reshape(bottleneck_values,(num,))
return bottleneck_values
# Download and extract the pretrained model
def maybe_download_and_extract():
dest_directory = FLAGS.model_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' %
(filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL,
filepath,
_progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def ensure_dir_exists(dir_name):
if not os.path.exists(dir_name):
os.makedirs(dir_name)
def write_list_of_floats_to_file(list_of_floats , file_path):
s = struct.pack('d' * BOTTLENECK_TENSOR_SIZE, *list_of_floats)
with open(file_path, 'wb') as f:
f.write(s)
def read_list_of_floats_from_file(file_path):
with open(file_path, 'rb') as f:
s = struct.unpack('d' * BOTTLENECK_TENSOR_SIZE, f.read())
return list(s)
bottleneck_path_2_bottleneck_values = {}
def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor, bottleneck_tensor):
print('Creating bottleneck at ' + bottleneck_path)
image_path = get_image_path(image_lists, label_name, index, image_dir, category)
if not gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
image_data = gfile.FastGFile(image_path, 'rb').read()
bottleneck_values = run_bottleneck_on_image(sess, image_data, jpeg_data_tensor, bottleneck_tensor)
bottleneck_string = ','.join(str(x) for x in bottleneck_values)
with open(bottleneck_path, 'w') as bottleneck_file:
bottleneck_file.write(bottleneck_string)
def get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,
category, bottleneck_dir, jpeg_data_tensor,
bottleneck_tensor):
label_lists = image_lists[label_name]
sub_dir = label_lists['dir']
sub_dir_path = os.path.join(bottleneck_dir, sub_dir)
ensure_dir_exists(sub_dir_path)
bottleneck_path = get_bottleneck_path(image_lists, label_name, index, bottleneck_dir, category)
if not os.path.exists(bottleneck_path):
create_bottleneck_file(bottleneck_path, image_lists, label_name, index, image_dir, category, sess, jpeg_data_tensor, bottleneck_tensor)
with open(bottleneck_path, 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
did_hit_error = False
try:
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
        # Convert the cached values back into an array and drop the batch_size dimension
bottleneck_values = np.squeeze(np.array(bottleneck_values).reshape((1,17,17,768)))
except:
print("Invalid float found, recreating bottleneck")
did_hit_error = True
if did_hit_error:
create_bottleneck_file(bottleneck_path, image_lists, label_name, index, image_dir, category, sess, jpeg_data_tensor, bottleneck_tensor)
with open(bottleneck_path, 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
# Allow exceptions to propagate here, since they shouldn't happen after a fresh creation
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
        # Convert the cached values back into an array and drop the batch_size dimension
bottleneck_values = np.squeeze(np.array(bottleneck_values).reshape((1,17, 17, 768)))
return bottleneck_values
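# Illustrative sketch (helper is hypothetical; not part of the original script): the cache
# above is a plain-text round trip -- the bottleneck floats are joined with commas when
# written and split, parsed and reshaped when read back. In miniature:
def _cache_roundtrip_demo():
    values = np.arange(4, dtype=np.float32)    # stand-in for real bottleneck values
    text = ','.join(str(x) for x in values)    # what goes into the .txt cache file
    restored = np.array([float(x) for x in text.split(',')], dtype=np.float32)
    return np.allclose(values, restored)       # True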
def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,
jpeg_data_tensor, bottleneck_tensor):
how_many_bottlenecks = 0
ensure_dir_exists(bottleneck_dir)
for label_name, label_lists in image_lists.items():
for category in ['training', 'testing', 'validation']:
category_list = label_lists[category]
for index, unused_base_name in enumerate(category_list):
get_or_create_bottleneck(sess, image_lists, label_name, index,
image_dir, category, bottleneck_dir,
jpeg_data_tensor, bottleneck_tensor)
how_many_bottlenecks += 1
if how_many_bottlenecks % 100 == 0:
print(str(how_many_bottlenecks) + ' bottleneck files created.')
def get_random_cached_bottlenecks(sess, image_lists, how_many, category,
bottleneck_dir, image_dir, jpeg_data_tensor,
bottleneck_tensor):
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
filenames = []
if how_many >= 0:
# Retrieve a random sample of bottlenecks.
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category)
bottleneck = get_or_create_bottleneck(sess, image_lists, label_name,
image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor,
bottleneck_tensor)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
filenames.append(image_name)
else:
# Retrieve all bottlenecks.
for label_index, label_name in enumerate(image_lists.keys()):
for image_index, image_name in enumerate(
image_lists[label_name][category]):
image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category)
bottleneck = get_or_create_bottleneck(sess, image_lists, label_name,
image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor,
bottleneck_tensor)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
filenames.append(image_name)
return bottlenecks, ground_truths, filenames
def get_random_distorted_bottlenecks(
sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,
distorted_image, resized_input_tensor, bottleneck_tensor):
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
image_path = get_image_path(image_lists, label_name, image_index, image_dir,
category)
if not gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
jpeg_data = gfile.FastGFile(image_path, 'rb').read()
    # Note that we materialize the distorted_image_data as a numpy array before
    # running inference on the image. This involves 2 memory copies and
# might be optimized in other implementations.
distorted_image_data = sess.run(distorted_image,
{input_jpeg_tensor: jpeg_data})
bottleneck = run_bottleneck_on_image(sess, distorted_image_data,
resized_input_tensor,
bottleneck_tensor)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
return bottlenecks, ground_truths
def should_distort_images(flip_left_right, random_crop, random_scale,
random_brightness):
return (flip_left_right or (random_crop != 0) or (random_scale != 0) or
(random_brightness != 0))
def add_input_distortions(flip_left_right, random_crop, random_scale,
random_brightness):
jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')
decoded_image = tf.image.decode_jpeg(jpeg_data, channels=MODEL_INPUT_DEPTH)
decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)
decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
margin_scale = 1.0 + (random_crop / 100.0)
resize_scale = 1.0 + (random_scale / 100.0)
margin_scale_value = tf.constant(margin_scale)
resize_scale_value = tf.random_uniform(tensor_shape.scalar(),
minval=1.0,
maxval=resize_scale)
scale_value = tf.multiply(margin_scale_value, resize_scale_value)
precrop_width = tf.multiply(scale_value, MODEL_INPUT_WIDTH)
precrop_height = tf.multiply(scale_value, MODEL_INPUT_HEIGHT)
precrop_shape = tf.stack([precrop_height, precrop_width])
precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)
precropped_image = tf.image.resize_bilinear(decoded_image_4d,
precrop_shape_as_int)
precropped_image_3d = tf.squeeze(precropped_image, squeeze_dims=[0])
cropped_image = tf.random_crop(precropped_image_3d,
[MODEL_INPUT_HEIGHT, MODEL_INPUT_WIDTH,
MODEL_INPUT_DEPTH])
if flip_left_right:
flipped_image = tf.image.random_flip_left_right(cropped_image)
else:
flipped_image = cropped_image
brightness_min = 1.0 - (random_brightness / 100.0)
brightness_max = 1.0 + (random_brightness / 100.0)
brightness_value = tf.random_uniform(tensor_shape.scalar(),
minval=brightness_min,
maxval=brightness_max)
brightened_image = tf.multiply(flipped_image, brightness_value)
distort_result = tf.expand_dims(brightened_image, 0, name='DistortResult')
return jpeg_data, distort_result
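# Worked example (values are hypothetical; added for clarity): with random_crop=10 and
# random_scale=20 the code above uses margin_scale = 1.1 and draws resize_scale_value in
# [1.0, 1.2), so the pre-crop size ranges from 1.1x to just under 1.32x the 299x299 model
# input before the random crop back to 299x299 is taken.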
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor):
with tf.name_scope('input'):
bottleneck_input = tf.placeholder_with_default(
bottleneck_tensor, shape=BOTTLENECK_TENSOR_SHAPE,
name='BottleneckInputPlaceholder')
ground_truth_input = tf.placeholder(tf.float32,
[None, class_count],
name='GroundTruthInput')
    # Dynamic batch size of the bottleneck input
batch=tf.shape(bottleneck_input)[0]
# Organizing the following ops as `final_training_ops` so they're easier
# to see in TensorBoard
net = bottleneck_input
with tf.variable_scope('mix7'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 192, [1, 7],
scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, 192, [7, 1],
scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 192, [7, 1],
scope='Conv2d_0b_7x1')
branch_2 = slim.conv2d(branch_2, 192, [1, 7],
scope='Conv2d_0c_1x7')
branch_2 = slim.conv2d(branch_2, 192, [7, 1],
scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(branch_2, 192, [1, 7],
scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3',padding='SAME',stride=[1,1])
branch_3 = slim.conv2d(branch_3, 192, [1, 1],
scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
with tf.variable_scope('mix8'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, 320, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 192, [1, 7],
scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, 192, [7, 1],
scope='Conv2d_0c_7x1')
branch_1 = slim.conv2d(branch_1, 192, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
with tf.variable_scope('mix9'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 320, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = tf.concat(axis=3, values=[
slim.conv2d(branch_1, 384, [1, 3], scope='Conv2d_0b_1x3'),
slim.conv2d(branch_1,384, [3, 1], scope='Conv2d_0b_3x1')])
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 448, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(
branch_2, 384, [3, 3], scope='Conv2d_0b_3x3')
branch_2 = tf.concat(axis=3, values=[
slim.conv2d(branch_2, 384, [1, 3], scope='Conv2d_0c_1x3'),
slim.conv2d(branch_2, 384, [3, 1], scope='Conv2d_0d_3x1')])
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3',padding='SAME',stride=[1,1])
branch_3 = slim.conv2d(
branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
with tf.variable_scope('mix10'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 320, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = tf.concat(axis=3, values=[
slim.conv2d(branch_1, 384, [1, 3], scope='Conv2d_0b_1x3'),
slim.conv2d(branch_1, 384, [3, 1], scope='Conv2d_0c_3x1')])
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net,448, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(
branch_2, 384, [3, 3], scope='Conv2d_0b_3x3')
branch_2 = tf.concat(axis=3, values=[
slim.conv2d(branch_2, 384, [1, 3], scope='Conv2d_0c_1x3'),
slim.conv2d(branch_2, 384, [3, 1], scope='Conv2d_0d_3x1')])
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='AvgPool_0a_3x3',padding='SAME',stride=[1,1])
branch_3 = slim.conv2d(
branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
with tf.name_scope('my_pool'):
net=tf.nn.avg_pool(net,ksize=[1,8,8,1],strides=[1,1,1,1],padding='VALID')
net=tf.reshape(net,[batch,2048])
with tf.name_scope('second_layer'):
with tf.name_scope('weights'):
layer_weights = tf.Variable(tf.truncated_normal([BOTTLENECK_TENSOR_SIZE, class_count], stddev=0.001), name='final_weights')
variable_summaries(layer_weights)
with tf.name_scope('biases'):
layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')
variable_summaries(layer_biases)
with tf.name_scope('Wx_plus_b'):
logits = tf.matmul(net, layer_weights) + layer_biases
tf.summary.histogram('pre_activations', logits)
final_tensor = tf.nn.softmax(logits, name=final_tensor_name)
tf.summary.histogram('activations', final_tensor)
with tf.name_scope('cross_entropy'):
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
labels=ground_truth_input, logits=logits)
with tf.name_scope('total'):
cross_entropy_mean = tf.reduce_mean(cross_entropy)
tf.summary.scalar('cross_entropy', cross_entropy_mean)
with tf.name_scope('train'):
train_step = tf.train.GradientDescentOptimizer(FLAGS.learning_rate).minimize(
cross_entropy_mean)
return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,
final_tensor)
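# Sketch (added for clarity; helper name is hypothetical): the 'my_pool' block above relies
# on the mix10 concat having 320 + (384 + 384) + (384 + 384) + 192 = 2048 channels on an
# 8x8 grid, which is why both the reshape target and BOTTLENECK_TENSOR_SIZE are 2048.
def _head_output_channels():
    # Channel bookkeeping for the final concat, kept as a tiny checkable helper.
    return 320 + (384 + 384) + (384 + 384) + 192  # == BOTTLENECK_TENSOR_SIZE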
def add_evaluation_step(result_tensor, ground_truth_tensor):
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
prediction = tf.argmax(result_tensor, 1)
correct_prediction = tf.equal(
prediction, tf.argmax(ground_truth_tensor, 1))
with tf.name_scope('accuracy'):
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', evaluation_step)
return evaluation_step, prediction
def main(_):
# Setup the directory we'll write summaries to for TensorBoard
if tf.gfile.Exists(FLAGS.summaries_dir):
tf.gfile.DeleteRecursively(FLAGS.summaries_dir)
tf.gfile.MakeDirs(FLAGS.summaries_dir)
# Set up the pre-trained graph.
maybe_download_and_extract()
graph, bottleneck_tensor, jpeg_data_tensor, resized_image_tensor = (
create_inception_graph())
# Look at the folder structure, and create lists of all the images.
image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage,
FLAGS.validation_percentage)
class_count = len(image_lists.keys())
if class_count == 0:
print('No valid folders of images found at ' + FLAGS.image_dir)
return -1
if class_count == 1:
print('Only one valid folder of images found at ' + FLAGS.image_dir +
' - multiple classes are needed for classification.')
return -1
# See if the command-line flags mean we're applying any distortions.
do_distort_images = should_distort_images(
FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
FLAGS.random_brightness)
sess = tf.Session()
if do_distort_images:
# We will be applying distortions, so setup the operations we'll need.
distorted_jpeg_data_tensor, distorted_image_tensor = add_input_distortions(
FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
FLAGS.random_brightness)
else:
# We'll make sure we've calculated the 'bottleneck' image summaries and
# cached them on disk.
cache_bottlenecks(sess, image_lists, FLAGS.image_dir, FLAGS.bottleneck_dir,
jpeg_data_tensor, bottleneck_tensor)
# Add the new layer that we'll be training.
(train_step, cross_entropy, bottleneck_input, ground_truth_input,
final_tensor) = add_final_training_ops(len(image_lists.keys()),
FLAGS.final_tensor_name,
bottleneck_tensor)
# Create the operations we need to evaluate the accuracy of our new layer.
evaluation_step, prediction = add_evaluation_step(
final_tensor, ground_truth_input)
# Merge all the summaries and write them out to /tmp/retrain_logs (by default)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
sess.graph)
validation_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/validation')
# Set up all our weights to their initial default values.
init = tf.global_variables_initializer()
sess.run(init)
# Run the training for as many cycles as requested on the command line.
for i in range(FLAGS.how_many_training_steps):
# Get a batch of input bottleneck values, either calculated fresh every time
# with distortions applied, or from the cache stored on disk.
if do_distort_images:
train_bottlenecks, train_ground_truth = get_random_distorted_bottlenecks(
sess, image_lists, FLAGS.train_batch_size, 'training',
FLAGS.image_dir, distorted_jpeg_data_tensor,
distorted_image_tensor, resized_image_tensor, bottleneck_tensor)
else:
train_bottlenecks, train_ground_truth, _ = get_random_cached_bottlenecks(
sess, image_lists, FLAGS.train_batch_size, 'training',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
bottleneck_tensor)
# Feed the bottlenecks and ground truth into the graph, and run a training
# step. Capture training summaries for TensorBoard with the `merged` op.
train_summary, _ = sess.run([merged, train_step],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
train_writer.add_summary(train_summary, i)
# Every so often, print out how well the graph is training.
is_last_step = (i + 1 == FLAGS.how_many_training_steps)
if (i % FLAGS.eval_step_interval) == 0 or is_last_step:
train_accuracy, cross_entropy_value = sess.run(
[evaluation_step, cross_entropy],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
print('%s: Step %d: Train accuracy = %.1f%%' % (datetime.now(), i,
train_accuracy * 100))
print('%s: Step %d: Cross entropy = %f' % (datetime.now(), i,
cross_entropy_value))
validation_bottlenecks, validation_ground_truth, _ = (
get_random_cached_bottlenecks(
sess, image_lists, FLAGS.validation_batch_size, 'validation',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
bottleneck_tensor))
# Run a validation step and capture training summaries for TensorBoard
# with the `merged` op.
validation_summary, validation_accuracy = sess.run(
[merged, evaluation_step],
feed_dict={bottleneck_input: validation_bottlenecks,
ground_truth_input: validation_ground_truth})
validation_writer.add_summary(validation_summary, i)
print('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' %
(datetime.now(), i, validation_accuracy * 100,
len(validation_bottlenecks)))
# We've completed all our training, so run a final test evaluation on
# some new images we haven't used before.
test_bottlenecks, test_ground_truth, test_filenames = (
get_random_cached_bottlenecks(sess, image_lists, FLAGS.test_batch_size,
'testing', FLAGS.bottleneck_dir,
FLAGS.image_dir, jpeg_data_tensor,
bottleneck_tensor))
test_accuracy, predictions = sess.run(
[evaluation_step, prediction],
feed_dict={bottleneck_input: test_bottlenecks,
ground_truth_input: test_ground_truth})
print('Final test accuracy = %.1f%% (N=%d)' % (
test_accuracy * 100, len(test_bottlenecks)))
if FLAGS.print_misclassified_test_images:
print('=== MISCLASSIFIED TEST IMAGES ===')
for i, test_filename in enumerate(test_filenames):
if predictions[i] != test_ground_truth[i].argmax():
print('%70s %s' % (test_filename,
list(image_lists.keys())[predictions[i]]))
# Write out the trained graph and labels with the weights stored as constants.
output_graph_def = graph_util.convert_variables_to_constants(
sess, graph.as_graph_def(), [FLAGS.final_tensor_name])
with gfile.FastGFile(FLAGS.output_graph, 'wb') as f:
f.write(output_graph_def.SerializeToString())
with gfile.FastGFile(FLAGS.output_labels, 'w') as f:
f.write('\n'.join(image_lists.keys()) + '\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--image_dir',
type = str,
default = './image200d',
help='Path to folders of labeled images.'
)
parser.add_argument(
'--output_graph',
type = str,
default = './output_graph_new.pb',
help = 'Where to save the trained graph.'
)
parser.add_argument(
'--output_labels',
type = str,
default='./output_labels_new.txt',
help = 'Where to save the trained graph\'s labels.'
)
parser.add_argument(
'--summaries_dir',
type = str,
default = './retrain_logs',
help = 'Where to save summary logs for TensorBoard.'
)
parser.add_argument(
'--how_many_training_steps',
type = int,
default = 15000, # 4000
help='How many training steps to run before ending.'
)
parser.add_argument(
'--learning_rate',
type=float,
default = 0.01,
help='How large a learning rate to use when training.'
)
parser.add_argument(
'--testing_percentage',
type = int,
default = 10,
help = 'What percentage of images to use as a test set.'
)
parser.add_argument(
'--validation_percentage',
type = int,
default = 10,
help = 'What percentage of images to use as a validation set.'
)
parser.add_argument(
'--eval_step_interval',
type = int,
default = 10,
help = 'How often to evaluate the training results.'
)
parser.add_argument(
'--train_batch_size',
type = int,
default = 5,
help = 'How many images to train on at a time.'
)
parser.add_argument(
'--test_batch_size',
type = int,
default = -1,
help = """\
How many images to test on. This test set is only used once, to evaluate
the final accuracy of the model after training completes.
A value of -1 causes the entire test set to be used, which leads to more
stable results across runs.\
"""
)
parser.add_argument(
'--validation_batch_size',
type = int,
default = 5,
help = """\
How many images to use in an evaluation batch. This validation set is
used much more often than the test set, and is an early indicator of how
accurate the model is during training.
A value of -1 causes the entire validation set to be used, which leads to
more stable results across training iterations, but may be slower on large
training sets.\
"""
)
parser.add_argument(
'--print_misclassified_test_images',
default = False,
help = """\
Whether to print out a list of all misclassified test images.\
""",
action='store_true'
)
parser.add_argument(
'--model_dir',
type = str,
default = './data/imagenet',
help = """\
Path to classify_image_graph_def.pb,
imagenet_synset_to_human_label_map.txt, and
imagenet_2012_challenge_label_map_proto.pbtxt.\
"""
)
parser.add_argument(
'--bottleneck_dir',
type=str,
default = './data/bottleneck',
help = 'Path to cache bottleneck layer values as files.'
)
parser.add_argument(
'--final_tensor_name',
type = str,
default = 'final_result',
help = """\
The name of the output classification layer in the retrained graph.\
"""
)
parser.add_argument(
'--flip_left_right',
default = False,
help = """\
Whether to randomly flip half of the training images horizontally.\
""",
action = 'store_true'
)
parser.add_argument(
'--random_crop',
type = int,
default = 0,
help = """\
A percentage determining how much of a margin to randomly crop off the
training images.\
"""
)
parser.add_argument(
'--random_scale',
type = int,
default = 0,
help = """\
A percentage determining how much to randomly scale up the size of the
training images by.\
"""
)
parser.add_argument(
'--random_brightness',
type=int,
default=0,
help="""\
A percentage determining how much to randomly multiply the training image
input pixels up or down by.\
"""
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
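# Usage note (illustrative; the flag values shown are just the defaults declared above):
#   python retrain.py --image_dir ./image200d --how_many_training_steps 15000
# caches bottlenecks under ./data/bottleneck and writes ./output_graph_new.pb plus
# ./output_labels_new.txt when training finishes.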
| 42.170135
| 139
| 0.660639
|
647f8525ea1cd51334edcc27015a7bf1913e7b9b
| 9,177
|
py
|
Python
|
fice_nike/agent_base.py
|
kenoss/FightingICE-nike
|
e8a38207481c20fc764271b5cc35b462caff5d49
|
[
"Apache-2.0"
] | null | null | null |
fice_nike/agent_base.py
|
kenoss/FightingICE-nike
|
e8a38207481c20fc764271b5cc35b462caff5d49
|
[
"Apache-2.0"
] | null | null | null |
fice_nike/agent_base.py
|
kenoss/FightingICE-nike
|
e8a38207481c20fc764271b5cc35b462caff5d49
|
[
"Apache-2.0"
] | null | null | null |
# Camel case is necessary...
from .action import Action
class AgentBase:
THRESHOLD_FRAME_COUNT_FOR_THE_CASE_SIMULATOR_BROKEN = 30
def __init__(self, environment):
self.environment = environment
self.gateway = environment.gateway
self._skill_flag_continue_count = 0
def close(self):
pass
def getInformation(self, frameData):
# Getting the frame data of the current frame
self.frameData = frameData
self.cc.setFrameData(self.frameData, self.player)
# please define this method when you use FightingICE version 3.20 or later
def roundEnd(self, x, y, z):
print(f"P1 HP: {x}")
print(f"P2 HP: {y}")
print(f"Cummrative Reward: {x - y}")
# print(z)
# please define this method when you use FightingICE version 4.00 or later
def getScreenData(self, sd):
pass
def initialize(self, gameData, player):
        # Initializing the command center, the simulator and some other things
self.inputKey = self.gateway.jvm.struct.Key()
self.frameData = self.gateway.jvm.struct.FrameData()
self.cc = self.gateway.jvm.aiinterface.CommandCenter()
self.player = player
self.gameData = gameData
self.simulator = self.gameData.getSimulator()
return 0
def input(self):
# Return the input for the current frame
return self.inputKey
def policy(self, state):
'''
Args:
state:
Frame data. See method `processing`.
Returns:
input_keys: [Action] or Action
'''
raise NotImplementedError()
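    # Sketch of a minimal subclass (hypothetical, added for illustration only):
    #
    #   class NeutralAgent(AgentBase):
    #       def policy(self, state):
    #           # A real agent would inspect `state` (the simulated FrameData);
    #           # returning a single Action or a list of Actions is accepted.
    #           return Action.NEUTRAL
    #
    # `processing` below turns the returned Action(s) into a command string.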
# def sendCommand(self):
# pass
def processing(self):
try:
self._processing()
except Exception as e:
import sys
import traceback
print(traceback.format_exception(*sys.exc_info()))
print(traceback.format_tb(e.__traceback__))
def _processing(self):
# First we check whether we are at the end of the round
if self.frameData.getEmptyFlag() or self.frameData.getRemainingFramesNumber() <= 0:
self.isGameJustStarted = True
return
if not self.isGameJustStarted:
# Simulate the delay and look ahead 2 frames. The simulator class exists already in FightingICE
self.frameData = self.simulator.simulate(self.frameData, self.player, None, None, 17)
# You can pass actions to the simulator by writing as follows:
# actions = self.gateway.jvm.java.util.ArrayDeque()
# actions.add(self.gateway.jvm.enumerate.Action.STAND_A)
# self.frameData = self.simulator.simulate(self.frameData, self.player, actions, actions, 17)
else:
# If the game just started, no point on simulating
self.isGameJustStarted = False
self.cc.setFrameData(self.frameData, self.player)
if self.cc.getSkillFlag():
self._skill_flag_continue_count += 1
# If there is a previous "command" still in execution, then keep doing it
self.inputKey = self.cc.getSkillKey()
# The case simulator seems to be broken.
if self._skill_flag_continue_count >= self.THRESHOLD_FRAME_COUNT_FOR_THE_CASE_SIMULATOR_BROKEN:
print('=' * 100)
print('ERROR: self._skill_flag_continue_count >= self.THRESHOLD_FRAME_COUNT_FOR_THE_CASE_SIMULATOR_BROKEN')
print('=' * 100)
self.cc.skillCancel()
else:
self._skill_flag_continue_count = 0
# We empty the keys and cancel skill just in case
# self.inputKey.empty()
# self.cc.skillCancel()
inputs = self.policy(self.frameData)
if inputs is None:
xs = [Action.NEUTRAL]
elif type(inputs) is not list:
assert inputs in Action
xs = [inputs]
else:
xs = inputs
command = ' '.join([x.value for x in xs])
print(command)
self.cc.commandCall(command)
# def processing(self):
# # Just compute the input for the current frame
# if self.frameData.getEmptyFlag() or self.frameData.getRemainingFramesNumber() <= 0:
# self.isGameJustStarted = True
# return
# # Just spam kick
# self.cc.setFrameData(self.frameData, self.player)
# # distance = self.frameData.getDistanceX()
# # my = self.frameData.getCharacter(self.player)
# # energy = my.getEnergy()
# # my_x = my.getX()
# # my_state = my.getState()
# # opp = self.frameData.getCharacter(not self.player)
# # opp_x = opp.getX()
# # opp_state = opp.getState()
# # xDifference = my_x - opp_x
# if self.cc.getSkillFlag():
# self.inputKey = self.cc.getSkillKey()
# return
# self.inputKey.empty()
# self.cc.skillCancel()
# a = self.policy(self.frameData)
# dic = {
# "↙": "1", "↓": "2", "↘": "3",
# "←": "4", "→": "6",
# "↖": "7", "↑": "8", "↗": "9"
# }
# if a is not None:
# for k, v in dic.items():
# a = a.replace(k, v)
# self.cc.commandCall(a)
# else:
# self.cc.commandCall("_")
# # self.sendCommand()
# This part is mandatory
class Java:
implements = ["aiinterface.AIInterface"]
"""
def processing(self):
# First we check whether we are at the end of the round
if self.frameData.getEmptyFlag() or self.frameData.getRemainingFramesNumber() <= 0:
self.isGameJustStarted = True
return
if not self.isGameJustStarted:
# Simulate the delay and look ahead 2 frames. The simulator class exists already in FightingICE
self.frameData = self.simulator.simulate(self.frameData, self.player, None, None, 17)
#You can pass actions to the simulator by writing as follows:
#actions = self.gateway.jvm.java.util.ArrayDeque()
#actions.add(self.gateway.jvm.enumerate.Action.STAND_A)
#self.frameData = self.simulator.simulate(self.frameData, self.player, actions, actions, 17)
else:
# If the game just started, no point on simulating
self.isGameJustStarted = False
self.cc.setFrameData(self.frameData, self.player)
distance = self.frameData.getDistanceX()
my = self.frameData.getCharacter(self.player)
energy = my.getEnergy()
my_x = my.getX()
my_state = my.getState()
opp = self.frameData.getCharacter(not self.player)
opp_x = opp.getX()
opp_state = opp.getState()
xDifference = my_x - opp_x
if self.cc.getSkillFlag():
# If there is a previous "command" still in execution, then keep doing it
self.inputKey = self.cc.getSkillKey()
return
# We empty the keys and cancel skill just in case
self.inputKey.empty()
self.cc.skillCancel()
self.policy(my, opp, distance)
self.sendCommand()
"""
"""
def processing(self):
# First we check whether we are at the end of the round
if self.frameData.getEmptyFlag() or self.frameData.getRemainingFramesNumber() <= 0:
self.isGameJustStarted = True
return
if not self.isGameJustStarted:
# Simulate the delay and look ahead 2 frames. The simulator class exists already in FightingICE
self.frameData = self.simulator.simulate(self.frameData, self.player, None, None, 17)
#You can pass actions to the simulator by writing as follows:
#actions = self.gateway.jvm.java.util.ArrayDeque()
#actions.add(self.gateway.jvm.enumerate.Action.STAND_A)
#self.frameData = self.simulator.simulate(self.frameData, self.player, actions, actions, 17)
else:
# If the game just started, no point on simulating
self.isGameJustStarted = False
self.cc.setFrameData(self.frameData, self.player)
distance = self.frameData.getDistanceX()
my = self.frameData.getCharacter(self.player)
energy = my.getEnergy()
my_x = my.getX()
my_state = my.getState()
opp = self.frameData.getCharacter(not self.player)
opp_x = opp.getX()
opp_state = opp.getState()
xDifference = my_x - opp_x
if self.cc.getSkillFlag():
self.inputKey = self.cc.getSkillKey()
return
self.inputKey.empty()
self.cc.skillCancel()
self.policy(my, opp, distance)
self.sendCommand()
"""
| 37.610656
| 123
| 0.57546
|
fcb771fb539cc232cb8a9aa82cbaabf99615b2d4
| 6,154
|
py
|
Python
|
requests_ntlm2/requests_ntlm2.py
|
jkugler/requests-ntlm2
|
d5cc3a39b228fb2f3d95275101cfbff17aeb8e07
|
[
"ISC"
] | null | null | null |
requests_ntlm2/requests_ntlm2.py
|
jkugler/requests-ntlm2
|
d5cc3a39b228fb2f3d95275101cfbff17aeb8e07
|
[
"ISC"
] | null | null | null |
requests_ntlm2/requests_ntlm2.py
|
jkugler/requests-ntlm2
|
d5cc3a39b228fb2f3d95275101cfbff17aeb8e07
|
[
"ISC"
] | null | null | null |
from requests.auth import AuthBase
from .core import NtlmCompatibility, get_auth_type_from_header, get_cbt_data, get_ntlm_credentials
from .dance import HttpNtlmContext
class HttpNtlmAuth(AuthBase):
"""
HTTP NTLM Authentication Handler for Requests.
"""
def __init__(
self, username,
password,
send_cbt=True,
ntlm_compatibility=NtlmCompatibility.NTLMv2_DEFAULT,
ntlm_strict_mode=False
):
"""Create an authentication handler for NTLM over HTTP.
:param str username: Username in 'domain\\username' format
:param str password: Password
        :param bool send_cbt: Whether to send the channel bindings over an
            HTTPS channel (Default: True)
        :param ntlm_compatibility: The Lan Manager Compatibility Level to use with the auth message
        :param ntlm_strict_mode: If False, tolerate a Type 2 (i.e. challenge response) NTLM
            message that does not conform to the NTLM spec
"""
self.username, self.password, self.domain = get_ntlm_credentials(username, password)
if self.domain:
self.domain = self.domain.upper()
self.password = password
self.send_cbt = send_cbt
self.ntlm_compatibility = ntlm_compatibility
self.ntlm_strict_mode = ntlm_strict_mode
# This exposes the encrypt/decrypt methods used to encrypt and decrypt
# messages sent after ntlm authentication. These methods are utilised
# by libraries that call requests_ntlm to encrypt and decrypt the
# messages sent after authentication
self.session_security = None
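    # Usage sketch (host and credentials are made up; added for illustration):
    #
    #   import requests
    #   session = requests.Session()
    #   session.auth = HttpNtlmAuth('DOMAIN\\user', 'password')
    #   response = session.get('https://intranet.example.com/protected')
    #
    # The handler installs a response hook, so the NTLM handshake is performed
    # transparently when the server answers with a 401 or 407 challenge.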
def retry_using_http_ntlm_auth(
self, auth_header_field, auth_header, response, auth_type, args
):
# Get the certificate of the server if using HTTPS for CBT
cbt_data = None
if self.send_cbt:
cbt_data = get_cbt_data(response)
# Attempt to authenticate using HTTP NTLM challenge/response
if auth_header in response.request.headers:
return response
content_length = int(
response.request.headers.get("Content-Length", "0"), base=10
)
if hasattr(response.request.body, "seek"):
if content_length > 0:
response.request.body.seek(-content_length, 1)
else:
response.request.body.seek(0, 0)
# Consume content and release the original connection
# to allow our new request to reuse the same one.
response.content
response.raw.release_conn()
request = response.request.copy()
ntlm_context = HttpNtlmContext(
self.username,
self.password,
domain=self.domain,
auth_type=auth_type,
cbt_data=cbt_data,
ntlm_compatibility=self.ntlm_compatibility,
ntlm_strict_mode=self.ntlm_strict_mode
)
request.headers[auth_header] = ntlm_context.get_negotiate_header()
# A streaming response breaks authentication.
# This can be fixed by not streaming this request, which is safe
# because the returned response3 will still have stream=True set if
# specified in args. In addition, we expect this request to give us a
# challenge and not the real content, so the content will be short
# anyway.
args_nostream = dict(args, stream=False)
response2 = response.connection.send(request, **args_nostream)
# needed to make NTLM auth compatible with requests-2.3.0
# Consume content and release the original connection
# to allow our new request to reuse the same one.
response2.content
response2.raw.release_conn()
request = response2.request.copy()
# this is important for some web applications that store
# authentication-related info in cookies (it took a long time to
# figure out)
if response2.headers.get("set-cookie"):
request.headers["Cookie"] = response2.headers.get("set-cookie")
# get the challenge
ntlm_context.set_challenge_from_header(response2.headers[auth_header_field])
# build response
# Get the response based on the challenge message
request.headers[auth_header] = ntlm_context.get_authenticate_header()
response3 = response2.connection.send(request, **args)
# Update the history.
response3.history.append(response)
response3.history.append(response2)
# Get the session_security object created by ntlm-auth for signing and
# sealing of messages
self.session_security = ntlm_context.session_security
return response3
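    # Summary of the exchange above (sequence only, no new API; added for clarity):
    #   1. resend the original request with a Type 1 (negotiate) header,
    #   2. read the server's Type 2 challenge from the 401/407 reply,
    #   3. resend once more with the computed Type 3 (authenticate) header,
    # keeping everything on the same connection, because NTLM authenticates the
    # connection rather than the individual request.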
def response_hook(self, r, **kwargs):
"""The actual hook handler."""
if r.status_code == 401:
# Handle server auth.
www_authenticate = r.headers.get("www-authenticate", "")
auth_type = get_auth_type_from_header(www_authenticate)
if auth_type is not None:
return self.retry_using_http_ntlm_auth(
"www-authenticate", "Authorization", r, auth_type, kwargs
)
elif r.status_code == 407:
# If we didn't have server auth, do proxy auth.
proxy_authenticate = r.headers.get("proxy-authenticate", "")
auth_type = get_auth_type_from_header(proxy_authenticate)
if auth_type is not None:
return self.retry_using_http_ntlm_auth(
"proxy-authenticate", "Proxy-Authorization", r, auth_type, kwargs
)
return r
def __call__(self, r):
# we must keep the connection because NTLM authenticates the
# connection, not single requests
r.headers["Connection"] = "Keep-Alive"
r.register_hook("response", self.response_hook)
return r
def extract_username_and_password(self):
if self.domain:
return "{}\\{}".format(self.domain, self.username), self.password
return self.username, self.password
| 38.949367
| 99
| 0.646246
|
0c1e1749f59edad319e9a16cb6bbe124b8d66541
| 4,034
|
py
|
Python
|
research/deeplab/core/nas_network_test.py
|
hjkim-haga/TF-OD-API
|
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
|
[
"Apache-2.0"
] | null | null | null |
research/deeplab/core/nas_network_test.py
|
hjkim-haga/TF-OD-API
|
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
|
[
"Apache-2.0"
] | null | null | null |
research/deeplab/core/nas_network_test.py
|
hjkim-haga/TF-OD-API
|
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for resnet_v1_beta module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import slim as contrib_slim
from tensorflow.contrib import training as contrib_training
from deeplab.core import nas_genotypes
from deeplab.core import nas_network
arg_scope = contrib_framework.arg_scope
slim = contrib_slim
def create_test_input(batch, height, width, channels):
"""Creates test input tensor."""
if None in [batch, height, width, channels]:
return tf.placeholder(tf.float32, (batch, height, width, channels))
else:
return tf.to_float(
np.tile(
np.reshape(
np.reshape(np.arange(height), [height, 1]) +
np.reshape(np.arange(width), [1, width]),
[1, height, width, 1]),
[batch, 1, 1, channels]))
class NASNetworkTest(tf.test.TestCase):
"""Tests with complete small NAS networks."""
def _pnasnet(self,
images,
backbone,
num_classes,
is_training=True,
output_stride=16,
final_endpoint=None):
"""Build PNASNet model backbone."""
hparams = contrib_training.HParams(
filter_scaling_rate=2.0,
num_conv_filters=10,
drop_path_keep_prob=1.0,
total_training_steps=200000,
)
if not is_training:
hparams.set_hparam('drop_path_keep_prob', 1.0)
cell = nas_genotypes.PNASCell(hparams.num_conv_filters,
hparams.drop_path_keep_prob,
len(backbone),
hparams.total_training_steps)
with arg_scope([slim.dropout, slim.batch_norm], is_training=is_training):
return nas_network._build_nas_base(
images,
cell=cell,
backbone=backbone,
num_classes=num_classes,
hparams=hparams,
reuse=tf.AUTO_REUSE,
scope='pnasnet_small',
final_endpoint=final_endpoint)
def testFullyConvolutionalEndpointShapes(self):
num_classes = 10
backbone = [0, 0, 0, 1, 2, 1, 2, 2, 3, 3, 2, 1]
inputs = create_test_input(None, 321, 321, 3)
with slim.arg_scope(nas_network.nas_arg_scope()):
_, end_points = self._pnasnet(inputs, backbone, num_classes)
endpoint_to_shape = {
'Stem': [None, 81, 81, 128],
'Cell_0': [None, 81, 81, 50],
'Cell_1': [None, 81, 81, 50],
'Cell_2': [None, 81, 81, 50],
'Cell_3': [None, 41, 41, 100],
'Cell_4': [None, 21, 21, 200],
'Cell_5': [None, 41, 41, 100],
'Cell_6': [None, 21, 21, 200],
'Cell_7': [None, 21, 21, 200],
'Cell_8': [None, 11, 11, 400],
'Cell_9': [None, 11, 11, 400],
'Cell_10': [None, 21, 21, 200],
'Cell_11': [None, 41, 41, 100]
}
for endpoint, shape in endpoint_to_shape.items():
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
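    # Reading of the expectations above (editorial note, added for clarity): each backbone
    # entry is a cell's stride level relative to the stem, so levels 0/1/2/3 map to the
    # 81/41/21/11 spatial sizes, and with filter_scaling_rate=2.0 the channel counts double
    # per level: 50, 100, 200, 400.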
if __name__ == '__main__':
tf.test.main()
| 36.017857
| 81
| 0.602132
|
ccd4bd7ae371a3e8ef31161b4f230ef64a90a758
| 8,889
|
py
|
Python
|
vb_suite/timeseries.py
|
josericardo/pandas
|
fe9aa125c19ce2b22a0c4aabedd68b24df6cb98e
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1
|
2019-05-21T21:07:03.000Z
|
2019-05-21T21:07:03.000Z
|
vb_suite/timeseries.py
|
josericardo/pandas
|
fe9aa125c19ce2b22a0c4aabedd68b24df6cb98e
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
vb_suite/timeseries.py
|
josericardo/pandas
|
fe9aa125c19ce2b22a0c4aabedd68b24df6cb98e
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
from vbench.api import Benchmark
from datetime import datetime
common_setup = """from pandas_vb_common import *
from datetime import timedelta
N = 100000
try:
rng = date_range('1/1/2000', periods=N, freq='min')
except NameError:
rng = DateRange('1/1/2000', periods=N, offset=datetools.Minute())
def date_range(start=None, end=None, periods=None, freq=None):
return DateRange(start, end, periods=periods, offset=freq)
if hasattr(Series, 'convert'):
Series.resample = Series.convert
ts = Series(np.random.randn(N), index=rng)
"""
#----------------------------------------------------------------------
# Lookup value in large time series, hash map population
setup = common_setup + """
rng = date_range('1/1/2000', periods=1500000, freq='s')
ts = Series(1, index=rng)
"""
stmt = "ts[ts.index[len(ts) // 2]]; ts.index._cleanup()"
timeseries_large_lookup_value = Benchmark(stmt, setup,
start_date=datetime(2012, 1, 1))
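# Sketch (not part of the benchmark suite; added for clarity): each Benchmark above is
# conceptually a timed statement with a one-off setup, roughly
#
#   import timeit
#   timeit.timeit(stmt, setup=setup, number=100)
#
# vbench additionally tracks the timing per commit starting at start_date.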
#----------------------------------------------------------------------
# Test slice minutely series
timeseries_slice_minutely = Benchmark('ts[:10000]', common_setup)
#----------------------------------------------------------------------
# Test conversion
setup = common_setup + """
"""
timeseries_1min_5min_ohlc = Benchmark(
"ts[:10000].resample('5min', how='ohlc')",
common_setup,
start_date=datetime(2012, 5, 1))
timeseries_1min_5min_mean = Benchmark(
"ts[:10000].resample('5min', how='mean')",
common_setup,
start_date=datetime(2012, 5, 1))
#----------------------------------------------------------------------
# Irregular alignment
setup = common_setup + """
lindex = np.random.permutation(N)[:N // 2]
rindex = np.random.permutation(N)[:N // 2]
left = Series(ts.values.take(lindex), index=ts.index.take(lindex))
right = Series(ts.values.take(rindex), index=ts.index.take(rindex))
"""
timeseries_add_irregular = Benchmark('left + right', setup)
#----------------------------------------------------------------------
# Sort large irregular time series
setup = common_setup + """
N = 100000
rng = date_range('1/1/2000', periods=N, freq='s')
rng = rng.take(np.random.permutation(N))
ts = Series(np.random.randn(N), index=rng)
"""
timeseries_sort_index = Benchmark('ts.sort_index()', setup,
start_date=datetime(2012, 4, 1))
#----------------------------------------------------------------------
# Shifting, add offset
setup = common_setup + """
rng = date_range('1/1/2000', periods=10000, freq='T')
"""
datetimeindex_add_offset = Benchmark('rng + timedelta(minutes=2)', setup,
start_date=datetime(2012, 4, 1))
setup = common_setup + """
N = 10000
rng = date_range('1/1/1990', periods=N, freq='53s')
ts = Series(np.random.randn(N), index=rng)
dates = date_range('1/1/1990', periods=N * 10, freq='5s')
"""
timeseries_asof_single = Benchmark('ts.asof(dates[0])', setup,
start_date=datetime(2012, 4, 27))
timeseries_asof = Benchmark('ts.asof(dates)', setup,
start_date=datetime(2012, 4, 27))
setup = setup + 'ts[250:5000] = np.nan'
timeseries_asof_nan = Benchmark('ts.asof(dates)', setup,
start_date=datetime(2012, 4, 27))
#----------------------------------------------------------------------
# Time zone stuff
setup = common_setup + """
rng = date_range('1/1/2000', '3/1/2000', tz='US/Eastern')
"""
timeseries_timestamp_tzinfo_cons = \
Benchmark('rng[0]', setup, start_date=datetime(2012, 5, 5))
#----------------------------------------------------------------------
# Resampling period
setup = common_setup + """
rng = period_range('1/1/2000', '1/1/2001', freq='T')
ts = Series(np.random.randn(len(rng)), index=rng)
"""
timeseries_period_downsample_mean = \
Benchmark("ts.resample('D', how='mean')", setup,
start_date=datetime(2012, 4, 25))
setup = common_setup + """
rng = date_range('1/1/2000', '1/1/2001', freq='T')
ts = Series(np.random.randn(len(rng)), index=rng)
"""
timeseries_timestamp_downsample_mean = \
Benchmark("ts.resample('D', how='mean')", setup,
start_date=datetime(2012, 4, 25))
#----------------------------------------------------------------------
# to_datetime
setup = common_setup + """
rng = date_range('1/1/2000', periods=20000, freq='h')
strings = [x.strftime('%Y-%m-%d %H:%M:%S') for x in rng]
"""
timeseries_to_datetime_iso8601 = \
Benchmark('to_datetime(strings)', setup,
start_date=datetime(2012, 7, 11))
setup = common_setup + """
rng = date_range('1/1/2000', periods=10000, freq='D')
strings = Series(rng.year*10000+rng.month*100+rng.day,dtype=np.int64).apply(str)
"""
timeseries_to_datetime_YYYYMMDD = \
Benchmark('to_datetime(strings,format="%Y%m%d")', setup,
start_date=datetime(2012, 7, 1))
# ---- infer_freq
# infer_freq
setup = common_setup + """
from pandas.tseries.frequencies import infer_freq
rng = date_range('1/1/1700', freq='D', periods=100000)
a = rng[:50000].append(rng[50002:])
"""
timeseries_infer_freq = \
Benchmark('infer_freq(a)', setup, start_date=datetime(2012, 7, 1))
# setitem PeriodIndex
setup = common_setup + """
rng = period_range('1/1/1990', freq='S', periods=20000)
df = DataFrame(index=range(len(rng)))
"""
period_setitem = \
Benchmark("df['col'] = rng", setup,
start_date=datetime(2012, 8, 1))
setup = common_setup + """
rng = date_range('1/1/2000 9:30', periods=10000, freq='S', tz='US/Eastern')
"""
datetimeindex_normalize = \
Benchmark('rng.normalize()', setup,
start_date=datetime(2012, 9, 1))
setup = common_setup + """
from pandas.tseries.offsets import Second
s1 = date_range('1/1/2000', periods=100, freq='S')
curr = s1[-1]
slst = []
for i in range(100):
    slst.append(date_range(curr + Second(), periods=100, freq='S'))
curr = slst[-1][-1]
"""
# dti_append_tz = \
# Benchmark('s1.append(slst)', setup, start_date=datetime(2012, 9, 1))
setup = common_setup + """
rng = date_range('1/1/2000', periods=1000, freq='H')
df = DataFrame(np.random.randn(len(rng), 2), rng)
"""
dti_reset_index = \
Benchmark('df.reset_index()', setup, start_date=datetime(2012, 9, 1))
setup = common_setup + """
rng = date_range('1/1/2000', periods=1000, freq='H',
tz='US/Eastern')
df = DataFrame(np.random.randn(len(rng), 2), index=rng)
"""
dti_reset_index_tz = \
Benchmark('df.reset_index()', setup, start_date=datetime(2012, 9, 1))
setup = common_setup + """
rng = date_range('1/1/2000', periods=1000, freq='T')
index = rng.repeat(10)
"""
datetimeindex_unique = Benchmark('index.unique()', setup,
start_date=datetime(2012, 7, 1))
# tz_localize with infer argument. This is an attempt to emulate the results
# of read_csv with duplicated data. Not passing infer_dst will fail
setup = common_setup + """
dst_rng = date_range('10/29/2000 1:00:00',
'10/29/2000 1:59:59', freq='S')
index = date_range('10/29/2000', '10/29/2000 00:59:59', freq='S')
index = index.append(dst_rng)
index = index.append(dst_rng)
index = index.append(date_range('10/29/2000 2:00:00',
'10/29/2000 3:00:00', freq='S'))
"""
datetimeindex_infer_dst = \
Benchmark('index.tz_localize("US/Eastern", infer_dst=True)',
setup, start_date=datetime(2013, 9, 30))
#----------------------------------------------------------------------
# Resampling: fast-path various functions
setup = common_setup + """
rng = date_range('20130101',periods=100000,freq='50L')
df = DataFrame(np.random.randn(100000,2),index=rng)
"""
dataframe_resample_mean_string = \
Benchmark("df.resample('1s', how='mean')", setup)
dataframe_resample_mean_numpy = \
Benchmark("df.resample('1s', how=np.mean)", setup)
dataframe_resample_min_string = \
Benchmark("df.resample('1s', how='min')", setup)
dataframe_resample_min_numpy = \
Benchmark("df.resample('1s', how=np.min)", setup)
dataframe_resample_max_string = \
Benchmark("df.resample('1s', how='max')", setup)
dataframe_resample_max_numpy = \
Benchmark("df.resample('1s', how=np.max)", setup)
#----------------------------------------------------------------------
# DatetimeConverter
setup = common_setup + """
from pandas.tseries.converter import DatetimeConverter
"""
datetimeindex_converter = \
Benchmark('DatetimeConverter.convert(rng, None, None)',
setup, start_date=datetime(2013, 1, 1))
# Adding custom business day
setup = common_setup + """
import datetime as dt
import pandas as pd
date = dt.datetime(2011,1,1)
cday = pd.offsets.CustomBusinessDay()
"""
timeseries_custom_bday_incr = \
Benchmark("date + cday",setup)
# Increment by n
timeseries_custom_bday_incr_n = \
Benchmark("date + 10 * cday",setup)
| 29.828859
| 80
| 0.598155
|
97348efc2664c26be58cbba88e5496b35ff1850b
| 180
|
py
|
Python
|
155A I_love_username.py
|
YasirAhmad-EccentriX/CodeForces
|
d004b4d1b52a360ac6c06870e0a237345771e32c
|
[
"MIT"
] | 1
|
2021-01-29T16:30:09.000Z
|
2021-01-29T16:30:09.000Z
|
155A I_love_username.py
|
YasirAhmad-EccentriX/CodeForces
|
d004b4d1b52a360ac6c06870e0a237345771e32c
|
[
"MIT"
] | null | null | null |
155A I_love_username.py
|
YasirAhmad-EccentriX/CodeForces
|
d004b4d1b52a360ac6c06870e0a237345771e32c
|
[
"MIT"
] | null | null | null |
n = input()  # number of points; the value itself is not needed further
l = [int(i) for i in input().split()]
best = worst = l[0]
c = 0
for score in l:
    if score < worst:
        worst = score
        c += 1
    elif score > best:
        best = score
        c += 1
print(c)
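# Worked example (trace of the code above; input values are arbitrary): for points
# 100 50 200 150 200 the counter c increments at 50 (new minimum) and at the first 200
# (new maximum); 150 and the repeated 200 change nothing, so the program prints 2.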
| 13.846154
| 36
| 0.422222
|
927bfa24e1d71afb6273413689d5949daa62d59c
| 9,490
|
py
|
Python
|
DPrepB-C/ska_sip/imageoperations/images/deconvolution.py
|
SKA-ScienceDataProcessor/SIP-DPrep
|
7b98bfa4d9f76c6f8bafcb97613e2533cc9426fd
|
[
"Apache-2.0"
] | 1
|
2019-01-23T13:03:42.000Z
|
2019-01-23T13:03:42.000Z
|
DPrepB-C/ska_sip/imageoperations/images/deconvolution.py
|
SKA-ScienceDataProcessor/SIP-DPrep
|
7b98bfa4d9f76c6f8bafcb97613e2533cc9426fd
|
[
"Apache-2.0"
] | null | null | null |
DPrepB-C/ska_sip/imageoperations/images/deconvolution.py
|
SKA-ScienceDataProcessor/SIP-DPrep
|
7b98bfa4d9f76c6f8bafcb97613e2533cc9426fd
|
[
"Apache-2.0"
] | null | null | null |
"""deconvolution.py: A script for deconvolution of polarisation data."""
import numpy as np
import logging
LOG = logging.getLogger(__name__)
from data_models.memory_data_models import Image
from data_models.parameters import get_parameter
from libs.image.operations import create_image_from_array
from libs.image.cleaners import hogbom, overlapIndices
from data_models.polarisation import PolarisationFrame
def hogbom_complex(dirty_q, dirty_u, psf_q, psf_u, window, gain, thresh, niter, fracthresh):
"""Clean the point spread function from a dirty Q+iU image
This uses the complex Hogbom CLEAN for polarised data (2016MNRAS.462.3483P)
The starting-point for the code was the standard Hogbom clean algorithm available in ARL.
Args:
dirty_q (numpy array): The dirty Q Image, i.e., the Q Image to be deconvolved.
dirty_u (numpy array): The dirty U Image, i.e., the U Image to be deconvolved.
psf_q (numpy array): The point spread-function in Stokes Q.
psf_u (numpy array): The point spread-function in Stokes U.
window (float): Regions where clean components are allowed. If True, entire
dirty Image is allowed.
gain (float): The "loop gain", i.e., the fraction of the brightest pixel that
is removed in each iteration.
thresh (float): Cleaning stops when the maximum of the absolute deviation of
the residual is less than this value.
niter (int): Maximum number of components to make if the threshold `thresh` is not hit.
fracthresh (float): The predefined fractional threshold at which to stop cleaning.
Returns:
comps.real: real clean component image.
comps.imag: imaginary clean component image.
res.real: real residual image.
res.imag: imaginary residual image.
"""
assert 0.0 < gain < 2.0
assert niter > 0
# Form complex Q+iU from the polarisation data:
dirty_complex = 1j*dirty_u
dirty_complex += dirty_q
LOG.info("hogbom_mod: Max abs in dirty image = %.6f" % np.max(np.abs(dirty_complex)))
absolutethresh = max(thresh, fracthresh * np.absolute(dirty_complex).max())
LOG.info("hogbom_mod: Start of minor cycle")
LOG.info("hogbom_mod: This minor cycle will stop at %d iterations or peak < %s" % \
(niter, absolutethresh))
comps = np.zeros(dirty_complex.shape, dtype='complex128')
res = np.array(dirty_complex)
assert np.all(psf_q == psf_u)
pmax = psf_q.max()
assert pmax > 0.0
LOG.info("hogbom: Max abs in dirty Image = %.6f" % np.absolute(res).max())
for i in range(niter):
if window is not None:
mx, my = np.unravel_index((np.absolute(res * window)).argmax(), dirty_complex.shape)
else:
mx, my = np.unravel_index((np.absolute(res)).argmax(), dirty_complex.shape)
mval = res[mx, my] * gain / pmax
comps[mx, my] += mval
a1o, a2o = overlapIndices(dirty_complex, psf_q, mx, my)
if niter < 10 or i % (niter // 10) == 0:
LOG.info("hogbom: Minor cycle %d, peak %s at [%d, %d]" % (i, res[mx, my], mx, my))
res[a1o[0]:a1o[1], a1o[2]:a1o[3]] -= psf_q[a2o[0]:a2o[1], a2o[2]:a2o[3]] * mval
if np.abs(res[mx, my]) < absolutethresh:
LOG.info("hogbom: Stopped at iteration %d, peak %s at [%d, %d]" % \
(i, res[mx, my], mx, my))
break
LOG.info("hogbom: End of minor cycle")
return comps.real, comps.imag, res.real, res.imag
def deconvolve_cube_complex(dirty: Image, psf: Image, **kwargs) -> (Image, Image):
""" Clean using the complex Hogbom algorithm for polarised data (2016MNRAS.462.3483P)
The algorithm available is:
hogbom-complex: See: Pratley L. & Johnston-Hollitt M., (2016), MNRAS, 462, 3483.
This code is based upon the deconvolve_cube code for standard Hogbom clean available in ARL.
Args:
dirty (numpy array): The dirty image, i.e., the image to be deconvolved.
psf (numpy array): The point spread-function.
window (float): Regions where clean components are allowed. If True, entire
dirty Image is allowed.
algorithm (str): Cleaning algorithm: 'hogbom-complex' only.
gain (float): The "loop gain", i.e., the fraction of the brightest pixel that is
removed in each iteration.
threshold (float): Cleaning stops when the maximum of the absolute deviation of the
residual is less than this value.
niter (int): Maximum number of components to make if the threshold `thresh` is not hit.
fractional_threshold (float): The predefined fractional threshold at which to stop cleaning.
Returns:
comp_image: clean component image.
residual_image: residual image.
"""
assert isinstance(dirty, Image), "Type is %s" % (type(dirty))
assert isinstance(psf, Image), "Type is %s" % (type(psf))
window_shape = get_parameter(kwargs, 'window_shape', None)
if window_shape == 'quarter':
qx = dirty.shape[3] // 4
qy = dirty.shape[2] // 4
window = np.zeros_like(dirty.data)
window[..., (qy + 1):3 * qy, (qx + 1):3 * qx] = 1.0
LOG.info('deconvolve_cube_complex: Cleaning inner quarter of each sky plane')
else:
window = None
psf_support = get_parameter(kwargs, 'psf_support', None)
if isinstance(psf_support, int):
if (psf_support < psf.shape[2] // 2) and ((psf_support < psf.shape[3] // 2)):
centre = [psf.shape[2] // 2, psf.shape[3] // 2]
psf.data = psf.data[..., (centre[0] - psf_support):(centre[0] + psf_support),
(centre[1] - psf_support):(centre[1] + psf_support)]
LOG.info('deconvolve_cube_complex: PSF support = +/- %d pixels' % (psf_support))
algorithm = get_parameter(kwargs, 'algorithm', 'msclean')
if algorithm == 'hogbom-complex':
LOG.info("deconvolve_cube_complex: Hogbom-complex clean of each polarisation \
and channel separately")
gain = get_parameter(kwargs, 'gain', 0.7)
assert 0.0 < gain < 2.0, "Loop gain must be between 0 and 2"
thresh = get_parameter(kwargs, 'threshold', 0.0)
assert thresh >= 0.0
niter = get_parameter(kwargs, 'niter', 100)
assert niter > 0
fracthresh = get_parameter(kwargs, 'fractional_threshold', 0.1)
assert 0.0 <= fracthresh < 1.0
comp_array = np.zeros(dirty.data.shape)
residual_array = np.zeros(dirty.data.shape)
for channel in range(dirty.data.shape[0]):
for pol in range(dirty.data.shape[1]):
if pol == 0 or pol == 3:
if psf.data[channel, pol, :, :].max():
LOG.info("deconvolve_cube_complex: Processing pol %d, channel %d" % \
(pol, channel))
if window is None:
comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
hogbom(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
None, gain, thresh, niter, fracthresh)
else:
comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
hogbom(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
window[channel, pol, :, :], gain, thresh, niter, fracthresh)
else:
LOG.info("deconvolve_cube_complex: Skipping pol %d, channel %d" % \
(pol, channel))
if pol == 1:
if psf.data[channel, 1:2, :, :].max():
LOG.info("deconvolve_cube_complex: Processing pol 1 and 2, channel %d" % \
(channel))
if window is None:
comp_array[channel, 1, :, :], comp_array[channel, 2, :, :], residual_array[channel, 1, :, :], residual_array[channel, 2, :, :] = hogbom_complex(dirty.data[channel, 1, :, :], dirty.data[channel, 2, :, :], psf.data[channel, 1, :, :], psf.data[channel, 2, :, :], None, gain, thresh, niter, fracthresh)
else:
comp_array[channel, 1, :, :], comp_array[channel, 2, :, :], residual_array[channel, 1, :, :], residual_array[channel, 2, :, :] = hogbom_complex(dirty.data[channel, 1, :, :], dirty.data[channel, 2, :, :], psf.data[channel, 1, :, :], psf.data[channel, 2, :, :], window[channel, pol, :, :], gain, thresh, niter, fracthresh)
else:
LOG.info("deconvolve_cube_complex: Skipping pol 1 and 2, channel %d" % \
(channel))
if pol == 2:
continue
comp_image = create_image_from_array(comp_array, dirty.wcs, \
polarisation_frame=PolarisationFrame('stokesIQUV'))
residual_image = create_image_from_array(residual_array, dirty.wcs, \
polarisation_frame=PolarisationFrame('stokesIQUV'))
else:
raise ValueError('deconvolve_cube_complex: Unknown algorithm %s' % algorithm)
return comp_image, residual_image
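if __name__ == "__main__":
    # Editor's illustrative sketch (hedged, not part of the original module):
    # exercise hogbom_complex directly on synthetic Stokes Q/U arrays, assuming
    # the ARL/data_models environment imported above is available. The array
    # shapes and clean parameters are arbitrary assumptions chosen only to show
    # the calling convention.
    npix = 64
    psf = np.zeros((npix, npix))
    psf[npix // 2, npix // 2] = 1.0          # idealised delta-function PSF
    dirty_q = np.random.randn(npix, npix)    # stand-in dirty Q image
    dirty_u = np.random.randn(npix, npix)    # stand-in dirty U image
    cc_q, cc_u, res_q, res_u = hogbom_complex(
        dirty_q, dirty_u, psf, psf, window=None,
        gain=0.1, thresh=0.0, niter=50, fracthresh=0.1)
    LOG.info("Recovered %d non-zero clean components",
             int(np.count_nonzero(cc_q + 1j * cc_u)))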
| 52.142857
| 348
| 0.589252
|
d780829a1f5101333577a1ca9adcb960529213f1
| 595
|
py
|
Python
|
mathematics_dataset/util/__init__.py
|
PhysicsTeacher13/Mathematics_Dataset
|
7f13bf661e6f36d61542bf0360b27f31eb9efe20
|
[
"Apache-2.0"
] | 1,577
|
2019-04-03T10:05:30.000Z
|
2022-03-29T17:56:14.000Z
|
mathematics_dataset/util/__init__.py
|
aliceheiman/mathematics_dataset
|
89a928868d832d47387f704d2122c71e530ec4aa
|
[
"Apache-2.0"
] | 15
|
2019-04-03T18:55:03.000Z
|
2022-01-12T16:04:12.000Z
|
mathematics_dataset/util/__init__.py
|
LaudateCorpus1/mathematics_dataset
|
e91dba649b843597c14b9d84dfe92bff79b7d299
|
[
"Apache-2.0"
] | 226
|
2019-04-03T13:28:36.000Z
|
2022-03-27T18:41:01.000Z
|
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 42.5
| 74
| 0.766387
|
4e9156ffc791e3e2811aa46b95358606f0797154
| 5,000
|
py
|
Python
|
pyquil/api/tests/test_rewrite_arithmetic.py
|
dwillmer/pyquil
|
f9a8504d20729b79f07ec4730c93f4b84d6439eb
|
[
"Apache-2.0"
] | 1
|
2021-11-30T21:03:15.000Z
|
2021-11-30T21:03:15.000Z
|
pyquil/api/tests/test_rewrite_arithmetic.py
|
dwillmer/pyquil
|
f9a8504d20729b79f07ec4730c93f4b84d6439eb
|
[
"Apache-2.0"
] | null | null | null |
pyquil/api/tests/test_rewrite_arithmetic.py
|
dwillmer/pyquil
|
f9a8504d20729b79f07ec4730c93f4b84d6439eb
|
[
"Apache-2.0"
] | null | null | null |
from pyquil.quil import Program
from pyquil.quilatom import Qubit, Frame
from pyquil.quilbase import DefFrame
from pyquil.api._rewrite_arithmetic import rewrite_arithmetic
from rpcq.messages import (
ParameterAref,
ParameterSpec,
RewriteArithmeticResponse,
)
def test_rewrite_arithmetic_no_params():
prog = Program("X 0")
response = rewrite_arithmetic(prog)
assert response == RewriteArithmeticResponse(quil=Program("X 0").out())
def test_rewrite_arithmetic_simple_mref():
prog = Program("DECLARE theta REAL", "RZ(theta) 0")
response = rewrite_arithmetic(prog)
assert response == RewriteArithmeticResponse(
original_memory_descriptors={"theta": ParameterSpec(length=1, type="REAL")},
quil=Program("DECLARE __P1 REAL[1]", "DECLARE theta REAL[1]", "RZ(__P1[0]) 0").out(),
recalculation_table={ParameterAref(index=0, name="__P1"): "theta[0]/(2*pi)"},
)
def test_rewrite_arithmetic_duplicate_exprs():
prog = Program(
"DECLARE theta REAL",
"RZ(theta*1.5) 0",
"RX(theta*1.5) 0", # this is not a native gate, but it is a protoquil program
)
response = rewrite_arithmetic(prog)
assert response == RewriteArithmeticResponse(
original_memory_descriptors={"theta": ParameterSpec(length=1, type="REAL")},
recalculation_table={ParameterAref(index=0, name="__P1"): "theta[0]*1.5/(2*pi)"},
quil=Program(
"DECLARE __P1 REAL[1]", "DECLARE theta REAL[1]", "RZ(__P1[0]) 0", "RX(__P1[0]) 0"
).out(),
)
def test_rewrite_arithmetic_mixed():
prog = Program(
"DECLARE theta REAL", "DECLARE beta REAL", "RZ(3 * theta) 0", "RZ(beta+theta) 0",
)
response = rewrite_arithmetic(prog)
assert response.original_memory_descriptors == {
"theta": ParameterSpec(length=1, type="REAL"),
"beta": ParameterSpec(length=1, type="REAL"),
}
assert response.recalculation_table == {
ParameterAref(index=0, name="__P2"): "3*theta[0]/(2*pi)",
ParameterAref(index=1, name="__P2"): "(beta[0] + theta[0])/(2*pi)",
}
assert (
response.quil
== Program(
"DECLARE __P2 REAL[2]",
"DECLARE theta REAL[1]",
"DECLARE beta REAL[1]",
"RZ(__P2[0]) 0",
"RZ(__P2[1]) 0",
).out()
)
def test_rewrite_arithmetic_set_scale():
prog = Program("DECLARE theta REAL", 'SET-SCALE 0 "rf" 1.0', 'SET-SCALE 0 "rf" theta',)
response = rewrite_arithmetic(prog)
assert response == RewriteArithmeticResponse(
original_memory_descriptors={"theta": ParameterSpec(length=1, type="REAL")},
recalculation_table={ParameterAref(index=0, name="__P1"): "theta[0]/8"},
quil=Program(
"DECLARE __P1 REAL[1]",
"DECLARE theta REAL[1]",
'SET-SCALE 0 "rf" 1.0',
'SET-SCALE 0 "rf" __P1[0]',
).out(),
)
def test_rewrite_arithmetic_frequency():
fdefn0 = DefFrame(frame=Frame([Qubit(0)], "rf"), center_frequency=10.0, sample_rate=20.0,)
fdefn1 = DefFrame(frame=Frame([Qubit(1)], "rf"), sample_rate=20.0,)
prog = Program(
fdefn0,
fdefn1,
"DECLARE theta REAL",
'SET-FREQUENCY 0 "rf" theta',
'SHIFT-FREQUENCY 0 "rf" theta',
'SET-FREQUENCY 1 "rf" theta',
)
response = rewrite_arithmetic(prog)
assert response == RewriteArithmeticResponse(
original_memory_descriptors={"theta": ParameterSpec(length=1, type="REAL")},
recalculation_table={
ParameterAref(index=0, name="__P1"): "(theta[0] - 10.0)/20.0",
ParameterAref(index=1, name="__P1"): "theta[0]/20.0",
},
quil=Program(
fdefn0,
fdefn1,
"DECLARE __P1 REAL[2]",
"DECLARE theta REAL[1]",
'SET-FREQUENCY 0 "rf" __P1[0]',
'SHIFT-FREQUENCY 0 "rf" __P1[0]',
'SET-FREQUENCY 1 "rf" __P1[1]',
).out(),
)
def test_rewrite_arithmetic_mixed_mutations():
fdefn = DefFrame(frame=Frame([Qubit(0)], "rf"), center_frequency=10.0, sample_rate=20.0,)
prog = Program(
fdefn,
"DECLARE theta REAL",
'SET-FREQUENCY 0 "rf" theta',
'SET-PHASE 0 "rf" theta',
'SET-SCALE 0 "rf" theta',
)
response = rewrite_arithmetic(prog)
assert response == RewriteArithmeticResponse(
original_memory_descriptors={"theta": ParameterSpec(length=1, type="REAL")},
recalculation_table={
ParameterAref(index=0, name="__P1"): "(theta[0] - 10.0)/20.0",
ParameterAref(index=1, name="__P1"): "theta[0]/(2*pi)",
ParameterAref(index=2, name="__P1"): "theta[0]/8",
},
quil=Program(
fdefn,
"DECLARE __P1 REAL[3]",
"DECLARE theta REAL[1]",
'SET-FREQUENCY 0 "rf" __P1[0]',
'SET-PHASE 0 "rf" __P1[1]',
'SET-SCALE 0 "rf" __P1[2]',
).out(),
)
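# Editor's note (hedged): these are plain pytest functions, so this file can
# typically be run on its own from the repository root with:
#   pytest pyquil/api/tests/test_rewrite_arithmetic.py -q
# The path above mirrors this file's location; adjust it if the layout differs.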
| 33.783784
| 94
| 0.5956
|
40b4cf17405e86a4ac753ac882c29f49e76565ab
| 417
|
py
|
Python
|
Python/WebDev/Django/Tutorial/blog/models.py
|
michaelg29/Programming
|
2544a742393c6d94e93879f70246ce17d0997011
|
[
"MIT"
] | 2
|
2021-04-05T20:44:54.000Z
|
2022-01-13T05:25:11.000Z
|
Python/WebDev/Django/Tutorial/blog/models.py
|
michaelg29/Programming
|
2544a742393c6d94e93879f70246ce17d0997011
|
[
"MIT"
] | 12
|
2020-02-17T05:19:01.000Z
|
2022-03-17T14:56:38.000Z
|
Python/WebDev/Django/Tutorial/blog/models.py
|
michaelg29/Programming
|
2544a742393c6d94e93879f70246ce17d0997011
|
[
"MIT"
] | 1
|
2022-01-25T16:48:21.000Z
|
2022-01-25T16:48:21.000Z
|
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
class Post(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
date_posted = models.DateTimeField(default=timezone.now)
author = models.ForeignKey(User, on_delete=models.CASCADE) # if user deleted, delete post
def __str__(self):
return self.title
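# Editor's illustrative sketch (hedged): once migrations for this app have been
# applied, a Post can be created from the Django shell roughly as below. The
# username is a placeholder assumption, not something defined by this module.
#   >>> from django.contrib.auth.models import User
#   >>> from blog.models import Post
#   >>> author = User.objects.get(username="demo_user")
#   >>> Post.objects.create(title="Hello", content="First post", author=author)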
| 32.076923
| 93
| 0.7506
|
3b098121ab4d928e9c805659b286849202350960
| 5,820
|
py
|
Python
|
bin/train.py
|
koreyou/multidomain-sentiment
|
766afdb839483c38a141c8d1d60c6faa75bd2684
|
[
"CC0-1.0"
] | null | null | null |
bin/train.py
|
koreyou/multidomain-sentiment
|
766afdb839483c38a141c8d1d60c6faa75bd2684
|
[
"CC0-1.0"
] | null | null | null |
bin/train.py
|
koreyou/multidomain-sentiment
|
766afdb839483c38a141c8d1d60c6faa75bd2684
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import json
import logging
import os
import chainer
import dill # This is for joblib to use dill. Do NOT delete it.
import click
import six
from chainer import training
from chainer.training import extensions
from joblib import Memory
import multidomain_sentiment
from multidomain_sentiment.dataset.blitzer import prepare_blitzer_data
from multidomain_sentiment.training import SaveRestore, EarlyStoppingTrigger
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
@click.command()
@click.argument('dataset', type=click.Path(exists=True))
@click.argument('word2vec', type=click.Path(exists=True))
@click.option('--epoch', '-e', type=int, default=15,
help='Number of sweeps over the dataset to train')
@click.option('--frequency', '-f', default=[1, 'epoch'],
type=(int, click.Choice(['epoch', 'iteration'])),
help='Frequency of taking a snapshot')
@click.option('--gpu', '-g', type=int, default=-1,
help='GPU ID (negative value indicates CPU)')
@click.option('--out', '-o', default='result',
              help='Directory to output the result and temporary files')
@click.option('--model', default='cnn', type=click.Choice(["cnn", "rnn"]))
@click.option('--batchsize', '-b', type=int, default=300,
help='Number of images in each mini-batch')
@click.option('--lr', type=float, default=0.001, help='Learning rate')
@click.option('--fix_embedding', type=bool, default=False,
help='Fix word embedding during training')
@click.option('--resume', '-r', default='',
help='Resume the training from snapshot')
def run(dataset, word2vec, epoch, frequency, gpu, out, model, batchsize, lr,
fix_embedding, resume):
"""
Train multi-domain user review classification using Blitzer et al.'s dataset
(https://www.cs.jhu.edu/~mdredze/datasets/sentiment/)
    Please refer to README.md for details.
"""
memory = Memory(cachedir=out, verbose=1)
w2v, vocab, train_dataset, dev_dataset, _, label_dict, domain_dict = \
memory.cache(prepare_blitzer_data)(dataset, word2vec)
if model == 'rnn':
model = multidomain_sentiment.models.create_rnn_predictor(
len(domain_dict), w2v.shape[0], w2v.shape[1], 300, len(label_dict),
2, 300, dropout_rnn=0.1, initialEmb=w2v, dropout_emb=0.1,
fix_embedding=fix_embedding
)
elif model == 'cnn':
model = multidomain_sentiment.models.create_cnn_predictor(
len(domain_dict), w2v.shape[0], w2v.shape[1], 300, len(label_dict),
300, dropout_fc=0.1, initialEmb=w2v, dropout_emb=0.1,
fix_embedding=fix_embedding
)
else:
assert not "should not get here"
classifier = multidomain_sentiment.models.MultiDomainClassifier(
model, domain_dict=domain_dict)
if gpu >= 0:
# Make a specified GPU current
chainer.cuda.get_device_from_id(gpu).use()
classifier.to_gpu() # Copy the model to the GPU
# Setup an optimizer
optimizer = chainer.optimizers.Adam(alpha=lr)
optimizer.setup(classifier)
train_iter = chainer.iterators.SerialIterator(train_dataset, batchsize)
# Set up a trainer
updater = training.StandardUpdater(
train_iter, optimizer, device=gpu,
converter=multidomain_sentiment.training.convert)
if dev_dataset is not None:
stop_trigger = EarlyStoppingTrigger(
monitor='validation/main/loss', max_trigger=(epoch, 'epoch'))
trainer = training.Trainer(updater, stop_trigger, out=out)
logger.info("train: {}, dev: {}".format(
len(train_dataset), len(dev_dataset)))
# Evaluate the model with the development dataset for each epoch
dev_iter = chainer.iterators.SerialIterator(
dev_dataset, batchsize, repeat=False, shuffle=False)
evaluator = extensions.Evaluator(
dev_iter, classifier, device=gpu,
converter=multidomain_sentiment.training.convert)
trainer.extend(evaluator, trigger=frequency)
# This works together with EarlyStoppingTrigger to provide more reliable
# early stopping
trainer.extend(
SaveRestore(),
trigger=chainer.training.triggers.MinValueTrigger(
'validation/main/loss'))
else:
trainer = training.Trainer(updater, (epoch, 'epoch'), out=out)
logger.info("train: {}".format(len(train_dataset)))
# SaveRestore will save the snapshot when dev_dataset is available
trainer.extend(extensions.snapshot(), trigger=frequency)
logger.info("With labels: %s" % json.dumps(label_dict))
# Take a snapshot for each specified epoch
if gpu < 0:
# ParameterStatistics does not work with GPU as of chainer 2.x
# https://github.com/chainer/chainer/issues/3027
trainer.extend(extensions.ParameterStatistics(
model, trigger=(100, 'iteration')), priority=99)
# Write a log of evaluation statistics for each iteration
trainer.extend(extensions.LogReport(trigger=(1, 'iteration')), priority=98)
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss', 'main/accuracy',
'validation/main/accuracy']), trigger=frequency, priority=97)
if resume:
# Resume from a snapshot
chainer.serializers.load_npz(resume, trainer)
logger.info("Started training")
trainer.run()
# Save final model (without trainer)
chainer.serializers.save_npz(os.path.join(out, 'trained_model'), model)
with open(os.path.join(out, 'vocab.json'), 'w') as fout:
json.dump(vocab, fout)
if __name__ == '__main__':
run()
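# Editor's note (hedged): an example invocation, assuming the Blitzer sentiment
# data and a word2vec file have already been downloaded (both paths below are
# placeholders, not files shipped with this script):
#   python bin/train.py path/to/blitzer_data path/to/word2vec.bin \
#       --model cnn --epoch 15 --batchsize 300 --out result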
| 40.137931
| 80
| 0.673883
|
a8e2a35e4a1aae11f736a4bc966684c90698fe90
| 414
|
py
|
Python
|
setup.py
|
knu2xs/arcgis-demographic-clustering
|
1cccd4b534c565321cf8d6ca6b68c41bccd74a62
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
knu2xs/arcgis-demographic-clustering
|
1cccd4b534c565321cf8d6ca6b68c41bccd74a62
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
knu2xs/arcgis-demographic-clustering
|
1cccd4b534c565321cf8d6ca6b68c41bccd74a62
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import find_packages, setup
with open('README.md', 'r') as readme:
long_description = readme.read()
setup(
name='arcgis_clustering',
package_dir={"": "src"},
packages=find_packages('src'),
version='0.0.0',
    description='Demographic clustering using ArcGIS Business Analyst data.',
long_description=long_description,
author='Joel McCune',
license='Apache 2.0',
)
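# Editor's note (hedged): with this src/ layout the package is typically
# installed in editable mode from the project root with:
#   pip install -e .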
| 25.875
| 76
| 0.693237
|
a0474ac8bb9b302c61d3f34f3986d4545d168782
| 2,004
|
py
|
Python
|
utils/framework.py
|
kosuke55/DeepDepthDenoising
|
b1e7b95ca2e03384005b0540f865ddc7066a3e93
|
[
"MIT"
] | 1
|
2020-04-15T16:46:56.000Z
|
2020-04-15T16:46:56.000Z
|
utils/framework.py
|
panluDreamer/DeepDepthDenoising
|
a994f495cd90193c78c1824367ce1462ae52752f
|
[
"MIT"
] | null | null | null |
utils/framework.py
|
panluDreamer/DeepDepthDenoising
|
a994f495cd90193c78c1824367ce1462ae52752f
|
[
"MIT"
] | null | null | null |
import torch
import datetime
import numpy
import random
from .opt import *
from .visualization import *
def initialize(args):
# create and init device
print("{} | Torch Version: {}".format(datetime.datetime.now(), torch.__version__))
if args.seed > 0:
print("Set to reproducibility mode with seed: {}".format(args.seed))
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
numpy.random.seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
random.seed(args.seed)
gpus = [int(id) for id in args.gpu.split(',') if int(id) >= 0]
device = torch.device("cuda:{}" .format(gpus[0]) if torch.cuda.is_available() and len(gpus) > 0 and gpus[0] >= 0 else "cpu")
print("Training {0} for {1} epochs using a batch size of {2} on {3}".format(args.name, args.epochs, args.batch_size, device))
# create visualizer
visualizer = NullVisualizer() if args.visdom is None\
else VisdomVisualizer(args.name, args.visdom,\
count=4 if 4 <= args.batch_size else args.batch_size)
if args.visdom is None:
args.visdom_iters = 0
# create & init model
model_params = {
'width': 640,
'height': 360,
'ndf': args.ndf,
'dilation': args.dilation,
'norm_type': args.normalization,
'upsample_type': args.upsample_type
}
return device, visualizer, model_params
def init_optimizer(model, args):
opt_params = OptimizerParameters(learning_rate=args.lr, momentum=args.momentum,\
momentum2=args.momentum2, epsilon=args.epsilon)
optimizer = get_optimizer(args.optimizer, model.parameters(), opt_params)
if args.opt_state is not None:
opt_state = torch.load(args.opt_state)
print("Loading previously saved optimizer state from {}".format(args.opt_state))
optimizer.load_state_dict(opt_state["optimizer_state_dict"])
return optimizer
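# Editor's illustrative sketch (hedged, not part of the original module): the
# helpers above expect an argparse-style namespace; the attribute values below
# are assumptions chosen only to show which fields are read, and 'adam' is an
# assumed key for get_optimizer.
#   from argparse import Namespace
#   args = Namespace(seed=1, gpu="-1", name="demo", epochs=1, batch_size=2,
#                    visdom=None, visdom_iters=0, ndf=8, dilation=1,
#                    normalization="batch", upsample_type="nearest",
#                    optimizer="adam", lr=1e-3, momentum=0.9, momentum2=0.999,
#                    epsilon=1e-8, opt_state=None)
#   device, visualizer, model_params = initialize(args)
#   optimizer = init_optimizer(torch.nn.Linear(4, 4), args)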
| 40.897959
| 129
| 0.662675
|
6b7f95a2d4b1f51013bb067d31447975b02bd2ea
| 2,512
|
py
|
Python
|
alembic/versions/67c47a7c2029_add_unique_constraints_and_indices.py
|
hasadna/open-bus-stride-db
|
368ef4ca8b6916ad1a502b9445cd47e27aa778b4
|
[
"MIT"
] | null | null | null |
alembic/versions/67c47a7c2029_add_unique_constraints_and_indices.py
|
hasadna/open-bus-stride-db
|
368ef4ca8b6916ad1a502b9445cd47e27aa778b4
|
[
"MIT"
] | 2
|
2022-01-17T11:07:33.000Z
|
2022-03-18T08:17:48.000Z
|
alembic/versions/67c47a7c2029_add_unique_constraints_and_indices.py
|
hasadna/open-bus-stride-db
|
368ef4ca8b6916ad1a502b9445cd47e27aa778b4
|
[
"MIT"
] | null | null | null |
"""add unique constraints and indices
Revision ID: 67c47a7c2029
Revises: 366df3cd0db1
Create Date: 2021-11-07 17:14:06.435914+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '67c47a7c2029'
down_revision = '366df3cd0db1'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_index('idx_route_journey_ref_vehicle_ref', 'siri_ride', ['siri_route_id', 'journey_ref', 'vehicle_ref'], unique=True)
op.create_index('idx_ride_stop_order', 'siri_ride_stop', ['siri_ride_id', 'siri_stop_id', 'order'], unique=True)
op.create_index('idx_operator_ref_line_ref', 'siri_route', ['operator_ref', 'line_ref'], unique=True)
op.drop_index('ix_siri_snapshot_snapshot_id', table_name='siri_snapshot')
op.create_index(op.f('ix_siri_snapshot_snapshot_id'), 'siri_snapshot', ['snapshot_id'], unique=True)
op.drop_index('ix_siri_stop_code', table_name='siri_stop')
op.create_index(op.f('ix_siri_stop_code'), 'siri_stop', ['code'], unique=True)
op.create_index(op.f('ix_siri_vehicle_location_recorded_at_time'), 'siri_vehicle_location', ['recorded_at_time'], unique=False)
op.create_index(op.f('ix_siri_vehicle_location_siri_ride_stop_id'), 'siri_vehicle_location', ['siri_ride_stop_id'], unique=False)
op.create_index(op.f('ix_siri_vehicle_location_siri_snapshot_id'), 'siri_vehicle_location', ['siri_snapshot_id'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_siri_vehicle_location_siri_snapshot_id'), table_name='siri_vehicle_location')
op.drop_index(op.f('ix_siri_vehicle_location_siri_ride_stop_id'), table_name='siri_vehicle_location')
op.drop_index(op.f('ix_siri_vehicle_location_recorded_at_time'), table_name='siri_vehicle_location')
op.drop_index(op.f('ix_siri_stop_code'), table_name='siri_stop')
op.create_index('ix_siri_stop_code', 'siri_stop', ['code'], unique=False)
op.drop_index(op.f('ix_siri_snapshot_snapshot_id'), table_name='siri_snapshot')
op.create_index('ix_siri_snapshot_snapshot_id', 'siri_snapshot', ['snapshot_id'], unique=False)
op.drop_index('idx_operator_ref_line_ref', table_name='siri_route')
op.drop_index('idx_ride_stop_order', table_name='siri_ride_stop')
op.drop_index('idx_route_journey_ref_vehicle_ref', table_name='siri_ride')
# ### end Alembic commands ###
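# Editor's note (hedged): migrations like this one are normally applied and
# reverted with the Alembic CLI, run from the directory containing alembic.ini:
#   alembic upgrade head                # apply up to and including 67c47a7c2029
#   alembic downgrade 366df3cd0db1      # step back to the previous revision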
| 53.446809
| 133
| 0.761943
|
96f5cbda9d91e34242c631f749cd0a20e2da47c6
| 1,208
|
py
|
Python
|
Software/Backend/app/model/Usuario.py
|
davidsgv/Sistemas-Transaccionales
|
a26904742bd163461aca7e8039448441b4a98fb9
|
[
"MIT"
] | null | null | null |
Software/Backend/app/model/Usuario.py
|
davidsgv/Sistemas-Transaccionales
|
a26904742bd163461aca7e8039448441b4a98fb9
|
[
"MIT"
] | null | null | null |
Software/Backend/app/model/Usuario.py
|
davidsgv/Sistemas-Transaccionales
|
a26904742bd163461aca7e8039448441b4a98fb9
|
[
"MIT"
] | null | null | null |
from app.model.Base import selectDB, insertBD
from uuid import uuid4
from flask import request
class ManejoUsuarios():
@staticmethod
def IniciarSesion(correo, contrasena, navegador):
        # Verify the user's credentials
query = "EXEC dbo.spVerificarDatosUsuario @correo = '{}', @clave = '{}'".format(correo, contrasena)
data = selectDB(query)
results = recorrerDatos(data)
if len(results) <= 0:
return 0
        # Generate a session token
token = uuid4()
query = "EXEC dbo.spCrearSesion @correo = '{}', @navegador = '{}', @token = '{}'".format(correo,navegador, token)
insertBD(query)
return token
    @staticmethod
    def VerificarSesion():
try:
token = request.json["token"]
except:
return False
        # Verify the user's session token
query = "EXEC spVerificarSesionToken @token = '{}'".format(token)
data = selectDB(query)
results = recorrerDatos(data)
if len(results) <= 0:
return False
return True
def recorrerDatos(data):
results = []
for row in data:
results.append(row)
return results
| 26.26087
| 121
| 0.572848
|
fdb6c1e04052045149030c71061aaa1b9e7e7506
| 5,704
|
py
|
Python
|
ParlAI/parlai/tasks/fvqa/agents.py
|
UmaTaru/run
|
be29e4d41a4de3dee27cd6796801bfe51382d294
|
[
"MIT"
] | 163
|
2019-06-23T14:07:57.000Z
|
2022-02-25T23:06:07.000Z
|
ParlAI/parlai/tasks/fvqa/agents.py
|
UmaTaru/run
|
be29e4d41a4de3dee27cd6796801bfe51382d294
|
[
"MIT"
] | 8
|
2019-07-24T12:41:31.000Z
|
2022-02-10T00:17:20.000Z
|
ParlAI/parlai/tasks/fvqa/agents.py
|
UmaTaru/run
|
be29e4d41a4de3dee27cd6796801bfe51382d294
|
[
"MIT"
] | 31
|
2019-06-26T01:21:07.000Z
|
2021-09-06T17:23:24.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.agents import Teacher
from parlai.core.image_featurizers import ImageLoader
from parlai.core.metrics import Metrics
from .build import build
import json
import random
import os
def _path(opt):
build(opt)
questions_path = os.path.join(
opt['datapath'], 'FVQA', 'new_dataset_release', 'all_qs_dict_release.json'
)
trainset_path = os.path.join(opt['datapath'], 'FVQA', 'Name_Lists')
image_path = os.path.join(
opt['datapath'], 'FVQA', 'new_dataset_release', 'images', ''
)
return questions_path, trainset_path, image_path
class SplitTeacher(Teacher):
"""FVQA Teacher, which loads the json VQA data and implements its own
`act` method for interacting with student agent.
Use "fvqa:split:X" to choose between splits 0-4 (inclusive), or just
"fvqa" to use the default split (0).
"""
def __init__(self, opt, shared=None):
super().__init__(opt)
dt = opt['datatype'].split(':')[0]
if dt not in ('train', 'test'):
raise RuntimeError('Not valid datatype (only train/test).')
task = opt.get('task', 'fvqa:split:0')
task_num = 0 # default to train/split 0
split = task.split(':')
if len(split) > 2:
task_num = split[2]
if task_num not in [str(i) for i in range(5)]:
raise RuntimeError('Invalid train/test split ID (0-4 inclusive)')
if not hasattr(self, 'factmetrics'):
if shared and shared.get('factmetrics'):
self.factmetrics = shared['factmetrics']
else:
self.factmetrics = Metrics(opt)
self.datatype = opt['datatype']
questions_path, trainset_path, self.image_path = _path(opt)
if shared and 'ques' in shared:
self.ques = shared['ques']
else:
self._setup_data(questions_path, trainset_path, dt, task_num)
self.len = len(self.ques)
self.asked_question = False
# for ordered data in batch mode (especially, for validation and
# testing), each teacher in the batch gets a start index and a step
# size so they all process disparate sets of the data
self.step_size = opt.get('batchsize', 1)
self.data_offset = opt.get('batchindex', 0)
self.image_loader = ImageLoader(opt)
self.reset()
def num_examples(self):
return self.len
def num_episodes(self):
return self.len
def report(self):
r = super().report()
r['factmetrics'] = self.factmetrics.report()
return r
def reset(self):
# Reset the dialog so that it is at the start of the epoch,
# and all metrics are reset.
super().reset()
self.lastY = None
self.episode_idx = self.data_offset - self.step_size
self.epochDone = False
def reset_metrics(self):
super().reset_metrics()
self.factmetrics.clear()
def observe(self, observation):
"""Process observation for metrics."""
if self.lastY is not None:
if self.asked_question:
self.metrics.update(observation, self.lastY[0])
else:
self.factmetrics.update(observation, self.lastY[1])
self.lastY = None
return observation
def act(self):
if self.asked_question:
self.asked_question = False
action = {'text': 'Which fact supports this answer?', 'episode_done': True}
if self.datatype.startswith('train'):
action['labels'] = self.lastY[1]
if (
self.datatype != 'train'
and self.episode_idx + self.step_size >= self.num_episodes()
):
self.epochDone = True
return action
if self.datatype == 'train':
self.episode_idx = random.randrange(self.len)
else:
self.episode_idx = (self.episode_idx + self.step_size) % self.num_episodes()
self.asked_question = True
qa = self.ques[self.episode_idx]
question = qa['question']
img_path = self.image_path + qa['img_file']
action = {
'image': self.image_loader.load(img_path),
'text': question,
'episode_done': False,
}
human_readable = qa['fact_surface'].replace('[', '').replace(']', '')
self.lastY = [[qa['answer']], [human_readable]]
if self.datatype.startswith('train'):
action['labels'] = self.lastY[0]
return action
def share(self):
shared = super().share()
shared['factmetrics'] = self.factmetrics
shared['ques'] = self.ques
if hasattr(self, 'facts'):
shared['facts'] = self.facts
return shared
def _setup_data(self, questions_path, trainset_path, datatype, task_num):
print('loading: ' + questions_path)
with open(questions_path) as questions_file:
questions = json.load(questions_file)
train_test_images = set()
fn = os.path.join(trainset_path, '{}_list_{}.txt'.format(datatype, task_num))
with open(fn) as imageset:
for line in imageset:
train_test_images.add(line.strip())
self.ques = [
questions[k]
for k in sorted(questions.keys())
if questions[k]['img_file'] in train_test_images
]
class DefaultTeacher(SplitTeacher):
pass
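# Editor's note (hedged): assuming a standard ParlAI checkout, this teacher can
# usually be previewed with the display_data script, choosing a split the way
# the SplitTeacher docstring describes:
#   python -m parlai.scripts.display_data -t fvqa:split:0 -dt train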
| 32.781609
| 88
| 0.596599
|
8b3ee71c2d5b8458d11f059bceb2555813acc48b
| 10,324
|
py
|
Python
|
glm-plotter/GLMparser.py
|
jdechalendar/glmplotter
|
e715d4c646d739d906f96db075b2995fcf02da72
|
[
"MIT"
] | 6
|
2017-03-03T04:07:47.000Z
|
2020-08-24T13:44:19.000Z
|
glm-plotter/GLMparser.py
|
jdechalendar/glmplotter
|
e715d4c646d739d906f96db075b2995fcf02da72
|
[
"MIT"
] | 4
|
2018-12-10T02:42:35.000Z
|
2019-07-20T19:22:02.000Z
|
glm-plotter/GLMparser.py
|
jdechalendar/glm-plotter
|
e715d4c646d739d906f96db075b2995fcf02da72
|
[
"MIT"
] | 6
|
2016-08-25T17:16:25.000Z
|
2020-06-19T05:53:39.000Z
|
"""
JAC - jdechalendar@stanford.edu
"""
import pandas as pd
import re
def readGLM(iFile, verb=0):
"""
Main function to parse GLM files
Modified July 12, 2016 - the returned objs are now a list
Use list comprehensions to extract list of objects of a given class
"""
with open(iFile) as f:
lines = f.readlines()
modules = set()
objs = []
commands = []
iLine = -1
while True:
iLine += 1
if iLine > len(lines)-1:
break
if lines[iLine].strip().startswith('object'):
read_objs, iLine = readObj(iLine, lines)
for obj in read_objs:
objs.append(obj)
if lines[iLine].startswith('module'):
modules.add(lines[iLine][7:].split('{')[0].strip(';').strip())
if lines[iLine].startswith('#'):
commands.append(lines[iLine])
if verb:
classList = [obj['class'] for obj in objs]
countClass = {x: classList.count(x) for x in classList}
print('Objects classes: ' + str(
[str(a[0]) + ': ' + str(a[1]) for a in zip(
countClass.keys(), countClass.values())]))
print('Modules: ' + str(modules))
# post processing step to make sure the parent fields are correct
# this was added to deal with the old glm syntax
# the startLine and name fields are always there
# the name_oldGLM is optional
startLines = [obj['startLine'] for obj in objs]
names = [obj['name'] for obj in objs]
names_oldGLM = [obj['name_oldGLM']
if 'name_oldGLM' in obj else '' for obj in objs]
for obj in objs:
if 'parent' in obj:
if obj['parent'] not in names:
if obj['parent'] in names_oldGLM:
parentObjID = names_oldGLM.index(obj['parent'])
obj['parent'] = objs[parentObjID]['name']
elif obj['parent'] in startLines:
parentObjID = startLines.index(obj['parent'])
obj['parent'] = objs[parentObjID]['name']
else:
raise ValueError("Cannot find this child's parent!")
return objs, modules, commands
def readObj(startLine, lines, parent=''):
"""
Read an object or objects (note that they can be nested)
In old GLM files, there can be an object identifier given after a colon in
the opening statement, or no name.
In order of preference, we choose as our name the name, the object
identifier (name_oldGLM) and the line of the opening statement
The startLine and name fields are always there, the name_oldGLM is optional
"""
obj = [dict()]
iLine = startLine
# remove leading/trailing whitespace and comments
currLine = lines[iLine].split('//')[0].strip()
if currLine.startswith('object'):
objClass = currLine[len('object '):].split('{')[0].split(':')
obj[0]['class'] = objClass[0].strip()
obj[0]['startLine'] = str(iLine)
# old syntax for glm files:
# there can be an id for the object after a colon
if len(objClass) > 1 and objClass[1].strip():
# consider this is the name for now. If there is a name later on it
# will be overwritten
obj[0]['name_oldGLM'] = objClass[1].strip()
if len(parent) > 0:
obj[0]['parent'] = parent
while True:
iLine += 1
# remove leading/trailing whitespace and comments
currLine = lines[iLine].split('//')[0].strip()
# ignore end of object / comment / opening bracket
if (currLine.find('}') > -1 and not currLine.find('{') > -1):
break # done parsing object
if (len(currLine) == 0 or currLine == "{"):
continue # done parsing line
if currLine.startswith('object'): # Reading another object
if 'name' in obj[0]:
parent = obj[0]['name']
            elif 'name_oldGLM' in obj[0]:
                parent = obj[0]['name_oldGLM']
            else:
                parent = obj[0]['startLine']
child, iLine = readObj(iLine, lines, parent=parent)
obj.extend(child)
else:
data = lines[iLine].split(';')[0].strip() # only take stuff before ';'
tmp = re.split('''\t| (?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', data)
if len(tmp) > 0:
if len(tmp) == 2:
obj[0][tmp[0]] = tmp[1].strip('"')
# tmp[0].lower().startswith('rated')
# or tmp[0].lower().startswith('reconnect_time'):
# second case means there can be a unit
elif len(tmp) == 3:
# we expect a unit
obj[0][tmp[0]] = tmp[1:]
else:
print('Line %d - Not expected' % iLine)
print(lines[iLine])
# Check the name field is set for all objects we parsed
for a in obj:
if 'name' not in a:
if 'name_oldGLM' in a:
a['name'] = a['name_oldGLM']
else:
a['name'] = a['startLine']
return obj, iLine
def readClock(lines, currLine):
"""
This function reads the clock in the GLM file
"""
while True:
currLine += 1
if (lines[currLine].find('}') > -1
and not lines[currLine].find('{') > -1):
break
if lines[currLine].strip().startswith('timezone'):
timezone = lines[currLine].strip().split(
';')[0].strip('timezone ').strip('"')
if lines[currLine].strip().startswith('starttime'):
tmp = lines[currLine].strip().split(';')[0].strip('starttime ')
tmp = pd.to_datetime(tmp)
starttime = tmp
if lines[currLine].strip().startswith('stoptime'):
tmp = lines[currLine].strip().split(';')[0].strip('stoptime ')
tmp = pd.to_datetime(tmp)
stoptime = tmp
return timezone, starttime, stoptime
def getObjs(objs, attr, val=''):
"""
one liner to get list of all the objects whose attribute attr has value val
Example call: getObjs(objs, 'class', 'load')
"""
if val:
return [obj for obj in objs if attr in obj and obj[attr] == val]
else:
return [obj for obj in objs if attr in obj]
def getAieul(objs, name):
parent = [obj for obj in objs if obj['name'] == name]
if len(parent) == 0:
parent = [
obj for obj in objs if 'name_oldGLM' in obj and obj['name_oldGLM']
== name]
if len(parent) == 0:
parent = [obj['parent'] for obj in objs if obj['startLine'] == name]
if len(parent) != 1:
raise ValueError('The name to object relation should be bijective!')
if 'parent' in parent[0]:
parent = getAieul(objs, parent[0]['parent'])
return parent
def createD3JSON(objs, fileNm_out=''):
"""
This function creates a json file that will be used for plotting the GLM
objects by the D3 force algorithm
We use hardcoded decisions of which links and nodes should be plotted
See GLMtoJSON notebook to change this
Inputs are the objs object from the readGLM function and a file to write
the output to.
If no file name is provided, returns the json string
"""
# define links I want to plot
link_type = ['overhead_line', 'switch', 'underground_line',
'regulator', 'transformer', 'triplex_line', 'fuse']
link_objs = [obj for obj in objs if obj['class'] in link_type]
links = list(
zip([getAieul(objs, link['from'])[0]['name'] for link in link_objs],
[getAieul(objs, link['to'])[0]['name'] for link in link_objs],
[link['class'] for link in link_objs]))
# define nodes I want to plot
node_type = ['node', 'load', 'meter', 'triplex_meter', 'triplex_node']
parent_objs = [obj for obj in objs if 'parent' not in obj]
node_objs = [obj for obj in parent_objs if obj['class'] in node_type]
# children I want to plot
child_type = ['diesel_dg', 'capacitor']
children = dict([(obj['parent'], obj['class'])
for obj in objs if obj['class'] in child_type])
# find unique nodes
unique_nodes = list(
set([n1 for n1, n2, n3 in links] + [n2 for n1, n2, n3 in links]
+ [nd['name'] for nd in node_objs]))
if len(unique_nodes) > len(node_objs):
print('I had to add ' + str(len(unique_nodes) - len(node_objs))
+ ' nodes to draw the links - something is off')
classNm = [
next((obj['class'] for obj in node_objs if obj["name"] == nd), '')
for nd in unique_nodes]
child = [children[nd] if nd in children else '' for nd in unique_nodes]
JSONstr = ''
JSONstr += '{\n "nodes":[\n'
if len(unique_nodes) > 0:
for iNode in range(len(unique_nodes)-1):
JSONstr = (JSONstr + ' {"name":"' + unique_nodes[iNode]
+ '","classNm":"'+str(classNm[iNode]) + '","child":"'
+ str(child[iNode])+'"},\n')
JSONstr += (' {"name":"' + unique_nodes[len(unique_nodes)-1]
+ '","classNm":"' + str(classNm[len(unique_nodes)-1])
+ '","child":"'+str(child[len(unique_nodes)-1])+'"}\n')
JSONstr += ' ],\n "links":[\n'
if len(links) > 0:
for iLink in range(len(links)-1):
JSONstr += (' {"source":'
+ str(unique_nodes.index(links[iLink][0]))
+ ',"target":'
+ str(unique_nodes.index(links[iLink][1]))
+ ',"linkType":"' + links[iLink][2] + '"},\n')
JSONstr += (' {"source":'
+ str(unique_nodes.index(links[len(links)-1][0]))
+ ',"target":'
+ str(unique_nodes.index(links[len(links)-1][1]))
+ ',"linkType":"' + links[len(links)-1][2] + '"}\n')
JSONstr += ' ]\n}'
if fileNm_out:
with open(fileNm_out, 'w') as f:
f.write(JSONstr)
JSONstr = '' # if we wrote to file, don't return it
return JSONstr
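if __name__ == "__main__":
    # Editor's illustrative sketch (hedged, not part of the original module):
    # parse a GLM model, count its load objects and export the D3 JSON. The
    # file names below are placeholders, not files shipped with this repository.
    import sys
    glm_file = sys.argv[1] if len(sys.argv) > 1 else "feeder.glm"
    objs, modules, commands = readGLM(glm_file, verb=1)
    loads = getObjs(objs, 'class', 'load')
    print("Parsed %d objects (%d loads); modules: %s"
          % (len(objs), len(loads), sorted(modules)))
    createD3JSON(objs, fileNm_out="feeder.json")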
| 39.555556
| 87
| 0.537389
|
eb697021280c40e8705ae0b0ff0b6471fb050e43
| 401
|
py
|
Python
|
backend/z_vent_30438/wsgi.py
|
crowdbotics-apps/z-vent-30438
|
d06342c9a6a8feb2f5a2c75c90c2b14d4ab59110
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/z_vent_30438/wsgi.py
|
crowdbotics-apps/z-vent-30438
|
d06342c9a6a8feb2f5a2c75c90c2b14d4ab59110
|
[
"FTL",
"AML",
"RSA-MD"
] | 6
|
2021-09-14T01:39:57.000Z
|
2021-10-31T15:32:36.000Z
|
backend/z_vent_30438/wsgi.py
|
crowdbotics-apps/z-vent-30438
|
d06342c9a6a8feb2f5a2c75c90c2b14d4ab59110
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
"""
WSGI config for z_vent_30438 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'z_vent_30438.settings')
application = get_wsgi_application()
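# Editor's note (hedged): in production this callable is typically served by a
# WSGI server rather than runserver, for example:
#   gunicorn z_vent_30438.wsgi:application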
| 23.588235
| 78
| 0.790524
|
ac6655b69a0182b23cb4f11995ab808e3baf1f0e
| 4,411
|
py
|
Python
|
goodsorigin/views.py
|
caiwei-0627/GreaterWMS
|
db3d7b200343fd9eea07fa3fcdf33c4c9c37c4b4
|
[
"Apache-2.0"
] | 3
|
2020-10-19T05:55:28.000Z
|
2020-11-12T03:55:06.000Z
|
goodsorigin/views.py
|
coolflywms/GreaterWMS
|
0509b8842bbd8cae9540b6f13f7b3cb6fa5a911c
|
[
"Apache-2.0"
] | 1
|
2020-07-24T07:34:36.000Z
|
2020-07-24T07:34:36.000Z
|
goodsorigin/views.py
|
Singosgu/Elvis_WMS
|
e6911b7daae76be640ece8946104af24b6cf0fa6
|
[
"MIT"
] | 4
|
2020-09-04T13:35:15.000Z
|
2020-10-16T15:10:38.000Z
|
from rest_framework import viewsets
from .models import ListModel
from . import serializers
from utils.page import MyPageNumberPagination
from rest_framework.filters import OrderingFilter
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.response import Response
from .filter import Filter
from rest_framework.exceptions import APIException
class APIViewSet(viewsets.ModelViewSet):
"""
retrieve:
Response a data list(get)
list:
Response a data list(all)
create:
Create a data line(post)
delete:
Delete a data line(delete)
partial_update:
Partial_update a data(patch:partial_update)
update:
Update a data(put:update)
"""
pagination_class = MyPageNumberPagination
filter_backends = [DjangoFilterBackend, OrderingFilter, ]
ordering_fields = ['id', "create_time", "update_time", ]
filter_class = Filter
def get_project(self):
try:
id = self.kwargs.get('pk')
return id
except:
return None
def get_queryset(self):
id = self.get_project()
if self.request.user:
if id is None:
return ListModel.objects.filter(openid=self.request.auth.openid, is_delete=False)
else:
return ListModel.objects.filter(openid=self.request.auth.openid, id=id, is_delete=False)
else:
return ListModel.objects.none()
def get_serializer_class(self):
if self.action in ['list', 'retrieve', 'destroy']:
return serializers.GoodsoriginGetSerializer
elif self.action in ['create']:
return serializers.GoodsoriginPostSerializer
elif self.action in ['update']:
return serializers.GoodsoriginUpdateSerializer
elif self.action in ['partial_update']:
return serializers.GoodsoriginPartialUpdateSerializer
else:
return self.http_method_not_allowed(request=self.request)
def create(self, request, *args, **kwargs):
data = self.request.data
data['openid'] = self.request.auth.openid
if ListModel.objects.filter(openid=data['openid'], goods_origin=data['goods_origin'], is_delete=False).exists():
raise APIException({"detail": "Data exists"})
else:
serializer = self.get_serializer(data=data)
serializer.is_valid(raise_exception=True)
serializer.save()
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=200, headers=headers)
def update(self, request, pk):
qs = self.get_object()
if qs.openid != self.request.auth.openid:
raise APIException({"detail": "Cannot update data which not yours"})
data = self.request.data
if ListModel.objects.filter(openid=self.request.auth.openid,goods_origin=data['goods_origin'], is_delete=False).exists():
raise APIException({"detail": "Data exists"})
else:
serializer = self.get_serializer(qs, data=data)
serializer.is_valid(raise_exception=True)
serializer.save()
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=200, headers=headers)
def partial_update(self, request, pk):
qs = self.get_object()
if qs.openid != self.request.auth.openid:
raise APIException({"detail": "Cannot partial_update data which not yours"})
else:
data = self.request.data
serializer = self.get_serializer(qs, data=data, partial=True)
serializer.is_valid(raise_exception=True)
serializer.save()
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=200, headers=headers)
def destroy(self, request, pk):
qs = self.get_object()
if qs.openid != self.request.auth.openid:
raise APIException({"detail": "Cannot delete data which not yours"})
else:
qs.is_delete = True
qs.save()
serializer = self.get_serializer(qs, many=False)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=200, headers=headers)
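# Editor's illustrative sketch (hedged): a ModelViewSet like this is normally
# exposed through a DRF router in the app's urls.py; the URL prefix below is an
# assumption, not taken from this repository.
#   from rest_framework.routers import DefaultRouter
#   from goodsorigin.views import APIViewSet
#   router = DefaultRouter()
#   router.register(r'goodsorigin', APIViewSet, basename='goodsorigin')
#   urlpatterns = router.urls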
| 39.035398
| 129
| 0.643845
|
bdc790faf745b81e8e0f4c0e50f296a38fbc9819
| 1,893
|
py
|
Python
|
examples/plot_scalar_example.py
|
stevegolton/toppra
|
846e2a7f5b87e0e1884b244b07d5fd661edcd9bd
|
[
"MIT"
] | 342
|
2017-07-26T17:37:19.000Z
|
2022-03-28T19:50:27.000Z
|
examples/plot_scalar_example.py
|
stevegolton/toppra
|
846e2a7f5b87e0e1884b244b07d5fd661edcd9bd
|
[
"MIT"
] | 151
|
2017-11-30T06:14:29.000Z
|
2022-03-29T02:06:08.000Z
|
examples/plot_scalar_example.py
|
stevegolton/toppra
|
846e2a7f5b87e0e1884b244b07d5fd661edcd9bd
|
[
"MIT"
] | 134
|
2017-08-18T21:35:39.000Z
|
2022-03-25T03:43:08.000Z
|
"""
Retime a one-dimensional path
===============================
"""
################################################################################
# Import necessary libraries.
import toppra as ta
import toppra.constraint as constraint
import toppra.algorithm as algo
import numpy as np
import matplotlib.pyplot as plt
ta.setup_logging("INFO")
################################################################################
# We now generate a simple path. When constructing a path, you must
# "align" the waypoints properly yourself. For instance, if the
# waypoints are [0, 1, 10] as in the example below, the path
# positions should be aligned like [0, 0.1, 1.0]. If this is not done,
# the CubicSpline interpolator might produce undesirable oscillating
# paths!
waypts = [[0], [1], [10]]
path = ta.SplineInterpolator([0, 0.1, 1.0], waypts)
################################################################################
# Setup the velocity and acceleration
vlim = np.array([[-3, 3]])
alim = np.array([[-4, 4]])
pc_vel = constraint.JointVelocityConstraint(vlim)
pc_acc = constraint.JointAccelerationConstraint(
alim, discretization_scheme=constraint.DiscretizationType.Interpolation)
################################################################################
# Setup the problem instance and solve it.
instance = algo.TOPPRA([pc_vel, pc_acc], path, solver_wrapper='seidel')
jnt_traj = instance.compute_trajectory(0, 0)
################################################################################
# We can now visualize the result
duration = jnt_traj.duration
print("Found optimal trajectory with duration {:f} sec".format(duration))
ts = np.linspace(0, duration, 100)
fig, axs = plt.subplots(3, 1, sharex=True)
qs = jnt_traj.eval(ts)
qds = jnt_traj.evald(ts)
qdds = jnt_traj.evaldd(ts)
axs[0].plot(ts, qs)
axs[1].plot(ts, qds)
axs[2].plot(ts, qdds)
plt.show()
| 35.055556
| 80
| 0.570523
|
0463f9ce01c4566ca0abd0498408cb78e4fc78e9
| 5,101
|
py
|
Python
|
tests/applier/data_postgres_cdc_applier_inactive_applied_tables.py
|
albertteoh/data_pipeline
|
a99f1c7412375b3e9f4115108fcdde517b2e71a6
|
[
"Apache-2.0"
] | 16
|
2017-10-31T21:43:26.000Z
|
2019-08-11T08:49:06.000Z
|
tests/applier/data_postgres_cdc_applier_inactive_applied_tables.py
|
albertteoh/data_pipeline
|
a99f1c7412375b3e9f4115108fcdde517b2e71a6
|
[
"Apache-2.0"
] | 1
|
2017-11-01T06:25:56.000Z
|
2017-11-01T06:25:56.000Z
|
tests/applier/data_postgres_cdc_applier_inactive_applied_tables.py
|
albertteoh/data_pipeline
|
a99f1c7412375b3e9f4115108fcdde517b2e71a6
|
[
"Apache-2.0"
] | 9
|
2017-10-30T05:23:15.000Z
|
2022-02-17T03:53:09.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import data_pipeline.constants.const as const
from .data_common import TestCase, UPDATE_SSP_SQL
tests=[
TestCase(
description="Skip insert and apply update statements belonging to inactive tables each with end of batch",
input_table_name="INACTIVE_TABLE",
input_commit_statements=[
'',
'insert into "SYS"."INACTIVE_TABLE"("COMPNDPK_1","COMPNDPK_2","COMPNDPK_3","COMPNDPK_4","COMPNDPK_5","COL_TS_0","COL_V_1","COL_V_2","COL_V_3","COL_V_4","COL_N_5","COL_N_6","COL_N_7","COL_N_8","COL_N_9","COL_NAS_9") values (\'26\',\'26.1\',\'26.2\',\'26.3\',\'26.4\',TO_TIMESTAMP(\'2017-04-19 12:14:22\'),\'26_varchar_1\',\'26_varchar_2\',\'26_varchar_3\',\'26_varchar_4\',\'26.5\',\'26.6\',\'26.7\',\'26.8\',\'26.9\',\'This is a nasty string ??a??????????\')',
'',
'',
'update "SYS"."INACTIVE_TABLE" set "COL_V_2" = \'26.9\' where "COMPNDPK_1" = \'26\'',
''],
input_record_types=[const.START_OF_BATCH, const.DATA, const.END_OF_BATCH, const.START_OF_BATCH, const.DATA, const.END_OF_BATCH],
input_operation_codes=['', const.INSERT, '', '', const.UPDATE, ''],
input_primary_key_fields="COMPNDPK_1",
input_record_counts=[0, 0, 1, 0, 0, 1],
input_commit_lsns=[0, 0, 0, 0, 0, 0],
expect_sql_execute_called=[
None,
None,
None,
None,
None,
None],
expect_execute_called_times=[0, 0, 0, 0, 0, 0],
expect_audit_db_execute_sql_called=[None, None, None, None, None, None],
expect_commit_called_times=[0, 0, 0, 0, 0, 0],
expect_insert_row_count=[0, 0, 0, 0, 0, 0],
expect_update_row_count=[0, 0, 0, 0, 0, 0],
expect_delete_row_count=[0, 0, 0, 0, 0, 0],
expect_source_row_count=[0, 0, 0, 0, 0, 0],
expect_batch_committed=[const.UNCOMMITTED, const.UNCOMMITTED, const.UNCOMMITTED, const.UNCOMMITTED, const.UNCOMMITTED, const.UNCOMMITTED,]
)
, TestCase(
description="Don't skip insert and apply update statements not belonging to inactive tables each with end of batch",
input_table_name="CONNCT_CDC_PK5_COLS10",
input_commit_statements=[
'',
'insert into "SYS"."CONNCT_CDC_PK5_COLS10"("COMPNDPK_1","COMPNDPK_2","COMPNDPK_3","COMPNDPK_4","COMPNDPK_5","COL_TS_0","COL_V_1","COL_V_2","COL_V_3","COL_V_4","COL_N_5","COL_N_6","COL_N_7","COL_N_8","COL_N_9","COL_NAS_9") values (\'26\',\'26.1\',\'26.2\',\'26.3\',\'26.4\',TO_TIMESTAMP(\'2017-04-19 12:14:22\'),\'26_varchar_1\',\'26_varchar_2\',\'26_varchar_3\',\'26_varchar_4\',\'26.5\',\'26.6\',\'26.7\',\'26.8\',\'26.9\',\'This is a nasty string ??a??????????\')',
'',
'',
'update "SYS"."CONNCT_CDC_PK5_COLS10" set "COL_V_2" = \'26.9\' where "COMPNDPK_1" = \'26\'',
''],
input_record_types=[const.START_OF_BATCH, const.DATA, const.END_OF_BATCH, const.START_OF_BATCH, const.DATA, const.END_OF_BATCH],
input_operation_codes=['', const.INSERT, '', '', const.UPDATE, ''],
input_primary_key_fields="COMPNDPK_1",
input_record_counts=[0, 0, 1, 0, 0, 1],
input_commit_lsns=[0, 0, 0, 0, 0, 0],
expect_sql_execute_called=[
None,
"INSERT INTO ctl.CONNCT_CDC_PK5_COLS10 ( COL_NAS_9, COL_N_5, COL_N_6, COL_N_7, COL_N_8, COL_N_9, COL_TS_0, COL_V_1, COL_V_2, COL_V_3, COL_V_4, COMPNDPK_1, COMPNDPK_2, COMPNDPK_3, COMPNDPK_4, COMPNDPK_5 ) VALUES ( 'This is a nasty string ??a??????????', '26.5', '26.6', '26.7', '26.8', '26.9', '2017-04-19 12:14:22', '26_varchar_1', '26_varchar_2', '26_varchar_3', '26_varchar_4', '26', '26.1', '26.2', '26.3', '26.4' ); -- lsn: 0, offset: 1",
None,
None,
"UPDATE ctl.CONNCT_CDC_PK5_COLS10 SET COL_V_2 = '26.9' WHERE COMPNDPK_1 = '26'; -- lsn: 0, offset: 1",
None],
expect_execute_called_times=[0, 1, 1, 1, 2, 2],
expect_audit_db_execute_sql_called=[None, None, None, None, None, (UPDATE_SSP_SQL, ('CDCApply', 0, 'myprofile', 1, 'ctl', 'connct_cdc_pk5_cols10'))],
expect_commit_called_times=[0, 0, 1, 1, 1, 2],
expect_insert_row_count=[0, 1, 1, 1, 1, 1],
expect_update_row_count=[0, 0, 0, 0, 1, 1],
expect_delete_row_count=[0, 0, 0, 0, 0, 0],
expect_source_row_count=[0, 1, 1, 1, 2, 2],
expect_batch_committed=[const.UNCOMMITTED, const.UNCOMMITTED, const.COMMITTED, const.UNCOMMITTED, const.UNCOMMITTED, const.COMMITTED,]
)
]
| 56.677778
| 476
| 0.662223
|
d5c9c1544b8161150890e15486242a4e0a5dba41
| 314
|
py
|
Python
|
Chapter 15/setup.py
|
bpbpublications/Building-Smart-Robots-Using-ROS
|
62f473d0cc94966806c1b66f46e9fba812080338
|
[
"MIT"
] | null | null | null |
Chapter 15/setup.py
|
bpbpublications/Building-Smart-Robots-Using-ROS
|
62f473d0cc94966806c1b66f46e9fba812080338
|
[
"MIT"
] | null | null | null |
Chapter 15/setup.py
|
bpbpublications/Building-Smart-Robots-Using-ROS
|
62f473d0cc94966806c1b66f46e9fba812080338
|
[
"MIT"
] | null | null | null |
## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
packages=['object_detection'],
package_dir={'': 'src'})
setup(**setup_args)
| 26.166667
| 61
| 0.770701
|
719e8a2e317b25455d76f9746824548d25aebbbb
| 5,804
|
py
|
Python
|
Code/convertor.py
|
Helvboy/Semester-Project
|
d1bd2349ff8833178eb6120fd99ad668eadb25e0
|
[
"Unlicense"
] | null | null | null |
Code/convertor.py
|
Helvboy/Semester-Project
|
d1bd2349ff8833178eb6120fd99ad668eadb25e0
|
[
"Unlicense"
] | null | null | null |
Code/convertor.py
|
Helvboy/Semester-Project
|
d1bd2349ff8833178eb6120fd99ad668eadb25e0
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon May 3 20:37:20 2021
@author: Eloi Schlegel
"""
import numpy as np
def cloud_points_to_pixels(points, densities, resolution=0.1):
"""
    Transform a cloud of points into a pixelized image
N - number of points \n
D - number of dimensions \n
L - Length of the image \n
H - Height of the image \n
Ni - number of graduation on the scale for the i-Dimension, extrema included
Parameters
----------
points : np.ndarray of float
[NxD] - all the points of the cloud
densities : np.ndarray of float
[Nx1] - densities at each point
resolution : float, optional
The default is 0.1. Set the distance between 2 pixels
Returns
-------
pixels : np.ndarray of float
        [LxH] - array containing the density of each point in the space
"""
N, D = points.shape
minima = np.min(points,0)
maxima = np.max(points,0)
scales = []
#scale creation for each D-dimension
for i in range(D):
scales.append(np.arange(minima[i], maxima[i]+0.5*resolution, resolution) )
pixels = np.zeros(tuple([np.shape(scales[i])[0]for i in range(D)]))
    # keeps the last assigned value (ideally this should average the assigned values)
for i in range(N):
idxs = locate_points_on_scale(points[i], scales)
pixels[tuple(idxs)] = densities[i]
return pixels
def locate_points_on_scale(pt, scales):
"""
    Find the location of a point on the scales and return the indices
    of the nearest graduation in each dimension
D - number of dimensions
Ni - number of graduation on the scale for the i-Dimension, extrema included
Parameters
----------
pt : np.ndarray of float
[1xD] - coordinates of a point
scales : list of np.ndarray of float
[N1x1] .... [Ndx1] - scale of each dimension
Returns
-------
idxs : np.ndarray of int
        [Dx1] - index of the nearest graduation in each dimension
"""
D = len(pt)
idxs = np.zeros(D).astype(int)
for i in range(D):
idxs[i] = np.argmin(abs(scales[i] - pt[i]) )
return idxs
def voxels_to_cloud_points(elements, vertices):
"""
    Transform voxels into a point cloud in D dimensions.
E - number of voxels \n
V - number of vertices which define the voxel \n
N - number of whole vertices \n
D - number of dimensions
Parameters
----------
elements : np.ndarray of int
[ExV] - indices of the points which define an element
vertices : np.ndarray of float
[NxD] - coordinates of the vertices
Returns
-------
points : np.ndarray of float
[NxD] - coordinates of each voxel center
"""
points = []
for idxs_vertices in elements:
points = np.append( points, voxel_middle( vertices[idxs_vertices-1]) )
return points.reshape( -1, vertices.shape[1])
def voxel_middle( vertices):
"""
    Calculate the coordinates of the center of a voxel defined by N vertices
    in D dimensions
N - number of points \n
D - number of dimensions
Parameters
----------
vertices : np.ndarray of float
[NxD] - vertices of the element
Returns
-------
coord : np.ndarray of float
[Dx1] - coordinates of the center
"""
coord = np.mean(vertices, 0)
return coord
def format_convertor(coordinates, links):
"""
    Convert a matrix of point indices into a matrix holding the coordinates
    of the corresponding end points.
N - number of points
D - number of dimensions
P - number of links
Parameters
----------
coordinates : np.ndarray of float
[NxD] - coordinates of points
links : np.ndarray of int
[Px2] - indices of the end points of the path
Returns
-------
    links_coor : np.ndarray of float
        [Px2D] - coordinates of the two end points of each path
"""
nb_path = len(links)
D = np.shape(coordinates)[1]
links_coor = np.zeros((nb_path, D*2))
for i in range(nb_path):
id_ps = links[i]
links_coor[i,0:D] = coordinates[id_ps[0]]
links_coor[i,D:] = coordinates[id_ps[1]]
return links_coor
def coord_2_id( pts, coordinates):
"""
    Find the id of each given point in the coordinates matrix
P is the number of points to manage
N is the number of skeleton's points
Parameters
----------
pts : np.ndarray of float
[Px2] - coordinates of the points to evaluate
coordinates : np.ndarray of float
[Nx2] - coordinates of all the known points
Returns
-------
pt_id : np.ndarray of int
[Px1] - indices of the points in the array coordinates
"""
pt_id = []
for i in range(len(pts)):
#find the id of the node i
temp1 = np.array(np.where(coordinates[1:,0] == pts[i,0]))
temp2 = np.array(np.where(coordinates[1:,1] == pts[i,1]))
for x in range( temp1.shape[1]):
for y in range( temp2.shape[1]):
if ( temp1[0,x] == temp2[0,y]):
break
if ( temp1[0,x] == temp2[0,y]):
break
            # Important: add the "+1" to get the id and not the index
pt_id = np.append(pt_id, temp1[0,x]+1 )
pt_id = np.asarray(pt_id)
return pt_id.astype(int)
if __name__ == '__main__':
print('convertor executed')
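    # A minimal smoke test of the helpers above, on a tiny synthetic cloud
    # (the numbers are illustrative only, not data from the project):
    demo_points = np.array([[0.0, 0.0], [0.3, 0.1], [1.0, 0.5]])
    demo_densities = np.array([1.0, 2.0, 3.0])
    demo_pixels = cloud_points_to_pixels(demo_points, demo_densities, resolution=0.5)
    print('pixel grid shape:', demo_pixels.shape)        # (3, 2)
    # one square voxel, described by 1-based vertex ids as expected above
    demo_vertices = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    demo_elements = np.array([[1, 2, 3, 4]])
    print('voxel center:', voxels_to_cloud_points(demo_elements, demo_vertices))  # [[0.5 0.5]]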
| 25.795556
| 83
| 0.56306
|
7776b8a64df60f519435997d51ff89a7bdc6ba00
| 1,278
|
py
|
Python
|
test.py
|
TimSC/pgmap-query
|
02a274172ec651e41db89bffd7b11d1f91eb5488
|
[
"MIT"
] | 2
|
2018-09-01T00:32:35.000Z
|
2019-03-24T15:10:41.000Z
|
test.py
|
TimSC/pgmap-query
|
02a274172ec651e41db89bffd7b11d1f91eb5488
|
[
"MIT"
] | 5
|
2018-02-05T01:53:48.000Z
|
2019-05-12T20:22:14.000Z
|
test.py
|
TimSC/pgmap-query
|
02a274172ec651e41db89bffd7b11d1f91eb5488
|
[
"MIT"
] | 1
|
2021-03-18T10:15:41.000Z
|
2021-03-18T10:15:41.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
import pgmap
import io
import string
def ReadConfig(fina):
fi = open(fina, "rt")
settings = {}
for li in fi.readlines():
lisp = map(string.strip, li.split(":"))
if len(lisp) < 2:
continue
settings[lisp[0]] = lisp[1]
return settings
if __name__=="__main__":
settings = ReadConfig("config.cfg")
p = pgmap.PgMap("dbname={} user={} password='{}' hostaddr={} port=5432".format(
settings["dbname"], settings["dbuser"], settings["dbpass"], settings["dbhost"]),
settings["dbtableprefix"], settings["dbtabletestprefix"])
print ("Connected to database", p.Ready())
t = p.GetTransaction(b"ACCESS SHARE")
if 0:
sio = io.BytesIO()
#enc = pgmap.PyO5mEncode(sio)
enc = pgmap.PyOsmXmlEncode(sio)
print (t.MapQuery((-1.1473846,50.7360206,-0.9901428,50.8649113), 0, enc))
data = sio.getvalue()
print (len(data), "bytes")
if 1:
osmData = pgmap.OsmData()
objectIds = [1000594005591, 1000595178493, 1000594446551]
t.GetObjectsById("node", pgmap.seti64(objectIds), osmData);
print (len(osmData.nodes))
for i in range(len(osmData.nodes)):
node = osmData.nodes[i]
print (type(node))
print (node.objId, node.lat, node.lon)
t.Commit()
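# Sketch of the config.cfg layout ReadConfig() expects -- one colon-separated
# "key: value" pair per line; the values below are placeholders, not real
# credentials:
#
#   dbname: gis
#   dbuser: pgmap
#   dbpass: changeme
#   dbhost: 127.0.0.1
#   dbtableprefix: planet_
#   dbtabletestprefix: mod_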
| 25.058824
| 83
| 0.680751
|
7f212436c965591733004b7d0df70a2f3cb122f6
| 14,507
|
py
|
Python
|
actstream/tests.py
|
lincolnloop/django-activity-stream
|
b1b4884624fab982b35fbcbd28ed321d12e3d054
|
[
"BSD-3-Clause"
] | null | null | null |
actstream/tests.py
|
lincolnloop/django-activity-stream
|
b1b4884624fab982b35fbcbd28ed321d12e3d054
|
[
"BSD-3-Clause"
] | null | null | null |
actstream/tests.py
|
lincolnloop/django-activity-stream
|
b1b4884624fab982b35fbcbd28ed321d12e3d054
|
[
"BSD-3-Clause"
] | null | null | null |
from random import choice
from django.db import connection
from django.db.models import get_model
from django.test import TestCase
from django.conf import settings
from django.contrib.auth.models import User, AnonymousUser, Group
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.template.loader import Template, Context
from actstream.models import Action, Follow, model_stream, user_stream,\
setup_generic_relations
from actstream.actions import follow, unfollow
from actstream.exceptions import ModelNotActionable
from actstream.signals import action
from actstream import settings as actstream_settings
class LTE(int):
def __new__(cls, n):
obj = super(LTE, cls).__new__(cls, n)
obj.n = n
return obj
def __eq__(self, other):
return other <= self.n
def __repr__(self):
return "<= %s" % self.n
class ActivityBaseTestCase(TestCase):
actstream_models = ()
def setUp(self):
self.old_MODELS = actstream_settings.MODELS
actstream_settings.MODELS = {}
for model in self.actstream_models:
actstream_settings.MODELS[model.lower()] = \
get_model(*model.split('.'))
setup_generic_relations()
def tearDown(self):
actstream_settings.MODELS = self.old_MODELS
class ActivityTestCase(ActivityBaseTestCase):
urls = 'actstream.urls'
actstream_models = ('auth.User', 'auth.Group', 'sites.Site')
def setUp(self):
super(ActivityTestCase, self).setUp()
self.group = Group.objects.create(name='CoolGroup')
self.group2 = Group.objects.create(name='Nerds')
self.user1 = User.objects.get_or_create(username='admin')[0]
self.user1.set_password('admin')
self.user1.is_superuser = self.user1.is_staff = True
self.user1.save()
self.user2 = User.objects.get_or_create(username='Two')[0]
# User1 joins group
self.user1.groups.add(self.group)
action.send(self.user1, verb='joined', target=self.group)
# User1 follows User2
follow(self.user1, self.user2)
# User2 joins group
self.user2.groups.add(self.group)
action.send(self.user2, verb='joined', target=self.group)
# User2 follows group
follow(self.user2, self.group)
# User2 follows the second group (not just as an actor).
follow(self.user2, self.group2, actor_only=False)
# User1 comments on group
# Use a site object here and predict the "__unicode__ method output"
action.send(self.user1, verb='commented on', target=self.group)
self.comment = Site.objects.create(
domain="admin: Sweet Group!...")
# Group responds to comment
action.send(self.group, verb='responded to', target=self.comment)
def test_aauser1(self):
self.assertEqual(map(unicode, self.user1.actor_actions.all()), [
u'admin commented on CoolGroup 0 minutes ago',
u'admin started following Two 0 minutes ago',
u'admin joined CoolGroup 0 minutes ago',
])
def test_user2(self):
self.assertEqual(map(unicode, Action.objects.actor(self.user2)), [
u'Two started following Nerds 0 minutes ago',
u'Two started following CoolGroup 0 minutes ago',
u'Two joined CoolGroup 0 minutes ago',
])
def test_group(self):
self.assertEqual(map(unicode, Action.objects.actor(self.group)),
[u'CoolGroup responded to admin: Sweet Group!... 0 minutes ago'])
def test_empty_follow_stream(self):
unfollow(self.user1, self.user2)
self.assert_(not user_stream(self.user1))
def test_stream(self):
self.assertEqual(map(unicode, Action.objects.user(self.user1)), [
u'Two started following Nerds 0 minutes ago',
u'Two started following CoolGroup 0 minutes ago',
u'Two joined CoolGroup 0 minutes ago',
])
self.assertEqual(map(unicode, Action.objects.user(self.user2)), [
u'CoolGroup responded to admin: Sweet Group!... 0 minutes ago',
u'Two started following Nerds 0 minutes ago',
])
def test_stream_stale_follows(self):
"""
Action.objects.user() should ignore Follow objects with stale actor
references.
"""
self.user2.delete()
self.assert_(not 'Two' in str(Action.objects.user(self.user1)))
def test_rss(self):
rss = self.client.get('/feed/').content
self.assert_(rss.startswith('<?xml version="1.0" encoding="utf-8"?>\n'
'<rss xmlns:atom="http://www.w3.org/2005/Atom" version="2.0">'))
self.assert_(rss.find('Activity feed for your followed actors') > -1)
def test_atom(self):
atom = self.client.get('/feed/atom/').content
self.assert_(atom.startswith('<?xml version="1.0" encoding="utf-8"?>\n'
'<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="%s">' %
settings.LANGUAGE_CODE))
self.assert_(atom.find('Activity feed for your followed actors') > -1)
def test_action_object(self):
action.send(self.user1, verb='created comment',
action_object=self.comment, target=self.group)
created_action = Action.objects.get(verb='created comment')
self.assertEqual(created_action.actor, self.user1)
self.assertEqual(created_action.action_object, self.comment)
self.assertEqual(created_action.target, self.group)
self.assertEqual(unicode(created_action),
u'admin created comment admin: Sweet Group!... on CoolGroup 0 '
'minutes ago')
def test_doesnt_generate_duplicate_follow_records(self):
g = Group.objects.get_or_create(name='DupGroup')[0]
s = User.objects.get_or_create(username='dupuser')[0]
f1 = follow(s, g)
self.assertTrue(f1 is not None, "Should have received a new follow "
"record")
self.assertTrue(isinstance(f1, Follow), "Returns a Follow object")
self.assertEquals(1, Follow.objects.filter(user=s, object_id=g.pk,
content_type=ContentType.objects.get_for_model(g)).count(),
"Should only have 1 follow record here")
f2 = follow(s, g)
self.assertEquals(1, Follow.objects.filter(user=s, object_id=g.pk,
content_type=ContentType.objects.get_for_model(g)).count(),
"Should still only have 1 follow record here")
self.assertTrue(f2 is not None, "Should have received a Follow object")
self.assertTrue(isinstance(f2, Follow), "Returns a Follow object")
self.assertEquals(f1, f2, "Should have received the same Follow "
"object that I first submitted")
def test_zzzz_no_orphaned_actions(self):
actions = self.user1.actor_actions.count()
self.user2.delete()
self.assertEqual(actions, self.user1.actor_actions.count() + 1)
def test_generic_relation_accessors(self):
self.assertEqual(self.user2.actor_actions.count(), 3)
self.assertEqual(self.user2.target_actions.count(), 1)
self.assertEqual(self.user2.action_object_actions.count(), 0)
def test_bad_actionable_model(self):
self.assertRaises(ModelNotActionable, follow, self.user1,
ContentType.objects.get_for_model(self.user1))
def test_hidden_action(self):
action = self.user1.actor_actions.all()[0]
action.public = False
action.save()
self.assert_(not action in self.user1.actor_actions.public())
def test_tag_follow_url(self):
src = '{% load activity_tags %}{% follow_url user %}'
output = Template(src).render(Context({'user': self.user1}))
ct = ContentType.objects.get_for_model(User)
self.assertEqual(output, '/follow/%s/%s/' % (ct.pk, self.user1.pk))
def test_model_actions_with_kwargs(self):
"""
Testing the model_actions method of the ActionManager
by passing kwargs
"""
self.assertEqual(map(unicode, model_stream(self.user1, verb='commented on')), [
u'admin commented on CoolGroup 0 minutes ago',
])
def test_user_stream_with_kwargs(self):
"""
Testing the user method of the ActionManager by passing additional
filters in kwargs
"""
self.assertEqual(map(unicode, Action.objects.user(self.user1, verb='joined')), [
u'Two joined CoolGroup 0 minutes ago',
])
def test_is_following_filter(self):
src = '{% load activity_tags %}{% if user|is_following:group %}yup{% endif %}'
self.assertEqual(Template(src).render(Context({
'user': self.user2, 'group': self.group
})), u'yup')
self.assertEqual(Template(src).render(Context({
'user': self.user1, 'group': self.group
})), u'')
class ZombieTest(ActivityBaseTestCase):
actstream_models = ('auth.User',)
human = 10
zombie = 1
def setUp(self):
super(ZombieTest, self).setUp()
settings.DEBUG = True
player_generator = lambda n, count: [User.objects.create(
username='%s%d' % (n, i)) for i in range(count)]
self.humans = player_generator('human', self.human)
self.zombies = player_generator('zombie', self.zombie)
self.zombie_apocalypse()
def tearDown(self):
settings.DEBUG = False
super(ZombieTest, self).tearDown()
def zombie_apocalypse(self):
humans = self.humans[:]
zombies = self.zombies[:]
while humans:
for z in self.zombies:
victim = choice(humans)
humans.remove(victim)
zombies.append(victim)
action.send(z, verb='killed', target=victim)
if not humans:
break
def check_query_count(self, queryset):
ci = len(connection.queries)
result = list([map(unicode, (x.actor, x.target, x.action_object))
for x in queryset])
self.assertTrue(len(connection.queries) - ci <= 4,
'Too many queries, got %d expected no more than 4' %
len(connection.queries))
return result
def test_query_count(self):
queryset = model_stream(User)
result = self.check_query_count(queryset)
self.assertEqual(len(result), 10)
def test_query_count_sliced(self):
queryset = model_stream(User)[:5]
result = self.check_query_count(queryset)
self.assertEqual(len(result), 5)
class GFKManagerTestCase(TestCase):
def setUp(self):
self.user_ct = ContentType.objects.get_for_model(User)
self.group_ct = ContentType.objects.get_for_model(Group)
self.group, _ = Group.objects.get_or_create(name='CoolGroup')
self.user1, _ = User.objects.get_or_create(username='admin')
self.user2, _ = User.objects.get_or_create(username='Two')
self.user3, _ = User.objects.get_or_create(username='Three')
self.user4, _ = User.objects.get_or_create(username='Four')
Action.objects.get_or_create(
actor_content_type=self.user_ct,
actor_object_id=self.user1.id,
verb='followed',
target_content_type=self.user_ct,
target_object_id=self.user2.id
)
Action.objects.get_or_create(
actor_content_type=self.user_ct,
actor_object_id=self.user1.id,
verb='followed',
target_content_type=self.user_ct,
target_object_id=self.user3.id
)
Action.objects.get_or_create(
actor_content_type=self.user_ct,
actor_object_id=self.user1.id,
verb='followed',
target_content_type=self.user_ct,
target_object_id=self.user4.id
)
Action.objects.get_or_create(
actor_content_type=self.user_ct,
actor_object_id=self.user1.id,
verb='joined',
target_content_type=self.group_ct,
target_object_id=self.group.id
)
def test_fetch_generic_relations(self):
# baseline without fetch_generic_relations
_actions = Action.objects.filter(actor_content_type=self.user_ct,
actor_object_id=self.user1.id)
actions = lambda: _actions._clone()
num_content_types = len(set(actions().values_list(
'target_content_type_id', flat=True)))
n = actions().count()
# compare to fetching only 1 generic relation
self.assertNumQueries(LTE(n + 1),
lambda: [a.target for a in actions()])
self.assertNumQueries(LTE(num_content_types + 2),
lambda: [a.target for a in
actions().fetch_generic_relations('target')])
action_targets = [(a.id, a.target) for a in actions()]
action_targets_fetch_generic = [(a.id, a.target) for a in
actions().fetch_generic_relations('target')]
self.assertEqual(action_targets, action_targets_fetch_generic)
# compare to fetching all generic relations
num_content_types = len(set(sum(actions().values_list(
'actor_content_type_id', 'target_content_type_id'), ())))
self.assertNumQueries(LTE(2 * n + 1),
lambda: [(a.actor, a.target) for a in actions()])
self.assertNumQueries(LTE(num_content_types + 2),
lambda: [(a.actor, a.target) for a in
actions().fetch_generic_relations()])
action_actor_targets = [(a.id, a.actor, a.target) for a in actions()]
action_actor_targets_fetch_generic_all = [
(a.id, a.actor, a.target) for a in
actions().fetch_generic_relations()]
self.assertEqual(action_actor_targets,
action_actor_targets_fetch_generic_all)
# fetch only 1 generic relation, but access both gfks
self.assertNumQueries(LTE(n + num_content_types + 2),
lambda: [(a.actor, a.target) for a in
actions().fetch_generic_relations('target')])
action_actor_targets_fetch_generic_target = [
(a.id, a.actor, a.target) for a in
actions().fetch_generic_relations('target')]
self.assertEqual(action_actor_targets,
action_actor_targets_fetch_generic_target)
| 39.52861
| 88
| 0.636934
|
144e88c9c9e7499ac681e175f75900f7f928ce83
| 6,435
|
py
|
Python
|
src/kubernetes/client/models/v1alpha1_pod_preset_spec.py
|
MarletteFunding/aws-kube-codesuite
|
ab4e5ce45416b83bffb947ab8d234df5437f4fca
|
[
"Apache-2.0"
] | 184
|
2017-12-20T21:50:06.000Z
|
2022-03-19T13:24:58.000Z
|
src/kubernetes/client/models/v1alpha1_pod_preset_spec.py
|
MarletteFunding/aws-kube-codesuite
|
ab4e5ce45416b83bffb947ab8d234df5437f4fca
|
[
"Apache-2.0"
] | 15
|
2018-01-17T17:30:51.000Z
|
2021-12-16T14:25:09.000Z
|
src/kubernetes/client/models/v1alpha1_pod_preset_spec.py
|
MarletteFunding/aws-kube-codesuite
|
ab4e5ce45416b83bffb947ab8d234df5437f4fca
|
[
"Apache-2.0"
] | 136
|
2018-01-09T22:52:06.000Z
|
2022-02-24T13:26:18.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1alpha1PodPresetSpec(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, env=None, env_from=None, selector=None, volume_mounts=None, volumes=None):
"""
V1alpha1PodPresetSpec - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'env': 'list[V1EnvVar]',
'env_from': 'list[V1EnvFromSource]',
'selector': 'V1LabelSelector',
'volume_mounts': 'list[V1VolumeMount]',
'volumes': 'list[V1Volume]'
}
self.attribute_map = {
'env': 'env',
'env_from': 'envFrom',
'selector': 'selector',
'volume_mounts': 'volumeMounts',
'volumes': 'volumes'
}
self._env = env
self._env_from = env_from
self._selector = selector
self._volume_mounts = volume_mounts
self._volumes = volumes
@property
def env(self):
"""
Gets the env of this V1alpha1PodPresetSpec.
Env defines the collection of EnvVar to inject into containers.
:return: The env of this V1alpha1PodPresetSpec.
:rtype: list[V1EnvVar]
"""
return self._env
@env.setter
def env(self, env):
"""
Sets the env of this V1alpha1PodPresetSpec.
Env defines the collection of EnvVar to inject into containers.
:param env: The env of this V1alpha1PodPresetSpec.
:type: list[V1EnvVar]
"""
self._env = env
@property
def env_from(self):
"""
Gets the env_from of this V1alpha1PodPresetSpec.
EnvFrom defines the collection of EnvFromSource to inject into containers.
:return: The env_from of this V1alpha1PodPresetSpec.
:rtype: list[V1EnvFromSource]
"""
return self._env_from
@env_from.setter
def env_from(self, env_from):
"""
Sets the env_from of this V1alpha1PodPresetSpec.
EnvFrom defines the collection of EnvFromSource to inject into containers.
:param env_from: The env_from of this V1alpha1PodPresetSpec.
:type: list[V1EnvFromSource]
"""
self._env_from = env_from
@property
def selector(self):
"""
Gets the selector of this V1alpha1PodPresetSpec.
Selector is a label query over a set of resources, in this case pods. Required.
:return: The selector of this V1alpha1PodPresetSpec.
:rtype: V1LabelSelector
"""
return self._selector
@selector.setter
def selector(self, selector):
"""
Sets the selector of this V1alpha1PodPresetSpec.
Selector is a label query over a set of resources, in this case pods. Required.
:param selector: The selector of this V1alpha1PodPresetSpec.
:type: V1LabelSelector
"""
self._selector = selector
@property
def volume_mounts(self):
"""
Gets the volume_mounts of this V1alpha1PodPresetSpec.
VolumeMounts defines the collection of VolumeMount to inject into containers.
:return: The volume_mounts of this V1alpha1PodPresetSpec.
:rtype: list[V1VolumeMount]
"""
return self._volume_mounts
@volume_mounts.setter
def volume_mounts(self, volume_mounts):
"""
Sets the volume_mounts of this V1alpha1PodPresetSpec.
VolumeMounts defines the collection of VolumeMount to inject into containers.
:param volume_mounts: The volume_mounts of this V1alpha1PodPresetSpec.
:type: list[V1VolumeMount]
"""
self._volume_mounts = volume_mounts
@property
def volumes(self):
"""
Gets the volumes of this V1alpha1PodPresetSpec.
Volumes defines the collection of Volume to inject into the pod.
:return: The volumes of this V1alpha1PodPresetSpec.
:rtype: list[V1Volume]
"""
return self._volumes
@volumes.setter
def volumes(self, volumes):
"""
Sets the volumes of this V1alpha1PodPresetSpec.
Volumes defines the collection of Volume to inject into the pod.
:param volumes: The volumes of this V1alpha1PodPresetSpec.
:type: list[V1Volume]
"""
self._volumes = volumes
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1alpha1PodPresetSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
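if __name__ == '__main__':
    # Illustrative round trip only: build an empty spec and inspect it.  Real
    # callers would pass V1LabelSelector / V1EnvVar objects from this client.
    spec = V1alpha1PodPresetSpec()
    print(spec.to_dict())                       # every field reported as None
    print(spec == V1alpha1PodPresetSpec())      # True: equality is field-based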
| 28.986486
| 105
| 0.591919
|
5be70be1f368fd47d6033ba61c4c6b57b87df8e1
| 1,248
|
py
|
Python
|
src/surveys18/tests/test_phone.py
|
wups101/alss-dev
|
6dde342e4de4f0d94df2769e83f6fefc79100daf
|
[
"MIT"
] | null | null | null |
src/surveys18/tests/test_phone.py
|
wups101/alss-dev
|
6dde342e4de4f0d94df2769e83f6fefc79100daf
|
[
"MIT"
] | null | null | null |
src/surveys18/tests/test_phone.py
|
wups101/alss-dev
|
6dde342e4de4f0d94df2769e83f6fefc79100daf
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.core.management import call_command
from surveys18.models import Survey, Phone
class ModelTestCase(TestCase):
"""
models: Phone, Survey
reference models :
data: phone.yaml, survey.yaml
    main: Phone is associated with Survey; one farmer can have many phones.
"""
def setUp(self):
# load fixtures
call_command('loaddata', 'test/survey.yaml', verbosity=0)
call_command('loaddata', 'test/phone.yaml', verbosity=0)
def test_loaddata(self):
survey_list = Survey.objects.all()
self.assertEquals(len(survey_list), 3)
phone_list = Phone.objects.all()
self.assertEquals(len(phone_list), 3)
def test_create_phone(self):
survey_id = Survey.objects.get(id=3)
phone_list_before_size = len(Phone.objects.all())
#new value
Phone.objects.create(survey=survey_id, phone=22222222)
phone_list_after_size = len(Phone.objects.all())
self.assertEquals(phone_list_after_size, phone_list_before_size+1)
def test_survey_delete(self):
Survey.objects.filter(id=1).delete()
phone_list = Phone.objects.filter(survey__id=1)
self.assertEquals(phone_list.count(), 0)
| 29.023256
| 74
| 0.680288
|
7aa22d7aaebbee0c3af1bc76393c2ea52f68638b
| 5,807
|
py
|
Python
|
src/python/ensembl/production/datafile/scraper/parsers/base.py
|
luca-drf/ensembl-production
|
8251026004d786f5a160584f3550227adc395cc1
|
[
"Apache-2.0"
] | null | null | null |
src/python/ensembl/production/datafile/scraper/parsers/base.py
|
luca-drf/ensembl-production
|
8251026004d786f5a160584f3550227adc395cc1
|
[
"Apache-2.0"
] | null | null | null |
src/python/ensembl/production/datafile/scraper/parsers/base.py
|
luca-drf/ensembl-production
|
8251026004d786f5a160584f3550227adc395cc1
|
[
"Apache-2.0"
] | null | null | null |
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base File Parser for DataFile Scraper package"""
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Tuple, Optional
from urllib.parse import urljoin
from ..utils import blake2bsum, make_release
_FILE_COMPRESSIONS_MAP = {"gz": "gzip"}
@dataclass
class FileURL:
url: str
headers: Dict[str, str]
@dataclass
class Species:
production_name: str
division: str
taxon_id: int
species_taxon_id: int
classifications: List[str]
@dataclass
class Release:
ens: int
eg: int
date: str
@dataclass
class Assembly:
default: str
accession: str
provider_name: str
genome_id: int
@dataclass
class BaseOptMetadata:
file_extension: Optional[str]
@dataclass
class BaseFileMetadata:
file_type: str
species: str
ens_release: int
@dataclass
class FileMetadata:
file_path: str
file_set_id: Optional[int]
file_format: str
release: Release
assembly: Assembly
file_size: int
file_last_modified: str
blake2bsum: str
urls: List[FileURL]
species: Species
optional_data: BaseOptMetadata
@dataclass
class Result:
file_metadata: Optional[FileMetadata]
errors: List[str]
class FileParserError(ValueError):
def __init__(self, message: str, errors: dict) -> None:
super().__init__(message)
self.errors = errors
class BaseFileParser:
def __init__(self, **options) -> None:
self._options = options
self._ftp_dirs = [
(Path(options.get("ftp_dir_ens", "")).resolve(), options.get("ftp_url_ens")),
(Path(options.get("ftp_dir_eg", "")).resolve(), options.get("ftp_url_eg")),
]
def parse_metadata(self, metadata: dict) -> Result:
errors: List[str] = []
file_path = (Path(metadata["file_dir"]) / metadata["file_name"]).resolve()
try:
base_metadata = self.get_base_metadata(metadata)
except ValueError as e:
errors.append(f"Error parsing base metadata: {e}")
try:
optional_data = self.get_optional_metadata(metadata)
except ValueError as e:
errors.append(f"Error parsing optional metadata: {e}")
if errors:
errors.insert(0, f"Cannot parse {file_path}")
return Result(None, errors)
file_stats = file_path.stat()
last_modified = (
datetime.fromtimestamp(file_stats.st_mtime).astimezone().isoformat()
)
b2bsum_chunk_size = self._options.get("b2bsum_chunk_size")
b2bsum = blake2bsum(file_path, b2bsum_chunk_size).hex()
ftp_uri = self.get_ftp_uri(file_path)
file_metadata = FileMetadata(
file_path=str(file_path),
file_set_id=metadata.get("file_set_id"),
file_format=metadata["file_format"],
release=Release(
*make_release(int(metadata["ens_release"])), date=metadata["release_date"]
),
assembly=Assembly(
default=metadata["assembly_default"],
accession=metadata["assembly_accession"],
provider_name="",
genome_id=metadata["genome_id"],
),
file_size=file_stats.st_size,
file_last_modified=last_modified,
blake2bsum=b2bsum,
urls=[FileURL(ftp_uri, {})],
species=Species(
production_name=base_metadata.species,
division=metadata["division"],
taxon_id=metadata["taxon_id"],
species_taxon_id=metadata["species_taxon_id"],
classifications=[metadata["division"]],
),
optional_data=optional_data,
)
result = Result(file_metadata, errors)
return result
def _ftp_paths(self, file_path: Path) -> Tuple[str, Optional[Path]]:
for ftp_root_dir, ftp_root_uri in self._ftp_dirs:
try:
relative_path = file_path.relative_to(ftp_root_dir)
return ftp_root_uri, relative_path
except (ValueError, TypeError):
pass
return "", None
def get_ftp_uri(self, file_path: Path) -> str:
ftp_root_uri, relative_path = self._ftp_paths(file_path)
if relative_path is not None:
ftp_uri = urljoin(ftp_root_uri, str(relative_path))
return ftp_uri
return "none"
def get_base_metadata(self, metadata: dict) -> BaseFileMetadata:
raise NotImplementedError(
"Calling abstract method: BaseFileParser.get_base_metadata"
)
def get_optional_metadata(self, metadata: dict) -> BaseOptMetadata:
raise NotImplementedError(
"Calling abstract method: BaseFileParser.get_optional_metadata"
)
class FileParser(BaseFileParser):
def get_base_metadata(self, metadata: dict) -> BaseFileMetadata:
return BaseFileMetadata(
file_type=metadata["file_format"].lower(),
species=metadata["species"].lower(),
ens_release=int(metadata["ens_release"]),
)
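class ExampleFileParser(FileParser):
    """Hypothetical parser sketching how the get_optional_metadata() hook is
    meant to be filled in; the real format-specific parsers live in the
    sibling modules of this package."""

    def get_optional_metadata(self, metadata: dict) -> BaseOptMetadata:
        # derive the extension from the file name present in the metadata dict
        extension = Path(metadata.get("file_name", "")).suffix.lstrip(".").lower() or None
        return BaseOptMetadata(file_extension=extension)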
| 31.053476
| 90
| 0.64715
|
c6caf9c84eb2f194be4efeebf8974f24b922ee30
| 565
|
py
|
Python
|
actor_critic.py
|
escribano89/tennis-maddpg
|
f7877bdb0a9f9e773b3d4a1d4847338b9cea9a8e
|
[
"MIT"
] | null | null | null |
actor_critic.py
|
escribano89/tennis-maddpg
|
f7877bdb0a9f9e773b3d4a1d4847338b9cea9a8e
|
[
"MIT"
] | null | null | null |
actor_critic.py
|
escribano89/tennis-maddpg
|
f7877bdb0a9f9e773b3d4a1d4847338b9cea9a8e
|
[
"MIT"
] | 1
|
2020-12-20T19:07:42.000Z
|
2020-12-20T19:07:42.000Z
|
# -*- coding: utf-8 -*-
from actor import Actor
from critic import Critic
from params import DEVICE
class ActorCritic():
def __init__(self, n_agents, state_size, action_size, seed):
critic_input_size = (state_size+action_size)*n_agents
self.actor_regular = Actor(state_size, action_size, seed).to(DEVICE)
self.actor_target = Actor(state_size, action_size, seed).to(DEVICE)
self.critic_regular = Critic(critic_input_size, seed).to(DEVICE)
self.critic_target = Critic(critic_input_size, seed).to(DEVICE)
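if __name__ == '__main__':
    # Illustrative sizes only (assumed from the Unity Tennis setting this kind
    # of repo targets: 2 agents, 24-dim observations, 2-dim actions).  Each
    # critic then sees all agents' states and actions: (24 + 2) * 2 = 52 inputs.
    pair = ActorCritic(n_agents=2, state_size=24, action_size=2, seed=0)
    print(pair.actor_regular, pair.critic_regular)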
| 37.666667
| 76
| 0.704425
|
0a1e93fae4f966a6c5026ee3e224d1fcac5d0c1b
| 747
|
py
|
Python
|
RIT33PYT/Codigo_1/DesignPattern/recordar.py
|
Sbastdia/Ejercicios-probables-examen
|
1074dcb08c5daf7bda0c0e9bbacb96c991895f32
|
[
"Apache-2.0"
] | null | null | null |
RIT33PYT/Codigo_1/DesignPattern/recordar.py
|
Sbastdia/Ejercicios-probables-examen
|
1074dcb08c5daf7bda0c0e9bbacb96c991895f32
|
[
"Apache-2.0"
] | null | null | null |
RIT33PYT/Codigo_1/DesignPattern/recordar.py
|
Sbastdia/Ejercicios-probables-examen
|
1074dcb08c5daf7bda0c0e9bbacb96c991895f32
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Callback design pattern
"""
__author__ = "Sébastien CHAZALLET"
__copyright__ = "Copyright 2012"
__credits__ = ["Sébastien CHAZALLET", "InsPyration.org", "Ediciones ENI"]
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "Sébastien CHAZALLET"
__email__ = "sebastien.chazallet@laposte.net"
__status__ = "Production"
def callback():
print('Función de llamada')
def do(value, *args, callback):
print('Action')
if value > 0:
callback()
do(0, callback=callback)
do(1, callback=callback)
class A:
def __init__(self, name):
self.name = name
def do(self, *args, callback):
callback(self.name)
class B:
def print(self, name):
print(name)
a = A('Test')
a.do(callback=B().print)
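# Any callable works as the callback -- a lambda or a built-in alike:
do(2, callback=lambda: print('lambda callback'))
a.do(callback=print)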
| 17.372093
| 73
| 0.690763
|
b0b5428493f6de53bbe201bdb2b321e0e7be21ed
| 54,969
|
py
|
Python
|
tinytag/tinytag.py
|
russpoutine/tinytag
|
2c03b0b4b15056feb7c8051fa39728bcd71be30d
|
[
"MIT"
] | null | null | null |
tinytag/tinytag.py
|
russpoutine/tinytag
|
2c03b0b4b15056feb7c8051fa39728bcd71be30d
|
[
"MIT"
] | null | null | null |
tinytag/tinytag.py
|
russpoutine/tinytag
|
2c03b0b4b15056feb7c8051fa39728bcd71be30d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# tinytag - an audio meta info reader
# Copyright (c) 2014-2018 Tom Wallroth
#
# Sources on github:
# http://github.com/devsnd/tinytag/
# MIT License
# Copyright (c) 2014-2019 Tom Wallroth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import json
try:
    from collections.abc import MutableMapping  # Python 3.3+
except ImportError:  # Python 2 fallback
    from collections import MutableMapping
from collections import OrderedDict
import codecs
from functools import reduce
import struct
import os
import io
import sys
from io import BytesIO
import re
DEBUG = os.environ.get('DEBUG', False) # some of the parsers can print debug info
class TinyTagException(LookupError): # inherit LookupError for backwards compat
pass
def _read(fh, nbytes): # helper function to check if we haven't reached EOF
b = fh.read(nbytes)
if len(b) < nbytes:
raise TinyTagException('Unexpected end of file')
return b
def stderr(*args):
sys.stderr.write('%s\n' % ' '.join(args))
sys.stderr.flush()
def _bytes_to_int_le(b):
fmt = {1: '<B', 2: '<H', 4: '<I', 8: '<Q'}.get(len(b))
return struct.unpack(fmt, b)[0] if fmt is not None else 0
def _bytes_to_int(b):
return reduce(lambda accu, elem: (accu << 8) + elem, b, 0)
class TinyTag(object):
def __init__(self, filehandler, filesize, ignore_errors=False):
if isinstance(filehandler, str):
raise Exception('Use `TinyTag.get(filepath)` instead of `TinyTag(filepath)`')
self._filehandler = filehandler
self.filesize = filesize
self.album = None
self.albumartist = None
self.artist = None
self.audio_offset = None
self.bitrate = None
self.channels = None
self.comment = None
self.composer = None
self.disc = None
self.disc_total = None
self.duration = None
self.genre = None
self.samplerate = None
self.title = None
self.track = None
self.track_total = None
self.year = None
self._load_image = False
self._image_data = None
self._ignore_errors = ignore_errors
def as_dict(self):
return {k: v for k, v in self.__dict__.items() if not k.startswith('_')}
@classmethod
def is_supported(cls, filename):
return cls._get_parser_for_filename(filename) is not None
def get_image(self):
return self._image_data
@classmethod
def _get_parser_for_filename(cls, filename):
mapping = {
(b'.mp3',): ID3,
(b'.oga', b'.ogg', b'.opus'): Ogg,
(b'.wav',): Wave,
(b'.flac',): Flac,
(b'.wma',): Wma,
(b'.m4b', b'.m4a', b'.mp4'): MP4,
}
if not isinstance(filename, bytes): # convert filename to binary
filename = filename.encode('ASCII', errors='ignore').lower()
for ext, tagclass in mapping.items():
if filename.endswith(ext):
return tagclass
@classmethod
def _get_parser_for_file_handle(cls, fh):
# https://en.wikipedia.org/wiki/List_of_file_signatures
magic_bytes_mapping = {
b'^ID3': ID3,
b'^\xff\xfb': ID3,
b'^OggS': Ogg,
b'^RIFF....WAVE': Wave,
b'^fLaC': Flac,
b'^\x30\x26\xB2\x75\x8E\x66\xCF\x11\xA6\xD9\x00\xAA\x00\x62\xCE\x6C': Wma,
b'....ftypM4A': MP4, # https://www.file-recovery.com/m4a-signature-format.htm
}
header = fh.peek(max(len(sig) for sig in magic_bytes_mapping))
for magic, parser in magic_bytes_mapping.items():
if re.match(magic, header):
return parser
@classmethod
def get_parser_class(cls, filename, filehandle):
if cls != TinyTag: # if `get` is invoked on TinyTag, find parser by ext
return cls # otherwise use the class on which `get` was invoked
parser_class = cls._get_parser_for_filename(filename)
if parser_class is not None:
return parser_class
# try determining the file type by magic byte header
parser_class = cls._get_parser_for_file_handle(filehandle)
if parser_class is not None:
return parser_class
raise TinyTagException('No tag reader found to support filetype! ')
@classmethod
def get(cls, filename, tags=True, duration=True, image=False, ignore_errors=False):
try: # cast pathlib.Path to str
import pathlib
if isinstance(filename, pathlib.Path):
filename = str(filename.absolute())
except ImportError:
pass
else:
filename = os.path.expanduser(filename)
size = os.path.getsize(filename)
if not size > 0:
return TinyTag(None, 0)
with io.open(filename, 'rb') as af:
parser_class = cls.get_parser_class(filename, af)
tag = parser_class(af, size, ignore_errors=ignore_errors)
tag.load(tags=tags, duration=duration, image=image)
return tag
def __str__(self):
return json.dumps(OrderedDict(sorted(self.as_dict().items())))
def __repr__(self):
return str(self)
def load(self, tags, duration, image=False):
self._load_image = image
if tags:
self._parse_tag(self._filehandler)
if duration:
if tags: # rewind file if the tags were already parsed
self._filehandler.seek(0)
self._determine_duration(self._filehandler)
def _set_field(self, fieldname, bytestring, transfunc=None):
"""convienience function to set fields of the tinytag by name.
the payload (bytestring) can be changed using the transfunc"""
if getattr(self, fieldname): # do not overwrite existing data
return
value = bytestring if transfunc is None else transfunc(bytestring)
if DEBUG:
stderr('Setting field "%s" to "%s"' % (fieldname, value))
if fieldname == 'genre':
genre_id = 255
if value.isdigit(): # funky: id3v1 genre hidden in a id3v2 field
genre_id = int(value)
else: # funkier: the TCO may contain genres in parens, e.g. '(13)'
genre_in_parens = re.match('^\\((\\d+)\\)$', value)
if genre_in_parens:
genre_id = int(genre_in_parens.group(1))
if 0 <= genre_id < len(ID3.ID3V1_GENRES):
value = ID3.ID3V1_GENRES[genre_id]
if fieldname in ("track", "disc"):
if type(value).__name__ in ('str', 'unicode') and '/' in value:
current, total = value.split('/')[:2]
setattr(self, "%s_total" % fieldname, total)
else:
current = value
setattr(self, fieldname, current)
else:
setattr(self, fieldname, value)
def _determine_duration(self, fh):
raise NotImplementedError()
def _parse_tag(self, fh):
raise NotImplementedError()
def update(self, other):
# update the values of this tag with the values from another tag
for key in ['track', 'track_total', 'title', 'artist',
'album', 'albumartist', 'year', 'duration',
'genre', 'disc', 'disc_total', 'comment', 'composer']:
if not getattr(self, key) and getattr(other, key):
setattr(self, key, getattr(other, key))
@staticmethod
def _unpad(s):
# strings in mp3 and asf *may* be terminated with a zero byte at the end
return s.replace('\x00', '')
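# Typical use of the class above (a sketch; 'song.mp3' is a placeholder path):
#
#     tag = TinyTag.get('song.mp3')              # parse tags and duration
#     print(tag.artist, tag.title, tag.duration)
#     tag = TinyTag.get('song.mp3', image=True)  # also load embedded artwork
#     cover_bytes = tag.get_image()              # None if no image is present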
class MP4(TinyTag):
# see: https://developer.apple.com/library/mac/documentation/QuickTime/QTFF/Metadata/Metadata.html
# and: https://developer.apple.com/library/mac/documentation/QuickTime/QTFF/QTFFChap2/qtff2.html
class Parser:
# https://developer.apple.com/library/mac/documentation/QuickTime/QTFF/Metadata/Metadata.html#//apple_ref/doc/uid/TP40000939-CH1-SW34
ATOM_DECODER_BY_TYPE = {
0: lambda x: x, # 'reserved',
1: lambda x: codecs.decode(x, 'utf-8', 'replace'), # UTF-8
2: lambda x: codecs.decode(x, 'utf-16', 'replace'), # UTF-16
3: lambda x: codecs.decode(x, 's/jis', 'replace'), # S/JIS
# 16: duration in millis
13: lambda x: x, # JPEG
14: lambda x: x, # PNG
21: lambda x: struct.unpack('>b', x)[0], # BE Signed int
22: lambda x: struct.unpack('>B', x)[0], # BE Unsigned int
23: lambda x: struct.unpack('>f', x)[0], # BE Float32
24: lambda x: struct.unpack('>d', x)[0], # BE Float64
# 27: lambda x: x, # BMP
# 28: lambda x: x, # QuickTime Metadata atom
65: lambda x: struct.unpack('b', x)[0], # 8-bit Signed int
66: lambda x: struct.unpack('>h', x)[0], # BE 16-bit Signed int
67: lambda x: struct.unpack('>i', x)[0], # BE 32-bit Signed int
74: lambda x: struct.unpack('>q', x)[0], # BE 64-bit Signed int
75: lambda x: struct.unpack('B', x)[0], # 8-bit Unsigned int
76: lambda x: struct.unpack('>H', x)[0], # BE 16-bit Unsigned int
77: lambda x: struct.unpack('>I', x)[0], # BE 32-bit Unsigned int
78: lambda x: struct.unpack('>Q', x)[0], # BE 64-bit Unsigned int
}
@classmethod
def make_data_atom_parser(cls, fieldname):
def parse_data_atom(data_atom):
data_type = struct.unpack('>I', data_atom[:4])[0]
conversion = cls.ATOM_DECODER_BY_TYPE.get(data_type)
if conversion is None:
stderr('Cannot convert data type: %s' % data_type)
return {} # don't know how to convert data atom
# skip header & null-bytes, convert rest
return {fieldname: conversion(data_atom[8:])}
return parse_data_atom
@classmethod
def make_number_parser(cls, fieldname1, fieldname2):
def _(data_atom):
number_data = data_atom[8:14]
numbers = struct.unpack('>HHH', number_data)
# for some reason the first number is always irrelevant.
return {fieldname1: numbers[1], fieldname2: numbers[2]}
return _
@classmethod
def parse_id3v1_genre(cls, data_atom):
# dunno why the genre is offset by -1 but that's how mutagen does it
idx = struct.unpack('>H', data_atom[8:])[0] - 1
if idx < len(ID3.ID3V1_GENRES):
return {'genre': ID3.ID3V1_GENRES[idx]}
return {'genre': None}
@classmethod
def parse_audio_sample_entry(cls, data):
# this atom also contains the esds atom:
# https://ffmpeg.org/doxygen/0.6/mov_8c-source.html
# http://xhelmboyx.tripod.com/formats/mp4-layout.txt
datafh = BytesIO(data)
datafh.seek(16, os.SEEK_CUR) # jump over version and flags
channels = struct.unpack('>H', datafh.read(2))[0]
datafh.seek(2, os.SEEK_CUR) # jump over bit_depth
datafh.seek(2, os.SEEK_CUR) # jump over QT compr id & pkt size
sr = struct.unpack('>I', datafh.read(4))[0]
esds_atom_size = struct.unpack('>I', data[28:32])[0]
esds_atom = BytesIO(data[36:36 + esds_atom_size])
# http://sasperger.tistory.com/103
esds_atom.seek(22, os.SEEK_CUR) # jump over most data...
esds_atom.seek(4, os.SEEK_CUR) # jump over max bitrate
avg_br = struct.unpack('>I', esds_atom.read(4))[0] / 1000.0 # kbit/s
return {'channels': channels, 'samplerate': sr, 'bitrate': avg_br}
@classmethod
def parse_mvhd(cls, data):
# http://stackoverflow.com/a/3639993/1191373
walker = BytesIO(data)
version = struct.unpack('b', walker.read(1))[0]
walker.seek(3, os.SEEK_CUR) # jump over flags
if version == 0: # uses 32 bit integers for timestamps
walker.seek(8, os.SEEK_CUR) # jump over create & mod times
time_scale = struct.unpack('>I', walker.read(4))[0]
duration = struct.unpack('>I', walker.read(4))[0]
else: # version == 1: # uses 64 bit integers for timestamps
walker.seek(16, os.SEEK_CUR) # jump over create & mod times
time_scale = struct.unpack('>I', walker.read(4))[0]
duration = struct.unpack('>q', walker.read(8))[0]
return {'duration': float(duration) / time_scale}
@classmethod
def debug_atom(cls, data):
stderr(data) # use this function to inspect atoms in an atom tree
return {}
# The parser tree: Each key is an atom name which is traversed if existing.
# Leaves of the parser tree are callables which receive the atom data.
# callables return {fieldname: value} which is updates the TinyTag.
META_DATA_TREE = {b'moov': {b'udta': {b'meta': {b'ilst': {
# see: http://atomicparsley.sourceforge.net/mpeg-4files.html
b'\xa9alb': {b'data': Parser.make_data_atom_parser('album')},
b'\xa9ART': {b'data': Parser.make_data_atom_parser('artist')},
b'aART': {b'data': Parser.make_data_atom_parser('albumartist')},
# b'cpil': {b'data': Parser.make_data_atom_parser('compilation')},
b'\xa9cmt': {b'data': Parser.make_data_atom_parser('comment')},
b'disk': {b'data': Parser.make_number_parser('disc', 'disc_total')},
b'\xa9wrt': {b'data': Parser.make_data_atom_parser('composer')},
b'\xa9day': {b'data': Parser.make_data_atom_parser('year')},
b'\xa9gen': {b'data': Parser.make_data_atom_parser('genre')},
b'gnre': {b'data': Parser.parse_id3v1_genre},
b'\xa9nam': {b'data': Parser.make_data_atom_parser('title')},
b'trkn': {b'data': Parser.make_number_parser('track', 'track_total')},
}}}}}
# see: https://developer.apple.com/library/mac/documentation/QuickTime/QTFF/QTFFChap3/qtff3.html
AUDIO_DATA_TREE = {
b'moov': {
b'mvhd': Parser.parse_mvhd,
b'trak': {b'mdia': {b"minf": {b"stbl": {b"stsd": {b'mp4a':
Parser.parse_audio_sample_entry
}}}}}
}
}
IMAGE_DATA_TREE = {b'moov': {b'udta': {b'meta': {b'ilst': {
b'covr': {b'data': Parser.make_data_atom_parser('_image_data')},
}}}}}
VERSIONED_ATOMS = {b'meta', b'stsd'} # those have an extra 4 byte header
FLAGGED_ATOMS = {b'stsd'} # these also have an extra 4 byte header
def _determine_duration(self, fh):
self._traverse_atoms(fh, path=self.AUDIO_DATA_TREE)
def _parse_tag(self, fh):
self._traverse_atoms(fh, path=self.META_DATA_TREE)
if self._load_image: # A bit inefficient, we rewind the file
self._filehandler.seek(0) # to parse it again for the image
self._traverse_atoms(fh, path=self.IMAGE_DATA_TREE)
def _traverse_atoms(self, fh, path, stop_pos=None, curr_path=None):
header_size = 8
atom_header = fh.read(header_size)
while len(atom_header) == header_size:
atom_size = struct.unpack('>I', atom_header[:4])[0] - header_size
atom_type = atom_header[4:]
if curr_path is None: # keep track how we traversed in the tree
curr_path = [atom_type]
if atom_size <= 0: # empty atom, jump to next one
atom_header = fh.read(header_size)
continue
if DEBUG:
stderr('%s pos: %d atom: %s len: %d' % (' ' * 4 * len(curr_path), fh.tell() - header_size, atom_type, atom_size + header_size))
if atom_type in self.VERSIONED_ATOMS: # jump atom version for now
fh.seek(4, os.SEEK_CUR)
if atom_type in self.FLAGGED_ATOMS: # jump atom flags for now
fh.seek(4, os.SEEK_CUR)
sub_path = path.get(atom_type, None)
# if the path leaf is a dict, traverse deeper into the tree:
if issubclass(type(sub_path), MutableMapping):
atom_end_pos = fh.tell() + atom_size
self._traverse_atoms(fh, path=sub_path, stop_pos=atom_end_pos,
curr_path=curr_path + [atom_type])
# if the path-leaf is a callable, call it on the atom data
elif callable(sub_path):
for fieldname, value in sub_path(fh.read(atom_size)).items():
if DEBUG:
stderr(' ' * 4 * len(curr_path), 'FIELD: ', fieldname)
if fieldname:
self._set_field(fieldname, value)
# if no action was specified using dict or callable, jump over atom
else:
fh.seek(atom_size, os.SEEK_CUR)
# check if we have reached the end of this branch:
if stop_pos and fh.tell() >= stop_pos:
return # return to parent (next parent node in tree)
atom_header = fh.read(header_size) # read next atom
class ID3(TinyTag):
FRAME_ID_TO_FIELD = { # Mapping from Frame ID to a field of the TinyTag
'COMM': 'comment', 'COM': 'comment',
'TRCK': 'track', 'TRK': 'track',
'TYER': 'year', 'TYE': 'year',
'TALB': 'album', 'TAL': 'album',
'TPE1': 'artist', 'TP1': 'artist',
'TIT2': 'title', 'TT2': 'title',
'TCON': 'genre', 'TCO': 'genre',
'TPOS': 'disc',
'TPE2': 'albumartist', 'TCOM': 'composer',
}
IMAGE_FRAME_IDS = {'APIC', 'PIC'}
PARSABLE_FRAME_IDS = set(FRAME_ID_TO_FIELD.keys()).union(IMAGE_FRAME_IDS)
_MAX_ESTIMATION_SEC = 30
_CBR_DETECTION_FRAME_COUNT = 5
_USE_XING_HEADER = True # much faster, but can be deactivated for testing
ID3V1_GENRES = [
'Blues', 'Classic Rock', 'Country', 'Dance', 'Disco',
'Funk', 'Grunge', 'Hip-Hop', 'Jazz', 'Metal', 'New Age', 'Oldies',
'Other', 'Pop', 'R&B', 'Rap', 'Reggae', 'Rock', 'Techno', 'Industrial',
'Alternative', 'Ska', 'Death Metal', 'Pranks', 'Soundtrack',
'Euro-Techno', 'Ambient', 'Trip-Hop', 'Vocal', 'Jazz+Funk', 'Fusion',
'Trance', 'Classical', 'Instrumental', 'Acid', 'House', 'Game',
'Sound Clip', 'Gospel', 'Noise', 'AlternRock', 'Bass', 'Soul', 'Punk',
'Space', 'Meditative', 'Instrumental Pop', 'Instrumental Rock',
'Ethnic', 'Gothic', 'Darkwave', 'Techno-Industrial', 'Electronic',
'Pop-Folk', 'Eurodance', 'Dream', 'Southern Rock', 'Comedy', 'Cult',
'Gangsta', 'Top 40', 'Christian Rap', 'Pop/Funk', 'Jungle',
'Native American', 'Cabaret', 'New Wave', 'Psychadelic', 'Rave',
'Showtunes', 'Trailer', 'Lo-Fi', 'Tribal', 'Acid Punk', 'Acid Jazz',
'Polka', 'Retro', 'Musical', 'Rock & Roll', 'Hard Rock',
# Wimamp Extended Genres
'Folk', 'Folk-Rock', 'National Folk', 'Swing', 'Fast Fusion', 'Bebob',
'Latin', 'Revival', 'Celtic', 'Bluegrass', 'Avantgarde', 'Gothic Rock',
'Progressive Rock', 'Psychedelic Rock', 'Symphonic Rock', 'Slow Rock',
'Big Band', 'Chorus', 'Easy Listening', 'Acoustic', 'Humour', 'Speech',
'Chanson', 'Opera', 'Chamber Music', 'Sonata', 'Symphony', 'Booty Bass',
'Primus', 'Porn Groove', 'Satire', 'Slow Jam', 'Club', 'Tango', 'Samba',
'Folklore', 'Ballad', 'Power Ballad', 'Rhythmic Soul', 'Freestyle',
'Duet', 'Punk Rock', 'Drum Solo', 'A capella', 'Euro-House',
'Dance Hall', 'Goa', 'Drum & Bass',
# according to https://de.wikipedia.org/wiki/Liste_der_ID3v1-Genres:
'Club-House', 'Hardcore Techno', 'Terror', 'Indie', 'BritPop',
'', # don't use ethnic slur ("Negerpunk", WTF!)
'Polsk Punk', 'Beat', 'Christian Gangsta Rap', 'Heavy Metal',
'Black Metal', 'Contemporary Christian', 'Christian Rock',
# WinAmp 1.91
'Merengue', 'Salsa', 'Thrash Metal', 'Anime', 'Jpop', 'Synthpop',
# WinAmp 5.6
'Abstract', 'Art Rock', 'Baroque', 'Bhangra', 'Big Beat', 'Breakbeat',
'Chillout', 'Downtempo', 'Dub', 'EBM', 'Eclectic', 'Electro',
'Electroclash', 'Emo', 'Experimental', 'Garage', 'Illbient',
'Industro-Goth', 'Jam Band', 'Krautrock', 'Leftfield', 'Lounge',
'Math Rock', 'New Romantic', 'Nu-Breakz', 'Post-Punk', 'Post-Rock',
'Psytrance', 'Shoegaze', 'Space Rock', 'Trop Rock', 'World Music',
'Neoclassical', 'Audiobook', 'Audio Theatre', 'Neue Deutsche Welle',
'Podcast', 'Indie Rock', 'G-Funk', 'Dubstep', 'Garage Rock', 'Psybient',
]
def __init__(self, filehandler, filesize, *args, **kwargs):
TinyTag.__init__(self, filehandler, filesize, *args, **kwargs)
# save position after the ID3 tag for duration mesurement speedup
self._bytepos_after_id3v2 = 0
@classmethod
def set_estimation_precision(cls, estimation_in_seconds):
cls._MAX_ESTIMATION_SEC = estimation_in_seconds
# see this page for the magic values used in mp3:
# http://www.mpgedit.org/mpgedit/mpeg_format/mpeghdr.htm
samplerates = [
[11025, 12000, 8000], # MPEG 2.5
[], # reserved
[22050, 24000, 16000], # MPEG 2
[44100, 48000, 32000], # MPEG 1
]
v1l1 = [0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448, 0]
v1l2 = [0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 0]
v1l3 = [0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 0]
v2l1 = [0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256, 0]
v2l2 = [0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160, 0]
v2l3 = v2l2
bitrate_by_version_by_layer = [
[None, v2l3, v2l2, v2l1], # MPEG Version 2.5 # note that the layers go
None, # reserved # from 3 to 1 by design.
[None, v2l3, v2l2, v2l1], # MPEG Version 2 # the first layer id is
[None, v1l3, v1l2, v1l1], # MPEG Version 1 # reserved
]
samples_per_frame = 1152 # the default frame size for mp3
channels_per_channel_mode = [
2, # 00 Stereo
2, # 01 Joint stereo (Stereo)
2, # 10 Dual channel (2 mono channels)
1, # 11 Single channel (Mono)
]
@staticmethod
def _parse_xing_header(fh):
# see: http://www.mp3-tech.org/programmer/sources/vbrheadersdk.zip
fh.seek(4, os.SEEK_CUR) # read over Xing header
header_flags = struct.unpack('>i', fh.read(4))[0]
frames = byte_count = toc = vbr_scale = None
if header_flags & 1: # FRAMES FLAG
frames = struct.unpack('>i', fh.read(4))[0]
if header_flags & 2: # BYTES FLAG
byte_count = struct.unpack('>i', fh.read(4))[0]
if header_flags & 4: # TOC FLAG
toc = [struct.unpack('>i', fh.read(4))[0] for _ in range(100)]
if header_flags & 8: # VBR SCALE FLAG
vbr_scale = struct.unpack('>i', fh.read(4))[0]
return frames, byte_count, toc, vbr_scale
def _determine_duration(self, fh):
max_estimation_frames = (ID3._MAX_ESTIMATION_SEC * 44100) // ID3.samples_per_frame
frame_size_accu = 0
header_bytes = 4
frames = 0 # count frames for determining mp3 duration
bitrate_accu = 0 # add up bitrates to find average bitrate to detect
last_bitrates = [] # CBR mp3s (multiple frames with same bitrates)
# seek to first position after id3 tag (speedup for large header)
fh.seek(self._bytepos_after_id3v2)
while True:
# reading through garbage until 11 '1' sync-bits are found
b = fh.peek(4)
if len(b) < 4:
break # EOF
sync, conf, bitrate_freq, rest = struct.unpack('BBBB', b[0:4])
br_id = (bitrate_freq >> 4) & 0x0F # biterate id
sr_id = (bitrate_freq >> 2) & 0x03 # sample rate id
padding = 1 if bitrate_freq & 0x02 > 0 else 0
mpeg_id = (conf >> 3) & 0x03
layer_id = (conf >> 1) & 0x03
channel_mode = (rest >> 6) & 0x03
# check for eleven 1s, validate bitrate and sample rate
if not b[:2] > b'\xFF\xE0' or br_id > 14 or br_id == 0 or sr_id == 3 or layer_id == 0 or mpeg_id == 1:
idx = b.find(b'\xFF', 1) # invalid frame, find next sync header
if idx == -1:
idx = len(b) # not found: jump over the current peek buffer
fh.seek(max(idx, 1), os.SEEK_CUR)
continue
try:
self.channels = self.channels_per_channel_mode[channel_mode]
frame_bitrate = ID3.bitrate_by_version_by_layer[mpeg_id][layer_id][br_id]
self.samplerate = ID3.samplerates[mpeg_id][sr_id]
except (IndexError, TypeError):
raise TinyTagException('mp3 parsing failed')
# There might be a xing header in the first frame that contains
# all the info we need, otherwise parse multiple frames to find the
# accurate average bitrate
if frames == 0 and ID3._USE_XING_HEADER:
xing_header_offset = b.find(b'Xing')
if xing_header_offset != -1:
fh.seek(xing_header_offset, os.SEEK_CUR)
xframes, byte_count, toc, vbr_scale = ID3._parse_xing_header(fh)
if xframes and xframes != 0 and byte_count:
self.duration = xframes * ID3.samples_per_frame / float(self.samplerate)
self.bitrate = int(byte_count * 8 / self.duration / 1000)
self.audio_offset = fh.tell()
return
continue
frames += 1 # it's most probably an mp3 frame
bitrate_accu += frame_bitrate
if frames == 1:
self.audio_offset = fh.tell()
if frames <= ID3._CBR_DETECTION_FRAME_COUNT:
last_bitrates.append(frame_bitrate)
fh.seek(4, os.SEEK_CUR) # jump over peeked bytes
frame_length = (144000 * frame_bitrate) // self.samplerate + padding
frame_size_accu += frame_length
# if bitrate does not change over time its probably CBR
is_cbr = (frames == ID3._CBR_DETECTION_FRAME_COUNT and
len(set(last_bitrates)) == 1)
if frames == max_estimation_frames or is_cbr:
# try to estimate duration
fh.seek(-128, 2) # jump to last byte (leaving out id3v1 tag)
audio_stream_size = fh.tell() - self.audio_offset
est_frame_count = audio_stream_size / (frame_size_accu / float(frames))
samples = est_frame_count * ID3.samples_per_frame
self.duration = samples / float(self.samplerate)
self.bitrate = int(bitrate_accu / frames)
return
if frame_length > 1: # jump over current frame body
fh.seek(frame_length - header_bytes, os.SEEK_CUR)
if self.samplerate:
self.duration = frames * ID3.samples_per_frame / float(self.samplerate)
def _parse_tag(self, fh):
self._parse_id3v2(fh)
attrs = ['track', 'track_total', 'title', 'artist', 'album', 'albumartist', 'year', 'genre']
has_all_tags = all(getattr(self, attr) for attr in attrs)
if not has_all_tags and self.filesize > 128:
fh.seek(-128, os.SEEK_END) # try parsing id3v1 in last 128 bytes
self._parse_id3v1(fh)
def _parse_id3v2(self, fh):
# for info on the specs, see: http://id3.org/Developer%20Information
header = struct.unpack('3sBBB4B', _read(fh, 10))
tag = codecs.decode(header[0], 'ISO-8859-1')
# check if there is an ID3v2 tag at the beginning of the file
if tag == 'ID3':
major, rev = header[1:3]
if DEBUG:
stderr('Found id3 v2.%s' % major)
# unsync = (header[3] & 0x80) > 0
extended = (header[3] & 0x40) > 0
# experimental = (header[3] & 0x20) > 0
# footer = (header[3] & 0x10) > 0
size = self._calc_size(header[4:8], 7)
self._bytepos_after_id3v2 = size
end_pos = fh.tell() + size
parsed_size = 0
if extended: # just read over the extended header.
size_bytes = struct.unpack('4B', _read(fh, 6)[0:4])
extd_size = self._calc_size(size_bytes, 7)
fh.seek(extd_size - 6, os.SEEK_CUR) # jump over extended_header
while parsed_size < size:
frame_size = self._parse_frame(fh, id3version=major)
if frame_size == 0:
break
parsed_size += frame_size
fh.seek(end_pos, os.SEEK_SET)
def _parse_id3v1(self, fh):
if fh.read(3) == b'TAG': # check if this is an ID3 v1 tag
def asciidecode(x):
return self._unpad(codecs.decode(x, 'latin1'))
fields = fh.read(30 + 30 + 30 + 4 + 30 + 1)
self._set_field('title', fields[:30], transfunc=asciidecode)
self._set_field('artist', fields[30:60], transfunc=asciidecode)
self._set_field('album', fields[60:90], transfunc=asciidecode)
self._set_field('year', fields[90:94], transfunc=asciidecode)
comment = fields[94:124]
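# ID3v1.1 reuses the last two bytes of the 30-byte comment field: a zero
# byte followed by the track number; the check below detects that layout.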
if b'\x00\x00' < comment[-2:] < b'\x01\x00':
self._set_field('track', str(ord(comment[-1:])))
comment = comment[:-2]
self._set_field('comment', comment, transfunc=asciidecode)
genre_id = ord(fields[124:125])
if genre_id < len(ID3.ID3V1_GENRES):
self.genre = ID3.ID3V1_GENRES[genre_id]
def _parse_frame(self, fh, id3version=False):
# ID3v2.2 especially ugly. see: http://id3.org/id3v2-00
frame_header_size = 6 if id3version == 2 else 10
frame_size_bytes = 3 if id3version == 2 else 4
binformat = '3s3B' if id3version == 2 else '4s4B2B'
bits_per_byte = 7 if id3version == 4 else 8 # only id3v2.4 is synchsafe
frame_header_data = fh.read(frame_header_size)
if len(frame_header_data) != frame_header_size:
return 0
frame = struct.unpack(binformat, frame_header_data)
frame_id = self._decode_string(frame[0])
frame_size = self._calc_size(frame[1:1+frame_size_bytes], bits_per_byte)
if DEBUG:
stderr('Found id3 Frame %s at %d-%d of %d' % (frame_id, fh.tell(), fh.tell() + frame_size, self.filesize))
if frame_size > 0:
# flags = frame[1+frame_size_bytes:] # dont care about flags.
if frame_id not in ID3.PARSABLE_FRAME_IDS: # jump over unparsable frames
fh.seek(frame_size, os.SEEK_CUR)
return frame_size
content = fh.read(frame_size)
fieldname = ID3.FRAME_ID_TO_FIELD.get(frame_id)
if fieldname:
self._set_field(fieldname, content, self._decode_string)
elif frame_id in self.IMAGE_FRAME_IDS and self._load_image:
# See section 4.14: http://id3.org/id3v2.4.0-frames
if frame_id == 'PIC': # ID3 v2.2:
desc_end_pos = content.index(b'\x00', 1) + 1
else: # ID3 v2.3+
mimetype_end_pos = content.index(b'\x00', 1) + 1
desc_start_pos = mimetype_end_pos + 1 # jump over picture type
desc_end_pos = content.index(b'\x00', desc_start_pos) + 1
if content[desc_end_pos:desc_end_pos+1] == b'\x00':
desc_end_pos += 1 # the description ends with 1 or 2 null bytes
self._image_data = content[desc_end_pos:]
return frame_size
return 0
def _decode_string(self, bytestr):
try: # it's not my fault, this is the spec.
first_byte = bytestr[:1]
if first_byte == b'\x00': # ISO-8859-1
bytestr = bytestr[1:]
encoding = 'ISO-8859-1'
elif first_byte == b'\x01': # UTF-16 with BOM
bytestr = bytestr[1:]
if bytestr[:5] == b'eng\xff\xfe':
bytestr = bytestr[3:] # remove language (but leave BOM)
if bytestr[:5] == b'eng\xfe\xff':
bytestr = bytestr[3:] # remove language (but leave BOM)
if bytestr[:4] == b'eng\x00':
bytestr = bytestr[4:] # remove language
if bytestr[:1] == b'\x00':
bytestr = bytestr[1:] # strip optional additional null byte
# read byte order mark to determine endianness
encoding = 'UTF-16be' if bytestr[0:2] == b'\xfe\xff' else 'UTF-16le'
# strip the bom and optional null bytes
bytestr = bytestr[2:] if len(bytestr) % 2 == 0 else bytestr[2:-1]
# remove ADDITIONAL EXTRA BOM :facepalm:
if bytestr[:4] == b'\x00\x00\xff\xfe':
bytestr = bytestr[4:]
elif first_byte == b'\x02': # UTF-16LE
# strip optional null byte, if byte count uneven
bytestr = bytestr[1:-1] if len(bytestr) % 2 == 0 else bytestr[1:]
encoding = 'UTF-16le'
elif first_byte == b'\x03': # UTF-8
bytestr = bytestr[1:]
encoding = 'UTF-8'
else:
bytestr = bytestr
encoding = 'ISO-8859-1' # wild guess
if bytestr[:4] == b'eng\x00':
bytestr = bytestr[4:] # remove language
errors = 'ignore' if self._ignore_errors else 'strict'
return self._unpad(codecs.decode(bytestr, encoding, errors))
except UnicodeDecodeError:
raise TinyTagException('Error decoding ID3 Tag!')
def _calc_size(self, bytestr, bits_per_byte):
# length of some mp3 header fields is described by 7 or 8-bit-bytes
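# Example: the synchsafe bytes (0x00, 0x00, 0x02, 0x01) with 7 bits per
# byte decode to (2 << 7) + 1 = 257, whereas plain 8-bit bytes would
# decode to (2 << 8) + 1 = 513.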
return reduce(lambda accu, elem: (accu << bits_per_byte) + elem, bytestr, 0)
class Ogg(TinyTag):
def __init__(self, filehandler, filesize, *args, **kwargs):
TinyTag.__init__(self, filehandler, filesize, *args, **kwargs)
self._tags_parsed = False
self._max_samplenum = 0 # maximum sample position ever read
def _determine_duration(self, fh):
max_page_size = 65536 # https://xiph.org/ogg/doc/libogg/ogg_page.html
if not self._tags_parsed:
self._parse_tag(fh) # determine sample rate
fh.seek(0) # and rewind to start
if self.filesize > max_page_size:
fh.seek(-max_page_size, 2) # go to last possible page position
while True:
b = fh.peek(4)
if len(b) == 0:
return # EOF
if b[:4] == b'OggS': # look for an ogg header
for _ in self._parse_pages(fh):
pass # parse all remaining pages
self.duration = self._max_samplenum / float(self.samplerate)
else:
idx = b.find(b'OggS') # try to find header in peeked data
seekpos = idx if idx != -1 else len(b) - 3
fh.seek(max(seekpos, 1), os.SEEK_CUR)
def _parse_tag(self, fh):
page_start_pos = fh.tell() # set audio_offset later if it's audio data
for packet in self._parse_pages(fh):
walker = BytesIO(packet)
if packet[0:7] == b"\x01vorbis":
(channels, self.samplerate, max_bitrate, bitrate,
min_bitrate) = struct.unpack("<B4i", packet[11:28])
if not self.audio_offset:
self.bitrate = bitrate / 1024.0
self.audio_offset = page_start_pos
elif packet[0:7] == b"\x03vorbis":
walker.seek(7, os.SEEK_CUR) # jump over header name
self._parse_vorbis_comment(walker)
elif packet[0:8] == b'OpusHead': # parse opus header
# https://www.videolan.org/developers/vlc/modules/codec/opus_header.c
# https://mf4.xiph.org/jenkins/view/opus/job/opusfile-unix/ws/doc/html/structOpusHead.html
walker.seek(8, os.SEEK_CUR) # jump over header name
(version, ch, _, sr, _, _) = struct.unpack("<BBHIHB", walker.read(11))
if (version & 0xF0) == 0: # only major version 0 supported
self.channels = ch
self.samplerate = 48000 # internally opus always uses 48khz
elif packet[0:8] == b'OpusTags': # parse opus metadata:
walker.seek(8, os.SEEK_CUR) # jump over header name
self._parse_vorbis_comment(walker)
else:
break
page_start_pos = fh.tell()
def _parse_vorbis_comment(self, fh):
# for the spec, see: http://xiph.org/vorbis/doc/v-comment.html
# discnumber tag based on: https://en.wikipedia.org/wiki/Vorbis_comment
# https://sno.phy.queensu.ca/~phil/exiftool/TagNames/Vorbis.html
comment_type_to_attr_mapping = {
'album': 'album',
'albumartist': 'albumartist',
'title': 'title',
'artist': 'artist',
'date': 'year',
'tracknumber': 'track',
'discnumber': 'disc',
'genre': 'genre',
'description': 'comment',
'composer': 'composer',
}
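# Comment block layout: a 32-bit vendor length, the vendor string, a 32-bit
# entry count, then for each entry a 32-bit length followed by a UTF-8
# "KEY=value" pair, e.g. "TITLE=An Example Song".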
vendor_length = struct.unpack('I', fh.read(4))[0]
fh.seek(vendor_length, os.SEEK_CUR) # jump over vendor
elements = struct.unpack('I', fh.read(4))[0]
for i in range(elements):
length = struct.unpack('I', fh.read(4))[0]
try:
keyvalpair = codecs.decode(fh.read(length), 'UTF-8')
except UnicodeDecodeError:
continue
if '=' in keyvalpair:
key, value = keyvalpair.split('=', 1)
fieldname = comment_type_to_attr_mapping.get(key.lower())
if fieldname:
self._set_field(fieldname, value)
def _parse_pages(self, fh):
# for the spec, see: https://wiki.xiph.org/Ogg
previous_page = b'' # contains data from previous (continuing) pages
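# Each Ogg page header lists up to 255 segment sizes ("lacing values"); a
# packet spans consecutive segments until a lacing value below 255
# terminates it, and may continue onto the next page, hence previous_page.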
header_data = fh.read(27) # read ogg page header
while len(header_data) != 0:
header = struct.unpack('<4sBBqIIiB', header_data)
oggs, version, flags, pos, serial, pageseq, crc, segments = header
self._max_samplenum = max(self._max_samplenum, pos)
if oggs != b'OggS' or version != 0:
raise TinyTagException('Not a valid ogg file!')
segsizes = struct.unpack('B'*segments, fh.read(segments))
total = 0
for segsize in segsizes: # read all segments
total += segsize
if total < 255: # a segment shorter than 255 bytes ends a packet
yield previous_page + fh.read(total)
previous_page = b''
total = 0
if total != 0:
if total % 255 == 0:
previous_page += fh.read(total)
else:
yield previous_page + fh.read(total)
previous_page = b''
header_data = fh.read(27)
class Wave(TinyTag):
# https://sno.phy.queensu.ca/~phil/exiftool/TagNames/RIFF.html
riff_mapping = {
b'INAM': 'title',
b'TITL': 'title',
b'IART': 'artist',
b'ICMT': 'comment',
b'ICRD': 'year',
b'IGNR': 'genre',
b'TRCK': 'track',
b'PRT1': 'track',
b'PRT2': 'track_number',
b'YEAR': 'year',
# riff format is lacking the composer field.
}
def __init__(self, filehandler, filesize, *args, **kwargs):
TinyTag.__init__(self, filehandler, filesize, *args, **kwargs)
self._duration_parsed = False
def _determine_duration(self, fh):
# see: https://ccrma.stanford.edu/courses/422/projects/WaveFormat/
# and: https://en.wikipedia.org/wiki/WAV
riff, size, fformat = struct.unpack('4sI4s', fh.read(12))
if riff != b'RIFF' or fformat != b'WAVE':
raise TinyTagException('not a wave file!')
bitdepth = 16 # assume 16bit depth (CD quality)
chunk_header = fh.read(8)
while len(chunk_header) == 8:
subchunkid, subchunksize = struct.unpack('4sI', chunk_header)
if subchunkid == b'fmt ':
_, self.channels, self.samplerate = struct.unpack('HHI', fh.read(8))
_, _, bitdepth = struct.unpack('<IHH', fh.read(8))
self.bitrate = self.samplerate * self.channels * bitdepth / 1024.0
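# e.g. 44100 Hz stereo at 16 bit gives 44100 * 2 * 16 / 1024 ~= 1378.1
# (note the 1024 divisor, so this is kibibit/s rather than the
# conventional 1411 kbit/s figure).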
elif subchunkid == b'data':
self.duration = float(subchunksize)/self.channels/self.samplerate/(bitdepth/8)
self.audio_offset = fh.tell() - 8 # offset of the data chunk header
fh.seek(subchunksize, 1)
elif subchunkid == b'LIST':
is_info = fh.read(4) # check INFO header
if is_info != b'INFO': # jump over non-INFO sections
fh.seek(subchunksize - 4, os.SEEK_CUR)
else:
sub_fh = BytesIO(fh.read(subchunksize - 4))
field = sub_fh.read(4)
while len(field) == 4:
data_length = struct.unpack('I', sub_fh.read(4))[0]
data = sub_fh.read(data_length).split(b'\x00', 1)[0] # strip zero-byte
data = codecs.decode(data, 'utf-8')
fieldname = self.riff_mapping.get(field)
if fieldname:
self._set_field(fieldname, data)
field = sub_fh.read(4)
elif subchunkid == b'id3 ' or subchunkid == b'ID3 ':
id3 = ID3(fh, 0)
id3._parse_id3v2(fh)
self.update(id3)
else: # some other chunk, just skip the data
fh.seek(subchunksize, 1)
chunk_header = fh.read(8)
self._duration_parsed = True
def _parse_tag(self, fh):
if not self._duration_parsed:
self._determine_duration(fh) # parse whole file to determine tags:(
class Flac(TinyTag):
METADATA_STREAMINFO = 0
METADATA_VORBIS_COMMENT = 4
def load(self, tags, duration, image=False):
header = self._filehandler.peek(4)
if header[:3] == b'ID3': # parse ID3 header if it exists
id3 = ID3(self._filehandler, 0)
id3._parse_id3v2(self._filehandler)
self.update(id3)
header = self._filehandler.peek(4) # after ID3 should be fLaC
if header[:4] != b'fLaC':
raise TinyTagException('Invalid flac header')
self._filehandler.seek(4, os.SEEK_CUR)
self._determine_duration(self._filehandler, skip_tags=not tags)
def _determine_duration(self, fh, skip_tags=False):
# for spec, see https://xiph.org/flac/ogg_mapping.html
header_data = fh.read(4)
while len(header_data):
meta_header = struct.unpack('B3B', header_data)
block_type = meta_header[0] & 0x7f
is_last_block = meta_header[0] & 0x80
size = _bytes_to_int(meta_header[1:4])
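# FLAC metadata block header: bit 7 of the first byte flags the last
# block, bits 0-6 give the block type, and the following 24 bits
# (big-endian) give the block length in bytes.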
# http://xiph.org/flac/format.html#metadata_block_streaminfo
if block_type == Flac.METADATA_STREAMINFO:
stream_info_header = fh.read(size)
if len(stream_info_header) < 34: # invalid streaminfo
return
header = struct.unpack('HH3s3s8B16s', stream_info_header)
# From the xiph documentation:
# py | <bits>
# ----------------------------------------------
# H | <16> The minimum block size (in samples)
# H | <16> The maximum block size (in samples)
# 3s | <24> The minimum frame size (in bytes)
# 3s | <24> The maximum frame size (in bytes)
# 8B | <20> Sample rate in Hz.
# | <3> (number of channels)-1.
# | <5> (bits per sample)-1.
# | <36> Total samples in stream.
# 16s| <128> MD5 signature
# min_blk, max_blk, min_frm, max_frm = header[0:4]
# min_frm = _bytes_to_int(struct.unpack('3B', min_frm))
# max_frm = _bytes_to_int(struct.unpack('3B', max_frm))
# channels--. bits total samples
# |----- samplerate -----| |-||----| |---------~ ~----|
# 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000
# #---4---# #---5---# #---6---# #---7---# #--8-~ ~-12-#
self.samplerate = _bytes_to_int(header[4:7]) >> 4
self.channels = ((header[6] >> 1) & 0x07) + 1
# bit_depth = ((header[6] & 1) << 4) + ((header[7] & 0xF0) >> 4)
# bit_depth = (bit_depth + 1)
total_sample_bytes = [(header[7] & 0x0F)] + list(header[8:12])
total_samples = _bytes_to_int(total_sample_bytes)
self.duration = float(total_samples) / self.samplerate
if self.duration > 0:
self.bitrate = self.filesize / self.duration * 8 / 1024
elif block_type == Flac.METADATA_VORBIS_COMMENT and not skip_tags:
oggtag = Ogg(fh, 0)
oggtag._parse_vorbis_comment(fh)
self.update(oggtag)
elif block_type >= 127:
return # invalid block type
else:
fh.seek(size, 1) # seek over this block
if is_last_block:
return
header_data = fh.read(4)
class Wma(TinyTag):
ASF_CONTENT_DESCRIPTION_OBJECT = b'3&\xb2u\x8ef\xcf\x11\xa6\xd9\x00\xaa\x00b\xcel'
ASF_EXTENDED_CONTENT_DESCRIPTION_OBJECT = b'@\xa4\xd0\xd2\x07\xe3\xd2\x11\x97\xf0\x00\xa0\xc9^\xa8P'
STREAM_BITRATE_PROPERTIES_OBJECT = b'\xceu\xf8{\x8dF\xd1\x11\x8d\x82\x00`\x97\xc9\xa2\xb2'
ASF_FILE_PROPERTY_OBJECT = b'\xa1\xdc\xab\x8cG\xa9\xcf\x11\x8e\xe4\x00\xc0\x0c Se'
ASF_STREAM_PROPERTIES_OBJECT = b'\x91\x07\xdc\xb7\xb7\xa9\xcf\x11\x8e\xe6\x00\xc0\x0c Se'
STREAM_TYPE_ASF_AUDIO_MEDIA = b'@\x9ei\xf8M[\xcf\x11\xa8\xfd\x00\x80_\\D+'
# see:
# http://web.archive.org/web/20131203084402/http://msdn.microsoft.com/en-us/library/bb643323.aspx
# and (Japanese, but nonetheless helpful)
# http://uguisu.skr.jp/Windows/format_asf.html
def __init__(self, filehandler, filesize, *args, **kwargs):
TinyTag.__init__(self, filehandler, filesize, *args, **kwargs)
self.__tag_parsed = False
def _determine_duration(self, fh):
if not self.__tag_parsed:
self._parse_tag(fh)
def read_blocks(self, fh, blocks):
# blocks are a list(tuple('fieldname', byte_count, cast_int), ...)
decoded = {}
for block in blocks:
val = fh.read(block[1])
if block[2]:
val = _bytes_to_int_le(val)
decoded[block[0]] = val
return decoded
def __bytes_to_guid(self, obj_id_bytes):
return '-'.join([
hex(_bytes_to_int_le(obj_id_bytes[:-12]))[2:].zfill(6),
hex(_bytes_to_int_le(obj_id_bytes[-12:-10]))[2:].zfill(4),
hex(_bytes_to_int_le(obj_id_bytes[-10:-8]))[2:].zfill(4),
hex(_bytes_to_int(obj_id_bytes[-8:-6]))[2:].zfill(4),
hex(_bytes_to_int(obj_id_bytes[-6:]))[2:].zfill(12),
])
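# For reference, the ASF header object bytes 30 26 B2 75 8E 66 CF 11 A6 D9
# 00 AA 00 62 CE 6C map (via the mixed-endian grouping above) to the GUID
# 75b22630-668e-11cf-a6d9-00aa0062ce6c.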
def __decode_string(self, bytestring):
return self._unpad(codecs.decode(bytestring, 'utf-16'))
def __decode_ext_desc(self, value_type, value):
""" decode ASF_EXTENDED_CONTENT_DESCRIPTION_OBJECT values"""
if value_type == 0: # Unicode string
return self.__decode_string(value)
elif value_type == 1: # BYTE array
return value
elif 1 < value_type < 6: # DWORD / QWORD / WORD
return _bytes_to_int_le(value)
def _parse_tag(self, fh):
self.__tag_parsed = True
guid = fh.read(16) # 128 bit GUID
if guid != b'0&\xb2u\x8ef\xcf\x11\xa6\xd9\x00\xaa\x00b\xcel':
return # not a valid ASF container! see: http://www.garykessler.net/library/file_sigs.html
struct.unpack('Q', fh.read(8))[0] # size
struct.unpack('I', fh.read(4))[0] # obj_count
if fh.read(2) != b'\x01\x02':
# http://web.archive.org/web/20131203084402/http://msdn.microsoft.com/en-us/library/bb643323.aspx#_Toc521913958
return # not a valid asf header!
while True:
object_id = fh.read(16)
object_size = _bytes_to_int_le(fh.read(8))
if object_size == 0 or object_size > self.filesize:
break # invalid object, stop parsing.
if object_id == Wma.ASF_CONTENT_DESCRIPTION_OBJECT:
len_blocks = self.read_blocks(fh, [
('title_length', 2, True),
('author_length', 2, True),
('copyright_length', 2, True),
('description_length', 2, True),
('rating_length', 2, True),
])
data_blocks = self.read_blocks(fh, [
('title', len_blocks['title_length'], False),
('artist', len_blocks['author_length'], False),
('', len_blocks['copyright_length'], True),
('comment', len_blocks['description_length'], False),
('', len_blocks['rating_length'], True),
])
for field_name, bytestring in data_blocks.items():
if field_name:
self._set_field(field_name, bytestring, self.__decode_string)
elif object_id == Wma.ASF_EXTENDED_CONTENT_DESCRIPTION_OBJECT:
mapping = {
'WM/TrackNumber': 'track',
'WM/PartOfSet': 'disc',
'WM/Year': 'year',
'WM/AlbumArtist': 'albumartist',
'WM/Genre': 'genre',
'WM/AlbumTitle': 'album',
'WM/Composer': 'composer',
}
# see: http://web.archive.org/web/20131203084402/http://msdn.microsoft.com/en-us/library/bb643323.aspx#_Toc509555195
descriptor_count = _bytes_to_int_le(fh.read(2))
for _ in range(descriptor_count):
name_len = _bytes_to_int_le(fh.read(2))
name = self.__decode_string(fh.read(name_len))
value_type = _bytes_to_int_le(fh.read(2))
value_len = _bytes_to_int_le(fh.read(2))
value = fh.read(value_len)
field_name = mapping.get(name)
if field_name:
field_value = self.__decode_ext_desc(value_type, value)
self._set_field(field_name, field_value)
elif object_id == Wma.ASF_FILE_PROPERTY_OBJECT:
blocks = self.read_blocks(fh, [
('file_id', 16, False),
('file_size', 8, False),
('creation_date', 8, True),
('data_packets_count', 8, True),
('play_duration', 8, True),
('send_duration', 8, True),
('preroll', 8, True),
('flags', 4, False),
('minimum_data_packet_size', 4, True),
('maximum_data_packet_size', 4, True),
('maximum_bitrate', 4, False),
])
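# play_duration is expressed in 100-nanosecond units, hence the division
# by 10**7 below; note that it also includes the preroll time, which this
# parser does not subtract.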
self.duration = blocks.get('play_duration') / float(10000000)
elif object_id == Wma.ASF_STREAM_PROPERTIES_OBJECT:
blocks = self.read_blocks(fh, [
('stream_type', 16, False),
('error_correction_type', 16, False),
('time_offset', 8, True),
('type_specific_data_length', 4, True),
('error_correction_data_length', 4, True),
('flags', 2, True),
('reserved', 4, False)
])
already_read = 0
if blocks['stream_type'] == Wma.STREAM_TYPE_ASF_AUDIO_MEDIA:
stream_info = self.read_blocks(fh, [
('codec_id_format_tag', 2, True),
('number_of_channels', 2, True),
('samples_per_second', 4, True),
('avg_bytes_per_second', 4, True),
('block_alignment', 2, True),
('bits_per_sample', 2, True),
])
self.samplerate = stream_info['samples_per_second']
self.bitrate = stream_info['avg_bytes_per_second'] * 8 / float(1000)
already_read = 16
fh.seek(blocks['type_specific_data_length'] - already_read, os.SEEK_CUR)
fh.seek(blocks['error_correction_data_length'], os.SEEK_CUR)
else:
fh.seek(object_size - 24, os.SEEK_CUR) # skip over unknown object ids
| 47.79913
| 143
| 0.563045
|
36b9b7cc4c139f1a3a4d25488217c2bcf4f3365f
| 4,898
|
py
|
Python
|
app/user/tests/test_user_api.py
|
carlos28d/recipe-app-api
|
1eb7e4958124f20c126211a22157fe9c6eed2ff2
|
[
"MIT"
] | null | null | null |
app/user/tests/test_user_api.py
|
carlos28d/recipe-app-api
|
1eb7e4958124f20c126211a22157fe9c6eed2ff2
|
[
"MIT"
] | null | null | null |
app/user/tests/test_user_api.py
|
carlos28d/recipe-app-api
|
1eb7e4958124f20c126211a22157fe9c6eed2ff2
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
ME_URL = reverse('user:me')
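# reverse() resolves these endpoints by URL name; the names are expected to
# be registered under the 'user' app namespace (user:create, user:token,
# user:me).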
def create_user(**params):
return get_user_model().objects.create_user(**params)
class PublicUserApiTests(TestCase):
"""Test the users API (public)"""
def setUp(self):
self.client = APIClient()
def test_create_valid_user_success(self):
"""Test creating user with valid payload is successful"""
payload = {
'email': 'test@gmail.com',
'password': 'testpass',
'name': 'Test name'
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(**res.data)
self.assertTrue(user.check_password(payload['password']))
self.assertNotIn('password', res.data)
def test_user_exists(self):
"""Test creating a user that already exists fails"""
payload = {
'email': 'test@gmail.com',
'password': 'pw',
'name': 'Test',
}
create_user(**payload)
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_password_too_short(self):
"""Test that the password must be more than 5 characters"""
payload = {
'email': 'test@gmail.com',
'password': 'pw',
'name': 'Test',
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
user_exists = get_user_model().objects.filter(
email=payload['email']
).exists()
self.assertFalse(user_exists)
def test_create_token_for_user(self):
"""Test that a token is created for the user"""
payload = {'email': 'test@gmail.com', 'password': 'testpass'}
create_user(**payload)
res = self.client.post(TOKEN_URL, payload)
self.assertIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_create_token_invalid_credentials(self):
"""Test that token is not created if invalid credentials are given"""
create_user(email='test@gmail.com', password='testpass')
payload = {'email': 'test@gmail.com', 'password': 'wrong'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_no_user(self):
"""Test that token is not created if user doesn't exist"""
payload = {'email': 'test@gmail.com', 'password': ' testpass'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_missing_field(self):
"""Test that email and password are required"""
res = self.client.post(TOKEN_URL, {'email': 'one', 'password': ''})
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_user_unauthorized(self):
"""Test that authentication is required for users"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUserApiTests(TestCase):
"""Test API requests that require authentication"""
def setUp(self):
self.user = create_user(
email='test@gmail.com',
password='testpass',
name='name'
)
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_retrieve_profile_success(self):
"""Test retrieving profile for logged in used"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, {
'name': self.user.name,
'email': self.user.email
})
def test_post_me_not_allowed(self):
"""Test that POST is not allowed on the me url"""
res = self.client.post(ME_URL, {})
self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_update_user_profile(self):
"""Test updating the user profile for authenticated user"""
payload = {'name': 'new name', 'password': 'newpassword123'}
res = self.client.patch(ME_URL, payload)
self.user.refresh_from_db()
self.assertEqual(self.user.name, payload['name'])
self.assertTrue(self.user.check_password(payload['password']))
self.assertEqual(res.status_code, status.HTTP_200_OK)
| 34.492958
| 77
| 0.645978
|
1650e3e721c1c931a323dd61363042ea35b8ce27
| 2,005
|
py
|
Python
|
src/jk_jsoncfghelper2/jdef.py
|
jkpubsrc/python-module-jk-jsoncfghelper2
|
2c584c946db5d290525ff49f5c809cec14616247
|
[
"Apache-1.1"
] | null | null | null |
src/jk_jsoncfghelper2/jdef.py
|
jkpubsrc/python-module-jk-jsoncfghelper2
|
2c584c946db5d290525ff49f5c809cec14616247
|
[
"Apache-1.1"
] | null | null | null |
src/jk_jsoncfghelper2/jdef.py
|
jkpubsrc/python-module-jk-jsoncfghelper2
|
2c584c946db5d290525ff49f5c809cec14616247
|
[
"Apache-1.1"
] | null | null | null |
from typing import Union
class JDefStructure(object):
def __init__(self, name:str, structure:Union[list,tuple]):
assert isinstance(name, str)
assert isinstance(structure, (list, tuple))
self.name = name
self.structure = structure
#
#def cloneObject(self):
# return JDefStructure(self.name, list(self.structure))
##
#
class JDef(object):
def __init__(self, name:str, dataType:Union[str,JDefStructure], required:bool = True,
minValue = None, maxValue = None, allowedValues:Union[list,tuple] = None,
minLength:int = None, maxLength:int = None, elementTypes:Union[list,tuple] = None,
nullable:bool = False):
assert isinstance(name, str)
assert isinstance(dataType, (str, JDefStructure))
assert isinstance(required, bool)
assert isinstance(nullable, bool)
if minValue is not None:
assert isinstance(minValue, (float, int))
if maxValue is not None:
assert isinstance(maxValue, (float, int))
if allowedValues is not None:
assert isinstance(allowedValues, (tuple, list))
if minLength is not None:
assert isinstance(minLength, int)
if maxLength is not None:
assert isinstance(maxLength, int)
if elementTypes is not None:
assert isinstance(elementTypes, (tuple, list))
for item in elementTypes:
assert isinstance(item, (str, JDefStructure))
self.name = name
self.dataType = dataType
self.required = required
self.minValue = minValue
self.maxValue = maxValue
self.allowedValues = allowedValues
self.minLength = minLength
self.maxLength = maxLength
self.elementTypes = elementTypes
self.nullable = nullable
#
def __str__(self):
return self.name + ":" + self.dataType
#
def __repr__(self):
return self.name + ":" + self.dataType
#
#def cloneObject(self):
# return JDef(self.name,
# dataType,
# minValue,
# maxValue,
# list(allowedValues) if allowedValues is not None else None,
# minLength,
# maxLength,
# list(elementTypes) if elementTypes is not None else None
# )
##
#
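# Example (hypothetical field definition, not part of this module):
# JDef("port", "int", required=True, minValue=1, maxValue=65535) would
# describe a mandatory integer field restricted to valid TCP port numbers.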
| 20.252525
| 86
| 0.710224
|
3f5ba386936e94191a349b3fa5b8a2079c7e585b
| 31,184
|
py
|
Python
|
pymatgen/analysis/adsorption.py
|
robertcdickson/pymatgen
|
fb65122a7c471b9ffd0d01f2341339fb1dee2af1
|
[
"MIT"
] | null | null | null |
pymatgen/analysis/adsorption.py
|
robertcdickson/pymatgen
|
fb65122a7c471b9ffd0d01f2341339fb1dee2af1
|
[
"MIT"
] | null | null | null |
pymatgen/analysis/adsorption.py
|
robertcdickson/pymatgen
|
fb65122a7c471b9ffd0d01f2341339fb1dee2af1
|
[
"MIT"
] | null | null | null |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes used to enumerate surface sites
and to find adsorption sites on slabs
"""
import itertools
import os
import numpy as np
from matplotlib import patches
from matplotlib.path import Path
from monty.serialization import loadfn
from scipy.spatial import Delaunay
from pymatgen import vis
from pymatgen.core.structure import Structure
from pymatgen.analysis.local_env import VoronoiNN
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.core.operations import SymmOp
from pymatgen.core.surface import generate_all_slabs
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.coord import in_coord_list_pbc
__author__ = "Joseph Montoya"
__copyright__ = "Copyright 2016, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Joseph Montoya"
__credits__ = "Richard Tran"
__email__ = "montoyjh@lbl.gov"
__status__ = "Development"
__date__ = "December 2, 2015"
class AdsorbateSiteFinder:
"""
This class finds adsorbate sites on slabs and generates
adsorbate structures according to user-defined criteria.
The algorithm for finding sites is essentially as follows:
1. Determine "surface sites" by finding those within
a height threshold along the miller index of the
highest site
2. Create a network of surface sites using the Delaunay
triangulation of the surface sites
3. Assign on-top, bridge, and hollow adsorption sites
at the nodes, edges, and face centers of the Del.
Triangulation
4. Generate structures from a molecule positioned at
these sites
"""
def __init__(self, slab, selective_dynamics=False, height=0.9, mi_vec=None):
"""
Create an AdsorbateSiteFinder object.
Args:
slab (Slab): slab object for which to find adsorbate sites
selective_dynamics (bool): flag for whether to assign
non-surface sites as fixed for selective dynamics
height (float): height criteria for selection of surface sites
mi_vec (3-D array-like): vector corresponding to the vector
concurrent with the miller index, this enables use with
slabs that have been reoriented, but the miller vector
must be supplied manually
"""
# get surface normal from miller index
if mi_vec:
self.mvec = mi_vec
else:
self.mvec = get_mi_vec(slab)
slab = self.assign_site_properties(slab, height)
if selective_dynamics:
slab = self.assign_selective_dynamics(slab)
self.slab = slab
@classmethod
def from_bulk_and_miller(
cls,
structure,
miller_index,
min_slab_size=8.0,
min_vacuum_size=10.0,
max_normal_search=None,
center_slab=True,
selective_dynamics=False,
undercoord_threshold=0.09,
):
"""
This method constructs the adsorbate site finder from a bulk
structure and a miller index, which allows the surface sites
to be determined from the difference in bulk and slab coordination,
as opposed to the height threshold.
Args:
structure (Structure): structure from which slab
input to the ASF is constructed
miller_index (3-tuple or list): miller index to be used
min_slab_size (float): min slab size for slab generation
min_vacuum_size (float): min vacuum size for slab generation
max_normal_search (int): max normal search for slab generation
center_slab (bool): whether to center slab in slab generation
selective_dynamics (bool): whether to assign surface sites
to selective dynamics
undercoord_threshold (float): threshold of "undercoordation"
to use for the assignment of surface sites. Default is
0.1, for which surface sites will be designated if they
are 10% less coordinated than their bulk counterpart
"""
# TODO: for some reason this works poorly with primitive cells
# may want to switch the coordination algorithm eventually
vnn_bulk = VoronoiNN(tol=0.05)
bulk_coords = [len(vnn_bulk.get_nn(structure, n)) for n in range(len(structure))]
struct = structure.copy(site_properties={"bulk_coordinations": bulk_coords})
slabs = generate_all_slabs(
struct,
max_index=max(miller_index),
min_slab_size=min_slab_size,
min_vacuum_size=min_vacuum_size,
max_normal_search=max_normal_search,
center_slab=center_slab,
)
slab_dict = {slab.miller_index: slab for slab in slabs}
if miller_index not in slab_dict:
raise ValueError("Miller index not in slab dict")
this_slab = slab_dict[miller_index]
vnn_surface = VoronoiNN(tol=0.05, allow_pathological=True)
surf_props, undercoords = [], []
this_mi_vec = get_mi_vec(this_slab)
mi_mags = [np.dot(this_mi_vec, site.coords) for site in this_slab]
average_mi_mag = np.average(mi_mags)
for n, site in enumerate(this_slab):
bulk_coord = this_slab.site_properties["bulk_coordinations"][n]
slab_coord = len(vnn_surface.get_nn(this_slab, n))
mi_mag = np.dot(this_mi_vec, site.coords)
undercoord = (bulk_coord - slab_coord) / bulk_coord
undercoords += [undercoord]
if undercoord > undercoord_threshold and mi_mag > average_mi_mag:
surf_props += ["surface"]
else:
surf_props += ["subsurface"]
new_site_properties = {
"surface_properties": surf_props,
"undercoords": undercoords,
}
new_slab = this_slab.copy(site_properties=new_site_properties)
return cls(new_slab, selective_dynamics)
def find_surface_sites_by_height(self, slab, height=0.9, xy_tol=0.05):
"""
This method finds surface sites by determining which sites are within
a threshold value in height from the topmost site in a list of sites
Args:
slab (Slab): slab from whose sites the surface sites are selected
height (float): threshold in angstroms of distance from topmost
site in slab along the slab c-vector to include in surface
site determination
xy_tol (float): if supplied, will remove any sites which are
within a certain distance in the miller plane.
Returns:
list of sites selected to be within a threshold of the highest
"""
# Get projection of coordinates along the miller index
m_projs = np.array([np.dot(site.coords, self.mvec) for site in slab.sites])
# Mask based on window threshold along the miller index.
mask = (m_projs - np.amax(m_projs)) >= -height
surf_sites = [slab.sites[n] for n in np.where(mask)[0]]
if xy_tol:
# sort surface sites by height
surf_sites = [s for (h, s) in zip(m_projs[mask], surf_sites)]
surf_sites.reverse()
unique_sites, unique_perp_fracs = [], []
for site in surf_sites:
this_perp = site.coords - np.dot(site.coords, self.mvec)
this_perp_frac = slab.lattice.get_fractional_coords(this_perp)
if not in_coord_list_pbc(unique_perp_fracs, this_perp_frac):
unique_sites.append(site)
unique_perp_fracs.append(this_perp_frac)
surf_sites = unique_sites
return surf_sites
def assign_site_properties(self, slab, height=0.9):
"""
Assigns site properties.
"""
if "surface_properties" in slab.site_properties.keys():
return slab
surf_sites = self.find_surface_sites_by_height(slab, height)
surf_props = ["surface" if site in surf_sites else "subsurface" for site in slab.sites]
return slab.copy(site_properties={"surface_properties": surf_props})
def get_extended_surface_mesh(self, repeat=(5, 5, 1)):
"""
Gets an extended surface mesh to use for adsorption
site finding by constructing supercell of surface sites
Args:
repeat (3-tuple): repeat for getting extended surface mesh
"""
surf_str = Structure.from_sites(self.surface_sites)
surf_str.make_supercell(repeat)
return surf_str
@property
def surface_sites(self):
"""
convenience method to return a list of surface sites
"""
return [site for site in self.slab.sites if site.properties["surface_properties"] == "surface"]
def subsurface_sites(self):
"""
convenience method to return list of subsurface sites
"""
return [site for site in self.slab.sites if site.properties["surface_properties"] == "subsurface"]
def find_adsorption_sites(
self,
distance=2.0,
put_inside=True,
symm_reduce=1e-2,
near_reduce=1e-2,
positions=["ontop", "bridge", "hollow"],
no_obtuse_hollow=True,
):
"""
Finds surface sites according to the above algorithm. Returns
a list of corresponding cartesian coordinates.
Args:
distance (float): distance from the coordinating ensemble
of atoms along the miller index for the site (i. e.
the distance from the slab itself)
put_inside (bool): whether to put the site inside the cell
symm_reduce (float): symm reduction threshold
near_reduce (float): near reduction threshold
positions (list): which positions to include in the site finding
"ontop": sites on top of surface sites
"bridge": sites at edges between surface sites in Delaunay
triangulation of surface sites in the miller plane
"hollow": sites at centers of Delaunay triangulation faces
"subsurface": subsurface positions projected into miller plane
no_obtuse_hollow (bool): flag to indicate whether to include
obtuse triangular ensembles in hollow sites
"""
ads_sites = {k: [] for k in positions}
if "ontop" in positions:
ads_sites["ontop"] = [s.coords for s in self.surface_sites]
if "subsurface" in positions:
# Get highest site
ref = self.slab.sites[np.argmax(self.slab.cart_coords[:, 2])]
# Project diff between highest site and subs site into miller
ss_sites = [
self.mvec * np.dot(ref.coords - s.coords, self.mvec) + s.coords for s in self.subsurface_sites()
]
ads_sites["subsurface"] = ss_sites
if "bridge" in positions or "hollow" in positions:
mesh = self.get_extended_surface_mesh()
sop = get_rot(self.slab)
dt = Delaunay([sop.operate(m.coords)[:2] for m in mesh])
# TODO: refactor below to properly account for >3-fold
for v in dt.simplices:
if -1 not in v:
dots = []
for i_corner, i_opp in zip(range(3), ((1, 2), (0, 2), (0, 1))):
corner, opp = v[i_corner], [v[o] for o in i_opp]
vecs = [mesh[d].coords - mesh[corner].coords for d in opp]
vecs = [vec / np.linalg.norm(vec) for vec in vecs]
dots.append(np.dot(*vecs))
# Add bridge sites at midpoints of edges of D. Tri
if "bridge" in positions:
ads_sites["bridge"].append(self.ensemble_center(mesh, opp))
# Prevent addition of hollow sites in obtuse triangles
obtuse = no_obtuse_hollow and (np.array(dots) < 1e-5).any()
# Add hollow sites at centers of D. Tri faces
if "hollow" in positions and not obtuse:
ads_sites["hollow"].append(self.ensemble_center(mesh, v))
for key, sites in ads_sites.items():
# Pare off outer sites for bridge/hollow
if key in ["bridge", "hollow"]:
frac_coords = [self.slab.lattice.get_fractional_coords(ads_site) for ads_site in sites]
frac_coords = [
frac_coord
for frac_coord in frac_coords
if (frac_coord[0] > 1 and frac_coord[0] < 4 and frac_coord[1] > 1 and frac_coord[1] < 4)
]
sites = [self.slab.lattice.get_cartesian_coords(frac_coord) for frac_coord in frac_coords]
if near_reduce:
sites = self.near_reduce(sites, threshold=near_reduce)
if put_inside:
sites = [put_coord_inside(self.slab.lattice, coord) for coord in sites]
if symm_reduce:
sites = self.symm_reduce(sites, threshold=symm_reduce)
sites = [site + distance * self.mvec for site in sites]
ads_sites[key] = sites
ads_sites["all"] = sum(ads_sites.values(), [])
return ads_sites
def symm_reduce(self, coords_set, threshold=1e-6):
"""
Reduces the set of adsorbate sites by finding and removing
symmetrically equivalent duplicates
Args:
coords_set: coordinate set in cartesian coordinates
threshold: tolerance for distance equivalence, used
as input to in_coord_list_pbc for dupl. checking
"""
surf_sg = SpacegroupAnalyzer(self.slab, 0.1)
symm_ops = surf_sg.get_symmetry_operations()
unique_coords = []
# Convert to fractional
coords_set = [self.slab.lattice.get_fractional_coords(coords) for coords in coords_set]
for coords in coords_set:
incoord = False
for op in symm_ops:
if in_coord_list_pbc(unique_coords, op.operate(coords), atol=threshold):
incoord = True
break
if not incoord:
unique_coords += [coords]
# convert back to cartesian
return [self.slab.lattice.get_cartesian_coords(coords) for coords in unique_coords]
def near_reduce(self, coords_set, threshold=1e-4):
"""
Prunes coordinate set for coordinates that are within
threshold
Args:
coords_set (Nx3 array-like): list or array of coordinates
threshold (float): threshold value for distance
"""
unique_coords = []
coords_set = [self.slab.lattice.get_fractional_coords(coords) for coords in coords_set]
for coord in coords_set:
if not in_coord_list_pbc(unique_coords, coord, threshold):
unique_coords += [coord]
return [self.slab.lattice.get_cartesian_coords(coords) for coords in unique_coords]
@classmethod
def ensemble_center(cls, site_list, indices, cartesian=True):
"""
Finds the center of an ensemble of sites selected from
a list of sites. Helper method for the find_adsorption_sites
algorithm.
Args:
site_list (list of sites): list of sites
indices (list of ints): list of ints from which to select
sites from site list
cartesian (bool): whether to get average fractional or
cartesian coordinate
"""
if cartesian:
return np.average([site_list[i].coords for i in indices], axis=0)
return np.average([site_list[i].frac_coords for i in indices], axis=0)
def add_adsorbate(self, molecule, ads_coord, repeat=None, translate=True, reorient=True):
"""
Adds an adsorbate at a particular coordinate. Adsorbate
represented by a Molecule object and is translated to (0, 0, 0) if
translate is True, or positioned relative to the input adsorbate
coordinate if translate is False.
Args:
molecule (Molecule): molecule object representing the adsorbate
ads_coord (array): coordinate of adsorbate position
repeat (3-tuple or list): input for making a supercell of slab
prior to placing the adsorbate
translate (bool): flag on whether to translate the molecule so
that its CoM is at the origin prior to adding it to the surface
reorient (bool): flag on whether to reorient the molecule to
have its z-axis concurrent with miller index
"""
molecule = molecule.copy()
if translate:
# Translate the molecule so that the center of mass of the atoms
# that have the most negative z coordinate is at (0, 0, 0)
front_atoms = molecule.copy()
front_atoms._sites = [s for s in molecule.sites if s.coords[2] == min(s.coords[2] for s in molecule.sites)]
x, y, z = front_atoms.center_of_mass
molecule.translate_sites(vector=[-x, -y, -z])
if reorient:
# Reorient the molecule along slab m_index
sop = get_rot(self.slab)
molecule.apply_operation(sop.inverse)
struct = self.slab.copy()
if repeat:
struct.make_supercell(repeat)
if "surface_properties" in struct.site_properties.keys():
molecule.add_site_property("surface_properties", ["adsorbate"] * molecule.num_sites)
if "selective_dynamics" in struct.site_properties.keys():
molecule.add_site_property("selective_dynamics", [[True, True, True]] * molecule.num_sites)
for site in molecule:
struct.append(
site.specie,
ads_coord + site.coords,
coords_are_cartesian=True,
properties=site.properties,
)
return struct
@classmethod
def assign_selective_dynamics(cls, slab):
"""
Helper function to assign selective dynamics site_properties
based on surface, subsurface site properties
Args:
slab (Slab): slab for which to assign selective dynamics
"""
sd_list = [
[False, False, False] if site.properties["surface_properties"] == "subsurface" else [True, True, True]
for site in slab.sites
]
new_sp = slab.site_properties
new_sp["selective_dynamics"] = sd_list
return slab.copy(site_properties=new_sp)
def generate_adsorption_structures(
self,
molecule,
repeat=None,
min_lw=5.0,
translate=True,
reorient=True,
find_args=None,
):
"""
Function that generates all adsorption structures for a given
molecular adsorbate. Can take repeat argument or minimum
length/width of precursor slab as an input
Args:
molecule (Molecule): molecule corresponding to adsorbate
repeat (3-tuple or list): repeat argument for supercell generation
min_lw (float): minimum length and width of the slab, only used
if repeat is None
translate (bool): flag on whether to translate the molecule so
that its CoM is at the origin prior to adding it to the surface
reorient (bool): flag on whether or not to reorient adsorbate
along the miller index
find_args (dict): dictionary of arguments to be passed to the
call to self.find_adsorption_sites, e.g. {"distance":2.0}
"""
if repeat is None:
xrep = np.ceil(min_lw / np.linalg.norm(self.slab.lattice.matrix[0]))
yrep = np.ceil(min_lw / np.linalg.norm(self.slab.lattice.matrix[1]))
repeat = [xrep, yrep, 1]
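# e.g. with min_lw=5.0 and a surface lattice vector of length 3.5 A,
# ceil(5.0 / 3.5) = 2 repeats are used along that direction.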
structs = []
find_args = find_args or {}
for coords in self.find_adsorption_sites(**find_args)["all"]:
structs.append(
self.add_adsorbate(
molecule,
coords,
repeat=repeat,
translate=translate,
reorient=reorient,
)
)
return structs
def adsorb_both_surfaces(
self,
molecule,
repeat=None,
min_lw=5.0,
translate=True,
reorient=True,
find_args=None,
):
"""
Function that generates all adsorption structures for a given
molecular adsorbate on both surfaces of a slab. This is useful
for calculating surface energy where both surfaces need to be
equivalent or if we want to calculate nonpolar systems.
Args:
molecule (Molecule): molecule corresponding to adsorbate
repeat (3-tuple or list): repeat argument for supercell generation
min_lw (float): minimum length and width of the slab, only used
if repeat is None
reorient (bool): flag on whether or not to reorient adsorbate
along the miller index
find_args (dict): dictionary of arguments to be passed to the
call to self.find_adsorption_sites, e.g. {"distance":2.0}
"""
# Get the adsorbed surfaces first
find_args = find_args or {}
adslabs = self.generate_adsorption_structures(
molecule,
repeat=repeat,
min_lw=min_lw,
translate=translate,
reorient=reorient,
find_args=find_args,
)
new_adslabs = []
for adslab in adslabs:
# Find the adsorbate sites and indices in each slab
adsorbates, indices = [], []
for i, site in enumerate(adslab.sites):
if site.surface_properties == "adsorbate":
adsorbates.append(site)
indices.append(i)
# Start with the clean slab
adslab.remove_sites(indices)
slab = adslab.copy()
# For each site, we add it back to the slab along with a
# symmetrically equivalent position on the other side of
# the slab using symmetry operations
for adsorbate in adsorbates:
p2 = adslab.get_symmetric_site(adsorbate.frac_coords)
slab.append(adsorbate.specie, p2, properties={"surface_properties": "adsorbate"})
slab.append(
adsorbate.specie,
adsorbate.frac_coords,
properties={"surface_properties": "adsorbate"},
)
new_adslabs.append(slab)
return new_adslabs
def generate_substitution_structures(
self,
atom,
target_species=None,
sub_both_sides=False,
range_tol=1e-2,
dist_from_surf=0,
):
"""
Function that performs substitution-type doping on the surface and
returns all possible configurations where one dopant is substituted
per surface. Can substitute one surface or both.
Args:
atom (str): atom corresponding to substitutional dopant
sub_both_sides (bool): If true, substitute an equivalent
site on the other surface
target_species (list): List of specific species to substitute
range_tol (float): Find viable substitution sites at a specific
distance from the surface +- this tolerance
dist_from_surf (float): Distance from the surface to find viable
substitution sites, defaults to 0 to substitute at the surface
"""
target_species = target_species or []
# Get symmetrized structure in case we want to substitute both sides
sym_slab = SpacegroupAnalyzer(self.slab).get_symmetrized_structure()
# Define a function for substituting a site
def substitute(site, i):
slab = self.slab.copy()
props = self.slab.site_properties
if sub_both_sides:
# Find an equivalent site on the other surface
eq_indices = [indices for indices in sym_slab.equivalent_indices if i in indices][0]
for ii in eq_indices:
if f"{sym_slab[ii].frac_coords[2]:.6f}" != f"{site.frac_coords[2]:.6f}":
props["surface_properties"][ii] = "substitute"
slab.replace(ii, atom)
break
props["surface_properties"][i] = "substitute"
slab.replace(i, atom)
slab.add_site_property("surface_properties", props["surface_properties"])
return slab
# Get all possible substitution sites
substituted_slabs = []
# Sort sites so that we can define a range relative to the position of the
# surface atoms, i.e. search for sites above (below) the bottom (top) surface
sorted_sites = sorted(sym_slab, key=lambda site: site.frac_coords[2])
if sorted_sites[0].surface_properties == "surface":
d = sorted_sites[0].frac_coords[2] + dist_from_surf
else:
d = sorted_sites[-1].frac_coords[2] - dist_from_surf
for i, site in enumerate(sym_slab):
if d - range_tol < site.frac_coords[2] < d + range_tol:
if target_species and site.species_string in target_species:
substituted_slabs.append(substitute(site, i))
elif not target_species:
substituted_slabs.append(substitute(site, i))
matcher = StructureMatcher()
return [s[0] for s in matcher.group_structures(substituted_slabs)]
def get_mi_vec(slab):
"""
Convenience function which returns the unit vector aligned
with the miller index.
"""
mvec = np.cross(slab.lattice.matrix[0], slab.lattice.matrix[1])
return mvec / np.linalg.norm(mvec)
def get_rot(slab):
"""
Gets the transformation to rotate the z axis into the miller index
"""
new_z = get_mi_vec(slab)
a, b, c = slab.lattice.matrix
new_x = a / np.linalg.norm(a)
new_y = np.cross(new_z, new_x)
x, y, z = np.eye(3)
rot_matrix = np.array([np.dot(*el) for el in itertools.product([x, y, z], [new_x, new_y, new_z])]).reshape(3, 3)
rot_matrix = np.transpose(rot_matrix)
sop = SymmOp.from_rotation_and_translation(rot_matrix)
return sop
def put_coord_inside(lattice, cart_coordinate):
"""
converts a cartesian coordinate such that it is inside the unit cell.
"""
fc = lattice.get_fractional_coords(cart_coordinate)
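# e.g. fractional (1.2, -0.3, 0.5) maps to (0.2, 0.7, 0.5) before being
# converted back to cartesian coordinates.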
return lattice.get_cartesian_coords([c - np.floor(c) for c in fc])
def reorient_z(structure):
"""
reorients a structure such that the z axis is concurrent with the
normal to the A-B plane
"""
struct = structure.copy()
sop = get_rot(struct)
struct.apply_operation(sop)
return struct
# Get color dictionary
colors = loadfn(os.path.join(os.path.dirname(vis.__file__), "ElementColorSchemes.yaml"))
color_dict = {el: [j / 256.001 for j in colors["Jmol"][el]] for el in colors["Jmol"].keys()}
def plot_slab(
slab,
ax,
scale=0.8,
repeat=5,
window=1.5,
draw_unit_cell=True,
decay=0.2,
adsorption_sites=True,
inverse=False,
):
"""
Function that helps visualize the slab in a 2-D plot, for
convenient viewing of output of AdsorbateSiteFinder.
Args:
slab (slab): Slab object to be visualized
ax (axes): matplotlib axes with which to visualize
scale (float): radius scaling for sites
repeat (int): number of repeating unit cells to visualize
window (float): window for setting the axes limits, is essentially
a fraction of the unit cell limits
draw_unit_cell (bool): flag indicating whether or not to draw cell
decay (float): how the alpha-value decays along the z-axis
inverse (bool): invert z axis to plot opposite surface
"""
orig_slab = slab.copy()
slab = reorient_z(slab)
orig_cell = slab.lattice.matrix.copy()
if repeat:
slab.make_supercell([repeat, repeat, 1])
coords = np.array(sorted(slab.cart_coords, key=lambda x: x[2]))
sites = sorted(slab.sites, key=lambda x: x.coords[2])
alphas = 1 - decay * (np.max(coords[:, 2]) - coords[:, 2])
alphas = alphas.clip(min=0)
corner = [0, 0, slab.lattice.get_fractional_coords(coords[-1])[-1]]
corner = slab.lattice.get_cartesian_coords(corner)[:2]
verts = orig_cell[:2, :2]
lattsum = verts[0] + verts[1]
# inverse coords, sites, alphas, to show other side of slab
if inverse:
alphas = np.array(list(reversed(alphas)))
sites = list(reversed(sites))
coords = np.array(list(reversed(coords)))
# Draw circles at sites and stack them accordingly
for n, coord in enumerate(coords):
r = sites[n].specie.atomic_radius * scale
ax.add_patch(patches.Circle(coord[:2] - lattsum * (repeat // 2), r, color="w", zorder=2 * n))
color = color_dict[sites[n].species_string]
ax.add_patch(
patches.Circle(
coord[:2] - lattsum * (repeat // 2),
r,
facecolor=color,
alpha=alphas[n],
edgecolor="k",
lw=0.3,
zorder=2 * n + 1,
)
)
# Adsorption sites
if adsorption_sites:
asf = AdsorbateSiteFinder(orig_slab)
if inverse:
inverse_slab = orig_slab.copy()
inverse_slab.make_supercell([1, 1, -1])
asf = AdsorbateSiteFinder(inverse_slab)
ads_sites = asf.find_adsorption_sites()["all"]
sop = get_rot(orig_slab)
ads_sites = [sop.operate(ads_site)[:2].tolist() for ads_site in ads_sites]
ax.plot(*zip(*ads_sites), color="k", marker="x", markersize=10, mew=1, linestyle="", zorder=10000)
# Draw unit cell
if draw_unit_cell:
verts = np.insert(verts, 1, lattsum, axis=0).tolist()
verts += [[0.0, 0.0]]
verts = [[0.0, 0.0]] + verts
codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]
verts = [(np.array(vert) + corner).tolist() for vert in verts]
path = Path(verts, codes)
patch = patches.PathPatch(path, facecolor="none", lw=2, alpha=0.5, zorder=2 * n + 2)
ax.add_patch(patch)
ax.set_aspect("equal")
center = corner + lattsum / 2.0
extent = np.max(lattsum)
lim_array = [center - extent * window, center + extent * window]
x_lim = [ele[0] for ele in lim_array]
y_lim = [ele[1] for ele in lim_array]
ax.set_xlim(x_lim)
ax.set_ylim(y_lim)
return ax
| 41.303311
| 119
| 0.614129
|
82fbe0bd2fb66507cf7c0660328063a4fdbe037b
| 12,166
|
py
|
Python
|
.github/scripts/test_trymerge.py
|
qqaatw/pytorch
|
44764f131b040a41a6dcf1304bb635c574bf5a3b
|
[
"Intel"
] | null | null | null |
.github/scripts/test_trymerge.py
|
qqaatw/pytorch
|
44764f131b040a41a6dcf1304bb635c574bf5a3b
|
[
"Intel"
] | null | null | null |
.github/scripts/test_trymerge.py
|
qqaatw/pytorch
|
44764f131b040a41a6dcf1304bb635c574bf5a3b
|
[
"Intel"
] | null | null | null |
#!/usr/bin/env python3
# Tests implemented in this file are relying on GitHub GraphQL APIs
# In order to avoid test flakiness, results of the queries
# are cached in gql_mocks.json
# PyTorch Lint workflow does not have GITHUB_TOKEN defined to avoid
# flakiness, so if you are making changes to merge_rules or
# GraphQL queries in trymerge.py, please make sure to delete `gql_mocks.json`
# and re-run the test locally with one's PAT
import json
import os
from hashlib import sha256
from trymerge import find_matching_merge_rule, gh_graphql, gh_get_team_members, GitHubPR, MergeRule, MandatoryChecksMissingError
from gitutils import get_git_remote_name, get_git_repo_dir, GitRepo
from typing import cast, Any, List, Optional
from unittest import TestCase, main, mock
from urllib.error import HTTPError
def mocked_gh_graphql(query: str, **kwargs: Any) -> Any:
gql_db_fname = os.path.join(os.path.dirname(__file__), "gql_mocks.json")
def get_mocked_queries() -> Any:
if not os.path.exists(gql_db_fname):
return {}
with open(gql_db_fname, encoding="utf-8") as f:
return json.load(f)
def save_mocked_queries(obj: Any) -> None:
with open(gql_db_fname, encoding="utf-8", mode="w") as f:
json.dump(obj, f, indent=2)
f.write("\n")
key = f"query_sha={sha256(query.encode('utf-8')).hexdigest()} " + " ".join([f"{k}={kwargs[k]}" for k in sorted(kwargs.keys())])
mocked_queries = get_mocked_queries()
if key in mocked_queries:
return mocked_queries[key]
try:
rc = gh_graphql(query, **kwargs)
except HTTPError as err:
if err.code == 401:
err_msg = "If you are seeing this message during workflow run, please make sure to update gql_mocks.json"
err_msg += f" locally, by deleting it and running {os.path.basename(__file__)} with "
err_msg += " GitHub Personal Access Token passed via GITHUB_TOKEN environment variable"
if os.getenv("GITHUB_TOKEN") is None:
err_msg = "Failed to update cached GraphQL queries as GITHUB_TOKEN is not defined." + err_msg
raise RuntimeError(err_msg) from err
mocked_queries[key] = rc
save_mocked_queries(mocked_queries)
return rc
def mock_parse_args(revert: bool = False,
force: bool = False) -> Any:
class Object(object):
def __init__(self) -> None:
self.revert = revert
self.force = force
self.pr_num = 76123
self.dry_run = True
self.comment_id = 0
self.on_mandatory = False
self.on_green = False
self.reason = 'this is for testing'
return Object()
def mock_revert(repo: GitRepo, pr: GitHubPR, *,
dry_run: bool = False,
comment_id: Optional[int] = None,
reason: Optional[str] = None) -> None:
pass
def mock_merge(pr_num: int, repo: GitRepo,
dry_run: bool = False,
force: bool = False,
comment_id: Optional[int] = None,
mandatory_only: bool = False,
on_green: bool = False,
timeout_minutes: int = 400,
stale_pr_days: int = 3) -> None:
pass
def mock_gh_get_info() -> Any:
return {"closed": False, "isCrossRepository": False}
def mocked_read_merge_rules(repo: Optional[GitRepo], org: str, project: str) -> List[MergeRule]:
mock_merge_rules = """
[
{
"name": "mock with nonexistent check",
"patterns": ["*"],
"approved_by": [],
"mandatory_checks_name": [
"Facebook CLA Check",
"Lint",
"nonexistent"
]
}
]
"""
rc = json.loads(mock_merge_rules, object_hook=lambda x: MergeRule(**x))
return cast(List[MergeRule], rc)
class TestGitHubPR(TestCase):
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_match_rules(self, mocked_gql: Any) -> None:
"Tests that PR passes merge rules"
pr = GitHubPR("pytorch", "pytorch", 77700)
repo = GitRepo(get_git_repo_dir(), get_git_remote_name())
self.assertTrue(find_matching_merge_rule(pr, repo) is not None)
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_lint_fails(self, mocked_gql: Any) -> None:
"Tests that PR fails mandatory lint check"
pr = GitHubPR("pytorch", "pytorch", 74649)
repo = GitRepo(get_git_repo_dir(), get_git_remote_name())
self.assertRaises(RuntimeError, lambda: find_matching_merge_rule(pr, repo))
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_get_last_comment(self, mocked_gql: Any) -> None:
"Tests that last comment can be fetched"
pr = GitHubPR("pytorch", "pytorch", 71759)
comment = pr.get_last_comment()
self.assertEqual(comment.author_login, "github-actions")
self.assertIsNone(comment.editor_login)
self.assertTrue("You've committed this PR" in comment.body_text)
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_get_author_null(self, mocked_gql: Any) -> None:
""" Tests that PR author can be computed
If reply contains NULL
"""
pr = GitHubPR("pytorch", "pytorch", 71759)
author = pr.get_author()
self.assertTrue(author is not None)
self.assertTrue("@" in author)
self.assertTrue(pr.get_diff_revision() is None)
# PR with multiple contributors, but creator id is not among authors
pr = GitHubPR("pytorch", "pytorch", 75095)
self.assertEqual(pr.get_pr_creator_login(), "mruberry")
author = pr.get_author()
self.assertTrue(author is not None)
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_large_diff(self, mocked_gql: Any) -> None:
"Tests that PR with 100+ files can be fetched"
pr = GitHubPR("pytorch", "pytorch", 73099)
self.assertTrue(pr.get_changed_files_count() > 100)
flist = pr.get_changed_files()
self.assertEqual(len(flist), pr.get_changed_files_count())
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_internal_changes(self, mocked_gql: Any) -> None:
"Tests that PR with internal changes is detected"
pr = GitHubPR("pytorch", "pytorch", 73969)
self.assertTrue(pr.has_internal_changes())
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_checksuites_pagination(self, mocked_gql: Any) -> None:
"Tests that PR with lots of checksuits can be fetched"
pr = GitHubPR("pytorch", "pytorch", 73811)
self.assertGreater(len(pr.get_checkrun_conclusions()), 0)
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_comments_pagination(self, mocked_gql: Any) -> None:
"Tests that PR with 50+ comments can be fetched"
pr = GitHubPR("pytorch", "pytorch", 31093)
self.assertGreater(len(pr.get_comments()), 50)
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_gql_complexity(self, mocked_gql: Any) -> None:
"Fetch comments and conclusions for PR with 60 commits"
        # Previous version of GraphQL query used to cause HTTP/502 error
# see https://gist.github.com/malfet/9b93bc7eeddeaf1d84546efc4f0c577f
pr = GitHubPR("pytorch", "pytorch", 68111)
self.assertGreater(len(pr.get_comments()), 20)
self.assertGreater(len(pr.get_checkrun_conclusions()), 3)
self.assertGreater(pr.get_commit_count(), 60)
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_team_members(self, mocked_gql: Any) -> None:
"Test fetching team members works"
dev_infra_team = gh_get_team_members("pytorch", "pytorch-dev-infra")
self.assertGreater(len(dev_infra_team), 2)
with self.assertWarns(Warning):
non_existing_team = gh_get_team_members("pytorch", "qwertyuiop")
self.assertEqual(len(non_existing_team), 0)
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_get_author_many_commits(self, mocked_gql: Any) -> None:
""" Tests that authors for all commits can be fetched
"""
pr = GitHubPR("pytorch", "pytorch", 76118)
authors = pr.get_authors()
self.assertGreater(pr.get_commit_count(), 100)
self.assertGreater(len(authors), 50)
self.assertTrue("@" in pr.get_author())
@mock.patch('trymerge.read_merge_rules', side_effect=mocked_read_merge_rules)
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_pending_status_check(self, mocked_gql: Any, mocked_read_merge_rules: Any) -> None:
""" Tests that PR with nonexistent/pending status checks fails with the right reason.
"""
pr = GitHubPR("pytorch", "pytorch", 76118)
repo = GitRepo(get_git_repo_dir(), get_git_remote_name())
self.assertRaisesRegex(MandatoryChecksMissingError,
".*are pending/not yet run.*",
lambda: find_matching_merge_rule(pr, repo))
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_get_author_many_reviews(self, mocked_gql: Any) -> None:
""" Tests that all reviews can be fetched
"""
pr = GitHubPR("pytorch", "pytorch", 76123)
approved_by = pr.get_approved_by()
self.assertGreater(len(approved_by), 0)
assert pr._reviews is not None # to pacify mypy
self.assertGreater(len(pr._reviews), 100)
@mock.patch('trymerge.gh_graphql', side_effect=mocked_gh_graphql)
def test_get_checkruns_many_runs(self, mocked_gql: Any) -> None:
""" Tests that all checkruns can be fetched
"""
pr = GitHubPR("pytorch", "pytorch", 77700)
conclusions = pr.get_checkrun_conclusions()
self.assertTrue("linux-docs / build-docs (cpp)" in conclusions.keys())
@mock.patch('trymerge.gh_get_pr_info', return_value=mock_gh_get_info())
@mock.patch('trymerge.parse_args', return_value=mock_parse_args(True, False))
@mock.patch('trymerge.try_revert', side_effect=mock_revert)
def test_main_revert(self, mock_revert: Any, mock_parse_args: Any, gh_get_pr_info: Any) -> None:
import trymerge
trymerge.main()
mock_revert.assert_called_once()
@mock.patch('trymerge.gh_get_pr_info', return_value=mock_gh_get_info())
@mock.patch('trymerge.parse_args', return_value=mock_parse_args(False, True))
@mock.patch('trymerge.merge', side_effect=mock_merge)
def test_main_force(self, mock_merge: Any, mock_parse_args: Any, mock_gh_get_info: Any) -> None:
import trymerge
trymerge.main()
mock_merge.assert_called_once_with(mock.ANY,
mock.ANY,
dry_run=mock.ANY,
force=True,
comment_id=mock.ANY,
on_green=False,
mandatory_only=False)
@mock.patch('trymerge.gh_get_pr_info', return_value=mock_gh_get_info())
@mock.patch('trymerge.parse_args', return_value=mock_parse_args(False, False))
@mock.patch('trymerge.merge', side_effect=mock_merge)
def test_main_merge(self, mock_merge: Any, mock_parse_args: Any, mock_gh_get_info: Any) -> None:
import trymerge
trymerge.main()
mock_merge.assert_called_once_with(mock.ANY,
mock.ANY,
dry_run=mock.ANY,
force=False,
comment_id=mock.ANY,
on_green=False,
mandatory_only=False)
if __name__ == "__main__":
main()
| 44.564103
| 131
| 0.639076
|
99aa125dcdf38bb38c1de50f1b2555d6a156c473
| 5,521
|
py
|
Python
|
asposepdfcloud/models/annotations_info_response.py
|
kaferi/aspose-pdf-cloud-python
|
48f70742fec1e41644ec0b658db3f174ba845304
|
[
"MIT"
] | 7
|
2018-06-11T17:44:44.000Z
|
2022-02-08T05:52:48.000Z
|
asposepdfcloud/models/annotations_info_response.py
|
kaferi/aspose-pdf-cloud-python
|
48f70742fec1e41644ec0b658db3f174ba845304
|
[
"MIT"
] | 1
|
2021-03-20T22:16:15.000Z
|
2021-06-27T15:11:52.000Z
|
asposepdfcloud/models/annotations_info_response.py
|
kaferi/aspose-pdf-cloud-python
|
48f70742fec1e41644ec0b658db3f174ba845304
|
[
"MIT"
] | 4
|
2018-04-18T19:41:12.000Z
|
2021-06-21T13:12:24.000Z
|
# coding: utf-8
"""
Aspose.PDF Cloud API Reference
Copyright (c) 2021 Aspose.PDF Cloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
OpenAPI spec version: 3.0
"""
from pprint import pformat
from six import iteritems
import re
class AnnotationsInfoResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'code': 'int',
'status': 'str',
'annotations': 'AnnotationsInfo'
}
attribute_map = {
'code': 'Code',
'status': 'Status',
'annotations': 'Annotations'
}
def __init__(self, code=None, status=None, annotations=None):
"""
AnnotationsInfoResponse - a model defined in Swagger
"""
self._code = None
self._status = None
self._annotations = None
self.code = code
if status is not None:
self.status = status
if annotations is not None:
self.annotations = annotations
@property
def code(self):
"""
Gets the code of this AnnotationsInfoResponse.
Response status code.
:return: The code of this AnnotationsInfoResponse.
:rtype: int
"""
return self._code
@code.setter
def code(self, code):
"""
Sets the code of this AnnotationsInfoResponse.
Response status code.
:param code: The code of this AnnotationsInfoResponse.
:type: int
"""
if code is None:
raise ValueError("Invalid value for `code`, must not be `None`")
self._code = code
@property
def status(self):
"""
Gets the status of this AnnotationsInfoResponse.
Response status.
:return: The status of this AnnotationsInfoResponse.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this AnnotationsInfoResponse.
Response status.
:param status: The status of this AnnotationsInfoResponse.
:type: str
"""
self._status = status
@property
def annotations(self):
"""
Gets the annotations of this AnnotationsInfoResponse.
Annotations info
:return: The annotations of this AnnotationsInfoResponse.
:rtype: AnnotationsInfo
"""
return self._annotations
@annotations.setter
def annotations(self, annotations):
"""
Sets the annotations of this AnnotationsInfoResponse.
Annotations info
:param annotations: The annotations of this AnnotationsInfoResponse.
:type: AnnotationsInfo
"""
self._annotations = annotations
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, AnnotationsInfoResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
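# Illustrative usage sketch (not part of the generated SDK file; the values
# below are placeholders):
if __name__ == "__main__":
    resp = AnnotationsInfoResponse(code=200, status="OK")
    # to_dict() walks swagger_types, so fields left unset come back as None
    assert resp.to_dict() == {"code": 200, "status": "OK", "annotations": None}
    print(resp)  # pformat of the dict, via to_str()/__repr__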
| 27.605
| 78
| 0.600978
|
0d283fc4b23dffb2a4c96b15af72fafe06cd3d69
| 564
|
py
|
Python
|
05 - Basics of lists, keyboard control/motion_implicit.py
|
PableraShow/python-exercises
|
e1648fd42f3009ec6fb1e2096852b6d399e91d5b
|
[
"MIT"
] | 8
|
2018-10-01T17:35:57.000Z
|
2022-02-01T08:12:12.000Z
|
05 - Basics of lists, keyboard control/motion_implicit.py
|
PableraShow/python-exercises
|
e1648fd42f3009ec6fb1e2096852b6d399e91d5b
|
[
"MIT"
] | null | null | null |
05 - Basics of lists, keyboard control/motion_implicit.py
|
PableraShow/python-exercises
|
e1648fd42f3009ec6fb1e2096852b6d399e91d5b
|
[
"MIT"
] | 6
|
2018-07-22T19:15:21.000Z
|
2022-02-05T07:54:58.000Z
|
# Ball motion with an implicit timer
import simplegui
# Initialize globals
WIDTH = 600
HEIGHT = 400
BALL_RADIUS = 20
ball_pos = [WIDTH / 2, HEIGHT / 2]
vel = [0, 1] # pixels per update (1/60 seconds)
# define event handlers
def draw(canvas):
# Update ball position
ball_pos[0] += vel[0]
ball_pos[1] += vel[1]
# Draw ball
canvas.draw_circle(ball_pos, BALL_RADIUS, 2, 'Red', 'White')
# create frame
frame = simplegui.create_frame('Motion', WIDTH, HEIGHT)
# register event handlers
frame.set_draw_handler(draw)
# start frame
frame.start()
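# Note (illustrative): the draw handler doubles as the timer here -- simplegui
# calls it roughly 60 times per second, so vel = [0, 1] moves the ball about
# 60 pixels per second straight down; starting from HEIGHT / 2 it drifts off
# the bottom edge after roughly 3-4 seconds, since nothing wraps or bounces it.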
| 18.8
| 64
| 0.693262
|
d6157dfb55b230dcfc9363cf23f59fea2f01b469
| 211
|
py
|
Python
|
etlaas_stream/infrastructure.py
|
etlaas/etlaas-stream
|
59f811807e0d639c8787c0676e39d5a8db20789f
|
[
"MIT"
] | null | null | null |
etlaas_stream/infrastructure.py
|
etlaas/etlaas-stream
|
59f811807e0d639c8787c0676e39d5a8db20789f
|
[
"MIT"
] | null | null | null |
etlaas_stream/infrastructure.py
|
etlaas/etlaas-stream
|
59f811807e0d639c8787c0676e39d5a8db20789f
|
[
"MIT"
] | null | null | null |
import simplejson
from typing import Any
def default_loads(s: str) -> Any:
return simplejson.loads(s, use_decimal=True)
def default_dumps(d: Any) -> str:
return simplejson.dumps(d, use_decimal=True)
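# Illustrative self-check (not part of the original module): use_decimal=True
# round-trips decimal literals through decimal.Decimal, so "0.10" keeps its
# exact value instead of becoming a binary float.
if __name__ == "__main__":
    from decimal import Decimal
    assert default_loads('{"price": 0.10}')["price"] == Decimal("0.10")
    assert default_dumps({"price": Decimal("0.10")}) == '{"price": 0.10}'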
| 19.181818
| 48
| 0.734597
|
445fd79bf777773a2998a18aa6714d8c7129f5c6
| 7,373
|
py
|
Python
|
DeblurGAN-tf/00-access/mode.py
|
NALLEIN/Ascend
|
dbe83758cc180b21c41661a1dfad399193a7851c
|
[
"Apache-2.0"
] | null | null | null |
DeblurGAN-tf/00-access/mode.py
|
NALLEIN/Ascend
|
dbe83758cc180b21c41661a1dfad399193a7851c
|
[
"Apache-2.0"
] | null | null | null |
DeblurGAN-tf/00-access/mode.py
|
NALLEIN/Ascend
|
dbe83758cc180b21c41661a1dfad399193a7851c
|
[
"Apache-2.0"
] | null | null | null |
import os
import tensorflow as tf
from PIL import Image
import numpy as np
import time
import util
# from skimage.measure import compare_ssim as ssim
import data
def train(args, model, sess, saver):
if args.fine_tuning :
saver.restore(sess, args.pre_trained_model)
print("saved model is loaded for fine-tuning!")
print("model path is %s"%(args.pre_trained_model))
num_imgs = len(os.listdir(args.train_Sharp_path))
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(args.logdir,sess.graph)
if args.test_with_train:
f = open("valid_logs.txt", 'w')
epoch = 0
step = num_imgs // args.batch_size
if args.in_memory:
blur_imgs = util.image_loader(args.train_Blur_path, args.load_X, args.load_Y)
sharp_imgs = util.image_loader(args.train_Sharp_path, args.load_X, args.load_Y)
# lr_hr_ds, n_data = data.load_train_dataset(args.train_Sharp_path, args.train_Blur_path, args.ext, args.batch_size)
# next_element = lr_hr_ds.get_next()
while epoch < args.max_epoch:
random_index = np.random.permutation(len(blur_imgs))
for k in range(step):
s_time = time.time()
# blur_batch, sharp_batch = sess.run(next_element)
blur_batch, sharp_batch = util.batch_gen(blur_imgs, sharp_imgs, args.patch_size, args.batch_size, random_index, k, args.augmentation)
for t in range(args.critic_updates):
_, D_loss = sess.run([model.D_train, model.D_loss], feed_dict = {model.blur : blur_batch, model.sharp : sharp_batch, model.epoch : epoch})
_, G_loss = sess.run([model.G_train, model.G_loss], feed_dict = {model.blur : blur_batch, model.sharp : sharp_batch, model.epoch : epoch})
e_time = time.time()
if epoch % args.log_freq == 0:
summary = sess.run(merged, feed_dict = {model.blur : blur_batch, model.sharp: sharp_batch})
train_writer.add_summary(summary, epoch)
if args.test_with_train:
test(args, model, sess, saver, f, epoch, loading = False)
print("%d training epoch completed" % epoch)
print("D_loss : %0.4f, \t G_loss : %0.4f"%(D_loss, G_loss))
print("Elpased time : %0.4f"%(e_time - s_time))
if ((epoch + 1) % args.model_save_freq ==0):
saver.save(sess, './model/DeblurrGAN', global_step = epoch, write_meta_graph = False)
epoch += 1
saver.save(sess, './model/DeblurrGAN_last', write_meta_graph = False)
else:
while epoch < args.max_epoch:
sess.run(model.data_loader.init_op['tr_init'])
for k in range(step):
s_time = time.time()
for t in range(args.critic_updates):
_, D_loss = sess.run([model.D_train, model.D_loss], feed_dict = {model.epoch : epoch})
_, G_loss = sess.run([model.G_train, model.G_loss], feed_dict = {model.epoch : epoch})
e_time = time.time()
if epoch % args.log_freq == 0:
summary = sess.run(merged)
train_writer.add_summary(summary, epoch)
if args.test_with_train:
test(args, model, sess, saver, f, epoch, loading = False)
print("%d training epoch completed" % epoch)
print("D_loss : %0.4f, \t G_loss : %0.4f"%(D_loss, G_loss))
print("Elpased time : %0.4f"%(e_time - s_time))
if ((epoch) % args.model_save_freq ==0):
saver.save(sess, './model/DeblurrGAN', global_step = epoch, write_meta_graph = False)
epoch += 1
saver.save(sess, './model/DeblurrGAN_last', global_step = epoch, write_meta_graph = False)
if args.test_with_train:
f.close()
def test(args, model, sess, saver, file, step = -1, loading = False):
if loading:
saver.restore(sess, args.pre_trained_model)
print("saved model is loaded for test!")
print("model path is %s"%args.pre_trained_model)
blur_img_name = sorted(os.listdir(args.test_Blur_path))
sharp_img_name = sorted(os.listdir(args.test_Sharp_path))
PSNR_list = []
ssim_list = []
if args.in_memory :
blur_imgs = util.image_loader(args.test_Blur_path, args.load_X, args.load_Y, is_train = False)
sharp_imgs = util.image_loader(args.test_Sharp_path, args.load_X, args.load_Y, is_train = False)
for i, ele in enumerate(blur_imgs):
blur = np.expand_dims(ele, axis = 0)
sharp = np.expand_dims(sharp_imgs[i], axis = 0)
output, psnr, ssim = sess.run([model.output, model.PSNR, model.ssim], feed_dict = {model.blur : blur, model.sharp : sharp})
if args.save_test_result:
output = Image.fromarray(output[0])
split_name = blur_img_name[i].split('.')
output.save(os.path.join(args.result_path, '%s_sharp.png'%(''.join(map(str, split_name[:-1])))))
PSNR_list.append(psnr)
ssim_list.append(ssim)
else:
sess.run(model.data_loader.init_op['val_init'])
for i in range(len(blur_img_name)):
output, psnr, ssim = sess.run([model.output, model.PSNR, model.ssim])
if args.save_test_result:
output = Image.fromarray(output[0])
split_name = blur_img_name[i].split('.')
output.save(os.path.join(args.result_path, '%s_sharp.png'%(''.join(map(str, split_name[:-1])))))
PSNR_list.append(psnr)
ssim_list.append(ssim)
length = len(PSNR_list)
mean_PSNR = sum(PSNR_list) / length
mean_ssim = sum(ssim_list) / length
if step == -1:
        file.write('PSNR : %0.4f SSIM : %0.4f'%(mean_PSNR, mean_ssim))
file.close()
else :
file.write("%d-epoch step PSNR : %0.4f SSIM : %0.4f \n"%(step, mean_PSNR, mean_ssim))
def test_only(args, model, sess, saver):
saver.restore(sess,args.pre_trained_model)
print("saved model is loaded for test only!")
print("model path is %s"%args.pre_trained_model)
blur_img_name = sorted(os.listdir(args.test_Blur_path))
if args.in_memory :
blur_imgs = util.image_loader(args.test_Blur_path, args.load_X, args.load_Y, is_train = False)
for i, ele in enumerate(blur_imgs):
blur = np.expand_dims(ele, axis = 0)
if args.chop_forward:
output = util.recursive_forwarding(blur, args.chop_size, sess, model, args.chop_shave)
output = Image.fromarray(output[0])
else:
output = sess.run(model.output, feed_dict = {model.blur : blur})
output = Image.fromarray(output[0])
split_name = blur_img_name[i].split('.')
output.save(os.path.join(args.result_path, '%s_sharp.png'%(''.join(map(str, split_name[:-1])))))
else:
sess.run(model.data_loader.init_op['te_init'])
for i in range(len(blur_img_name)):
output = sess.run(model.output)
output = Image.fromarray(output[0])
split_name = blur_img_name[i].split('.')
output.save(os.path.join(args.result_path, '%s_sharp.png'%(''.join(map(str, split_name[:-1])))))
| 39.218085
| 158
| 0.606809
|
ef998d785a4046f92df56917e63bf9f47049b706
| 2,667
|
py
|
Python
|
awd/embedding_mul_test.py
|
nkcr/overlap-ml
|
58f55ae906f880b550f2fcf3fb56338678fd3d8f
|
[
"BSD-3-Clause"
] | 3
|
2019-11-14T22:43:41.000Z
|
2021-04-15T03:59:57.000Z
|
awd/embedding_mul_test.py
|
nkcr/overlap-ml
|
58f55ae906f880b550f2fcf3fb56338678fd3d8f
|
[
"BSD-3-Clause"
] | 7
|
2019-09-19T20:25:25.000Z
|
2022-02-10T00:31:46.000Z
|
awd/embedding_mul_test.py
|
nkcr/overlap-ml
|
58f55ae906f880b550f2fcf3fb56338678fd3d8f
|
[
"BSD-3-Clause"
] | 1
|
2020-05-28T23:34:43.000Z
|
2020-05-28T23:34:43.000Z
|
import unittest
import mock
from mock import Mock
import numpy as np
from embedding_mul import EmbeddingMul
import torch
import argparse
import logging
"""Simple test class for EmbeddingMul class
Author: Noémien Kocher
Date: Fall 2018
Run this class with `python3 embedding_mul_test.py` or
`python3 unittest -m embedding_mul_test.py`
"""
class EmbeddingMulTest(unittest.TestCase):
def test_forward(self):
"""Simple test using tensors"""
emsize = 3
num_token = 4
emm = EmbeddingMul(num_token, 'cpu')
# 0 1 2
# 3 4 5
# 6 7 8
# 9 10 11
weights = torch.tensor(range(emsize*num_token)
).view(num_token, emsize).float()
input = torch.tensor(
[[3.0, 2, 1], [0, 3, 2], [2, 2, 0]])
expected = torch.tensor([
[[9, 10, 11], [6, 7, 8], [3, 4, 5]],
[[0, 1, 2], [9, 10, 11], [6, 7, 8]],
[[6, 7, 8], [6, 7, 8], [0, 1, 2]]
]).float()
result = emm(input, weights, -1)
self.assertEqual(result.detach().numpy().tolist(),
expected.numpy().tolist())
def test_forward2(self):
"""Test using the original embedding module from pytorch"""
emsize = 3
num_token = 4
emm = EmbeddingMul(num_token, 'cpu')
# 0 1 2
# 3 4 5
# 6 7 8
# 9 10 11
weights = torch.tensor(range(emsize*num_token)
).view(num_token, emsize).float()
true_em = torch.nn.Embedding(num_token, emsize)
true_em.weight.data = weights
input = torch.tensor([[3.0, 2, 1], [0, 3, 2], [2, 2, 0]])
expected = true_em(input.long()).float()
result = emm(input, weights, -1)
self.assertEqual(result.detach().numpy().tolist(),
expected.detach().numpy().tolist())
def test_forward_grad(self):
num_tokens = 3
input = torch.tensor([[0, 1]])
weights = torch.tensor([[1.0, 2], [3, 4], [5, 6]])
emm = EmbeddingMul(num_tokens, 'cpu')
emm._requires_grad = True
X_hat = emm(input, weights, -1)
W = torch.tensor([[2.0], [3]])
X_hhat = torch.mm(X_hat[0], W)
y = torch.sum(X_hhat)
y.backward()
# Those results were computed by hand
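        # Derivation sketch: the one-hot input has shape (1, 2, 3),
        # X_hat = one_hot @ weights and y = sum(X_hat[0] @ W), so
        #   dy/d(one_hot) = (ones((2, 1)) @ W.T) @ weights.T
        #                 = [[2, 3], [2, 3]] @ [[1, 3, 5], [2, 4, 6]]
        #                 = [[8, 18, 28], [8, 18, 28]]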
expected = torch.tensor([
[[8.0, 18.0, 28.0], [8.0, 18.0, 28.0]]
])
result = emm.last_oh.grad
self.assertEqual(result.numpy().tolist(), expected.numpy().tolist())
if __name__ == "__main__":
unittest.main()
| 28.37234
| 76
| 0.523435
|
3829dc5e6917458072ee981267b831a394fae049
| 11,975
|
py
|
Python
|
service/base.py
|
gregbugaj/TextGenerator
|
d92431bdce31914a108417f7b00ed80b8f994298
|
[
"MIT"
] | null | null | null |
service/base.py
|
gregbugaj/TextGenerator
|
d92431bdce31914a108417f7b00ed80b8f994298
|
[
"MIT"
] | null | null | null |
service/base.py
|
gregbugaj/TextGenerator
|
d92431bdce31914a108417f7b00ed80b8f994298
|
[
"MIT"
] | null | null | null |
from lxml.etree import Element, SubElement, tostring
from utils import log
import shutil
import json
import os
import cv2
import numpy as np
def get_pic_dir(out_put_dir):
img_dir = os.path.join(out_put_dir, "img")
pic_dir = os.path.join(img_dir, "pic")
return pic_dir
def get_fragment_dir(out_put_dir):
img_dir = os.path.join(out_put_dir, "img")
fragment_dir = os.path.join(img_dir, "fragment")
return fragment_dir
def get_data_dir(out_put_dir):
data_dir = os.path.join(out_put_dir, "data")
return data_dir
def get_label_data_dir(out_put_dir):
label_data = os.path.join(out_put_dir, "label_data")
return label_data
def get_voc_data_dir(out_put_dir):
voc_data = os.path.join(out_put_dir, "voc_data")
return voc_data
def get_lsvt_data_dir(out_put_dir):
lsvt_data = os.path.join(out_put_dir, "lsvt_data")
return lsvt_data
def get_icidar_data_dir(out_put_dir):
icdr_data = os.path.join(out_put_dir, "icdar_data")
return icdr_data
def get_mask_data_dir(out_put_dir):
mask_data = os.path.join(out_put_dir, "mask_data")
return mask_data
def gen_all_pic():
"""
Generate all pictures
:return:
"""
from service import conf
gen_count = conf['base']['count_per_process']
index = 0
while index < gen_count:
log.info("-" * 20 + " generate new picture {index}/{gen_count}".format(index=index,
gen_count=gen_count) + "-" * 20)
dump_data = gen_pic()
# Write label
if dump_data:
add_label_data(dump_data)
# Write voc
if conf['base']['gen_voc']:
gen_voc(dump_data)
# index += 1
if conf['base']['gen_lsvt']:
gen_lsvt(dump_data)
# index += 1
#
if conf['base']['gen_icdar']:
gen_icdar(dump_data)
# index += 1
index += 1
def gen_pic():
from service import layout_provider
layout = layout_provider.gen_next_layout()
if not layout.is_empty():
dump_data = layout.dump()
# layout.show(draw_rect=True)
return dump_data
else:
log.info("-" * 10 + "layout is empty" + "-" * 10)
return None
def add_label_data(layout_data):
"""
Write label file
:return:
"""
from service import conf
out_put_dir = conf['provider']['layout']['out_put_dir']
label_data_dir = get_label_data_dir(out_put_dir=out_put_dir)
os.makedirs(label_data_dir, exist_ok=True)
label_file_path = os.path.join(label_data_dir, "label_{pid}.txt".format(pid=os.getpid()))
fragment_dir = get_fragment_dir(out_put_dir)
fragment_list = layout_data['fragment']
with open(label_file_path, 'a+') as f:
for fragment in fragment_list:
fragment_name = fragment['fragment_name']
fragment_img_src_path = os.path.join(fragment_dir, fragment_name)
fragment_img_dst_path = os.path.join(label_data_dir, fragment_name)
shutil.copy(fragment_img_src_path, fragment_img_dst_path)
txt = fragment['data']
img_name = fragment['fragment_name']
line = img_name + "^" + txt + os.linesep
f.write(line)
log.info("gen label data success!")
def gen_voc(layout_data):
"""
Generate voc data set
:return:
"""
from service import conf
out_put_dir = conf['provider']['layout']['out_put_dir']
voc_data_dir = get_voc_data_dir(out_put_dir=out_put_dir)
voc_img_dir = os.path.join(voc_data_dir, "voc_img")
voc_xml_dir = os.path.join(voc_data_dir, "voc_xml")
os.makedirs(voc_img_dir, exist_ok=True)
os.makedirs(voc_xml_dir, exist_ok=True)
pic_dir = get_pic_dir(out_put_dir)
pic_name = layout_data['pic_name']
pic_path = os.path.join(pic_dir, pic_name)
pic_save_to_path = os.path.join(voc_img_dir, pic_name)
# Copy picture
shutil.copy(pic_path, pic_save_to_path)
log.info("copy img success")
# Generate label text
_gen_voc(voc_xml_dir, data=layout_data)
log.info("voc data gen success")
def _gen_voc(save_dir, data, image_format='png'):
w = data['width']
h = data['height']
node_root = Element('annotation')
'''folder'''
node_folder = SubElement(node_root, 'folder')
node_folder.text = 'JPEGImages'
'''filename'''
node_filename = SubElement(node_root, 'filename')
node_filename.text = data['pic_name']
'''source'''
node_source = SubElement(node_root, 'source')
node_database = SubElement(node_source, 'database')
node_database.text = 'The VOC2007 Database'
node_annotation = SubElement(node_source, 'annotation')
node_annotation.text = 'PASCAL VOC2007'
node_image = SubElement(node_source, 'image')
node_image.text = 'flickr'
'''size'''
node_size = SubElement(node_root, 'size')
node_width = SubElement(node_size, 'width')
node_width.text = str(w)
node_height = SubElement(node_size, 'height')
node_height.text = str(h)
node_depth = SubElement(node_size, 'depth')
node_depth.text = '3'
'''segmented'''
node_segmented = SubElement(node_root, 'segmented')
node_segmented.text = '0'
'''object coord and label'''
for i, fragment in enumerate(data['fragment']):
node_object = SubElement(node_root, 'object')
node_name = SubElement(node_object, 'name')
node_name.text = fragment['orientation'][0] + "_text"
node_truncated = SubElement(node_object, 'truncated')
node_truncated.text = '0'
node_difficult = SubElement(node_object, 'difficult')
node_difficult.text = '0'
node_bndbox = SubElement(node_object, 'bndbox')
node_xmin = SubElement(node_bndbox, 'xmin')
node_xmin.text = str(fragment['box'][0])
node_ymin = SubElement(node_bndbox, 'ymin')
node_ymin.text = str(fragment['box'][1])
node_xmax = SubElement(node_bndbox, 'xmax')
node_xmax.text = str(fragment['box'][2])
node_ymax = SubElement(node_bndbox, 'ymax')
node_ymax.text = str(fragment['box'][3])
xml = tostring(node_root, pretty_print=True) # Format display, the newline of the newline
save_xml = os.path.join(save_dir, data['pic_name'].replace(image_format, 'xml'))
with open(save_xml, 'wb') as f:
f.write(xml)
def imwrite(path, img):
try:
print(path)
cv2.imwrite(path, img)
except Exception as ident:
print(ident)
def gen_icdar(layout_data):
"""
Generate ICDAR format
:param layout_data:
:return:
"""
# mask_img = Image.new('RGBA', self.bg_img.size, (255, 255, 255, 0))
# name = hashlib.sha1(mask_img.tobytes()).hexdigest()
# pic_name = "pic_" + name + ".png"
# pic_dir ='/tmp/pics2'
# # convert from RGBA->RGB
# background = Image.new('RGB', mask_img.size, (255,255,255))
# background.paste(mask_img, mask = mask_img.split()[3])
# inv_img = ImageOps.invert(background)
# pic_path = os.path.join(pic_dir, pic_name)
# with open(pic_path, 'wb') as f:
# inv_img.save(f, "png")
print ("Generating ICDAR dataformat")
from service import conf
out_put_dir = conf['provider']['layout']['out_put_dir']
icdr_data_dir = get_icidar_data_dir(out_put_dir=out_put_dir)
icdar_data_img_dir = os.path.join(icdr_data_dir)
os.makedirs(icdar_data_img_dir, exist_ok=True)
pic_dir = get_pic_dir(out_put_dir)
pic_name = layout_data['pic_name']
pic_path = os.path.join(pic_dir, pic_name)
pic_save_to_path = os.path.join(icdar_data_img_dir, pic_name)
# Copy picture
# shutil.copy(pic_path, pic_save_to_path)
im = cv2.imread(pic_path, cv2.IMREAD_GRAYSCALE)
bin = cv2.threshold(im, 128, 255, cv2.THRESH_BINARY)[1]
imwrite(pic_save_to_path, bin)
log.info("copy img success")
# Generate label text
# _gen_icdar(layout_data)
name = pic_name.split('.')[0]
icidar_label_path = os.path.join(icdr_data_dir, "gt_{name}.txt".format(name=name))
log.info("ICIDAR data gen success")
# _x0, _y0, _x1, _y1,_x2, _y2, _x3, _y3, txt
fragment_list = layout_data['fragment']
with open(icidar_label_path, 'w') as f:
for fragment in fragment_list:
txt = fragment['data']
rotate_box = fragment['rotate_box']
char_boxes = fragment['char_boxes']
contour = np.array(char_boxes, dtype=np.int)
# print('--' * 80)
# print (rotate_box)
# # print (char_boxes)
# print('--' * 25)
# for box in char_boxes:
# print(box)
min_x = np.amin(contour[:,:,0])
max_x = np.amax(contour[:,:,0])
min_y = np.amin(contour[:,:,1])
max_y = np.amax(contour[:,:,1])
w = max_x - min_x
h = max_y - min_y
_x0, _y0 = min_x, min_y
_x1, _y1 = min_x, min_y+h
            _x2, _y2 = min_x+w, min_y+h
            _x3, _y3 = min_x+w, min_y
print("{},{},{},{},{},{},{},{},{}".format(_x0, _y0, _x1, _y1, _x2, _y2, _x3, _y3, txt))
f.write("{},{},{},{},{},{},{},{},{}\n".format(_x0, _y0, _x1, _y1, _x2, _y2, _x3, _y3, txt))
continue
# vmin = np.amin(char_boxes, axis=1)
print('****')
print("{},{},{},{} : {}, {}".format(min_x, max_x, min_y, max_y, w, h))
# mar = cv2.minAreaRect(contour)
            raise SystemExit  # unreachable: the continue above skips this debug exit
# print (char_boxes)
# print("txt = {txt} : {box}".format(txt=txt, box=box))
_x0, _y0 = rotate_box[0][0], rotate_box[0][1]
_x1, _y1 = rotate_box[1][0], rotate_box[1][1]
_x2, _y2 = rotate_box[2][0], rotate_box[2][1]
_x3, _y3 = rotate_box[3][0], rotate_box[3][1]
f.write("{},{},{},{},{},{},{},{},{}\n".format(_x0, _y0, _x1, _y1, _x2, _y2, _x3, _y3, txt))
def gen_lsvt(layout_data):
"""
:param layout_data:
:return:
"""
from service import conf
out_put_dir = conf['provider']['layout']['out_put_dir']
lsvt_data_dir = get_lsvt_data_dir(out_put_dir=out_put_dir)
lsvt_data_img_dir = os.path.join(lsvt_data_dir, "train")
os.makedirs(lsvt_data_img_dir, exist_ok=True)
lsvt_json_path = os.path.join(lsvt_data_dir, "train_full_labels_{pid}.json".format(pid=os.getpid()))
pic_dir = get_pic_dir(out_put_dir)
pic_name = layout_data['pic_name']
pic_path = os.path.join(pic_dir, pic_name)
pic_save_to_path = os.path.join(lsvt_data_img_dir, pic_name)
# Copy picture
shutil.copy(pic_path, pic_save_to_path)
log.info("copy img success")
# Generate label text
_gen_lsvt(layout_data, lsvt_json_path)
log.info("voc data gen success")
def _gen_lsvt(layout_data, lsvt_json_path):
"""
:param layout_data:
:param lsvt_json_path:
:return:
"""
pic_name = layout_data['pic_name']
pic_name = pic_name.split('.')[0]
fragment_list = layout_data['fragment']
print(lsvt_json_path)
if not os.path.exists(lsvt_json_path):
fp = open(lsvt_json_path, "w")
fp.close()
with open(lsvt_json_path, 'r') as f:
text = f.read()
if text == '':
load_dict = dict()
else:
load_dict = json.loads(text)
with open(lsvt_json_path, 'w') as f:
lsvt_dict_list = list()
for fragment in fragment_list:
txt = fragment['data']
rotate_box = fragment['rotate_box']
char_boxes = fragment['char_boxes']
lsvt_info = dict(transcription=txt, points=rotate_box, char_boxes=char_boxes, illegibility=False)
lsvt_dict_list.append(lsvt_info)
load_dict.update({pic_name: lsvt_dict_list})
# f.seek(0)
json.dump(load_dict, f)
| 32.808219
| 111
| 0.616868
|
9d09cd5f47132a0da099aaf61287672e2b6c2666
| 7,710
|
py
|
Python
|
tests/base.py
|
helobinvn/jenkins-job-builder
|
c1d6ba6fbb7de6f9a98b35cfc1b84dcb0260f486
|
[
"Apache-2.0"
] | null | null | null |
tests/base.py
|
helobinvn/jenkins-job-builder
|
c1d6ba6fbb7de6f9a98b35cfc1b84dcb0260f486
|
[
"Apache-2.0"
] | 5
|
2015-03-12T20:21:46.000Z
|
2015-11-30T15:56:42.000Z
|
tests/base.py
|
helobinvn/jenkins-job-builder
|
c1d6ba6fbb7de6f9a98b35cfc1b84dcb0260f486
|
[
"Apache-2.0"
] | 4
|
2015-10-15T08:41:49.000Z
|
2019-01-04T07:22:36.000Z
|
#!/usr/bin/env python
#
# Joint copyright:
# - Copyright 2012,2013 Wikimedia Foundation
# - Copyright 2012,2013 Antoine "hashar" Musso
# - Copyright 2013 Arnaud Fabre
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import codecs
import logging
import os
import re
import doctest
import json
import operator
import testtools
from testtools.content import text_content
import xml.etree.ElementTree as XML
from six.moves import configparser
# This dance deals with the fact that we want unittest.mock if
# we're on Python 3.4 and later, and non-stdlib mock otherwise.
try:
from unittest import mock
except ImportError:
import mock # noqa
import jenkins_jobs.local_yaml as yaml
from jenkins_jobs.builder import XmlJob, YamlParser
from jenkins_jobs.modules import (project_flow,
project_matrix,
project_maven,
project_multijob)
def get_scenarios(fixtures_path, in_ext='yaml', out_ext='xml',
plugins_info_ext='plugins_info.yaml'):
"""Returns a list of scenarios, each scenario being described
by two parameters (yaml and xml filenames by default).
- content of the fixture output file (aka expected)
"""
scenarios = []
files = []
for dirpath, dirs, fs in os.walk(fixtures_path):
files.extend([os.path.join(dirpath, f) for f in fs])
input_files = [f for f in files if re.match(r'.*\.{0}$'.format(in_ext), f)]
for input_filename in input_files:
if input_filename.endswith(plugins_info_ext):
continue
output_candidate = re.sub(r'\.{0}$'.format(in_ext),
'.{0}'.format(out_ext), input_filename)
# Make sure the input file has a output counterpart
if output_candidate not in files:
raise Exception(
"No {0} file named '{1}' to match {2} file '{3}'"
.format(out_ext.upper(), output_candidate,
in_ext.upper(), input_filename))
plugins_info_candidate = re.sub(r'\.{0}$'.format(in_ext),
'.{0}'.format(plugins_info_ext),
input_filename)
if plugins_info_candidate not in files:
plugins_info_candidate = None
conf_candidate = re.sub(r'\.yaml$', '.conf', input_filename)
# If present, add the configuration file
if conf_candidate not in files:
conf_candidate = None
scenarios.append((input_filename, {
'in_filename': input_filename,
'out_filename': output_candidate,
'conf_filename': conf_candidate,
'plugins_info_filename': plugins_info_candidate,
}))
return scenarios
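# Illustrative example (paths are made up): a fixture pair fixtures/foo.yaml /
# fixtures/foo.xml with no matching .conf or plugins_info.yaml file yields
#   ('fixtures/foo.yaml',
#    {'in_filename': 'fixtures/foo.yaml',
#     'out_filename': 'fixtures/foo.xml',
#     'conf_filename': None,
#     'plugins_info_filename': None})
# which the test classes consume through their class-level `scenarios`
# attribute, one generated test case per fixture.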
class BaseTestCase(object):
scenarios = []
fixtures_path = None
# TestCase settings:
maxDiff = None # always dump text difference
longMessage = True # keep normal error message when providing our
logging.basicConfig()
def _read_utf8_content(self):
# Read XML content, assuming it is unicode encoded
xml_content = u"%s" % codecs.open(self.out_filename,
'r', 'utf-8').read()
return xml_content
def _read_yaml_content(self, filename):
with open(filename, 'r') as yaml_file:
yaml_content = yaml.load(yaml_file)
return yaml_content
def test_yaml_snippet(self):
if not self.out_filename or not self.in_filename:
return
if self.conf_filename is not None:
config = configparser.ConfigParser()
config.readfp(open(self.conf_filename))
else:
config = {}
expected_xml = self._read_utf8_content()
yaml_content = self._read_yaml_content(self.in_filename)
project = None
if ('project-type' in yaml_content):
if (yaml_content['project-type'] == "maven"):
project = project_maven.Maven(None)
elif (yaml_content['project-type'] == "matrix"):
project = project_matrix.Matrix(None)
elif (yaml_content['project-type'] == "flow"):
project = project_flow.Flow(None)
elif (yaml_content['project-type'] == "multijob"):
project = project_multijob.MultiJob(None)
if project:
xml_project = project.root_xml(yaml_content)
else:
xml_project = XML.Element('project')
plugins_info = None
if self.plugins_info_filename is not None:
plugins_info = self._read_yaml_content(self.plugins_info_filename)
self.addDetail("plugins-info-filename",
text_content(self.plugins_info_filename))
self.addDetail("plugins-info",
text_content(str(plugins_info)))
parser = YamlParser(config, plugins_info)
pub = self.klass(parser.registry)
# Generate the XML tree directly with modules/general
pub.gen_xml(parser, xml_project, yaml_content)
# Prettify generated XML
pretty_xml = XmlJob(xml_project, 'fixturejob').output().decode('utf-8')
self.assertThat(
pretty_xml,
testtools.matchers.DocTestMatches(expected_xml,
doctest.ELLIPSIS |
doctest.NORMALIZE_WHITESPACE |
doctest.REPORT_NDIFF)
)
class SingleJobTestCase(BaseTestCase):
def test_yaml_snippet(self):
expected_xml = self._read_utf8_content()
if self.conf_filename:
config = configparser.ConfigParser()
config.readfp(open(self.conf_filename))
else:
config = None
parser = YamlParser(config)
parser.parse(self.in_filename)
# Generate the XML tree
parser.expandYaml()
parser.generateXML()
parser.xml_jobs.sort(key=operator.attrgetter('name'))
# Prettify generated XML
pretty_xml = u"\n".join(job.output().decode('utf-8')
for job in parser.xml_jobs)
self.assertThat(
pretty_xml,
testtools.matchers.DocTestMatches(expected_xml,
doctest.ELLIPSIS |
doctest.NORMALIZE_WHITESPACE |
doctest.REPORT_NDIFF)
)
class JsonTestCase(BaseTestCase):
def test_yaml_snippet(self):
expected_json = self._read_utf8_content()
yaml_content = self._read_yaml_content(self.in_filename)
pretty_json = json.dumps(yaml_content, indent=4,
separators=(',', ': '))
self.assertThat(
pretty_json,
testtools.matchers.DocTestMatches(expected_json,
doctest.ELLIPSIS |
doctest.NORMALIZE_WHITESPACE |
doctest.REPORT_NDIFF)
)
| 35.860465
| 79
| 0.594553
|
ebdaf83ab179b6d328e4d2fac28ff19df82d82fd
| 8,521
|
py
|
Python
|
concept_explanations/ipca.py
|
gunpowder78/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1
|
2022-03-13T21:48:52.000Z
|
2022-03-13T21:48:52.000Z
|
concept_explanations/ipca.py
|
gunpowder78/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | null | null | null |
concept_explanations/ipca.py
|
gunpowder78/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1
|
2022-03-30T07:20:29.000Z
|
2022-03-30T07:20:29.000Z
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper file to run the discover concept algorithm in the toy dataset."""
# lint as: python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl import app
import keras
from keras.activations import sigmoid
import keras.backend as K
from keras.layers import Input
from keras.layers import Lambda
from keras.layers import Layer
from keras.models import Model
from keras.optimizers import Adam
from keras.optimizers import SGD
import numpy as np
from numpy import inf
from numpy.random import seed
from scipy.special import comb
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import set_random_seed
seed(0)
set_random_seed(0)
# global variables
init = keras.initializers.RandomUniform(minval=-0.5, maxval=0.5, seed=None)
batch_size = 128
step = 200
min_weight_arr = []
min_index_arr = []
concept_arr = {}
class Weight(Layer):
"""Simple Weight class."""
def __init__(self, dim, **kwargs):
self.dim = dim
super(Weight, self).__init__(**kwargs)
def build(self, input_shape):
# creates a trainable weight variable for this layer.
self.kernel = self.add_weight(
name='proj', shape=self.dim, initializer=init, trainable=True)
super(Weight, self).build(input_shape)
def call(self, x):
return self.kernel
def compute_output_shape(self, input_shape):
return self.dim
def reduce_var(x, axis=None, keepdims=False):
"""Returns variance of a tensor, alongside the specified axis."""
m = tf.reduce_mean(x, axis=axis, keep_dims=True)
devs_squared = tf.square(x - m)
return tf.reduce_mean(devs_squared, axis=axis, keep_dims=keepdims)
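# Sanity check (illustrative): reduce_var is the population variance, e.g.
# reduce_var(tf.constant([1., 2., 3., 4.])) evaluates to 1.25, matching
# np.var([1., 2., 3., 4.]).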
def concept_loss(cov, cov0, i, n_concept, lmbd=5.):
"""Creates a concept loss based on reconstruction loss."""
def loss(y_true, y_pred):
if i == 0:
return tf.reduce_mean(
tf.keras.backend.binary_crossentropy(y_true, y_pred))
else:
return tf.reduce_mean(
tf.keras.backend.binary_crossentropy(y_true, y_pred)
) + lmbd * K.mean(cov - np.eye(n_concept)) + lmbd * K.mean(cov0)
return loss
def concept_variance(cov, cov0, i, n_concept):
"""Creates a concept loss based on reconstruction variance."""
def loss(_, y_pred):
if i == 0:
return 1. * tf.reduce_mean(reduce_var(y_pred, axis=0))
else:
return 1. * tf.reduce_mean(reduce_var(y_pred, axis=0)) + 10. * K.mean(
cov - np.eye(n_concept)) + 10. * K.mean(cov0)
return loss
def ipca_model(concept_arraynew2,
dense2,
predict,
f_train,
y_train,
f_val,
y_val,
n_concept,
verbose=False,
epochs=20,
metric='binary_accuracy'):
"""Returns main function of ipca."""
pool1f_input = Input(shape=(f_train.shape[1],), name='pool1_input')
cluster_input = K.variable(concept_arraynew2)
proj_weight = Weight((f_train.shape[1], n_concept))(pool1f_input)
proj_weight_n = Lambda(lambda x: K.l2_normalize(x, axis=0))(proj_weight)
eye = K.eye(n_concept) * 1e-5
proj_recon_t = Lambda(
lambda x: K.dot(x, tf.linalg.inv(K.dot(K.transpose(x), x) + eye)))(
proj_weight)
proj_recon = Lambda(lambda x: K.dot(K.dot(x[0], x[2]), K.transpose(x[1])))(
[pool1f_input, proj_weight, proj_recon_t])
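  # i.e. proj_recon = f . P (P^T P + eps I)^{-1} P^T with f the input features,
  # P the learned concept directions and eps = 1e-5: the features projected
  # onto the span of the concept vectors before being fed back to the head.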
# proj_recon2 = Lambda(lambda x: x[0] - K.dot(K.dot(x[0],K.dot(x[1],
# tf.linalg.inv(K.dot(K.transpose(x[1]), x[1]) + 1e-5 * K.eye(n_concept)))),
# K.transpose(x[1])))([pool1f_input, proj_weight])
cov1 = Lambda(lambda x: K.mean(K.dot(x[0], x[1]), axis=1))(
[cluster_input, proj_weight_n])
cov0 = Lambda(lambda x: x - K.mean(x, axis=0, keepdims=True))(cov1)
cov0_abs = Lambda(lambda x: K.abs(K.l2_normalize(x, axis=0)))(cov0)
cov0_abs_flat = Lambda(lambda x: K.reshape(x, (-1, n_concept)))(cov0_abs)
cov = Lambda(lambda x: K.dot(K.transpose(x), x))(cov0_abs_flat)
fc2_pr = dense2(proj_recon)
softmax_pr = predict(fc2_pr)
# fc2_pr2 = dense2(proj_recon2)
# softmax_pr2 = predict(fc2_pr2)
finetuned_model_pr = Model(inputs=pool1f_input, outputs=softmax_pr)
# finetuned_model_pr2 = Model(inputs=pool1f_input, outputs=softmax_pr2)
# finetuned_model_pr2.compile(loss=
# concept_loss(cov,cov0_abs,0),
# optimizer = sgd(lr=0.),
# metrics=['binary_accuracy'])
finetuned_model_pr.layers[-1].activation = sigmoid
print(finetuned_model_pr.layers[-1].activation)
finetuned_model_pr.layers[-1].trainable = False
# finetuned_model_pr2.layers[-1].trainable = False
finetuned_model_pr.layers[-2].trainable = False
finetuned_model_pr.layers[-3].trainable = False
# finetuned_model_pr2.layers[-2].trainable = False
finetuned_model_pr.compile(
loss=concept_loss(cov, cov0_abs, 0, n_concept),
optimizer=Adam(lr=0.001),
metrics=[metric])
# finetuned_model_pr2.compile(
# loss=concept_variance(cov, cov0_abs, 0),
# optimizer=SGD(lr=0.0),
# metrics=['binary_accuracy'])
if verbose:
print(finetuned_model_pr.summary())
# finetuned_model_pr2.summary()
finetuned_model_pr.fit(
f_train,
y_train,
batch_size=50,
epochs=epochs,
validation_data=(f_val, y_val),
verbose=verbose)
finetuned_model_pr.layers[-1].trainable = False
finetuned_model_pr.layers[-2].trainable = False
finetuned_model_pr.layers[-3].trainable = False
finetuned_model_pr.compile(
loss=concept_loss(cov, cov0_abs, 1, n_concept),
optimizer=Adam(lr=0.001),
metrics=[metric])
return finetuned_model_pr # , finetuned_model_pr2
def ipca_model_shap(dense2, predict, n_concept, input_size, concept_matrix):
"""returns model that calculates of SHAP."""
pool1f_input = Input(shape=(input_size,), name='cluster1')
concept_mask = Input(shape=(n_concept,), name='mask')
proj_weight = Weight((input_size, n_concept))(pool1f_input)
concept_mask_r = Lambda(lambda x: K.mean(x, axis=0, keepdims=True))(
concept_mask)
proj_weight_m = Lambda(lambda x: x[0] * x[1])([proj_weight, concept_mask_r])
eye = K.eye(n_concept) * 1e-10
proj_recon_t = Lambda(
lambda x: K.dot(x, tf.linalg.inv(K.dot(K.transpose(x), x) + eye)))(
proj_weight_m)
proj_recon = Lambda(lambda x: K.dot(K.dot(x[0], x[2]), K.transpose(x[1])))(
[pool1f_input, proj_weight_m, proj_recon_t])
fc2_pr = dense2(proj_recon)
softmax_pr = predict(fc2_pr)
finetuned_model_pr = Model(
inputs=[pool1f_input, concept_mask], outputs=softmax_pr)
finetuned_model_pr.compile(
loss='categorical_crossentropy',
optimizer=SGD(lr=0.000),
metrics=['accuracy'])
finetuned_model_pr.summary()
finetuned_model_pr.layers[-7].set_weights([concept_matrix])
return finetuned_model_pr
def get_acc(binary_sample, f_val, y_val_logit, shap_model, verbose=False):
"""Returns accuracy."""
acc = shap_model.evaluate(
[f_val, np.tile(np.array(binary_sample), (f_val.shape[0], 1))],
y_val_logit,
verbose=verbose)[1]
return acc
def shap_kernel(n, k):
"""Returns kernel of shapley in KernelSHAP."""
return (n-1)*1.0/((n-k)*k*comb(n, k))
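# Worked example (illustrative): with n = 4 concepts and a coalition of size
# k = 1, comb(4, 1) = 4, so the weight is (4 - 1) / ((4 - 1) * 1 * 4) = 0.25.
# The empty and full coalitions (k = 0 or k = n) divide by zero and come out
# as inf under numpy; get_shap below zeroes them via `k[k == inf] = 0`.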
def get_shap(nc, f_val, y_val_logit, shap_model, full_acc, null_acc, n_concept):
"""Returns ConceptSHAP."""
inputs = list(itertools.product([0, 1], repeat=n_concept))
outputs = [(get_acc(k, f_val, y_val_logit, shap_model)-null_acc)/
(full_acc-null_acc) for k in inputs]
kernel = [shap_kernel(nc, np.sum(ii)) for ii in inputs]
x = np.array(inputs)
y = np.array(outputs)
k = np.array(kernel)
k[k == inf] = 0
xkx = np.matmul(np.matmul(x.transpose(), np.diag(k)), x)
xky = np.matmul(np.matmul(x.transpose(), np.diag(k)), y)
expl = np.matmul(np.linalg.pinv(xkx), xky)
return expl
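# Summary note (illustrative): the list comprehensions above score all
# 2**n_concept concept masks, rescale accuracy between the null and full
# models, and the closed form
#   expl = (X^T K X)^+ (X^T K y)
# is the kernel-weighted least-squares solve, so expl[i] estimates the Shapley
# contribution of concept i; exact enumeration is only practical for small
# n_concept.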
def main(_):
return
if __name__ == '__main__':
app.run(main)
| 33.547244
| 80
| 0.685366
|
2acef4966fd3250fb27f4a4d7579d498d736a0aa
| 2,124
|
py
|
Python
|
osna/cli.py
|
tapilab/elevate-osna-starter
|
7cf11c302cbd210a21d633d966a612adc2fc81af
|
[
"MIT"
] | 1
|
2019-07-24T17:02:59.000Z
|
2019-07-24T17:02:59.000Z
|
osna/cli.py
|
tapilab/elevate-osna-starter
|
7cf11c302cbd210a21d633d966a612adc2fc81af
|
[
"MIT"
] | 2
|
2021-11-15T17:48:49.000Z
|
2022-02-10T00:37:43.000Z
|
osna/cli.py
|
tapilab/elevate-osna-starter
|
7cf11c302cbd210a21d633d966a612adc2fc81af
|
[
"MIT"
] | 1
|
2019-08-02T15:10:21.000Z
|
2019-08-02T15:10:21.000Z
|
# -*- coding: utf-8 -*-
"""Console script for elevate_osna."""
import click
import glob
import pickle
import sys
import numpy as np
import pandas as pd
import re
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score, classification_report
from . import credentials_path, clf_path
@click.group()
def main(args=None):
"""Console script for osna."""
return 0
@main.command('web')
@click.option('-t', '--twitter-credentials', required=False, type=click.Path(exists=True), show_default=True, default=credentials_path, help='a json file of twitter tokens')
@click.option('-p', '--port', required=False, default=5000, show_default=True, help='port of web server')
def web(twitter_credentials, port):
from .app import app
app.run(host='0.0.0.0', debug=True, port=port)
@main.command('stats')
@click.argument('directory', type=click.Path(exists=True))
def stats(directory):
"""
Read all files in this directory and its subdirectories and print statistics.
"""
print('reading from %s' % directory)
# use glob to iterate all files matching desired pattern (e.g., .json files).
# recursively search subdirectories.
@main.command('train')
@click.argument('directory', type=click.Path(exists=True))
def train(directory):
"""
Train a classifier and save it.
"""
print('reading from %s' % directory)
# (1) Read the data...
#
# (2) Create classifier and vectorizer.
clf = LogisticRegression() # set best parameters
vec = CountVectorizer() # set best parameters
# (3) do cross-validation and print out validation metrics
# (classification_report)
# (4) Finally, train on ALL data one final time and
# train...
# save the classifier
pickle.dump((clf, vec), open(clf_path, 'wb'))
def make_features(df):
## Add your code to create features.
pass
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
| 29.5
| 173
| 0.704802
|
05892971a4c5d642b6e1f03454772c86653f4a26
| 3,985
|
py
|
Python
|
social/backends/exacttarget.py
|
SeanHayes/python-social-auth
|
4d70b23eb603c1d9753a7982bd7b3bab7cf18d48
|
[
"BSD-3-Clause"
] | 1
|
2016-02-06T03:08:09.000Z
|
2016-02-06T03:08:09.000Z
|
social/backends/exacttarget.py
|
SeanHayes/python-social-auth
|
4d70b23eb603c1d9753a7982bd7b3bab7cf18d48
|
[
"BSD-3-Clause"
] | null | null | null |
social/backends/exacttarget.py
|
SeanHayes/python-social-auth
|
4d70b23eb603c1d9753a7982bd7b3bab7cf18d48
|
[
"BSD-3-Clause"
] | 1
|
2016-09-02T13:08:22.000Z
|
2016-09-02T13:08:22.000Z
|
"""
ExactTarget OAuth support.
Support Authentication from IMH using JWT token and pre-shared key.
Requires package pyjwt
"""
from datetime import timedelta, datetime
import jwt
from social.exceptions import AuthFailed, AuthCanceled
from social.backends.oauth import BaseOAuth2
class ExactTargetOAuth2(BaseOAuth2):
name = 'exacttarget'
def get_user_details(self, response):
"""Use the email address of the user, suffixed by _et"""
user = response.get('token', {})\
.get('request', {})\
.get('user', {})
if 'email' in user:
user['username'] = user['email']
return user
def get_user_id(self, details, response):
"""
Create a user ID from the ET user ID. Uses details rather than the
default response, as only the token is available in response. details
is much richer:
{
'expiresIn': 1200,
'username': 'example@example.com',
'refreshToken': '1234567890abcdef',
'internalOauthToken': 'jwttoken.......',
'oauthToken': 'yetanothertoken',
'id': 123456,
'culture': 'en-US',
'timezone': {
'shortName': 'CST',
'offset': -6.0,
'dst': False,
'longName': '(GMT-06:00) Central Time (No Daylight Saving)'
},
'email': 'example@example.com'
}
"""
return '{0}'.format(details.get('id'))
def uses_redirect(self):
return False
def auth_url(self):
return None
def process_error(self, data):
if data.get('error'):
error = self.data.get('error_description') or self.data['error']
raise AuthFailed(self, error)
def do_auth(self, token, *args, **kwargs):
dummy, secret = self.get_key_and_secret()
try: # Decode the token, using the Application Signature from settings
decoded = jwt.decode(token, secret)
except jwt.DecodeError: # Wrong signature, fail authentication
raise AuthCanceled(self)
kwargs.update({'response': {'token': decoded}, 'backend': self})
return self.strategy.authenticate(*args, **kwargs)
def auth_complete(self, *args, **kwargs):
"""Completes login process, must return user instance"""
token = self.data.get('jwt', {})
if not token:
raise AuthFailed(self, 'Authentication Failed')
return self.do_auth(token, *args, **kwargs)
def extra_data(self, user, uid, response, details):
"""Load extra details from the JWT token"""
data = {
'id': details.get('id'),
'email': details.get('email'),
# OAuth token, for use with legacy SOAP API calls:
# http://bit.ly/13pRHfo
'internalOauthToken': details.get('internalOauthToken'),
# Token for use with the Application ClientID for the FUEL API
'oauthToken': details.get('oauthToken'),
# If the token has expired, use the FUEL API to get a new token see
# http://bit.ly/10v1K5l and http://bit.ly/11IbI6F - set legacy=1
'refreshToken': details.get('refreshToken'),
}
# The expiresIn value determines how long the tokens are valid for.
# Take a bit off, then convert to an int timestamp
expiresSeconds = details.get('expiresIn', 0) - 30
expires = datetime.utcnow() + timedelta(seconds=expiresSeconds)
data['expires'] = (expires - datetime(1970, 1, 1)).total_seconds()
if response.get('token'):
token = response['token']
org = token.get('request', {}).get('organization')
if org:
data['stack'] = org.get('stackKey')
data['enterpriseId'] = org.get('enterpriseId')
return data
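# Illustrative round-trip of the kind of token do_auth() expects (not part of
# the backend; the payload keys and shared secret are placeholders, and the
# two-argument jwt.decode() call mirrors the older pyjwt API used above):
#
#   payload = {'request': {'user': {'id': 123456, 'email': 'user@example.com'}}}
#   token = jwt.encode(payload, 'application-signature')
#   assert jwt.decode(token, 'application-signature') == payload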
| 37.952381
| 79
| 0.568883
|
e58dd4cf2ae3188658bf2829ba425e9f5c51f8ab
| 1,001
|
py
|
Python
|
Projects/Online Workouts/w3resource/Tuple/program-1.py
|
ivenpoker/Python-Projects
|
2975e1bd687ec8dbcc7a4842c13466cb86292679
|
[
"MIT"
] | 1
|
2019-09-23T15:51:45.000Z
|
2019-09-23T15:51:45.000Z
|
Projects/Online Workouts/w3resource/Tuple/program-1.py
|
ivenpoker/Python-Projects
|
2975e1bd687ec8dbcc7a4842c13466cb86292679
|
[
"MIT"
] | 5
|
2021-02-08T20:47:19.000Z
|
2022-03-12T00:35:44.000Z
|
Projects/Online Workouts/w3resource/Tuple/program-1.py
|
ivenpoker/Python-Projects
|
2975e1bd687ec8dbcc7a4842c13466cb86292679
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
############################################################################################
# #
# Program purpose: Creates a tuple. #
# Program Author : Happi Yvan <ivensteinpoker@gmail.com> #
# Creation Date : December 13, 2019 #
# #
############################################################################################
import random
def create_tuple() -> tuple:
return random.randint(0, 10), random.randint(0, 10)
if __name__ == "__main__":
temp_A = () # Create an empty tuple
temp_B = tuple() # Create tuple with built-in
print(f'Tuple A: {temp_A}\nTuple B: {temp_B}')
print(f'Random tuple: {create_tuple()}')
| 45.5
| 92
| 0.317682
|
f077f05f5b4f71dcb8ebc94f6f32efe0cf096be1
| 8,066
|
py
|
Python
|
Lib/test/test_docxmlrpc.py
|
livioso/cpython
|
077061a7b24917aaf31057885c69919c5a553c88
|
[
"PSF-2.0"
] | 854
|
2017-09-11T16:42:28.000Z
|
2022-03-27T14:17:09.000Z
|
Lib/test/test_docxmlrpc.py
|
livioso/cpython
|
077061a7b24917aaf31057885c69919c5a553c88
|
[
"PSF-2.0"
] | 164
|
2017-09-24T20:40:32.000Z
|
2021-10-30T01:35:05.000Z
|
Lib/test/test_docxmlrpc.py
|
livioso/cpython
|
077061a7b24917aaf31057885c69919c5a553c88
|
[
"PSF-2.0"
] | 73
|
2017-09-13T18:07:48.000Z
|
2022-03-17T13:02:29.000Z
|
from xmlrpc.server import DocXMLRPCServer
import http.client
import sys
import threading
from test import support
import unittest
def make_request_and_skipIf(condition, reason):
# If we skip the test, we have to make a request because
# the server created in setUp blocks expecting one to come in.
if not condition:
return lambda func: func
def decorator(func):
def make_request_and_skip(self):
self.client.request("GET", "/")
self.client.getresponse()
raise unittest.SkipTest(reason)
return make_request_and_skip
return decorator
def make_server():
serv = DocXMLRPCServer(("localhost", 0), logRequests=False)
try:
# Add some documentation
serv.set_server_title("DocXMLRPCServer Test Documentation")
serv.set_server_name("DocXMLRPCServer Test Docs")
serv.set_server_documentation(
"This is an XML-RPC server's documentation, but the server "
"can be used by POSTing to /RPC2. Try self.add, too.")
# Create and register classes and functions
class TestClass(object):
def test_method(self, arg):
"""Test method's docs. This method truly does very little."""
self.arg = arg
serv.register_introspection_functions()
serv.register_instance(TestClass())
def add(x, y):
"""Add two instances together. This follows PEP008, but has nothing
to do with RFC1952. Case should matter: pEp008 and rFC1952. Things
that start with http and ftp should be auto-linked, too:
http://google.com.
"""
return x + y
def annotation(x: int):
""" Use function annotations. """
return x
class ClassWithAnnotation:
def method_annotation(self, x: bytes):
return x.decode()
serv.register_function(add)
serv.register_function(lambda x, y: x-y)
serv.register_function(annotation)
serv.register_instance(ClassWithAnnotation())
return serv
except:
serv.server_close()
raise
class DocXMLRPCHTTPGETServer(unittest.TestCase):
def setUp(self):
# Enable server feedback
DocXMLRPCServer._send_traceback_header = True
self.serv = make_server()
self.thread = threading.Thread(target=self.serv.serve_forever)
self.thread.start()
PORT = self.serv.server_address[1]
self.client = http.client.HTTPConnection("localhost:%d" % PORT)
def tearDown(self):
self.client.close()
# Disable server feedback
DocXMLRPCServer._send_traceback_header = False
self.serv.shutdown()
self.thread.join()
self.serv.server_close()
def test_valid_get_response(self):
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader("Content-type"), "text/html")
# Server raises an exception if we don't start to read the data
response.read()
def test_invalid_get_response(self):
self.client.request("GET", "/spam")
response = self.client.getresponse()
self.assertEqual(response.status, 404)
self.assertEqual(response.getheader("Content-type"), "text/plain")
response.read()
def test_lambda(self):
"""Test that lambda functionality stays the same. The output produced
        currently is, I suspect, invalid because of the unencoded brackets in the
HTML, "<lambda>".
The subtraction lambda method is tested.
"""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertIn((b'<dl><dt><a name="-<lambda>"><strong>'
b'<lambda></strong></a>(x, y)</dt></dl>'),
response.read())
@make_request_and_skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_autolinking(self):
"""Test that the server correctly automatically wraps references to
PEPS and RFCs with links, and that it linkifies text starting with
http or ftp protocol prefixes.
The documentation for the "add" method contains the test material.
"""
self.client.request("GET", "/")
response = self.client.getresponse().read()
self.assertIn(
(b'<dl><dt><a name="-add"><strong>add</strong></a>(x, y)</dt><dd>'
b'<tt>Add two instances together. This '
b'follows <a href="http://www.python.org/dev/peps/pep-0008/">'
b'PEP008</a>, but has nothing<br>\nto do '
b'with <a href="http://www.rfc-editor.org/rfc/rfc1952.txt">'
b'RFC1952</a>. Case should matter: pEp008 '
b'and rFC1952. Things<br>\nthat start '
b'with http and ftp should be '
b'auto-linked, too:<br>\n<a href="http://google.com">'
b'http://google.com</a>.</tt></dd></dl>'), response)
@make_request_and_skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_system_methods(self):
"""Test the presence of three consecutive system.* methods.
This also tests their use of parameter type recognition and the
systems related to that process.
"""
self.client.request("GET", "/")
response = self.client.getresponse().read()
self.assertIn(
(b'<dl><dt><a name="-system.methodHelp"><strong>system.methodHelp'
b'</strong></a>(method_name)</dt><dd><tt><a href="#-system.method'
b'Help">system.methodHelp</a>(\'add\') => "Adds '
b'two integers together"<br>\n <br>\nReturns a'
b' string containing documentation for '
b'the specified method.</tt></dd></dl>\n<dl><dt><a name'
b'="-system.methodSignature"><strong>system.methodSignature</strong>'
b'</a>(method_name)</dt><dd><tt><a href="#-system.methodSignature">'
b'system.methodSignature</a>(\'add\') => [double, '
b'int, int]<br>\n <br>\nReturns a list '
b'describing the signature of the method.'
b' In the<br>\nabove example, the add '
b'method takes two integers as arguments'
b'<br>\nand returns a double result.<br>\n '
b'<br>\nThis server does NOT support system'
b'.methodSignature.</tt></dd></dl>'), response)
def test_autolink_dotted_methods(self):
"""Test that selfdot values are made strong automatically in the
documentation."""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertIn(b"""Try self.<strong>add</strong>, too.""",
response.read())
def test_annotations(self):
""" Test that annotations works as expected """
self.client.request("GET", "/")
response = self.client.getresponse()
docstring = (b'' if sys.flags.optimize >= 2 else
b'<dd><tt>Use function annotations.</tt></dd>')
self.assertIn(
(b'<dl><dt><a name="-annotation"><strong>annotation</strong></a>'
b'(x: int)</dt>' + docstring + b'</dl>\n'
b'<dl><dt><a name="-method_annotation"><strong>'
b'method_annotation</strong></a>(x: bytes)</dt></dl>'),
response.read())
if __name__ == '__main__':
unittest.main()
| 40.532663
| 82
| 0.608976
|
8709bd76a3d888de0d87dc9379a2c2397c14ae37
| 10,250
|
py
|
Python
|
simfempy/examples/paraview/drivencavity.py
|
beckerrh/simfempy
|
10bf30f330d921bffaa8572bbd73286da5971905
|
[
"MIT"
] | null | null | null |
simfempy/examples/paraview/drivencavity.py
|
beckerrh/simfempy
|
10bf30f330d921bffaa8572bbd73286da5971905
|
[
"MIT"
] | 3
|
2018-12-18T16:36:52.000Z
|
2019-01-29T18:34:55.000Z
|
simfempy/examples/paraview/drivencavity.py
|
beckerrh/fempy
|
dd7214ea7f6d81a5200fcb4a91f07a5cd3322e9e
|
[
"MIT"
] | 1
|
2021-06-09T15:49:51.000Z
|
2021-06-09T15:49:51.000Z
|
# state file generated using paraview version 5.9.1
#### import the simple module from the paraview
from paraview.simple import *
#### disable automatic camera reset on 'Show'
paraview.simple._DisableFirstRenderCameraReset()
# ----------------------------------------------------------------
# setup views used in the visualization
# ----------------------------------------------------------------
# get the material library
materialLibrary1 = GetMaterialLibrary()
# Create a new 'Render View'
renderView1 = CreateView('RenderView')
renderView1.ViewSize = [2242, 1496]
renderView1.AxesGrid = 'GridAxes3DActor'
renderView1.CenterOfRotation = [0.5, 0.5, 0.5]
renderView1.StereoType = 'Crystal Eyes'
renderView1.CameraPosition = [-2.1802040995430074, -1.3689428935375778, 1.2209097503810526]
renderView1.CameraFocalPoint = [0.4999999999999995, 0.500000000000001, 0.5000000000000009]
renderView1.CameraViewUp = [0.10119623761043704, 0.22838002365876958, 0.9682984489748561]
renderView1.CameraFocalDisk = 1.0
renderView1.CameraParallelScale = 0.8660254037844386
renderView1.BackEnd = 'OSPRay raycaster'
renderView1.OSPRayMaterialLibrary = materialLibrary1
SetActiveView(None)
# ----------------------------------------------------------------
# setup view layouts
# ----------------------------------------------------------------
# create new layout object 'Layout #1'
layout1 = CreateLayout(name='Layout #1')
layout1.AssignView(0, renderView1)
layout1.SetSize(2242, 1496)
# ----------------------------------------------------------------
# restore active view
SetActiveView(renderView1)
# ----------------------------------------------------------------
# ----------------------------------------------------------------
# setup the data processing pipelines
# ----------------------------------------------------------------
# create a new 'XML Unstructured Grid Reader'
drivenCavity3dvtu = XMLUnstructuredGridReader(registrationName='drivenCavity3d.vtu', FileName=['/Users/becker/Programs/simfempy/drivenCavity3d.vtu'])
drivenCavity3dvtu.CellArrayStatus = ['P']
drivenCavity3dvtu.PointArrayStatus = ['V_0', 'V_1', 'V_2']
drivenCavity3dvtu.TimeArray = 'None'
# create a new 'Calculator'
calculator1 = Calculator(registrationName='Calculator1', Input=drivenCavity3dvtu)
calculator1.ResultArrayName = 'V'
calculator1.Function = 'V_0*iHat+V_1*jHat+V_2*kHat'
# create a new 'Slice'
slice1 = Slice(registrationName='Slice1', Input=calculator1)
slice1.SliceType = 'Plane'
slice1.HyperTreeGridSlicer = 'Plane'
slice1.SliceOffsetValues = [0.0]
# init the 'Plane' selected for 'SliceType'
slice1.SliceType.Origin = [0.5, 0.21791778158879116, 0.5]
slice1.SliceType.Normal = [0.0, 1.0, 0.0]
# init the 'Plane' selected for 'HyperTreeGridSlicer'
slice1.HyperTreeGridSlicer.Origin = [0.5, 0.5, 0.5]
# create a new 'Glyph'
glyph1 = Glyph(registrationName='Glyph1', Input=slice1,
GlyphType='Arrow')
glyph1.OrientationArray = ['POINTS', 'V']
glyph1.ScaleArray = ['POINTS', 'V']
glyph1.ScaleFactor = 0.3
glyph1.GlyphTransform = 'Transform2'
# ----------------------------------------------------------------
# setup the visualization in view 'renderView1'
# ----------------------------------------------------------------
# show data from drivenCavity3dvtu
drivenCavity3dvtuDisplay = Show(drivenCavity3dvtu, renderView1, 'UnstructuredGridRepresentation')
# get color transfer function/color map for 'V_2'
v_2LUT = GetColorTransferFunction('V_2')
v_2LUT.RGBPoints = [-0.3055438635809993, 0.231373, 0.298039, 0.752941, -0.034780728504573255, 0.865003, 0.865003, 0.865003, 0.23598240657185277, 0.705882, 0.0156863, 0.14902]
v_2LUT.ScalarRangeInitialized = 1.0
# get opacity transfer function/opacity map for 'V_2'
v_2PWF = GetOpacityTransferFunction('V_2')
v_2PWF.Points = [-0.3055438635809993, 0.0, 0.5, 0.0, 0.23598240657185277, 1.0, 0.5, 0.0]
v_2PWF.ScalarRangeInitialized = 1
# trace defaults for the display properties.
drivenCavity3dvtuDisplay.Representation = 'Outline'
drivenCavity3dvtuDisplay.ColorArrayName = ['POINTS', 'V_2']
drivenCavity3dvtuDisplay.LookupTable = v_2LUT
drivenCavity3dvtuDisplay.SelectTCoordArray = 'None'
drivenCavity3dvtuDisplay.SelectNormalArray = 'None'
drivenCavity3dvtuDisplay.SelectTangentArray = 'None'
drivenCavity3dvtuDisplay.OSPRayScaleArray = 'V_0'
drivenCavity3dvtuDisplay.OSPRayScaleFunction = 'PiecewiseFunction'
drivenCavity3dvtuDisplay.SelectOrientationVectors = 'None'
drivenCavity3dvtuDisplay.ScaleFactor = 0.1
drivenCavity3dvtuDisplay.SelectScaleArray = 'None'
drivenCavity3dvtuDisplay.GlyphType = 'Arrow'
drivenCavity3dvtuDisplay.GlyphTableIndexArray = 'None'
drivenCavity3dvtuDisplay.GaussianRadius = 0.005
drivenCavity3dvtuDisplay.SetScaleArray = ['POINTS', 'V_0']
drivenCavity3dvtuDisplay.ScaleTransferFunction = 'PiecewiseFunction'
drivenCavity3dvtuDisplay.OpacityArray = ['POINTS', 'V_0']
drivenCavity3dvtuDisplay.OpacityTransferFunction = 'PiecewiseFunction'
drivenCavity3dvtuDisplay.DataAxesGrid = 'GridAxesRepresentation'
drivenCavity3dvtuDisplay.PolarAxes = 'PolarAxesRepresentation'
drivenCavity3dvtuDisplay.ScalarOpacityFunction = v_2PWF
drivenCavity3dvtuDisplay.ScalarOpacityUnitDistance = 0.10413061241559457
drivenCavity3dvtuDisplay.OpacityArrayName = ['POINTS', 'V_0']
# init the 'PiecewiseFunction' selected for 'ScaleTransferFunction'
drivenCavity3dvtuDisplay.ScaleTransferFunction.Points = [-0.15607901124981832, 0.0, 0.5, 0.0, 1.2248496966186877, 1.0, 0.5, 0.0]
# init the 'PiecewiseFunction' selected for 'OpacityTransferFunction'
drivenCavity3dvtuDisplay.OpacityTransferFunction.Points = [-0.15607901124981832, 0.0, 0.5, 0.0, 1.2248496966186877, 1.0, 0.5, 0.0]
# show data from slice1
slice1Display = Show(slice1, renderView1, 'GeometryRepresentation')
# get color transfer function/color map for 'P'
pLUT = GetColorTransferFunction('P')
pLUT.RGBPoints = [-0.25406850868022934, 0.231373, 0.298039, 0.752941, 0.011884899410226968, 0.865003, 0.865003, 0.865003, 0.2778383075006833, 0.705882, 0.0156863, 0.14902]
pLUT.ScalarRangeInitialized = 1.0
# trace defaults for the display properties.
slice1Display.Representation = 'Surface'
slice1Display.ColorArrayName = ['CELLS', 'P']
slice1Display.LookupTable = pLUT
slice1Display.SelectTCoordArray = 'None'
slice1Display.SelectNormalArray = 'None'
slice1Display.SelectTangentArray = 'None'
slice1Display.OSPRayScaleArray = 'Result'
slice1Display.OSPRayScaleFunction = 'PiecewiseFunction'
slice1Display.SelectOrientationVectors = 'Result'
slice1Display.ScaleFactor = 0.1
slice1Display.SelectScaleArray = 'None'
slice1Display.GlyphType = 'Arrow'
slice1Display.GlyphTableIndexArray = 'None'
slice1Display.GaussianRadius = 0.005
slice1Display.SetScaleArray = ['POINTS', 'Result']
slice1Display.ScaleTransferFunction = 'PiecewiseFunction'
slice1Display.OpacityArray = ['POINTS', 'Result']
slice1Display.OpacityTransferFunction = 'PiecewiseFunction'
slice1Display.DataAxesGrid = 'GridAxesRepresentation'
slice1Display.PolarAxes = 'PolarAxesRepresentation'
# init the 'PiecewiseFunction' selected for 'ScaleTransferFunction'
slice1Display.ScaleTransferFunction.Points = [-0.12639338035797343, 0.0, 0.5, 0.0, 1.2172975418333962, 1.0, 0.5, 0.0]
# init the 'PiecewiseFunction' selected for 'OpacityTransferFunction'
slice1Display.OpacityTransferFunction.Points = [-0.12639338035797343, 0.0, 0.5, 0.0, 1.2172975418333962, 1.0, 0.5, 0.0]
# show data from glyph1
glyph1Display = Show(glyph1, renderView1, 'GeometryRepresentation')
# trace defaults for the display properties.
glyph1Display.Representation = 'Surface'
glyph1Display.ColorArrayName = [None, '']
glyph1Display.SelectTCoordArray = 'None'
glyph1Display.SelectNormalArray = 'None'
glyph1Display.SelectTangentArray = 'None'
glyph1Display.OSPRayScaleArray = 'Result'
glyph1Display.OSPRayScaleFunction = 'PiecewiseFunction'
glyph1Display.SelectOrientationVectors = 'Result'
glyph1Display.ScaleFactor = 0.11089509502053262
glyph1Display.SelectScaleArray = 'None'
glyph1Display.GlyphType = 'Arrow'
glyph1Display.GlyphTableIndexArray = 'None'
glyph1Display.GaussianRadius = 0.0055447547510266305
glyph1Display.SetScaleArray = ['POINTS', 'Result']
glyph1Display.ScaleTransferFunction = 'PiecewiseFunction'
glyph1Display.OpacityArray = ['POINTS', 'Result']
glyph1Display.OpacityTransferFunction = 'PiecewiseFunction'
glyph1Display.DataAxesGrid = 'GridAxesRepresentation'
glyph1Display.PolarAxes = 'PolarAxesRepresentation'
# init the 'PiecewiseFunction' selected for 'ScaleTransferFunction'
glyph1Display.ScaleTransferFunction.Points = [-0.13732019728357697, 0.0, 0.5, 0.0, 1.0630968654724513, 1.0, 0.5, 0.0]
# init the 'PiecewiseFunction' selected for 'OpacityTransferFunction'
glyph1Display.OpacityTransferFunction.Points = [-0.13732019728357697, 0.0, 0.5, 0.0, 1.0630968654724513, 1.0, 0.5, 0.0]
# setup the color legend parameters for each legend in this view
# get color legend/bar for pLUT in view renderView1
pLUTColorBar = GetScalarBar(pLUT, renderView1)
pLUTColorBar.Title = 'P'
pLUTColorBar.ComponentTitle = ''
# set color bar visibility
pLUTColorBar.Visibility = 1
# get color legend/bar for v_2LUT in view renderView1
v_2LUTColorBar = GetScalarBar(v_2LUT, renderView1)
v_2LUTColorBar.WindowLocation = 'UpperRightCorner'
v_2LUTColorBar.Title = 'V_2'
v_2LUTColorBar.ComponentTitle = ''
# set color bar visibility
v_2LUTColorBar.Visibility = 1
# show color legend
drivenCavity3dvtuDisplay.SetScalarBarVisibility(renderView1, True)
# show color legend
slice1Display.SetScalarBarVisibility(renderView1, True)
# ----------------------------------------------------------------
# setup color maps and opacity maps used in the visualization
# note: the Get..() functions create a new object, if needed
# ----------------------------------------------------------------
# get opacity transfer function/opacity map for 'P'
pPWF = GetOpacityTransferFunction('P')
pPWF.Points = [-0.25406850868022934, 0.0, 0.5, 0.0, 0.2778383075006833, 1.0, 0.5, 0.0]
pPWF.ScalarRangeInitialized = 1
# ----------------------------------------------------------------
# restore active source
SetActiveSource(slice1)
# ----------------------------------------------------------------
if __name__ == '__main__':
# generate extracts
SaveExtracts(ExtractsOutputDirectory='extracts')
| 43.067227
| 174
| 0.728488
|
4a2adf8d6fc8370552eacd2a76f036bd8f3b50d9
| 4,497
|
py
|
Python
|
indad/utils.py
|
theerawatramchuen/ind_knn_ad
|
44720394c69c796d531d32e9a757c8526eef45cb
|
[
"MIT"
] | 75
|
2021-07-09T09:50:00.000Z
|
2022-03-31T11:05:34.000Z
|
indad/utils.py
|
theerawatramchuen/ind_knn_ad
|
44720394c69c796d531d32e9a757c8526eef45cb
|
[
"MIT"
] | 24
|
2021-07-22T10:04:28.000Z
|
2022-03-11T00:07:30.000Z
|
indad/utils.py
|
theerawatramchuen/ind_knn_ad
|
44720394c69c796d531d32e9a757c8526eef45cb
|
[
"MIT"
] | 28
|
2021-07-24T18:03:25.000Z
|
2022-02-09T10:41:39.000Z
|
import sys
import yaml
from tqdm import tqdm
from datetime import datetime
import torch
from torch import tensor
from torchvision import transforms
from PIL import ImageFilter
from sklearn import random_projection
TQDM_PARAMS = {
"file" : sys.stdout,
"bar_format" : " {l_bar}{bar:10}{r_bar}{bar:-10b}",
}
def get_tqdm_params():
return TQDM_PARAMS
class GaussianBlur:
def __init__(self, radius : int = 4):
self.radius = radius
self.unload = transforms.ToPILImage()
self.load = transforms.ToTensor()
        self.blur_kernel = ImageFilter.GaussianBlur(radius=radius)
def __call__(self, img):
map_max = img.max()
final_map = self.load(
self.unload(img[0]/map_max).filter(self.blur_kernel)
)*map_max
return final_map
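# --- Added usage sketch (not part of the original file above). It shows how the
# GaussianBlur callable is typically applied to a single-channel anomaly map of shape
# (1, H, W); the tensor size and values below are illustrative assumptions only.
def _demo_gaussian_blur():
    dummy_map = torch.rand(1, 64, 64)          # fake anomaly map: (channels, H, W)
    smoothed = GaussianBlur(radius=4)(dummy_map)
    print(smoothed.shape)                      # same spatial size, values smoothed
# --- end of added sketch ---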
def get_coreset_idx_randomp(
z_lib : tensor,
n : int = 1000,
eps : float = 0.90,
float16 : bool = True,
force_cpu : bool = False,
) -> tensor:
"""Returns n coreset idx for given z_lib.
Performance on AMD3700, 32GB RAM, RTX3080 (10GB):
CPU: 40-60 it/s, GPU: 500+ it/s (float32), 1500+ it/s (float16)
Args:
z_lib: (n, d) tensor of patches.
n: Number of patches to select.
        eps: Aggression of the sparse random projection.
float16: Cast all to float16, saves memory and is a bit faster (on GPU).
force_cpu: Force cpu, useful in case of GPU OOM.
Returns:
coreset indices
"""
print(f" Fitting random projections. Start dim = {z_lib.shape}.")
try:
transformer = random_projection.SparseRandomProjection(eps=eps)
z_lib = torch.tensor(transformer.fit_transform(z_lib))
print(f" DONE. Transformed dim = {z_lib.shape}.")
except ValueError:
print( " Error: could not project vectors. Please increase `eps`.")
select_idx = 0
last_item = z_lib[select_idx:select_idx+1]
coreset_idx = [torch.tensor(select_idx)]
min_distances = torch.linalg.norm(z_lib-last_item, dim=1, keepdims=True)
    # The line below is not faster than linalg.norm, although I'm keeping it in for
# future reference.
# min_distances = torch.sum(torch.pow(z_lib-last_item, 2), dim=1, keepdims=True)
if float16:
last_item = last_item.half()
z_lib = z_lib.half()
min_distances = min_distances.half()
if torch.cuda.is_available() and not force_cpu:
last_item = last_item.to("cuda")
z_lib = z_lib.to("cuda")
min_distances = min_distances.to("cuda")
for _ in tqdm(range(n-1), **TQDM_PARAMS):
distances = torch.linalg.norm(z_lib-last_item, dim=1, keepdims=True) # broadcasting step
# distances = torch.sum(torch.pow(z_lib-last_item, 2), dim=1, keepdims=True) # broadcasting step
min_distances = torch.minimum(distances, min_distances) # iterative step
select_idx = torch.argmax(min_distances) # selection step
# bookkeeping
last_item = z_lib[select_idx:select_idx+1]
min_distances[select_idx] = 0
coreset_idx.append(select_idx.to("cpu"))
return torch.stack(coreset_idx)
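# --- Added usage sketch (not part of the original file above). A minimal example of
# calling get_coreset_idx_randomp() on a random feature bank; in practice z_lib holds
# (n_patches, d) patch embeddings. All sizes below are illustrative assumptions.
def _demo_coreset_selection():
    z_lib = torch.rand(5000, 1024)             # dummy patch-feature bank
    idx = get_coreset_idx_randomp(z_lib, n=100, eps=0.9, float16=False, force_cpu=True)
    coreset = z_lib[idx]                       # (100, 1024) greedily selected subsample
    print(coreset.shape)
# --- end of added sketch ---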
def print_and_export_results(results : dict, method : str):
"""Writes results to .yaml and serialized results to .txt."""
print("\n ╭────────────────────────────╮")
print( " │ Results summary │")
print( " ┢━━━━━━━━━━━━━━━━━━━━━━━━━━━━┪")
print( f" ┃ average image rocauc: {results['average image rocauc']:.2f} ┃")
print( f" ┃ average pixel rocauc: {results['average pixel rocauc']:.2f} ┃")
print( " ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛\n")
# write
timestamp = datetime.now().strftime("%d_%m_%Y_%H_%M_%S")
name = f"{method}_{timestamp}"
results_yaml_path = f"./results/{name}.yml"
scoreboard_path = f"./results/{name}.txt"
with open(results_yaml_path, "w") as outfile:
yaml.safe_dump(results, outfile, default_flow_style=False)
with open(scoreboard_path, "w") as outfile:
outfile.write(serialize_results(results["per_class_results"]))
print(f" Results written to {results_yaml_path}")
def serialize_results(results : dict) -> str:
"""Serialize a results dict into something usable in markdown."""
n_first_col = 20
ans = []
for k, v in results.items():
s = k + " "*(n_first_col-len(k))
s = s + f"| {v[0]*100:.1f} | {v[1]*100:.1f} |"
ans.append(s)
return "\n".join(ans)
| 34.592308
| 104
| 0.624194
|
82939abff9d6594ebd4418defc5ed242f05ed765
| 9,660
|
py
|
Python
|
doc/source/conf.py
|
andrejmuhic/cvxpy
|
4877fe9e513cc0f9943d452740ccfb84552c2849
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2022-03-24T19:39:09.000Z
|
2022-03-24T19:39:09.000Z
|
doc/source/conf.py
|
Shulu-Chen/cvxpy
|
d156d1e6efb562470312aef3468ab938bab32e6e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
doc/source/conf.py
|
Shulu-Chen/cvxpy
|
d156d1e6efb562470312aef3468ab938bab32e6e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# CVXPY documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 27 20:47:07 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# To import CVXPY:
sys.path.insert(0, os.path.abspath('../..'))
# To import sphinx extensions we've put in the repository:
sys.path.insert(0, os.path.abspath('../sphinxext'))
sys.path.append('/home/docs/checkouts/readthedocs.org/user_builds/cvxpy/checkouts/1.0/cvxpy')
__version__ = "1.1.5"
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon']
# To suppress autodoc/numpydoc warning.
# http://stackoverflow.com/questions/12206334/sphinx-autosummary-toctree-contains-reference-to-nonexisting-document-warnings
numpydoc_show_class_members = False
# Since readthedocs.org has trouble compiling `cvxopt`, autodoc fails
# whenever it tries to import a CVXPY module to document it.
# The following code replaces the relevant cvxopt modules with
# a dummy namespace, allowing autodoc to work.
class Mocked(object):
def __setattr__(self, name, value):
self.__dict__[name] = value
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
else:
return None
MOCK_MODULES = ['cvxopt', 'cvxopt.base', 'cvxopt.misc']
sys.modules.update((mod_name, Mocked()) for mod_name in MOCK_MODULES)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CVXPY'
copyright = u'2020, The CVXPY authors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join(__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import alabaster
table_styling_embed_css = False
html_theme_path = [alabaster.get_path(), "../themes"]
extensions += ['alabaster']
html_theme = 'cvxpy_alabaster'
html_sidebars = {
'**': [
'about.html', 'navigation.html', 'searchbox.html',
]
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'github_user': 'cvxgrp',
'github_repo': 'cvxpy',
'github_banner': True,
'github_type': 'star',
'travis_button': False,
'analytics_id': 'UA-50248335-1',
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ['../themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cvxpydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'cvxpy.tex', u'CVXPY Documentation',
u'Steven Diamond, Eric Chu, Stephen Boyd', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cvxpy', u'CVXPY Documentation',
[u'Steven Diamond, Eric Chu, Stephen Boyd'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'cvxpy', u'CVXPY Documentation',
u'Steven Diamond, Eric Chu, Stephen Boyd', 'CVXPY', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| 32.2
| 124
| 0.708075
|
5d25555d2c2e2bb724247d8455a9bf9cb28cff3c
| 1,777
|
py
|
Python
|
src/falconpy/_uber_default_preference.py
|
fcremer/falconpy
|
eb2b000f1a73553e355d7b685d2e1006a4975030
|
[
"Unlicense"
] | null | null | null |
src/falconpy/_uber_default_preference.py
|
fcremer/falconpy
|
eb2b000f1a73553e355d7b685d2e1006a4975030
|
[
"Unlicense"
] | null | null | null |
src/falconpy/_uber_default_preference.py
|
fcremer/falconpy
|
eb2b000f1a73553e355d7b685d2e1006a4975030
|
[
"Unlicense"
] | null | null | null |
"""Internal constant library.
_______ __ _______ __ __ __
| _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----.
|. 1___| _| _ | | | | _ | 1___| _| _| | <| -__|
|. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____|
|: 1 | |: 1 |
|::.. . | CROWDSTRIKE FALCON |::.. . | FalconPy
`-------' `-------'
OAuth2 API - Customer SDK
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
"""
PREFER_NONETYPE = [
"report_executions_download_get", "report_executions_download.get"
]
| 43.341463
| 71
| 0.676984
|
4cdc4d852d1ae917fd481a940803d7eed457dfec
| 2,010
|
py
|
Python
|
handyspark/sql/schema.py
|
FoundryAI/handyspark
|
bf23522eb0794cce8af2f036347b34df1a2c7b09
|
[
"MIT"
] | null | null | null |
handyspark/sql/schema.py
|
FoundryAI/handyspark
|
bf23522eb0794cce8af2f036347b34df1a2c7b09
|
[
"MIT"
] | null | null | null |
handyspark/sql/schema.py
|
FoundryAI/handyspark
|
bf23522eb0794cce8af2f036347b34df1a2c7b09
|
[
"MIT"
] | null | null | null |
import numpy as np
import datetime
from operator import itemgetter
from pyspark.sql.types import StructType
_mapping = {str: 'string',
bool: 'boolean',
int: 'integer',
float: 'float',
datetime.date: 'date',
datetime.datetime: 'timestamp',
np.bool: 'boolean',
np.int8: 'byte',
np.int16: 'short',
np.int32: 'integer',
np.int64: 'long',
np.float32: 'float',
np.float64: 'double',
np.ndarray: 'array',
object: 'string',
list: 'array',
tuple: 'array',
dict: 'map'}
def generate_schema(columns, nullable_columns='all'):
"""
Parameters
----------
columns: dict of column names (keys) and types (values)
    nullable_columns: list of nullable columns, optional, default is 'all'
Returns
-------
schema: StructType
Spark DataFrame schema corresponding to Python/numpy types.
"""
columns = sorted(columns.items())
colnames = list(map(itemgetter(0), columns))
coltypes = list(map(itemgetter(1), columns))
invalid_types = []
new_types = []
keys = list(map(itemgetter(0), list(_mapping.items())))
for coltype in coltypes:
if coltype not in keys:
invalid_types.append(coltype)
else:
if coltype == np.dtype('O'):
new_types.append(str)
else:
new_types.append(keys[keys.index(coltype)])
assert len(invalid_types) == 0, "Invalid type(s) specified: {}".format(str(invalid_types))
if nullable_columns == 'all':
nullables = [True] * len(colnames)
else:
nullables = [col in nullable_columns for col in colnames]
fields = [{"metadata": {}, "name": name, "nullable": nullable, "type": _mapping[typ]}
for name, typ, nullable in zip(colnames, new_types, nullables)]
return StructType.fromJson({"type": "struct", "fields": fields})
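# --- Added usage sketch (not part of the original file above). It shows the kind of
# call generate_schema() accepts; the column names and types are illustrative and the
# resulting StructType maps them to string/integer/float Spark fields.
def _demo_generate_schema():
    schema = generate_schema({'name': str, 'age': int, 'score': float},
                             nullable_columns=['score'])
    print(schema)                              # only 'score' is nullable here
# --- end of added sketch ---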
| 32.419355
| 94
| 0.569652
|
d07eabae2ff8c8a7b084ae3d6a7e7266d747e87b
| 489
|
py
|
Python
|
relational/scripts/tests/context.py
|
snspam/sn_spam
|
e0bb8e9c843e26e5f4be8a49a960ebf7a0d5bfd5
|
[
"MIT"
] | null | null | null |
relational/scripts/tests/context.py
|
snspam/sn_spam
|
e0bb8e9c843e26e5f4be8a49a960ebf7a0d5bfd5
|
[
"MIT"
] | null | null | null |
relational/scripts/tests/context.py
|
snspam/sn_spam
|
e0bb8e9c843e26e5f4be8a49a960ebf7a0d5bfd5
|
[
"MIT"
] | null | null | null |
import os
import sys
one_level_up = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(1, one_level_up)
from app import config
from app.tests import test_utils
from relational.scripts import comments
from relational.scripts import generator
from relational.scripts import pred_builder
from relational.scripts import psl
from relational.scripts import relational
from relational.scripts import tuffy
from relational.scripts import mrf
from analysis import util
| 30.5625
| 77
| 0.832311
|
bc7d7a40a19d7345bb6b03a4ae09743b23f5f28c
| 19,341
|
py
|
Python
|
selvbetjening/sadmin2/views/event.py
|
animekita/selvbetjening
|
fee63d178fbd5ce2976c04d3a4b2dde6d8691892
|
[
"MIT"
] | null | null | null |
selvbetjening/sadmin2/views/event.py
|
animekita/selvbetjening
|
fee63d178fbd5ce2976c04d3a4b2dde6d8691892
|
[
"MIT"
] | 3
|
2020-02-11T21:54:59.000Z
|
2021-06-10T17:35:21.000Z
|
selvbetjening/sadmin2/views/event.py
|
animekita/selvbetjening
|
fee63d178fbd5ce2976c04d3a4b2dde6d8691892
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from django.contrib.admin.util import NestedObjects
from django.core.urlresolvers import reverse
from django.forms.models import modelformset_factory
from django.http.response import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.contrib import messages
from django.utils.translation import ugettext as _
from django.db.models import Count
from selvbetjening.businesslogic.events.decorators import suspend_automatic_attendee_price_updates
from selvbetjening.core.members.models import UserLocation
from django.db import router
from selvbetjening.core.user.models import SUser
from selvbetjening.core.events.options.dynamic_selections import SCOPE, dynamic_selections_formset_factory, dynamic_selections
from selvbetjening.core.events.utils import sum_attendee_payment_status
from selvbetjening.core.events.models import Event, Attend, AttendState, Payment, AttendeeComment
from selvbetjening.core.events.signals import request_attendee_pks_signal, attendee_updated_signal
from selvbetjening.sadmin2.forms import EventForm, AttendeeFormattingForm, PaymentForm, \
AttendeeCommentForm, attendee_selection_helper_factory, AttendeeCommentFormSet, ConfirmForm
from selvbetjening.sadmin2.decorators import sadmin_prerequisites
from selvbetjening.sadmin2 import menu
from generic import generic_create_view, search_view
from selvbetjening.sadmin2.graphs.timelines import AbsoluteTimeGraph, AgeTimeGraph
from selvbetjening.sadmin2.graphs.units import AttendeeRegisteredUnit, AttendeePaidUnit, AttendeeCheckedInUnit,\
AttendeeRegisteredAgeUnit
from selvbetjening.sadmin2.views.reports import insecure_reports_address
@sadmin_prerequisites
def event_overview(request, event_pk):
event = get_object_or_404(Event, pk=event_pk)
total = sum_attendee_payment_status(event.attendees)
    # returns a list of dictionaries with {'state': x, 'is_new': y, 'count': z}
status = Attend.objects.filter(event=event).values('state', 'is_new').annotate(count=Count('pk'))
status_flat = {}
for item in status:
status_flat['%s_new' % item['state'] if item['is_new'] else item['state']] = item['count']
for item in status:
if item['is_new']:
status_flat.setdefault(item['state'], 0)
status_flat[item['state']] += item['count']
return render(request,
'sadmin2/event/overview.html',
{
'sadmin2_menu_main_active': 'events',
'sadmin2_breadcrumbs_active': 'event',
'sadmin2_menu_tab': menu.sadmin2_menu_tab_event,
'sadmin2_menu_tab_active': 'overview',
'event': event,
'total': total,
'status': status_flat
})
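# --- Added illustration (not part of the original file above). It repeats the
# status_flat aggregation from event_overview() on hand-written rows, so the resulting
# keys are easy to see; the states and counts are invented for illustration.
def _demo_status_flat():
    status = [
        {'state': 'waiting', 'is_new': True, 'count': 3},
        {'state': 'waiting', 'is_new': False, 'count': 7},
    ]
    status_flat = {}
    for item in status:
        status_flat['%s_new' % item['state'] if item['is_new'] else item['state']] = item['count']
    for item in status:
        if item['is_new']:
            status_flat.setdefault(item['state'], 0)
            status_flat[item['state']] += item['count']
    return status_flat                         # {'waiting_new': 3, 'waiting': 10}
# --- end of added illustration ---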
@sadmin_prerequisites
def event_attendees(request, event_pk):
event = get_object_or_404(Event, pk=event_pk)
columns = ('pk', 'user__username', 'user__first_name', 'user__last_name', 'user__email',
'user__street', 'user__postalcode', 'user__city')
conditions = ('selection__option__pk', 'selection__suboption__pk', 'state', 'paid', 'price')
related = ('comment',)
queryset = event.attendees.select_related('user').all()
context = {
'sadmin2_menu_main_active': 'events',
'sadmin2_breadcrumbs_active': 'event_attendees',
'sadmin2_menu_tab': menu.sadmin2_menu_tab_event,
'sadmin2_menu_tab_active': 'attendees',
'event': event
}
return search_view(request,
queryset,
'sadmin2/event/attendees.html',
'sadmin2/event/attendees_inner.html',
search_columns=columns,
search_conditions=conditions,
search_related=related,
search_order='-pk',
context=context)
@sadmin_prerequisites
def event_account(request, event_pk):
event = get_object_or_404(Event, pk=event_pk)
if request.method == 'POST':
formatting_form = AttendeeFormattingForm(request.REQUEST, event=event, attendees=event.attendees)
formatting_form.is_valid()
else:
formatting_form = AttendeeFormattingForm(event=event, attendees=event.attendees)
attendees, line_groups, total, show_regular_attendees, show_irregular_attendees, attendee_filter_label = formatting_form.format()
return render(request,
'sadmin2/event/account.html',
{
                      'sadmin2_menu_main_active': 'events',
'sadmin2_breadcrumbs_active': 'event_account',
'sadmin2_menu_tab': menu.sadmin2_menu_tab_event,
'sadmin2_menu_tab_active': 'account',
'event': event,
'attendees': attendees.order_by('user__username'),
'line_groups': line_groups,
'total': total,
'show_regular_attendees': show_regular_attendees,
'show_irregular_attendees': show_irregular_attendees,
'formatting_form': formatting_form,
'attendee_filter_label': attendee_filter_label
})
@sadmin_prerequisites
def event_settings(request, event_pk):
event = get_object_or_404(Event, pk=event_pk)
if request.method == 'POST':
form = EventForm(request.POST, instance=event)
if form.is_valid():
form.save()
messages.add_message(request, messages.SUCCESS, _('Settings saved successfully'))
else:
form = EventForm(instance=event)
return render(request,
'sadmin2/generic/form.html',
{
'sadmin2_menu_main_active': 'events',
'sadmin2_breadcrumbs_active': 'event_settings',
'sadmin2_menu_tab': menu.sadmin2_menu_tab_event,
'sadmin2_menu_tab_active': 'settings',
'event': event,
'form': form
})
@sadmin_prerequisites
def event_copy(request, event_pk):
event = get_object_or_404(Event, pk=event_pk)
if request.method == 'POST':
form = ConfirmForm(request.POST)
if form.is_valid():
messages.add_message(request, messages.SUCCESS, _('Event copied'))
event.copy_and_mutate_self()
return HttpResponseRedirect(reverse('sadmin2:event_settings', kwargs={'event_pk': event.pk}))
else:
form = ConfirmForm()
return render(request,
'sadmin2/event/copy.html',
{
'sadmin2_menu_main_active': 'events',
'sadmin2_breadcrumbs_active': 'event_copy',
'sadmin2_menu_tab': menu.sadmin2_menu_tab_event,
'sadmin2_menu_tab_active': 'copy',
'event': event,
'form': form
})
@sadmin_prerequisites
def report_check_in(request, event_pk):
event = get_object_or_404(Event, pk=event_pk)
graph = AbsoluteTimeGraph(AbsoluteTimeGraph.SCOPE.hour,
AttendeeCheckedInUnit('Checked-in', event),
accumulative=True)
return render(request, 'sadmin2/graphs/linegraph.html', {
'sadmin2_menu_main_active': 'events',
'sadmin2_breadcrumbs_active': 'event_report_check_in',
'sadmin2_menu_tab': menu.sadmin2_menu_tab_event,
'sadmin2_menu_tab_active': 'reports',
'event': event,
'title': _('Check-in'),
'date_formatting': '%a %H:%M',
'graph': graph
})
@sadmin_prerequisites
def report_registration(request, event_pk):
event = get_object_or_404(Event, pk=event_pk)
graph = AbsoluteTimeGraph(AbsoluteTimeGraph.SCOPE.week,
AttendeeRegisteredUnit('Registered', event),
AttendeePaidUnit('Paid', event),
accumulative=True)
return render(request, 'sadmin2/graphs/linegraph.html', {
'sadmin2_menu_main_active': 'events',
'sadmin2_breadcrumbs_active': 'event_report_registration',
'sadmin2_menu_tab': menu.sadmin2_menu_tab_event,
'sadmin2_menu_tab_active': 'reports',
'event': event,
'title': _('Registrations'),
'graph': graph
})
@sadmin_prerequisites
def report_age(request, event_pk):
event = get_object_or_404(Event, pk=event_pk)
graph = AgeTimeGraph(AbsoluteTimeGraph.SCOPE.year,
AttendeeRegisteredAgeUnit('Users', event, event.startdate),
today=event.startdate)
return render(request, 'sadmin2/graphs/linegraph.html', {
'sadmin2_menu_main_active': 'events',
'sadmin2_breadcrumbs_active': 'event_report_age',
'sadmin2_menu_tab': menu.sadmin2_menu_tab_event,
'sadmin2_menu_tab_active': 'reports',
'event': event,
'title': _('User age'),
'graph': graph
})
@sadmin_prerequisites
def report_address(request, event_pk):
event = get_object_or_404(Event, pk=event_pk)
context = {
'sadmin2_menu_main_active': 'events',
'sadmin2_breadcrumbs_active': 'event_report_address',
'sadmin2_menu_tab': menu.sadmin2_menu_tab_event,
'sadmin2_menu_tab_active': 'reports',
'event': event
}
return insecure_reports_address(
request,
UserLocation.objects.filter(user__attend__event=event),
extra_context=context
)
@sadmin_prerequisites
def event_attendees_add(request, event_pk):
event = get_object_or_404(Event, pk=event_pk)
if request.method == 'POST':
user = get_object_or_404(SUser, pk=int(request.POST.get('user_pk', 0)))
Attend.objects.create(event=event, user=user)
# TODO update this redirect to go directly to the attendee page when we have one
messages.success(request, _('User %s added to event') % user.username)
return HttpResponseRedirect(reverse('sadmin2:event_attendees', kwargs={'event_pk': event.pk}))
queryset = SUser.objects.exclude(attend__event__pk=event.pk)
columns = ('username', 'first_name', 'last_name', 'email')
context = {
'sadmin2_menu_main_active': 'events',
'sadmin2_breadcrumbs_active': 'event_attendees_add',
'sadmin2_menu_tab': menu.sadmin2_menu_tab_event,
'sadmin2_menu_tab_active': 'attendees',
'event': event,
}
return search_view(request,
queryset,
'sadmin2/event/attendees_add.html',
'sadmin2/event/attendees_add_inner.html',
search_columns=columns,
context=context
)
@sadmin_prerequisites
def event_attendee(request, event_pk, attendee_pk):
event = get_object_or_404(Event, pk=event_pk)
attendee = get_object_or_404(event.attendees, pk=attendee_pk)
comments = attendee.comments.filter(check_in_announce=True)
selections = dynamic_selections(SCOPE.VIEW_SYSTEM_INVOICE, attendee)
if request.method == 'POST':
action = request.POST.get('action', '')
if action == 'to-state-waiting':
attendee.state = AttendState.waiting
attendee.save()
if action == 'to-state-accepted':
attendee.state = AttendState.accepted
attendee.save()
if action == 'to-state-attended':
attendee.state = AttendState.attended
attendee.save()
if action == 'pay':
Payment.objects.create(
user=attendee.user,
attendee=attendee,
amount=attendee.unpaid,
note='Manual payment',
signee=request.user
)
attendee_updated_signal.send(event_attendee, attendee=attendee)
return HttpResponseRedirect(reverse('sadmin2:event_attendee', kwargs={'event_pk': event.pk, 'attendee_pk': attendee.pk}))
return render(request,
'sadmin2/event/attendee.html',
{
'sadmin2_menu_main_active': 'events',
'sadmin2_breadcrumbs_active': 'event_attendees_attendee',
'sadmin2_menu_tab': menu.sadmin2_menu_tab_attendee,
'sadmin2_menu_tab_active': 'registration',
'event': event,
'attendee': attendee,
'comments': comments,
'selections': selections
})
@sadmin_prerequisites
def event_attendee_payments(request, event_pk, attendee_pk):
event = get_object_or_404(Event, pk=event_pk)
attendee = get_object_or_404(event.attendees, pk=attendee_pk)
payment_keys = request_attendee_pks_signal.send(None, attendee=attendee)
payments = attendee.payment_set.all()
context = {
'sadmin2_menu_main_active': 'events',
'sadmin2_breadcrumbs_active': 'event_attendees_attendee_payments',
'sadmin2_menu_tab': menu.sadmin2_menu_tab_attendee,
'sadmin2_menu_tab_active': 'payments',
'event': event,
'attendee': attendee,
'payment_keys': payment_keys,
'payments': payments,
}
def save_callback(payment):
payment.note = 'Manual payment'
payment.signee = request.user
payment.attendee = attendee
payment.user = attendee.user
payment.save()
attendee_updated_signal.send(event_attendee_selections, attendee=attendee)
return generic_create_view(
request,
PaymentForm,
redirect_success_url=reverse('sadmin2:event_attendee_payments',
kwargs={
'event_pk': event.pk,
'attendee_pk': attendee.pk
}),
message_success=_('Payment registered'),
instance_save_callback=save_callback,
template='sadmin2/event/attendee_payments.html',
context=context
)
@sadmin_prerequisites
def event_attendee_selections(request, event_pk, attendee_pk):
event = get_object_or_404(Event, pk=event_pk)
attendee = get_object_or_404(event.attendees, pk=attendee_pk)
DynamicSelectionsFormSet = dynamic_selections_formset_factory(
SCOPE.SADMIN,
event,
helper_factory=attendee_selection_helper_factory
)
if request.method == 'POST':
formset = DynamicSelectionsFormSet(request.POST, user=attendee.user, attendee=attendee)
if formset.is_valid():
formset.save()
messages.success(request, 'Saved selections')
attendee_updated_signal.send(event_attendee_selections, attendee=attendee)
return HttpResponseRedirect(reverse('sadmin2:event_attendee', kwargs={'event_pk': event.pk,
'attendee_pk': attendee.pk}))
else:
formset = DynamicSelectionsFormSet(user=attendee.user, attendee=attendee)
return render(request,
'sadmin2/event/attendee_selections.html',
{
'sadmin2_menu_main_active': 'events',
'sadmin2_breadcrumbs_active': 'event_attendees_attendee_selections',
'sadmin2_menu_tab': menu.sadmin2_menu_tab_attendee,
'sadmin2_menu_tab_active': 'selections',
'event': event,
'attendee': attendee,
'formset': formset
})
@sadmin_prerequisites
def event_attendee_notes(request, event_pk, attendee_pk):
event = get_object_or_404(Event, pk=event_pk)
attendee = get_object_or_404(event.attendees, pk=attendee_pk)
if request.method == 'POST':
formset = AttendeeCommentFormSet(request.POST, queryset=attendee.comments.all())
if formset.is_valid():
instances = formset.save(commit=False)
for instance in instances:
if instance.pk is None:
instance.attendee = attendee
instance.author = request.user
instance.save()
return HttpResponseRedirect(reverse('sadmin2:event_attendee_notes', kwargs={'event_pk': event.pk,
'attendee_pk': attendee.pk}))
else:
formset = AttendeeCommentFormSet(queryset=attendee.comments.all())
context = {
'sadmin2_menu_main_active': 'events',
'sadmin2_breadcrumbs_active': 'event_attendees_attendee_notes',
'sadmin2_menu_tab': menu.sadmin2_menu_tab_attendee,
'sadmin2_menu_tab_active': 'notes',
'event': event,
'attendee': attendee,
'comments': attendee.comments.all(),
'formset': formset
}
return render(request,
'sadmin2/event/attendee_notes.html',
context)
@sadmin_prerequisites
def event_attendee_log(request, event_pk, attendee_pk):
event = get_object_or_404(Event, pk=event_pk)
attendee = get_object_or_404(event.attendees, pk=attendee_pk)
context = {
'sadmin2_menu_main_active': 'events',
'sadmin2_breadcrumbs_active': 'event_attendees_attendee_log',
'sadmin2_menu_tab': menu.sadmin2_menu_tab_attendee,
'sadmin2_menu_tab_active': 'log',
'event': event,
'attendee': attendee,
'log': attendee.log.all().order_by('-id')
}
return render(request,
'sadmin2/event/attendee_log.html',
context)
def _get_deleted_objects(objs):
"""
Slightly simplified version of the function used in the standard admin
"""
if len(objs) == 0:
return []
collector = NestedObjects(using=router.db_for_write(objs[0]))
collector.collect(objs)
def format_callback(obj):
return '%s: %s' % (obj.__class__.__name__, unicode(obj))
return collector.nested(format_callback)
@sadmin_prerequisites
@suspend_automatic_attendee_price_updates
def event_attendee_delete(request, event_pk, attendee_pk):
event = get_object_or_404(Event, pk=event_pk)
attendee = get_object_or_404(event.attendees, pk=attendee_pk)
if request.method == 'POST':
attendee.delete()
messages.success(request, _('Attendee %s deleted' % attendee.user.username))
return HttpResponseRedirect(reverse('sadmin2:event_attendees', kwargs={'event_pk': event.pk}))
context = {
'sadmin2_menu_main_active': 'events',
'sadmin2_breadcrumbs_active': 'event_attendees_attendee_delete',
'sadmin2_menu_tab': menu.sadmin2_menu_tab_attendee,
'sadmin2_menu_tab_active': 'delete',
'event': event,
'attendee': attendee,
'to_be_deleted': _get_deleted_objects([attendee])
}
return render(request,
'sadmin2/event/attendee_delete.html',
context
)
| 34.414591
| 133
| 0.628664
|
96a17abf511133dd2ca08b4656825414b0dc1e0f
| 196
|
py
|
Python
|
django_priority_batch/__init__.py
|
dblenkus/django-priority-batch
|
8cc051b1196e90ef3ccc9d14d1ea277d46e31891
|
[
"Apache-2.0"
] | 1
|
2018-10-16T10:56:53.000Z
|
2018-10-16T10:56:53.000Z
|
django_priority_batch/__init__.py
|
dblenkus/django-priority-batch
|
8cc051b1196e90ef3ccc9d14d1ea277d46e31891
|
[
"Apache-2.0"
] | 3
|
2018-10-19T10:42:01.000Z
|
2018-10-21T10:14:56.000Z
|
django_priority_batch/__init__.py
|
dblenkus/django-priority-batch
|
8cc051b1196e90ef3ccc9d14d1ea277d46e31891
|
[
"Apache-2.0"
] | 4
|
2018-10-19T08:03:48.000Z
|
2020-02-03T19:49:51.000Z
|
""".. Ignore pydocstyle D400.
=====================
Django Priority Batch
=====================
TODO.
"""
from .middleware import Middleware
from .prioritized_batcher import PrioritizedBatcher
| 16.333333
| 51
| 0.612245
|
c7bb2e222aa18ee2953c568fe8b4f4e70356e922
| 36,671
|
py
|
Python
|
contentcuration/contentcuration/viewsets/contentnode.py
|
ozer550/studio
|
b69c625d223350b1b4a6909904ec4b44f80b211d
|
[
"MIT"
] | 6
|
2016-02-02T12:49:54.000Z
|
2017-12-05T21:00:01.000Z
|
contentcuration/contentcuration/viewsets/contentnode.py
|
ozer550/studio
|
b69c625d223350b1b4a6909904ec4b44f80b211d
|
[
"MIT"
] | 195
|
2015-05-19T20:37:51.000Z
|
2018-02-27T19:06:54.000Z
|
contentcuration/contentcuration/viewsets/contentnode.py
|
ozer550/studio
|
b69c625d223350b1b4a6909904ec4b44f80b211d
|
[
"MIT"
] | 33
|
2015-05-06T19:19:09.000Z
|
2017-11-16T12:50:54.000Z
|
import json
from functools import partial
from functools import reduce
from django.conf import settings
from django.core.exceptions import ValidationError as DjangoValidationError
from django.db import IntegrityError
from django.db import models
from django.db.models import Exists
from django.db.models import F
from django.db.models import IntegerField as DjangoIntegerField
from django.db.models import OuterRef
from django.db.models import Q
from django.db.models import Subquery
from django.db.models.functions import Cast
from django.db.models.functions import Coalesce
from django.http import Http404
from django.utils.timezone import now
from django_cte import CTEQuerySet
from django_filters.rest_framework import CharFilter
from django_filters.rest_framework import UUIDFilter
from le_utils.constants import content_kinds
from le_utils.constants import exercises
from le_utils.constants import roles
from le_utils.constants.labels import accessibility_categories
from le_utils.constants.labels import learning_activities
from le_utils.constants.labels import levels
from le_utils.constants.labels import needs
from le_utils.constants.labels import resource_type
from le_utils.constants.labels import subjects
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.serializers import BooleanField
from rest_framework.serializers import CharField
from rest_framework.serializers import ChoiceField
from rest_framework.serializers import DictField
from rest_framework.serializers import IntegerField
from rest_framework.serializers import ValidationError
from rest_framework.viewsets import ViewSet
from contentcuration.constants import completion_criteria as completion_criteria_validator
from contentcuration.db.models.expressions import IsNull
from contentcuration.db.models.query import RIGHT_JOIN
from contentcuration.db.models.query import With
from contentcuration.db.models.query import WithValues
from contentcuration.models import AssessmentItem
from contentcuration.models import Channel
from contentcuration.models import ContentNode
from contentcuration.models import ContentTag
from contentcuration.models import File
from contentcuration.models import generate_storage_url
from contentcuration.models import PrerequisiteContentRelationship
from contentcuration.models import UUIDField
from contentcuration.tasks import create_async_task
from contentcuration.tasks import get_or_create_async_task
from contentcuration.utils.nodes import calculate_resource_size
from contentcuration.viewsets.base import BulkListSerializer
from contentcuration.viewsets.base import BulkModelSerializer
from contentcuration.viewsets.base import BulkUpdateMixin
from contentcuration.viewsets.base import RequiredFilterSet
from contentcuration.viewsets.base import ValuesViewset
from contentcuration.viewsets.common import ChangeEventMixin
from contentcuration.viewsets.common import DotPathValueMixin
from contentcuration.viewsets.common import JSONFieldDictSerializer
from contentcuration.viewsets.common import NotNullMapArrayAgg
from contentcuration.viewsets.common import SQCount
from contentcuration.viewsets.common import UserFilteredPrimaryKeyRelatedField
from contentcuration.viewsets.common import UUIDInFilter
from contentcuration.viewsets.sync.constants import CONTENTNODE
from contentcuration.viewsets.sync.constants import CREATED
from contentcuration.viewsets.sync.constants import DELETED
from contentcuration.viewsets.sync.constants import TASK_ID
from contentcuration.viewsets.sync.utils import generate_delete_event
from contentcuration.viewsets.sync.utils import generate_update_event
from contentcuration.viewsets.sync.utils import log_sync_exception
channel_query = Channel.objects.filter(main_tree__tree_id=OuterRef("tree_id"))
_valid_positions = {"first-child", "last-child", "left", "right"}
class ContentNodeFilter(RequiredFilterSet):
id__in = UUIDInFilter(field_name="id")
root_id = UUIDFilter(method="filter_root_id")
ancestors_of = UUIDFilter(method="filter_ancestors_of")
parent__in = UUIDInFilter(field_name="parent")
_node_id_channel_id___in = CharFilter(method="filter__node_id_channel_id")
class Meta:
model = ContentNode
fields = (
"parent",
"parent__in",
"id__in",
"kind",
"root_id",
"ancestors_of",
"_node_id_channel_id___in",
)
def filter_root_id(self, queryset, name, value):
return queryset.filter(
parent=Channel.objects.filter(pk=value).values_list(
"main_tree__id", flat=True
)
)
def filter_ancestors_of(self, queryset, name, value):
"""
See MPTTModel.get_ancestors()
"""
try:
# Includes the target node in the query
target_node = ContentNode.objects.get(pk=value)
if target_node.is_root_node():
return queryset.filter(pk=value)
return queryset.filter(
tree_id=target_node.tree_id,
lft__lte=target_node.lft,
rght__gte=target_node.rght,
)
except ContentNode.DoesNotExist:
return queryset.none()
def filter__node_id_channel_id(self, queryset, name, value):
query = Q()
values = value.split(",")
num_pairs = len(values) // 2
for i in range(0, num_pairs):
query |= Q(node_id=values[i * 2], channel_id=values[i * 2 + 1])
return queryset.filter(query)
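# --- Added illustration (not part of the original file above). filter__node_id_channel_id
# expects a flat, comma-separated list of alternating node_id/channel_id values; the
# placeholder strings below stand in for real UUIDs and the helper mirrors the pairing.
def _demo_node_id_channel_id_pairs(value="<node_id_1>,<channel_id_1>,<node_id_2>,<channel_id_2>"):
    values = value.split(",")
    # each pair becomes Q(node_id=..., channel_id=...) OR'ed into one filter
    return [(values[i * 2], values[i * 2 + 1]) for i in range(len(values) // 2)]
# --- end of added illustration ---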
tags_values_cte_fields = {
'tag': models.CharField(),
'node_id': UUIDField()
}
def set_tags(tags_by_id):
tag_tuples = []
tags_relations_to_delete = []
# put all tags into a tuple (tag_name, node_id) to send into SQL
for target_node_id, tag_names in tags_by_id.items():
for tag_name, value in tag_names.items():
tag_tuples.append((tag_name, target_node_id))
# create CTE that holds the tag_tuples data
values_cte = WithValues(tags_values_cte_fields, tag_tuples, name='values_cte')
    # create another CTE which will RIGHT join against the tag table, so we get all of our
    # tag_tuple data back, plus the tag_id if it exists. Normally we wouldn't use a RIGHT
    # join here - we would simply swap the tables and do a LEFT join - but that isn't
    # possible with the VALUES CTE
tags_qs = (
values_cte.join(ContentTag, tag_name=values_cte.col.tag, _join_type=RIGHT_JOIN)
.annotate(
tag=values_cte.col.tag,
node_id=values_cte.col.node_id,
tag_id=F('id'),
)
.values('tag', 'node_id', 'tag_id')
)
tags_cte = With(tags_qs, name='tags_cte')
# the final query, we RIGHT join against the tag relation table so we get the tag_tuple back
# again, plus the tag_id from the previous CTE, plus annotate a boolean of whether
# the relation exists
qs = (
tags_cte.join(
CTEQuerySet(model=ContentNode.tags.through),
contenttag_id=tags_cte.col.tag_id,
contentnode_id=tags_cte.col.node_id,
_join_type=RIGHT_JOIN
)
.with_cte(values_cte)
.with_cte(tags_cte)
.annotate(
tag_name=tags_cte.col.tag,
node_id=tags_cte.col.node_id,
tag_id=tags_cte.col.tag_id,
has_relation=IsNull('contentnode_id', negate=True)
)
.values('tag_name', 'node_id', 'tag_id', 'has_relation')
)
created_tags = {}
for result in qs:
tag_name = result["tag_name"]
node_id = result["node_id"]
tag_id = result["tag_id"]
has_relation = result["has_relation"]
tags = tags_by_id[node_id]
value = tags[tag_name]
# tag wasn't found in the DB, but we're adding it to the node, so create it
if not tag_id and value:
# keep a cache of created tags during the session
if tag_name in created_tags:
tag_id = created_tags[tag_name]
else:
tag, _ = ContentTag.objects.get_or_create(tag_name=tag_name, channel_id=None)
tag_id = tag.pk
created_tags.update({tag_name: tag_id})
# if we're adding the tag but the relation didn't exist, create it now, otherwise
# track the tag as one relation we should delete
if value and not has_relation:
ContentNode.tags.through.objects.get_or_create(
contentnode_id=node_id, contenttag_id=tag_id
)
elif not value and has_relation:
tags_relations_to_delete.append(
Q(contentnode_id=node_id, contenttag_id=tag_id)
)
# delete tags
if tags_relations_to_delete:
ContentNode.tags.through.objects.filter(
reduce(lambda x, y: x | y, tags_relations_to_delete)
).delete()
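# Added illustrative sketch, not part of the original module. set_tags expects a mapping of
# node id -> {tag_name: bool}: a truthy value attaches the tag (creating the ContentTag if
# needed), a falsy value removes an existing relation. The ids below are placeholders.
#
#   set_tags({
#       "<node_id_1>": {"biology": True, "draft": False},
#       "<node_id_2>": {"biology": True},
#   })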
class ContentNodeListSerializer(BulkListSerializer):
def gather_tags(self, validated_data):
tags_by_id = {}
for obj in validated_data:
try:
tags = obj.pop("tags")
except KeyError:
pass
else:
if tags:
tags_by_id[obj["id"]] = tags
return tags_by_id
def update(self, queryset, all_validated_data):
tags = self.gather_tags(all_validated_data)
modified = now()
for data in all_validated_data:
data["modified"] = modified
all_objects = super(ContentNodeListSerializer, self).update(
queryset, all_validated_data
)
if tags:
set_tags(tags)
return all_objects
class ThresholdField(CharField):
def to_representation(self, value):
return value
def to_internal_value(self, data):
data = super(ThresholdField, self).to_internal_value(data)
try:
data = int(data)
except ValueError:
pass
return data
class CompletionCriteriaSerializer(JSONFieldDictSerializer):
threshold = ThresholdField(allow_null=True)
model = CharField()
learner_managed = BooleanField(required=False)
def update(self, instance, validated_data):
instance = super(CompletionCriteriaSerializer, self).update(instance, validated_data)
try:
completion_criteria_validator.validate(instance)
except DjangoValidationError as e:
raise ValidationError(e)
return instance
class ExtraFieldsOptionsSerializer(JSONFieldDictSerializer):
modality = ChoiceField(choices=(("QUIZ", "Quiz"),), allow_null=True, required=False)
completion_criteria = CompletionCriteriaSerializer(required=False)
class ExtraFieldsSerializer(JSONFieldDictSerializer):
mastery_model = ChoiceField(
choices=exercises.MASTERY_MODELS, allow_null=True, required=False
)
randomize = BooleanField()
m = IntegerField(allow_null=True, required=False)
n = IntegerField(allow_null=True, required=False)
options = ExtraFieldsOptionsSerializer(required=False)
class TagField(DotPathValueMixin, DictField):
pass
class MetadataLabelBooleanField(BooleanField):
def bind(self, field_name, parent):
# By default the bind method of the Field class sets the source_attrs to field_name.split(".").
# As we have literal field names that include "." we need to override this behavior.
# Otherwise it will attempt to set the source_attrs to a nested path, assuming that it is a source path,
# not a materialized path. This probably means that it was a bad idea to use "." in the materialized path,
# but alea iacta est.
super(MetadataLabelBooleanField, self).bind(field_name, parent)
self.source_attrs = [self.source]
class MetadataLabelsField(JSONFieldDictSerializer):
def __init__(self, choices, *args, **kwargs):
self.choices = choices
# Instantiate the superclass normally
super().__init__(*args, **kwargs)
def get_fields(self):
fields = {}
for label_id, label_name in self.choices:
field = MetadataLabelBooleanField(required=False, label=label_name, allow_null=True)
fields[label_id] = field
return fields
class ContentNodeSerializer(BulkModelSerializer):
"""
    This is a write-only serializer - we leverage it to do create and update
operations, but read operations are handled by the Viewset.
"""
parent = UserFilteredPrimaryKeyRelatedField(
queryset=ContentNode.objects.all(), required=False
)
extra_fields = ExtraFieldsSerializer(required=False)
tags = TagField(required=False)
# Fields for metadata labels
grade_levels = MetadataLabelsField(levels.choices, required=False)
resource_types = MetadataLabelsField(resource_type.choices, required=False)
learning_activities = MetadataLabelsField(learning_activities.choices, required=False)
accessibility_labels = MetadataLabelsField(accessibility_categories.choices, required=False)
categories = MetadataLabelsField(subjects.choices, required=False)
learner_needs = MetadataLabelsField(needs.choices, required=False)
dict_fields = [
"extra_fields",
"grade_levels",
"resource_types",
"learning_activities",
"accessibility_labels",
"categories",
"learner_needs",
]
class Meta:
model = ContentNode
fields = (
"id",
"title",
"description",
"kind",
"language",
"license",
"license_description",
"copyright_holder",
"author",
"role_visibility",
"aggregator",
"provider",
"extra_fields",
"thumbnail_encoding",
"parent",
"complete",
"changed",
"tags",
"grade_levels",
"resource_types",
"learning_activities",
"accessibility_labels",
"categories",
"learner_needs",
"suggested_duration",
)
list_serializer_class = ContentNodeListSerializer
nested_writes = True
def validate(self, data):
tags = data.get("tags")
if tags is not None:
for tag in tags:
if len(tag) > 30:
raise ValidationError("tag is greater than 30 characters")
return data
def create(self, validated_data):
# Creating a new node, by default put it in the orphanage on initial creation.
if "parent" not in validated_data:
validated_data["parent_id"] = settings.ORPHANAGE_ROOT_ID
tags = None
if "tags" in validated_data:
tags = validated_data.pop("tags")
instance = super(ContentNodeSerializer, self).create(validated_data)
if tags:
set_tags({instance.id: tags})
return instance
def update(self, instance, validated_data):
if "parent" in validated_data:
raise ValidationError(
{"parent": "This field should only be changed by a move operation"}
)
for field in self.dict_fields:
field_data = validated_data.pop(field, None)
if field_data is not None:
validated_data[field] = self.fields[field].update(
getattr(instance, field), field_data
)
if "tags" in validated_data:
tags = validated_data.pop("tags")
set_tags({instance.id: tags})
return super(ContentNodeSerializer, self).update(instance, validated_data)
def retrieve_thumbnail_src(item):
""" Get either the encoding or the url to use as the <img> src attribute """
try:
if item.get("thumbnail_encoding"):
encoding = json.loads(item.get("thumbnail_encoding"))
if encoding:
return encoding.get("base64")
except ValueError:
pass
if (
item["thumbnail_checksum"] is not None
and item["thumbnail_extension"] is not None
):
return generate_storage_url(
"{}.{}".format(item["thumbnail_checksum"], item["thumbnail_extension"])
)
return None
def get_title(item):
# If it's the root, use the channel name (should be original channel name)
return item["title"] if item["parent_id"] else item["original_channel_name"]
class PrerequisitesUpdateHandler(ViewSet):
"""
Dummy viewset for handling create and delete changes for prerequisites
"""
def _get_values_from_change(self, change):
return {
"target_node_id": change["key"][0],
"prerequisite_id": change["key"][1],
}
def _execute_changes(self, change_type, data):
if data:
if change_type == CREATED:
PrerequisiteContentRelationship.objects.bulk_create(
[PrerequisiteContentRelationship(**d) for d in data]
)
elif change_type == DELETED:
PrerequisiteContentRelationship.objects.filter(
reduce(lambda x, y: x | y, map(lambda x: Q(**x), data))
).delete()
def _check_permissions(self, changes):
        # Filter the passed in contentnodes, on both sides of the relationship
allowed_contentnodes = set(
ContentNode.filter_edit_queryset(
ContentNode.objects.all(), self.request.user
)
.filter(
id__in=list(map(lambda x: x["key"][0], changes))
+ list(map(lambda x: x["key"][1], changes))
)
.values_list("id", flat=True)
)
valid_changes = []
errors = []
for change in changes:
if (
change["key"][0] in allowed_contentnodes
and change["key"][1] in allowed_contentnodes
):
valid_changes.append(change)
else:
change.update({"errors": ValidationError("Not found").detail})
errors.append(change)
return valid_changes, errors
def _check_valid(self, changes):
# Don't allow prerequisites to be created across different trees
# or on themselves
valid_changes = []
errors = []
tree_id_lookup = {
c["id"]: c["tree_id"]
for c in ContentNode.objects.filter(
id__in=list(map(lambda x: x["key"][0], changes))
+ list(map(lambda x: x["key"][1], changes))
).values("id", "tree_id")
}
# Do a lookup on existing prerequisite relationships in the opposite direction to the ones we are trying to set
# Create a lookup string of prerequisite_id:target_node_id which we will compare against target_node_id:prerequisite_id
existing_relationships_lookup = {
"{}:{}".format(p["prerequisite_id"], p["target_node_id"])
for p in PrerequisiteContentRelationship.objects.filter(
                # The first part of the key is the target_node_id and the second is the prerequisite_id, so we reverse them here
reduce(
lambda x, y: x | y,
map(
lambda x: Q(
target_node_id=x["key"][1], prerequisite_id=x["key"][0]
),
changes,
),
)
).values("target_node_id", "prerequisite_id")
}
for change in changes:
if change["key"][0] == change["key"][1]:
change.update(
{
"errors": ValidationError(
"Prerequisite relationship cannot be self referential"
).detail
}
)
errors.append(change)
elif tree_id_lookup[change["key"][0]] != tree_id_lookup[change["key"][1]]:
change.update(
{
"errors": ValidationError(
"Prerequisite relationship cannot cross trees"
).detail
}
)
errors.append(change)
elif (
"{}:{}".format(change["key"][0], change["key"][1])
in existing_relationships_lookup
):
change.update(
{
"errors": ValidationError(
"Prerequisite relationship cannot be reciprocal"
).detail
}
)
errors.append(change)
else:
valid_changes.append(change)
return valid_changes, errors
def _handle_relationship_changes(self, changes):
change_types = set(map(lambda x: x["type"], changes))
if len(change_types) > 1:
raise TypeError("Mixed change types passed to change handler")
change_type = tuple(change_types)[0]
permissioned_changes, permission_errors = self._check_permissions(changes)
if change_type == CREATED and permissioned_changes:
# Only do validation on create operations and if there are any changes left to validate
valid_changes, validation_errors = self._check_valid(permissioned_changes)
errors = permission_errors + validation_errors
else:
# For delete operations, just check permissions, but let invalid
# relationships be deleted
valid_changes = permissioned_changes
errors = permission_errors
data = list(map(self._get_values_from_change, valid_changes))
        # In Django 2.2 add ignore_conflicts to make this foolproof
try:
self._execute_changes(change_type, data)
except IntegrityError as e:
for change in valid_changes:
change.update({"errors": str(e)})
errors.append(change)
return errors or None, None
def create_from_changes(self, changes):
return self._handle_relationship_changes(changes)
def delete_from_changes(self, changes):
return self._handle_relationship_changes(changes)
def dict_if_none(obj, field_name=None):
return obj[field_name] if obj[field_name] else {}
# Apply mixin first to override ValuesViewset
class ContentNodeViewSet(BulkUpdateMixin, ChangeEventMixin, ValuesViewset):
queryset = ContentNode.objects.all()
serializer_class = ContentNodeSerializer
permission_classes = [IsAuthenticated]
filterset_class = ContentNodeFilter
values = (
"id",
"content_id",
"title",
"description",
"author",
"assessment_item_count",
"provider",
"aggregator",
"content_tags",
"role_visibility",
"kind__kind",
"language_id",
"license_id",
"license_description",
"copyright_holder",
"extra_fields",
"node_id",
"root_id",
"channel_id",
"original_source_node_id",
"original_channel_id",
"original_channel_name",
"original_node_id",
"original_parent_id",
"total_count",
"resource_count",
"error_count",
"has_updated_descendants",
"has_new_descendants",
"coach_count",
"thumbnail_checksum",
"thumbnail_extension",
"thumbnail_encoding",
"published",
"modified",
"has_children",
"parent_id",
"complete",
"changed",
"lft",
"grade_levels",
"resource_types",
"learning_activities",
"accessibility_labels",
"categories",
"learner_needs",
"suggested_duration",
)
field_map = {
"language": "language_id",
"license": "license_id",
"tags": "content_tags",
"kind": "kind__kind",
"thumbnail_src": retrieve_thumbail_src,
"title": get_title,
"parent": "parent_id",
"grade_levels": partial(dict_if_none, field_name="grade_levels"),
"resource_types": partial(dict_if_none, field_name="resource_types"),
"learning_activities": partial(dict_if_none, field_name="learning_activities"),
"accessibility_labels": partial(dict_if_none, field_name="accessibility_labels"),
"categories": partial(dict_if_none, field_name="categories"),
"learner_needs": partial(dict_if_none, field_name="learner_needs"),
}
def _annotate_channel_id(self, queryset):
return queryset.annotate(
channel_id=Subquery(channel_query.values_list("id", flat=True)[:1])
)
def get_queryset(self):
queryset = super(ContentNodeViewSet, self).get_queryset()
return self._annotate_channel_id(queryset)
def get_edit_queryset(self):
queryset = super(ContentNodeViewSet, self).get_edit_queryset()
return self._annotate_channel_id(queryset)
@action(detail=True, methods=["get"])
def requisites(self, request, pk=None):
if not pk:
raise Http404
# Here we are fetching the entire prerequisite relationship tree
# for the channel. It is possible that this could get very large,
# and cause performance issues, and it may not need to be loaded
# on every fetch.
# However, in order to detect potential cyclic prerequisite chains,
# we load the entire channel's prerequisite tree at once.
# Do a filter just on the tree_id of the target node, as relationships
# should not be cross channel, and are not meaningful if they are.
prereq_table_entries = PrerequisiteContentRelationship.objects.filter(
target_node__tree_id=Cast(
ContentNode.objects.filter(pk=pk).values_list("tree_id", flat=True)[:1],
output_field=DjangoIntegerField(),
)
).values("target_node_id", "prerequisite_id")
return Response(
list(
map(
lambda x: {
"target_node": x["target_node_id"],
"prerequisite": x["prerequisite_id"],
},
prereq_table_entries,
)
            ),
        )
@action(detail=True, methods=["get"])
def size(self, request, pk=None):
if not pk:
raise Http404
task_info = None
node = self.get_object()
# currently we restrict triggering calculations through the API to the channel root node
if not node.is_root_node():
raise Http404
# we don't force the calculation, so if the channel is large, it returns the cached size
size, stale = calculate_resource_size(node=node, force=False)
if stale:
# When stale, that means the value is not up-to-date with modified files in the DB,
# and the channel is significantly large, so we'll queue an async task for calculation.
# We don't really need more than one queued async calculation task, so we use
# get_or_create_async_task to ensure a task is queued, as well as return info about it
task_args = dict(node_id=node.pk, channel_id=node.channel_id)
task_info = get_or_create_async_task(
"calculate-resource-size", self.request.user, **task_args
)
changes = []
if task_info is not None:
changes.append(self.create_task_event(task_info))
return Response({
"size": size,
"stale": stale,
"changes": changes
})
def annotate_queryset(self, queryset):
queryset = queryset.annotate(total_count=(F("rght") - F("lft") - 1) / 2)
descendant_resources = (
ContentNode.objects.filter(
tree_id=OuterRef("tree_id"),
lft__gt=OuterRef("lft"),
rght__lt=OuterRef("rght"),
)
.exclude(kind_id=content_kinds.TOPIC)
.values("id", "role_visibility", "changed")
.order_by()
)
all_descendants = (
ContentNode.objects.filter(
tree_id=OuterRef("tree_id"),
lft__gt=OuterRef("lft"),
rght__lt=OuterRef("rght"),
)
.values("id", "complete", "published")
.order_by()
)
# Get count of descendant nodes with errors
descendant_errors = all_descendants.filter(complete=False)
changed_descendants = descendant_resources.filter(changed=True)
thumbnails = File.objects.filter(
contentnode=OuterRef("id"), preset__thumbnail=True
)
original_channel_name = Coalesce(
Subquery(
Channel.objects.filter(pk=OuterRef("original_channel_id")).values(
"name"
)[:1]
),
Subquery(
Channel.objects.filter(main_tree__tree_id=OuterRef("tree_id")).values(
"name"
)[:1]
),
)
original_node = ContentNode.objects.filter(
node_id=OuterRef("original_source_node_id")
).filter(node_id=F("original_source_node_id"))
root_id = ContentNode.objects.filter(
tree_id=OuterRef("tree_id"), parent__isnull=True
).values_list("id", flat=True)[:1]
assessment_items = (
AssessmentItem.objects.filter(contentnode_id=OuterRef("id"), deleted=False)
.values_list("assessment_id", flat=True)
.distinct()
)
queryset = queryset.annotate(
resource_count=SQCount(descendant_resources, field="id"),
coach_count=SQCount(
descendant_resources.filter(role_visibility=roles.COACH), field="id",
),
assessment_item_count=SQCount(assessment_items, field="assessment_id"),
error_count=SQCount(descendant_errors, field="id"),
has_updated_descendants=Exists(
changed_descendants.filter(published=True).values("id")
),
has_new_descendants=Exists(
changed_descendants.filter(published=False).values("id")
),
thumbnail_checksum=Subquery(thumbnails.values("checksum")[:1]),
thumbnail_extension=Subquery(
thumbnails.values("file_format__extension")[:1]
),
original_channel_name=original_channel_name,
original_parent_id=Subquery(original_node.values("parent_id")[:1]),
has_children=Exists(
ContentNode.objects.filter(parent=OuterRef("id")).values("pk")
),
root_id=Subquery(root_id),
)
queryset = queryset.annotate(content_tags=NotNullMapArrayAgg("tags__tag_name"))
return queryset
def validate_targeting_args(self, target, position):
position = position or "last-child"
if target is None:
raise ValidationError("A target must be specified")
try:
target = self.get_edit_queryset().get(pk=target)
except ContentNode.DoesNotExist:
raise ValidationError("Target: {} does not exist".format(target))
except ValueError:
raise ValidationError("Invalid target specified: {}".format(target))
if position not in _valid_positions:
raise ValidationError(
"Invalid position specified, must be one of {}".format(
", ".join(_valid_positions)
)
)
return target, position
def move_from_changes(self, changes):
errors = []
changes_to_return = []
for move in changes:
# Move change will have key, must also have target property
# optionally can include the desired position.
move_error, move_change = self.move(
move["key"], target=move.get("target"), position=move.get("position")
)
if move_error:
move.update({"errors": [move_error]})
errors.append(move)
if move_change:
changes_to_return.append(move_change)
return errors, changes_to_return
def move(self, pk, target=None, position=None):
try:
contentnode = self.get_edit_queryset().get(pk=pk)
except ContentNode.DoesNotExist:
error = ValidationError("Specified node does not exist")
return str(error), None
try:
target, position = self.validate_targeting_args(target, position)
channel_id = target.channel_id
task_args = {
"user_id": self.request.user.id,
"channel_id": channel_id,
"node_id": contentnode.id,
"target_id": target.id,
"position": position,
}
task, task_info = create_async_task(
"move-nodes", self.request.user, **task_args
)
return (
None,
None,
)
except ValidationError as e:
return str(e), None
def copy_from_changes(self, changes):
errors = []
changes_to_return = []
for copy in changes:
# Copy change will have key, must also have other attributes, defined in `copy`
# Just pass as keyword arguments here to let copy do the validation
copy_errors, copy_changes = self.copy(copy["key"], **copy)
if copy_errors:
copy.update({"errors": copy_errors})
errors.append(copy)
if copy_changes:
changes_to_return.extend(copy_changes)
return errors, changes_to_return
def copy(
self,
pk,
from_key=None,
target=None,
position=None,
mods=None,
excluded_descendants=None,
**kwargs
):
try:
target, position = self.validate_targeting_args(target, position)
except ValidationError as e:
return str(e), None
try:
source = self.get_queryset().get(pk=from_key)
except ContentNode.DoesNotExist:
error = ValidationError("Copy source node does not exist")
return str(error), [generate_delete_event(pk, CONTENTNODE)]
# Affected channel for the copy is the target's channel
channel_id = target.channel_id
if ContentNode.objects.filter(pk=pk).exists():
error = ValidationError("Copy pk already exists")
return str(error), None
task_args = {
"user_id": self.request.user.id,
"channel_id": channel_id,
"source_id": source.id,
"target_id": target.id,
"pk": pk,
"mods": mods,
"excluded_descendants": excluded_descendants,
"position": position,
}
task, task_info = create_async_task(
"duplicate-nodes", self.request.user, **task_args
)
return (
None,
[generate_update_event(pk, CONTENTNODE, {TASK_ID: task_info.task_id})],
)
def delete_from_changes(self, changes):
errors = []
changes_to_return = []
queryset = self.get_edit_queryset().order_by()
for change in changes:
try:
instance = queryset.get(**dict(self.values_from_key(change["key"])))
task_args = {
"user_id": self.request.user.id,
"channel_id": instance.channel_id,
"node_id": instance.id,
}
task, task_info = create_async_task(
"delete-node", self.request.user, **task_args
)
except ContentNode.DoesNotExist:
# If the object already doesn't exist, as far as the user is concerned
# job done!
pass
except Exception as e:
log_sync_exception(e)
change["errors"] = [str(e)]
errors.append(change)
return errors, changes_to_return
| 36.488557
| 127
| 0.6194
|
eebc522f722f37ea19f0e56f143d03a21d127824
| 449
|
py
|
Python
|
recursive_algorithms/Tower of Hanoi/Python/tower_of_hanoi.py
|
avi-pal/al-go-rithms
|
5167a20f1db7b366ff19f2962c1746a02e4f5067
|
[
"CC0-1.0"
] | 1,253
|
2017-06-06T07:19:25.000Z
|
2022-03-30T17:07:58.000Z
|
recursive_algorithms/Tower of Hanoi/Python/tower_of_hanoi.py
|
rishabh99-rc/al-go-rithms
|
4df20d7ef7598fda4bc89101f9a99aac94cdd794
|
[
"CC0-1.0"
] | 554
|
2017-09-29T18:56:01.000Z
|
2022-02-21T15:48:13.000Z
|
recursive_algorithms/Tower of Hanoi/Python/tower_of_hanoi.py
|
rishabh99-rc/al-go-rithms
|
4df20d7ef7598fda4bc89101f9a99aac94cdd794
|
[
"CC0-1.0"
] | 2,226
|
2017-09-29T19:59:59.000Z
|
2022-03-25T08:59:55.000Z
|
# Recursive Python function to solve tower of hanoi
def TowerOfHanoi(n, from_rod, to_rod, aux_rod):
    if n == 1:
        print("Move disk 1 from rod", from_rod, "to rod", to_rod)
        return
    TowerOfHanoi(n-1, from_rod, aux_rod, to_rod)
    print("Move disk", n, "from rod", from_rod, "to rod", to_rod)
    TowerOfHanoi(n-1, aux_rod, to_rod, from_rod)
# Driver code
n = 4
TowerOfHanoi(n, 'A', 'C', 'B')
# A, C, B are the names of the rods
# Contributed By Harshit Agrawal
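# Added note, not in the original file: solving n disks always takes 2**n - 1 moves, so the
# n = 4 run above prints 15 moves. A minimal self-check mirroring the recursion:
def count_hanoi_moves(n):
    # Counts the moves the printing version above would make for n disks.
    if n == 1:
        return 1
    return 2 * count_hanoi_moves(n - 1) + 1
assert count_hanoi_moves(4) == 2 ** 4 - 1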
| 26.411765
| 57
| 0.697105
|
d39d713ace8cee7c4098547ecc240d4a3d346cb2
| 17,220
|
py
|
Python
|
automonkey/tcloud_update.py
|
wangshaoce/monkey_tcloud
|
124f2a06caad2aeabdd1d8254d3fa2765c264d8e
|
[
"MIT"
] | null | null | null |
automonkey/tcloud_update.py
|
wangshaoce/monkey_tcloud
|
124f2a06caad2aeabdd1d8254d3fa2765c264d8e
|
[
"MIT"
] | null | null | null |
automonkey/tcloud_update.py
|
wangshaoce/monkey_tcloud
|
124f2a06caad2aeabdd1d8254d3fa2765c264d8e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
import logging
import traceback
from datetime import datetime
import requests
from .config import DefaultConfig
logger = logging.getLogger(__name__)
class TCloud(object):
def __init__(self, task_id, device_id, monkey_id, tcloud_url, process=0):
self.task_id = task_id
self.monkey_id = monkey_id
self.device_id = device_id
self.tcloud_url = tcloud_url if tcloud_url is not None else DefaultConfig.TCLOUD_URL
self.anr = 0
self.crash = 0
self.process = process
# monkey update
def on_get_app_version(self, version):
if version is not None:
self.update_monkey(app_version=version)
def on_download_app(self, status):
if status:
download_app_status = 1
self.update_monkey(download_app_status=download_app_status)
else:
download_app_status = 2
end_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.update_monkey(download_app_status=download_app_status)
    def on_monkey_end(self):
        end_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        self.update_monkey(process=100, end_time=end_time)
def on_monkey_begin(self, jenkins_url):
self.update_monkey(jenkins_url=jenkins_url)
# task update
def on_task_begin(self):
process = 0
begin_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.update_task(process=process, begin_time=begin_time)
def on_task_end(self, process, activity_count, activity_tested_count, activity_all, activity_tested, anr_count,
crash_count, crash_rate, exception_count, exception_run_time, run_time):
end_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.update_task(process=process, end_time=end_time, activity_count=activity_count, anr_count=anr_count,
activity_tested_count=activity_tested_count, activity_all=activity_all,
crash_count=crash_count,
activity_tested=activity_tested, crash_rate=crash_rate, exception_count=exception_count,
exception_run_time=exception_run_time, run_time=run_time)
def on_running_status(self, status, error_msg):
self.update_task(running_error_reason=error_msg, running_status=status)
def on_device_connect(self, status):
if status:
device_connect_status = 1
self.update_task(device_connect_status=device_connect_status)
else:
device_connect_status = 2
end_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.update_task(process=100, activity_count=0, activity_tested_count=0, activity_all='',
activity_tested='', anr_count=0, crash_count=0, crash_rate=0, exception_count=0,
exception_run_time=0, setup_install_app_status=0,
setup_uninstall_app_status=0, start_app_status=0, login_app_status=0,
teardown_uninstall_app_status=0, end_time=end_time, run_time=0,
device_connect_status=device_connect_status, screen_lock_status=0)
def on_screen_lock(self, status):
if status:
screen_lock_status = 1
self.update_task(screen_lock_status=screen_lock_status)
else:
screen_lock_status = 2
end_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.update_task(process=100, activity_count=0, activity_tested_count=0, activity_all='',
activity_tested='', anr_count=0, crash_count=0, crash_rate=0, exception_count=0,
exception_run_time=0, setup_install_app_status=0,
setup_uninstall_app_status=0, start_app_status=0, login_app_status=0,
teardown_uninstall_app_status=0, end_time=end_time, run_time=0,
screen_lock_status=screen_lock_status)
def on_setup_uninstall_app(self, status):
if status:
setup_uninstall_app_status = 1
self.update_task(setup_uninstall_app_status=setup_uninstall_app_status)
else:
setup_uninstall_app_status = 2
self.update_task(process=100, activity_count=0, activity_tested_count=0, activity_all='',
activity_tested='', anr_count=0, crash_count=0, crash_rate=0, exception_count=0,
exception_run_time=0, setup_uninstall_app_status=setup_uninstall_app_status,
start_app_status=0, login_app_status=0, teardown_uninstall_app_status=0, run_time=0)
def on_setup_install_app(self, status):
if status:
setup_install_app_status = 1
self.update_task(setup_install_app_status=setup_install_app_status)
else:
setup_install_app_status = 2
end_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.update_task(process=100, activity_count=0, activity_tested_count=0, activity_all='',
activity_tested='', anr_count=0, crash_count=0, crash_rate=0, exception_count=0,
exception_run_time=0, setup_install_app_status=setup_install_app_status,
start_app_status=0, login_app_status=0, teardown_uninstall_app_status=0,
end_time=end_time, run_time=0)
def on_start_app(self, status):
if status:
start_app_status = 1
self.update_task(start_app_status=start_app_status)
else:
start_app_status = 2
end_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.update_task(process=100, activity_count=0, activity_tested_count=0, activity_all='',
activity_tested='', anr_count=0, crash_count=0, crash_rate=0, exception_count=0,
exception_run_time=0, start_app_status=start_app_status, login_app_status=0,
teardown_uninstall_app_status=0, end_time=end_time, run_time=0)
def on_login_app(self, status):
if status:
login_app_status = 1
self.update_task(login_app_status=login_app_status)
else:
login_app_status = 2
end_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.update_task(process=100, activity_count=0, activity_tested_count=0, activity_all='',
activity_tested='', anr_count=0, crash_count=0, crash_rate=0, exception_count=0,
exception_run_time=0, login_app_status=login_app_status, teardown_uninstall_app_status=0,
end_time=end_time, run_time=0)
def on_teardown_uninstall_app(self, status):
if status:
teardown_uninstall_app_status = 1
self.update_task(teardown_uninstall_app_status=teardown_uninstall_app_status)
else:
teardown_uninstall_app_status = 2
            end_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.update_task(process=100, activity_count=0, activity_tested_count=0, activity_all='',
activity_tested='', anr_count=0, crash_count=0, crash_rate=0, exception_count=0,
exception_run_time=0, teardown_uninstall_app_status=teardown_uninstall_app_status,
run_time=0)
# on user cancel
def on_user_cancel_task(self):
self.update_task(current_stage=1000)
# on user cancel success
def on_user_cancel_stask_success(self):
self.update_task(current_stage=1001)
# on device_not connect on running
def on_device_disconnect_on_running(self):
self.update_task(current_stage=1002)
    # upload when anr, crash or process changes
def on_anr_crash_changed(self, process, anr, crash):
if self.anr != anr or self.crash != crash or self.process != process:
self.anr = anr
self.crash = crash
self.process = process
self.update_task(process=process, anr_count=anr, crash_count=crash)
# upload errorlog
def on_errorlog_upload(self, logs):
for key in logs.keys():
log = logs.get(key)
self.upload_log(int(self.monkey_id), int(self.task_id), log.get('error_type'),
json.dumps(log.get('error_message')), log.get('error_count'))
# upload report
def on_report_upload(self, report_url, report_type):
self.upload_report(int(self.monkey_id), int(self.task_id), report_url, report_type)
def update_monkey(self, end_time=None, process=None, jenkins_url=None, status=None, app_version=None,
begin_time=None, report_url=None, run_time=None, actual_run_time=None, download_app_status=None):
try:
logger.info('({}) update monkey'.format(self.device_id))
request_data_template = {
"begin_time": begin_time,
"process": process,
"jenkins_url": jenkins_url,
"status": status,
"app_version": app_version,
"report_url": report_url,
"end_time": end_time,
"run_time": run_time,
"actual_run_time": actual_run_time,
"download_app_status": download_app_status
}
request_data = {}
for key in request_data_template.keys():
value = request_data_template.get(key)
if value is not None:
request_data[key] = value
logger.info(request_data)
request_url = '{}/v1/monkey/{}'.format(self.tcloud_url, self.monkey_id)
response = requests.request(method='POST', url=request_url, json=request_data)
if response.ok:
logger.info(response.text)
logger.info('({}) update monkey <{}> success'.format(self.device_id, self.monkey_id))
return True
return False
except Exception as e:
logger.error(e)
logger.error(traceback.format_exc())
return False
def update_task(self, process=None, begin_time=None, activity_count=None, activity_tested_count=None,
activity_all=None, activity_tested=None, anr_count=None, crash_count=None, crash_rate=None,
exception_count=None, exception_run_time=None,
setup_install_app_status=None, setup_uninstall_app_status=None, start_app_status=None,
login_app_status=None, teardown_uninstall_app_status=None, end_time=None, run_time=None,
device_connect_status=None, screen_lock_status=None, running_status=None,
running_error_reason=None, current_stage=None):
try:
logger.info('({}) update task'.format(self.device_id))
request_data_template = {
"begin_time": begin_time,
"process": process,
"activity_count": activity_count,
"activity_tested_count": activity_tested_count,
"activity_all": activity_all,
"activity_tested": activity_tested,
"anr_count": anr_count,
"crash_count": crash_count,
"crash_rate": crash_rate,
"exception_count": exception_count,
"exception_run_time": exception_run_time,
"device_connect_status": device_connect_status,
"screen_lock_status": screen_lock_status,
"setup_install_app_status": setup_install_app_status,
"setup_uninstall_app_status": setup_uninstall_app_status,
"start_app_status": start_app_status,
"login_app_status": login_app_status,
"teardown_uninstall_app_status": teardown_uninstall_app_status,
"end_time": end_time,
"run_time": run_time,
"running_status": running_status,
"running_error_reason": running_error_reason,
"current_stage": current_stage,
}
request_data = {}
for key in request_data_template.keys():
value = request_data_template.get(key)
if value is not None:
request_data[key] = value
logger.info(request_data)
request_url = '{}/v1/monkey/devicestatus/{}'.format(self.tcloud_url, self.task_id)
response = requests.request(method='POST', url=request_url, json=request_data)
if response.ok:
logger.info(response.text)
logger.info('({}) update task <{}> success'.format(self.device_id, self.task_id))
return True
return False
except Exception as e:
logger.error(e)
logger.error(traceback.format_exc())
return False
def upload_log(self, monkey_id=None, task_id=None, error_type=None, error_message=None, error_count=None):
try:
logger.info('({}) upload log'.format(self.device_id))
request_data_template = {
'monkey_id': monkey_id,
'task_id': task_id,
'error_type': error_type,
'error_message': error_message,
'error_count': error_count
}
request_data = {}
for key in request_data_template.keys():
value = request_data_template.get(key)
if value is not None:
request_data[key] = value
logger.info(request_data)
request_url = '{}/v1/monkey/errorlog'.format(self.tcloud_url)
response = requests.request(method='POST', url=request_url, json=request_data)
if response.ok:
logger.info(response.text)
logger.info('({}) upload log success'.format(self.device_id))
return True
return False
except Exception as e:
logger.error(e)
traceback.print_exc()
return False
def upload_report(self, monkey_id=None, task_id=None, report_url=None, report_type=None):
try:
logger.info('({}) upload report'.format(self.device_id))
request_data_template = {
'monkey_id': monkey_id,
'task_id': task_id,
'report_url': report_url,
'report_type': report_type
}
request_data = {}
for key in request_data_template.keys():
value = request_data_template.get(key)
if value is not None:
request_data[key] = value
logger.info(request_data)
request_url = '{}/v1/monkey/report'.format(self.tcloud_url)
response = requests.request(method='POST', url=request_url, json=request_data)
if response.ok:
logger.info(response.text)
logger.info('({}) upload report success'.format(self.device_id))
return True
return False
except Exception as e:
logger.error(e)
traceback.print_exc()
return False
def get_monkey_cancel_status(self, task_id):
try:
logger.info('({}) get monkey cancel status {}'.format(self.device_id, task_id))
request_url = '{}/v1/monkey/cancel?task_id={}'.format(self.tcloud_url, task_id)
response = requests.request(method='GET', url=request_url)
if response.ok:
logger.info(response.json())
return response.json().get('data').get('cancel_status')
except Exception as e:
logger.error(e)
logger.error(traceback.format_exc())
def using_monkey_device(self, serial):
try:
logger.info('using monkey device now!')
request_data = {
'serial': serial
}
request_url = '{}/v1/monkey/device/using'.format(self.tcloud_url)
response = requests.request(method='POST', url=request_url, json=request_data)
if response.ok:
logger.info(response.json())
return True
except Exception as e:
logger.error(e)
logger.error(traceback.format_exc())
def release_monkey_device(self, serial):
try:
logger.info('release monkey device now!')
request_data = {
'serial': serial
}
request_url = '{}/v1/monkey/device/release'.format(self.tcloud_url)
response = requests.request(method='POST', url=request_url, json=request_data)
if response.ok:
logger.info(response.json())
return True
except Exception as e:
logger.error(e)
logger.error(traceback.format_exc())
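# Added illustrative usage sketch, not part of the original module. The URL, ids and values
# below are placeholders; in real use they come from the monkey task scheduler.
#
#   tcloud = TCloud(task_id=1, device_id='emulator-5554', monkey_id=1,
#                   tcloud_url='http://tcloud.example.com')
#   tcloud.on_task_begin()
#   tcloud.on_anr_crash_changed(process=50, anr=0, crash=1)
#   tcloud.on_task_end(process=100, activity_count=10, activity_tested_count=8,
#                      activity_all='', activity_tested='', anr_count=0, crash_count=1,
#                      crash_rate=0.1, exception_count=1, exception_run_time=0, run_time=3600)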
| 43.705584
| 119
| 0.602207
|
e0c0e13c95aec028b9ef0d984171f6799fdcf2b7
| 466
|
py
|
Python
|
env/lib/python3.8/site-packages/plotly/validators/layout/radialaxis/_visible.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 76
|
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
env/lib/python3.8/site-packages/plotly/validators/layout/radialaxis/_visible.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-08-09T02:30:14.000Z
|
2022-03-12T00:50:14.000Z
|
env/lib/python3.8/site-packages/plotly/validators/layout/radialaxis/_visible.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="visible", parent_name="layout.radialaxis", **kwargs
):
super(VisibleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "info"),
**kwargs
)
| 31.066667
| 78
| 0.639485
|
6dd552b7d50c6adea3820073ded82f03a55d964c
| 2,210
|
py
|
Python
|
mne/tests/test_morph_map.py
|
stevemats/mne-python
|
47051833f21bb372d60afc3adbf4305648ac7f69
|
[
"BSD-3-Clause"
] | 1
|
2022-02-19T08:13:49.000Z
|
2022-02-19T08:13:49.000Z
|
mne/tests/test_morph_map.py
|
LiFeng-SECUC/mne-python
|
732bb1f994e64e41a8e95dcc10dc98c22cac95c0
|
[
"BSD-3-Clause"
] | 2
|
2018-10-29T09:09:34.000Z
|
2019-08-02T16:24:09.000Z
|
mne/tests/test_morph_map.py
|
LiFeng-SECUC/mne-python
|
732bb1f994e64e41a8e95dcc10dc98c22cac95c0
|
[
"BSD-3-Clause"
] | 1
|
2021-07-22T17:57:33.000Z
|
2021-07-22T17:57:33.000Z
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD-3-Clause
import os
import os.path as op
from shutil import copyfile
import pytest
import numpy as np
from numpy.testing import assert_allclose
from scipy import sparse
from mne.datasets import testing
from mne.utils import catch_logging, _record_warnings
from mne import read_morph_map
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
@pytest.mark.slowtest
@testing.requires_testing_data
def test_make_morph_maps(tmp_path):
"""Test reading and creating morph maps."""
# make a new fake subjects_dir
tempdir = str(tmp_path)
for subject in ('sample', 'sample_ds', 'fsaverage_ds'):
os.mkdir(op.join(tempdir, subject))
os.mkdir(op.join(tempdir, subject, 'surf'))
regs = ('reg', 'left_right') if subject == 'fsaverage_ds' else ('reg',)
for hemi in ['lh', 'rh']:
for reg in regs:
args = [subject, 'surf', hemi + '.sphere.' + reg]
copyfile(op.join(subjects_dir, *args),
op.join(tempdir, *args))
for subject_from, subject_to, xhemi in (
('fsaverage_ds', 'sample_ds', False),
('fsaverage_ds', 'fsaverage_ds', True)):
# trigger the creation of morph-maps dir and create the map
with catch_logging() as log:
mmap = read_morph_map(subject_from, subject_to, tempdir,
xhemi=xhemi, verbose=True)
log = log.getvalue()
assert 'does not exist' in log
assert 'Creating' in log
mmap2 = read_morph_map(subject_from, subject_to, subjects_dir,
xhemi=xhemi)
assert len(mmap) == len(mmap2)
for m1, m2 in zip(mmap, mmap2):
# deal with sparse matrix stuff
diff = (m1 - m2).data
assert_allclose(diff, np.zeros_like(diff), atol=1e-3, rtol=0)
# This will also trigger creation, but it's trivial
with _record_warnings():
mmap = read_morph_map('sample', 'sample', subjects_dir=tempdir)
for mm in mmap:
assert (mm - sparse.eye(mm.shape[0], mm.shape[0])).sum() == 0
| 36.229508
| 79
| 0.628959
|
8eb1b1b80043e61fe090fe4fdc9c1ce7fdbbecad
| 408
|
py
|
Python
|
rental/migrations/0026_auto_20191206_1217.py
|
rjNemo/villafleurie
|
87e4674d5d7f26475b8debe300cbf4263dc18ebd
|
[
"MIT"
] | null | null | null |
rental/migrations/0026_auto_20191206_1217.py
|
rjNemo/villafleurie
|
87e4674d5d7f26475b8debe300cbf4263dc18ebd
|
[
"MIT"
] | 3
|
2020-03-06T18:45:44.000Z
|
2020-07-31T11:47:52.000Z
|
rental/migrations/0026_auto_20191206_1217.py
|
rjNemo/villafleurie
|
87e4674d5d7f26475b8debe300cbf4263dc18ebd
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0 on 2019-12-06 12:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rental', '0025_auto_20191206_1215'),
]
operations = [
migrations.AlterField(
model_name='place',
name='calendar',
field=models.CharField(blank=True, max_length=350, null=True),
),
]
| 21.473684
| 74
| 0.607843
|
602c209f2e54793eb3d71c9cd40d0b11faf6a241
| 737
|
py
|
Python
|
project/machine_learning_model/model_predict.py
|
wanwanjiajia/adversarial_attacks_defense
|
d22dbfa7de5c6af901dbd2ed26e7eb079bd0c215
|
[
"BSD-2-Clause"
] | null | null | null |
project/machine_learning_model/model_predict.py
|
wanwanjiajia/adversarial_attacks_defense
|
d22dbfa7de5c6af901dbd2ed26e7eb079bd0c215
|
[
"BSD-2-Clause"
] | null | null | null |
project/machine_learning_model/model_predict.py
|
wanwanjiajia/adversarial_attacks_defense
|
d22dbfa7de5c6af901dbd2ed26e7eb079bd0c215
|
[
"BSD-2-Clause"
] | null | null | null |
# _*_coding:utf-8 _*_
# @Time :2020/01/03 12:20
# @Author : Wanjia Zheng
# @File :model_predict.py
# @Software :PyCharm
from sklearn.model_selection import cross_validate
def model_predict(model, X_train_reduced, Y_train):
scores = cross_validate(model, X_train_reduced, Y_train.ravel(), cv=10,
scoring=['precision', 'f1', 'accuracy', 'recall'], return_train_score=False)
auc = str(round(100 * scores['test_accuracy'].mean(), 2)) + "%"
recall = str(round(100 * scores['test_recall'].mean(), 2)) + "%"
f1 = str(round(100 * scores['test_f1'].mean(), 2)) + "%"
precision = str(round(100 * scores['test_precision'].mean(), 2)) + "%"
return auc, recall, f1, precision
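# Added illustrative usage sketch, not part of the original file; the estimator and data are
# placeholders, and the first returned value is mean cross-validated accuracy despite the
# local name `auc` above.
#
#   from sklearn.datasets import make_classification
#   from sklearn.ensemble import RandomForestClassifier
#   X_train_reduced, Y_train = make_classification(n_samples=200, n_features=10, random_state=0)
#   acc, recall, f1, precision = model_predict(RandomForestClassifier(random_state=0),
#                                              X_train_reduced, Y_train)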
| 32.043478
| 104
| 0.624152
|
d2da4cc3de17b7d9d98ae81089c7de186f7d3236
| 5,762
|
py
|
Python
|
tests/usecases/list_permissions_test.py
|
aneeq009/merou
|
7a87b43aaf64244932fa460842132a2d9329e704
|
[
"Apache-2.0"
] | 58
|
2017-05-26T06:46:24.000Z
|
2022-03-25T20:55:51.000Z
|
tests/usecases/list_permissions_test.py
|
aneeq009/merou
|
7a87b43aaf64244932fa460842132a2d9329e704
|
[
"Apache-2.0"
] | 74
|
2017-06-16T17:48:37.000Z
|
2022-03-28T23:09:54.000Z
|
tests/usecases/list_permissions_test.py
|
aneeq009/merou
|
7a87b43aaf64244932fa460842132a2d9329e704
|
[
"Apache-2.0"
] | 43
|
2017-05-20T22:11:51.000Z
|
2022-03-25T00:24:56.000Z
|
from dataclasses import replace
from datetime import datetime
from time import time
from typing import TYPE_CHECKING
from grouper.constants import PERMISSION_CREATE
from grouper.entities.pagination import ListPermissionsSortKey, PaginatedList, Pagination
from grouper.entities.permission import Permission
from grouper.usecases.list_permissions import ListPermissionsUI
if TYPE_CHECKING:
from tests.setup import SetupTest
from typing import List
class MockUI(ListPermissionsUI):
def __init__(self, sort: bool = False) -> None:
self.sort = sort
def listed_permissions(self, permissions: PaginatedList[Permission], can_create: bool) -> None:
if self.sort:
self.permissions = replace(permissions, values=sorted(permissions.values))
else:
self.permissions = permissions
self.can_create = can_create
def create_test_data(setup):
# type: (SetupTest) -> List[Permission]
"""Sets up a very basic test graph and returns the permission objects.
Be careful not to include milliseconds in the creation timestamps since this causes different
behavior on SQLite (which preserves them) and MySQL (which drops them).
"""
early_date = datetime.utcfromtimestamp(1)
now_minus_one_second = datetime.utcfromtimestamp(int(time() - 1))
now = datetime.utcfromtimestamp(int(time()))
permissions = [
Permission(
name="first-permission",
description="first",
created_on=now_minus_one_second,
audited=False,
enabled=True,
),
Permission(
name="audited-permission", description="", created_on=now, audited=True, enabled=True
),
Permission(
name="early-permission",
description="is early",
created_on=early_date,
audited=False,
enabled=True,
),
]
with setup.transaction():
for permission in permissions:
setup.create_permission(
name=permission.name,
description=permission.description,
created_on=permission.created_on,
audited=permission.audited,
)
setup.create_permission("disabled", enabled=False)
setup.create_user("gary@a.co")
return permissions
def test_simple_list_permissions(setup):
# type: (SetupTest) -> None
permissions = create_test_data(setup)
mock_ui = MockUI(sort=True)
usecase = setup.usecase_factory.create_list_permissions_usecase(mock_ui)
usecase.simple_list_permissions()
assert not mock_ui.can_create
expected = PaginatedList(values=sorted(permissions), total=3, offset=0, limit=None)
assert mock_ui.permissions == expected
def test_list_permissions_pagination(setup):
# type: (SetupTest) -> None
permissions = create_test_data(setup)
mock_ui = MockUI()
usecase = setup.usecase_factory.create_list_permissions_usecase(mock_ui)
# Sorted by name, limited to 2.
pagination = Pagination(
sort_key=ListPermissionsSortKey.NAME, reverse_sort=False, offset=0, limit=2
)
usecase.list_permissions("gary@a.co", pagination, audited_only=False)
expected = PaginatedList(values=sorted(permissions)[:2], total=3, offset=0, limit=2)
assert mock_ui.permissions == expected
# Sorted by date, using offset, limit longer than remaining items.
pagination = Pagination(
sort_key=ListPermissionsSortKey.DATE, reverse_sort=False, offset=2, limit=10
)
usecase.list_permissions("gary@a.co", pagination, audited_only=False)
expected_values = sorted(permissions, key=lambda p: p.created_on)[2:]
expected = PaginatedList(values=expected_values, total=3, offset=2, limit=10)
assert mock_ui.permissions == expected
    # Sorted by name, reversed, limit of 1 and offset of 1.
pagination = Pagination(
sort_key=ListPermissionsSortKey.NAME, reverse_sort=True, offset=1, limit=1
)
usecase.list_permissions("gary@a.co", pagination, audited_only=False)
expected_values = sorted(permissions, reverse=True)[1:2]
expected = PaginatedList(values=expected_values, total=3, offset=1, limit=1)
assert mock_ui.permissions == expected
def test_list_permissions_audited_only(setup):
# type: (SetupTest) -> None
permissions = create_test_data(setup)
mock_ui = MockUI()
usecase = setup.usecase_factory.create_list_permissions_usecase(mock_ui)
pagination = Pagination(
sort_key=ListPermissionsSortKey.NAME, reverse_sort=False, offset=0, limit=None
)
usecase.list_permissions("gary@a.co", pagination, audited_only=True)
expected_values = [p for p in permissions if p.name == "audited-permission"]
expected = PaginatedList(values=expected_values, total=1, offset=0, limit=None)
assert mock_ui.permissions == expected
def test_list_permissions_can_create(setup):
# type: (SetupTest) -> None
setup.create_permission(PERMISSION_CREATE)
create_test_data(setup)
mock_ui = MockUI()
usecase = setup.usecase_factory.create_list_permissions_usecase(mock_ui)
# User has no permissions.
pagination = Pagination(
sort_key=ListPermissionsSortKey.NAME, reverse_sort=False, offset=0, limit=None
)
usecase.list_permissions("gary@a.co", pagination, audited_only=False)
assert not mock_ui.can_create
# If the user is added to a group with the right permission, can_create should be true.
with setup.transaction():
setup.add_user_to_group("gary@a.co", "creators")
setup.grant_permission_to_group(PERMISSION_CREATE, "*", "creators")
usecase.list_permissions("gary@a.co", pagination, audited_only=False)
assert mock_ui.can_create
| 38.932432
| 99
| 0.708608
|
078b16eab822167b11ee1375c530f09a25c35580
| 986
|
py
|
Python
|
gpypi/exc.py
|
tastuteche/g-pypi-py3
|
ea0fceb573415193993f2a03bd06a9fffa755892
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2015-03-02T02:20:43.000Z
|
2015-03-02T02:20:43.000Z
|
gpypi/exc.py
|
iElectric/g-pypi
|
48b1d83346fecbba4c3eb3a7f80d29ae9ca41ef8
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
gpypi/exc.py
|
iElectric/g-pypi
|
48b1d83346fecbba4c3eb3a7f80d29ae9ca41ef8
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Exceptions module
=============================
"""
class GPyPiException(Exception):
"""Core exception class, all exception inherit from this class."""
class GPyPiInvalidAtom(GPyPiException):
"""Raised when determining Portage Atom did not succeed."""
class GPyPiNoSetupFile(GPyPiException):
"""Raised if no setup.py was found."""
class GPyPiNoDistribution(GPyPiException):
"""Raised if unpacked directory could not be found."""
class GPyPiCouldNotUnpackEbuild(GPyPiException):
"""Raised if unpacking failed."""
class GPyPiInvalidParameter(GPyPiException):
"""Raised CLI parameter is not valid."""
class GPyPiCouldNotCreateEbuildPath(GPyPiException):
"""Raised when directory for an ebuild could not be created."""
class GPyPiOverlayDoesNotExist(GPyPiException):
""""""
class GPyPiConfigurationError(GPyPiException):
""""""
class GPyPiValidationError(GPyPiException):
""""""
| 20.541667
| 70
| 0.700811
|
b56d2092b9905ad4e741a2b1c1355d3a376e0be9
| 1,763
|
py
|
Python
|
roles/openshift_health_checker/openshift_checks/etcd_traffic.py
|
Roscoe198/Ansible-Openshift
|
b874bef456852ef082a27dfec4f2d7d466702370
|
[
"Apache-2.0"
] | 164
|
2015-07-29T17:35:04.000Z
|
2021-12-16T16:38:04.000Z
|
roles/openshift_health_checker/openshift_checks/etcd_traffic.py
|
Roscoe198/Ansible-Openshift
|
b874bef456852ef082a27dfec4f2d7d466702370
|
[
"Apache-2.0"
] | 3,634
|
2015-06-09T13:49:15.000Z
|
2022-03-23T20:55:44.000Z
|
roles/openshift_health_checker/openshift_checks/etcd_traffic.py
|
Roscoe198/Ansible-Openshift
|
b874bef456852ef082a27dfec4f2d7d466702370
|
[
"Apache-2.0"
] | 250
|
2015-06-08T19:53:11.000Z
|
2022-03-01T04:51:23.000Z
|
"""Check that scans journalctl for messages caused as a symptom of increased etcd traffic."""
from openshift_checks import OpenShiftCheck
class EtcdTraffic(OpenShiftCheck):
"""Check if host is being affected by an increase in etcd traffic."""
name = "etcd_traffic"
tags = ["health", "etcd"]
def is_active(self):
"""Skip hosts that do not have etcd in their group names."""
group_names = self.get_var("group_names", default=[])
valid_group_names = "oo_etcd_to_config" in group_names
version = self.get_major_minor_version(self.get_var("openshift_image_tag"))
valid_version = version in ((3, 4), (3, 5))
return super(EtcdTraffic, self).is_active() and valid_group_names and valid_version
def run(self):
openshift_is_containerized = self.get_var("openshift_is_containerized")
unit = "etcd_container" if openshift_is_containerized else "etcd"
log_matchers = [{
"start_regexp": r"Starting Etcd Server",
"regexp": r"etcd: sync duration of [^,]+, expected less than 1s",
"unit": unit
}]
match = self.execute_module("search_journalctl", {"log_matchers": log_matchers})
if match.get("matched"):
msg = ("Higher than normal etcd traffic detected.\n"
"OpenShift 3.4 introduced an increase in etcd traffic.\n"
"Upgrading to OpenShift 3.6 is recommended in order to fix this issue.\n"
"Please refer to https://access.redhat.com/solutions/2916381 for more information.")
return {"failed": True, "msg": msg}
if match.get("failed"):
return {"failed": True, "msg": "\n".join(match.get("errors"))}
return {}
| 39.177778
| 103
| 0.636982
|
562a70c5b0d93d7ebc36844ed5d177bbfa375c54
| 292
|
py
|
Python
|
glosel/glosel/doctype/scheme_management_item/scheme_management_item.py
|
patilsangram/Glosel
|
12b00f7821e63056ef0e4e34e64f2906f927e273
|
[
"MIT"
] | null | null | null |
glosel/glosel/doctype/scheme_management_item/scheme_management_item.py
|
patilsangram/Glosel
|
12b00f7821e63056ef0e4e34e64f2906f927e273
|
[
"MIT"
] | null | null | null |
glosel/glosel/doctype/scheme_management_item/scheme_management_item.py
|
patilsangram/Glosel
|
12b00f7821e63056ef0e4e34e64f2906f927e273
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, New Indictrans Technologies PVT LTD and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class SchemeManagementItem(Document):
pass
| 26.545455
| 74
| 0.794521
|
5582b7d97200517901b0a025b0d58fcb97a3b3bc
| 192
|
py
|
Python
|
fastemplate/objects/user.py
|
htbrandao/fastemplate
|
239784538e1dd7ea8a6d5105cb9cb27b1d7f53cb
|
[
"MIT"
] | 8
|
2021-07-28T13:18:02.000Z
|
2022-02-05T23:10:02.000Z
|
fastemplate/objects/user.py
|
htbrandao/fastemplate
|
239784538e1dd7ea8a6d5105cb9cb27b1d7f53cb
|
[
"MIT"
] | null | null | null |
fastemplate/objects/user.py
|
htbrandao/fastemplate
|
239784538e1dd7ea8a6d5105cb9cb27b1d7f53cb
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
@dataclass
class User:
"""
Schema to represent a user when dealing with authentication.
"""
username: str
full_name: str
owner: bool
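# Added illustrative usage, not part of the original module (the values are placeholders):
#
#   user = User(username="jdoe", full_name="Jane Doe", owner=False)
#   assert not user.owner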
| 17.454545
| 64
| 0.682292
|
11e601abe0317adc8258604c8f9c2f89f9f2afb8
| 1,170
|
py
|
Python
|
sympy/utilities/tests/test_matchpy_connector.py
|
Nisarg-Chaudhari/sympy
|
c9dbb014993f6248be0a4a3d514545cbf7d4c1c1
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/utilities/tests/test_matchpy_connector.py
|
Nisarg-Chaudhari/sympy
|
c9dbb014993f6248be0a4a3d514545cbf7d4c1c1
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/utilities/tests/test_matchpy_connector.py
|
Nisarg-Chaudhari/sympy
|
c9dbb014993f6248be0a4a3d514545cbf7d4c1c1
|
[
"BSD-3-Clause"
] | null | null | null |
from sympy import symbols
from sympy.external import import_module
from sympy.utilities.matchpy_connector import WildDot, WildPlus, WildStar
matchpy = import_module("matchpy")
x, y, z = symbols("x y z")
def _get_first_match(expr, pattern):
from matchpy import ManyToOneMatcher, Pattern
matcher = ManyToOneMatcher()
matcher.add(Pattern(pattern))
return next(iter(matcher.match(expr)))
def test_matchpy_connector():
if matchpy is None:
return
from multiset import Multiset
from matchpy import Pattern, Substitution
w_ = WildDot("w_")
w__ = WildPlus("w__")
w___ = WildStar("w___")
expr = x + y
pattern = x + w_
p, subst = _get_first_match(expr, pattern)
assert p == Pattern(pattern)
assert subst == Substitution({'w_': y})
expr = x + y + z
pattern = x + w__
p, subst = _get_first_match(expr, pattern)
assert p == Pattern(pattern)
assert subst == Substitution({'w__': Multiset([y, z])})
expr = x + y + z
pattern = x + y + z + w___
p, subst = _get_first_match(expr, pattern)
assert p == Pattern(pattern)
assert subst == Substitution({'w___': Multiset()})
| 25.434783
| 73
| 0.659829
|
23285da38c07efcc690eeed03f9fdff7659cd463
| 622
|
py
|
Python
|
var/spack/repos/builtin/packages/py-rdflib-jsonld/package.py
|
MatMaul/spack
|
46c56c163cd0b437c96492b0fa1f3d4bbc4fb492
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-rdflib-jsonld/package.py
|
MatMaul/spack
|
46c56c163cd0b437c96492b0fa1f3d4bbc4fb492
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-rdflib-jsonld/package.py
|
MatMaul/spack
|
46c56c163cd0b437c96492b0fa1f3d4bbc4fb492
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyRdflibJsonld(PythonPackage):
"""RDFLib plugin providing JSON-LD parsing and serialization."""
homepage = "https://github.com/RDFLib/rdflib-jsonld"
url = "https://pypi.io/packages/source/r/rdflib-jsonld/rdflib-jsonld-0.4.0.tar.gz"
version('0.4.0', '69097f57d302791a2959c07e110c47cf')
depends_on('py-setuptools', type='build')
depends_on('py-rdflib', type='run')
| 31.1 | 91 | 0.723473 |
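For context, a minimal sketch of what the packaged plugin provides on the Python side (JSON-LD parsing for rdflib); the JSON-LD document below is illustrative:

from rdflib import Graph

doc = '{"@context": {"name": "http://schema.org/name"}, "name": "Ada"}'
g = Graph()
g.parse(data=doc, format="json-ld")  # the "json-ld" format is registered by the rdflib-jsonld plugin
for subject, predicate, obj in g:
    print(subject, predicate, obj)
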
d12f92fa7d7f5287242a896c1fa9406c7079ad3b | 588 | py | Python | deftools/printerator.py | tobias-fyi/deftools | 3389ff2cc02fb36732d8f0bff71267cc5a6fd6ad | ["MIT"] | 3 | 2020-02-06T16:35:11.000Z | 2020-02-29T09:31:55.000Z | deftools/printerator.py | tobias-fyi/deftools | 3389ff2cc02fb36732d8f0bff71267cc5a6fd6ad | ["MIT"] | 82 | 2020-01-29T23:48:32.000Z | 2021-09-08T02:09:30.000Z | deftools/printerator.py | tobias-fyi/deftools | 3389ff2cc02fb36732d8f0bff71267cc5a6fd6ad | ["MIT"] | 10 | 2020-02-20T16:59:19.000Z | 2020-05-28T15:27:13.000Z |
"""Generator-based script to build a dataset from a filesystem."""
import os
class FileReader:
"""Base class for reading through a file system."""
def __init__(self, src, valid_exts=(".jpg", ".jpeg", ".png")):
self.src = src
self.valid_exts = valid_exts
def file_printer(self):
for filename in sorted(os.listdir(self.src)):
yield filename
def main(src):
reader = FileReader(src)
for f in reader.file_printer():
print(f)
if __name__ == "__main__":
main("/Users/Tobias/workshop/buildbox/forecut/assets_/images")
| 21.777778 | 66 | 0.641156 |
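A short usage sketch for the FileReader generator above; the directory path is illustrative rather than the hard-coded one in main():

from deftools.printerator import FileReader

# file_printer() yields the sorted directory listing lazily, one name at a time.
reader = FileReader("/tmp/images")
for filename in reader.file_printer():
    print(filename)
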
ad0a6c82f832c5a7fb241a2b9eed05ad1b0a6983 | 13,750 | py | Python | var/spack/repos/builtin/packages/openblas/package.py | carlabguillen/spack | 7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | null | null | null | var/spack/repos/builtin/packages/openblas/package.py | carlabguillen/spack | 7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 3 | 2022-03-09T09:15:39.000Z | 2022-03-09T09:15:42.000Z | var/spack/repos/builtin/packages/openblas/package.py | carlabguillen/spack | 7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 1 | 2021-01-05T20:00:52.000Z | 2021-01-05T20:00:52.000Z |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import re
from spack import *
from spack.package_test import compare_output_file, compile_c_and_execute
class Openblas(MakefilePackage):
"""OpenBLAS: An optimized BLAS library"""
homepage = 'https://www.openblas.net'
url = 'https://github.com/xianyi/OpenBLAS/archive/v0.2.19.tar.gz'
git = 'https://github.com/xianyi/OpenBLAS.git'
version('develop', branch='develop')
version('0.3.10', sha256='0484d275f87e9b8641ff2eecaa9df2830cbe276ac79ad80494822721de6e1693')
version('0.3.9', sha256='17d4677264dfbc4433e97076220adc79b050e4f8a083ea3f853a53af253bc380')
version('0.3.8', sha256='8f86ade36f0dbed9ac90eb62575137388359d97d8f93093b38abe166ad7ef3a8')
version('0.3.7', sha256='bde136122cef3dd6efe2de1c6f65c10955bbb0cc01a520c2342f5287c28f9379')
version('0.3.6', sha256='e64c8fe083832ffbc1459ab6c72f71d53afd3b36e8497c922a15a06b72e9002f')
version('0.3.5', sha256='0950c14bd77c90a6427e26210d6dab422271bc86f9fc69126725833ecdaa0e85')
version('0.3.4', sha256='4b4b4453251e9edb5f57465bf2b3cf67b19d811d50c8588cdf2ea1f201bb834f')
version('0.3.3', sha256='49d88f4494ae780e3d7fa51769c00d982d7cdb73e696054ac3baa81d42f13bab')
version('0.3.2', sha256='e8ba64f6b103c511ae13736100347deb7121ba9b41ba82052b1a018a65c0cb15')
version('0.3.1', sha256='1f5e956f35f3acdd3c74516e955d797a320c2e0135e31d838cbdb3ea94d0eb33')
version('0.3.0', sha256='cf51543709abe364d8ecfb5c09a2b533d2b725ea1a66f203509b21a8e9d8f1a1')
version('0.2.20', sha256='5ef38b15d9c652985774869efd548b8e3e972e1e99475c673b25537ed7bcf394')
version('0.2.19', sha256='9c40b5e4970f27c5f6911cb0a28aa26b6c83f17418b69f8e5a116bb983ca8557')
version('0.2.18', sha256='7d9f8d4ea4a65ab68088f3bb557f03a7ac9cb5036ef2ba30546c3a28774a4112')
version('0.2.17', sha256='0fe836dfee219ff4cadcc3567fb2223d9e0da5f60c7382711fb9e2c35ecf0dbf')
version('0.2.16', sha256='766f350d0a4be614812d535cead8c816fc3ad3b9afcd93167ea5e4df9d61869b')
version('0.2.15', sha256='73c40ace5978282224e5e122a41c8388c5a19e65a6f2329c2b7c0b61bacc9044')
variant('ilp64', default=False, description='Force 64-bit Fortran native integers')
variant('pic', default=True, description='Build position independent code')
variant('shared', default=True, description='Build shared libraries')
variant('consistent_fpcsr', default=False, description='Synchronize FP CSR between threads (x86/x86_64 only)')
variant(
'threads', default='none',
description='Multithreading support',
values=('pthreads', 'openmp', 'none'),
multi=False
)
# virtual dependency
provides('blas')
provides('lapack')
# OpenBLAS >=3.0 has an official way to disable internal parallel builds
patch('make.patch', when='@0.2.16:0.2.20')
# This patch is in a pull request to OpenBLAS that has not been handled
# https://github.com/xianyi/OpenBLAS/pull/915
# UPD: the patch has been merged starting version 0.2.20
patch('openblas_icc.patch', when='@:0.2.19%intel')
patch('openblas_icc_openmp.patch', when='@:0.2.20%intel@16.0:')
patch('openblas_icc_fortran.patch', when='%intel@16.0:')
patch('openblas_icc_fortran2.patch', when='%intel@18.0:')
# See https://github.com/spack/spack/issues/15385
patch('lapack-0.3.9-xerbl.patch', when='@0.3.8:0.3.9 %intel')
# Fixes compilation error on POWER8 with GCC 7
# https://github.com/xianyi/OpenBLAS/pull/1098
patch('power8.patch', when='@0.2.18:0.2.19 %gcc@7.1.0: target=power8')
# Change file comments to work around clang 3.9 assembler bug
# https://github.com/xianyi/OpenBLAS/pull/982
patch('openblas0.2.19.diff', when='@0.2.19')
# Fix CMake export symbol error
# https://github.com/xianyi/OpenBLAS/pull/1703
patch('openblas-0.3.2-cmake.patch', when='@0.3.1:0.3.2')
# Disable experimental TLS code that lead to many threading issues
# https://github.com/xianyi/OpenBLAS/issues/1735#issuecomment-422954465
# https://github.com/xianyi/OpenBLAS/issues/1761#issuecomment-421039174
# https://github.com/xianyi/OpenBLAS/pull/1765
patch('https://github.com/xianyi/OpenBLAS/commit/4d183e5567346f80f2ef97eb98f8601c47f8cb56.patch',
sha256='714aea33692304a50bd0ccde42590c176c82ded4a8ac7f06e573dc8071929c33',
when='@0.3.3')
# Fix parallel build issues on filesystems
# with missing sub-second timestamp resolution
patch('https://github.com/xianyi/OpenBLAS/commit/79ea839b635d1fd84b6ce8a47e086f01d64198e6.patch',
sha256='f1b066a4481a50678caeb7656bf3e6764f45619686ac465f257c8017a2dc1ff0',
when='@0.3.0:0.3.3')
# Fix https://github.com/xianyi/OpenBLAS/issues/2431
# Patch derived from https://github.com/xianyi/OpenBLAS/pull/2424
patch('openblas-0.3.8-darwin.patch', when='@0.3.8 platform=darwin')
# Fix ICE in LLVM 9.0.0 https://github.com/xianyi/OpenBLAS/pull/2329
# Patch as in https://github.com/xianyi/OpenBLAS/pull/2597
patch('openblas_appleclang11.patch', when='@0.3.8:0.3.9 %apple-clang@11.0.3')
# Add conditions to f_check to determine the Fujitsu compiler
patch('openblas_fujitsu.patch', when='%fj')
patch('openblas_fujitsu2.patch', when='@0.3.10 %fj')
# See https://github.com/spack/spack/issues/3036
conflicts('%intel@16', when='@0.2.15:0.2.19')
conflicts('+consistent_fpcsr', when='threads=none',
msg='FPCSR consistency only applies to multithreading')
conflicts('threads=openmp', when='%apple-clang', msg="Apple's clang does not support OpenMP")
conflicts('threads=openmp @:0.2.19', when='%clang', msg='OpenBLAS @:0.2.19 does not support OpenMP with clang!')
@property
def parallel(self):
# unclear whether setting `-j N` externally was supported before 0.3
return self.spec.version >= Version('0.3.0')
@run_before('edit')
def check_compilers(self):
# As of 06/2016 there is no mechanism to specify that packages which
# depends on Blas/Lapack need C or/and Fortran symbols. For now
# require both.
if self.compiler.fc is None:
raise InstallError(
'OpenBLAS requires both C and Fortran compilers!'
)
@staticmethod
def _read_targets(target_file):
"""Parse a list of available targets from the OpenBLAS/TargetList.txt
file.
"""
micros = []
re_target = re.compile(r'^[A-Z0-9_]+$')
for line in target_file:
match = re_target.match(line)
if match is not None:
micros.append(line.strip().lower())
return micros
def _microarch_target_args(self):
"""Given a spack microarchitecture and a list of targets found in
OpenBLAS' TargetList.txt, determine the best command-line arguments.
"""
# Read available openblas targets
targetlist_name = join_path(self.stage.source_path, "TargetList.txt")
if os.path.exists(targetlist_name):
with open(targetlist_name) as f:
available_targets = self._read_targets(f)
else:
available_targets = []
# Get our build microarchitecture
microarch = self.spec.target
# List of arguments returned by this function
args = []
# List of available architectures, and possible aliases
openblas_arch = set(['alpha', 'arm', 'ia64', 'mips', 'mips64',
'power', 'sparc', 'zarch'])
openblas_arch_map = {
'amd64': 'x86_64',
'powerpc64': 'power',
'i386': 'x86',
'aarch64': 'arm64',
}
openblas_arch.update(openblas_arch_map.keys())
openblas_arch.update(openblas_arch_map.values())
# Add spack-only microarchitectures to list
skylake = set(["skylake", "skylake_avx512"])
available_targets = set(available_targets) | skylake | openblas_arch
# Find closest ancestor that is known to build in blas
if microarch.name not in available_targets:
for microarch in microarch.ancestors:
if microarch.name in available_targets:
break
if self.version >= Version("0.3"):
# 'ARCH' argument causes build errors in older OpenBLAS
# see https://github.com/spack/spack/issues/15385
arch_name = microarch.family.name
if arch_name in openblas_arch:
# Apply possible spack->openblas arch name mapping
arch_name = openblas_arch_map.get(arch_name, arch_name)
args.append('ARCH=' + arch_name)
if microarch.vendor == 'generic':
# User requested a generic platform, or we couldn't find a good
# match for the requested one. Allow OpenBLAS to determine
# an optimized kernel at run time.
args.append('DYNAMIC_ARCH=1')
elif microarch.name in skylake:
# Special case for renaming skylake family
args.append('TARGET=SKYLAKEX')
if microarch.name == "skylake":
# Special case for disabling avx512 instructions
args.append('NO_AVX512=1')
else:
args.append('TARGET=' + microarch.name.upper())
return args
@property
def make_defs(self):
# Configure fails to pick up fortran from FC=/abs/path/to/fc, but
# works fine with FC=/abs/path/to/gfortran.
# When mixing compilers make sure that
# $SPACK_ROOT/lib/spack/env/<compiler> have symlinks with reasonable
# names and hack them inside lib/spack/spack/compilers/<compiler>.py
make_defs = [
'CC={0}'.format(spack_cc),
'FC={0}'.format(spack_fc),
]
# force OpenBLAS to use externally defined parallel build
if self.spec.version < Version('0.3'):
make_defs.append('MAKE_NO_J=1') # flag defined by our make.patch
else:
make_defs.append('MAKE_NB_JOBS=0') # flag provided by OpenBLAS
# Add target and architecture flags
make_defs += self._microarch_target_args()
if '~shared' in self.spec:
if '+pic' in self.spec:
make_defs.extend([
'CFLAGS={0}'.format(self.compiler.cc_pic_flag),
'FFLAGS={0}'.format(self.compiler.f77_pic_flag)
])
make_defs += ['NO_SHARED=1']
# fix missing _dggsvd_ and _sggsvd_
if self.spec.satisfies('@0.2.16'):
make_defs += ['BUILD_LAPACK_DEPRECATED=1']
# Add support for multithreading
if self.spec.satisfies('threads=openmp'):
make_defs += ['USE_OPENMP=1', 'USE_THREAD=1']
elif self.spec.satisfies('threads=pthreads'):
make_defs += ['USE_OPENMP=0', 'USE_THREAD=1']
else:
make_defs += ['USE_OPENMP=0', 'USE_THREAD=0']
# 64bit ints
if '+ilp64' in self.spec:
make_defs += ['INTERFACE64=1']
# Synchronize floating-point control and status register (FPCSR)
# between threads (x86/x86_64 only).
if '+consistent_fpcsr' in self.spec:
make_defs += ['CONSISTENT_FPCSR=1']
# Prevent errors in `as` assembler from newer instructions
if self.spec.satisfies('%gcc@:4.8.4'):
make_defs.append('NO_AVX2=1')
return make_defs
@property
def headers(self):
# As in netlib-lapack, the only public headers for cblas and lapacke in
# openblas are cblas.h and lapacke.h. The remaining headers are private
# headers either included in one of these two headers, or included in
# one of the source files implementing functions declared in these
# headers.
return find_headers(['cblas', 'lapacke'], self.prefix.include)
@property
def build_targets(self):
targets = ['libs', 'netlib']
# Build shared if variant is set.
if '+shared' in self.spec:
targets += ['shared']
return self.make_defs + targets
@run_after('build')
@on_package_attributes(run_tests=True)
def check_build(self):
make('tests', *self.make_defs, parallel=False)
@property
def install_targets(self):
make_args = [
'install',
'PREFIX={0}'.format(self.prefix),
]
return make_args + self.make_defs
@run_after('install')
@on_package_attributes(run_tests=True)
def check_install(self):
spec = self.spec
# Openblas may pass its own test but still fail to compile Lapack
# symbols. To make sure we get working Blas and Lapack, do a small
# test.
source_file = join_path(os.path.dirname(self.module.__file__),
'test_cblas_dgemm.c')
blessed_file = join_path(os.path.dirname(self.module.__file__),
'test_cblas_dgemm.output')
include_flags = spec['openblas'].headers.cpp_flags
link_flags = spec['openblas'].libs.ld_flags
if self.compiler.name == 'intel':
link_flags += ' -lifcore'
if self.spec.satisfies('threads=pthreads'):
link_flags += ' -lpthread'
if spec.satisfies('threads=openmp'):
link_flags += ' -lpthread ' + self.compiler.openmp_flag
output = compile_c_and_execute(
source_file, [include_flags], link_flags.split()
)
compare_output_file(output, blessed_file)
| 43.238994 | 116 | 0.655636 |
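A hedged, standalone sketch of how the recipe's variants map onto make flags, reproducing only the threads/ilp64 branch of make_defs above (plain Python, no Spack API calls):

def sketch_make_defs(threads="none", ilp64=False):
    """Mirror of the threads/ilp64 handling in Openblas.make_defs, for illustration only."""
    defs = []
    if threads == "openmp":
        defs += ["USE_OPENMP=1", "USE_THREAD=1"]
    elif threads == "pthreads":
        defs += ["USE_OPENMP=0", "USE_THREAD=1"]
    else:
        defs += ["USE_OPENMP=0", "USE_THREAD=0"]
    if ilp64:
        defs += ["INTERFACE64=1"]
    return defs

print(sketch_make_defs(threads="openmp", ilp64=True))
# ['USE_OPENMP=1', 'USE_THREAD=1', 'INTERFACE64=1']
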
b42568f99316032f0a799faf900b09ad7a192e26 | 40 | py | Python | tests/delete_deployment_test.py | BIX-Digital/mlflow-openshift | 1909191503ec2071d6f1f4e79cd859e7d7ad15e2 | ["Apache-2.0"] | 4 | 2021-01-06T07:49:21.000Z | 2021-11-30T13:49:50.000Z | tests/delete_deployment_test.py | pille321/mlflow-openshift | 0450a9ca7ae25dcd462324c41cccc894d40b7be5 | ["Apache-2.0"] | 2 | 2021-02-27T10:10:27.000Z | 2022-02-08T08:13:50.000Z | tests/delete_deployment_test.py | pille321/mlflow-openshift | 0450a9ca7ae25dcd462324c41cccc894d40b7be5 | ["Apache-2.0"] | 1 | 2021-01-11T06:22:16.000Z | 2021-01-11T06:22:16.000Z |
# TODO: check if it is really gone
pass
| 13.333333 | 34 | 0.725 |
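A hedged sketch of what the TODO in the test above might check, using the kubernetes Python client rather than anything from mlflow-openshift itself; the deployment name and namespace are assumptions:

from kubernetes import client, config
from kubernetes.client.rest import ApiException

def deployment_is_gone(name: str, namespace: str) -> bool:
    # Returns True when the Deployment can no longer be read (HTTP 404).
    config.load_kube_config()
    try:
        client.AppsV1Api().read_namespaced_deployment(name=name, namespace=namespace)
        return False
    except ApiException as exc:
        return exc.status == 404

assert deployment_is_gone("my-model", "mlflow")  # names are illustrative
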
5c5a5f32f22ff496dd984ad17aeb475bb6bc47ab | 16,858 | py | Python | src/cltk/phonology/syllabify.py | clemsciences/cltkv1 | 88bd45419e254f69c9467ce6b2543249c57d1503 | ["MIT"] | 2 | 2019-08-18T21:10:01.000Z | 2020-05-05T09:19:40.000Z | src/cltk/phonology/syllabify.py | clemsciences/cltkv1 | 88bd45419e254f69c9467ce6b2543249c57d1503 | ["MIT"] | 62 | 2019-02-01T18:48:11.000Z | 2020-08-19T17:52:53.000Z | src/cltk/phonology/syllabify.py | clemsciences/cltkv1 | 88bd45419e254f69c9467ce6b2543249c57d1503 | ["MIT"] | 8 | 2019-04-07T03:47:31.000Z | 2020-07-01T07:02:59.000Z |
import logging
import unicodedata
from collections import defaultdict
from typing import List
from cltk.core.exceptions import CLTKException
from cltk.phonology.middle_english.syllabifier import Syllabifier as ME_Syllabifier
from cltk.phonology.middle_high_german.syllabifier import Syllabifier as MHG_Syllabifier
from cltk.phonology.old_english.syllabifier import Syllabifier as OE_Syllabifier
from cltk.phonology.old_norse.syllabifier import hierarchy as old_norse_hierarchy
from cltk.phonology.old_norse.syllabifier import (
ipa_hierarchy as ipa_old_norse_hierarchy,
)
__author__ = [
"Eleftheria Chatziargyriou <ele.hatzy@gmail.com>",
"Clément Besnier <clemsciences@aol.com>",
]
__license__ = "MIT License. See LICENSE."
LOG = logging.getLogger(__name__)
LOG.addHandler(logging.NullHandler())
def get_onsets(text, vowels="aeiou", threshold=0.0002):
"""
Source: Resonances in Middle High German: New Methodologies in Prosody,
2017, C. L. Hench
:param text: str list: text to be analysed
:param vowels: str: valid vowels constituting the syllable
    :param threshold: minimum frequency for a valid onset; C. Hench noted
    that the algorithm produces the best results for an untagged MHG word set
    when retaining onsets that appear in at least 0.02% of the words
Let's test it on the opening lines of Nibelungenlied
>>> text = ['uns', 'ist', 'in', 'alten', 'mæren', 'wunders', 'vil', 'geseit', 'von', 'helden', 'lobebæren', 'von', 'grôzer', 'arebeit', 'von', 'fröuden', 'hôchgezîten', 'von', 'weinen', 'und', 'von', 'klagen', 'von', 'küener', 'recken', 'strîten', 'muget', 'ir', 'nu', 'wunder', 'hœren', 'sagen']
>>> vowels = "aeiouæœôîöü"
>>> get_onsets(text, vowels=vowels)
['lt', 'm', 'r', 'w', 'nd', 'v', 'g', 's', 'h', 'ld', 'l', 'b', 'gr', 'z', 'fr', 'd', 'chg', 't', 'n', 'kl', 'k', 'ck', 'str']
Of course, this is an insignificant sample, but we could try and see
how modifying the threshold affects the returned onset:
>>> get_onsets(text, threshold = 0.05, vowels=vowels)
['m', 'r', 'w', 'nd', 'v', 'g', 's', 'h', 'b', 'z', 't', 'n']
"""
onset_dict = defaultdict(lambda: 0)
n = len(text)
for word in text:
onset = ""
candidates = []
for l in word:
if l not in vowels:
onset += l
else:
if onset != "":
candidates.append(onset)
onset = ""
for c in candidates:
onset_dict[c] += 1
return [onset for onset, i in onset_dict.items() if i / n > threshold]
class Syllabifier:
def __init__(
self,
low_vowels=None,
mid_vowels=None,
high_vowels=None,
flaps=None,
laterals=None,
nasals=None,
fricatives=None,
plosives=None,
language=None,
break_geminants=False,
variant=None,
):
self.break_geminants = break_geminants
self.invalid_onsets = []
self.invalid_ultima = []
if language == "enm":
hierarchy = [[] for _ in range(len(set(ME_Syllabifier.values())))]
for k in ME_Syllabifier:
hierarchy[ME_Syllabifier[k] - 1].append(k)
self.set_hierarchy(hierarchy)
self.set_vowels(hierarchy[0])
self.invalid_ultima = ["a", "ae", "æ", "e", "ea", "eo", "i", "o", "u", "y"]
elif language == "ang":
hierarchy = [[] for _ in range(len(set(OE_Syllabifier.values())))]
for k in OE_Syllabifier:
hierarchy[OE_Syllabifier[k] - 1].append(k)
self.set_hierarchy(hierarchy)
self.set_vowels(hierarchy[0])
elif language == "gmh":
hierarchy = [[] for _ in range(len(set(MHG_Syllabifier.values())))]
for k in MHG_Syllabifier:
hierarchy[MHG_Syllabifier[k] - 1].append(k)
self.set_hierarchy(hierarchy)
self.set_vowels(hierarchy[0])
elif language == "non":
self.set_hierarchy(old_norse_hierarchy)
self.set_vowels(old_norse_hierarchy[0])
elif language == "non" and variant == "ipa":
self.set_hierarchy(ipa_old_norse_hierarchy)
self.set_vowels(ipa_old_norse_hierarchy[0])
else:
self.low_vowels = [] if low_vowels is None else low_vowels
self.mid_vowels = [] if mid_vowels is None else mid_vowels
self.high_vowels = [] if high_vowels is None else high_vowels
self.vowels = self.low_vowels + self.mid_vowels + self.high_vowels
self.flaps = [] if flaps is None else flaps
self.laterals = [] if laterals is None else laterals
self.nasals = [] if nasals is None else nasals
self.fricatives = [] if fricatives is None else fricatives
self.plosives = [] if plosives is None else plosives
self.consonants = (
self.flaps + self.laterals + self.fricatives + self.plosives
)
# Dictionary indicating sonority hierarchy
self.hierarchy = {key: 0 for key in self.low_vowels}
self.hierarchy.update({key: 1 for key in self.mid_vowels})
self.hierarchy.update({key: 2 for key in self.high_vowels})
self.hierarchy.update({key: 3 for key in self.flaps})
self.hierarchy.update({key: 4 for key in self.laterals})
self.hierarchy.update({key: 5 for key in self.nasals})
self.hierarchy.update({key: 6 for key in self.fricatives})
self.hierarchy.update({key: 7 for key in self.plosives})
def set_invalid_onsets(self, invalid_onsets):
self.invalid_onsets = invalid_onsets
def set_invalid_ultima(self, invalid_ultima):
self.invalid_ultima = invalid_ultima
def set_hierarchy(self, hierarchy):
"""
Sets an alternative sonority hierarchy, note that you will also need
to specify the vowelset with the set_vowels, in order for the module
to correctly identify each nucleus.
The order of the phonemes defined is by decreased consonantality
>>> s = Syllabifier()
>>> s.set_hierarchy([['i', 'u'], ['e'], ['a'], ['r'], ['m', 'n'], ['f']])
>>> s.set_vowels(['i', 'u', 'e', 'a'])
>>> s.syllabify('feminarum')
['fe', 'mi', 'na', 'rum']
"""
self.hierarchy = dict([(k, i) for i, j in enumerate(hierarchy) for k in j])
def set_vowels(self, vowels):
"""
Define the vowel set of the syllabifier module
>>> s = Syllabifier()
>>> s.set_vowels(['i', 'u', 'e', 'a'])
>>> s.vowels
['i', 'u', 'e', 'a']
"""
self.vowels = vowels
def syllabify(self, word, mode="SSP"):
if mode == "SSP":
return self.syllabify_ssp(word)
def syllabify_ssp(self, word):
"""
Syllabifies a word according to the Sonority Sequencing Principle
:param word: Word to be syllabified
:return: List consisting of syllables
        First you need to define the manners of articulation
>>> high_vowels = ['a']
>>> mid_vowels = ['e']
>>> low_vowels = ['i', 'u']
>>> flaps = ['r']
>>> nasals = ['m', 'n']
>>> fricatives = ['f']
>>> s = Syllabifier(high_vowels=high_vowels, mid_vowels=mid_vowels, low_vowels=low_vowels, flaps=flaps, nasals=nasals, fricatives=fricatives)
>>> s.syllabify("feminarum")
['fe', 'mi', 'na', 'rum']
Not specifying your alphabet results in an error:
>>> s.syllabify("foemina")
Traceback (most recent call last):
...
cltk.core.exceptions.CLTKException
Additionally, you can utilize the language parameter:
>>> s = Syllabifier(language='gmh')
>>> s.syllabify('lobebæren')
['lo', 'be', 'bæ', 'ren']
>>> s = Syllabifier(language='enm')
>>> s.syllabify("huntyng")
['hun', 'tyng']
>>> s = Syllabifier(language='ang')
>>> s.syllabify("arcebiscop")
['ar', 'ce', 'bis', 'cop']
The break_geminants parameter ensures a breakpoint is placed between geminants:
>>> geminant_s = Syllabifier(break_geminants=True)
>>> hierarchy = [["a", "á", "æ", "e", "é", "i", "í", "o", "ǫ", "ø", "ö", "œ", "ó", "u", "ú", "y", "ý"], ["j"], ["m"], ["n"], ["p", "b", "d", "g", "t", "k"], ["c", "f", "s", "h", "v", "x", "þ", "ð"], ["r"], ["l"]]
>>> geminant_s.set_hierarchy(hierarchy)
>>> geminant_s.set_vowels(hierarchy[0])
>>> geminant_s.syllabify("ennitungl")
['en', 'ni', 'tungl']
"""
# List indicating the syllable indices
syllables = []
find_nucleus = True
i = 0
try:
            # Replace each letter occurrence with its corresponding number
# indicating its position in the sonority hierarchy
encoded = list(map(lambda x: self.hierarchy[x], word))
except KeyError:
LOG.error(
"The given string contains invalid characters. "
"Make sure to define the mater of articulation for each phoneme."
)
raise CLTKException
while i < len(word) - 1:
# Search for nucleus
while word[i] not in self.vowels and i < len(word) - 1 and find_nucleus:
i += 1
if find_nucleus is True:
i += 1
if i >= len(word) - 1:
break
else:
# If the break_geminants parameter is set to True, prioritize geminants
if self.break_geminants and word[i - 1] == word[i]:
syllables.append(i - 1)
find_nucleus = True
# If a cluster of three phonemes with the same values exist, break syllable
elif encoded[i - 1] == encoded[i] == encoded[i + 1]:
syllables.append(i)
find_nucleus = True
elif encoded[i] > encoded[i - 1] and encoded[i] > encoded[i + 1]:
syllables.append(i)
find_nucleus = True
elif encoded[i] < encoded[i - 1] and encoded[i] < encoded[i + 1]:
syllables.append(i)
find_nucleus = True
else:
find_nucleus = False
i += 1
for n, k in enumerate(syllables):
word = word[: k + n + 1] + "." + word[k + n + 1 :]
word = word.split(".")
# Check if last syllable has a nucleus
if sum([x in self.vowels for x in word[-1]]) == 0:
word[-2] += word[-1]
word = word[:-1]
return self.onset_maximization(word)
def onset_maximization(self, syllables):
for i, syl in enumerate(syllables):
if i != len(syllables) - 1:
if (
syllables[i + 1][0] in self.vowels
and syllables[i + 1][-1] not in self.vowels
):
syllables[i + 1] = syllables[i][-1] + syllables[i + 1]
syllables[i] = syllables[i][:-1]
return self.legal_onsets(syllables)
def legal_onsets(self, syllables):
"""
        Filters syllables according to the legality principle
:param syllables: str list
The method scans for invalid syllable onsets:
>>> s = Syllabifier(["i", "u", "y"], ["o", "ø", "e"], ["a"], ["r"], ["l"], ["m", "n"], ["f", "v", "s", "h"], ["k", "g", "b", "p", "t", "d"])
>>> s.set_invalid_onsets(['lm'])
>>> s.legal_onsets(['a', 'lma', 'tigr'])
['al', 'ma', 'tigr']
You can also define invalid syllable ultima:
>>> s.set_invalid_ultima(['gr'])
>>> s.legal_onsets(['al', 'ma', 'ti', 'gr'])
['al', 'ma', 'tigr']
"""
vowels = self.vowels
for i in range(1, len(syllables)):
onset = ""
for letter in syllables[i]:
if letter in vowels:
break
onset += letter
for j in range(len(onset)):
# Check whether the given onset is valid
if onset[j:] not in self.invalid_onsets:
syllables[i - 1] += onset[:j]
syllables[i] = syllables[i][j:]
break
# Check whether ultima is invalid
if syllables[-1] in self.invalid_ultima:
syllables[-2] += syllables[-1]
syllables = syllables[:-1]
return syllables
def syllabify_ipa(self, word):
"""
Parses IPA string
:param word: word to be syllabified
"""
word = word[1:-1]
word = "".join(
l
for l in unicodedata.normalize("NFD", word)
if unicodedata.category(l) != "Mn"
)
return self.syllabify_ssp(word)
def syllabify_phonemes(self, phonological_word):
"""
:param phonological_word: result of Transcriber().text_to_phonemes in cltk.phonology.utils
:return:
"""
phoneme_lengths = []
l_transcribed_word = []
for phoneme in phonological_word:
phoneme_lengths.append(len(phoneme.ipar))
l_transcribed_word.append(phoneme.ipar)
transcribed_word = "".join(l_transcribed_word)
transcribed_word = transcribed_word.replace("ː", "")
syllabified_transcribed_word = self.syllabify_ssp(transcribed_word)
syllabified_phonological_word = []
counter = 0 # number of IPA character processed
for i, sts in enumerate(syllabified_transcribed_word):
syllabified_phonological_word.append([])
syllable_len = len(sts)
somme = 0
while somme < syllable_len:
somme += phoneme_lengths[counter]
syllabified_phonological_word[i].append(phonological_word[counter])
counter += 1
return syllabified_phonological_word
class Syllable:
"""
A syllable has three main constituents:
- onset
- nucleus
- coda
Source: https://en.wikipedia.org/wiki/Syllable
"""
def __init__(self, text: str, vowels: List[str], consonants: List[str]):
"""
:param text: a syllable
:param vowels: list of characters
:param consonants: list of characters
"""
self.onset = []
self.nucleus = []
self.coda = []
self.text = text
self.consonants = consonants
self.vowels = vowels
self._compute_syllable(text)
def _compute_syllable(self, text):
"""
>>> sylla1 = Syllable("armr", ["a"], ["r", "m"])
>>> sylla1.onset
[]
>>> sylla1.nucleus
['a']
>>> sylla1.coda
['r', 'm', 'r']
>>> sylla2 = Syllable("gangr", ["a"], ["g", "n", "r"])
>>> sylla2.onset
['g']
>>> sylla2.nucleus
['a']
>>> sylla2.coda
['n', 'g', 'r']
>>> sylla3 = Syllable("aurr", ["a", "u"], ["r"])
>>> sylla3.nucleus
['a', 'u']
>>> sylla3.coda
['r', 'r']
:param text: a syllable
:return:
"""
is_in_onset = True
is_in_nucleus = False
is_in_coda = False
if len(text) > 0:
for c in text:
if is_in_onset and c in self.consonants:
self.onset.append(c)
elif is_in_onset and c in self.vowels:
is_in_onset = False
is_in_nucleus = True
self.nucleus.append(c)
elif is_in_nucleus and c in self.vowels:
self.nucleus.append(c)
elif is_in_nucleus and c in self.consonants:
is_in_nucleus = False
is_in_coda = True
self.coda.append(c)
elif is_in_coda and c in self.consonants:
self.coda.append(c)
elif is_in_coda and c in self.vowels:
raise ValueError(
"This is not a correct syllable "
"(a vowel '{}' cannot be inserted in coda)".format(c)
)
else:
raise ValueError("{} is an unknown character".format(c))
if len(self.nucleus) == 0:
raise ValueError("This is not a correct syllable")
else:
raise ValueError("A syllable can't be void")
def __str__(self):
return "".join(self.onset) + "".join(self.nucleus) + "".join(self.coda)
| 33.581673 | 300 | 0.541049 |
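A usage sketch drawn directly from the module's doctests above: a custom sonority hierarchy plus a vowel set drives syllabify():

from cltk.phonology.syllabify import Syllabifier

s = Syllabifier()
s.set_hierarchy([["i", "u"], ["e"], ["a"], ["r"], ["m", "n"], ["f"]])
s.set_vowels(["i", "u", "e", "a"])
print(s.syllabify("feminarum"))  # ['fe', 'mi', 'na', 'rum'], per the doctest
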
8c1d50425b9bf4df2bd89324c89706a11647b9df | 168 | py | Python | setup.py | fracek/cairo-dap | 6df0c4e56ca613b24b33ad8851ee7b27f650a4d8 | ["Apache-2.0"] | 4 | 2021-05-13T08:17:23.000Z | 2022-02-22T18:13:30.000Z | setup.py | fracek/cairo-dap | 6df0c4e56ca613b24b33ad8851ee7b27f650a4d8 | ["Apache-2.0"] | null | null | null | setup.py | fracek/cairo-dap | 6df0c4e56ca613b24b33ad8851ee7b27f650a4d8 | ["Apache-2.0"] | null | null | null |
from setuptools import setup
setup(
name='cairo_dap',
entry_points={
'console_scripts': [
'cairo-dap=cairo_dap.cli:main'
]
}
)
| 15.272727 | 42 | 0.559524 |
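The console_scripts entry above means an installed package exposes a cairo-dap command that resolves to main() in cairo_dap/cli.py; a minimal illustrative sketch of such a module follows (the argument handling is an assumption, not the project's actual CLI):

# cairo_dap/cli.py (illustrative sketch only)
import sys

def main() -> int:
    # Invoked via the 'cairo-dap=cairo_dap.cli:main' entry point from setup.py.
    print("cairo-dap invoked with:", sys.argv[1:])
    return 0

if __name__ == "__main__":
    sys.exit(main())
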
a7c078d0060928b15e5d2fa7606245c556e528ed | 58,907 | py | Python | superset/connectors/sqla/models.py | venter-zhu/incubator-superset | 329e72885c20d3a36f70dbe2909c4665f51ec92f | ["Apache-2.0"] | 1 | 2020-11-05T11:16:15.000Z | 2020-11-05T11:16:15.000Z | superset/connectors/sqla/models.py | venter-zhu/incubator-superset | 329e72885c20d3a36f70dbe2909c4665f51ec92f | ["Apache-2.0"] | 38 | 2020-11-23T22:26:13.000Z | 2022-02-17T07:39:23.000Z | superset/connectors/sqla/models.py | venter-zhu/incubator-superset | 329e72885c20d3a36f70dbe2909c4665f51ec92f | ["Apache-2.0"] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import logging
from collections import defaultdict, OrderedDict
from contextlib import closing
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from typing import Any, Dict, Hashable, List, NamedTuple, Optional, Tuple, Union
import pandas as pd
import sqlalchemy as sa
import sqlparse
from flask import escape, Markup
from flask_appbuilder import Model
from flask_babel import lazy_gettext as _
from jinja2.exceptions import TemplateError
from sqlalchemy import (
and_,
asc,
Boolean,
Column,
DateTime,
desc,
Enum,
ForeignKey,
Integer,
or_,
select,
String,
Table,
Text,
)
from sqlalchemy.exc import CompileError
from sqlalchemy.orm import backref, Query, relationship, RelationshipProperty, Session
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.schema import UniqueConstraint
from sqlalchemy.sql import column, ColumnElement, literal_column, table, text
from sqlalchemy.sql.expression import Label, Select, TextAsFrom
from sqlalchemy.types import TypeEngine
from superset import app, db, is_feature_enabled, security_manager
from superset.connectors.base.models import BaseColumn, BaseDatasource, BaseMetric
from superset.constants import NULL_STRING
from superset.db_engine_specs.base import TimestampExpression
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from superset.exceptions import (
DatabaseNotFound,
QueryObjectValidationError,
SupersetSecurityException,
)
from superset.jinja_context import (
BaseTemplateProcessor,
ExtraCache,
get_template_processor,
)
from superset.models.annotations import Annotation
from superset.models.core import Database
from superset.models.helpers import AuditMixinNullable, QueryResult
from superset.result_set import SupersetResultSet
from superset.sql_parse import ParsedQuery
from superset.typing import Metric, QueryObjectDict
from superset.utils import core as utils, import_datasource
config = app.config
metadata = Model.metadata # pylint: disable=no-member
logger = logging.getLogger(__name__)
class SqlaQuery(NamedTuple):
extra_cache_keys: List[Any]
labels_expected: List[str]
prequeries: List[str]
sqla_query: Select
class QueryStringExtended(NamedTuple):
labels_expected: List[str]
prequeries: List[str]
sql: str
@dataclass
class MetadataResult:
added: List[str] = field(default_factory=list)
removed: List[str] = field(default_factory=list)
modified: List[str] = field(default_factory=list)
class AnnotationDatasource(BaseDatasource):
"""Dummy object so we can query annotations using 'Viz' objects just like
regular datasources.
"""
cache_timeout = 0
changed_on = None
type = "annotation"
column_names = [
"created_on",
"changed_on",
"id",
"start_dttm",
"end_dttm",
"layer_id",
"short_descr",
"long_descr",
"json_metadata",
"created_by_fk",
"changed_by_fk",
]
def query(self, query_obj: QueryObjectDict) -> QueryResult:
error_message = None
qry = db.session.query(Annotation)
qry = qry.filter(Annotation.layer_id == query_obj["filter"][0]["val"])
if query_obj["from_dttm"]:
qry = qry.filter(Annotation.start_dttm >= query_obj["from_dttm"])
if query_obj["to_dttm"]:
qry = qry.filter(Annotation.end_dttm <= query_obj["to_dttm"])
status = utils.QueryStatus.SUCCESS
try:
df = pd.read_sql_query(qry.statement, db.engine)
except Exception as ex: # pylint: disable=broad-except
df = pd.DataFrame()
status = utils.QueryStatus.FAILED
logger.exception(ex)
error_message = utils.error_msg_from_exception(ex)
return QueryResult(
status=status,
df=df,
duration=timedelta(0),
query="",
error_message=error_message,
)
def get_query_str(self, query_obj: QueryObjectDict) -> str:
raise NotImplementedError()
def values_for_column(self, column_name: str, limit: int = 10000) -> List[Any]:
raise NotImplementedError()
class TableColumn(Model, BaseColumn):
"""ORM object for table columns, each table can have multiple columns"""
__tablename__ = "table_columns"
__table_args__ = (UniqueConstraint("table_id", "column_name"),)
table_id = Column(Integer, ForeignKey("tables.id"))
table = relationship(
"SqlaTable",
backref=backref("columns", cascade="all, delete-orphan"),
foreign_keys=[table_id],
)
is_dttm = Column(Boolean, default=False)
expression = Column(Text)
python_date_format = Column(String(255))
export_fields = [
"table_id",
"column_name",
"verbose_name",
"is_dttm",
"is_active",
"type",
"groupby",
"filterable",
"expression",
"description",
"python_date_format",
]
update_from_object_fields = [s for s in export_fields if s not in ("table_id",)]
export_parent = "table"
@property
def is_numeric(self) -> bool:
"""
Check if the column has a numeric datatype.
"""
db_engine_spec = self.table.database.db_engine_spec
return db_engine_spec.is_db_column_type_match(
self.type, utils.DbColumnType.NUMERIC
)
@property
def is_string(self) -> bool:
"""
Check if the column has a string datatype.
"""
db_engine_spec = self.table.database.db_engine_spec
return db_engine_spec.is_db_column_type_match(
self.type, utils.DbColumnType.STRING
)
@property
def is_temporal(self) -> bool:
"""
Check if the column has a temporal datatype. If column has been set as
temporal/non-temporal (`is_dttm` is True or False respectively), return that
value. This usually happens during initial metadata fetching or when a column
is manually set as temporal (for this `python_date_format` needs to be set).
"""
if self.is_dttm is not None:
return self.is_dttm
db_engine_spec = self.table.database.db_engine_spec
return db_engine_spec.is_db_column_type_match(
self.type, utils.DbColumnType.TEMPORAL
)
def get_sqla_col(self, label: Optional[str] = None) -> Column:
label = label or self.column_name
if self.expression:
col = literal_column(self.expression)
else:
db_engine_spec = self.table.database.db_engine_spec
type_ = db_engine_spec.get_sqla_column_type(self.type)
col = column(self.column_name, type_=type_)
col = self.table.make_sqla_column_compatible(col, label)
return col
@property
def datasource(self) -> RelationshipProperty:
return self.table
def get_time_filter(
self,
start_dttm: DateTime,
end_dttm: DateTime,
time_range_endpoints: Optional[
Tuple[utils.TimeRangeEndpoint, utils.TimeRangeEndpoint]
],
) -> ColumnElement:
col = self.get_sqla_col(label="__time")
l = []
if start_dttm:
l.append(
col >= text(self.dttm_sql_literal(start_dttm, time_range_endpoints))
)
if end_dttm:
if (
time_range_endpoints
and time_range_endpoints[1] == utils.TimeRangeEndpoint.EXCLUSIVE
):
l.append(
col < text(self.dttm_sql_literal(end_dttm, time_range_endpoints))
)
else:
l.append(col <= text(self.dttm_sql_literal(end_dttm, None)))
return and_(*l)
def get_timestamp_expression(
self, time_grain: Optional[str], label: Optional[str] = None
) -> Union[TimestampExpression, Label]:
"""
Return a SQLAlchemy Core element representation of self to be used in a query.
:param time_grain: Optional time grain, e.g. P1Y
:param label: alias/label that column is expected to have
:return: A TimeExpression object wrapped in a Label if supported by db
"""
label = label or utils.DTTM_ALIAS
db_ = self.table.database
pdf = self.python_date_format
is_epoch = pdf in ("epoch_s", "epoch_ms")
if not self.expression and not time_grain and not is_epoch:
sqla_col = column(self.column_name, type_=DateTime)
return self.table.make_sqla_column_compatible(sqla_col, label)
if self.expression:
col = literal_column(self.expression)
else:
col = column(self.column_name)
time_expr = db_.db_engine_spec.get_timestamp_expr(
col, pdf, time_grain, self.type
)
return self.table.make_sqla_column_compatible(time_expr, label)
@classmethod
def import_obj(cls, i_column: "TableColumn") -> "TableColumn":
def lookup_obj(lookup_column: TableColumn) -> TableColumn:
return (
db.session.query(TableColumn)
.filter(
TableColumn.table_id == lookup_column.table_id,
TableColumn.column_name == lookup_column.column_name,
)
.first()
)
return import_datasource.import_simple_obj(db.session, i_column, lookup_obj)
def dttm_sql_literal(
self,
dttm: DateTime,
time_range_endpoints: Optional[
Tuple[utils.TimeRangeEndpoint, utils.TimeRangeEndpoint]
],
) -> str:
"""Convert datetime object to a SQL expression string"""
sql = (
self.table.database.db_engine_spec.convert_dttm(self.type, dttm)
if self.type
else None
)
if sql:
return sql
tf = self.python_date_format
# Fallback to the default format (if defined) only if the SIP-15 time range
# endpoints, i.e., [start, end) are enabled.
if not tf and time_range_endpoints == (
utils.TimeRangeEndpoint.INCLUSIVE,
utils.TimeRangeEndpoint.EXCLUSIVE,
):
tf = (
self.table.database.get_extra()
.get("python_date_format_by_column_name", {})
.get(self.column_name)
)
if tf:
if tf in ["epoch_ms", "epoch_s"]:
seconds_since_epoch = int(dttm.timestamp())
if tf == "epoch_s":
return str(seconds_since_epoch)
return str(seconds_since_epoch * 1000)
return f"'{dttm.strftime(tf)}'"
# TODO(john-bodley): SIP-15 will explicitly require a type conversion.
return f"""'{dttm.strftime("%Y-%m-%d %H:%M:%S.%f")}'"""
@property
def data(self) -> Dict[str, Any]:
attrs = (
"id",
"column_name",
"verbose_name",
"description",
"expression",
"filterable",
"groupby",
"is_dttm",
"type",
"python_date_format",
)
return {s: getattr(self, s) for s in attrs if hasattr(self, s)}
class SqlMetric(Model, BaseMetric):
"""ORM object for metrics, each table can have multiple metrics"""
__tablename__ = "sql_metrics"
__table_args__ = (UniqueConstraint("table_id", "metric_name"),)
table_id = Column(Integer, ForeignKey("tables.id"))
table = relationship(
"SqlaTable",
backref=backref("metrics", cascade="all, delete-orphan"),
foreign_keys=[table_id],
)
expression = Column(Text, nullable=False)
extra = Column(Text)
export_fields = [
"metric_name",
"verbose_name",
"metric_type",
"table_id",
"expression",
"description",
"d3format",
"extra",
"warning_text",
]
    update_from_object_fields = [s for s in export_fields if s not in ("table_id",)]
export_parent = "table"
def get_sqla_col(self, label: Optional[str] = None) -> Column:
label = label or self.metric_name
sqla_col = literal_column(self.expression)
return self.table.make_sqla_column_compatible(sqla_col, label)
@property
def perm(self) -> Optional[str]:
return (
("{parent_name}.[{obj.metric_name}](id:{obj.id})").format(
obj=self, parent_name=self.table.full_name
)
if self.table
else None
)
def get_perm(self) -> Optional[str]:
return self.perm
@classmethod
def import_obj(cls, i_metric: "SqlMetric") -> "SqlMetric":
def lookup_obj(lookup_metric: SqlMetric) -> SqlMetric:
return (
db.session.query(SqlMetric)
.filter(
SqlMetric.table_id == lookup_metric.table_id,
SqlMetric.metric_name == lookup_metric.metric_name,
)
.first()
)
return import_datasource.import_simple_obj(db.session, i_metric, lookup_obj)
def get_extra_dict(self) -> Dict[str, Any]:
try:
return json.loads(self.extra)
except (TypeError, json.JSONDecodeError):
return {}
@property
def is_certified(self) -> bool:
return bool(self.get_extra_dict().get("certification"))
@property
def certified_by(self) -> Optional[str]:
return self.get_extra_dict().get("certification", {}).get("certified_by")
@property
def certification_details(self) -> Optional[str]:
return self.get_extra_dict().get("certification", {}).get("details")
@property
def data(self) -> Dict[str, Any]:
attrs = ("is_certified", "certified_by", "certification_details")
attr_dict = {s: getattr(self, s) for s in attrs}
attr_dict.update(super().data)
return attr_dict
sqlatable_user = Table(
"sqlatable_user",
metadata,
Column("id", Integer, primary_key=True),
Column("user_id", Integer, ForeignKey("ab_user.id")),
Column("table_id", Integer, ForeignKey("tables.id")),
)
class SqlaTable( # pylint: disable=too-many-public-methods,too-many-instance-attributes
Model, BaseDatasource
):
"""An ORM object for SqlAlchemy table references"""
type = "table"
query_language = "sql"
is_rls_supported = True
metric_class = SqlMetric
column_class = TableColumn
owner_class = security_manager.user_model
__tablename__ = "tables"
__table_args__ = (UniqueConstraint("database_id", "table_name"),)
table_name = Column(String(250), nullable=False)
main_dttm_col = Column(String(250))
database_id = Column(Integer, ForeignKey("dbs.id"), nullable=False)
fetch_values_predicate = Column(String(1000))
owners = relationship(owner_class, secondary=sqlatable_user, backref="tables")
database = relationship(
"Database",
backref=backref("tables", cascade="all, delete-orphan"),
foreign_keys=[database_id],
)
schema = Column(String(255))
sql = Column(Text)
is_sqllab_view = Column(Boolean, default=False)
template_params = Column(Text)
extra = Column(Text)
baselink = "tablemodelview"
export_fields = [
"table_name",
"main_dttm_col",
"description",
"default_endpoint",
"database_id",
"offset",
"cache_timeout",
"schema",
"sql",
"params",
"template_params",
"filter_select_enabled",
"fetch_values_predicate",
"extra",
]
update_from_object_fields = [f for f in export_fields if not f == "database_id"]
export_parent = "database"
export_children = ["metrics", "columns"]
sqla_aggregations = {
"COUNT_DISTINCT": lambda column_name: sa.func.COUNT(sa.distinct(column_name)),
"COUNT": sa.func.COUNT,
"SUM": sa.func.SUM,
"AVG": sa.func.AVG,
"MIN": sa.func.MIN,
"MAX": sa.func.MAX,
}
def make_sqla_column_compatible(
self, sqla_col: Column, label: Optional[str] = None
) -> Column:
"""Takes a sqlalchemy column object and adds label info if supported by engine.
:param sqla_col: sqlalchemy column instance
:param label: alias/label that column is expected to have
:return: either a sql alchemy column or label instance if supported by engine
"""
label_expected = label or sqla_col.name
db_engine_spec = self.database.db_engine_spec
if db_engine_spec.allows_column_aliases:
label = db_engine_spec.make_label_compatible(label_expected)
sqla_col = sqla_col.label(label)
sqla_col._df_label_expected = label_expected # pylint: disable=protected-access
return sqla_col
def __repr__(self) -> str:
return self.name
@property
def changed_by_name(self) -> str:
if not self.changed_by:
return ""
return str(self.changed_by)
@property
def changed_by_url(self) -> str:
if not self.changed_by:
return ""
return f"/superset/profile/{self.changed_by.username}"
@property
def connection(self) -> str:
return str(self.database)
@property
def description_markeddown(self) -> str:
return utils.markdown(self.description)
@property
def datasource_name(self) -> str:
return self.table_name
@property
def datasource_type(self) -> str:
return self.type
@property
def database_name(self) -> str:
return self.database.name
@classmethod
def get_datasource_by_name(
cls,
session: Session,
datasource_name: str,
schema: Optional[str],
database_name: str,
) -> Optional["SqlaTable"]:
schema = schema or None
query = (
session.query(cls)
.join(Database)
.filter(cls.table_name == datasource_name)
.filter(Database.database_name == database_name)
)
# Handling schema being '' or None, which is easier to handle
# in python than in the SQLA query in a multi-dialect way
for tbl in query.all():
if schema == (tbl.schema or None):
return tbl
return None
@property
def link(self) -> Markup:
name = escape(self.name)
anchor = f'<a target="_blank" href="{self.explore_url}">{name}</a>'
return Markup(anchor)
def get_schema_perm(self) -> Optional[str]:
"""Returns schema permission if present, database one otherwise."""
return security_manager.get_schema_perm(self.database, self.schema)
def get_perm(self) -> str:
return f"[{self.database}].[{self.table_name}](id:{self.id})"
@property
def name(self) -> str:
if not self.schema:
return self.table_name
return "{}.{}".format(self.schema, self.table_name)
@property
def full_name(self) -> str:
return utils.get_datasource_full_name(
self.database, self.table_name, schema=self.schema
)
@property
def dttm_cols(self) -> List[str]:
l = [c.column_name for c in self.columns if c.is_dttm]
if self.main_dttm_col and self.main_dttm_col not in l:
l.append(self.main_dttm_col)
return l
@property
def num_cols(self) -> List[str]:
return [c.column_name for c in self.columns if c.is_numeric]
@property
def any_dttm_col(self) -> Optional[str]:
cols = self.dttm_cols
return cols[0] if cols else None
@property
def html(self) -> str:
df = pd.DataFrame((c.column_name, c.type) for c in self.columns)
df.columns = ["field", "type"]
return df.to_html(
index=False,
classes=("dataframe table table-striped table-bordered " "table-condensed"),
)
@property
def sql_url(self) -> str:
return self.database.sql_url + "?table_name=" + str(self.table_name)
def external_metadata(self) -> List[Dict[str, str]]:
db_engine_spec = self.database.db_engine_spec
if self.sql:
engine = self.database.get_sqla_engine(schema=self.schema)
sql = self.get_template_processor().process_template(self.sql)
parsed_query = ParsedQuery(sql)
if not parsed_query.is_readonly():
raise SupersetSecurityException(
SupersetError(
error_type=SupersetErrorType.DATASOURCE_SECURITY_ACCESS_ERROR,
message=_("Only `SELECT` statements are allowed"),
level=ErrorLevel.ERROR,
)
)
statements = parsed_query.get_statements()
if len(statements) > 1:
raise SupersetSecurityException(
SupersetError(
error_type=SupersetErrorType.DATASOURCE_SECURITY_ACCESS_ERROR,
message=_("Only single queries supported"),
level=ErrorLevel.ERROR,
)
)
# TODO(villebro): refactor to use same code that's used by
# sql_lab.py:execute_sql_statements
with closing(engine.raw_connection()) as conn:
with closing(conn.cursor()) as cursor:
query = self.database.apply_limit_to_sql(statements[0])
db_engine_spec.execute(cursor, query)
result = db_engine_spec.fetch_data(cursor, limit=1)
result_set = SupersetResultSet(
result, cursor.description, db_engine_spec
)
cols = result_set.columns
else:
db_dialect = self.database.get_dialect()
cols = self.database.get_columns(
self.table_name, schema=self.schema or None
)
for col in cols:
try:
if isinstance(col["type"], TypeEngine):
col["type"] = db_engine_spec.column_datatype_to_string(
col["type"], db_dialect
)
except CompileError:
col["type"] = "UNKNOWN"
return cols
@property
def time_column_grains(self) -> Dict[str, Any]:
return {
"time_columns": self.dttm_cols,
"time_grains": [grain.name for grain in self.database.grains()],
}
@property
def select_star(self) -> Optional[str]:
# show_cols and latest_partition set to false to avoid
# the expensive cost of inspecting the DB
return self.database.select_star(
self.table_name, schema=self.schema, show_cols=False, latest_partition=False
)
@property
def data(self) -> Dict[str, Any]:
data_ = super().data
if self.type == "table":
grains = self.database.grains() or []
if grains:
grains = [(g.duration, g.name) for g in grains]
data_["granularity_sqla"] = utils.choicify(self.dttm_cols)
data_["time_grain_sqla"] = grains
data_["main_dttm_col"] = self.main_dttm_col
data_["fetch_values_predicate"] = self.fetch_values_predicate
data_["template_params"] = self.template_params
data_["is_sqllab_view"] = self.is_sqllab_view
return data_
def values_for_column(self, column_name: str, limit: int = 10000) -> List[Any]:
"""Runs query against sqla to retrieve some
sample values for the given column.
"""
cols = {col.column_name: col for col in self.columns}
target_col = cols[column_name]
tp = self.get_template_processor()
qry = (
select([target_col.get_sqla_col()])
.select_from(self.get_from_clause(tp))
.distinct()
)
if limit:
qry = qry.limit(limit)
if self.fetch_values_predicate:
tp = self.get_template_processor()
try:
qry = qry.where(text(tp.process_template(self.fetch_values_predicate)))
except TemplateError as ex:
raise QueryObjectValidationError(
_(
"Error in jinja expression in fetch values predicate: %(msg)s",
msg=ex.message,
)
)
engine = self.database.get_sqla_engine()
sql = "{}".format(qry.compile(engine, compile_kwargs={"literal_binds": True}))
sql = self.mutate_query_from_config(sql)
df = pd.read_sql_query(sql=sql, con=engine)
return df[column_name].to_list()
def mutate_query_from_config(self, sql: str) -> str:
"""Apply config's SQL_QUERY_MUTATOR
Typically adds comments to the query with context"""
sql_query_mutator = config["SQL_QUERY_MUTATOR"]
if sql_query_mutator:
username = utils.get_username()
sql = sql_query_mutator(sql, username, security_manager, self.database)
return sql
def get_template_processor(self, **kwargs: Any) -> BaseTemplateProcessor:
return get_template_processor(table=self, database=self.database, **kwargs)
def get_query_str_extended(self, query_obj: QueryObjectDict) -> QueryStringExtended:
sqlaq = self.get_sqla_query(**query_obj)
sql = self.database.compile_sqla_query(sqlaq.sqla_query)
logger.info(sql)
sql = sqlparse.format(sql, reindent=True)
sql = self.mutate_query_from_config(sql)
return QueryStringExtended(
labels_expected=sqlaq.labels_expected, sql=sql, prequeries=sqlaq.prequeries
)
def get_query_str(self, query_obj: QueryObjectDict) -> str:
query_str_ext = self.get_query_str_extended(query_obj)
all_queries = query_str_ext.prequeries + [query_str_ext.sql]
return ";\n\n".join(all_queries) + ";"
def get_sqla_table(self) -> table:
tbl = table(self.table_name)
if self.schema:
tbl.schema = self.schema
return tbl
def get_from_clause(
self, template_processor: Optional[BaseTemplateProcessor] = None
) -> Union[table, TextAsFrom]:
# Supporting arbitrary SQL statements in place of tables
if self.sql:
from_sql = self.sql
if template_processor:
try:
from_sql = template_processor.process_template(from_sql)
except TemplateError as ex:
raise QueryObjectValidationError(
_(
"Error in jinja expression in FROM clause: %(msg)s",
msg=ex.message,
)
)
from_sql = sqlparse.format(from_sql, strip_comments=True)
if len(sqlparse.split(from_sql)) > 1:
raise QueryObjectValidationError(
_("Virtual dataset query cannot consist of multiple statements")
)
parsed_query = ParsedQuery(from_sql)
if not (parsed_query.is_unknown() or parsed_query.is_readonly()):
raise QueryObjectValidationError(
_("Virtual dataset query must be read-only")
)
return TextAsFrom(sa.text(from_sql), []).alias("expr_qry")
return self.get_sqla_table()
def adhoc_metric_to_sqla(
self, metric: Dict[str, Any], columns_by_name: Dict[str, Any]
) -> Optional[Column]:
"""
Turn an adhoc metric into a sqlalchemy column.
:param dict metric: Adhoc metric definition
:param dict columns_by_name: Columns for the current table
:returns: The metric defined as a sqlalchemy column
:rtype: sqlalchemy.sql.column
"""
expression_type = metric.get("expressionType")
label = utils.get_metric_name(metric)
if expression_type == utils.AdhocMetricExpressionType.SIMPLE:
column_name = metric["column"].get("column_name")
table_column = columns_by_name.get(column_name)
if table_column:
sqla_column = table_column.get_sqla_col()
else:
sqla_column = column(column_name)
sqla_metric = self.sqla_aggregations[metric["aggregate"]](sqla_column)
elif expression_type == utils.AdhocMetricExpressionType.SQL:
sqla_metric = literal_column(metric.get("sqlExpression"))
else:
return None
return self.make_sqla_column_compatible(sqla_metric, label)
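    # Illustration (not upstream code): a SIMPLE adhoc metric such as
    # {"expressionType": "SIMPLE", "aggregate": "SUM", "label": "sum__num",
    #  "column": {"column_name": "num"}} resolves to SUM(num) labelled "sum__num",
    # while a SQL adhoc metric passes its "sqlExpression" straight through
    # literal_column() before being made label-compatible.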
def _get_sqla_row_level_filters(
self, template_processor: BaseTemplateProcessor
) -> List[str]:
"""
Return the appropriate row level security filters for
this table and the current user.
:param BaseTemplateProcessor template_processor: The template
processor to apply to the filters.
:returns: A list of SQL clauses to be ANDed together.
:rtype: List[str]
"""
filters_grouped: Dict[Union[int, str], List[str]] = defaultdict(list)
try:
for filter_ in security_manager.get_rls_filters(self):
clause = text(
f"({template_processor.process_template(filter_.clause)})"
)
filters_grouped[filter_.group_key or filter_.id].append(clause)
return [or_(*clauses) for clauses in filters_grouped.values()]
except TemplateError as ex:
raise QueryObjectValidationError(
_("Error in jinja expression in RLS filters: %(msg)s", msg=ex.message,)
)
def get_sqla_query( # pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements
self,
metrics: List[Metric],
granularity: str,
from_dttm: Optional[datetime],
to_dttm: Optional[datetime],
columns: Optional[List[str]] = None,
groupby: Optional[List[str]] = None,
filter: Optional[ # pylint: disable=redefined-builtin
List[Dict[str, Any]]
] = None,
is_timeseries: bool = True,
timeseries_limit: int = 15,
timeseries_limit_metric: Optional[Metric] = None,
row_limit: Optional[int] = None,
row_offset: Optional[int] = None,
inner_from_dttm: Optional[datetime] = None,
inner_to_dttm: Optional[datetime] = None,
orderby: Optional[List[Tuple[ColumnElement, bool]]] = None,
extras: Optional[Dict[str, Any]] = None,
order_desc: bool = True,
) -> SqlaQuery:
"""Querying any sqla table from this common interface"""
template_kwargs = {
"from_dttm": from_dttm,
"groupby": groupby,
"metrics": metrics,
"row_limit": row_limit,
"row_offset": row_offset,
"to_dttm": to_dttm,
"filter": filter,
"columns": {col.column_name: col for col in self.columns},
}
is_sip_38 = is_feature_enabled("SIP_38_VIZ_REARCHITECTURE")
template_kwargs.update(self.template_params_dict)
extra_cache_keys: List[Any] = []
template_kwargs["extra_cache_keys"] = extra_cache_keys
template_processor = self.get_template_processor(**template_kwargs)
db_engine_spec = self.database.db_engine_spec
prequeries: List[str] = []
orderby = orderby or []
# For backward compatibility
if granularity not in self.dttm_cols:
granularity = self.main_dttm_col
# Database spec supports join-free timeslot grouping
time_groupby_inline = db_engine_spec.time_groupby_inline
columns_by_name: Dict[str, TableColumn] = {
col.column_name: col for col in self.columns
}
metrics_by_name: Dict[str, SqlMetric] = {m.metric_name: m for m in self.metrics}
if not granularity and is_timeseries:
raise QueryObjectValidationError(
_(
"Datetime column not provided as part table configuration "
"and is required by this type of chart"
)
)
if (
not metrics
and not columns
and (is_sip_38 or (not is_sip_38 and not groupby))
):
raise QueryObjectValidationError(_("Empty query?"))
metrics_exprs: List[ColumnElement] = []
for metric in metrics:
if utils.is_adhoc_metric(metric):
assert isinstance(metric, dict)
metrics_exprs.append(self.adhoc_metric_to_sqla(metric, columns_by_name))
elif isinstance(metric, str) and metric in metrics_by_name:
metrics_exprs.append(metrics_by_name[metric].get_sqla_col())
else:
raise QueryObjectValidationError(
_("Metric '%(metric)s' does not exist", metric=metric)
)
if metrics_exprs:
main_metric_expr = metrics_exprs[0]
else:
main_metric_expr, label = literal_column("COUNT(*)"), "ccount"
main_metric_expr = self.make_sqla_column_compatible(main_metric_expr, label)
select_exprs: List[Column] = []
groupby_exprs_sans_timestamp = OrderedDict()
assert extras is not None
if (is_sip_38 and metrics and columns) or (not is_sip_38 and groupby):
# dedup columns while preserving order
columns_ = columns if is_sip_38 else groupby
assert columns_
groupby = list(dict.fromkeys(columns_))
select_exprs = []
for selected in groupby:
# if groupby field/expr equals granularity field/expr
if selected == granularity:
time_grain = extras.get("time_grain_sqla")
sqla_col = columns_by_name[selected]
outer = sqla_col.get_timestamp_expression(time_grain, selected)
# if groupby field equals a selected column
elif selected in columns_by_name:
outer = columns_by_name[selected].get_sqla_col()
else:
outer = literal_column(f"({selected})")
outer = self.make_sqla_column_compatible(outer, selected)
groupby_exprs_sans_timestamp[outer.name] = outer
select_exprs.append(outer)
elif columns:
for selected in columns:
select_exprs.append(
columns_by_name[selected].get_sqla_col()
if selected in columns_by_name
else self.make_sqla_column_compatible(literal_column(selected))
)
metrics_exprs = []
time_range_endpoints = extras.get("time_range_endpoints")
groupby_exprs_with_timestamp = OrderedDict(groupby_exprs_sans_timestamp.items())
if granularity:
dttm_col = columns_by_name[granularity]
time_grain = extras.get("time_grain_sqla")
time_filters = []
if is_timeseries:
timestamp = dttm_col.get_timestamp_expression(time_grain)
select_exprs += [timestamp]
groupby_exprs_with_timestamp[timestamp.name] = timestamp
# Use main dttm column to support index with secondary dttm columns.
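            # i.e. when filtering on a secondary dttm column, an extra time filter on the
            # table's main dttm column is added so partition pruning / indexes on it can
            # still apply (illustrative note; gated by db_engine_spec.time_secondary_columns).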
if (
db_engine_spec.time_secondary_columns
and self.main_dttm_col in self.dttm_cols
and self.main_dttm_col != dttm_col.column_name
):
time_filters.append(
columns_by_name[self.main_dttm_col].get_time_filter(
from_dttm, to_dttm, time_range_endpoints
)
)
time_filters.append(
dttm_col.get_time_filter(from_dttm, to_dttm, time_range_endpoints)
)
select_exprs += metrics_exprs
labels_expected = [
c._df_label_expected # pylint: disable=protected-access
for c in select_exprs
]
select_exprs = db_engine_spec.make_select_compatible(
groupby_exprs_with_timestamp.values(), select_exprs
)
qry = sa.select(select_exprs)
tbl = self.get_from_clause(template_processor)
if (is_sip_38 and metrics) or (not is_sip_38 and not columns):
qry = qry.group_by(*groupby_exprs_with_timestamp.values())
where_clause_and = []
having_clause_and = []
for flt in filter: # type: ignore
if not all([flt.get(s) for s in ["col", "op"]]):
continue
col = flt["col"]
op = flt["op"].upper()
col_obj = columns_by_name.get(col)
if col_obj:
is_list_target = op in (
utils.FilterOperator.IN.value,
utils.FilterOperator.NOT_IN.value,
)
eq = self.filter_values_handler(
values=flt.get("val"),
target_column_is_numeric=col_obj.is_numeric,
is_list_target=is_list_target,
)
if op in (
utils.FilterOperator.IN.value,
utils.FilterOperator.NOT_IN.value,
):
cond = col_obj.get_sqla_col().in_(eq)
if isinstance(eq, str) and NULL_STRING in eq:
cond = or_(
cond,
col_obj.get_sqla_col() # pylint: disable=singleton-comparison
== None,
)
if op == utils.FilterOperator.NOT_IN.value:
cond = ~cond
where_clause_and.append(cond)
else:
if col_obj.is_numeric:
eq = utils.cast_to_num(flt["val"])
if op == utils.FilterOperator.EQUALS.value:
where_clause_and.append(col_obj.get_sqla_col() == eq)
elif op == utils.FilterOperator.NOT_EQUALS.value:
where_clause_and.append(col_obj.get_sqla_col() != eq)
elif op == utils.FilterOperator.GREATER_THAN.value:
where_clause_and.append(col_obj.get_sqla_col() > eq)
elif op == utils.FilterOperator.LESS_THAN.value:
where_clause_and.append(col_obj.get_sqla_col() < eq)
elif op == utils.FilterOperator.GREATER_THAN_OR_EQUALS.value:
where_clause_and.append(col_obj.get_sqla_col() >= eq)
elif op == utils.FilterOperator.LESS_THAN_OR_EQUALS.value:
where_clause_and.append(col_obj.get_sqla_col() <= eq)
elif op == utils.FilterOperator.LIKE.value:
where_clause_and.append(col_obj.get_sqla_col().like(eq))
elif op == utils.FilterOperator.IS_NULL.value:
where_clause_and.append(
col_obj.get_sqla_col() # pylint: disable=singleton-comparison
== None
)
elif op == utils.FilterOperator.IS_NOT_NULL.value:
where_clause_and.append(
col_obj.get_sqla_col() # pylint: disable=singleton-comparison
!= None
)
else:
raise QueryObjectValidationError(
_("Invalid filter operation type: %(op)s", op=op)
)
if is_feature_enabled("ROW_LEVEL_SECURITY"):
where_clause_and += self._get_sqla_row_level_filters(template_processor)
if extras:
where = extras.get("where")
if where:
try:
where = template_processor.process_template(where)
except TemplateError as ex:
raise QueryObjectValidationError(
_(
"Error in jinja expression in WHERE clause: %(msg)s",
msg=ex.message,
)
)
where_clause_and += [sa.text("({})".format(where))]
having = extras.get("having")
if having:
try:
having = template_processor.process_template(having)
except TemplateError as ex:
raise QueryObjectValidationError(
_(
"Error in jinja expression in HAVING clause: %(msg)s",
msg=ex.message,
)
)
having_clause_and += [sa.text("({})".format(having))]
if granularity:
qry = qry.where(and_(*(time_filters + where_clause_and)))
else:
qry = qry.where(and_(*where_clause_and))
qry = qry.having(and_(*having_clause_and))
# To ensure correct handling of the ORDER BY labeling we need to reference the
# metric instance if defined in the SELECT clause.
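        # e.g. a metric already selected (and labeled) above is looked up by its label here
        # so the ORDER BY reuses the same SELECT alias instead of rebuilding the expression
        # (illustrative note).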
metrics_exprs_by_label = {
m._label: m for m in metrics_exprs # pylint: disable=protected-access
}
for col, ascending in orderby:
direction = asc if ascending else desc
if utils.is_adhoc_metric(col):
col = self.adhoc_metric_to_sqla(col, columns_by_name)
elif col in columns_by_name:
col = columns_by_name[col].get_sqla_col()
if isinstance(col, Label):
label = col._label # pylint: disable=protected-access
if label in metrics_exprs_by_label:
col = metrics_exprs_by_label[label]
qry = qry.order_by(direction(col))
if row_limit:
qry = qry.limit(row_limit)
if row_offset:
qry = qry.offset(row_offset)
if (
is_timeseries # pylint: disable=too-many-boolean-expressions
and timeseries_limit
and not time_groupby_inline
and ((is_sip_38 and columns) or (not is_sip_38 and groupby))
):
if self.database.db_engine_spec.allows_joins:
                # some SQL dialects require ORDER BY expressions to also appear
                # in the SELECT clause -- others, e.g. Vertica, require a unique
                # inner alias
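                # Illustrative shape of the generated SQL (assumption; exact form is
                # engine-dependent):
                #   SELECT ... FROM tbl
                #   JOIN (SELECT dim AS dim__, metric AS mme_inner__
                #         FROM tbl WHERE <inner time filter>
                #         GROUP BY dim__ ORDER BY mme_inner__ DESC LIMIT <timeseries_limit>) sub
                #     ON tbl.dim = sub.dim__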
inner_main_metric_expr = self.make_sqla_column_compatible(
main_metric_expr, "mme_inner__"
)
inner_groupby_exprs = []
inner_select_exprs = []
for gby_name, gby_obj in groupby_exprs_sans_timestamp.items():
inner = self.make_sqla_column_compatible(gby_obj, gby_name + "__")
inner_groupby_exprs.append(inner)
inner_select_exprs.append(inner)
inner_select_exprs += [inner_main_metric_expr]
subq = select(inner_select_exprs).select_from(tbl)
inner_time_filter = dttm_col.get_time_filter(
inner_from_dttm or from_dttm,
inner_to_dttm or to_dttm,
time_range_endpoints,
)
subq = subq.where(and_(*(where_clause_and + [inner_time_filter])))
subq = subq.group_by(*inner_groupby_exprs)
ob = inner_main_metric_expr
if timeseries_limit_metric:
ob = self._get_timeseries_orderby(
timeseries_limit_metric, metrics_by_name, columns_by_name
)
direction = desc if order_desc else asc
subq = subq.order_by(direction(ob))
subq = subq.limit(timeseries_limit)
on_clause = []
for gby_name, gby_obj in groupby_exprs_sans_timestamp.items():
# in this case the column name, not the alias, needs to be
# conditionally mutated, as it refers to the column alias in
# the inner query
col_name = db_engine_spec.make_label_compatible(gby_name + "__")
on_clause.append(gby_obj == column(col_name))
tbl = tbl.join(subq.alias(), and_(*on_clause))
else:
if timeseries_limit_metric:
orderby = [
(
self._get_timeseries_orderby(
timeseries_limit_metric,
metrics_by_name,
columns_by_name,
),
False,
)
]
# run prequery to get top groups
prequery_obj = {
"is_timeseries": False,
"row_limit": timeseries_limit,
"metrics": metrics,
"granularity": granularity,
"from_dttm": inner_from_dttm or from_dttm,
"to_dttm": inner_to_dttm or to_dttm,
"filter": filter,
"orderby": orderby,
"extras": extras,
"columns": columns,
"order_desc": True,
}
if not is_sip_38:
prequery_obj["groupby"] = groupby
result = self.query(prequery_obj)
prequeries.append(result.query)
dimensions = [
c
for c in result.df.columns
if c not in metrics and c in groupby_exprs_sans_timestamp
]
top_groups = self._get_top_groups(
result.df, dimensions, groupby_exprs_sans_timestamp
)
qry = qry.where(top_groups)
return SqlaQuery(
extra_cache_keys=extra_cache_keys,
labels_expected=labels_expected,
sqla_query=qry.select_from(tbl),
prequeries=prequeries,
)
def _get_timeseries_orderby(
self,
timeseries_limit_metric: Metric,
metrics_by_name: Dict[str, SqlMetric],
columns_by_name: Dict[str, TableColumn],
) -> Optional[Column]:
if utils.is_adhoc_metric(timeseries_limit_metric):
assert isinstance(timeseries_limit_metric, dict)
ob = self.adhoc_metric_to_sqla(timeseries_limit_metric, columns_by_name)
elif (
isinstance(timeseries_limit_metric, str)
and timeseries_limit_metric in metrics_by_name
):
ob = metrics_by_name[timeseries_limit_metric].get_sqla_col()
else:
raise QueryObjectValidationError(
_("Metric '%(metric)s' does not exist", metric=timeseries_limit_metric)
)
return ob
def _get_top_groups( # pylint: disable=no-self-use
self,
df: pd.DataFrame,
dimensions: List[str],
groupby_exprs: "OrderedDict[str, Any]",
) -> ColumnElement:
groups = []
for _unused, row in df.iterrows():
group = []
for dimension in dimensions:
group.append(groupby_exprs[dimension] == row[dimension])
groups.append(and_(*group))
return or_(*groups)
def query(self, query_obj: QueryObjectDict) -> QueryResult:
qry_start_dttm = datetime.now()
query_str_ext = self.get_query_str_extended(query_obj)
sql = query_str_ext.sql
status = utils.QueryStatus.SUCCESS
errors = None
error_message = None
def mutator(df: pd.DataFrame) -> None:
"""
Some engines change the case or generate bespoke column names, either by
default or due to lack of support for aliasing. This function ensures that
the column names in the DataFrame correspond to what is expected by
the viz components.
:param df: Original DataFrame returned by the engine
"""
labels_expected = query_str_ext.labels_expected
if df is not None and not df.empty:
if len(df.columns) != len(labels_expected):
raise QueryObjectValidationError(
f"For {sql}, df.columns: {df.columns}"
f" differs from {labels_expected}"
)
df.columns = labels_expected
try:
df = self.database.get_df(sql, self.schema, mutator)
except Exception as ex: # pylint: disable=broad-except
df = pd.DataFrame()
status = utils.QueryStatus.FAILED
logger.warning(
"Query %s on schema %s failed", sql, self.schema, exc_info=True
)
db_engine_spec = self.database.db_engine_spec
errors = db_engine_spec.extract_errors(ex)
error_message = utils.error_msg_from_exception(ex)
return QueryResult(
status=status,
df=df,
duration=datetime.now() - qry_start_dttm,
query=sql,
errors=errors,
error_message=error_message,
)
def get_sqla_table_object(self) -> Table:
return self.database.get_table(self.table_name, schema=self.schema)
def fetch_metadata(self, commit: bool = True) -> MetadataResult:
"""
Fetches the metadata for the table and merges it in
:param commit: should the changes be committed or not.
        :return: MetadataResult with lists of added, removed and modified column names.
"""
new_columns = self.external_metadata()
metrics = []
any_date_col = None
db_engine_spec = self.database.db_engine_spec
old_columns = db.session.query(TableColumn).filter(TableColumn.table == self)
old_columns_by_name = {col.column_name: col for col in old_columns}
results = MetadataResult(
removed=[
col
for col in old_columns_by_name
if col not in {col["name"] for col in new_columns}
]
)
# clear old columns before adding modified columns back
self.columns = []
for col in new_columns:
old_column = old_columns_by_name.get(col["name"], None)
if not old_column:
results.added.append(col["name"])
new_column = TableColumn(
column_name=col["name"], type=col["type"], table=self
)
new_column.is_dttm = new_column.is_temporal
db_engine_spec.alter_new_orm_column(new_column)
else:
new_column = old_column
if new_column.type != col["type"]:
results.modified.append(col["name"])
new_column.type = col["type"]
new_column.groupby = True
new_column.filterable = True
self.columns.append(new_column)
if not any_date_col and new_column.is_temporal:
any_date_col = col["name"]
metrics.append(
SqlMetric(
metric_name="count",
verbose_name="COUNT(*)",
metric_type="count",
expression="COUNT(*)",
)
)
if not self.main_dttm_col:
self.main_dttm_col = any_date_col
self.add_missing_metrics(metrics)
# Apply config supplied mutations.
config["SQLA_TABLE_MUTATOR"](self)
db.session.merge(self)
if commit:
db.session.commit()
return results
@classmethod
def import_obj(
cls,
i_datasource: "SqlaTable",
database_id: Optional[int] = None,
import_time: Optional[int] = None,
) -> int:
"""Imports the datasource from the object to the database.
        Metrics, columns and the datasource will be overridden if they exist.
        This function can be used to import/export dashboards between multiple
        Superset instances. Audit metadata isn't copied over.
"""
def lookup_sqlatable(table_: "SqlaTable") -> "SqlaTable":
return (
db.session.query(SqlaTable)
.join(Database)
.filter(
SqlaTable.table_name == table_.table_name,
SqlaTable.schema == table_.schema,
Database.id == table_.database_id,
)
.first()
)
def lookup_database(table_: SqlaTable) -> Database:
try:
return (
db.session.query(Database)
.filter_by(database_name=table_.params_dict["database_name"])
.one()
)
except NoResultFound:
raise DatabaseNotFound(
_(
"Database '%(name)s' is not found",
name=table_.params_dict["database_name"],
)
)
return import_datasource.import_datasource(
db.session,
i_datasource,
lookup_database,
lookup_sqlatable,
import_time,
database_id,
)
@classmethod
def query_datasources_by_name(
cls,
session: Session,
database: Database,
datasource_name: str,
schema: Optional[str] = None,
) -> List["SqlaTable"]:
query = (
session.query(cls)
.filter_by(database_id=database.id)
.filter_by(table_name=datasource_name)
)
if schema:
query = query.filter_by(schema=schema)
return query.all()
@staticmethod
def default_query(qry: Query) -> Query:
return qry.filter_by(is_sqllab_view=False)
def has_extra_cache_key_calls(self, query_obj: QueryObjectDict) -> bool:
"""
Detects the presence of calls to `ExtraCache` methods in items in query_obj that
can be templated. If any are present, the query must be evaluated to extract
additional keys for the cache key. This method is needed to avoid executing the
template code unnecessarily, as it may contain expensive calls, e.g. to extract
the latest partition of a database.
:param query_obj: query object to analyze
:return: True if there are call(s) to an `ExtraCache` method, False otherwise
"""
templatable_statements: List[str] = []
if self.sql:
templatable_statements.append(self.sql)
if self.fetch_values_predicate:
templatable_statements.append(self.fetch_values_predicate)
extras = query_obj.get("extras", {})
if "where" in extras:
templatable_statements.append(extras["where"])
if "having" in extras:
templatable_statements.append(extras["having"])
if is_feature_enabled("ROW_LEVEL_SECURITY") and self.is_rls_supported:
templatable_statements += [
f.clause for f in security_manager.get_rls_filters(self)
]
for statement in templatable_statements:
if ExtraCache.regex.search(statement):
return True
return False
def get_extra_cache_keys(self, query_obj: QueryObjectDict) -> List[Hashable]:
"""
The cache key of a SqlaTable needs to consider any keys added by the parent
class and any keys added via `ExtraCache`.
:param query_obj: query object to analyze
:return: The extra cache keys
"""
extra_cache_keys = super().get_extra_cache_keys(query_obj)
if self.has_extra_cache_key_calls(query_obj):
sqla_query = self.get_sqla_query(**query_obj)
extra_cache_keys += sqla_query.extra_cache_keys
return extra_cache_keys
sa.event.listen(SqlaTable, "after_insert", security_manager.set_perm)
sa.event.listen(SqlaTable, "after_update", security_manager.set_perm)
RLSFilterRoles = Table(
"rls_filter_roles",
metadata,
Column("id", Integer, primary_key=True),
Column("role_id", Integer, ForeignKey("ab_role.id"), nullable=False),
Column("rls_filter_id", Integer, ForeignKey("row_level_security_filters.id")),
)
RLSFilterTables = Table(
"rls_filter_tables",
metadata,
Column("id", Integer, primary_key=True),
Column("table_id", Integer, ForeignKey("tables.id")),
Column("rls_filter_id", Integer, ForeignKey("row_level_security_filters.id")),
)
class RowLevelSecurityFilter(Model, AuditMixinNullable):
"""
Custom where clauses attached to Tables and Roles.
"""
__tablename__ = "row_level_security_filters"
id = Column(Integer, primary_key=True)
filter_type = Column(
Enum(*[filter_type.value for filter_type in utils.RowLevelSecurityFilterType])
)
group_key = Column(String(255), nullable=True)
roles = relationship(
security_manager.role_model,
secondary=RLSFilterRoles,
backref="row_level_security_filters",
)
tables = relationship(
SqlaTable, secondary=RLSFilterTables, backref="row_level_security_filters"
)
clause = Column(Text, nullable=False)
| 37.353836
| 115
| 0.59122
|
b7a3746496724c9221bfa73a8eeda4576eadc965
| 5,253
|
py
|
Python
|
numpyro/_downloads/1290c9ac531016f80fb7c5656adffcb4/bnn.py
|
fehiepsi/website
|
7b0165deebc1379105d8b9d8051187f7d9914446
|
[
"MIT"
] | null | null | null |
numpyro/_downloads/1290c9ac531016f80fb7c5656adffcb4/bnn.py
|
fehiepsi/website
|
7b0165deebc1379105d8b9d8051187f7d9914446
|
[
"MIT"
] | null | null | null |
numpyro/_downloads/1290c9ac531016f80fb7c5656adffcb4/bnn.py
|
fehiepsi/website
|
7b0165deebc1379105d8b9d8051187f7d9914446
|
[
"MIT"
] | null | null | null |
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
"""
Bayesian Neural Network
=======================
We demonstrate how to use NUTS to do inference on a simple (small)
Bayesian neural network with two hidden layers.
"""
import argparse
import os
import time
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from jax import vmap
import jax.numpy as jnp
import jax.random as random
import numpyro
from numpyro import handlers
import numpyro.distributions as dist
from numpyro.infer import MCMC, NUTS
matplotlib.use('Agg') # noqa: E402
# the non-linearity we use in our neural network
def nonlin(x):
return jnp.tanh(x)
# a two-layer bayesian neural network with computational flow
# given by D_X => D_H => D_H => D_Y where D_H is the number of
# hidden units. (note we indicate tensor dimensions in the comments)
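# e.g. with the defaults below (D_X=3, D_H=5, D_Y=1): w1 is 3x5, w2 is 5x5 and w3 is 5x1,
# so an input batch X of shape (N, 3) produces an output z3 of shape (N, 1).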
def model(X, Y, D_H):
D_X, D_Y = X.shape[1], 1
# sample first layer (we put unit normal priors on all weights)
w1 = numpyro.sample("w1", dist.Normal(jnp.zeros((D_X, D_H)), jnp.ones((D_X, D_H)))) # D_X D_H
z1 = nonlin(jnp.matmul(X, w1)) # N D_H <= first layer of activations
# sample second layer
w2 = numpyro.sample("w2", dist.Normal(jnp.zeros((D_H, D_H)), jnp.ones((D_H, D_H)))) # D_H D_H
z2 = nonlin(jnp.matmul(z1, w2)) # N D_H <= second layer of activations
# sample final layer of weights and neural network output
w3 = numpyro.sample("w3", dist.Normal(jnp.zeros((D_H, D_Y)), jnp.ones((D_H, D_Y)))) # D_H D_Y
z3 = jnp.matmul(z2, w3) # N D_Y <= output of the neural network
# we put a prior on the observation noise
prec_obs = numpyro.sample("prec_obs", dist.Gamma(3.0, 1.0))
sigma_obs = 1.0 / jnp.sqrt(prec_obs)
# observe data
numpyro.sample("Y", dist.Normal(z3, sigma_obs), obs=Y)
# helper function for HMC inference
def run_inference(model, args, rng_key, X, Y, D_H):
start = time.time()
kernel = NUTS(model)
mcmc = MCMC(kernel, args.num_warmup, args.num_samples, num_chains=args.num_chains,
progress_bar=False if "NUMPYRO_SPHINXBUILD" in os.environ else True)
mcmc.run(rng_key, X, Y, D_H)
mcmc.print_summary()
print('\nMCMC elapsed time:', time.time() - start)
return mcmc.get_samples()
# helper function for prediction
def predict(model, rng_key, samples, X, D_H):
model = handlers.substitute(handlers.seed(model, rng_key), samples)
# note that Y will be sampled in the model because we pass Y=None here
model_trace = handlers.trace(model).get_trace(X=X, Y=None, D_H=D_H)
return model_trace['Y']['value']
# create artificial regression dataset
def get_data(N=50, D_X=3, sigma_obs=0.05, N_test=500):
D_Y = 1 # create 1d outputs
np.random.seed(0)
X = jnp.linspace(-1, 1, N)
X = jnp.power(X[:, np.newaxis], jnp.arange(D_X))
W = 0.5 * np.random.randn(D_X)
Y = jnp.dot(X, W) + 0.5 * jnp.power(0.5 + X[:, 1], 2.0) * jnp.sin(4.0 * X[:, 1])
Y += sigma_obs * np.random.randn(N)
Y = Y[:, np.newaxis]
Y -= jnp.mean(Y)
Y /= jnp.std(Y)
assert X.shape == (N, D_X)
assert Y.shape == (N, D_Y)
X_test = jnp.linspace(-1.3, 1.3, N_test)
X_test = jnp.power(X_test[:, np.newaxis], jnp.arange(D_X))
return X, Y, X_test
def main(args):
N, D_X, D_H = args.num_data, 3, args.num_hidden
X, Y, X_test = get_data(N=N, D_X=D_X)
# do inference
rng_key, rng_key_predict = random.split(random.PRNGKey(0))
samples = run_inference(model, args, rng_key, X, Y, D_H)
# predict Y_test at inputs X_test
vmap_args = (samples, random.split(rng_key_predict, args.num_samples * args.num_chains))
predictions = vmap(lambda samples, rng_key: predict(model, rng_key, samples, X_test, D_H))(*vmap_args)
predictions = predictions[..., 0]
# compute mean prediction and confidence interval around median
mean_prediction = jnp.mean(predictions, axis=0)
percentiles = np.percentile(predictions, [5.0, 95.0], axis=0)
# make plots
fig, ax = plt.subplots(1, 1)
# plot training data
ax.plot(X[:, 1], Y[:, 0], 'kx')
# plot 90% confidence level of predictions
ax.fill_between(X_test[:, 1], percentiles[0, :], percentiles[1, :], color='lightblue')
# plot mean prediction
ax.plot(X_test[:, 1], mean_prediction, 'blue', ls='solid', lw=2.0)
ax.set(xlabel="X", ylabel="Y", title="Mean predictions with 90% CI")
    plt.tight_layout()
    plt.savefig('bnn_plot.pdf')
if __name__ == "__main__":
assert numpyro.__version__.startswith('0.4.0')
parser = argparse.ArgumentParser(description="Bayesian neural network example")
parser.add_argument("-n", "--num-samples", nargs="?", default=2000, type=int)
parser.add_argument("--num-warmup", nargs='?', default=1000, type=int)
parser.add_argument("--num-chains", nargs='?', default=1, type=int)
parser.add_argument("--num-data", nargs='?', default=100, type=int)
parser.add_argument("--num-hidden", nargs='?', default=5, type=int)
parser.add_argument("--device", default='cpu', type=str, help='use "cpu" or "gpu".')
args = parser.parse_args()
numpyro.set_platform(args.device)
numpyro.set_host_device_count(args.num_chains)
main(args)
| 34.333333
| 106
| 0.667238
|
c1f52524d1750111d36361ad53c7d81382167865
| 42,473
|
py
|
Python
|
workers/pull_request_worker/pull_request_worker/worker.py
|
ishagarg06/augur
|
2295d48288d243c9ac01bf54ba140ca756828929
|
[
"MIT"
] | null | null | null |
workers/pull_request_worker/pull_request_worker/worker.py
|
ishagarg06/augur
|
2295d48288d243c9ac01bf54ba140ca756828929
|
[
"MIT"
] | null | null | null |
workers/pull_request_worker/pull_request_worker/worker.py
|
ishagarg06/augur
|
2295d48288d243c9ac01bf54ba140ca756828929
|
[
"MIT"
] | null | null | null |
import ast, json, logging, os, sys, time, traceback, requests
from datetime import datetime
from multiprocessing import Process, Queue
from urllib.parse import urlparse
import pandas as pd
import sqlalchemy as s
from sqlalchemy import MetaData
from sqlalchemy.ext.automap import automap_base
from workers.standard_methods import *
from sqlalchemy.sql.expression import bindparam
class GHPullRequestWorker:
"""
Worker that collects Pull Request related data from the Github API and stores it in our database.
:param task: most recent task the broker added to the worker's queue
:param config: holds info like api keys, descriptions, and database connection strings
"""
def __init__(self, config, task=None):
self._task = task
self._child = None
self._queue = Queue()
self._maintain_queue = Queue()
self.working_on = None
self.config = config
LOG_FORMAT = '%(levelname)s:[%(name)s]: %(message)s'
logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO, format=LOG_FORMAT)
logging.info('Worker (PID: {}) initializing...\n'.format(str(os.getpid())))
self.db = None
self.table = None
self.API_KEY = self.config['key']
self.tool_source = 'GitHub Pull Request Worker'
self.tool_version = '0.0.1' # See __init__.py
self.data_source = 'GitHub API'
self.results_counter = 0
self.headers = {'Authorization': f'token {self.API_KEY}'}
self.history_id = None
self.finishing_task = True
self.specs = {
"id": self.config['id'],
"location": self.config['location'],
"qualifications": [
{
"given": [['github_url']],
"models":['pull_requests', 'pull_request_commits', 'pull_request_files']
}
],
"config": [self.config]
}
self.DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format(
self.config['user'], self.config['password'], self.config['host'],
self.config['port'], self.config['database']
)
#Database connections
logging.info("Making database connections...\n")
dbschema = 'augur_data'
self.db = s.create_engine(self.DB_STR, poolclass = s.pool.NullPool,
connect_args={'options': '-csearch_path={}'.format(dbschema)})
helper_schema = 'augur_operations'
self.helper_db = s.create_engine(self.DB_STR, poolclass = s.pool.NullPool,
connect_args={'options': '-csearch_path={}'.format(helper_schema)})
metadata = MetaData()
helper_metadata = MetaData()
metadata.reflect(self.db, only=['contributors', 'pull_requests',
'pull_request_assignees', 'pull_request_events', 'pull_request_labels',
'pull_request_message_ref', 'pull_request_meta', 'pull_request_repo',
'pull_request_reviewers', 'pull_request_teams', 'message', 'pull_request_commits',
'pull_request_files'])
helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job', 'worker_oauth'])
Base = automap_base(metadata=metadata)
HelperBase = automap_base(metadata=helper_metadata)
Base.prepare()
HelperBase.prepare()
self.contributors_table = Base.classes.contributors.__table__
self.pull_requests_table = Base.classes.pull_requests.__table__
self.pull_request_assignees_table = Base.classes.pull_request_assignees.__table__
self.pull_request_events_table = Base.classes.pull_request_events.__table__
self.pull_request_labels_table = Base.classes.pull_request_labels.__table__
self.pull_request_message_ref_table = Base.classes.pull_request_message_ref.__table__
self.pull_request_meta_table = Base.classes.pull_request_meta.__table__
self.pull_request_repo_table = Base.classes.pull_request_repo.__table__
self.pull_request_reviewers_table = Base.classes.pull_request_reviewers.__table__
self.pull_request_teams_table = Base.classes.pull_request_teams.__table__
self.message_table = Base.classes.message.__table__
self.pull_request_commits_table = Base.classes.pull_request_commits.__table__
self.pull_request_files_table = Base.classes.pull_request_files.__table__
self.history_table = HelperBase.classes.worker_history.__table__
self.job_table = HelperBase.classes.worker_job.__table__
logging.info("Querying starting ids info...\n")
# Increment so we are ready to insert the 'next one' of each of these most recent ids
self.history_id = get_max_id(self, 'worker_history', 'history_id', operations_table=True) + 1
self.pr_id_inc = get_max_id(self, 'pull_requests', 'pull_request_id')
self.cntrb_id_inc = get_max_id(self, 'contributors', 'cntrb_id')
self.msg_id_inc = get_max_id(self, 'message', 'msg_id')
self.pr_msg_ref_id_inc = get_max_id(self, 'pull_request_message_ref', 'pr_msg_ref_id')
self.label_id_inc = get_max_id(self, 'pull_request_labels', 'pr_label_id')
self.event_id_inc = get_max_id(self, 'pull_request_events', 'pr_event_id')
self.reviewer_id_inc = get_max_id(self, 'pull_request_reviewers', 'pr_reviewer_map_id')
self.assignee_id_inc = get_max_id(self, 'pull_request_assignees', 'pr_assignee_map_id')
self.pr_meta_id_inc = get_max_id(self, 'pull_request_meta', 'pr_repo_meta_id')
# Organize different api keys/oauths available
init_oauths(self)
# Send broker hello message
connect_to_broker(self)
# self.pull_requests_graphql({
# 'job_type': 'MAINTAIN',
# 'models': ['pull_request_files'],
# 'display_name': 'pull_request_files model for url: https://github.com/zephyrproject-rtos/actions_sandbox.git',
# 'given': {
# 'github_url': 'https://github.com/zephyrproject-rtos/actions_sandbox.git'
# }
# }, 25201)
def update_config(self, config):
""" Method to update config and set a default
"""
self.config = {
"display_name": "",
"description": "",
"required": 1,
"type": "string"
}
self.config.update(config)
self.API_KEY = self.config['key']
@property
def task(self):
""" Property that is returned when the worker's current task is referenced
"""
return self._task
@task.setter
def task(self, value):
""" entry point for the broker to add a task to the queue
Adds this task to the queue, and calls method to process queue
"""
github_url = value['given']['github_url']
repo_url_SQL = s.sql.text("""
SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'
""".format(github_url))
rs = pd.read_sql(repo_url_SQL, self.db, params={})
try:
repo_id = int(rs.iloc[0]['repo_id'])
if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN":
self._queue.put(value)
if 'focused_task' in value:
if value['focused_task'] == 1:
self.finishing_task = True
except Exception as e:
logging.error(f"error: {e}, or that repo is not in our database: {value}\n")
self._task = value
self.run()
def cancel(self):
""" Delete/cancel current task
"""
self._task = None
def run(self):
""" Kicks off the processing of the queue if it is not already being processed
Gets run whenever a new task is added
"""
logging.info("Running...\n")
self._child = Process(target=self.collect, args=())
self._child.start()
def collect(self):
""" Function to process each entry in the worker's task queue
Determines what action to take based off the message type
"""
while True:
if not self._queue.empty():
message = self._queue.get()
self.working_on = message['job_type']
else:
break
logging.info("Popped off message: {}\n".format(str(message)))
if message['job_type'] == 'STOP':
break
if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE':
raise ValueError('{} is not a recognized task type'.format(message['job_type']))
pass
# Query all repos with repo url of given task
repoUrlSQL = s.sql.text("""
SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'
""".format(message['given']['github_url']))
repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id'])
try:
if message['models'][0] == 'pull_requests':
self.pull_requests_model(message, repo_id)
elif message['models'][0] == 'pull_request_commits':
self.pull_request_commits_model(message, repo_id)
elif message['models'][0] == 'pull_request_files':
self.pull_requests_graphql(message, repo_id)
except Exception as e:
register_task_failure(self, message, repo_id, e)
pass
def graphql_paginate(self, query, data_subjects, before_parameters=None):
""" Paginate a GitHub GraphQL query backwards
:param query: A string, holds the GraphQL query
        :rtype: A list of dicts, contains all edge data gathered from the pages
"""
logging.info(f'Start paginate with params: \n{data_subjects} '
f'\n{before_parameters}')
def all_items(dictionary):
for key, value in dictionary.items():
if type(value) is dict:
yield (key, value)
yield from all_items(value)
else:
yield (key, value)
if not before_parameters:
before_parameters = {}
for subject, _ in all_items(data_subjects):
before_parameters[subject] = ''
start_cursor = None
has_previous_page = True
base_url = 'https://api.github.com/graphql'
tuples = []
def find_root_of_subject(data, key_subject):
key_nest = None
for subject, nest in data.items():
if key_subject in nest:
key_nest = nest[key_subject]
break
elif type(nest) == dict:
return find_root_of_subject(nest, key_subject)
else:
raise KeyError
return key_nest
for data_subject, nest in data_subjects.items():
logging.info(f'Beginning paginate process for field {data_subject} '
f'for query: {query}')
page_count = 0
while has_previous_page:
page_count += 1
num_attempts = 3
success = False
for attempt in range(num_attempts):
logging.info(f'Attempt #{attempt + 1} for hitting GraphQL endpoint '
f'page number {page_count}\n')
response = requests.post(base_url, json={'query': query.format(
**before_parameters)}, headers=self.headers)
update_gh_rate_limit(self, response)
try:
data = response.json()
except:
data = json.loads(json.dumps(response.text))
if 'errors' in data:
logging.info("Error!: {}".format(data['errors']))
if data['errors'][0]['type'] == 'RATE_LIMITED':
update_gh_rate_limit(self, response)
num_attempts -= 1
continue
if 'data' in data:
success = True
root = find_root_of_subject(data, data_subject)
page_info = root['pageInfo']
data = root['edges']
break
else:
logging.info("Request returned a non-data dict: {}\n".format(data))
if data['message'] == 'Not Found':
logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(base_url))
break
if data['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.':
num_attempts -= 1
update_gh_rate_limit(self, response, temporarily_disable=True)
if data['message'] == 'Bad credentials':
update_gh_rate_limit(self, response, bad_credentials=True)
if not success:
logging.info('GraphQL query failed: {}'.format(query))
continue
before_parameters.update({
data_subject: ', before: \"{}\"'.format(page_info['startCursor'])
})
has_previous_page = page_info['hasPreviousPage']
tuples += data
logging.info(f'Paged through {page_count} pages and '
f'collected {len(tuples)} data points\n')
if not nest:
return tuples
return tuples + self.graphql_paginate(query, data_subjects[subject],
before_parameters=before_parameters)
def pull_requests_graphql(self, task_info, repo_id):
owner, repo = get_owner_repo(task_info['given']['github_url'])
# query existing PRs and the respective url we will append the commits url to
pr_number_sql = s.sql.text("""
SELECT DISTINCT pr_src_number as pr_src_number, pull_requests.pull_request_id
FROM pull_requests--, pull_request_meta
WHERE repo_id = {}
""".format(repo_id))
pr_numbers = pd.read_sql(pr_number_sql, self.db, params={})
pr_file_rows = []
for index, pull_request in enumerate(pr_numbers.itertuples()):
logging.info(f'Querying files for pull request #{index + 1} of {len(pr_numbers)}')
query = """
{{
repository(owner:"%s", name:"%s"){{
pullRequest (number: %s) {{
""" % (owner, repo, pull_request.pr_src_number) + """
files (last: 100{files}) {{
pageInfo {{
hasPreviousPage
hasNextPage
endCursor
startCursor
}}
edges {{
node {{
additions
deletions
path
}}
}}
}}
}}
}}
}}
"""
pr_file_rows += [{
'pull_request_id': pull_request.pull_request_id,
                'pr_file_additions': pr_file['node']['additions'],
'pr_file_deletions': pr_file['node']['deletions'],
'pr_file_path': pr_file['node']['path'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': 'GitHub API',
} for pr_file in self.graphql_paginate(query, {'files': None})]
# Get current table values
table_values_sql = s.sql.text("""
SELECT pull_request_files.*
FROM pull_request_files, pull_requests
WHERE pull_request_files.pull_request_id = pull_requests.pull_request_id
AND repo_id = :repo_id
""")
logging.info(f'Getting table values with the following PSQL query: \n{table_values_sql}\n')
table_values = pd.read_sql(table_values_sql, self.db, params={'repo_id': repo_id})
# Compare queried values against table values for dupes/updates
if len(pr_file_rows) > 0:
table_columns = pr_file_rows[0].keys()
        else:
            logging.info(f'No rows need insertion for repo {repo_id}\n')
            register_task_completion(self, task_info, repo_id, 'pull_request_files')
            return
# Compare queried values against table values for dupes/updates
pr_file_rows_df = pd.DataFrame(pr_file_rows)
pr_file_rows_df = pr_file_rows_df.dropna(subset=['pull_request_id'])
pr_file_rows_df['need_update'] = 0
dupe_columns = ['pull_request_id', 'pr_file_path']
update_columns = ['pr_file_additions', 'pr_file_deletions']
logging.info(f'{pr_file_rows_df}')
logging.info(f'{table_values}')
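        # need_insertion: API rows with no match in the table on (pull_request_id, pr_file_path).
        # need_updates: rows matching on those keys whose additions/deletions differ from
        # the values currently stored.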
need_insertion = pr_file_rows_df.merge(table_values, suffixes=('','_table'),
how='outer', indicator=True, on=dupe_columns).loc[
lambda x : x['_merge']=='left_only'][table_columns]
need_updates = pr_file_rows_df.merge(table_values, on=dupe_columns, suffixes=('','_table'),
how='inner',indicator=False)[table_columns].merge(table_values,
on=update_columns, suffixes=('','_table'), how='outer',indicator=True
).loc[lambda x : x['_merge']=='left_only'][table_columns]
need_updates['b_pull_request_id'] = need_updates['pull_request_id']
need_updates['b_pr_file_path'] = need_updates['pr_file_path']
pr_file_insert_rows = need_insertion.to_dict('records')
pr_file_update_rows = need_updates.to_dict('records')
logging.info(f'Repo id {repo_id} needs {len(need_insertion)} insertions and '
f'{len(need_updates)} updates.\n')
if len(pr_file_update_rows) > 0:
success = False
while not success:
try:
self.db.execute(
self.pull_request_files_table.update().where(
self.pull_request_files_table.c.pull_request_id == bindparam('b_pull_request_id') and
self.pull_request_files_table.c.pr_file_path == bindparam('b_pr_file_path')).values(
pr_file_additions=bindparam('pr_file_additions'),
pr_file_deletions=bindparam('pr_file_deletions')),
pr_file_update_rows
)
success = True
except Exception as e:
logging.info('error: {}'.format(e))
time.sleep(5)
if len(pr_file_insert_rows) > 0:
success = False
while not success:
try:
self.db.execute(
self.pull_request_files_table.insert(),
pr_file_insert_rows
)
success = True
except Exception as e:
logging.info('error: {}'.format(e))
time.sleep(5)
register_task_completion(self, task_info, repo_id, 'pull_request_files')
def pull_request_commits_model(self, task_info, repo_id):
""" Queries the commits related to each pull request already inserted in the db """
# query existing PRs and the respective url we will append the commits url to
pr_url_sql = s.sql.text("""
SELECT DISTINCT pr_url, pull_requests.pull_request_id
FROM pull_requests--, pull_request_meta
WHERE repo_id = {}
""".format(repo_id))
urls = pd.read_sql(pr_url_sql, self.db, params={})
for pull_request in urls.itertuples(): # for each url of PRs we have inserted
commits_url = pull_request.pr_url + '/commits?page={}'
table = 'pull_request_commits'
table_pkey = 'pr_cmt_id'
duplicate_col_map = {'pr_cmt_sha': 'sha'}
update_col_map = {}
# Use helper paginate function to iterate the commits url and check for dupes
pr_commits = paginate(self, commits_url, duplicate_col_map, update_col_map, table, table_pkey,
where_clause="where pull_request_id = {}".format(pull_request.pull_request_id))
for pr_commit in pr_commits: # post-pagination, iterate results
if pr_commit['flag'] == 'need_insertion': # if non-dupe
pr_commit_row = {
'pull_request_id': pull_request.pull_request_id,
'pr_cmt_sha': pr_commit['sha'],
'pr_cmt_node_id': pr_commit['node_id'],
'pr_cmt_message': pr_commit['commit']['message'],
# 'pr_cmt_comments_url': pr_commit['comments_url'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': 'GitHub API',
}
result = self.db.execute(self.pull_request_commits_table.insert().values(pr_commit_row))
logging.info(f"Inserted Pull Request Commit: {result.inserted_primary_key}\n")
register_task_completion(self, task_info, repo_id, 'pull_request_commits')
def pull_requests_model(self, entry_info, repo_id):
"""Pull Request data collection function. Query GitHub API for PhubRs.
:param entry_info: A dictionary consisiting of 'git_url' and 'repo_id'
:type entry_info: dict
"""
github_url = entry_info['given']['github_url']
logging.info('Beginning collection of Pull Requests...\n')
logging.info(f'Repo ID: {repo_id}, Git URL: {github_url}\n')
record_model_process(self, repo_id, 'pull_requests')
owner, repo = self.get_owner_repo(github_url)
url = (f'https://api.github.com/repos/{owner}/{repo}/pulls?state=all&' +
'direction=asc&per_page=100&page={}')
# Get pull requests that we already have stored
# Set pseudo key (something other than PK) to
        # check duplicates with
table = 'pull_requests'
table_pkey = 'pull_request_id'
update_col_map = {'pr_src_state': 'state'}
duplicate_col_map = {'pr_src_id': 'id'}
#list to hold pull requests needing insertion
prs = paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey,
where_clause='WHERE repo_id = {}'.format(repo_id),
value_update_col_map={'pr_augur_contributor_id': float('nan')})
# Discover and remove duplicates before we start inserting
logging.info("Count of pull requests needing update or insertion: " + str(len(prs)) + "\n")
for pr_dict in prs:
pr = {
'repo_id': repo_id,
'pr_url': pr_dict['url'],
'pr_src_id': pr_dict['id'],
'pr_src_node_id': None,
'pr_html_url': pr_dict['html_url'],
'pr_diff_url': pr_dict['diff_url'],
'pr_patch_url': pr_dict['patch_url'],
'pr_issue_url': pr_dict['issue_url'],
'pr_augur_issue_id': None,
'pr_src_number': pr_dict['number'],
'pr_src_state': pr_dict['state'],
'pr_src_locked': pr_dict['locked'],
'pr_src_title': pr_dict['title'],
'pr_augur_contributor_id': find_id_from_login(self, pr_dict['user']['login']),
'pr_body': pr_dict['body'],
'pr_created_at': pr_dict['created_at'],
'pr_updated_at': pr_dict['updated_at'],
'pr_closed_at': pr_dict['closed_at'],
'pr_merged_at': pr_dict['merged_at'],
'pr_merge_commit_sha': pr_dict['merge_commit_sha'],
'pr_teams': None,
'pr_milestone': pr_dict['milestone']['title'] if pr_dict['milestone'] else None,
'pr_commits_url': pr_dict['commits_url'],
'pr_review_comments_url': pr_dict['review_comments_url'],
'pr_review_comment_url': pr_dict['review_comment_url'],
'pr_comments_url': pr_dict['comments_url'],
'pr_statuses_url': pr_dict['statuses_url'],
'pr_meta_head_id': None,
'pr_meta_base_id': None,
'pr_src_issue_url': pr_dict['issue_url'],
'pr_src_comments_url': pr_dict['comments_url'], # NOTE: this seems redundant
'pr_src_review_comments_url': pr_dict['review_comments_url'], # this too
'pr_src_commits_url': pr_dict['commits_url'], # this one also seems redundant
'pr_src_statuses_url': pr_dict['statuses_url'],
'pr_src_author_association': pr_dict['author_association'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': 'GitHub API'
}
if pr_dict['flag'] == 'need_insertion':
logging.info(f'PR {pr_dict["id"]} needs to be inserted\n')
result = self.db.execute(self.pull_requests_table.insert().values(pr))
logging.info(f"Added Pull Request: {result.inserted_primary_key}")
self.pr_id_inc = int(result.inserted_primary_key[0])
elif pr_dict['flag'] == 'need_update':
result = self.db.execute(self.pull_requests_table.update().where(
self.pull_requests_table.c.pr_src_id==pr_dict['id']).values(pr))
logging.info("Updated tuple in the pull_requests table with existing pr_src_id: {}".format(
pr_dict['id']))
self.pr_id_inc = pr_dict['pkey']
else:
logging.info("PR does not need to be inserted. Fetching its id from DB")
pr_id_sql = s.sql.text("""
SELECT pull_request_id FROM pull_requests
WHERE pr_src_id={}
""".format(pr_dict['id']))
self.pr_id_inc = int(pd.read_sql(pr_id_sql, self.db).iloc[0]['pull_request_id'])
self.query_labels(pr_dict['labels'], self.pr_id_inc)
self.query_pr_events(owner, repo, pr_dict['number'], self.pr_id_inc)
self.query_pr_comments(owner, repo, pr_dict['number'], self.pr_id_inc)
self.query_reviewers(pr_dict['requested_reviewers'], self.pr_id_inc)
self.query_pr_meta(pr_dict['head'], pr_dict['base'], self.pr_id_inc)
logging.info(f"Inserted PR data for {owner}/{repo}")
self.results_counter += 1
register_task_completion(self, entry_info, repo_id, 'pull_requests')
def query_labels(self, labels, pr_id):
logging.info('Querying PR Labels\n')
if len(labels) == 0:
logging.info('No new labels to add\n')
return
table = 'pull_request_labels'
duplicate_col_map = {'pr_src_id': 'id'}
update_col_map = {}
table_pkey = 'pr_label_id'
update_keys = list(update_col_map.keys()) if update_col_map else []
cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey]
pr_labels_table_values = get_table_values(self, cols_query, [table])
new_labels = assign_tuple_action(self, labels, pr_labels_table_values, update_col_map, duplicate_col_map,
table_pkey)
logging.info(f'Found {len(new_labels)} labels\n')
for label_dict in new_labels:
label = {
'pull_request_id': pr_id,
'pr_src_id': label_dict['id'],
'pr_src_node_id': label_dict['node_id'],
'pr_src_url': label_dict['url'],
'pr_src_description': label_dict['name'],
'pr_src_color': label_dict['color'],
'pr_src_default_bool': label_dict['default'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
if label_dict['flag'] == 'need_insertion':
result = self.db.execute(self.pull_request_labels_table.insert().values(label))
logging.info(f"Added PR Label: {result.inserted_primary_key}\n")
logging.info(f"Inserted PR Labels data for PR with id {pr_id}\n")
self.results_counter += 1
self.label_id_inc = int(result.inserted_primary_key[0])
def query_pr_events(self, owner, repo, gh_pr_no, pr_id):
logging.info('Querying PR Events\n')
url = (f'https://api.github.com/repos/{owner}/{repo}/issues/{gh_pr_no}' +
'/events?per_page=100&page={}')
# Get pull request events that we already have stored
# Set our duplicate and update column map keys (something other than PK) to
        # check duplicates/needed column updates with
table = 'pull_request_events'
table_pkey = 'pr_event_id'
update_col_map = {}
duplicate_col_map = {'issue_event_src_id': 'id'}
#list to hold contributors needing insertion or update
pr_events = paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey)
logging.info("Count of pull request events needing insertion: " + str(len(pr_events)) + "\n")
for pr_event_dict in pr_events:
if pr_event_dict['actor']:
cntrb_id = find_id_from_login(self, pr_event_dict['actor']['login'])
else:
cntrb_id = 1
pr_event = {
'pull_request_id': pr_id,
'cntrb_id': cntrb_id,
'action': pr_event_dict['event'],
'action_commit_hash': None,
'created_at': pr_event_dict['created_at'],
'issue_event_src_id': pr_event_dict['id'],
'node_id': pr_event_dict['node_id'],
'node_url': pr_event_dict['url'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
result = self.db.execute(self.pull_request_events_table.insert().values(pr_event))
logging.info(f"Added PR Event: {result.inserted_primary_key}\n")
self.results_counter += 1
self.event_id_inc = int(result.inserted_primary_key[0])
logging.info(f"Inserted PR Events data for PR with id {pr_id}\n")
def query_reviewers(self, reviewers, pr_id):
logging.info('Querying Reviewers')
if reviewers is None or len(reviewers) == 0:
logging.info('No reviewers to add')
return
table = 'pull_request_reviewers'
duplicate_col_map = {'pr_reviewer_map_id': 'id'}
update_col_map = {}
table_pkey = 'pr_reviewer_map_id'
update_keys = list(update_col_map.keys()) if update_col_map else []
cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey]
reviewers_table_values = get_table_values(self, cols_query, [table])
new_reviewers = assign_tuple_action(self, reviewers, reviewers_table_values, update_col_map, duplicate_col_map,
table_pkey)
for reviewers_dict in new_reviewers:
if 'login' in reviewers_dict:
cntrb_id = find_id_from_login(self, reviewers_dict['login'])
else:
cntrb_id = 1
reviewer = {
'pull_request_id': pr_id,
'cntrb_id': cntrb_id,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
if reviewers_dict['flag'] == 'need_insertion':
result = self.db.execute(self.pull_request_reviewers_table.insert().values(reviewer))
logging.info(f"Added PR Reviewer {result.inserted_primary_key}")
self.reviewer_id_inc = int(result.inserted_primary_key[0])
self.results_counter += 1
logging.info(f"Finished inserting PR Reviewer data for PR with id {pr_id}")
def query_assignee(self, assignees, pr_id):
logging.info('Querying Assignees')
if assignees is None or len(assignees) == 0:
logging.info('No assignees to add')
return
table = 'pull_request_assignees'
duplicate_col_map = {'pr_assignee_map_id': 'id'}
update_col_map = {}
table_pkey = 'pr_assignee_map_id'
update_keys = list(update_col_map.keys()) if update_col_map else []
cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey]
assignee_table_values = get_table_values(self, cols_query, [table])
assignees = assign_tuple_action(self, assignees, assignee_table_values, update_col_map, duplicate_col_map,
table_pkey)
for assignee_dict in assignees:
if 'login' in assignee_dict:
cntrb_id = find_id_from_login(self, assignee_dict['login'])
else:
cntrb_id = 1
assignee = {
'pull_request_id': pr_id,
'contrib_id': cntrb_id,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
if assignee_dict['flag'] == 'need_insertion':
result = self.db.execute(self.pull_request_assignees_table.insert().values(assignee))
logging.info(f'Added PR Assignee {result.inserted_primary_key}')
self.assignee_id_inc = int(result.inserted_primary_key[0])
self.results_counter += 1
logging.info(f'Finished inserting PR Assignee data for PR with id {pr_id}')
def query_pr_meta(self, head, base, pr_id):
logging.info('Querying PR Meta')
table = 'pull_request_meta'
duplicate_col_map = {'pr_sha': 'sha'}
update_col_map = {}
value_update_col_map = {'pr_src_meta_label': None}
table_pkey = 'pr_repo_meta_id'
update_keys = list(update_col_map.keys()) if update_col_map else []
update_keys += list(value_update_col_map.keys())
cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey]
meta_table_values = get_table_values(self, cols_query, [table])
pr_meta_dict = {
'head': assign_tuple_action(self, [head], meta_table_values, update_col_map, duplicate_col_map,
table_pkey, value_update_col_map=value_update_col_map)[0],
'base': assign_tuple_action(self, [base], meta_table_values, update_col_map, duplicate_col_map,
table_pkey, value_update_col_map=value_update_col_map)[0]
}
for pr_side, pr_meta_data in pr_meta_dict.items():
pr_meta = {
'pull_request_id': pr_id,
'pr_head_or_base': pr_side,
'pr_src_meta_label': pr_meta_data['label'],
'pr_src_meta_ref': pr_meta_data['ref'],
'pr_sha': pr_meta_data['sha'],
'cntrb_id': find_id_from_login(self, pr_meta_data['user']['login']) if pr_meta_data['user'] \
and 'login' in pr_meta_data['user'] else None,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
if pr_meta_data['flag'] == 'need_update':
result = self.db.execute(self.pull_request_meta_table.update().where(
self.pull_request_meta_table.c.pr_sha==pr_meta['pr_sha'] and
self.pull_request_meta_table.c.pr_head_or_base==pr_side
).values(pr_meta))
logging.info("Updated tuple in the issues table with existing gh_issue_id: {}".format(
issue_dict['id']))
self.issue_id_inc = issue_dict['pkey']
elif pr_meta_data['flag'] == 'need_insertion':
result = self.db.execute(self.pull_request_meta_table.insert().values(pr_meta))
logging.info(f'Added PR Head {result.inserted_primary_key}')
self.pr_meta_id_inc = int(result.inserted_primary_key[0])
self.results_counter += 1
else:
pr_meta_id_sql = """
SELECT pr_repo_meta_id FROM pull_request_meta
WHERE pr_sha='{}'
""".format(pr_meta_data['sha'])
self.pr_meta_id_inc = int(pd.read_sql(pr_meta_id_sql, self.db).iloc[0]['pr_repo_meta_id'])
if pr_meta_data['repo']:
self.query_pr_repo(pr_meta_data['repo'], pr_side, self.pr_meta_id_inc)
else:
logging.info('No new PR Head data to add')
logging.info(f'Finished inserting PR Head & Base data for PR with id {pr_id}')
def query_pr_comments(self, owner, repo, gh_pr_no, pr_id):
logging.info('Querying PR Comments')
url = (f'https://api.github.com/repos/{owner}/{repo}/issues/{gh_pr_no}' +
'/comments?per_page=100&page={}')
# Get pull request comments that we already have stored
# Set our duplicate and update column map keys (something other than PK) to
        # check duplicates/needed column updates with
table = 'pull_request_message_ref'
table_pkey = 'pr_msg_ref_id'
update_col_map = {}
duplicate_col_map = {'pr_message_ref_src_comment_id': 'id'}
#list to hold contributors needing insertion or update
pr_messages = paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey)
logging.info("Count of pull request comments needing insertion: " + str(len(pr_messages)) + "\n")
for pr_msg_dict in pr_messages:
if pr_msg_dict['user'] and 'login' in pr_msg_dict['user']:
cntrb_id = find_id_from_login(self, pr_msg_dict['user']['login'])
else:
cntrb_id = 1
msg = {
'rgls_id': None,
'msg_text': pr_msg_dict['body'],
'msg_timestamp': pr_msg_dict['created_at'],
'msg_sender_email': None,
'msg_header': None,
'pltfrm_id': '25150',
'cntrb_id': cntrb_id,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
result = self.db.execute(self.message_table.insert().values(msg))
logging.info(f'Added PR Comment {result.inserted_primary_key}')
self.msg_id_inc = int(result.inserted_primary_key[0])
pr_msg_ref = {
'pull_request_id': pr_id,
'msg_id': self.msg_id_inc,
'pr_message_ref_src_comment_id': pr_msg_dict['id'],
'pr_message_ref_src_node_id': pr_msg_dict['node_id'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
result = self.db.execute(
self.pull_request_message_ref_table.insert().values(pr_msg_ref)
)
logging.info(f'Added PR Message Ref {result.inserted_primary_key}')
self.pr_msg_ref_id_inc = int(result.inserted_primary_key[0])
self.results_counter += 1
logging.info(f'Finished adding PR Message data for PR with id {pr_id}')
def query_pr_repo(self, pr_repo, pr_repo_type, pr_meta_id):
logging.info(f'Querying PR {pr_repo_type} repo')
table = 'pull_request_repo'
duplicate_col_map = {'pr_src_repo_id': 'id'}
update_col_map = {}
table_pkey = 'pr_repo_id'
update_keys = list(update_col_map.keys()) if update_col_map else []
cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey]
pr_repo_table_values = get_table_values(self, cols_query, [table])
new_pr_repo = assign_tuple_action(self, [pr_repo], pr_repo_table_values, update_col_map, duplicate_col_map,
table_pkey)[0]
if new_pr_repo['owner'] and 'login' in new_pr_repo['owner']:
cntrb_id = find_id_from_login(self, new_pr_repo['owner']['login'])
else:
cntrb_id = 1
pr_repo = {
'pr_repo_meta_id': pr_meta_id,
'pr_repo_head_or_base': pr_repo_type,
'pr_src_repo_id': new_pr_repo['id'],
# 'pr_src_node_id': new_pr_repo[0]['node_id'],
'pr_src_node_id': None,
'pr_repo_name': new_pr_repo['name'],
'pr_repo_full_name': new_pr_repo['full_name'],
'pr_repo_private_bool': new_pr_repo['private'],
'pr_cntrb_id': cntrb_id,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
if new_pr_repo['flag'] == 'need_insertion':
result = self.db.execute(self.pull_request_repo_table.insert().values(pr_repo))
logging.info(f'Added PR {pr_repo_type} repo {result.inserted_primary_key}')
self.results_counter += 1
logging.info(f'Finished adding PR {pr_repo_type} Repo data for PR with id {self.pr_id_inc}')
def get_owner_repo(self, github_url):
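        # e.g. 'https://github.com/octocat/Hello-World.git' -> ('octocat', 'Hello-World')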
split = github_url.split('/')
owner = split[-2]
repo = split[-1]
if '.git' in repo:
repo = repo[:-4]
return owner, repo
| 43.251527
| 182
| 0.582229
|
8e3fc2eb9bacb6a3359adbddbe61138706362f9d
| 16,923
|
py
|
Python
|
src/bdbdatastore/src/google/protobuf/descriptor.py
|
sprymak/typhoonae
|
fe31bcc7b21fc14f8aa97b36d66cd7671974543b
|
[
"Apache-2.0"
] | 3
|
2015-12-23T14:26:05.000Z
|
2016-05-09T04:05:51.000Z
|
third_party/google/protobuf/descriptor.py
|
dewitt/webfingerclient-dclinton
|
c13990378c8b0516c84f8507664e0a6ab8eefac5
|
[
"Apache-2.0"
] | 1
|
2016-12-15T12:24:46.000Z
|
2016-12-15T12:24:46.000Z
|
third_party/google/protobuf/descriptor.py
|
dewitt/webfingerclient-dclinton
|
c13990378c8b0516c84f8507664e0a6ab8eefac5
|
[
"Apache-2.0"
] | 1
|
2018-12-02T10:36:08.000Z
|
2018-12-02T10:36:08.000Z
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# TODO(robinson): We probably need to provide deep-copy methods for
# descriptor types. When a FieldDescriptor is passed into
# Descriptor.__init__(), we should make a deep copy and then set
# containing_type on it. Alternatively, we could just get
# rid of containing_type (it's not needed for reflection.py, at least).
#
# TODO(robinson): Print method?
#
# TODO(robinson): Useful __repr__?
"""Descriptors essentially contain exactly the information found in a .proto
file, in types that make this information accessible in Python.
"""
__author__ = 'robinson@google.com (Will Robinson)'
class DescriptorBase(object):
"""Descriptors base class.
  This class is the base of all descriptor classes. It provides common
  options-related functionality.
"""
def __init__(self, options, options_class_name):
"""Initialize the descriptor given its options message and the name of the
class of the options message. The name of the class is required in case
the options message is None and has to be created.
"""
self._options = options
self._options_class_name = options_class_name
def GetOptions(self):
"""Retrieves descriptor options.
This method returns the options set or creates the default options for the
descriptor.
"""
if self._options:
return self._options
from google.protobuf import descriptor_pb2
try:
options_class = getattr(descriptor_pb2, self._options_class_name)
except AttributeError:
raise RuntimeError('Unknown options class name %s!' %
(self._options_class_name))
self._options = options_class()
return self._options
class Descriptor(DescriptorBase):
"""Descriptor for a protocol message type.
A Descriptor instance has the following attributes:
name: (str) Name of this protocol message type.
full_name: (str) Fully-qualified name of this protocol message type,
which will include protocol "package" name and the name of any
enclosing types.
filename: (str) Name of the .proto file containing this message.
containing_type: (Descriptor) Reference to the descriptor of the
type containing us, or None if we have no containing type.
fields: (list of FieldDescriptors) Field descriptors for all
fields in this type.
fields_by_number: (dict int -> FieldDescriptor) Same FieldDescriptor
objects as in |fields|, but indexed by "number" attribute in each
FieldDescriptor.
fields_by_name: (dict str -> FieldDescriptor) Same FieldDescriptor
objects as in |fields|, but indexed by "name" attribute in each
FieldDescriptor.
nested_types: (list of Descriptors) Descriptor references
for all protocol message types nested within this one.
nested_types_by_name: (dict str -> Descriptor) Same Descriptor
objects as in |nested_types|, but indexed by "name" attribute
in each Descriptor.
enum_types: (list of EnumDescriptors) EnumDescriptor references
for all enums contained within this type.
  enum_types_by_name: (dict str -> EnumDescriptor) Same EnumDescriptor
objects as in |enum_types|, but indexed by "name" attribute
in each EnumDescriptor.
enum_values_by_name: (dict str -> EnumValueDescriptor) Dict mapping
from enum value name to EnumValueDescriptor for that value.
extensions: (list of FieldDescriptor) All extensions defined directly
within this message type (NOT within a nested type).
extensions_by_name: (dict, string -> FieldDescriptor) Same FieldDescriptor
objects as |extensions|, but indexed by "name" attribute of each
FieldDescriptor.
options: (descriptor_pb2.MessageOptions) Protocol message options or None
to use default message options.
"""
def __init__(self, name, full_name, filename, containing_type,
fields, nested_types, enum_types, extensions, options=None):
"""Arguments to __init__() are as described in the description
of Descriptor fields above.
"""
super(Descriptor, self).__init__(options, 'MessageOptions')
self.name = name
self.full_name = full_name
self.filename = filename
self.containing_type = containing_type
# We have fields in addition to fields_by_name and fields_by_number,
# so that:
# 1. Clients can index fields by "order in which they're listed."
# 2. Clients can easily iterate over all fields with the terse
# syntax: for f in descriptor.fields: ...
self.fields = fields
for field in self.fields:
field.containing_type = self
self.fields_by_number = dict((f.number, f) for f in fields)
self.fields_by_name = dict((f.name, f) for f in fields)
self.nested_types = nested_types
self.nested_types_by_name = dict((t.name, t) for t in nested_types)
self.enum_types = enum_types
for enum_type in self.enum_types:
enum_type.containing_type = self
self.enum_types_by_name = dict((t.name, t) for t in enum_types)
self.enum_values_by_name = dict(
(v.name, v) for t in enum_types for v in t.values)
self.extensions = extensions
for extension in self.extensions:
extension.extension_scope = self
self.extensions_by_name = dict((f.name, f) for f in extensions)
# TODO(robinson): We should have aggressive checking here,
# for example:
# * If you specify a repeated field, you should not be allowed
# to specify a default value.
# * [Other examples here as needed].
#
# TODO(robinson): for this and other *Descriptor classes, we
# might also want to lock things down aggressively (e.g.,
# prevent clients from setting the attributes). Having
# stronger invariants here in general will reduce the number
# of runtime checks we must do in reflection.py...
class FieldDescriptor(DescriptorBase):
"""Descriptor for a single field in a .proto file.
  A FieldDescriptor instance has the following attributes:
name: (str) Name of this field, exactly as it appears in .proto.
full_name: (str) Name of this field, including containing scope. This is
particularly relevant for extensions.
index: (int) Dense, 0-indexed index giving the order that this
field textually appears within its message in the .proto file.
number: (int) Tag number declared for this field in the .proto file.
type: (One of the TYPE_* constants below) Declared type.
cpp_type: (One of the CPPTYPE_* constants below) C++ type used to
represent this field.
label: (One of the LABEL_* constants below) Tells whether this
field is optional, required, or repeated.
default_value: (Varies) Default value of this field. Only
meaningful for non-repeated scalar fields. Repeated fields
should always set this to [], and non-repeated composite
fields should always set this to None.
containing_type: (Descriptor) Descriptor of the protocol message
type that contains this field. Set by the Descriptor constructor
if we're passed into one.
Somewhat confusingly, for extension fields, this is the
descriptor of the EXTENDED message, not the descriptor
of the message containing this field. (See is_extension and
extension_scope below).
message_type: (Descriptor) If a composite field, a descriptor
of the message type contained in this field. Otherwise, this is None.
enum_type: (EnumDescriptor) If this field contains an enum, a
descriptor of that enum. Otherwise, this is None.
is_extension: True iff this describes an extension field.
extension_scope: (Descriptor) Only meaningful if is_extension is True.
Gives the message that immediately contains this extension field.
Will be None iff we're a top-level (file-level) extension field.
options: (descriptor_pb2.FieldOptions) Protocol message field options or
None to use default field options.
"""
# Must be consistent with C++ FieldDescriptor::Type enum in
# descriptor.h.
#
# TODO(robinson): Find a way to eliminate this repetition.
TYPE_DOUBLE = 1
TYPE_FLOAT = 2
TYPE_INT64 = 3
TYPE_UINT64 = 4
TYPE_INT32 = 5
TYPE_FIXED64 = 6
TYPE_FIXED32 = 7
TYPE_BOOL = 8
TYPE_STRING = 9
TYPE_GROUP = 10
TYPE_MESSAGE = 11
TYPE_BYTES = 12
TYPE_UINT32 = 13
TYPE_ENUM = 14
TYPE_SFIXED32 = 15
TYPE_SFIXED64 = 16
TYPE_SINT32 = 17
TYPE_SINT64 = 18
MAX_TYPE = 18
# Must be consistent with C++ FieldDescriptor::CppType enum in
# descriptor.h.
#
# TODO(robinson): Find a way to eliminate this repetition.
CPPTYPE_INT32 = 1
CPPTYPE_INT64 = 2
CPPTYPE_UINT32 = 3
CPPTYPE_UINT64 = 4
CPPTYPE_DOUBLE = 5
CPPTYPE_FLOAT = 6
CPPTYPE_BOOL = 7
CPPTYPE_ENUM = 8
CPPTYPE_STRING = 9
CPPTYPE_MESSAGE = 10
MAX_CPPTYPE = 10
# Must be consistent with C++ FieldDescriptor::Label enum in
# descriptor.h.
#
# TODO(robinson): Find a way to eliminate this repetition.
LABEL_OPTIONAL = 1
LABEL_REQUIRED = 2
LABEL_REPEATED = 3
MAX_LABEL = 3
def __init__(self, name, full_name, index, number, type, cpp_type, label,
default_value, message_type, enum_type, containing_type,
is_extension, extension_scope, options=None):
"""The arguments are as described in the description of FieldDescriptor
attributes above.
Note that containing_type may be None, and may be set later if necessary
(to deal with circular references between message types, for example).
Likewise for extension_scope.
"""
super(FieldDescriptor, self).__init__(options, 'FieldOptions')
self.name = name
self.full_name = full_name
self.index = index
self.number = number
self.type = type
self.cpp_type = cpp_type
self.label = label
self.default_value = default_value
self.containing_type = containing_type
self.message_type = message_type
self.enum_type = enum_type
self.is_extension = is_extension
self.extension_scope = extension_scope
class EnumDescriptor(DescriptorBase):
"""Descriptor for an enum defined in a .proto file.
An EnumDescriptor instance has the following attributes:
name: (str) Name of the enum type.
full_name: (str) Full name of the type, including package name
and any enclosing type(s).
filename: (str) Name of the .proto file in which this appears.
values: (list of EnumValueDescriptors) List of the values
in this enum.
values_by_name: (dict str -> EnumValueDescriptor) Same as |values|,
but indexed by the "name" field of each EnumValueDescriptor.
values_by_number: (dict int -> EnumValueDescriptor) Same as |values|,
but indexed by the "number" field of each EnumValueDescriptor.
containing_type: (Descriptor) Descriptor of the immediate containing
type of this enum, or None if this is an enum defined at the
top level in a .proto file. Set by Descriptor's constructor
if we're passed into one.
options: (descriptor_pb2.EnumOptions) Enum options message or
None to use default enum options.
"""
def __init__(self, name, full_name, filename, values,
containing_type=None, options=None):
"""Arguments are as described in the attribute description above."""
super(EnumDescriptor, self).__init__(options, 'EnumOptions')
self.name = name
self.full_name = full_name
self.filename = filename
self.values = values
for value in self.values:
value.type = self
self.values_by_name = dict((v.name, v) for v in values)
self.values_by_number = dict((v.number, v) for v in values)
self.containing_type = containing_type
class EnumValueDescriptor(DescriptorBase):
"""Descriptor for a single value within an enum.
name: (str) Name of this value.
index: (int) Dense, 0-indexed index giving the order that this
value appears textually within its enum in the .proto file.
number: (int) Actual number assigned to this enum value.
type: (EnumDescriptor) EnumDescriptor to which this value
belongs. Set by EnumDescriptor's constructor if we're
passed into one.
options: (descriptor_pb2.EnumValueOptions) Enum value options message or
    None to use default enum value options.
"""
def __init__(self, name, index, number, type=None, options=None):
"""Arguments are as described in the attribute description above."""
super(EnumValueDescriptor, self).__init__(options, 'EnumValueOptions')
self.name = name
self.index = index
self.number = number
self.type = type
class ServiceDescriptor(DescriptorBase):
"""Descriptor for a service.
name: (str) Name of the service.
full_name: (str) Full name of the service, including package name.
  index: (int) 0-indexed index giving the order that this service's
    definition appears within the .proto file.
methods: (list of MethodDescriptor) List of methods provided by this
service.
options: (descriptor_pb2.ServiceOptions) Service options message or
None to use default service options.
"""
def __init__(self, name, full_name, index, methods, options=None):
super(ServiceDescriptor, self).__init__(options, 'ServiceOptions')
self.name = name
self.full_name = full_name
self.index = index
self.methods = methods
# Set the containing service for each method in this service.
for method in self.methods:
method.containing_service = self
def FindMethodByName(self, name):
"""Searches for the specified method, and returns its descriptor."""
for method in self.methods:
if name == method.name:
return method
return None
class MethodDescriptor(DescriptorBase):
"""Descriptor for a method in a service.
name: (str) Name of the method within the service.
full_name: (str) Full name of method.
index: (int) 0-indexed index of the method inside the service.
containing_service: (ServiceDescriptor) The service that contains this
method.
input_type: The descriptor of the message that this method accepts.
output_type: The descriptor of the message that this method returns.
options: (descriptor_pb2.MethodOptions) Method options message or
None to use default method options.
"""
def __init__(self, name, full_name, index, containing_service,
input_type, output_type, options=None):
"""The arguments are as described in the description of MethodDescriptor
attributes above.
Note that containing_service may be None, and may be set later if necessary.
"""
super(MethodDescriptor, self).__init__(options, 'MethodOptions')
self.name = name
self.full_name = full_name
self.index = index
self.containing_service = containing_service
self.input_type = input_type
self.output_type = output_type
def _ParseOptions(message, string):
"""Parses serialized options.
This helper function is used to parse serialized options in generated
proto2 files. It must not be used outside proto2.
"""
message.ParseFromString(string)
  return message
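def _ExampleBuildEnumDescriptor():
  """Illustrative sketch only, not part of the original module: constructing an
  EnumDescriptor by hand from the classes defined above. All names are made up.
  """
  values = [
      EnumValueDescriptor(name='FOO', index=0, number=0),
      EnumValueDescriptor(name='BAR', index=1, number=1),
  ]
  enum = EnumDescriptor(name='Color', full_name='example.Color',
                        filename='example.proto', values=values)
  # The constructor wires up the lookup tables and the back-references.
  assert enum.values_by_name['BAR'].number == 1
  assert values[0].type is enum
  return enum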
| 38.993088
| 80
| 0.715181
|
b7d185a1dfcdb88c8199b3e33c7778ff91a262a7
| 4,592
|
py
|
Python
|
preferences/views.py
|
jbbqqf/okapi
|
3db29ef1e15685fae304190bd176f75c4e367d03
|
[
"BSD-3-Clause"
] | null | null | null |
preferences/views.py
|
jbbqqf/okapi
|
3db29ef1e15685fae304190bd176f75c4e367d03
|
[
"BSD-3-Clause"
] | null | null | null |
preferences/views.py
|
jbbqqf/okapi
|
3db29ef1e15685fae304190bd176f75c4e367d03
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.db import IntegrityError
from rest_framework import status
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
from rest_framework.mixins import (
ListModelMixin, RetrieveModelMixin, UpdateModelMixin, DestroyModelMixin)
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import (
authentication_classes, permission_classes)
from rest_framework.authentication import (
TokenAuthentication, SessionAuthentication)
from rest_framework.filters import DjangoFilterBackend, SearchFilter
from preferences.filters import UserInterfaceFilter, UserPrefFilter
from preferences.models import UserInterface, UserPref
from preferences.serializers import UserInterfaceSerializer, UserPrefSerializer
@authentication_classes((TokenAuthentication, SessionAuthentication,))
@permission_classes((IsAuthenticated,))
class UserInterfaceView(ListModelMixin,
RetrieveModelMixin,
GenericViewSet):
"""
=== A list of all known user interfaces ===
Those objects are readonly API-wise. If you need to add your own entry, you
should contact an admin.
---
list:
parameters:
- name: search
description: contain filter for name and comment
paramType: query
type: string
retrieve:
parameters:
- name: search
description: contain filter for name and comment
paramType: query
type: string
"""
queryset = UserInterface.objects.all()
serializer_class = UserInterfaceSerializer
filter_backends = (DjangoFilterBackend, SearchFilter,)
search_fields = ('name', 'comment',)
filter_class = UserInterfaceFilter
@authentication_classes((TokenAuthentication, SessionAuthentication,))
@permission_classes((IsAuthenticated,))
class UserPrefView(ListModelMixin,
RetrieveModelMixin,
UpdateModelMixin,
DestroyModelMixin,
GenericViewSet):
"""
=== Allows a user to access his saved prefs ===
An authenticated user can only GET, POST or PATCH his own preferences.
    Not all users need custom preferences. You should create a new entry
    only for users who changed something from the default preferences.
When retrieving user preferences, if you get nothing, you know this user
has default preferences.
    A constraint prevents any user from having two different preferences for a
    given user interface. It means that if you filter entries by ui_id, you
    should always get only one entry. But BE CAREFUL: user interface ids are
    not guaranteed to be constant over time (if the database is migrated, for
    example), even if it won't happen often.
    A good practice when you manipulate user preferences, if you want to be
    100% sure of what you receive, is to filter your results both by id and by
    user interface name. This should be checked by the interface, and if the
    check fails you can just use default parameters instead of running the
    risk of using corrupted data.
---
list:
parameters:
- name: search
description: contain filter for ui's name and conf
paramType: query
type: string
retrieve:
parameters:
- name: search
description: contain filter for ui's name and conf
paramType: query
type: string
"""
serializer_class = UserPrefSerializer
filter_backends = (DjangoFilterBackend, SearchFilter,)
search_fields = ('ui__name', 'conf',)
filter_class = UserPrefFilter
def get_queryset(self):
queryset = UserPref.objects.filter(user=self.request.user)
return queryset
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
try:
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
except IntegrityError:
error = {'message': 'Duplicate entry for user {} and ui {}'.format(
self.request.user, serializer.validated_data['ui'])}
return Response(error, status=status.HTTP_400_BAD_REQUEST)
return Response(serializer.data, status=status.HTTP_201_CREATED)
def perform_create(self, serializer):
"""
You can only create preferences for your account.
"""
serializer.save(user=self.request.user)
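def _example_router():
    """Illustrative sketch only, not part of the original project: one way these
    viewsets could be exposed from a urls.py using DRF's DefaultRouter. The URL
    prefixes and basenames below are assumptions, not the project's real routes.
    """
    from rest_framework.routers import DefaultRouter
    router = DefaultRouter()
    router.register(r'interfaces', UserInterfaceView, 'userinterface')
    router.register(r'userprefs', UserPrefView, 'userpref')
    return router.urls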
| 34.787879
| 79
| 0.686847
|
eb124fe3c01c46356a4c028f455c7890eae636a5
| 16,954
|
py
|
Python
|
GBM/GBMSlidingwindows_V2/GBMslidingwindow_LabelT1window.py
|
joshlyman/TextureAnalysis
|
bfbedbd53f62396fdef383408089b37e5ab511d0
|
[
"MIT"
] | 1
|
2020-07-06T01:47:49.000Z
|
2020-07-06T01:47:49.000Z
|
GBM/GBMSlidingwindows_V2/GBMslidingwindow_LabelT1window.py
|
kumarneeraj2005/TextureAnalysis
|
bfbedbd53f62396fdef383408089b37e5ab511d0
|
[
"MIT"
] | null | null | null |
GBM/GBMSlidingwindows_V2/GBMslidingwindow_LabelT1window.py
|
kumarneeraj2005/TextureAnalysis
|
bfbedbd53f62396fdef383408089b37e5ab511d0
|
[
"MIT"
] | 1
|
2020-06-22T08:26:10.000Z
|
2020-06-22T08:26:10.000Z
|
# 2nd version: GBM sliding window for all 6 contrast images, and also
# T2 sliding windows: label the windows which also belong to T1 windows
# Recent update 0821: changed the raw mean setting from 8*8 box normalization
# to the raw feature w/o any normalization
# Adding PI and Necrosis is handled in other files: Add_PI_GBMSlidingwindow.py and Add_PI_Necro_GBMSlidingwindow.py
import csv
import os
import xml.etree.ElementTree as ET
import fnmatch
import numpy
import math
import SimpleITK
from mahotas.features.texture import haralick_labels
from GLCM import GLCMFeatures
from Gabor import ExtendGaborFeatures
from LBP import ExtendLBPFeatures
from GBM import GBMslidingWindowBoxMappingCoordinate
rootDir = '/Users/yanzhexu/Desktop/Research/GBM/aCGH_whole_tumor_maps_for_Neuro-Onc_dataset'
coorDir = '/Users/yanzhexu/Desktop/Research/GBM/18/patient_biopsy_coordinates_18.csv'
mapDir = '/Users/yanzhexu/Desktop/Research/GBM/18/map between pt numbers and pt label letters.txt'
outputDir = '/Users/yanzhexu/Desktop/Research/Sliding box GBM/MyAlgorithm_V2/addYlabel/GBM_SlidingWindow_TextureMap'
def GrayScaleNormalization(imgArray, imgMax,imgMin):
imgRange = imgMax - imgMin
imgArray = (imgArray - imgMin) * (255.0 / imgRange)
    # round to the nearest int
imgArray = numpy.rint(imgArray).astype(numpy.int16)
return imgArray
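def _example_gray_scale_normalization():
    # Illustrative sketch, not part of the original script: a small array is
    # linearly rescaled so its minimum maps to 0 and its maximum to 255.
    arr = numpy.array([[0.0, 0.5], [1.0, 2.0]])
    scaled = GrayScaleNormalization(arr, arr.max(), arr.min())
    assert scaled.min() == 0 and scaled.max() == 255
    return scaled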
def Read2DImage(fileName, rotateAngle=0):
rawImage = SimpleITK.ReadImage(fileName)
imgArray = SimpleITK.GetArrayFromImage(rawImage)
# Convert 3D Image to 2D
if len(imgArray.shape) == 3:
imgArray = imgArray[0, :, :]
return imgArray
# check if point is inside ROI boundary or outside boundary
def point_inside_polygon(x,y,poly):
n = len(poly)
inside =False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xinters = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xinters:
inside = not inside
p1x,p1y = p2x,p2y
return inside
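def _example_point_inside_polygon():
    # Illustrative sketch, not part of the original script: ray casting against
    # a 10x10 square given by its corner coordinates.
    square = [[0, 0], [10, 0], [10, 10], [0, 10]]
    assert point_inside_polygon(5, 5, square)
    assert not point_inside_polygon(15, 5, square)
    return square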
# check if box covers part of boundary
def checkboxinout(testx, testy, xycoord):
# check if box covers part of boundary
b1 = point_inside_polygon(testx - 4, testy - 4, xycoord)
b1h = point_inside_polygon(testx, testy - 4, xycoord)
b2 = point_inside_polygon(testx - 4, testy + 4, xycoord)
b2h = point_inside_polygon(testx - 4, testy, xycoord)
b3 = point_inside_polygon(testx + 4, testy - 4, xycoord)
b3h = point_inside_polygon(testx, testy + 4, xycoord)
b4 = point_inside_polygon(testx + 4, testy + 4, xycoord)
b4h = point_inside_polygon(testx + 4, testy, xycoord)
if b1 != True or b1h != True or b2 != True or b2h != True or b3 != True or b3h != True or b4 != True or b4h != True:
        # False means at least one of these points lies outside the boundary, i.e. the window straddles the boundary
return False
else:
return True
# get points inside ROI
# biopsy: 3 conditions: 1. in boundary 2. off boundary but in rectangle box 3. off boundary and out of box
def T2chooseinoutcoord(contourx1,contourx2,contoury1,contoury2,T1xycoord,T2xycoord,biopsycoordinatelist):
print biopsycoordinatelist
windowptlist = list()
# for each point inside rectangle plot, check if each point inside boundary or outside boundary, inside: True, outside: False
for testx in range(contourx1,contourx2+1):
for testy in range(contoury1,contoury2+1):
# check if point is inside T2 boundary or not
T2inorout = point_inside_polygon(testx, testy, T2xycoord)
if T2inorout == True:
# check if point is inside T1 boundary or not
T1inorout = point_inside_polygon(testx, testy, T1xycoord)
windowptlist.append(list())
windowptlist[len(windowptlist)-1].append(testx)
windowptlist[len(windowptlist)-1].append(testy)
                # checkboxinout == False means the 8x8 box crosses the boundary: flag 1 = boundary window, 0 = window fully inside the boundary
if checkboxinout(testx,testy,T2xycoord) == False:
windowptlist[len(windowptlist) - 1].append(1)
else:
windowptlist[len(windowptlist) - 1].append(0)
# 1: biopsy pt (inside), 0: not biopsy pt (inside)
if [testx,testy] in biopsycoordinatelist:
# remove from biopsy list
biopsycoordinatelist.remove([testx, testy])
windowptlist[len(windowptlist) - 1].append(1)
else:
windowptlist[len(windowptlist) - 1].append(0)
# 1: this window is also T1 window, 0: not belong to T1
if T1inorout == True:
windowptlist[len(windowptlist)-1].append(1)
else:
windowptlist[len(windowptlist)-1].append(0)
            # if the point is outside the T2 boundary but inside the rectangle box and is a biopsy point, add it with flag 2 (off boundary), flag 1 (biopsy), plus the T1-membership flag
elif [testx, testy] in biopsycoordinatelist:
print 'biopsy in rectangle box:',[testx,testy]
# if out of boundary but in rectangle box
biopsycoordinatelist.remove([testx,testy])
T1inorout = point_inside_polygon(testx, testy, T1xycoord)
windowptlist.append(list())
windowptlist[len(windowptlist) - 1].append(testx)
windowptlist[len(windowptlist) - 1].append(testy)
windowptlist[len(windowptlist) - 1].append(2)
windowptlist[len(windowptlist) - 1].append(1)
# 1: this window is also T1 window, 0: not belong to T1
if T1inorout == True:
windowptlist[len(windowptlist) - 1].append(1)
else:
windowptlist[len(windowptlist) - 1].append(0)
# if point is off boundary and out of rectangle box, add them all
if len(biopsycoordinatelist)!=0:
print 'rest biopsy:',biopsycoordinatelist
for biopsyclist in biopsycoordinatelist:
testx = biopsyclist[0]
testy = biopsyclist[1]
print 'biopsy out of rectangle box:', biopsyclist
T1inorout = point_inside_polygon(testx, testy, T1xycoord)
windowptlist.append(list())
windowptlist[len(windowptlist) - 1].append(testx)
windowptlist[len(windowptlist) - 1].append(testy)
windowptlist[len(windowptlist) - 1].append(2)
windowptlist[len(windowptlist) - 1].append(1)
# 1: this window is also T1 window, 0: not belong to T1
if T1inorout == True:
windowptlist[len(windowptlist) - 1].append(1)
else:
windowptlist[len(windowptlist) - 1].append(0)
return windowptlist
# process boundary points coordinate from XML file
def ParseXMLDrawROI(xmlfile):
tree = ET.parse(xmlfile)
root = tree.getroot()
xcoordlist = list()
ycoordlist = list()
xycoordlist = list()
for child in root.iter('string'):
if not fnmatch.fnmatch(child.text,'*{*}*'):
continue
xcoords = str(child.text).split(',')[0]
ycoords = str(child.text).split(',')[1]
xc = float(xcoords.split('{')[1])
yc = float(ycoords.split('}')[0].replace(' ',''))
xcoordlist.append(xc)
ycoordlist.append(yc)
xycoordlist.append(list())
xycoordlist[len(xycoordlist) - 1].append(xc)
xycoordlist[len(xycoordlist) - 1].append(yc)
xcoordlist.append(xcoordlist[0])
ycoordlist.append(ycoordlist[0])
# get x/y min/max in coords
xmin = min(xcoordlist)
ymin = min(ycoordlist)
xmax = max(xcoordlist)
ymax = max(ycoordlist)
# ceil: get higher int
# floor: get lower int
xmin = int(math.floor(xmin))
xmax = int(math.ceil(xmax))
ymin = int(math.floor(ymin))
ymax = int(math.ceil(ymax))
return xmin,xmax,ymin,ymax,xycoordlist
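def _example_parse_xml_draw_roi():
    # Illustrative sketch only; the XML layout below is an assumption inferred
    # from what ParseXMLDrawROI reads: every <string> element whose text looks
    # like '{x, y}' contributes one boundary point, everything else is skipped.
    import tempfile
    xml_text = ('<dict>'
                '<string>{1.2, 3.4}</string>'
                '<string>{5.0, 7.5}</string>'
                '<string>metadata without braces</string>'
                '</dict>')
    tmp = tempfile.NamedTemporaryFile(suffix='.xml', delete=False)
    tmp.write(xml_text.encode('utf-8'))
    tmp.close()
    xmin, xmax, ymin, ymax, xycoords = ParseXMLDrawROI(tmp.name)
    assert (xmin, xmax, ymin, ymax) == (1, 5, 3, 8)
    return xycoords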
# get biopsy coordinates
biopsycoordinatefile = GBMslidingWindowBoxMappingCoordinate.getCoordinatefiles(mapDir, coorDir)
def genTextures():
GLCMAngleList = ['Avg']
featureTitle = ['Image Contrast', 'Image Filename','X', 'Y', 'Boundary (1) or not (inside: 0), (outside:2)', 'Biopsy(1) or not (0)','T1 window in T2 (1) or not (0)']
for GLCMAngle in GLCMAngleList:
for featureName in haralick_labels[:-1]:
featureTitle.append(featureName + '_' + GLCMAngle)
LBPRadius = 1
LBPnPoints = 8 * LBPRadius
LBPMethod = 'uniform'
LBPFeatureList = []
for x in xrange(0, LBPnPoints + 1):
LBPFeatureList.append('LBP_%02d' % x)
LBPFeatureList.append('LBP_Other')
featureTitle = featureTitle + LBPFeatureList
Gaborsigma_range = (0.6,1.0)
Gaborfreq_range = (0.1, 0.3, 0.5)
kernel_bank = []
GaborFeatureList = ['Gabor_Mean', 'Gabor_Std']
for GaborSigma in Gaborsigma_range:
for GaborFreq in Gaborfreq_range:
for featureName in GaborFeatureList:
featureTitle.append(featureName + '_' + str(GaborSigma) + '_' + str(GaborFreq))
Gaborkernel_bank = ExtendGaborFeatures.genKernelBank(Gaborsigma_range, Gaborfreq_range, kernel_bank)
MeanStdfeaturelist = ['Raw_Mean','Raw_Std']
Ylabel = ['Ylabel']
featureTitle = featureTitle + MeanStdfeaturelist + Ylabel
dicomnames = ['EPI', 'P', 'Q', 'RCBV', 'SPGRC', 'T2']
for texturemapfile in os.listdir(rootDir):
if texturemapfile.startswith('.'):
continue
if texturemapfile.startswith('..'):
continue
print texturemapfile
patientname = texturemapfile.split('_')[0]
if fnmatch.fnmatch(patientname, "*FSL*"):
newpatientname = patientname.replace("FSL", "")
elif fnmatch.fnmatch(patientname, "*h*"):
newpatientname = patientname.replace("h", "")
else:
newpatientname = patientname
print newpatientname
slicepathfile = os.path.join(rootDir, texturemapfile)
for slicefile in os.listdir(slicepathfile):
if slicefile.startswith('.'):
continue
if slicefile.startswith('..'):
continue
print slicefile
slicenum = slicefile.replace('slice', '')
slicenum = int(slicenum)
dcmxmlfilepath = os.path.join(slicepathfile, slicefile)
dcmfiledict = dict()
for dcmfile in os.listdir(dcmxmlfilepath):
if dcmfile.startswith('.'):
continue
if fnmatch.fnmatch(dcmfile, '*dcm*') is False:
continue
if fnmatch.fnmatch(dcmfile, '*precontrast*'):
continue
if fnmatch.fnmatch(dcmfile, '*C*SPGR*') or fnmatch.fnmatch(dcmfile, '*+C*T1*') or fnmatch.fnmatch(dcmfile,'*T1*+C*'):
SPGRCfile = dcmfile
dcmfiledict['SPGRC'] = SPGRCfile
if fnmatch.fnmatch(dcmfile, '*T2*'):
T2file = dcmfile
dcmfiledict['T2'] = T2file
if fnmatch.fnmatch(dcmfile, '*q*'):
Qfile = dcmfile
dcmfiledict['Q'] = Qfile
if fnmatch.fnmatch(dcmfile, '*p*'):
Pfile = dcmfile
dcmfiledict['P'] = Pfile
if fnmatch.fnmatch(dcmfile, '*rCBV*'):
RCBVfile = dcmfile
dcmfiledict['RCBV'] = RCBVfile
if fnmatch.fnmatch(dcmfile, '*EPI*+C*') or fnmatch.fnmatch(dcmfile, '*+C*EPI*'):
EPIfile = dcmfile
dcmfiledict['EPI'] = EPIfile
for xmlfile in os.listdir(dcmxmlfilepath):
if not fnmatch.fnmatch(xmlfile, '*.xml'):
continue
if fnmatch.fnmatch(xmlfile, '*NECROSIS*') or fnmatch.fnmatch(xmlfile,'*necrosis*'):
continue
if fnmatch.fnmatch(xmlfile, '*C*SPGR*') or fnmatch.fnmatch(xmlfile, '*+C*T1*') or fnmatch.fnmatch(
xmlfile, '*T1*+C*'):
T1xmlfile = xmlfile
if fnmatch.fnmatch(xmlfile, '*T2*'):
T2xmlfile = xmlfile
print '\n'
T1xmlfilepath = os.path.join(dcmxmlfilepath, T1xmlfile)
T2xmlfilepath = os.path.join(dcmxmlfilepath, T2xmlfile)
            # only process slices which have biopsy points (CG slice 37 has no biopsy, so it has no sliding window textures)
if slicenum not in biopsycoordinatefile[newpatientname]:
continue
else:
biopsycoordinatelist = biopsycoordinatefile[newpatientname][slicenum]
T1xmin, T1xmax, T1ymin, T1ymax, T1xycoord = ParseXMLDrawROI(T1xmlfilepath)
T2xmin, T2xmax, T2ymin, T2ymax, T2xycoord = ParseXMLDrawROI(T2xmlfilepath)
# check if coords inside boundary or outside boundary
T2windowptlist = T2chooseinoutcoord(T2xmin, T2xmax, T2ymin, T2ymax, T1xycoord, T2xycoord, biopsycoordinatelist)
# start to do T1
featuresOutFn = 'ROI_Texture_Map.csv'
# start to do T2
T2featuresOutFn = newpatientname + '_' + slicefile + '_' + 'T2' + '_' + featuresOutFn
featuresCSVFn = os.path.join(outputDir, T2featuresOutFn)
with open(featuresCSVFn, 'wb') as featureCSVFile:
featureWriter = csv.writer(featureCSVFile, dialect='excel')
featureWriter.writerow(featureTitle)
for eachdcm in dicomnames:
print eachdcm
dicomfile = dcmfiledict[eachdcm]
dicomfilepath = os.path.join(dcmxmlfilepath, dicomfile)
dicomImage = Read2DImage(dicomfilepath)
for eachpt in T2windowptlist:
meanstd = list()
GLCM = list()
LBP = list()
Gabor = list()
xcoord = int(eachpt[0])
ycoord = int(eachpt[1])
boundaryornot = int(eachpt[2])
biopsyornot = int(eachpt[3])
T1ornot = int(eachpt[4])
aFeature = [eachdcm, dicomfile, xcoord, ycoord, boundaryornot, biopsyornot,T1ornot]
subImage = dicomImage[ycoord - 4:ycoord + 4, xcoord - 4:xcoord + 4]
subImageGLCM = GrayScaleNormalization(subImage, subImage.max(), subImage.min())
Raw_mean = numpy.mean(subImage)
Raw_std = numpy.std(subImage)
meanstd.append(Raw_mean)
meanstd.append(Raw_std)
# GLCM
glcmFeatures = GLCMFeatures.calcFeatures(subImageGLCM)
for GLCMAngle in GLCMAngleList:
for featureName in haralick_labels[:-1]:
GLCM.append(glcmFeatures[GLCMAngle][featureName])
# LBP subimage
subImageLBP = dicomImage[ycoord - 4 - LBPRadius:ycoord + 4 + LBPRadius,
xcoord - 4 - LBPRadius: xcoord + 4 + LBPRadius]
extendsubImageLBP = GrayScaleNormalization(subImageLBP, subImage.max(),
subImage.min())
# need to use extended ROI
LBPs = ExtendLBPFeatures.calcFeatures(extendsubImageLBP, LBPnPoints, LBPRadius,
LBPMethod)
for lbp in LBPs:
LBP.append(lbp)
# Gabor, width = 8
# use extended ROI
GaborFeatures = ExtendGaborFeatures.calcFeatures(dicomImage, xcoord - 4, ycoord - 4,
8, 8,
Gaborkernel_bank, subImage.max(),
subImage.min())
for gaborfeature in GaborFeatures:
for eachg in gaborfeature:
Gabor.append(eachg)
aFeature = aFeature + GLCM + LBP + Gabor + meanstd
featureWriter.writerow(aFeature)
genTextures()
| 36.776573
| 169
| 0.57538
|
e7061eb7d2c1926d914ec648246fb721e8d11e39
| 546
|
py
|
Python
|
image_library/db_setup.py
|
gustavoschaedler/cc-anchorloans
|
946387cc1966ebbff3c0b6e05aaf477e6572db16
|
[
"MIT"
] | null | null | null |
image_library/db_setup.py
|
gustavoschaedler/cc-anchorloans
|
946387cc1966ebbff3c0b6e05aaf477e6572db16
|
[
"MIT"
] | 4
|
2021-03-19T03:13:34.000Z
|
2022-01-13T01:34:26.000Z
|
image_library/db_setup.py
|
gustavoschaedler/cc-anchorloans
|
946387cc1966ebbff3c0b6e05aaf477e6572db16
|
[
"MIT"
] | null | null | null |
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
engine = create_engine('sqlite:///site.db', convert_unicode=True)
db_session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine))
Base = declarative_base()
Base.query = db_session.query_property()
def init_db():
import image_library.models
Base.metadata.create_all(bind=engine)
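def _example_usage():
    """Illustrative sketch only, not part of the original module: the typical
    lifecycle is to call init_db() once at startup so the models declared
    against Base get their tables, then use db_session for queries and finally
    tear the scoped session down.
    """
    init_db()
    # e.g. db_session.query(SomeModel).all()  -- SomeModel is hypothetical here
    db_session.remove()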
| 34.125
| 65
| 0.690476
|
029e3f99a84ae86365e7e55b32e3c14291ecf68d
| 4,918
|
py
|
Python
|
src/keycloak/admin/roles.py
|
Arhimed/python-keycloak-client
|
37f09681b7c1ea9265885e30ba138382978b031e
|
[
"MIT"
] | null | null | null |
src/keycloak/admin/roles.py
|
Arhimed/python-keycloak-client
|
37f09681b7c1ea9265885e30ba138382978b031e
|
[
"MIT"
] | null | null | null |
src/keycloak/admin/roles.py
|
Arhimed/python-keycloak-client
|
37f09681b7c1ea9265885e30ba138382978b031e
|
[
"MIT"
] | null | null | null |
import json
from collections import OrderedDict
from keycloak.admin import KeycloakAdminCollection, KeycloakAdminBaseElement
from keycloak.helpers import to_camel_case
ROLE_KWARGS = [
'description',
'id',
'client_role',
'composite',
'composites',
'container_id',
'scope_param_required'
]
__all__ = ('Role', 'Roles', 'RealmRole', 'RealmRoles', 'ClientRole', 'ClientRoles',)
class Roles(KeycloakAdminCollection):
def create(self, name, **kwargs):
"""
Create new role
http://www.keycloak.org/docs-api/3.4/rest-api/index.html
#_roles_resource
:param str name: Name for the role
:param str description: (optional)
:param str id: (optional)
:param bool client_role: (optional)
:param bool composite: (optional)
:param object composites: (optional)
:param str container_id: (optional)
:param bool scope_param_required: (optional)
"""
payload = OrderedDict(name=name)
for key in ROLE_KWARGS:
if key in kwargs:
payload[to_camel_case(key)] = kwargs[key]
return self._admin.post(
url=self._url_collection(),
data=json.dumps(payload)
)
def by_name(self, role_name):
return Role(role_name=role_name, admin=self._admin)
class Role(KeycloakAdminBaseElement):
_role_name = None
def __init__(self, role_name, *args, **kwargs):
self._role_name = role_name
super(Role, self).__init__(*args, **kwargs)
def update(self, name, **kwargs):
"""
Update existing role.
http://www.keycloak.org/docs-api/3.4/rest-api/index.html#_roles_resource
:param str name: Name for the role
:param str description: (optional)
:param str id: (optional)
:param bool client_role: (optional)
:param bool composite: (optional)
:param object composites: (optional)
:param str container_id: (optional)
:param bool scope_param_required: (optional)
"""
payload = OrderedDict(name=name)
for key in ROLE_KWARGS:
if key in kwargs:
payload[to_camel_case(key)] = kwargs[key]
return self._admin.put(
url=self._admin.get_full_url(self.get_path_dyn('single')),
data=json.dumps(payload)
)
class RealmRole(Role):
_realm_name = None
_paths = {
'single': '/auth/admin/realms/{realm_name}/roles/{role_name}'
}
def __init__(self, realm_name, *args, **kwargs):
self._realm_name = realm_name
super(RealmRole, self).__init__(*args, **kwargs)
# def __repr__(self):
# return '<%s object realm="%s" role="%s">' % (
# self.__class__.__name__, self._realm_name, self._role_name)
class RealmRoles(Roles):
_realm_name = None
_paths = {
'collection': '/auth/admin/realms/{realm_name}/roles'
}
_itemclass = RealmRole
def __init__(self, realm_name, *args, **kwargs):
self._realm_name = realm_name
super(RealmRoles, self).__init__(*args, **kwargs)
def by_name(self, role_name):
return RealmRole(role_name=role_name, admin=self._admin, realm_name=self._realm_name)
def _url_item_params(self, data):
return dict(
admin=self._admin, realm_name=self._realm_name, role_name=data['name']
)
class ClientRole(RealmRole):
_client = None
_paths = {
'single': '/auth/admin/realms/{realm_name}/clients/{client_id}/roles/{role_name}'
}
def __init__(self, client, *args, **kwargs):
super(ClientRole, self).__init__(*args, **kwargs)
from keycloak.admin.clients import Client
if isinstance(client, Client):
self._client = client
else:
self._client = Client(admin=self._admin, realm_name=self._realm_name, id=client)
@property
def _client_id(self):
return self._client.id
class ClientRoles(RealmRoles):
_client = None
_paths = {
'collection': '/auth/admin/realms/{realm_name}/clients/{client_id}/roles'
}
_itemclass = ClientRole
def __init__(self, client, *args, **kwargs):
super(ClientRoles, self).__init__(*args, **kwargs)
from keycloak.admin.clients import Client
if isinstance(client, Client):
self._client = client
else:
self._client = Client(admin=self._admin, realm_name=self._realm_name, id=client)
@property
def _client_id(self):
return self._client.id
def by_name(self, role_name):
return ClientRole(role_name=role_name, admin=self._admin, realm_name=self._realm_name, client=self._client)
def _url_item_params(self, data):
return dict(
admin=self._admin, realm_name=self._realm_name, client=self._client, role_name=data['name']
)
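def _example_realm_roles_usage(admin):
    """Illustrative sketch only, not part of the original module. ``admin`` is
    assumed to be an already-authenticated admin client from this library, and
    the realm/role names below are placeholders.
    """
    roles = RealmRoles(realm_name='example-realm', admin=admin)
    roles.create('reader', description='Read-only access')
    # fetch the role wrapper by name and push an updated description
    return roles.by_name('reader').update('reader', description='Read-only reporting access')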
| 28.929412
| 115
| 0.631151
|
15e922ac3d9089dc475cdba6468645bed6cfd50b
| 1,065
|
py
|
Python
|
official/nlp/projects/mobilebert/utils.py
|
hjkim-haga/TF-OD-API
|
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
|
[
"Apache-2.0"
] | 1
|
2021-05-22T12:50:50.000Z
|
2021-05-22T12:50:50.000Z
|
official/nlp/projects/mobilebert/utils.py
|
DemonDamon/mask-detection-based-on-tf2odapi
|
192ae544169c1230c21141c033800aa1bd94e9b6
|
[
"MIT"
] | null | null | null |
official/nlp/projects/mobilebert/utils.py
|
DemonDamon/mask-detection-based-on-tf2odapi
|
192ae544169c1230c21141c033800aa1bd94e9b6
|
[
"MIT"
] | null | null | null |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import numpy as np
def generate_fake_input(batch_size=1, seq_len=5, vocab_size=10000, seed=0):
"""Generate consistent fake integer input sequences."""
np.random.seed(seed)
fake_input = []
for _ in range(batch_size):
fake_input.append([])
for _ in range(seq_len):
fake_input[-1].append(np.random.randint(0, vocab_size))
fake_input = np.asarray(fake_input)
return fake_input
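def _example_generate_fake_input():
  """Illustrative sketch, not part of the original module: the helper is
  deterministic for a fixed seed, which makes it convenient for shape checks."""
  fake = generate_fake_input(batch_size=2, seq_len=3, vocab_size=100, seed=1)
  assert fake.shape == (2, 3)
  # the same seed reproduces the same sequence
  assert (fake == generate_fake_input(batch_size=2, seq_len=3, vocab_size=100, seed=1)).all()
  return fake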
| 35.5
| 76
| 0.723005
|
ba422f0923a5b8928768a3df3097193dc0c47e92
| 63,539
|
py
|
Python
|
lib/spack/spack/test/cmd/ci.py
|
dialvarezs/spack
|
14d4203722daf3abd56a2b6c880214a1338e289f
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
lib/spack/spack/test/cmd/ci.py
|
dialvarezs/spack
|
14d4203722daf3abd56a2b6c880214a1338e289f
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
lib/spack/spack/test/cmd/ci.py
|
dialvarezs/spack
|
14d4203722daf3abd56a2b6c880214a1338e289f
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import filecmp
import json
import os
import shutil
import sys
import pytest
from jsonschema import ValidationError, validate
from llnl.util.filesystem import mkdirp, working_dir
import spack
import spack.binary_distribution
import spack.ci as ci
import spack.compilers as compilers
import spack.config
import spack.environment as ev
import spack.hash_types as ht
import spack.main
import spack.paths as spack_paths
import spack.repo as repo
import spack.util.gpg
import spack.util.spack_yaml as syaml
import spack.util.url as url_util
from spack.schema.buildcache_spec import schema as specfile_schema
from spack.schema.database_index import schema as db_idx_schema
from spack.schema.gitlab_ci import schema as gitlab_ci_schema
from spack.spec import CompilerSpec, Spec
from spack.util.executable import which
from spack.util.mock_package import MockPackageMultiRepo
ci_cmd = spack.main.SpackCommand('ci')
env_cmd = spack.main.SpackCommand('env')
mirror_cmd = spack.main.SpackCommand('mirror')
gpg_cmd = spack.main.SpackCommand('gpg')
install_cmd = spack.main.SpackCommand('install')
uninstall_cmd = spack.main.SpackCommand('uninstall')
buildcache_cmd = spack.main.SpackCommand('buildcache')
pytestmark = [pytest.mark.skipif(sys.platform == "win32",
reason="does not run on windows"),
pytest.mark.maybeslow]
@pytest.fixture()
def ci_base_environment(working_env, tmpdir):
os.environ['CI_PROJECT_DIR'] = tmpdir.strpath
@pytest.fixture(scope='function')
def mock_git_repo(tmpdir):
"""Create a mock git repo with two commits, the last one creating
a .gitlab-ci.yml"""
repo_path = tmpdir.join('mockspackrepo').strpath
mkdirp(repo_path)
git = which('git', required=True)
with working_dir(repo_path):
git('init')
with open('README.md', 'w') as f:
f.write('# Introduction')
with open('.gitlab-ci.yml', 'w') as f:
f.write("""
testjob:
script:
- echo "success"
""")
git('config', '--local', 'user.email', 'testing@spack.io')
git('config', '--local', 'user.name', 'Spack Testing')
# initial commit with README
git('add', 'README.md')
git('-c', 'commit.gpgsign=false', 'commit',
'-m', 'initial commit')
# second commit, adding a .gitlab-ci.yml
git('add', '.gitlab-ci.yml')
git('-c', 'commit.gpgsign=false', 'commit',
'-m', 'add a .gitlab-ci.yml')
yield repo_path
def test_specs_staging(config):
"""Make sure we achieve the best possible staging for the following
spec DAG::
a
/|
c b
|\
e d
|\
f g
In this case, we would expect 'c', 'e', 'f', and 'g' to be in the first stage,
and then 'd', 'b', and 'a' to be put in the next three stages, respectively.
"""
default = ('build', 'link')
mock_repo = MockPackageMultiRepo()
g = mock_repo.add_package('g', [], [])
f = mock_repo.add_package('f', [], [])
e = mock_repo.add_package('e', [], [])
d = mock_repo.add_package('d', [f, g], [default, default])
c = mock_repo.add_package('c', [], [])
b = mock_repo.add_package('b', [d, e], [default, default])
mock_repo.add_package('a', [b, c], [default, default])
with repo.use_repositories(mock_repo):
spec_a = Spec('a')
spec_a.concretize()
spec_a_label = ci._spec_deps_key(spec_a)
spec_b_label = ci._spec_deps_key(spec_a['b'])
spec_c_label = ci._spec_deps_key(spec_a['c'])
spec_d_label = ci._spec_deps_key(spec_a['d'])
spec_e_label = ci._spec_deps_key(spec_a['e'])
spec_f_label = ci._spec_deps_key(spec_a['f'])
spec_g_label = ci._spec_deps_key(spec_a['g'])
spec_labels, dependencies, stages = ci.stage_spec_jobs([spec_a])
assert (len(stages) == 4)
assert (len(stages[0]) == 4)
assert (spec_c_label in stages[0])
assert (spec_e_label in stages[0])
assert (spec_f_label in stages[0])
assert (spec_g_label in stages[0])
assert (len(stages[1]) == 1)
assert (spec_d_label in stages[1])
assert (len(stages[2]) == 1)
assert (spec_b_label in stages[2])
assert (len(stages[3]) == 1)
assert (spec_a_label in stages[3])
def test_ci_generate_with_env(tmpdir, mutable_mock_env_path,
install_mockery, mock_packages,
ci_base_environment, mock_binary_index):
"""Make sure we can get a .gitlab-ci.yml from an environment file
which has the gitlab-ci, cdash, and mirrors sections."""
mirror_url = 'https://my.fake.mirror'
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
definitions:
- bootstrap:
- cmake@3.4.3
- old-gcc-pkgs:
- archive-files
- callpath
# specify ^openblas-with-lapack to ensure that builtin.mock repo flake8
# package (which can also provide lapack) is not chosen, as it violates
# a package-level check which requires exactly one fetch strategy (this
# is apparently not an issue for other tests that use it).
- hypre@0.2.15 ^openblas-with-lapack
specs:
- matrix:
- [$old-gcc-pkgs]
mirrors:
some-mirror: {0}
gitlab-ci:
bootstrap:
- name: bootstrap
compiler-agnostic: true
mappings:
- match:
- arch=test-debian6-core2
runner-attributes:
tags:
- donotcare
image: donotcare
service-job-attributes:
image: donotcare
tags: [donotcare]
cdash:
build-group: Not important
url: https://my.fake.cdash
project: Not used
site: Nothing
""".format(mirror_url))
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
outputfile = str(tmpdir.join('.gitlab-ci.yml'))
with ev.read('test'):
ci_cmd('generate', '--output-file', outputfile)
with open(outputfile) as f:
contents = f.read()
yaml_contents = syaml.load(contents)
found_spec = False
for ci_key in yaml_contents.keys():
if '(bootstrap)' in ci_key:
found_spec = True
assert('cmake' in ci_key)
assert(found_spec)
assert('stages' in yaml_contents)
assert(len(yaml_contents['stages']) == 6)
assert(yaml_contents['stages'][0] == 'stage-0')
assert(yaml_contents['stages'][5] == 'stage-rebuild-index')
assert('rebuild-index' in yaml_contents)
rebuild_job = yaml_contents['rebuild-index']
expected = 'spack buildcache update-index --keys -d {0}'.format(
mirror_url)
assert(rebuild_job['script'][0] == expected)
assert('variables' in yaml_contents)
assert('SPACK_ARTIFACTS_ROOT' in yaml_contents['variables'])
artifacts_root = yaml_contents['variables']['SPACK_ARTIFACTS_ROOT']
assert(artifacts_root == 'jobs_scratch_dir')
def _validate_needs_graph(yaml_contents, needs_graph, artifacts):
for job_name, job_def in yaml_contents.items():
for needs_def_name, needs_list in needs_graph.items():
if job_name.startswith(needs_def_name):
# check job needs against the expected needs definition
j_needs = job_def['needs']
assert all([job_needs['job'][:job_needs['job'].index('/')]
in needs_list for job_needs in j_needs])
assert(all([nl in
[n['job'][:n['job'].index('/')] for n in j_needs]
for nl in needs_list]))
assert all([job_needs['artifacts'] == artifacts
for job_needs in j_needs])
break
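def _example_validate_needs_graph():
    """Illustrative sketch, not part of the original test module: a minimal
    yaml_contents/needs_graph pair that _validate_needs_graph accepts; the job
    names and hashes are made up."""
    yaml_contents = {
        '(specs) libelf/abc123': {
            'needs': [{'job': '(bootstrap) gcc/def456', 'artifacts': False}],
        },
    }
    needs_graph = {'(specs) libelf': ['(bootstrap) gcc']}
    _validate_needs_graph(yaml_contents, needs_graph, False)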
def test_ci_generate_bootstrap_gcc(tmpdir, mutable_mock_env_path,
install_mockery,
mock_packages, ci_base_environment):
"""Test that we can bootstrap a compiler and use it as the
compiler for a spec in the environment"""
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
definitions:
- bootstrap:
- gcc@3.0
- gcc@2.0
specs:
- dyninst%gcc@3.0
mirrors:
some-mirror: https://my.fake.mirror
gitlab-ci:
bootstrap:
- name: bootstrap
compiler-agnostic: true
mappings:
- match:
- arch=test-debian6-x86_64
runner-attributes:
tags:
- donotcare
""")
needs_graph = {
'(bootstrap) conflict': [],
'(bootstrap) gcc': [
'(bootstrap) conflict',
],
'(specs) libelf': [
'(bootstrap) gcc',
],
'(specs) libdwarf': [
'(bootstrap) gcc',
'(specs) libelf',
],
'(specs) dyninst': [
'(bootstrap) gcc',
'(specs) libelf',
'(specs) libdwarf',
],
}
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
outputfile = str(tmpdir.join('.gitlab-ci.yml'))
with ev.read('test'):
ci_cmd('generate', '--output-file', outputfile)
with open(outputfile) as f:
contents = f.read()
yaml_contents = syaml.load(contents)
_validate_needs_graph(yaml_contents, needs_graph, False)
def test_ci_generate_bootstrap_artifacts_buildcache(tmpdir,
mutable_mock_env_path,
install_mockery,
mock_packages,
ci_base_environment):
"""Test that we can bootstrap a compiler when artifacts buildcache
is turned on"""
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
definitions:
- bootstrap:
- gcc@3.0
specs:
- dyninst%gcc@3.0
mirrors:
some-mirror: https://my.fake.mirror
gitlab-ci:
bootstrap:
- name: bootstrap
compiler-agnostic: true
mappings:
- match:
- arch=test-debian6-x86_64
runner-attributes:
tags:
- donotcare
enable-artifacts-buildcache: True
""")
needs_graph = {
'(bootstrap) conflict': [],
'(bootstrap) gcc': [
'(bootstrap) conflict',
],
'(specs) libelf': [
'(bootstrap) gcc',
'(bootstrap) conflict',
],
'(specs) libdwarf': [
'(bootstrap) gcc',
'(bootstrap) conflict',
'(specs) libelf',
],
'(specs) dyninst': [
'(bootstrap) gcc',
'(bootstrap) conflict',
'(specs) libelf',
'(specs) libdwarf',
],
}
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
outputfile = str(tmpdir.join('.gitlab-ci.yml'))
with ev.read('test'):
ci_cmd('generate', '--output-file', outputfile)
with open(outputfile) as f:
contents = f.read()
yaml_contents = syaml.load(contents)
_validate_needs_graph(yaml_contents, needs_graph, True)
def test_ci_generate_with_env_missing_section(tmpdir, mutable_mock_env_path,
install_mockery,
mock_packages, ci_base_environment,
mock_binary_index):
"""Make sure we get a reasonable message if we omit gitlab-ci section"""
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
specs:
- archive-files
mirrors:
some-mirror: https://my.fake.mirror
""")
expect_out = 'Error: Environment yaml does not have "gitlab-ci" section'
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
with ev.read('test'):
output = ci_cmd('generate', fail_on_error=False, output=str)
assert(expect_out in output)
def test_ci_generate_with_cdash_token(tmpdir, mutable_mock_env_path,
install_mockery,
mock_packages, ci_base_environment,
mock_binary_index):
"""Make sure we it doesn't break if we configure cdash"""
os.environ.update({
'SPACK_CDASH_AUTH_TOKEN': 'notreallyatokenbutshouldnotmatter',
})
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
specs:
- archive-files
mirrors:
some-mirror: https://my.fake.mirror
gitlab-ci:
enable-artifacts-buildcache: True
mappings:
- match:
- archive-files
runner-attributes:
tags:
- donotcare
image: donotcare
cdash:
build-group: Not important
url: https://my.fake.cdash
project: Not used
site: Nothing
""")
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
with ev.read('test'):
copy_to_file = str(tmpdir.join('backup-ci.yml'))
output = ci_cmd('generate', '--copy-to', copy_to_file, output=str)
# That fake token should still have resulted in being unable to
# register build group with cdash, but the workload should
# still have been generated.
expect = 'Problem populating buildgroup'
assert(expect in output)
dir_contents = os.listdir(tmpdir.strpath)
assert('backup-ci.yml' in dir_contents)
orig_file = str(tmpdir.join('.gitlab-ci.yml'))
assert(filecmp.cmp(orig_file, copy_to_file) is True)
def test_ci_generate_with_custom_scripts(tmpdir, mutable_mock_env_path,
install_mockery,
mock_packages, monkeypatch,
ci_base_environment, mock_binary_index):
"""Test use of user-provided scripts"""
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
specs:
- archive-files
mirrors:
some-mirror: https://my.fake.mirror
gitlab-ci:
mappings:
- match:
- archive-files
runner-attributes:
tags:
- donotcare
variables:
ONE: plain-string-value
TWO: ${INTERP_ON_BUILD}
before_script:
- mkdir /some/path
- pushd /some/path
- git clone ${SPACK_REPO}
- cd spack
- git checkout ${SPACK_REF}
- popd
script:
- spack -d ci rebuild
after_script:
- rm -rf /some/path/spack
""")
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
outputfile = str(tmpdir.join('.gitlab-ci.yml'))
with ev.read('test'):
monkeypatch.setattr(spack.main, 'get_version', lambda: '0.15.3')
ci_cmd('generate', '--output-file', outputfile)
with open(outputfile) as f:
contents = f.read()
yaml_contents = syaml.load(contents)
found_it = False
assert('variables' in yaml_contents)
global_vars = yaml_contents['variables']
assert('SPACK_VERSION' in global_vars)
assert(global_vars['SPACK_VERSION'] == '0.15.3')
assert('SPACK_CHECKOUT_VERSION' in global_vars)
assert(global_vars['SPACK_CHECKOUT_VERSION'] == 'v0.15.3')
for ci_key in yaml_contents.keys():
ci_obj = yaml_contents[ci_key]
if 'archive-files' in ci_key:
# Ensure we have variables, possibly interpolated
assert('variables' in ci_obj)
var_d = ci_obj['variables']
assert('ONE' in var_d)
assert(var_d['ONE'] == 'plain-string-value')
assert('TWO' in var_d)
assert(var_d['TWO'] == '${INTERP_ON_BUILD}')
# Ensure we have scripts verbatim
assert('before_script' in ci_obj)
before_script = ci_obj['before_script']
assert(before_script[0] == 'mkdir /some/path')
assert(before_script[1] == 'pushd /some/path')
assert(before_script[2] == 'git clone ${SPACK_REPO}')
assert(before_script[3] == 'cd spack')
assert(before_script[4] == 'git checkout ${SPACK_REF}')
assert(before_script[5] == 'popd')
assert('script' in ci_obj)
assert(ci_obj['script'][0] == 'spack -d ci rebuild')
assert('after_script' in ci_obj)
after_script = ci_obj['after_script'][0]
assert(after_script == 'rm -rf /some/path/spack')
found_it = True
assert(found_it)
def test_ci_generate_pkg_with_deps(tmpdir, mutable_mock_env_path,
install_mockery,
mock_packages, ci_base_environment):
"""Test pipeline generation for a package w/ dependencies"""
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
specs:
- flatten-deps
mirrors:
some-mirror: https://my.fake.mirror
gitlab-ci:
enable-artifacts-buildcache: True
mappings:
- match:
- flatten-deps
runner-attributes:
tags:
- donotcare
- match:
- dependency-install
runner-attributes:
tags:
- donotcare
""")
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
outputfile = str(tmpdir.join('.gitlab-ci.yml'))
with ev.read('test'):
ci_cmd('generate', '--output-file', outputfile)
with open(outputfile) as f:
contents = f.read()
yaml_contents = syaml.load(contents)
found = []
for ci_key in yaml_contents.keys():
ci_obj = yaml_contents[ci_key]
if 'dependency-install' in ci_key:
assert('stage' in ci_obj)
assert(ci_obj['stage'] == 'stage-0')
found.append('dependency-install')
if 'flatten-deps' in ci_key:
assert('stage' in ci_obj)
assert(ci_obj['stage'] == 'stage-1')
found.append('flatten-deps')
assert('flatten-deps' in found)
assert('dependency-install' in found)
def test_ci_generate_for_pr_pipeline(tmpdir, mutable_mock_env_path,
install_mockery,
mock_packages, monkeypatch,
ci_base_environment):
"""Test that PR pipelines do not include a final stage job for
rebuilding the mirror index, even if that job is specifically
configured"""
os.environ.update({
'SPACK_PIPELINE_TYPE': 'spack_pull_request',
'SPACK_PR_BRANCH': 'fake-test-branch',
})
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
specs:
- flatten-deps
mirrors:
some-mirror: https://my.fake.mirror
gitlab-ci:
enable-artifacts-buildcache: True
mappings:
- match:
- flatten-deps
runner-attributes:
tags:
- donotcare
- match:
- dependency-install
runner-attributes:
tags:
- donotcare
service-job-attributes:
image: donotcare
tags: [donotcare]
rebuild-index: False
""")
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
outputfile = str(tmpdir.join('.gitlab-ci.yml'))
with ev.read('test'):
monkeypatch.setattr(
ci, 'SPACK_PR_MIRRORS_ROOT_URL', r"file:///fake/mirror")
monkeypatch.setattr(
ci, 'SPACK_SHARED_PR_MIRROR_URL', r"file:///fake/mirror_two")
ci_cmd('generate', '--output-file', outputfile)
with open(outputfile) as f:
contents = f.read()
yaml_contents = syaml.load(contents)
assert('rebuild-index' not in yaml_contents)
assert('variables' in yaml_contents)
pipeline_vars = yaml_contents['variables']
assert('SPACK_PIPELINE_TYPE' in pipeline_vars)
assert(pipeline_vars['SPACK_PIPELINE_TYPE'] == 'spack_pull_request')
def test_ci_generate_with_external_pkg(tmpdir, mutable_mock_env_path,
install_mockery,
mock_packages, monkeypatch,
ci_base_environment):
"""Make sure we do not generate jobs for external pkgs"""
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
specs:
- archive-files
- externaltest
mirrors:
some-mirror: https://my.fake.mirror
gitlab-ci:
mappings:
- match:
- archive-files
- externaltest
runner-attributes:
tags:
- donotcare
image: donotcare
""")
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
outputfile = str(tmpdir.join('.gitlab-ci.yml'))
with ev.read('test'):
monkeypatch.setattr(
ci, 'SPACK_PR_MIRRORS_ROOT_URL', r"file:///fake/mirror")
monkeypatch.setattr(
ci, 'SPACK_SHARED_PR_MIRROR_URL', r"file:///fake/mirror_two")
ci_cmd('generate', '--output-file', outputfile)
with open(outputfile) as f:
yaml_contents = syaml.load(f)
# Check that the "externaltool" package was not erroneously staged
assert not any('externaltool' in key for key in yaml_contents)
@pytest.mark.xfail(reason='fails intermittently and covered by gitlab ci')
def test_ci_rebuild(tmpdir, mutable_mock_env_path,
install_mockery, mock_packages, monkeypatch,
mock_gnupghome, mock_fetch, ci_base_environment,
mock_binary_index):
working_dir = tmpdir.join('working_dir')
log_dir = os.path.join(working_dir.strpath, 'logs')
repro_dir = os.path.join(working_dir.strpath, 'repro')
env_dir = working_dir.join('concrete_env')
mirror_dir = working_dir.join('mirror')
mirror_url = 'file://{0}'.format(mirror_dir.strpath)
broken_specs_path = os.path.join(working_dir.strpath, 'naughty-list')
broken_specs_url = url_util.join('file://', broken_specs_path)
temp_storage_url = 'file:///path/to/per/pipeline/storage'
ci_job_url = 'https://some.domain/group/project/-/jobs/42'
ci_pipeline_url = 'https://some.domain/group/project/-/pipelines/7'
signing_key_dir = spack_paths.mock_gpg_keys_path
signing_key_path = os.path.join(signing_key_dir, 'package-signing-key')
with open(signing_key_path) as fd:
signing_key = fd.read()
spack_yaml_contents = """
spack:
definitions:
- packages: [archive-files]
specs:
- $packages
mirrors:
test-mirror: {0}
gitlab-ci:
broken-specs-url: {1}
temporary-storage-url-prefix: {2}
mappings:
- match:
- archive-files
runner-attributes:
tags:
- donotcare
image: donotcare
cdash:
build-group: Not important
url: https://my.fake.cdash
project: Not used
site: Nothing
""".format(mirror_url, broken_specs_url, temp_storage_url)
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write(spack_yaml_contents)
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
with ev.read('test') as env:
with env.write_transaction():
env.concretize()
env.write()
if not os.path.exists(env_dir.strpath):
os.makedirs(env_dir.strpath)
shutil.copyfile(env.manifest_path,
os.path.join(env_dir.strpath, 'spack.yaml'))
shutil.copyfile(env.lock_path,
os.path.join(env_dir.strpath, 'spack.lock'))
root_spec_dag_hash = None
for h, s in env.specs_by_hash.items():
if s.name == 'archive-files':
root_spec_dag_hash = h
assert root_spec_dag_hash
def fake_cdash_register(build_name, base_url, project, site, track):
return ('fakebuildid', 'fakestamp')
monkeypatch.setattr(spack.cmd.ci, 'CI_REBUILD_INSTALL_BASE_ARGS', [
'notcommand'
])
monkeypatch.setattr(spack.cmd.ci, 'INSTALL_FAIL_CODE', 127)
with env_dir.as_cwd():
env_cmd('activate', '--without-view', '--sh', '-d', '.')
# Create environment variables as gitlab would do it
os.environ.update({
'SPACK_ARTIFACTS_ROOT': working_dir.strpath,
'SPACK_JOB_LOG_DIR': log_dir,
'SPACK_JOB_REPRO_DIR': repro_dir,
'SPACK_LOCAL_MIRROR_DIR': mirror_dir.strpath,
'SPACK_CONCRETE_ENV_DIR': env_dir.strpath,
'CI_PIPELINE_ID': '7192',
'SPACK_SIGNING_KEY': signing_key,
'SPACK_ROOT_SPEC': root_spec_dag_hash,
'SPACK_JOB_SPEC_DAG_HASH': root_spec_dag_hash,
'SPACK_JOB_SPEC_PKG_NAME': 'archive-files',
'SPACK_COMPILER_ACTION': 'NONE',
'SPACK_CDASH_BUILD_NAME': '(specs) archive-files',
'SPACK_REMOTE_MIRROR_URL': mirror_url,
'SPACK_PIPELINE_TYPE': 'spack_protected_branch',
'CI_JOB_URL': ci_job_url,
'CI_PIPELINE_URL': ci_pipeline_url,
})
ci_cmd('rebuild', fail_on_error=False)
expected_repro_files = [
'install.sh',
'root.json',
'archive-files.json',
'spack.yaml',
'spack.lock'
]
repro_files = os.listdir(repro_dir)
assert(all([f in repro_files for f in expected_repro_files]))
install_script_path = os.path.join(repro_dir, 'install.sh')
install_line = None
with open(install_script_path) as fd:
for line in fd:
if line.startswith('"notcommand"'):
install_line = line
assert(install_line)
def mystrip(s):
return s.strip('"').rstrip('\n').rstrip('"')
install_parts = [mystrip(s) for s in install_line.split(' ')]
assert('--keep-stage' in install_parts)
assert('--no-check-signature' not in install_parts)
assert('--no-add' in install_parts)
assert('-f' in install_parts)
flag_index = install_parts.index('-f')
assert('archive-files.json' in install_parts[flag_index + 1])
broken_spec_file = os.path.join(broken_specs_path, root_spec_dag_hash)
with open(broken_spec_file) as fd:
broken_spec_content = fd.read()
assert(ci_job_url in broken_spec_content)
assert(ci_pipeline_url in broken_spec_content)
env_cmd('deactivate')
def test_ci_nothing_to_rebuild(tmpdir, mutable_mock_env_path,
install_mockery, mock_packages, monkeypatch,
mock_fetch, ci_base_environment, mock_binary_index):
working_dir = tmpdir.join('working_dir')
mirror_dir = working_dir.join('mirror')
mirror_url = 'file://{0}'.format(mirror_dir.strpath)
spack_yaml_contents = """
spack:
definitions:
- packages: [archive-files]
specs:
- $packages
mirrors:
test-mirror: {0}
gitlab-ci:
enable-artifacts-buildcache: True
mappings:
- match:
- archive-files
runner-attributes:
tags:
- donotcare
image: donotcare
""".format(mirror_url)
install_cmd('archive-files')
buildcache_cmd('create', '-a', '-f', '-u', '--mirror-url',
mirror_url, 'archive-files')
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write(spack_yaml_contents)
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
with ev.read('test') as env:
env.concretize()
root_spec_dag_hash = None
for h, s in env.specs_by_hash.items():
if s.name == 'archive-files':
root_spec_dag_hash = h
# Create environment variables as gitlab would do it
os.environ.update({
'SPACK_ARTIFACTS_ROOT': working_dir.strpath,
'SPACK_JOB_LOG_DIR': 'log_dir',
'SPACK_JOB_REPRO_DIR': 'repro_dir',
'SPACK_LOCAL_MIRROR_DIR': mirror_dir.strpath,
'SPACK_CONCRETE_ENV_DIR': tmpdir.strpath,
'SPACK_ROOT_SPEC': root_spec_dag_hash,
'SPACK_JOB_SPEC_DAG_HASH': root_spec_dag_hash,
'SPACK_JOB_SPEC_PKG_NAME': 'archive-files',
'SPACK_COMPILER_ACTION': 'NONE',
'SPACK_REMOTE_MIRROR_URL': mirror_url,
})
def fake_dl_method(spec, *args, **kwargs):
print('fake download buildcache {0}'.format(spec.name))
monkeypatch.setattr(
spack.binary_distribution, 'download_single_spec', fake_dl_method)
ci_out = ci_cmd('rebuild', output=str)
assert('No need to rebuild archive-files' in ci_out)
assert('fake download buildcache archive-files' in ci_out)
env_cmd('deactivate')
@pytest.mark.disable_clean_stage_check
def test_push_mirror_contents(tmpdir, mutable_mock_env_path,
install_mockery_mutable_config, mock_packages,
mock_fetch, mock_stage, mock_gnupghome,
ci_base_environment, mock_binary_index):
working_dir = tmpdir.join('working_dir')
mirror_dir = working_dir.join('mirror')
mirror_url = 'file://{0}'.format(mirror_dir.strpath)
signing_key_dir = spack_paths.mock_gpg_keys_path
signing_key_path = os.path.join(signing_key_dir, 'package-signing-key')
with open(signing_key_path) as fd:
signing_key = fd.read()
ci.import_signing_key(signing_key)
spack_yaml_contents = """
spack:
definitions:
- packages: [patchelf]
specs:
- $packages
mirrors:
test-mirror: {0}
gitlab-ci:
enable-artifacts-buildcache: True
mappings:
- match:
- patchelf
runner-attributes:
tags:
- donotcare
image: donotcare
service-job-attributes:
tags:
- nonbuildtag
image: basicimage
""".format(mirror_url)
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write(spack_yaml_contents)
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
with ev.read('test') as env:
spec_map = ci.get_concrete_specs(
env, 'patchelf', 'patchelf', 'FIND_ANY')
concrete_spec = spec_map['patchelf']
spec_json = concrete_spec.to_json(hash=ht.dag_hash)
json_path = str(tmpdir.join('spec.json'))
with open(json_path, 'w') as ypfd:
ypfd.write(spec_json)
install_cmd('--keep-stage', json_path)
# env, json_path, mirror_url, sign_binaries
ci.push_mirror_contents(env, json_path, mirror_url, True)
buildcache_path = os.path.join(mirror_dir.strpath, 'build_cache')
# Now test the --prune-dag (default) option of spack ci generate
mirror_cmd('add', 'test-ci', mirror_url)
outputfile_pruned = str(tmpdir.join('pruned_pipeline.yml'))
ci_cmd('generate', '--output-file', outputfile_pruned)
with open(outputfile_pruned) as f:
contents = f.read()
yaml_contents = syaml.load(contents)
assert('no-specs-to-rebuild' in yaml_contents)
# Make sure there are no other spec jobs or rebuild-index
assert(len(yaml_contents.keys()) == 1)
the_elt = yaml_contents['no-specs-to-rebuild']
assert('tags' in the_elt)
assert('nonbuildtag' in the_elt['tags'])
assert('image' in the_elt)
assert(the_elt['image'] == 'basicimage')
outputfile_not_pruned = str(tmpdir.join('unpruned_pipeline.yml'))
ci_cmd('generate', '--no-prune-dag', '--output-file',
outputfile_not_pruned)
# Test the --no-prune-dag option of spack ci generate
with open(outputfile_not_pruned) as f:
contents = f.read()
yaml_contents = syaml.load(contents)
found_spec_job = False
for ci_key in yaml_contents.keys():
if '(specs) patchelf' in ci_key:
the_elt = yaml_contents[ci_key]
assert('variables' in the_elt)
job_vars = the_elt['variables']
assert('SPACK_SPEC_NEEDS_REBUILD' in job_vars)
assert(job_vars['SPACK_SPEC_NEEDS_REBUILD'] == 'False')
found_spec_job = True
assert(found_spec_job)
mirror_cmd('rm', 'test-ci')
# Test generating buildcache index while we have bin mirror
buildcache_cmd('update-index', '--mirror-url', mirror_url)
index_path = os.path.join(buildcache_path, 'index.json')
with open(index_path) as idx_fd:
index_object = json.load(idx_fd)
validate(index_object, db_idx_schema)
# Now that index is regenerated, validate "buildcache list" output
buildcache_list_output = buildcache_cmd('list', output=str)
assert('patchelf' in buildcache_list_output)
# Also test buildcache_spec schema
bc_files_list = os.listdir(buildcache_path)
for file_name in bc_files_list:
if file_name.endswith('.spec.json.sig'):
spec_json_path = os.path.join(buildcache_path, file_name)
with open(spec_json_path) as json_fd:
json_object = Spec.extract_json_from_clearsig(json_fd.read())
validate(json_object, specfile_schema)
logs_dir = working_dir.join('logs_dir')
if not os.path.exists(logs_dir.strpath):
os.makedirs(logs_dir.strpath)
ci.copy_stage_logs_to_artifacts(concrete_spec, logs_dir.strpath)
logs_dir_list = os.listdir(logs_dir.strpath)
assert('spack-build-out.txt' in logs_dir_list)
# Also just make sure that if something goes wrong with the
# stage logs copy, no exception is thrown
ci.copy_stage_logs_to_artifacts(None, logs_dir.strpath)
dl_dir = working_dir.join('download_dir')
if not os.path.exists(dl_dir.strpath):
os.makedirs(dl_dir.strpath)
buildcache_cmd('download', '--spec-file', json_path, '--path',
dl_dir.strpath)
dl_dir_list = os.listdir(dl_dir.strpath)
assert(len(dl_dir_list) == 2)
def test_push_mirror_contents_exceptions(monkeypatch, capsys):
def failing_access(*args, **kwargs):
raise Exception('Error: Access Denied')
monkeypatch.setattr(spack.ci, '_push_mirror_contents', failing_access)
# Input doesn't matter, as we are faking exceptional output
url = 'fakejunk'
ci.push_mirror_contents(None, None, url, None)
captured = capsys.readouterr()
std_out = captured[0]
expect_msg = 'Permission problem writing to {0}'.format(url)
assert expect_msg in std_out
def test_ci_generate_override_runner_attrs(tmpdir, mutable_mock_env_path,
install_mockery,
mock_packages, monkeypatch,
ci_base_environment):
"""Test that we get the behavior we want with respect to the provision
of runner attributes like tags, variables, and scripts, both when we
inherit them from the top level and when we override one or more
at the runner level"""
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
specs:
- flatten-deps
- a
mirrors:
some-mirror: https://my.fake.mirror
gitlab-ci:
tags:
- toplevel
variables:
ONE: toplevelvarone
TWO: toplevelvartwo
before_script:
- pre step one
- pre step two
script:
- main step
after_script:
- post step one
mappings:
- match:
- flatten-deps
runner-attributes:
tags:
- specific-one
variables:
THREE: specificvarthree
- match:
- dependency-install
- match:
- a
runner-attributes:
tags:
- specific-a
- toplevel
variables:
ONE: specificvarone
TWO: specificvartwo
before_script:
- custom pre step one
script:
- custom main step
after_script:
- custom post step one
service-job-attributes:
image: donotcare
tags: [donotcare]
""")
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
outputfile = str(tmpdir.join('.gitlab-ci.yml'))
with ev.read('test'):
monkeypatch.setattr(
spack.main, 'get_version', lambda: '0.15.3-416-12ad69eb1')
monkeypatch.setattr(
ci, 'SPACK_PR_MIRRORS_ROOT_URL', r"file:///fake/mirror")
monkeypatch.setattr(
ci, 'SPACK_SHARED_PR_MIRROR_URL', r"file:///fake/mirror_two")
ci_cmd('generate', '--output-file', outputfile)
with open(outputfile) as f:
contents = f.read()
yaml_contents = syaml.load(contents)
assert('variables' in yaml_contents)
global_vars = yaml_contents['variables']
assert('SPACK_VERSION' in global_vars)
assert(global_vars['SPACK_VERSION'] == '0.15.3-416-12ad69eb1')
assert('SPACK_CHECKOUT_VERSION' in global_vars)
assert(global_vars['SPACK_CHECKOUT_VERSION'] == '12ad69eb1')
for ci_key in yaml_contents.keys():
if '(specs) b' in ci_key:
assert(False)
if '(specs) a' in ci_key:
# Make sure a's attributes override the top-level variables and all
# the scripts. Also, make sure the 'toplevel' tag doesn't appear
# twice, but that a's specific extra tag does appear
the_elt = yaml_contents[ci_key]
assert(the_elt['variables']['ONE'] == 'specificvarone')
assert(the_elt['variables']['TWO'] == 'specificvartwo')
assert('THREE' not in the_elt['variables'])
assert(len(the_elt['tags']) == 2)
assert('specific-a' in the_elt['tags'])
assert('toplevel' in the_elt['tags'])
assert(len(the_elt['before_script']) == 1)
assert(the_elt['before_script'][0] ==
'custom pre step one')
assert(len(the_elt['script']) == 1)
assert(the_elt['script'][0] == 'custom main step')
assert(len(the_elt['after_script']) == 1)
assert(the_elt['after_script'][0] ==
'custom post step one')
if '(specs) dependency-install' in ci_key:
# Since the dependency-install match omits any
# runner-attributes, make sure it inherited all the
# top-level attributes.
the_elt = yaml_contents[ci_key]
assert(the_elt['variables']['ONE'] == 'toplevelvarone')
assert(the_elt['variables']['TWO'] == 'toplevelvartwo')
assert('THREE' not in the_elt['variables'])
assert(len(the_elt['tags']) == 1)
assert(the_elt['tags'][0] == 'toplevel')
assert(len(the_elt['before_script']) == 2)
assert(the_elt['before_script'][0] == 'pre step one')
assert(the_elt['before_script'][1] == 'pre step two')
assert(len(the_elt['script']) == 1)
assert(the_elt['script'][0] == 'main step')
assert(len(the_elt['after_script']) == 1)
assert(the_elt['after_script'][0] == 'post step one')
if '(specs) flatten-deps' in ci_key:
# The flatten-deps match specifies that we keep the two
# top-level variables, but add a third specific one. It
# also adds a custom tag which should be combined with
# the top-level tag.
the_elt = yaml_contents[ci_key]
assert(the_elt['variables']['ONE'] == 'toplevelvarone')
assert(the_elt['variables']['TWO'] == 'toplevelvartwo')
assert(the_elt['variables']['THREE'] == 'specificvarthree')
assert(len(the_elt['tags']) == 2)
assert('specific-one' in the_elt['tags'])
assert('toplevel' in the_elt['tags'])
assert(len(the_elt['before_script']) == 2)
assert(the_elt['before_script'][0] == 'pre step one')
assert(the_elt['before_script'][1] == 'pre step two')
assert(len(the_elt['script']) == 1)
assert(the_elt['script'][0] == 'main step')
assert(len(the_elt['after_script']) == 1)
assert(the_elt['after_script'][0] == 'post step one')
def test_ci_generate_with_workarounds(tmpdir, mutable_mock_env_path,
install_mockery,
mock_packages, monkeypatch,
ci_base_environment):
"""Make sure the post-processing cli workarounds do what they should"""
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
specs:
- callpath%gcc@3.0
mirrors:
some-mirror: https://my.fake.mirror
gitlab-ci:
mappings:
- match: ['%gcc@3.0']
runner-attributes:
tags:
- donotcare
image: donotcare
enable-artifacts-buildcache: true
""")
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
outputfile = str(tmpdir.join('.gitlab-ci.yml'))
with ev.read('test'):
monkeypatch.setattr(
ci, 'SPACK_PR_MIRRORS_ROOT_URL', r"file:///fake/mirror")
monkeypatch.setattr(
ci, 'SPACK_SHARED_PR_MIRROR_URL', r"file:///fake/mirror_two")
ci_cmd('generate', '--output-file', outputfile, '--dependencies')
with open(outputfile) as f:
contents = f.read()
yaml_contents = syaml.load(contents)
found_one = False
for ci_key in yaml_contents.keys():
if ci_key.startswith('(specs) '):
found_one = True
job_obj = yaml_contents[ci_key]
assert('needs' not in job_obj)
assert('dependencies' in job_obj)
assert(found_one is True)
@pytest.mark.disable_clean_stage_check
def test_ci_rebuild_index(tmpdir, mutable_mock_env_path,
install_mockery, mock_packages, mock_fetch,
mock_stage):
working_dir = tmpdir.join('working_dir')
mirror_dir = working_dir.join('mirror')
mirror_url = 'file://{0}'.format(mirror_dir.strpath)
spack_yaml_contents = """
spack:
specs:
- callpath
mirrors:
test-mirror: {0}
gitlab-ci:
mappings:
- match:
- patchelf
runner-attributes:
tags:
- donotcare
image: donotcare
""".format(mirror_url)
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write(spack_yaml_contents)
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
with ev.read('test') as env:
spec_map = ci.get_concrete_specs(
env, 'callpath', 'callpath', 'FIND_ANY')
concrete_spec = spec_map['callpath']
spec_json = concrete_spec.to_json(hash=ht.dag_hash)
json_path = str(tmpdir.join('spec.json'))
with open(json_path, 'w') as ypfd:
ypfd.write(spec_json)
install_cmd('--keep-stage', '-f', json_path)
buildcache_cmd('create', '-u', '-a', '-f', '--mirror-url',
mirror_url, 'callpath')
ci_cmd('rebuild-index')
buildcache_path = os.path.join(mirror_dir.strpath, 'build_cache')
index_path = os.path.join(buildcache_path, 'index.json')
with open(index_path) as idx_fd:
index_object = json.load(idx_fd)
validate(index_object, db_idx_schema)
def test_ci_generate_bootstrap_prune_dag(
install_mockery_mutable_config, mock_packages, mock_fetch,
mock_archive, mutable_config, monkeypatch, tmpdir,
mutable_mock_env_path, ci_base_environment):
"""Test compiler bootstrapping with DAG pruning. Specifically, make
sure that if we detect the bootstrapped compiler needs to be rebuilt,
we ensure the spec we want to build with that compiler is scheduled
for rebuild as well."""
# Create a temp mirror directory for buildcache usage
mirror_dir = tmpdir.join('mirror_dir')
mirror_url = 'file://{0}'.format(mirror_dir.strpath)
# Install a compiler, because we want to put it in a buildcache
install_cmd('gcc@10.1.0%gcc@4.5.0')
# Put installed compiler in the buildcache
buildcache_cmd('create', '-u', '-a', '-f', '-d', mirror_dir.strpath,
'gcc@10.1.0%gcc@4.5.0')
# Now uninstall the compiler
uninstall_cmd('-y', 'gcc@10.1.0%gcc@4.5.0')
monkeypatch.setattr(spack.concretize.Concretizer,
'check_for_compiler_existence', False)
spack.config.set('config:install_missing_compilers', True)
assert CompilerSpec('gcc@10.1.0') not in compilers.all_compiler_specs()
# Configure the mirror where we put that buildcache w/ the compiler
mirror_cmd('add', 'test-mirror', mirror_url)
install_cmd('--no-check-signature', 'a%gcc@10.1.0')
# Put spec built with installed compiler in the buildcache
buildcache_cmd('create', '-u', '-a', '-f', '-d', mirror_dir.strpath,
'a%gcc@10.1.0')
# Now uninstall the spec
uninstall_cmd('-y', 'a%gcc@10.1.0')
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
definitions:
- bootstrap:
- gcc@10.1.0%gcc@4.5.0
specs:
- a%gcc@10.1.0
mirrors:
atestm: {0}
gitlab-ci:
bootstrap:
- name: bootstrap
compiler-agnostic: true
mappings:
- match:
- arch=test-debian6-x86_64
runner-attributes:
tags:
- donotcare
- match:
- arch=test-debian6-core2
runner-attributes:
tags:
- meh
""".format(mirror_url))
# Without this monkeypatch, the pipeline generation process would think that
# nothing in the environment needs rebuilding. With the monkeypatch, the
# process sees the compiler as needing a rebuild, which should then result
# in the specs built with that compiler needing a rebuild too.
def fake_get_mirrors_for_spec(spec=None, mirrors_to_check=None,
index_only=False):
if spec.name == 'gcc':
return []
else:
return [{
'spec': spec,
'mirror_url': mirror_url,
}]
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
outputfile = str(tmpdir.join('.gitlab-ci.yml'))
with ev.read('test'):
monkeypatch.setattr(
ci, 'SPACK_PR_MIRRORS_ROOT_URL', r"file:///fake/mirror")
monkeypatch.setattr(
ci, 'SPACK_SHARED_PR_MIRROR_URL', r"file:///fake/mirror_two")
ci_cmd('generate', '--output-file', outputfile)
with open(outputfile) as of:
yaml_contents = of.read()
original_yaml_contents = syaml.load(yaml_contents)
# without the monkeypatch, everything appears up to date and no
# rebuild jobs are generated.
assert(original_yaml_contents)
assert('no-specs-to-rebuild' in original_yaml_contents)
monkeypatch.setattr(spack.binary_distribution,
'get_mirrors_for_spec',
fake_get_mirrors_for_spec)
ci_cmd('generate', '--output-file', outputfile)
with open(outputfile) as of:
yaml_contents = of.read()
new_yaml_contents = syaml.load(yaml_contents)
assert(new_yaml_contents)
# This 'needs' graph reflects that even though specs 'a' and 'b' do
# not otherwise need to be rebuilt (thanks to DAG pruning), they
# both end up in the generated pipeline because the compiler they
# depend on is bootstrapped, and *does* need to be rebuilt.
needs_graph = {
'(bootstrap) gcc': [],
'(specs) b': [
'(bootstrap) gcc',
],
'(specs) a': [
'(bootstrap) gcc',
'(specs) b',
],
}
_validate_needs_graph(new_yaml_contents, needs_graph, False)
def test_ci_get_stack_changed(mock_git_repo, monkeypatch):
"""Test that we can detect the change to .gitlab-ci.yml in a
mock spack git repo."""
monkeypatch.setattr(spack.paths, 'prefix', mock_git_repo)
assert ci.get_stack_changed('/no/such/env/path') is True
def test_ci_generate_prune_untouched(tmpdir, mutable_mock_env_path,
install_mockery, mock_packages,
ci_base_environment, monkeypatch):
"""Test pipeline generation with pruning works to eliminate
specs that were not affected by a change"""
os.environ.update({
'SPACK_PRUNE_UNTOUCHED': 'TRUE', # enables pruning of untouched specs
})
mirror_url = 'https://my.fake.mirror'
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
specs:
- archive-files
- callpath
mirrors:
some-mirror: {0}
gitlab-ci:
mappings:
- match:
- arch=test-debian6-core2
runner-attributes:
tags:
- donotcare
image: donotcare
""".format(mirror_url))
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
outputfile = str(tmpdir.join('.gitlab-ci.yml'))
def fake_compute_affected(r1=None, r2=None):
return ['libdwarf']
def fake_stack_changed(env_path, rev1='HEAD^', rev2='HEAD'):
return False
with ev.read('test'):
monkeypatch.setattr(
ci, 'compute_affected_packages', fake_compute_affected)
monkeypatch.setattr(
ci, 'get_stack_changed', fake_stack_changed)
ci_cmd('generate', '--output-file', outputfile)
with open(outputfile) as f:
contents = f.read()
yaml_contents = syaml.load(contents)
for ci_key in yaml_contents.keys():
if 'archive-files' in ci_key or 'mpich' in ci_key:
print('Error: archive-files and mpich should have been pruned')
assert(False)
def test_ci_subcommands_without_mirror(tmpdir, mutable_mock_env_path,
mock_packages,
install_mockery, ci_base_environment,
mock_binary_index):
"""Make sure we catch if there is not a mirror and report an error"""
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
specs:
- archive-files
gitlab-ci:
mappings:
- match:
- archive-files
runner-attributes:
tags:
- donotcare
image: donotcare
""")
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
outputfile = str(tmpdir.join('.gitlab-ci.yml'))
with ev.read('test'):
# Check the 'generate' subcommand
output = ci_cmd('generate', '--output-file', outputfile,
output=str, fail_on_error=False)
ex = 'spack ci generate requires an env containing a mirror'
assert(ex in output)
# Also check the 'rebuild-index' subcommand
output = ci_cmd('rebuild-index', output=str, fail_on_error=False)
ex = 'spack ci rebuild-index requires an env containing a mirror'
assert(ex in output)
def test_ensure_only_one_temporary_storage():
"""Make sure 'gitlab-ci' section of env does not allow specification of
both 'enable-artifacts-buildcache' and 'temporary-storage-url-prefix'."""
gitlab_ci_template = """
gitlab-ci:
{0}
mappings:
- match:
- notcheckedhere
runner-attributes:
tags:
- donotcare
"""
enable_artifacts = 'enable-artifacts-buildcache: True'
temp_storage = 'temporary-storage-url-prefix: file:///temp/mirror'
specify_both = """{0}
{1}
""".format(enable_artifacts, temp_storage)
specify_neither = ''
# User can specify "enable-artifacts-buildcache" (boolean)
yaml_obj = syaml.load(gitlab_ci_template.format(enable_artifacts))
validate(yaml_obj, gitlab_ci_schema)
# User can also specify "temporary-storage-url-prefix" (string)
yaml_obj = syaml.load(gitlab_ci_template.format(temp_storage))
validate(yaml_obj, gitlab_ci_schema)
# However, specifying both should fail to validate
yaml_obj = syaml.load(gitlab_ci_template.format(specify_both))
with pytest.raises(ValidationError):
validate(yaml_obj, gitlab_ci_schema)
# Specifying neither should be fine too, as neither of these properties
# should be required
yaml_obj = syaml.load(gitlab_ci_template.format(specify_neither))
validate(yaml_obj, gitlab_ci_schema)
def test_ci_generate_temp_storage_url(tmpdir, mutable_mock_env_path,
install_mockery,
mock_packages, monkeypatch,
ci_base_environment, mock_binary_index):
"""Verify correct behavior when using temporary-storage-url-prefix"""
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
specs:
- archive-files
mirrors:
some-mirror: https://my.fake.mirror
gitlab-ci:
temporary-storage-url-prefix: file:///work/temp/mirror
mappings:
- match:
- archive-files
runner-attributes:
tags:
- donotcare
image: donotcare
""")
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
outputfile = str(tmpdir.join('.gitlab-ci.yml'))
monkeypatch.setattr(
ci, 'SPACK_PR_MIRRORS_ROOT_URL', r"file:///fake/mirror")
monkeypatch.setattr(
ci, 'SPACK_SHARED_PR_MIRROR_URL', r"file:///fake/mirror_two")
with ev.read('test'):
ci_cmd('generate', '--output-file', outputfile)
with open(outputfile) as of:
pipeline_doc = syaml.load(of.read())
assert('cleanup' in pipeline_doc)
cleanup_job = pipeline_doc['cleanup']
assert('script' in cleanup_job)
cleanup_task = cleanup_job['script'][0]
assert(cleanup_task.startswith('spack -d mirror destroy'))
assert('stages' in pipeline_doc)
stages = pipeline_doc['stages']
# Cleanup job should be 2nd to last, just before rebuild-index
assert('stage' in cleanup_job)
assert(cleanup_job['stage'] == stages[-2])
def test_ci_generate_read_broken_specs_url(tmpdir, mutable_mock_env_path,
install_mockery,
mock_packages, monkeypatch,
ci_base_environment):
"""Verify that `broken-specs-url` works as intended"""
spec_a = Spec('a')
spec_a.concretize()
a_dag_hash = spec_a.dag_hash()
spec_flattendeps = Spec('flatten-deps')
spec_flattendeps.concretize()
flattendeps_dag_hash = spec_flattendeps.dag_hash()
# Mark 'a' as broken (but not 'flatten-deps')
broken_spec_a_path = str(tmpdir.join(a_dag_hash))
with open(broken_spec_a_path, 'w') as bsf:
bsf.write('')
broken_specs_url = 'file://{0}'.format(tmpdir.strpath)
# Test that `spack ci generate` notices this broken spec and fails.
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
specs:
- flatten-deps
- a
mirrors:
some-mirror: https://my.fake.mirror
gitlab-ci:
broken-specs-url: "{0}"
mappings:
- match:
- a
- flatten-deps
- b
- dependency-install
runner-attributes:
tags:
- donotcare
image: donotcare
""".format(broken_specs_url))
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
with ev.read('test'):
# Check output of the 'generate' subcommand
output = ci_cmd('generate', output=str, fail_on_error=False)
assert('known to be broken' in output)
ex = '({0})'.format(a_dag_hash)
assert(ex in output)
ex = '({0})'.format(flattendeps_dag_hash)
assert(ex not in output)
def test_ci_reproduce(tmpdir, mutable_mock_env_path,
install_mockery, mock_packages, monkeypatch,
last_two_git_commits, ci_base_environment, mock_binary_index):
working_dir = tmpdir.join('repro_dir')
image_name = 'org/image:tag'
spack_yaml_contents = """
spack:
definitions:
- packages: [archive-files]
specs:
- $packages
mirrors:
test-mirror: file:///some/fake/mirror
gitlab-ci:
mappings:
- match:
- archive-files
runner-attributes:
tags:
- donotcare
image: {0}
""".format(image_name)
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write(spack_yaml_contents)
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
with ev.read('test') as env:
with env.write_transaction():
env.concretize()
env.write()
if not os.path.exists(working_dir.strpath):
os.makedirs(working_dir.strpath)
shutil.copyfile(env.manifest_path,
os.path.join(working_dir.strpath, 'spack.yaml'))
shutil.copyfile(env.lock_path,
os.path.join(working_dir.strpath, 'spack.lock'))
root_spec = None
job_spec = None
for h, s in env.specs_by_hash.items():
if s.name == 'archive-files':
root_spec = s
job_spec = s
job_spec_json_path = os.path.join(
working_dir.strpath, 'archivefiles.json')
with open(job_spec_json_path, 'w') as fd:
fd.write(job_spec.to_json(hash=ht.dag_hash))
root_spec_json_path = os.path.join(
working_dir.strpath, 'root.json')
with open(root_spec_json_path, 'w') as fd:
fd.write(root_spec.to_json(hash=ht.dag_hash))
artifacts_root = os.path.join(working_dir.strpath, 'scratch_dir')
pipeline_path = os.path.join(artifacts_root, 'pipeline.yml')
ci_cmd('generate', '--output-file', pipeline_path,
'--artifacts-root', artifacts_root)
job_name = ci.get_job_name(
'specs', False, job_spec, 'test-debian6-core2', None)
repro_file = os.path.join(working_dir.strpath, 'repro.json')
repro_details = {
'job_name': job_name,
'job_spec_json': 'archivefiles.json',
'root_spec_json': 'root.json',
'ci_project_dir': working_dir.strpath
}
with open(repro_file, 'w') as fd:
fd.write(json.dumps(repro_details))
install_script = os.path.join(working_dir.strpath, 'install.sh')
with open(install_script, 'w') as fd:
fd.write('#!/bin/bash\n\n#fake install\nspack install blah\n')
spack_info_file = os.path.join(
working_dir.strpath, 'spack_info.txt')
with open(spack_info_file, 'w') as fd:
fd.write('\nMerge {0} into {1}\n\n'.format(
last_two_git_commits[1], last_two_git_commits[0]))
def fake_download_and_extract_artifacts(url, work_dir):
pass
monkeypatch.setattr(ci, 'download_and_extract_artifacts',
fake_download_and_extract_artifacts)
rep_out = ci_cmd('reproduce-build',
'https://some.domain/api/v1/projects/1/jobs/2/artifacts',
'--working-dir',
working_dir.strpath,
output=str)
expect_out = 'docker run --rm -v {0}:{0} -ti {1}'.format(
working_dir.strpath, image_name)
assert(expect_out in rep_out)
| avg_line_length: 34.873216 | max_line_length: 85 | alphanum_fraction: 0.571255 |

| hexsha: ed7c50de42e9efa1c42a0490890312e9e5f6cd20 | size: 1,983 | ext: py | lang: Python |
| max_stars_repo_path: tests/test_read_simple_rmd.py | max_stars_repo_name: vmarceau/jupytext | max_stars_repo_head_hexsha: 799ea65b693a5f387fe37c9aebad463a97d1dd38 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null |
| max_issues_repo_path: tests/test_read_simple_rmd.py | max_issues_repo_name: vmarceau/jupytext | max_issues_repo_head_hexsha: 799ea65b693a5f387fe37c9aebad463a97d1dd38 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null |
| max_forks_repo_path: tests/test_read_simple_rmd.py | max_forks_repo_name: vmarceau/jupytext | max_forks_repo_head_hexsha: 799ea65b693a5f387fe37c9aebad463a97d1dd38 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2021-07-02T17:49:27.000Z | max_forks_repo_forks_event_max_datetime: 2021-07-02T17:49:27.000Z |
import re
from jupytext.compare import compare
import jupytext
from .utils import skip_if_dict_is_not_ordered
@skip_if_dict_is_not_ordered
def test_read_mostly_py_rmd_file(rmd="""---
title: Simple file
---
```{python, echo=TRUE}
import numpy as np
x = np.arange(0, 2*math.pi, eps)
```
```{python, echo=TRUE}
x = np.arange(0,1,eps)
y = np.abs(x)-.5
```
```{r}
ls()
```
```{r, results='asis', magic_args='-i x'}
cat(stringi::stri_rand_lipsum(3), sep='\n\n')
```
"""):
nb = jupytext.reads(rmd, 'Rmd')
assert nb.cells == [{'cell_type': 'raw',
'source': '---\ntitle: Simple file\n---',
'metadata': {}},
{'cell_type': 'code',
'metadata': {'hide_input': False},
'execution_count': None,
'source': 'import numpy as np\n'
'x = np.arange(0, 2*math.pi, eps)',
'outputs': []},
{'cell_type': 'code',
'metadata': {'hide_input': False},
'execution_count': None,
'source': 'x = np.arange(0,1,eps)\ny = np.abs(x)-.5',
'outputs': []},
{'cell_type': 'code',
'metadata': {},
'execution_count': None,
'source': '%%R\nls()',
'outputs': []},
{'cell_type': 'code',
'metadata': {'results': "'asis'"},
'execution_count': None,
'source': "%%R -i x\ncat(stringi::"
"stri_rand_lipsum(3), sep='\n\n')",
'outputs': []}]
rmd2 = jupytext.writes(nb, 'Rmd')
rmd2 = re.sub(r'```{r ', '```{r, ', rmd2)
rmd2 = re.sub(r'```{python ', '```{python, ', rmd2)
compare(rmd, rmd2)
| avg_line_length: 32.508197 | max_line_length: 78 | alphanum_fraction: 0.415532 |

| hexsha: 4ca3b4f6233000a51671fb1860b4423e15ccaf05 | size: 4,164 | ext: py | lang: Python |
| max_stars_repo_path: dvc/cache.py | max_stars_repo_name: gyliu513/dvc | max_stars_repo_head_hexsha: d932405ee148767c5dbbbc394d6cd414270bf8f0 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2020-01-31T14:29:20.000Z | max_stars_repo_stars_event_max_datetime: 2020-01-31T14:29:20.000Z |
| max_issues_repo_path: dvc/cache.py | max_issues_repo_name: gyliu513/dvc | max_issues_repo_head_hexsha: d932405ee148767c5dbbbc394d6cd414270bf8f0 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null |
| max_forks_repo_path: dvc/cache.py | max_forks_repo_name: gyliu513/dvc | max_forks_repo_head_hexsha: d932405ee148767c5dbbbc394d6cd414270bf8f0 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null |
"""Manages cache of a dvc repo."""
from __future__ import unicode_literals
import os
from collections import defaultdict
from funcy import cached_property
from dvc.utils.compat import builtin_str
from dvc.config import Config
class CacheConfig(object):
def __init__(self, config):
self.config = config
def set_dir(self, dname, level=None):
from dvc.remote.config import RemoteConfig
configobj = self.config.get_configobj(level)
path = RemoteConfig.resolve_path(dname, configobj.filename)
self.config.set(
Config.SECTION_CACHE, Config.SECTION_CACHE_DIR, path, level=level
)
def _make_remote_property(name):
"""
The config file is stored in a way that allows you to have a
cache for each remote.
This is needed when specifying external outputs
(as they require you to have an external cache location).
Imagine a config file like the following:
['remote "dvc-storage"']
url = ssh://localhost/tmp
ask_password = true
[cache]
ssh = dvc-storage
This method creates a cached property, containing cache named `name`:
self.config == {'ssh': 'dvc-storage'}
self.ssh # a RemoteSSH instance
"""
def getter(self):
from dvc.remote import Remote
remote = self.config.get(name)
if not remote:
return None
return Remote(self.repo, name=remote)
getter.__name__ = builtin_str(name)
return cached_property(getter)
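# A minimal usage sketch, assuming a hypothetical repo whose config maps the
# 'ssh' cache to a remote named 'dvc-storage' (as in the docstring above):
#
#     cache = Cache(repo)           # repo is assumed to be a dvc.repo.Repo instance
#     remote = cache.ssh            # resolves 'dvc-storage' into a Remote on first access
#     assert cache.ssh is remote    # cached_property memoizes the resolved Remote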
class Cache(object):
"""Class that manages cache locations of a dvc repo.
Args:
repo (dvc.repo.Repo): repo instance that this cache belongs to.
"""
CACHE_DIR = "cache"
def __init__(self, repo):
from dvc.remote import Remote
self.repo = repo
self.config = config = repo.config.config[Config.SECTION_CACHE]
local = config.get(Config.SECTION_CACHE_LOCAL)
if local:
name = Config.SECTION_REMOTE_FMT.format(local)
settings = repo.config.config[name]
else:
default_cache_dir = os.path.join(repo.dvc_dir, self.CACHE_DIR)
cache_dir = config.get(Config.SECTION_CACHE_DIR, default_cache_dir)
cache_type = config.get(Config.SECTION_CACHE_TYPE)
protected = config.get(Config.SECTION_CACHE_PROTECTED)
shared = config.get(Config.SECTION_CACHE_SHARED)
settings = {
Config.PRIVATE_CWD: config.get(
Config.PRIVATE_CWD, repo.dvc_dir
),
Config.SECTION_REMOTE_URL: cache_dir,
Config.SECTION_CACHE_TYPE: cache_type,
Config.SECTION_CACHE_PROTECTED: protected,
Config.SECTION_CACHE_SHARED: shared,
}
self.local = Remote(repo, **settings)
s3 = _make_remote_property(Config.SECTION_CACHE_S3)
gs = _make_remote_property(Config.SECTION_CACHE_GS)
ssh = _make_remote_property(Config.SECTION_CACHE_SSH)
hdfs = _make_remote_property(Config.SECTION_CACHE_HDFS)
azure = _make_remote_property(Config.SECTION_CACHE_AZURE)
class NamedCache(object):
def __init__(self):
self._items = defaultdict(lambda: defaultdict(set))
self.external = defaultdict(set)
@classmethod
def make(cls, scheme, checksum, name):
cache = cls()
cache.add(scheme, checksum, name)
return cache
def __getitem__(self, key):
return self._items[key]
def add(self, scheme, checksum, name):
self._items[scheme][checksum].add(name)
def add_external(self, url, rev, path):
self.external[url, rev].add(path)
def update(self, cache, suffix=""):
for scheme, src in cache._items.items():
dst = self._items[scheme]
for checksum, names in src.items():
if suffix:
dst[checksum].update(n + suffix for n in names)
else:
dst[checksum].update(names)
for repo_pair, files in cache.external.items():
self.external[repo_pair].update(files)
| avg_line_length: 29.742857 | max_line_length: 79 | alphanum_fraction: 0.635927 |