text stringlengths 4 1.02M | meta dict |
|---|---|
"""
Test the post wrangling module and functionality.
"""
##########################################################################
## Imports
##########################################################################
import os
import pickle
import unittest
from .test_models import MongoTestMixin
try:
from unittest import mock
except ImportError:
import mock
from baleen.wrangle import *
from baleen.exceptions import *
from baleen.models import Feed, Post
##########################################################################
## Fixtures
##########################################################################
# Directory of on-disk test fixtures, resolved relative to this test module.
FIXTURES = os.path.join(os.path.dirname(__file__), "fixtures")
# Pickled feedparser result that supplies realistic RSS entries to the tests.
RESULT = os.path.join(FIXTURES, "feedparser_result.pickle")
# Shared Feed fixture; matches the feed captured in the pickled result above.
FEED = Feed(
    title=u'The Rumpus.net',
    link=u'http://therumpus.net/feed/',
    urls={u'htmlurl': u'http://therumpus.net'}, category=u'books',
)
def mocked_requests_get(*args, **kwargs):
    """Stand-in for ``requests.get`` used by the fetch tests.

    Returns a 200 response with a fixed body for the Vader URL and a 404
    response (whose ``raise_for_status`` raises) for anything else.
    """
    class StubResponse:
        def __init__(self, body, code):
            self.text = body
            self.status_code = code

        def raise_for_status(self):
            # Mirror requests.Response: only non-200 statuses raise.
            if self.status_code == 200:
                return
            raise Exception("HTTP {}".format(self.status_code))

    url = args[0]
    if url != 'http://example.com/vader/':
        return StubResponse("??", 404)
    return StubResponse("Luke, I am your father!", 200)
##########################################################################
## Test Wrangling Posts
##########################################################################
class PostWranglerTests(MongoTestMixin, unittest.TestCase):
    """Integration tests for PostWrangler against the test database."""
    def setUp(self):
        # MongoTestMixin presumably provides a clean database per test -- TODO confirm.
        super().setUp()
        self.feed = FEED
        self.feed.save()
        # Captured feedparser output; .entries holds the raw entry dicts.
        with open(RESULT, 'rb') as f:
            self.entries = pickle.load(f).entries
    def test_wrangle_factory(self):
        """
        Test multiple types in the feed sync factory
        """
        for wrangle in PostWrangler.factory(self.entries, feed=self.feed):
            self.assertIsInstance(wrangle, PostWrangler)
    def test_wrangle_integration(self):
        """
        Test wrangling of all entries in the result.
        """
        self.assertEqual(Post.objects.count(), 0)
        for wrangle in PostWrangler.factory(self.entries, feed=self.feed):
            wrangle.wrangle()
            wrangle.wrangle() # Make sure that double wrangle does nothing.
        # The pickled fixture is expected to contain exactly 10 entries.
        self.assertEqual(Post.objects.count(), 10)
        # Ensure there are no duplicates
        for wrangle in PostWrangler.factory(self.entries, feed=self.feed):
            with self.assertRaises(WranglingError) as cm:
                wrangle.wrangle()
        self.assertEqual(Post.objects.count(), 10)
    def test_is_wrangled(self):
        """
        Test the wrangling detection
        """
        wrangle = PostWrangler(self.entries[0])
        self.assertFalse(wrangle.is_wrangled())
        wrangle.wrangle()
        self.assertTrue(wrangle.is_wrangled())
    def test_save_not_save(self):
        """
        Test the wrangle interaction with the database
        """
        self.assertEqual(Post.objects.count(), 0)
        wrangle = PostWrangler(self.entries[0])
        # Don't save the wrangle
        wrangle.wrangle(False)
        self.assertEqual(Post.objects.count(), 0)
        # We've already wrangled so nothing should happen!
        wrangle.wrangle()
        self.assertEqual(Post.objects.count(), 0)
        # Try making something happen directly
        wrangle.wrangle().save()
        self.assertEqual(Post.objects.count(), 1)
        # Toss in something else entirely
        wrangle = PostWrangler(self.entries[1])
        wrangle.wrangle()
        self.assertEqual(Post.objects.count(), 2)
    def test_feed_or_not(self):
        """
        Test can be saved with or without a feed
        """
        withfeed = PostWrangler(self.entries[0], feed=self.feed)
        nofeed = PostWrangler(self.entries[1])
        post = withfeed.wrangle()
        self.assertEqual(post.feed, self.feed)
        post = nofeed.wrangle()
        self.assertIsNone(post.feed)
    @mock.patch('baleen.wrangle.requests.get', side_effect=mocked_requests_get)
    def test_fetch_not_wrangled(self, mock_requests):
        """
        Assert that fetch requires wrangling
        """
        # Sanity check: the patch replaced the wrangle module's requests.get.
        assert mock_requests is requests.get
        wrangle = PostWrangler(self.entries[0], feed=self.feed)
        with self.assertRaises(FetchError):
            wrangle.fetch()
    @mock.patch('baleen.wrangle.requests.get', side_effect=mocked_requests_get)
    def test_fetch_overwrites_content(self, mock_requests):
        """
        Test that the fetch overwrites content.
        """
        assert mock_requests is requests.get
        wrangle = PostWrangler(self.entries[0], feed=self.feed)
        wrangle.wrangle()
        self.assertEqual(Post.objects.count(), 1)
        # Point the post at the URL our mocked requests.get answers with 200.
        wrangle.post.url = 'http://example.com/vader/'
        post = wrangle.fetch()
        self.assertEqual(Post.objects.count(), 1)
        # A successful fetch should have saved, bumping the updated timestamp.
        self.assertNotEqual(post.created, post.updated)
        self.assertEqual(post.content, "Luke, I am your father!")
    @mock.patch('baleen.wrangle.requests.get', side_effect=mocked_requests_get)
    def test_fetch_no_save(self, mock_requests):
        """
        Test that the fetch does not save on demand.
        """
        assert mock_requests is requests.get
        wrangle = PostWrangler(self.entries[0], feed=self.feed)
        wrangle.wrangle()
        self.assertEqual(Post.objects.count(), 1)
        wrangle.post.url = 'http://example.com/vader/'
        wrangle.fetch(save=False)
        self.assertEqual(Post.objects.count(), 1)
        # The stored document must be untouched since we skipped the save.
        post = Post.objects.first()
        self.assertDateTimeEqual(post.created, post.updated)
        self.assertNotEqual(post.content, "Luke, I am your father!")
    @mock.patch('baleen.wrangle.requests.get', side_effect=mocked_requests_get)
    def test_fetch_raises_404(self, mock_requests):
        """
        Test that fetch raises exception on HTTP error
        """
        assert mock_requests is requests.get
        wrangle = PostWrangler(self.entries[0], feed=self.feed)
        wrangle.wrangle()
        self.assertEqual(Post.objects.count(), 1)
        with self.assertRaises(FetchError):
            # Any URL other than the vader one gets a 404 from the mock.
            wrangle.post.url = 'http://example.com/obiwan/'
            wrangle.fetch()
| {
"content_hash": "63594a5d671c9a0722876262886fa3f0",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 79,
"avg_line_length": 32.04,
"alnum_prop": 0.58270911360799,
"repo_name": "janetriley/baleen",
"id": "ef62b222fc3747d0217a814ed92cd728a60c7925",
"size": "6718",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_wrangle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "750"
},
{
"name": "HTML",
"bytes": "15437"
},
{
"name": "Makefile",
"bytes": "747"
},
{
"name": "Python",
"bytes": "164334"
}
],
"symlink_target": ""
} |
import sys
import asyncio
import os
import types
import functools
from collections import namedtuple
from collections.abc import Iterator
import abc
from typing import List, Union, NamedTuple
import uuid
import yaml
import logging
log = logging.getLogger(__name__)
class Fire(NamedTuple):
    """Action telling a runner to fire with the given rate and duration."""
    rate: int
    duration: int
    supervisor: str


class Idle(NamedTuple):
    """Action telling a runner to do nothing this round."""


class Terminate(NamedTuple):
    """Action telling a runner to shut down."""


class RunnerHistoryItem(NamedTuple):
    """Runner success/failure counts recorded at time ``at``."""
    at: int
    succeeded: int
    failed: int
class LauncherResp(object):
    """A launcher's answer: the next action plus opaque launcher-private state."""

    def __init__(self, action: Union[Fire, Idle], opaque=None):
        # 'opaque' is carried back to the launcher on the next round via
        # RunnerContext.update, so launchers can keep state between calls.
        self.action = action
        self.opaque = opaque

    def _action_is(self, kind):
        # Shared implementation behind the three is_* predicates.
        return isinstance(self.action, kind)

    def is_fire(self):
        return self._action_is(Fire)

    def is_idle(self):
        return self._action_is(Idle)

    def is_terminate(self):
        return self._action_is(Terminate)
class RunnerContext(object):
    """Per-runner state handed to a Launcher on each round.

    Tracks the runner's host identifier, the history it reported, and the
    opaque value the launcher returned on the previous round.
    """

    def __init__(self, host: str, history: List[RunnerHistoryItem]):
        self.host = host
        # Tolerate None so callers may omit the history entirely.
        self._history = [] if history is None else history
        # Launcher-private state; filled in from the previous LauncherResp.
        self.opaque = None

    def update(self, prev_resp: LauncherResp, history: List[RunnerHistoryItem]):
        """Record the latest history and carry the launcher's opaque state forward."""
        self._history = history
        self.opaque = prev_resp.opaque

    @staticmethod
    def new_context(host: str = ''):
        """Create a fresh context; generates a unique host id when none is given."""
        if not host:
            host = uuid.uuid1().hex
        return RunnerContext(host, [])

    # Backward-compatible alias: 'new_conext' is the original (misspelled)
    # public name; existing callers keep working.
    new_conext = new_context
class Launcher(metaclass=abc.ABCMeta):
    """Strategy interface that decides what a runner should do next."""
    @abc.abstractmethod
    def ask_next(self, runner_ctx: RunnerContext) -> LauncherResp:
        """
        Return the next action for the asking runner.
        :param runner_ctx: current RunnerContext of the asking runner
        :return: a LauncherResp wrapping the chosen action
        """
class IdleLancher(Launcher):
    """Launcher that always answers with Idle. (NOTE: name misspells 'Launcher'.)"""
    def ask_next(self, runner_ctx: RunnerContext) -> LauncherResp:
        log.debug('runner_ctx:{}'.format(runner_ctx))
        return LauncherResp(Idle())
class OneShotLancher(Launcher):
    """Launcher that fires exactly once, then terminates.

    The opaque value doubles as a "has fired" marker: it is None on the
    first round and 1 on every round after that.
    (NOTE: class name misspells 'Launcher'.)
    """
    def ask_next(self, runner_ctx: RunnerContext) -> LauncherResp:
        log.debug('runner_ctx:{}'.format(runner_ctx.__dict__))
        if runner_ctx.opaque is not None:
            # Already fired on a previous round; tell the runner to stop.
            return LauncherResp(Terminate())
        # First round: fire once and mark the context through 'opaque'.
        return LauncherResp(Fire(rate=1, duration=1, supervisor=''), opaque=1)
| {
"content_hash": "21980c4c1e1c86a2731203b325bbe81c",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 80,
"avg_line_length": 27.802325581395348,
"alnum_prop": 0.6072772898368883,
"repo_name": "cfchou/choreography",
"id": "1240434eb70ba7d2bc17cdc0b377acaabec82c70",
"size": "2392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "choreography/launcher.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7348"
},
{
"name": "Shell",
"bytes": "449"
}
],
"symlink_target": ""
} |
from module import XMPPModule
class Help(XMPPModule):
    """XMPP module that answers '!help [module] [feature]' with help text."""

    def help(self, feature):
        """Return help for this module; it has no subfeatures of its own."""
        # Fix: compare to None with 'is not', not '!='.
        if feature is not None:
            return 'help: There is no such subfeature to the help module.'
        return 'help: The glorious module that provides you with this glorious help.'

    def handleMessage(self, msg):
        """Dispatch '!help' commands from incoming messages and reply."""
        cmds = msg['body'].split(' ')
        if cmds[0] != '!help':
            # Not addressed to us; ignore silently.
            return
        if len(cmds) == 1:
            message = self.topLevel()
        else:
            # Module names are registered capitalized.
            cmds[1] = cmds[1].capitalize()
            mod = self.xmpp.modules.get(cmds[1])
            # Explicit None check so a falsy-but-loaded module still matches.
            if mod is not None:
                # Hand through the requested subfeature, or None for general help.
                message = mod.help(cmds[2] if len(cmds) > 2 else None)
            else:
                message = 'There is no such module loaded.'
        self.xmpp.reply(msg, message)

    def topLevel(self):
        """Return the top-level help text listing all loaded modules."""
        s = 'halibot help module\n\nUsage: !help [module] [feature]\n\nModules loaded: '
        return s + ", ".join(self.xmpp.modules.keys())
| {
"content_hash": "ded4699bb1d8286f802434f57fb6a383",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 82,
"avg_line_length": 26.818181818181817,
"alnum_prop": 0.632768361581921,
"repo_name": "Halibot/halibot-legacy",
"id": "63dc75244e0a6e460cbf6a46e46a9ce5224ca981",
"size": "885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/core/help.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "14909"
}
],
"symlink_target": ""
} |
"""Smoke test: write this process's pid to a temp file and read it back."""
import os
import tempfile

pid1 = str(os.getpid())
print("pid1 = %s" % pid1)
# Temp file named after the pid, e.g. /tmp/12345.
filename = os.path.join(tempfile.gettempdir(), pid1)
print("filename = %s" % filename)
# 'with' guarantees the handle is closed even if the write fails
# (the original left the read handle open forever).
with open(filename, "w") as fileh:
    fileh.write(pid1)
with open(filename, "r") as fileh:
    pid2 = fileh.read()
if pid1 == pid2:
    print("success")
else:
    print("failure")
| {
"content_hash": "743b49a24ba9725a0578fe6f331331a5",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 52,
"avg_line_length": 16.85,
"alnum_prop": 0.6646884272997032,
"repo_name": "jtraver/dev",
"id": "dbd0c9b959976110d9f6e5eb28e656aab1f3a762",
"size": "337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pid/pid1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AppleScript",
"bytes": "1116"
},
{
"name": "C",
"bytes": "8043"
},
{
"name": "Dockerfile",
"bytes": "620"
},
{
"name": "Go",
"bytes": "5703"
},
{
"name": "HTML",
"bytes": "152060"
},
{
"name": "Java",
"bytes": "155984"
},
{
"name": "JavaScript",
"bytes": "43306"
},
{
"name": "Jinja",
"bytes": "60"
},
{
"name": "Module Management System",
"bytes": "2029"
},
{
"name": "Perl",
"bytes": "265592"
},
{
"name": "Python",
"bytes": "1242700"
},
{
"name": "Scheme",
"bytes": "4109"
},
{
"name": "Shell",
"bytes": "112285"
}
],
"symlink_target": ""
} |
import itertools
import logging
import multiprocessing
import os
import signal
import sys
import time
import warnings
from collections import defaultdict
from datetime import timedelta
from typing import Collection, DefaultDict, Dict, Iterator, List, Optional, Set, Tuple
from sqlalchemy import func, not_, or_, text
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import load_only, selectinload
from sqlalchemy.orm.session import Session, make_transient
from airflow import models, settings
from airflow.callbacks.callback_requests import DagCallbackRequest, SlaCallbackRequest, TaskCallbackRequest
from airflow.callbacks.database_callback_sink import DatabaseCallbackSink
from airflow.callbacks.pipe_callback_sink import PipeCallbackSink
from airflow.configuration import conf
from airflow.dag_processing.manager import DagFileProcessorAgent
from airflow.executors.executor_loader import UNPICKLEABLE_EXECUTORS
from airflow.jobs.base_job import BaseJob
from airflow.jobs.local_task_job import LocalTaskJob
from airflow.models import DAG
from airflow.models.dag import DagModel
from airflow.models.dagbag import DagBag
from airflow.models.dagrun import DagRun
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import SimpleTaskInstance, TaskInstance, TaskInstanceKey
from airflow.stats import Stats
from airflow.ti_deps.dependencies_states import EXECUTION_STATES
from airflow.utils import timezone
from airflow.utils.docs import get_docs_url
from airflow.utils.event_scheduler import EventScheduler
from airflow.utils.retries import MAX_DB_RETRIES, retry_db_transaction, run_with_db_retries
from airflow.utils.session import create_session, provide_session
from airflow.utils.sqlalchemy import (
is_lock_not_available_error,
prohibit_commit,
skip_locked,
tuple_in_condition,
with_row_locks,
)
from airflow.utils.state import DagRunState, State, TaskInstanceState
from airflow.utils.types import DagRunType
# Short aliases for the ORM models used throughout the scheduler queries below.
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
def _is_parent_process():
"""
Returns True if the current process is the parent process. False if the current process is a child
process started by multiprocessing.
"""
return multiprocessing.current_process().name == 'MainProcess'
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:param num_runs: The number of times to run the scheduling loop. If you
have a large number of DAG files this could complete before each file
has been parsed. -1 for unlimited times.
:param num_times_parse_dags: The number of times to try to parse each DAG file.
-1 for unlimited times.
:param scheduler_idle_sleep_time: The number of seconds to wait between
polls of running processors
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:param log: override the default Logger
"""
__mapper_args__ = {'polymorphic_identity': 'SchedulerJob'}
heartrate: int = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
    def __init__(
        self,
        subdir: str = settings.DAGS_FOLDER,
        num_runs: int = conf.getint('scheduler', 'num_runs'),
        num_times_parse_dags: int = -1,
        scheduler_idle_sleep_time: float = conf.getfloat('scheduler', 'scheduler_idle_sleep_time'),
        do_pickle: bool = False,
        log: Optional[logging.Logger] = None,
        processor_poll_interval: Optional[float] = None,
        *args,
        **kwargs,
    ):
        """Set up the scheduler job; see the class docstring for parameter details."""
        self.subdir = subdir
        self.num_runs = num_runs
        # In specific tests, we want to stop the parse loop after the _files_ have been parsed a certain
        # number of times. This is only to support testing, and isn't something a user is likely to want to
        # configure -- they'll want num_runs
        self.num_times_parse_dags = num_times_parse_dags
        if processor_poll_interval:
            # TODO: Remove in Airflow 3.0
            warnings.warn(
                "The 'processor_poll_interval' parameter is deprecated. "
                "Please use 'scheduler_idle_sleep_time'.",
                DeprecationWarning,
                stacklevel=2,
            )
            # The deprecated parameter, when given, overrides the new one.
            scheduler_idle_sleep_time = processor_poll_interval
        self._scheduler_idle_sleep_time = scheduler_idle_sleep_time
        # How many seconds do we wait for tasks to heartbeat before mark them as zombies.
        self._zombie_threshold_secs = conf.getint('scheduler', 'scheduler_zombie_task_threshold')
        self._standalone_dag_processor = conf.getboolean("scheduler", "standalone_dag_processor")
        self.do_pickle = do_pickle
        super().__init__(*args, **kwargs)
        # Allow callers (e.g. tests) to inject their own logger.
        if log:
            self._log = log
        # Check what SQL backend we use
        sql_conn: str = conf.get_mandatory_value('database', 'sql_alchemy_conn').lower()
        self.using_sqlite = sql_conn.startswith('sqlite')
        self.using_mysql = sql_conn.startswith('mysql')
        # Dag Processor agent - not used in Dag Processor standalone mode.
        self.processor_agent: Optional[DagFileProcessorAgent] = None
        self.dagbag = DagBag(dag_folder=self.subdir, read_dags_from_db=True, load_op_links=False)
        # Presumably tracks paused dags needing dagrun cleanup -- populated elsewhere; TODO confirm.
        self._paused_dag_without_running_dagruns: Set = set()
        if conf.getboolean('smart_sensor', 'use_smart_sensor'):
            compatible_sensors = set(
                map(
                    lambda l: l.strip(),
                    conf.get_mandatory_value('smart_sensor', 'sensors_enabled').split(','),
                )
            )
            docs_url = get_docs_url('concepts/smart-sensors.html#migrating-to-deferrable-operators')
            warnings.warn(
                f'Smart sensors are deprecated, yet can be used for {compatible_sensors} sensors.'
                f' Please use Deferrable Operators instead. See {docs_url} for more info.',
                DeprecationWarning,
            )
def register_signals(self) -> None:
"""Register signals that stop child processes"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
signal.signal(signal.SIGUSR2, self._debug_dump)
    def _exit_gracefully(self, signum, frame) -> None:
        """Helper method to clean up processor_agent to avoid leaving orphan processes."""
        if not _is_parent_process():
            # Only the parent process should perform the cleanup.
            return
        self.log.info("Exiting gracefully upon receiving signal %s", signum)
        if self.processor_agent:
            self.processor_agent.end()
        # os.EX_OK == 0: report a clean shutdown.
        sys.exit(os.EX_OK)
    def _debug_dump(self, signum, frame):
        """Signal handler (registered for SIGUSR2) that logs the executor's debug dump."""
        if not _is_parent_process():
            # Only the parent process should perform the debug dump.
            return
        try:
            sig_name = signal.Signals(signum).name
        except Exception:
            # Fall back to the raw number if the signal has no symbolic name.
            sig_name = str(signum)
        self.log.info("%s\n%s received, printing debug\n%s", "-" * 80, sig_name, "-" * 80)
        self.executor.debug_dump()
        self.log.info("-" * 80)
def is_alive(self, grace_multiplier: Optional[float] = None) -> bool:
"""
Is this SchedulerJob alive?
We define alive as in a state of running and a heartbeat within the
threshold defined in the ``scheduler_health_check_threshold`` config
setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
:rtype: boolean
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super().is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold: int = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING
and (timezone.utcnow() - self.latest_heartbeat).total_seconds() < scheduler_health_check_threshold
)
@provide_session
def __get_concurrency_maps(
self, states: List[TaskInstanceState], session: Session = None
) -> Tuple[DefaultDict[str, int], DefaultDict[Tuple[str, str], int]]:
"""
Get the concurrency maps.
:param states: List of states to query for
:return: A map from (dag_id, task_id) to # of task instances and
a map from (dag_id, task_id) to # of task instances in the given state list
:rtype: tuple[dict[str, int], dict[tuple[str, str], int]]
"""
ti_concurrency_query: List[Tuple[str, str, int]] = (
session.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
dag_map: DefaultDict[str, int] = defaultdict(int)
task_map: DefaultDict[Tuple[str, str], int] = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
dag_map[dag_id] += count
task_map[(dag_id, task_id)] = count
return dag_map, task_map
    @provide_session
    def _executable_task_instances_to_queued(self, max_tis: int, session: Session = None) -> List[TI]:
        """
        Finds TIs that are ready for execution with respect to pool limits,
        dag max_active_tasks, executor state, and priority, flips their state
        to QUEUED in the database, and returns them.
        :param max_tis: Maximum number of TIs to queue in this loop.
        :return: list[airflow.models.TaskInstance]
        """
        from airflow.utils.db import DBLocks
        executable_tis: List[TI] = []
        if session.get_bind().dialect.name == "postgresql":
            # Optimization: to avoid littering the DB errors of "ERROR: canceling statement due to lock
            # timeout", try to take out a transactional advisory lock (unlocks automatically on
            # COMMIT/ROLLBACK)
            lock_acquired = session.execute(
                text("SELECT pg_try_advisory_xact_lock(:id)").bindparams(
                    id=DBLocks.SCHEDULER_CRITICAL_SECTION.value
                )
            ).scalar()
            if not lock_acquired:
                # Throw an error like the one that would happen with NOWAIT
                raise OperationalError(
                    "Failed to acquire advisory lock", params=None, orig=RuntimeError('55P03')
                )
        # Get the pool settings. We get a lock on the pool rows, treating this as a "critical section"
        # Throws an exception if lock cannot be obtained, rather than blocking
        pools = models.Pool.slots_stats(lock_rows=True, session=session)
        # If the pools are full, there is no point doing anything!
        # If _somehow_ the pool is overfull, don't let the limit go negative - it breaks SQL
        pool_slots_free = sum(max(0, pool['open']) for pool in pools.values())
        if pool_slots_free == 0:
            self.log.debug("All pools are full!")
            return []
        # Never ask for more TIs than can actually fit in the open pool slots.
        max_tis = min(max_tis, pool_slots_free)
        starved_pools = {pool_name for pool_name, stats in pools.items() if stats['open'] <= 0}
        # dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
        dag_active_tasks_map: DefaultDict[str, int]
        task_concurrency_map: DefaultDict[Tuple[str, str], int]
        dag_active_tasks_map, task_concurrency_map = self.__get_concurrency_maps(
            states=list(EXECUTION_STATES), session=session
        )
        num_tasks_in_executor = 0
        # Number of tasks that cannot be scheduled because of no open slot in pool
        num_starving_tasks_total = 0
        # dag and task ids that can't be queued because of concurrency limits
        starved_dags: Set[str] = set()
        starved_tasks: Set[Tuple[str, str]] = set()
        pool_num_starving_tasks: DefaultDict[str, int] = defaultdict(int)
        # Re-query with progressively stronger exclusion filters until we either
        # fill max_tis, exhaust the candidates, or learn nothing new.
        for loop_count in itertools.count(start=1):
            # Snapshot the filter sizes so we can tell if this iteration added any.
            num_starved_pools = len(starved_pools)
            num_starved_dags = len(starved_dags)
            num_starved_tasks = len(starved_tasks)
            # Get task instances associated with scheduled
            # DagRuns which are not backfilled, in the given states,
            # and the dag is not paused
            query = (
                session.query(TI)
                .join(TI.dag_run)
                .filter(DR.run_type != DagRunType.BACKFILL_JOB, DR.state == DagRunState.RUNNING)
                .join(TI.dag_model)
                .filter(not_(DM.is_paused))
                .filter(TI.state == TaskInstanceState.SCHEDULED)
                .options(selectinload('dag_model'))
                .order_by(-TI.priority_weight, DR.execution_date)
            )
            if starved_pools:
                query = query.filter(not_(TI.pool.in_(starved_pools)))
            if starved_dags:
                query = query.filter(not_(TI.dag_id.in_(starved_dags)))
            if starved_tasks:
                task_filter = tuple_in_condition((TaskInstance.dag_id, TaskInstance.task_id), starved_tasks)
                query = query.filter(not_(task_filter))
            query = query.limit(max_tis)
            # Row-lock the candidates so concurrent schedulers skip them.
            task_instances_to_examine: List[TI] = with_row_locks(
                query,
                of=TI,
                session=session,
                **skip_locked(session=session),
            ).all()
            # TODO[HA]: This was wrong before anyway, as it only looked at a sub-set of dags, not everything.
            # Stats.gauge('scheduler.tasks.pending', len(task_instances_to_examine))
            if len(task_instances_to_examine) == 0:
                self.log.debug("No tasks to consider for execution.")
                break
            # Put one task instance on each line
            task_instance_str = "\n\t".join(repr(x) for x in task_instances_to_examine)
            self.log.info(
                "%s tasks up for execution:\n\t%s", len(task_instances_to_examine), task_instance_str
            )
            for task_instance in task_instances_to_examine:
                pool_name = task_instance.pool
                pool_stats = pools.get(pool_name)
                if not pool_stats:
                    self.log.warning("Tasks using non-existent pool '%s' will not be scheduled", pool_name)
                    starved_pools.add(pool_name)
                    continue
                # Make sure to emit metrics if pool has no starving tasks
                pool_num_starving_tasks.setdefault(pool_name, 0)
                pool_total = pool_stats["total"]
                open_slots = pool_stats["open"]
                if open_slots <= 0:
                    self.log.info(
                        "Not scheduling since there are %s open slots in pool %s", open_slots, pool_name
                    )
                    # Can't schedule any more since there are no more open slots.
                    pool_num_starving_tasks[pool_name] += 1
                    num_starving_tasks_total += 1
                    starved_pools.add(pool_name)
                    continue
                # A task demanding more slots than the pool's total can never run.
                if task_instance.pool_slots > pool_total:
                    self.log.warning(
                        "Not executing %s. Requested pool slots (%s) are greater than "
                        "total pool slots: '%s' for pool: %s.",
                        task_instance,
                        task_instance.pool_slots,
                        pool_total,
                        pool_name,
                    )
                    pool_num_starving_tasks[pool_name] += 1
                    num_starving_tasks_total += 1
                    starved_tasks.add((task_instance.dag_id, task_instance.task_id))
                    continue
                if task_instance.pool_slots > open_slots:
                    self.log.info(
                        "Not executing %s since it requires %s slots "
                        "but there are %s open slots in the pool %s.",
                        task_instance,
                        task_instance.pool_slots,
                        open_slots,
                        pool_name,
                    )
                    pool_num_starving_tasks[pool_name] += 1
                    num_starving_tasks_total += 1
                    starved_tasks.add((task_instance.dag_id, task_instance.task_id))
                    # Though we can execute tasks with lower priority if there's enough room
                    continue
                # Check to make sure that the task max_active_tasks of the DAG hasn't been
                # reached.
                dag_id = task_instance.dag_id
                current_active_tasks_per_dag = dag_active_tasks_map[dag_id]
                max_active_tasks_per_dag_limit = task_instance.dag_model.max_active_tasks
                self.log.info(
                    "DAG %s has %s/%s running and queued tasks",
                    dag_id,
                    current_active_tasks_per_dag,
                    max_active_tasks_per_dag_limit,
                )
                if current_active_tasks_per_dag >= max_active_tasks_per_dag_limit:
                    self.log.info(
                        "Not executing %s since the number of tasks running or queued "
                        "from DAG %s is >= to the DAG's max_active_tasks limit of %s",
                        task_instance,
                        dag_id,
                        max_active_tasks_per_dag_limit,
                    )
                    starved_dags.add(dag_id)
                    continue
                if task_instance.dag_model.has_task_concurrency_limits:
                    # Many dags don't have a task_concurrency, so where we can avoid loading the full
                    # serialized DAG the better.
                    serialized_dag = self.dagbag.get_dag(dag_id, session=session)
                    # If the dag is missing, fail the task and continue to the next task.
                    if not serialized_dag:
                        self.log.error(
                            "DAG '%s' for task instance %s not found in serialized_dag table",
                            dag_id,
                            task_instance,
                        )
                        session.query(TI).filter(TI.dag_id == dag_id, TI.state == State.SCHEDULED).update(
                            {TI.state: State.FAILED}, synchronize_session='fetch'
                        )
                        continue
                    task_concurrency_limit: Optional[int] = None
                    if serialized_dag.has_task(task_instance.task_id):
                        task_concurrency_limit = serialized_dag.get_task(
                            task_instance.task_id
                        ).max_active_tis_per_dag
                    if task_concurrency_limit is not None:
                        current_task_concurrency = task_concurrency_map[
                            (task_instance.dag_id, task_instance.task_id)
                        ]
                        if current_task_concurrency >= task_concurrency_limit:
                            self.log.info(
                                "Not executing %s since the task concurrency for"
                                " this task has been reached.",
                                task_instance,
                            )
                            starved_tasks.add((task_instance.dag_id, task_instance.task_id))
                            continue
                # All checks passed: accept the TI and charge its slots against
                # the in-memory pool and concurrency accounting.
                executable_tis.append(task_instance)
                open_slots -= task_instance.pool_slots
                dag_active_tasks_map[dag_id] += 1
                task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
                pool_stats["open"] = open_slots
            # Done if we accepted anything, or the last query was not truncated by the limit.
            is_done = executable_tis or len(task_instances_to_examine) < max_tis
            # Check this to avoid accidental infinite loops
            found_new_filters = (
                len(starved_pools) > num_starved_pools
                or len(starved_dags) > num_starved_dags
                or len(starved_tasks) > num_starved_tasks
            )
            if is_done or not found_new_filters:
                break
            self.log.debug(
                "Found no task instances to queue on the %s. iteration "
                "but there could be more candidate task instances to check.",
                loop_count,
            )
        # Emit starvation/executable metrics for this scheduling pass.
        for pool_name, num_starving_tasks in pool_num_starving_tasks.items():
            Stats.gauge(f'pool.starving_tasks.{pool_name}', num_starving_tasks)
        Stats.gauge('scheduler.tasks.starving', num_starving_tasks_total)
        Stats.gauge('scheduler.tasks.running', num_tasks_in_executor)
        Stats.gauge('scheduler.tasks.executable', len(executable_tis))
        if len(executable_tis) > 0:
            task_instance_str = "\n\t".join(repr(x) for x in executable_tis)
            self.log.info("Setting the following tasks to queued state:\n\t%s", task_instance_str)
            # set TIs to queued state
            filter_for_tis = TI.filter_for_tis(executable_tis)
            session.query(TI).filter(filter_for_tis).update(
                # TODO[ha]: should we use func.now()? How does that work with DB timezone
                # on mysql when it's not UTC?
                {
                    TI.state: TaskInstanceState.QUEUED,
                    TI.queued_dttm: timezone.utcnow(),
                    TI.queued_by_job_id: self.id,
                },
                synchronize_session=False,
            )
            # Detach the ORM objects so later session state changes can't touch them.
            for ti in executable_tis:
                make_transient(ti)
        return executable_tis
@provide_session
def _enqueue_task_instances_with_queued_state(
self, task_instances: List[TI], session: Session = None
) -> None:
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param task_instances: TaskInstances to enqueue
:param session: The session object
"""
# actually enqueue them
for ti in task_instances:
if ti.dag_run.state in State.finished:
ti.set_state(State.NONE, session=session)
continue
command = ti.command_as_list(
local=True,
pickle_id=ti.dag_model.pickle_id,
)
priority = ti.priority_weight
queue = ti.queue
self.log.info("Sending %s to executor with priority %s and queue %s", ti.key, priority, queue)
self.executor.queue_command(
ti,
command,
priority=priority,
queue=queue,
)
def _critical_section_enqueue_task_instances(self, session: Session) -> int:
"""
Enqueues TaskInstances for execution.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
and that we do exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
HA note: This function is a "critical section" meaning that only a single executor process can execute
this function at the same time. This is achieved by doing ``SELECT ... from pool FOR UPDATE``. For DBs
that support NOWAIT, a "blocked" scheduler will skip this and continue on with other tasks (creating
new DAG runs, progressing TIs from None to SCHEDULED etc.); DBs that don't support this (such as
MariaDB or MySQL 5.x) the other schedulers will wait for the lock before continuing.
:param session:
:return: Number of task instance with state changed.
"""
if self.max_tis_per_query == 0:
max_tis = self.executor.slots_available
else:
max_tis = min(self.max_tis_per_query, self.executor.slots_available)
queued_tis = self._executable_task_instances_to_queued(max_tis, session=session)
self._enqueue_task_instances_with_queued_state(queued_tis, session=session)
return len(queued_tis)
@provide_session
def _process_executor_events(self, session: Session = None) -> int:
    """
    Respond to executor events.

    Reads the executor's event buffer, records external executor ids for QUEUED
    events, logs finished TIs, and detects TIs the executor reports finished
    while the DB still says QUEUED (likely killed externally) — those are failed
    or handed to their failure/retry callback.

    :param session: DB session (injected by ``@provide_session``)
    :return: number of events that were in the buffer
    """
    if not self._standalone_dag_processor and not self.processor_agent:
        raise ValueError("Processor agent is not started.")
    ti_primary_key_to_try_number_map: Dict[Tuple[str, str, str, int], int] = {}
    event_buffer = self.executor.get_event_buffer()
    tis_with_right_state: List[TaskInstanceKey] = []

    # Report execution
    for ti_key, value in event_buffer.items():
        state: str
        state, _ = value
        # We create map (dag_id, task_id, execution_date) -> in-memory try_number
        ti_primary_key_to_try_number_map[ti_key.primary] = ti_key.try_number

        self.log.info(
            "Executor reports execution of %s.%s run_id=%s exited with status %s for try_number %s",
            ti_key.dag_id,
            ti_key.task_id,
            ti_key.run_id,
            state,
            ti_key.try_number,
        )
        if state in (TaskInstanceState.FAILED, TaskInstanceState.SUCCESS, TaskInstanceState.QUEUED):
            tis_with_right_state.append(ti_key)

    # Return if no finished tasks
    if not tis_with_right_state:
        return len(event_buffer)

    # Check state of finished tasks
    filter_for_tis = TI.filter_for_tis(tis_with_right_state)
    query = session.query(TI).filter(filter_for_tis).options(selectinload('dag_model'))
    # row lock this entire set of taskinstances to make sure the scheduler doesn't fail when we have
    # multi-schedulers
    tis: Iterator[TI] = with_row_locks(
        query,
        of=TI,
        session=session,
        **skip_locked(session=session),
    )
    for ti in tis:
        try_number = ti_primary_key_to_try_number_map[ti.key.primary]
        buffer_key = ti.key.with_try_number(try_number)
        state, info = event_buffer.pop(buffer_key)

        # A QUEUED event only carries the executor-assigned external id; store it and move on.
        if state == TaskInstanceState.QUEUED:
            ti.external_executor_id = info
            self.log.info("Setting external_id for %s to %s", ti, info)
            continue

        msg = (
            "TaskInstance Finished: dag_id=%s, task_id=%s, run_id=%s, map_index=%s, "
            "run_start_date=%s, run_end_date=%s, "
            "run_duration=%s, state=%s, executor_state=%s, try_number=%s, max_tries=%s, job_id=%s, "
            "pool=%s, queue=%s, priority_weight=%d, operator=%s, queued_dttm=%s, "
            "queued_by_job_id=%s, pid=%s"
        )
        self.log.info(
            msg,
            ti.dag_id,
            ti.task_id,
            ti.run_id,
            ti.map_index,
            ti.start_date,
            ti.end_date,
            ti.duration,
            ti.state,
            state,
            try_number,
            ti.max_tries,
            ti.job_id,
            ti.pool,
            ti.queue,
            ti.priority_weight,
            ti.operator,
            ti.queued_dttm,
            ti.queued_by_job_id,
            ti.pid,
        )

        # There are two scenarios why the same TI with the same try_number is queued
        # after executor is finished with it:
        # 1) the TI was killed externally and it had no time to mark itself failed
        # - in this case we should mark it as failed here.
        # 2) the TI has been requeued after getting deferred - in this case either our executor has it
        # or the TI is queued by another job. Either ways we should not fail it.
        # All of this could also happen if the state is "running",
        # but that is handled by the zombie detection.
        ti_queued = ti.try_number == buffer_key.try_number and ti.state == TaskInstanceState.QUEUED
        ti_requeued = ti.queued_by_job_id != self.id or self.executor.has_task(ti)

        if ti_queued and not ti_requeued:
            Stats.incr('scheduler.tasks.killed_externally')
            msg = (
                "Executor reports task instance %s finished (%s) although the "
                "task says its %s. (Info: %s) Was the task killed externally?"
            )
            self.log.error(msg, ti, state, ti.state, info)

            # Get task from the Serialized DAG
            try:
                dag = self.dagbag.get_dag(ti.dag_id)
                task = dag.get_task(ti.task_id)
            except Exception:
                # Can't resolve the task (DAG gone/changed): just force the reported state.
                self.log.exception("Marking task instance %s as %s", ti, state)
                ti.set_state(state)
                continue
            ti.task = task
            if task.on_retry_callback or task.on_failure_callback:
                # Let the callback machinery run the failure handling.
                request = TaskCallbackRequest(
                    full_filepath=ti.dag_model.fileloc,
                    simple_task_instance=SimpleTaskInstance.from_ti(ti),
                    msg=msg % (ti, state, ti.state, info),
                )
                self.executor.send_callback(request)
            else:
                ti.handle_failure(error=msg % (ti, state, ti.state, info), session=session)

    return len(event_buffer)
def _execute(self) -> None:
    """
    Run the scheduler job: set up the DAG file processor agent (unless a
    standalone processor is used), start the executor, run the main scheduling
    loop, then tear everything down — ending the executor and agent even when
    the loop raised.
    """
    self.log.info("Starting the scheduler")

    # DAGs can be pickled for easier remote execution by some executors
    pickle_dags = self.do_pickle and self.executor_class not in UNPICKLEABLE_EXECUTORS

    self.log.info("Processing each file at most %s times", self.num_times_parse_dags)

    # When using sqlite, we do not use async_mode
    # so the scheduler job and DAG parser don't access the DB at the same time.
    async_mode = not self.using_sqlite

    processor_timeout_seconds: int = conf.getint('core', 'dag_file_processor_timeout')
    processor_timeout = timedelta(seconds=processor_timeout_seconds)
    if not self._standalone_dag_processor:
        self.processor_agent = DagFileProcessorAgent(
            dag_directory=self.subdir,
            max_runs=self.num_times_parse_dags,
            processor_timeout=processor_timeout,
            dag_ids=[],
            pickle_dags=pickle_dags,
            async_mode=async_mode,
        )

    try:
        self.executor.job_id = self.id
        # Callback sink choice: pipe to the in-process agent when we own one,
        # otherwise write callbacks to the database for the standalone processor.
        if self.processor_agent:
            self.log.debug("Using PipeCallbackSink as callback sink.")
            self.executor.callback_sink = PipeCallbackSink(
                get_sink_pipe=self.processor_agent.get_callbacks_pipe
            )
        else:
            self.log.debug("Using DatabaseCallbackSink as callback sink.")
            self.executor.callback_sink = DatabaseCallbackSink()

        self.executor.start()

        self.register_signals()

        if self.processor_agent:
            self.processor_agent.start()

        execute_start_time = timezone.utcnow()

        self._run_scheduler_loop()

        if self.processor_agent:
            # Stop any processors
            self.processor_agent.terminate()

            # Verify that all files were processed, and if so, deactivate DAGs that
            # haven't been touched by the scheduler as they likely have been
            # deleted.
            if self.processor_agent.all_files_processed:
                self.log.info(
                    "Deactivating DAGs that haven't been touched since %s", execute_start_time.isoformat()
                )
                models.DAG.deactivate_stale_dags(execute_start_time)

        settings.Session.remove()  # type: ignore
    except Exception:
        self.log.exception("Exception when executing SchedulerJob._run_scheduler_loop")
        raise
    finally:
        # Teardown is best-effort: failures here are logged but never mask the
        # original exception from the scheduler loop.
        try:
            self.executor.end()
        except Exception:
            self.log.exception("Exception when executing Executor.end")
        if self.processor_agent:
            try:
                self.processor_agent.end()
            except Exception:
                self.log.exception("Exception when executing DagFileProcessorAgent.end")
        self.log.info("Exited execute loop")
def _update_dag_run_state_for_paused_dags(self):
    """
    For each paused DAG, update the state of its RUNNING DagRuns (and dispatch
    any resulting callbacks). Each DAG is handled at most once; handled ids are
    remembered in ``self._paused_dag_without_running_dagruns``.
    """
    try:
        paused_dag_ids = DagModel.get_all_paused_dag_ids()
        for dag_id in paused_dag_ids:
            # Already processed on a previous invocation — skip.
            if dag_id in self._paused_dag_without_running_dagruns:
                continue

            dag = SerializedDagModel.get_dag(dag_id)
            if dag is None:
                continue
            dag_runs = DagRun.find(dag_id=dag_id, state=State.RUNNING)
            for dag_run in dag_runs:
                dag_run.dag = dag
                _, callback_to_run = dag_run.update_state(execute_callbacks=False)
                if callback_to_run:
                    self._send_dag_callbacks_to_processor(dag, callback_to_run)
            self._paused_dag_without_running_dagruns.add(dag_id)
    except Exception as e:  # should not fail the scheduler
        self.log.exception('Failed to update dag run state for paused dags due to %s', str(e))
def _run_scheduler_loop(self) -> None:
    """
    The actual scheduler loop. The main steps in the loop are:

    #. Harvest DAG parsing results through DagFileProcessorAgent
    #. Find and queue executable tasks

       #. Change task instance state in DB
       #. Queue tasks in executor

    #. Heartbeat executor

       #. Execute queued tasks in executor asynchronously
       #. Sync on the states of running tasks

    Following is a graphic representation of these steps.

    .. image:: ../docs/apache-airflow/img/scheduler_loop.jpg

    :rtype: None
    """
    if not self.processor_agent and not self._standalone_dag_processor:
        raise ValueError("Processor agent is not started.")
    is_unit_test: bool = conf.getboolean('core', 'unit_test_mode')

    timers = EventScheduler()

    # Check on start up, then every configured interval
    self.adopt_or_reset_orphaned_tasks()

    timers.call_regular_interval(
        conf.getfloat('scheduler', 'orphaned_tasks_check_interval', fallback=300.0),
        self.adopt_or_reset_orphaned_tasks,
    )

    timers.call_regular_interval(
        conf.getfloat('scheduler', 'trigger_timeout_check_interval', fallback=15.0),
        self.check_trigger_timeouts,
    )

    timers.call_regular_interval(
        conf.getfloat('scheduler', 'pool_metrics_interval', fallback=5.0),
        self._emit_pool_metrics,
    )

    timers.call_regular_interval(
        conf.getfloat('scheduler', 'zombie_detection_interval', fallback=10.0),
        self._find_zombies,
    )

    timers.call_regular_interval(60.0, self._update_dag_run_state_for_paused_dags)

    for loop_count in itertools.count(start=1):
        with Stats.timer() as timer:

            if self.using_sqlite and self.processor_agent:
                self.processor_agent.run_single_parsing_loop()
                # For the sqlite case w/ 1 thread, wait until the processor
                # is finished to avoid concurrent access to the DB.
                self.log.debug("Waiting for processors to finish since we're using sqlite")
                self.processor_agent.wait_until_finished()

            with create_session() as session:
                num_queued_tis = self._do_scheduling(session)

                self.executor.heartbeat()
                session.expunge_all()
                num_finished_events = self._process_executor_events(session=session)
            if self.processor_agent:
                self.processor_agent.heartbeat()

            # Heartbeat the scheduler periodically
            self.heartbeat(only_if_necessary=True)

            # Run any pending timed events
            next_event = timers.run(blocking=False)
            self.log.debug("Next timed event is in %f", next_event)

        self.log.debug("Ran scheduling loop in %.2f seconds", timer.duration)

        if not is_unit_test and not num_queued_tis and not num_finished_events:
            # If the scheduler is doing things, don't sleep. This means when there is work to do, the
            # scheduler will run "as quick as possible", but when it's stopped, it can sleep, dropping CPU
            # usage when "idle"
            time.sleep(min(self._scheduler_idle_sleep_time, next_event if next_event else 0))

        if loop_count >= self.num_runs > 0:
            self.log.info(
                "Exiting scheduler loop as requested number of runs (%d - got to %d) has been reached",
                self.num_runs,
                loop_count,
            )
            break
        if self.processor_agent and self.processor_agent.done:
            self.log.info(
                "Exiting scheduler loop as requested DAG parse count (%d) has been reached after %d"
                " scheduler loops",
                self.num_times_parse_dags,
                loop_count,
            )
            break
def _do_scheduling(self, session) -> int:
    """
    This function is where the main scheduling decisions take places. It:

    - Creates any necessary DAG runs by examining the next_dagrun_create_after column of DagModel

      Since creating Dag Runs is a relatively time consuming process, we select only 10 dags by default
      (configurable via ``scheduler.max_dagruns_to_create_per_loop`` setting) - putting this higher will
      mean one scheduler could spend a chunk of time creating dag runs, and not ever get around to
      scheduling tasks.

    - Finds the "next n oldest" running DAG Runs to examine for scheduling (n=20 by default, configurable
      via ``scheduler.max_dagruns_per_loop_to_schedule`` config setting) and tries to progress state (TIs
      to SCHEDULED, or DagRuns to SUCCESS/FAILURE etc)

      By "next oldest", we mean hasn't been examined/scheduled in the most time.

      We don't select all dagruns at once, because the rows are selected with row locks, meaning
      that only one scheduler can "process them", even it is waiting behind other dags. Increasing this
      limit will allow more throughput for smaller DAGs but will likely slow down throughput for larger
      (>500 tasks.) DAGs

    - Then, via a Critical Section (locking the rows of the Pool model) we queue tasks, and then send them
      to the executor.

      See docs of _critical_section_enqueue_task_instances for more.

    :param session: DB session; commits happen only through the guard below
    :return: Number of TIs enqueued in this iteration
    :rtype: int
    """
    # Put a check in place to make sure we don't commit unexpectedly
    with prohibit_commit(session) as guard:

        if settings.USE_JOB_SCHEDULE:
            self._create_dagruns_for_dags(guard, session)

        self._start_queued_dagruns(session)
        guard.commit()
        dag_runs = self._get_next_dagruns_to_examine(DagRunState.RUNNING, session)
        # Bulk fetch the currently active dag runs for the dags we are
        # examining, rather than making one query per DagRun

        callback_tuples = []
        for dag_run in dag_runs:
            callback_to_run = self._schedule_dag_run(dag_run, session)
            callback_tuples.append((dag_run, callback_to_run))

        guard.commit()

    # Send the callbacks after we commit to ensure the context is up to date when it gets run
    for dag_run, callback_to_run in callback_tuples:
        dag = self.dagbag.get_dag(dag_run.dag_id, session=session)
        if not dag:
            self.log.error("DAG '%s' not found in serialized_dag table", dag_run.dag_id)
            continue

        # Sending callbacks there as in standalone_dag_processor they are adding to the database,
        # so it must be done outside of prohibit_commit.
        self._send_dag_callbacks_to_processor(dag, callback_to_run)

    with prohibit_commit(session) as guard:
        # Without this, the session has an invalid view of the DB
        session.expunge_all()
        # END: schedule TIs

        if self.executor.slots_available <= 0:
            # We know we can't do anything here, so don't even try!
            self.log.debug("Executor full, skipping critical section")
            num_queued_tis = 0
        else:
            try:
                timer = Stats.timer('scheduler.critical_section_duration')
                timer.start()

                # Find anything TIs in state SCHEDULED, try to QUEUE it (send it to the executor)
                num_queued_tis = self._critical_section_enqueue_task_instances(session=session)

                # Make sure we only sent this metric if we obtained the lock, otherwise we'll skew the
                # metric, way down
                timer.stop(send=True)
            except OperationalError as e:
                timer.stop(send=False)

                if is_lock_not_available_error(error=e):
                    # Another scheduler holds the pool lock; retry next loop.
                    self.log.debug("Critical section lock held by another Scheduler")
                    Stats.incr('scheduler.critical_section_busy')
                    session.rollback()
                    return 0
                raise

        guard.commit()

    return num_queued_tis
@retry_db_transaction
def _get_next_dagruns_to_examine(self, state: DagRunState, session: Session):
    """Fetch the next batch of DagRuns in *state* to examine, retrying on transient DB errors."""
    dag_runs = DagRun.next_dagruns_to_examine(state, session)
    return dag_runs
@retry_db_transaction
def _create_dagruns_for_dags(self, guard, session):
    """Find Dag Models needing DagRuns and Create Dag Runs with retries in case of OperationalError

    :param guard: the ``prohibit_commit`` guard — committing through it is the
        only sanctioned way to commit inside the scheduling loop
    :param session: DB session used for both the query and the run creation
    """
    query = DagModel.dags_needing_dagruns(session)
    self._create_dag_runs(query.all(), session)

    # commit the session - Release the write lock on DagModel table.
    guard.commit()
    # END: create dagruns
def _create_dag_runs(self, dag_models: Collection[DagModel], session: Session) -> None:
    """
    Unconditionally create a DAG run for the given DAG, and update the dag_model's fields to control
    if/when the next DAGRun should be created

    :param dag_models: DagModels whose ``next_dagrun`` is due
    :param session: DB session used for all queries and inserts
    """
    # Bulk Fetch DagRuns with dag_id and execution_date same
    # as DagModel.dag_id and DagModel.next_dagrun
    # This list is used to verify if the DagRun already exist so that we don't attempt to create
    # duplicate dag runs
    existing_dagruns = (
        session.query(DagRun.dag_id, DagRun.execution_date)
        .filter(
            tuple_in_condition(
                (DagRun.dag_id, DagRun.execution_date),
                ((dm.dag_id, dm.next_dagrun) for dm in dag_models),
            ),
        )
        .all()
    )

    # Counts of currently active runs per dag_id; missing dags default to 0.
    active_runs_of_dags = defaultdict(
        int,
        DagRun.active_runs_of_dags(dag_ids=(dm.dag_id for dm in dag_models), session=session),
    )

    for dag_model in dag_models:
        dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
        if not dag:
            self.log.error("DAG '%s' not found in serialized_dag table", dag_model.dag_id)
            continue

        dag_hash = self.dagbag.dags_hash.get(dag.dag_id)

        data_interval = dag.get_next_data_interval(dag_model)
        # Explicitly check if the DagRun already exists. This is an edge case
        # where a Dag Run is created but `DagModel.next_dagrun` and `DagModel.next_dagrun_create_after`
        # are not updated.
        # We opted to check DagRun existence instead
        # of catching an Integrity error and rolling back the session i.e
        # we need to set dag.next_dagrun_info if the Dag Run already exists or if we
        # create a new one. This is so that in the next Scheduling loop we try to create new runs
        # instead of falling in a loop of Integrity Error.
        if (dag.dag_id, dag_model.next_dagrun) not in existing_dagruns:
            dag.create_dagrun(
                run_type=DagRunType.SCHEDULED,
                execution_date=dag_model.next_dagrun,
                state=DagRunState.QUEUED,
                data_interval=data_interval,
                external_trigger=False,
                session=session,
                dag_hash=dag_hash,
                creating_job_id=self.id,
            )
            active_runs_of_dags[dag.dag_id] += 1
        if self._should_update_dag_next_dagruns(dag, dag_model, active_runs_of_dags[dag.dag_id]):
            dag_model.calculate_dagrun_date_fields(dag, data_interval)
    # TODO[HA]: Should we do a session.flush() so we don't have to keep lots of state/object in
    # memory for larger dags? or expunge_all()
def _should_update_dag_next_dagruns(self, dag, dag_model: DagModel, total_active_runs) -> bool:
    """
    Return True when ``dag_model``'s next-dagrun fields may be recalculated.

    When the DAG already has ``max_active_runs`` (or more) active runs, the
    creation timestamp is cleared instead — blocking further run creation —
    and False is returned.
    """
    below_limit = total_active_runs < dag.max_active_runs
    if not below_limit:
        self.log.info(
            "DAG %s is at (or above) max_active_runs (%d of %d), not creating any more runs",
            dag_model.dag_id,
            total_active_runs,
            dag.max_active_runs,
        )
        # Prevent new runs from being created until an active run finishes.
        dag_model.next_dagrun_create_after = None
    return below_limit
def _start_queued_dagruns(
    self,
    session: Session,
) -> None:
    """Find DagRuns in queued state and decide moving them to running state

    :param session: DB session used to fetch runs and count active ones
    """
    dag_runs = self._get_next_dagruns_to_examine(DagRunState.QUEUED, session)

    # Currently-RUNNING run counts per dag_id; missing dags default to 0.
    active_runs_of_dags = defaultdict(
        int,
        DagRun.active_runs_of_dags((dr.dag_id for dr in dag_runs), only_running=True, session=session),
    )

    def _update_state(dag: DAG, dag_run: DagRun):
        # Flip the run to RUNNING and emit the schedule-delay metric for periodic DAGs.
        dag_run.state = DagRunState.RUNNING
        dag_run.start_date = timezone.utcnow()
        if dag.timetable.periodic:
            # TODO: Logically, this should be DagRunInfo.run_after, but the
            # information is not stored on a DagRun, only before the actual
            # execution on DagModel.next_dagrun_create_after. We should add
            # a field on DagRun for this instead of relying on the run
            # always happening immediately after the data interval.
            expected_start_date = dag.get_run_data_interval(dag_run).end
            schedule_delay = dag_run.start_date - expected_start_date
            Stats.timing(f'dagrun.schedule_delay.{dag.dag_id}', schedule_delay)

    for dag_run in dag_runs:
        dag = dag_run.dag = self.dagbag.get_dag(dag_run.dag_id, session=session)
        if not dag:
            self.log.error("DAG '%s' not found in serialized_dag table", dag_run.dag_id)
            continue
        active_runs = active_runs_of_dags[dag_run.dag_id]

        if dag.max_active_runs and active_runs >= dag.max_active_runs:
            self.log.debug(
                "DAG %s already has %d active runs, not moving any more runs to RUNNING state %s",
                dag.dag_id,
                active_runs,
                dag_run.execution_date,
            )
        else:
            active_runs_of_dags[dag_run.dag_id] += 1
            _update_state(dag, dag_run)
def _schedule_dag_run(
    self,
    dag_run: DagRun,
    session: Session,
) -> Optional[DagCallbackRequest]:
    """
    Make scheduling decisions about an individual dag run

    :param dag_run: The DagRun to schedule
    :param session: DB session; all mutations go through it
    :return: Callback that needs to be executed
    """
    callback: Optional[DagCallbackRequest] = None

    dag = dag_run.dag = self.dagbag.get_dag(dag_run.dag_id, session=session)

    if not dag:
        self.log.error("Couldn't find dag %s in DagBag/DB!", dag_run.dag_id)
        return callback
    dag_model = DM.get_dagmodel(dag.dag_id, session)

    # Dagrun-timeout handling: fail the run, skip its unfinished TIs, and
    # return a failure callback for the processor.
    if (
        dag_run.start_date
        and dag.dagrun_timeout
        and dag_run.start_date < timezone.utcnow() - dag.dagrun_timeout
    ):
        dag_run.set_state(DagRunState.FAILED)
        unfinished_task_instances = (
            session.query(TI)
            .filter(TI.dag_id == dag_run.dag_id)
            .filter(TI.run_id == dag_run.run_id)
            .filter(TI.state.in_(State.unfinished))
        )
        for task_instance in unfinished_task_instances:
            task_instance.state = TaskInstanceState.SKIPPED
            session.merge(task_instance)
        session.flush()
        self.log.info("Run %s of %s has timed-out", dag_run.run_id, dag_run.dag_id)

        active_runs = dag.get_num_active_runs(only_running=False, session=session)
        # Work out if we should allow creating a new DagRun now?
        if self._should_update_dag_next_dagruns(dag, dag_model, active_runs):
            dag_model.calculate_dagrun_date_fields(dag, dag.get_run_data_interval(dag_run))

        callback_to_execute = DagCallbackRequest(
            full_filepath=dag.fileloc,
            dag_id=dag.dag_id,
            run_id=dag_run.run_id,
            is_failure_callback=True,
            msg='timed_out',
        )

        return callback_to_execute

    if dag_run.execution_date > timezone.utcnow() and not dag.allow_future_exec_dates:
        self.log.error("Execution date is in future: %s", dag_run.execution_date)
        return callback

    self._verify_integrity_if_dag_changed(dag_run=dag_run, session=session)
    # TODO[HA]: Rename update_state -> schedule_dag_run, ?? something else?
    schedulable_tis, callback_to_run = dag_run.update_state(session=session, execute_callbacks=False)
    if dag_run.state in State.finished:
        active_runs = dag.get_num_active_runs(only_running=False, session=session)
        # Work out if we should allow creating a new DagRun now?
        if self._should_update_dag_next_dagruns(dag, dag_model, active_runs):
            dag_model.calculate_dagrun_date_fields(dag, dag.get_run_data_interval(dag_run))

    # This will do one query per dag run. We "could" build up a complex
    # query to update all the TIs across all the execution dates and dag
    # IDs in a single query, but it turns out that can be _very very slow_
    # see #11147/commit ee90807ac for more details
    dag_run.schedule_tis(schedulable_tis, session)

    return callback_to_run
@provide_session
def _verify_integrity_if_dag_changed(self, dag_run: DagRun, session=None):
    """
    Run ``DagRun.verify_integrity`` only when the serialized DAG's structure changed.

    ``verify_integrity`` is slow, so it is skipped whenever the run's stored
    ``dag_hash`` already matches the latest serialized version.
    """
    latest_version = SerializedDagModel.get_latest_version_hash(dag_run.dag_id, session=session)
    if dag_run.dag_hash != latest_version:
        dag_run.dag_hash = latest_version
        # Re-fetch the DAG so the run sees the updated structure.
        dag_run.dag = self.dagbag.get_dag(dag_id=dag_run.dag_id, session=session)
        # verify_integrity flushes the session itself.
        dag_run.verify_integrity(session=session)
    else:
        self.log.debug("DAG %s not changed structure, skipping dagrun.verify_integrity", dag_run.dag_id)
def _send_dag_callbacks_to_processor(self, dag: DAG, callback: Optional[DagCallbackRequest] = None):
    """Forward SLA callbacks for *dag*, plus the given DAG callback when one is present."""
    self._send_sla_callbacks_to_processor(dag)
    if not callback:
        self.log.debug("callback is empty")
    else:
        self.executor.send_callback(callback)
def _send_sla_callbacks_to_processor(self, dag: DAG):
    """Emit an SLA callback for *dag* when SLA checking is enabled and any of its tasks defines an SLA."""
    if not settings.CHECK_SLAS:
        return

    any_task_has_sla = any(isinstance(task.sla, timedelta) for task in dag.tasks)
    if any_task_has_sla:
        self.executor.send_callback(
            SlaCallbackRequest(full_filepath=dag.fileloc, dag_id=dag.dag_id)
        )
    else:
        self.log.debug("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
@provide_session
def _emit_pool_metrics(self, session: Session = None) -> None:
    """Publish per-pool open/queued/running slot counts as StatsD gauges."""
    for name, slot_stats in models.Pool.slots_stats(session=session).items():
        # Same three gauges as before, emitted in the same order per pool.
        for kind in ("open", "queued", "running"):
            Stats.gauge(f'pool.{kind}_slots.{name}', slot_stats[kind])
@provide_session
def heartbeat_callback(self, session: Session = None) -> None:
    """Emit a ``scheduler_heartbeat`` counter each time the job heartbeats."""
    Stats.incr('scheduler_heartbeat', 1, 1)
@provide_session
def adopt_or_reset_orphaned_tasks(self, session: Session = None):
    """
    Reset any TaskInstance still in QUEUED or SCHEDULED states that were
    enqueued by a SchedulerJob that is no longer running.

    Also marks SchedulerJob rows without a recent heartbeat as FAILED, and
    lets the executor adopt orphaned TIs where possible; TIs that cannot be
    adopted are reset to NONE.

    :return: the number of TIs reset
    :rtype: int
    """
    self.log.info("Resetting orphaned tasks for active dag runs")
    timeout = conf.getint('scheduler', 'scheduler_health_check_threshold')

    # The whole unit of work is retried on transient DB errors.
    for attempt in run_with_db_retries(logger=self.log):
        with attempt:
            self.log.debug(
                "Running SchedulerJob.adopt_or_reset_orphaned_tasks with retries. Try %d of %d",
                attempt.retry_state.attempt_number,
                MAX_DB_RETRIES,
            )
            self.log.debug("Calling SchedulerJob.adopt_or_reset_orphaned_tasks method")
            try:
                # Fail scheduler jobs whose heartbeat is older than the health-check threshold.
                num_failed = (
                    session.query(SchedulerJob)
                    .filter(
                        SchedulerJob.job_type == "SchedulerJob",
                        SchedulerJob.state == State.RUNNING,
                        SchedulerJob.latest_heartbeat < (timezone.utcnow() - timedelta(seconds=timeout)),
                    )
                    .update({"state": State.FAILED})
                )

                if num_failed:
                    self.log.info("Marked %d SchedulerJob instances as failed", num_failed)
                    Stats.incr(self.__class__.__name__.lower() + '_end', num_failed)

                resettable_states = [TaskInstanceState.QUEUED, TaskInstanceState.RUNNING]
                query = (
                    session.query(TI)
                    .filter(TI.state.in_(resettable_states))
                    # outerjoin is because we didn't use to have queued_by_job
                    # set, so we need to pick up anything pre upgrade. This (and the
                    # "or queued_by_job_id IS NONE") can go as soon as scheduler HA is
                    # released.
                    .outerjoin(TI.queued_by_job)
                    .filter(or_(TI.queued_by_job_id.is_(None), SchedulerJob.state != State.RUNNING))
                    .join(TI.dag_run)
                    .filter(
                        DagRun.run_type != DagRunType.BACKFILL_JOB,
                        DagRun.state == State.RUNNING,
                    )
                    .options(load_only(TI.dag_id, TI.task_id, TI.run_id))
                )

                # Lock these rows, so that another scheduler can't try and adopt these too
                tis_to_reset_or_adopt = with_row_locks(
                    query, of=TI, session=session, **skip_locked(session=session)
                ).all()
                # The executor keeps the TIs it can adopt and returns the rest to be reset.
                to_reset = self.executor.try_adopt_task_instances(tis_to_reset_or_adopt)

                reset_tis_message = []
                for ti in to_reset:
                    reset_tis_message.append(repr(ti))
                    ti.state = State.NONE
                    ti.queued_by_job_id = None

                # Adopted TIs now belong to this scheduler job.
                for ti in set(tis_to_reset_or_adopt) - set(to_reset):
                    ti.queued_by_job_id = self.id

                Stats.incr('scheduler.orphaned_tasks.cleared', len(to_reset))
                Stats.incr('scheduler.orphaned_tasks.adopted', len(tis_to_reset_or_adopt) - len(to_reset))

                if to_reset:
                    task_instance_str = '\n\t'.join(reset_tis_message)
                    self.log.info(
                        "Reset the following %s orphaned TaskInstances:\n\t%s",
                        len(to_reset),
                        task_instance_str,
                    )

                # Issue SQL/finish "Unit of Work", but let @provide_session
                # commit (or if passed a session, let caller decide when to commit
                session.flush()
            except OperationalError:
                session.rollback()
                raise

    return len(to_reset)
@provide_session
def check_trigger_timeouts(self, session: Session = None):
    """
    Looks at all tasks that are in the "deferred" state and whose trigger
    or execution timeout has passed, so they can be marked as failed.

    :param session: DB session (injected by ``@provide_session``)
    """
    # Bulk UPDATE: flip timed-out DEFERRED TIs back to SCHEDULED with a
    # "__fail__" next_method so the worker fails them, not the scheduler.
    num_timed_out_tasks = (
        session.query(TaskInstance)
        .filter(
            TaskInstance.state == TaskInstanceState.DEFERRED,
            TaskInstance.trigger_timeout < timezone.utcnow(),
        )
        .update(
            # We have to schedule these to fail themselves so it doesn't
            # happen inside the scheduler.
            {
                "state": TaskInstanceState.SCHEDULED,
                "next_method": "__fail__",
                "next_kwargs": {"error": "Trigger/execution timeout"},
                "trigger_id": None,
            }
        )
    )
    if num_timed_out_tasks:
        self.log.info("Timed out %i deferred tasks without fired triggers", num_timed_out_tasks)
@provide_session
def _find_zombies(self, session):
    """
    Find zombie task instances — tasks that haven't heartbeated for too long —
    and send a failure callback request for each of them.

    :param session: DB session (injected by ``@provide_session``)
    """
    self.log.debug("Finding 'running' jobs without a recent heartbeat")
    limit_dttm = timezone.utcnow() - timedelta(seconds=self._zombie_threshold_secs)

    # A RUNNING TI is a zombie when its LocalTaskJob is no longer RUNNING or
    # hasn't heartbeated since limit_dttm.
    zombies = (
        session.query(TaskInstance, DagModel.fileloc)
        .join(LocalTaskJob, TaskInstance.job_id == LocalTaskJob.id)
        .join(DagModel, TaskInstance.dag_id == DagModel.dag_id)
        .filter(TaskInstance.state == State.RUNNING)
        .filter(
            or_(
                LocalTaskJob.state != State.RUNNING,
                LocalTaskJob.latest_heartbeat < limit_dttm,
            )
        )
        .all()
    )

    if zombies:
        self.log.warning("Failing (%s) jobs without heartbeat after %s", len(zombies), limit_dttm)

    for ti, file_loc in zombies:
        request = TaskCallbackRequest(
            full_filepath=file_loc,
            simple_task_instance=SimpleTaskInstance.from_ti(ti),
            msg=f"Detected {ti} as zombie",
        )
        self.log.error("Detected zombie job: %s", request)
        self.executor.send_callback(request)
        Stats.incr('zombies_killed')
| {
"content_hash": "b8c131a9991709a16c0c15e5a98080d9",
"timestamp": "",
"source": "github",
"line_count": 1373,
"max_line_length": 110,
"avg_line_length": 44.450837581937364,
"alnum_prop": 0.5742163818387377,
"repo_name": "danielvdende/incubator-airflow",
"id": "8452950e0467fc7d0bf1621df21816efa75b3ad3",
"size": "61820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/jobs/scheduler_job.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21824455"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495567"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
} |
import random
import discord
from discord.ext import commands
class Invite:
    """Cog providing the ``invite`` command with the bot's OAuth2 invite link."""

    def __init__(self, Aya):
        # Aya is the bot instance this cog is attached to.
        self.Aya = Aya

    @commands.command(pass_context=True)
    async def invite(self, ctx):
        """ Use this link to add Aya to your server! """
        # Fix: ``ctx`` was referenced but never received, raising NameError at
        # runtime. On the discord.py 0.16 API (``bot.say``/``message.server``)
        # the context must be requested with ``pass_context=True``.
        serv_owner = ctx.message.server.owner
        # '#%06x' % n followed by int(..., 16) is the identity for n <= 0xFFFFFF,
        # so build the Color directly. Lower bound 8 kept from the original
        # (presumably to avoid a near-black embed strip — confirm with author).
        color = discord.Color(value=random.randint(8, 0xFFFFFF))
        em = discord.Embed(
            color=color,
            title='Invite me to your server!',
            description='[Click here](https://discordapp.com/api/oauth2/authorize?client_id={}&scope=bot&permissions=8)'
            .format(self.Aya.user.id),
        )
        # Fix: ``footer`` is not an ``Embed.__init__`` keyword and was silently
        # ignored; footers must be set through ``set_footer``.
        em.set_footer(text='Aya')
        try:
            await self.Aya.say(embed=em)
        except discord.HTTPException:
            # Without the "Embed Links" permission Discord rejects embeds;
            # fall back to a plain message mentioning the server owner.
            await self.Aya.say('{} I need the embed links permission to send this.'.format(serv_owner.mention))
def setup(Aya):
    # Entry point used by discord.py's extension loader to register the cog.
    Aya.add_cog(Invite(Aya))
| {
"content_hash": "82dcc6bcbf51b575874fd597de802adc",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 135,
"avg_line_length": 33.25806451612903,
"alnum_prop": 0.5664403491755577,
"repo_name": "Drinka/Aya",
"id": "54e97c557a854cea883cc3c65663af584bd104e6",
"size": "1031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cogs/invite.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25042"
}
],
"symlink_target": ""
} |
"""
Setup declaration to install Pyptables
"""
params = dict(
name='pyptables',
version='0.1',
packages=['pyptables'],
url='https://github.com/BenjaminSchubert/Pyptables',
license='MIT',
author='Benjamin Schubert',
author_email='ben.c.schubert@gmail.com',
description='A python wrapper around Iptables to simplify handling of not too complex rules',
include_package_data=True,
classifiers=[
"Topic :: System :: Networking :: Firewalls",
"Development Status :: 3 - Alpha",
"Environment :: Console",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: Unix",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5"
]
)
with open("README.md") as _desc:
params["long_description"] = _desc.read()
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
else:
params['entry_points'] = {
'console_scripts': [
"pyptables = pyptables:run"
]
}
setup(**params)
| {
"content_hash": "3f20c6e37f7101196578c7b2f9fc8bf1",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 97,
"avg_line_length": 26.404761904761905,
"alnum_prop": 0.6158701532912534,
"repo_name": "BenjaminSchubert/Pyptables",
"id": "233882ac931c86e4d48add9f0dce9d9b9900e618",
"size": "1157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26161"
}
],
"symlink_target": ""
} |
from setuptools import setup
from setuptools.extension import Extension
# Cython is optional at build time: without it only the pure-Python package
# is installed and the accelerated extension modules are skipped.
try:
    import Cython.Distutils
except ImportError:
    cmdclass = {}
    ext_modules = None
    # Fix: parenthesized print works identically on Python 2 (where this file
    # originates) and also parses under Python 3; the bare print statement
    # previously used is a Python 3 syntax error.
    print("WARNING: unable to import Cython.")
else:
    cmdclass = {"build_ext": Cython.Distutils.build_ext}
    ext_modules = [
        Extension("borg.bregman", ["borg/bregman.pyx"]),
        Extension("borg.models", ["borg/models.pyx"]),
        Extension("borg.planners", ["borg/planners.pyx"]),
        Extension("borg.statistics", ["borg/statistics.pyx"]),
        Extension("borg.domains.max_sat.features", ["borg/domains/max_sat/features.pyx"]),
        Extension("borg.domains.max_sat.instance", ["borg/domains/max_sat/instance.pyx"]),
        Extension("borg.domains.pb.features", ["borg/domains/pb/features.pyx"]),
        Extension("borg.domains.pb.instance", ["borg/domains/pb/instance.pyx"]),
        Extension("borg.domains.sat.features", ["borg/domains/sat/features.pyx"]),
        Extension("borg.domains.sat.instance", ["borg/domains/sat/instance.pyx"]),
        Extension("borg.test.test_statistics_c", ["borg/test/test_statistics_c.pyx"])]

# VCS requirements (git+...) cannot be expressed in install_requires, so
# they are filtered out. Iterating the file directly avoids materializing
# readlines()'s intermediate list.
with open("requirements.txt") as file_:
    requires = [line for line in file_ if not line.startswith("git+")]

setup(
    name = "borg",
    version = "2012.4.01",
    cmdclass = cmdclass,
    ext_modules = ext_modules,
    install_requires = requires,
    author = "Bryan Silverthorn",
    author_email = "bsilverthorn@gmail.com",
    description = "the borg algorithm portfolio toolkit",
    license = "MIT",
    keywords = "borg algorithm portfolio solver SAT PB satisfiability",
    url = "http://nn.cs.utexas.edu/pages/research/borg/",
    classifiers = [
        "Development Status :: 4 - Beta",
        "Programming Language :: Python :: 2.6",
        "Operating System :: Unix",
        "Topic :: Utilities",
        "Topic :: Software Development :: Libraries",
        "Topic :: Scientific/Engineering :: Artificial Intelligence"])
"content_hash": "bda1375be0c335223bf798c026663833",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 90,
"avg_line_length": 42.234042553191486,
"alnum_prop": 0.653904282115869,
"repo_name": "borg-project/borg",
"id": "3c827206b10e58665b254abecaab7c8b03aeb73b",
"size": "1985",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "339794"
}
],
"symlink_target": ""
} |
__version__=''' $Id$ '''
__doc__='''Page Layout and Typography Using Scripts" - higher-level framework for flowing documents'''
from reportlab.platypus.flowables import Flowable, Image, Macro, PageBreak, Preformatted, Spacer, XBox, \
CondPageBreak, KeepTogether, TraceInfo, FailOnWrap, FailOnDraw, PTOContainer, \
KeepInFrame, ParagraphAndImage, ImageAndFlowables
from reportlab.platypus.paragraph import Paragraph, cleanBlockQuotedText, ParaLines
from reportlab.platypus.paraparser import ParaFrag
from reportlab.platypus.tables import Table, TableStyle, CellStyle, LongTable
from reportlab.platypus.frames import Frame
from reportlab.platypus.doctemplate import BaseDocTemplate, NextPageTemplate, PageTemplate, ActionFlowable, \
SimpleDocTemplate, FrameBreak, PageBegin, Indenter, NotAtTopPageBreak
from xpreformatted import XPreformatted
| {
"content_hash": "35715aa6685f93564e9ccd1f5a27fe07",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 109,
"avg_line_length": 70.3076923076923,
"alnum_prop": 0.762582056892779,
"repo_name": "makinacorpus/reportlab-ecomobile",
"id": "381e714778600ccdff28ee8b45bb9fb80eb39568",
"size": "1104",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/reportlab/platypus/__init__.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "764229"
},
{
"name": "C++",
"bytes": "2019"
},
{
"name": "Java",
"bytes": "6333"
},
{
"name": "Python",
"bytes": "2863462"
},
{
"name": "Shell",
"bytes": "1783"
}
],
"symlink_target": ""
} |
import data
import fft
import fmt
import pair
import walls
__all__ = ['data','fft','fmt','pair','walls']
| {
"content_hash": "8ea05467ac76e36e0d272553a6cbc320",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 45,
"avg_line_length": 15.142857142857142,
"alnum_prop": 0.6698113207547169,
"repo_name": "mphoward/flyft",
"id": "d17dcd38cf0d790fe92706f9ed388ef498a50ed2",
"size": "106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "32007"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
"""A Python implemntation of a kd-tree
This package provides a simple implementation of a kd-tree in Python.
https://en.wikipedia.org/wiki/K-d_tree
"""
from __future__ import print_function
import operator
import math
from collections import deque
from functools import wraps
__author__ = u'Stefan Kögl <stefan@skoegl.net>'
__version__ = '0.12'
__website__ = 'https://github.com/stefankoegl/kdtree'
__license__ = 'ISC license'
# maps child position to its comparison operator and the combine operation
# used by KDNode._search_node when testing whether the search hypersphere
# crosses the splitting plane:
#   0 (left child)  -> (operator.le, operator.sub)
#   1 (right child) -> (operator.ge, operator.add)
COMPARE_CHILD = {
    0: (operator.le, operator.sub),
    1: (operator.ge, operator.add),
}
class Node(object):
    """ A Node in a kd-tree
    A tree is represented by its root node, and every node represents
    its subtree"""
    def __init__(self, data=None, left=None, right=None):
        # data is the point stored at this node (None marks an "empty" node,
        # see __nonzero__); left/right are child Nodes or None.
        self.data = data
        self.left = left
        self.right = right
    @property
    def is_leaf(self):
        """ Returns True if a Node has no subnodes
        >>> Node().is_leaf
        True
        >>> Node( 1, left=Node(2) ).is_leaf
        False
        """
        # A node is also a leaf when it holds no data, or when all of its
        # children are empty nodes (falsy per __nonzero__/__bool__ below).
        return (not self.data) or \
               (all(not bool(c) for c, p in self.children))
    def preorder(self):
        """ iterator for nodes: root, left, right """
        if not self:
            return
        yield self
        if self.left:
            for x in self.left.preorder():
                yield x
        if self.right:
            for x in self.right.preorder():
                yield x
    def inorder(self):
        """ iterator for nodes: left, root, right """
        if not self:
            return
        if self.left:
            for x in self.left.inorder():
                yield x
        yield self
        if self.right:
            for x in self.right.inorder():
                yield x
    def postorder(self):
        """ iterator for nodes: left, right, root """
        if not self:
            return
        if self.left:
            for x in self.left.postorder():
                yield x
        if self.right:
            for x in self.right.postorder():
                yield x
        yield self
    @property
    def children(self):
        """
        Returns an iterator for the non-empty children of the Node
        The children are returned as (Node, pos) tuples where pos is 0 for the
        left subnode and 1 for the right.
        >>> len(list(create(dimensions=2).children))
        0
        >>> len(list(create([ (1, 2) ]).children))
        0
        >>> len(list(create([ (2, 2), (2, 1), (2, 3) ]).children))
        2
        """
        if self.left and self.left.data is not None:
            yield self.left, 0
        if self.right and self.right.data is not None:
            yield self.right, 1
    def set_child(self, index, child):
        """ Sets one of the node's children
        index 0 refers to the left, 1 to the right child """
        if index == 0:
            self.left = child
        else:
            self.right = child
    def height(self):
        """
        Returns height of the (sub)tree, without considering
        empty leaf-nodes
        >>> create(dimensions=2).height()
        0
        >>> create([ (1, 2) ]).height()
        1
        >>> create([ (1, 2), (2, 3) ]).height()
        2
        """
        # An empty node contributes height 0, a non-empty one at least 1.
        min_height = int(bool(self))
        return max([min_height] + [c.height()+1 for c, p in self.children])
    def get_child_pos(self, child):
        """ Returns the position if the given child
        If the given node is the left child, 0 is returned. If its the right
        child, 1 is returned. Otherwise None """
        # NOTE: comparison uses __eq__ (data equality), not identity.
        for c, pos in self.children:
            if child == c:
                return pos
    def __repr__(self):
        return '<%(cls)s - %(data)s>' % \
            dict(cls=self.__class__.__name__, data=repr(self.data))
    def __nonzero__(self):
        # A node is truthy iff it carries data; used throughout for the
        # "empty node" checks (e.g. `if not self: return`).
        return self.data is not None
    __bool__ = __nonzero__
    def __eq__(self, other):
        # Equal to a plain tuple holding the same point, or to another node
        # holding equal data; identity is NOT considered (see __hash__).
        if isinstance(other, tuple):
            return self.data == other
        else:
            return self.data == other.data
    def __hash__(self):
        # Hash by identity so that equal-by-data nodes remain distinct keys
        # in dicts/sets (required by search_knn's `results`/`examined`).
        return id(self)
def require_axis(f):
    """ Check if the object of the function has axis and sel_axis members

    Decorator for KDNode methods: raises ValueError before calling the
    wrapped method when the node was created without an axis or a
    sel_axis function. """
    @wraps(f)
    def _wrapper(self, *args, **kwargs):
        if None in (self.axis, self.sel_axis):
            # FIX: the original format string used '%(func_name)' without the
            # trailing 's' conversion, so the '%' operator itself raised
            # "unsupported format character" instead of this error message.
            raise ValueError('%(func_name)s requires the node %(node)s '
                    'to have an axis and a sel_axis function' %
                    dict(func_name=f.__name__, node=repr(self)))
        return f(self, *args, **kwargs)
    return _wrapper
class KDNode(Node):
    """ A Node that contains kd-tree specific data and methods """
    def __init__(self, data=None, left=None, right=None, axis=None,
                 sel_axis=None, dimensions=None):
        """ Creates a new node for a kd-tree
        If the node will be used within a tree, the axis and the sel_axis
        function should be supplied.
        sel_axis(axis) is used when creating subnodes of the current node. It
        receives the axis of the parent node and returns the axis of the child
        node. """
        super(KDNode, self).__init__(data, left, right)
        self.axis = axis
        self.sel_axis = sel_axis
        self.dimensions = dimensions
    @require_axis
    def add(self, point):
        """
        Adds a point to the current node or iteratively
        descends to one of its children.
        Users should call add() only to the topmost tree.
        """
        current = self
        while True:
            check_dimensionality([point], dimensions=current.dimensions)
            # Adding has hit an empty leaf-node, add here
            if current.data is None:
                current.data = point
                return current
            # split on self.axis, recurse either left or right
            if point[current.axis] < current.data[current.axis]:
                if current.left is None:
                    current.left = current.create_subnode(point)
                    return current.left
                else:
                    current = current.left
            else:
                # points equal on the split axis go to the right subtree
                if current.right is None:
                    current.right = current.create_subnode(point)
                    return current.right
                else:
                    current = current.right
    @require_axis
    def create_subnode(self, data):
        """ Creates a subnode for the current node """
        return self.__class__(data,
                axis=self.sel_axis(self.axis),
                sel_axis=self.sel_axis,
                dimensions=self.dimensions)
    @require_axis
    def find_replacement(self):
        """ Finds a replacement for the current node
        The replacement is returned as a
        (replacement-node, replacements-parent-node) tuple """
        # Standard BST deletion: take the minimum of the right subtree if it
        # exists, otherwise the maximum of the left subtree.
        if self.right:
            child, parent = self.right.extreme_child(min, self.axis)
        else:
            child, parent = self.left.extreme_child(max, self.axis)
        return (child, parent if parent is not None else self)
    def should_remove(self, point, node):
        """ checks if self's point (and maybe identity) matches """
        if not self.data == point:
            return False
        # when a specific node was requested, also require identity
        return (node is None) or (node is self)
    @require_axis
    def remove(self, point, node=None):
        """ Removes the node with the given point from the tree
        Returns the new root node of the (sub)tree.
        If there are multiple points matching "point", only one is removed. The
        optional "node" parameter is used for checking the identity, once the
        removeal candidate is decided."""
        # Recursion has reached an empty leaf node, nothing here to delete
        if not self:
            return
        # Recursion has reached the node to be deleted
        if self.should_remove(point, node):
            return self._remove(point)
        # Remove direct subnode
        if self.left and self.left.should_remove(point, node):
            self.left = self.left._remove(point)
        elif self.right and self.right.should_remove(point, node):
            self.right = self.right._remove(point)
        # Recurse to subtrees: equal split-axis values may live on either
        # side, so both branches must be searched on equality.
        if point[self.axis] <= self.data[self.axis]:
            if self.left:
                self.left = self.left.remove(point, node)
        if point[self.axis] >= self.data[self.axis]:
            if self.right:
                self.right = self.right.remove(point, node)
        return self
    @require_axis
    def _remove(self, point):
        # we have reached the node to be deleted here
        # deleting a leaf node is trivial
        if self.is_leaf:
            self.data = None
            return self
        # we have to delete a non-leaf node here
        # find a replacement for the node (will be the new subtree-root)
        root, max_p = self.find_replacement()
        # self and root swap positions; the conditional expressions avoid
        # creating a self-referencing child when root is a direct child.
        tmp_l, tmp_r = self.left, self.right
        self.left, self.right = root.left, root.right
        root.left, root.right = tmp_l if tmp_l is not root else self, tmp_r if tmp_r is not root else self
        self.axis, root.axis = root.axis, self.axis
        # Special-case if we have not chosen a direct child as the replacement
        if max_p is not self:
            pos = max_p.get_child_pos(root)
            max_p.set_child(pos, self)
            max_p.remove(point, self)
        else:
            root.remove(point, self)
        return root
    @property
    def is_balanced(self):
        """ Returns True if the (sub)tree is balanced
        The tree is balanced if the heights of both subtrees differ at most by
        1 """
        left_height = self.left.height() if self.left else 0
        right_height = self.right.height() if self.right else 0
        if abs(left_height - right_height) > 1:
            return False
        return all(c.is_balanced for c, _ in self.children)
    def rebalance(self):
        """
        Returns the (possibly new) root of the rebalanced tree
        """
        # Rebuild from scratch: inorder yields the points, create() picks
        # medians, producing a balanced tree.
        return create([x.data for x in self.inorder()])
    def axis_dist(self, point, axis):
        """
        Squared distance at the given axis between
        the current Node and the given point
        """
        return math.pow(self.data[axis] - point[axis], 2)
    def dist(self, point):
        """
        Squared distance between the current Node
        and the given point
        """
        r = range(len(self.data))
        return sum([self.axis_dist(point, i) for i in r])
    def search_knn(self, point, k, dist=None):
        """ Return the k nearest neighbors of point and their distances
        point must be an actual point, not a node.
        k is the number of results to return. The actual results can be less
        (if there aren't more nodes to return) or more in case of equal
        distances.
        dist is a distance function, expecting two points and returning a
        distance value. Distance values can be any compareable type.
        The result is an ordered list of (node, distance) tuples.
        """
        prev = None
        current = self
        # default metric is the squared Euclidean distance (see dist())
        if dist is None:
            get_dist = lambda n: n.dist(point)
        else:
            get_dist = lambda n: dist(n.data, point)
        # the nodes do not keep a reference to their parents
        parents = {current: None}
        # go down the tree as we would for inserting
        while current:
            if point[current.axis] < current.data[current.axis]:
                # left side
                parents[current.left] = current
                prev = current
                current = current.left
            else:
                # right side
                parents[current.right] = current
                prev = current
                current = current.right
        if not prev:
            return []
        examined = set()
        results = {}
        # Go up the tree, looking for better solutions
        current = prev
        while current:
            # search node and update results
            current._search_node(point, k, results, examined, get_dist)
            current = parents[current]
        BY_VALUE = lambda kv: kv[1]
        return sorted(results.items(), key=BY_VALUE)
    def _search_node(self, point, k, results, examined, get_dist):
        # Helper for search_knn: updates `results` (node -> distance) with
        # this node and recurses into unexamined children whose region could
        # still contain a closer point.
        examined.add(self)
        # get current best (the farthest of the currently kept results)
        if not results:
            bestNode = None
            bestDist = float('inf')
        else:
            bestNode, bestDist = sorted(results.items(), key=lambda n_d: n_d[1], reverse=True)[0]
        nodesChanged = False
        # If the current node is closer than the current best, then it
        # becomes the current best.
        nodeDist = get_dist(self)
        if nodeDist < bestDist:
            if len(results) == k and bestNode:
                results.pop(bestNode)
            results[self] = nodeDist
            nodesChanged = True
        # if we're equal to the current best, add it, regardless of k
        elif nodeDist == bestDist:
            results[self] = nodeDist
            nodesChanged = True
        # if we don't have k results yet, add it anyway
        elif len(results) < k:
            results[self] = nodeDist
            nodesChanged = True
        # get new best only if nodes have changed
        if nodesChanged:
            bestNode, bestDist = next(iter(
                sorted(results.items(), key=lambda n: n[1], reverse=True)
            ))
        # Check whether there could be any points on the other side of the
        # splitting plane that are closer to the search point than the current
        # best.
        for child, pos in self.children:
            if child in examined:
                continue
            examined.add(child)
            compare, combine = COMPARE_CHILD[pos]
            # Since the hyperplanes are all axis-aligned this is implemented
            # as a simple comparison to see whether the difference between the
            # splitting coordinate of the search point and current node is less
            # than the distance (overall coordinates) from the search point to
            # the current best.
            nodePoint = self.data[self.axis]
            pointPlusDist = combine(point[self.axis], bestDist)
            lineIntersects = compare(pointPlusDist, nodePoint)
            # If the hypersphere crosses the plane, there could be nearer
            # points on the other side of the plane, so the algorithm must move
            # down the other branch of the tree from the current node looking
            # for closer points, following the same recursive process as the
            # entire search.
            if lineIntersects:
                child._search_node(point, k, results, examined, get_dist)
    @require_axis
    def search_nn(self, point, dist=None):
        """
        Search the nearest node of the given point
        point must be an actual point, not a node. The nearest node to the
        point is returned. If a location of an actual node is used, the Node
        with this location will be returned (not its neighbor).
        dist is a distance function, expecting two points and returning a
        distance value. Distance values can be any compareable type.
        The result is a (node, distance) tuple.
        """
        return next(iter(self.search_knn(point, 1, dist)), None)
    @require_axis
    def search_nn_dist(self, point, distance, best=None):
        """
        Search the n nearest nodes of the given point which are within given
        distance
        point must be a location, not a node. A list containing the n nearest
        nodes to the point within the distance will be returned.
        """
        if best is None:
            best = []
        # consider the current node
        # NOTE(review): dist() returns a *squared* distance but is compared
        # against `distance` unsquared here, while the child pruning below
        # compares axis_dist against distance**2 — the two tests look
        # inconsistent; confirm the intended units of `distance`.
        if self.dist(point) < distance:
            best.append(self)
        # sort the children, nearer one first (is this really necessairy?)
        children = sorted(self.children, key=lambda c_p1: c_p1[0].dist(point))
        for child, p in children:
            # check if child node needs to be recursed
            if self.axis_dist(point, self.axis) < math.pow(distance, 2):
                child.search_nn_dist(point, distance, best)
        return best
    @require_axis
    def is_valid(self):
        """ Checks recursively if the tree is valid
        It is valid if each node splits correctly """
        if not self:
            return True
        # left descendants must not exceed this node on its axis, right
        # descendants must not be smaller (equal values are allowed on both)
        if self.left and self.data[self.axis] < self.left.data[self.axis]:
            return False
        if self.right and self.data[self.axis] > self.right.data[self.axis]:
            return False
        return all(c.is_valid() for c, _ in self.children) or self.is_leaf
    def extreme_child(self, sel_func, axis):
        """ Returns a child of the subtree and its parent
        The child is selected by sel_func which is either min or max
        (or a different function with similar semantics). """
        max_key = lambda child_parent: child_parent[0].data[axis]
        # we don't know our parent, so we include None
        me = [(self, None)] if self else []
        child_max = [c.extreme_child(sel_func, axis) for c, _ in self.children]
        # insert self for unknown parents
        child_max = [(c, p if p is not None else self) for c, p in child_max]
        candidates = me + child_max
        if not candidates:
            return None, None
        return sel_func(candidates, key=max_key)
def create(point_list=None, dimensions=None, axis=0, sel_axis=None):
    """ Creates a kd-tree from a list of points
    All points in the list must be of the same dimensionality.
    If no point_list is given, an empty tree is created. The number of
    dimensions has to be given instead.
    If both a point_list and dimensions are given, the numbers must agree.
    Axis is the axis on which the root-node should split.
    sel_axis(axis) is used when creating subnodes of a node. It receives the
    axis of the parent node and returns the axis of the child node.
    Note: point_list is sorted in place. """
    if not point_list and not dimensions:
        raise ValueError('either point_list or dimensions must be provided')
    elif point_list:
        dimensions = check_dimensionality(point_list, dimensions)
    # by default cycle through the axes
    sel_axis = sel_axis or (lambda parent_axis: (parent_axis + 1) % dimensions)
    if not point_list:
        # no points: return a single empty node carrying the split metadata
        return KDNode(sel_axis=sel_axis, axis=axis, dimensions=dimensions)
    # sort in place on the split axis and use the median as the pivot
    point_list.sort(key=lambda p: p[axis])
    mid = len(point_list) // 2
    pivot = point_list[mid]
    left_subtree = create(point_list[:mid], dimensions, sel_axis(axis))
    right_subtree = create(point_list[mid + 1:], dimensions, sel_axis(axis))
    return KDNode(pivot, left_subtree, right_subtree, axis=axis, sel_axis=sel_axis)
def check_dimensionality(point_list, dimensions=None):
    """ Return the common dimensionality of the given points.
    When `dimensions` is not supplied (or falsy), it is inferred from the
    first point. Raises ValueError if any point's length differs. """
    if not dimensions:
        dimensions = len(point_list[0])
    if any(len(p) != dimensions for p in point_list):
        raise ValueError('All Points in the point_list must have the same dimensionality')
    return dimensions
def level_order(tree, include_all=False):
    """ Yield the tree's nodes in breadth-first (level) order.
    With include_all=True, missing children are padded with fresh empty
    nodes, making the iterator infinite (used by visualize). """
    pending = deque((tree,))
    while pending:
        current = pending.popleft()
        yield current
        # enqueue left then right, padding with an empty node when requested
        for child in (current.left, current.right):
            if include_all or child:
                pending.append(child or current.__class__())
def visualize(tree, max_level=100, node_width=10, left_padding=5):
    """ Prints the tree to stdout """
    # Number of levels to draw; tree.height() counts the root level itself.
    height = min(max_level, tree.height()-1)
    max_width = pow(2, height)
    per_level = 1      # nodes expected on the current level
    in_level  = 0      # nodes printed so far on the current level
    level     = 0
    # level_order with include_all=True is infinite; the `break` below stops
    # the iteration once `height` levels have been printed.
    for node in level_order(tree, include_all=True):
        if in_level == 0:
            print()
            print()
            print(' '*left_padding, end=' ')
        # each level splits the fixed total width among twice as many slots
        width = int(max_width*node_width/per_level)
        node_str = (str(node.data) if node else '').center(width)
        print(node_str, end=' ')
        in_level += 1
        if in_level == per_level:
            # level finished: the next one holds twice as many nodes
            in_level   = 0
            per_level *= 2
            level     += 1
        if level > height:
            break
    print()
    print()
| {
"content_hash": "9d99d8e6786f5a2eca8e57697307fbff",
"timestamp": "",
"source": "github",
"line_count": 718,
"max_line_length": 106,
"avg_line_length": 28.77715877437326,
"alnum_prop": 0.5773884425515439,
"repo_name": "karulont/combopt",
"id": "ca69ea046b90c81fd1c383ec73dac80e6cce84df",
"size": "20665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project7/kdtree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "95478"
}
],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 7, transform = "Integration", sigma = 0.0, exog_count = 100, ar_order = 12); | {
"content_hash": "3015c89017112a23eea89b75eef81317",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 168,
"avg_line_length": 38.285714285714285,
"alnum_prop": 0.7089552238805971,
"repo_name": "antoinecarme/pyaf",
"id": "8c7e95fad227054eda05cacce0a82a16e806a28a",
"size": "268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Integration/trend_PolyTrend/cycle_7/ar_12/test_artificial_128_Integration_PolyTrend_7_12_100.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
from tokens import Token
import types
import inspect
import copy
from nodes import AstNode
from errors import CodeTalkerException
class TranslatorException(CodeTalkerException):
    """Raised by Translator.translate when no rule is registered for a node type."""
    pass
class Translator:
    """Maps parse-tree node classes to translation callbacks for a grammar.

    When constructed with keyword defaults, translations are "scoped": every
    callback receives a scope object carrying those defaults; otherwise
    callbacks receive the node alone.
    """
    def __init__(self, grammar, **defaults):
        self.grammar = grammar
        self.register = {}   # node class -> translation function
        # scoped mode is enabled iff any defaults were supplied
        self.scope = True
        if not defaults:
            self.scope = False
        self.defaults = defaults
    def translates(self, what):
        """Decorator: register `func` as the translation rule for node class
        `what`. Returns a wrapper that dispatches with or without a scope."""
        def meta(func):
            self.register[what] = func
            def beta(node, scope=None):
                if node is None:
                    return None
                if self.scope:
                    return func(node, scope)
                else:
                    return func(node)
            return beta
        return meta
    def translate(self, tree, scope=None):
        """Translate a single node via its registered rule.
        Unregistered Tokens fall back to their raw value; any other
        unregistered class raises TranslatorException."""
        if tree is None:
            return None
        if tree.__class__ not in self.register:
            if isinstance(tree, Token):
                return tree.value
            raise TranslatorException('no rule to translate %s' % tree.__class__.__name__)
        if self.scope:
            return self.register[tree.__class__](tree, scope)
        else:
            return self.register[tree.__class__](tree)
    def from_string(self, text, **args):
        """Parse `text` with the grammar and translate the resulting AST."""
        # assert text == str(self.grammar.process(text))
        tree = self.grammar.get_ast(text)
        '''
        ptree = self.grammar.process(text)
        if ptree is None:
            return None
        tree = self.grammar.to_ast(ptree)
        '''
        return self.from_ast(tree, **args)
    def from_ast(self, tree, **args):
        """Translate an already-built AST, seeding the scope from the
        constructor defaults plus per-call keyword overrides."""
        if self.scope:
            # NOTE(review): `self.defaults.keys() == ['scope']` and
            # `iteritems()` below are Python 2 idioms; under Python 3 the
            # keys() comparison is always False and iteritems() raises —
            # confirm the targeted Python version before porting.
            if self.defaults.keys() == ['scope']:
                # caller supplied a ready-made scope object: mutate it
                scope = self.defaults['scope']
                for k, v in args.items():
                    setattr(scope, k, v)
            else:
                # build a fresh anonymous scope from copied defaults + args
                stuff = copy.deepcopy(self.defaults)
                stuff.update(args)
                Scope = type('Scope', (), {})
                scope = Scope()
                for k,v in stuff.iteritems():
                    setattr(scope, k, v)
            return self.translate(tree, scope)
        elif args:
            raise Exception('no scope -- cannot define variables: %s' % (args,))
        else:
            return self.translate(tree)
# vim: et sw=4 sts=4
| {
"content_hash": "0d14d943d343350d6223f17d6b673785",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 90,
"avg_line_length": 30.48051948051948,
"alnum_prop": 0.5223689816787388,
"repo_name": "jaredly/codetalker",
"id": "fba2c0f759628155c00a0bf5c7ca782f469e8afa",
"size": "2370",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codetalker/pgm/translator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "83304"
},
{
"name": "JavaScript",
"bytes": "8881"
},
{
"name": "Python",
"bytes": "109186"
},
{
"name": "Shell",
"bytes": "790"
}
],
"symlink_target": ""
} |
'''
Session helper: login and logout handling for the application under test.
'''
class SessionHelper:
    """Encapsulates login/logout interactions performed through the web driver."""
    def __init__(self, app):
        # `app` is the application fixture exposing the selenium driver as `wd`
        self.app = app
    def login(self, username, password):
        """Open the home page and submit the login form with the credentials."""
        wd = self.app.wd  # access to the driver is needed
        self.app.open_home_page()
        wd.find_element_by_name("username").click()
        wd.find_element_by_name("username").clear()
        wd.find_element_by_name("username").send_keys(username)
        wd.find_element_by_name("password").click()
        wd.find_element_by_name("password").clear()
        wd.find_element_by_name("password").send_keys(password)
        wd.find_element_by_css_selector('input[type="submit"]').click()
    def logout(self):
        """Click the logout link, ending the current session."""
        wd = self.app.wd
        wd.find_element_by_xpath("//a[@id='logout-link']").click()
    def ensure_logout(self):
        """Log out only if a session is currently active."""
        # removed an unused `wd = self.app.wd` local present in the original
        if self.is_logged_in():
            self.logout()
    def is_logged_in(self):
        """Return True when a 'Logout' link is present on the page."""
        wd = self.app.wd
        return len(wd.find_elements_by_link_text("Logout")) > 0
    def is_logged_in_as(self, username):
        """Return True when the active session belongs to `username`."""
        return self.get_logger_user() == username
    def get_logger_user(self):
        """Return the display name of the currently logged-in user."""
        wd = self.app.wd
        return wd.find_element_by_xpath("//span[@id='logged-in-user']").text
    def ensure_login(self, username, password):
        """Guarantee an active session for `username`, re-logging in if a
        different user is logged in. Raises ValueError if the login fails."""
        if self.is_logged_in():
            if self.is_logged_in_as(username):
                return
            else:
                self.logout()
        self.login(username, password)
        if not self.is_logged_in():
            raise ValueError("password or name is false")
"content_hash": "66af314254dcf9b02d40a4e0a8363ea8",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 90,
"avg_line_length": 31.196428571428573,
"alnum_prop": 0.5695477962220951,
"repo_name": "Oliebert/testing_mantis",
"id": "53373c6ea6608c5d4c6babac3edc0084f90c18df",
"size": "1758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixture/session.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "364"
},
{
"name": "Python",
"bytes": "25573"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.utils.functional import SimpleLazyObject
from django_town.core.exceptions import SettingError
from django_town.utils import DictObject, recursive_dict_update
# Built-in defaults for every django_town settings section. Values declared
# in the Django setting DJANGO_TOWN_SETTINGS are recursively merged over
# these defaults below.
_DJANGO_TOWN_SETTINGS = {
    "oauth2": {
        "USER_SECRET_KEY_LENGTH": 5,
        "SERVICE_SECRET_KEY_LENGTH": 5,
        "CLIENT_ID_LENGTH": 30,
        "CLIENT_SECRET_LENGTH": 30,
        "ACCESS_TOKEN_EXPIRATION": 3600,
        "SCOPE_MAX_LENGTH": 30,
        "CODE_EXPIRATION": 600
    },
    "cache": {
    },
    "rest": {
        "site_url": "",
    },
    "core": {
        "CACHE_PREFIX": "_dt",
        "DEFAULT_CACHE_DURATION": 3600 * 24 * 14,
    },
    "microdata": {
        "DATABASES": ["default"],
        "SHARD_HELPING_DATABASE": "default",
        "SLAVE_DATABASES": {}
    },
    "mongodb": {
        "HOST": "localhost",
        "PORT": 27017,
        "USERNAME": None,
        "PASSWORD": None,
    },
    "social": {
        "master_oauth2_service_id": 1
    }
}
# Merge project-level overrides; DJANGO_TOWN_SETTINGS is optional, so a
# missing attribute simply leaves the defaults in place.
try:
    recursive_dict_update(_DJANGO_TOWN_SETTINGS, (getattr(settings, "DJANGO_TOWN_SETTINGS")))
except AttributeError:
    pass
def lazy_load_settings(key, necessary_fields=None):
    """Build a case-insensitive settings object for the section `key`.

    Raises SettingError when any of `necessary_fields` is missing or falsy
    in the merged settings.
    """
    section = DictObject(_DJANGO_TOWN_SETTINGS[key], case_sensitive=False)
    for field in (necessary_fields or ()):
        if not getattr(section, field):
            raise SettingError("%s does not exist in %s setting" % (field, key))
    return section
# Public, lazily-evaluated settings sections. SimpleLazyObject defers the
# lazy_load_settings() call (and its necessary-field validation) until the
# first attribute access.
CORE_SETTINGS = SimpleLazyObject(lambda: lazy_load_settings('core'))
OAUTH2_SETTINGS = SimpleLazyObject(lambda: lazy_load_settings('oauth2', necessary_fields=['ACCESS_TOKEN_SECRET_KEY',
                                                                                          "REFRESH_TOKEN_SECRET_KEY",
                                                                                          'CODE_SECRET_KEY', 'SCOPE',
                                                                                          'BASE_URL']))
REST_SETTINGS = SimpleLazyObject(lambda: lazy_load_settings('rest'))
CACHE_SETTINGS = SimpleLazyObject(lambda: lazy_load_settings('cache'))
MICRODATA_SETTINGS = SimpleLazyObject(lambda: lazy_load_settings('microdata'))
SOCIAL_SETTINGS = SimpleLazyObject(lambda: lazy_load_settings('social'))
MONGODB_SETTINGS = SimpleLazyObject(lambda: lazy_load_settings('mongodb', necessary_fields=['DATABASES'])) | {
"content_hash": "6db3ed975ca1c46938a28b71856a0082",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 117,
"avg_line_length": 35.529411764705884,
"alnum_prop": 0.5749172185430463,
"repo_name": "uptown/django-town",
"id": "3a8ccbe40bd0115bddc36988f6471a1640238d27",
"size": "2416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_town/core/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "382389"
}
],
"symlink_target": ""
} |
import time
import operator
import os
import pickle
import shutil
import random
import sys
import json
import numpy as np
from collections import OrderedDict
from collections import defaultdict
from datetime import datetime
from sklearn.cross_validation import train_test_split
# import our costum modules
import config as cfg
import util
import fitness_func as ff
import algor
# import deap and scoop
from deap import tools as deapTools
from deap import base as deapBase
from deap import creator as deapCreator
from deap import gp as deapGP
from deap import algorithms as deapAlgor
from scoop import futures as fu
# These vars are made global for performance when using paralel/distributed computing
# Module-level experiment state. These are deliberately global so that
# scoop-distributed workers importing this module share the same dataset,
# primitive set and toolbox configuration.
# NOTE(review): `.iteritems()` below is Python 2 only — confirm target version.
param = dict()
#### Set random numbers
# cfg.seed == 0 means "draw a random seed"; the seed actually used is logged.
seed = random.randint(0,4294967295)
if (cfg.seed!=0):
    seed = cfg.seed
random.seed(seed); np.random.seed(seed)
param['seed'] = seed
#### set D_tr
rawData = np.loadtxt(cfg.datasetPath, delimiter=',')
yRaw = rawData[:, 0] # the first column is the class label!
XRaw = rawData[:, 1:]
# group row indices by class label
rawDataIdxDict = defaultdict(list)
for i in range(rawData.shape[0]):
    rawDataIdxDict[ yRaw[i] ].append(i)
# subsample each class down to cfg.nMaxSampleEachClass rows
# (sampling uses np.random.randint, i.e. with replacement — TODO confirm intended)
chosenIdxDict = defaultdict(list)
nChosenSample = 0
for classIdx,idxList in rawDataIdxDict.iteritems():
    chosenIdxList = [i for i in idxList]
    nSample = len(idxList)
    if nSample > cfg.nMaxSampleEachClass:
        del chosenIdxList[:]
        chosenIdxIdxList = list(np.random.randint(nSample, size=cfg.nMaxSampleEachClass))
        for i in chosenIdxIdxList:
            chosenIdxList.append( idxList[i] )
    chosenIdxDict[classIdx] = chosenIdxList
    nChosenSample += len(chosenIdxList)
# materialize the chosen subsample into dense arrays
y = np.empty( (nChosenSample,1) )
X = np.empty( (nChosenSample,XRaw.shape[1]) )
fillingIdx = 0
for classIdx, chosenIdxList in chosenIdxDict.iteritems():
    for i in chosenIdxList:
        y[fillingIdx,:] = yRaw[i]
        X[fillingIdx,:] = XRaw[i]
        fillingIdx += 1
assert (y.shape[0]==X.shape[0]==nChosenSample)
# Split test, train
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=cfg.testSize,
                                                    random_state=random.randint(0,100000), stratify=y)
y_train = y_train.reshape( (y_train.shape[0],1) )
# `data` holds the training set with the label prepended as column 0
data = np.concatenate( (y_train,X_train), axis=1 )
dataDict = defaultdict(list)
for idx, datum in enumerate(data):
    classIdx = int(datum[0]) # the first element _must_ be classIdx
    dataDict[classIdx].append(idx) # contain only the idx
assert (0 in dataDict), 'idx does not begin at 0'
#### Set for fitness computation
recallPercentileRankDict = defaultdict(tuple) # will contain recallFitness values of individuals
simScoreMatDict = dict() # will contain simScore of individuals
#### init Deap GP
# Set Operators and Operands
# Note: Tanimoto: (a/(a+b+c)), Forbes: ?
nOperand = 4 # at most: a, b, c, d
primitiveSet = deapGP.PrimitiveSet("mainPrimitiveSet", nOperand)
primitiveSet.renameArguments(ARG0="a")
primitiveSet.renameArguments(ARG1="b")
primitiveSet.renameArguments(ARG2="c")
primitiveSet.renameArguments(ARG3="d")
primitiveSet.addPrimitive(np.add, arity=2, name="add")
primitiveSet.addPrimitive(np.subtract, arity=2, name="sub")
primitiveSet.addPrimitive(np.multiply, arity=2, name="mul")
primitiveSet.addPrimitive(util.protectedDiv, arity=2, name="pDiv")
# primitiveSet.addPrimitive(np.minimum, arity=2, name="min")
# primitiveSet.addPrimitive(np.maximum, arity=2, name="max")
# primitiveSet.addPrimitive(np.sqrt, arity=1, name="sqrt")
# primitiveSet.addPrimitive(util.pow, arity=1, name="pow")
# primitiveSet.addPrimitive(util.powhalf, arity=1, name="powhalf")
# primitiveSet.addPrimitive(np.log10, arity=1, name="log")
# primitiveSet.addEphemeralConstant("const", lambda: 0.5)
# Settting up the fitness and the individuals
# single-objective maximization (weights=(1.0,))
deapCreator.create("Fitness", deapBase.Fitness, weights=(1.0,))
deapCreator.create("Individual", deapGP.PrimitiveTree,
                   fitness=deapCreator.Fitness, primitiveSet=primitiveSet)
# Setting up the operator of Genetic Programming
toolbox = deapBase.Toolbox()
toolbox.register("map", fu.map) # paralel and distributed computing
toolbox.register("expr", deapGP.genHalfAndHalf, # Half-full, halfGrow
                 pset=primitiveSet,
                 min_=cfg.treeMinDepth, # tree min depth
                 max_=cfg.treeMaxDepth) # tree max depth
toolbox.register("individual", deapTools.initIterate, # alternatives: initRepeat, initCycle
                 deapCreator.Individual,
                 toolbox.expr)
toolbox.register("population", deapTools.initRepeat,
                 list,
                 toolbox.individual)
toolbox.register("compile", deapGP.compile,
                 pset=primitiveSet)
toolbox.register("evaluate", ff.compute, data=data,
                 recallPercentileRankDict=recallPercentileRankDict, simScoreMatDict=simScoreMatDict)
toolbox.register("select", deapTools.selRoulette)# : selRandom, selBest, selWorst, selTournament, selDoubleTournament
toolbox.register("mate", deapGP.cxOnePoint)# :cxOnePointLeafBiased
# cap tree height at 17 (the classic Koza limit) after crossover/mutation
toolbox.decorate("mate", deapGP.staticLimit(key=operator.attrgetter("height"), max_value=17))
toolbox.register("expr_mut", deapGP.genFull,
                 min_=cfg.subtreeMinDepthMut, # subtree min depth
                 max_=cfg.subtreeMaxDepthMut) # subtree max depth
toolbox.register("mutate", deapGP.mutUniform,
                 expr=toolbox.expr_mut,
                 pset=primitiveSet)
toolbox.decorate("mutate", deapGP.staticLimit(key=operator.attrgetter("height"), max_value=17))
# generators for Tanimoto-seeded individuals used to prime the population
toolbox.register("exprTanimoto", util.tanimoto,
                 pset=primitiveSet,
                 min_=cfg.treeMinDepth, max_=cfg.treeMaxDepth)
toolbox.register("individualTanimoto", deapTools.initIterate,
                 deapCreator.Individual,
                 toolbox.exprTanimoto)
toolbox.register("populationTanimoto", deapTools.initRepeat,
                 list,
                 toolbox.individualTanimoto)
def main():
    """Run one GP experiment: create the experiment directory, dump the
    train/test data, evolve the population, and write the run logs.

    NOTE: this module is Python 2 (`print` statement below); files are
    opened in 'wb' mode for json.dump accordingly.
    """
    # Experiment output directory tagged with the config tag + timestamp.
    xprmtDir = cfg.xprmtDir+"/"+"xprmt-"+cfg.xprmtTag+"."+time.strftime("%Y%m%d-%H%M%S")
    param['xprmtRootDir'] = cfg.xprmtDir; param['xprmtDir'] = xprmtDir
    os.makedirs(xprmtDir)
    # Keep a copy of the configuration actually used for this run.
    shutil.copy2('config.py', xprmtDir+'/config_used.txt')
    os.makedirs(xprmtDir+'/data')
    np.savetxt(xprmtDir+"/data/X_train.csv", X_train, delimiter=",")
    np.savetxt(xprmtDir+"/data/y_train.csv", y_train, delimiter=",")
    np.savetxt(xprmtDir+"/data/X_test.csv", X_test, delimiter=",")
    np.savetxt(xprmtDir+"/data/y_test.csv", y_test, delimiter=",")
    # Per-generation statistics over fitness values and tree sizes.
    stats_fit = deapTools.Statistics(lambda ind: ind.fitness.values)
    stats_size = deapTools.Statistics(len)
    mstats = deapTools.MultiStatistics(fitness=stats_fit, size=stats_size)
    mstats.register("avg", np.mean); mstats.register("std", np.std)
    mstats.register("min", np.min); mstats.register("max", np.max)
    # Init
    pop = toolbox.population(n=cfg.nIndividual)
    hof = deapTools.HallOfFame(cfg.nHOF,similar=util.equalIndividual) # from all generations over the whole evolution
    # Seed a percentage of the population with Tanimoto-based individuals by
    # overwriting randomly chosen members (indices may repeat, so the actual
    # number replaced can be slightly lower than nTanimotoIndividual).
    nTanimotoIndividual = int(cfg.nTanimotoIndividualInPercentage/100.0*cfg.nIndividual)
    tanimotoPop = toolbox.populationTanimoto(n=nTanimotoIndividual)
    individualIdxToReplaceList = np.random.randint(cfg.nIndividual, size=nTanimotoIndividual)
    for i,ind in enumerate(tanimotoPop):
        idxToReplace = individualIdxToReplaceList[i]
        pop[idxToReplace] = ind
    # evolution
    print 'Evolution begins ...'
    param['timeStartEvol'] = time.strftime("%Y%m%d-%H:%M:%S")
    # Persist the run parameters before the (long) evolution starts.
    with open(xprmtDir+"/log2.json", 'wb') as f:
        json.dump(param, f, indent=2, sort_keys=True)
    evolStartTime = time.time()
    pop, log = algor.eaSimple(pop, toolbox, cxpb=cfg.pCx, mutpb=cfg.pMut, ngen=cfg.nMaxGen,
                              data=data, dataDict=dataDict,
                              recallPercentileRankDict=recallPercentileRankDict, simScoreMatDict=simScoreMatDict,
                              xprmtDir=xprmtDir, stats=mstats, halloffame=hof, verbose=True)
    evolTime = time.time()-evolStartTime
    param['evolTime'] = evolTime
    param['timeEndEvol'] = time.strftime("%Y%m%d-%H:%M:%S")
    print("Evolution took %.3f minutes" % (float(evolTime)/60.0))
    # post evolution: record the generation count and rewrite the logs.
    param['nGen'] = len(log.select("gen"))
    with open(xprmtDir+"/log.txt", "wb") as f:
        f.write(str(log))
    with open(xprmtDir+"/log2.json", 'wb') as f:
        json.dump(param, f, indent=2, sort_keys=True)
if __name__ == "__main__":
    main()
| {
"content_hash": "f1dfc3db2a1b9b58681f39d38bb48dd0",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 117,
"avg_line_length": 39.318385650224215,
"alnum_prop": 0.6730155109489051,
"repo_name": "tttor/csipb-jamu-prj",
"id": "8a25de7f123d9645989cbf6f6dc5347a7a4137f9",
"size": "8785",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "predictor/connectivity/similarity/compound-kernel/genetic-programming/src/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "25778"
},
{
"name": "C++",
"bytes": "63824"
},
{
"name": "CSS",
"bytes": "156653"
},
{
"name": "HTML",
"bytes": "357759"
},
{
"name": "JavaScript",
"bytes": "2345206"
},
{
"name": "LOLCODE",
"bytes": "293"
},
{
"name": "Makefile",
"bytes": "732"
},
{
"name": "PHP",
"bytes": "749826"
},
{
"name": "Python",
"bytes": "299608"
},
{
"name": "R",
"bytes": "2974"
},
{
"name": "Shell",
"bytes": "16644"
},
{
"name": "TeX",
"bytes": "146439"
},
{
"name": "TypeScript",
"bytes": "195860"
}
],
"symlink_target": ""
} |
import alsaaudio, time, audioop
# Map each ALSA card name to its "hw:<n>,0" device string.
card_info={}
for device_number, card_name in enumerate (alsaaudio.cards()):
    card_info[card_name] = "hw:%s,0" % device_number
inp=alsaaudio.PCM(alsaaudio.PCM_CAPTURE,alsaaudio.PCM_NONBLOCK,card_info["Device"]) # select the USB sound card
inp.setchannels(1) #Mono
inp.setrate(8000) #8000Hz
inp.setformat(alsaaudio.PCM_FORMAT_S16_LE) #16bit
inp.setperiodsize(160)
# Non-blocking capture: poll until a period is available, then print its
# peak amplitude (audioop.max with 2-byte samples). Python 2 print statement.
while True:
    l,data=inp.read()
    if l:
        print audioop.max(data, 2)
break | {
"content_hash": "a9188598240a4c9f4e28f9f2fe3aa3af",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 112,
"avg_line_length": 27.22222222222222,
"alnum_prop": 0.7204081632653061,
"repo_name": "aleksailic/smart-alarm-client",
"id": "426c5917d083becdbb436b25c4701806d5ff1e67",
"size": "509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "getdB.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "7646"
},
{
"name": "Python",
"bytes": "509"
}
],
"symlink_target": ""
} |
def quickSort(alist):
    """Sort *alist* in place using recursive quicksort."""
    quickSortHelper(alist, 0, len(alist) - 1)


def quickSortHelper(alist, first, last):
    """Recursively quicksort the slice alist[first:last+1] in place."""
    if first >= last:
        return
    split = partition(alist, first, last)
    quickSortHelper(alist, first, split - 1)
    quickSortHelper(alist, split + 1, last)


def partition(alist, first, last):
    """Partition alist[first:last+1] around the pivot alist[first].

    Moves every value <= pivot to its left and every value >= pivot to
    its right, places the pivot at its final position, and returns that
    index (the split point).
    """
    pivotvalue = alist[first]
    left = first + 1
    right = last
    while True:
        # Advance past values that already sit on the correct side.
        while left <= right and alist[left] <= pivotvalue:
            left += 1
        while alist[right] >= pivotvalue and right >= left:
            right -= 1
        if right < left:
            break
        # Both markers point at misplaced values: exchange them.
        alist[left], alist[right] = alist[right], alist[left]
    # Drop the pivot into its final slot.
    alist[first], alist[right] = alist[right], alist[first]
    return right
| {
"content_hash": "161c0b4211091c62ab1864156a93221e",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 49,
"avg_line_length": 22.642857142857142,
"alnum_prop": 0.6151419558359621,
"repo_name": "robin1885/algorithms-exercises-using-python",
"id": "35f45f1376bdd40e1df397237a17a9c1298589a8",
"size": "951",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source-code-from-author-book/Listings-for-Second-Edition/listing_5_15.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "182896"
}
],
"symlink_target": ""
} |
import pytest
from pytest_embedded import Dut
@pytest.mark.supported_targets
@pytest.mark.generic
def test_newlib(dut: Dut) -> None:
    """Run the newlib test app on the DUT and verify its Unity test output."""
    dut.expect_unity_test_output()
| {
"content_hash": "f0b66ce5c51ca7d506c5fa2b737f40ee",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 34,
"avg_line_length": 21.25,
"alnum_prop": 0.7588235294117647,
"repo_name": "espressif/esp-idf",
"id": "81a2f579b4ccde07970a45c5471d3c5878e1467d",
"size": "273",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "components/newlib/test_apps/pytest_newlib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "388440"
},
{
"name": "Batchfile",
"bytes": "5451"
},
{
"name": "C",
"bytes": "69102322"
},
{
"name": "C++",
"bytes": "992772"
},
{
"name": "CMake",
"bytes": "539972"
},
{
"name": "Dockerfile",
"bytes": "3290"
},
{
"name": "Makefile",
"bytes": "23747"
},
{
"name": "Nim",
"bytes": "1005"
},
{
"name": "PowerShell",
"bytes": "4537"
},
{
"name": "Python",
"bytes": "2158180"
},
{
"name": "Roff",
"bytes": "101"
},
{
"name": "Shell",
"bytes": "126143"
}
],
"symlink_target": ""
} |
"""social
Revision ID: 352e12e0f114
Revises: 3f69a0a6351f
Create Date: 2017-05-22 16:08:02.658574
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '352e12e0f114'
down_revision = '3f69a0a6351f'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
| {
"content_hash": "4647db8a8674ea8a6a304d29f38e120a",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 65,
"avg_line_length": 18.862068965517242,
"alnum_prop": 0.6782449725776966,
"repo_name": "exleym/IWBT",
"id": "a05a50692c227bcb3596a8b49c95ce5e1a05c059",
"size": "547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alembic/versions/352e12e0f114_social.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1118"
},
{
"name": "HTML",
"bytes": "25601"
},
{
"name": "JavaScript",
"bytes": "528"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "72714"
},
{
"name": "Shell",
"bytes": "2018"
}
],
"symlink_target": ""
} |
from libmt94x.remittance_info import AbstractRemittanceInfo
from libmt94x.transfer_failed_codes import TransferFailed
class InfoToAcccountOwnerSubField(object):
    '''Abstract base class for all subfields of InformationToAcccountOwner.

    Each concrete subclass defines a ``tag`` class attribute naming its
    MT940 subfield code (e.g. 'BENM', 'EREF').
    '''
    pass
class BeneficiaryParty(InfoToAcccountOwnerSubField):
    '''Beneficiary party details (tag BENM).'''
    tag = 'BENM'

    def __init__(self, account_number=None, bic=None, name=None, city=None):
        self.account_number = account_number
        self.bic = bic
        self.name = name
        self.city = city


class BusinessPurpose(InfoToAcccountOwnerSubField):
    '''Business purpose of the transaction (tag BUSP).'''
    tag = 'BUSP'

    def __init__(self, id_code=None, sepa_transaction_type=None):
        self.id_code = id_code
        self.sepa_transaction_type = sepa_transaction_type


class Charges(InfoToAcccountOwnerSubField):
    '''Charges applied to the transaction (tag CHGS).'''
    tag = 'CHGS'

    def __init__(self, charges):
        self.charges = charges


class ClientReference(InfoToAcccountOwnerSubField):
    '''Client-supplied reference (tag CREF).'''
    tag = 'CREF'

    def __init__(self, client_reference):
        self.client_reference = client_reference


class CounterPartyID(InfoToAcccountOwnerSubField):
    '''NL term: Tegenpartij ID (tag CNTP).'''
    tag = 'CNTP'

    def __init__(self, account_number=None, bic=None, name=None, city=None):
        self.account_number = account_number
        self.bic = bic
        self.name = name
        self.city = city


class CounterPartyIdentification(InfoToAcccountOwnerSubField):
    '''Counterparty identification code (tag ID).'''
    tag = 'ID'

    def __init__(self, id_code):
        self.id_code = id_code


class CreditorID(InfoToAcccountOwnerSubField):
    '''NL term: Incassant ID (tag CSID).'''
    tag = 'CSID'

    def __init__(self, creditor_id):
        self.creditor_id = creditor_id


class EndToEndReference(InfoToAcccountOwnerSubField):
    '''NL term: Uniek kenmerk (tag EREF).'''
    tag = 'EREF'

    def __init__(self, end_to_end_reference):
        self.end_to_end_reference = end_to_end_reference


class ExchangeRate(InfoToAcccountOwnerSubField):
    '''Exchange rate applied to the transaction (tag EXCH).'''
    tag = 'EXCH'

    def __init__(self, exchange_rate):
        self.exchange_rate = exchange_rate


class InstructionID(InfoToAcccountOwnerSubField):
    '''Instruction identification (tag IREF).'''
    tag = 'IREF'

    def __init__(self, instruction_id):
        self.instruction_id = instruction_id


class MandateReference(InfoToAcccountOwnerSubField):
    '''NL term: Machtigingskenmerk (tag MARF).'''
    tag = 'MARF'

    def __init__(self, mandate_reference):
        self.mandate_reference = mandate_reference


class OrderingParty(InfoToAcccountOwnerSubField):
    '''Ordering party details (tag ORDP).'''
    tag = 'ORDP'

    def __init__(self, account_number=None, bic=None, name=None, city=None):
        self.account_number = account_number
        self.bic = bic
        self.name = name
        self.city = city


class PaymentInformationID(InfoToAcccountOwnerSubField):
    '''NL term: Batch ID (tag PREF).'''
    tag = 'PREF'

    def __init__(self, payment_information_id):
        self.payment_information_id = payment_information_id


class PurposeCode(InfoToAcccountOwnerSubField):
    '''NL term: Speciale verwerkingscode (tag PURP).'''
    tag = 'PURP'

    def __init__(self, purpose_of_collection):
        self.purpose_of_collection = purpose_of_collection
class RemittanceInformation(InfoToAcccountOwnerSubField):
    '''NL term: Omschrijvingsregels (tag REMI).

    ``remittance_info`` must be an AbstractRemittanceInfo instance;
    anything else raises ValueError.
    '''
    tag = 'REMI'

    def __init__(self, remittance_info, code=None, issuer=None):
        if not isinstance(remittance_info, AbstractRemittanceInfo):
            raise ValueError(
                "Value for `remittance_info` must be instance of AbstractRemittanceInfo")
        self.remittance_info = remittance_info
        # TODO: Are these two even used??? They are in the spec but do not
        # appear in examples
        self.code = code
        self.issuer = issuer
class ReturnReason(InfoToAcccountOwnerSubField):
    '''NL term: Uitval reden (tag RTRN).

    ``reason_code`` is validated against the TransferFailed code table;
    invalid codes raise ValueError.
    '''
    tag = 'RTRN'

    def __init__(self, reason_code):
        '''NOTE: The ING IBP spec also mentions a legacy R-Type integer
        parameter which has the following possible values:
            1 - Reject (geweigerde)
            2 - Return (retourbetaling)
            3 - Refund (terugbetaling)
            4 - Reversal (herroeping)
            5 - Cancellation (annulering)
        The R-Type is concatenated to the `reason_code`. We do not implement the R-Type,
        we just mention it here for reference.'''
        transfer_failed = TransferFailed.get_instance()
        if not transfer_failed.code_is_valid(reason_code):
            raise ValueError("Value `reason_code` is invalid: %s" % reason_code)
        self.reason_code = reason_code
class UltimateBeneficiary(InfoToAcccountOwnerSubField):
    '''Ultimate beneficiary of the transaction (tag ULTB).'''
    tag = 'ULTB'

    def __init__(self, name):
        self.name = name


class UltimateCreditor(InfoToAcccountOwnerSubField):
    '''NL term: Uiteindelijke incassant (tag ULTC).'''
    tag = 'ULTC'

    def __init__(self, name=None, id=None):
        self.name = name
        self.id = id


class UltimateDebtor(InfoToAcccountOwnerSubField):
    '''NL term: Uiteindelijke geincasseerde (tag ULTD).'''
    tag = 'ULTD'

    def __init__(self, name=None, id=None):
        self.name = name
        self.id = id
class InfoToAcccountOwnerSubFieldOrder(object):
    '''Defines the serialization order of InformationToAcccountOwner subfields.'''

    # This is the order in which the fields must be written
    fields = (
        ReturnReason,
        BusinessPurpose,
        ClientReference,
        EndToEndReference,
        PaymentInformationID,
        InstructionID,
        MandateReference,
        CreditorID,
        CounterPartyID,
        BeneficiaryParty,
        OrderingParty,
        RemittanceInformation,
        CounterPartyIdentification,
        PurposeCode,
        UltimateBeneficiary,
        UltimateCreditor,
        UltimateDebtor,
        ExchangeRate,
        Charges,
    )

    @classmethod
    def get_field_classes(cls):
        '''Return the subfield classes in the required output order.'''
        return cls.fields
| {
"content_hash": "5ebf23adfae049dcbab2e9fafa5067f3",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 89,
"avg_line_length": 25.475555555555555,
"alnum_prop": 0.6540474528960223,
"repo_name": "gingerpayments/python-libmt94x",
"id": "da113e68c55654b622989d40022d8074525fec59",
"size": "5732",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "libmt94x/info_acct_owner_subfields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "142375"
}
],
"symlink_target": ""
} |
'''
NetConsole - file ``/etc/sysconfig/netconsole``
===============================================
This parser reads the ``/etc/sysconfig/netconsole`` file. It uses the
``SysconfigOptions`` parser class to convert the file into a dictionary of
options.
Sample data::
# This is the configuration file for the netconsole service. By starting
# this service you allow a remote syslog daemon to record console output
# from this system.
# The local port number that the netconsole module will use
LOCALPORT=6666
Examples:
>>> config = shared[NetConsole]
>>> 'LOCALPORT' in config.data
True
>>> 'DEV' in config # Direct access to options
False
'''
from .. import parser, SysconfigOptions, LegacyItemAccess
from insights.specs import netconsole
@parser(netconsole)
class NetConsole(SysconfigOptions, LegacyItemAccess):
    '''
    Contents of the ``/etc/sysconfig/netconsole`` file. Uses the
    ``SysconfigOptions`` shared parser class; ``LegacyItemAccess``
    additionally allows direct item access (``'DEV' in config`` — see the
    module docstring examples).
    '''
    pass
| {
"content_hash": "680a7ae6c4c275b00473c11e0eb255ff",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 77,
"avg_line_length": 25.743589743589745,
"alnum_prop": 0.6752988047808764,
"repo_name": "wcmitchell/insights-core",
"id": "37c23c07c12524396cad751cd12bab9f9bec26b9",
"size": "1004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "insights/parsers/netconsole.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "19339"
},
{
"name": "Jupyter Notebook",
"bytes": "91793"
},
{
"name": "Python",
"bytes": "3414025"
},
{
"name": "Shell",
"bytes": "2274"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from terp import views
# URL routes for the terp app.
# NOTE: the id capture groups use [0-9]+ (one or more digits). The previous
# pattern [0-9] matched only a single digit, so any story/session with an
# auto-increment id of 10 or more would 404. [0-9]+ is backward-compatible:
# every URL that matched before still matches.
urlpatterns = [
    url(r'^$', views.HomeView.as_view(), name='home'),
    url(r'^load/$', views.LoadStoryView.as_view(), name='load_story'),
    url(r'^start/(?P<story_id>[0-9]+)/$', views.StartStoryView.as_view(), name='start_story'),
    url(r'^restart/(?P<story_id>[0-9]+)/$', views.RestartStoryView.as_view(), name='restart_story'),
    url(r'^play/(?P<session_id>[0-9]+)/$', views.PlaySessionView.as_view(), name='play'),
    url(r'^play/(?P<session_id>[0-9]+)/initial/$', views.PlaySessionInitialView.as_view(), name='play_initial'),
    url(r'^play/(?P<session_id>[0-9]+)/history/$', views.PlaySessionHistoryView.as_view(), name='play_history'),
    url(r'^play/(?P<session_id>[0-9]+)/command/$', views.PlaySessionCommandView.as_view(), name='play_command'),
]
| {
"content_hash": "3d9ce0d279d0e97356c86a4e1fcfb28f",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 110,
"avg_line_length": 59.07142857142857,
"alnum_prop": 0.6457073760580411,
"repo_name": "moosepod/moosezmachine",
"id": "bd1569372e0d9e6f9229c7b22f4b269ff7fe73c9",
"size": "827",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_terp/terp/urls.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4898"
},
{
"name": "Python",
"bytes": "352601"
}
],
"symlink_target": ""
} |
import logging
from .backends.cache import cache
LOG = logging.getLogger(__name__)
class BaseManager(object):
    """Singleton manager that stores group-related objects in the cache.

    Cache keys are laid out as::

        <name>.<scope>.<object id>  -> the object
        <name>.<scope>.count        -> number of stored objects
        <name>.<scope>.all          -> list of all stored objects

    Subclasses are expected to provide ``name`` and ``scope`` class
    attributes, or callers pass them to ``__init__``.
    """

    CACHE_KEY = '{0}.{1}'
    CACHE_COUNT_KEY = '{0}.count'
    CACHE_ALL_KEY = '{0}.all'

    @property
    def db(self):
        """Return the cache store backend."""
        return cache

    def __init__(self, name=None, scope=None, *args, **kwargs):
        self.name = name or self.name
        self.scope = scope or self.scope
        self.clear()

    def get(self, id):
        """Return the stored item for `id` (backend default when missing)."""
        return self.db.get(self.get_item_cache_key(id))

    def clear(self):
        """Delete the aggregate cache keys ("count" and "all")."""
        # Per-item keys are intentionally NOT cleared here — doing so froze
        # Django at startup (see original note below):
        # keys = [self.get_item_cache_key(item.id)
        #         for item in self.all()]
        keys = [self.get_all_cache_key(),
                self.get_cache_count_key()]
        return self.db.delete_many(keys)

    def count(self):
        """Return the number of stored items (0 when the key is unset)."""
        count = self.db.get(self.get_cache_count_key())
        return count if count else 0

    def all(self):
        """Return the list of stored items ([] when the key is unset)."""
        items = self.db.get(self.get_all_cache_key())
        return items if items else []

    def add(self, item):
        """Add `item` to the group, updating the count and "all" keys."""
        # we cant clear this after restart now
        # saved_item = self.db.get(self.get_item_cache_key(item.id))
        # if not saved_item:
        self.db.set(self.get_item_cache_key(item.id), item, None)
        # Increment the counter manually because the standard cache backend
        # does not support wildcard key queries.
        count = self.db.get(self.get_cache_count_key())
        if not count:
            count = 1
        else:
            count += 1
        self.db.set(self.get_cache_count_key(), count, None)
        # (local renamed from `all` to avoid shadowing the builtin)
        items = self.all()
        items += [item]
        self.db.set(self.get_all_cache_key(), items, None)

    def delete(self, id):
        """Remove the item with `id` and update the aggregate keys."""
        # delete single object
        self.db.delete(self.get_item_cache_key(id))
        # update the "all" list
        items = [item for item in self.all() if item.id != id]
        self.db.set(self.get_all_cache_key(), items, None)
        count = self.db.get(self.get_cache_count_key())
        if count:
            count -= 1
        self.db.set(self.get_cache_count_key(), count, None)

    def get_cache_key(self):
        """Return the "<name>.<scope>" key prefix."""
        return self.CACHE_KEY.format(self.name, self.scope)

    def get_cache_count_key(self):
        """Return the cache key holding the item count."""
        return self.CACHE_COUNT_KEY.format(self.get_cache_key())

    def get_item_cache_key(self, id):
        """Return the unique cache key for a single item."""
        return '%s.%s' % (self.get_cache_key(), id)

    def get_all_cache_key(self):
        """Return the cache key holding the list of all items."""
        return self.CACHE_ALL_KEY.format(self.get_cache_key())

    _instance = None

    def __new__(cls, *args, **kwargs):
        """A singleton implementation of Manager. There can be only one."""
        if not cls._instance:
            # BUG FIX: object.__new__ rejects extra arguments (TypeError on
            # Python 3) — the constructor arguments are consumed by
            # __init__, so they must not be forwarded to __new__.
            cls._instance = super(BaseManager, cls).__new__(cls)
        return cls._instance
| {
"content_hash": "aa794087ae48078e93ecfeb938ddbf15",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 75,
"avg_line_length": 24.846715328467152,
"alnum_prop": 0.5543478260869565,
"repo_name": "leonardo-modules/leonardo-channels",
"id": "56c2839ea0edc823cbd1f096b51bf8c3d3a4d58e",
"size": "3405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "leonardo_channels/managers/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "15064"
},
{
"name": "Python",
"bytes": "26351"
},
{
"name": "Shell",
"bytes": "102"
}
],
"symlink_target": ""
} |
import requests
import json
def yo_momma():
    """Return a random yo momma joke (str).

    Raises a requests exception on network failure or timeout, and a
    JSON/KeyError if the API response is malformed.
    """
    # A timeout keeps the call from hanging indefinitely if the API stalls.
    data = requests.get("http://api.yomomma.info/", timeout=10).text
    return json.loads(data)["joke"]
| {
"content_hash": "2403f3b587179a286aa3d5eb2dee2e6d",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 56,
"avg_line_length": 23.625,
"alnum_prop": 0.6455026455026455,
"repo_name": "neelkamath/hack.chat-bot",
"id": "9f85336f24716aaa3196ddac1f58a1581dc9d68a",
"size": "213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/commands/jokes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40774"
}
],
"symlink_target": ""
} |
'''test_cpython.py - test the JNI CPython class
python-javabridge is licensed under the BSD license. See the
accompanying file LICENSE for details.
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2013 Broad Institute
All rights reserved.
'''
import unittest
import javabridge
class TestCPython(unittest.TestCase):
    """Tests for the org.cellprofiler.javabridge.CPython JNI class."""

    def setUp(self):
        # Wrap the Java CPython class and instantiate one bridge object
        # per test.
        self.cpython = javabridge.JClassWrapper(
            "org.cellprofiler.javabridge.CPython")()

    def test_01_01_exec(self):
        # Smoke test: executing a no-op statement must not raise.
        self.cpython.execute("pass")
def test_01_02_locals(self):
jlocals = javabridge.JClassWrapper('java.util.HashMap')()
jref = javabridge.JClassWrapper('java.util.ArrayList')()
jlocals.put("numerator", "6")
jlocals.put("denominator", "2")
code = """
global javabridge
import javabridge
def fn(numerator, denominator, answer):
result = int(numerator) / int(denominator)
javabridge.call(answer, "add", "(Ljava/lang/Object;)Z", str(result))
fn(numerator, denominator, answer)
"""
jlocals.put("code", code)
jlocals.put("answer", jref.o)
self.cpython.execute(code, jlocals.o, None)
self.assertEqual(float(javabridge.to_string(jref.get(0))), 3)
def test_01_03_globals(self):
jglobals = javabridge.JClassWrapper('java.util.HashMap')()
jref = javabridge.JClassWrapper('java.util.ArrayList')()
jglobals.put("numerator", "6")
jglobals.put("denominator", "2")
jglobals.put("answer", jref.o)
self.cpython.execute("""
global javabridge
import javabridge
def fn():
result = int(numerator) / int(denominator)
javabridge.call(answer, "add", "(Ljava/lang/Object;)Z", str(result))
fn()
""", None, jglobals.o)
self.assertEqual(float(javabridge.to_string(jref.get(0))), 3)
def test_01_04_globals_equals_locals(self):
jglobals = javabridge.JClassWrapper('java.util.HashMap')()
jref = javabridge.JClassWrapper('java.util.ArrayList')()
jglobals.put("numerator", "6")
jglobals.put("denominator", "2")
jglobals.put("answer", jref.o)
#
# The import will be added to "locals", but that will be the globals.
#
self.cpython.execute("""
import javabridge
def fn():
result = int(numerator) / int(denominator)
javabridge.call(answer, "add", "(Ljava/lang/Object;)Z", str(result))
fn()
""", jglobals.o, jglobals.o)
self.assertEqual(float(javabridge.to_string(jref.get(0))), 3) | {
"content_hash": "ce603a4b7bb1b17b45ce4d33cfe9d438",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 77,
"avg_line_length": 34.94444444444444,
"alnum_prop": 0.6554054054054054,
"repo_name": "CellProfiler/python-javabridge",
"id": "86601843b4723149a4ed9657690287e03f7b0655",
"size": "2516",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "javabridge/tests/test_cpython.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "27337"
},
{
"name": "Java",
"bytes": "15113"
},
{
"name": "Python",
"bytes": "1850110"
},
{
"name": "Shell",
"bytes": "1213"
}
],
"symlink_target": ""
} |
"""
Django settings for dashboard project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository — rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'kxmt78byexqz$9m!9o3f7!h3d$lz@2fe9-+te7!=0rfwm2afcb'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# NOTE(review): '*' accepts any Host header — acceptable for development only.
ALLOWED_HOSTS = ['*']

# Application definition

INSTALLED_APPS = [
    'stats.apps.StatsConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.humanize',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'dashboard.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                # NOTE(review): 'request' is listed twice (also above);
                # harmless, but one entry can be removed.
                'django.template.context_processors.request',
            ],
        },
    },
]

WSGI_APPLICATION = 'dashboard.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases

# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
#     }
# }
#
# Active backend: local PostgreSQL (docker credentials); the sqlite and
# mysql alternatives are kept commented out for reference.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'USER' : 'docker',
        'PASSWORD' : 'docker',
        'HOST' : 'localhost',
        'NAME' : 'pulsar_dashboard',
    }
}

# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.mysql',
#         'USER' : 'root',
#         'HOST' : 'localhost',
#         'NAME' : 'pulsar_dashboard',
#     }
# }

# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/

STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static/")

# Log INFO and above to the console (including django.db.backends queries).
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {
            'level': 'INFO',
            'class': 'logging.StreamHandler',
        }
    },
    'loggers': {
        'django.db.backends': {
            'handlers': ['console'],
            'level': 'INFO',
        },
    }
}
| {
"content_hash": "0d273b9661c1d6ca8553533296fefa04",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 91,
"avg_line_length": 24.932098765432098,
"alnum_prop": 0.6377816291161178,
"repo_name": "agarman/pulsar",
"id": "05eb47df899f5ad0e993cfefc4c08e1693f834ca",
"size": "4618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dashboard/django/dashboard/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1259"
},
{
"name": "C++",
"bytes": "17844"
},
{
"name": "Java",
"bytes": "4280272"
},
{
"name": "Protocol Buffer",
"bytes": "9515"
},
{
"name": "Shell",
"bytes": "35298"
}
],
"symlink_target": ""
} |
import logging
from typing import Optional
from google.api_core import exceptions
from google.auth import credentials as auth_credentials
from google.cloud.aiplatform import base, initializer
from google.cloud.aiplatform import compat
from google.cloud.aiplatform import utils
from google.cloud.aiplatform_v1beta1.types import metadata_store as gca_metadata_store
class _MetadataStore(base.VertexAiResourceNounWithFutureManager):
"""Managed MetadataStore resource for Vertex AI"""
client_class = utils.MetadataClientWithOverride
_is_client_prediction_client = False
_resource_noun = "metadataStores"
_getter_method = "get_metadata_store"
_delete_method = "delete_metadata_store"
    def __init__(
        self,
        metadata_store_name: Optional[str] = "default",
        project: Optional[str] = None,
        location: Optional[str] = None,
        credentials: Optional[auth_credentials.Credentials] = None,
    ):
        """Retrieves an existing MetadataStore given a MetadataStore name or ID.

        Args:
            metadata_store_name (str):
                Optional. A fully-qualified MetadataStore resource name or metadataStore ID.
                Example: "projects/123/locations/us-central1/metadataStores/my-store" or
                "my-store" when project and location are initialized or passed.
                If not set, metadata_store_name will be set to "default".
            project (str):
                Optional project to retrieve resource from. If not set, project
                set in aiplatform.init will be used.
            location (str):
                Optional location to retrieve resource from. If not set, location
                set in aiplatform.init will be used.
            credentials (auth_credentials.Credentials):
                Custom credentials to use to upload this model. Overrides
                credentials set in aiplatform.init.
        """
        super().__init__(
            project=project, location=location, credentials=credentials,
        )
        # Fetch and cache the backing MetadataStore resource.
        self._gca_resource = self._get_gca_resource(resource_name=metadata_store_name)
    @classmethod
    def get_or_create(
        cls,
        metadata_store_id: str = "default",
        project: Optional[str] = None,
        location: Optional[str] = None,
        credentials: Optional[auth_credentials.Credentials] = None,
        encryption_spec_key_name: Optional[str] = None,
    ) -> "_MetadataStore":
        """Retrieves or Creates (if it does not exist) a Metadata Store.

        Args:
            metadata_store_id (str):
                The <metadatastore> portion of the resource name with the format:
                projects/123/locations/us-central1/metadataStores/<metadatastore>
                If not provided, the MetadataStore's ID will be set to "default" to create a default MetadataStore.
            project (str):
                Project used to retrieve or create the metadata store. Overrides project set in
                aiplatform.init.
            location (str):
                Location used to retrieve or create the metadata store. Overrides location set in
                aiplatform.init.
            credentials (auth_credentials.Credentials):
                Custom credentials used to retrieve or create the metadata store. Overrides
                credentials set in aiplatform.init.
            encryption_spec_key_name (Optional[str]):
                Optional. The Cloud KMS resource identifier of the customer
                managed encryption key used to protect the metadata store. Has the
                form:
                ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
                The key needs to be in the same region as where the compute
                resource is created.
                If set, this MetadataStore and all sub-resources of this MetadataStore will be secured by this key.
                Overrides encryption_spec_key_name set in aiplatform.init.

        Returns:
            metadata_store (_MetadataStore):
                Instantiated representation of the managed metadata store resource.
        """
        # Try the cheap lookup first; only create when the store is absent.
        store = cls._get(
            metadata_store_name=metadata_store_id,
            project=project,
            location=location,
            credentials=credentials,
        )
        if not store:
            store = cls._create(
                metadata_store_id=metadata_store_id,
                project=project,
                location=location,
                credentials=credentials,
                encryption_spec_key_name=encryption_spec_key_name,
            )
        return store
@classmethod
def _create(
    cls,
    metadata_store_id: str = "default",
    project: Optional[str] = None,
    location: Optional[str] = None,
    credentials: Optional[auth_credentials.Credentials] = None,
    encryption_spec_key_name: Optional[str] = None,
) -> "_MetadataStore":
    """Creates a new MetadataStore if it does not exist.

    Args:
        metadata_store_id (str):
            The <metadatastore> portion of the resource name with
            the format:
            projects/123/locations/us-central1/metadataStores/<metadatastore>
            If not provided, the MetadataStore's ID will be set to "default" to create a default MetadataStore.
        project (str):
            Project used to create the metadata store. Overrides project set in
            aiplatform.init.
        location (str):
            Location used to create the metadata store. Overrides location set in
            aiplatform.init.
        credentials (auth_credentials.Credentials):
            Custom credentials used to create the metadata store. Overrides
            credentials set in aiplatform.init.
        encryption_spec_key_name (Optional[str]):
            Optional. The Cloud KMS resource identifier of the customer
            managed encryption key used to protect the metadata store. Has the
            form:
            ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
            The key needs to be in the same region as where the compute
            resource is created.
            If set, this MetadataStore and all sub-resources of this MetadataStore will be secured by this key.
            Overrides encryption_spec_key_name set in aiplatform.init.

    Returns:
        metadata_store (_MetadataStore):
            Instantiated representation of the managed metadata store resource.
    """
    api_client = cls._instantiate_client(location=location, credentials=credentials)
    gapic_metadata_store = gca_metadata_store.MetadataStore(
        encryption_spec=initializer.global_config.get_encryption_spec(
            encryption_spec_key_name=encryption_spec_key_name,
            select_version=compat.V1BETA1,
        )
    )
    try:
        # .result() blocks until the long-running create operation finishes.
        api_client.create_metadata_store(
            parent=initializer.global_config.common_location_path(
                project=project, location=location
            ),
            metadata_store=gapic_metadata_store,
            metadata_store_id=metadata_store_id,
        ).result()
    except exceptions.AlreadyExists:
        # Benign race: the store already exists (possibly created by a
        # concurrent caller); fall through and return a handle to it.
        logging.info(f"MetadataStore '{metadata_store_id}' already exists")
    return cls(
        metadata_store_name=metadata_store_id,
        project=project,
        location=location,
        credentials=credentials,
    )
@classmethod
def _get(
    cls,
    metadata_store_name: Optional[str] = "default",
    project: Optional[str] = None,
    location: Optional[str] = None,
    credentials: Optional[auth_credentials.Credentials] = None,
) -> Optional["_MetadataStore"]:
    """Returns a MetadataStore resource, or None if it does not exist.

    Args:
        metadata_store_name (str):
            Optional. A fully-qualified MetadataStore resource name or metadataStore ID.
            Example: "projects/123/locations/us-central1/metadataStores/my-store" or
            "my-store" when project and location are initialized or passed.
            If not set, metadata_store_name will be set to "default".
        project (str):
            Optional project to retrieve the metadata store from. If not set, project
            set in aiplatform.init will be used.
        location (str):
            Optional location to retrieve the metadata store from. If not set, location
            set in aiplatform.init will be used.
        credentials (auth_credentials.Credentials):
            Custom credentials to retrieve this metadata store. Overrides
            credentials set in aiplatform.init.

    Returns:
        metadata_store (Optional[_MetadataStore]):
            An optional instantiated representation of the managed Metadata Store resource.
    """
    try:
        return cls(
            metadata_store_name=metadata_store_name,
            project=project,
            location=location,
            credentials=credentials,
        )
    except exceptions.NotFound:
        # Intentionally falls off the end: returns None when absent so
        # get_or_create() can decide to create the store.
        logging.info(f"MetadataStore {metadata_store_name} not found.")
| {
"content_hash": "f8a7295ef685945f92e00c776c50c54d",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 115,
"avg_line_length": 42.39013452914798,
"alnum_prop": 0.6120808209034169,
"repo_name": "sasha-gitg/python-aiplatform",
"id": "3327f47d1f304b37ef77b2dbee36bb16fd8e3320",
"size": "10055",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google/cloud/aiplatform/metadata/metadata_store.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1700"
},
{
"name": "Python",
"bytes": "11216304"
},
{
"name": "Shell",
"bytes": "30838"
}
],
"symlink_target": ""
} |
__author__ = 'Daniele Sartiano and Giuseppe Attardi'
"""
Class for dealing with NER data from CoNLL03 file.
"""
from ..reader import TaggerReader
import nlpnet.config as config
import sys
def toIOBES(sent):
    """Rewrite the tags of *sent* from IOB to IOBES notation, in place.

    Each item of *sent* is a mutable [form, tag] pair.  A token that ends
    an entity span (the following tag does not continue with 'I') has its
    prefix promoted: 'B' becomes 'S' (singleton) and 'I' becomes 'E' (end).
    The same list object is returned for convenience.
    """
    total = len(sent)
    for index, token in enumerate(sent):
        span_ends_here = (index + 1 == total) or (sent[index + 1][1][0] != 'I')
        if not span_ends_here:
            continue
        prefix, remainder = token[1][0], token[1][1:]
        if prefix == 'B':
            token[1] = 'S' + remainder
        elif prefix == 'I':
            token[1] = 'E' + remainder
    return sent
def noGazetteer(words):
    """Fallback feature extractor used when no gazetteer exists for a class:
    every token gets a 0 ("not present") feature."""
    return [0 for _ in words]
def create_extractor(dict):
    """Build a gazetteer-presence feature extractor over the given lexicon.

    :param dict: collection of lowercased entity strings (possibly
        multiword, space separated) supporting the ``in`` operator.
    :return: a function mapping a token list to a parallel 0/1 list where 1
        marks tokens covered by some entity found in the lexicon.
    """
    lexicon = dict  # local alias; parameter name kept for API compatibility

    def present(words):
        """Set to 1 the flag of every token covered by a lexicon entry,
        checking single tokens and every multiword span starting at each
        position."""
        flags = [0] * len(words)
        for start, token in enumerate(words):
            candidate = token.lower()
            if candidate in lexicon:
                flags[start] = 1
            # keep extending the candidate with the following tokens and
            # flag the whole span whenever a longer entry matches
            for end in range(start + 1, len(words)):
                candidate += ' ' + words[end].lower()
                if candidate in lexicon:
                    for covered in range(start, end + 1):
                        flags[covered] = 1
        return flags
    return present
def gazetteer(file):
    """Build one gazetteer feature extractor per entity class.

    A dictionary file consists of lines:
       TYPE WORD[ WORD]*

    :param file: path of the UTF-8 gazetteer dictionary file.
    :return: a map from class type (e.g. "LOC") to a feature extractor
        built with :func:`create_extractor` over that class's entities.
    """
    classes = {}
    # context manager closes the handle deterministically (the original
    # left the file open until garbage collection)
    with open(file) as dict_file:
        for line in dict_file:
            if isinstance(line, bytes):  # Python 2 text mode yields bytes
                line = line.decode('utf-8')
            line = line.strip()
            c, words = line.split(None, 1)
            words = words.lower()
            if c not in classes:
                classes[c] = set()
            classes[c].add(words)
    extractors = {}
    # .items() works on both Python 2 and 3 (iteritems() is Python 2 only);
    # loop variable renamed from `dict`, which shadowed the builtin
    for c, entities in classes.items():
        extractors[c] = create_extractor(entities)
    return extractors
class NerReader(TaggerReader):
    """
    This class reads data from a CoNLL03 corpus and turns it into a format
    readable by the neural network for the NER tagging task.
    """
    def __init__(self, md=None, sentences=None, filename=None, load_dictionaries=True, variant=None):
        """
        :param md: metadata object (paths, gazetteer configuration, ...),
            forwarded to TaggerReader.
        :param sentences: a sequence of tagged sentences. Each sentence must
            be a sequence of (token, tag) tuples. If None, the sentences are
            read from `filename` (three-column format: form POS IOB-tag,
            sentences separated by blank lines).
        :param filename: corpus file to read when `sentences` is not given.
        :param load_dictionaries: forwarded to TaggerReader.
        :param variant: stored on the instance; interpreted by callers.
        """
        self.rare_tag = None
        self.tag_dict = {}  # tag IDs
        self.task = 'ner'
        self.variant = variant
        if sentences:
            self.sentences = sentences
        else:
            self.sentences = []
            if filename:
                with open(filename, 'rb') as f:
                    sentence = []
                    for line in f:
                        line = line.strip()
                        if line:
                            # NOTE: `unicode` makes this path Python-2 only;
                            # the POS column is read but discarded
                            form, pos, iob = unicode(line, 'utf-8').split()
                            sentence.append([form, iob])
                        else:
                            # blank line terminates the sentence; tags are
                            # converted from IOB to IOBES before storing
                            sentence = toIOBES(sentence)
                            self.sentences.append(sentence)
                            sentence = []
                    # NOTE(review): a final sentence not followed by a blank
                    # line is silently dropped — confirm corpora end with one
        # sets word_dict and tags_dict
        super(NerReader, self).__init__(md, load_dictionaries)
    def create_converter(self):
        """
        Sets up the token converter, which is responsible for transforming tokens into their
        feature vector indices
        """
        super(NerReader, self).create_converter()
        # one gazetteer-presence extractor per configured class; classes
        # absent from the gazetteer file get the constant-0 extractor
        inGazetteer = gazetteer(self.md.paths[self.md.gazetteer])
        for c in self.md.gaz_classes:
            if c in inGazetteer:
                self.converter.add_extractor(inGazetteer[c])
            else:
                self.converter.add_extractor(noGazetteer)
class NerTagReader(NerReader):
    """
    Reader used at tagging time: sentences are read from stdin (three
    columns: form POS tag) instead of a corpus file, and each token keeps
    its form and POS.
    """
    def __init__(self, md=None):
        """
        Read sentences from stdin.

        :param md: metadata object used to locate the dictionaries.
        """
        self.task = 'ner'
        self._set_metadata(md)
        # loads word_dict and tags_dict
        self.load_dictionary()
        self.load_tag_dict()
        self.sentences = []
        sent = []
        for line in sys.stdin:
            # NOTE: str.decode makes this Python-2 only
            line = line.decode('utf-8').strip()
            if line:
                (form, pos, tag) = line.split(None, 2)
                # the third column is read but not stored: only form and
                # POS are kept for tagging
                sent.append([form, pos])
            else:
                self.sentences.append(sent)
                sent = []
    def toIOB(self, tags):
        """Convert a tag sequence from IOBES back to IOB notation, in place,
        and return it ('S' -> 'B', 'E' -> 'I')."""
        for i in range(len(tags)):
            tag = tags[i]
            if tag[0] == 'S':
                tags[i] = 'B'+tag[1:]
            elif tag[0] == 'E':
                tags[i] = 'I'+tag[1:]
        return tags
| {
"content_hash": "54551b97efb47811a8ce738d82dd2f1d",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 101,
"avg_line_length": 31.205298013245034,
"alnum_prop": 0.5171901528013583,
"repo_name": "attardi/nlpnet",
"id": "a486cb5fd5ed90e00523a595f372dae2d56d5bf9",
"size": "4737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nlpnet/ner/ner_reader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "139"
},
{
"name": "Python",
"bytes": "288636"
}
],
"symlink_target": ""
} |
from unittest import mock
import hyperframe
import pytest
from mitmproxy import exceptions
from mitmproxy.net import http, tcp
from mitmproxy.net.http import http2
from pathod.protocols.http2 import HTTP2StateProtocol, TCPHandler
from ...mitmproxy.net import tservers as net_tservers
class TestTCPHandlerWrapper:
    """HTTP2StateProtocol must expose a TCPHandler whether it is handed one
    explicitly or only raw rfile/wfile arguments."""
    def test_wrapped(self):
        # an already-built handler is used as-is
        handler = TCPHandler(rfile='foo', wfile='bar')
        protocol = HTTP2StateProtocol(handler)
        assert (protocol.tcp_handler.rfile, protocol.tcp_handler.wfile) == ('foo', 'bar')
    def test_direct(self):
        # raw streams are wrapped in a TCPHandler internally
        protocol = HTTP2StateProtocol(rfile='foo', wfile='bar')
        assert isinstance(protocol.tcp_handler, TCPHandler)
        assert (protocol.tcp_handler.rfile, protocol.tcp_handler.wfile) == ('foo', 'bar')
class EchoHandler(tcp.BaseHandler):
    """Test-server handler that echoes back every byte it receives."""
    sni = None
    def handle(self):
        # runs until the connection is torn down — safe_read is expected
        # to raise on disconnect, ending the loop (TODO confirm)
        while True:
            v = self.rfile.safe_read(1)
            self.wfile.write(v)
            self.wfile.flush()
class TestProtocol:
    """perform_connection_preface() must be a no-op once the preface has been
    performed (unless force=True) and must dispatch to the client or server
    variant depending on is_server."""
    # NOTE: decorators apply bottom-up, so the *client* patch is the first
    # mock argument and the *server* patch the second.
    @mock.patch("pathod.protocols.http2.HTTP2StateProtocol.perform_server_connection_preface")
    @mock.patch("pathod.protocols.http2.HTTP2StateProtocol.perform_client_connection_preface")
    def test_perform_connection_preface(self, mock_client_method, mock_server_method):
        protocol = HTTP2StateProtocol(is_server=False)
        protocol.connection_preface_performed = True
        # already performed: neither side's preface should run
        protocol.perform_connection_preface()
        assert not mock_client_method.called
        assert not mock_server_method.called
        # force=True re-runs the client-side preface
        protocol.perform_connection_preface(force=True)
        assert mock_client_method.called
        assert not mock_server_method.called
    @mock.patch("pathod.protocols.http2.HTTP2StateProtocol.perform_server_connection_preface")
    @mock.patch("pathod.protocols.http2.HTTP2StateProtocol.perform_client_connection_preface")
    def test_perform_connection_preface_server(self, mock_client_method, mock_server_method):
        protocol = HTTP2StateProtocol(is_server=True)
        protocol.connection_preface_performed = True
        protocol.perform_connection_preface()
        assert not mock_client_method.called
        assert not mock_server_method.called
        # force=True on the server side runs the server preface instead
        protocol.perform_connection_preface(force=True)
        assert not mock_client_method.called
        assert mock_server_method.called
class TestCheckALPNMatch(net_tservers.ServerTestBase):
    """check_alpn() succeeds when the server negotiated the h2 protocol."""
    handler = EchoHandler
    ssl = dict(
        alpn_select=b'h2',
    )
    def test_check_alpn(self):
        c = tcp.TCPClient(("127.0.0.1", self.port))
        with c.connect():
            c.convert_to_tls(alpn_protos=[b'h2'])
            protocol = HTTP2StateProtocol(c)
            assert protocol.check_alpn()
class TestCheckALPNMismatch(net_tservers.ServerTestBase):
    """check_alpn() raises when the server did not negotiate h2."""
    handler = EchoHandler
    ssl = dict(
        alpn_select=None,
    )
    def test_check_alpn(self):
        c = tcp.TCPClient(("127.0.0.1", self.port))
        with c.connect():
            c.convert_to_tls(alpn_protos=[b'h2'])
            protocol = HTTP2StateProtocol(c)
            with pytest.raises(NotImplementedError):
                protocol.check_alpn()
class TestPerformServerConnectionPreface(net_tservers.ServerTestBase):
    """Run the client side of the HTTP/2 preface against a scripted peer
    that replays the exact expected byte sequences."""
    class handler(tcp.BaseHandler):
        def handle(self):
            # send magic
            self.wfile.write(bytes.fromhex("505249202a20485454502f322e300d0a0d0a534d0d0a0d0a"))
            self.wfile.flush()
            # send empty settings frame
            self.wfile.write(bytes.fromhex("000000040000000000"))
            self.wfile.flush()
            # check empty settings frame
            raw = http2.read_raw_frame(self.rfile)
            assert raw == bytes.fromhex("00000c040000000000000200000000000300000001")
            # check settings acknowledgement
            raw = http2.read_raw_frame(self.rfile)
            assert raw == bytes.fromhex("000000040100000000")
            # send settings acknowledgement
            self.wfile.write(bytes.fromhex("000000040100000000"))
            self.wfile.flush()
    def test_perform_server_connection_preface(self):
        c = tcp.TCPClient(("127.0.0.1", self.port))
        with c.connect():
            protocol = HTTP2StateProtocol(c)
            assert not protocol.connection_preface_performed
            protocol.perform_server_connection_preface()
            assert protocol.connection_preface_performed
            # the scripted peer is done: a forced second preface must fail
            with pytest.raises(exceptions.TcpDisconnect):
                protocol.perform_server_connection_preface(force=True)
class TestPerformClientConnectionPreface(net_tservers.ServerTestBase):
    """Run the server side of the HTTP/2 preface against a scripted peer
    that validates the exact expected byte sequences."""
    class handler(tcp.BaseHandler):
        def handle(self):
            # check magic
            assert self.rfile.read(24) == HTTP2StateProtocol.CLIENT_CONNECTION_PREFACE
            # check empty settings frame
            assert self.rfile.read(9) ==\
                bytes.fromhex("000000040000000000")
            # send empty settings frame
            self.wfile.write(bytes.fromhex("000000040000000000"))
            self.wfile.flush()
            # check settings acknowledgement
            assert self.rfile.read(9) == \
                bytes.fromhex("000000040100000000")
            # send settings acknowledgement
            self.wfile.write(bytes.fromhex("000000040100000000"))
            self.wfile.flush()
    def test_perform_client_connection_preface(self):
        c = tcp.TCPClient(("127.0.0.1", self.port))
        with c.connect():
            protocol = HTTP2StateProtocol(c)
            assert not protocol.connection_preface_performed
            protocol.perform_client_connection_preface()
            assert protocol.connection_preface_performed
class TestClientStreamIds:
    """Client-side stream ids start at 1 and advance by 2 (odd ids only)."""
    c = tcp.TCPClient(("127.0.0.1", 0))
    protocol = HTTP2StateProtocol(c)
    def test_client_stream_ids(self):
        assert self.protocol.current_stream_id is None
        for expected_id in (1, 3, 5):
            assert self.protocol._next_stream_id() == expected_id
            assert self.protocol.current_stream_id == expected_id
class TestserverstreamIds:
    """Server-side stream ids start at 2 and advance by 2 (even ids only).

    NOTE(review): name should be TestServerStreamIds per convention; kept
    as-is to preserve the test identifier.
    """
    c = tcp.TCPClient(("127.0.0.1", 0))
    protocol = HTTP2StateProtocol(c, is_server=True)
    def test_server_stream_ids(self):
        assert self.protocol.current_stream_id is None
        assert self.protocol._next_stream_id() == 2
        assert self.protocol.current_stream_id == 2
        assert self.protocol._next_stream_id() == 4
        assert self.protocol.current_stream_id == 4
        assert self.protocol._next_stream_id() == 6
        assert self.protocol.current_stream_id == 6
class TestApplySettings(net_tservers.ServerTestBase):
    """_apply_settings must send a settings acknowledgement and store the
    new values in http2_settings."""
    class handler(tcp.BaseHandler):
        def handle(self):
            # check settings acknowledgement
            assert self.rfile.read(9) == bytes.fromhex("000000040100000000")
            self.wfile.write(b"OK")
            self.wfile.flush()
            self.rfile.safe_read(9)  # just to keep the connection alive a bit longer
    ssl = True
    def test_apply_settings(self):
        c = tcp.TCPClient(("127.0.0.1", self.port))
        with c.connect():
            c.convert_to_tls()
            protocol = HTTP2StateProtocol(c)
            # placeholder values: only storage and ACK behaviour are tested
            protocol._apply_settings({
                hyperframe.frame.SettingsFrame.ENABLE_PUSH: 'foo',
                hyperframe.frame.SettingsFrame.MAX_CONCURRENT_STREAMS: 'bar',
                hyperframe.frame.SettingsFrame.INITIAL_WINDOW_SIZE: 'deadbeef',
            })
            assert c.rfile.safe_read(2) == b"OK"
            assert protocol.http2_settings[
                hyperframe.frame.SettingsFrame.ENABLE_PUSH] == 'foo'
            assert protocol.http2_settings[
                hyperframe.frame.SettingsFrame.MAX_CONCURRENT_STREAMS] == 'bar'
            assert protocol.http2_settings[
                hyperframe.frame.SettingsFrame.INITIAL_WINDOW_SIZE] == 'deadbeef'
class TestCreateHeaders:
    """_create_headers must HPACK-encode headers into HEADERS (and, when
    MAX_FRAME_SIZE is exceeded, CONTINUATION) frames with correct flags."""
    c = tcp.TCPClient(("127.0.0.1", 0))
    def test_create_headers(self):
        headers = http.Headers([
            (b':method', b'GET'),
            (b':path', b'index.html'),
            (b':scheme', b'https'),
            (b'foo', b'bar')])
        # end_stream toggles only the END_STREAM bit in the frame flags
        data = HTTP2StateProtocol(self.c)._create_headers(
            headers, 1, end_stream=True)
        assert b''.join(data) == bytes.fromhex("000014010500000001824488355217caf3a69a3f87408294e7838c767f")
        data = HTTP2StateProtocol(self.c)._create_headers(
            headers, 1, end_stream=False)
        assert b''.join(data) == bytes.fromhex("000014010400000001824488355217caf3a69a3f87408294e7838c767f")
    def test_create_headers_multiple_frames(self):
        headers = http.Headers([
            (b':method', b'GET'),
            (b':path', b'/'),
            (b':scheme', b'https'),
            (b'foo', b'bar'),
            (b'server', b'version')])
        protocol = HTTP2StateProtocol(self.c)
        # tiny frame size forces the header block into three frames
        protocol.http2_settings[hyperframe.frame.SettingsFrame.MAX_FRAME_SIZE] = 8
        data = protocol._create_headers(headers, 1, end_stream=True)
        assert len(data) == 3
        assert data[0] == bytes.fromhex("000008010100000001828487408294e783")
        assert data[1] == bytes.fromhex("0000080900000000018c767f7685ee5b10")
        assert data[2] == bytes.fromhex("00000209040000000163d5")
class TestCreateBody:
    """_create_body must frame a payload into DATA frames, splitting on
    MAX_FRAME_SIZE."""
    c = tcp.TCPClient(("127.0.0.1", 0))
    def test_create_body_empty(self):
        protocol = HTTP2StateProtocol(self.c)
        # renamed from `bytes`, which shadowed the builtin used by the
        # sibling tests' bytes.fromhex calls
        data = protocol._create_body(b'', 1)
        assert b''.join(data) == b''
    def test_create_body_single_frame(self):
        protocol = HTTP2StateProtocol(self.c)
        data = protocol._create_body(b'foobar', 1)
        assert b''.join(data) == bytes.fromhex("000006000100000001666f6f626172")
    def test_create_body_multiple_frames(self):
        protocol = HTTP2StateProtocol(self.c)
        # tiny frame size forces the payload into three DATA frames
        protocol.http2_settings[hyperframe.frame.SettingsFrame.MAX_FRAME_SIZE] = 5
        data = protocol._create_body(b'foobarmehm42', 1)
        assert len(data) == 3
        assert data[0] == bytes.fromhex("000005000000000001666f6f6261")
        assert data[1] == bytes.fromhex("000005000000000001726d65686d")
        assert data[2] == bytes.fromhex("0000020001000000013432")
class TestReadRequest(net_tservers.ServerTestBase):
    """read_request must decode a scripted HEADERS + DATA exchange into a
    request object."""
    class handler(tcp.BaseHandler):
        def handle(self):
            # HEADERS frame (GET https ... on stream 1)
            self.wfile.write(
                bytes.fromhex("000003010400000001828487"))
            # DATA frame carrying "foobar"
            self.wfile.write(
                bytes.fromhex("000006000100000001666f6f626172"))
            self.wfile.flush()
            self.rfile.safe_read(9)  # just to keep the connection alive a bit longer
    ssl = True
    def test_read_request(self):
        c = tcp.TCPClient(("127.0.0.1", self.port))
        with c.connect():
            c.convert_to_tls()
            protocol = HTTP2StateProtocol(c, is_server=True)
            protocol.connection_preface_performed = True
            req = protocol.read_request(NotImplemented)
            assert req.stream_id
            assert req.headers.fields == ()
            assert req.method == "GET"
            assert req.path == "/"
            assert req.scheme == "https"
            assert req.content == b'foobar'
class TestReadRequestRelative(net_tservers.ServerTestBase):
    """read_request must recognize an OPTIONS * request as relative form."""
    class handler(tcp.BaseHandler):
        def handle(self):
            # HEADERS frame encoding an OPTIONS request with path "*"
            self.wfile.write(
                bytes.fromhex("00000c0105000000014287d5af7e4d5a777f4481f9"))
            self.wfile.flush()
    ssl = True
    def test_asterisk_form(self):
        c = tcp.TCPClient(("127.0.0.1", self.port))
        with c.connect():
            c.convert_to_tls()
            protocol = HTTP2StateProtocol(c, is_server=True)
            protocol.connection_preface_performed = True
            req = protocol.read_request(NotImplemented)
            assert req.first_line_format == "relative"
            assert req.method == "OPTIONS"
            assert req.path == "*"
class TestReadResponse(net_tservers.ServerTestBase):
    """read_response must decode a scripted HEADERS + DATA exchange on a
    given stream id into a response object."""
    class handler(tcp.BaseHandler):
        def handle(self):
            # HEADERS frame: 200 with an etag header, stream id 0x2a (42)
            self.wfile.write(
                bytes.fromhex("00000801040000002a88628594e78c767f"))
            # DATA frame carrying "foobar"
            self.wfile.write(
                bytes.fromhex("00000600010000002a666f6f626172"))
            self.wfile.flush()
            self.rfile.safe_read(9)  # just to keep the connection alive a bit longer
    ssl = True
    def test_read_response(self):
        c = tcp.TCPClient(("127.0.0.1", self.port))
        with c.connect():
            c.convert_to_tls()
            protocol = HTTP2StateProtocol(c)
            protocol.connection_preface_performed = True
            resp = protocol.read_response(NotImplemented, stream_id=42)
            assert resp.http_version == "HTTP/2.0"
            assert resp.status_code == 200
            assert resp.reason == ''
            assert resp.headers.fields == ((b':status', b'200'), (b'etag', b'foobar'))
            assert resp.content == b'foobar'
            assert resp.timestamp_end
class TestReadEmptyResponse(net_tservers.ServerTestBase):
    """read_response must handle a HEADERS-only response (END_STREAM set)
    as an empty body."""
    class handler(tcp.BaseHandler):
        def handle(self):
            # HEADERS frame with END_STREAM: no DATA frame follows
            self.wfile.write(
                bytes.fromhex("00000801050000002a88628594e78c767f"))
            self.wfile.flush()
    ssl = True
    def test_read_empty_response(self):
        c = tcp.TCPClient(("127.0.0.1", self.port))
        with c.connect():
            c.convert_to_tls()
            protocol = HTTP2StateProtocol(c)
            protocol.connection_preface_performed = True
            resp = protocol.read_response(NotImplemented, stream_id=42)
            assert resp.stream_id == 42
            assert resp.http_version == "HTTP/2.0"
            assert resp.status_code == 200
            assert resp.reason == ''
            assert resp.headers.fields == ((b':status', b'200'), (b'etag', b'foobar'))
            assert resp.content == b''
class TestAssembleRequest:
    """assemble_request must produce the expected HEADERS (and DATA) frame
    bytes, honoring a pre-set stream_id."""
    c = tcp.TCPClient(("127.0.0.1", 0))
    def test_request_simple(self):
        data = HTTP2StateProtocol(self.c).assemble_request(http.Request(
            host="",
            port=0,
            method=b'GET',
            scheme=b'https',
            authority=b'',
            path=b'/',
            http_version=b"HTTP/2.0",
            headers=(),
            content=None,
            trailers=None,
            timestamp_start=0,
            timestamp_end=0
        ))
        assert len(data) == 1
        assert data[0] == bytes.fromhex('00000d0105000000018284874188089d5c0b8170dc07')
    def test_request_with_stream_id(self):
        req = http.Request(
            host="",
            port=0,
            method=b'GET',
            scheme=b'https',
            authority=b'',
            path=b'/',
            http_version=b"HTTP/2.0",
            headers=(),
            content=None,
            trailers=None,
            timestamp_start=0,
            timestamp_end=0
        )
        # a stream id set on the request must be reused verbatim
        req.stream_id = 0x42
        data = HTTP2StateProtocol(self.c).assemble_request(req)
        assert len(data) == 1
        assert data[0] == bytes.fromhex('00000d0105000000428284874188089d5c0b8170dc07')
    def test_request_with_body(self):
        # a request with content yields a HEADERS frame plus a DATA frame
        data = HTTP2StateProtocol(self.c).assemble_request(http.Request(
            host="",
            port=0,
            method=b'GET',
            scheme=b'https',
            authority=b'',
            path=b'/',
            http_version=b"HTTP/2.0",
            headers=http.Headers([(b'foo', b'bar')]),
            content=b'foobar',
            trailers=None,
            timestamp_start=0,
            timestamp_end=None,
        ))
        assert len(data) == 2
        assert data[0] == bytes.fromhex("0000150104000000018284874188089d5c0b8170dc07408294e7838c767f")
        assert data[1] == bytes.fromhex("000006000100000001666f6f626172")
class TestAssembleResponse:
    """assemble_response must produce the expected HEADERS (and DATA) frame
    bytes on the server side, honoring a pre-set stream_id."""
    c = tcp.TCPClient(("127.0.0.1", 0))
    def test_simple(self):
        data = HTTP2StateProtocol(self.c, is_server=True).assemble_response(http.Response(
            http_version=b"HTTP/2.0",
            status_code=200,
            reason=b"",
            headers=(),
            content=b"",
            trailers=None,
            timestamp_start=0,
            timestamp_end=0,
        ))
        assert len(data) == 1
        assert data[0] == bytes.fromhex("00000101050000000288")
    def test_with_stream_id(self):
        resp = http.Response(
            http_version=b"HTTP/2.0",
            status_code=200,
            reason=b"",
            headers=(),
            content=b"",
            trailers=None,
            timestamp_start=0,
            timestamp_end=0,
        )
        # a stream id set on the response must be reused verbatim
        resp.stream_id = 0x42
        data = HTTP2StateProtocol(self.c, is_server=True).assemble_response(resp)
        assert len(data) == 1
        assert data[0] == bytes.fromhex("00000101050000004288")
    def test_with_body(self):
        # a response with content yields a HEADERS frame plus a DATA frame
        data = HTTP2StateProtocol(self.c, is_server=True).assemble_response(http.Response(
            http_version=b"HTTP/2.0",
            status_code=200,
            reason=b'',
            headers=http.Headers(foo=b"bar"),
            content=b'foobar',
            trailers=None,
            timestamp_start=0,
            timestamp_end=0,
        ))
        assert len(data) == 2
        assert data[0] == bytes.fromhex("00000901040000000288408294e7838c767f")
        assert data[1] == bytes.fromhex("000006000100000002666f6f626172")
| {
"content_hash": "674b480d8fab75ffc5cd84bbb6a1c357",
"timestamp": "",
"source": "github",
"line_count": 500,
"max_line_length": 108,
"avg_line_length": 35.07,
"alnum_prop": 0.6071285999429712,
"repo_name": "vhaupert/mitmproxy",
"id": "63a13c881c9e6a5abe0c53f39ab818e6f6031fb9",
"size": "17535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/pathod/protocols/test_http2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "186961"
},
{
"name": "HTML",
"bytes": "3034"
},
{
"name": "JavaScript",
"bytes": "2254958"
},
{
"name": "PowerShell",
"bytes": "362"
},
{
"name": "Python",
"bytes": "1312583"
},
{
"name": "Shell",
"bytes": "3726"
}
],
"symlink_target": ""
} |
import csv
import os
from app import create_app, get_db, assets
from flask_assets import Environment, ManageAssets
from flask_script import Manager
from pymongo import MongoClient
# Flask-Script CLI manager: builds the app via the create_app factory and
# registers the webassets build command alongside the @manager.command
# functions defined below.
manager = Manager(create_app)
manager.add_command("assets", ManageAssets())
@manager.command
def fix_dates():
    """One-off migration of legacy user documents.

    * ``registered_on`` / ``confirmed_on`` stored as RFC-1123 strings are
      re-parsed into real ``datetime`` values.
    * Legacy scalar ``interests.school`` / ``interests.location`` fields are
      migrated to the list fields ``interests.domains`` /
      ``interests.locations`` (dropped when the value is the placeholder
      "Autre"), and the old scalar fields are removed.
    """
    from datetime import datetime
    users = get_db().users

    def _fix_date(user_id, field, value):
        # legacy records stored dates as strings such as
        # "Tue, 01 Jan 2019 10:00:00 GMT"; isinstance replaces type() == str
        if isinstance(value, str):
            print(f"original: {value}")
            parsed = datetime.strptime(value, "%a, %d %b %Y %H:%M:%S %Z")
            users.update_one({"_id": user_id}, {"$set": {field: parsed}})
            print(f"fixed: {parsed}")

    def _migrate_interest(user_id, old_field, new_field, value):
        # "Autre" is a placeholder meaning "no real choice": drop the field
        if value == "Autre":
            print(f"unsetting for {old_field} Autre")
            users.update_one({"_id": user_id}, {"$unset": {f"interests.{new_field}": ""}})
        else:
            print(f"updating {new_field} to [{value}]")
            users.update_one({"_id": user_id}, {"$set": {f"interests.{new_field}": [value]}})
        print(f"Unsetting interests.{old_field}")
        users.update_one({"_id": user_id}, {"$unset": {f"interests.{old_field}": ""}})

    cur = users.find({}, {"_id": 1, "registered_on": 1, "confirmed_on": 1, "interests": 1})
    for c in cur:
        _fix_date(c["_id"], "registered_on", c["registered_on"])
        _fix_date(c["_id"], "confirmed_on", c["confirmed_on"])
        interests = c.get("interests")
        if interests:
            if interests.get("school"):
                _migrate_interest(c["_id"], "school", "domains", interests.get("school"))
            if interests.get("location"):
                _migrate_interest(c["_id"], "location", "locations", interests.get("location"))
@manager.command
def update_companies():
    """Refresh the ``info`` sub-document of each company from the yearly CSV.

    The CSV ``id_entreprise`` column is only the join key and is removed
    from the stored ``info`` document afterwards.
    """
    path = os.path.join(os.path.dirname(__file__), "data/Entreprises2019.csv")
    # context manager closes the CSV file (the original leaked the handle)
    with open(path, "rt", encoding="utf8") as csv_file:
        reader = csv.DictReader(csv_file, delimiter=";")
        for row in reader:
            get_db().companies.update_one(
                {"id": row["id_entreprise"]}, {"$set": {"info": row}}
            )
            get_db().companies.update_one(
                {"id": row["id_entreprise"]}, {"$unset": {"info.id_entreprise": 1}}
            )
@manager.command
def import_companies():
    """Create or update companies from the yearly registration CSV.

    Companies already present (matched on ``id``) have their fields reset
    with ``$set``; missing ones are inserted with the same payload plus
    their id.
    """
    path = os.path.join(os.path.dirname(__file__), "data/createEntreprises2019.csv")
    # context manager closes the CSV file (the original leaked the handle)
    with open(path, "rt", encoding="utf8") as csv_file:
        reader = csv.DictReader(csv_file, delimiter=";")
        for row in reader:
            # payload shared by the insert and update branches (previously
            # duplicated in full)
            fields = {
                "password": row["password"],
                "name": row["name"],
                "acompte": False,
                "emplacement": "",
                "size": float(row["size"]),
                "duration": row["duration"],
                "equiped": (row["equiped"] == "true"),
                "pole": row["pole"],
                "zone": row["zone"],
                "equipement": False,
                "restauration": False,
                "badges": False,
                "transport": False,
                "programme": False,
                "sections": {
                    "furnitures": {},
                    "catering": {"wed": {}, "thu": {}},
                    "events": {},
                    "persons": [],
                    "transports": [],
                    "profile": {"stand": {}, "facturation": {}},
                },
            }
            # count_documents replaces cursor.count(), deprecated since
            # pymongo 3.7 and removed in pymongo 4
            if get_db().companies.count_documents({"id": row["id_entreprise"]}) == 0:
                document = {"id": row["id_entreprise"]}
                document.update(fields)
                get_db().companies.insert_one(document)
            else:
                get_db().companies.update_one(
                    {"id": row["id_entreprise"]}, {"$set": fields}
                )
@manager.command
def import_coding_contest():
    """Store coding-contest ranking and score on matching user documents.

    Rows whose ``Username`` does not match exactly one user are skipped.
    """
    path = os.path.join(os.path.dirname(__file__), "data/coding_contest_results.csv")
    # context manager closes the CSV file (the original leaked the handle)
    with open(path, "rt", encoding="utf8") as csv_file:
        reader = csv.DictReader(csv_file, delimiter=";")
        for row in reader:
            # count_documents replaces cursor.count(), deprecated since
            # pymongo 3.7 and removed in pymongo 4
            if get_db().users.count_documents({"id": row["Username"]}) == 1:
                get_db().users.update_one(
                    {"id": row["Username"]},
                    {
                        "$set": {
                            "events.coding_contest.ranking": int(row["Ranking"]),
                            "events.coding_contest.score": int(row["Global"]),
                        }
                    },
                )
# Reset the events.*.registered flags for every user (overwrites the whole
# "events" sub-document, discarding any other keys it contained).
@manager.command
def set_registered_false():
    """Mark every user as not registered for any of the four events."""
    get_db().users.update_many(
        {},
        {
            "$set": {
                "events": {
                    "fra": {"registered": False},
                    "joi": {"registered": False},
                    "master_class": {"registered": False},
                    "coding_contest": {"registered": False},
                }
            }
        },
    )
# set scanfactor fields for companies
@manager.command
def update_scanfactor(update, action):
    """Inspect and optionally migrate companies' ``sections.scanfactor``.

    The legacy free-text answer ("Oui…"/"Non…") is converted into a boolean
    ``registered`` flag and the old field removed.

    :param update: "true" to run the migration loop; any other value only
        prints the current values and returns.
    :param action: "true" to actually write changes; otherwise the loop
        is a dry run.
    """
    companies = list(get_db().companies.find({"id": {"$nin": ["admin", "test"]}}, {"sections.scanfactor": 1, "id": 1}))
    for c in companies:
        # .get() guards against documents that lack the projected field
        # (direct indexing raised KeyError on those)
        if c.get('sections', {}).get('scanfactor'):
            print(c['id'], c['sections']['scanfactor'])
    if update != 'true':
        return
    for c in companies:
        scanfactor = c.get('sections', {}).get('scanfactor') or {}
        answer = scanfactor.get("scanfactor")
        if answer and "Oui" in answer:
            if action == 'true':
                get_db().companies.update_one({"id": c['id']}, {"$set": {"sections.scanfactor.registered": True}})
                print("Setting scanfactor to True and registered ==", scanfactor.get("registered"))
        if answer and "Non" in answer:
            if action == 'true':
                get_db().companies.update_one({"id": c['id']}, {"$set": {"sections.scanfactor.registered": False}})
                print("Setting scanfactor to False and registered ==", scanfactor.get("registered"))
        if action == 'true':
            get_db().companies.update_one({"id": c['id']}, {"$unset": {"sections.scanfactor.scanfactor": ""}})
# Change admin password
@manager.command
def set_admin_password(password):
    """Overwrite the admin account password.

    NOTE(review): the password is stored as given, in plain text — confirm
    whether hashing is applied elsewhere before using in production.
    """
    get_db().companies.update_one({"id": "admin"}, {"$set": {"password": password}})
# Resetting jobs
@manager.command
def reset_job_offers():
    """Delete every document from the jobs collection."""
    get_db().jobs.delete_many({})
# Resetting stream
@manager.command
def reset_stream():
    """Delete every document from the stream collection."""
    get_db().stream.delete_many({})
# Cleaning stream
@manager.command
def cleanup_stream():
    """Remove stream entries carrying no information: an empty diff ('{}')
    or a null/missing section."""
    get_db().stream.delete_many({
        "$or": [{'diff': '{}'}, {'section': {"$eq": None}}]
    })
# Adding event
@manager.command
def update_event():
    """Insert the Coding Contest event document.

    NOTE(review): despite the name this is a plain insert — running it
    twice creates a duplicate document rather than upserting.
    """
    get_db().events.insert_one(
        {
            "image": "images/coding.jpg",
            "id": "coding_contest",
            "price": 500,
            "description": "Coding Contest organisé par Forum Organisation pour les étudiants spécialisés en informatique.",
            "name": "Coding Contest",
        }
    )
# Initialize the events registration flags for all users (same payload as
# set_registered_false above).
@manager.command
def init_profiles():
    """Initialize/reset the events registration flags for every user."""
    get_db().users.update_many(
        {},
        {
            "$set": {
                "events": {
                    "fra": {"registered": False},
                    "joi": {"registered": False},
                    "master_class": {"registered": False},
                    "coding_contest": {"registered": False},
                }
            }
        },
    )
# Script entry point: dispatch to the Flask-Script manager CLI.
if __name__ == "__main__":
    manager.run()
| {
"content_hash": "ff36ddd3808c7cf47f4df385f81983ed",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 124,
"avg_line_length": 37.596,
"alnum_prop": 0.4609000957548675,
"repo_name": "ForumOrganisation/forumorg",
"id": "57961dc004408bed6f3d73a1fe523d6b421b58ee",
"size": "9403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "570943"
},
{
"name": "Dockerfile",
"bytes": "422"
},
{
"name": "HTML",
"bytes": "645487"
},
{
"name": "JavaScript",
"bytes": "310986"
},
{
"name": "Python",
"bytes": "91339"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import sys
import socket
import time
import greenlet
from functools import wraps
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado.concurrent import Future
from tornado.gen import coroutine, Return
from tornado.netutil import Resolver
# Detect the PyPy interpreter: the __pypy__ module only exists on PyPy.
IS_PYPY = False
try:
    import __pypy__
    __pypy__  # reference keeps "imported but unused" linters quiet
    IS_PYPY = True
except ImportError:  # was a bare `except:`, which also swallowed SystemExit etc.
    pass
def set_resolver(resolver):
    """Globally configure the tornado Resolver implementation to use."""
    Resolver.configure(resolver)
def enable_debug():
    """Install a greenlet trace hook that prints every switch/throw together
    with the source greenlet's stack.  No-op on PyPy (settrace unsupported)."""
    if IS_PYPY:
        sys.stderr.write("settrace api unsupported on pypy")
        sys.stderr.flush()
        return
    import inspect
    def trace_green(event, args):
        # args is the (source, target) greenlet pair for both event kinds
        src, target = args
        if event == "switch":
            print("from %s switch to %s" % (src, target))
        elif event == "throw":
            print("from %s throw exception to %s" % (src, target))
        if src.gr_frame:
            # render the source greenlet's call stack, outermost frames first
            tracebacks = inspect.getouterframes(src.gr_frame)
            buff = []
            for traceback in tracebacks:
                srcfile, lineno, func_name, codesample = traceback[1:-1]
                trace_line = '''File "%s", line %d, in %s\n%s '''
                buff.append(trace_line % (srcfile, lineno, func_name, "".join(codesample)))
            print("".join(buff))
    greenlet.settrace(trace_green)
# Public API of this module.
__all__ = ("IS_PYPY", "spawn", "AsyncSocket", "GreenTask", "synclize",
           "Waiter", "sleep", "Timeout", "Event", "Watcher", "Pool")
class Hub(object):
    """Holds the "hub" greenlet (the one that instantiates the Hub) and the
    current tornado IOLoop.  Child greenlets switch back to the hub to
    yield control, and use the wrapped IOLoop to schedule work."""
    def __init__(self):
        # the instantiating greenlet becomes the hub greenlet
        self._greenlet = greenlet.getcurrent()
        self._ioloop = IOLoop.current()
    @property
    def greenlet(self):
        return self._greenlet
    def switch(self):
        # transfer control to the hub greenlet
        self._greenlet.switch()
    @property
    def ioloop(self):
        return self._ioloop
    def run_later(self, deadline, callback, *args, **kwargs):
        """Schedule `callback` after `deadline` seconds; returns the IOLoop
        timeout handle (usable for cancellation)."""
        return self.ioloop.add_timeout(time.time() + deadline,
                                       callback, *args, **kwargs)
    def run_callback(self, callback, *args, **kwargs):
        """Schedule `callback` on the next IOLoop iteration."""
        self.ioloop.add_callback(callback, *args, **kwargs)
# Module-level singleton, created at import time in the importing greenlet.
hub = Hub()
def get_hub():
    """Return the process-wide Hub singleton."""
    return hub
class GreenTask(greenlet.greenlet):
    """A greenlet wrapping a callable, exposing its outcome as a Future.

    The wrapped callable runs inside this greenlet; its return value or
    exception is published on the future returned by :meth:`wait`.  An
    optional ``timeout`` keyword argument (seconds) arms a ``Timeout``
    timer around the call.
    """
    def __init__(self, run, *args, **kwargs):
        super(GreenTask, self).__init__()
        self._run = run          # callable executed in this greenlet
        self._args = args
        self._kwargs = kwargs
        self._future = Future()  # resolved with the call's outcome
        self._result = None
        self._exc_info = ()
    @property
    def args(self):
        return self._args
    @property
    def kwargs(self):
        return self._kwargs
    def run(self):
        # Initialized before the try block so the `finally` clause is safe
        # even if arming the timer raises (the original referenced a
        # possibly-unbound `timer` there).
        timer = None
        try:
            timeout = self.kwargs.pop("timeout", 0)
            if timeout:
                timer = Timeout(timeout)
                timer.start()
            self._result = self._run(*self.args, **self.kwargs)
            self._future.set_result(self._result)
        except BaseException:  # explicit spelling of the bare `except:`
            # forward *any* failure (incl. Timeout) through the future
            self._exc_info = sys.exc_info()
            self._future.set_exc_info(self._exc_info)
        finally:
            if timer is not None:
                timer.cancel()
    def start(self):
        """Begin executing run() by switching into this greenlet."""
        self.switch()
    def __str__(self):
        func_name = "%s of %s " % (self._run.__name__, self._run.__module__)
        return "<greenlet %s at %s>" % (func_name, hex(id(self)))
    def __repr__(self):
        return self.__str__()
    def wait(self):
        """Return the Future holding the task's result or exception."""
        return self._future
    @classmethod
    def spawn(cls_green, *args, **kwargs):
        """Create a task, start it immediately and return the task object."""
        task = cls_green(*args, **kwargs)
        task.start()
        return task
def synclize(func):
    """Decorator: runs coroutine `func` to completion, blocking the caller.

    Must be invoked from a child greenlet (e.g. inside a GreenTask): the
    coroutine is scheduled on the IOLoop, control switches back to the
    parent greenlet, and the child is resumed with the result (or the
    exception re-raised inside it) once the future resolves.
    """
    coro = coroutine(func)
    @wraps(func)
    def _sync_call(*args, **kwargs):
        child_gr = greenlet.getcurrent()
        main = child_gr.parent
        assert main, "only run in child greenlet"
        def callback(future):
            # Resume the waiting child greenlet with the coroutine outcome.
            if future.exc_info():
                child_gr.throw(*future.exc_info())
            elif future.exception():
                child_gr.throw(future.exception())
            else:
                child_gr.switch(future.result())
        IOLoop.current().add_future(coro(*args, **kwargs), callback)
        # Yield to the parent greenlet until `callback` switches back.
        return main.switch()
    return _sync_call
def spawn(callable_obj, *args, **kwargs):
    # Starts `callable_obj` in a fresh GreenTask and returns the Future
    # carrying its eventual result.
    return GreenTask.spawn(callable_obj, *args, **kwargs).wait()
class Waiter(object):
    """Pairs the current (child) greenlet with its parent so the child can
    block in get() until switch()/throw() resumes it."""

    def __init__(self):
        self._greenlet = greenlet.getcurrent()
        self._main = self._greenlet.parent

    @property
    def greenlet(self):
        return self._greenlet

    def switch(self, value):
        # Resume the blocked child greenlet; get() returns `value`... the
        # value is delivered through the greenlet switch.
        self._greenlet.switch(value)

    def throw(self, *exc_info):
        # Resume the blocked child greenlet by raising inside it.
        self._greenlet.throw(*exc_info)

    def get(self):
        # Block by switching to the parent until switch()/throw() is called.
        return self._main.switch()

    def clear(self):
        # No-op; appears to exist for API compatibility -- TODO confirm.
        pass
def sleep(seconds):
    """Suspends the calling greenlet for `seconds` without blocking the IOLoop."""
    waiter = Waiter()
    unique = object()
    IOLoop.current().add_timeout(time.time() + seconds, waiter.switch, unique)
    waiter.get()
class TimeoutException(Exception): pass


class Timeout(object):
    """Arms an IOLoop timer that throws into the owning greenlet on expiry."""

    def __init__(self, deadline, ex=TimeoutException):
        self._greenlet = greenlet.getcurrent()
        self._ex = ex
        self._callback = None
        # `deadline` is relative seconds; `_delta` is the absolute wall time.
        self._deadline = deadline
        self._delta = time.time() + deadline
        self._ioloop = IOLoop.current()

    def start(self, callback=None):
        """Arms the timer.

        Args:
            callback: if given, it is invoked with the timeout exception
                instead of throwing into the greenlet that created this
                Timeout.
        """
        errmsg = "%s timeout, deadline is %d seconds" % (
            str(self._greenlet), self._deadline)
        if callback:
            self._callback = self._ioloop.add_timeout(self._delta,
                                                      callback,
                                                      self._ex(errmsg))
        else:
            self._callback = self._ioloop.add_timeout(self._delta,
                                                      self._greenlet.throw,
                                                      self._ex(errmsg))

    def cancel(self):
        """Disarms the timer; must only be called after start()."""
        assert self._callback, "Timeout not started"
        self._ioloop.remove_timeout(self._callback)
        self._greenlet = None
class Event(object):
    """Notification primitive for greenlets driven by the IOLoop."""

    def __init__(self):
        # Callbacks (bound greenlet `switch` methods) to invoke on set().
        self._waiter = []
        self._ioloop = IOLoop.current()

    def set(self):
        # Notification runs on the IOLoop, so waiters are resumed from the
        # loop's context rather than the caller's.
        self._ioloop.add_callback(self._notify)

    def wait(self, timeout=None):
        """Blocks the current greenlet until set() is called.

        Args:
            timeout: optional seconds; a TimeoutException is thrown into
                this greenlet if the event is not set in time.
        """
        current_greenlet = greenlet.getcurrent()
        # Register this greenlet's switch so _notify can resume it.
        self._waiter.append(current_greenlet.switch)
        waiter = Waiter()
        if timeout:
            timeout_checker = Timeout(timeout)
            timeout_checker.start(current_greenlet.throw)
            waiter.get()
            timeout_checker.cancel()
        else:
            waiter.get()

    def _notify(self):
        # NOTE(review): the waiter list is never cleared, so a second set()
        # would re-notify greenlets from earlier waits -- confirm intended.
        for waiter in self._waiter:
            waiter(self)
class Watcher(object):
    """Bridges fd readiness events from the IOLoop to a callback."""

    def __init__(self, fd, events):
        self._fd = fd
        # events == 1 selects READ; any other value selects WRITE.
        self._watched_event = IOLoop.READ if events == 1 else IOLoop.WRITE
        self._value = None
        self._greenlet = greenlet.getcurrent()
        self._main = self._greenlet.parent
        self._ioloop = IOLoop.current()
        self._callback = None
        self._iohandler = None

    def start(self, callback, args):
        """Registers `callback(args)` to fire when the fd becomes ready."""
        self._callback = callback
        self._value = args
        self._ioloop.add_handler(self._fd, self._handle_event, self._watched_event)

    def _handle_event(self, fd, events):
        # IOLoop handler signature; `fd`/`events` are ignored, the stored
        # callback and value are used instead.
        self._callback(self._value)

    def stop(self):
        """Unregisters the fd from the IOLoop."""
        self._ioloop.remove_handler(self._fd)
class AsyncSocket(object):
    """Socket-like wrapper over a tornado IOStream usable from greenlets.

    Blocking-style calls (connect/read/recv/readline) are made synchronous
    via `synclize`, which suspends the calling greenlet until the
    underlying coroutine completes.
    """

    def __init__(self, sock):
        self._iostream = IOStream(sock)
        self._resolver = Resolver()
        # 0 disables the respective timeout.
        self._readtimeout = 0
        self._connecttimeout = 0

    def set_readtimeout(self, timeout):
        # Seconds; 0 disables the read timeout.
        self._readtimeout = timeout

    def set_connecttimeout(self, timeout):
        # Seconds; 0 disables the connect timeout.
        self._connecttimeout = timeout

    @synclize
    def connect(self, address):
        """Resolves `address` (host, port) and connects; IPv4 only."""
        host, port = address
        timer = None
        try:
            if self._connecttimeout:
                timer = Timeout(self._connecttimeout)
                timer.start()
            resolved_addrs = yield self._resolver.resolve(host, port, family=socket.AF_INET)
            # Only the first resolved address is attempted.
            for addr in resolved_addrs:
                family, host_port = addr
                yield self._iostream.connect(host_port)
                break
        except TimeoutException:
            self.close()
            raise
        finally:
            if timer:
                timer.cancel()

    #@synclize
    def sendall(self, buff):
        # Fire-and-forget write; does not wait for the data to be flushed.
        self._iostream.write(buff)

    @synclize
    def read(self, nbytes, partial=False):
        """Reads `nbytes` (or up to `nbytes` when partial=True)."""
        timer = None
        try:
            if self._readtimeout:
                timer = Timeout(self._readtimeout)
                timer.start()
            buff = yield self._iostream.read_bytes(nbytes, partial=partial)
            raise Return(buff)
        except TimeoutException:
            self.close()
            raise
        finally:
            if timer:
                timer.cancel()

    def recv(self, nbytes):
        # BSD-socket style recv: may return fewer than `nbytes` bytes.
        return self.read(nbytes, partial=True)

    @synclize
    def readline(self, max_bytes=-1):
        """Reads until a newline, optionally bounded by `max_bytes`."""
        timer = None
        if self._readtimeout:
            timer = Timeout(self._readtimeout)
            timer.start()
        try:
            # NOTE(review): tornado's read_until expects a bytes delimiter
            # on Python 3; the str '\n' here looks Python 2 era -- confirm.
            if max_bytes > 0:
                buff = yield self._iostream.read_until('\n', max_bytes=max_bytes)
            else:
                buff = yield self._iostream.read_until('\n')
            raise Return(buff)
        except TimeoutException:
            self.close()
            raise
        finally:
            if timer:
                timer.cancel()

    def close(self):
        self._iostream.close()

    def set_nodelay(self, flag):
        self._iostream.set_nodelay(flag)

    def settimeout(self, timeout):
        # No-op: timeouts are configured via set_readtimeout /
        # set_connecttimeout instead of the socket API.
        pass

    def shutdown(self, direction):
        if self._iostream.fileno():
            self._iostream.fileno().shutdown(direction)

    def recv_into(self, buff):
        """Reads into the given writable buffer; returns bytes copied."""
        expected_rbytes = len(buff)
        data = self.read(expected_rbytes, True)
        srcarray = bytearray(data)
        nbytes = len(srcarray)
        buff[0:nbytes] = srcarray
        return nbytes

    def makefile(self, mode, other):
        # File-object shim: callers expecting socket.makefile() get this
        # AsyncSocket itself; `mode` and `other` are ignored.
        return self
class Pool(object):
    """Base class for a fixed-size connection pool driven by the IOLoop.

    Subclasses implement `create_raw_conn()` to produce one connection.
    The pool fills itself asynchronously on construction and closes all
    connections after `quit()` is called.
    """

    def __init__(self, max_size=-1, params=None):
        """Initializes the pool.

        Args:
            max_size: number of connections to pre-create.
            params: optional dict of connection parameters for subclasses.

        BUG FIX: `params` previously defaulted to a mutable `{}`, shared
        across every Pool (and subclass) instance created without an
        explicit argument; a fresh dict is now used per instance.
        """
        self._maxsize = max_size
        self._conn_params = {} if params is None else params
        self._pool = []
        self._started = False
        self._ioloop = IOLoop.current()
        self._event = Event()
        # Kick off pool initialization in a green task; the identity lambda
        # merely consumes the future required by add_future.
        self._ioloop.add_future(
            spawn(self.start),
            lambda future: future)

    def create_raw_conn(self):
        """Subclass hook: create and return one raw connection."""
        pass

    def init_pool(self):
        """Fills the pool with `max_size` freshly created connections."""
        for index in range(self._maxsize):
            conn = self.create_raw_conn()
            self._pool.append(conn)

    @property
    def size(self):
        """Number of idle connections currently available."""
        return len(self._pool)

    def get_conn(self):
        """Pops the oldest idle connection; raises when none are available."""
        if self.size > 0:
            return self._pool.pop(0)
        else:
            raise Exception("no available connections", self.size)

    def release(self, conn):
        """Returns a connection to the pool."""
        self._pool.append(conn)

    def quit(self):
        """Signals the pool to shut down and close all connections."""
        self._started = False
        self._event.set()

    def _close_all(self):
        # Close every pooled connection and drop the pool.
        for conn in self._pool:
            conn.close()
        self._pool = None

    def start(self):
        """Green-task entry point: fill the pool, then wait for quit()."""
        self.init_pool()
        self._started = True
        self._event.wait()
        self._close_all()
| {
"content_hash": "9e8b434d1940139afa5597efb1276de9",
"timestamp": "",
"source": "github",
"line_count": 415,
"max_line_length": 92,
"avg_line_length": 27.221686746987952,
"alnum_prop": 0.5458971408338497,
"repo_name": "alex8224/gTornado",
"id": "69581bf73583b8456300f630c2a035aaf365f531",
"size": "11320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gtornado/green.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30004"
}
],
"symlink_target": ""
} |
from collections import namedtuple
from pman.abstractmgr import Resources
from pman.cromwell.slurm.wdl import SlurmJob, Image, StrWdl
# Since conversion to WDL is lossy, we need to define the
# expected WDL, actual source info, and WDL-converted info (lossy_info)
Example = namedtuple('Example', ['wdl', 'info', 'lossy_info'])
basic = Example(
wdl=StrWdl(r"""
version 1.0
task plugin_instance {
command {
whatsup --greeting 'I'"'"'m mr. chris' /share/mr /share/president
} #ENDCOMMAND
runtime {
docker: 'quay.io/fedora/fedora:36'
sharedir: '/location/of/bribe'
cpu: '2'
memory: '5954M'
gpu_limit: '0'
number_of_workers: '9'
timelimit: '12'
}
}
workflow ChRISJob {
call plugin_instance
}
"""),
info=SlurmJob(
command=['whatsup', '--greeting', "I'm mr. chris", '/share/mr', '/share/president'],
image=Image('quay.io/fedora/fedora:36'),
sharedir='/location/of/bribe',
partition=None,
timelimit=12,
resources_dict=Resources(
cpu_limit=1234,
memory_limit=5678,
number_of_workers=9,
gpu_limit=0
)
),
lossy_info=SlurmJob(
command=['whatsup', '--greeting', "I'm mr. chris", '/share/mr', '/share/president'],
image=Image('quay.io/fedora/fedora:36'),
sharedir='/location/of/bribe',
partition=None,
timelimit=12,
resources_dict=Resources(
cpu_limit=2000,
memory_limit=5678,
number_of_workers=9,
gpu_limit=0
)
)
)
fastsurfer = Example(
wdl=StrWdl(r"""
version 1.0
task plugin_instance {
command {
/usr/local/bin/python fastsurfer_inference.py /share/incoming /share/outgoing
} #ENDCOMMAND
runtime {
docker: 'ghcr.io/fnndsc/pl-fastsurfer_inference:1.2.0'
sharedir: '/neuroimaging/data'
cpu: '7'
memory: '10356M'
gpu_limit: '6'
number_of_workers: '5'
timelimit: '300'
slurm_partition: 'has-gpu'
}
}
workflow ChRISJob {
call plugin_instance
}
"""),
info=SlurmJob(
command=['/usr/local/bin/python', 'fastsurfer_inference.py', '/share/incoming', '/share/outgoing'],
image=Image('ghcr.io/fnndsc/pl-fastsurfer_inference:1.2.0'),
sharedir='/neuroimaging/data',
partition='has-gpu',
timelimit=300,
resources_dict=Resources(
number_of_workers=5,
cpu_limit=7000,
memory_limit=9876,
gpu_limit=6
)
),
lossy_info=SlurmJob(
command=['/usr/local/bin/python', 'fastsurfer_inference.py', '/share/incoming', '/share/outgoing'],
image=Image('ghcr.io/fnndsc/pl-fastsurfer_inference:1.2.0'),
sharedir='/neuroimaging/data',
partition='has-gpu',
timelimit=300,
resources_dict=Resources(
number_of_workers=5,
cpu_limit=7000,
memory_limit=9876,
gpu_limit=6
)
)
)
| {
"content_hash": "f138f642e48439d43f3f115ebe0dadef",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 107,
"avg_line_length": 27.854545454545455,
"alnum_prop": 0.577023498694517,
"repo_name": "FNNDSC/pman",
"id": "500795d7f6a7eff0207b1c6b5c6e47715748d542",
"size": "3064",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/cromwell/examples/wdl.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1233"
},
{
"name": "Python",
"bytes": "99739"
},
{
"name": "Shell",
"bytes": "33298"
}
],
"symlink_target": ""
} |
def get_rows(rows_filename):
    """Parse the row-label file into a list of integer lists.

    Each line of the file holds space-separated integers; one inner list
    is produced per line.
    """
    parsed = []
    with open(rows_filename) as handle:
        for raw_line in handle.readlines():
            tokens = raw_line.strip().strip('\n').split(' ')
            parsed.append([int(token) for token in tokens])
    return parsed
def get_columns(columns_filename):
    """Parse the column-label file into a list of integer lists.

    One inner list of ints is produced per line of the file.
    """
    with open(columns_filename) as handle:
        return [
            [int(token) for token in text.strip().strip('\n').split(' ')]
            for text in handle.readlines()
        ]
def init_grid(rows = 25, columns = 25):
    """Build the puzzle grid: True means a white square, False a black one.

    The grid starts all-white, then the puzzle's fixed black squares are
    painted in (their coordinates assume the default 25x25 size).
    """
    # All-white grid; `rows` is the inner (row) length, `columns` the count.
    grid = [[True] * rows for _ in range(columns)]
    black_squares = [
        (3, 3), (3, 4), (3, 12), (3, 13), (3, 21),
        (8, 6), (8, 7), (8, 10), (8, 14), (8, 15), (8, 18),
        (16, 6), (16, 11), (16, 16), (16, 20),
        (21, 3), (21, 4), (21, 9), (21, 10), (21, 15), (21, 20), (21, 21),
    ]
    for row_idx, col_idx in black_squares:
        grid[row_idx][col_idx] = False
    return grid
def whites_in_between_blacks_combinations(black_labels, line):
    """ Calculates the combinations of the white squares
        in between the black(given by black_labels)
        which fit in the line

    Returns a list of gap-size lists (each gap >= 1) between consecutive
    black runs, or None when the black runs cannot fit in the line at all.
    """
    labels = []
    # Get the length of the line
    total_length = len(line)
    # Get how many white positions are between black
    white_in_between_positions = len(black_labels) - 1
    # Initialize the list with 1 white square between the black.
    # E.g if there are 5 black labels, then the whites are [1 1 1 1]
    white_labels = [1 for i in range(white_in_between_positions)]
    # The max value that the white label can take. E.g if the value is 4, then
    # [4 4 4 4]
    max_white_space = total_length - (sum(black_labels) + sum(white_labels)) + 1
    if max_white_space < 0:
        return None
    white_labels_end = [max_white_space for i in range(white_in_between_positions)]
    counter = 0
    # Iterate until the end list is created
    # The end list has the max_white_space in all the elements
    # The following is an odometer algorithm, it iterates from
    # e.g. [1 1 1 1] to [4 4 4 4]
    # (set equality here is equivalent to "every gap equals max_white_space",
    # since only the all-max list has {max_white_space} as its value set)
    while set(white_labels) != set(white_labels_end):
        for i in range(white_in_between_positions):
            # Digit i of the odometer rolls over every (max+1)**i steps.
            if counter and (counter % ((max_white_space+1)**i) == 0):
                white_labels[i] += 1
                white_labels[i] %= (max_white_space + 1)
        # Keep only combinations that fit and contain no zero-width gap.
        if sum(black_labels) + sum(white_labels) <= total_length and 0 not in white_labels:
            labels.append(white_labels[:])
        counter +=1
    return labels
def combine_blacks_whites(black_labels, white_labels, start_black_index = 0):
    """ Create a line with black and white squares, using the black and white
        labels. By default start with black from position 0.
        TODO: Need change the default black from position 0.

    NOTE(review): this function looks unfinished -- the loop below returns
    the (empty) `line` on its first iteration, and `map(None, ...)` is
    Python 2 only (equivalent to itertools.zip_longest in Python 3).
    """
    line = []
    # Mix black and white labels
    # `map(None, a, b)` zips with None padding; the comprehension flattens
    # the pairs and drops the padding, interleaving black and white labels.
    labels_none = map(None, black_labels, white_labels)
    mixed_labels = [i for tup in labels_none for i in tup if i is not None]
    for i in range(25):
        return line
if __name__ == "__main__":
rows = get_rows('input-rows.txt')
columns = get_columns('input-columns.txt')
grid = init_grid()
# Print rows
for i in range(25):
print 'Row: ', i, whites_in_between_blacks_combinations(rows[i], grid[i])
# Print columns
for i in range(25):
print 'Column: ', i, whites_in_between_blacks_combinations(columns[i], grid[:][i])
| {
"content_hash": "593ff2aab62c9958cd3dbb029375521d",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 91,
"avg_line_length": 27.0974025974026,
"alnum_prop": 0.5938173975557153,
"repo_name": "svagionitis/puzzles",
"id": "9889475771acf78b482c9d2f2c9dde4a1397e9c8",
"size": "4173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gchq.gov.uk/grid-shading-puzzle-2015/sol.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5464"
},
{
"name": "Python",
"bytes": "28247"
}
],
"symlink_target": ""
} |
import happybase
# https://happybase.readthedocs.io/en/latest/
'''
If running on Windows with thriftpy 0.3.9, and you get this error:
“thriftpy.parser.exc.ThriftParserError: ThriftPy does not support generating module with path in protocol 'c'”
edit thriftpy/parser/parser.py, and change line 488 from
if url_scheme == '':
to
if url_scheme in ('c', ''):
'''
# get connection to HBase
# dev environment: hadoop02.sgdcelab.sabre.com
# cert environment: bdaolc011node01.sabre.com
# NOTE(review): 'server dns name' is a placeholder -- substitute the real
# Thrift server hostname before running.
connection = happybase.Connection('server dns name')
# create table
# when creating an HBase table, you only define the column family, not the columns.
# Columns are added dynamically as needed
# Unless there is a specific reason to do otherwise, use only one column family, with a short name like 'c'
connection.create_table(
    'svchist_profile',
    {'c': dict(),  # use defaults
     }
)
# Verify the creation: table names come back as bytes, hence .decode().
for table in connection.tables():
    print(table.decode())
"content_hash": "eeca5f5838d468c104d521a96727e87a",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 114,
"avg_line_length": 32.333333333333336,
"alnum_prop": 0.709278350515464,
"repo_name": "bundgus/python-playground",
"id": "0e9b93026aece40c1bba6766d9a4cba307287cbe",
"size": "974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hbase-playground/bda_cert/happybase_demo_create_table.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "68"
},
{
"name": "Gherkin",
"bytes": "897"
},
{
"name": "HTML",
"bytes": "22309040"
},
{
"name": "Jupyter Notebook",
"bytes": "666681"
},
{
"name": "Python",
"bytes": "1046557"
},
{
"name": "Thrift",
"bytes": "58"
}
],
"symlink_target": ""
} |
"""Utilities for interacting with Google Service Management."""
import json
import os.path
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.third_party.apitools.base import py as apitools_base
class SwaggerOpenException(exceptions.Error):
  """Raised when the swagger spec file is malformed or cannot be read."""

  def __init__(self, message):
    super(SwaggerOpenException, self).__init__(message)
class SwaggerUploadException(exceptions.Error):
  """Raised when swagger conversion or service-config upload fails."""

  def __init__(self, message):
    # BUG FIX: this method was previously named `__init` (missing the
    # trailing underscores), so it was never invoked as the constructor.
    super(SwaggerUploadException, self).__init__(message)
# TODO(b/26202997): Switch to using the GetHttpErrorMessage in core once
# b/26202997 is resolved.
def _GetErrorMessage(error):
  """Extracts the server-provided message from an HTTP error payload.

  Args:
    error: an apitools HttpError whose `content` attribute is a JSON string
      shaped like '{"error": {"message": ...}}'.

  Returns:
    The error message string, or '' when the payload carries none.
  """
  payload = json.loads(error.content)
  error_details = payload.get('error', {})
  return error_details.get('message', '')
def PushServiceConfig(swagger_file, project, client, messages):
  """Pushes Service Configuration to Google Service Management.

  Args:
    swagger_file: full path to a JSON file containing the swagger spec.
    project: the Google cloud project Id
    client: the client to use for calls to Service Management API
    messages: the client library messages to use for Service Management API

  Raises:
    SwaggerOpenException: if input is malformed or file cannot be read
    SwaggerUploadException: if service fails to convert swagger, or
    upload of the service configuration conversion result fails
    ValueError: if the required inputs are not provided.

  Returns:
    Operation: a long running asynchronous Operation
    NOTE(review): no value is currently returned by this function despite
    this section -- confirm whether the Update operation should be returned.
  """
  if not swagger_file:
    raise ValueError('Swagger specification file path must be provided.')
  if not project:
    raise ValueError('Project Id must be provided.')
  if not client:
    raise ValueError('Service Management client must be provided.')
  if not messages:
    raise ValueError('Service Management client messages must be provided.')
  # First, convert the swagger specification to Google Service Configuration
  try:
    with open(swagger_file) as f:
      # Rebinds `swagger_file` from the path string to a messages.File
      # carrying both the contents and the original path.
      swagger_file = messages.File(
          contents=f.read(),
          path=swagger_file,
      )
  except IOError:
    raise SwaggerOpenException(
        'Unable to read swagger spec file "{0}"'.format(swagger_file))
  swagger_spec = messages.SwaggerSpec(swaggerFiles=[swagger_file])
  request = messages.ConvertConfigRequest(
      swaggerSpec=swagger_spec,
  )
  try:
    response = client.v1.ConvertConfig(request)
  except apitools_base.exceptions.HttpError as error:
    raise SwaggerUploadException(_GetErrorMessage(error))
  if response.diagnostics:
    kind = messages.Diagnostic.KindValueValuesEnum
    for diagnostic in response.diagnostics:
      # Surface conversion diagnostics at the matching log severity.
      logger = log.error if diagnostic.kind == kind.ERROR else log.warning
      logger('{l}: {m}'.format(l=diagnostic.location, m=diagnostic.message))
  if not response.serviceConfig:
    raise SwaggerUploadException('Failed to upload service configuration.')
  # Create a local ./endpoints directory which will contain the service.json
  # file needed by ESP. This file+directory will be carried to the Managed VM
  # via the app container.
  # TODO(user): Remove this when ESP is able to pull this configuration
  # directly from Inception.
  endpoints_dir = 'endpoints'
  if not os.path.exists(endpoints_dir):
    os.makedirs(endpoints_dir)
  with open(endpoints_dir + '/service.json', 'w') as out:
    out.write(apitools_base.encoding.MessageToJson(response.serviceConfig))
  # Second, upload Google Service Configuration to Service Management API
  managed_service = messages.ManagedService(
      serviceConfig=response.serviceConfig,
      serviceName=response.serviceConfig.name)
  # Set the serviceConfig producerProjectId
  managed_service.serviceConfig.producerProjectId = project
  request = messages.ServicemanagementServicesUpdateRequest(
      serviceName=managed_service.serviceName,
      managedService=managed_service,
  )
  try:
    client.services.Update(request)
    # TODO(b/27295262): wait here until the asynchronous operation in the
    # response has finished.
  except apitools_base.exceptions.HttpError as error:
    raise SwaggerUploadException(_GetErrorMessage(error))
  # Next, enable the service for the producer project
  usage_settings = messages.UsageSettings(
      consumerEnableStatus=
      messages.UsageSettings.ConsumerEnableStatusValueValuesEnum.ENABLED
  )
  project_settings = messages.ProjectSettings(usageSettings=usage_settings)
  request = messages.ServicemanagementServicesProjectSettingsPatchRequest(
      serviceName=managed_service.serviceName,
      consumerProjectId=project,
      projectSettings=project_settings,
      updateMask='usage_settings.consumer_enable_status'
  )
  try:
    client.services_projectSettings.Patch(request)
    # TODO(b/27295262): wait here until the asynchronous operation in the
    # response has finished.
  except apitools_base.exceptions.HttpError as error:
    raise SwaggerUploadException(_GetErrorMessage(error))
| {
"content_hash": "bb38121ba2d9d7d61674255c7ec59149",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 77,
"avg_line_length": 37.05263157894737,
"alnum_prop": 0.7540584415584416,
"repo_name": "flgiordano/netcash",
"id": "24f016772e6464bbf2126e68288c98a55f93e531",
"size": "5524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "+/google-cloud-sdk/lib/googlecloudsdk/api_lib/app/cloud_endpoints.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "622"
},
{
"name": "HTML",
"bytes": "33831"
},
{
"name": "JavaScript",
"bytes": "13859"
},
{
"name": "Shell",
"bytes": "2716"
}
],
"symlink_target": ""
} |
from setuptools import setup
# Packaging configuration for the `mr_bot` Telegram-bot helper library.
# NOTE: keep `version` and the tag in `download_url` in sync when releasing.
setup(
    name='mr_bot',
    packages=['mr_bot'],
    version='1.1.3',
    description='Small package for assist telegram bot development',
    author='Root Kid',
    author_email='shaman@born2fish.ru',
    url='https://github.com/r00tkid/mr_bot.git',
    download_url='https://github.com/r00tkid/mr_bot/tarball/1.1.3',
    keywords=['support', 'telegram', 'bot', 'keyboard'],  # PyPI search keywords
    classifiers=[],
    install_requires=[
        "emoji",
        'colored',
        'future',
        'mr-logger',
        'python-telegram-bot',
        'termcolor',
        'geopy',
        'urllib3'
    ],
)
| {
"content_hash": "1ece34ae59f617ec9e47375ac7951d0a",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 66,
"avg_line_length": 23.8,
"alnum_prop": 0.6184873949579832,
"repo_name": "r00tkid/mr_bot",
"id": "69fe5cbf18ff1d58b49ab2fcd05208505dc8c651",
"size": "654",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17625"
}
],
"symlink_target": ""
} |
"""Defines the Gin configuration framework.
Programs frequently have a number of "hyperparameters" that require variation
across different executions of the program. When the number of such parameters
grows even moderately large, or use of some parameter is deeply embedded in the
code, top-level flags become very cumbersome. This module provides an
alternative mechanism for setting such hyperparameters, by allowing injection of
parameter values for any function marked as "configurable".
For detailed documentation, please see the user guide:
https://github.com/google/gin-config/tree/master/docs/index.md
# Making functions and classes configurable
Functions and classes can be marked configurable using the `@configurable`
decorator, which associates a "configurable name" with the function or class (by
default, just the function or class name). Optionally, parameters can be
allowlisted or denylisted to mark only a subset of the function's parameters as
configurable. Once parameters have been bound (see below) to this function, any
subsequent calls will have those parameters automatically supplied by Gin.
If an argument supplied to a function by its caller (either as a positional
argument or as a keyword argument) corresponds to a parameter configured by Gin,
the caller's value will take precedence.
# A short example
Python code:
@gin.configurable
def mix_cocktail(ingredients):
...
@gin.configurable
def serve_random_cocktail(available_cocktails):
...
@gin.configurable
def drink(cocktail):
...
Gin configuration:
martini/mix_cocktail.ingredients = ['gin', 'vermouth', 'twist of lemon']
gin_and_tonic/mix_cocktail.ingredients = ['gin', 'tonic water']
serve_random_cocktail.available_cocktails = {
'martini': @martini/mix_cocktail,
'gin_and_tonic': @gin_and_tonic/mix_cocktail,
}
drink.cocktail = @serve_random_cocktail()
In the above example, there are three configurable functions: `mix_cocktail`
(with a parameter `ingredients`), `serve_random_cocktail` (with parameter
`available_cocktails`), and `drink` (with parameter `cocktail`).
When `serve_random_cocktail` is called, it will receive a dictionary
containing two scoped *references* to the `mix_cocktail` function (each scope
providing unique parameters, meaning calling the different references will
presumably produce different outputs).
On the other hand, when the `drink` function is called, it will receive the
*output* of calling `serve_random_cocktail` as the value of its `cocktail`
parameter, due to the trailing `()` in `@serve_random_cocktail()`.
"""
import collections
import contextlib
import copy
import enum
import functools
import inspect
import logging
import os
import pprint
import threading
import traceback
import typing
from typing import Any, Callable, Dict, Optional, Sequence, Set, Tuple, Type, Union, Mapping, List
from gin import config_parser
from gin import selector_map
from gin import utils
def _format_location(location: config_parser.Location) -> str:
  """Renders a parser location as 'filename:line' for error messages."""
  filename = location.filename or 'bindings string'
  return f'{filename}:{location.line_num}'
class _ScopeManager(threading.local):
  """Manages currently active config scopes.

  Thread safety comes from subclassing `threading.local`: each thread sees
  its own independent stack. Every stack element is the list of scope names
  active at that nesting depth, ordered outermost to innermost.
  """

  def _maybe_init(self):
    # threading.local gives each thread a fresh instance dict, so state is
    # created lazily on first use within each thread.
    if not hasattr(self, '_active_scopes'):
      self._active_scopes = [[]]

  @property
  def active_scopes(self):
    """A copy of the full stack of active scope lists."""
    self._maybe_init()
    return self._active_scopes[:]

  @property
  def current_scope(self):
    """A copy of the innermost (most recently entered) scope list."""
    self._maybe_init()
    return self._active_scopes[-1][:]

  def enter_scope(self, scope):
    """Enters the given scope, pushing it onto the stack.

    Args:
      scope: A list of active scope names, ordered from outermost to
        innermost.
    """
    self._maybe_init()
    self._active_scopes.append(scope)

  def exit_scope(self):
    """Exits the most recently entered scope."""
    self._maybe_init()
    self._active_scopes.pop()
class _GinBuiltins:
  """Namespace of Gin-provided configurables exposed as `gin.*` when
  dynamic registration is enabled (e.g. `@gin.macro`)."""

  def __init__(self):
    self.macro = macro
    self.constant = _retrieve_constant
    self.singleton = singleton
class ParseContext:
"""Encapsulates context for parsing a single file."""
def __init__(self, import_manager=None):
"""Initializes the instance.
Args:
import_manager: If not `None`, an `ImportManager` providing an initial set
of imports to process. Additionally, if an `ImportManager` is provided
and dynamic registration is enabled, actually importing an unregistered
configurable during parsing becomes an error (instead, all configurables
are assumed to be registered already). This is used when generating
(operative) config strings.
"""
self._import_manager = import_manager
self._imports = []
self._symbol_table = {}
self._symbol_source = {}
self._dynamic_registration = False
if import_manager is not None:
for stmt in import_manager.sorted_imports:
self.process_import(stmt)
@property
def imports(self):
return self._imports
@property
def import_manager(self):
return self._import_manager
def _enable_dynamic_registration(self):
self._dynamic_registration = True
self._symbol_table['gin'] = _GinBuiltins()
self._symbol_source['gin'] = None
def process_import(self, statement: config_parser.ImportStatement):
"""Processes the given `ImportStatement`."""
if statement.is_from and statement.module.startswith('__gin__.'):
if statement.alias:
raise SyntaxError('__gin__ imports do not support `as` aliasing.',
statement.location)
_, feature = statement.module.split('.', maxsplit=1)
if feature == 'dynamic_registration':
if self._imports:
existing_imports = [stmt.format() for stmt in self._imports]
raise SyntaxError(
f'Dynamic registration should be enabled before any other modules '
f'are imported.\n\nAlready imported: {existing_imports}.',
statement.location)
self._enable_dynamic_registration()
else:
raise SyntaxError( # pylint: disable=raising-format-tuple
"Unrecognized __gin__ feature '{feature}'.", statement.location)
else:
fromlist = [''] if statement.is_from or statement.alias else None
module = __import__(statement.module, fromlist=fromlist)
if self._dynamic_registration:
name = statement.bound_name()
if name == 'gin':
raise ValueError(
f'The `gin` symbol is reserved; cannot bind import statement '
f'"{statement.format()}" to `gin`. Use an alias for the import '
f'(via `import ... as ...` or `from ... import ... [as ...]`).')
self._symbol_table[name] = module
self._symbol_source[name] = statement
self._imports.append(statement)
def _resolve_selector(self, selector):
"""Resolves the given `selector` using this context's symbol table.
This method breaks the given `selector` into its contituent components
(names separated by '.'), resolving the first component using this
`ParseContext`'s symbol table, and each subsequent component as an attribute
on the resolved value of the previous component.
Args:
selector: The selector to resolve into attribute names and values.
Raises:
NameError: If the first component of the selector is not a symbol provided
by some import statement in the current `ParseContext`.
AttributeError: If an internal component of the selector does is not a
valid attribute of its parent component.
Returns:
A pair of lists `(attr_names, attr_values)`, with the names and values
corresponding to each component of `selector`.
"""
not_found = object()
attr_names = selector.split('.')
symbol = attr_names[0]
module = self._symbol_table.get(symbol, not_found)
if module is not_found:
raise NameError(f"'{symbol}' was not provided by an import statement.")
attr_chain = [module]
for attr_name in attr_names[1:]:
attr = getattr(attr_chain[-1], attr_name, not_found)
if attr is not_found:
raise AttributeError(
f"Couldn't resolve selector {selector}; {attr_chain[-1]} has no "
f'attribute {attr_name}.')
attr_chain.append(attr)
return attr_names, attr_chain
def _import_source(self, import_statement, attr_names):
"""Creates an "import source" tuple for a given reference."""
if not import_statement.is_from and not import_statement.alias:
module_parts = import_statement.module.split('.')
num_matches = 0
for module_part, attr_name in zip(module_parts, attr_names[:-1]):
if module_part != attr_name:
break
num_matches += 1
module = '.'.join(module_parts[:num_matches])
selector = '.'.join(attr_names[num_matches:])
return (import_statement._replace(module=module), selector)
else:
return (import_statement, '.'.join(attr_names[1:]))
def _register(self, attr_names, attr_values):
"""Registers the function/class at the end of the named_attrs list.
In order to support configurable methods, if a method is registered (a
function whose parent attribute is a class), its parent class will also be
registered. If the parent class has already been registered, it will be
re-registered, and any references to the class in the current config will
be updated (re-initialized) to reference the updated class registration.
Args:
attr_names: A list of attribute names, as returned by `_resolve_selector`.
attr_values: A list of attribute values corresponding to `attr_names`.
Returns:
The `Configurable` instance associated with the new registration.
"""
root_name, *inner_names, fn_or_cls_name = attr_names
*path_attrs, fn_or_cls = attr_values
source = self._symbol_source[root_name]
if source is None: # This happens for Gin "builtins" like `macro`.
module = root_name
else:
module = '.'.join([source.partial_path(), *inner_names])
original = _inverse_lookup(fn_or_cls)
_make_configurable(
fn_or_cls,
name=fn_or_cls_name,
module=module,
import_source=self._import_source(source, attr_names),
avoid_class_mutation=True)
if original is not None: # We've re-registered something...
for reference in iterate_references(_CONFIG, to=original.wrapper):
reference.initialize()
if inspect.isfunction(fn_or_cls) and inspect.isclass(path_attrs[-1]):
self._register(attr_names[:-1], attr_values[:-1])
return _INVERSE_REGISTRY[fn_or_cls]
def get_configurable(self, selector):
  """Get a configurable matching the given `selector`."""
  if not self._dynamic_registration:
    # Without dynamic registration, the global registry is authoritative.
    return _REGISTRY.get_match(selector)
  names, values = self._resolve_selector(selector)
  registered = _inverse_lookup(values[-1])
  if registered is not None:
    return registered
  if self._import_manager:
    # Parse-only mode should never need to register anything new.
    raise RuntimeError(
        f'Encountered unregistered configurable `{selector}` in parse only '
        f'mode. This indicates an internal error. Please file a bug.')
  return self._register(names, values)
def _parse_context() -> ParseContext:
  """Returns the innermost (currently active) parse context."""
  return _PARSE_CONTEXTS[-1]
@contextlib.contextmanager
def _parse_scope(import_manager=None):
  """Pushes a fresh `ParseContext` for the duration of the `with` block.

  Args:
    import_manager: Optional import manager forwarded to the new
      `ParseContext`.

  Yields:
    The newly pushed (now innermost) `ParseContext`.
  """
  _PARSE_CONTEXTS.append(ParseContext(import_manager))
  try:
    yield _parse_context()
  finally:
    # Always restore the previous context, even if parsing raised.
    _PARSE_CONTEXTS.pop()
# Maintains the registry of configurable functions and classes.
_REGISTRY = selector_map.SelectorMap()

# Maps registered functions or classes to their associated Configurable object.
_INVERSE_REGISTRY = {}

# Maps old selector names to new selector names for selectors that are renamed.
# This is used for handling renaming of class method modules.
_RENAMED_SELECTORS = {}

# Maps tuples of `(scope, selector)` to associated parameter values. This
# specifies the current global "configuration" set through `bind_parameter` or
# `parse_config`, but doesn't include any functions' default argument values.
_CONFIG: Dict[Tuple[str, str], Dict[str, Any]] = {}

# Maps tuples of `(scope, selector)` to a mapping from parameter names to
# locations at which the parameter values were set.
_CONFIG_PROVENANCE: Dict[Tuple[str, str],
                         Dict[str, Optional[config_parser.Location]]] = {}

# Keeps a set of ImportStatements that were imported via config files.
_IMPORTS = set()

# Maps `(scope, selector)` tuples to all configurable parameter values used
# during program execution (including default argument values).
_OPERATIVE_CONFIG = {}
# Guards concurrent mutation of `_OPERATIVE_CONFIG`.
_OPERATIVE_CONFIG_LOCK = threading.Lock()

# Keeps track of currently active config scopes.
_SCOPE_MANAGER = _ScopeManager()

# Keeps track of hooks to run when the Gin config is finalized.
_FINALIZE_HOOKS = []

# Keeps track of whether the config is locked.
_CONFIG_IS_LOCKED = False

# Keeps track of whether "interactive mode" is enabled, in which case redefining
# a configurable is not an error.
_INTERACTIVE_MODE = False

# Keeps track of constants created via gin.constant, to both prevent duplicate
# definitions and to avoid writing them to the operative config.
_CONSTANTS = selector_map.SelectorMap()

# Parse contexts, providing file-isolated import/symbol tables.
_PARSE_CONTEXTS = [ParseContext()]

# Keeps track of singletons created via the singleton configurable.
_SINGLETONS = {}

# Keeps track of file readers. These are functions that behave like Python's
# `open` function (can be used as a context manager) and will be used to load
# config files. Each element of this list is a tuple of `(function,
# predicate)`, where `predicate(path)` reports whether `function` can open
# `path` — e.g. the default entry pairs `open` with `os.path.isfile`.
# NOTE(review): an earlier comment described the second element as "the type of
# exception thrown by `function`", which does not match the default entry
# below; confirm the intended contract against the consumers of this list.
_FILE_READERS = [(open, os.path.isfile)]

# Maintains a cache of argspecs for functions.
_ARG_SPEC_CACHE = {}

# List of location prefixes. Similar to PATH var in unix to be used to search
# for files with those prefixes.
_LOCATION_PREFIXES = ['']

# Sentinel value used to mark parameters as required (must be bound in config).
REQUIRED = object()
# Expose it as a constant so `%gin.REQUIRED` works in config files.
_CONSTANTS['gin.REQUIRED'] = REQUIRED
def _find_class_construction_fn(cls):
"""Find the first __init__ or __new__ method in the given class's MRO."""
for base in inspect.getmro(cls): # pytype: disable=wrong-arg-types
if '__init__' in base.__dict__:
return base.__init__
if '__new__' in base.__dict__:
return base.__new__
def _ensure_wrappability(fn):
"""Make sure `fn` can be wrapped cleanly by functools.wraps."""
# Handle "builtin_function_or_method", "wrapped_descriptor", and
# "method-wrapper" types.
unwrappable_types = (type(sum), type(object.__init__), type(object.__call__))
if isinstance(fn, unwrappable_types):
# pylint: disable=unnecessary-lambda
wrappable_fn = lambda *args, **kwargs: fn(*args, **kwargs)
wrappable_fn.__name__ = fn.__name__
wrappable_fn.__doc__ = fn.__doc__
wrappable_fn.__module__ = '' # These types have no __module__, sigh.
wrappable_fn.__wrapped__ = fn
return wrappable_fn
# Otherwise we're good to go...
return fn
def _inverse_lookup(fn_or_cls, allow_decorators=False):
  """Looks up the `Configurable` registered for `fn_or_cls`, if any."""
  # Peel off decorator layers, stopping as soon as a registered layer is hit.
  target = inspect.unwrap(fn_or_cls, stop=lambda f: f in _INVERSE_REGISTRY)
  entry = _INVERSE_REGISTRY.get(target)
  if entry is None:
    return None
  if allow_decorators:
    return entry
  # Otherwise only accept exact matches against the registered callables.
  return entry if fn_or_cls in (entry.wrapped, entry.wrapper) else None
def _find_registered_methods(cls, selector):
  """Finds methods in `cls` that have been wrapped or registered with Gin.

  Args:
    cls: The class whose methods should be inspected.
    selector: The Gin selector of `cls`; registered methods found here are
      re-homed under this selector.

  Returns:
    A dict mapping method names to the callable that should be installed on
    the decorated class (the Gin wrapper for registered methods).
  """
  registered_methods = {}

  def is_method(maybe_method):
    # Python 3 has no notion of an unbound method. To avoid a scenario where a
    # previously registered function is assigned as a class attribute (e.g., the
    # default value of a dataclass field) and considered a method here, we
    # require that the function's __module__ is the same as that of the class,
    # its __name__ matches the name it is accessible under via the class, and
    # its __qualname__ contains the class name as `Class.name`.
    for base in inspect.getmro(cls):  # pytype: disable=wrong-arg-types
      if (inspect.isfunction(maybe_method) and
          maybe_method.__module__ == base.__module__ and
          getattr(base, maybe_method.__name__, None) == maybe_method):
        qualname_parts = maybe_method.__qualname__.split('.')
        if len(qualname_parts) > 1 and qualname_parts[-2] == base.__name__:
          return True
    return False

  for name, method in inspect.getmembers(cls, predicate=is_method):
    if method in _INVERSE_REGISTRY:
      method_info = _INVERSE_REGISTRY[method]
      if method_info.module not in (method.__module__, selector):
        raise ValueError(
            f'Method {name} in class {cls} ({selector}) was registered with a '
            f'custom module ({method_info.module}), but the class is also '
            f'being registered. Avoid specifying a module on the method to '
            f'allow class registration to modify the method module name.')
      # Re-home the method's registration under the class's selector, keeping
      # the rename recorded so old selectors can still be resolved.
      old_selector = method_info.selector
      new_selector = selector + '.' + method_info.name
      method_info = method_info._replace(
          module=selector, selector=new_selector, is_method=True)
      _RENAMED_SELECTORS[old_selector] = new_selector
      _REGISTRY.pop(old_selector)
      _REGISTRY[new_selector] = method_info
      _INVERSE_REGISTRY[method] = method_info
      registered_methods[name] = method_info.wrapper
    else:
      # Already-wrapped methods (decorators) are carried over unchanged.
      if _inverse_lookup(method, allow_decorators=True):
        registered_methods[name] = method
  return registered_methods
def _make_meta_call_wrapper(cls):
"""Creates a pickle-compatible wrapper for `type(cls).__call__`.
This function works in tandem with `_decorate_fn_or_cls` below. It wraps
`type(cls).__call__`, which is in general responsible for creating a new
instance of `cls` or one of its subclasses. In cases where the to-be-created
class is Gin's dynamically-subclassed version of `cls`, the wrapper here
instead returns an instance of `cls`, which isn't a dynamic subclass and more
generally doesn't have any Gin-related magic applied. This means the instance
is compatible with pickling, and is totally transparent to any inspections by
user code (since it really is an instance of the original type).
Args:
cls: The class whose metaclass's call method should be wrapped.
Returns:
A wrapped version of the `type(cls).__call__`.
"""
cls_meta = type(cls)
@functools.wraps(cls_meta.__call__)
def meta_call_wrapper(new_cls, *args, **kwargs):
# If `new_cls` (the to-be-created class) is a direct subclass of `cls`, we
# can be sure that it's Gin's dynamically created subclass. In this case,
# we directly create an instance of `cls` instead. Otherwise, some further
# dynamic subclassing by user code has likely occurred, and we just create
# an instance of `new_cls` to avoid issues. This instance is likely not
# compatible with pickle, but that's generally true of dynamically created
# subclasses and would require some user workaround with or without Gin.
if new_cls.__bases__ == (cls,):
new_cls = cls
return cls_meta.__call__(new_cls, *args, **kwargs)
return meta_call_wrapper
def _decorate_fn_or_cls(decorator,
                        fn_or_cls,
                        selector,
                        avoid_class_mutation=False,
                        decorate_methods=False):
  """Decorate a function or class with the given decorator.

  When `fn_or_cls` is a function, applies `decorator` to the function and
  returns the (decorated) result.

  When `fn_or_cls` is a class and the `avoid_class_mutation` parameter is
  `False`, this will replace either `fn_or_cls.__init__` or `fn_or_cls.__new__`
  (whichever is first implemented in the class's MRO, with a preference for
  `__init__`) with the result of applying `decorator` to it.

  When `fn_or_cls` is a class and `avoid_class_mutation` is `True`, this will
  dynamically construct a subclass of `fn_or_cls` using a dynamically
  constructed metaclass (which itself is a subclass of `fn_or_cls`'s metaclass).
  The metaclass's `__call__` method is wrapped using `decorator` to
  intercept/inject parameters for class construction. The resulting subclass has
  metadata (docstring, name, and module information) copied over from
  `fn_or_cls`, and should behave like the original as much possible, without
  modifying it (for example, inspection operations using `issubclass` should
  behave the same way as on the original class). When constructed, an instance
  of the original (undecorated) class is returned.

  Args:
    decorator: The decorator to use.
    fn_or_cls: The function or class to decorate.
    selector: The Gin selector for `fn_or_cls`. This is used to modify method
      modules to match the overall class selector.
    avoid_class_mutation: Whether to avoid class mutation using dynamic
      subclassing. This argument is ignored if `fn_or_cls` is not a class.
    decorate_methods: Whether to also decorate Gin-registered methods.

  Returns:
    The decorated function or class.
  """
  if not inspect.isclass(fn_or_cls):  # pytype: disable=wrong-arg-types
    # Plain function: decorate directly (shimming C-level callables first).
    return decorator(_ensure_wrappability(fn_or_cls))

  cls = fn_or_cls
  if avoid_class_mutation:
    # This approach enables @gin.register and gin.external_configurable(), and
    # is in most cases compatible with pickling instances. We can't use it for
    # @gin.configurable because the decorated class returned below can interact
    # poorly with `super(type, obj)` if `type` references the decorated version
    # while `obj` is an instance of the original undecorated class.
    method_overrides = _find_registered_methods(cls, selector)
    if decorate_methods:
      method_overrides = {
          name: decorator(method) for name, method in method_overrides.items()
      }
    cls_meta = type(cls)  # The metaclass of the given class.
    if method_overrides:
      # If we have methods to override, we just use cls_meta.__call__ directly.
      # This creates a new sub-class instance (`decorated_class` below) that
      # contains the replaced methods, but also means the dynamically created
      # class isn't pickle-compatible, since it differs from the base class.
      meta_call = cls_meta.__call__
    else:
      # Otherwise, we wrap the __call__ method on the metaclass. The basic
      # strategy here is to create a new metaclass (subclassing the class's
      # metaclass to preserve behavior), wrapping its __call__ method to return
      # an instance of the *original* (undecorated) class. Gin's wrapper is then
      # applied to this decorated __call__ method to ensure any configured
      # parameters are passed through to `__init__` or `__new__` appropriately.
      meta_call = _make_meta_call_wrapper(cls)  # See this for more details.
    # We decorate our possibly-wrapped metaclass __call__ with Gin's wrapper.
    decorated_call = decorator(_ensure_wrappability(meta_call))
    # And now construct a new metaclass, subclassing the one from `cls`,
    # supplying our decorated `__call__`. Most often this is just subclassing
    # Python's `type`, but when `cls` has a custom metaclass set, this ensures
    # that it will continue to work properly.
    decorating_meta = type(cls_meta)(cls_meta.__name__, (cls_meta,), {
        '__call__': decorated_call,
    })
    # Now we construct our class. This is a subclass of `cls`, but only with
    # wrapper-related overrides, since injecting/intercepting parameters is all
    # handled in the metaclass's `__call__` method. Note that we let
    # '__annotations__' simply get forwarded to the base class, since creating
    # a new type doesn't set this attribute by default.
    overrides = {
        attr: getattr(cls, attr)
        for attr in ('__module__', '__name__', '__qualname__', '__doc__')
    }
    # If `cls` won't have a `__dict__` attribute, disable `__dict__` creation on
    # our subclass as well. This seems like generally correct behavior, and also
    # prevents errors that can arise under some very specific circumstances due
    # to a CPython bug in type creation.
    if getattr(cls, '__dictoffset__', None) == 0:
      overrides['__slots__'] = ()
    # Update our overrides with any methods we need to replace.
    overrides.update(method_overrides)
    # Finally, create the decorated class using the metaclass created above.
    decorated_class = decorating_meta(cls.__name__, (cls,), overrides)
  else:
    # Here, we just decorate `__init__` or `__new__` directly, and mutate the
    # original class definition to use the decorated version. This is simpler
    # and permits reliable subclassing of @gin.configurable decorated classes.
    decorated_class = cls
    construction_fn = _find_class_construction_fn(decorated_class)
    decorated_fn = decorator(_ensure_wrappability(construction_fn))
    if construction_fn.__name__ == '__new__':
      # `__new__` is an implicit staticmethod; re-wrap it so the decorated
      # version is looked up the same way.
      decorated_fn = staticmethod(decorated_fn)
    setattr(decorated_class, construction_fn.__name__, decorated_fn)
  return decorated_class
class Configurable(typing.NamedTuple):
  """Record describing a single Gin-registered function or class."""
  wrapper: Callable[..., Any]  # Gin-decorated version, used at call time.
  wrapped: Callable[..., Any]  # The original, undecorated callable.
  name: str  # Registered name (last component of the selector).
  module: str  # Module path under which the callable was registered.
  import_source: Optional[Tuple[config_parser.ImportStatement, str]]
  allowlist: Optional[Sequence[str]]  # Bindable parameter names, if limited.
  denylist: Optional[Sequence[str]]  # Parameter names that may not be bound.
  selector: str  # Full selector ('module.name').
  is_method: bool = False  # True when this registration is a class method.
def _raise_unknown_reference_error(ref, additional_msg=''):
err_str = "No configurable matching reference '@{}{}'.{}"
maybe_parens = '()' if ref.evaluate else ''
raise ValueError(err_str.format(ref.selector, maybe_parens, additional_msg))
def _raise_unknown_configurable_error(selector):
raise ValueError(f"No configurable matching '{selector}'.")
def _decorate_with_scope(configurable_, scope_components):
  """Decorates `configurable_.wrapper` so it runs inside `scope_components`.

  Args:
    configurable_: A `Configurable` instance, whose `wrapper` attribute should
      be decorated.
    scope_components: The list of scope components to use as a scope (e.g., as
      returned by `current_scope`).

  Returns:
    A callable function or class that applies the given scope to
    `configurable_.wrapper`.
  """
  if not scope_components:
    # No scope to apply; hand the wrapper back untouched.
    return configurable_.wrapper

  def scope_decorator(fn_or_cls):
    @functools.wraps(fn_or_cls)
    def scoping_wrapper(*args, **kwargs):
      # Enter the scope for the duration of every call.
      with config_scope(scope_components):
        return fn_or_cls(*args, **kwargs)
    return scoping_wrapper

  return _decorate_fn_or_cls(
      scope_decorator,
      configurable_.wrapper,
      configurable_.selector,
      avoid_class_mutation=True,
      decorate_methods=True)
class ConfigurableReference:
  """Represents a reference to a configurable function or class."""

  def __init__(self, scoped_selector, evaluate):
    # `scoped_selector` is e.g. 'scope1/scope2/module.name'; `evaluate`
    # indicates the '@selector()' form (call at binding time).
    self._scoped_selector = scoped_selector
    self._evaluate = evaluate
    self.initialize()

  def initialize(self):
    """(Re)resolves this reference against the current parse context.

    Called from `__init__` and again when the referenced configurable is
    re-registered, so the reference tracks the latest registration.

    Raises:
      ValueError: If no configurable matches the selector.
    """
    *self._scopes, self._selector = self._scoped_selector.split('/')
    self._configurable = _parse_context().get_configurable(self._selector)
    if not self._configurable:
      _raise_unknown_reference_error(self)
    self._scoped_configurable_fn = _decorate_with_scope(
        self._configurable, scope_components=self._scopes)

  @property
  def configurable(self):
    # The resolved `Configurable` record.
    return self._configurable

  @property
  def scoped_configurable_fn(self):
    # The configurable's wrapper, decorated to run inside this reference's
    # scopes.
    return self._scoped_configurable_fn

  @property
  def scopes(self):
    # List of scope components (may be empty).
    return self._scopes

  @property
  def selector(self):
    # The unscoped selector text.
    return self._selector

  @property
  def scoped_selector(self):
    # The original 'scope/.../selector' string.
    return self._scoped_selector

  @property
  def config_key(self):
    # Key into `_CONFIG`: (joined scope string, complete selector).
    return ('/'.join(self._scopes), self._configurable.selector)

  @property
  def evaluate(self):
    # Whether this reference should be called ('@selector()' form).
    return self._evaluate

  def __eq__(self, other):
    if isinstance(other, self.__class__):
      # pylint: disable=protected-access
      return (self._configurable == other._configurable and
              self._evaluate == other._evaluate)
      # pylint: enable=protected-access
    return False

  def __ne__(self, other):
    return not self.__eq__(other)

  def __hash__(self):
    # Hash on the repr so equal references in the same scope hash equally.
    return hash(repr(self))

  def __repr__(self):
    # Check if this reference is a macro or constant, i.e. @.../macro() or
    # @.../constant(). Only macros and constants correspond to the %... syntax.
    configurable_fn = self._configurable.wrapped
    if configurable_fn in (macro, _retrieve_constant) and self._evaluate:
      return '%' + '/'.join(self._scopes)
    maybe_parens = '()' if self._evaluate else ''
    # With dynamic registration, prefer the shortest unambiguous selector.
    import_manager = _parse_context().import_manager
    if import_manager is not None and import_manager.dynamic_registration:
      selector = import_manager.minimal_selector(self._configurable)
    else:
      selector = self.selector
    scoped_selector = '/'.join([*self.scopes, selector])
    return '@{}{}'.format(scoped_selector, maybe_parens)

  def __deepcopy__(self, memo):
    """Dishonestly implements the __deepcopy__ special method.

    When called, this returns either the `ConfigurableReference` instance itself
    (when `self._evaluate` is `False`) or the result of calling the underlying
    configurable. Configurable references may be deeply nested inside other
    Python data structures, and by providing this implementation,
    `copy.deepcopy` can be used on the containing Python structure to return a
    copy replacing any `ConfigurableReference` marked for evaluation with its
    corresponding configurable's output.

    Args:
      memo: The memoization dict (unused).

    Returns:
      When `self._evaluate` is `False`, returns the underlying configurable
      (maybe wrapped to be called in the proper scope). When `self._evaluate` is
      `True`, returns the output of calling the underlying configurable.
    """
    if self._evaluate:
      return self._scoped_configurable_fn()
    return self._scoped_configurable_fn
class _UnknownConfigurableReference:
"""Represents a reference to an unknown configurable.
This class acts as a substitute for `ConfigurableReference` when the selector
doesn't match any known configurable.
"""
def __init__(self, selector, evaluate):
self._selector = selector.split('/')[-1]
self._evaluate = evaluate
@property
def selector(self):
return self._selector
@property
def evaluate(self):
return self._evaluate
def __deepcopy__(self, memo):
"""Dishonestly implements the __deepcopy__ special method.
See `ConfigurableReference` above. If this method is called, it means there
was an attempt to use this unknown configurable reference, so we throw an
error here.
Args:
memo: The memoization dict (unused).
Raises:
ValueError: To report that there is no matching configurable.
"""
addl_msg = '\n\n To catch this earlier, ensure gin.finalize() is called.'
_raise_unknown_reference_error(self, addl_msg)
def _validate_skip_unknown(skip_unknown):
if not isinstance(skip_unknown, (bool, list, tuple, set)):
err_str = 'Invalid value for `skip_unknown`: {}'
raise ValueError(err_str.format(skip_unknown))
def _should_skip(selector, skip_unknown):
  """Checks whether `selector` should be skipped (if unknown)."""
  _validate_skip_unknown(skip_unknown)
  if _REGISTRY.matching_selectors(selector):
    return False  # Never skip known configurables.
  if isinstance(skip_unknown, bool):
    return skip_unknown
  # Otherwise it's a collection of explicitly skippable selectors.
  return selector in skip_unknown
class ParserDelegate(config_parser.ParserDelegate):
  """Delegate to handle creation of configurable references and macros."""

  def __init__(self, skip_unknown=False):
    self._skip_unknown = skip_unknown

  def configurable_reference(self, scoped_selector, evaluate):
    """Returns a reference for `scoped_selector`, honoring skip_unknown."""
    unscoped = scoped_selector.rsplit('/', 1)[-1]
    if _should_skip(unscoped, self._skip_unknown):
      return _UnknownConfigurableReference(scoped_selector, evaluate)
    return ConfigurableReference(scoped_selector, evaluate)

  def macro(self, name):
    """Resolves `%name` to a constant reference, else a macro reference."""
    matches = _CONSTANTS.matching_selectors(name)
    if not matches:
      return ConfigurableReference(name + '/gin.macro', True)
    if len(matches) > 1:
      raise ValueError(
          "Ambiguous constant selector '{}', matches {}.".format(name, matches))
    return ConfigurableReference(matches[0] + '/gin.constant', True)
class ParsedBindingKey(typing.NamedTuple):
  """Represents a parsed and validated binding key.

  A "binding key" identifies a specific parameter (`arg_name`), of a specific
  configurable (`complete_selector`), in a specific scope (`scope`), to which a
  value may be bound in the global configuration. The `given_selector` field
  retains information about how the original configurable selector was
  specified, which can be helpful for error messages (but is ignored for the
  purposes of equality and hashing).
  """
  scope: str
  given_selector: str
  complete_selector: str
  arg_name: str

  @classmethod
  def parse(cls, binding_key):
    """Parses and validates the given binding key.

    This function will parse `binding_key` (if necessary), and ensure that the
    specified parameter can be bound for the given configurable selector (i.e.,
    that the parameter isn't denylisted or not allowlisted if an allowlist was
    provided).

    Args:
      binding_key: A spec identifying a parameter of a configurable (maybe in
        some scope). This should either be a string of the form
        'maybe/some/scope/maybe.modules.configurable_name.parameter_name'; or a
        list or tuple of `(scope, selector, arg_name)`; or another instance of
        `ParsedBindingKey`.

    Returns:
      A new instance of `ParsedBindingKey`.

    Raises:
      ValueError: If no function can be found matching the configurable name
        specified by `binding_key`, or if the specified parameter name is
        denylisted or not in the function's allowlist (if present).
    """
    if isinstance(binding_key, ParsedBindingKey):
      return cls(*binding_key)
    if isinstance(binding_key, (list, tuple)):
      scope, selector, arg_name = binding_key
    elif isinstance(binding_key, str):
      scope, selector, arg_name = config_parser.parse_binding_key(binding_key)
    else:
      err_str = 'Invalid type for binding_key: {}.'
      raise ValueError(err_str.format(type(binding_key)))

    configurable_ = _parse_context().get_configurable(selector)
    if not configurable_:
      _raise_unknown_configurable_error(selector)
    # Methods must be referenced as 'ClassName.method', never bare.
    if configurable_.is_method and '.' not in selector:
      class_name = configurable_.selector.split('.')[-2]
      err_str = "Method '{}' referenced without class name '{}'."
      raise ValueError(err_str.format(selector, class_name))
    if not _might_have_parameter(configurable_.wrapper, arg_name):
      err_str = "Configurable '{}' doesn't have a parameter named '{}'."
      raise ValueError(err_str.format(selector, arg_name))
    if configurable_.allowlist and arg_name not in configurable_.allowlist:
      err_str = "Configurable '{}' doesn't include kwarg '{}' in its allowlist."
      raise ValueError(err_str.format(selector, arg_name))
    if configurable_.denylist and arg_name in configurable_.denylist:
      err_str = "Configurable '{}' has denylisted kwarg '{}'."
      raise ValueError(err_str.format(selector, arg_name))
    return cls(
        scope=scope,
        given_selector=selector,
        complete_selector=configurable_.selector,
        arg_name=arg_name)

  @property
  def config_key(self):
    # Key into `_CONFIG` for this binding.
    return self.scope, self.complete_selector

  @property
  def scope_selector_arg(self):
    # The identity tuple used for equality and hashing.
    return self.scope, self.complete_selector, self.arg_name

  def __eq__(self, other):
    # BUG FIX: this method was previously named `__equal__`, which Python
    # never invokes, so `==` silently fell back to tuple comparison — which
    # includes `given_selector`, contrary to the class docstring. Equality
    # must ignore `given_selector`: two binding keys are equal whenever they
    # identify the same parameter.
    if not isinstance(other, ParsedBindingKey):
      return NotImplemented
    return self.scope_selector_arg == other.scope_selector_arg

  def __ne__(self, other):
    # Explicit override needed: tuple.__ne__ would otherwise compare all
    # fields and disagree with the customized __eq__ above.
    result = self.__eq__(other)
    return result if result is NotImplemented else not result

  def __hash__(self):
    # Consistent with __eq__: hash only the identifying triple.
    return hash(self.scope_selector_arg)
def _format_value(value):
  """Returns `value` in a format parseable by `parse_value`, or `None`.

  Simply put, this function guarantees that whenever it returns a string,
  the following holds:

      parse_value(_format_value(value)) == value

  Args:
    value: The value to format.

  Returns:
    A string representation of `value` when `value` is literally representable,
    or `None`.
  """
  candidate = repr(value)
  try:
    round_trips = parse_value(candidate) == value
  except SyntaxError:
    round_trips = False
  return candidate if round_trips else None
def _is_literally_representable(value):
  """Returns `True` if `value` can be (parseably) represented as a string.

  Args:
    value: The value to check.

  Returns:
    `True` when `value` can be represented as a string parseable by
    `parse_literal`, `False` otherwise.
  """
  formatted = _format_value(value)
  return formatted is not None
def clear_config(clear_constants=False):
  """Clears the global configuration.

  This clears any parameter values set by `bind_parameter` or `parse_config`, as
  well as the set of dynamically imported modules. It does not remove any
  configurable functions or classes from the registry of configurables.

  Args:
    clear_constants: Whether to clear constants created by `constant`. Defaults
      to False.
  """
  _set_config_is_locked(False)
  _CONFIG.clear()
  _CONFIG_PROVENANCE.clear()
  _SINGLETONS.clear()
  if clear_constants:
    _CONSTANTS.clear()
    # Re-seed the one constant Gin itself always provides.
    _CONSTANTS['gin.REQUIRED'] = REQUIRED
  else:
    saved_constants = _CONSTANTS.copy()
    _CONSTANTS.clear()  # Clear then redefine constants (re-adding bindings).
    for name, value in saved_constants.items():
      constant(name, value)
  _IMPORTS.clear()
  with _OPERATIVE_CONFIG_LOCK:
    _OPERATIVE_CONFIG.clear()
def bind_parameter(binding_key,
                   value,
                   location: Optional[config_parser.Location] = None):
  """Binds the parameter value specified by `binding_key` to `value`.

  The `binding_key` argument should either be a string of the form
  `maybe/scope/optional.module.names.configurable_name.parameter_name`, or a
  list or tuple of `(scope, selector, parameter_name)`, where `selector`
  corresponds to `optional.module.names.configurable_name`. Once this function
  has been called, subsequent calls (in the specified scope) to the specified
  configurable function will have `value` supplied to their `parameter_name`
  parameter.

  Example:

    @configurable('fully_connected_network')
    def network_fn(num_layers=5, units_per_layer=1024):
      ...

    def main(_):
      config.bind_parameter('fully_connected_network.num_layers', 3)
      network_fn()  # Called with num_layers == 3, not the default of 5.

  Args:
    binding_key: The parameter whose value should be set. This can either be a
      string, or a tuple of the form `(scope, selector, parameter)`.
    value: The desired value.
    location: Location at which the parameter value was set.

  Raises:
    RuntimeError: If the config is locked.
    ValueError: If no function can be found matching the configurable name
      specified by `binding_key`, or if the specified parameter name is
      denylisted or not in the function's allowlist (if present).
  """
  if config_is_locked():
    raise RuntimeError('Attempted to modify locked Gin config.')
  parsed = ParsedBindingKey.parse(binding_key)
  _CONFIG.setdefault(parsed.config_key, {})[parsed.arg_name] = value
  # Update provenance even when `location` is None, so a rebinding never
  # leaves a stale location pointing at the previous source of the value.
  provenance = _CONFIG_PROVENANCE.setdefault(parsed.config_key, {})
  provenance[parsed.arg_name] = location
def query_parameter(binding_key):
  """Returns the currently bound value to the specified `binding_key`.

  The `binding_key` argument should look like
  'maybe/some/scope/maybe.modules.configurable_name.parameter_name'. Note that
  this will not include default parameters.

  Args:
    binding_key: The parameter whose value should be queried.

  Returns:
    The value bound to the configurable/parameter combination given in
    `binding_key`.

  Raises:
    ValueError: If no function can be found matching the configurable name
      specified by `binding_key`, or if the specified parameter name is
      denylisted or not in the function's allowlist (if present) or if there is
      no value bound for the queried parameter or configurable.
  """
  # A bare module-like name may refer to a constant; try that first.
  if config_parser.MODULE_RE.match(binding_key):
    constant_matches = _CONSTANTS.matching_selectors(binding_key)
    if len(constant_matches) > 1:
      raise ValueError(
          "Ambiguous constant selector '{}', matches {}.".format(
              binding_key, constant_matches))
    if constant_matches:
      return _CONSTANTS[constant_matches[0]]
  parsed = ParsedBindingKey.parse(binding_key)
  bound_args = _CONFIG.get(parsed.config_key)
  if bound_args is None:
    raise ValueError(
        "Configurable '{}' has no bound parameters.".format(
            parsed.given_selector))
  if parsed.arg_name not in bound_args:
    raise ValueError(
        "Configurable '{}' has no value bound for parameter '{}'.".format(
            parsed.given_selector, parsed.arg_name))
  return bound_args[parsed.arg_name]
def _might_have_parameter(fn_or_cls, arg_name):
  """Returns True if `arg_name` might be a valid parameter for `fn_or_cls`.

  Specifically, this means that `fn_or_cls` either has a parameter named
  `arg_name`, or has a `**kwargs` parameter.

  Args:
    fn_or_cls: The function or class to check.
    arg_name: The name of the parameter.

  Returns:
    Whether `arg_name` might be a valid argument of `fn`.
  """
  if inspect.isclass(fn_or_cls):  # pytype: disable=wrong-arg-types
    target = _find_class_construction_fn(fn_or_cls)
  else:
    target = fn_or_cls
  # Follow the __wrapped__ chain down to the innermost callable.
  while hasattr(target, '__wrapped__'):
    target = target.__wrapped__
  spec = _get_cached_arg_spec(target)
  if spec.varkw:  # A **kwargs parameter accepts any name.
    return True
  return arg_name in spec.args or arg_name in spec.kwonlyargs
def _validate_parameters(fn_or_cls, arg_name_list, err_prefix):
  """Raises ValueError if any name in `arg_name_list` isn't a parameter."""
  for name in arg_name_list or []:
    if not _might_have_parameter(fn_or_cls, name):
      raise ValueError(
          "Argument '{}' in {} not a parameter of '{}'.".format(
              name, err_prefix, fn_or_cls.__name__))
def _get_cached_arg_spec(fn):
  """Gets cached argspec for `fn`."""
  cached = _ARG_SPEC_CACHE.get(fn)
  if cached is not None:
    return cached
  try:
    spec = inspect.getfullargspec(fn)
  except TypeError:
    # `fn` might be a callable object rather than a plain function.
    spec = inspect.getfullargspec(fn.__call__)
  _ARG_SPEC_CACHE[fn] = spec
  return spec
def _get_supplied_positional_parameter_names(fn, args):
  """Returns the names of the supplied arguments to the given function."""
  positional_names = _get_cached_arg_spec(fn).args
  # May be shorter than len(args) if args contains vararg (*args) arguments.
  return positional_names[:len(args)]
def _get_all_positional_parameter_names(fn):
  """Returns the names of all positional arguments to the given function."""
  spec = _get_cached_arg_spec(fn)
  positional = spec.args
  # Trailing args with defaults are keyword-style; drop them.
  num_defaulted = len(spec.defaults) if spec.defaults else 0
  if num_defaulted:
    positional = positional[:-num_defaulted]
  return positional
def _get_kwarg_defaults(fn):
  """Returns a dict mapping kwargs to default values for the given function."""
  spec = _get_cached_arg_spec(fn)
  defaults = {}
  if spec.defaults:
    # The last len(spec.defaults) positional args carry the default values.
    defaulted_names = spec.args[-len(spec.defaults):]
    defaults.update(zip(defaulted_names, spec.defaults))
  if spec.kwonlydefaults:
    defaults.update(spec.kwonlydefaults)
  return defaults
def _get_validated_required_kwargs(fn, fn_descriptor, allowlist, denylist):
  """Gets required argument names, and validates against allow/denylist."""
  required = []
  for name, default in _get_kwarg_defaults(fn).items():
    if default is not REQUIRED:
      continue
    # A REQUIRED parameter that can never be bound is a configuration error.
    if denylist and name in denylist:
      raise ValueError(
          "Argument '{}' of {} marked REQUIRED but denylisted.".format(
              name, fn_descriptor))
    if allowlist and name not in allowlist:
      raise ValueError(
          "Argument '{}' of {} marked REQUIRED but not allowlisted.".format(
              name, fn_descriptor))
    required.append(name)
  return required
def _get_default_configurable_parameter_values(fn, allowlist, denylist):
  """Retrieve all default values for configurable parameters of a function.

  Any parameters included in the supplied denylist, or not included in the
  supplied allowlist, are excluded.

  Args:
    fn: The function whose parameter values should be retrieved.
    allowlist: The allowlist (or `None`) associated with the function.
    denylist: The denylist (or `None`) associated with the function.

  Returns:
    A dictionary mapping configurable parameter names to their default values.
  """
  defaults = _get_kwarg_defaults(fn)

  def keep(name):
    # Drop anything denylisted, anything outside a present allowlist, and
    # anything whose default can't round-trip through a literal.
    if denylist and name in denylist:
      return False
    if allowlist and name not in allowlist:
      return False
    return _is_literally_representable(defaults[name])

  return {name: value for name, value in defaults.items() if keep(name)}
def _order_by_signature(fn, arg_names):
  """Sorts `arg_names` to match their position in the signature of `fn`."""
  spec = _get_cached_arg_spec(fn)
  signature_order = list(spec.args) + list(spec.kwonlyargs or [])
  ordered = [name for name in signature_order if name in arg_names]
  # Names not in the signature correspond to varkwargs; append them in the
  # order they were given.
  ordered += [name for name in arg_names if name not in ordered]
  return ordered
def current_scope():
  """Returns the list of currently active scope names (a copy)."""
  active_scope = _SCOPE_MANAGER.current_scope
  return active_scope
def current_scope_str():
  """Returns the active scope as a single '/'-delimited string."""
  scope_names = current_scope()
  return '/'.join(scope_names)
@contextlib.contextmanager
def config_scope(name_or_scope):
  """Opens a new configuration scope.

  Provides a context manager that opens a new explicit configuration
  scope. Explicit configuration scopes restrict parameter bindings to only
  certain sections of code that run within the scope. Scopes can be nested to
  arbitrary depth; any configurable functions called within a scope inherit
  parameters defined by higher level scopes.

  For example, suppose a function named `preprocess_images` is called in two
  places in a codebase: Once when loading data for a training task, and once
  when loading data for an evaluation task:

      def load_training_data():
        ...
        with gin.config_scope('train'):
          images = preprocess_images(images)
        ...

      def load_eval_data():
        ...
        with gin.config_scope('eval'):
          images = preprocess_images(images)
        ...

  By using a `config_scope` to wrap each invocation of `preprocess_images` as
  above, it is possible to use Gin to supply specific parameters to each. Here
  is a possible configuration for the above example:

      preprocess_images.crop_size = [64, 64]
      preprocess_images.normalize_image = True
      train/preprocess_images.crop_location = 'random'
      train/preprocess_images.random_flip_lr = True
      eval/preprocess_images.crop_location = 'center'

  The `crop_size` and `normalize_image` parameters above will be shared by both
  the `train` and `eval` invocations; only `train` will receive
  `random_flip_lr`, and the two invocations receive different values for
  `crop_location`.

  Passing `None` or `''` to `config_scope` will temporarily clear all currently
  active scopes (within the `with` block; they will be restored afterwards).

  Args:
    name_or_scope: A name for the config scope, or an existing scope (e.g.,
      captured from `with gin.config_scope(...) as scope`), or `None` to clear
      currently active scopes.

  Raises:
    ValueError: If `name_or_scope` is not a list, string, or None.

  Yields:
    The resulting config scope (a list of all active scope names, ordered from
    outermost to innermost).
  """
  try:
    valid_value = True
    if isinstance(name_or_scope, list):
      # An existing scope (a list of names); reuse it wholesale.
      new_scope = name_or_scope
    elif name_or_scope and isinstance(name_or_scope, str):
      # A (possibly '/'-delimited) scope name: extend the active scope.
      new_scope = current_scope()  # Returns a copy.
      new_scope.extend(name_or_scope.split('/'))
    else:
      # Only `None` or '' are accepted here; both clear the active scopes.
      valid_value = name_or_scope in (None, '')
      new_scope = []
    # Append new_scope first. It will be popped in the finally block if an
    # exception is raised below; this keeps enter/exit calls balanced even
    # when validation fails.
    _SCOPE_MANAGER.enter_scope(new_scope)
    # Each scope component must be a valid module-style identifier.
    scopes_are_valid = map(config_parser.MODULE_RE.match, new_scope)
    if not valid_value or not all(scopes_are_valid):
      err_str = 'Invalid value for `name_or_scope`: {}.'
      raise ValueError(err_str.format(name_or_scope))
    yield new_scope
  finally:
    _SCOPE_MANAGER.exit_scope()
# Anything that can identify a configurable: the function/class itself, or a
# (possibly scope-prefixed) selector string.
_FnOrClsOrSelector = Union[Callable[..., Any], Type[Any], str]
def _as_scope_and_selector(
    fn_or_cls_or_selector: _FnOrClsOrSelector) -> Tuple[Sequence[str], str]:
  """Resolves `fn_or_cls_or_selector` into scope components and a selector.

  Args:
    fn_or_cls_or_selector: Configurable function, class or selector `str`.

  Returns:
    A `(scope_components, selector)` tuple. The scope components come from the
    selector string when it carries any, and otherwise from the current scope.

  Raises:
    ValueError: If no matching configurable exists in the Gin registry.
  """
  scope = []
  if isinstance(fn_or_cls_or_selector, str):
    # Strings may carry scopes ('a/b/selector'); split those off, then expand
    # the (possibly partial) selector to its fully-qualified registered form.
    *scope, partial = fn_or_cls_or_selector.split('/')
    matched = _REGISTRY.get_match(partial)
    selector = matched.selector if matched else matched
  else:
    configurable_ = _inverse_lookup(fn_or_cls_or_selector)
    selector = configurable_.selector if configurable_ else None
  if not scope:
    scope = current_scope()
  if selector is None:
    raise ValueError(
        f'Could not find {fn_or_cls_or_selector} in the Gin registry.')
  return scope, selector
def _get_bindings(
    selector: str,
    scope_components=None,
    inherit_scopes: bool = True,
) -> Dict[str, Any]:
  """Collects the bound kwargs for `selector` under the given scope.

  Args:
    selector: The full selector whose bindings should be retrieved.
    scope_components: Scope components to use; defaults to the current scope.
    inherit_scopes: If `True`, bindings from every enclosing (outer) scope are
      merged in, with inner scopes taking precedence. If `False`, only the
      exact scope is consulted.

  Returns:
    A dict of the kwargs bound for the (scope, selector) combination.
  """
  components = scope_components or current_scope()
  if inherit_scopes:
    # Every prefix of the scope, outermost ([]) first; later (inner) updates
    # below overwrite earlier (outer) ones.
    candidate_scopes = [components[:i] for i in range(len(components) + 1)]
  else:
    # Strict scope mode: only the exact scope participates.
    candidate_scopes = [components]
  bindings = {}
  for candidate in candidate_scopes:
    bindings.update(_CONFIG.get(('/'.join(candidate), selector), {}))
  return bindings
def get_bindings(
    fn_or_cls_or_selector: _FnOrClsOrSelector,
    resolve_references: bool = True,
    inherit_scopes: bool = True,
) -> Dict[str, Any]:
  """Returns the bindings associated with the given configurable.

  Any configurable references in the bindings will be resolved during the call
  (and evaluated references will be evaluated). The scope in which
  `get_bindings` is called will be used.

  Example:

      config.parse_config('MyParams.kwarg0 = 123')
      gin.get_bindings('MyParams') == {'kwarg0': 123}

  Args:
    fn_or_cls_or_selector: Configurable function, class or selector `str`.
    resolve_references: Whether or not references (and macros) should be
      resolved. If `False`, the output may contain instances of Gin's
      `ConfigurableReference` class.
    inherit_scopes: If False, only match the exact scope (so
      `get_bindings('scope1/fn')` matches neither `scope1/scope2/fn` nor `fn`,
      but only the exact 'scope1/fn').

  Returns:
    The bindings kwargs injected by Gin.
  """
  scope_components, selector = _as_scope_and_selector(fn_or_cls_or_selector)
  bindings = _get_bindings(
      selector,
      scope_components=scope_components,
      inherit_scopes=inherit_scopes,
  )
  if not resolve_references:
    return bindings
  # Deep-copying evaluates any ConfigurableReference instances in the result.
  return copy.deepcopy(bindings)
def get_configurable(
    fn_or_cls_or_selector: _FnOrClsOrSelector) -> Callable[..., Any]:
  """Returns the configurable version of `fn_or_cls_or_selector`.

  Accepts any of: the original ("non-configurable") function or class; the
  configurable version itself (making this a no-op for objects annotated with
  `@gin.configurable`); or a selector string identifying the registered
  function or class. When a selector string includes scope components, the
  returned configurable has that scope applied; otherwise the currently active
  scope is used.

  Args:
    fn_or_cls_or_selector: Configurable function, class or selector `str`.

  Returns:
    The configurable function or class corresponding to `fn_or_cls_or_selector`.
  """
  scope_components, selector = _as_scope_and_selector(fn_or_cls_or_selector)
  return _decorate_with_scope(
      _REGISTRY[selector], scope_components=scope_components)
def _make_gin_wrapper(fn, fn_or_cls, name, selector, allowlist, denylist):
  """Creates the final Gin wrapper for the given function.

  Args:
    fn: The function that will be wrapped.
    fn_or_cls: The original function or class being made configurable. This will
      differ from `fn` when making a class configurable, in which case `fn` will
      be the constructor/new function (or when proxying a class, the type's
      `__call__` method), while `fn_or_cls` will be the class.
    name: The name given to the configurable.
    selector: The full selector of the configurable (name including any module
      components).
    allowlist: An allowlist of configurable parameters.
    denylist: A denylist of non-configurable parameters.

  Returns:
    The Gin wrapper around `fn`.
  """
  # At this point we have access to the final function to be wrapped, so we
  # can cache a few things here.
  fn_descriptor = "'{}' ('{}')".format(name, fn_or_cls)
  signature_fn = fn_or_cls
  if inspect.isclass(fn_or_cls):
    # For classes, the signature of interest is the constructor's.
    signature_fn = _find_class_construction_fn(fn_or_cls)
  signature_required_kwargs = _get_validated_required_kwargs(
      signature_fn, fn_descriptor, allowlist, denylist)
  initial_configurable_defaults = _get_default_configurable_parameter_values(
      signature_fn, allowlist, denylist)

  @functools.wraps(fn)
  def gin_wrapper(*args, **kwargs):
    """Supplies fn with parameter values from the configuration."""
    # Look up bindings under the selector's current (possibly renamed) form.
    current_selector = _RENAMED_SELECTORS.get(selector, selector)
    new_kwargs = _get_bindings(current_selector)
    gin_bound_args = list(new_kwargs.keys())
    scope_str = '/'.join(current_scope())

    # Names of the parameters the caller supplied positionally.
    arg_names = _get_supplied_positional_parameter_names(signature_fn, args)

    # REQUIRED only makes sense for parameters Gin can identify by name.
    for arg in args[len(arg_names):]:
      if arg is REQUIRED:
        raise ValueError(
            'gin.REQUIRED is not allowed for unnamed (vararg) parameters. If '
            'the function being called is wrapped by a non-Gin decorator, '
            'try explicitly providing argument names for positional '
            'parameters.')

    # Record positional arguments the caller passed as REQUIRED; these must be
    # filled in from the Gin config below.
    required_arg_names = []
    required_arg_indexes = []
    for i, arg in enumerate(args[:len(arg_names)]):
      if arg is REQUIRED:
        required_arg_names.append(arg_names[i])
        required_arg_indexes.append(i)

    # Likewise for keyword arguments passed as REQUIRED.
    caller_required_kwargs = []
    for kwarg, value in kwargs.items():
      if value is REQUIRED:
        caller_required_kwargs.append(kwarg)

    # If the caller passed arguments as positional arguments that correspond to
    # a keyword arg in new_kwargs, remove the keyword argument from new_kwargs
    # to let the caller win and avoid throwing an error. Unless it is an arg
    # marked as REQUIRED.
    for arg_name in arg_names:
      if arg_name not in required_arg_names:
        new_kwargs.pop(arg_name, None)

    # Get default values for configurable parameters.
    operative_parameter_values = initial_configurable_defaults.copy()
    # Update with the values supplied via configuration.
    operative_parameter_values.update(new_kwargs)
    # Remove any values from the operative config that are overridden by the
    # caller. These can't be configured, so they won't be logged. We skip values
    # that are marked as REQUIRED.
    for k in arg_names:
      if k not in required_arg_names:
        operative_parameter_values.pop(k, None)
    for k in kwargs:
      if k not in caller_required_kwargs:
        operative_parameter_values.pop(k, None)

    # An update is performed in case another caller of this same configurable
    # object has supplied a different set of arguments. By doing an update, a
    # Gin-supplied or default value will be present if it was used (not
    # overridden by the caller) at least once.
    with _OPERATIVE_CONFIG_LOCK:
      op_cfg = _OPERATIVE_CONFIG.setdefault((scope_str, current_selector), {})
      op_cfg.update(operative_parameter_values)

    # We call deepcopy for two reasons: First, to prevent the called function
    # from modifying any of the values in `_CONFIG` through references passed in
    # via `new_kwargs`; Second, to facilitate evaluation of any
    # `ConfigurableReference` instances buried somewhere inside `new_kwargs`.
    # See the docstring on `ConfigurableReference.__deepcopy__` above for more
    # details on the dark magic happening here.
    new_kwargs = copy.deepcopy(new_kwargs)

    # Validate args marked as REQUIRED have been bound in the Gin config.
    missing_required_params = []
    new_args = list(args)
    for i, arg_name in zip(required_arg_indexes, required_arg_names):
      if arg_name not in new_kwargs:
        missing_required_params.append(arg_name)
      else:
        # Substitute the Gin-bound value for the REQUIRED placeholder.
        new_args[i] = new_kwargs.pop(arg_name)

    # Validate kwargs marked as REQUIRED have been bound in the Gin config.
    for required_kwarg in signature_required_kwargs:
      if (required_kwarg not in arg_names and  # not a positional arg
          required_kwarg not in kwargs and  # or a keyword arg
          required_kwarg not in new_kwargs):  # or bound in config
        missing_required_params.append(required_kwarg)
    for required_kwarg in caller_required_kwargs:
      if required_kwarg not in new_kwargs:
        missing_required_params.append(required_kwarg)
      else:
        # Remove from kwargs and let the new_kwargs value be used.
        kwargs.pop(required_kwarg)

    if missing_required_params:
      missing_required_params = (
          _order_by_signature(signature_fn, missing_required_params))
      err_str = 'Required bindings for `{}` not provided in config: {}'
      minimal_selector = _REGISTRY.minimal_selector(current_selector)
      err_str = err_str.format(minimal_selector, missing_required_params)
      raise RuntimeError(err_str)

    # Now, update with the caller-supplied `kwargs`, allowing the caller to have
    # the final say on keyword argument values.
    new_kwargs.update(kwargs)

    try:
      return fn(*new_args, **new_kwargs)
    except Exception as e:  # pylint: disable=broad-except
      # Augment the exception with Gin context (configurable name, scope, and —
      # for TypeErrors — which parameters were left unbound) before re-raising.
      err_str = ''
      if isinstance(e, TypeError):
        all_arg_names = _get_all_positional_parameter_names(signature_fn)
        if len(new_args) < len(all_arg_names):
          unbound_positional_args = list(
              set(all_arg_names[len(new_args):]) - set(new_kwargs))
          if unbound_positional_args:
            caller_supplied_args = list(
                set(arg_names + list(kwargs)) -
                set(required_arg_names + list(caller_required_kwargs)))
            fmt = ('\n  No values supplied by Gin or caller for arguments: {}'
                   '\n  Gin had values bound for: {gin_bound_args}'
                   '\n  Caller supplied values for: {caller_supplied_args}')
            canonicalize = lambda x: list(map(str, sorted(x)))
            err_str += fmt.format(
                canonicalize(unbound_positional_args),
                gin_bound_args=canonicalize(gin_bound_args),
                caller_supplied_args=canonicalize(caller_supplied_args))
      err_str += "\n  In call to configurable '{}' ({}){}"
      scope_info = " in scope '{}'".format(scope_str) if scope_str else ''
      err_str = err_str.format(name, fn_or_cls, scope_info)
      utils.augment_exception_message_and_reraise(e, err_str)

  return gin_wrapper
def _make_configurable(fn_or_cls,
                       name=None,
                       module=None,
                       allowlist=None,
                       denylist=None,
                       avoid_class_mutation=False,
                       import_source=None):
  """Wraps `fn_or_cls` to make it configurable.

  Infers the configurable name from `fn_or_cls.__name__` if necessary, and
  updates global state to keep track of configurable name <-> function
  mappings, as well as allowlisted and denylisted parameters.

  Args:
    fn_or_cls: The function or class to decorate.
    name: A name for the configurable. If `None`, the name will be inferred from
      from `fn_or_cls`. The `name` may also include module components to be used
      for disambiguation (these will be appended to any components explicitly
      specified by `module`).
    module: The module to associate with the configurable, to help handle naming
      collisions. If `None`, `fn_or_cls.__module__` will be used (if no module
      is specified as part of `name`).
    allowlist: An allowlisted set of parameter names to supply values for.
    denylist: A denylisted set of parameter names not to supply values for.
    avoid_class_mutation: If `fn_or_cls` is a class and `avoid_class_mutation`
      is `True`, decorate by subclassing `fn_or_cls`'s metaclass and overriding
      its `__call__` method. If `False`, replace the existing `__init__` or
      `__new__` with a decorated version.
    import_source: When using dynamic registration, this provides the import
      source of the registered configurable and consists of a tuple of
      `(source_module, attribute_path)` describing the module fn_or_cls is
      imported from and its attribute path within that module.

  Returns:
    A wrapped version of `fn_or_cls` that will take parameter values from the
    global configuration.

  Raises:
    RuntimeError: If the config is locked.
    ValueError: If a configurable with `name` (or the name of `fn_or_cls`)
      already exists, or if both an allowlist and denylist are specified.
  """
  if config_is_locked():
    err_str = 'Attempted to add a new configurable after the config was locked.'
    raise RuntimeError(err_str)
  name = fn_or_cls.__name__ if name is None else name
  if config_parser.IDENTIFIER_RE.match(name):
    # A bare identifier: infer the module from `fn_or_cls` when not supplied.
    default_module = getattr(fn_or_cls, '__module__', None)
    module = default_module if module is None else module
  elif not config_parser.MODULE_RE.match(name):
    raise ValueError("Configurable name '{}' is invalid.".format(name))
  if module is not None and not config_parser.MODULE_RE.match(module):
    raise ValueError("Module '{}' is invalid.".format(module))
  selector = module + '.' + name if module else name
  # Re-registering the *same* object under the same selector is allowed;
  # registering a different object is only allowed in interactive mode.
  if (not _INTERACTIVE_MODE and selector in _REGISTRY and
      _REGISTRY[selector].wrapped is not fn_or_cls):
    err_str = ("A different configurable matching '{}' already exists.\n\n"
               'To allow re-registration of configurables in an interactive '
               'environment, use:\n\n'
               '    gin.enter_interactive_mode()')
    raise ValueError(err_str.format(selector))
  if allowlist and denylist:
    err_str = 'An allowlist or a denylist can be specified, but not both.'
    raise ValueError(err_str)
  if allowlist and not isinstance(allowlist, (list, tuple)):
    raise TypeError('allowlist should be a list or tuple.')
  if denylist and not isinstance(denylist, (list, tuple)):
    raise TypeError('denylist should be a list or tuple.')
  _validate_parameters(fn_or_cls, allowlist, 'allowlist')
  _validate_parameters(fn_or_cls, denylist, 'denylist')

  def decorator(fn):
    """Wraps `fn` so that it obtains parameters from the configuration."""
    return _make_gin_wrapper(fn, fn_or_cls, name, selector, allowlist,
                             denylist)

  decorated_fn_or_cls = _decorate_fn_or_cls(
      decorator, fn_or_cls, selector, avoid_class_mutation=avoid_class_mutation)
  # Record both forward (selector -> info) and inverse (object -> info)
  # mappings so lookups work from either direction.
  configurable_info = Configurable(
      wrapper=decorated_fn_or_cls,
      wrapped=fn_or_cls,
      name=name,
      module=module,
      import_source=import_source,
      allowlist=allowlist,
      denylist=denylist,
      selector=selector)
  _REGISTRY[selector] = configurable_info
  _INVERSE_REGISTRY[fn_or_cls] = configurable_info
  return decorated_fn_or_cls
def configurable(name_or_fn=None,
                 module=None,
                 allowlist=None,
                 denylist=None):
  """Decorator to make a function or class configurable.

  Registers the decorated function/class with Gin so its parameters can be
  supplied from the global configuration (i.e., set through `bind_parameter`
  or `parse_config`). By default the configurable is associated with the name
  of the function or class; a name may be given explicitly to avoid collisions
  or improve clarity. For classes, the constructor's parameters become
  configurable.

  The decorator may be used bare:

      @config.configurable
      def some_configurable_function(param1, param2='a default value'):
        ...

  which registers `'some_configurable_function'` with both parameters
  configurable; or with arguments:

      @config.configurable('explicit_configurable_name', allowlist='param2')
      def some_configurable_function(param1, param2='a default value'):
        ...

  which registers `'explicit_configurable_name'` with only `param2`
  configurable.

  Args:
    name_or_fn: A name for this configurable, or a function to decorate (in
      which case the name will be taken from that function). If not set,
      defaults to the name of the function/class that is being made
      configurable. If a name is provided, it may also include module
      components to be used for disambiguation (these will be appended to any
      components explicitly specified by `module`).
    module: The module to associate with the configurable, to help handle
      naming collisions. By default, the module of the function or class being
      made configurable will be used (if no module is specified as part of the
      name).
    allowlist: An allowlisted set of kwargs that should be configurable. All
      other kwargs will not be configurable. Only one of `allowlist` or
      `denylist` should be specified.
    denylist: A denylisted set of kwargs that should not be configurable. All
      other kwargs will be configurable. Only one of `allowlist` or `denylist`
      should be specified.

  Returns:
    The decorated function or class when used with no parameters (or with a
    function/class supplied as the first parameter); otherwise, a decorator
    that can be applied to the target function or class.
  """
  if callable(name_or_fn):
    # Bare `@configurable` usage: `name_or_fn` is the decoration target, and
    # the configurable name is inferred from it.
    return _make_configurable(name_or_fn, None, module, allowlist, denylist)

  def perform_decoration(fn_or_cls):
    return _make_configurable(fn_or_cls, name_or_fn, module, allowlist,
                              denylist)

  return perform_decoration
def external_configurable(fn_or_cls,
                          name=None,
                          module=None,
                          allowlist=None,
                          denylist=None):
  """Allow referencing/configuring an external class or function.

  Use this when `fn_or_cls` can't be annotated with `@configurable` directly
  (e.g., it lives in another project). It registers `fn_or_cls` with Gin so it
  can be configured and referenced (via the `@name` notation) in parameter
  binding strings.

  Note that parameter injection only happens through the value returned here
  or through references made in binding strings — calling `fn_or_cls` itself
  directly bypasses Gin entirely.

  Args:
    fn_or_cls: The external function or class that should be made configurable.
    name: The configurable name to be associated with `fn_or_cls`. The name may
      also include module components to be used for disambiguation (these will
      be appended to any components explicitly specified by `module`).
    module: The module to associate with the configurable, to help handle
      naming collisions. By default, `fn_or_cls.__module__` will be used (if no
      module is specified as part of the name).
    allowlist: An allowlist of parameter names to allow configuration for.
    denylist: A denylist of parameter names to deny configuration for.

  Returns:
    A decorated version of `fn_or_cls` that permits parameter binding. For
    functions this is a wrapped function; for classes it is a carefully
    constructed subclass that behaves nearly identically (even under many type
    inspection operations) apart from the parameter binding.
  """
  # External objects must not be mutated, so decorate via metaclass
  # subclassing rather than replacing __init__/__new__.
  return _make_configurable(
      fn_or_cls,
      name=name,
      module=module,
      allowlist=allowlist,
      denylist=denylist,
      avoid_class_mutation=True)
def register(name_or_fn=None,
             module=None,
             allowlist=None,
             denylist=None):
  """Decorator to register a function or class configurable.

  Unlike `configurable`, this only registers the decorated function/class with
  Gin so it can be passed to other configurables in `bind_parameter` or
  `parse_config`; the decorated object itself is returned unchanged, so direct
  calls from Python code are not affected by the configuration.

  The decorator may be used bare:

      @config.register
      def some_configurable_function(param1, param2='a default value'):
        ...

  which registers `'some_configurable_function'` with both parameters
  configurable; or with arguments:

      @config.register('explicit_name', allowlist='param2')
      def some_configurable_function(param1, param2='a default value'):
        ...

  which registers `'explicit_name'` with only `param2` configurable. Classes
  may be registered as well, in which case their constructor parameters become
  configurable.

  Args:
    name_or_fn: A name for this configurable, or a function to decorate (in
      which case the name will be taken from that function). If not set,
      defaults to the name of the function/class that is being made
      configurable. If a name is provided, it may also include module
      components to be used for disambiguation (these will be appended to any
      components explicitly specified by `module`).
    module: The module to associate with the configurable, to help handle
      naming collisions. By default, the module of the function or class being
      made configurable will be used (if no module is specified as part of the
      name).
    allowlist: An allowlisted set of kwargs that should be configurable. All
      other kwargs will not be configurable. Only one of `allowlist` or
      `denylist` should be specified.
    denylist: A denylisted set of kwargs that should not be configurable. All
      other kwargs will be configurable. Only one of `allowlist` or `denylist`
      should be specified.

  Returns:
    The target function or class unchanged when used bare (or with a
    function/class supplied as the first parameter); otherwise, a function
    that registers its target with Gin and returns it unchanged.
  """
  if callable(name_or_fn):
    target, name = name_or_fn, None
  else:
    target, name = None, name_or_fn

  def perform_decoration(fn_or_cls):
    # Register as configurable, but hand back the original object untouched.
    _make_configurable(
        fn_or_cls,
        name=name,
        module=module,
        allowlist=allowlist,
        denylist=denylist,
        avoid_class_mutation=True)
    return fn_or_cls

  if target is not None:
    return perform_decoration(target)
  return perform_decoration
def _make_unique(sequence, key=None):
key = key or (lambda x: x)
seen = set()
output = []
for x in sequence:
key_val = key(x)
if key_val not in seen:
seen.add(key_val)
output.append(x)
return output
def _uniquify_name(candidate_name: str, existing_names: Set[str]):
i = 2
unique_name = candidate_name
while unique_name in existing_names:
unique_name = candidate_name + str(i)
i += 1
return unique_name
class ImportManager:
  """Manages imports required when writing out a full config string.

  This class does bookkeeping to ensure each import is only output once, and
  that each import receives a unique name/alias to avoid collisions.
  """

  def __init__(self, imports):
    """Initializes the `ImportManager` instance.

    Args:
      imports: An iterable of `ImportStatement` instances, providing existing
        imports to manage. Every effort will be taken here to respect the
        existing structure and format of the imports (e.g., any aliases
        provided, and whether the imports use the `from` syntax). Note that if
        dynamic registration is enabled, it should be included here as one of
        the provided statements.
    """
    # Dynamic registration is in effect iff the special `__gin__` module
    # appears among the supplied import statements.
    self.dynamic_registration = any(
        statement.module == '__gin__.dynamic_registration'
        for statement in imports)
    self.imports = []  # Managed `ImportStatement`s, in insertion order.
    self.module_selectors = {}  # Maps module name -> selector prefix string.
    self.names = set()  # Bound names in use, for collision detection.
    # Prefer to order `from` style imports first.
    for statement in sorted(imports, key=lambda s: (s.module, not s.is_from)):
      self.add_import(statement)

  @property
  def sorted_imports(self):
    # Imports sorted by module name, for stable config-string output.
    return sorted(self.imports, key=lambda s: s.module)

  def add_import(self, statement: config_parser.ImportStatement):
    """Adds a single import to this `ImportManager` instance.

    The provided statement is deduped and possibly re-aliased to ensure it has a
    unique name.

    Args:
      statement: The `ImportStatement` to add.
    """
    if statement.module in self.module_selectors:
      # Already managed; nothing to do.
      return
    unique_name = _uniquify_name(statement.bound_name(), self.names)
    if unique_name != statement.bound_name():
      # The natural bound name collides with an existing one; re-alias.
      statement = statement._replace(alias=unique_name)
    if statement.is_from or statement.alias:
      selector = statement.bound_name()
    else:
      selector = statement.module
    self.module_selectors[statement.module] = selector
    self.names.add(statement.bound_name())
    self.imports.append(statement)

  def require_configurable(self, configurable_: Configurable):
    """Adds the import required for `configurable_`, if not already present.

    Args:
      configurable_: The specific `Configurable` whose corresponding module
        should be imported.
    """
    if not self.dynamic_registration:
      # Without dynamic registration, configurables need no per-module import.
      return
    if configurable_.wrapped == macro:  # pylint: disable=comparison-with-callable
      return
    if configurable_.import_source:
      # Prefer the import statement recorded at registration time.
      self.add_import(configurable_.import_source[0])
    elif hasattr(configurable_.wrapped, '__module__'):
      # Fall back to synthesizing an import from the object's __module__.
      module = configurable_.wrapped.__module__
      import_statement = config_parser.ImportStatement(
          module=module,
          is_from='.' in module,
          alias=None,
          location=config_parser.Location(None, 0, None, ''))
      self.add_import(import_statement)
    else:
      # No way to import this configurable: disable dynamic registration for
      # the whole output and drop the `__gin__` import statement.
      logging.warning(
          'Configurable %r was not imported using dynamic registration and has '
          'no __module__ attribute; dynamic registration will not be used in '
          'the resulting config string. This is likely because the initial set '
          'of parsed Gin files included a mix of files with and without '
          'dynamic registration.', configurable_)
      for statement in self.imports:
        if statement.module == '__gin__.dynamic_registration':
          self.imports.remove(statement)
          break
      self.dynamic_registration = False

  def minimal_selector(self, configurable_: Configurable) -> str:
    """Returns a minimal selector corresponding to `configurable_`.

    This method has different behavior depending on whether dynamic registration
    has been enabled (see `__init__`). If dynamic registration is enabled, then
    the minimal selector is a full '.'-separated attribute chain beginning with
    the name of an imported module. If dynamic registration is not enabled, then
    the returned selector is the minimal string required to uniquely identify
    `configurable_` (this includes the function/class name, and enough
    components of its module name to make the resulting selector unique).

    Args:
      configurable_: The `Configurable` to return a minimal selector for.

    Returns:
      The minimal selector for `configurable_` as a string.
    """
    if self.dynamic_registration:
      if configurable_.import_source:
        import_statement, name = configurable_.import_source
        module = import_statement.module
      else:
        module = configurable_.wrapped.__module__
        name = configurable_.wrapped.__qualname__
      return f'{self.module_selectors[module]}.{name}'
    else:
      minimal_selector = _REGISTRY.minimal_selector(configurable_.selector)
      if configurable_.is_method:
        # Methods require `Class.method` as selector.
        if '.' not in minimal_selector:
          minimal_selector = '.'.join(configurable_.selector.split('.')[-2:])
      return minimal_selector
def _config_str(
    configuration_object: Mapping[Tuple[str, str], Mapping[str, Any]],
    max_line_length: int = 80,
    continuation_indent: int = 4,
    show_provenance: bool = False,
) -> str:
  """Print the configuration specified in configuration object.

  Args:
    configuration_object: Either _OPERATIVE_CONFIG (operative config) or _CONFIG
      (all config, bound and unbound); maps (scope, selector) keys to
      parameter-name/value mappings.
    max_line_length: A (soft) constraint on the maximum length of a line in the
      formatted string. Large nested structures will be split across lines, but
      e.g. long strings won't be split into a concatenation of shorter strings.
    continuation_indent: The indentation for continued lines.
    show_provenance: Include provenance of configuration value (as comment).

  Returns:
    A config string capturing all parameter values set by the object.
  """

  def format_binding(key: str,
                     value: str,
                     provenance: Optional[config_parser.Location] = None):
    """Pretty print the given key/value pair."""
    formatted_val = pprint.pformat(
        value, width=(max_line_length - continuation_indent))
    formatted_val_lines = formatted_val.split('\n')
    if (len(formatted_val_lines) == 1 and
        len(key + formatted_val) <= max_line_length):
      output = '{} = {}'.format(key, formatted_val)
    else:
      # Too long for one line: emit `key = \` and the value indented below it.
      indented_formatted_val = '\n'.join(
          [' ' * continuation_indent + line for line in formatted_val_lines])
      output = '{} = \\\n{}'.format(key, indented_formatted_val)
    if show_provenance and provenance:
      output = f'# Set in {_format_location(provenance)}:' + '\n' + output
    return output

  def sort_key(key_tuple):
    """Sort configurable selector/innermost scopes, ignoring case."""
    scope, selector = key_tuple[0]
    # Reversing the dotted/slashed components sorts by configurable name first.
    parts = selector.lower().split('.')[::-1] + scope.lower().split('/')[::-1]
    if _REGISTRY[selector].is_method:
      method_name = parts.pop(0)
      parts[0] += f'.{method_name}'  # parts[0] is the class name.
    return parts

  import_manager = ImportManager(_IMPORTS)
  if import_manager.dynamic_registration:
    # Under dynamic registration, every configurable mentioned (as a binding
    # key or referenced inside a bound value) needs a corresponding import.
    for _, selector in configuration_object:
      import_manager.require_configurable(_REGISTRY[selector])
    for reference in iterate_references(configuration_object):
      import_manager.require_configurable(reference.configurable)

  # Build the output as an array of formatted Gin statements. Each statement may
  # span multiple lines. Imports are first, followed by macros, and finally all
  # other bindings sorted in alphabetical order by configurable name.
  formatted_statements = [
      statement.format() for statement in import_manager.sorted_imports
  ]
  if formatted_statements:
    formatted_statements.append('')
  # For config strings that use dynamic registration, we need a parse scope open
  # in order to properly resolve symbols. In particular, the
  # _is_literally_representable function checks to see if something can be
  # parsed in order to determine if it should be represented in the config str.
  with _parse_scope(import_manager=import_manager):
    macros = {}
    for (scope, selector), config in configuration_object.items():
      if _REGISTRY[selector].wrapped == macro:  # pylint: disable=comparison-with-callable
        macros[scope, selector] = config
    if macros:
      formatted_statements.append('# Macros:')
      formatted_statements.append('# ' + '=' * (max_line_length - 2))
    for (name, _), config in sorted(macros.items(), key=sort_key):
      provenance: Optional[config_parser.Location] = _CONFIG_PROVENANCE.get(
          (name, 'gin.macro'), {}).get('value', None)
      binding = format_binding(name, config['value'], provenance)
      formatted_statements.append(binding)
    if macros:
      formatted_statements.append('')
    sorted_items: List[Tuple[Tuple[str, str], Mapping[str, Any]]] = sorted(
        configuration_object.items(), key=sort_key)
    for key, config in sorted_items:
      (scope, selector) = key
      configurable_ = _REGISTRY[selector]
      # Macros and constants were already emitted in the macro section above.
      if configurable_.wrapped in (macro, _retrieve_constant):  # pylint: disable=comparison-with-callable
        continue
      minimal_selector = import_manager.minimal_selector(configurable_)
      scoped_selector = (scope + '/' if scope else '') + minimal_selector
      # Only values that can round-trip through the Gin parser are emitted.
      parameters = [
          (k, v) for k, v in config.items() if _is_literally_representable(v)
      ]
      formatted_statements.append(
          '# Parameters for {}:'.format(scoped_selector))
      formatted_statements.append('# ' + '=' * (max_line_length - 2))
      for arg, val in sorted(parameters):
        provenance: Optional[config_parser.Location] = _CONFIG_PROVENANCE.get(
            key, {}).get(arg, None)
        binding = format_binding('{}.{}'.format(scoped_selector, arg), val,
                                 provenance)
        formatted_statements.append(binding)
      if not parameters:
        formatted_statements.append('# None.')
      formatted_statements.append('')
  return '\n'.join(formatted_statements)
def operative_config_str(max_line_length=80,
                         continuation_indent=4,
                         show_provenance: bool = False):
  """Retrieve the "operative" configuration as a config string.

  The operative configuration consists of all parameter values used by
  configurable functions that are actually called during execution of the
  current program; parameters of configurables that are never called cannot
  affect execution and are therefore omitted. Default argument values of
  called configurables are included (subject to allowlists/denylists), as are
  any overrides made via `bind_parameter` or `parse_config`.

  Parameters whose values can't be represented as Gin literals (i.e. parseable
  by `parse_config`) are excluded. The output is sorted lexicographically and
  grouped by configurable name.

  Args:
    max_line_length: A (soft) constraint on the maximum length of a line in the
      formatted string; large nested structures are split across lines.
    continuation_indent: The indentation for continued lines.
    show_provenance: Include provenance of configuration value (as comment).

  Returns:
    A config string capturing all parameter values set in the current program.
  """
  # Format while holding the lock so concurrent updates to the operative
  # config can't interleave with formatting.
  with _OPERATIVE_CONFIG_LOCK:
    return _config_str(
        _OPERATIVE_CONFIG,
        max_line_length,
        continuation_indent,
        show_provenance,
    )
def config_str(max_line_length=80,
               continuation_indent=4,
               show_provenance: bool = False):
  """Retrieve the interpreted configuration as a config string.

  Unlike the _operative configuration_, this may include parameter values
  that the program never actually uses.

  Args:
    max_line_length: A (soft) constraint on the maximum length of a line in the
      formatted string; large nested structures are split across lines.
    continuation_indent: The indentation for continued lines.
    show_provenance: Include provenance of configuration value (as comment).

  Returns:
    A config string capturing all parameter values used by the current program.
  """
  return _config_str(
      _CONFIG, max_line_length, continuation_indent, show_provenance)
class ParsedConfigFileIncludesAndImports(typing.NamedTuple):
  """Summary of one parsed config file: its module imports and nested includes."""
  # Path of the config file, as passed to `parse_config_file`.
  filename: str
  # Names of modules imported while parsing this file.
  imports: Sequence[str]
  # One entry per `include` statement, parsed recursively.
  includes: Sequence['ParsedConfigFileIncludesAndImports']
def parse_config(bindings, skip_unknown=False):
  """Parse a file, string, or list of strings containing parameter bindings.

  Parses parameter binding strings to set up the global configuration. Once
  `parse_config` has been called, any calls to configurable functions will have
  parameter values set according to the values specified by the parameter
  bindings in `bindings`.

  An individual parameter binding has the format

      maybe/some/scopes/configurable_name.parameter_name = value

  Multiple binding strings can be passed either in the form of a file-like
  object supporting the `readline` method, a single string with each individual
  parameter binding separated by a newline, or as a list of individual parameter
  binding strings.

  Any Python literal (lists, tuples, dicts, strings, etc.) is acceptable to the
  right of the equals sign, and follows standard Python rules for line
  continuation. Additionally, a value starting with '@' is interpreted as a
  (possibly scoped) reference to another configurable function, in which case
  this value is replaced by a reference to that function. If the value
  furthermore ends in `()` (e.g., `@configurable_name()`), then the value
  returned when calling the function is used (it will be called *just before*
  the function consuming the output is called).

  See the module documentation for a more detailed description of scoping
  mechanisms and a complete example.

  Reading from a file could be done as follows:

      with open('/path/to/file.config') as bindings:
        gin.parse_config(bindings)

  Passing a newline separated string of parameter bindings might look like:

      bindings = '''
        my_class.param_one = 'asdf'
        my_class.param_two = 9.7
      '''
      gin.parse_config(bindings)

  Alternatively, one can declare a list of parameter bindings and pass it in:

      bindings = [
        'my_class.param_one = "asdf"',
        'my_class.param_two = 9.7',
      ]
      gin.parse_config(bindings)

  Can skip unknown configurables. For example, if no module containing a
  'training' configurable was imported, errors can be avoided by specifying
  `skip_unknown=True`:

      bindings = [
        'my_class.param_one = "asdf"',
        'my_class.param_two = 9.7',
        'training.learning_rate = 0.1',
      ]
      gin.parse_config(bindings, skip_unknown=True)

  Args:
    bindings: A file-like object supporting the readline method, a newline
      separated string of parameter bindings, or a list of individual parameter
      binding strings.
    skip_unknown: A boolean indicating whether unknown configurables and imports
      should be skipped (instead of causing an error). Configurable references
      to unknown configurables will cause errors if they are present in a
      binding that is not itself skipped due to an unknown configurable. This
      can also be a list of configurable names: any unknown configurables that
      do not match an item in the list will still cause errors. Note that
      bindings for known configurables will always be parsed.

  Returns:
    includes: List of ParsedConfigFileIncludesAndImports describing the result
      of loading nested include statements.
    imports: List of names of imported modules.
  """
  if isinstance(bindings, (list, tuple)):
    bindings = '\n'.join(bindings)
  _validate_skip_unknown(skip_unknown)
  if isinstance(skip_unknown, (list, tuple)):
    skip_unknown = set(skip_unknown)  # Faster membership tests below.
  parser = config_parser.ConfigParser(bindings, ParserDelegate(skip_unknown))
  includes = []
  imports = []
  with _parse_scope() as parse_context:
    for statement in parser:
      if isinstance(statement, config_parser.BindingStatement):
        scope, selector, arg_name, value, location = statement
        if not arg_name:
          # A binding with no argument name (`name = value`) defines a macro.
          macro_name = '{}/{}'.format(scope, selector) if scope else selector
          with utils.try_with_location(location):
            bind_parameter((macro_name, 'gin.macro', 'value'), value, location)
        elif not _should_skip(selector, skip_unknown):
          with utils.try_with_location(location):
            bind_parameter((scope, selector, arg_name), value, location)
      elif isinstance(statement, config_parser.BlockDeclaration):
        # A bare `configurable_name:` block declaration only validates that the
        # configurable exists (unless it is skippable).
        if not _should_skip(statement.selector, skip_unknown):
          with utils.try_with_location(statement.location):
            if not parse_context.get_configurable(statement.selector):
              _raise_unknown_configurable_error(statement.selector)
      elif isinstance(statement, config_parser.ImportStatement):
        with utils.try_with_location(statement.location):
          try:
            parse_context.process_import(statement)
          except ImportError as e:
            if not skip_unknown:
              raise
            _print_unknown_import_message(statement, e)
      elif isinstance(statement, config_parser.IncludeStatement):
        with utils.try_with_location(statement.location):
          # Included files are parsed recursively via parse_config_file.
          nested_includes = parse_config_file(statement.filename, skip_unknown)
          includes.append(nested_includes)
      else:
        raise AssertionError(
            'Unrecognized statement type {}.'.format(statement))
    # Update recorded imports. Using the context's recorded imports ignores any
    # `from __gin __ ...` statements used to enable e.g. dynamic registration.
    imports.extend(statement.module for statement in parse_context.imports)
    _IMPORTS.update(parse_context.imports)
  return includes, imports
def _print_unknown_import_message(statement, exception):
"""Prints a properly formatted info message when skipping unknown imports."""
log_str = 'Skipping import of unknown module `%s` (skip_unknown=True).'
log_args = [statement.module]
imported_modules = statement.module.split('.')
exception_modules = exception.name.split('.')
modules_match = imported_modules[:len(exception_modules)] == exception_modules
if not modules_match:
# In case the error comes from a nested import (i.e. the module is
# available, but it imports some unavailable module), print the traceback to
# avoid confusion.
log_str += '\n%s'
log_args.append(traceback.format_exc())
logging.info(log_str, *log_args)
def register_file_reader(*args):
  """Register a file reader for use in parse_config_file.

  Registered file readers will be used to try reading files passed to
  `parse_config_file`. All file readers (beginning with the default `open`) will
  be tried until one of them succeeds at opening the file.

  This function may also be used as a decorator. For example:

      @register_file_reader(IOError)
      def exotic_data_source(filename):
        ...

  Args:
    *args: (When used as a decorator, only the existence check is supplied.)
      - file_reader_fn: The file reader function to register. This should be a
        function that can be used as a context manager to open a file and
        provide a file-like object, similar to Python's built-in `open`.
      - is_readable_fn: A function taking the file path and returning a boolean
        indicating whether the file can be read by `file_reader_fn`.

  Returns:
    `None`, or when used as a decorator, a function that will perform the
    registration using the supplied readability predicate.

  Raises:
    TypeError: If zero or more than two positional arguments are supplied.
  """

  def do_registration(file_reader_fn, is_readable_fn):
    # Idempotent registration. Note: the original membership test,
    # `list(zip(*_FILE_READERS))[0]`, raised IndexError on an empty registry;
    # iterating the reader slots directly avoids that and the extra copies.
    if file_reader_fn not in (reader for reader, _ in _FILE_READERS):
      _FILE_READERS.append((file_reader_fn, is_readable_fn))

  if len(args) == 1:  # It's a decorator.
    return functools.partial(do_registration, is_readable_fn=args[0])
  elif len(args) == 2:
    do_registration(*args)
  else:  # 0 or > 2 arguments supplied.
    err_str = 'register_file_reader() takes 1 or 2 arguments ({} given)'
    raise TypeError(err_str.format(len(args)))
def add_config_file_search_path(location_prefix):
  """Adds a path that will be searched for config files by parse_config_file.

  Args:
    location_prefix: A directory prefix to try when resolving relative config
      file paths (prefixes are tried in registration order).
  """
  _LOCATION_PREFIXES.append(location_prefix)
def parse_config_file(
    config_file: str,
    skip_unknown: Union[bool, Sequence[str]] = False,
    print_includes_and_imports: bool = False
) -> ParsedConfigFileIncludesAndImports:
  """Parse a Gin config file.

  Args:
    config_file: The path to a Gin config file.
    skip_unknown: A boolean indicating whether unknown configurables and imports
      should be skipped instead of causing errors (alternatively a list of
      configurable names to skip if unknown). See `parse_config` for additional
      details.
    print_includes_and_imports: Whether to print the resulting nested includes
      and imports.

  Returns:
    results: An instance of ParsedConfigFileIncludesAndImports containing the
      filename of the parse files, a list of names of imported modules and a
      list of ParsedConfigFileIncludesAndImports created from including nested
      gin files.

  Raises:
    IOError: If `config_file` cannot be read using any register file reader.
  """
  # Absolute paths are used verbatim; relative paths are tried against every
  # registered search prefix, in order.
  if os.path.isabs(config_file):
    prefixes = ['']
  else:
    prefixes = _LOCATION_PREFIXES
  for prefix in prefixes:
    candidate = os.path.join(prefix, config_file)
    for reader, is_readable in _FILE_READERS:
      if not is_readable(candidate):
        continue
      # First reader that claims the file wins.
      with reader(candidate) as f:
        includes, imports = parse_config(f, skip_unknown=skip_unknown)
      results = ParsedConfigFileIncludesAndImports(
          filename=config_file, imports=imports, includes=includes)
      if print_includes_and_imports:
        log_includes_and_imports(results)
      return results
  err_str = 'Unable to open file: {}. Searched config paths: {}.'
  raise IOError(err_str.format(config_file, prefixes))
def parse_config_files_and_bindings(
    config_files: Optional[Sequence[str]],
    bindings: Optional[Sequence[str]],
    finalize_config: bool = True,
    skip_unknown: Union[bool, Sequence[str]] = False,
    print_includes_and_imports: bool = False):
  """Parse a list of config files followed by extra Gin bindings.

  This function is equivalent to:

      for config_file in config_files:
        gin.parse_config_file(config_file, skip_configurables)
      gin.parse_config(bindings, skip_configurables)
      if finalize_config:
        gin.finalize()

  Args:
    config_files: A list of paths to the Gin config files.
    bindings: A list of individual parameter binding strings.
    finalize_config: Whether to finalize the config after parsing and binding
      (defaults to True).
    skip_unknown: A boolean indicating whether unknown configurables and imports
      should be skipped instead of causing errors (alternatively a list of
      configurable names to skip if unknown). See `parse_config` for additional
      details.
    print_includes_and_imports: If true, print a summary of the hierarchy of
      included gin config files and imported modules.

  Returns:
    includes_and_imports: List of ParsedConfigFileIncludesAndImports.
  """
  if config_files is None:
    config_files = []
  if bindings is None:
    bindings = ''
  nested_includes_and_imports = [
      parse_config_file(config_file, skip_unknown)
      for config_file in config_files
  ]
  parse_config(bindings, skip_unknown)
  if finalize_config:
    finalize()
  if print_includes_and_imports:
    for parsed_file in nested_includes_and_imports:
      log_includes_and_imports(parsed_file)
    if bindings:
      logging.info('Additional Gin bindings:')
      for binding in bindings:
        logging.info(' %s', binding)
  return nested_includes_and_imports
def log_includes_and_imports(
    file_includes_and_imports: ParsedConfigFileIncludesAndImports,
    first_line_prefix: str = '',
    prefix: str = ''):
  """Recursively logs a config file's imports and nested includes as a tree."""
  logging.info('%s%s', first_line_prefix, file_includes_and_imports.filename)
  # Continue the vertical rule past the imports only when includes follow.
  infix = ' │' if file_includes_and_imports.includes else ' '
  for imported_module in file_includes_and_imports.imports:
    logging.info('%s%s import %s', prefix, infix, imported_module)
  includes = file_includes_and_imports.includes
  for i, nested_result in enumerate(includes):
    is_last = i == len(includes) - 1
    # Box-drawing connectors: the last child closes the branch.
    branch = ' └─ ' if is_last else ' ├─ '
    continuation = ' ' if is_last else ' │ '
    log_includes_and_imports(
        nested_result,
        first_line_prefix=prefix + branch,
        prefix=prefix + continuation)
def parse_value(value):
  """Parse and return a single Gin value from the given string.

  Args:
    value: A string containing a single Gin value.

  Returns:
    The parsed value.

  Raises:
    ValueError: If `value` is not a string.
  """
  if isinstance(value, str):
    parser = config_parser.ConfigParser(value, ParserDelegate())
    return parser.parse_value()
  raise ValueError('value ({}) should be a string type.'.format(value))
def config_is_locked():
  """Returns True if the config is locked (see `gin.finalize`)."""
  return _CONFIG_IS_LOCKED
def _set_config_is_locked(is_locked):
  """Sets the global locked/unlocked state of the config."""
  global _CONFIG_IS_LOCKED
  _CONFIG_IS_LOCKED = is_locked
@contextlib.contextmanager
def unlock_config():
  """A context manager that temporarily unlocks the config.

  Once the config has been locked by `gin.finalize`, it can only be modified
  using this context manager (to make modifications explicit). Example:

      with gin.unlock_config():
        gin.bind_parameter(...)

  In the case where the config is already unlocked, this does nothing (the
  config remains unlocked).

  Yields:
    None.
  """
  config_was_locked = config_is_locked()
  _set_config_is_locked(False)
  try:
    yield
  finally:
    # Restore the previous state even if the `with` body raised; without this
    # try/finally an exception would leave the config permanently unlocked.
    _set_config_is_locked(config_was_locked)
def enter_interactive_mode():
  """Sets the global interactive-mode flag to True."""
  global _INTERACTIVE_MODE
  _INTERACTIVE_MODE = True
def exit_interactive_mode():
  """Sets the global interactive-mode flag to False."""
  global _INTERACTIVE_MODE
  _INTERACTIVE_MODE = False
@contextlib.contextmanager
def interactive_mode():
  """Context manager enabling interactive mode for the enclosed block."""
  try:
    enter_interactive_mode()
    yield
  finally:
    # Always restore non-interactive mode, even if the body raised.
    exit_interactive_mode()
def finalize():
  """A function that should be called after parsing all Gin config files.

  Calling this function allows registered "finalize hooks" to inspect (and
  potentially modify) the Gin config, to provide additional functionality. Hooks
  should not modify the configuration object they receive directly; instead,
  they should return a dictionary mapping Gin binding keys to (new or updated)
  values. This way, all hooks see the config as originally parsed.

  Raises:
    RuntimeError: If the config is already locked.
    ValueError: If two or more hooks attempt to modify or introduce bindings for
      the same key. Since it is difficult to control the order in which hooks
      are registered, allowing this could yield unpredictable behavior.
  """
  if config_is_locked():
    raise RuntimeError('Finalize called twice (config already locked).')
  # Collect updates from all hooks first (so every hook sees the original,
  # unmodified config), detecting key conflicts along the way.
  bindings = {}
  for hook in _FINALIZE_HOOKS:
    new_bindings = hook(_CONFIG)
    if new_bindings is not None:
      for key, value in new_bindings.items():
        pbk = ParsedBindingKey.parse(key)
        if pbk in bindings:
          err_str = 'Received conflicting updates when running {}.'
          raise ValueError(err_str.format(hook))
        bindings[pbk] = value
  # Only after all hooks succeeded, apply the merged updates and lock.
  for pbk, value in bindings.items():
    bind_parameter(pbk, value)
  _set_config_is_locked(True)
def register_finalize_hook(fn):
  """Registers `fn` as a hook that will run during `gin.finalize`.

  All finalize hooks should accept the current config, and return a dictionary
  containing any additional parameter bindings that should occur in the form of
  a mapping from (scoped) configurable names to values.

  Args:
    fn: The function to register.

  Returns:
    `fn`, allowing `register_finalize_hook` to be used as a decorator.
  """
  _FINALIZE_HOOKS.append(fn)
  return fn
def _iterate_flattened_values(value):
"""Provides an iterator over all values in a nested structure."""
if isinstance(value, str):
yield value
return
if isinstance(value, collections.abc.Mapping):
value = collections.abc.ValuesView(value) # pytype: disable=wrong-arg-count
if isinstance(value, collections.abc.Iterable):
for nested_value in value:
for nested_nested_value in _iterate_flattened_values(nested_value):
yield nested_nested_value
yield value
def iterate_references(config, to=None):
  """Provides an iterator over references in the given config.

  Args:
    config: A dictionary mapping scoped configurable names to argument bindings.
    to: If supplied, only yield references whose `configurable_fn` matches `to`.

  Yields:
    `ConfigurableReference` instances within `config`, maybe restricted to those
    matching the `to` parameter if it is supplied.
  """
  for candidate in _iterate_flattened_values(config):
    if not isinstance(candidate, ConfigurableReference):
      continue
    if to is not None and candidate.configurable.wrapper != to:
      continue
    yield candidate
def validate_reference(ref, require_bindings=True, require_evaluation=False):
  """Validates a configurable reference against the current config.

  Args:
    ref: The configurable reference to validate.
    require_bindings: If True, fail when the reference has no config bindings.
    require_evaluation: If True, fail when the reference is not evaluated
      (does not end in '()').

  Raises:
    ValueError: If any requested validation fails.
  """
  has_no_bindings = require_bindings and ref.config_key not in _CONFIG
  if has_no_bindings:
    err_str = "No bindings specified for '{}' in config string: \n{}"
    raise ValueError(err_str.format(ref.scoped_selector, config_str()))
  is_unevaluated = require_evaluation and not ref.evaluate
  if is_unevaluated:
    err_str = ("Reference '{}' must be evaluated (add '()') "
               'in config string: \n{}.')
    raise ValueError(err_str.format(ref, config_str()))
@register(module='gin')
def macro(value):
  """A Gin macro.

  Args:
    value: The value bound to this macro.

  Returns:
    The macro's value, unchanged.
  """
  return value
@register('constant', module='gin')
def _retrieve_constant():
  """Fetches and returns a constant from the _CONSTANTS map.

  The lookup key is the current scope string, i.e. the (scoped) name under
  which this configurable was referenced.
  """
  return _CONSTANTS[current_scope_str()]
@register(module='gin')
def singleton(constructor):
  """Returns a shared value built by `constructor`, keyed by the current scope."""
  return singleton_value(current_scope_str(), constructor)
def singleton_value(key, constructor=None):
  """Returns the singleton stored under `key`, constructing it on first use.

  Args:
    key: The name under which the singleton is cached.
    constructor: A zero-argument callable used to build the value if no
      singleton exists yet for `key`.

  Returns:
    The cached singleton value.

  Raises:
    ValueError: If no singleton exists for `key` and `constructor` is missing
      or not callable.
  """
  if key in _SINGLETONS:
    return _SINGLETONS[key]
  if not constructor:
    err_str = "No singleton found for key '{}', and no constructor was given."
    raise ValueError(err_str.format(key))
  if not callable(constructor):
    err_str = "The constructor for singleton '{}' is not callable."
    raise ValueError(err_str.format(key))
  _SINGLETONS[key] = constructor()
  return _SINGLETONS[key]
def constant(name, value):
  """Creates a constant that can be referenced from gin config files.

  After calling this function in Python, the constant can be referenced from
  within a Gin config file using the macro syntax. For example, in Python:

      gin.constant('THE_ANSWER', 42)

  Then, in a Gin config file:

      meaning.of_life = %THE_ANSWER

  Note that any Python object can be used as the value of a constant (including
  objects not representable as Gin literals). Values will be stored until
  program termination in a Gin-internal dictionary, so avoid creating constants
  with values that should have a limited lifetime.

  Optionally, a disambiguating module may be prefixed onto the constant
  name. For instance:

      gin.constant('some.modules.PI', 3.14159)

  Args:
    name: The name of the constant, possibly prepended by one or more
      disambiguating module components separated by periods. An macro with this
      name (including the modules) will be created.
    value: The value of the constant. This can be anything (including objects
      not representable as Gin literals). The value will be stored and returned
      whenever the constant is referenced.

  Raises:
    ValueError: If the constant's selector is invalid, or a constant with the
      given selector already exists.
  """
  if not config_parser.MODULE_RE.match(name):
    raise ValueError("Invalid constant selector '{}'.".format(name))
  # Compute the matching selectors once (the original queried twice: once for
  # the check and once for the error message).
  existing_selectors = _CONSTANTS.matching_selectors(name)
  if existing_selectors:
    err_str = "Constants matching selector '{}' already exist ({})."
    raise ValueError(err_str.format(name, existing_selectors))
  _CONSTANTS[name] = value
def constants_from_enum(cls=None, module=None):
  """Decorator for an enum class that generates Gin constants from values.

  Generated constants have format `module.ClassName.ENUM_VALUE`. The module
  name is optional when using the constant.

  Args:
    cls: Class type.
    module: The module to associate with the constants, to help handle naming
      collisions. If `None`, `cls.__module__` will be used.

  Returns:
    Class type (identity function).

  Raises:
    TypeError: When applied to a non-enum class.
  """

  def register_enum_constants(enum_cls, module=module):
    # One Gin constant per enum member, named `<module>.<ClassName.MEMBER>`.
    if not issubclass(enum_cls, enum.Enum):
      raise TypeError("Class '{}' is not subclass of enum.".format(
          enum_cls.__name__))
    if module is None:
      module = enum_cls.__module__
    for member in enum_cls:
      constant('{}.{}'.format(module, str(member)), member)
    return enum_cls

  # Support both bare `@constants_from_enum` and parameterized
  # `@constants_from_enum(module=...)` usage.
  if cls is None:
    return register_enum_constants
  return register_enum_constants(cls)
@register_finalize_hook
def validate_macros_hook(config):
  """Finalize hook: every macro reference must be evaluated (end in '()')."""
  for ref in iterate_references(config, to=get_configurable(macro)):
    validate_reference(ref, require_evaluation=True)
def _format_binding_key(scope, selector, param_name):
  """Formats a binding key as `scope/minimal_selector.param_name`."""
  minimal = _REGISTRY.minimal_selector(selector)
  scope_prefix = scope + '/' if scope else ''
  return f'{scope_prefix}{minimal}.{param_name}'
@register_finalize_hook
def find_unknown_references_hook(config):
  """Hook to find/raise errors for references to unknown configurables."""
  additional_msg_fmt = " In binding for '{}'."
  for (scope, selector), param_bindings in config.items():
    for param_name, param_value in param_bindings.items():
      # Scan the (possibly nested) bound value for unresolved references.
      unknown_refs = (
          nested for nested in _iterate_flattened_values(param_value)
          if isinstance(nested, _UnknownConfigurableReference))
      for unknown_ref in unknown_refs:
        binding_key = _format_binding_key(scope, selector, param_name)
        additional_msg = additional_msg_fmt.format(binding_key)
        _raise_unknown_reference_error(unknown_ref, additional_msg)
@register_finalize_hook
def find_missing_overrides_hook(config):
  """Hook to find/raise errors for config bindings marked REQUIRED."""
  for (scope, selector), param_bindings in config.items():
    for param_name, param_value in param_bindings.items():
      if isinstance(param_value, ConfigurableReference):
        # Only constant references can carry the REQUIRED sentinel.
        if param_value.configurable.wrapped == _retrieve_constant:  # pylint: disable=comparison-with-callable
          # Call the scoped _retrieve_constant() to get the constant value.
          constant_value = param_value.scoped_configurable_fn()
          if constant_value is REQUIRED:
            binding_key = _format_binding_key(scope, selector, param_name)
            fmt = '{} set to `%gin.REQUIRED` but not subsequently overridden.'
            raise ValueError(fmt.format(binding_key))
def markdown(string):
  """Convert a config string to Markdown format.

  Useful for rendering `gin.config_str()` / `gin.operative_config_str()`
  output in dashboards that support Markdown: comment section headers become
  Markdown headings, separator rules are dropped, and other lines are
  indented so they render as code.

  Args:
    string: The configuration string to convert to Markdown format. This should
      be the output of `gin.config_str()` or `gin.operative_config_str()`.

  Returns:
    The given configuration string in a Markdown-compatible format.
  """

  def to_markdown(line):
    # Non-comment lines are config code: indent them.
    if not line.startswith('#'):
      return ' ' + line
    # Drop the '# ' comment prefix before classifying the comment line.
    stripped = line[2:]
    if stripped.startswith('===='):
      return ''  # Separator rules carry no content in Markdown.
    if stripped.startswith('None'):
      return ' # None.'
    if stripped.endswith(':'):
      return '#### ' + stripped  # Section header.
    return stripped

  converted_lines = [to_markdown(line) for line in string.splitlines()]
  return '\n'.join(converted_lines)
| {
"content_hash": "5cb0620ca26762234b29fdbcea3accf3",
"timestamp": "",
"source": "github",
"line_count": 2901,
"max_line_length": 110,
"avg_line_length": 38.77111340916925,
"alnum_prop": 0.6954789953322961,
"repo_name": "google/gin-config",
"id": "46e9c3fcafc384376fbb87e81a2ab2b551a8fc4b",
"size": "113090",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gin/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "13889"
},
{
"name": "Python",
"bytes": "317690"
},
{
"name": "Shell",
"bytes": "4394"
}
],
"symlink_target": ""
} |
import sys, getopt
from py4j.java_gateway import java_import, JavaGateway, GatewayClient
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.rdd import RDD
from pyspark.files import SparkFiles
from pyspark.storagelevel import StorageLevel
from pyspark.accumulators import Accumulator, AccumulatorParam
from pyspark.broadcast import Broadcast
from pyspark.serializers import MarshalSerializer, PickleSerializer
# for back compatibility
from pyspark.sql import SQLContext, HiveContext, SchemaRDD, Row
# Connect back to the JVM-side Py4J gateway; the listening port is passed as
# the first command-line argument by the Zeppelin Spark interpreter.
client = GatewayClient(port=int(sys.argv[1]))
gateway = JavaGateway(client)
# Make the Spark JVM classes reachable through the gateway.
java_import(gateway.jvm, "org.apache.spark.SparkEnv")
java_import(gateway.jvm, "org.apache.spark.SparkConf")
java_import(gateway.jvm, "org.apache.spark.api.java.*")
java_import(gateway.jvm, "org.apache.spark.api.python.*")
java_import(gateway.jvm, "org.apache.spark.mllib.api.python.*")
# The gateway entry point is the JVM-side Zeppelin interpreter object.
intp = gateway.entry_point
jsc = intp.getJavaSparkContext()
# Spark SQL class locations differ across Spark versions; import the right set.
# NOTE(review): only 1.2.x and 1.3.x are handled — other versions get no SQL
# imports here; confirm intended behavior for newer Spark releases.
if jsc.version().startswith("1.2"):
  java_import(gateway.jvm, "org.apache.spark.sql.SQLContext")
  java_import(gateway.jvm, "org.apache.spark.sql.hive.HiveContext")
  java_import(gateway.jvm, "org.apache.spark.sql.hive.LocalHiveContext")
  java_import(gateway.jvm, "org.apache.spark.sql.hive.TestHiveContext")
elif jsc.version().startswith("1.3"):
  java_import(gateway.jvm, "org.apache.spark.sql.*")
  java_import(gateway.jvm, "org.apache.spark.sql.hive.*")
java_import(gateway.jvm, "scala.Tuple2")
# Wrap the JVM-side context objects in their PySpark equivalents.
jconf = intp.getSparkConf()
conf = SparkConf(_jvm = gateway.jvm, _jconf = jconf)
sc = SparkContext(jsc=jsc, gateway=gateway, conf=conf)
sqlc = SQLContext(sc, intp.getSQLContext())
z = intp.getZeppelinContext()
class Logger(object):
  """In-memory replacement for stdout/stderr that accumulates output.

  The interpreter loop installs an instance as sys.stdout/sys.stderr, then
  ships the captured text back via get() and clears it with reset().
  """

  def __init__(self):
    # Buffer chunks in a list and join on demand: repeated string
    # concatenation in write() is quadratic for chatty programs.
    self._chunks = []

  def write(self, message):
    """File-protocol hook: record one chunk of output."""
    self._chunks.append(message)

  def flush(self):
    """File-protocol no-op; some libraries call sys.stdout.flush()."""

  def get(self):
    """Returns everything written since the last reset()."""
    return "".join(self._chunks)

  def reset(self):
    """Discards all captured output."""
    self._chunks = []
# Route all interpreter output (including errors) through the in-memory
# Logger so it can be returned to the Zeppelin frontend after each run.
output = Logger()
sys.stdout = output
sys.stderr = output
# Main interpreter loop: fetch a block of statements from the JVM, compile and
# execute it incrementally (statement by statement), then report the captured
# output back. Runs forever; the JVM side terminates the process.
while True :
  req = intp.getStatements()
  try:
    stmts = req.statements().split("\n")
    jobGroup = req.jobGroup()
    single = None        # source of the statement currently being accumulated
    incomplete = None    # pending SyntaxError while a multi-line block is open
    compiledCode = None  # last successfully compiled (not yet run) statement
    for s in stmts:
      if s == None or len(s.strip()) == 0:
        continue
      # skip comment
      if s.strip().startswith("#"):
        continue
      # An unindented line starts a new statement: run the previous one first.
      if s[0] != " " and s[0] != "\t":
        if incomplete != None:
          raise incomplete
        if compiledCode != None:
          sc.setJobGroup(jobGroup, "Zeppelin")
          # eval of a "single"-mode code object executes the statement at
          # module scope (so user-defined names persist across lines).
          eval(compiledCode)
          compiledCode = None
          single = None
          incomplete = None
      if single == None:
        single = s
      else:
        single += "\n" + s
      try :
        compiledCode = compile(single, "<string>", "single")
        incomplete = None
      except SyntaxError as e:
        # NOTE(review): this message text is CPython-version-specific; newer
        # interpreters word incomplete-input errors differently — confirm.
        if str(e).startswith("unexpected EOF while parsing") :
          # incomplete expression
          incomplete = e
          continue
        else :
          # actual error
          raise e
    # End of block: flush any still-open construct or pending statement.
    if incomplete != None:
      raise incomplete
    if compiledCode != None:
      sc.setJobGroup(jobGroup, "Zeppelin")
      eval(compiledCode)
    intp.setStatementsFinished(output.get(), False)
  except:
    # Report any error (including SyntaxError raised above) to the JVM side.
    intp.setStatementsFinished(str(sys.exc_info()), True)
  output.reset()
| {
"content_hash": "13bfb391365addaf84f12eecd4062588",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 72,
"avg_line_length": 26.67479674796748,
"alnum_prop": 0.6705272782688205,
"repo_name": "myrtleTree33/zeppelin",
"id": "b822c0d3baa8fa8c7513b539827611fa6e95bc21",
"size": "3281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spark/src/main/resources/python/zeppelin_pyspark.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "40131"
},
{
"name": "HTML",
"bytes": "54950"
},
{
"name": "Java",
"bytes": "378815"
},
{
"name": "JavaScript",
"bytes": "99496"
},
{
"name": "Makefile",
"bytes": "6531"
},
{
"name": "Python",
"bytes": "3281"
},
{
"name": "Scala",
"bytes": "6965"
},
{
"name": "Shell",
"bytes": "21064"
},
{
"name": "Thrift",
"bytes": "1105"
},
{
"name": "XSLT",
"bytes": "1326"
}
],
"symlink_target": ""
} |
import os
import random
from ctypes import *
class Packet(Structure):
    """ctypes mirror of the montecarlo C packet struct.

    Field names, types, and ORDER must match the C definition exactly —
    reordering fields changes the binary layout seen by the C code.
    """
    _fields_ = [
        ('mu', c_double),              # presumably direction cosine — confirm against C header
        ('r', c_double),               # presumably radial position — confirm
        ('energy', c_double),
        ('nu', c_double),              # presumably frequency — confirm
        ('nu_line', c_double),
        ('tau_event', c_double),
        ('next_line_id', c_int),
        ('current_shell_id', c_int),
    ]
import pytest
import numpy as np
from numpy.testing import assert_almost_equal
#pytestmark = pytest.mark.skipif(True, reason='problem with the files')
from tardis import __path__ as path
# Load the compiled C test harness shipped inside the tardis package; every
# test below calls straight into this shared library.
test_path = os.path.join(path[0], 'montecarlo', 'test_montecarlo.so')
tests = CDLL(test_path)
def test_compute_distance2boundary():
    """C boundary-distance routine must reproduce the reference value."""
    expected = 259376919351035.88
    fn = tests.test_compute_distance2boundary
    fn.restype = c_double
    assert_almost_equal(fn(), expected)
def test_compute_distance2line():
    """C line-distance routine must reproduce the reference value."""
    expected = 7.792353908000001e+17
    fn = tests.test_compute_distance2line
    fn.restype = c_double
    assert_almost_equal(fn(), expected)
def test_compute_distance2continuum():
    """C continuum-distance routine must reproduce the reference value."""
    expected = 4.359272608766106e+28
    fn = tests.test_compute_distance2continuum
    fn.restype = c_double
    assert_almost_equal(fn(), expected)
def test_rpacket_doppler_factor():
    """Doppler factor from the C routine must match the reference value."""
    expected = 0.9998556693818854
    fn = tests.test_rpacket_doppler_factor
    fn.restype = c_double
    assert_almost_equal(fn(), expected)
@pytest.mark.skipif(True, reason='Bad test design')
def test_move_packet():
    """Skipped by the authors ('Bad test design'): doppler factor after move."""
    expected = 0.9998556693818854
    fn = tests.test_move_packet
    fn.restype = c_double
    assert_almost_equal(fn(), expected)
def test_increment_j_blue_estimator():
    """J-blue estimator update in C must match the reference value."""
    expected = 1.1249855669381885
    fn = tests.test_increment_j_blue_estimator
    fn.restype = c_double
    assert_almost_equal(fn(), expected)
def test_montecarlo_line_scatter():
    """Line-scatter routine must report success (non-zero return)."""
    result = tests.test_montecarlo_line_scatter()
    assert result
def test_move_packet_across_shell_boundary():
    """Shell-boundary crossing routine must report success (non-zero)."""
    result = tests.test_move_packet_across_shell_boundary()
    assert result
def test_montecarlo_one_packet():
    """Single-packet montecarlo run must report success (non-zero)."""
    result = tests.test_montecarlo_one_packet()
    assert result
def test_montecarlo_one_packet_loop():
    """Packet loop must finish cleanly (returns exactly 0)."""
    result = tests.test_montecarlo_one_packet_loop()
    assert result == 0
def test_montecarlo_thomson_scatter():
    """Thomson-scatter routine must report success (non-zero)."""
    result = tests.test_montecarlo_thomson_scatter()
    assert result
def test_calculate_chi_bf():
    """Bound-free opacity chi_bf from C must match the reference value."""
    expected = 1.0006697327643788
    fn = tests.test_calculate_chi_bf
    fn.restype = c_double
    assert_almost_equal(fn(), expected)
@pytest.mark.xfail
def test_montecarlo_bound_free_scatter():
    """Marked xfail by the authors: bound-free scatter should return 1."""
    result = tests.test_montecarlo_bound_free_scatter()
    assert result == 1
@pytest.mark.xfail
def test_bf_cross_section():
    """Marked xfail by the authors: bound-free cross section reference check."""
    expected = 0.0
    fn = tests.test_bf_cross_section
    fn.restype = c_double
    assert_almost_equal(fn(), expected)
def test_montecarlo_free_free_scatter():
    """Free-free scatter routine should return the code 2."""
    result = tests.test_montecarlo_free_free_scatter()
    assert result == 2
| {
"content_hash": "d3ec298813bc7287ecd2b8b219c7aebd",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 71,
"avg_line_length": 29.96969696969697,
"alnum_prop": 0.7269969666329625,
"repo_name": "Minhmo/tardis",
"id": "ac0f61c2a1816c5a9c4c9a9f85be1b0b6b11e8fb",
"size": "2967",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tardis/montecarlo/tests/test_cmontecarlo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "150456"
},
{
"name": "Python",
"bytes": "499591"
},
{
"name": "Shell",
"bytes": "467"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import importlib
import mimetypes
import posixpath
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
from django.utils.encoding import smart_text
from pipeline.conf import settings
def to_class(class_str):
    """Resolve a dotted path like ``pkg.mod.Name`` to the named attribute.

    Returns None for a falsy input or when the imported module has no such
    attribute; propagates ImportError if the module itself is missing.
    """
    if not class_str:
        return None
    module_path, _, class_name = class_str.rpartition('.')
    module = importlib.import_module(module_path)
    return getattr(module, class_name, None)
def filepath_to_uri(path):
    """Percent-encode a filesystem path for use inside a URI.

    Backslashes are normalised to forward slashes; a None path passes
    straight through unchanged.
    """
    if path is None:
        return path
    normalized = smart_text(path).replace("\\", "/")
    return quote(normalized, safe="/~!*()'#?")
def guess_type(path, default=None):
    """Guess a file's mimetype, registering project-configured types first.

    Falls back to *default* when the stdlib cannot determine a type.
    """
    # Make sure the project's extra mappings are known to the stdlib registry.
    for mimetype, extension in settings.MIMETYPES:
        mimetypes.add_type(mimetype, extension)
    guessed, _ = mimetypes.guess_type(path)
    if guessed:
        return smart_text(guessed)
    return default
def relpath(path, start=posixpath.curdir):
    """Return a POSIX-style relative version of *path* with respect to *start*.

    Raises ValueError for an empty path.
    """
    if not path:
        raise ValueError("no path specified")
    start_parts = posixpath.abspath(start).split(posixpath.sep)
    path_parts = posixpath.abspath(path).split(posixpath.sep)
    # Number of leading components shared by both paths.
    common = len(posixpath.commonprefix([start_parts, path_parts]))
    up = [posixpath.pardir] * (len(start_parts) - common)
    remainder = path_parts[common:]
    pieces = up + remainder
    if not pieces:
        return posixpath.curdir
    return posixpath.join(*pieces)
| {
"content_hash": "09f8483659e5d4bb02aac00c56c367bb",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 73,
"avg_line_length": 26.732142857142858,
"alnum_prop": 0.6760187040748163,
"repo_name": "leonardoo/django-pipeline",
"id": "dc5380b3d359faf04e0dfc419013e770b7a62eca",
"size": "1497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pipeline/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1159"
},
{
"name": "CoffeeScript",
"bytes": "52"
},
{
"name": "HTML",
"bytes": "1495"
},
{
"name": "JavaScript",
"bytes": "229"
},
{
"name": "Python",
"bytes": "92342"
}
],
"symlink_target": ""
} |
"""
Application testing
Generated by yeoman generator-django-ana <%= version %> on <%= date %>.
"""
from django.test import TestCase
from django.urls import reverse
# NOTE: this file is a yeoman template, not runnable Python; the <%= ... %>
# markers are substituted at project-generation time.
class <%= pascalName %>Tests(TestCase):
    def test(self):
        # Smoke test: the generated project's index view must be reachable.
        response = self.client.get(reverse('index'))
| {
"content_hash": "2c67d19d79a70bdef160d901a149f21c",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 71,
"avg_line_length": 25.454545454545453,
"alnum_prop": 0.6892857142857143,
"repo_name": "Wtower/generator-django-ana",
"id": "661b8cd2e168a6ae11c84c2f2dc6f750a1b88466",
"size": "280",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "generators/app/templates/_name__core/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "327"
},
{
"name": "HTML",
"bytes": "1751"
},
{
"name": "JavaScript",
"bytes": "19668"
},
{
"name": "Python",
"bytes": "11723"
}
],
"symlink_target": ""
} |
import xml.etree.ElementTree as ET
import time
from objects.cube.rubiks_cube import RubiksCube
from objects.xml import xml_step
from objects.xml.abstact_xml import AbstractXml
from objects.xml.xml_cube import XmlCube
from xml_step import Step
from helper import Helper
class XmlObject(AbstractXml):
    """Serializable description of a cube 'move': start/result cubes + steps.

    Builds (``get_xml``) and parses (``from_xml``) the <Move> XML document
    used to exchange solve sequences.
    """

    def __init__(self, author="Secret", size="3", set_date=True):
        """Create an empty move description.

        author   -- value stored in the <Author> element.
        size     -- cube size; note the default is the string "3" while
                    set_size()/from_xml() store integers.
        set_date -- when truthy, stamp the current local time; otherwise the
                    given value itself is stored as the date.
        """
        self._author = author
        if set_date:
            self._date = time.strftime("%Y-%m-%d %H:%M:%S")
        else:
            self._date = set_date
        self._size = size
        self._start_cube = None
        self._start_cube_turn = None
        self._result_cube = None
        self._result_cube_turn = None
        self._codes = []

    def set_size(self, size):
        """Set the cube size; sizes of 2 or less are silently ignored.

        NOTE(review): 2x2 cubes are rejected here -- confirm that is intended.
        """
        if size > 2:
            self._size = size

    def set_start(self, rubikscube, turn_cube=None):
        """Set the starting cube state and optional turn description."""
        self._start_cube = rubikscube
        self._start_cube_turn = turn_cube

    def set_result(self, rubikscube, turn_cube=None):
        """Set the resulting cube state and optional turn description."""
        self._result_cube = rubikscube
        self._result_cube_turn = turn_cube

    def add_code(self, code):
        """Append a single Step; anything else raises ValueError."""
        if isinstance(code, Step):
            self._codes.append(code)
        else:
            raise ValueError("Input isn't an instance of Step")

    def set_code(self, codes):
        """Replace the whole step list (elements are not type-checked here)."""
        self._codes = codes

    def from_xml(self, xml):
        """Populate this object from a parsed <Move> element tree."""
        self._author = xml.find("Author").text
        self._date = xml.find("Date").text
        self._size = int(xml.find("Size").text)
        self._start_cube = XmlCube.from_xml(xml.find("Cubes/Start"), self._size)
        self._result_cube = XmlCube.from_xml(xml.find("Cubes/Result"), self._size)
        for element in list(xml.find("Steps")):
            step = Step()
            step.from_xml(element)
            self._codes.append(step)

    def get_xml(self):
        """Build and return the <Move> element for this object.

        Raises ValueError unless start cube, result cube and at least one
        step have all been set.
        """
        # BUGFIX: the original compared with 'len(self._codes) is 0' -- an
        # identity test on an int that only works via CPython's small-int
        # caching (and warns on py3.8+); use a proper equality test.
        if self._start_cube is None or \
           self._result_cube is None or \
           len(self._codes) == 0:
            raise ValueError("Not all needed values have been set")
        move = ET.Element('Move')
        author = ET.Element('Author')
        author.text = self._author
        move.append(author)
        date = ET.Element('Date')
        date.text = self._date
        move.append(date)
        size = ET.Element('Size')
        size.text = str(self._size)
        move.append(size)
        cubes = ET.Element('Cubes')
        cubes.append(XmlCube.get_xml(self._start_cube, "Start"))
        if self._start_cube_turn is None:
            start_cube_turn = ET.Element("Start-Turn")
            start_cube_turn.text = "False"
            cubes.append(start_cube_turn)
        else:
            # Presumably the turn object is callable and returns an Element
            # named "Start-Turn" -- TODO confirm against callers.
            cubes.append(self._start_cube_turn("Start-Turn"))
        cubes.append(XmlCube.get_xml(self._result_cube, "Result"))
        if self._result_cube_turn is None:
            start_cube_turn = ET.Element("Result-Turn")
            start_cube_turn.text = "False"
            cubes.append(start_cube_turn)
        else:
            cubes.append(self._result_cube_turn("Result-Turn"))
        move.append(cubes)
        steps = ET.Element("Steps")
        for i in self._codes:
            steps.append(i.get_xml())
        move.append(steps)
        return move
| {
"content_hash": "911a92136a7eec89a4519fd73455a73f",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 82,
"avg_line_length": 28.394736842105264,
"alnum_prop": 0.5764596848934198,
"repo_name": "Willempie/Artificial_Intelligence_Cube",
"id": "623f425add4fbd923cf9f14378ac9c0bc8e488ac",
"size": "3237",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "objects/xml/xml_move.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "93272"
}
],
"symlink_target": ""
} |
import sys
from PyQt4 import Qt
from PyQt4.phonon import Phonon
class AIPYbody():
    """Body (main window) of the AIPY Music Player.

    All functionalities of the player live in this class; it is intended to
    become a single PyQt window.  Every handler below is still a stub.

    BUGFIX: the original methods omitted the ``self`` parameter, so calling
    any of them on an instance raised TypeError.
    """

    def __init__(self):
        pass

    def Display(self):
        """Show remaining/elapsed time of the song; may host visualisations."""
        pass

    def PlayList(self):
        """Play list holding the files selected by the user."""
        pass

    def PlayButton(self):
        """Start the music file; if none is loaded, let the user choose one."""
        pass

    def PauseButton(self):
        """Pause the music file; may be combined with the play button."""
        pass

    def StopButton(self):
        """Stop the music file; the next play starts from the beginning."""
        pass

    def NextButton(self):
        """Skip to the next song, or restart the same song if there is none."""
        pass

    def PrevButton(self):
        """Go to the previous song, or restart the same song if there is none."""
        pass

    def OnlineStreaming(self):
        """Open a new window to select a source for online streaming."""
        pass

    def EQ(self):
        """Open a window with an equalizer (optional feature)."""
        pass
| {
"content_hash": "29f96bc4775b6772d6396dc263baa898",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 84,
"avg_line_length": 24.52173913043478,
"alnum_prop": 0.724290780141844,
"repo_name": "MrIliev/AIPY-Music-Player",
"id": "43698c78bb9265eed77616d6fdc2f12bf10a06db",
"size": "1128",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AIPY.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1128"
}
],
"symlink_target": ""
} |
# Ansible module metadata consumed by ansible-doc tooling; keys and values
# must stay machine-parseable per the Ansible metadata 1.1 specification.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_user
version_added: "1.7"
short_description: Manages local Windows user accounts
description:
- Manages local Windows user accounts
- For non-Windows targets, use the M(user) module instead.
options:
name:
description:
- Name of the user to create, remove or modify.
required: true
fullname:
description:
- Full name of the user
required: false
default: null
version_added: "1.9"
description:
description:
- Description of the user
required: false
default: null
version_added: "1.9"
password:
description:
- Optionally set the user's password to this (plain text) value.
required: false
default: null
update_password:
description:
- C(always) will update passwords if they differ. C(on_create) will
only set the password for newly created users.
required: false
choices: [ 'always', 'on_create' ]
default: always
version_added: "1.9"
password_expired:
description:
- C(yes) will require the user to change their password at next login.
C(no) will clear the expired password flag.
required: false
choices: [ 'yes', 'no' ]
default: null
version_added: "1.9"
password_never_expires:
description:
- C(yes) will set the password to never expire. C(no) will allow the
password to expire.
required: false
choices: [ 'yes', 'no' ]
default: null
version_added: "1.9"
user_cannot_change_password:
description:
- C(yes) will prevent the user from changing their password. C(no) will
allow the user to change their password.
required: false
choices: [ 'yes', 'no' ]
default: null
version_added: "1.9"
account_disabled:
description:
- C(yes) will disable the user account. C(no) will clear the disabled
flag.
required: false
choices: [ 'yes', 'no' ]
default: null
version_added: "1.9"
account_locked:
description:
- C(no) will unlock the user account if locked.
required: false
choices: [ 'no' ]
default: null
version_added: "1.9"
groups:
description:
- Adds or removes the user from this comma-separated lis of groups,
depending on the value of I(groups_action). When I(groups_action) is
C(replace) and I(groups) is set to the empty string ('groups='), the
user is removed from all groups.
required: false
version_added: "1.9"
groups_action:
description:
- If C(replace), the user is added as a member of each group in
I(groups) and removed from any other groups. If C(add), the user is
added to each group in I(groups) where not already a member. If
C(remove), the user is removed from each group in I(groups).
required: false
choices: [ "replace", "add", "remove" ]
default: "replace"
version_added: "1.9"
state:
description:
- When C(present), creates or updates the user account. When C(absent),
removes the user account if it exists. When C(query) (new in 1.9),
retrieves the user account details without making any changes.
required: false
choices:
- present
- absent
- query
default: present
aliases: []
notes:
- For non-Windows targets, use the M(user) module instead.
author:
- "Paul Durivage (@angstwad)"
- "Chris Church (@cchurch)"
'''
EXAMPLES = r'''
- name: Ensure user bob is present
win_user:
name: bob
password: B0bP4ssw0rd
state: present
groups:
- Users
- name: Ensure user bob is absent
win_user:
name: bob
state: absent
'''
RETURN = r'''
account_disabled:
description: Whether the user is disabled.
returned: user exists
type: bool
sample: false
account_locked:
description: Whether the user is locked.
returned: user exists
type: bool
sample: false
description:
description: The description set for the user.
returned: user exists
type: str
sample: Username for test
fullname:
description: The full name set for the user.
returned: user exists
type: str
sample: Test Username
groups:
description: A list of groups and their ADSI path the user is a member of.
returned: user exists
type: list
sample: [
{
"name": "Administrators",
"path": "WinNT://WORKGROUP/USER-PC/Administrators"
}
]
name:
description: The name of the user
returned: always
type: str
sample: username
password_expired:
description: Whether the password is expired.
returned: user exists
type: bool
sample: false
password_never_expires:
description: Whether the password is set to never expire.
returned: user exists
type: bool
sample: true
path:
description: The ADSI path for the user.
returned: user exists
type: str
sample: "WinNT://WORKGROUP/USER-PC/username"
sid:
description: The SID for the user.
returned: user exists
type: str
sample: S-1-5-21-3322259488-2828151810-3939402796-1001
user_cannot_change_password:
description: Whether the user can change their own password.
returned: user exists
type: bool
sample: false
'''
| {
"content_hash": "2dc51a6d6efab8d7fde106939d45065b",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 78,
"avg_line_length": 27.127551020408163,
"alnum_prop": 0.6567613315779575,
"repo_name": "e-gob/plataforma-kioscos-autoatencion",
"id": "9664df67f39cd50e432660fa816fc540f39538e5",
"size": "6163",
"binary": false,
"copies": "40",
"ref": "refs/heads/master",
"path": "scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/windows/win_user.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41110"
},
{
"name": "C++",
"bytes": "3804"
},
{
"name": "CSS",
"bytes": "34823"
},
{
"name": "CoffeeScript",
"bytes": "8521"
},
{
"name": "HTML",
"bytes": "61168"
},
{
"name": "JavaScript",
"bytes": "7206"
},
{
"name": "Makefile",
"bytes": "1347"
},
{
"name": "PowerShell",
"bytes": "584344"
},
{
"name": "Python",
"bytes": "25506593"
},
{
"name": "Ruby",
"bytes": "245726"
},
{
"name": "Shell",
"bytes": "5075"
}
],
"symlink_target": ""
} |
""" FOSS FIAStar
"""
from bika.lims.browser import BrowserView
from DateTime import DateTime
from Products.CMFCore.utils import getToolByName
from plone.i18n.normalizer.interfaces import IIDNormalizer
from zope.component import getUtility
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from . import FOSSFIAStarCSVParser, FOSSFIAStarImporter
from cStringIO import StringIO
import json
import traceback
import csv
# Human-readable instrument name shown in the import/export UI.
title = "FOSS - FIAStar"
class Export(BrowserView):
    """Write worksheet analyses to a CSV file that FIAStar can read.

    Sends the CSV file to the response.  Requests "F SO2 & T SO2" for all
    requests.  Uses the analysis' PARENT UID as the 'Sample name' column and
    the analysis' CONTAINER UID as the 'Sample type' column (they are not
    always the same; think of multiple duplicates of the same analysis).
    """
    def __call__(self, analyses):
        # NOTE(review): Python 2 code (cmp-based sort, cStringIO); keep as-is.
        tray = 1
        now = DateTime().strftime('%Y%m%d-%H%M')
        bsc = getToolByName(self.context, 'bika_setup_catalog')
        uc = getToolByName(self.context, 'uid_catalog')
        instrument = self.context.getInstrument()
        norm = getUtility(IIDNormalizer).normalize
        # File name combines worksheet id and the instrument's data interface.
        filename = '%s-%s.csv'%(self.context.getId(),
                                norm(instrument.getDataInterface()))
        listname = '%s_%s_%s' %(self.context.getId(),
                                norm(instrument.Title()), now)
        # Defaults; instrument-configured options override these.
        options = {'dilute_factor' : 1,
                   'method': 'F SO2 & T SO2'}
        for k,v in instrument.getDataInterfaceOptions():
            options[k] = v
        # for looking up "cup" number (= slot) of ARs
        parent_to_slot = {}
        layout = self.context.getLayout()
        for x in range(len(layout)):
            a_uid = layout[x]['analysis_uid']
            p_uid = uc(UID=a_uid)[0].getObject().aq_parent.UID()
            layout[x]['parent_uid'] = p_uid
            # First slot seen for a parent wins.
            if not p_uid in parent_to_slot.keys():
                parent_to_slot[p_uid] = int(layout[x]['position'])
        # write rows, one per PARENT
        header = [listname, options['method']]
        rows = []
        rows.append(header)
        tmprows = []
        ARs_exported = []
        for x in range(len(layout)):
            # create batch header row; each parent is exported only once
            c_uid = layout[x]['container_uid']
            p_uid = layout[x]['parent_uid']
            if p_uid in ARs_exported:
                continue
            cup = parent_to_slot[p_uid]
            tmprows.append([tray,
                            cup,
                            p_uid,
                            c_uid,
                            options['dilute_factor'],
                            ""])
            ARs_exported.append(p_uid)
        # Sort by cup number (Python 2 cmp-style sort).
        tmprows.sort(lambda a,b:cmp(a[1], b[1]))
        rows += tmprows
        ramdisk = StringIO()
        # FIAStar expects semicolon-separated values.
        writer = csv.writer(ramdisk, delimiter=';')
        assert(writer)
        writer.writerows(rows)
        result = ramdisk.getvalue()
        ramdisk.close()
        #stream file to browser
        setheader = self.request.RESPONSE.setHeader
        setheader('Content-Length',len(result))
        setheader('Content-Type', 'text/comma-separated-values')
        setheader('Content-Disposition', 'inline; filename=%s' % filename)
        self.request.RESPONSE.write(result)
def Import(context, request):
    """Import FOSS FIAStar analysis results from an uploaded file.

    Reads upload/options from the request form, runs the CSV parser and
    importer, and returns a JSON document with 'errors', 'log' and 'warns'.
    """
    infile = request.form['data_file']
    fileformat = request.form['format']
    artoapply = request.form['artoapply']
    override = request.form['override']
    sample = request.form.get('sample',
                              'requestid')
    instrument = request.form.get('instrument', None)
    errors = []
    logs = []
    warns = []
    # Load the most suitable parser according to file extension/options/etc...
    parser = None
    if not hasattr(infile, 'filename'):
        errors.append(_("No file selected"))
    if fileformat == 'csv':
        parser = FOSSFIAStarCSVParser(infile)
    else:
        errors.append(t(_("Unrecognized file format ${fileformat}",
                          mapping={"fileformat": fileformat})))
    if parser:
        # Load the importer.
        # Map the 'artoapply' choice to the allowed AR workflow states.
        status = ['sample_received', 'attachment_due', 'to_be_verified']
        if artoapply == 'received':
            status = ['sample_received']
        elif artoapply == 'received_tobeverified':
            status = ['sample_received', 'attachment_due', 'to_be_verified']
        # over = [override existing results, override with empty values]
        over = [False, False]
        if override == 'nooverride':
            over = [False, False]
        elif override == 'override':
            over = [True, False]
        elif override == 'overrideempty':
            over = [True, True]
        # Fields used to match result rows to ARs/samples.
        # NOTE(review): the 'requestid' branch is a plain 'if' while the rest
        # chain with 'elif' -- behaviour is the same, but confirm intent.
        sam = ['getRequestID', 'getSampleID', 'getClientSampleID']
        if sample == 'requestid':
            sam = ['getRequestID']
        if sample == 'sampleid':
            sam = ['getSampleID']
        elif sample == 'clientsid':
            sam = ['getClientSampleID']
        elif sample == 'sample_clientsid':
            sam = ['getSampleID', 'getClientSampleID']
        importer = FOSSFIAStarImporter(parser=parser,
                                       context=context,
                                       idsearchcriteria=sam,
                                       allowed_ar_states=status,
                                       allowed_analysis_states=None,
                                       override=over,
                                       instrument_uid=instrument)
        tbex = ''
        try:
            importer.process()
        except:
            # Deliberate catch-all: the traceback is surfaced to the caller
            # through the 'errors' list instead of crashing the request.
            tbex = traceback.format_exc()
        errors = importer.errors
        logs = importer.logs
        warns = importer.warns
        if tbex:
            errors.append(tbex)
    results = {'errors': errors, 'log': logs, 'warns': warns}
    return json.dumps(results)
| {
"content_hash": "6f187be7e684dc77cf60eae05d1bf474",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 79,
"avg_line_length": 36.407407407407405,
"alnum_prop": 0.5496778569006443,
"repo_name": "hocinebendou/bika.gsoc",
"id": "424d13d30da0c8011a3890774250f3b8de1d0f55",
"size": "5898",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bika/lims/exportimport/instruments/foss/fiastar/fiastar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "403"
},
{
"name": "COBOL",
"bytes": "5987"
},
{
"name": "CSS",
"bytes": "29758"
},
{
"name": "JavaScript",
"bytes": "411425"
},
{
"name": "Python",
"bytes": "4330980"
},
{
"name": "RobotFramework",
"bytes": "239735"
},
{
"name": "Shell",
"bytes": "11201"
}
],
"symlink_target": ""
} |
import sys
import requests
from bs4 import BeautifulSoup
import webbrowser
import os
import threading
# Module-wide verbosity flag; flipped on by the '-verbose' CLI switch in main().
# (Removed the stray C-style trailing semicolon.)
verbose_mode = False
def get_urls(config_file):
    """Read one URL per line from *config_file*.

    Returns the list of URLs, or None (after printing a hint) when the file
    does not exist.  Trailing newlines are stripped; this version also strips
    a trailing '\r' so Windows-edited config files work.
    """
    if not os.path.isfile(config_file):
        print ("Can not find file {0}".format(config_file))
        print ("Be sure to create a config file as the one provided as a sample")
        return None
    ret_list = []
    with open(config_file, 'r') as fin:
        for line in fin:
            # rstrip('\r\n') instead of strip('\n'): tolerate CRLF line ends.
            ret_list.append(line.rstrip('\r\n'))
    return ret_list
def download_url(url):
    """Download every picture found on an Instagram profile page.

    Pictures are saved under ./instagram_downloads/<handle>/; files that
    already exist are skipped.  Intended to run inside a worker thread.
    """
    # Read-only access to the module flag.  BUGFIX: the original declared the
    # misspelled 'global verbode_mode', which bound nothing useful.
    global verbose_mode
    # Create directory for downloads if not exists already
    download_folder = os.path.join(os.getcwd(), 'instagram_downloads')
    if not os.path.exists(download_folder):
        os.mkdir(download_folder)
    res = requests.get(url)
    # Raise expection if we couldn't read the webpage
    res.raise_for_status()
    # Folder name taken from the URL tail after 'instagram.com/'.
    instagram_handle = url[url.find('instagram.com')+len('instagram.com')+1:]
    folder_path = os.path.join(download_folder, instagram_handle)
    if not os.path.exists(folder_path):
        os.mkdir(folder_path)
    print ("Started download {0}".format(instagram_handle))
    soup = BeautifulSoup(res.text,"html.parser")
    # All the pictures URLs to download are in script tag #5.
    scripts = soup.select('script')
    picscript = scripts[5]
    # Each picture URL follows a '"display_src":' marker; the first split
    # element is the text before any marker and is not a picture.
    parts = picscript.text.split('"display_src":')
    counter = 0
    # BUGFIX: the original iterated range(1, len(parts) - 1), which always
    # skipped the last picture (and downloaded nothing when only one existed).
    for i in range(1, len(parts)):
        urlpic = parts[i].split('},')[0]
        # "https:\\/\\/scontent.cdninstagram.com\\/hphotos-xfa1\\/t51.2885-15\\/e15\\/11809535_437579513099534_959907557_n.jpg"
        urlpic = urlpic.replace("\\","")
        urlpic = urlpic.replace("https","http").strip('"')
        path = os.path.join(folder_path, "pic_" + urlpic[urlpic.rfind('/')+1:])
        # download only new files
        if not os.path.exists(path):
            r = requests.get(urlpic)
            if r.status_code == 200:
                with open(path, 'wb') as f:
                    for chunk in r.iter_content(1024):
                        f.write(chunk)
                counter += 1
                if verbose_mode:
                    print ("Downloaded picture", path)
    print ("Completed download {0} with {1} new pictures".format(instagram_handle, counter))
# Gather our code in a main() function
def main():
    """Spawn one download thread per configured URL and wait for them all."""
    global verbose_mode
    # '-verbose' as the first CLI argument turns on per-picture logging.
    if len(sys.argv) > 1 and sys.argv[1].lower() == '-verbose':
        verbose_mode = True
    # Get the instagram URLs from the config file.
    urls = get_urls("config.txt")
    # One downloader thread per URL, to speed up the process.
    workers = []
    for url in urls:
        worker = threading.Thread(target=download_url, args=(url,))
        workers.append(worker)
        worker.start()
    # Wait for all downloaders to complete.
    for worker in workers:
        worker.join()
    print ("All done.")
# Entry point: run main() only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| {
"content_hash": "a191a64f0b9c44a470b250e93359217c",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 127,
"avg_line_length": 34.56701030927835,
"alnum_prop": 0.6286907247241277,
"repo_name": "gl051/Instagram_Scraper",
"id": "2d2ce8ab8244b6c867fce32e8598fce38763c7e6",
"size": "3371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "instagram_scraper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3371"
}
],
"symlink_target": ""
} |
from datetime import timedelta
from django.conf import settings as django_settings
# Package defaults; each key can be overridden in the Django settings module.
default = {
    'FULLCALENDAR_FIRST_WEEKDAY': 0,
    'FULLCALENDAR_OCCURRENCE_DURATION': timedelta(hours=1),
    'FULLCALENDAR_SITE_COLORS': {}
}
# Lightweight namespace type holding the effective settings as attributes.
settings = type('SettingsDummy', (), default)
# Prefer the value from django.conf.settings when defined, otherwise keep the
# package default declared above.
for key, value in default.items():
    setattr(settings, key,
            getattr(django_settings, key, value))
| {
"content_hash": "9690ccd3a36d9ffb7506ae06629ec98f",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 59,
"avg_line_length": 25.8,
"alnum_prop": 0.7080103359173127,
"repo_name": "jonge-democraten/mezzanine-fullcalendar",
"id": "3d5eb9805f4ab49cb448f33a59cf37ae503fb857",
"size": "387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fullcalendar/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37865"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from django.views.generic import DetailView
from .models import Movie
class MovieDetailView(DetailView):
    """Render a single Movie instance with the movies/detail.html template."""
    model = Movie
    template_name = "movies/detail.html"
"content_hash": "90a8893e4624b15cf4330f573efac3b7",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 43,
"avg_line_length": 25.125,
"alnum_prop": 0.7860696517412935,
"repo_name": "juliancantillo/royal-films",
"id": "384c0aceb4226c184d3c5a7682efbe14e2016384",
"size": "201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "royalfilms/movies/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1805"
},
{
"name": "HTML",
"bytes": "23320"
},
{
"name": "JavaScript",
"bytes": "9267"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "81534"
},
{
"name": "Shell",
"bytes": "4534"
}
],
"symlink_target": ""
} |
import datetime
def compute_latest_post(media_list):
    """Age of the newest post as a timedelta, or None for an empty list.

    Assumes media_list is ordered newest-first (element 0 is the latest).
    """
    if not media_list:
        return None
    newest = media_list[0]
    return datetime.datetime.now() - newest.created_time
def compute_average_post(media_list):
    """Average days between posts (whole-day resolution), or None if empty.

    Assumes media_list is ordered newest-first.
    """
    if not media_list:
        return None
    newest, oldest = media_list[0], media_list[-1]
    span_days = (newest.created_time - oldest.created_time).days
    return span_days / (1.0 * len(media_list))
def compute_media_liked(media_list):
    # Age of the newest liked media as a timedelta, or None for an empty list.
    # NOTE(review): Python 2 print statement below -- this module is py2-only.
    if len(media_list) == 0:
        return None
    # Side effect: prints the newest item's standard-resolution URL.
    print media_list[0].get_standard_resolution_url()
    return datetime.datetime.now()-media_list[0].created_time
def compute_average_media_liked(media_list):
    """Average days between liked media (whole-day resolution), or None.

    Assumes media_list is ordered newest-first.
    """
    if not media_list:
        return None
    newest, oldest = media_list[0], media_list[-1]
    span_days = (newest.created_time - oldest.created_time).days
    return span_days / (1.0 * len(media_list))
def unfollow(api, user):
    """Announce removal of *user*; the actual blocking call stays disabled."""
    message = "removing %s" % user.id
    print(message)
    # api.block_user(user.id)
    # ^ deliberately left commented out by the original author (dry run).
| {
"content_hash": "0d65331716b92db5991ce1da76c6ee74",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 94,
"avg_line_length": 36,
"alnum_prop": 0.7032828282828283,
"repo_name": "btabibian/instagramInactive",
"id": "0e930d108734f87184108a736cbaea4fcf5e2e99",
"size": "792",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40336"
}
],
"symlink_target": ""
} |
import logging
from grappa import log
def test_log_module(should):
    """Sanity-check grappa's internal log module using its own assertion DSL."""
    # 'should' is injected by grappa's pytest plugin; '|' pipes the subject
    # into an expectation and '>' chains a nested expectation on the result.
    log | should.be.a('module')
    log | should.have.property('handler')
    log | should.have.property('log') > should.be.instance.of(logging.Logger)
    (log
     | should.have.property('formatter')
     > should.be.instance.of(logging.Formatter))
| {
"content_hash": "a4149b346326ade3491b78044a117056",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 77,
"avg_line_length": 25.23076923076923,
"alnum_prop": 0.6676829268292683,
"repo_name": "grappa-py/grappa",
"id": "34a07e87ecd94ad94c3d9cabc1d75d51b55e22a2",
"size": "328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/log_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1520"
},
{
"name": "Python",
"bytes": "144569"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib import admin
from tastypie.api import Api
from apps.mds_auth.api import SocialSignUpResource, BackendResource, UserResource, ProfileResource
from apps.muni_scales.api import MscaleResource, UDHResource, UXCResource
from apps.trails.api import TrailResource
from mds_website.views import HomeView
# Register all app admin classes with the Django admin site.
admin.autodiscover()

# Public tastypie REST API, versioned under /api/v1/.
v1_api = Api(api_name='v1')
v1_api.register(MscaleResource())
v1_api.register(TrailResource())
v1_api.register(UDHResource())
v1_api.register(UXCResource())
v1_api.register(SocialSignUpResource())
v1_api.register(BackendResource())
v1_api.register(UserResource())
v1_api.register(ProfileResource())

# URL routing: home page, admin, API, and social-auth endpoints.
urlpatterns = patterns('',
                       url(r'^$', HomeView.as_view(), name = 'index'),
                       url(r'^admin/', include(admin.site.urls)),
                       url(r'^api/', include(v1_api.urls)),
                       # authentication stuff (django.contrib.auth views,
                       # currently disabled):
                       # url(r'^accounts/login/$', 'django.contrib.auth.views.login'),
                       # url(r'^accounts/logout/$', 'django.contrib.auth.views.logout'),
                       # url(r'^accounts/pw-change/$', 'django.contrib.auth.views.password_change'),
                       # url(r'^accounts/pw-change-done/$', 'django.contrib.auth.views.password_change_done'),
                       # url(r'^accounts/pw-reset-done/$', 'django.contrib.auth.views.password_reset_done'),
                       # url(r'^accounts/pw-reset/$', 'django.contrib.auth.views.password_reset'),
                       # url(r'^accounts/pw-reset-confirm/$', 'django.contrib.auth.views.password_reset_confirm'),
                       # url(r'^accounts/pw-reset-complete/$', 'django.contrib.auth.views.password_reset_complete'),
                       url('', include('social.apps.django_app.urls', namespace='social')),
                       # url(r'^auth/social', TemplateView.as_view(template_name='social_auth.html')),
                       )
"content_hash": "8bd9b1522f4d05f73f11dc2b956a3b5f",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 98,
"avg_line_length": 45.026315789473685,
"alnum_prop": 0.7293980128579778,
"repo_name": "schocco/mds-web",
"id": "f61260d60a0efe489960ead4e2083a2eeca46d92",
"size": "1711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mds_website/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3976"
},
{
"name": "Python",
"bytes": "131089"
}
],
"symlink_target": ""
} |
import vanilla
import urlparse
import fnmatch
import base64
import bencode
import struct
import socket
import peers
import posixpath
from eventlet.green import zmq
import cPickle as pickle
import eventlet.queue
import fairywren
import itertools
import logging
import array
def sendBencodedWsgiResponse(env, start_response, responseDict):
    """WSGI generator: emit *responseDict* bencoded with no-cache headers."""
    headers = [('Content-Type', 'text/plain'),
               ('Cache-Control', 'no-cache')]
    start_response('200 OK', headers)
    yield bencode.bencode(responseDict)
def getClientAddress(environ):
    """Best-effort client IP: last X-Forwarded-For hop, else REMOTE_ADDR."""
    if 'HTTP_X_FORWARDED_FOR' in environ:
        hops = environ['HTTP_X_FORWARDED_FOR'].split(',')
        return hops[-1].strip()
    return environ['REMOTE_ADDR']
def dottedQuadToInt(dq):
    """Convert a dotted-quad IPv4 string to its host-order integer value.

    Raises ValueError (never socket.error/struct.error) on malformed input.
    """
    # Parse the dotted quad into 4 network-order bytes.
    try:
        packed = socket.inet_aton(dq)
    except socket.error:
        # BUGFIX: the original formatted this message with the not-yet-assigned
        # local 'peerIp', which raised NameError instead of ValueError.
        raise ValueError('Not a valid IP address:%s' % dq)
    # Convert from network byte order to an integer.
    try:
        value, = struct.unpack('!I', packed)
    except struct.error:
        raise ValueError('Serious wtf, how did this fail')
    return value
class Tracker(object):
    """WSGI application implementing the announce side of a BitTorrent
    tracker.

    Announce URLs have the form ``/<base64-secret-key>/announce``; peer
    bookkeeping and authentication are delegated to the injected ``peers``
    and ``auth`` collaborators.
    """

    def __init__(self, auth, peers, pathDepth):
        """
        @param auth: provides authenticateSecretKey() and authorizeInfoHash()
        @param peers: peer store with updatePeer/removePeer/getPeers and
            getNumberOfSeeds/getNumberOfLeeches counters
        @param pathDepth: number of leading URL path components to skip
            before the secret key
        """
        self.auth = auth
        self.peers = peers
        self.pathDepth = pathDepth
        self.announceLog = logging.getLogger('fairywren.announce')
        self.trackerLog = logging.getLogger('fairywren.tracker')
        # Callbacks invoked after every successful announce
        self.afterAnnounce = []
        self.trackerLog.info('Created')

    def addAfterAnnounce(self, callback):
        """Register a callback invoked after each successful announce.

        The callback receives (userId, info_hash, peerIp, port, peer_id).
        """
        self.afterAnnounce.append(callback)

    def getScrape(self, info_hashes):
        """Return a dictionary object that contains a tracker scrape.

        @param info_hashes: list of info_hashes to include in the scrape
        """
        retval = {}
        retval['files'] = {}
        for info_hash in info_hashes:
            result = {}
            # Snatch counts are not tracked, so always report zero
            result['downloaded'] = 0
            result['complete'] = self.peers.getNumberOfSeeds(info_hash)
            result['incomplete'] = self.peers.getNumberOfLeeches(info_hash)
            retval['files'][info_hash] = result
        return retval

    def announce(self, env, start_response):
        """Handle an announce GET request; returns a bencoded response."""
        # Extract and normalize the path. Posix path may not be the best
        # approach here but no alternate has been found.
        pathInfo = posixpath.normpath(env['PATH_INFO'])
        # Split the path into components. Drop the first since it should
        # always be the empty string, plus any configured prefix depth.
        pathComponents = pathInfo.split('/')[1 + self.pathDepth:]
        # A SHA512 encoded in base64 is 88 characters but the last two are
        # always '==' so 86 is used here
        if len(pathComponents) != 2 or len(pathComponents[0]) != 86 or pathComponents[1] != 'announce':
            return vanilla.http_error(404, env, start_response)
        # Only GET requests are valid
        if env['REQUEST_METHOD'] != 'GET':
            return vanilla.http_error(405, env, start_response)
        # Add the omitted equals signs back in
        secretKey = pathComponents[0] + '=='
        # base64 decode the secret key
        try:
            secretKey = base64.urlsafe_b64decode(secretKey)
        except TypeError:
            return vanilla.http_error(404, env, start_response)
        # Extract the IP of the peer
        peerIp = getClientAddress(env)
        peerIpAsString = peerIp
        try:
            peerIp = dottedQuadToInt(peerIp)
        except ValueError:
            return vanilla.http_error(500, env, start_response)
        # Parse the query string. Absence indicates error
        if 'QUERY_STRING' not in env:
            return vanilla.http_error(400, env, start_response)
        query = urlparse.parse_qs(env['QUERY_STRING'])
        # List of tuples. Each tuple is:
        #   parameter name,
        #   default value (None means the parameter is required),
        #   type conversion: a side-effect free callable
        params = []

        def validateInfoHash(info_hash):
            # Info hashes are a SHA1 hash, and are always 20 bytes
            if len(info_hash) != 20:
                raise ValueError("Length " + str(len(info_hash)) + ' not acceptable')
            return info_hash
        params.append(('info_hash', None, validateInfoHash))

        def validatePeerId(peer_id):
            # Peer IDs are a string chosen by the peer to identify itself
            # and are always 20 bytes
            if len(peer_id) != 20:
                raise ValueError("Improper Length")
            return peer_id
        params.append(('peer_id', None, validatePeerId))

        def validatePort(port):
            port = int(port)
            # Ipv4 ports should not be higher than this value
            if port > 2 ** 16 - 1 or port <= 0:
                raise ValueError("Port outside of range")
            return port

        def validateByteCount(byteCount):
            byteCount = int(byteCount)
            if byteCount < 0:
                raise ValueError('byte count cannot be negative')
            return byteCount
        params.append(('port', None, validatePort))
        params.append(('uploaded', None, validateByteCount))
        params.append(('downloaded', None, validateByteCount))
        params.append(('left', None, validateByteCount))
        # If the client doesn't specify the compact parameter, it is
        # safe to assume that compact responses are understood. So a
        # default value of 1 is used. Additionally, any non zero
        # value provided assumes the client wants a compact response
        params.append(('compact', 1, int))

        def validateEvent(event):
            event = event.lower()
            if event not in ['started', 'stopped', 'completed']:
                raise ValueError("Unknown event")
            return event
        params.append(('event', 'update', validateEvent))
        maxNumWant = 35

        def limitNumWant(numwant):
            numwant = int(numwant)
            if numwant < 0:
                raise ValueError('numwant cannot be negative')
            numwant = min(numwant, maxNumWant)
            return numwant
        params.append(('numwant', maxNumWant, limitNumWant))
        # Dictionary holding parameters to query
        p = dict()
        # Use the params to generate the parameters
        for param, defaultValue, typeConversion in params:
            # If the parameter is in the query, extract the first
            # occurence and type convert if requested
            if param in query:
                p[param] = query[param][0]
                if typeConversion:
                    try:
                        p[param] = typeConversion(p[param])
                    except ValueError:
                        return vanilla.http_error(400, env, start_response, msg='bad value for ' + param)
            # If the parameter is not in the query, then
            # use a default value if present. Otherwise this is an error
            else:
                if defaultValue is None:
                    return vanilla.http_error(400, env, start_response, msg='missing ' + param)
                p[param] = defaultValue
        # Make sure the secret key is valid
        userId = self.auth.authenticateSecretKey(secretKey)
        if userId is None:
            response = {}
            response['failure reason'] = 'failed to authenticate secret key'
            return sendBencodedWsgiResponse(env, start_response, response)
        # Make sure the info hash is allowed
        torrentId = self.auth.authorizeInfoHash(p['info_hash'])
        if torrentId is None:
            response = {}
            response['failure reason'] = 'unauthorized info hash'
            return sendBencodedWsgiResponse(env, start_response, response)
        # Construct the peers entry
        peer = peers.Peer(peerIp, p['port'], p['left'])
        # This is the basic response format
        response = {}
        response['interval'] = 5 * 60
        response['complete'] = 0
        response['incomplete'] = 0
        response['peers'] = []
        # This value is set to True if the number of seeds or leeches
        # changes in the course of processing this result
        change = False
        # This value is set to true if the peer is added, false if removed
        addPeer = False
        # For all 3 cases here just return peers
        if p['event'] in ['started', 'completed', 'update']:
            # BUG FIX: per the tracker protocol (and consistent with
            # getScrape above), 'complete' is the seed count and
            # 'incomplete' the leech count. The two calls were swapped.
            response['complete'] = self.peers.getNumberOfSeeds(p['info_hash'])
            response['incomplete'] = self.peers.getNumberOfLeeches(p['info_hash'])
            change = self.peers.updatePeer(p['info_hash'], peer)
            if change:
                addPeer = True
            peersForResponse = self.peers.getPeers(p['info_hash'])
            # Return a compact response or a traditional response
            # based on what is requested
            if p['compact'] != 0:
                peerStruct = struct.Struct('!IH')
                maxSize = p['numwant'] * peerStruct.size
                peersBuffer = array.array('c')
                for peer in itertools.islice(peersForResponse, 0, p['numwant']):
                    peersBuffer.fromstring(peerStruct.pack(peer.ip, peer.port))
                response['peers'] = peersBuffer.tostring()
            else:
                for peer in itertools.islice(peersForResponse, 0, p['numwant']):
                    # For non-compact responses, use a bogus peerId. Hardly any client
                    # uses this type of response anyways. There is no real meaning to the
                    # peer ID except informal agreements.
                    response['peers'].append({'peer id': '0' * 20, 'ip': socket.inet_ntoa(struct.pack('!I', peer.ip)), 'port': peer.port})
        # For stop event, just remove the peer. Don't return anything
        elif p['event'] == 'stopped':
            change = self.peers.removePeer(p['info_hash'], peer)
            addPeer = False
        # Log the successful announce
        self.announceLog.info('%s:%d %s,%s,%d', peerIpAsString, p['port'], p['info_hash'].encode('hex').upper(), p['event'], p['left'])
        for callback in self.afterAnnounce:
            callback(userId, p['info_hash'], peerIpAsString, p['port'], p['peer_id'])
        return sendBencodedWsgiResponse(env, start_response, response)

    def __call__(self, env, start_response):
        # The tracker itself is a WSGI application
        return self.announce(env, start_response)
| {
"content_hash": "43e0b3e49de9345cb31708dc924b79a5",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 124,
"avg_line_length": 30.397959183673468,
"alnum_prop": 0.6966543582857783,
"repo_name": "hydrogen18/fairywren",
"id": "523be045afab95981f618d837c88eebb38f83355",
"size": "8937",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tracker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "67010"
},
{
"name": "Python",
"bytes": "219448"
},
{
"name": "Shell",
"bytes": "389"
}
],
"symlink_target": ""
} |
__author__ = "gaunt"
from . import SysRoleAuthorities
def get_authorities(role_id):
    """Return the authority entries granted to ``role_id``.

    Each element is a dict with an ``authority`` key, as produced by the
    peewee ``dicts()`` cursor.
    """
    query = SysRoleAuthorities.select(SysRoleAuthorities.authority).where(
        SysRoleAuthorities.role_id == role_id)
    return list(query.dicts())
| {
"content_hash": "fdeeb852396084cfc03daec8b5cbebbb",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 110,
"avg_line_length": 25.5,
"alnum_prop": 0.7215686274509804,
"repo_name": "chenghao/haoAdmin",
"id": "1e9d7ff54e38a5274e8758bd2e5588a172c26e72",
"size": "270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/SysRoleAuthoritiesDao.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "209703"
},
{
"name": "HTML",
"bytes": "100111"
},
{
"name": "JavaScript",
"bytes": "576254"
},
{
"name": "Python",
"bytes": "38416"
}
],
"symlink_target": ""
} |
""" File used to synchronize operations between processes """
import os
class LockFile(object):
    """
    Use this to lock operations that need to occur only once, even if several
    processes try to run the operation. It works by getting an exclusive lock
    on the listed file. It will fail with an exception if the lock already is
    held by some other process. Note that the lock is reentrant for any code
    sharing the same instance of this class.

    Usage:
    >>> with LockFile('/tmp/rigor-foo.lock') as lock:
    ...	# do critical stuff...
    ...	pass
    """

    def __init__(self, path):
        self._path = path
        # File descriptor while the lock is held; None otherwise.
        self._lock = None

    def acquire(self):
        """
        Acquires a reentrant lock. If the lock is already held by this
        instance, it will simply return; otherwise, it will acquire the lock.
        It will throw an exception if the lock cannot be acquired.
        """
        # BUG FIX: compare against None instead of truthiness — a file
        # descriptor of 0 is valid but falsy.
        if self._lock is None:
            # O_EXCL makes creation atomic: fails if another process holds it.
            self._lock = os.open(self._path, os.O_CREAT | os.O_EXCL | os.O_RDWR)

    def release(self):
        """
        Releases the lock and removes the file from disk.
        """
        if self._lock is not None:
            os.close(self._lock)
            os.unlink(self._path)
            # BUG FIX: reset so the instance can be acquired again; without
            # this, a second acquire() was a no-op and a second release()
            # closed a stale descriptor and unlinked a missing file.
            self._lock = None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, _exc_type, _exc_value, _exc_traceback):
        self.release()
| {
"content_hash": "0d430b8d501a5b13a546783cd2d74dc3",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 78,
"avg_line_length": 27.31111111111111,
"alnum_prop": 0.6802278275020341,
"repo_name": "blindsightcorp/rigor",
"id": "1c49703f28f036f4d4ac9547a92dd0ad4100c1c4",
"size": "1229",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/lockfile.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "PHP",
"bytes": "1109"
},
{
"name": "Python",
"bytes": "132419"
}
],
"symlink_target": ""
} |
"""Generic helper functions useful in tests (App Engine only)."""
import datetime
from clusterfuzz._internal.tests.test_libs import test_utils
from libs.issue_management import monorail
from libs.issue_management.monorail.comment import Comment
from libs.issue_management.monorail.issue import Issue
def create_generic_issue(created_days_ago=28):
  """Returns a simple issue object for use in tests."""
  raw_issue = Issue()
  raw_issue.cc = []
  raw_issue.comment = ''
  raw_issue.comments = []
  raw_issue.components = []
  raw_issue.labels = []
  raw_issue.open = True
  raw_issue.owner = 'owner@chromium.org'
  raw_issue.status = 'Assigned'
  raw_issue.id = 1
  raw_issue.itm = create_issue_tracker_manager()

  # Backdate the issue relative to the current (mocked) time.
  age = datetime.timedelta(days=created_days_ago)
  raw_issue.created = test_utils.CURRENT_TIME - age
  return monorail.Issue(raw_issue)
def create_generic_issue_comment(comment_body='Comment.',
                                 author='user@chromium.org',
                                 days_ago=21,
                                 labels=None):
  """Return a simple comment used for testing."""
  result = Comment()
  result.comment = comment_body
  result.author = author
  # Backdate the comment relative to the current (mocked) time.
  result.created = test_utils.CURRENT_TIME - datetime.timedelta(days=days_ago)
  # Avoid sharing a mutable default across calls.
  result.labels = [] if labels is None else labels
  return result
def create_issue_tracker_manager():
  """Create a fake issue tracker manager."""

  class FakeIssueTrackerManager(object):
    """In-memory stand-in for the real issue tracker manager."""

    def __init__(self):
      self.project_name = 'test-project'
      self.issues = {}
      self.next_id = 1

    def get_issue(self, issue_id):
      """Return a stored issue, re-attaching this manager to it."""
      issue = self.issues.get(issue_id)
      if not issue:
        return None

      issue.itm = self
      return issue

    def get_comments(self, issue):  # pylint: disable=unused-argument
      """Comments are never stored, so the list is always empty."""
      return []

    def save(self, issue, *args, **kwargs):  # pylint: disable=unused-argument
      """Store an issue, assigning a fresh id on first save."""
      if issue.new:
        issue.id = self.next_id
        issue.new = False
        self.next_id += 1

      self.issues[issue.id] = issue

  return FakeIssueTrackerManager()
| {
"content_hash": "dad85e2b9ecb8f8d65fca2dfdc5f2d40",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 79,
"avg_line_length": 27.695121951219512,
"alnum_prop": 0.6437692646411273,
"repo_name": "google/clusterfuzz",
"id": "af447e286a00e7f9690db076605ef4b61fbfff55",
"size": "2846",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/clusterfuzz/_internal/tests/test_libs/appengine_test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "21721"
},
{
"name": "C",
"bytes": "3485"
},
{
"name": "C++",
"bytes": "16326"
},
{
"name": "CSS",
"bytes": "16789"
},
{
"name": "Dockerfile",
"bytes": "25218"
},
{
"name": "Go",
"bytes": "16253"
},
{
"name": "HTML",
"bytes": "503044"
},
{
"name": "JavaScript",
"bytes": "9433"
},
{
"name": "Jinja",
"bytes": "3308"
},
{
"name": "PowerShell",
"bytes": "17307"
},
{
"name": "Python",
"bytes": "5085058"
},
{
"name": "Ruby",
"bytes": "93"
},
{
"name": "Shell",
"bytes": "80910"
},
{
"name": "Starlark",
"bytes": "1951"
}
],
"symlink_target": ""
} |
import os
import six
import socket
import xdrlib
from oslo_config import cfg
from oslo_log import log as logging
from random import randint
from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers import nfs
#
# RPC Definition
#
# Wire-protocol constants from ONC RPC (RFC 5531), plus the Coho-specific
# program definition at the bottom.

# RPC protocol version carried in every call header
RPCVERSION = 2

# Message types
CALL = 0
REPLY = 1

# Authentication flavors (only AUTH_NULL is used by this client)
AUTH_NULL = 0

# Reply status: accepted vs. denied
MSG_ACCEPTED = 0
MSG_DENIED = 1

# Accept-status codes for accepted replies
SUCCESS = 0
PROG_UNAVAIL = 1
PROG_MISMATCH = 2
PROC_UNAVAIL = 3
GARBAGE_ARGS = 4

# Reject-status codes for denied replies
RPC_MISMATCH = 0
AUTH_ERROR = 1

# Coho-specific RPC program number, version, and procedure numbers
COHO_PROGRAM = 400115
COHO_V1 = 1
COHO1_CREATE_SNAPSHOT = 1
COHO1_DELETE_SNAPSHOT = 2
COHO1_CREATE_VOLUME_FROM_SNAPSHOT = 3
#
# Simple RPC Client
#
def make_auth_null():
    # AUTH_NULL credentials/verifiers carry an empty opaque body.
    return six.b('')
class Client(object):
    """Minimal ONC RPC client speaking record-marked TCP.

    Serializes calls with XDR and authenticates with AUTH_NULL only.
    NOTE(review): relies on ``xdrlib`` and ``six`` Python 2/3 shims —
    confirm target interpreter before modernizing.
    """

    def __init__(self, address, prog, vers, port):
        # XDR (de)serializers reused across all calls on this client.
        self.packer = xdrlib.Packer()
        self.unpacker = xdrlib.Unpacker('')
        self.address = address
        self.prog = prog
        self.vers = vers
        self.port = port
        # Lazily-built AUTH_NULL credential and verifier (see make_cred/verf).
        self.cred = None
        self.verf = None
        self.init_socket()
        self.init_xid()

    def init_socket(self):
        # Open the TCP connection to the RPC server; wrap any socket
        # failure in the driver's exception type.
        try:
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.sock.bind(('', 0))
            self.sock.connect((self.address, self.port))
        except socket.error:
            msg = _('Failed to establish connection with Coho cluster')
            raise exception.CohoException(msg)

    def init_xid(self):
        # Start the transaction id at a random point so that concurrent
        # clients are unlikely to collide.
        self.xid = randint(0, 4096)

    def make_xid(self):
        # Fresh transaction id for each call.
        self.xid += 1

    def make_cred(self):
        # Cached AUTH_NULL credential.
        if self.cred is None:
            self.cred = (AUTH_NULL, make_auth_null())
        return self.cred

    def make_verf(self):
        # Cached AUTH_NULL verifier.
        if self.verf is None:
            self.verf = (AUTH_NULL, make_auth_null())
        return self.verf

    def pack_auth(self, auth):
        # auth is a (flavor, opaque body) pair.
        flavor, stuff = auth
        self.packer.pack_enum(flavor)
        self.packer.pack_opaque(stuff)

    def pack_callheader(self, xid, prog, vers, proc, cred, verf):
        # Serialize the RPC call header: xid, msg type, RPC version,
        # program/version/procedure, then credential and verifier.
        self.packer.pack_uint(xid)
        self.packer.pack_enum(CALL)
        self.packer.pack_uint(RPCVERSION)
        self.packer.pack_uint(prog)
        self.packer.pack_uint(vers)
        self.packer.pack_uint(proc)
        self.pack_auth(cred)
        self.pack_auth(verf)

    def unpack_auth(self):
        # Inverse of pack_auth: (flavor, opaque body).
        flavor = self.unpacker.unpack_enum()
        stuff = self.unpacker.unpack_opaque()
        return (flavor, stuff)

    def unpack_replyheader(self):
        """Deserialize and validate a reply header.

        Returns (xid, verifier); raises CohoException for any denied
        reply or non-SUCCESS accept status.
        """
        xid = self.unpacker.unpack_uint()
        mtype = self.unpacker.unpack_enum()
        if mtype != REPLY:
            raise exception.CohoException(
                _('no REPLY but %r') % (mtype,))
        stat = self.unpacker.unpack_enum()
        if stat == MSG_DENIED:
            # Denied replies carry a reject status with extra detail.
            stat = self.unpacker.unpack_enum()
            if stat == RPC_MISMATCH:
                low = self.unpacker.unpack_uint()
                high = self.unpacker.unpack_uint()
                raise exception.CohoException(
                    _('MSG_DENIED: RPC_MISMATCH: %r') % ((low, high),))
            if stat == AUTH_ERROR:
                stat = self.unpacker.unpack_uint()
                raise exception.CohoException(
                    _('MSG_DENIED: AUTH_ERROR: %r') % (stat,))
            raise exception.CohoException(_('MSG_DENIED: %r') % (stat,))
        if stat != MSG_ACCEPTED:
            raise exception.CohoException(
                _('Neither MSG_DENIED nor MSG_ACCEPTED: %r') % (stat,))
        verf = self.unpack_auth()
        # Accept status follows the verifier.
        stat = self.unpacker.unpack_enum()
        if stat == PROG_UNAVAIL:
            raise exception.CohoException(_('call failed: PROG_UNAVAIL'))
        if stat == PROG_MISMATCH:
            low = self.unpacker.unpack_uint()
            high = self.unpacker.unpack_uint()
            raise exception.CohoException(
                _('call failed: PROG_MISMATCH: %r') % ((low, high),))
        if stat == PROC_UNAVAIL:
            raise exception.CohoException(_('call failed: PROC_UNAVAIL'))
        if stat == GARBAGE_ARGS:
            raise exception.CohoException(_('call failed: GARBAGE_ARGS'))
        if stat != SUCCESS:
            raise exception.CohoException(_('call failed: %r') % (stat,))
        return xid, verf

    def init_call(self, proc, args):
        """Serialize a call to ``proc``.

        @param args: sequence of (value, packer-method) pairs
        Returns (xid, wire bytes).
        """
        self.make_xid()
        self.packer.reset()
        cred = self.make_cred()
        verf = self.make_verf()
        self.pack_callheader(self.xid, self.prog, self.vers, proc, cred, verf)
        for arg, func in args:
            func(arg)
        return self.xid, self.packer.get_buf()

    def _sendfrag(self, last, frag):
        # Record marking: 4-byte big-endian length header, with the high
        # bit set on the final fragment of a record.
        x = len(frag)
        if last:
            x = x | 0x80000000
        header = (six.int2byte(int(x >> 24 & 0xff)) +
                  six.int2byte(int(x >> 16 & 0xff)) +
                  six.int2byte(int(x >> 8 & 0xff)) +
                  six.int2byte(int(x & 0xff)))
        self.sock.send(header + frag)

    def _sendrecord(self, record):
        # Records are always sent as a single (final) fragment.
        self._sendfrag(1, record)

    def _recvfrag(self):
        # Receive one record-marked fragment; returns (is_last, bytes).
        header = self.sock.recv(4)
        if len(header) < 4:
            raise exception.CohoException(
                _('Invalid response header from RPC server'))
        x = (six.indexbytes(header, 0) << 24 |
             six.indexbytes(header, 1) << 16 |
             six.indexbytes(header, 2) << 8 |
             six.indexbytes(header, 3))
        last = ((x & 0x80000000) != 0)
        n = int(x & 0x7fffffff)
        frag = six.b('')
        # recv() may return short reads; loop until the fragment is whole.
        while n > 0:
            buf = self.sock.recv(n)
            if not buf:
                raise exception.CohoException(
                    _('RPC server response is incomplete'))
            n = n - len(buf)
            frag = frag + buf
        return last, frag

    def _recvrecord(self):
        # Reassemble fragments into one complete record.
        record = six.b('')
        last = 0
        while not last:
            last, frag = self._recvfrag()
            record = record + frag
        return record

    def _make_call(self, proc, args):
        # Send the call and leave self.unpacker positioned just past the
        # validated reply header, ready for result decoding.
        self.packer.reset()
        xid, call = self.init_call(proc, args)
        self._sendrecord(call)
        reply = self._recvrecord()
        self.unpacker.reset(reply)
        xid, verf = self.unpack_replyheader()

    def _call(self, proc, args):
        # Invoke ``proc``; the Coho procedures return an errno-style uint,
        # which is translated to a CohoException when non-zero.
        self._make_call(proc, args)
        res = self.unpacker.unpack_uint()
        if res != SUCCESS:
            raise exception.CohoException(os.strerror(res))
class CohoRPCClient(Client):
    """RPC client for the Coho-specific program exposed by the cluster."""

    def __init__(self, address, port):
        # Consistency fix: use the COHO_V1 constant defined above instead
        # of a bare literal 1 (same value, self-documenting).
        Client.__init__(self, address, COHO_PROGRAM, COHO_V1, port)

    def create_snapshot(self, src, dst, flags):
        """Snapshot the volume at path ``src`` under the name ``dst``."""
        self._call(COHO1_CREATE_SNAPSHOT,
                   [(six.b(src), self.packer.pack_string),
                    (six.b(dst), self.packer.pack_string),
                    (flags, self.packer.pack_uint)])

    def delete_snapshot(self, name):
        """Delete the snapshot called ``name``."""
        self._call(COHO1_DELETE_SNAPSHOT,
                   [(six.b(name), self.packer.pack_string)])

    def create_volume_from_snapshot(self, src, dst):
        """Materialize snapshot ``src`` as a new volume at path ``dst``."""
        self._call(COHO1_CREATE_VOLUME_FROM_SNAPSHOT,
                   [(six.b(src), self.packer.pack_string),
                    (six.b(dst), self.packer.pack_string)])
#
# Coho Data Volume Driver
#
VERSION = '1.0.0'

LOG = logging.getLogger(__name__)

coho_opts = [
    cfg.IntOpt('coho_rpc_port',
               default=2049,
               # Typo fix: 'Coha' -> 'Coho' in the operator-facing help text.
               help='RPC port to connect to Coho Data MicroArray')
]

CONF = cfg.CONF
CONF.register_opts(coho_opts)
class CohoDriver(nfs.NfsDriver):
    """Coho Data NFS based cinder driver.

    Creates file on NFS share for using it as block device on hypervisor.
    Snapshot operations are delegated to the cluster over a custom RPC
    protocol (see CohoRPCClient above).

    Version history:
        1.0.0 - Initial driver
    """

    # We have to overload this attribute of RemoteFSDriver because
    # unfortunately the base method doesn't accept exports of the form:
    # <address>:/
    # It expects a non blank export name following the /.
    # We are more permissive.
    SHARE_FORMAT_REGEX = r'.+:/.*'

    def __init__(self, *args, **kwargs):
        super(CohoDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(coho_opts)
        # Shell helpers (cp, truncate) operate on the mounted share as root.
        self._execute_as_root = True
        # Cache of CohoRPCClient instances keyed by (address, port).
        self._rpcclients = dict()
        self._backend_name = (self.configuration.volume_backend_name or
                              self.__class__.__name__)

    def _init_rpcclient(self, addr, port):
        # Create a new RPC client for this endpoint and cache it.
        client = CohoRPCClient(addr, port)
        self._rpcclients[(addr, port)] = client
        return client

    def _get_rpcclient(self, addr, port):
        # Return the cached client for (addr, port), creating on first use.
        if (addr, port) in self._rpcclients:
            return self._rpcclients[(addr, port)]

        return self._init_rpcclient(addr, port)

    def do_setup(self, context):
        """Any initialization the volume driver does while starting."""
        super(CohoDriver, self).do_setup(context)
        self._context = context
        # Validate the configured RPC port before accepting any requests.
        config = self.configuration.coho_rpc_port
        if not config:
            msg = _("Coho rpc port is not configured")
            LOG.warning(msg)
            raise exception.CohoException(msg)
        if config < 1 or config > 65535:
            msg = (_("Invalid port number %(config)s for Coho rpc port") %
                   {'config': config})
            LOG.warning(msg)
            raise exception.CohoException(msg)

    def _do_clone_volume(self, volume, src):
        """Clone volume to source.

        Create a volume on given remote share with the same contents
        as the specified source.
        """
        volume_path = self.local_path(volume)
        source_path = self.local_path(src)
        self._execute('cp', source_path, volume_path,
                      run_as_root=self._execute_as_root)

    def _get_volume_location(self, volume_id):
        """Returns provider location for given volume."""
        # The driver should not directly access db, but since volume is not
        # passed in create_snapshot and delete_snapshot we are forced to read
        # the volume info from the database
        volume = self.db.volume_get(self._context, volume_id)
        addr, path = volume.provider_location.split(":")
        return addr, path

    def create_snapshot(self, snapshot):
        """Create a volume snapshot."""
        addr, path = self._get_volume_location(snapshot['volume_id'])
        volume_path = os.path.join(path, snapshot['volume_name'])
        snapshot_name = snapshot['name']
        flags = 0  # unused at this time
        client = self._get_rpcclient(addr, self.configuration.coho_rpc_port)
        client.create_snapshot(volume_path, snapshot_name, flags)

    def delete_snapshot(self, snapshot):
        """Delete a volume snapshot."""
        addr, path = self._get_volume_location(snapshot['volume_id'])
        snapshot_name = snapshot['name']
        client = self._get_rpcclient(addr, self.configuration.coho_rpc_port)
        client.delete_snapshot(snapshot_name)

    def create_volume_from_snapshot(self, volume, snapshot):
        """Create a volume from a snapshot."""
        volume['provider_location'] = self._find_share(volume['size'])
        addr, path = volume['provider_location'].split(":")
        volume_path = os.path.join(path, volume['name'])
        snapshot_name = snapshot['name']
        client = self._get_rpcclient(addr, self.configuration.coho_rpc_port)
        client.create_volume_from_snapshot(snapshot_name, volume_path)
        return {'provider_location': volume['provider_location']}

    def _extend_file_sparse(self, path, size):
        """Extend the size of a file (with no additional disk usage)."""
        self._execute('truncate', '-s', '%sG' % size,
                      path, run_as_root=self._execute_as_root)

    def create_cloned_volume(self, volume, src_vref):
        # Place the clone on a share with enough capacity, then copy.
        volume['provider_location'] = self._find_share(volume['size'])
        self._do_clone_volume(volume, src_vref)

    def extend_volume(self, volume, new_size):
        """Extend the specified file to the new_size (sparsely)."""
        volume_path = self.local_path(volume)
        self._extend_file_sparse(volume_path, new_size)

    def get_volume_stats(self, refresh):
        """Pass in Coho Data information in volume stats."""
        _stats = super(CohoDriver, self).get_volume_stats(refresh)
        _stats["vendor_name"] = 'Coho Data'
        _stats["driver_version"] = VERSION
        _stats["storage_protocol"] = 'NFS'
        _stats["volume_backend_name"] = self._backend_name
        return _stats
| {
"content_hash": "f8a883d8b3e1bbb96248fe7c93599c6d",
"timestamp": "",
"source": "github",
"line_count": 382,
"max_line_length": 78,
"avg_line_length": 32.40575916230367,
"alnum_prop": 0.582034089991114,
"repo_name": "apporc/cinder",
"id": "0b2c3f8fbd4181e3a06a4d75bdf9857a3f7695ab",
"size": "13014",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/coho.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13595277"
},
{
"name": "Shell",
"bytes": "8222"
}
],
"symlink_target": ""
} |
import tensorflow as tf
from dnc.dnc import *
from recurrent_controller import *
# Build the DNC graph and derive an in/out adjacency map of its TF nodes.
graph = tf.Graph()
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True

graph_nodes = None
# node name -> {'in': producer names, 'out': consumer names}
nodes = {}

with graph.as_default():
    with tf.Session(graph=graph, config=config) as session:
        # Instantiating the model populates the graph with its ops.
        ncomputer = DNCDuo(
            MemRNNController,
            2048,
            512,
            100,
            256,
            256,
            4,
            1,
            testing=False,
            output_feedback=True
        )
        # tf.train.Saver(tf.trainable_variables()).restore(session, os.path.join('checkpoint', 'model.ckpt'))
        graph_nodes = graph.as_graph_def().node

        for node in graph_nodes:
            nodes[node.name] = {
                'in': node.input,
                'out': []
            }

        for node in graph_nodes:
            for innode in node.input:
                try:
                    # BUG FIX: record the *consumer* node's name on the
                    # producer's 'out' list. The original appended
                    # ``innode`` to its own list, so every 'out' entry was
                    # just the node's own name repeated.
                    nodes[innode]['out'].append(node.name)
                except KeyError:
                    # Input names with slot/control-dep decorations may not
                    # match a plain node name.
                    print(innode, " Not found")
| {
"content_hash": "e0a88c3676ce40aa8a74568fbf096132",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 109,
"avg_line_length": 25.785714285714285,
"alnum_prop": 0.5170821791320406,
"repo_name": "HimariO/VideoSum",
"id": "a52129836e84c8326d62f380ae716ff95d321c2c",
"size": "1083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tasks/video/show_graph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "315417"
},
{
"name": "Python",
"bytes": "391627"
},
{
"name": "Shell",
"bytes": "199"
}
],
"symlink_target": ""
} |
from concurrent_tree_crawler.html_multipage_navigator.web_browser import \
MechanizeBrowserCreator
from concurrent_tree_crawler.html_multipage_navigator.throttled_web_browser \
import ThrottledWebBrowserCreator
from concurrent_tree_crawler.common.threads.token_bucket import \
TokenBucketFiller, StandardTokenBucket
from concurrent_tree_crawler.html_multipage_navigator.tree_navigator import \
HTMLMultipageNavigator
from concurrent_tree_crawler.html_multipage_navigator.sample_page_analyzer \
import LevelsCreator
from concurrent_tree_crawler.abstract_cmdln_navigators_creator import \
AbstractCmdLnNavigatorsCreator
from concurrent_tree_crawler.html_multipage_navigator.cmdln.abstract_levels_creator \
import AbstractCmdLnLevelsCreator
class CmdLnNavigatorsCreator(AbstractCmdLnNavigatorsCreator):
    """Builds HTMLMultipageNavigator instances from command-line arguments,
    optionally throttling page downloads through a shared token bucket."""

    def __init__(self, levels_creator):
        """@type levels_creator: L{AbstractCmdLnLevelsCreator}"""
        # Background thread refilling the rate-limit bucket (if any).
        self.__token_filler = None
        self.__levels_creator = levels_creator

    def fill_parser(self, parser):
        # Register this component's (and its levels creator's) CLI options.
        parser.add_argument("source_address",
            help="the address of the web site to crawl.")
        parser.add_argument("--max_pages_per_second", type=float,
            help="Maximal number of web pages downloads per second "\
                "(a real number). By default no limit is imposed.")
        self.__levels_creator.fill_parser(parser)

    def create(self, args, navigators_count):
        # All navigators share one browser creator so that the download
        # rate limit applies globally, not per navigator.
        browser_creator = self.__get_browser_creator_and_start_token_filler(
            args.max_pages_per_second)
        navigators = []
        for _ in range(navigators_count):
            navigators.append(
                HTMLMultipageNavigator(args.source_address,
                    self.__levels_creator.create(args),
                    browser_creator))
        return navigators

    def __get_browser_creator_and_start_token_filler(self,
            max_pages_per_second):
        # When a rate limit is requested, wrap the browser creator in a
        # throttle backed by a token bucket, and start a daemon thread
        # that refills the bucket at the configured rate.
        self.__token_filler = None
        browser_creator = None
        if max_pages_per_second is not None:
            token_bucket = StandardTokenBucket(max_pages_per_second)
            browser_creator = ThrottledWebBrowserCreator(
                self._create_browser_creator(), token_bucket)
            self.__token_filler = TokenBucketFiller(
                token_bucket, 1, max_pages_per_second)
            # Daemonize so a hung filler cannot block interpreter exit.
            self.__token_filler.daemon = True
            self.__token_filler.start()
        else:
            browser_creator = self._create_browser_creator()
        return browser_creator

    def _create_browser_creator(self):
        """
        It is possible to override this function to use a different
        C{AbstractWebBrowserCreator}.

        @rtype: C{AbstractWebBrowserCreator}
        """
        return MechanizeBrowserCreator()

    def on_exit(self):
        # Stop the refill thread (if one was started) and let the levels
        # creator release its resources.
        if self.__token_filler is not None:
            self.__token_filler.stop()
        self.__levels_creator.on_exit()
| {
"content_hash": "34181eee75c82b64da4ab80a4d3327d3",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 85,
"avg_line_length": 37.68115942028985,
"alnum_prop": 0.7584615384615384,
"repo_name": "mkobos/tree_crawler",
"id": "f389491eb1c8abc58a1985af76254f6511ca0513",
"size": "2600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "concurrent_tree_crawler/html_multipage_navigator/cmdln/navigators_creator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "111094"
},
{
"name": "Shell",
"bytes": "1068"
}
],
"symlink_target": ""
} |
import time
DEFAULT_REDIS_KEY = 'kamikaze'
class Package(object):
    """
    Instances of this class store the values of a package on the kamikaze
    queue. This item should be treated as immutable.

    The serialized redis form is ``"<expire_time>:<payload>"`` where the
    expire_time part may be empty (meaning: never expires).
    """

    def __init__(self, payload, expire_time, score):
        self._payload = payload
        # Normalize to float; None means the package never expires.
        self._expire_time = None if expire_time is None else float(expire_time)
        self._score = score

    def __repr__(self):
        return (
            'Package(payload={payload},score={score},expire_time={expire_time}'
            ')'.format(payload=self.payload, score=self.score,
                       expire_time=self.expire_time))

    @property
    def payload(self):
        return self._payload

    @property
    def expire_time(self):
        return self._expire_time

    def ttl(self, now):
        """Seconds remaining until expiry, relative to ``now``.

        NOTE(review): raises TypeError when no expire_time is set —
        callers are expected to check ``expire_time`` first.
        """
        return self.expire_time - now

    def expired(self, now):
        """True when an expiry is set and it lies before ``now``."""
        return self.expire_time is not None and self.expire_time < now

    @property
    def raw_entry(self):
        """Serialized form stored on the redis queue."""
        expire_time = '' if self._expire_time is None else self._expire_time
        return '{expire_time}:{payload}'.format(
            expire_time=expire_time, payload=self._payload)

    @property
    def score(self):
        return self._score

    def __lt__(self, other):
        """
        The higher the score, the higher the items value
        """
        return self.score < other.score

    @staticmethod
    def value_to_expire_time_and_payload(value):
        # BUG FIX: split only on the FIRST ':' so payloads that themselves
        # contain colons survive the raw_entry round-trip. The unbounded
        # split returned >2 fields and broke tuple unpacking downstream.
        return value.split(':', 1)
def extract_package(entry):
    """Build a Package from a redis zset entry of (value, score)."""
    value, score = entry

    try:
        # Redis returns bytes; convert to a string when needed.
        value = value.decode()
    except AttributeError:
        # Already a string.
        pass

    expire_time, payload = Package.value_to_expire_time_and_payload(value)
    # An empty expire_time field means the package never expires.
    expire_time = float(expire_time) if expire_time.strip() != '' else None

    return Package(
        payload=payload,
        expire_time=expire_time,
        score=score)
def create_package_with_ttl(payload, ttl, score, now=None):
    """Build a Package expiring ``ttl`` seconds from ``now``.

    When ``now`` is omitted (or falsy), the current wall-clock time is used.
    """
    now = now or time.time()
    return Package(
        payload=payload,
        expire_time=now + ttl,
        score=score)
| {
"content_hash": "4ecedda3bea5b467b81caa182ec56282",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 79,
"avg_line_length": 24.988505747126435,
"alnum_prop": 0.6039558417663293,
"repo_name": "brendanmaguire/kamikaze",
"id": "c495c51f277eafe23bf366c3e6f424b5d5bb033e",
"size": "2174",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kamikaze/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1305"
},
{
"name": "Python",
"bytes": "16035"
}
],
"symlink_target": ""
} |
import sys
import redis
import csv
import os
import posixpath
import datetime
import logging
import logging.handlers
from email.utils import parsedate
# Make sure our PyPI directory is on the sys.path
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path = [root] + sys.path

import config

# Site configuration; PYPI_CONFIG may override the default config.ini path.
conf = config.Config(os.environ.get("PYPI_CONFIG", os.path.join(root, "config.ini")))

# (window name, strftime bucket format, retention period) for each rolling
# counter window kept in redis.
PRECISIONS = [
    ("hour", "%y-%m-%d-%H", datetime.timedelta(days=2)),
    ("daily", "%y-%m-%d", datetime.timedelta(days=32)),
]

# Log to the local syslog daemon.
logger = logging.getLogger("rsyslog-cdn")
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.handlers.SysLogHandler(address="/dev/log"))

# Redis connection used for all download counters.
store = redis.Redis.from_url(conf.count_redis_url)
def make_key(precision, when, key):
    """Compose the redis counter key for one precision window.

    ``precision`` is a (name, strftime format, retention) tuple; the
    timestamp ``when`` is bucketed with the window's format.
    """
    window_name, time_format = precision[0], precision[1]
    bucket = when.strftime(time_format)
    return "downloads:{0}:{1}:{2}".format(window_name, bucket, key)
def incr(when, project, filename):
    """Increment the rolling download counters for one request.

    Bumps one counter per precision window for the project, and one for
    the individual file (bucketed under the project). Each counter expires
    after its window's retention period.
    """
    def _bump(name):
        # One counter per precision window; expire it at the end of the
        # window's retention horizon.
        for prec in PRECISIONS:
            key = make_key(prec, when, name)
            store.incr(key)
            store.expireat(key, when + prec[2])

    # Per-project rolling counts (was a duplicated loop body).
    _bump(project)
    # Per-file rolling counts, namespaced under the project.
    _bump(":".join([project, filename]))
def process(line):
    """Parse one Fastly access-log line from rsyslog and record the download."""
    if "last message repeated" in line:
        # NOTE(review): syslog's duplicate-suppression marker is only
        # logged; the line still falls through to the parser below —
        # confirm an early return was not intended here.
        logger.error("Duplicate Line in rsyslog-cdn")
    try:
        # The syslog line is space-delimited; csv handles quoted fields.
        row = list(csv.reader([line], delimiter=" "))[0]
        # row[6] presumably holds "METHOD /path"; keep the path part only.
        # TODO confirm against the actual Fastly log format.
        path = row[6].split(" ", 1)[1]
    except Exception:
        logger.error("Invalid Fastly Log Line: '%s'" % line)
        return
    # We only care about /packages/ urls
    if not path.startswith("/packages/"):
        return
    # We need to get the Project and Filename
    directory, filename = posixpath.split(path)
    project = posixpath.basename(directory)
    # We need to get the time this request occurred
    rtime = datetime.datetime(*parsedate(row[4])[:6])
    incr(rtime, project, filename)
if __name__ == "__main__":
    # Consume stdin line by line until EOF; a failed line is logged to
    # syslog and then re-raised so the supervisor notices the crash.
    while True:
        line = sys.stdin.readline()
        if not line:
            break
        try:
            process(line)
        except Exception:
            logger.exception("Error occured while processing '%s'", line)
            raise
| {
"content_hash": "ece1a66deb1735892342e5158c57d32d",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 85,
"avg_line_length": 26.63529411764706,
"alnum_prop": 0.6435512367491166,
"repo_name": "pydotorg/pypi",
"id": "c2f7e4ff7205348dbf3b9a90e1402add947b4112",
"size": "2285",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/rsyslog-cdn.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "195"
},
{
"name": "CSS",
"bytes": "75520"
},
{
"name": "HTML",
"bytes": "84390"
},
{
"name": "Python",
"bytes": "469430"
},
{
"name": "Shell",
"bytes": "208"
}
],
"symlink_target": ""
} |
import sys
import itk
# Require exactly three CLI arguments: input path, output path, sigma.
if len(sys.argv) != 4:
    print("Usage: " + sys.argv[0] + " <inputImage> <outputImage> <sigma>")
    sys.exit(1)
inputImage = sys.argv[1]
outputImage = sys.argv[2]
sigma = float(sys.argv[3])
# Process 2-D images with unsigned-char (8-bit) pixels.
PixelType = itk.UC
Dimension = 2
ImageType = itk.Image[PixelType, Dimension]
reader = itk.ImageFileReader[ImageType].New()
reader.SetFileName(inputImage)
# Gaussian smoothing via ITK's recursive IIR approximation; sigma is
# presumably in physical (spacing) units — see ITK docs to confirm.
smoothFilter = itk.SmoothingRecursiveGaussianImageFilter[ImageType, ImageType].New()
smoothFilter.SetInput(reader.GetOutput())
smoothFilter.SetSigma(sigma)
writer = itk.ImageFileWriter[ImageType].New()
writer.SetFileName(outputImage)
writer.SetInput(smoothFilter.GetOutput())
# Update() on the writer drives the whole pipeline: read -> smooth -> write.
writer.Update()
| {
"content_hash": "8f83ad8d3494112363316f79d685fc7b",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 84,
"avg_line_length": 23.857142857142858,
"alnum_prop": 0.7544910179640718,
"repo_name": "InsightSoftwareConsortium/ITKExamples",
"id": "b58c57d4039f1298c6d9a7f3df1b94259fb2933f",
"size": "1266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Filtering/Smoothing/ComputesSmoothingWithGaussianKernel/Code.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1345317"
},
{
"name": "CMake",
"bytes": "468162"
},
{
"name": "CSS",
"bytes": "2087"
},
{
"name": "HTML",
"bytes": "8446"
},
{
"name": "JavaScript",
"bytes": "4743"
},
{
"name": "Python",
"bytes": "325825"
},
{
"name": "Shell",
"bytes": "37497"
}
],
"symlink_target": ""
} |
"""Utility methods for working with WSGI servers."""
import copy
from oslo import i18n
from oslo.serialization import jsonutils
from oslo.utils import importutils
from oslo.utils import strutils
import routes.middleware
import six
import webob.dec
import webob.exc
from keystone.common import config
from keystone.common import dependency
from keystone.common import utils
from keystone import exception
from keystone.i18n import _
from keystone.i18n import _LI
from keystone.i18n import _LW
from keystone.models import token_model
from keystone.openstack.common import log
# Global keystone configuration object and module logger.
CONF = config.CONF
LOG = log.getLogger(__name__)
# Environment variable used to pass the request context
CONTEXT_ENV = 'openstack.context'
# Environment variable used to pass the request params
PARAMS_ENV = 'openstack.params'
def validate_token_bind(context, token_ref):
    """Check a token's bind information against the request context.

    Behavior is governed by CONF.token.enforce_token_bind:
    'disabled' skips the check; 'permissive'/'strict' verify any bind
    present (strict rejects unknown bind types); 'required' demands a
    bind; any other value names a specific required bind type.
    Raises exception.Unauthorized when verification fails.
    """
    bind_mode = CONF.token.enforce_token_bind
    if bind_mode == 'disabled':
        return
    if not isinstance(token_ref, token_model.KeystoneToken):
        raise exception.UnexpectedError(_('token reference must be a '
                                          'KeystoneToken type, got: %s') %
                                        type(token_ref))
    bind = token_ref.bind
    # permissive and strict modes don't require there to be a bind
    permissive = bind_mode in ('permissive', 'strict')
    # get the named mode if bind_mode is not one of the known
    name = None if permissive or bind_mode == 'required' else bind_mode
    if not bind:
        if permissive:
            # no bind provided and none required
            return
        else:
            LOG.info(_LI("No bind information present in token"))
            raise exception.Unauthorized()
    if name and name not in bind:
        LOG.info(_LI("Named bind mode %s not in bind information"), name)
        raise exception.Unauthorized()
    for bind_type, identifier in six.iteritems(bind):
        if bind_type == 'kerberos':
            # Kerberos bind: the container must have authenticated the
            # user via SPNEGO and REMOTE_USER must match the bound name.
            if not (context['environment'].get('AUTH_TYPE', '').lower()
                    == 'negotiate'):
                LOG.info(_LI("Kerberos credentials required and not present"))
                raise exception.Unauthorized()
            if not context['environment'].get('REMOTE_USER') == identifier:
                LOG.info(_LI("Kerberos credentials do not match "
                             "those in bind"))
                raise exception.Unauthorized()
            LOG.info(_LI("Kerberos bind authentication successful"))
        elif bind_mode == 'permissive':
            # Unknown bind types are tolerated in permissive mode.
            LOG.debug(("Ignoring unknown bind for permissive mode: "
                       "{%(bind_type)s: %(identifier)s}"),
                      {'bind_type': bind_type, 'identifier': identifier})
        else:
            LOG.info(_LI("Couldn't verify unknown bind: "
                         "{%(bind_type)s: %(identifier)s}"),
                      {'bind_type': bind_type, 'identifier': identifier})
            raise exception.Unauthorized()
def best_match_language(req):
    """Pick the best available locale for the request.

    Uses the request's Accept-Language header against keystone's
    available translations; returns None when no header was sent.
    """
    accept = req.accept_language
    if not accept:
        return None
    supported = i18n.get_available_languages('keystone')
    return accept.best_match(supported)
class BaseApplication(object):
    """Base WSGI application wrapper. Subclasses need to implement __call__."""
    @classmethod
    def factory(cls, global_config, **local_config):
        """paste.deploy application factory.

        paste.deploy hands the values under the [app:APPNAME] section of
        the paste config to this method as keyword arguments, e.g.::

            [app:wadl]
            latest_version = 1.3
            paste.app_factory = keystone.fancy_api:Wadl.factory

        results in ``keystone.fancy_api.Wadl(latest_version='1.3')``.
        Subclasses can override this, but the kwarg pass-through usually
        makes that unnecessary.
        """
        return cls(**local_config)
    def __call__(self, environ, start_response):
        r"""WSGI entry point; concrete applications must override this.

        A typical implementation decorates itself with
        ``@webob.dec.wsgify()`` and returns one of: a plain string body,
        an ``exc.HTTP*`` exception page, a ``webob.Response`` (for header
        manipulation), another WSGI app to run next, or
        ``req.get_response(self.application)``; it may also set
        ``req.response`` and return None.  See the webob.dec docs for
        details.
        """
        raise NotImplementedError('You must implement __call__')
@dependency.requires('assignment_api', 'policy_api', 'token_provider_api')
class Application(BaseApplication):
    """Dispatching WSGI application: routes a matched 'action' to one of
    its own controller methods, builds the request context, and renders
    results/exceptions as WSGI responses."""
    @webob.dec.wsgify()
    def __call__(self, req):
        # Routing results were placed in the environ by RoutesMiddleware;
        # 'action' names the controller method to invoke.
        arg_dict = req.environ['wsgiorg.routing_args'][1]
        action = arg_dict.pop('action')
        del arg_dict['controller']
        LOG.debug('arg_dict: %s', arg_dict)
        # allow middleware up the stack to provide context, params and headers.
        context = req.environ.get(CONTEXT_ENV, {})
        context['query_string'] = dict(six.iteritems(req.params))
        context['headers'] = dict(six.iteritems(req.headers))
        context['path'] = req.environ['PATH_INFO']
        context['host_url'] = req.host_url
        params = req.environ.get(PARAMS_ENV, {})
        # authentication and authorization attributes are set as environment
        # values by the container and processed by the pipeline. the complete
        # set is not yet know.
        context['environment'] = req.environ
        context['accept_header'] = req.accept
        # NOTE(bugfix): a previous revision nulled req.environ here, which
        # broke every request — req.environ is still dereferenced below
        # (REQUEST_METHOD, REMOTE_ADDR) and by _get_response_code().
        params.update(arg_dict)
        context.setdefault('is_admin', False)
        # TODO(termie): do some basic normalization on methods
        method = getattr(self, action)
        # NOTE(morganfainberg): use the request method to normalize the
        # response code between GET and HEAD requests. The HTTP status should
        # be the same.
        req_method = req.environ['REQUEST_METHOD'].upper()
        # NOTE(vish): make sure we have no unicode keys for py2.6.
        params = self._normalize_dict(params)
        try:
            result = method(context, **params)
        except exception.Unauthorized as e:
            LOG.warning(
                _LW("Authorization failed. %(exception)s from "
                    "%(remote_addr)s"),
                {'exception': e, 'remote_addr': req.environ['REMOTE_ADDR']})
            return render_exception(e, context=context,
                                    user_locale=best_match_language(req))
        except exception.Error as e:
            LOG.warning(e)
            return render_exception(e, context=context,
                                    user_locale=best_match_language(req))
        except TypeError as e:
            # Bad/missing call arguments surface as TypeError; report as
            # a validation failure rather than a server error.
            LOG.exception(e)
            return render_exception(exception.ValidationError(e),
                                    context=context,
                                    user_locale=best_match_language(req))
        except Exception as e:
            LOG.exception(e)
            return render_exception(exception.UnexpectedError(exception=e),
                                    context=context,
                                    user_locale=best_match_language(req))
        # Map the handler's return value onto a WSGI response.
        if result is None:
            return render_response(status=(204, 'No Content'))
        elif isinstance(result, six.string_types):
            return result
        elif isinstance(result, webob.Response):
            return result
        elif isinstance(result, webob.exc.WSGIHTTPException):
            return result
        response_code = self._get_response_code(req)
        return render_response(body=result, status=response_code,
                               method=req_method)
    def _get_response_code(self, req):
        """Return an override status tuple, or None for the default.

        V3 POSTs answer 201 Created; everything else keeps the default.
        """
        req_method = req.environ['REQUEST_METHOD']
        controller = importutils.import_class('keystone.common.controller')
        code = None
        if isinstance(self, controller.V3Controller) and req_method == 'POST':
            code = (201, 'Created')
        return code
    def _normalize_arg(self, arg):
        """Make a routing arg usable as a Python kwarg name."""
        return str(arg).replace(':', '_').replace('-', '_')
    def _normalize_dict(self, d):
        """Normalize every key of *d* via _normalize_arg."""
        return dict([(self._normalize_arg(k), v)
                     for (k, v) in six.iteritems(d)])
    def assert_admin(self, context):
        """Raise Unauthorized unless the context has admin rights.

        Either the context is already flagged is_admin, or its token is
        validated and checked against the 'admin_required' policy rule.
        """
        if not context['is_admin']:
            try:
                user_token_ref = token_model.KeystoneToken(
                    token_id=context['token_id'],
                    token_data=self.token_provider_api.validate_token(
                        context['token_id']))
            except exception.TokenNotFound as e:
                raise exception.Unauthorized(e)
            validate_token_bind(context, user_token_ref)
            creds = copy.deepcopy(user_token_ref.metadata)
            try:
                creds['user_id'] = user_token_ref.user_id
            except exception.UnexpectedError:
                LOG.debug('Invalid user')
                raise exception.Unauthorized()
            if user_token_ref.project_scoped:
                creds['tenant_id'] = user_token_ref.project_id
            else:
                # Admin enforcement requires a project-scoped token.
                LOG.debug('Invalid tenant')
                raise exception.Unauthorized()
            creds['roles'] = user_token_ref.role_names
            # Accept either is_admin or the admin role
            self.policy_api.enforce(creds, 'admin_required', {})
    def _attribute_is_empty(self, ref, attribute):
        """Return True if *attribute* in *ref* (a dict) is missing/None/''."""
        return ref.get(attribute) is None or ref.get(attribute) == ''
    def _require_attribute(self, ref, attribute):
        """Ensure the reference contains the specified attribute.

        Raise a ValidationError if the given attribute is not present.
        """
        if self._attribute_is_empty(ref, attribute):
            msg = _('%s field is required and cannot be empty') % attribute
            raise exception.ValidationError(message=msg)
    def _require_attributes(self, ref, attrs):
        """Ensure the reference contains all of the specified attributes.

        Raise a ValidationError listing every missing/empty attribute.
        """
        missing_attrs = [attribute for attribute in attrs
                         if self._attribute_is_empty(ref, attribute)]
        if missing_attrs:
            msg = _('%s field(s) cannot be empty') % ', '.join(missing_attrs)
            raise exception.ValidationError(message=msg)
    def _get_trust_id_for_request(self, context):
        """Get the trust_id for a call.

        Retrieve the trust_id from the token; returns None if the token
        is absent, the system admin token, or not trust scoped.
        """
        if ('token_id' not in context or
                context.get('token_id') == CONF.admin_token):
            LOG.debug(('will not lookup trust as the request auth token is '
                       'either absent or it is the system admin token'))
            return None
        try:
            token_data = self.token_provider_api.validate_token(
                context['token_id'])
        except exception.TokenNotFound:
            LOG.warning(_LW('Invalid token in _get_trust_id_for_request'))
            raise exception.Unauthorized()
        token_ref = token_model.KeystoneToken(token_id=context['token_id'],
                                              token_data=token_data)
        return token_ref.trust_id
    @classmethod
    def base_url(cls, context, endpoint_type):
        """Return the configured public/admin base URL (no trailing '/')."""
        url = CONF['%s_endpoint' % endpoint_type]
        if url:
            # Substitute config values (e.g. ports) into the template.
            url = url % CONF
        else:
            # NOTE(jamielennox): if url is not set via the config file we
            # should set it relative to the url that the user used to get here
            # so as not to mess with version discovery. This is not perfect.
            # host_url omits the path prefix, but there isn't another good
            # solution that will work for all urls.
            url = context['host_url']
        return url.rstrip('/')
class Middleware(Application):
    """Base WSGI middleware.

    These classes require an application to be initialized that will be
    called next. By default the middleware will simply call its wrapped
    app, or you can override __call__ to customize its behavior.
    """
    @classmethod
    def factory(cls, global_config, **local_config):
        """Used for paste filter factories in paste.deploy config files.

        Any local configuration (that is, values under the
        [filter:APPNAME] section of the paste config) will be passed into
        the `__init__` method as kwargs.  A hypothetical configuration::

            [filter:analytics]
            redis_host = 127.0.0.1
            paste.filter_factory = keystone.analytics:Analytics.factory

        results in ``keystone.analytics.Analytics(app,
        redis_host='127.0.0.1')``.  Re-implementing `factory` in
        subclasses shouldn't be necessary thanks to the kwarg passing.
        """
        def _factory(app):
            # NOTE: only the per-filter local configuration is forwarded.
            # An earlier revision also merged global_config into a dict
            # that was never used; that dead code has been removed.
            return cls(app, **local_config)
        return _factory
    def __init__(self, application):
        super(Middleware, self).__init__()
        self.application = application
    def process_request(self, request):
        """Called on each request.

        If this returns None, the next application down the stack will be
        executed. If it returns a response then that response will be
        returned and execution will stop here.
        """
        return None
    def process_response(self, request, response):
        """Do whatever you'd like to the response, based on the request."""
        return response
    @webob.dec.wsgify()
    def __call__(self, request):
        """Run the request hook, the wrapped app, then the response hook,
        mapping any error onto a rendered exception response."""
        try:
            response = self.process_request(request)
            if response:
                return response
            response = request.get_response(self.application)
            return self.process_response(request, response)
        except exception.Error as e:
            LOG.warning(e)
            return render_exception(e, request=request,
                                    user_locale=best_match_language(request))
        except TypeError as e:
            LOG.exception(e)
            return render_exception(exception.ValidationError(e),
                                    request=request,
                                    user_locale=best_match_language(request))
        except Exception as e:
            LOG.exception(e)
            return render_exception(exception.UnexpectedError(exception=e),
                                    request=request,
                                    user_locale=best_match_language(request))
class Debug(Middleware):
    """Helper class for debugging a WSGI application.

    Can be inserted into any WSGI application chain to get information
    about the request and response.
    """
    @webob.dec.wsgify()
    def __call__(self, req):
        """Dump the request environ/body, call the wrapped app, then dump
        the response headers and stream the body through a logging
        generator."""
        import logging  # stdlib logging; the file only imports the oslo wrapper
        # NOTE(bugfix): isEnabledFor() expects a numeric level. The old
        # code passed the bound method LOG.debug, which (under py2's
        # mixed-type comparison) made the check effectively always true,
        # so the dump ran regardless of the configured log level.
        if not hasattr(LOG, 'isEnabledFor') or LOG.isEnabledFor(logging.DEBUG):
            LOG.debug('%s %s %s', ('*' * 20), 'REQUEST ENVIRON', ('*' * 20))
            for key, value in req.environ.items():
                LOG.debug('%s = %s', key,
                          strutils.mask_password(value))
            LOG.debug('')
            LOG.debug('%s %s %s', ('*' * 20), 'REQUEST BODY', ('*' * 20))
            # NOTE(review): iterating body_file reads the request body
            # before the wrapped app does — confirm webob re-buffers it
            # for non-seekable inputs.
            for line in req.body_file:
                LOG.debug('%s', strutils.mask_password(line))
            LOG.debug('')
        resp = req.get_response(self.application)
        if not hasattr(LOG, 'isEnabledFor') or LOG.isEnabledFor(logging.DEBUG):
            LOG.debug('%s %s %s', ('*' * 20), 'RESPONSE HEADERS', ('*' * 20))
            for (key, value) in six.iteritems(resp.headers):
                LOG.debug('%s = %s', key, value)
            LOG.debug('')
        resp.app_iter = self.print_generator(resp.app_iter)
        return resp
    @staticmethod
    def print_generator(app_iter):
        """Pass-through generator that logs each response-body chunk."""
        LOG.debug('%s %s %s', ('*' * 20), 'RESPONSE BODY', ('*' * 20))
        for part in app_iter:
            LOG.debug(part)
            yield part
class Router(object):
    """WSGI middleware that maps incoming requests to WSGI apps."""
    def __init__(self, mapper):
        """Create a router for the given routes.Mapper.
        Each route in `mapper` must specify a 'controller', which is a
        WSGI app to call. You'll probably want to specify an 'action' as
        well and have your controller be an object that can route
        the request to the action-specific method.
        Examples:
          mapper = routes.Mapper()
          sc = ServerController()
          # Explicit mapping of one route to a controller+action
          mapper.connect(None, '/svrlist', controller=sc, action='list')
          # Actions are all implicitly defined
          mapper.resource('server', 'servers', controller=sc)
          # Pointing to an arbitrary WSGI app. You can specify the
          # {path_info:.*} parameter so the target app can be handed just that
          # section of the URL.
          mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp())
        """
        self.map = mapper
        # RoutesMiddleware matches each request against self.map, stores
        # the result in environ['wsgiorg.routing_args'], then calls
        # self._dispatch.
        self._router = routes.middleware.RoutesMiddleware(self._dispatch,
                                                          self.map)
    @webob.dec.wsgify()
    def __call__(self, req):
        """Route the incoming request to a controller based on self.map.
        If no match, return a 404.
        """
        # wsgify runs a returned WSGI app, so handing back self._router
        # delegates the request to the routing middleware above.
        return self._router
    @staticmethod
    @webob.dec.wsgify()
    def _dispatch(req):
        """Dispatch the request to the appropriate controller.
        Called by self._router after matching the incoming request to a route
        and putting the information into req.environ. Either returns 404
        or the routed WSGI app's response.
        """
        match = req.environ['wsgiorg.routing_args'][1]
        if not match:
            # No route matched: render a localized 404 response.
            msg = _('The resource could not be found.')
            return render_exception(exception.NotFound(msg),
                                    request=req,
                                    user_locale=best_match_language(req))
        app = match['controller']
        return app
class ComposingRouter(Router):
    """Router assembled from several sub-routers sharing one mapper."""
    def __init__(self, mapper=None, routers=None):
        mapper = routes.Mapper() if mapper is None else mapper
        # Let each sub-router register its routes on the shared mapper.
        for sub_router in (routers or []):
            sub_router.add_routes(mapper)
        super(ComposingRouter, self).__init__(mapper)
class ComposableRouter(Router):
    """Router that supports use by ComposingRouter."""
    def __init__(self, mapper=None):
        mapper = routes.Mapper() if mapper is None else mapper
        self.add_routes(mapper)
        super(ComposableRouter, self).__init__(mapper)
    def add_routes(self, mapper):
        """Register this router's routes on *mapper* (no-op by default)."""
        pass
class ExtensionRouter(Router):
    """A router that allows extensions to supplement or overwrite routes.

    Expects to be subclassed.
    """
    def __init__(self, application, mapper=None):
        if mapper is None:
            mapper = routes.Mapper()
        self.application = application
        self.add_routes(mapper)
        # Any path the extension did not claim falls through to the
        # wrapped application.
        mapper.connect('{path_info:.*}', controller=self.application)
        super(ExtensionRouter, self).__init__(mapper)
    def add_routes(self, mapper):
        """Hook for subclasses to register extension routes (no-op here)."""
        pass
    @classmethod
    def factory(cls, global_config, **local_config):
        """Used for paste filter factories in paste.deploy config files.

        Any local configuration (that is, values under the
        [filter:APPNAME] section of the paste config) will be passed into
        the `__init__` method as kwargs.  A hypothetical configuration::

            [filter:analytics]
            redis_host = 127.0.0.1
            paste.filter_factory = keystone.analytics:Analytics.factory

        results in ``keystone.analytics.Analytics(app,
        redis_host='127.0.0.1')``.  Re-implementing `factory` in
        subclasses shouldn't be necessary thanks to the kwarg passing.
        """
        def _factory(app):
            # NOTE: only the per-filter local configuration is forwarded.
            # An earlier revision also merged global_config into a dict
            # that was never used; that dead code has been removed.
            return cls(app, **local_config)
        return _factory
class RoutersBase(object):
    """Base class for Routers."""
    def __init__(self):
        # (rel, resource_data) pairs collected for the JSON Home document.
        self.v3_resources = []
    def append_v3_routers(self, mapper, routers):
        """Append v3 routers.

        Subclasses should override this method to map its routes.
        Use self._add_resource() to map routes for a resource.
        """
    def _add_resource(self, mapper, controller, path, rel,
                      get_action=None, head_action=None, get_head_action=None,
                      put_action=None, post_action=None, patch_action=None,
                      delete_action=None, get_post_action=None,
                      path_vars=None):
        """Connect *path* on *mapper* to *controller* and record the
        resource for the JSON Home document.

        Each ``*_action`` keyword names a controller method to wire for
        the corresponding HTTP method(s).  The ``getattr`` call exists
        only to fail fast when the named attribute is missing.
        """
        # Data-driven replacement for eight copy-pasted if-blocks; the
        # order of entries preserves the original route-wiring order.
        action_specs = (
            (get_head_action, ['GET', 'HEAD']),
            (get_action, ['GET']),
            (head_action, ['HEAD']),
            (put_action, ['PUT']),
            (post_action, ['POST']),
            (patch_action, ['PATCH']),
            (delete_action, ['DELETE']),
            (get_post_action, ['GET', 'POST']),
        )
        for action, methods in action_specs:
            if action:
                getattr(controller, action)  # ensure the attribute exists
                mapper.connect(path, controller=controller, action=action,
                               conditions=dict(method=methods))
        resource_data = dict()
        if path_vars:
            # Templated path (e.g. '/things/{thing_id}') plus its vars.
            resource_data['href-template'] = path
            resource_data['href-vars'] = path_vars
        else:
            resource_data['href'] = path
        self.v3_resources.append((rel, resource_data))
class V3ExtensionRouter(ExtensionRouter, RoutersBase):
    """Base class for V3 extension router."""
    def __init__(self, application, mapper=None):
        # Set directly rather than via RoutersBase.__init__, since the
        # super() chain below only runs the ExtensionRouter/Router side.
        self.v3_resources = list()
        super(V3ExtensionRouter, self).__init__(application, mapper)
    def _update_version_response(self, response_data):
        # v3_resources is a list of (rel, resource_data) pairs, which
        # dict.update() accepts as key/value pairs.
        response_data['resources'].update(self.v3_resources)
    @webob.dec.wsgify()
    def __call__(self, request):
        """Intercept successful JSON Home responses for '/' and inject
        this extension's resources; all other requests pass through."""
        if request.path_info != '/':
            # Not a request for version info so forward to super.
            return super(V3ExtensionRouter, self).__call__(request)
        response = request.get_response(self.application)
        if response.status_code != 200:
            # The request failed, so don't update the response.
            return response
        if response.headers['Content-Type'] != 'application/json-home':
            # Not a request for JSON Home document, so don't update the
            # response.
            return response
        response_data = jsonutils.loads(response.body)
        self._update_version_response(response_data)
        response.body = jsonutils.dumps(response_data,
                                        cls=utils.SmarterEncoder)
        return response
def render_response(body=None, status=None, headers=None, method=None):
    """Form a WSGI response.

    *body* is JSON-encoded unless a non-JSON Content-Type header is
    supplied; a None body yields 204 No Content.  *status* is a
    (code, title) tuple; *method* is the request method, used to blank
    the body for HEAD while keeping GET-equivalent headers.
    """
    if headers is None:
        headers = []
    else:
        # Copy so the caller's list is not mutated by the appends below.
        headers = list(headers)
    headers.append(('Vary', 'X-Auth-Token'))
    if body is None:
        body = ''
        status = status or (204, 'No Content')
    else:
        content_types = [v for h, v in headers if h == 'Content-Type']
        if content_types:
            content_type = content_types[0]
        else:
            content_type = None
        JSON_ENCODE_CONTENT_TYPES = ('application/json',
                                     'application/json-home',)
        if content_type is None or content_type in JSON_ENCODE_CONTENT_TYPES:
            body = jsonutils.dumps(body, cls=utils.SmarterEncoder)
            if content_type is None:
                headers.append(('Content-Type', 'application/json'))
        status = status or (200, 'OK')
    resp = webob.Response(body=body,
                          status='%s %s' % status,
                          headerlist=headers)
    if method == 'HEAD':
        # NOTE(morganfainberg): HEAD requests should return the same status
        # as a GET request and same headers (including content-type and
        # content-length). The webob.Response object automatically changes
        # content-length (and other headers) if the body is set to b''. Capture
        # all headers and reset them on the response object after clearing the
        # body. The body can only be set to a binary-type (not TextType or
        # NoneType), so b'' is used here and should be compatible with
        # both py2x and py3x.
        stored_headers = resp.headers.copy()
        resp.body = b''
        for header, value in six.iteritems(stored_headers):
            resp.headers[header] = value
    return resp
def render_exception(error, context=None, request=None, user_locale=None):
    """Form a WSGI response based on the current error.

    Builds the standard keystone error body (code/title/message) and,
    for Unauthorized errors, attaches a WWW-Authenticate header pointing
    at the public endpoint.
    """
    error_message = error.args[0]
    message = i18n.translate(error_message, desired_locale=user_locale)
    if message is error_message:
        # translate() didn't do anything because it wasn't a Message,
        # convert to a string.
        message = six.text_type(message)
    body = {'error': {
        'code': error.code,
        'title': error.title,
        'message': message,
    }}
    headers = []
    if isinstance(error, exception.AuthPluginException):
        body['error']['identity'] = error.authentication
    elif isinstance(error, exception.Unauthorized):
        url = CONF.public_endpoint
        if not url:
            # Derive the endpoint from the request/context when it is not
            # configured explicitly.
            if request:
                context = {'host_url': request.host_url}
            if context:
                url = Application.base_url(context, 'public')
            else:
                url = 'http://localhost:%d' % CONF.public_port
        else:
            # Substitute config values (e.g. ports) into the template.
            url = url % CONF
        headers.append(('WWW-Authenticate', 'Keystone uri="%s"' % url))
    return render_response(status=(error.code, error.title),
                           body=body,
                           headers=headers)
| {
"content_hash": "08c283f7078a5db8b05c779b45dc098e",
"timestamp": "",
"source": "github",
"line_count": 783,
"max_line_length": 79,
"avg_line_length": 37.04214559386973,
"alnum_prop": 0.5901944559371122,
"repo_name": "ging/keystone",
"id": "6205321466d02adda87879348ff1b083f196ab69",
"size": "29812",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keystone/common/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3894459"
},
{
"name": "Shell",
"bytes": "4619"
}
],
"symlink_target": ""
} |
import os.path as osp
import pprint
from chainer import training
import yaml
class ParamsReport(training.Extension):
    """Trainer extension that prints and saves the experiment parameters.

    On initialization it pretty-prints *params* to stdout and dumps them
    as YAML into ``<trainer.out>/<file_name>``.
    """
    def __init__(self, params, file_name='params.yaml'):
        self._params = params
        self._file_name = file_name
    def __call__(self, trainer):
        # All work happens in initialize(); nothing to do per trigger.
        pass
    def initialize(self, trainer):
        rule = '# ' + '-' * 77
        print(rule)
        pprint.pprint(self._params)
        print(rule)
        out_path = osp.join(trainer.out, self._file_name)
        with open(out_path, 'w') as f:
            yaml.safe_dump(self._params, f, default_flow_style=False)
| {
"content_hash": "2a9f614e730bc4937e9a2dbed1978e40",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 69,
"avg_line_length": 25.636363636363637,
"alnum_prop": 0.5957446808510638,
"repo_name": "wkentaro/fcn",
"id": "193a193edb6a4517c036cb34be7780a855012f00",
"size": "564",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "fcn/extensions/params_report.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62753"
}
],
"symlink_target": ""
} |
from google.cloud import talent_v4
async def sample_batch_delete_jobs():
    """Delete a batch of jobs using the async JobService client."""
    # Create a client
    client = talent_v4.JobServiceAsyncClient()
    # Initialize request argument(s)
    request = talent_v4.BatchDeleteJobsRequest(
        parent="parent_value",
    )
    # Make the request.  NOTE(bugfix): the async client's method is a
    # coroutine, so it must be awaited to obtain the long-running
    # operation handle; the previous code called .result() on the
    # un-awaited coroutine object.
    operation = await client.batch_delete_jobs(request=request)
    print("Waiting for operation to complete...")
    # AsyncOperation.result() is itself a coroutine.
    response = await operation.result()
    # Handle the response
    print(response)
# [END jobs_v4_generated_JobService_BatchDeleteJobs_async]
| {
"content_hash": "cfff5189a3b0f9c41924e7146ad78fbf",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 58,
"avg_line_length": 23.82608695652174,
"alnum_prop": 0.7007299270072993,
"repo_name": "googleapis/python-talent",
"id": "3fc26c7d6cbd04b62186ae8243fe4e4602be88bd",
"size": "1931",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/jobs_v4_generated_job_service_batch_delete_jobs_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2538179"
},
{
"name": "Shell",
"bytes": "30660"
}
],
"symlink_target": ""
} |
import pytest
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.regions import messages
class TestInstances(helpers.TestCase):
INSTANCE_NAME = helpers.gen_random_resource_name('instance',
timestamp=False)
    @property
    def instances_page(self):
        # Navigate to Project > Compute > Instances and return that page.
        return self.home_pg.go_to_project_compute_instancespage()
    @property
    def instance_table_name_column(self):
        # Header label of the name column in the instances table.
        return 'Instance Name'
def test_create_delete_instance(self):
"""tests the instance creation and deletion functionality:
* creates a new instance in Project > Compute > Instances page
* verifies the instance appears in the instances table as active
* deletes the newly created instance via proper page (depends on user)
* verifies the instance does not appear in the table after deletion
"""
instances_page = self.home_pg.go_to_project_compute_instancespage()
instances_page.create_instance(self.INSTANCE_NAME)
self.assertTrue(instances_page.find_message_and_dismiss(messages.INFO))
self.assertFalse(
instances_page.find_message_and_dismiss(messages.ERROR))
self.assertTrue(instances_page.is_instance_active(self.INSTANCE_NAME))
instances_page = self.instances_page
instances_page.delete_instance(self.INSTANCE_NAME)
self.assertTrue(instances_page.find_message_and_dismiss(messages.INFO))
self.assertFalse(
instances_page.find_message_and_dismiss(messages.ERROR))
self.assertTrue(instances_page.is_instance_deleted(self.INSTANCE_NAME))
    @pytest.mark.skip(reason="Bug 1774697")
    def test_instances_pagination(self):
        """This test checks instance pagination
        Steps:
        1) Login to Horizon Dashboard as regular user
        2) Navigate to user settings page
        3) Change 'Items Per Page' value to 1
        4) Go to Project > Compute > Instances page
        5) Create 2 instances
        6) Go to appropriate page (depends on user)
        7) Check that only 'Next' link is available, only one instance is
        available (and it has correct name) on the first page
        8) Click 'Next' and check that on the second page only one instance is
        available (and it has correct name), there is no 'Next' link on page
        9) Go to user settings page and restore 'Items Per Page'
        10) Delete created instances via proper page (depends on user)
        """
        items_per_page = 1
        instance_count = 2
        instance_list = ["{0}-{1}".format(self.INSTANCE_NAME, item)
                         for item in range(1, instance_count + 1)]
        # Expected table states; instances are listed newest-first, so
        # instance_list[1] shows on page one and instance_list[0] on page two.
        first_page_definition = {'Next': True, 'Prev': False,
                                 'Count': items_per_page,
                                 'Names': [instance_list[1]]}
        second_page_definition = {'Next': False, 'Prev': True,
                                  'Count': items_per_page,
                                  'Names': [instance_list[0]]}
        settings_page = self.home_pg.go_to_settings_usersettingspage()
        settings_page.change_pagesize(items_per_page)
        self.assertTrue(
            settings_page.find_message_and_dismiss(messages.SUCCESS))
        instances_page = self.home_pg.go_to_project_compute_instancespage()
        instances_page.create_instance(self.INSTANCE_NAME,
                                       instance_count=instance_count)
        self.assertTrue(
            instances_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertTrue(instances_page.is_instance_active(instance_list[1]))
        instances_page = self.instances_page
        instances_page.instances_table.assert_definition(
            first_page_definition, sorting=True)
        # Walk forward one page, then re-open the page to land back on page 1.
        instances_page.instances_table.turn_next_page()
        instances_page.instances_table.assert_definition(
            second_page_definition, sorting=True)
        instances_page = self.instances_page
        instances_page.instances_table.assert_definition(
            first_page_definition, sorting=True)
        # Restore the default page size before cleanup.
        settings_page = self.home_pg.go_to_settings_usersettingspage()
        settings_page.change_pagesize()
        self.assertTrue(
            settings_page.find_message_and_dismiss(messages.SUCCESS))
        instances_page = self.instances_page
        instances_page.delete_instances(instance_list)
        self.assertTrue(
            instances_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertTrue(instances_page.are_instances_deleted(instance_list))
    @pytest.mark.skip(reason="Bug 1774697")
    def test_instances_pagination_and_filtration(self):
        """This test checks instance pagination and filtration
        Steps:
        1) Login to Horizon Dashboard as regular user
        2) Go to to user settings page
        3) Change 'Items Per Page' value to 1
        4) Go to Project > Compute > Instances page
        5) Create 2 instances
        6) Go to appropriate page (depends on user)
        7) Check filter by Name of the first and the second instance in order
        to have one instance in the list (and it should have correct name)
        and no 'Next' link is available
        8) Check filter by common part of Name of in order to have one instance
        in the list (and it should have correct name) and 'Next' link is
        available on the first page and is not available on the second page
        9) Go to user settings page and restore 'Items Per Page'
        10) Delete created instances via proper page (depends on user)
        """
        # One item per page with two instances yields two pages.
        items_per_page = 1
        instance_count = 2
        instance_list = ["{0}-{1}".format(self.INSTANCE_NAME, item)
                         for item in range(1, instance_count + 1)]
        # Expected table states; the table sorts newest-first, so the
        # second-created instance appears on page one.
        first_page_definition = {'Next': True, 'Prev': False,
                                 'Count': items_per_page,
                                 'Names': [instance_list[1]]}
        second_page_definition = {'Next': False, 'Prev': False,
                                  'Count': items_per_page,
                                  'Names': [instance_list[0]]}
        filter_first_page_definition = {'Next': False, 'Prev': False,
                                        'Count': items_per_page,
                                        'Names': [instance_list[1]]}
        # Shrink the page size via the user settings page.
        settings_page = self.home_pg.go_to_settings_usersettingspage()
        settings_page.change_pagesize(items_per_page)
        self.assertTrue(
            settings_page.find_message_and_dismiss(messages.SUCCESS))
        instances_page = self.home_pg.go_to_project_compute_instancespage()
        instances_page.create_instance(self.INSTANCE_NAME,
                                       instance_count=instance_count)
        self.assertTrue(
            instances_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertTrue(instances_page.is_instance_active(instance_list[1]))
        instances_page = self.instances_page
        # Filtering by a full instance name must yield exactly one page.
        instances_page.instances_table.set_filter_value('name')
        instances_page.instances_table.filter(instance_list[1])
        instances_page.instances_table.assert_definition(
            filter_first_page_definition, sorting=True)
        instances_page.instances_table.filter(instance_list[0])
        instances_page.instances_table.assert_definition(
            second_page_definition, sorting=True)
        # Filtering by the common prefix matches both instances and thus
        # paginates again.
        instances_page.instances_table.filter(self.INSTANCE_NAME)
        instances_page.instances_table.assert_definition(
            first_page_definition, sorting=True)
        instances_page.instances_table.filter('')
        # Restore the default page size, then clean up the instances.
        settings_page = self.home_pg.go_to_settings_usersettingspage()
        settings_page.change_pagesize()
        self.assertTrue(
            settings_page.find_message_and_dismiss(messages.SUCCESS))
        instances_page = self.instances_page
        instances_page.delete_instances(instance_list)
        self.assertTrue(
            instances_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertTrue(instances_page.are_instances_deleted(instance_list))
    @pytest.mark.skip(reason="Bug 1774697")
    def test_filter_instances(self):
        """This test checks filtering of instances by Instance Name
        Steps:
        1) Login to Horizon dashboard as regular user
        2) Go to Project > Compute > Instances
        3) Create 2 instances
        4) Go to appropriate page (depends on user)
        5) Use filter by Instance Name
        6) Check that filtered table has one instance only (which name is equal
        to filter value) and no other instances in the table
        7) Check that filtered table has both instances (search by common part
        of instance names)
        8) Set nonexistent instance name. Check that 0 rows are displayed
        9) Clear filter and delete instances via proper page (depends on user)
        """
        instance_count = 2
        instance_list = ["{0}-{1}".format(self.INSTANCE_NAME, item)
                         for item in range(1, instance_count + 1)]
        instances_page = self.home_pg.go_to_project_compute_instancespage()
        instances_page.create_instance(self.INSTANCE_NAME,
                                       instance_count=instance_count)
        self.assertTrue(
            instances_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertTrue(instances_page.is_instance_active(instance_list[0]))
        instances_page = self.instances_page
        # Exact-name filter: only the matching instance may be listed.
        instances_page.instances_table.set_filter_value('name')
        instances_page.instances_table.filter(instance_list[0])
        self.assertTrue(instances_page.is_instance_present(instance_list[0]))
        for instance in instance_list[1:]:
            self.assertFalse(instances_page.is_instance_present(instance))
        # Common-prefix filter: every created instance must be listed.
        instances_page.instances_table.filter(self.INSTANCE_NAME)
        for instance in instance_list:
            self.assertTrue(instances_page.is_instance_present(instance))
        # A name that matches nothing must produce an empty table.
        nonexistent_instance_name = "{0}_test".format(self.INSTANCE_NAME)
        instances_page.instances_table.filter(nonexistent_instance_name)
        self.assertEqual(instances_page.instances_table.rows, [])
        # Clear the filter so both rows are visible for deletion.
        instances_page.instances_table.filter('')
        instances_page.delete_instances(instance_list)
        self.assertTrue(
            instances_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertTrue(instances_page.are_instances_deleted(instance_list))
class TestAdminInstances(helpers.AdminTestCase, TestInstances):
    """Re-run the instance tests as the admin user on the admin pages."""
    INSTANCE_NAME = helpers.gen_random_resource_name('instance',
                                                     timestamp=False)
    @property
    def instances_page(self):
        # Admin variant: reach the instances table via the admin panel.
        self.home_pg.go_to_admin_overviewpage()
        return self.home_pg.go_to_admin_compute_instancespage()
    @property
    def instance_table_name_column(self):
        # The admin table labels the instance name column simply 'Name'.
        return 'Name'
    @pytest.mark.skip(reason="Bug 1774697")
    def test_instances_pagination_and_filtration(self):
        super().test_instances_pagination_and_filtration()
| {
"content_hash": "9327f3992057c068270917b144a3319c",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 79,
"avg_line_length": 46.01639344262295,
"alnum_prop": 0.6389383683648023,
"repo_name": "ChameleonCloud/horizon",
"id": "fcd696f55ac39b244e5db891e45a75729fd6215e",
"size": "11800",
"binary": false,
"copies": "1",
"ref": "refs/heads/chameleoncloud/xena",
"path": "openstack_dashboard/test/integration_tests/tests/test_instances.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "601681"
},
{
"name": "JavaScript",
"bytes": "2486133"
},
{
"name": "Python",
"bytes": "5346021"
},
{
"name": "SCSS",
"bytes": "129668"
},
{
"name": "Shell",
"bytes": "7466"
}
],
"symlink_target": ""
} |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def CustomizationEvent(vim, *args, **kwargs):
    '''Base for customization events.

    Creates a ``CustomizationEvent`` object through the vim client
    factory and populates its properties from positional and keyword
    arguments.

    :param vim: connected vim service whose suds factory builds the object.
    :param args: values assigned in order to the required (then optional)
        property names.
    :param kwargs: property values assigned by name.
    :raises IndexError: if fewer than the 5 required values are supplied.
    :raises InvalidArgumentError: if a keyword is not a known property.
    '''
    obj = vim.client.factory.create('ns0:CustomizationEvent')

    # do some validation checking...
    if (len(args) + len(kwargs)) < 5:
        # The message must match the check above: 5 required properties,
        # counting positional and keyword arguments together.
        raise IndexError('Expected at least 5 arguments got: %d' % (len(args) + len(kwargs)))

    required = [ 'template', 'chainId', 'createdTime', 'key', 'userName' ]
    optional = [ 'logLocation', 'changeTag', 'computeResource', 'datacenter', 'ds', 'dvs',
        'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty', 'dynamicType' ]

    # Positional arguments map onto the property names in declaration order.
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)

    # Keyword arguments must name a known (required or optional) property.
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
| {
"content_hash": "2786eb79344d55fb62121f41fc99fab3",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 124,
"avg_line_length": 34.84848484848485,
"alnum_prop": 0.5947826086956521,
"repo_name": "xuru/pyvisdk",
"id": "6f35913f5607773cf108a9a32a5a324c96eedf4a",
"size": "1151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvisdk/do/customization_event.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "369"
},
{
"name": "Python",
"bytes": "3037849"
},
{
"name": "Shell",
"bytes": "4517"
}
],
"symlink_target": ""
} |
"""Sphinx configuration for building the Bot Irc documentation."""
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Bot Irc'
copyright = '2014, Salas'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2.0'
# The full version, including alpha/beta/rc tags.
release = '0.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'BotIrcdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  ('index', 'BotIrc.tex', 'Bot Irc Documentation',
   'Salas', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'botirc', 'Bot Irc Documentation',
     ['Salas'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'BotIrc', 'Bot Irc Documentation',
   'Salas', 'BotIrc', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| {
"content_hash": "d78b60c232143be6ca4c90048d3f66dc",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 79,
"avg_line_length": 31.294117647058822,
"alnum_prop": 0.7026315789473684,
"repo_name": "salas106/irc-ltl-framework",
"id": "8b29c44f2e97a95010009b0c4ad7156837ef1158",
"size": "8423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6762"
},
{
"name": "Python",
"bytes": "29731"
}
],
"symlink_target": ""
} |
from django.contrib.contenttypes.models import ContentType
from django.db import transaction
from django.db.models import Q
from django.forms import ModelChoiceField
from django.http import QueryDict
from django.template import loader
from django.urls.base import reverse
from django.utils.decorators import method_decorator
from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.csrf import csrf_protect
from xadmin.filters import FILTER_PREFIX, SEARCH_VAR
from xadmin.models import Bookmark
from xadmin.plugins.relate import RELATE_PREFIX
from xadmin.plugins.utils import get_context_dict
from xadmin.sites import site
from xadmin.views import ModelAdminView, BaseAdminPlugin, ListAdminView
from xadmin.views.dashboard import widget_manager, BaseWidget, PartialBaseWidget
from xadmin.views.list import COL_LIST_VAR, ORDER_VAR
# Method decorator applying CSRF protection to class-based view methods.
csrf_protect_m = method_decorator(csrf_protect)
class BookmarkPlugin(BaseAdminPlugin):
    """Changelist plugin that renders a menu of saved query bookmarks.

    Bookmarks come from two sources: the static ``list_bookmarks``
    option declared on the admin class and :class:`Bookmark` model rows
    owned by (or shared with) the current user.
    """

    # [{'title': "Female", 'query': {'gender': True}, 'order': ('-age'), 'cols': ('first_name', 'age', 'phones'), 'search': 'Tom'}]
    list_bookmarks = []
    show_bookmarks = True

    def has_change_permission(self, obj=None):
        # Superusers may edit any bookmark; other users only their own.
        if not obj or self.user.is_superuser:
            return True
        else:
            return obj.user == self.user

    def get_context(self, context):
        """Inject the bookmark menu data into the changelist context."""
        if not self.show_bookmarks:
            return context
        bookmarks = []
        # Rebuild the current querystring from the list-related GET
        # parameters only, sorted so it compares equal to stored queries.
        current_qs = '&'.join([
            '%s=%s' % (k, v)
            for k, v in sorted(filter(
                lambda i: bool(i[1] and (
                    i[0] in (COL_LIST_VAR, ORDER_VAR, SEARCH_VAR)
                    or i[0].startswith(FILTER_PREFIX)
                    or i[0].startswith(RELATE_PREFIX)
                )),
                self.request.GET.items()
            ))
        ])
        model_info = (self.opts.app_label, self.opts.model_name)
        has_selected = False
        menu_title = _("Bookmark")
        list_base_url = reverse('xadmin:%s_%s_changelist' %
                                model_info, current_app=self.admin_site.name)
        # local bookmarks
        for bk in self.list_bookmarks:
            title = bk['title']
            params = dict([
                (FILTER_PREFIX + k, v)
                for (k, v) in bk['query'].items()
            ])
            if 'order' in bk:
                params[ORDER_VAR] = '.'.join(bk['order'])
            if 'cols' in bk:
                params[COL_LIST_VAR] = '.'.join(bk['cols'])
            if 'search' in bk:
                params[SEARCH_VAR] = bk['search']

            def check_item(i):
                # Keep truthy values, and falsy ones that are an explicit
                # boolean False (a real filter value).
                return bool(i[1]) or i[1] == False
            bk_qs = '&'.join([
                '%s=%s' % (k, v)
                for k, v in sorted(filter(check_item, params.items()))
            ])
            url = list_base_url + '?' + bk_qs
            # Highlight the bookmark whose query matches the current one.
            selected = (current_qs == bk_qs)
            bookmarks.append(
                {'title': title, 'selected': selected, 'url': url})
            if selected:
                menu_title = title
                has_selected = True
        content_type = ContentType.objects.get_for_model(self.model)
        bk_model_info = (Bookmark._meta.app_label, Bookmark._meta.model_name)
        # Stored bookmarks: the user's own plus any marked as shared.
        bookmarks_queryset = Bookmark.objects.filter(
            content_type=content_type,
            url_name='xadmin:%s_%s_changelist' % model_info
        ).filter(Q(user=self.user) | Q(is_share=True))
        for bk in bookmarks_queryset:
            selected = (current_qs == bk.query)
            # Editable bookmarks link to the change form, others to detail.
            if self.has_change_permission(bk):
                change_or_detail = 'change'
            else:
                change_or_detail = 'detail'
            bookmarks.append({'title': bk.title, 'selected': selected, 'url': bk.url, 'edit_url':
                              reverse('xadmin:%s_%s_%s' % (bk_model_info[0], bk_model_info[1], change_or_detail),
                                      args=(bk.id,))})
            if selected:
                menu_title = bk.title
                has_selected = True
        post_url = reverse('xadmin:%s_%s_bookmark' % model_info,
                           current_app=self.admin_site.name)
        new_context = {
            'bk_menu_title': menu_title,
            'bk_bookmarks': bookmarks,
            'bk_current_qs': current_qs,
            'bk_has_selected': has_selected,
            'bk_list_base_url': list_base_url,
            'bk_post_url': post_url,
            'has_add_permission_bookmark': self.admin_view.request.user.has_perm('xadmin.add_bookmark'),
            'has_change_permission_bookmark': self.admin_view.request.user.has_perm('xadmin.change_bookmark')
        }
        context.update(new_context)
        return context

    # Media
    def get_media(self, media):
        # Attach the bookmark JavaScript bundled with xadmin.
        return media + self.vendor('xadmin.plugin.bookmark.js')

    # Block Views
    def block_nav_menu(self, context, nodes):
        # Prepend the bookmark dropdown to the changelist nav menu.
        if self.show_bookmarks:
            nodes.insert(0, loader.render_to_string('xadmin/blocks/model_list.nav_menu.bookmarks.html',
                                                    context=get_context_dict(context)))
class BookmarkView(ModelAdminView):
    """Admin view that saves the current changelist query as a Bookmark."""

    @csrf_protect_m
    @transaction.atomic
    def post(self, request):
        """Create a bookmark from POST data and return its title and URL.

        Expects ``title`` (required) plus optional ``query`` and
        ``is_share`` parameters.
        """
        model_info = (self.opts.app_label, self.opts.model_name)
        url_name = 'xadmin:%s_%s_changelist' % model_info
        bookmark = Bookmark(
            content_type=ContentType.objects.get_for_model(self.model),
            title=request.POST[
                'title'], user=self.user, query=request.POST.get('query', ''),
            is_share=request.POST.get('is_share', 0), url_name=url_name)
        bookmark.save()
        content = {'title': bookmark.title, 'url': bookmark.url}
        return self.render_response(content)
class BookmarkAdmin:
    """Admin options for managing saved bookmarks inside xadmin."""

    model_icon = 'fa fa-book'
    list_display = ('title', 'user', 'url_name', 'query')
    list_display_links = ('title',)
    user_fields = ['user']
    hidden_menu = True

    def queryset(self):
        # Regular users see only their own bookmarks plus shared ones.
        if self.user.is_superuser:
            return Bookmark.objects.all()
        return Bookmark.objects.filter(Q(user=self.user) | Q(is_share=True))

    def get_list_display(self):
        # Hide the owner column from non-superusers.
        list_display = super(BookmarkAdmin, self).get_list_display()
        if not self.user.is_superuser:
            list_display.remove('user')
        return list_display

    def has_change_permission(self, obj=None):
        # Superusers may edit any bookmark; other users only their own.
        if not obj or self.user.is_superuser:
            return True
        else:
            return obj.user == self.user
@widget_manager.register
class BookmarkWidget(PartialBaseWidget):
    """Dashboard widget rendering the result list of a saved bookmark."""

    # NOTE: a duplicate plain-string ``widget_title = 'bookmark'`` that was
    # immediately shadowed by the translated value has been removed; only
    # the translated title was ever used.
    widget_title = _('bookmark')
    widget_type = 'bookmark'
    widget_icon = 'fa fa-bookmark'
    description = _('Bookmark Widget, can show user\'s bookmark list data in widget.')
    template = "xadmin/widgets/list.html"

    bookmark = ModelChoiceField(
        label=_('Bookmark'), queryset=Bookmark.objects.all(), required=False)

    def setup(self):
        """Resolve the chosen bookmark into a ListAdminView over its query."""
        BaseWidget.setup(self)
        bookmark = self.cleaned_data['bookmark']
        model = bookmark.content_type.model_class()
        data = QueryDict(bookmark.query)
        self.bookmark = bookmark
        if not self.title:
            # Default the widget title to the bookmark's display name.
            self.title = smart_text(bookmark)
        req = self.make_get_request("", data.items())
        self.list_view = self.get_view_class(
            ListAdminView, model, list_per_page=10, list_editable=[])(req)

    def has_perm(self):
        # Visibility is governed by the bookmark's own sharing flags.
        return True

    def context(self, context):
        """Fill the template context with at most five result columns."""
        list_view = self.list_view
        list_view.make_result_list()
        base_fields = list_view.base_list_display
        if len(base_fields) > 5:
            # Keep the widget compact: show only the first five columns.
            base_fields = base_fields[0:5]
        context['result_headers'] = [c for c in list_view.result_headers(
        ).cells if c.field_name in base_fields]
        context['results'] = [
            [o for i, o in enumerate(filter(
                lambda c: c.field_name in base_fields,
                r.cells
            ))]
            for r in list_view.results()
        ]
        context['result_count'] = list_view.result_count
        context['page_url'] = self.bookmark.url
# Wire everything up: the bookmark model admin, the changelist plugin,
# and the POST endpoint that saves new bookmarks.
site.register(Bookmark, BookmarkAdmin)
site.register_plugin(BookmarkPlugin, ListAdminView)
site.register_modelview(r'^bookmark/$', BookmarkView, name='%s_%s_bookmark')
| {
"content_hash": "cd2dedd8a2f8be4a8b0f733cd96644d4",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 128,
"avg_line_length": 30.617021276595743,
"alnum_prop": 0.6813064628214037,
"repo_name": "alexsilva/django-xadmin",
"id": "9c7b0b3cff6c39a9cdb7d03246722bb8035707b8",
"size": "7195",
"binary": false,
"copies": "1",
"ref": "refs/heads/python3-dj32",
"path": "xadmin/plugins/bookmark.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "20599"
},
{
"name": "HTML",
"bytes": "130392"
},
{
"name": "JavaScript",
"bytes": "79612"
},
{
"name": "Python",
"bytes": "424173"
},
{
"name": "Shell",
"bytes": "713"
}
],
"symlink_target": ""
} |
from .utils import connect_foirequest
def connect_campaign(sender, **kwargs):
    """Attach a FOI request to its campaign, keyed by its reference tag.

    The reference is taken from ``kwargs['reference']`` when present,
    otherwise from ``sender.reference``. References look like
    ``namespace@rest`` or ``namespace:rest``; the namespace part is
    handed to :func:`connect_foirequest`. References without a
    separator are ignored.
    """
    reference = kwargs.get("reference") or sender.reference
    if not reference:
        return
    separator = "@" if "@" in reference else ":"
    namespace, found, _rest = reference.partition(separator)
    if not found:
        # No namespace separator: nothing to connect.
        return
    connect_foirequest(sender, namespace)
| {
"content_hash": "631bc182b5d91a6dc1c1301b48fb1e94",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 41,
"avg_line_length": 24.333333333333332,
"alnum_prop": 0.6118721461187214,
"repo_name": "fin/froide",
"id": "c9d88bc712674e5de83cfbbbe6ba3d008419a6a2",
"size": "438",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "froide/campaign/listeners.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "302838"
},
{
"name": "JavaScript",
"bytes": "47357"
},
{
"name": "Makefile",
"bytes": "535"
},
{
"name": "Python",
"bytes": "1706123"
},
{
"name": "SCSS",
"bytes": "39397"
},
{
"name": "TypeScript",
"bytes": "57910"
},
{
"name": "Vue",
"bytes": "218866"
}
],
"symlink_target": ""
} |
"""Entity to track connections to websocket API."""
from homeassistant.core import callback
from homeassistant.helpers.entity import Entity
from .const import (
SIGNAL_WEBSOCKET_CONNECTED,
SIGNAL_WEBSOCKET_DISCONNECTED,
DATA_CONNECTIONS,
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the API streams platform."""
    # A single sensor entity tracks the websocket connection count.
    entity = APICount()
    async_add_entities([entity])
class APICount(Entity):
    """Entity to represent how many people are connected to the stream API."""

    def __init__(self):
        """Initialize the API count."""
        # Number of connected websocket clients; None until first update.
        self.count = None

    async def async_added_to_hass(self):
        """Added to hass."""
        # Refresh the count whenever a client connects or disconnects.
        self.hass.helpers.dispatcher.async_dispatcher_connect(
            SIGNAL_WEBSOCKET_CONNECTED, self._update_count
        )
        self.hass.helpers.dispatcher.async_dispatcher_connect(
            SIGNAL_WEBSOCKET_DISCONNECTED, self._update_count
        )
        self._update_count()

    @property
    def name(self):
        """Return name of entity."""
        return "Connected clients"

    @property
    def state(self):
        """Return current API count."""
        return self.count

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return "clients"

    @callback
    def _update_count(self):
        # Read the live connection count and push the new state.
        self.count = self.hass.data.get(DATA_CONNECTIONS, 0)
        self.async_schedule_update_ha_state()
| {
"content_hash": "599ea6fee5ffc1299428aa3a501e68a8",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 86,
"avg_line_length": 27.054545454545455,
"alnum_prop": 0.6438172043010753,
"repo_name": "fbradyirl/home-assistant",
"id": "1ae76b562525c888eeae04819f1112f47e54e061",
"size": "1488",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/websocket_api/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "16494727"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17784"
}
],
"symlink_target": ""
} |
import argparse
import time
# from pygecko import TopicSub
# from pygecko.transport import zmqTCP, GeckoCore
from pygecko.multiprocessing import geckopy
from pygecko.test import GeckoSimpleProcess
try:
import simplejson as json
except ImportError:
import json
def handleArgs():
    # Build the command line interface and return the parsed arguments
    # as a plain dict. 'mode' is a two-element list: [command, topic].
    parser = argparse.ArgumentParser(description="""
    A simple zero MQ message tool. It will subscribe to a topic and print the messages.
    Format:
    topic_echo host port topic
    geckotopic mode[0] mode[1] [options]
    geckotopic pub <topic-name> -m [data...]
    geckotopic echo <topic-name>
    Examples:
    geckotopic echo hello
    geckotopic pub hello "{'a': 3.14, 'k': [1,2,3,4]}"
    """)
    parser.add_argument('-m', '--msg', help='data to publish', default=None)
    parser.add_argument('-r', '--rate', help='publish rate in hertz, default 10 hz', default=10)
    # parser.add_argument('-o', '--once', help='publish a message once and exit')
    parser.add_argument('mode', nargs=2, help="run geckotopic as pub (publisher of topic), echo (subscribed to topic)", default=None)
    # parser.add_argument('-i', '--info', nargs=2, help='subscribe to messages on host port: ex. 1.2.3.4 9000', default=None)
    # parser.add_argument('-v', '--verbose', help='display info to screen', action='store_true')
    args = vars(parser.parse_args())
    return args
def publisher(**kwargs):
    """Publish 'msg' on 'topic' at 'rate' hertz until shutdown."""
    geckopy.init_node(**kwargs)
    topic = kwargs.get('topic')
    msg = kwargs.get('msg')
    hertz = kwargs.get('rate', 10)
    p = geckopy.Publisher([topic])
    rate = geckopy.Rate(hertz)
    cnt = 0
    start = time.time()
    while not geckopy.is_shutdown():
        p.pub(topic, msg) # topic msg
        if cnt % hertz == 0:
            # Report throughput roughly once per second.
            print(">> {}[{:.1f}]: published {} msgs".format(topic, time.time()-start, hertz))
        cnt += 1
        rate.sleep()
def subscriber(**kwargs):
    """Print every message received on 'topic' until shutdown."""
    # geckopy = GeckoPy()
    geckopy.init_node(**kwargs)
    def f(topic, msg):
        # Callback: echo the topic and payload to stdout.
        print(">> {}: {}".format(topic, msg))
    topic = kwargs.get('topic')
    geckopy.Subscriber([topic], f)
    geckopy.spin()
if __name__ == '__main__':
    args = handleArgs()

    # 'mode' is a two-element list [command, topic]; validate the command
    # and stop before it is used, instead of falling through and crashing
    # on args['mode'][1] below.
    if args['mode'] is None or args['mode'][0] not in ['pub', 'echo', 'list']:
        print("Error: please do geckotopic --help")  # FIXME: print help
        exit(1)
    if args['mode'][0] == 'list':
        # BUGFIX: the old check compared the whole list to the string
        # 'list' and could never match; 'list' mode is still unsupported.
        raise NotImplementedError()
    args['topic'] = args['mode'][1]
    if args['msg'] is not None:
        # Command line messages use single quotes; convert to valid JSON
        # before decoding into a Python object.
        msg = args['msg']
        args['msg'] = json.loads(args['msg'].replace("'", '"'))
        print(args['msg'])
        print(type(args['msg']))
    # check port > 8000
    # check valid host?
    # if args['info'] is not None:
    #     args['host_port'] = (args['info'][0], args['info'][1])
    if args['mode'][0] == 'pub':
        p = GeckoSimpleProcess()
        p.start(func=publisher, name='publisher', kwargs=args)
    elif args['mode'][0] == 'echo':
        p = GeckoSimpleProcess()
        p.start(func=subscriber, name='subscriber', kwargs=args)
    # Block until Ctrl-C, then shut the worker process down.
    while True:
        try:
            time.sleep(1)
        except KeyboardInterrupt:
            break
    # shutdown the processes
    p.join(0.1)
| {
"content_hash": "1dc085ff1d2ff0421a2146b4bcab6d60",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 133,
"avg_line_length": 29.174311926605505,
"alnum_prop": 0.6,
"repo_name": "walchko/pygecko",
"id": "c2b2d4cafa0a3f87f2ec6405d08536767e43dea7",
"size": "3435",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/geckotopic.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "164042"
},
{
"name": "Shell",
"bytes": "776"
}
],
"symlink_target": ""
} |
from unittest import mock
from ddt import data, ddt
from pylxd.deprecated import connection, exceptions
from pylxd.deprecated.tests import LXDAPITestBase, annotated_data, fake_api
@ddt
@mock.patch.object(
    connection.LXDConnection, "get_object", return_value=("200", fake_api.fake_host())
)
class LXDAPIHostTestObject(LXDAPITestBase):
    """Tests for host-information calls that go through get_object."""

    def test_get_host_info(self, ms):
        # host_info() must flatten the fake API payload into this dict.
        result = self.lxd.host_info()
        self.assertEqual(
            result,
            {
                "lxd_api_compat_level": 1,
                "lxd_trusted_host": True,
                "lxd_backing_fs": "ext4",
                "lxd_driver": "lxc",
                "lxd_version": 0.12,
                "lxc_version": "1.1.2",
                "kernel_version": "3.19.0-22-generic",
            },
        )
        ms.assert_called_once_with("GET", "/1.0")

    # (accessor-method suffix, expected value) pairs driving the
    # annotated-data tests below.
    host_data = (
        ("lxd_api_compat", 1),
        ("lxd_host_trust", True),
        ("lxd_backing_fs", "ext4"),
        ("lxd_driver", "lxc"),
        ("lxc_version", "1.1.2"),
        ("lxd_version", 0.12),
        ("kernel_version", "3.19.0-22-generic"),
    )

    @annotated_data(*host_data)
    def test_get_host_data(self, method, expected, ms):
        result = getattr(self.lxd, "get_" + method)(data=None)
        self.assertEqual(expected, result)
        ms.assert_called_once_with("GET", "/1.0")

    @annotated_data(*host_data)
    def test_get_host_data_fail(self, method, expected, ms):
        # The accessors swallow PyLXDException and return None.
        ms.side_effect = exceptions.PyLXDException
        result = getattr(self.lxd, "get_" + method)(data=None)
        self.assertEqual(None, result)
        ms.assert_called_once_with("GET", "/1.0")
@ddt
@mock.patch.object(connection.LXDConnection, "get_status")
class LXDAPIHostTestStatus(LXDAPITestBase):
    """Tests for host_ping(), which relies on get_status."""

    @data(True, False)
    def test_get_host_ping(self, value, ms):
        # host_ping() returns whatever get_status reports.
        ms.return_value = value
        self.assertEqual(value, self.lxd.host_ping())
        ms.assert_called_once_with("GET", "/1.0")

    def test_get_host_ping_fail(self, ms):
        # Any transport error is re-raised as PyLXDException.
        ms.side_effect = Exception
        self.assertRaises(exceptions.PyLXDException, self.lxd.host_ping)
        ms.assert_called_once_with("GET", "/1.0")
| {
"content_hash": "884f028034c486175e7483337bc0c520",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 86,
"avg_line_length": 33,
"alnum_prop": 0.5863177226813591,
"repo_name": "lxc/pylxd",
"id": "1220ff38879e698442c5ec17574bdc3a178f8a34",
"size": "2788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pylxd/deprecated/tests/test_host.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "444517"
},
{
"name": "Shell",
"bytes": "6718"
}
],
"symlink_target": ""
} |
# print_function makes print(...) behave identically on Python 2 and 3.
from __future__ import print_function

import argparse
import os
import re
import sys

from CalFord import *
# Module-level state filled in by parseArgs() / writeOutput().
pmapFile = ""                 # input pmap file path (positional CLI argument)
outputResultFile = ""         # per-protein result rows (tab-separated)
outputHistogramFile = ""      # cysteine-count histogram output path
configFile = "calford.conf"   # default config file; overridden by --config
outputHandle = None           # lazily opened handle for the result file
def argsSanityCheck():
    """Validate the parsed CLI arguments.

    Returns:
        bool: True when the module-level `pmapFile` points at an existing
        file, False otherwise (an error message is printed in that case).
    """
    isOk = True
    if not os.path.isfile(pmapFile):
        # Fixed: Python-2-only `print "..."` statement -> print() call
        # (behaviour-identical under the print_function future import).
        print("Error: cannot find %s" % pmapFile)
        isOk = False
    return isOk
def parseArgs():
    """Parse command-line arguments into the module-level globals.

    Populates pmapFile, outputResultFile, outputHistogramFile and (when
    --config is given) configFile.  Exits with status 1 when the input
    pmap file does not exist.
    """
    global pmapFile
    global outputResultFile
    global outputHistogramFile
    global configFile
    parser = argparse.ArgumentParser(description="Check how many proteins in the pmap file have signal peptide")
    parser.add_argument("pmapFile", help="input pmap file to check")
    parser.add_argument("--outputResult", help="write the results to this file",
                        nargs=1, required=True)
    parser.add_argument("--outputHistogram", help="write the histogram to this file",
                        nargs=1, required=True)
    parser.add_argument("--config", help="config file", nargs=1)
    args = parser.parse_args()
    pmapFile = args.pmapFile
    # nargs=1 wraps each value in a one-element list.
    outputResultFile = args.outputResult[0]
    outputHistogramFile = args.outputHistogram[0]
    if args.config:
        configFile = args.config[0]
    if not argsSanityCheck():
        # Fixed: bare Python-2 `print` statement -> print() (blank line).
        print()
        exit(1)
def loadPmap(path):
    """Read a pmap file and return the list of protein identifiers.

    Each relevant line starts with "<protein> -"; lines that do not match
    that pattern are skipped.

    Args:
        path: path of the pmap file to read.

    Returns:
        list of protein id strings, or None if the file cannot be opened.
    """
    # Raw string avoids the invalid-escape warning for \S on Python 3.
    proteinRe = re.compile(r'(\S+) -')
    pmapData = []
    try:
        # `with` guarantees the handle is closed even if a read fails
        # (the original leaked the handle on mid-read errors).
        with open(path, 'r') as f:
            for line in f:
                match = proteinRe.match(line)
                if match is None:
                    continue
                pmapData.append(match.group(1))
    except IOError:
        # Fixed: `except IOError,e` is Python-2-only syntax and `e` was
        # unused anyway.
        return None
    return pmapData
def writeOutput(msg):
    """Append one line to the result file, opening it lazily on first use.

    Args:
        msg: the line to write (a newline is appended).

    Exits with status 1 if the result file cannot be opened.
    """
    global outputHandle
    if outputHandle is None:
        try:
            outputHandle = open(outputResultFile, 'w')
        except IOError as e:
            # Fixed: Python-2-only `except IOError,e` and `print` statement.
            print("Error open output file for writing: %s" % str(e))
            exit(1)
    outputHandle.write(msg + "\n")
def countC(seqString):
    """Return how many cysteine residues ('C' or 'c') the sequence contains."""
    return sum(1 for residue in seqString if residue in "Cc")
def doCheck():
    """Classify every protein in pmapData by signal-peptide status and
    count its cysteines.

    Writes one tab-separated row per protein via writeOutput() and a
    cysteine-count histogram (with vs. without signal peptide, as counts
    and percentages) to outputHistogramFile.

    Relies on the module globals pmapData, pepSignal and fastaData being
    loaded before the call.
    """
    notRe = re.compile("NOT")
    signalLocation = re.compile(r"after AA (\d+)")
    haveSignalHistogram = {}
    noSignalHistogram = {}
    for p in pmapData:
        if p not in pepSignal:
            # No signal-peptide prediction available for this protein.
            writeOutput('%s\t-\t-\t-' % p)
        else:
            s = pepSignal[p]
            m = notRe.search(s)
            if m is None:
                # Prediction says the protein carries a signal peptide.
                containSignal = "1"
                seq = fastaData[p]
                m = signalLocation.search(s)
                if m is None:
                    loc = 0
                else:
                    loc = int(m.group(1))
                # Count cysteines only after the predicted cleavage site.
                c = countC(seq[loc:])
                if c in haveSignalHistogram:
                    haveSignalHistogram[c] += 1
                else:
                    haveSignalHistogram[c] = 1
            else:
                containSignal = "0"
                c = countC(fastaData[p])
                if c in noSignalHistogram:
                    noSignalHistogram[c] += 1
                else:
                    noSignalHistogram[c] = 1
            writeOutput('%s\t%s\t%s\t%d' % (p, containSignal, s, c))
    # Fixed: the original called max()/min() directly on each histogram's
    # keys, raising ValueError whenever either histogram was empty.
    allKeys = list(haveSignalHistogram.keys()) + list(noSignalHistogram.keys())
    if allKeys:
        maxKey = max(allKeys)
        minKey = min(allKeys)
    else:
        # Empty range: the loop below writes only the header line.
        minKey, maxKey = 0, -1
    try:
        outputHistogramHandle = open(outputHistogramFile, 'w')
    except IOError as e:
        # Fixed: Python-2-only `except IOError,e` and `print` statement.
        print("Error open histogram file to write: %s" % str(e))
        return
    outputHistogramHandle.write("C count\tW/Signal peptide\tNon-signal peptide"\
                                "\tW/Signal %\tNon-signal %\n")
    sigCount = sum(haveSignalHistogram.values())
    nonSigCount = sum(noSignalHistogram.values())
    for i in range(minKey, maxKey + 1):
        if i in haveSignalHistogram:
            sc = haveSignalHistogram[i]
            scp = float(sc) / sigCount * 100
        else:
            sc = 0
            scp = 0
        if i in noSignalHistogram:
            nsc = noSignalHistogram[i]
            nscp = float(nsc) / nonSigCount * 100
        else:
            nsc = 0
            nscp = 0
        outputHistogramHandle.write("%d\t%d\t%d\t%.2f\t%.2f\n" % (i, sc, nsc, scp, nscp))
    outputHistogramHandle.close()
# Script entry: parse arguments, load all inputs, run the check.
parseArgs()
parseConfigFile(configFile)
print("Write results to: %s" % outputResultFile)
print("Write histogram to: %s" % outputHistogramFile)
pepSignal = loadOutput(config['outputSignalFile'])
if pepSignal is None:
    print("Error: load peptide signal file error")
    exit(1)
fastaData = loadFasta(config['database'])
if fastaData is None:
    print("Error: load FASTA file error")
    exit(1)
pmapData = loadPmap(pmapFile)
if pmapData is None:
    # Fixed: the original formatted str(e) here, but no `e` is in scope
    # at module level, so the error path itself raised NameError.
    print("Error: reading pmap file error: %s" % pmapFile)
    exit(1)
doCheck()
if outputHandle is not None:
    outputHandle.close()
| {
"content_hash": "eb894a6a7f2a718a0a00ac149ec186e6",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 111,
"avg_line_length": 28.2125,
"alnum_prop": 0.6154186973859105,
"repo_name": "tdangkhoa/calford",
"id": "e5f91f9a60172897809e7131fa128b359ed390c2",
"size": "4586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CheckSignal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51147"
}
],
"symlink_target": ""
} |
import numpy
import matplotlib
import pylab
from numba import autojit
@autojit
def iterMandel(x, y, iterMax):
    """Return the iteration at which the candidate point x + y*i escapes
    the Mandelbrot set (|z| >= 2), or iterMax when it does not escape
    within the iteration budget.
    """
    candidate = complex(x, y)
    z = 0.0j
    step = 0
    while step < iterMax:
        z = z**2 + candidate
        if abs(z) >= 2:
            return step
        step += 1
    return iterMax
@autojit
def crearFractal(minX, maxX, minY, maxY, imagen, iteraciones):
    """Fill `imagen` in place with Mandelbrot escape iterations for the
    complex region [minX, maxX] x [minY, maxY].

    Each pixel's value is the escape iteration reported by iterMandel
    for the corresponding sample point.
    """
    altura = imagen.shape[0]
    ancho = imagen.shape[1]
    pasoX = (maxX - minX) / ancho
    pasoY = (maxY - minY) / altura
    for col in range(ancho):
        parteReal = minX + col * pasoX
        for fila in range(altura):
            parteImag = minY + fila * pasoY
            imagen[fila, col] = iterMandel(parteReal, parteImag, iteraciones)
# Render a 2000x800 view of the region [-1.0, -0.5] x [-0.3, -0.1],
# with at most 100 iterations per pixel.
imagen = numpy.zeros((800, 2000), dtype = numpy.uint8)
crearFractal(-1.0, -0.5, -0.3, -0.1, imagen, 100)
# Hide axes, ticks and spines so only the fractal itself is saved.
ax = pylab.imshow(imagen, cmap="cool")
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
ax.axes.spines["right"].set_color("none")
ax.axes.spines["left"].set_color("none")
ax.axes.spines["top"].set_color("none")
ax.axes.spines["bottom"].set_color("none")
fig = pylab.gcf()
# NOTE(review): `figsize` is not a savefig() parameter in current
# matplotlib — confirm it is intentionally passed through here.
fig.savefig("mandel2.png", dpi=300, pad_inches=0.0, bbox_inches='tight', figsize=(20,8))
"content_hash": "691a12d712166a2c17a9109901293474",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 88,
"avg_line_length": 27.771929824561404,
"alnum_prop": 0.6298168035375868,
"repo_name": "robblack007/mandelbrot-sets",
"id": "ed3baf9475e0730f03cb1ffc04336be0d57fbb3e",
"size": "1583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mandel2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13185"
}
],
"symlink_target": ""
} |
import pyaf
import numpy as np
import pandas as pd

# Monthly signal aggregated into a Month/Quarter/Annual temporal hierarchy.
DATA_FREQ = 'M'
PERIODS = ["M" , "Q" , "A"]
H = 36          # forecast horizon (months)
N = H * 10      # number of generated observations
lDateColumn = "Date"
lSignalVar = "Signal";
START_TIME = "2001-01-25"
# Generate a synthetic monthly signal (trend + sawtooth + noise); the seed
# makes the run reproducible.
np.random.seed(seed=1960)
df_train = pd.DataFrame({lDateColumn : pd.date_range(start=START_TIME, periods=N, freq=DATA_FREQ),
lSignalVar : (np.arange(N)//40 + np.arange(N) % 21 + np.random.randn(N))})
# print(df_train.head(N))
# A temporal hierarchy is described only by its period list; levels/groups
# stay None.
lHierarchy = {};
lHierarchy['Levels'] = None;
lHierarchy['Data'] = None;
lHierarchy['Groups']= {};
lHierarchy['Periods']= PERIODS
lHierarchy['Type'] = "Temporal";
# create a model to plot the hierarchy.
import pyaf.HierarchicalForecastEngine as hautof
lEngine = hautof.cHierarchicalForecastEngine()
lSignalHierarchy = lEngine.plot_Hierarchy(df_train , lDateColumn, lSignalVar, H,
lHierarchy, None);
# print(lSignalHierarchy.__dict__)
# create a hierarchical model and train it
import pyaf.HierarchicalForecastEngine as hautof
lEngine = hautof.cHierarchicalForecastEngine()
# lEngine.mOptions.mNbCores = 1
# Compare all four reconciliation methods: bottom-up, top-down,
# middle-out, optimal combination.
lEngine.mOptions.mHierarchicalCombinationMethod = ["BU" , 'TD' , 'MO' , 'OC'];
lSignalHierarchy = lEngine.train(df_train , lDateColumn, lSignalVar, H, lHierarchy, None);
lEngine.getModelInfo();
# Forecast H steps past the training data and show the tail of the result.
dfapp_in = df_train.copy();
dfapp_in.info()
dfapp_in.tail()
dfapp_out = lEngine.forecast(dfapp_in, H);
dfapp_out.info()
print(dfapp_out.tail())
| {
"content_hash": "d9c45c2d14eed4ea9df7c337f6bfe9f7",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 99,
"avg_line_length": 26.017241379310345,
"alnum_prop": 0.6905235255135852,
"repo_name": "antoinecarme/pyaf",
"id": "7673d5a4ee827862196dc9c993855c30c8b46184",
"size": "1530",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/temporal_hierarchy/test_temporal_demo_monthly_M_Q_A.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the many-to-many `books` relation from Member to catalogue.Post."""

    dependencies = [
        ('catalogue', '0003_delete_member'),
        ('member', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='member',
            name='books',
            field=models.ManyToManyField(to='catalogue.Post'),
        ),
    ]
| {
"content_hash": "0af7f25383dbc2f09428552afbd831fa",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 62,
"avg_line_length": 22.105263157894736,
"alnum_prop": 0.5833333333333334,
"repo_name": "R-Wolf/CFD_A_library",
"id": "d0f9f96bbffcd7b04d35f6c66b41eb279f70e1fc",
"size": "493",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "member/migrations/0002_member_books.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "20896"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "48870"
}
],
"symlink_target": ""
} |
import unittest
from openfermion.ops import BinaryCode, FermionOperator, QubitOperator
from openfermion.transforms import binary_code_transform, dissolve
class CodeTransformTest(unittest.TestCase):
    """Unit tests for binary_code_transform() and dissolve()."""

    def test_transform(self):
        """A hopping term maps to the expected qubit operator under a
        two-mode linear binary code, and bad argument types are rejected."""
        hopping = FermionOperator('0^ 2', 0.5) + FermionOperator('2^ 0', 0.5)
        encoding = BinaryCode(
            [[1, 0, 0], [0, 1, 0]], ['W0', 'W1', '1 + W0 + W1'])
        expected = QubitOperator('X0 Z1', 0.25) + QubitOperator('X0', 0.25)
        self.assertTrue(binary_code_transform(hopping, encoding) == expected)
        # A raw operator string is not an acceptable hamiltonian.
        with self.assertRaises(TypeError):
            binary_code_transform('0^ 2', encoding)
        # A bare tuple is not an acceptable BinaryCode.
        with self.assertRaises(TypeError):
            binary_code_transform(
                hopping, ([[1, 0], [0, 1]], ['w0', 'w1']))

    def test_dissolve(self):
        """A nonlinear decoder component dissolves into extra Pauli terms,
        and dissolve() rejects malformed input."""
        hopping = FermionOperator('0^ 2', 0.5) + FermionOperator('2^ 0', 0.5)
        encoding = BinaryCode(
            [[1, 0, 0], [0, 1, 0]], ['W0', 'W1', '1 + W0 W1'])
        expected = (
            QubitOperator('X0 Z1', 0.375)
            + QubitOperator('X0', -0.125)
            + QubitOperator('Y0', -0.125j)
            + QubitOperator('Y0 Z1', -0.125j))
        self.assertTrue(binary_code_transform(hopping, encoding) == expected)
        with self.assertRaises(ValueError):
            dissolve(((1, '1'),))
| {
"content_hash": "669764561caf1cf01abfce3e9b775b58",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 78,
"avg_line_length": 44.57142857142857,
"alnum_prop": 0.5230769230769231,
"repo_name": "jarrodmcc/OpenFermion",
"id": "76ea3c8faa6edef42fd794aad60e91eb9ea6cda0",
"size": "2123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/openfermion/transforms/_binary_code_transform_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1370322"
},
{
"name": "Shell",
"bytes": "10029"
}
],
"symlink_target": ""
} |
from typing import Optional
import numpy as np
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import (
CategoricalHyperparameter,
Constant,
UniformFloatHyperparameter,
UniformIntegerHyperparameter,
UnParametrizedHyperparameter,
)
from autosklearn.askl_typing import FEAT_TYPE_TYPE
from autosklearn.pipeline.components.base import AutoSklearnPreprocessingAlgorithm
from autosklearn.pipeline.constants import DENSE, INPUT, SPARSE, UNSIGNED_DATA
from autosklearn.util.common import check_for_bool, check_none
class ExtraTreesPreprocessorRegression(AutoSklearnPreprocessingAlgorithm):
    """Feature selection via the feature importances of an extra-trees
    regressor (SelectFromModel with a mean-importance threshold).
    """

    def __init__(
        self,
        n_estimators,
        criterion,
        min_samples_leaf,
        min_samples_split,
        max_features,
        bootstrap=False,
        max_leaf_nodes=None,
        max_depth="None",
        min_weight_fraction_leaf=0.0,
        oob_score=False,
        n_jobs=1,
        random_state=None,
        verbose=0,
    ):
        # Hyperparameter values arrive as strings/numbers from ConfigSpace;
        # they are coerced to their real types in fit().
        self.n_estimators = n_estimators
        self.estimator_increment = 10
        if criterion not in ("mse", "friedman_mse", "mae"):
            raise ValueError(
                "'criterion' is not in ('mse', 'friedman_mse', "
                "'mae'): %s" % criterion
            )
        self.criterion = criterion
        self.min_samples_leaf = min_samples_leaf
        self.min_samples_split = min_samples_split
        self.max_features = max_features
        self.bootstrap = bootstrap
        self.max_leaf_nodes = max_leaf_nodes
        self.max_depth = max_depth
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.oob_score = oob_score
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.verbose = verbose
        self.preprocessor = None

    def fit(self, X, Y):
        """Fit the extra-trees model on (X, Y) and build the
        SelectFromModel transformer from its feature importances."""
        from sklearn.ensemble import ExtraTreesRegressor
        from sklearn.feature_selection import SelectFromModel

        # Coerce ConfigSpace-provided values to their concrete types.
        self.n_estimators = int(self.n_estimators)
        self.min_samples_leaf = int(self.min_samples_leaf)
        self.min_samples_split = int(self.min_samples_split)
        self.max_features = float(self.max_features)
        self.bootstrap = check_for_bool(self.bootstrap)
        self.n_jobs = int(self.n_jobs)
        self.verbose = int(self.verbose)

        # "None" (string) means unlimited for these two parameters.
        if check_none(self.max_leaf_nodes):
            self.max_leaf_nodes = None
        else:
            self.max_leaf_nodes = int(self.max_leaf_nodes)

        if check_none(self.max_depth):
            self.max_depth = None
        else:
            self.max_depth = int(self.max_depth)

        self.min_weight_fraction_leaf = float(self.min_weight_fraction_leaf)

        # Scale the max_features fraction logarithmically with the number
        # of input features.
        num_features = X.shape[1]
        max_features = int(float(self.max_features) * (np.log(num_features) + 1))
        # Use at most half of the features
        max_features = max(1, min(int(X.shape[1] / 2), max_features))
        estimator = ExtraTreesRegressor(
            n_estimators=self.n_estimators,
            criterion=self.criterion,
            max_depth=self.max_depth,
            min_samples_split=self.min_samples_split,
            min_samples_leaf=self.min_samples_leaf,
            bootstrap=self.bootstrap,
            max_features=max_features,
            max_leaf_nodes=self.max_leaf_nodes,
            oob_score=self.oob_score,
            n_jobs=self.n_jobs,
            verbose=self.verbose,
            min_weight_fraction_leaf=self.min_weight_fraction_leaf,
            random_state=self.random_state,
        )
        estimator.fit(X, Y)
        # prefit=True: reuse the already-fitted estimator for selection.
        self.preprocessor = SelectFromModel(
            estimator=estimator, threshold="mean", prefit=True
        )

        return self

    def transform(self, X):
        """Project X onto the selected feature subset; fit() must have
        been called first."""
        if self.preprocessor is None:
            raise NotImplementedError
        return self.preprocessor.transform(X)

    @staticmethod
    def get_properties(dataset_properties=None):
        """Describe the component's capabilities for the auto-sklearn
        pipeline machinery."""
        return {
            "shortname": "ETR",
            "name": "Extra Trees Regressor Preprocessing",
            "handles_regression": True,
            "handles_classification": False,
            "handles_multiclass": False,
            "handles_multilabel": False,
            "handles_multioutput": True,
            "is_deterministic": True,
            "input": (DENSE, SPARSE, UNSIGNED_DATA),
            "output": (INPUT,),
        }

    @staticmethod
    def get_hyperparameter_search_space(
        feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
    ):
        """Return the ConfigSpace search space for this component."""
        cs = ConfigurationSpace()

        n_estimators = Constant("n_estimators", 100)
        criterion = CategoricalHyperparameter(
            "criterion", ["mse", "friedman_mse", "mae"]
        )
        max_features = UniformFloatHyperparameter(
            "max_features", 0.1, 1.0, default_value=1.0
        )

        max_depth = UnParametrizedHyperparameter(name="max_depth", value="None")
        max_leaf_nodes = UnParametrizedHyperparameter("max_leaf_nodes", "None")

        min_samples_split = UniformIntegerHyperparameter(
            "min_samples_split", 2, 20, default_value=2
        )
        min_samples_leaf = UniformIntegerHyperparameter(
            "min_samples_leaf", 1, 20, default_value=1
        )
        min_weight_fraction_leaf = Constant("min_weight_fraction_leaf", 0.0)

        bootstrap = CategoricalHyperparameter(
            "bootstrap", ["True", "False"], default_value="False"
        )

        cs.add_hyperparameters(
            [
                n_estimators,
                criterion,
                max_features,
                max_depth,
                max_leaf_nodes,
                min_samples_split,
                min_samples_leaf,
                min_weight_fraction_leaf,
                bootstrap,
            ]
        )

        return cs
| {
"content_hash": "b420d36e66bb9e7150fa464ee8f36b4a",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 82,
"avg_line_length": 33.7514450867052,
"alnum_prop": 0.5995889707141634,
"repo_name": "automl/auto-sklearn",
"id": "10e741a44e2039ad7afed0560b1fd5d1b5add71e",
"size": "5839",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "autosklearn/pipeline/components/feature_preprocessing/extra_trees_preproc_for_regression.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "950"
},
{
"name": "Makefile",
"bytes": "3513"
},
{
"name": "Python",
"bytes": "2008151"
},
{
"name": "Shell",
"bytes": "4744"
}
],
"symlink_target": ""
} |
import boto3
import botocore
import logging
import json
import time
import custom_exceptions
import os
import shutil
import tempfile
class DBS3:
    """Wiki page store backed by an S3 bucket.

    The current revision of a page lives at "<page>.json"; every write also
    records a timestamped copy for history.
    """

    def __init__(self, bucket):
        self.log = logging.getLogger("DB.S3")
        self.bucket = bucket
        self.client = boto3.client('s3')
        self.indexDirectory = "/tmp/pageIndex"

    def getBaseKey(self, page):
        """S3 key of the current revision of `page`."""
        return page + ".json"

    def getTimestampKey(self, page, timestamp):
        """S3 key of the versioned copy of `page` at `timestamp`.

        NOTE(review): this yields "<page>.json.json.<ts>" (double suffix);
        kept as-is because existing stored versions use this layout.
        """
        return self.getBaseKey(page) + ".json." + str(timestamp)

    def _pageFromResponse(self, response):
        """Decode an S3 get_object response body into a page dict."""
        contents = json.load(response["Body"])
        ret = {}
        ret["user"] = contents["user"]
        ret["contentType"] = contents["contentType"]
        ret["content"] = contents["content"]
        if "rendered" in contents:
            ret["rendered"] = contents["rendered"]
        return ret

    def doesPageExist(self, page):
        """Return True when the page's base object exists in the bucket."""
        try:
            self.client.head_object(Bucket=self.bucket, Key=self.getBaseKey(page))
            return True
        except botocore.exceptions.ClientError as e:
            # Fixed: head_object reports a missing key as error code "404",
            # not "NoSuchKey" (that code is raised by get_object); the
            # original re-raised instead of returning False.
            if e.response['Error']['Code'] in ("404", "NoSuchKey"):
                return False
            raise

    def getPage(self, page):
        """Return the current revision of `page`; raise NotFound if absent."""
        try:
            obj = self.client.get_object(Bucket=self.bucket, Key=self.getBaseKey(page))
            return self._pageFromResponse(obj)
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == "NoSuchKey":
                raise custom_exceptions.NotFound()
            raise

    def updatePage(self, page, user, contentType, content, html=None):
        """Write a new revision of `page` (base object + versioned copy)."""
        data = {}
        data["user"] = user
        data["contentType"] = contentType
        data["content"] = content
        if html is not None:
            data["rendered"] = html
        text = json.dumps(data, indent=2)
        self.client.put_object(Bucket=self.bucket, Body=text, ContentType="application/json", Key=self.getBaseKey(page))
        self._writeVersionedFile(page, text)
        return True

    def _writeVersionedFile(self, page, data):
        """Store a timestamped copy of the page body for history."""
        timestamp = int(time.time())
        self.client.put_object(Bucket=self.bucket, Body=data, ContentType="application/json", Key=self.getTimestampKey(page, timestamp))

    def listPageVersions(self, page):
        """Return the stored versions of `page`, most recent first."""
        pageKey = self.getBaseKey(page)
        # Can only retrieve up to 1000 versions of a page.
        # Fixed: the original omitted Bucket= (a required parameter) and
        # called .reverse() on the response dict, which has no such method;
        # the object summaries live under the 'Contents' key.
        response = self.client.list_objects_v2(Bucket=self.bucket, Prefix=pageKey)
        page_versions = response.get('Contents', [])
        page_versions.reverse()
        return page_versions

    def getPageVersion(self, page, timestamp):
        """Return the revision of `page` stored at `timestamp`;
        raise NotFound if no such version exists."""
        pageKey = self.getTimestampKey(page, timestamp)
        try:
            obj = self.client.get_object(Bucket=self.bucket, Key=pageKey)
            return self._pageFromResponse(obj)
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == "404":
                raise custom_exceptions.NotFound()
            raise

    def setupIndexFiles(self):
        """Download the search index from S3 into the local index directory.

        Returns True when index files were found and downloaded."""
        if os.path.exists(self.indexDirectory):
            shutil.rmtree(self.indexDirectory)
        os.makedirs(self.indexDirectory)
        index_files = self.client.list_objects_v2(Bucket=self.bucket, Prefix="pageIndex/")
        if 'Contents' not in index_files:
            return False
        for object in index_files['Contents']:
            key = object['Key']
            # key already starts with "pageIndex/", matching indexDirectory.
            self.client.download_file(Bucket=self.bucket, Key=key, Filename='/tmp/' + key)
        return True

    def writeIndex(self):
        """Upload every local index file back to the bucket."""
        for root, dirs, files in os.walk(self.indexDirectory):
            for file in files:
                self.client.upload_file(os.path.join(root, file), self.bucket, "pageIndex/" + file)
class DBMemory:
    """In-memory page store used for tests; mirrors the DBS3 interface.

    Each page maps to a list of revision dicts, newest first.  Used as a
    context manager, it provides a temporary index directory.
    """

    def __init__(self):
        self.log = logging.getLogger("DB.Memory")
        self.db = {}

    def __enter__(self):
        self.tmpdir = tempfile.mkdtemp(prefix="wiki_test")
        self.indexDirectory = self.tmpdir
        # Fixed: context managers should return the managed object;
        # the original returned None, so `with DBMemory() as db` gave None.
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        shutil.rmtree(self.tmpdir)

    def getBaseKey(self, page):
        """The in-memory store keys pages by their plain name."""
        return page

    def updatePage(self, page, user, contentType, content, html=None):
        """Prepend a new revision for `page` (newest revision at index 0)."""
        if page not in self.db:
            self.log.info("Creating new page " + page)
            self.db[page] = []
        else:
            self.log.info("Updating page " + page)
        obj = {'user': user, 'contentType': contentType, 'content': content}
        if html is not None:
            obj["rendered"] = html
        self.db[page].insert(0, obj)
        return True

    def getPage(self, page):
        """Return the newest revision of `page`; raise NotFound if absent."""
        if page in self.db:
            return self.db[page][0]
        raise custom_exceptions.NotFound()

    def doesPageExist(self, page):
        return page in self.db

    def listPageVersions(self, page):
        """Return a copy of the page's revisions, oldest first;
        raise NotFound when the page does not exist."""
        if page not in self.db:
            raise custom_exceptions.NotFound()
        ret = []
        ret.extend(self.db[page])
        ret.reverse()
        return ret

    def getPageVersion(self, page, version):
        """Return revision number `version` (1 = oldest), or None when the
        page or version does not exist."""
        if page in self.db and version <= len(self.db[page]):
            return self.db[page][len(self.db[page]) - version]
        return None

    def setupIndexFiles(self):
        # Mirrors DBS3.setupIndexFiles: True when an index already exists.
        return os.path.exists(self.tmpdir + "/MAIN_WRITELOCK")

    def writeIndex(self):
        # Nothing to persist for the in-memory store.
        return
| {
"content_hash": "9f21d12adde7b2c72fd54eb82f6c73e3",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 126,
"avg_line_length": 27.88125,
"alnum_prop": 0.7146379735485318,
"repo_name": "intirix/serverless-wiki",
"id": "28c0afd4dea5428f2ed1200ea54b20c71ef87770",
"size": "4480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "db.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1305"
},
{
"name": "HTML",
"bytes": "4505"
},
{
"name": "JavaScript",
"bytes": "3243"
},
{
"name": "Python",
"bytes": "22749"
},
{
"name": "Shell",
"bytes": "1152"
}
],
"symlink_target": ""
} |
from classifiers import abstract_classifier as ac
from classifiers import classifiers_helper as helper
#from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BernoulliNB
class NaiveBayesScikit(ac.AbstractClassifier):
    """Bernoulli naive-Bayes classifier backed by scikit-learn."""

    def __repr__(self):
        return "<NaiveBayesScikit>"

    def __str__(self):
        return "Naive Bayes Scikit"

    def train(self, labels, train_set):
        """Fit a BernoulliNB model on the given labels and feature set."""
        self.classifier = BernoulliNB()
        l, ts = helper.format_for_scikit(labels, train_set)
        self.classifier.fit(ts, l)

    def test(self, labels, test_set):
        """Predict on `test_set` and return (accuracy, predictions)."""
        l, ts = helper.format_for_scikit(labels, test_set)
        predictions = self.classifier.predict(ts)
        # NOTE(review): self.plot_roc is presumably set by the
        # AbstractClassifier base class — confirm.
        if self.plot_roc:
            # Fixed: the original applied '%' to print's return value
            # (`print("...") % (str(self))`), which raises TypeError under
            # Python 3; the format operator belongs inside the call.
            print("ROC curve plot unavailable for %s" % str(self))
        return helper.accuracy(labels, predictions), predictions
| {
"content_hash": "351a13142bb7216a902343aff5a4b3a0",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 68,
"avg_line_length": 32.46153846153846,
"alnum_prop": 0.6800947867298578,
"repo_name": "Rolinh/tweetmining",
"id": "d00e24c48b7cdaebce3d14b7a7980b8e787f2528",
"size": "844",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/classifiers/naive_bayes_scikit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "DOT",
"bytes": "886737"
},
{
"name": "Python",
"bytes": "63457"
},
{
"name": "TeX",
"bytes": "77280"
}
],
"symlink_target": ""
} |
from django import forms
from django.contrib.auth.models import User
from models import Profile
from django.contrib.auth.forms import UserCreationForm
# our new form
class SeeJobsForm(forms.Form):
    """Job-search form: title and experience are mandatory, location is not."""
    job_title = forms.CharField(required=True)
    city = forms.CharField(required=False)
    state = forms.CharField(required=False)
    yrs_exp = forms.IntegerField(required=True)
    # Free-text resume entered in a multi-line widget.
    resume = forms.CharField(
        required=True,
        widget=forms.Textarea
    )
class UserForm(forms.ModelForm):
    """Edit the basic identity fields of Django's built-in User."""
    class Meta:
        model = User
        fields = ('first_name', 'last_name', 'email')
class ProfileForm(forms.ModelForm):
    """Edit the job-related fields of the user's Profile."""
    class Meta:
        model = Profile
        fields = ('job_title', 'yrs_exp', 'resume', 'city', 'state')
class SignUpForm(UserCreationForm):
    """Registration form that makes email and full name mandatory on top
    of the standard username/password pair."""
    email = forms.EmailField(required=True)
    first_name = forms.CharField(required=True)
    last_name = forms.CharField(required=True)

    class Meta:
        model = User
        fields = ("username", "first_name", "last_name", "email", "password1", "password2")
"content_hash": "be1f726f8696b57b1f4edb148e5841db",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 91,
"avg_line_length": 29.885714285714286,
"alnum_prop": 0.6787762906309751,
"repo_name": "Qlwentt/qually",
"id": "5be7c778db4fa75c55d5de68325d26a55d566bc5",
"size": "1097",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jobs/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "27970"
},
{
"name": "HTML",
"bytes": "44660"
},
{
"name": "JavaScript",
"bytes": "11015"
},
{
"name": "Python",
"bytes": "87304"
},
{
"name": "Shell",
"bytes": "4245"
}
],
"symlink_target": ""
} |
import csv
import errno
import flask
import os
from werkzeug.exceptions import Forbidden, NotFound
from .constants import REPORT_DIR, REPORT_EXT
def raise_errno(err):
    """Translate an OS error's errno into the matching HTTP exception.

    Permission errors become Forbidden, missing files become NotFound;
    anything else re-raises the exception currently being handled.
    """
    code = err.errno
    if code in (errno.EPERM, errno.EACCES):
        raise Forbidden
    if code == errno.ENOENT:
        raise NotFound
    raise
def nonblank_lines(line_iter):
    """Yield each line stripped of surrounding whitespace, skipping blanks."""
    for raw in line_iter:
        stripped = raw.strip()
        if stripped:
            yield stripped
# Source: http://docs.python.org/2/library/csv.html#examples
def unicode_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
    """Parse an iterable of unicode lines as CSV, yielding unicode rows.

    NOTE(review): Python-2-only — it relies on the `unicode` builtin and
    on csv.reader accepting byte strings; under Python 3 use csv.reader
    directly on text.
    """
    # csv.py doesn't do Unicode; encode temporarily as UTF-8:
    csv_reader = csv.reader(utf_8_encoder(unicode_csv_data),
                            dialect=dialect, **kwargs)
    for row in csv_reader:
        # decode UTF-8 back to Unicode, cell by cell:
        yield [unicode(cell, 'utf-8') for cell in row]
def utf_8_encoder(unicode_csv_data):
    """Lazily encode each text line to UTF-8 (helper for unicode_csv_reader)."""
    for text_line in unicode_csv_data:
        yield text_line.encode('utf-8')
def report_dir_path(*args):
    """Build a path under the current app instance's report directory."""
    instance_root = flask.current_app.instance_path
    return os.path.join(instance_root, REPORT_DIR, *args)
def valid_upload(upload):
    """Return True when an upload is present and carries the report extension."""
    if not upload:
        return False
    return upload.filename.endswith(REPORT_EXT)
| {
"content_hash": "04d6d56804c37b43def9fe7dbb88d2a4",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 75,
"avg_line_length": 26.152173913043477,
"alnum_prop": 0.6658354114713217,
"repo_name": "bhrutledge/dsvbrowser",
"id": "46a45d2cb8ac09d5ecc3679e704191e598a04f65",
"size": "1203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dsvbrowser/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18799"
},
{
"name": "JavaScript",
"bytes": "35311"
},
{
"name": "Python",
"bytes": "27858"
},
{
"name": "Shell",
"bytes": "6467"
}
],
"symlink_target": ""
} |
"""Tests for core.storage.feedback.gae_models."""
from __future__ import annotations
import types
from core import feconf
from core import utils
from core.domain import feedback_domain
from core.domain import feedback_services
from core.platform import models
from core.tests import test_utils
from typing import Dict
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import base_models
from mypy_imports import feedback_models
from mypy_imports import user_models
(base_models, feedback_models, user_models) = models.Registry.import_models(
[models.NAMES.base_model, models.NAMES.feedback, models.NAMES.user])
CREATED_ON_FIELD = 'created_on'
LAST_UPDATED_FIELD = 'last_updated'
DELETED_FIELD = 'deleted'
FIELDS_NOT_REQUIRED = [CREATED_ON_FIELD, LAST_UPDATED_FIELD, DELETED_FIELD]
class FeedbackThreadModelTest(test_utils.GenericTestBase):
    """Tests for the GeneralFeedbackThreadModel class."""

    # Fixture constants for the thread/user models created in setUp().
    NONEXISTENT_USER_ID = 'id_x'
    ENTITY_TYPE = feconf.ENTITY_TYPE_EXPLORATION
    ENTITY_ID = 'exp_id_2'
    USER_ID = 'user_1'
    OLD_USER_1_ID = 'user_1_old'
    NEW_USER_1_ID = 'user_1_new'
    OLD_USER_2_ID = 'user_2_old'
    NEW_USER_2_ID = 'user_2_new'
    STATUS = 'open'
    SUBJECT = 'dummy subject'
    HAS_SUGGESTION = True
    SUMMARY = 'This is a great summary.'
    MESSAGE_COUNT = 0

    def setUp(self) -> None:
        """Set up user models in datastore for use in testing."""
        super(FeedbackThreadModelTest, self).setUp()
        user_models.UserSettingsModel(
            id=self.NEW_USER_1_ID,
            email='some@email.com'
        ).put()
        user_models.UserSettingsModel(
            id=self.NEW_USER_2_ID,
            email='some_other@email.com'
        ).put()
        # One stored thread shared by the assertions below.
        self.feedback_thread_model = feedback_models.GeneralFeedbackThreadModel(
            id='%s.%s.%s' % (self.ENTITY_TYPE, self.ENTITY_ID, 'random'),
            entity_type=self.ENTITY_TYPE,
            entity_id=self.ENTITY_ID,
            original_author_id=self.USER_ID,
            status=self.STATUS,
            subject=self.SUBJECT,
            has_suggestion=self.HAS_SUGGESTION,
            summary=self.SUMMARY,
            message_count=self.MESSAGE_COUNT
        )
        self.feedback_thread_model.update_timestamps()
        self.feedback_thread_model.put()

    def test_get_deletion_policy(self) -> None:
        # Threads are pseudonymized, not hard-deleted, on user deletion.
        self.assertEqual(
            feedback_models.GeneralFeedbackThreadModel.get_deletion_policy(),
            base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE)

    def test_has_reference_to_user_id(self) -> None:
        # The stored thread references USER_ID as its original author.
        self.assertTrue(
            feedback_models.GeneralFeedbackThreadModel
            .has_reference_to_user_id(self.USER_ID))
        self.assertFalse(
            feedback_models.GeneralFeedbackThreadModel
            .has_reference_to_user_id(self.NONEXISTENT_USER_ID))

    def test_raise_exception_by_mocking_collision(self) -> None:
        """ID generation must give up cleanly when every candidate collides."""
        feedback_thread_model_cls = feedback_models.GeneralFeedbackThreadModel
        # Test create method.
        with self.assertRaisesRegex( # type: ignore[no-untyped-call]
            Exception, 'Feedback thread ID conflict on create.'):
            # Swap dependent method get_by_id to simulate collision every time.
            with self.swap(
                feedback_thread_model_cls, 'get_by_id',
                types.MethodType(
                    lambda x, y: True,
                    feedback_thread_model_cls)):
                feedback_thread_model_cls.create(
                    'exploration.exp_id.thread_id')

        # Test generate_new_thread_id method.
        with self.assertRaisesRegex( # type: ignore[no-untyped-call]
            Exception,
            'New thread id generator is producing too many collisions.'):
            # Swap dependent method get_by_id to simulate collision every time.
            with self.swap(
                feedback_thread_model_cls, 'get_by_id',
                types.MethodType(
                    lambda x, y: True,
                    feedback_thread_model_cls)):
                feedback_thread_model_cls.generate_new_thread_id(
                    'exploration', 'exp_id')

    def test_export_data_trivial(self) -> None:
        # A user with no threads exports an empty dict.
        user_data = feedback_models.GeneralFeedbackThreadModel.export_data(
            'fake_user'
        )
        test_data: Dict[str, str] = {}
        self.assertEqual(user_data, test_data)

    def test_export_data_nontrivial(self) -> None:
        # The thread created in setUp() is exported keyed by its thread id.
        user_data = (
            feedback_models
            .GeneralFeedbackThreadModel.export_data(self.USER_ID))
        feedback_id = '%s.%s.%s' % (self.ENTITY_TYPE, self.ENTITY_ID, 'random')
        test_data = {
            feedback_id: {
                'entity_type': self.ENTITY_TYPE,
                'entity_id': self.ENTITY_ID,
                'status': self.STATUS,
                'subject': self.SUBJECT,
                'has_suggestion': self.HAS_SUGGESTION,
                'summary': self.SUMMARY,
                'message_count': self.MESSAGE_COUNT,
                'last_updated_msec': utils.get_time_in_millisecs(
                    self.feedback_thread_model.last_updated)
            }
        }
        self.assertEqual(user_data, test_data)

    def test_message_cache_supports_huge_text(self) -> None:
        self.feedback_thread_model.last_nonempty_message_text = 'X' * 2000
        # Storing the model should not throw.
        self.feedback_thread_model.update_timestamps()
        self.feedback_thread_model.put()
class GeneralFeedbackMessageModelTests(test_utils.GenericTestBase):
"""Tests for the GeneralFeedbackMessageModel class."""
def test_get_deletion_policy(self) -> None:
    # Messages are pseudonymized, not hard-deleted, on user deletion.
    self.assertEqual(
        feedback_models.GeneralFeedbackMessageModel.get_deletion_policy(),
        base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE)
def test_has_reference_to_user_id(self) -> None:
    """A stored message referencing a user is found by author id."""
    feedback_models.GeneralFeedbackMessageModel(
        id='id',
        thread_id='thread_id',
        message_id=1,
        author_id='user_id',
        received_via_email=False
    ).put()
    self.assertTrue(
        feedback_models.GeneralFeedbackMessageModel
        .has_reference_to_user_id('user_id'))
    self.assertFalse(
        feedback_models.GeneralFeedbackMessageModel
        .has_reference_to_user_id('id_x'))
def test_raise_exception_by_mocking_collision(self) -> None:
    """create() must refuse to overwrite an existing message id."""
    thread_id = feedback_services.create_thread( # type: ignore[no-untyped-call]
        'exploration', '0', 'test_author', 'subject 1', 'text 1')
    # Simulating the _generate_id function in the
    # GeneralFeedbackMessageModel class.
    instance_id = '.'.join([thread_id, '0'])
    # create_thread already stored message 0, so creating it again must
    # raise the documented conflict error.
    expected_exception_regexp = (
        r'The following feedback message ID\(s\) conflicted on '
        'create: %s' % (instance_id)
    )
    with self.assertRaisesRegex(Exception, expected_exception_regexp): # type: ignore[no-untyped-call]
        feedback_models.GeneralFeedbackMessageModel.create(
            feedback_domain.FullyQualifiedMessageIdentifier(
                thread_id, 0)
        )
def test_get_all_messages(self) -> None:
thread_id = feedback_services.create_thread( # type: ignore[no-untyped-call]
'exploration', '0', None, 'subject 1', 'text 1')
feedback_services.create_message( # type: ignore[no-untyped-call]
thread_id, None, 'open', 'subject 2', 'text 2')
model = feedback_models.GeneralFeedbackMessageModel.get(
thread_id, 0)
# Ruling out the possibility of None for mypy type checking.
assert model is not None
self.assertEqual(model.entity_type, 'exploration')
all_messages = (
feedback_models.GeneralFeedbackMessageModel
.get_all_messages(2, None))
self.assertEqual(len(all_messages[0]), 2)
self.assertEqual(all_messages[0][0].thread_id, thread_id)
self.assertEqual(all_messages[0][0].entity_id, '0')
self.assertEqual(all_messages[0][0].entity_type, 'exploration')
self.assertEqual(all_messages[0][0].text, 'text 2')
self.assertEqual(all_messages[0][0].updated_subject, 'subject 2')
self.assertEqual(all_messages[0][1].thread_id, thread_id)
self.assertEqual(all_messages[0][1].entity_id, '0')
self.assertEqual(all_messages[0][1].entity_type, 'exploration')
self.assertEqual(all_messages[0][1].text, 'text 1')
self.assertEqual(all_messages[0][1].updated_subject, 'subject 1')
def test_get_most_recent_message(self) -> None:
thread_id = feedback_services.create_thread( # type: ignore[no-untyped-call]
'exploration', '0', None, 'subject 1', 'text 1')
feedback_services.create_message( # type: ignore[no-untyped-call]
thread_id, None, 'open', 'subject 2', 'text 2')
model1 = feedback_models.GeneralFeedbackMessageModel.get(
thread_id, 0)
# Ruling out the possibility of None for mypy type checking.
assert model1 is not None
self.assertEqual(model1.entity_type, 'exploration')
message = (
feedback_models.GeneralFeedbackMessageModel
.get_most_recent_message(thread_id))
self.assertEqual(message.thread_id, thread_id)
self.assertEqual(message.entity_id, '0')
self.assertEqual(message.entity_type, 'exploration')
self.assertEqual(message.text, 'text 2')
self.assertEqual(message.updated_subject, 'subject 2')
def test_export_data_trivial(self) -> None:
user_data = (
feedback_models.GeneralFeedbackMessageModel
.export_data('non_existent_user'))
test_data: Dict[str, str] = {}
self.assertEqual(user_data, test_data)
def test_export_data_nontrivial(self) -> None:
# Setup test variables.
test_export_thread_type = 'exploration'
test_export_thread_id = 'export_thread_1'
test_export_updated_status = 'open'
test_export_updated_subject = 'export_subject_1'
test_export_text = 'Export test text.'
test_export_received_via_email = False
self.signup('export_author_1@example.com', 'exportAuthor1')
test_export_author_id = (
self.get_user_id_from_email('export_author_1@example.com')) # type: ignore[no-untyped-call]
thread_id = feedback_services.create_thread( # type: ignore[no-untyped-call]
test_export_thread_type,
test_export_thread_id,
test_export_author_id,
test_export_updated_subject,
test_export_text
)
feedback_services.create_message( # type: ignore[no-untyped-call]
thread_id,
test_export_author_id,
test_export_updated_status,
test_export_updated_subject,
test_export_text
)
user_data = (
feedback_models.GeneralFeedbackMessageModel
.export_data(test_export_author_id))
test_data = {
thread_id + '.0': {
'thread_id': thread_id,
'message_id': 0,
'updated_status': test_export_updated_status,
'updated_subject': test_export_updated_subject,
'text': test_export_text,
'received_via_email': test_export_received_via_email
},
thread_id + '.1': {
'thread_id': thread_id,
'message_id': 1,
'updated_status': test_export_updated_status,
'updated_subject': test_export_updated_subject,
'text': test_export_text,
'received_via_email': test_export_received_via_email
}
}
self.assertEqual(test_data, user_data)
class FeedbackThreadUserModelTest(test_utils.GenericTestBase):
    """Tests for the FeedbackThreadUserModel class."""
    USER_ID_A = 'user.id.a'
    USER_ID_B = 'user_id_b'
    THREAD_ID_A = 'exploration.exp_id.thread_id_a'
    THREAD_ID_B = 'exploration.exp_id.thread_id_b'
    THREAD_ID_C = 'exploration.exp_id.thread_id_c'
    MESSAGE_IDS_READ_IN_THREAD_A = [0, 1, 2]
    MESSAGE_IDS_READ_IN_THREAD_B = [3, 4]
    MESSAGE_IDS_READ_IN_THREAD_C = [5, 6, 7, 8, 9]
    def setUp(self) -> None:
        """Create three thread-user models for USER_ID_A with read ids."""
        # Zero-argument super() for consistency with the other test classes
        # in this file (e.g. UnsentFeedbackEmailModelTest.setUp).
        super().setUp()
        model = feedback_models.GeneralFeedbackThreadUserModel.create(
            self.USER_ID_A, self.THREAD_ID_A)
        model.message_ids_read_by_user = self.MESSAGE_IDS_READ_IN_THREAD_A
        model = feedback_models.GeneralFeedbackThreadUserModel.create(
            self.USER_ID_A, self.THREAD_ID_B)
        model.message_ids_read_by_user = self.MESSAGE_IDS_READ_IN_THREAD_B
        model = feedback_models.GeneralFeedbackThreadUserModel.create(
            self.USER_ID_A, self.THREAD_ID_C)
        model.message_ids_read_by_user = self.MESSAGE_IDS_READ_IN_THREAD_C
    def test_get_deletion_policy(self) -> None:
        """Thread-user models are deleted outright on user deletion."""
        self.assertEqual(
            feedback_models.GeneralFeedbackThreadUserModel
            .get_deletion_policy(),
            base_models.DELETION_POLICY.DELETE)
    def test_has_reference_to_user_id(self) -> None:
        """A stored model's user_id counts as a user reference."""
        feedback_models.GeneralFeedbackThreadUserModel(
            id='id',
            thread_id='thread_id',
            user_id='user_id',
        ).put()
        self.assertTrue(
            feedback_models.GeneralFeedbackThreadUserModel
            .has_reference_to_user_id('user_id'))
        self.assertFalse(
            feedback_models.GeneralFeedbackThreadUserModel
            .has_reference_to_user_id('id_x'))
    def test_put_function(self) -> None:
        """last_updated only changes when update_timestamps allows it."""
        feedback_thread_model = feedback_models.GeneralFeedbackThreadUserModel(
            id='user_id.exploration.exp_id.thread_id',
            user_id='user_id',
            thread_id='exploration.exp_id.thread_id',
            message_ids_read_by_user=[])
        feedback_thread_model.update_timestamps()
        feedback_thread_model.put()
        last_updated = feedback_thread_model.last_updated
        # If we do not wish to update the last_updated time, we should set
        # the update_last_updated_time argument to False in the put function.
        feedback_thread_model.update_timestamps(update_last_updated_time=False)
        feedback_thread_model.put()
        self.assertEqual(feedback_thread_model.last_updated, last_updated)
        # If we do wish to change it however, we can simply use the put function
        # as the default value of update_last_updated_time is True.
        feedback_thread_model.update_timestamps()
        feedback_thread_model.put()
        self.assertNotEqual(feedback_thread_model.last_updated, last_updated)
    def test_create_new_object(self) -> None:
        """create() stores a model with a '<user_id>.<thread_id>' id."""
        feedback_models.GeneralFeedbackThreadUserModel.create(
            'user_id', 'exploration.exp_id.thread_id')
        feedback_thread_user_model = (
            feedback_models.GeneralFeedbackThreadUserModel.get(
                'user_id', 'exploration.exp_id.thread_id'))
        # Ruling out the possibility of None for mypy type checking.
        assert feedback_thread_user_model is not None
        self.assertEqual(
            feedback_thread_user_model.id,
            'user_id.exploration.exp_id.thread_id')
        self.assertEqual(feedback_thread_user_model.user_id, 'user_id')
        self.assertEqual(
            feedback_thread_user_model.thread_id,
            'exploration.exp_id.thread_id')
        self.assertEqual(
            feedback_thread_user_model.message_ids_read_by_user, [])
    def test_get_object(self) -> None:
        """get() returns a model equal to the one created."""
        feedback_models.GeneralFeedbackThreadUserModel.create(
            'user_id', 'exploration.exp_id.thread_id')
        expected_model = feedback_models.GeneralFeedbackThreadUserModel(
            id='user_id.exploration.exp_id.thread_id',
            user_id='user_id',
            thread_id='exploration.exp_id.thread_id',
            message_ids_read_by_user=[])
        actual_model = (
            feedback_models.GeneralFeedbackThreadUserModel.get(
                'user_id', 'exploration.exp_id.thread_id'))
        # Ruling out the possibility of None for mypy type checking.
        assert expected_model is not None
        assert actual_model is not None
        self.assertEqual(actual_model.id, expected_model.id)
        self.assertEqual(actual_model.user_id, expected_model.user_id)
        self.assertEqual(actual_model.thread_id, expected_model.thread_id)
        self.assertEqual(
            actual_model.message_ids_read_by_user,
            expected_model.message_ids_read_by_user)
    def test_get_multi(self) -> None:
        """get_multi() returns models in the order of the requested ids."""
        feedback_models.GeneralFeedbackThreadUserModel.create(
            'user_id', 'exploration.exp_id.thread_id_1')
        feedback_models.GeneralFeedbackThreadUserModel.create(
            'user_id', 'exploration.exp_id.thread_id_2')
        expected_model_1 = feedback_models.GeneralFeedbackThreadUserModel(
            id='user_id.exploration.exp_id.thread_id_1',
            user_id='user_id',
            thread_id='exploration.exp_id.thread_id_1',
            message_ids_read_by_user=[])
        expected_model_2 = feedback_models.GeneralFeedbackThreadUserModel(
            id='user_id.exploration.exp_id.thread_id_2',
            user_id='user_id',
            thread_id='exploration.exp_id.thread_id_2',
            message_ids_read_by_user=[])
        actual_models = (
            feedback_models.GeneralFeedbackThreadUserModel.get_multi(
                'user_id',
                ['exploration.exp_id.thread_id_1',
                 'exploration.exp_id.thread_id_2']))
        actual_model_1 = actual_models[0]
        actual_model_2 = actual_models[1]
        # Ruling out the possibility of None for mypy type checking.
        assert actual_model_1 is not None
        assert actual_model_2 is not None
        self.assertEqual(actual_model_1.id, expected_model_1.id)
        self.assertEqual(actual_model_1.user_id, expected_model_1.user_id)
        self.assertEqual(actual_model_1.thread_id, expected_model_1.thread_id)
        self.assertEqual(
            actual_model_1.message_ids_read_by_user,
            expected_model_1.message_ids_read_by_user)
        self.assertEqual(actual_model_2.id, expected_model_2.id)
        self.assertEqual(actual_model_2.user_id, expected_model_2.user_id)
        self.assertEqual(actual_model_2.thread_id, expected_model_2.thread_id)
        self.assertEqual(
            actual_model_2.message_ids_read_by_user,
            expected_model_2.message_ids_read_by_user)
    def test_export_data_general_case(self) -> None:
        """Ensure export_data returns well-formed data in general case."""
        user_data = feedback_models.GeneralFeedbackThreadUserModel.export_data(
            self.USER_ID_A)
        expected_data = {
            self.THREAD_ID_A: {
                'message_ids_read_by_user': self.MESSAGE_IDS_READ_IN_THREAD_A
            },
            self.THREAD_ID_B: {
                'message_ids_read_by_user': self.MESSAGE_IDS_READ_IN_THREAD_B
            },
            self.THREAD_ID_C: {
                'message_ids_read_by_user': self.MESSAGE_IDS_READ_IN_THREAD_C
            }
        }
        self.assertDictEqual(expected_data, user_data)
    def test_export_data_nonexistent_case(self) -> None:
        """Ensure export data returns empty dict when data is not found."""
        user_data = feedback_models.GeneralFeedbackThreadUserModel.export_data(
            self.USER_ID_B)
        self.assertEqual({}, user_data)
class FeedbackAnalyticsModelTests(test_utils.GenericTestBase):
    """Tests for the FeedbackAnalyticsModelTests class."""
    def test_get_deletion_policy(self) -> None:
        """Analytics models have no per-user deletion policy."""
        policy = feedback_models.FeedbackAnalyticsModel.get_deletion_policy()
        self.assertEqual(policy, base_models.DELETION_POLICY.NOT_APPLICABLE)
class UnsentFeedbackEmailModelTest(test_utils.GenericTestBase):
    """Tests for FeedbackMessageEmailDataModel class."""
    NONEXISTENT_USER_ID = 'id_x'
    USER_ID_1 = 'id_1'
    def setUp(self) -> None:
        """Store one unsent-email model keyed by 'user_id'."""
        super().setUp()
        feedback_models.UnsentFeedbackEmailModel(id='user_id').put()
    def test_get_deletion_policy(self) -> None:
        """Unsent-email models are deleted outright on user deletion."""
        policy = feedback_models.UnsentFeedbackEmailModel.get_deletion_policy()
        self.assertEqual(policy, base_models.DELETION_POLICY.DELETE)
    def test_has_reference_to_user_id(self) -> None:
        """Only the id stored in setUp is treated as a user reference."""
        model_cls = feedback_models.UnsentFeedbackEmailModel
        self.assertTrue(model_cls.has_reference_to_user_id('user_id'))
        self.assertFalse(model_cls.has_reference_to_user_id('id_x'))
    def test_apply_deletion_policy_deletes_model_for_user(self) -> None:
        """After applying the policy, the user's model is gone."""
        model_cls = feedback_models.UnsentFeedbackEmailModel
        model_cls.apply_deletion_policy(self.USER_ID_1)
        self.assertIsNone(model_cls.get_by_id(self.USER_ID_1))
    def test_apply_deletion_policy_raises_no_exception_for_nonexistent_user(
        self
    ) -> None:
        """Deleting an unknown user is a no-op rather than an error."""
        feedback_models.UnsentFeedbackEmailModel.apply_deletion_policy(
            self.NONEXISTENT_USER_ID)
    def test_new_instances_stores_correct_data(self) -> None:
        """A stored instance round-trips its references and retry count."""
        target_user_id = 'A'
        reference = {
            'exploration_id': 'ABC123',
            'thread_id': 'thread_id1',
            'message_id': 'message_id1'
        }
        instance = feedback_models.UnsentFeedbackEmailModel(
            id=target_user_id, feedback_message_references=[reference])
        instance.update_timestamps()
        instance.put()
        fetched = (
            feedback_models.UnsentFeedbackEmailModel.get_by_id(
                id=target_user_id))
        self.assertEqual(fetched.feedback_message_references, [reference])
        self.assertEqual(fetched.retries, 0)
| {
"content_hash": "ec118f9e21c7d2ce603d3b4941ca2d06",
"timestamp": "",
"source": "github",
"line_count": 545,
"max_line_length": 106,
"avg_line_length": 40.579816513761465,
"alnum_prop": 0.6226713691445107,
"repo_name": "brianrodri/oppia",
"id": "56e711b0e42060ea808591421e509a0487e3d31a",
"size": "22739",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/storage/feedback/gae_models_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "487903"
},
{
"name": "HTML",
"bytes": "1748056"
},
{
"name": "JavaScript",
"bytes": "1176446"
},
{
"name": "PEG.js",
"bytes": "71377"
},
{
"name": "Python",
"bytes": "14169091"
},
{
"name": "Shell",
"bytes": "2239"
},
{
"name": "TypeScript",
"bytes": "13316709"
}
],
"symlink_target": ""
} |
# Example: demonstrate Sanic's request-timeout handling with a custom handler.
import asyncio
from sanic import Sanic
from sanic import response
from sanic.config import Config
from sanic.exceptions import RequestTimeout
# Requests taking longer than 1 second raise RequestTimeout (handled below).
Config.REQUEST_TIMEOUT = 1
app = Sanic(__name__)
@app.route('/')
async def test(request):
    # Sleep longer than REQUEST_TIMEOUT so the timeout always triggers.
    await asyncio.sleep(3)
    return response.text('Hello, world!')
@app.exception(RequestTimeout)
def timeout(request, exception):
    # Return a custom 408 response instead of Sanic's default error page.
    return response.text('RequestTimeout from error_handler.', 408)
app.run(host='0.0.0.0', port=8000) | {
"content_hash": "d41a1d232ca5a383db14f1a95e40cd97",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 67,
"avg_line_length": 22.523809523809526,
"alnum_prop": 0.7463002114164905,
"repo_name": "jrocketfingers/sanic",
"id": "fb2822eeb34a5fe6c782d6209138aa8ac473631d",
"size": "473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/request_timeout.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "482"
},
{
"name": "Makefile",
"bytes": "108"
},
{
"name": "Python",
"bytes": "231242"
}
],
"symlink_target": ""
} |
import sys
import argparse
import json
# Import smtplib to provide email functions
import smtplib
# Import the email modules
from email.mime.text import MIMEText
try:
from .mac_notify import notify as mac_notify
mac_notifications = True
except Exception as ex:
mac_notifications = False
print( "Failed to import mac_notify. Mac OS X Notifications will not be sent." )
print( "Exception: {}".format( ex ) )
def mailTo(addr_to, subject='Notification', message='', config=None):
    """Send an email using an SMTP-over-SSL account.

    addr_to: email address(es), comma delimited if more than one.
    subject, message: optional subject and message body.
    config: dict with the keys addr_from, smtp_server, smtp_user, smtp_pass.

    Raises KeyError if a required config key is missing.
    """
    # Avoid a mutable default argument; fall back to an empty dict so a
    # missing key still raises KeyError exactly as before.
    if config is None:
        config = {}
    # Define email addresses to use.
    addr_from = config['addr_from']
    # Define SMTP email server details.
    smtp_server = config['smtp_server']
    smtp_user = config['smtp_user']
    smtp_pass = config['smtp_pass']
    if not subject:
        subject = 'Notification'
    # Construct the email.
    msg = MIMEText(message)
    msg['To'] = addr_to
    msg['From'] = addr_from
    msg['Subject'] = subject
    # Context manager guarantees the connection is closed (QUIT) even if
    # login or sendmail raises; the original leaked the socket on failure.
    with smtplib.SMTP_SSL(smtp_server, 465) as s:
        s.login(smtp_user, smtp_pass)
        s.sendmail(addr_from, addr_to, msg.as_string())
def read_email_config(fname="email_config.json"):
    """Load the JSON config file and return notification-related entries.

    Raises KeyError if any required key is absent from the file.
    """
    try:
        with open(fname, 'r') as config_file:
            notifications = json.load(config_file)
    except Exception:
        print( "Failed to load email config file {}".format( fname ) )
        raise
    required_keys = {'addr_from', 'smtp_pass', 'smtp_server', 'smtp_user'}
    missing = required_keys.difference(set(notifications))
    if missing:
        raise KeyError("Key(s) missing from email notifications config file: {}".format( ", ".join( list(missing) )))
    return notifications
# TODO Add a write_email_config function
def notify( title, subTitle='', message='', email_to=None, mac=True, sound=False, email_config=None ):
    """Send a notification by email and/or Mac OS X Notification Center.

    title: notification title (also the email subject).
    subTitle: optional subtitle; folded into the email body since email
        has no subtitle field.
    email_to: recipient address(es); no email is sent when None.
    mac: send a desktop notification if mac_notify was importable.
    sound: play a sound with the desktop notification.
    email_config: dict passed through to mailTo (see its docstring).
    """
    # Avoid a mutable default argument; an empty dict preserves the old
    # behavior of mailTo raising KeyError for missing config entries.
    if email_config is None:
        email_config = {}
    if email_to is not None:
        if subTitle is None:
            msg2 = message
        else:
            msg2 = subTitle + "\n" + message
        mailTo( email_to, subject=title, message=msg2, config=email_config )
    # mac_notifications is set at import time depending on whether the
    # platform-specific mac_notify module could be loaded.
    if mac and mac_notifications:
        mac_notify( title, subTitle, message, sound=sound )
| {
"content_hash": "16d4c9312717941f574b7de7df433b02",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 140,
"avg_line_length": 31.77922077922078,
"alnum_prop": 0.6526358806702084,
"repo_name": "Bleyddyn/malpi",
"id": "16e76a9640e70db5e69f3309421389d497ef1d45",
"size": "2467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "malpi/notify.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "4982"
},
{
"name": "Jupyter Notebook",
"bytes": "16321"
},
{
"name": "Python",
"bytes": "947508"
},
{
"name": "Shell",
"bytes": "1444"
}
],
"symlink_target": ""
} |
import numpy as np
def topattern(pd, patlen, patternlength):
    """Build a binary activation matrix from pattern start times.

    pd: dict mapping pattern ID -> list of start timesteps.
    patlen: dict mapping pattern ID -> pattern duration in timesteps.
    patternlength: number of columns (timesteps) in the output.
    Returns an array of shape (len(pd), patternlength) with 1s where a
    pattern is active; activations are clipped at patternlength.
    """
    activation = np.zeros((len(pd), patternlength))
    for row, pattern_id in enumerate(pd.keys()):
        duration = patlen[pattern_id]
        for begin in pd[pattern_id]:
            col = min(begin, patternlength)
            span = min(duration, patternlength - begin)
            activation[row, col:begin + span] = 1
    return activation
class PatternManager:
    """
    PatternManager is a container of different patterns, in charge of creating sequence of overlapping unsync patterns in time.
    The distribution of overlapping patterns that PatternManager creates is determined by mixing probabilities.
    If mixing probabilities has k elements, then each element represents probability that one of N available patterns in PatternManager (each with some possibly different length in time) is presented in the input.
    E.g. for some simulation time T and a mixing probability [0.9, 0.8, 0.7] PatternManager creates distribution of patterns in time such that:
    - there are at most 3 different patterns in the input at any given time (non-sync)
    - probability that there is no pattern present in the input is (1 - 0.9) * (1 - 0.8) * (1 - 0.7)
    - probability that some 3 (out of N) of patterns are present at the same time is 0.9 * 0.8 * 0.7
    - probability that there is exactly 1 pattern present is:
      0.9 * (1 - 0.8) * (1 - 0.7) + (1 - 0.9) * 0.8 * (1 - 0.7) + (1 - 0.9) * (1 - 0.8) * 0.7
    - etc
    NOTE: this module uses Python 2 syntax (print statements, xrange).
    """
    def __init__(self, dt):
        """Init with simulation timestep dt (sec); starts with no patterns."""
        self.dt = dt
        self.patterns = {}
        self.npatterns = 0
        self.patlen = {}
    def addPatterns(self, patterns, IDs):
        """
        Add patterns with IDs
        Patterns should be a list or a 2d array (nchannels x pattern_length)
        Patterns whose ID is already registered are silently skipped.
        """
        for ind, p in enumerate(patterns):
            ID = IDs[ind]
            if ID not in self.patterns.keys():
                self.patterns[ID] = p
                self.patlen[ID] = p.shape[1]
                self.npatterns += 1
    def getPatternsIDs(self):
        """Return the IDs of all registered patterns."""
        return self.patterns.keys()
    # onoff: periods of patterns on and patterns off -> when they are shown and when they are not
    # first param gives a range for patterns on [0.5, 0.7] means patterns are on for random time between 0.5 and 0.7s
    # second param gives a range for patterns off [0.3, 0.5] means patterns are off for random time between 0.3 and 0.5s
    def createUnsyncPatterns(self, simulationtime, IDs, mixingprob, onoff, offset=0):
        """Generate a random distribution of overlapping pattern start times.

        Returns a dict mapping pattern ID -> sorted list of start timesteps
        (each shifted by `offset`).
        """
        onoff_isRange_on = isinstance(onoff[0], list)
        onoff_isRange_off = isinstance(onoff[1], list)
        # Both on and off must be ranges, or neither (XOR must be false).
        assert not (onoff_isRange_on ^ onoff_isRange_off)
        # onofftimes: array with 0(no patterns) and 1(patterns are on)
        # simulationtime : sec
        simulationtimeTS = int(np.ceil(simulationtime / self.dt)) # sim time in timesteps
        onoffTS = np.array(np.ceil(np.array(onoff) / self.dt), dtype=int) # sim time in timesteps
        #create onofftimes
        onofftimes=np.zeros(simulationtimeTS)
        t = 0
        onoroff = 0 #0 is on, 1 is off
        while t < simulationtimeTS:
            #duration of on/off time
            if onoff_isRange_on:
                minOnOffTime = onoff[onoroff][0]
                maxOnOffTime = onoff[onoroff][1]
                onofftime = minOnOffTime + np.random.rand() * (maxOnOffTime - minOnOffTime)
            else:
                onofftime = onoff[onoroff]
            steps=np.array(np.ceil(np.array(onofftime) / self.dt), dtype=int)
            steps = min(steps, simulationtimeTS - t)
            onofftimes[t: t + steps] = 1 - onoroff
            t += steps
            # alternate between on and off segments
            onoroff = 1 - onoroff
        # check whether all IDs exist
        pIDs = []
        patlen = []
        for ID in IDs:
            if ID in self.patterns.keys():
                pIDs.append(ID)
                patlen.append(self.patlen[ID])
        npatterns = len(pIDs)
        maxnpatterns = len(mixingprob) # max overlap of patterns
        # patact[p, t] holds the 1-based pattern index active in slot p at
        # time t (0 means the slot is free).
        patact = np.zeros((maxnpatterns, simulationtimeTS), dtype = 'int')
        # probability of mixing channels (each can contain any pattern)
        pa = np.array(mixingprob) # active percentage, size is maxnpatterns
        apatlen = sum(patlen) / float(len(patlen)) # average length of pattern
        pa /= (apatlen - pa * (apatlen - 1)) # probability of activating some pattern
        # prepare random numbers
        r = np.random.rand(maxnpatterns, simulationtimeTS)
        # now generate patterns
        for t in xrange(simulationtimeTS):
            if onofftimes[t] == 1: # if pattern time
                for p in xrange(maxnpatterns):
                    if patact[p, t] == 0: # if we can put new pattern
                        if pa[p] > r[p, t]:
                            # then chose one of patterns to put, eliminate those that are already active
                            #available patterns are
                            s = range(1, npatterns + 1)
                            for pp in patact[:, t]:
                                if pp > 0 and pp in s:
                                    s.remove(pp)
                            rp = s[np.random.random_integers(0, len(s) - 1)] # random pattern, 1-based index
                            patact[p, t: t + min(patlen[rp - 1], simulationtimeTS - t)] = rp
        # count how many time combination occurred (number of overlapping patterns)
        sp = sum(patact > 0)
        k = np.zeros(maxnpatterns + 1)
        for i in xrange(maxnpatterns + 1):
            k[i] = sum(sp == i) / float(simulationtimeTS)
        print "Distribution of number of overlapping patterns [ 0 to", maxnpatterns, "]"
        print k
        # now create activity of patterns to start times of patterns (conversion of IDs to pIDs!)
        # patterndistribution
        pd = dict()
        for i in xrange(npatterns):
            pd[pIDs[i]] = []
        for p in xrange(maxnpatterns):
            t = 0
            while t < simulationtimeTS:
                if patact[p, t] > 0: # if there is start of pattern
                    ID = pIDs[patact[p, t] - 1]
                    pd[ID] += [t + offset]
                    t += self.patlen[ID]
                else:
                    t += 1
        for k in pd.keys():
            pd[k].sort()
        return pd
class TPattern:
    """
    General (abstract) class for pattern.
    It creates pattern of given length based on given rates.
    mask : pattern rates (array)
    length : length of pattern
    NOTE: subclasses are expected to set npatterns, nchannels, length,
    lengthTS, rates and patterns before these methods are used.
    """
    def info(self):
        """
        Print some info about pattern (class).
        """
        print "Number of patterns:", self.npatterns
        print "Number of channels in pattern:", self.nchannels
        print "Lenght of pattern(sec):", self.length
        print "Rates:", self.rates
    def createFromRates(self, rates):
        """
        Creates one (poisson) pattern from rates.
        -> rates : array([nchannels, time])
                   time == 1 means const rate for channel
        Returns [spikes, total_spike_count] where spikes is a list with one
        array of spike timestep indices per channel.
        """
        if rates.shape[1] == 1: # if is const rate
            # broadcast the constant rate across all timesteps
            trates = np.tile(rates, (1, self.lengthTS))
        else:
            trates = rates
        # rates are per second; r is scaled by 1000, which assumes rates are
        # compared per-millisecond bin -- TODO confirm the intended dt units.
        r = np.random.rand(self.nchannels, self.lengthTS) * 1000.
        spikes = []
        bsp = trates > r
        for ch in xrange(self.nchannels):
            spikes.append(bsp[ch, :].nonzero()[0])
        return [spikes, bsp.sum()]
    def limitRates(self, minrate, maxrate):
        """
        Limits rates to some range (clips self.patterns in place).
        -> minrate : minimum rate
        -> maxrate : maximum rate
        """
        self.patterns[self.patterns < minrate] = minrate
        self.patterns[self.patterns > maxrate] = maxrate
class BarRatePatterns(TPattern):
    """
    Create rate bar pattern generator.
    It creates internally set of patterns defined with rates : patterns
    and binary masks which defines for each channels whether is it high or low rate : masks
    """
    def __init__(self, patternshape, bars, rates, length,dt):
        """
        Inits class:
        -> patternshape : shape of pattern [width, height]
        -> bars : set of indices (of bars vertical and horizontal) we want to create as pattern;
                  an empty set means all vertical and horizontal bars
        -> rates : dictionary with 'low' and 'high' rates
        -> length : length of pattern in sec
        -> dt : simulation timestep
        """
        self.patternshape = patternshape
        self.rates = rates
        self.length = length
        self.dt = dt
        # NOTE(review): np.ceil returns a float; later used as an array
        # dimension and repeat count -- relies on numpy accepting float
        # sizes (deprecated in newer numpy). Confirm before upgrading.
        self.lengthTS = np.ceil(self.length / dt) # length in timesteps
        # create masks of patterns
        self.nchannels = self.patternshape[0] * self.patternshape[1]
        n = len(bars)
        if n == 0:
            # default: one pattern per row plus one per column
            n = self.patternshape[0] + self.patternshape[1]
            bars = range(n)
        self.npatterns = len(bars)
        self.masks = np.zeros((n, self.nchannels, self.lengthTS), dtype='byte')
        for ind, i in enumerate(bars):
            pattern = np.zeros(self.patternshape)
            # first vertical bars
            if i >= 0 and i < self.patternshape[1]:
                pattern[:, i] = 1
            # indices past the column count select horizontal bars
            elif i >= self.patternshape[1] and i < self.patternshape[1] + self.patternshape[0]:
                pattern[i - self.patternshape[1], :] = 1
            # flatten to (nchannels, 1) and repeat across all timesteps
            newpattern = pattern.ravel().reshape((self.nchannels, 1))
            self.masks[ind, :, :] = newpattern.repeat(self.lengthTS, 1)
        # each mask multiply with rates
        HR = rates['high']
        LR = rates['low']
        self.patterns = np.array(self.masks * (HR - LR) + LR, dtype='float')
    def info(self):
        """
        Prints additional info about pattern
        """
        super(BarRatePatterns, self).info()
        print "Name : Bar rate pattern"
        print "Shape of pattern:", self.patternshape
class OrientedBarRatePatterns(TPattern):
    """
    Create rate oriented (rotated) bar pattern generator.
    It creates internally set of patterns defined with rates : patterns
    and binary masks which defines for each channels whether is it high or low rate : masks
    Note:
    It can be seen as a special case of random rate patterns, with constraints 2,1
    (even weaker due to max overlap all =1 for 2 groups (vertical and horizontal bars)
    which dont have any overlap!)
    """
    def __init__(self, patternshape, barwidth, angles, rates, length, dt):
        """
        Inits class:
        -> patternshape : shape of pattern [width, height]
        -> barwidth : width of the base vertical bar in channels
        -> angles : set of angles (starting from vertical bar in clock-wise direction) we want to create as pattern
        -> rates : dictionary with 'low' and 'high' rates
        -> length : length of pattern in sec
        -> dt : simulation timestep
        """
        self.patternshape = patternshape
        self.barwidth = barwidth
        self.rates = rates
        self.length = length
        self.dt = dt
        self.lengthTS = np.ceil(self.length / dt) # length in timesteps
        # create masks of patterns
        self.nchannels = self.patternshape[0] * self.patternshape[1]
        n = len(angles)
        self.npatterns = n
        self.masks = np.zeros((n, self.nchannels, self.lengthTS), dtype='byte')
        # Base pattern: a centered vertical bar of width `barwidth`.
        # NOTE(review): '/' on ints is floor division under Python 2 (this
        # module uses print-statement syntax); porting to Python 3 would
        # need '//' here.
        pattern0 = np.zeros(self.patternshape)
        pattern0[:, patternshape[1] / 2 - barwidth / 2 : patternshape[1] / 2 - barwidth / 2 + barwidth] = 1
        from scipy import ndimage
        for ind, angle in enumerate(angles):
            # ensure angle is correct
            assert angle >= 0 and angle <= 360
            # Rotate the base bar and re-binarize (threshold interpolation
            # artifacts at 0.1).
            pattern = (ndimage.rotate(pattern0, angle, reshape=False) > 0.1) * 1.
            newpattern = pattern.ravel().reshape((self.nchannels, 1))
            self.masks[ind, :, :] = newpattern.repeat(self.lengthTS, 1)
        # each mask multiply with rates
        HR = rates['high']
        LR = rates['low']
        self.patterns = np.array(self.masks * (HR - LR) + LR, dtype='float')
    def info(self):
        """
        Prints additional info about pattern
        """
        print "Name : Rotated bar rate pattern"
        print "Shape of pattern:", self.patternshape
class SpatioTemporalPatterns(TPattern):
"""
Create variable rate random pattern generator.
It creates internally set of patterns defined with rates in time : time patterns
(the binary mask is created for compatiblity reasons, it is set to 0)
"""
def __init__(self, nchannels, npatterns, rates, length, dt, process=None, patternsrates=None):
"""
Init class
-> nchannels : (int) number of channels
-> npatterns : (int) number of different patterns
-> rates : dictionary defining max and min rates {'low', 'high'}
-> length : (float) length of pattern in sec
-> dt : simulation timestep
-> process: process class which creates rates, it is applied to each pattern and each
channel in it for duration length
default process is random
It requires .Create(length) method
-> patternsrates : external matrix of rates for each channel through time for each pattern
array(npatterns, nchannels, length)
"""
self.rates = rates
self.length = length
self.nchannels = nchannels
self.npatterns = npatterns
self.dt = dt
self.lengthTS = int(np.ceil(self.length / dt)) # length in timesteps
print self.lengthTS
# create masks for compatiblity reasons
self.masks = np.zeros((self.npatterns, self.nchannels, self.lengthTS))
self.patterns = np.zeros((self.npatterns, self.nchannels, self.lengthTS))
if patternsrates == None: # if external rates description not provided
if process == None : # if no external process is provided
# process = random
for n in xrange(self.npatterns):
for ch in xrange(self.nchannels):
rrs = np.random.random_integers(rates['low'], rates['high'], self.lengthTS)
self.patterns[n, ch, :] = rrs
else: # process is defined so use it
for n in xrange(self.npatterns):
for ch in xrange(self.nchannels):
self.patterns[n, ch, :] = process.create(self.lengthTS)
else: # external description is provided
self.patterns = patternsrates
def info(self):
"""
Prints additional info about pattern
"""
super(VariableRatePatterns, self).info()
print "Name : Variable rate pattern"
| {
"content_hash": "a123265fad2c115546ac8c8ea80c71b9",
"timestamp": "",
"source": "github",
"line_count": 358,
"max_line_length": 210,
"avg_line_length": 35.76536312849162,
"alnum_prop": 0.6629959387691347,
"repo_name": "zjonke/EImotif",
"id": "85f9f5636575a3c29bc9f3fc9ffb663d2b28df53",
"size": "12804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eim/patterns.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "104308"
}
],
"symlink_target": ""
} |
'''Bookstore
Stores IPython notebooks automagically onto OpenStack clouds through Swift.
'''
__title__ = 'bookstore'
__version__ = '1.0.0'
__build__ = 0x010000
__author__ = 'Kyle Kelley'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2013 Kyle Kelley'
from .swift import SwiftNotebookManager
from .openstack import OpenStackNotebookManager
from .cloudfiles import CloudFilesNotebookManager
| {
"content_hash": "672ccad8e671c3535bc55ccf64f6316f",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 75,
"avg_line_length": 26.666666666666668,
"alnum_prop": 0.745,
"repo_name": "MatthewTurk/bookstore",
"id": "b051534888c0d3cfecf43b5b23e985d2cb29b8bf",
"size": "447",
"binary": false,
"copies": "1",
"ref": "refs/heads/ipynb2",
"path": "bookstore/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import json
import sys
#
# fio_latency2csv.py
#
# This tool converts fio's json+ completion latency data to CSV format.
# For example:
#
# fio_latency2csv.py fio-jsonplus.output fio-latency.csv
#
import os
# import json
import argparse
def parse_args():
    """Parse command-line arguments: fio json+ source and CSV dest stub."""
    parser = argparse.ArgumentParser()
    parser.add_argument('source',
                        help='fio json+ output file containing completion '
                             'latency data')
    parser.add_argument('dest',
                        help='destination file stub for latency data in CSV '
                             'format. job number will be appended to filename')
    return parser.parse_args()
# from stat.c
def plat_idx_to_val(idx, FIO_IO_U_PLAT_BITS=6, FIO_IO_U_PLAT_VAL=64):
    """Map a fio latency-histogram bin index back to a latency value.

    Mirrors fio's stat.c: low indices store the sample verbatim, while
    higher indices address geometrically growing buckets, for which the
    bucket's midpoint value is returned.
    """
    # Indices below 2 * FIO_IO_U_PLAT_VAL use all bits of the sample as
    # the index, so the index *is* the value.
    if idx < (FIO_IO_U_PLAT_VAL << 1):
        return idx
    # Decode the group (exponent) the index belongs to ...
    err_bits = (idx >> FIO_IO_U_PLAT_BITS) - 1
    # ... and that group's minimum value ...
    group_min = 1 << (err_bits + FIO_IO_U_PLAT_BITS)
    # ... and the bucket number within the group.
    bucket = idx % FIO_IO_U_PLAT_VAL
    # Return the midpoint of the bucket's value range.
    return group_min + (bucket + 0.5) * (1 << err_bits)
if __name__ == '__main__':
    args = parse_args()
    with open(args.source, 'r') as source:
        jsondata = json.loads(source.read())
    bins = {}
    bin_const = {}
    # fio reports completion latency separately per data direction.
    ddir_list = ['read', 'write', 'trim']
    const_list = ['FIO_IO_U_PLAT_NR', 'FIO_IO_U_PLAT_BITS',
                  'FIO_IO_U_PLAT_VAL']
    for jobnum in range(0,len(jsondata['jobs'])):
        prev_ddir = None
        for ddir in ddir_list:
            bins[ddir] = jsondata['jobs'][jobnum][ddir]['clat']['bins']
            bin_const[ddir] = {}
            for const in const_list:
                # The histogram constants are stored inline with the bins;
                # pop them out and assert they agree across directions.
                bin_const[ddir][const] = bins[ddir].pop(const)
                if prev_ddir:
                    assert bin_const[ddir][const] == bin_const[prev_ddir][const]
            prev_ddir = ddir
        stub, ext = os.path.splitext(args.dest)
        for ddir in ddir_list:
            # One CSV per job and direction: <stub>_job<N>_<ddir><ext>
            outfile = stub + '_job' + str(jobnum) + "_" + ddir + ext
            with open(outfile, 'w') as output:
                for x in range(bin_const[ddir]['FIO_IO_U_PLAT_NR']):
                    lat = plat_idx_to_val(x,
                                          bin_const[ddir]['FIO_IO_U_PLAT_BITS'],
                                          bin_const[ddir]['FIO_IO_U_PLAT_VAL'])
                    # Emit the bin's latency value once per recorded sample.
                    for i in range(bins[ddir][str(x)]):
                        output.write("{0}\n".format(lat))
# def extract_samples(fio_file, samples_file):
# """
# # Convert FIO histogram to samples list.
# # FIO keeps it in bins: "{value: count, value: count"} .
# # We need it (for ./maketable) as a flat file with a single value per line
# """
# # open the file and extract bins
# with open(fio_file, mode='r') as f:
# data = json.load(f)
# bins = data["jobs"][0]["write"]["clat"]["bins"]
# # filter out values with 0 occurence
# # '-3' is needed because there are 3 service lines in the bins
# # so we have 3 less actual values to handle
# occuring_values = filter(lambda x: bins[str(x)] != 0, range(len(bins)-3))
# # Flatten the data into a list of individual samples
# # e.g. {150:3} (meaning 150 msec latency was seen 3 times) will look like [150, 150, 150]
# # and then print each sample as a separate line
# with open(samples_file, mode='w') as f:
# for value in occuring_values:
# for sample in range(bins[str(value)]):
# f.writelines([str(value), "\n"])
# if __name__ == "__main__":
# if len(sys.argv) != 3:
# print("Convert FIO json+ output to a flat colums of samples.")
# print("Usage: dump.py <fio_output> <samples_file>'")
# sys.exit(2)
# extract_samples(sys.argv[1], sys.argv[2])
| {
"content_hash": "ffe41a8cffff5a2f7b49d174017d5889",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 95,
"avg_line_length": 33.8974358974359,
"alnum_prop": 0.5542107917297024,
"repo_name": "msterin/play",
"id": "0f7588b3c13e5302b43de68677acbc012e552dfa",
"size": "4263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fio_play/dump.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "13507"
},
{
"name": "Dockerfile",
"bytes": "83"
},
{
"name": "Makefile",
"bytes": "3014"
},
{
"name": "Python",
"bytes": "28902"
},
{
"name": "Shell",
"bytes": "3266"
},
{
"name": "Smarty",
"bytes": "516"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class MarkerValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the ``splom.unselected.marker`` compound property
    (appears to be auto-generated plotly.py validator code)."""
    def __init__(self, plotly_name="marker", parent_name="splom.unselected", **kwargs):
        # data_class_str / data_docs may be overridden by callers through
        # kwargs; pop() supplies the defaults otherwise. The remaining
        # kwargs are forwarded unchanged to CompoundValidator.
        super(MarkerValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Marker"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color
                Sets the marker color of unselected points,
                applied only when a selection exists.
            opacity
                Sets the marker opacity of unselected points,
                applied only when a selection exists.
            size
                Sets the marker size of unselected points,
                applied only when a selection exists.
""",
            ),
            **kwargs
        )
| {
"content_hash": "922f6042301a9434efe97c8c963d4db7",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 87,
"avg_line_length": 36.68,
"alnum_prop": 0.5528898582333697,
"repo_name": "plotly/python-api",
"id": "c80a741d59d31b26101cc41ede93a37e34b4df76",
"size": "917",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/splom/unselected/_marker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import ShtikerPage
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from toontown.suit import SuitDNA
from toontown.battle import SuitBattleGlobals
from toontown.minigame import MinigamePowerMeter
from toontown.coghq import CogDisguiseGlobals
# Background tint for each cog department tab; indexed by tabIndex - 1 in
# DisguisePage.load (order presumably matches SuitDNA.suitDepts -- TODO confirm).
DeptColors = (Vec4(0.647, 0.608, 0.596, 1.0),
 Vec4(0.588, 0.635, 0.671, 1.0),
 Vec4(0.596, 0.714, 0.659, 1.0),
 Vec4(0.761, 0.678, 0.69, 1.0))
# Largest number of disguise parts any suit requires.
NumParts = max(CogDisguiseGlobals.PartsPerSuit)
# Model node names for the robot body parts; iterated in this order alongside
# the part bitmask in DisguisePage.updatePartsDisplay.
PartNames = ('lUpleg', 'lLowleg', 'lShoe', 'rUpleg', 'rLowleg', 'rShoe', 'lShoulder', 'rShoulder', 'chest', 'waist', 'hip', 'lUparm', 'lLowarm', 'lHand', 'rUparm', 'rLowarm', 'rHand')
class DisguisePage(ShtikerPage.ShtikerPage):
    """Shticker-book page showing the toon's cog disguise progress: one tab
    per cog department, the collected suit parts, and a merit/promotion dial."""
    # Dial face colors: idle portion vs. filled (active) portion.
    meterColor = Vec4(0.87, 0.87, 0.827, 1.0)
    meterActiveColor = Vec4(0.7, 0.3, 0.3, 1)
    def __init__(self):
        ShtikerPage.ShtikerPage.__init__(self)
        self.activeTab = 0
        # Label for the currently relevant progress currency; replaced with
        # the proper department-specific label in load()/doTab().
        self.progressTitle = None
        return
    def load(self):
        """Build the page GUI from the phase_9 cog_disguises model: department
        tabs, robot part display, progress titles and the merit dial."""
        ShtikerPage.ShtikerPage.load(self)
        gui = loader.loadModel('phase_9/models/gui/cog_disguises')
        self.frame = DirectFrame(parent=self, relief=None, scale=0.47, pos=(0.02, 1, 0))
        self.bkgd = DirectFrame(parent=self.frame, geom=gui.find('**/base'), relief=None, scale=(0.98, 1, 1))
        self.bkgd.setTextureOff(1)
        self.tabs = []
        self.pageFrame = DirectFrame(parent=self.frame, relief=None)
        # One tab per department; geometry index and label position are
        # hard-coded per department letter.
        for dept in SuitDNA.suitDepts:
            if dept == 'c':
                tabIndex = 1
                textPos = (1.57, 0.75)
            elif dept == 'l':
                tabIndex = 2
                textPos = (1.57, 0.12)
            elif dept == 'm':
                tabIndex = 3
                textPos = (1.57, -0.47)
            elif dept == 's':
                tabIndex = 4
                textPos = (1.57, -1.05)
            pageGeom = gui.find('**/page%d' % tabIndex)
            tabGeom = gui.find('**/tab%d' % tabIndex)
            tab = DirectButton(parent=self.pageFrame, relief=None, geom=tabGeom, geom_color=DeptColors[tabIndex - 1], text=SuitDNA.suitDeptFullnames[dept], text_font=ToontownGlobals.getSuitFont(), text_pos=textPos, text_roll=-90, text_scale=TTLocalizer.DPtab, text_align=TextNode.ACenter, text1_fg=Vec4(1, 0, 0, 1), text2_fg=Vec4(0.5, 0.4, 0.4, 1), text3_fg=Vec4(0.4, 0.4, 0.4, 1), command=self.doTab, extraArgs=[len(self.tabs)], pressEffect=0)
            self.tabs.append(tab)
            page = DirectFrame(parent=tab, relief=None, geom=pageGeom)
        self.deptLabel = DirectLabel(parent=self.frame, text='', text_font=ToontownGlobals.getSuitFont(), text_scale=TTLocalizer.DPdeptLabel, text_pos=(-0.1, 0.8))
        DirectFrame(parent=self.frame, relief=None, geom=gui.find('**/pipe_frame'))
        self.tube = DirectFrame(parent=self.frame, relief=None, geom=gui.find('**/tube'))
        DirectFrame(parent=self.frame, relief=None, geom=gui.find('**/robot/face'))
        DirectLabel(parent=self.frame, relief=None, geom=gui.find('**/text_cog_disguises'), geom_pos=(0, 0.1, 0))
        # One progress title per currency type; all hidden except the one
        # matching the active department (see doTab).
        self.meritTitle = DirectLabel(parent=self.frame, relief=None, geom=gui.find('**/text_merit_progress'), geom_pos=(0, 0.1, 0))
        self.meritTitle.hide()
        self.cogbuckTitle = DirectLabel(parent=self.frame, relief=None, geom=gui.find('**/text_cashbuck_progress'), geom_pos=(0, 0.1, 0))
        self.cogbuckTitle.hide()
        self.juryNoticeTitle = DirectLabel(parent=self.frame, relief=None, geom=gui.find('**/text_jury_notice_progress'), geom_pos=(0, 0.1, 0))
        self.juryNoticeTitle.hide()
        self.stockOptionTitle = DirectLabel(parent=self.frame, relief=None, geom=gui.find('**/text_stock_option_progress'), geom_pos=(0, 0.1, 0))
        self.stockOptionTitle.hide()
        self.progressTitle = self.meritTitle
        self.promotionTitle = DirectLabel(parent=self.frame, relief=None, geom=gui.find('**/text_ready4promotion'), geom_pos=(0, 0.1, 0))
        self.cogName = DirectLabel(parent=self.frame, relief=None, text='', text_font=ToontownGlobals.getSuitFont(), text_scale=TTLocalizer.DPcogName, text_align=TextNode.ACenter, pos=(-0.948, 0, -1.15))
        self.cogLevel = DirectLabel(parent=self.frame, relief=None, text='', text_font=ToontownGlobals.getSuitFont(), text_scale=0.09, text_align=TextNode.ACenter, pos=(-0.91, 0, -1.02))
        self.partFrame = DirectFrame(parent=self.frame, relief=None)
        # parts[i] shows an owned body part; holes[i] shows its empty slot.
        self.parts = []
        for partNum in range(0, NumParts):
            self.parts.append(DirectFrame(parent=self.partFrame, relief=None, geom=gui.find('**/robot/' + PartNames[partNum])))
        self.holes = []
        for partNum in range(0, NumParts):
            self.holes.append(DirectFrame(parent=self.partFrame, relief=None, geom=gui.find('**/robot_hole/' + PartNames[partNum])))
        self.cogPartRatio = DirectLabel(parent=self.frame, relief=None, text='', text_font=ToontownGlobals.getSuitFont(), text_scale=0.08, text_align=TextNode.ACenter, pos=(-0.91, 0, -0.82))
        self.cogMeritRatio = DirectLabel(parent=self.frame, relief=None, text='', text_font=ToontownGlobals.getSuitFont(), text_scale=0.08, text_align=TextNode.ACenter, pos=(0.45, 0, -0.36))
        # Dial is drawn as a full face plus two rotatable half faces.
        meterFace = gui.find('**/meter_face_whole')
        meterFaceHalf = gui.find('**/meter_face_half')
        self.meterFace = DirectLabel(parent=self.frame, relief=None, geom=meterFace, color=self.meterColor, pos=(0.455, 0.0, 0.04))
        self.meterFaceHalf1 = DirectLabel(parent=self.frame, relief=None, geom=meterFaceHalf, color=self.meterActiveColor, pos=(0.455, 0.0, 0.04))
        self.meterFaceHalf2 = DirectLabel(parent=self.frame, relief=None, geom=meterFaceHalf, color=self.meterColor, pos=(0.455, 0.0, 0.04))
        self.frame.hide()
        self.activeTab = 3
        self.updatePage()
        return
    def unload(self):
        ShtikerPage.ShtikerPage.unload(self)
    def enter(self):
        self.frame.show()
        ShtikerPage.ShtikerPage.enter(self)
    def exit(self):
        self.frame.hide()
        ShtikerPage.ShtikerPage.exit(self)
    def updatePage(self):
        # Refresh by re-selecting the active tab.
        self.doTab(self.activeTab)
    def updatePartsDisplay(self, index, numParts, numPartsRequired):
        """Show owned parts / hide their hole placeholders for the suit at
        `index`. `numParts` is a bitmask of owned parts; `numPartsRequired`
        is accepted but unused here."""
        partBitmask = 1
        groupingBitmask = CogDisguiseGlobals.PartsPerSuitBitmasks[index]
        previousPart = 0
        for part in self.parts:
            groupingBit = groupingBitmask & partBitmask
            if numParts & partBitmask & groupingBit:
                part.show()
                self.holes[self.parts.index(part)].hide()
                if groupingBit:
                    previousPart = 1
            elif not groupingBit and previousPart:
                # Part is not individually tracked for this suit; show it
                # whenever the preceding tracked part in the group is owned.
                part.show()
                self.holes[self.parts.index(part)].hide()
            else:
                self.holes[self.parts.index(part)].show()
                part.hide()
                previousPart = 0
            partBitmask = partBitmask << 1
    def updateMeritBar(self, dept):
        """Update the merit dial and ratio text for the given department,
        switching to the promotion title when the toon is promotion-ready."""
        merits = base.localAvatar.cogMerits[dept]
        totalMerits = CogDisguiseGlobals.getTotalMerits(base.localAvatar, dept)
        if totalMerits == 0:
            # Nothing required: treat as complete.
            progress = 1
        else:
            progress = min(merits / float(totalMerits), 1)
        self.updateMeritDial(progress)
        if base.localAvatar.readyForPromotion(dept):
            self.cogMeritRatio['text'] = TTLocalizer.DisguisePageMeritFull
            self.promotionTitle.show()
            self.progressTitle.hide()
        else:
            self.cogMeritRatio['text'] = '%d/%d' % (merits, totalMerits)
            self.promotionTitle.hide()
            self.progressTitle.show()
    def updateMeritDial(self, progress):
        """Rotate/color the two half faces to show `progress` in [0, 1]."""
        if progress == 0:
            self.meterFaceHalf1.hide()
            self.meterFaceHalf2.hide()
            self.meterFace.setColor(self.meterColor)
        elif progress == 1:
            self.meterFaceHalf1.hide()
            self.meterFaceHalf2.hide()
            self.meterFace.setColor(self.meterActiveColor)
        else:
            self.meterFaceHalf1.show()
            self.meterFaceHalf2.show()
            self.meterFace.setColor(self.meterColor)
            if progress < 0.5:
                self.meterFaceHalf2.setColor(self.meterColor)
            else:
                # Past the halfway point: recolor and rotate the second half
                # by the remaining fraction.
                self.meterFaceHalf2.setColor(self.meterActiveColor)
                progress = progress - 0.5
            self.meterFaceHalf2.setR(180 * (progress / 0.5))
    def doTab(self, index):
        """Activate department tab `index` and refresh every widget on the
        page (tab colors, suit name/level, parts display, merit bar)."""
        self.activeTab = index
        self.tabs[index].reparentTo(self.pageFrame)
        for i in range(len(self.tabs)):
            tab = self.tabs[i]
            if i == index:
                tab['text0_fg'] = (1, 0, 0, 1)
                tab['text2_fg'] = (1, 0, 0, 1)
            else:
                tab['text0_fg'] = (0, 0, 0, 1)
                tab['text2_fg'] = (0.5, 0.4, 0.4, 1)
        self.bkgd.setColor(DeptColors[index])
        self.deptLabel['text'] = (SuitDNA.suitDeptFullnames[SuitDNA.suitDepts[index]],)
        # Current suit type within the department.
        cogIndex = base.localAvatar.cogTypes[index] + SuitDNA.suitsPerDept * index
        cog = SuitDNA.suitHeadTypes[cogIndex]
        # Swap in the department-appropriate progress currency title.
        self.progressTitle.hide()
        if SuitDNA.suitDepts[index] == 'm':
            self.progressTitle = self.cogbuckTitle
        elif SuitDNA.suitDepts[index] == 'l':
            self.progressTitle = self.juryNoticeTitle
        elif SuitDNA.suitDepts[index] == 'c':
            self.progressTitle = self.stockOptionTitle
        else:
            self.progressTitle = self.meritTitle
        self.progressTitle.show()
        self.cogName['text'] = SuitBattleGlobals.SuitAttributes[cog]['name']
        cogLevel = base.localAvatar.cogLevels[index]
        self.cogLevel['text'] = TTLocalizer.DisguisePageCogLevel % str(cogLevel + 1)
        numParts = base.localAvatar.cogParts[index]
        numPartsRequired = CogDisguiseGlobals.PartsPerSuit[index]
        self.updatePartsDisplay(index, numParts, numPartsRequired)
        self.updateMeritBar(index)
        self.cogPartRatio['text'] = '%d/%d' % (CogDisguiseGlobals.getTotalParts(numParts), numPartsRequired)
| {
"content_hash": "6962457097e5d21f3dcb280ada142593",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 444,
"avg_line_length": 51.56122448979592,
"alnum_prop": 0.6270532357015635,
"repo_name": "ksmit799/Toontown-Source",
"id": "bc485bf9604340530f740f91812b062320d804bf",
"size": "10106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/shtiker/DisguisePage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1441"
},
{
"name": "PLSQL",
"bytes": "901"
},
{
"name": "Python",
"bytes": "15617225"
},
{
"name": "Shell",
"bytes": "182"
}
],
"symlink_target": ""
} |
"""Import students and their supervisors from a pipe-delimited text file
into the database, creating records only when they do not already exist."""
import sys
import sqlalchemy
from SQLiteConnection import engine, Session
from ModelClasses import *
filename = 'student_data.txt'
# Read the whole file; the first five lines are headers, data starts at line 6.
with open(filename) as data:
    lines = data.readlines()
session = Session()
for line in lines[5:]:
    line = line.split("|")
    try:
        # BUG FIX: the looked-up record was previously bound to `a_student`
        # and never used, so supervisors of an existing student were appended
        # to the *previous* loop iteration's `student` (or an unbound name).
        student = session.query(Student).filter(Student.last_name==line[1]).filter(Student.first_name==line[0]).one()
    except sqlalchemy.orm.exc.NoResultFound:
        student = Student()
        student.first_name = line[0]
        student.last_name = line[1]
        session.add(student)
        print("Adding {} {}".format(line[0],line[1]))
    except sqlalchemy.orm.exc.MultipleResultsFound:
        # Ambiguous duplicates: report and skip this row rather than attach
        # supervisors to an arbitrary record.
        print("**{} {} is already in database!**".format(line[0],line[1]))
        continue
    # Supervisor column looks like: Eccleston/Room 205, Baker/Room 315
    supers = line[3].split(',')
    for supe in supers:
        supe = supe.split('/')
        if len(supe) < 2 :
            continue
        # BUG FIX: str.strip returns a new string; the result was discarded.
        supe[0] = supe[0].strip(' ')
        try:
            one_supervisor = session.query(Supervisor).filter(Supervisor.last_name==supe[0]) \
                                    .filter(Supervisor.room_number==supe[1]).one()
        except sqlalchemy.orm.exc.NoResultFound:
            one_supervisor = Supervisor()
            one_supervisor.last_name = supe[0]
            one_supervisor.first_name = ""
            one_supervisor.room_number = supe[1]
            session.add(one_supervisor)
        except sqlalchemy.orm.exc.MultipleResultsFound:
            print("There is more than one Doctor!")
            sys.exit(1)
        student.supervisors.append(one_supervisor)
session.commit()
engine.dispose()  # cleanly disconnect from the database
sys.exit(0)
| {
"content_hash": "64005755272f8cae57c3731a9db977a7",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 113,
"avg_line_length": 23.43076923076923,
"alnum_prop": 0.685489166119501,
"repo_name": "rcmorehead/scicoder2014",
"id": "40335ada2521e087e90853125bb6b87b074c7399",
"size": "1542",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "student_import_script.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10318"
}
],
"symlink_target": ""
} |
"""Test RPC commands for signing and verifying messages."""
from test_framework.test_framework import IonTestFramework
class SignMessagesTest(IonTestFramework):
    """Functional test: sign a message with a raw private key and with a
    wallet-generated address, then verify both signatures via RPC."""
    def __init__(self):
        super().__init__()
        # Single node on a fresh chain is enough for sign/verify RPCs.
        self.setup_clean_chain = True
        self.num_nodes = 1
    def run_test(self):
        message = 'This is just a test message'
        # Test the signing with a privkey
        privKey = 'cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N'
        # Address corresponding to the private key above.
        address = 'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB'
        signature = self.nodes[0].signmessagewithprivkey(privKey, message)
        # Verify the message
        assert(self.nodes[0].verifymessage(address, signature, message))
        # Test the signing with an address with wallet
        address = self.nodes[0].getnewaddress()
        signature = self.nodes[0].signmessage(address, message)
        # Verify the message
        assert(self.nodes[0].verifymessage(address, signature, message))
if __name__ == '__main__':
SignMessagesTest().main()
| {
"content_hash": "c5f03ab3e2ac62b6489929fe27692000",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 74,
"avg_line_length": 33.25806451612903,
"alnum_prop": 0.6702230843840931,
"repo_name": "aspaas/ion",
"id": "cfc57b17bc979f8b83b0df7671964fc5e10680ee",
"size": "1240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/signmessages.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "616463"
},
{
"name": "C++",
"bytes": "4560754"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "18274"
},
{
"name": "Makefile",
"bytes": "16792"
},
{
"name": "NSIS",
"bytes": "5917"
},
{
"name": "Objective-C++",
"bytes": "6205"
},
{
"name": "Python",
"bytes": "96149"
},
{
"name": "QMake",
"bytes": "20721"
},
{
"name": "Shell",
"bytes": "391146"
}
],
"symlink_target": ""
} |
import csv
import json
import traceback
import re
import sys
import warnings
from ..sentence import *
from ..word import *
warnings.filterwarnings("ignore", category=FutureWarning, module="__main__")
################################################################################
#### HELPER FUNCTIONS ##########################################################
################################################################################
def _clean_sent(sent):
"""
This preprocessing function takes in a sentence object, grabs just the
actual sentence, and runs three regex substitutions to clean up the
sentence for querying.
"""
s = re.sub(r'\s([\.,!?\)\]])', r'\1', sent.get_sentence().lower())
s = re.sub(r'([\(\[])\s', r'\1', s)
s = re.sub(r'(\s’[\ss])', r'’', s, re.UNICODE)
return s
def _get_sent_label(labels, sent):
    """
    Given a list of uncertainty cues and a sentence object, return a label
    representing the classification of the entire sentence and each token within
    that sentence.
    Favor is given to the five uncertainty classes; if any token within the
    sentence is labeled as 'U', 'E', 'I', 'D', or 'N', the entire sentence is
    considered to be whichever label that occurs most frequently within the
    sentence. This decision is based on that made by Vincze et al. in their
    binary classifier. Only if there are no occurences of the five uncertainty
    labels within a sentence is the sentence classified as 'C'.
    """
    # Map raw cue-type name suffixes onto the single-letter classes.
    label_map = {"possible": "E", "probable": "E", "epistemic": "E",
                 "doxastic": "D", "investigation": "I", "condition": "N",
                 "certain": "C", "uncertain": "U"}
    word_labels = []
    for i, word in enumerate(sent.get_words()):
        # Default every token to its existing binary label ...
        word_labels.append(word.binary_label)
        # ... then upgrade it when the token matches an uncertainty cue.
        # `labels` may be a plain string (not searchable); only dict-like
        # {cue_type: cue_words} values are scanned.
        if type(labels) != str:
            for k, v in labels.items():
                if word.word in v:
                    # Cue keys end in a type suffix, e.g. "..._epistemic_";
                    # strip trailing underscores and take the last token.
                    word_labels[i] = label_map[k.strip().rstrip("_").split("_")[-1].strip()]
                    break
    # NOTE(review): debug print left in -- runs for every sentence processed.
    print("RAW:\t" + str(labels))
    if not bool(labels):
        # No cues at all: keep the sentence's existing label.
        return sent.get_label(), word_labels
    else:
        # Tally cue occurrences per class and pick the most frequent.
        labs = {"E": 0.0, "D": 0.0, "I": 0.0, "N": 0.0, "C": 0.0, "U": 0.0}
        for k, v in labels.items():
            label = label_map[k.strip().rstrip("_").split("_")[-1].strip()]
            labs[label] += len(v)
        max_val = max(labs.values())
        max_keys = []
        for k, v in labs.items():
            if v == max_val:
                max_keys.append(k)
        if len(max_keys) == 1:
            return max_keys[0], word_labels
        else:
            # Ties resolve to generic uncertainty.
            return "U", word_labels
def _get_lines(filepath):
""" Given a filepath, return a list of lines within that file. """
lines = None
with open(filepath) as file:
lines = file.readlines()
return lines
def _get_sentences(filepath):
""" Given a filepath, return a list of sentences within that file. """
sentences = list()
_lines = list()
for line in _get_lines(filepath):
if line.strip() == '': # End of Sentence
sentences.append(_lines)
_lines = list()
continue
_lines.append(line)
return sentences
################################################################################
#### MAIN FUNCTION #############################################################
################################################################################
def merge_data(json_data, tagged_data):
    """
    There are two datasets associated with the work of Vincze et al [0].
    The first [1A] consists of a collection of XML objects that contain the text
    of a sentence and a list of uncertainty cues (with their categorization)
    that are within the sentence, if any. It has been parsed into a more
    intuitive JSON [1B] structure (at the expense of harddrive space).
    The second [2] consists of tab-delineated lists of tokens from [1A]. Each
    line in the files contains at least five columns: 1) an ID, 2) the raw
    token, 3) the lemma of the token, 4) the part-of-speech tag for the token,
    and 5) a label of certain or uncertain for the token. The tokens are in
    order by sentence, with each sentence separated by an empty line. All
    columns following the first five contain preparsed features used to train
    the binary classifier described in Vincze et al [0].
    This function parses both datasets (which contain the same sentences), and
    matches the uncertainty cues from [1B] to the tokens in [2]. This is not a
    trivial task; since [2] does not actually contain the raw sentences, the
    tokens had to be parsed into sentences and compared against those in [1B].
    This function results in a new file /data/merged_data [3] that is formatted
    in the same manner as [2], but contains a sixth column denoting a specific
    type of uncertainty (epistemic, doxastic, investigation, condition, other)
    when applicable. This new file [3] was used to train the multiclass models
    contained in this codebase.
    [0 ] http://doktori.bibl.u-szeged.hu/2291/1/Vincze_Veronika_tezis.pdf
    [1A] http://rgai.inf.u-szeged.hu/project/nlp/uncertainty/uncertainty.zip
    [1B] http://people.rc.rit.edu/~bsm9339/corpora/szeged_uncertainty/szeged_uncertainty_json.tar.gz
    [2 ] http://rgai.inf.u-szeged.hu/project/nlp/uncertainty/clexperiments.zip
    [3 ] http://people.rc.rit.edu/~bsm9339/corpora/szeged_uncertainty/merged_data

    Returns True on success; on any exception, prints the traceback and
    returns False.
    """
    try:
        json_dict = json.loads(open(json_data, 'r').read())
        # Map lowercased sentence text -> its cue dict, keeping only
        # documents that actually contain uncertainty cues.
        json_dict2 = {}
        cnt = 0
        for item in json_dict:
            cnt += 1
            if bool(item['ccue']):
                json_dict2.update({item['text'].lower(): item['ccue']})
        print("Found " + str(cnt) + " documents with uncertainty cues.")
        sents = Sentences.from_lineslist(_get_sentences(tagged_data))
        X, y = sents.get_data()
        with open(tagged_data + ".new", "w", newline='') as tsvfile:
            tsv_writer = csv.writer(tsvfile, delimiter='\t')
            for i, sent in enumerate(X):
                s = _clean_sent(sent)
                tags = {}
                # The tokenized sentences do not always round-trip to the
                # exact JSON text; try a series of regex normalizations
                # (hyphen spacing, slashes, escapes, ...) before giving up.
                if s in json_dict2.keys():
                    tags = json_dict2[s]
                elif re.sub(r"(\d\s)-(\s\d)", r"\1-\2", s) in json_dict2.keys():
                    tags = json_dict2[re.sub(r"(\d\s)-(\s\d)", r"\1-\2", s)]
                elif re.sub(r"\s/\s", r"/", s) in json_dict2.keys():
                    tags = json_dict2[re.sub(r"\s/\s", r"/", s)]
                elif re.sub(r'\\u00b1', '±', s) in json_dict2.keys():
                    tags = json_dict2[re.sub(r'\\u00b1', '±', s)]
                elif re.sub(r'\s-\s', '-', s) in json_dict2.keys():
                    tags = json_dict2[re.sub(r'\s-\s', '-', s)]
                elif re.sub(r"\\", '', s) in json_dict2.keys():
                    tags = json_dict2[re.sub(r"\\", '', s)]
                else:
                    tags = {}
                rows = []
                sent_label, word_labels = _get_sent_label(tags, sent)
                # Zero-pad token indices so IDs sort lexicographically.
                prepend = "000"
                for j, word in enumerate(sent.get_words()):
                    if j > 999:
                        prepend=""
                    elif j > 99:
                        prepend="0"
                    elif j > 9:
                        prepend="00"
                    row = ['sent' + str(i) + "token" + prepend + str(j),
                           str(word.word), str(word.root), str(word.pos)]
                    row.append(word.binary_label) # Binary Label
                    row.append(word_labels[j]) # Multiclass Label
                    for k, v in word.get_features().items():
                        row.append(str(k) + ":" + str(v))
                    tsv_writer.writerow(row)
                # Blank row terminates the sentence, matching the input format.
                tsv_writer.writerow([""])
    except Exception as e:
        extype, exvalue, extrace = sys.exc_info()
        traceback.print_exception(extype, exvalue, extrace)
        return False
    return True
| {
"content_hash": "042ef6b9a73094828fb17981ac931be2",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 100,
"avg_line_length": 41.61340206185567,
"alnum_prop": 0.5362318840579711,
"repo_name": "meyersbs/uncertainty",
"id": "63d26651335bb50002c28d743db05092fd5f1321",
"size": "8079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uncertainty/data/merge.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "101812"
}
],
"symlink_target": ""
} |
from jsg import Document
from jsg.fields import NumberField, IntField
from utils import check_schema, schema
import pytest
def test_resolve(schema):
    """A Document subclass registered via schema.add() can be resolved."""
    @schema.add()
    class A(Document):
        a = IntField()
    schema.resolve(A)
def test_extra(schema):
    """Non-field class attributes (b, d) are excluded from the generated
    schema, while fields inherited from A plus B's own fields are included."""
    @schema.add()
    class A(Document):
        a = IntField()
        b = 1
    @schema.add()
    class B(A):
        c = IntField()
        d = 1
    # B's schema contains inherited `a` and its own `c`, but not b or d.
    check_schema(schema, "B", {
        '$schema': 'http://json-schema.org/draft-04/schema#',
        'definitions': {
            'B': {
                'type': 'object',
                'properties': {
                    "a": {"type": "integer"},
                    "c": {"type": "integer"}
                }
            }
        },
        '$ref': '#/definitions/B',
    })
| {
"content_hash": "ad3b340982bb974dc11b9469ae9090b6",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 61,
"avg_line_length": 20.92105263157895,
"alnum_prop": 0.4691823899371069,
"repo_name": "pbutler/jsg",
"id": "824f64c6c6e64733621b94dce959e2f782dedc1a",
"size": "796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_misc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44680"
}
],
"symlink_target": ""
} |
def pancake():
    """Solve one test case (Python 2): minimum minutes until all pancakes
    are eaten, given that each minute either everyone eats one pancake from
    their own plate or the server splits one plate's pancakes onto two plates.
    Strategy: cap every plate at `cnt` pancakes, paying one minute per split
    needed to get under the cap, then eat for `cnt` minutes; try every cap.
    """
    # Number of plates; consumed from input but not otherwise needed since
    # P carries the full list on the next line.
    D = int(input())
    P = map(int, raw_input().strip().split())
    # Baseline: never split, finish in max(P) minutes.
    time = max(P)
    # Try to split each plate into target count of pancakes, and count waiting time
    for cnt in xrange(2, max(P)):
        wait = 0
        for cakes in P:
            # Splits needed to reduce `cakes` pancakes to piles of <= cnt.
            wait += (cakes-1)//cnt
        time = min(time, cnt+wait)
    return time
# One line of output per test case, GCJ format.
for case in xrange(input()):
    print 'Case #%d: %d' % (case+1, pancake())
| {
"content_hash": "0918664c5ec0df5205a4d4129db0b565",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 83,
"avg_line_length": 28.466666666666665,
"alnum_prop": 0.5480093676814989,
"repo_name": "kamyu104/GoogleCodeJam-2015",
"id": "6c89cb3e3ce7d48003663667aa06c80a0d5f2cac",
"size": "668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Qualification Round/infinite-house-of-pancakes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "30148"
},
{
"name": "Python",
"bytes": "51406"
}
],
"symlink_target": ""
} |
# Maps each platform backend to a dict of
# {remote debugging port (str): DevToolsClientBackend} registered on it.
_platform_backends_to_devtools_clients_maps = {}
def _RemoveStaleDevToolsClient(platform_backend):
  """Removes DevTools clients that are no longer connectable."""
  devtools_clients_map = _platform_backends_to_devtools_clients_maps.get(
      platform_backend, {})
  # Rebuild the map keeping only clients that still respond to IsAlive()
  # (Python 2 code: dict.iteritems).
  devtools_clients_map = {
      port: client
      for port, client in devtools_clients_map.iteritems()
      if client.IsAlive()
  }
  _platform_backends_to_devtools_clients_maps[platform_backend] = (
      devtools_clients_map)
def RegisterDevToolsClient(devtools_client_backend):
  """Record a newly created DevTools client under its platform backend.

  This should only be called from DevToolsClientBackend when it is
  initialized; clients are keyed by their remote debugging port (as str).
  """
  port_key = str(devtools_client_backend.remote_port)
  clients_by_port = _platform_backends_to_devtools_clients_maps.setdefault(
      devtools_client_backend.platform_backend, {})
  clients_by_port[port_key] = devtools_client_backend
def GetDevToolsClients(platform_backend):
  """Get DevTools clients including the ones that are no longer connectable."""
  clients_by_port = _platform_backends_to_devtools_clients_maps.get(
      platform_backend, {})
  if not clients_by_port:
    return []
  return clients_by_port.values()
def GetActiveDevToolsClients(platform_backend):
  """Get DevTools clients that are still connectable."""
  # Prune dead clients first so only connectable ones are returned.
  _RemoveStaleDevToolsClient(platform_backend)
  return GetDevToolsClients(platform_backend)
| {
"content_hash": "41f8f7f358f7a467db48093b57ee14c8",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 79,
"avg_line_length": 36.61538461538461,
"alnum_prop": 0.7478991596638656,
"repo_name": "endlessm/chromium-browser",
"id": "d6565b1040297651d04b513c20eaf0d3b12f3a45",
"size": "1806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/catapult/telemetry/telemetry/internal/platform/tracing_agent/chrome_tracing_devtools_manager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import os
import sys
from setuptools import find_packages
from setuptools import setup
# Plugin version; also used below as the minimum acme/certbot requirement.
version = '1.32.0.dev0'
install_requires = [
    'dnspython>=1.15.0',
    'setuptools>=41.6.0',
]
# Snap builds bundle certbot itself, so the acme/certbot requirements are
# only added for regular (non-snap) builds.
if not os.environ.get('SNAP_BUILD'):
    install_requires.extend([
        # We specify the minimum acme and certbot version as the current plugin
        # version for simplicity. See
        # https://github.com/certbot/certbot/issues/8761 for more info.
        f'acme>={version}',
        f'certbot>={version}',
    ])
elif 'bdist_wheel' in sys.argv[1:]:
    # Wheels must carry the full dependency list; refuse to build one
    # while SNAP_BUILD is set.
    raise RuntimeError('Unset SNAP_BUILD when building wheels '
                       'to include certbot dependencies.')
if os.environ.get('SNAP_BUILD'):
    install_requires.append('packaging')
docs_extras = [
    'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags
    'sphinx_rtd_theme',
]
setup(
    name='certbot-dns-rfc2136',
    version=version,
    description="RFC 2136 DNS Authenticator plugin for Certbot",
    url='https://github.com/certbot/certbot',
    author="Certbot Project",
    author_email='certbot-dev@eff.org',
    license='Apache License 2.0',
    python_requires='>=3.7',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Plugins',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Security',
        'Topic :: System :: Installation/Setup',
        'Topic :: System :: Networking',
        'Topic :: System :: Systems Administration',
        'Topic :: Utilities',
    ],
    packages=find_packages(),
    include_package_data=True,
    install_requires=install_requires,
    extras_require={
        'docs': docs_extras,
    },
    # Expose the authenticator to certbot's plugin discovery.
    entry_points={
        'certbot.plugins': [
            'dns-rfc2136 = certbot_dns_rfc2136._internal.dns_rfc2136:Authenticator',
        ],
    },
)
| {
"content_hash": "24c5cd4baa2fe65170db0b628e7c7456",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 84,
"avg_line_length": 31.34246575342466,
"alnum_prop": 0.6140734265734266,
"repo_name": "lmcro/letsencrypt",
"id": "670654c4231e8fdf4eccc4a75a587557fb95163b",
"size": "2288",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "certbot-dns-rfc2136/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "14147"
},
{
"name": "Augeas",
"bytes": "4731"
},
{
"name": "Batchfile",
"bytes": "35037"
},
{
"name": "DIGITAL Command Language",
"bytes": "133"
},
{
"name": "Groff",
"bytes": "222"
},
{
"name": "Makefile",
"bytes": "37309"
},
{
"name": "Nginx",
"bytes": "4274"
},
{
"name": "Python",
"bytes": "1225979"
},
{
"name": "Shell",
"bytes": "26934"
}
],
"symlink_target": ""
} |
from __future__ import division, absolute_import, division
from sklearn.preprocessing import LabelEncoder
from .base import _validate_X_y_ratio_classes
from ..utils import get_random_state, DEFAULT_SEED
from . import base
import numpy as np
__all__ = [
'over_sample_balance'
]
def over_sample_balance(X, y, balance_ratio=0.2, random_state=DEFAULT_SEED):
    """Balance classes by over-sampling the minority class(es).

    One strategy for balancing data is to over-sample the minority class until it is
    represented at the prescribed ``balance_ratio``. While there is significant literature
    to show that this is not the best technique, and can sometimes lead to over-fitting, there
    are instances wherein it works quite well.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training vectors as real numbers, where ``n_samples`` is the number of
        samples and ``n_features`` is the number of input features.

    y : array-like, shape (n_samples,)
        Training labels as integers, where ``n_samples`` is the number of samples.
        ``n_samples`` should be equal to the ``n_samples`` in ``X``.

    balance_ratio : float, optional (default=0.2)
        The minimum acceptable ratio of $MINORITY_CLASS : $MAJORITY_CLASS representation,
        where 0 < ``ratio`` <= 1

    random_state : int, optional (default=DEFAULT_SEED)
        The seed to construct the random state to generate random selections.

    Returns
    -------
    out_X : np.ndarray, shape (n_balanced_samples, n_features)
        The over-sampled training vectors; appended rows are duplicates of
        existing minority-class rows of ``X``.

    out_y : np.ndarray, shape (n_balanced_samples,)
        Labels for ``out_X``, mapped back to the original label space via
        ``LabelEncoder.inverse_transform``.
    """
    # Resolve the seed into the underlying RNG object. NOTE(review): ``.state``
    # presumably exposes a numpy RandomState (``permutation`` is called on it
    # below) -- confirm against ``get_random_state`` in ``..utils``.
    random_state = get_random_state(random_state).state

    # validate before copying arrays around...
    X, y, n_classes, present_classes, \
        counts, majority_label, target_count = _validate_X_y_ratio_classes(X, y, balance_ratio)

    # encode y, in case they are not numeric (we need them to be for np.ones)
    le = LabelEncoder()
    le.fit(present_classes)
    y_transform = le.transform(y)  # make numeric

    # we'll vstack/concatenate to these
    out_X, out_y = X.copy(), y_transform.copy()

    # iterate the present classes, growing every non-majority label
    # until it reaches ``target_count`` rows
    for label in present_classes:
        if label == majority_label:
            continue

        # get the transformed label
        label_transform = le.transform([label])[0]
        while True:
            # use the out_X, out_y copies. Since we're over-sampling,
            # it doesn't matter if we're drawing from the out_X matrix.
            # also, this way we can better keep track of how many we've drawn.
            mask = out_y == label_transform
            n_req = target_count - mask.sum()

            # terminal case: this class has reached the target representation
            if n_req == 0:
                break

            # draw a sample, take first n_req:
            idcs = np.arange(out_X.shape[0])[mask]  # get the indices, mask them
            sample = out_X[random_state.permutation(idcs), :][:n_req]

            # vstack the drawn duplicates onto the output matrix
            out_X = np.vstack([out_X, sample])

            # concatenate. Use sample length, since it might be < n_req
            # (the class may have fewer existing rows than still required,
            # in which case the while-loop draws again on the next pass)
            out_y = np.concatenate([out_y, np.ones(sample.shape[0], dtype=np.int16) * label_transform])

    # map the encoded labels back to their original values before returning
    return out_X, le.inverse_transform(out_y)
| {
"content_hash": "6bdd3fcf33ef38aaa627fb3cff68dec8",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 103,
"avg_line_length": 38.51898734177215,
"alnum_prop": 0.6375287545185672,
"repo_name": "tgsmith61591/smrt",
"id": "48955dcc181792549b9b7a393125d6ac222a0d50",
"size": "3206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smrt/balance/over.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "115681"
},
{
"name": "Shell",
"bytes": "4133"
}
],
"symlink_target": ""
} |
# Auto-generated pyaf smoke test: synthesize an artificial time series
# (N=32 daily points, MovingMedian trend, 30-period cycle, AR order 12,
# Quantization transform, zero noise, no exogenous variables) and run it
# through the benchmark's standard processing pipeline.
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 30, transform = "Quantization", sigma = 0.0, exog_count = 0, ar_order = 12); | {
"content_hash": "ddbc847bbc9de5e91280706b4378e4d7",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 170,
"avg_line_length": 38.57142857142857,
"alnum_prop": 0.7111111111111111,
"repo_name": "antoinecarme/pyaf",
"id": "34c7e1908a013ffdfd59077a3215a56eaf342493",
"size": "270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Quantization/trend_MovingMedian/cycle_30/ar_12/test_artificial_32_Quantization_MovingMedian_30_12_0.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.