hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8bcd980494f45374b1de2cc00e0bbf0dc26ca6bd | 18,357 | py | Python | kafkaesk/consumer.py | onna/kafkaesk | 31480c476a420faebc23cd763b4b039b9378c9f6 | [
"BSD-2-Clause"
] | 24 | 2020-05-18T08:46:18.000Z | 2021-11-01T12:51:54.000Z | kafkaesk/consumer.py | onna/kafkaesk | 31480c476a420faebc23cd763b4b039b9378c9f6 | [
"BSD-2-Clause"
] | 40 | 2020-05-18T20:50:46.000Z | 2021-08-24T17:28:29.000Z | kafkaesk/consumer.py | onna/kafkaesk | 31480c476a420faebc23cd763b4b039b9378c9f6 | [
"BSD-2-Clause"
] | 3 | 2020-09-01T12:08:04.000Z | 2021-01-05T10:11:22.000Z | from .exceptions import ConsumerUnhealthyException
from .exceptions import HandlerTaskCancelled
from .exceptions import StopConsumer
from .exceptions import UnhandledMessage
from .metrics import CONSUMED_MESSAGE_TIME
from .metrics import CONSUMED_MESSAGES
from .metrics import CONSUMED_MESSAGES_BATCH_SIZE
from .metrics import CONSUMER_HEALTH
from .metrics import CONSUMER_REBALANCED
from .metrics import CONSUMER_TOPIC_OFFSET
from .metrics import MESSAGE_LEAD_TIME
from .metrics import NOERROR
from kafka.structs import TopicPartition
import aiokafka
import asyncio
import fnmatch
import functools
import inspect
import logging
import opentracing
import orjson
import pydantic
import time
import typing
if typing.TYPE_CHECKING: # pragma: no cover
from .app import Application
else:
Application = None
logger = logging.getLogger(__name__)
def build_handler(
coro: typing.Callable, app: "Application", consumer: "BatchConsumer"
) -> typing.Callable:
"""Introspection on the coroutine signature to inject dependencies"""
sig = inspect.signature(coro)
param_name = [k for k in sig.parameters.keys()][0]
annotation = sig.parameters[param_name].annotation
handler = _raw_msg_handler
if annotation and annotation != sig.empty:
if annotation == bytes:
handler = _bytes_msg_handler # type: ignore
elif annotation == aiokafka.ConsumerRecord:
handler = _record_msg_handler # type: ignore
else:
handler = functools.partial(_pydantic_msg_handler, annotation) # type: ignore
it = iter(sig.parameters.items())
# first argument is required and its the payload
next(it)
kwargs: typing.Dict[str, typing.Any] = getattr(coro, "__extra_kwargs__", {})
for key, param in it:
if key == "schema":
kwargs["schema"] = None
elif key == "record":
kwargs["record"] = None
elif key == "app":
kwargs["app"] = app
elif key == "subscriber":
kwargs["subscriber"] = consumer
elif issubclass(param.annotation, opentracing.Span):
kwargs[key] = opentracing.Span
return inner
| 36.935614 | 100 | 0.611538 | from .exceptions import ConsumerUnhealthyException
from .exceptions import HandlerTaskCancelled
from .exceptions import StopConsumer
from .exceptions import UnhandledMessage
from .metrics import CONSUMED_MESSAGE_TIME
from .metrics import CONSUMED_MESSAGES
from .metrics import CONSUMED_MESSAGES_BATCH_SIZE
from .metrics import CONSUMER_HEALTH
from .metrics import CONSUMER_REBALANCED
from .metrics import CONSUMER_TOPIC_OFFSET
from .metrics import MESSAGE_LEAD_TIME
from .metrics import NOERROR
from kafka.structs import TopicPartition
import aiokafka
import asyncio
import fnmatch
import functools
import inspect
import logging
import opentracing
import orjson
import pydantic
import time
import typing
if typing.TYPE_CHECKING: # pragma: no cover
from .app import Application
else:
Application = None
logger = logging.getLogger(__name__)
class Subscription:
    """A single consumer registration.

    Bundles the handler coroutine with its consumer group, topic selection
    (either a glob ``pattern`` or an explicit ``topics`` list) and runtime
    options (per-record ``timeout_seconds`` and batch ``concurrency``).
    """

    def __init__(
        self,
        consumer_id: str,
        func: typing.Callable,
        group: str,
        *,
        pattern: typing.Optional[str] = None,
        topics: typing.Optional[typing.List[str]] = None,
        timeout_seconds: float = None,
        concurrency: int = None,
    ):
        # Identity / routing.
        self.consumer_id = consumer_id
        self.group = group
        # Handler plus execution limits.
        self.func = func
        self.timeout = timeout_seconds
        self.concurrency = concurrency
        # Topic selection: pattern and topics are mutually exclusive modes
        # (enforced later by the consumer, not here).
        self.pattern = pattern
        self.topics = topics

    def __repr__(self) -> str:
        return "<Subscription stream: {} >".format(self.consumer_id)
def _pydantic_msg_handler(
    model: typing.Type[pydantic.BaseModel], record: aiokafka.ConsumerRecord
) -> pydantic.BaseModel:
    """Deserialize a record value into an instance of ``model``.

    The record value is expected to be a JSON envelope whose actual payload
    lives under the "data" key. Every deserialization/validation failure is
    converted into UnhandledMessage so the consumer can skip the record
    instead of crashing.
    """
    try:
        data: typing.Dict[str, typing.Any] = orjson.loads(record.value)
        return model.parse_obj(data["data"])
    except orjson.JSONDecodeError:
        # log the exception so we can see what fields failed
        logger.warning(f"Payload is not valid json: {record}", exc_info=True)
        raise UnhandledMessage("Error deserializing json")
    except pydantic.ValidationError:
        # log the exception so we can see what fields failed
        logger.warning(f"Error parsing pydantic model:{model} {record}", exc_info=True)
        raise UnhandledMessage(f"Error parsing data: {model}")
    except Exception:
        # Catch-all: e.g. a missing "data" key raises KeyError above.
        logger.warning(f"Error parsing payload: {model} {record}", exc_info=True)
        raise UnhandledMessage("Error parsing payload")
def _raw_msg_handler(record: aiokafka.structs.ConsumerRecord) -> typing.Dict[str, typing.Any]:
    """Default payload handler: deserialize the record value as a JSON object."""
    data: typing.Dict[str, typing.Any] = orjson.loads(record.value)
    return data
def _bytes_msg_handler(record: aiokafka.structs.ConsumerRecord) -> bytes:
    """Payload handler for ``bytes``-annotated handlers: the raw record value."""
    return record.value
def _record_msg_handler(record: aiokafka.structs.ConsumerRecord) -> aiokafka.structs.ConsumerRecord:
    """Payload handler for ``ConsumerRecord``-annotated handlers: pass through."""
    return record
def build_handler(
    coro: typing.Callable, app: "Application", consumer: "BatchConsumer"
) -> typing.Callable:
    """Introspection on the coroutine signature to inject dependencies.

    The first parameter of ``coro`` is the message payload; its annotation
    selects the deserializer (dict-from-JSON by default, raw ``bytes``, the
    ``ConsumerRecord`` itself, or any other class treated as a pydantic
    model). The remaining parameters are dependency-injected by name
    ("schema", "record", "app", "subscriber") or by an ``opentracing.Span``
    annotation. Returns an ``inner(record, span)`` coroutine wrapper.
    """
    sig = inspect.signature(coro)
    # First parameter is the payload; its annotation drives deserialization.
    param_name = [k for k in sig.parameters.keys()][0]
    annotation = sig.parameters[param_name].annotation
    handler = _raw_msg_handler
    if annotation and annotation != sig.empty:
        if annotation == bytes:
            handler = _bytes_msg_handler  # type: ignore
        elif annotation == aiokafka.ConsumerRecord:
            handler = _record_msg_handler  # type: ignore
        else:
            # Any other annotation is assumed to be a pydantic model class.
            handler = functools.partial(_pydantic_msg_handler, annotation)  # type: ignore
    it = iter(sig.parameters.items())
    # first argument is required and it's the payload
    next(it)
    kwargs: typing.Dict[str, typing.Any] = getattr(coro, "__extra_kwargs__", {})
    for key, param in it:
        if key == "schema":
            kwargs["schema"] = None  # placeholder, resolved per-record in `inner`
        elif key == "record":
            kwargs["record"] = None  # placeholder, resolved per-record in `inner`
        elif key == "app":
            kwargs["app"] = app
        elif key == "subscriber":
            kwargs["subscriber"] = consumer
        # NOTE(review): issubclass() raises TypeError when param.annotation is
        # not a class (e.g. unannotated params or typing generics) — confirm
        # handlers always annotate extra parameters with plain classes.
        elif issubclass(param.annotation, opentracing.Span):
            kwargs[key] = opentracing.Span

    async def inner(record: aiokafka.ConsumerRecord, span: opentracing.Span) -> None:
        """Deserialize ``record`` and invoke the user coroutine with its deps."""
        data = handler(record)
        deps = kwargs.copy()
        for key, param in kwargs.items():
            if key == "schema":
                msg = orjson.loads(record.value)
                deps["schema"] = msg["schema"]
            elif key == "record":
                deps["record"] = record
            elif param == opentracing.Span:
                deps[key] = span
        await coro(data, **deps)

    return inner
class BatchConsumer(aiokafka.ConsumerRebalanceListener):
    """Consumes Kafka records in batches and dispatches them to one handler.

    Also acts as its own rebalance listener so that tracked offsets can be
    committed before partitions are revoked. Offsets are accumulated in
    ``self._tp`` (next offset per TopicPartition) and committed periodically
    via ``_maybe_commit``.
    """

    _subscription: Subscription
    _close: typing.Optional[asyncio.Future]
    _consumer: aiokafka.AIOKafkaConsumer
    _offsets: typing.Dict[aiokafka.TopicPartition, int]
    _message_handler: typing.Callable
    _initialized: bool
    _running: bool = False

    def __init__(
        self,
        subscription: Subscription,
        app: "Application",
        event_handlers: typing.Optional[typing.Dict[str, typing.List[typing.Callable]]] = None,
        auto_commit: bool = True,
    ):
        self._initialized = False
        self.stream_id = subscription.consumer_id
        self.group_id = subscription.group
        self._coro = subscription.func
        self._event_handlers = event_handlers or {}
        self._concurrency = subscription.concurrency or 1
        self._timeout = subscription.timeout
        self._subscription = subscription
        self._close = None
        self._app = app
        self._last_commit = 0
        self._auto_commit = auto_commit
        self._tp: typing.Dict[aiokafka.TopicPartition, int] = {}
        # We accept either pattern or a list of topics, also we might accept a single topic
        # to keep compatibility with older API
        self.pattern = subscription.pattern
        self.topics = subscription.topics

    async def __call__(self) -> None:
        """Run the consume loop until stopped or a fatal error occurs."""
        if not self._initialized:
            await self.initialize()
        try:
            while not self._close:
                try:
                    if not self._consumer.assignment():
                        # No partitions assigned yet; back off and poll again.
                        await asyncio.sleep(2)
                        continue
                    await self._consume()
                except aiokafka.errors.KafkaConnectionError:
                    # We retry
                    self._health_metric(False)
                    logger.info(f"Consumer {self} kafka connection error, retrying...")
                    await asyncio.sleep(0.5)
        except asyncio.CancelledError:
            self._health_metric(False)
        except StopConsumer:
            self._health_metric(False)
            logger.info(f"Consumer {self} stopped, exiting")
        except BaseException as exc:
            logger.exception(f"Consumer {self} failed. Finalizing.", exc_info=exc)
            self._health_metric(False)
            raise
        finally:
            await self.finalize()

    def _health_metric(self, healthy: bool) -> None:
        """Record consumer health (1/0) in the CONSUMER_HEALTH gauge."""
        CONSUMER_HEALTH.labels(
            group_id=self.group_id,
        ).set(healthy)

    async def emit(self, name: str, *args: typing.Any, **kwargs: typing.Any) -> None:
        """Invoke all registered event handlers for ``name``.

        StopConsumer propagates; any other handler exception is logged and
        swallowed so one handler cannot break the others.
        """
        for func in self._event_handlers.get(name, []):
            try:
                await func(*args, **kwargs)
            except StopConsumer:
                raise
            except Exception:
                logger.warning(f"Error emitting event: {name}: {func}", exc_info=True)

    async def initialize(self) -> None:
        """Create and start the underlying aiokafka consumer and the handler."""
        self._close = None
        self._running = True
        self._processing = asyncio.Lock()
        self._consumer = await self._consumer_factory()
        await self._consumer.start()
        self._message_handler = build_handler(self._coro, self._app, self)  # type: ignore
        self._initialized = True

    async def finalize(self) -> None:
        """Stop the consumer and resolve the pending ``stop()`` future, if any."""
        try:
            await self._consumer.stop()
        except Exception:
            logger.info(f"[{self}] Could not commit on shutdown", exc_info=True)
        self._initialized = False
        self._running = False
        if self._close:
            self._close.set_result("done")

    async def _consumer_factory(self) -> aiokafka.AIOKafkaConsumer:
        """Build a consumer subscribed either by pattern or by topic list."""
        consumer = self._app.consumer_factory(self.group_id)
        if self.pattern and self.topics:
            raise AssertionError(
                "Both of the params 'pattern' and 'topics' are not allowed. Select only one mode."
            )  # noqa
        if self.pattern:
            # This is needed in case we have a prefix
            topic_id = self._app.topic_mng.get_topic_id(self.pattern)
            if "*" in self.pattern:
                pattern = fnmatch.translate(topic_id)
                consumer.subscribe(pattern=pattern, listener=self)
            else:
                consumer.subscribe(topics=[topic_id], listener=self)
        elif self.topics:
            topics = [self._app.topic_mng.get_topic_id(topic) for topic in self.topics]
            consumer.subscribe(topics=topics, listener=self)
        else:
            raise ValueError("Either `topics` or `pattern` should be defined")
        return consumer

    async def stop(self) -> None:
        """Signal the consume loop to exit and wait until it finalizes."""
        if not self._running:
            return
        # Exit the loop, this will trigger finalize call
        loop = asyncio.get_running_loop()
        self._close = loop.create_future()
        await asyncio.wait([self._close])

    def __repr__(self) -> str:
        return f"<Consumer: {self.stream_id}, Group: {self.group_id}>"

    def _span(self, record: aiokafka.ConsumerRecord) -> opentracing.SpanContext:
        """Start a tracing span for ``record``, linked to the producer's context
        extracted from the record headers."""
        tracer = opentracing.tracer
        headers = {x[0]: x[1].decode() for x in record.headers or []}
        parent = tracer.extract(opentracing.Format.TEXT_MAP, headers)
        context = tracer.start_active_span(
            record.topic,
            tags={
                "message_bus.destination": record.topic,
                "message_bus.partition": record.partition,
                "message_bus.group_id": self.group_id,
            },
            references=[opentracing.follows_from(parent)],
        )
        return context.span

    async def _handler(self, record: aiokafka.ConsumerRecord) -> None:
        """Run the built message handler for one record inside a span."""
        with self._span(record) as span:
            await self._message_handler(record, span)

    async def _consume(self) -> None:
        """Fetch up to ``_concurrency`` records and process them as one batch."""
        batch = await self._consumer.getmany(max_records=self._concurrency, timeout_ms=500)
        async with self._processing:
            if not batch:
                await self._maybe_commit()
            else:
                await self._consume_batch(batch)

    async def _consume_batch(
        self, batch: typing.Dict[TopicPartition, typing.List[aiokafka.ConsumerRecord]]
    ) -> None:
        """Process one batch concurrently, track offsets, emit metrics/events.

        Each record is handled in its own task; tasks still pending after
        ``self._timeout`` are cancelled and routed to ``on_handler_timeout``.
        """
        futures: typing.Dict[asyncio.Future[typing.Any], aiokafka.ConsumerRecord] = dict()
        for tp, records in batch.items():
            for record in records:
                coro = self._handler(record)
                fut = asyncio.create_task(coro)
                futures[fut] = record
        # TODO: this metric is kept for backwards-compatibility, but should be revisited
        with CONSUMED_MESSAGE_TIME.labels(
            stream_id=self.stream_id,
            partition=next(iter(batch)),
            group_id=self.group_id,
        ).time():
            done, pending = await asyncio.wait(
                futures.keys(), timeout=self._timeout, return_when=asyncio.FIRST_EXCEPTION
            )
        # Look for failures
        for task in done:
            record = futures[task]
            tp = aiokafka.TopicPartition(record.topic, record.partition)
            # Get the largest offset of the batch
            current_max = self._tp.get(tp)
            if not current_max:
                self._tp[tp] = record.offset + 1
            else:
                self._tp[tp] = max(record.offset + 1, current_max)
            try:
                if exc := task.exception():
                    self._count_message(record, error=exc.__class__.__name__)
                    await self.on_handler_failed(exc, record)
                else:
                    self._count_message(record)
            except asyncio.InvalidStateError:
                # Task didn't finish yet, we shouldn't be here since we are
                # iterating the `done` list, so just log something
                logger.warning(f"Trying to get exception from unfinished task. Record: {record}")
            except asyncio.CancelledError:
                # During task execution any exception will be returned in
                # the `done` list. But timeout exception should be captured
                # independently, that's why we handle this condition here.
                self._count_message(record, error="cancelled")
                await self.on_handler_failed(HandlerTaskCancelled(record), record)
        # Process timeout tasks
        for task in pending:
            record = futures[task]
            try:
                # This will raise a `asyncio.CancelledError`, the consumer logic
                # is responsible to catch it.
                task.cancel()
                await task
            except asyncio.CancelledError:
                # App didn't catch this exception, so we treat it as an unmanaged one.
                await self.on_handler_timeout(record)
                self._count_message(record, error="pending")
        for tp, records in batch.items():
            CONSUMED_MESSAGES_BATCH_SIZE.labels(
                stream_id=tp.topic,
                group_id=self.group_id,
                partition=tp.partition,
            ).observe(len(records))
            for record in sorted(records, key=lambda rec: rec.offset):
                # record.timestamp is in milliseconds; lead time in seconds.
                lead_time = time.time() - record.timestamp / 1000  # type: ignore
                MESSAGE_LEAD_TIME.labels(
                    stream_id=record.topic,
                    group_id=self.group_id,
                    partition=record.partition,
                ).observe(lead_time)
                CONSUMER_TOPIC_OFFSET.labels(
                    stream_id=record.topic,
                    group_id=self.group_id,
                    partition=record.partition,
                ).set(record.offset)
        # Commit first and then call the event subscribers
        await self._maybe_commit()
        for _, records in batch.items():
            for record in records:
                await self.emit("message", record=record)

    def _count_message(self, record: aiokafka.ConsumerRecord, error: str = NOERROR) -> None:
        """Increment the CONSUMED_MESSAGES counter for this record/outcome."""
        CONSUMED_MESSAGES.labels(
            stream_id=record.topic,
            error=error,
            partition=record.partition,
            group_id=self.group_id,
        ).inc()

    @property
    def consumer(self) -> aiokafka.AIOKafkaConsumer:
        """The underlying aiokafka consumer."""
        return self._consumer

    async def _maybe_commit(self, forced: bool = False) -> None:
        """Commit tracked offsets if auto-commit is on and the interval elapsed.

        NOTE(review): ``time.monotonic_ns()`` returns nanoseconds while
        ``interval`` is computed in seconds, so the interval check is
        effectively always true after the first tick — verify intended units.
        """
        if not self._auto_commit:
            return
        if not self._consumer.assignment() or not self._tp:
            logger.warning("Cannot commit because no partitions are assigned!")
            return
        interval = self._app.kafka_settings.get("auto_commit_interval_ms", 2000) / 1000
        now = time.monotonic_ns()
        if forced or (now > (self._last_commit + interval)):
            try:
                if self._tp:
                    await self._consumer.commit(offsets=self._tp)
            except aiokafka.errors.CommitFailedError:
                logger.warning("Error attempting to commit", exc_info=True)
            self._last_commit = now

    async def publish(
        self,
        stream_id: str,
        record: aiokafka.ConsumerRecord,
        headers: typing.Optional[typing.List[typing.Tuple[str, bytes]]] = None,
    ) -> None:
        """Republish a record's value/key to ``stream_id``, merging headers."""
        record_headers = (record.headers or []) + (headers or [])
        fut = await self._app.raw_publish(
            stream_id=stream_id, data=record.value, key=record.key, headers=record_headers
        )
        await fut

    async def healthy(self) -> None:
        """Raise ConsumerUnhealthyException if not running or broker not ready.

        Reaches into aiokafka private attributes (``_client``/``_coordinator``)
        to check coordinator readiness.
        """
        if not self._running:
            self._health_metric(False)
            raise ConsumerUnhealthyException(f"Consumer '{self}' is not running")
        if self._consumer is not None and not await self._consumer._client.ready(
            self._consumer._coordinator.coordinator_id
        ):
            self._health_metric(False)
            raise ConsumerUnhealthyException(f"Consumer '{self}' is not ready")
        self._health_metric(True)
        return

    # Event handlers
    async def on_partitions_revoked(self, revoked: typing.List[aiokafka.TopicPartition]) -> None:
        """Rebalance callback: drain in-flight batch, commit, drop revoked TPs."""
        if revoked:
            # Wait for the current batch to be processed
            async with self._processing:
                if self._auto_commit:
                    # And commit before releasing the partitions.
                    await self._maybe_commit(forced=True)
                for tp in revoked:
                    # Remove the partition from the dict
                    self._tp.pop(tp, None)
                    CONSUMER_REBALANCED.labels(
                        partition=tp.partition,
                        group_id=self.group_id,
                        event="revoked",
                    ).inc()
            logger.info(f"Partitions revoked to {self}: {revoked}")

    async def on_partitions_assigned(self, assigned: typing.List[aiokafka.TopicPartition]) -> None:
        """Rebalance callback: seed tracked offsets from the current positions."""
        if assigned:
            logger.info(f"Partitions assigned to {self}: {assigned}")
        for tp in assigned:
            position = await self._consumer.position(tp)
            self._tp[tp] = position
            CONSUMER_REBALANCED.labels(
                partition=tp.partition,
                group_id=self.group_id,
                event="assigned",
            ).inc()

    async def on_handler_timeout(self, record: aiokafka.ConsumerRecord) -> None:
        """Default timeout policy: escalate as HandlerTaskCancelled."""
        raise HandlerTaskCancelled(record)

    async def on_handler_failed(
        self, exception: BaseException, record: aiokafka.ConsumerRecord
    ) -> None:
        """Default failure policy: skip UnhandledMessage, re-raise anything else."""
        if isinstance(exception, UnhandledMessage):
            logger.warning("Unhandled message, ignoring...", exc_info=exception)
        else:
            raise exception
| 15,040 | 923 | 218 |
91abba3d2adc0ce3668a924da98190df043dc094 | 3,661 | py | Python | public_data/migrations/0018_auto_20210926_2332.py | MTES-MCT/sparte | 3b8ae6d21da81ca761d64ae9dfe2c8f54487211c | [
"MIT"
] | null | null | null | public_data/migrations/0018_auto_20210926_2332.py | MTES-MCT/sparte | 3b8ae6d21da81ca761d64ae9dfe2c8f54487211c | [
"MIT"
] | 3 | 2022-02-10T11:47:58.000Z | 2022-02-23T18:50:24.000Z | public_data/migrations/0018_auto_20210926_2332.py | MTES-MCT/sparte | 3b8ae6d21da81ca761d64ae9dfe2c8f54487211c | [
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-09-26 23:32
from django.db import migrations, models
| 32.114035 | 85 | 0.523627 | # Generated by Django 3.2.5 on 2021-09-26 23:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema changes for the public_data app (2021-09-26).

    Drops ``usage_label`` from artificielle2018 and adds denormalized
    couverture/usage label columns to several 2015/2018 land-cover models.
    """

    dependencies = [
        ("public_data", "0017_auto_20210924_2219"),
    ]

    operations = [
        migrations.RemoveField(
            model_name="artificielle2018",
            name="usage_label",
        ),
        migrations.AddField(
            model_name="artificialisee2015to2018",
            name="cs_2015_label",
            field=models.CharField(
                blank=True, max_length=254, null=True, verbose_name="Couverture 2015"
            ),
        ),
        migrations.AddField(
            model_name="artificialisee2015to2018",
            name="cs_2018_label",
            field=models.CharField(
                blank=True, max_length=254, null=True, verbose_name="Couverture 2018"
            ),
        ),
        migrations.AddField(
            model_name="artificialisee2015to2018",
            name="us_2015_label",
            field=models.CharField(
                blank=True, max_length=254, null=True, verbose_name="Usage 2015"
            ),
        ),
        migrations.AddField(
            model_name="artificialisee2015to2018",
            name="us_2018_label",
            field=models.CharField(
                blank=True, max_length=254, null=True, verbose_name="Usage 2018"
            ),
        ),
        migrations.AddField(
            model_name="enveloppeurbaine2018",
            name="couverture_label",
            field=models.CharField(
                blank=True,
                max_length=254,
                null=True,
                verbose_name="Libellé couverture du sol",
            ),
        ),
        migrations.AddField(
            model_name="renaturee2018to2015",
            name="cs_2015_label",
            field=models.CharField(
                blank=True, max_length=254, null=True, verbose_name="Couverture 2015"
            ),
        ),
        migrations.AddField(
            model_name="renaturee2018to2015",
            name="cs_2018_label",
            field=models.CharField(
                blank=True, max_length=254, null=True, verbose_name="Couverture 2018"
            ),
        ),
        migrations.AddField(
            model_name="renaturee2018to2015",
            name="us_2015_label",
            field=models.CharField(
                blank=True, max_length=254, null=True, verbose_name="Usage 2015"
            ),
        ),
        migrations.AddField(
            model_name="renaturee2018to2015",
            name="us_2018_label",
            field=models.CharField(
                blank=True, max_length=254, null=True, verbose_name="Usage 2018"
            ),
        ),
        migrations.AddField(
            model_name="voirie2018",
            name="couverture_label",
            field=models.CharField(
                blank=True,
                max_length=254,
                null=True,
                verbose_name="Libellé couverture du sol",
            ),
        ),
        migrations.AddField(
            model_name="zonesbaties2018",
            name="couverture_label",
            field=models.CharField(
                blank=True,
                max_length=254,
                null=True,
                verbose_name="Libellé couverture du sol",
            ),
        ),
        migrations.AddField(
            model_name="zonesbaties2018",
            name="usage_label",
            field=models.CharField(
                blank=True,
                max_length=254,
                null=True,
                verbose_name="Libellé usage du sol",
            ),
        ),
    ]
| 0 | 3,551 | 23 |
e0a318193742577aceb5a0d3b563ebd2374caeab | 11,063 | py | Python | chris_backend/plugins/tests/test_models.py | jdtzmn/ChRIS_ultron_backEnd | b6604617c74fa1ecfaca108915c1b61e7a43b55f | [
"MIT"
] | null | null | null | chris_backend/plugins/tests/test_models.py | jdtzmn/ChRIS_ultron_backEnd | b6604617c74fa1ecfaca108915c1b61e7a43b55f | [
"MIT"
] | null | null | null | chris_backend/plugins/tests/test_models.py | jdtzmn/ChRIS_ultron_backEnd | b6604617c74fa1ecfaca108915c1b61e7a43b55f | [
"MIT"
] | null | null | null |
import os, shutil
from unittest import mock
import swiftclient
from django.test import TestCase, tag
from django.contrib.auth.models import User
from django.conf import settings
from feeds.models import Feed, FeedFile
from plugins.models import Plugin, PluginParameter, PluginInstance
from plugins.models import ComputeResource
| 48.1 | 97 | 0.609057 |
import os, shutil
from unittest import mock
import swiftclient
from django.test import TestCase, tag
from django.contrib.auth.models import User
from django.conf import settings
from feeds.models import Feed, FeedFile
from plugins.models import Plugin, PluginParameter, PluginInstance
from plugins.models import ComputeResource
class PluginModelTests(TestCase):
    """Tests for the Plugin model."""

    def setUp(self):
        """Create an 'fs' plugin with a single required 'dir' string parameter."""
        self.plugin_fs_name = "simplefsapp"
        self.plugin_fs_parameters = {'dir': {'type': 'string', 'optional': False}}
        (self.compute_resource, tf) = ComputeResource.objects.get_or_create(
            compute_resource_identifier="host")
        # create a plugin
        (plugin_fs, tf) = Plugin.objects.get_or_create(name=self.plugin_fs_name,
                                                       type='fs',
                                                       compute_resource=self.compute_resource)
        # add plugins' parameters
        PluginParameter.objects.get_or_create(
            plugin=plugin_fs,
            name='dir',
            type=self.plugin_fs_parameters['dir']['type'],
            optional=self.plugin_fs_parameters['dir']['optional'])

    def test_get_plugin_parameter_names(self):
        """get_plugin_parameter_names should list the plugin's parameter names."""
        plugin = Plugin.objects.get(name=self.plugin_fs_name)
        param_names = plugin.get_plugin_parameter_names()
        self.assertEquals(param_names, ['dir'])
class PluginInstanceModelTests(TestCase):
    """Tests for the PluginInstance model (feed creation, paths, file registration)."""

    def setUp(self):
        """Create one 'fs' and one 'ds' plugin (with parameters) and a test user."""
        self.plugin_fs_name = "simplefsapp"
        self.plugin_fs_parameters = {'dir': {'type': 'string', 'optional': False}}
        self.plugin_ds_name = "simpledsapp"
        self.plugin_ds_parameters = {'prefix': {'type': 'string', 'optional': False}}
        self.username = 'foo'
        self.password = 'foo-pass'
        (self.compute_resource, tf) = ComputeResource.objects.get_or_create(
            compute_resource_identifier="host")
        # create plugins
        (plugin_fs, tf) = Plugin.objects.get_or_create(name=self.plugin_fs_name,
                                                       type='fs',
                                                       compute_resource=self.compute_resource)
        (plugin_ds, tf) = Plugin.objects.get_or_create(name=self.plugin_ds_name,
                                                       type='ds',
                                                       compute_resource=self.compute_resource)
        # add plugins' parameters
        PluginParameter.objects.get_or_create(
            plugin=plugin_fs,
            name='dir',
            type=self.plugin_fs_parameters['dir']['type'],
            optional=self.plugin_fs_parameters['dir']['optional'])
        PluginParameter.objects.get_or_create(
            plugin=plugin_ds,
            name='prefix',
            type=self.plugin_ds_parameters['prefix']['type'],
            optional=self.plugin_ds_parameters['prefix']['optional'])
        # create user
        User.objects.create_user(username=self.username,
                                 password=self.password)

    def test_save_creates_new_feed_just_after_fs_plugininstance_is_created(self):
        """
        Test whether overridden save method creates a feed just after an 'fs' plugin
        instance is created.
        """
        # create an 'fs' plugin instance that in turn should create a new feed
        user = User.objects.get(username=self.username)
        plugin = Plugin.objects.get(name=self.plugin_fs_name)
        pl_inst = PluginInstance.objects.create(plugin=plugin, owner=user,
                                                compute_resource=plugin.compute_resource)
        self.assertEquals(Feed.objects.count(), 1)
        self.assertEquals(pl_inst.feed.name, pl_inst.plugin.name)

    def test_save_do_not_create_new_feed_just_after_ds_plugininstance_is_created(self):
        """
        Test whether overridden save method does not create a feed just after a 'ds'
        plugin instance is created.
        """
        # create a 'ds' plugin instance that shouldn't create a new feed
        user = User.objects.get(username=self.username)
        plugin = Plugin.objects.get(name=self.plugin_ds_name)
        pl_inst = PluginInstance.objects.create(plugin=plugin, owner=user,
                                                compute_resource=plugin.compute_resource)
        self.assertEquals(Feed.objects.count(), 0)

    def test_get_root_instance(self):
        """
        Test whether custom get_root_instance method returns the root 'fs' plugin
        instance for a given plugin instance.
        """
        # create a 'fs' plugin instance
        user = User.objects.get(username=self.username)
        plugin = Plugin.objects.get(name=self.plugin_fs_name)
        pl_inst_root = PluginInstance.objects.create(plugin=plugin, owner=user,
                                                     compute_resource=plugin.compute_resource)
        # create a 'ds' plugin instance whose root is the previous 'fs' plugin instance
        plugin = Plugin.objects.get(name=self.plugin_ds_name)
        pl_inst = PluginInstance.objects.create(plugin=plugin, owner=user,
                                                previous=pl_inst_root, compute_resource=plugin.compute_resource)
        root_instance = pl_inst.get_root_instance()
        self.assertEquals(root_instance, pl_inst_root)

    def test_get_output_path(self):
        """
        Test whether custom get_output_path method returns appropriate output paths
        for both 'fs' and 'ds' plugins.
        """
        # create an 'fs' plugin instance
        user = User.objects.get(username=self.username)
        plugin_fs = Plugin.objects.get(name=self.plugin_fs_name)
        pl_inst_fs = PluginInstance.objects.create(plugin=plugin_fs, owner=user,
                                                   compute_resource=plugin_fs.compute_resource)
        # 'fs' plugins will output files to:
        # SWIFT_CONTAINER_NAME/<username>/feed_<id>/plugin_name_plugin_inst_<id>/data
        fs_output_path = '{0}/feed_{1}/{2}_{3}/data'.format( self.username,
                                                             pl_inst_fs.feed.id,
                                                             pl_inst_fs.plugin.name,
                                                             pl_inst_fs.id)
        self.assertEquals(pl_inst_fs.get_output_path(), fs_output_path)
        # create a 'ds' plugin instance
        user = User.objects.get(username=self.username)
        plugin_ds = Plugin.objects.get(name=self.plugin_ds_name)
        pl_inst_ds = PluginInstance.objects.create(plugin=plugin_ds,
                                                   owner=user, previous=pl_inst_fs, compute_resource=plugin_ds.compute_resource)
        # 'ds' plugins will output files to:
        # SWIFT_CONTAINER_NAME/<username>/feed_<id>/...
        # /previous_plugin_name_plugin_inst_<id>/plugin_name_plugin_inst_<id>/data
        ds_output_path = os.path.join(os.path.dirname(fs_output_path),
                                      '{0}_{1}/data'.format(pl_inst_ds.plugin.name,
                                                            pl_inst_ds.id))
        self.assertEquals(pl_inst_ds.get_output_path(), ds_output_path)

    def test_register_output_files(self):
        """
        Test whether custom register_output_files method properly registers a plugin's
        output file with the REST API (Swift connection is mocked).
        """
        # create an 'fs' plugin instance
        user = User.objects.get(username=self.username)
        plugin = Plugin.objects.get(name=self.plugin_fs_name)
        pl_inst = PluginInstance.objects.create(plugin=plugin, owner=user,
                                                compute_resource=plugin.compute_resource)
        pl_inst.feed.name = 'Feed1'
        pl_inst.feed.save()
        output_path = pl_inst.get_output_path()
        object_list = [{'name': output_path + '/file1.txt'}]
        container_data = ['', object_list]
        with mock.patch.object(swiftclient.Connection, '__init__',
                               return_value=None) as conn_init_mock:
            with mock.patch.object(swiftclient.Connection, 'get_container',
                                   return_value=container_data) as conn_get_container_mock:
                pl_inst.register_output_files()
                conn_init_mock.assert_called_with(user=settings.SWIFT_USERNAME,
                                                  key=settings.SWIFT_KEY,
                                                  authurl=settings.SWIFT_AUTH_URL,)
                conn_get_container_mock.assert_called_with(settings.SWIFT_CONTAINER_NAME,
                                                           prefix=output_path, full_listing=True)
                self.assertEquals(FeedFile.objects.count(), 1)
                feedfile = FeedFile.objects.get(plugin_inst=pl_inst, feed=pl_inst.feed)
                self.assertEquals(feedfile.fname.name, output_path + '/file1.txt')

    @tag('integration')
    def test_integration_register_output_files(self):
        """
        Test whether custom register_output_files method properly registers a plugin's
        output file with the REST API (uses a real Swift service).
        """
        # create an 'fs' plugin instance
        user = User.objects.get(username=self.username)
        plugin = Plugin.objects.get(name=self.plugin_fs_name)
        pl_inst = PluginInstance.objects.create(plugin=plugin, owner=user,
                                                compute_resource=plugin.compute_resource)
        pl_inst.feed.name = 'Feed1'
        pl_inst.feed.save()
        # initiate a Swift service connection
        conn = swiftclient.Connection(
            user=settings.SWIFT_USERNAME,
            key=settings.SWIFT_KEY,
            authurl=settings.SWIFT_AUTH_URL,
        )
        # create container in case it doesn't already exist
        conn.put_container(settings.SWIFT_CONTAINER_NAME)
        # create test directory where files are created
        self.test_dir = settings.MEDIA_ROOT + '/test'
        if not os.path.exists(self.test_dir):
            os.makedirs(self.test_dir)
        # create a test file
        test_file = self.test_dir + '/file1.txt'
        file = open(test_file, "w")
        file.write("test file")
        file.close()
        # upload file to Swift storage
        output_path = pl_inst.get_output_path()
        with open(test_file, 'r') as file1:
            conn.put_object(settings.SWIFT_CONTAINER_NAME, output_path + '/file1.txt',
                            contents=file1.read(),
                            content_type='text/plain')
        # remove test directory
        shutil.rmtree(self.test_dir, ignore_errors=True)
        pl_inst.register_output_files()
        self.assertEquals(FeedFile.objects.count(), 1)
        feedfile = FeedFile.objects.get(plugin_inst=pl_inst, feed=pl_inst.feed)
        self.assertEquals(feedfile.fname.name, output_path + '/file1.txt')
        # delete file from Swift storage
        conn.delete_object(settings.SWIFT_CONTAINER_NAME, output_path + '/file1.txt')
| 2,584 | 8,027 | 110 |
1047e9d75e176b88cd65293400cd5447cff92024 | 15,110 | py | Python | ebook_dl/scraper.py | glenlancer/ebook-dl | 9a8fa9e700e15727de64ecd4219dcb9c2a09c702 | [
"MIT"
] | null | null | null | ebook_dl/scraper.py | glenlancer/ebook-dl | 9a8fa9e700e15727de64ecd4219dcb9c2a09c702 | [
"MIT"
] | null | null | null | ebook_dl/scraper.py | glenlancer/ebook-dl | 9a8fa9e700e15727de64ecd4219dcb9c2a09c702 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- conding:utf-8 -*-
from . import config
from .thread_manager import ThreadManager
from .db import Db
from .db import BookInfo
import os
import re
import typer
import time
import requests
import logging
import rich
from urllib.parse import quote
from tomd import Tomd
from bs4 import BeautifulSoup
| 40.509383 | 131 | 0.646989 | #!/usr/bin/python3
# -*- conding:utf-8 -*-
from . import config
from .thread_manager import ThreadManager
from .db import Db
from .db import BookInfo
import os
import re
import typer
import time
import requests
import logging
import rich
from urllib.parse import quote
from tomd import Tomd
from bs4 import BeautifulSoup
class Scraper:
    """Scrape book metadata and download links from itebooksfree.com.

    Intended workflow (each step persists its results through ``Db``):
      1. ``get_all_book_profile_page_urls`` -- collect per-book profile URLs.
      2. ``collect_book_info``              -- scrape title/details/tn-url.
      3. ``collect_all_resource_urls``      -- resolve tn-urls to file URLs.
      4. ``download_all_books``             -- download each file via xterm+axel.
    """

    _MAIN_URL = 'https://itebooksfree.com/'
    # Links on this host are dead; such resource urls are skipped when downloading.
    _URL_THAT_WONT_WORK = 'http://file.allitebooks.com'
    _REQUEST_TIMEOUT = 15
    _DEFAULT_OUTPUT_DIR = 'output'
    _LOGGER = logging.getLogger(__name__)

    def __init__(self):
        # Search keyword comes from the shared config; Db persists intermediate state.
        self._search_key = config.get('keyword')
        self.db = Db()
        self._book_profile_page_urls = []
        self._book_info_collection = []
        self._book_url_collection = []

    @staticmethod
    def _construct_search_api(search_key='', page=None):
        """Build a listing/search URL for the given keyword and page number.

        NOTE(review): ``_MAIN_URL`` already ends with '/' so every non-trivial
        branch emits a double slash (e.g. '...com//search/x'); kept as-is since
        the site apparently tolerates it.
        """
        if search_key == '' and page is None:
            return Scraper._MAIN_URL
        elif search_key == '' and page:
            return f'{Scraper._MAIN_URL}/page/{page}'
        elif page is None:
            return f'{Scraper._MAIN_URL}/search/{search_key}'
        else:
            return f'{Scraper._MAIN_URL}/search/{search_key}/{page}'

    @staticmethod
    def _get_page(url):
        """GET *url* and return the response body text; raises on HTTP errors."""
        response = requests.get(
            url,
            headers=config.get('fake_headers'),
            timeout=Scraper._REQUEST_TIMEOUT,
        )
        response.raise_for_status()
        return response.text

    @staticmethod
    def _get_bs_obj(url, retry_count):
        """Fetch *url* and return it parsed by BeautifulSoup, retrying on failure.

        NOTE(review): retries are unbounded and implemented via recursion, so a
        permanently unreachable url will eventually hit the recursion limit.
        """
        try:
            page_text = Scraper._get_page(url)
            return BeautifulSoup(page_text, 'html.parser')
        except Exception as e:
            Scraper._LOGGER.warning(f'{Scraper._get_bs_obj.__name__} exception: {e} for url: {url}')
            retry_count += 1
            # Sleep 1s to avoid stack overflow when sudden Internet
            # disconnection occurs.
            time.sleep(1)
            Scraper._LOGGER.warning(f'Re-try {Scraper._get_bs_obj.__name__} {retry_count} times')
            return Scraper._get_bs_obj(url, retry_count)

    @staticmethod
    def _start_get_bs_obj(url):
        """Entry point for :meth:`_get_bs_obj` with a fresh retry counter."""
        return Scraper._get_bs_obj(url, 0)

    @staticmethod
    def _get_pagination_count(bs_obj):
        """Return the total page count parsed from the pagination widget (1 if absent)."""
        smaller_bs_obj = bs_obj.find('div', {'class': 'pagination'})
        if smaller_bs_obj is None:
            return 1
        pagination_bs = smaller_bs_obj.find(
            'span', {'class': 'text'}
        )
        if pagination_bs is None:
            return 1
        try:
            # The widget renders e.g. "1 / 42 Pages"; strip the fixed prefix/suffix.
            prefix_text = '1 / '
            suffix_text = ' Pages'
            pagination_text = pagination_bs.get_text()
            if pagination_text.startswith(prefix_text):
                pagination_text = pagination_text[len(prefix_text):]
            if pagination_text.endswith(suffix_text):
                pagination_text = pagination_text[:-len(suffix_text)]
            return int(pagination_text)
        except Exception as e:
            Scraper._LOGGER.warning(f'Pagination cast exception: {e}')
            return 1

    @staticmethod
    def _retrieve_book_profile_page_urls_from_page(bs_obj):
        """Extract every book profile link (href of each card's anchor) from a listing page."""
        urls = []
        for entry_card_bs_obj in bs_obj.find_all('div', {'class': 'card-body'}):
            link = entry_card_bs_obj.find('a')
            if 'href' in link.attrs:
                urls.append(link.attrs['href'])
        return urls

    @staticmethod
    def _calculate_start_index_and_workload(index, url_workload, extra_workload):
        """Split a job list among threads.

        Thread *index* gets ``url_workload`` items, with the first
        ``extra_workload`` threads taking one extra item each.  Returns
        ``(start_index, workload)`` for that thread.
        """
        if index < extra_workload:
            workload = url_workload + 1
            start_index = index * workload
        else:
            workload = url_workload
            start_index = extra_workload * (workload + 1) + (index - extra_workload) * workload
        return start_index, workload

    @staticmethod
    def _retrieve_profile_page_urls(page_index, search_key):
        """Fetch listing page *page_index* for *search_key* and return its book urls."""
        index_page_url = Scraper._construct_search_api(search_key, page_index)
        page_bs = Scraper._start_get_bs_obj(index_page_url)
        return Scraper._retrieve_book_profile_page_urls_from_page(page_bs)

    @staticmethod
    def _run_profile_page_urls(thread_pools, index, url_workload, extra_workload, search_key):
        """Thread worker: collect profile urls for this thread's page range into thread_pools[index]."""
        thread_profile_page_urls = []
        start_index, workload = Scraper._calculate_start_index_and_workload(
            index, url_workload, extra_workload
        )
        # Page 1 is handled by the caller, so worker pages start at 2.
        start_index += 2
        for i in range(start_index, start_index + workload):
            thread_profile_page_urls += Scraper._retrieve_profile_page_urls(i, search_key)
        thread_pools[index] = thread_profile_page_urls
        config.get('console').log(f'Thread {index} finished its job.')

    @staticmethod
    def _get_book_info_from_bs(book_bs):
        """Parse a book profile page into a :class:`BookInfo`.

        Returns ``None`` when the page has no content section or no title.
        """
        bookInfo = BookInfo()
        content_bs = book_bs.find('section', {'class': 'content'})
        if not content_bs:
            return None
        title_bs = content_bs.find('h3', {'class': 'product-title'})
        if title_bs:
            bookInfo.title = title_bs.get_text()
        if not bookInfo.title:
            return None
        details_bs = content_bs.find('div', {'class': 'details'})
        if details_bs:
            details_list_bs = details_bs.find('ul', {'class': 'list-unstyled'})
            if details_list_bs:
                bookInfo.details = details_list_bs.get_text().strip('\n')
        body_bs_list = content_bs.find_all('div', {'class': 'body'})
        # The description lives in the 4th of exactly six 'body' divs; converted to markdown.
        if len(body_bs_list) == 6:
            description_bs = body_bs_list[3]
            bookInfo.description = Tomd(str(description_bs)).markdown.strip()
        download_bs = content_bs.find('span', {'class': 'tn-download'})
        if download_bs:
            bookInfo.tn_url.append(download_bs.get('tn-url'))
        return bookInfo

    @staticmethod
    def _get_resource_url_from_tn_url(tn_url):
        """Resolve a tn-url via the site's download API; return '' on any failure."""
        url = Scraper._MAIN_URL + '/download/' + tn_url
        try:
            response = requests.get(
                url,
                headers=config.get('fake_headers'),
                timeout=Scraper._REQUEST_TIMEOUT,
            )
            response.raise_for_status()
            res_json = response.json()
            if res_json['ok']:
                return res_json['url']
            return ''
        except Exception as e:
            Scraper._LOGGER.error(f'Get resource url failed: {e}')
            return ''

    @staticmethod
    def _run_collect_book_info(thread_pools, index, url_workload, extra_workload, profile_urls):
        """Thread worker: scrape BookInfo for this thread's slice of profile urls."""
        thread_book_info_collection = []
        start_index, workload = Scraper._calculate_start_index_and_workload(
            index, url_workload, extra_workload
        )
        for url in profile_urls[start_index:start_index+workload]:
            book_bs = Scraper._start_get_bs_obj(Scraper._MAIN_URL + url)
            book_info = Scraper._get_book_info_from_bs(book_bs)
            thread_book_info_collection.append(book_info)
        thread_pools[index] = thread_book_info_collection
        config.get('console').log(f'Thread {index} finished its job.')

    @staticmethod
    def _run_collect_book_url(thread_pools, index, url_workload, extra_workload, book_urls):
        """Thread worker: resolve resource urls for this thread's slice of book urls."""
        thread_book_url_collection = []
        start_index, workload = Scraper._calculate_start_index_and_workload(
            index, url_workload, extra_workload
        )
        for book_url in book_urls[start_index:start_index+workload]:
            book_url.resource_url = Scraper._get_resource_url_from_tn_url(book_url.tn_url)
            thread_book_url_collection.append(book_url)
        thread_pools[index] = thread_book_url_collection
        config.get('console').log(f'Thread {index} finished its job.')

    @staticmethod
    def _run_download_all(index, url_workload, extra_workload, book_urls):
        """Thread worker: download this thread's slice of books into the output dir."""
        start_index, workload = Scraper._calculate_start_index_and_workload(
            index, url_workload, extra_workload
        )
        for book_url in book_urls[start_index:start_index+workload]:
            if book_url.resource_url.startswith(Scraper._URL_THAT_WONT_WORK):
                continue
            if book_url.resource_url.startswith('/'):
                resource_url = Scraper._MAIN_URL + book_url.resource_url[1:]
            else:
                # Fix: the original only assigned resource_url in the '/' branch,
                # so any absolute url raised NameError below.
                resource_url = book_url.resource_url
            filename_regex = re.compile('^.+/(.+)$')
            date_code_regex = re.compile('^.+/(.+)/.*$')
            date_code_2_regex = re.compile('^.+/(.*)//.*$')
            prefix_regex = re.compile('^(.+)/.*$')
            filename = filename_regex.findall(resource_url)[0]
            date_code = date_code_regex.findall(resource_url)[0]
            if date_code == '':
                date_code = date_code_2_regex.findall(resource_url)[0]
            # Quote only the filename component so axel gets a valid url.
            quoted_resource_url = ''.join([
                prefix_regex.findall(resource_url)[0],
                '/',
                quote(filename)
            ])
            file_path = ''.join([
                Scraper._DEFAULT_OUTPUT_DIR,
                '/',
                '"',
                date_code,
                ' ',
                filename,
                '"',
            ])
            Scraper._download_from_url_and_save(quoted_resource_url, file_path, filename)
        config.get('console').log(f'Thread {index} finished its job.')

    @staticmethod
    def _download_from_url_and_save(url, path, filename):
        """Download *url* to *path* with axel (inside xterm), retrying until it succeeds."""
        download_command = 'axel %s --output=%s' % (url, path)
        result = os.system('xterm -e %s' % download_command)
        if result == 0:
            return
        rich.print(f'Download (unknown) failed, now re-start its downloading...')
        # Fix: original called Scraper.__download_from_url_and_save, which name-mangles
        # to a non-existent attribute and raised AttributeError on every retry.
        Scraper._download_from_url_and_save(url, path, filename)

    def _retrieve_book_profile_page_urls_from_other_page(self, page_count):
        """Collect profile urls from pages 2..page_count+1 using worker threads."""
        thread_manager = ThreadManager()
        thread_manager.thread_job_distribution(page_count, ThreadManager.THREAD_PROFILE_PAGE_JOB)
        thread_manager.thread_job_preparation(
            Scraper._run_profile_page_urls,
            ThreadManager.THREAD_PROFILE_PAGE_JOB,
            self._search_key
        )
        thread_manager.thread_job_handling(self._book_profile_page_urls)

    def get_all_book_profile_page_urls(self):
        """Step 1: collect every book profile url for the search key and store them in Db."""
        search_api = Scraper._construct_search_api(self._search_key)
        main_bs = Scraper._start_get_bs_obj(search_api)
        page_count = Scraper._get_pagination_count(main_bs)
        styled_page_count = typer.style(str(page_count), fg=typer.colors.MAGENTA, bold=True)
        typer.echo(f'There are {styled_page_count} page(s) in total.')
        with config.get('console').status('[bold green]retrieving book profile page urls...') as status:
            self._book_profile_page_urls = Scraper._retrieve_book_profile_page_urls_from_page(main_bs)
            if page_count > 1:
                self._retrieve_book_profile_page_urls_from_other_page(page_count - 1)
            styled_book_count = typer.style(str(len(self._book_profile_page_urls)), fg=typer.colors.MAGENTA, bold=True)
            typer.echo(f'There are {styled_book_count} book urls in total.')
            self.db.store_profile_page_urls(self._book_profile_page_urls)
            typer.echo('Done.')

    def _profile_urls_status(self):
        """Print a summary table of collected profile urls vs. unique book names."""
        # Fix: 'import rich' alone is not guaranteed to load the rich.table
        # submodule, so rich.table.Table could raise AttributeError.
        from rich.table import Table
        filename_pattern = re.compile('^/book/(.*)/[0-9]*$')
        unique_book_names = []
        for url in self._book_profile_page_urls:
            match = re.fullmatch(filename_pattern, url)
            if match:
                unique_book_names.append(match.group(1))
        table = Table(show_header=True, header_style='magenta')
        table.add_column('Item', style='dim')
        table.add_column('Count', width=12)
        table.add_row('Profile url count', str(len(self._book_profile_page_urls)))
        table.add_row('Unique book names from urls', str(len(set(unique_book_names))))
        config.get('console').print(table)
        rich.print()

    def _collect_book_info_from_profile_pages(self):
        """Scrape BookInfo for every stored profile url (threaded) and persist to Db."""
        thread_manager = ThreadManager()
        thread_manager.thread_job_distribution(len(self._book_profile_page_urls), ThreadManager.THREAD_RETRIEVE_RESOURCE_JOB)
        thread_manager.thread_job_preparation(
            Scraper._run_collect_book_info,
            ThreadManager.THREAD_RETRIEVE_RESOURCE_JOB,
            self._book_profile_page_urls
        )
        self._book_info_collection = []
        thread_manager.thread_job_handling(self._book_info_collection)
        # Drop pages that failed to parse (workers append None for those).
        self._book_info_collection = list(
            filter(lambda x: x is not None, self._book_info_collection)
        )
        self.db.store_book_info_collection(self._book_info_collection)

    def _collect_resource_urls_from_tn_urls(self):
        """Resolve every stored tn-url to a concrete resource url (threaded) and persist."""
        thread_manager = ThreadManager()
        thread_manager.thread_job_distribution(len(self._book_url_collection), ThreadManager.THREAD_COLLECT_RESOURCE_URL_JOB)
        thread_manager.thread_job_preparation(
            Scraper._run_collect_book_url,
            ThreadManager.THREAD_COLLECT_RESOURCE_URL_JOB,
            self._book_url_collection
        )
        self._book_url_collection = []
        thread_manager.thread_job_handling(self._book_url_collection)
        self.db.store_book_url_collection(self._book_url_collection)

    def _download_all_books(self):
        """Download every resolved resource url into the output directory (threaded)."""
        if not os.path.exists(self._DEFAULT_OUTPUT_DIR):
            os.mkdir(self._DEFAULT_OUTPUT_DIR, 0o775)
        thread_manager = ThreadManager()
        thread_manager.thread_job_distribution(len(self._book_url_collection), ThreadManager.THREAD_DOWNLOAD_JOB)
        thread_manager.thread_job_preparation(
            Scraper._run_download_all,
            ThreadManager.THREAD_DOWNLOAD_JOB,
            self._book_url_collection
        )
        thread_manager.thread_job_handling()

    def collect_book_info(self):
        """Step 2 (CLI): scrape book info for all stored profile urls."""
        self._book_profile_page_urls = self.db.select_all_profile_urls()
        self._profile_urls_status()
        if self._book_profile_page_urls == []:
            rich.print('There is no record in [bold]profile[/bold] table, probably need to run [bold]search[/bold] command first.')
            rich.print(':monkey: :pile_of_poo:')
            return
        with config.get('console').status('[bold green]collecting book info from profile pages...') as status:
            self._collect_book_info_from_profile_pages()

    def collect_all_resource_urls(self):
        """Step 3 (CLI): resolve tn-urls to resource urls for all stored books."""
        self._book_url_collection = self.db.select_all_book_urls()
        if self._book_url_collection == []:
            rich.print(':monkey: :pile_of_poo: It looks like nothing needs to be done.')
        # NOTE(review): unlike collect_book_info there is no early return here,
        # so the empty collection is still handed to the thread manager -- confirm intended.
        with config.get('console').status('[bold green]collecting resource urls from tn_urls...') as status:
            self._collect_resource_urls_from_tn_urls()

    def download_all_books(self):
        """Step 4 (CLI): download every book whose resource url has been resolved."""
        self._book_url_collection = self.db.select_all_book_urls(use_resource_url=True)
        if self._book_url_collection == []:
            rich.print(':monkey: :pile_of_poo: It looks like nothing needs to be done.')
        # NOTE(review): same missing early-return pattern as collect_all_resource_urls.
        with config.get('console').status('[bold green]downloading all ebooks...') as status:
            self._download_all_books()
72a32d9f04ab8ab4ced9414b1f448ddca45e1b85 | 108 | py | Python | vvsa/abstracts/economic_indicator.py | goncalovf/security-analysis | 72b80ea7c0c5c93b6fd80a4e347ecdb401b7667e | [
"MIT"
] | 1 | 2021-09-16T13:36:13.000Z | 2021-09-16T13:36:13.000Z | vvsa/abstracts/economic_indicator.py | goncalovf/security-analysis | 72b80ea7c0c5c93b6fd80a4e347ecdb401b7667e | [
"MIT"
] | null | null | null | vvsa/abstracts/economic_indicator.py | goncalovf/security-analysis | 72b80ea7c0c5c93b6fd80a4e347ecdb401b7667e | [
"MIT"
] | null | null | null | from abc import ABC
| 15.428571 | 30 | 0.675926 | from abc import ABC
class Economic_Indicator(ABC):
    """Abstract base class for an economic indicator identified by a name."""

    def __init__(self, name):
        # Name identifying the indicator; semantics are defined by subclasses.
        self.name = name
| 29 | 9 | 49 |
d2c0be59550e7842db850111f2cf4186a92e7a8f | 87 | py | Python | py_proj/__init__.py | maryno-net/py-project | a8db26494087a9a290553ba0bfaa692c9000dde1 | [
"MIT"
] | null | null | null | py_proj/__init__.py | maryno-net/py-project | a8db26494087a9a290553ba0bfaa692c9000dde1 | [
"MIT"
] | null | null | null | py_proj/__init__.py | maryno-net/py-project | a8db26494087a9a290553ba0bfaa692c9000dde1 | [
"MIT"
] | null | null | null | """
py_proj
~~~~~~~
Пустой проект на питоне.
"""
__version__ = '0.0.0'
| 7.909091 | 28 | 0.471264 | """
py_proj
~~~~~~~
Пустой проект на питоне.
"""
__version__ = '0.0.0'
| 0 | 0 | 0 |
6f95a6f47ed3c2841f65cdd72753895f142c67aa | 7,151 | py | Python | tests/test_pybbcsig.py | beyond-blockchain/libbbcsig | 865e7069c682f9fdcea01059171d618bd9dd501e | [
"Apache-2.0"
] | null | null | null | tests/test_pybbcsig.py | beyond-blockchain/libbbcsig | 865e7069c682f9fdcea01059171d618bd9dd501e | [
"Apache-2.0"
] | null | null | null | tests/test_pybbcsig.py | beyond-blockchain/libbbcsig | 865e7069c682f9fdcea01059171d618bd9dd501e | [
"Apache-2.0"
] | 1 | 2021-08-04T10:00:43.000Z | 2021-08-04T10:00:43.000Z | import os
#import unittest
import pybbcsig
import binascii
def _is_windows():
    """Return True when the interpreter is running on Windows (os.name == "nt")."""
    return "nt" == os.name
sig = pybbcsig.PyBBcSigSecp256k1()
in_privkey = b'\xd6Y\xbc#I\xfe\xed\x00\xe1x\xaa\xb4V\xd0\x9c\x01\xe2\x9a\xfd\xd2a\xabf\xcb\x14\xacM\x8e\xca2=\xbb'
in_pubkey = b'\x04\x0fd(\xdd\x8fR\xf7@\x86\xe7\x04\x06\xc3K\xecu\xd9\xfe\xe9de\x95\x8c\x16\x0esJ\xe8\x12Q`\xad).\xbd\xfb\x1c\x80\x96p\x12\xb5o\xfdr;\xd8\xa6`\xec\x85i\xad\x14\xceks8\x17&\x7f\xee\xd0\xc1'
in_pubkey_compressed = b'\x03\x0fd(\xdd\x8fR\xf7@\x86\xe7\x04\x06\xc3K\xecu\xd9\xfe\xe9de\x95\x8c\x16\x0esJ\xe8\x12Q`\xad'
in_test_digest = binascii.a2b_hex("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
in_signature_nt = b'Dz\xe5\x84$\xf5_\x19\x9d\xda\x83\x00(O\x91\xec\x18MP\xbd\xef\xa2\x9b\x96\xb3\x9d\xea\xb5\xf9\x93;\xa4\xafB\xe9:\x9e\xa5G\xc79\xe1\xb7\xfaS)\xfd\x82\x0e\xa7\x13|\xe6\xc8\xebX\x91[\x8a$\xe3\xf0\xf4\r'
in_signature_posix = b'B)\n@\xe666\xb310z5\x8c\x99\x06u\xe7\xa9}\\\xdc\xa5\x93\x8a\xc6\xb2by\xe1`L\xe8\x95\x18\xd3?\x1f\x1d\x81\x96\xd6\x01\x96\xe2\x80y\x0fz3\xde\xd8\x18\xbd\xbc\xce\xc2\xf6\xdf\xde\x8c\xdd\xb8\xb0\xd5'
if _is_windows():
in_signature = in_signature_nt
else:
in_signature = in_signature_posix
in_der_nt = b'0\x82\x01\x13\x02\x01\x01\x04 \xd6Y\xbc#I\xfe\xed\x00\xe1x\xaa\xb4V\xd0\x9c\x01\xe2\x9a\xfd\xd2a\xabf\xcb\x14\xacM\x8e\xca2=\xbb\xa0\x81\xa50\x81\xa2\x02\x01\x010,\x06\x07*\x86H\xce=\x01\x01\x02!\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xfc/0\x06\x04\x01\x00\x04\x01\x07\x04A\x04y\xbef~\xf9\xdc\xbb\xacU\xa0b\x95\xce\x87\x0b\x07\x02\x9b\xfc\xdb-\xce(\xd9Y\xf2\x81[\x16\xf8\x17\x98H:\xdaw&\xa3\xc4e]\xa4\xfb\xfc\x0e\x11\x08\xa8\xfd\x17\xb4H\xa6\x85T\x19\x9cG\xd0\x8f\xfb\x10\xd4\xb8\x02!\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xba\xae\xdc\xe6\xafH\xa0;\xbf\xd2^\x8c\xd06AA\x02\x01\x01\xa1D\x03B\x00\x04\x0fd(\xdd\x8fR\xf7@\x86\xe7\x04\x06\xc3K\xecu\xd9\xfe\xe9de\x95\x8c\x16\x0esJ\xe8\x12Q`\xad).\xbd\xfb\x1c\x80\x96p\x12\xb5o\xfdr;\xd8\xa6`\xec\x85i\xad\x14\xceks8\x17&\x7f\xee\xd0\xc1'
in_der_posix = b'0t\x02\x01\x01\x04 \xd6Y\xbc#I\xfe\xed\x00\xe1x\xaa\xb4V\xd0\x9c\x01\xe2\x9a\xfd\xd2a\xabf\xcb\x14\xacM\x8e\xca2=\xbb\xa0\x07\x06\x05+\x81\x04\x00\n\xa1D\x03B\x00\x04\x0fd(\xdd\x8fR\xf7@\x86\xe7\x04\x06\xc3K\xecu\xd9\xfe\xe9de\x95\x8c\x16\x0esJ\xe8\x12Q`\xad).\xbd\xfb\x1c\x80\x96p\x12\xb5o\xfdr;\xd8\xa6`\xec\x85i\xad\x14\xceks8\x17&\x7f\xee\xd0\xc1'
if _is_windows():
in_der = in_der_nt
else:
in_der = in_der_posix
in_pem_nt = b'-----BEGIN EC PRIVATE KEY-----\nMIIBEwIBAQQg1lm8I0n+7QDheKq0VtCcAeKa/dJhq2bLFKxNjsoyPbuggaUwgaIC\nAQEwLAYHKoZIzj0BAQIhAP////////////////////////////////////7///wv\nMAYEAQAEAQcEQQR5vmZ++dy7rFWgYpXOhwsHApv82y3OKNlZ8oFbFvgXmEg62ncm\no8RlXaT7/A4RCKj9F7RIpoVUGZxH0I/7ENS4AiEA/////////////////////rqu\n3OavSKA7v9JejNA2QUECAQGhRANCAAQPZCjdj1L3QIbnBAbDS+x12f7pZGWVjBYO\nc0roElFgrSkuvfscgJZwErVv/XI72KZg7IVprRTOa3M4FyZ/7tDB\n-----END EC PRIVATE KEY-----\n\x00'
in_pem_posix = b'-----BEGIN EC PRIVATE KEY-----\nMHQCAQEEINZZvCNJ/u0A4XiqtFbQnAHimv3SYatmyxSsTY7KMj27oAcGBSuBBAAK\noUQDQgAED2Qo3Y9S90CG5wQGw0vsddn+6WRllYwWDnNK6BJRYK0pLr37HICWcBK1\nb/1yO9imYOyFaa0UzmtzOBcmf+7QwQ==\n-----END EC PRIVATE KEY-----\n\x00'
if _is_windows():
in_pem = in_pem_nt
else:
in_pem = in_pem_posix
def test_keypair():
    """A freshly generated secp256k1 key pair has a 32-byte private key and a 65-byte public key."""
    pubkey, privkey = sig.generate_keypair(0)
    # print("pubkey = {}".format(pubkey))
    # print("privkey = {}".format(privkey))
    assert (len(privkey) == 32)
    assert (len(pubkey) == 65)
def test_sign():
    """Signing the fixed digest with the fixture private key yields a 64-byte signature."""
    assert (len(in_privkey) == 32)
    signature = sig.sign(in_privkey, in_test_digest)  # changes every run
    # print("signature = {}".format(signature))
    assert (len(signature) == 64)
def test_der():
    """DER export of the fixture private key matches the platform-specific blob (length differs on Windows vs POSIX)."""
    assert (len(in_privkey) == 32)
    der = sig.output_der(in_privkey)
    # print("der = {}".format(der))
    # print("len(der) = {}".format(len(der)))
    if _is_windows():
        assert (len(der) == 279)
    else:
        assert (len(der) == 118)
    assert (der == in_der)
def test_from_der():
    """Importing the platform's own DER blob recovers the fixture key pair."""
    pubkey, privkey = sig.convert_from_der(in_der, 0)
    # print("pubkey = {}".format(pubkey))
    # print("privkey = {}".format(privkey))
    assert (len(privkey) == 32)
    assert (len(pubkey) == 65)
    assert (privkey == in_privkey)
    assert (pubkey == in_pubkey)
def test_from_der2():
    """Importing the *other* platform's DER blob also recovers the same fixture key pair."""
    if _is_windows():
        der = in_der_posix
    else:
        der = in_der_nt
    pubkey, privkey = sig.convert_from_der(der, 0)
    # print("pubkey = {}".format(pubkey))
    # print("privkey = {}".format(privkey))
    assert (len(privkey) == 32)
    assert (len(pubkey) == 65)
    assert (privkey == in_privkey)
    assert (pubkey == in_pubkey)
def test_pubkey_compressed():
    """The compressed public key derived from the fixture private key is 33 bytes and matches the fixture."""
    assert (len(in_privkey) == 32)
    pubkey = sig.get_public_key_compressed(in_privkey)
    # print("pubkey = {}".format(pubkey))
    # print("len(pubkey) = {}".format(len(pubkey)))
    assert (len(pubkey) == 33)
    assert (pubkey == in_pubkey_compressed)
def test_pubkey_uncompressed():
    """The uncompressed public key derived from the fixture private key is 65 bytes and matches the fixture."""
    assert (len(in_privkey) == 32)
    pubkey = sig.get_public_key_uncompressed(in_privkey)
    # print("pubkey = {}".format(pubkey))
    # print("len(pubkey) = {}".format(len(pubkey)))
    assert (len(pubkey) == 65)
    assert (pubkey == in_pubkey)
def test_verify():
    """The stored 64-byte signature verifies against the fixture public key and digest."""
    # print("len(in_signature) = {}".format(len(in_signature)))
    assert (len(in_signature) == 64)
    ret = sig.verify(in_pubkey, in_test_digest, in_signature)
    # print("ret = {}".format(ret))
    assert (ret > 0)
    assert (ret == 1)
def test_verify2():
    """A signature produced on the *other* platform also verifies against the fixture key."""
    if _is_windows():
        signature = in_signature_posix
    else:
        signature = in_signature_nt
    # print("len(signature) = {}".format(len(signature)))
    assert (len(signature) == 64)
    ret = sig.verify(in_pubkey, in_test_digest, signature)
    # print("ret = {}".format(ret))
    assert (ret > 0)
    assert (ret == 1)
def test_pem():
    """PEM export of the fixture private key matches the platform-specific blob (length differs on Windows vs POSIX)."""
    assert (len(in_privkey) == 32)
    pem = sig.output_pem(in_privkey)
    # print("pem = {}".format(pem))
    # print("len(pem) = {}".format(len(pem)))
    if _is_windows():
        assert (len(pem) == (438 + 1))
    else:
        assert(len(pem) == (223 + 1))
    assert (pem == in_pem)
def test_from_pem():
    """Importing the platform's own PEM blob recovers the fixture key pair."""
    pubkey, privkey = sig.convert_from_pem(in_pem, 0)
    # print("pubkey = {}".format(pubkey))
    # print("privkey = {}".format(privkey))
    assert (len(privkey) == 32)
    assert (len(pubkey) == 65)
    assert (privkey == in_privkey)
    assert(pubkey == in_pubkey)
def test_from_pem2():
    """Importing the *other* platform's PEM blob also recovers the same fixture key pair."""
    if _is_windows():
        pem = in_pem_posix
    else:
        pem = in_pem_nt
    pubkey, privkey = sig.convert_from_pem(pem, 0)
    # print("pubkey = {}".format(pubkey))
    # print("privkey = {}".format(privkey))
    assert (len(privkey) == 32)
    assert (len(pubkey) == 65)
    assert (privkey == in_privkey)
    assert (pubkey == in_pubkey)
| 35.053922 | 912 | 0.654734 | import os
#import unittest
import pybbcsig
import binascii
def _is_windows():
"""
"""
return os.name == "nt"
sig = pybbcsig.PyBBcSigSecp256k1()
in_privkey = b'\xd6Y\xbc#I\xfe\xed\x00\xe1x\xaa\xb4V\xd0\x9c\x01\xe2\x9a\xfd\xd2a\xabf\xcb\x14\xacM\x8e\xca2=\xbb'
in_pubkey = b'\x04\x0fd(\xdd\x8fR\xf7@\x86\xe7\x04\x06\xc3K\xecu\xd9\xfe\xe9de\x95\x8c\x16\x0esJ\xe8\x12Q`\xad).\xbd\xfb\x1c\x80\x96p\x12\xb5o\xfdr;\xd8\xa6`\xec\x85i\xad\x14\xceks8\x17&\x7f\xee\xd0\xc1'
in_pubkey_compressed = b'\x03\x0fd(\xdd\x8fR\xf7@\x86\xe7\x04\x06\xc3K\xecu\xd9\xfe\xe9de\x95\x8c\x16\x0esJ\xe8\x12Q`\xad'
in_test_digest = binascii.a2b_hex("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
in_signature_nt = b'Dz\xe5\x84$\xf5_\x19\x9d\xda\x83\x00(O\x91\xec\x18MP\xbd\xef\xa2\x9b\x96\xb3\x9d\xea\xb5\xf9\x93;\xa4\xafB\xe9:\x9e\xa5G\xc79\xe1\xb7\xfaS)\xfd\x82\x0e\xa7\x13|\xe6\xc8\xebX\x91[\x8a$\xe3\xf0\xf4\r'
in_signature_posix = b'B)\n@\xe666\xb310z5\x8c\x99\x06u\xe7\xa9}\\\xdc\xa5\x93\x8a\xc6\xb2by\xe1`L\xe8\x95\x18\xd3?\x1f\x1d\x81\x96\xd6\x01\x96\xe2\x80y\x0fz3\xde\xd8\x18\xbd\xbc\xce\xc2\xf6\xdf\xde\x8c\xdd\xb8\xb0\xd5'
if _is_windows():
in_signature = in_signature_nt
else:
in_signature = in_signature_posix
in_der_nt = b'0\x82\x01\x13\x02\x01\x01\x04 \xd6Y\xbc#I\xfe\xed\x00\xe1x\xaa\xb4V\xd0\x9c\x01\xe2\x9a\xfd\xd2a\xabf\xcb\x14\xacM\x8e\xca2=\xbb\xa0\x81\xa50\x81\xa2\x02\x01\x010,\x06\x07*\x86H\xce=\x01\x01\x02!\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xfc/0\x06\x04\x01\x00\x04\x01\x07\x04A\x04y\xbef~\xf9\xdc\xbb\xacU\xa0b\x95\xce\x87\x0b\x07\x02\x9b\xfc\xdb-\xce(\xd9Y\xf2\x81[\x16\xf8\x17\x98H:\xdaw&\xa3\xc4e]\xa4\xfb\xfc\x0e\x11\x08\xa8\xfd\x17\xb4H\xa6\x85T\x19\x9cG\xd0\x8f\xfb\x10\xd4\xb8\x02!\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xba\xae\xdc\xe6\xafH\xa0;\xbf\xd2^\x8c\xd06AA\x02\x01\x01\xa1D\x03B\x00\x04\x0fd(\xdd\x8fR\xf7@\x86\xe7\x04\x06\xc3K\xecu\xd9\xfe\xe9de\x95\x8c\x16\x0esJ\xe8\x12Q`\xad).\xbd\xfb\x1c\x80\x96p\x12\xb5o\xfdr;\xd8\xa6`\xec\x85i\xad\x14\xceks8\x17&\x7f\xee\xd0\xc1'
in_der_posix = b'0t\x02\x01\x01\x04 \xd6Y\xbc#I\xfe\xed\x00\xe1x\xaa\xb4V\xd0\x9c\x01\xe2\x9a\xfd\xd2a\xabf\xcb\x14\xacM\x8e\xca2=\xbb\xa0\x07\x06\x05+\x81\x04\x00\n\xa1D\x03B\x00\x04\x0fd(\xdd\x8fR\xf7@\x86\xe7\x04\x06\xc3K\xecu\xd9\xfe\xe9de\x95\x8c\x16\x0esJ\xe8\x12Q`\xad).\xbd\xfb\x1c\x80\x96p\x12\xb5o\xfdr;\xd8\xa6`\xec\x85i\xad\x14\xceks8\x17&\x7f\xee\xd0\xc1'
if _is_windows():
in_der = in_der_nt
else:
in_der = in_der_posix
in_pem_nt = b'-----BEGIN EC PRIVATE KEY-----\nMIIBEwIBAQQg1lm8I0n+7QDheKq0VtCcAeKa/dJhq2bLFKxNjsoyPbuggaUwgaIC\nAQEwLAYHKoZIzj0BAQIhAP////////////////////////////////////7///wv\nMAYEAQAEAQcEQQR5vmZ++dy7rFWgYpXOhwsHApv82y3OKNlZ8oFbFvgXmEg62ncm\no8RlXaT7/A4RCKj9F7RIpoVUGZxH0I/7ENS4AiEA/////////////////////rqu\n3OavSKA7v9JejNA2QUECAQGhRANCAAQPZCjdj1L3QIbnBAbDS+x12f7pZGWVjBYO\nc0roElFgrSkuvfscgJZwErVv/XI72KZg7IVprRTOa3M4FyZ/7tDB\n-----END EC PRIVATE KEY-----\n\x00'
in_pem_posix = b'-----BEGIN EC PRIVATE KEY-----\nMHQCAQEEINZZvCNJ/u0A4XiqtFbQnAHimv3SYatmyxSsTY7KMj27oAcGBSuBBAAK\noUQDQgAED2Qo3Y9S90CG5wQGw0vsddn+6WRllYwWDnNK6BJRYK0pLr37HICWcBK1\nb/1yO9imYOyFaa0UzmtzOBcmf+7QwQ==\n-----END EC PRIVATE KEY-----\n\x00'
if _is_windows():
in_pem = in_pem_nt
else:
in_pem = in_pem_posix
def test_keypair():
"""
"""
pubkey, privkey = sig.generate_keypair(0)
# print("pubkey = {}".format(pubkey))
# print("privkey = {}".format(privkey))
assert (len(privkey) == 32)
assert (len(pubkey) == 65)
def test_sign():
"""
"""
assert (len(in_privkey) == 32)
signature = sig.sign(in_privkey, in_test_digest) # 毎回変わる
# print("signature = {}".format(signature))
assert (len(signature) == 64)
def test_der():
"""
"""
assert (len(in_privkey) == 32)
der = sig.output_der(in_privkey)
# print("der = {}".format(der))
# print("len(der) = {}".format(len(der)))
if _is_windows():
assert (len(der) == 279)
else:
assert (len(der) == 118)
assert (der == in_der)
def test_from_der():
"""
"""
pubkey, privkey = sig.convert_from_der(in_der, 0)
# print("pubkey = {}".format(pubkey))
# print("privkey = {}".format(privkey))
assert (len(privkey) == 32)
assert (len(pubkey) == 65)
assert (privkey == in_privkey)
assert (pubkey == in_pubkey)
def test_from_der2():
"""
"""
if _is_windows():
der = in_der_posix
else:
der = in_der_nt
pubkey, privkey = sig.convert_from_der(der, 0)
# print("pubkey = {}".format(pubkey))
# print("privkey = {}".format(privkey))
assert (len(privkey) == 32)
assert (len(pubkey) == 65)
assert (privkey == in_privkey)
assert (pubkey == in_pubkey)
def test_pubkey_compressed():
"""
"""
assert (len(in_privkey) == 32)
pubkey = sig.get_public_key_compressed(in_privkey)
# print("pubkey = {}".format(pubkey))
# print("len(pubkey) = {}".format(len(pubkey)))
assert (len(pubkey) == 33)
assert (pubkey == in_pubkey_compressed)
def test_pubkey_uncompressed():
"""
"""
assert (len(in_privkey) == 32)
pubkey = sig.get_public_key_uncompressed(in_privkey)
# print("pubkey = {}".format(pubkey))
# print("len(pubkey) = {}".format(len(pubkey)))
assert (len(pubkey) == 65)
assert (pubkey == in_pubkey)
def test_verify():
"""
"""
# print("len(in_signature) = {}".format(len(in_signature)))
assert (len(in_signature) == 64)
ret = sig.verify(in_pubkey, in_test_digest, in_signature)
# print("ret = {}".format(ret))
assert (ret > 0)
assert (ret == 1)
def test_verify2():
"""
"""
if _is_windows():
signature = in_signature_posix
else:
signature = in_signature_nt
# print("len(signature) = {}".format(len(signature)))
assert (len(signature) == 64)
ret = sig.verify(in_pubkey, in_test_digest, signature)
# print("ret = {}".format(ret))
assert (ret > 0)
assert (ret == 1)
def test_pem():
"""
"""
assert (len(in_privkey) == 32)
pem = sig.output_pem(in_privkey)
# print("pem = {}".format(pem))
# print("len(pem) = {}".format(len(pem)))
if _is_windows():
assert (len(pem) == (438 + 1))
else:
assert(len(pem) == (223 + 1))
assert (pem == in_pem)
def test_from_pem():
"""
"""
pubkey, privkey = sig.convert_from_pem(in_pem, 0)
# print("pubkey = {}".format(pubkey))
# print("privkey = {}".format(privkey))
assert (len(privkey) == 32)
assert (len(pubkey) == 65)
assert (privkey == in_privkey)
assert(pubkey == in_pubkey)
def test_from_pem2():
"""
"""
if _is_windows():
pem = in_pem_posix
else:
pem = in_pem_nt
pubkey, privkey = sig.convert_from_pem(pem, 0)
# print("pubkey = {}".format(pubkey))
# print("privkey = {}".format(privkey))
assert (len(privkey) == 32)
assert (len(pubkey) == 65)
assert (privkey == in_privkey)
assert (pubkey == in_pubkey)
| 0 | 0 | 0 |
9e046cf6fd37e43d4432e2c93cba27b61ffda4e6 | 28 | py | Python | mic/__init__.py | 1x-eng/PROTON | 2f27352f7eb9b46642325d800fcdb98ba5c99596 | [
"BSD-3-Clause"
] | 31 | 2018-09-28T05:00:02.000Z | 2021-11-09T11:06:57.000Z | config/__init__.py | PruthviKumarBK/fitbit_webservice | 68ff427311d888b951d48c98ac35f7b181d416c7 | [
"MIT"
] | 23 | 2019-05-17T08:48:07.000Z | 2020-01-20T22:34:28.000Z | nucleus/istore/__init__.py | 1x-eng/PROTON | 2f27352f7eb9b46642325d800fcdb98ba5c99596 | [
"BSD-3-Clause"
] | 7 | 2018-09-28T16:57:35.000Z | 2019-11-23T07:36:41.000Z | __author__ = 'pruthvi kumar' | 28 | 28 | 0.785714 | __author__ = 'pruthvi kumar' | 0 | 0 | 0 |
d297f8860252eee12e4a3665b01d5afa296e7b75 | 4,542 | py | Python | goss-testing/scripts/python/pyTest-ips-in-dnsmasq_d-in-correct-order.py | Cray-HPE/csm-testing | 9d72e4a8a11102a037b729afbae0bfdefb4e2805 | [
"MIT"
] | 2 | 2021-12-03T20:56:33.000Z | 2022-01-25T06:57:59.000Z | goss-testing/scripts/python/pyTest-ips-in-dnsmasq_d-in-correct-order.py | Cray-HPE/csm-testing | 9d72e4a8a11102a037b729afbae0bfdefb4e2805 | [
"MIT"
] | 25 | 2021-11-12T18:46:06.000Z | 2022-03-30T20:01:35.000Z | goss-testing/scripts/python/pyTest-ips-in-dnsmasq_d-in-correct-order.py | Cray-HPE/csm-testing | 9d72e4a8a11102a037b729afbae0bfdefb4e2805 | [
"MIT"
] | 1 | 2022-02-23T18:10:46.000Z | 2022-02-23T18:10:46.000Z | #!/usr/bin/env python3
#
# MIT License
#
# (C) Copyright 2022 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
import ipaddress
import sys
import logging, datetime
'''
Simple script to ensure that the dhcp-range in the /etc/dnsmasq.d/{net}.conf files
are in the correct order. This error was encountered on surtur and caused problems
The location of the files is already known, as well as the file names. If that changes
this script will break. Because of the number of files in dnsmasq.d this is the only
way this check can be accomplished (Can't check all files in the directory)
Note: It's possible to get the filenames from GOSS variables, so if the names change,
which is a concern, they would only need to be changed in the variables file.
USAGE: pyTest-dhcp-range-in-dnsmasq_d-in-correct-order.py
Goss will search the output for the word FAIL
'''
# In case we want to make this script more user-friendly and add argparse or configparser
# CRITICAL 50, ERROR 40, WARNING 30, INFO 20, DEBUG 10, NOTSET 0
l_lvl = logging.INFO


def now():
    """Return the current local timestamp as a string (used to prefix log messages).

    Restored: this helper is called throughout the script but its definition
    was missing from this copy, causing a NameError at import time.
    """
    return str(datetime.datetime.now())


# Start the logger
logging.basicConfig(filename='/tmp/' + sys.argv[0].split('/')[-1] + '.log', level=l_lvl)
logging.info(now()+" Starting up")

# Known dnsmasq config files to check (see module docstring about name changes).
fileDir = "/etc/dnsmasq.d/"
fileNames = ['CAN', 'NMN', 'HMN', 'mtl' ]
contents =[]

if __name__ == '__main__':
    # Iterate over the list of filenames and try to open the file
    for fileName in fileNames:
        # clear the start and end strings
        logging.info(now()+" Checking %s.", fileDir+fileName)
        start = end = ''
        try:
            f = open(fileDir+fileName+".conf", 'r')
            #contents = f.read().split('\n')
        except OSError:  # narrowed from a bare except: only file-open errors expected here
            logging.critical(now()+" Couldn't open %s.", fileDir+fileName+'.conf')
            print("Unable to open file: "+fileName+".conf")
            sys.exit(1)
        # if the contents of the file !NULL - read the file line-by-line
        # and check if the line contains 'dhcp-range'
        # it's a really good bet that the format of that line will not change
        line = f.readline()
        while line:
            logging.debug(now() + " line from %s: %s", fileName, line.strip())
            # If the line contains 'dhcp-range' extract the start and end addresses
            if 'dhcp-range' in line:
                start = line.split(',')[1]
                end = line.split(',')[2]
                logging.debug("Start IP = %s, End IP = %s.", start, end)
            line = f.readline()
        # If we found the start IP, ensure that it is less than the end IP
        if start:
            # They really should be valid ip addresses, but guarding just in case
            try:
                start_ip = ipaddress.ip_address(start)
                end_ip = ipaddress.ip_address(end)
            except ValueError:  # ip_address raises ValueError on malformed input
                logging.critical(now()+" Could not convert either start = %s or end = %s to IP addresses.", start, end)
                print("FAIL: Conversion of strings to IP addresses failed")
                sys.exit(2)
            if start_ip < end_ip:
                print("PASS")
            else:
                logging.error( now()+" The file %s failed. Start IP (%s) >= End IP (%s).", fileDir + fileName + ".conf", start, end)
                print("FAIL for file:" + fileDir + fileName + ".conf")
        else:
            print("FAIL - no starting IP address found")
| 43.673077 | 132 | 0.655218 | #!/usr/bin/env python3
#
# MIT License
#
# (C) Copyright 2022 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
import ipaddress
import sys
import logging, datetime
'''
Simple script to ensure that the dhcp-range in the /etc/dnsmasq.d/{net}.conf files
are in the correct order. This error was encountered on surtur and caused problems
The location of the files is already known, as well as the file names. If that changes
this script will break. Because of the number of files in dnsmasq.d this is the only
way this check can be accomplished (Can't check all files in the directory)
Note: It's possible to get the filenames from GOSS variables, so if the names change,
which is a concern, they would only need to be changed in the variables file.
USAGE: pyTest-dhcp-reange-in-dnsmasq_d-in-correct-order.py
Goss will search the output for the word FAIL
'''
def now():
    """Return the current local time as a string, used to prefix log lines."""
    current = datetime.datetime.now()
    return str(current)
# In case we want to make this script more user-friendly and add argparse or configparser
# CRITICAL 50, ERROR 40, WARNING 30, INFO 20, DEBUG 10, NOTSET 0
l_lvl = logging.INFO
# Start the logger; the log file is named after this script, under /tmp.
logging.basicConfig(filename='/tmp/' + sys.argv[0].split('/')[-1] + '.log', level=l_lvl)
logging.info(now()+" Starting up")
# Known dnsmasq per-network config files to validate (names hard-coded by design,
# see the module docstring).
fileDir = "/etc/dnsmasq.d/"
fileNames = ['CAN', 'NMN', 'HMN', 'mtl' ]
# NOTE(review): `contents` is never used below (the read was commented out) --
# candidate for removal.
contents =[]
if __name__ == '__main__':
    # Validate the dhcp-range entry of every known dnsmasq network config.
    # Prints PASS/FAIL per file; GOSS greps the output for "FAIL".
    for fileName in fileNames:
        logging.info(now()+" Checking %s.", fileDir+fileName)
        # Reset the parsed range endpoints for every file.
        start = end = ''
        try:
            f = open(fileDir+fileName+".conf", 'r')
        except OSError:
            # Narrowed from a bare except: only file-access problems are a
            # "can't open" failure; anything else should raise a traceback.
            logging.critical(now()+" Couldn't open %s.", fileDir+fileName+'.conf')
            print("Unable to open file: "+fileName+".conf")
            sys.exit(1)
        # Scan line-by-line for the 'dhcp-range' entry. The context manager
        # guarantees the handle is closed (the original leaked one per file).
        with f:
            for line in f:
                logging.debug(now() + " line from %s: %s", fileName, line.strip())
                # Expected format: dhcp-range=<start>,<end>,<lease time>
                if 'dhcp-range' in line:
                    start = line.split(',')[1]
                    end = line.split(',')[2]
                    logging.debug("Start IP = %s, End IP = %s.", start, end)
        # If we found the start IP, ensure that it is less than the end IP
        if start:
            try:
                start_ip = ipaddress.ip_address(start)
                end_ip = ipaddress.ip_address(end)
            except ValueError:
                # ipaddress.ip_address raises ValueError on malformed input.
                logging.critical(now()+" Could not convert either start = %s or end = %s to IP addresses.", start, end)
                print("FAIL: Conversion of strings to IP addresses failed")
                sys.exit(2)
            # NOTE(review): comparing a v4 address to a v6 address raises
            # TypeError; both endpoints are assumed to share one IP version.
            if start_ip < end_ip:
                print("PASS")
            else:
                logging.error( now()+" The file %s failed. Start IP (%s) >= End IP (%s).", fileDir + fileName + ".conf", start, end)
                print("FAIL for file:" + fileDir + fileName + ".conf")
        else:
            print("FAIL - no starting IP address found")
| 90 | 0 | 23 |
6cef0f6f343a55bfefa8c00a31e48c45f56fc66b | 94 | py | Python | src/lingrex/__init__.py | lingpy/lingrex | e0a560fd58006caa7a3a82f1a8d25f75f9f6649c | [
"MIT"
] | 3 | 2019-02-14T15:06:45.000Z | 2021-12-30T22:51:49.000Z | src/lingrex/__init__.py | lingpy/lingrex | e0a560fd58006caa7a3a82f1a8d25f75f9f6649c | [
"MIT"
] | 19 | 2018-10-13T09:12:20.000Z | 2021-11-09T07:53:01.000Z | src/lingrex/__init__.py | lingpy/lingrex | e0a560fd58006caa7a3a82f1a8d25f75f9f6649c | [
"MIT"
] | null | null | null | from lingrex.copar import CoPaR, density
assert CoPaR and density
__version__ = "1.1.2.dev0"
| 18.8 | 40 | 0.776596 | from lingrex.copar import CoPaR, density
# The assert touches the re-exported names so static checkers do not flag
# the import above as unused.
assert CoPaR and density
__version__ = "1.1.2.dev0"
| 0 | 0 | 0 |
fc29719d78085111882522fb969efafc9ce3d058 | 7,379 | py | Python | westclass/gen.py | JohnGiorgi/WeSTClass | 647ab2a4fe1b45e94e40caa4f2f9a50bc5e9d406 | [
"Apache-2.0"
] | null | null | null | westclass/gen.py | JohnGiorgi/WeSTClass | 647ab2a4fe1b45e94e40caa4f2f9a50bc5e9d406 | [
"Apache-2.0"
] | 2 | 2019-01-03T17:58:58.000Z | 2019-01-04T00:31:14.000Z | westclass/gen.py | JohnGiorgi/WeSTClass | 647ab2a4fe1b45e94e40caa4f2f9a50bc5e9d406 | [
"Apache-2.0"
] | 1 | 2019-01-03T18:31:39.000Z | 2019-01-03T18:31:39.000Z | import os
import numpy as np
from spherecluster import VonMisesFisherMixture, sample_vMF
np.random.seed(1234)
| 42.653179 | 118 | 0.622171 | import os
import numpy as np
from spherecluster import VonMisesFisherMixture, sample_vMF
np.random.seed(1234)
def seed_expansion(word_sup_array, prob_sup_array, sz, write_path, vocabulary_inv, embedding_mat):
    """Expand each class's seed-word set to `sz` words via embedding similarity.

    For a class with fewer than `sz` seeds, a probability-weighted indicator
    vector over the vocabulary is propagated through the embedding matrix
    (W @ (W.T @ seed)) and the `sz` highest-scoring vocabulary ids are kept.
    Classes that already have `sz` or more seeds are returned unchanged.
    When `write_path` is given, each class's expanded word list is written to
    `class<j>_<sz>.txt` under it. Returns one array of word ids per class.
    """
    expanded_seed = []
    vocab_sz = len(vocabulary_inv)
    for j, word_class in enumerate(word_sup_array):
        prob_sup_class = prob_sup_array[j]
        expanded_class = []
        seed_vec = np.zeros(vocab_sz)
        if len(word_class) < sz:
            # Weighted indicator vector over the vocabulary for this class.
            for i, word in enumerate(word_class):
                seed_vec[word] = prob_sup_class[i]
            # Similarity propagation: embed the seed mass, then score every word.
            expanded = np.dot(embedding_mat.transpose(), seed_vec)
            expanded = np.dot(embedding_mat, expanded)
            # Vocabulary ids sorted by descending similarity score.
            word_expanded = sorted(range(len(expanded)), key=lambda k: expanded[k], reverse=True)
            for i in range(sz):
                expanded_class.append(word_expanded[i])
            expanded_seed.append(np.array(expanded_class))
        else:
            # Already large enough; keep the original seed set as-is.
            expanded_seed.append(word_class)
        if write_path is not None:
            if not os.path.exists(write_path):
                os.makedirs(write_path)
            # NOTE(review): when the class skipped expansion, expanded_class is
            # empty here, so an empty file is written -- confirm intent.
            f = open(write_path + 'class' + str(j) + '_' + str(sz) + '.txt', 'w')
            for i, word in enumerate(expanded_class):
                f.write(vocabulary_inv[word] + ' ')
            f.close()
    return expanded_seed
def label_expansion(class_labels, write_path, vocabulary_inv, embedding_mat):
    """Expand class seed words and fit a von Mises-Fisher direction per class.

    The expansion size t is grown until two classes would share a word, then
    the last collision-free size (t-1) is used. For every class a single-
    component vMF mixture is fitted on the expanded words' embeddings.
    Returns (expanded_array, centers, kappas).
    """
    print("Retrieving top-t nearest words...")
    n_classes = len(class_labels)
    prob_sup_array = []
    current_szes = []
    all_class_labels = []
    for class_label in class_labels:
        current_sz = len(class_label)
        current_szes.append(current_sz)
        # Uniform seed weights within each class.
        prob_sup_array.append([1/current_sz] * current_sz)
        all_class_labels += list(class_label)
    current_sz = np.min(current_szes)
    # Grow t while the expanded classes stay pairwise disjoint (no duplicates).
    while len(all_class_labels) == len(set(all_class_labels)):
        current_sz += 1
        expanded_array = seed_expansion(class_labels, prob_sup_array, current_sz, None, vocabulary_inv, embedding_mat)
        all_class_labels = [w for w_class in expanded_array for w in w_class]
    # Step back to the largest size without cross-class overlap.
    expanded_array = seed_expansion(class_labels, prob_sup_array, current_sz-1, None, vocabulary_inv, embedding_mat)
    print("Final expansion size t = {}".format(len(expanded_array[0])))
    centers = []
    kappas = []
    print("Top-t nearest words for each class:")
    for i in range(n_classes):
        expanded_class = expanded_array[i]
        vocab_expanded = [vocabulary_inv[w] for w in expanded_class]
        print("Class {}:".format(i))
        print(vocab_expanded)
        expanded_mat = embedding_mat[np.asarray(expanded_class)]
        # Single-cluster vMF fit yields the class direction and concentration.
        vmf_soft = VonMisesFisherMixture(n_clusters=1, n_jobs=15)
        vmf_soft.fit(expanded_mat)
        center = vmf_soft.cluster_centers_[0]
        kappa = vmf_soft.concentrations_[0]
        centers.append(center)
        kappas.append(kappa)
    # Optionally persist each class's expanded word list for inspection.
    for j, expanded_class in enumerate(expanded_array):
        if write_path is not None:
            if not os.path.exists(write_path):
                os.makedirs(write_path)
            f = open(write_path + 'class' + str(j) + '.txt', 'w')
            for i, word in enumerate(expanded_class):
                f.write(vocabulary_inv[word] + ' ')
            f.close()
    print("Finished vMF distribution fitting.")
    return expanded_array, centers, kappas
def pseudodocs(word_sup_array, total_num, background_array, sequence_length, len_avg,
               len_std, num_doc, interp_weight, vocabulary_inv, embedding_mat, model, save_dir=None):
    """Generate `num_doc` pseudo documents per class for pre-training.

    Embeddings are L2-normalized in place, one vMF distribution is fitted per
    class (via label_expansion), then documents are sampled: each document
    draws a class "discourse" vector, words are drawn from a truncated softmax
    over the vocabulary interpolated with the background distribution by
    `interp_weight`. `model` selects the layout: 'cnn' -> (n_docs, seq_len)
    word ids; 'rnn' -> (n_docs, n_sents, sent_len) with a '.' id closing each
    sentence. Returns (docs, label); label rows are smoothed one-hot vectors.
    NOTE(review): `len_std` is accepted but never used -- confirm intent.
    """
    # Normalize embeddings in place so dot products act as cosine similarities.
    for i, _ in enumerate(embedding_mat):
        embedding_mat[i] = embedding_mat[i] / np.linalg.norm(embedding_mat[i])
    _, centers, kappas = \
        label_expansion(word_sup_array, save_dir, vocabulary_inv, embedding_mat)
    print("Pseudo documents generation...")
    background_vec = interp_weight * background_array
    if model == 'cnn':
        docs = np.zeros((num_doc*len(word_sup_array), sequence_length), dtype='int32')
        label = np.zeros((num_doc*len(word_sup_array), len(word_sup_array)))
        for i in range(len(word_sup_array)):
            docs_len = len_avg*np.ones(num_doc)
            center = centers[i]
            kappa = kappas[i]
            # One discourse vector per pseudo document, sampled from the
            # class's fitted von Mises-Fisher distribution.
            discourses = sample_vMF(center, kappa, num_doc)
            for j in range(num_doc):
                discourse = discourses[j]
                prob_vec = np.dot(embedding_mat, discourse)
                prob_vec = np.exp(prob_vec)
                # Keep only the `total_num` most likely words, renormalize,
                # then interpolate with the background distribution.
                sorted_idx = np.argsort(prob_vec)[::-1]
                delete_idx = sorted_idx[total_num:]
                prob_vec[delete_idx] = 0
                prob_vec /= np.sum(prob_vec)
                prob_vec *= 1 - interp_weight
                prob_vec += background_vec
                doc_len = int(docs_len[j])
                docs[i*num_doc+j][:doc_len] = np.random.choice(len(prob_vec), size=doc_len, p=prob_vec)
                # Smoothed one-hot label: uniform mass plus a boost on class i.
                label[i*num_doc+j] = interp_weight/len(word_sup_array)*np.ones(len(word_sup_array))
                label[i*num_doc+j][i] += 1 - interp_weight
    elif model == 'rnn':
        docs = np.zeros((num_doc*len(word_sup_array), sequence_length[0], sequence_length[1]), dtype='int32')
        label = np.zeros((num_doc*len(word_sup_array), len(word_sup_array)))
        doc_len = int(len_avg[0])
        sent_len = int(len_avg[1])
        # Locate the vocabulary id of '.'; it terminates every sentence below.
        for period_idx in vocabulary_inv:
            if vocabulary_inv[period_idx] == '.':
                break
        for i in range(len(word_sup_array)):
            center = centers[i]
            kappa = kappas[i]
            discourses = sample_vMF(center, kappa, num_doc)
            for j in range(num_doc):
                discourse = discourses[j]
                prob_vec = np.dot(embedding_mat, discourse)
                prob_vec = np.exp(prob_vec)
                sorted_idx = np.argsort(prob_vec)[::-1]
                delete_idx = sorted_idx[total_num:]
                prob_vec[delete_idx] = 0
                prob_vec /= np.sum(prob_vec)
                prob_vec *= 1 - interp_weight
                prob_vec += background_vec
                for k in range(doc_len):
                    # Fill the sentence, then close it with the '.' id.
                    # assumes sequence_length[1] > sent_len -- TODO confirm.
                    docs[i*num_doc+j][k][:sent_len] = np.random.choice(len(prob_vec), size=sent_len, p=prob_vec)
                    docs[i*num_doc+j][k][sent_len] = period_idx
                label[i*num_doc+j] = interp_weight/len(word_sup_array)*np.ones(len(word_sup_array))
                label[i*num_doc+j][i] += 1 - interp_weight
    print("Finished Pseudo documents generation.")
    return docs, label
def augment(x, sup_idx, total_len):
    """Replicate the labeled (seed) documents to at least `total_len` rows and
    build matching one-hot class labels.

    Parameters
    ----------
    x : np.ndarray
        All documents; rows are selected by index.
    sup_idx : np.ndarray
        2-D array whose row i holds the document indices seeded for class i.
    total_len : int
        Target number of labeled documents after replication.

    Returns
    -------
    tuple of np.ndarray
        (new_docs, pretrain_labels) where pretrain_labels has shape
        (len(new_docs), number of classes) with 1.0 at each row's class.
    """
    print("Labeled documents augmentation...")
    docs = x[sup_idx.flatten()]
    curr_len = len(docs)
    # Number of whole copies to emit; at least the original copy is kept
    # (matches the old copy_times loop, which never shrank the data).
    reps = max(1, int(total_len / curr_len))
    # Class id of each seed document, aligned with the rows of `docs`.
    label_nums = [len(seed_idx) for seed_idx in sup_idx]
    y = np.repeat(np.arange(len(sup_idx), dtype='int32'), label_nums)
    # Single concatenation of `reps` copies replaces the original quadratic
    # grow-by-concatenation loop.
    new_docs = np.concatenate([docs] * reps, axis=0)
    new_y = np.tile(y, reps)
    # One-hot encode with fancy indexing instead of a per-row Python loop.
    pretrain_labels = np.zeros((len(new_y), len(np.unique(y))))
    pretrain_labels[np.arange(len(new_y)), new_y] = 1.0
    print("Finished labeled documents augmentation.")
    return new_docs, pretrain_labels
| 7,171 | 0 | 92 |
c5fcc1853db1ba4d840554f6658634f833033af9 | 7,452 | py | Python | tests/test_squeues_request.py | FingerCrunch/scrapy | 3225de725720bba246ba8c9845fe4b84bc0c82e7 | [
"BSD-3-Clause"
] | 41,267 | 2015-01-01T07:39:25.000Z | 2022-03-31T20:09:40.000Z | tests/test_squeues_request.py | FingerCrunch/scrapy | 3225de725720bba246ba8c9845fe4b84bc0c82e7 | [
"BSD-3-Clause"
] | 4,420 | 2015-01-02T09:35:38.000Z | 2022-03-31T22:53:32.000Z | tests/test_squeues_request.py | FingerCrunch/scrapy | 3225de725720bba246ba8c9845fe4b84bc0c82e7 | [
"BSD-3-Clause"
] | 11,080 | 2015-01-01T18:11:30.000Z | 2022-03-31T15:33:19.000Z | import shutil
import tempfile
import unittest
import queuelib
from scrapy.squeues import (
PickleFifoDiskQueue,
PickleLifoDiskQueue,
MarshalFifoDiskQueue,
MarshalLifoDiskQueue,
FifoMemoryQueue,
LifoMemoryQueue,
)
from scrapy.http import Request
from scrapy.spiders import Spider
from scrapy.utils.test import get_crawler
"""
Queues that handle requests
"""
| 34.660465 | 112 | 0.637815 | import shutil
import tempfile
import unittest
import queuelib
from scrapy.squeues import (
PickleFifoDiskQueue,
PickleLifoDiskQueue,
MarshalFifoDiskQueue,
MarshalLifoDiskQueue,
FifoMemoryQueue,
LifoMemoryQueue,
)
from scrapy.http import Request
from scrapy.spiders import Spider
from scrapy.utils.test import get_crawler
"""
Queues that handle requests
"""
class BaseQueueTestCase(unittest.TestCase):
    """Shared fixture: a throwaway scratch directory plus queue locations
    and a crawler, torn down after every test."""

    def setUp(self):
        """Provision the scratch dir, a queue file path, a queue dir and a crawler."""
        self.tmpdir = tempfile.mkdtemp(prefix="scrapy-queue-tests-")
        self.qpath = self.tempfilename()
        self.qdir = self.mkdtemp()
        self.crawler = get_crawler(Spider)

    def tearDown(self):
        """Remove the scratch directory and everything created under it."""
        shutil.rmtree(self.tmpdir)

    def tempfilename(self):
        """Reserve a unique file name inside the scratch dir.

        The temporary file itself is deleted when the context exits, so only
        the (currently unused) name is returned.
        """
        with tempfile.NamedTemporaryFile(dir=self.tmpdir) as handle:
            return handle.name

    def mkdtemp(self):
        """Create and return a fresh subdirectory of the scratch dir."""
        return tempfile.mkdtemp(dir=self.tmpdir)
class RequestQueueTestMixin:
    """Push/peek/pop round-trip tests for a queue holding a single Request.

    Mixed into BaseQueueTestCase subclasses that provide queue(). The
    with/without-peek variants cover both newer and older queuelib releases.
    """
    def queue(self):
        # Concrete subclasses must return the queue implementation under test.
        raise NotImplementedError()
    def test_one_element_with_peek(self):
        # Only meaningful when the installed queuelib exposes peek().
        if not hasattr(queuelib.queue.FifoMemoryQueue, "peek"):
            raise unittest.SkipTest("The queuelib queues do not define peek")
        q = self.queue()
        self.assertEqual(len(q), 0)
        self.assertIsNone(q.peek())
        self.assertIsNone(q.pop())
        req = Request("http://www.example.com")
        q.push(req)
        self.assertEqual(len(q), 1)
        # peek() must not consume; pop() must drain the queue again.
        self.assertEqual(q.peek().url, req.url)
        self.assertEqual(q.pop().url, req.url)
        self.assertEqual(len(q), 0)
        self.assertIsNone(q.peek())
        self.assertIsNone(q.pop())
        q.close()
    def test_one_element_without_peek(self):
        # Covers queuelib versions that predate peek(): it must raise.
        if hasattr(queuelib.queue.FifoMemoryQueue, "peek"):
            raise unittest.SkipTest("The queuelib queues define peek")
        q = self.queue()
        self.assertEqual(len(q), 0)
        self.assertIsNone(q.pop())
        req = Request("http://www.example.com")
        q.push(req)
        self.assertEqual(len(q), 1)
        with self.assertRaises(NotImplementedError, msg="The underlying queue class does not implement 'peek'"):
            q.peek()
        self.assertEqual(q.pop().url, req.url)
        self.assertEqual(len(q), 0)
        self.assertIsNone(q.pop())
        q.close()
class FifoQueueMixin(RequestQueueTestMixin):
    """Checks FIFO ordering: requests pop in the order they were pushed."""
    def test_fifo_with_peek(self):
        if not hasattr(queuelib.queue.FifoMemoryQueue, "peek"):
            raise unittest.SkipTest("The queuelib queues do not define peek")
        q = self.queue()
        self.assertEqual(len(q), 0)
        self.assertIsNone(q.peek())
        self.assertIsNone(q.pop())
        req1 = Request("http://www.example.com/1")
        req2 = Request("http://www.example.com/2")
        req3 = Request("http://www.example.com/3")
        q.push(req1)
        q.push(req2)
        q.push(req3)
        self.assertEqual(len(q), 3)
        # Oldest request comes out first; peek always mirrors the next pop.
        self.assertEqual(q.peek().url, req1.url)
        self.assertEqual(q.pop().url, req1.url)
        self.assertEqual(len(q), 2)
        self.assertEqual(q.peek().url, req2.url)
        self.assertEqual(q.pop().url, req2.url)
        self.assertEqual(len(q), 1)
        self.assertEqual(q.peek().url, req3.url)
        self.assertEqual(q.pop().url, req3.url)
        self.assertEqual(len(q), 0)
        self.assertIsNone(q.peek())
        self.assertIsNone(q.pop())
        q.close()
    def test_fifo_without_peek(self):
        if hasattr(queuelib.queue.FifoMemoryQueue, "peek"):
            raise unittest.SkipTest("The queuelib queues do not define peek")
        q = self.queue()
        self.assertEqual(len(q), 0)
        self.assertIsNone(q.pop())
        req1 = Request("http://www.example.com/1")
        req2 = Request("http://www.example.com/2")
        req3 = Request("http://www.example.com/3")
        q.push(req1)
        q.push(req2)
        q.push(req3)
        with self.assertRaises(NotImplementedError, msg="The underlying queue class does not implement 'peek'"):
            q.peek()
        self.assertEqual(len(q), 3)
        # Pops still honour FIFO order even without peek support.
        self.assertEqual(q.pop().url, req1.url)
        self.assertEqual(len(q), 2)
        self.assertEqual(q.pop().url, req2.url)
        self.assertEqual(len(q), 1)
        self.assertEqual(q.pop().url, req3.url)
        self.assertEqual(len(q), 0)
        self.assertIsNone(q.pop())
        q.close()
class LifoQueueMixin(RequestQueueTestMixin):
    """Checks LIFO ordering: the most recently pushed request pops first."""
    def test_lifo_with_peek(self):
        if not hasattr(queuelib.queue.FifoMemoryQueue, "peek"):
            raise unittest.SkipTest("The queuelib queues do not define peek")
        q = self.queue()
        self.assertEqual(len(q), 0)
        self.assertIsNone(q.peek())
        self.assertIsNone(q.pop())
        req1 = Request("http://www.example.com/1")
        req2 = Request("http://www.example.com/2")
        req3 = Request("http://www.example.com/3")
        q.push(req1)
        q.push(req2)
        q.push(req3)
        self.assertEqual(len(q), 3)
        # Newest request comes out first; peek always mirrors the next pop.
        self.assertEqual(q.peek().url, req3.url)
        self.assertEqual(q.pop().url, req3.url)
        self.assertEqual(len(q), 2)
        self.assertEqual(q.peek().url, req2.url)
        self.assertEqual(q.pop().url, req2.url)
        self.assertEqual(len(q), 1)
        self.assertEqual(q.peek().url, req1.url)
        self.assertEqual(q.pop().url, req1.url)
        self.assertEqual(len(q), 0)
        self.assertIsNone(q.peek())
        self.assertIsNone(q.pop())
        q.close()
    def test_lifo_without_peek(self):
        if hasattr(queuelib.queue.FifoMemoryQueue, "peek"):
            raise unittest.SkipTest("The queuelib queues do not define peek")
        q = self.queue()
        self.assertEqual(len(q), 0)
        self.assertIsNone(q.pop())
        req1 = Request("http://www.example.com/1")
        req2 = Request("http://www.example.com/2")
        req3 = Request("http://www.example.com/3")
        q.push(req1)
        q.push(req2)
        q.push(req3)
        with self.assertRaises(NotImplementedError, msg="The underlying queue class does not implement 'peek'"):
            q.peek()
        self.assertEqual(len(q), 3)
        # Pops still honour LIFO order even without peek support.
        self.assertEqual(q.pop().url, req3.url)
        self.assertEqual(len(q), 2)
        self.assertEqual(q.pop().url, req2.url)
        self.assertEqual(len(q), 1)
        self.assertEqual(q.pop().url, req1.url)
        self.assertEqual(len(q), 0)
        self.assertIsNone(q.pop())
        q.close()
# Concrete test classes: each pairs an ordering mixin with one queue backend.
class PickleFifoDiskQueueRequestTest(FifoQueueMixin, BaseQueueTestCase):
    def queue(self):
        # Pickle-serialized FIFO disk queue.
        return PickleFifoDiskQueue.from_crawler(crawler=self.crawler, key="pickle/fifo")
class PickleLifoDiskQueueRequestTest(LifoQueueMixin, BaseQueueTestCase):
    def queue(self):
        # Pickle-serialized LIFO disk queue.
        return PickleLifoDiskQueue.from_crawler(crawler=self.crawler, key="pickle/lifo")
class MarshalFifoDiskQueueRequestTest(FifoQueueMixin, BaseQueueTestCase):
    def queue(self):
        # Marshal-serialized FIFO disk queue.
        return MarshalFifoDiskQueue.from_crawler(crawler=self.crawler, key="marshal/fifo")
class MarshalLifoDiskQueueRequestTest(LifoQueueMixin, BaseQueueTestCase):
    def queue(self):
        # Marshal-serialized LIFO disk queue.
        return MarshalLifoDiskQueue.from_crawler(crawler=self.crawler, key="marshal/lifo")
class FifoMemoryQueueRequestTest(FifoQueueMixin, BaseQueueTestCase):
    def queue(self):
        # In-memory FIFO queue (no serialization key needed).
        return FifoMemoryQueue.from_crawler(crawler=self.crawler)
class LifoMemoryQueueRequestTest(LifoQueueMixin, BaseQueueTestCase):
    def queue(self):
        # In-memory LIFO queue (no serialization key needed).
        return LifoMemoryQueue.from_crawler(crawler=self.crawler)
| 6,003 | 375 | 679 |
4056c2a32274bdf16ec752628c55c728c461ea14 | 104 | py | Python | logya/__init__.py | yaph/logya | 9647f58a0b8653b56ad64332e235a76cab3acda9 | [
"MIT"
] | 12 | 2015-03-04T03:23:56.000Z | 2020-11-17T08:09:17.000Z | logya/__init__.py | elaOnMars/logya | a9f256ac8840e21b348ac842b35683224e25b613 | [
"MIT"
] | 78 | 2015-01-05T11:40:41.000Z | 2022-01-23T21:05:39.000Z | logya/__init__.py | elaOnMars/logya | a9f256ac8840e21b348ac842b35683224e25b613 | [
"MIT"
] | 6 | 2015-04-20T06:58:42.000Z | 2022-01-31T00:36:29.000Z | # -*- coding: utf-8 -*-
__author__ = 'Ramiro Gómez'
__email__ = 'code@ramiro.org'
__version__ = '5.0.0'
| 20.8 | 29 | 0.644231 | # -*- coding: utf-8 -*-
# Package metadata consumed by packaging tools and by logya itself.
__author__ = 'Ramiro Gómez'
__email__ = 'code@ramiro.org'
__version__ = '5.0.0'
| 0 | 0 | 0 |
bcca9df667a8ef58947ffa37f575e51e1d6c54dc | 966 | py | Python | code.py | jamesbowman/py-eve | dd2dc7cdd9c5e5ef82f84132ec9a05d989788112 | [
"BSD-3-Clause"
] | 17 | 2020-01-29T04:24:48.000Z | 2021-07-13T18:03:42.000Z | code.py | jamesbowman/py-eve | dd2dc7cdd9c5e5ef82f84132ec9a05d989788112 | [
"BSD-3-Clause"
] | 3 | 2020-12-28T00:48:06.000Z | 2021-05-30T17:46:15.000Z | code.py | jamesbowman/py-eve | dd2dc7cdd9c5e5ef82f84132ec9a05d989788112 | [
"BSD-3-Clause"
] | 4 | 2020-03-08T01:19:14.000Z | 2021-09-08T13:26:46.000Z | import bteve as eve
gd = eve.Gameduino()
gd.init()
import pong
import fruit
import temperature
demos = (pong, fruit, temperature)
# demos[0].run(gd)
sel = 0
prev_touch = False
while True:
gd.finish()
c = gd.controllers()[0]
if not prev_touch:
if c['bdu']:
sel = (sel - 1)
if c['bdd']:
sel = (sel + 1)
sel %= len(demos)
prev_touch = any(c[b] for b in ['bdu', 'bdd'])
if c['ba']:
demos[sel].run(gd)
gd.cmd_romfont(30, 30)
gd.cmd_romfont(31, 31)
gd.Clear()
gd.cmd_text(640, 60, 31, eve.OPT_CENTER, "CircuitPython demos")
gd.cmd_text(640, 640, 30, eve.OPT_CENTER, "Press A to launch, HOME to return to this menu")
for i,d in enumerate(demos):
y = 180 + 120 * i
if i == sel:
gd.cmd_fgcolor(0xa06000)
else:
gd.cmd_fgcolor(0x003030)
gd.cmd_button(320, y, 640, 100, 31, eve.OPT_FLAT, d.__name__)
gd.swap()
| 22.465116 | 95 | 0.564182 | import bteve as eve
gd = eve.Gameduino()
gd.init()
import pong
import fruit
import temperature
# Demo modules selectable from the menu; each exposes run(gd).
demos = (pong, fruit, temperature)
# demos[0].run(gd)
sel = 0
prev_touch = False
# Main menu loop: up/down moves the highlight, the A button launches a demo.
while True:
    gd.finish()
    c = gd.controllers()[0]
    # Edge-triggered navigation: react only to a fresh button press.
    if not prev_touch:
        if c['bdu']:
            sel = (sel - 1)
        if c['bdd']:
            sel = (sel + 1)
        sel %= len(demos)
    prev_touch = any(c[b] for b in ['bdu', 'bdd'])
    if c['ba']:
        demos[sel].run(gd)
    # Redraw the menu every frame.
    gd.cmd_romfont(30, 30)
    gd.cmd_romfont(31, 31)
    gd.Clear()
    gd.cmd_text(640, 60, 31, eve.OPT_CENTER, "CircuitPython demos")
    gd.cmd_text(640, 640, 30, eve.OPT_CENTER, "Press A to launch, HOME to return to this menu")
    for i,d in enumerate(demos):
        y = 180 + 120 * i
        # Highlight the currently selected entry.
        if i == sel:
            gd.cmd_fgcolor(0xa06000)
        else:
            gd.cmd_fgcolor(0x003030)
        gd.cmd_button(320, y, 640, 100, 31, eve.OPT_FLAT, d.__name__)
    gd.swap()
| 0 | 0 | 0 |
76166cf62baee4f8bac11d60a8d34ccfd53070aa | 8,382 | py | Python | pyDNase/scripts/wellington_bootstrap.py | zoi-mibtp/pyDNase | 047d2f89af6109a530505b370782c4841d710cbf | [
"MIT"
] | 34 | 2015-05-28T17:16:18.000Z | 2022-03-04T11:54:37.000Z | pyDNase/scripts/wellington_bootstrap.py | zoi-mibtp/pyDNase | 047d2f89af6109a530505b370782c4841d710cbf | [
"MIT"
] | 39 | 2015-05-14T09:03:07.000Z | 2020-05-13T11:40:28.000Z | pyDNase/scripts/wellington_bootstrap.py | zoi-mibtp/pyDNase | 047d2f89af6109a530505b370782c4841d710cbf | [
"MIT"
] | 24 | 2015-09-24T07:39:53.000Z | 2021-07-13T02:37:24.000Z | #!/usr/bin/env python
import pyDNase, pyDNase.footprinting
import numpy as np
from clint.textui import progress
import multiprocessing as mp
import argparse
__version__ = "0.1.0"
parser = argparse.ArgumentParser(description='Scores Differential Footprints using Wellington-Bootstrap.')
parser.add_argument("-fp", "--footprint-sizes",
help="Range of footprint sizes to try in format \"from,to,step\" (default: 11,26,2)",
default="11,26,2",
type=str)
parser.add_argument("-fdr","--FDR_cutoff",
help="Detect footprints using the FDR selection method at a specific FDR (default: 0.01)",
default=0.01,
type=float)
parser.add_argument("-fdriter", "--FDR_iterations",
help="How many randomisations to use when performing FDR calculations (default: 100)",
default=100,
type=int)
parser.add_argument("-fdrlimit", "--FDR_limit",
help="Minimum p-value to be considered significant for FDR calculation (default: -20)",
default=-20,
type=int)
parser.add_argument("-p", "--processes", help="Number of processes to use (default: uses all CPUs)",
default=0,
type=int)
parser.add_argument("-A", action="store_true", help="ATAC-seq mode (default: False)", default=False)
parser.add_argument("treatment_bam", help="BAM file for treatment")
parser.add_argument("control_bam", help="BAM file for control")
parser.add_argument("bedsites", help="BED file of genomic locations to search in")
parser.add_argument("treatment_only_output", help="File to write treatment specific fooprints scores to")
parser.add_argument("control_only_output", help="File to write control specific footprint scores to")
args = parser.parse_args()
# Sanity check parameters from the user
try:
args.footprint_sizes = xrange_from_string(args.footprint_sizes)
except ValueError:
raise RuntimeError("Footprint sizes must be supplied as from,to,step")
assert 0 < args.FDR_cutoff < 1, "FDR must be between 0 and 1"
assert args.FDR_limit <= 0, "FDR limit must be less than or equal to 0 (to disable)"
# Treatment
reads2 = pyDNase.BAMHandler(args.treatment_bam, caching=0, ATAC=args.A)
# Control
reads1 = pyDNase.BAMHandler(args.control_bam, caching=0, ATAC=args.A)
# Regions of Interest
regions = pyDNase.GenomicIntervalSet(args.bedsites)
# Output
treatment_output = open(args.treatment_only_output, "w", buffering=1)
control_output = open(args.control_only_output, "w", buffering=1)
# Determine Number of CPUs to use
if args.processes:
CPUs = args.processes
else:
CPUs = mp.cpu_count()
# NOTE: This roughly scales at about 450mb per 300 regions held in memory
max_regions_cached_in_memory = 50 * CPUs
p = mp.Pool(CPUs)
print("Performing differential footprinting...")
for i in progress.bar(regions):
# Make sure the interval is actually big enough to footprint to begin with
if len(i) < 120:
i.startbp -= 60
i.endbp += 60
# Make the optional arguments
fp_args = {'footprint_sizes': args.footprint_sizes, 'FDR_cutoff': args.FDR_cutoff, 'FDR_iterations': args.FDR_iterations}
# Perform both comparisons - A against B and B against A
fp = Diffwell(reads2=reads1, min_score=args.FDR_limit, interval=i, reads=reads2, **fp_args)
fp2 = Diffwell(reads2=reads2, min_score=args.FDR_limit, interval=i, reads=reads1, **fp_args)
# Push these tasks to the queue
p.apply_async(fp, callback=write_treat_to_disk)
p.apply_async(fp2, callback=write_control_to_disk)
# Hold here while the queue is bigger than the number of reads we're happy to store in memory
while p._taskqueue.qsize() > max_regions_cached_in_memory:
pass
| 42.548223 | 125 | 0.605703 | #!/usr/bin/env python
import pyDNase, pyDNase.footprinting
import numpy as np
from clint.textui import progress
import multiprocessing as mp
import argparse
__version__ = "0.1.0"
def write_treat_to_disk(item):
    """Pool callback: append each treatment-specific footprint interval to the
    treatment output file. No-op when the region produced no results."""
    if not item.results:
        return
    for footprint in item.results:
        print(footprint, file=treatment_output)
def write_control_to_disk(item):
    """Pool callback: append each control-specific footprint interval to the
    control output file. No-op when the region produced no results."""
    results = item.results
    if results:
        for footprint in results:
            print(footprint, file=control_output)
def xrange_from_string(range_string):
    """Parse a "from,to,step" command-line string into a list of ints.

    Raises ValueError (the exception type the caller already handles) when
    the string is not exactly three comma-separated integers or when the
    resulting range is empty. The original version swallowed the underlying
    error with a bare except and validated via assert (stripped under -O).
    """
    try:
        start, stop, step = (int(part) for part in range_string.split(","))
    except (ValueError, TypeError) as err:
        # Chain the original parse error instead of discarding it.
        raise ValueError("expected three comma-separated integers") from err
    result = list(range(start, stop, step))
    if not result:
        raise ValueError("footprint size range is empty")
    return result
class Diffwell(pyDNase.footprinting.wellington):
    """Wellington footprinter extended for differential (bootstrap) scoring:
    footprints found in `reads` are re-scored against a second dataset."""
    def __init__(self, reads2, min_score, *args, **kwargs):
        # min_score: floor on the p-value considered significant; reads2 is
        # the comparison dataset, sliced to this object's genomic interval.
        super(Diffwell, self).__init__(*args, **kwargs)
        self.MIN_SCORE = min_score
        self.reads2 = reads2[self.interval]
    def footprints(self, withCutoff = -20, merge = 1):
        """
        This returns reads GenomicIntervalSet with the intervals retrieved
        below the specific cutoff applied to the selected data

        NOTE(review): despite the inherited docstring, this override returns a
        list of (centre offset, footprint length) tuples, and the `merge`
        parameter is unused -- confirm against the base class contract.
        """
        ranges = []
        # Work on copies so the object's score/length arrays are not clobbered.
        tempMLE, templogProb = np.copy(self.lengths), np.copy(self.scores)
        # Greedily extract minima below the cutoff, masking each footprint's
        # neighbourhood (one shoulder width each side) after extraction.
        while templogProb.min() < withCutoff:
            minimapos = templogProb.argmin()
            minimafplen = tempMLE[minimapos]
            minimaphalffplen = minimafplen//2
            lbound = max(minimapos-minimaphalffplen, 0)
            rbound = min(minimapos+minimaphalffplen, len(templogProb))
            ranges.append((lbound, rbound, templogProb.min(), minimafplen))
            templogProb[max(lbound-self.shoulder_sizes[-1], 0):min(rbound+self.shoulder_sizes[-1], len(templogProb))] = 1
        return_set = []
        if ranges:
            merged_ranges = []
            while len(ranges):
                # Find best score
                ranges.sort(key=lambda x: -x[2])
                # Take the last value
                best = ranges.pop()
                merged_ranges.append(best)
                # Check for overlapping regions and remove
                new_ranges = []
                for c, d, e, f in ranges:
                    if not c <= best[1] <= d:
                        new_ranges.append([c, d, e, f])
                ranges = new_ranges
            # Creates reads GenomicIntervalSet and adds the footprints to them
            for i in merged_ranges:
                return_set.append((int((i[0] + i[1])/2), i[3]))
        return return_set
    def findDiffFP(self):
        """Score this interval's footprints against the second dataset and
        return them as a list of pyDNase.GenomicInterval objects."""
        cuts = self.reads
        forwardArray, backwardArray = cuts["+"], cuts["-"]
        cuts2 = self.reads2
        forwardArray2, backwardArray2 = cuts2["+"], cuts2["-"]
        # Adjust the FDR threshold to a minimum of withCutoff
        threshold = min(self.FDR_value, self.MIN_SCORE)
        # Find the footprints at this threshold
        offsets = self.footprints(threshold)
        # Work out the bootstrap scores for these footprints using the other data set
        best_probabilities, best_footprintsizes = pyDNase.footprinting.WellingtonC.diff_calculate(forwardArray,
                                                                                                 backwardArray,
                                                                                                 forwardArray2,
                                                                                                 backwardArray2,
                                                                                                 [i[1] for i in offsets],
                                                                                                 [i[0] for i in offsets],
                                                                                                 threshold)
        result_intervals = []
        for i in offsets:
            # Convert the interval-relative centre back to genome coordinates.
            middle = self.interval.startbp + i[0]
            fp_halfsize = (best_footprintsizes[i[0]] // 2)
            left = middle - fp_halfsize
            right = middle + fp_halfsize
            ml_score = best_probabilities[i[0]]
            result = pyDNase.GenomicInterval(self.interval.chromosome, left, right, score=ml_score)
            result_intervals.append(result)
        return result_intervals
    def __call__(self):
        """Run the differential scoring; sets self.results (or None) and
        returns self so the object works as a multiprocessing task+result."""
        results = None
        # this is where the first round of footprinting is actually called, as self.scores invoked the footprinting
        if min(self.scores) < self.MIN_SCORE:
            if min(self.scores) < self.FDR_value:
                results = self.findDiffFP()
        self.results = results
        return self
# Command-line interface: footprint-size sweep and FDR controls.
parser = argparse.ArgumentParser(description='Scores Differential Footprints using Wellington-Bootstrap.')
parser.add_argument("-fp", "--footprint-sizes",
                    help="Range of footprint sizes to try in format \"from,to,step\" (default: 11,26,2)",
                    default="11,26,2",
                    type=str)
parser.add_argument("-fdr","--FDR_cutoff",
                    help="Detect footprints using the FDR selection method at a specific FDR (default: 0.01)",
                    default=0.01,
                    type=float)
parser.add_argument("-fdriter", "--FDR_iterations",
                    help="How many randomisations to use when performing FDR calculations (default: 100)",
                    default=100,
                    type=int)
parser.add_argument("-fdrlimit", "--FDR_limit",
                    help="Minimum p-value to be considered significant for FDR calculation (default: -20)",
                    default=-20,
                    type=int)
parser.add_argument("-p", "--processes", help="Number of processes to use (default: uses all CPUs)",
                    default=0,
                    type=int)
parser.add_argument("-A", action="store_true", help="ATAC-seq mode (default: False)", default=False)
parser.add_argument("treatment_bam", help="BAM file for treatment")
parser.add_argument("control_bam", help="BAM file for control")
parser.add_argument("bedsites", help="BED file of genomic locations to search in")
parser.add_argument("treatment_only_output", help="File to write treatment specific fooprints scores to")
parser.add_argument("control_only_output", help="File to write control specific footprint scores to")
args = parser.parse_args()
# Sanity check parameters from the user
try:
    args.footprint_sizes = xrange_from_string(args.footprint_sizes)
except ValueError:
    raise RuntimeError("Footprint sizes must be supplied as from,to,step")
assert 0 < args.FDR_cutoff < 1, "FDR must be between 0 and 1"
assert args.FDR_limit <= 0, "FDR limit must be less than or equal to 0 (to disable)"
# Treatment
reads2 = pyDNase.BAMHandler(args.treatment_bam, caching=0, ATAC=args.A)
# Control
reads1 = pyDNase.BAMHandler(args.control_bam, caching=0, ATAC=args.A)
# Regions of Interest
regions = pyDNase.GenomicIntervalSet(args.bedsites)
# Output (line-buffered so results appear as workers finish)
treatment_output = open(args.treatment_only_output, "w", buffering=1)
control_output = open(args.control_only_output, "w", buffering=1)
# Determine Number of CPUs to use
if args.processes:
    CPUs = args.processes
else:
    CPUs = mp.cpu_count()
# NOTE: This roughly scales at about 450mb per 300 regions held in memory
max_regions_cached_in_memory = 50 * CPUs
p = mp.Pool(CPUs)
print("Performing differential footprinting...")
for i in progress.bar(regions):
    # Make sure the interval is actually big enough to footprint to begin with
    if len(i) < 120:
        i.startbp -= 60
        i.endbp += 60
    # Make the optional arguments
    fp_args = {'footprint_sizes': args.footprint_sizes, 'FDR_cutoff': args.FDR_cutoff, 'FDR_iterations': args.FDR_iterations}
    # Perform both comparisons - A against B and B against A
    fp = Diffwell(reads2=reads1, min_score=args.FDR_limit, interval=i, reads=reads2, **fp_args)
    fp2 = Diffwell(reads2=reads2, min_score=args.FDR_limit, interval=i, reads=reads1, **fp_args)
    # Push these tasks to the queue
    p.apply_async(fp, callback=write_treat_to_disk)
    p.apply_async(fp2, callback=write_control_to_disk)
    # Hold here while the queue is bigger than the number of reads we're happy to store in memory
    while p._taskqueue.qsize() > max_regions_cached_in_memory:
        pass
| 2,740 | 1,712 | 92 |
09dfd928cdee8310a84bd1f6f85c08f272b1e928 | 623 | py | Python | t1.py | dongmeng168/raspi_robot_car | 893698cd8649ec7d51a4716f5acf415fe2f2d2f5 | [
"BSD-2-Clause"
] | null | null | null | t1.py | dongmeng168/raspi_robot_car | 893698cd8649ec7d51a4716f5acf415fe2f2d2f5 | [
"BSD-2-Clause"
] | null | null | null | t1.py | dongmeng168/raspi_robot_car | 893698cd8649ec7d51a4716f5acf415fe2f2d2f5 | [
"BSD-2-Clause"
] | null | null | null |
ms1 = MyShow()
ms1.showName() | 20.766667 | 43 | 0.532905 | def catch(origin_func):
def wrapper(self, *args, **kwargs):
print("catch start")
# print(self.name)
origin_func(self, *args, **kwargs)
print(self.name)
print("catch end")
return wrapper
class Decorator(object):
    """Class-based decorator experiment: stores a zero-argument callable and
    logs around its invocation."""

    def __init__(self, f):
        # f: the decorated callable, invoked without arguments in __call__.
        self.f = f
    def __call__(self):
        print("decorator start")
        self.f()
        # NOTE(review): `self.name` is never assigned on this class, so this
        # line raises AttributeError unless self.f somehow sets it -- confirm
        # intent (possibly self.f.__name__ was meant).
        print(self.name)
        print("decorator end")
class MyShow(object):
    """Demo class whose method is wrapped by the `catch` decorator."""

    @catch
    def showName(self):
        # Set self.name first so the decorator can print it after the call.
        self.name = "dongmeng"
        print("show name")
ms1 = MyShow()
ms1.showName() | 416 | 43 | 126 |
e4e9eb18487f687ece7b0a8c1f2f0fdfa4595ebf | 6,956 | py | Python | v0/aia_eis_v0/utils/visualize_utils/two_vertical_contour.py | DreamBoatOve/aia_eis | 458b4d29846669b10db4da1b3e86c0b394614ceb | [
"MIT"
] | 1 | 2022-03-02T12:57:19.000Z | 2022-03-02T12:57:19.000Z | v0/aia_eis_v0/utils/visualize_utils/two_vertical_contour.py | DreamBoatOve/aia_eis | 458b4d29846669b10db4da1b3e86c0b394614ceb | [
"MIT"
] | null | null | null | v0/aia_eis_v0/utils/visualize_utils/two_vertical_contour.py | DreamBoatOve/aia_eis | 458b4d29846669b10db4da1b3e86c0b394614ceb | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
from matplotlib import ticker, cm
from playground.laiZhaoGui.goa.get_lai_manual_fitting_res import read_lai_manual_fitting_res, read_lai_test_coordinate, pack_lai_manual_fitting_res, wrap_lai_data_4_contour
from playground.laiZhaoGui.goa.get_GOAs_fitting_res import get_GOAs_best_fitting_res, pack_GOAs_fit_res, wrap_GOAs_data_4_contour
def two_vertical_contour_0(x1_list, y1_list, z1_list, x2_list, y2_list, z2_list):
"""
Function:
我用各种ECM上最优的前五种GOA拟合的Chi-Squared误差,与赖拟合的误差,分别画在上下两个等高图上
Requirement:
1- 各自 或 一起 拥有colorbar
2- colorbar 上的刻度是对数分布的
3- the range of x-axis is: 0 ~ 17 mm; the range of y-axis is: 0 ~ 2 mm.
:return:
"""
"""
Lai
Z: Min Max
0.0004402 0.04055
GOA
Z: Min Max(1st, too big and weird, delete it) Max(2nd, better and normal) Max(3rd)
0.0033192209598534358 13891082844.471136 54.41158700914487 27.29493804319961
1- Delete the huge abnormal data in z2_list (GOAs' R(RC)_IS_lin-kk_res.txt), (x=1.3,y=0.458,z=13891082844.471136,fn=2-3,ECM-Num=9)
2- Set the value range of colorbar as 1e-4 ~ 1e2, 6 margins
"""
# 将z_min ~ z_max等分成15份,每个数值区间用一个颜色表示
# z_min = min(min(z1_list), min(z2_list))
# z_max = max(max(z1_list), max(z2_list))
# print(z_min, z_max)
# levels = MaxNLocator(nbins=15).tick_values(z.min(), z.max())
# 1- Delete the huge abnormal data in z2_list (GOAs' R(RC)_IS_lin-kk_res.txt), (x=1.3,y=0.458,z=13891082844.471136,fn=2-3,ECM-Num=9)
abnormal_index = z2_list.index(max(z2_list))
del x2_list[abnormal_index]
del y2_list[abnormal_index]
del z2_list[abnormal_index]
"""
The data format of x, y and z has to be 2D np.array
laiZhaoGui has 125(Odd) pieces of data, in order to transfer them into 2D np.array, remove the last data(can not randomly add one piece of data).
So laiZhaoGui's data will be 124 ==> 62, 2
Goa has 126(even) pieces of data, in order to transfer them into 2D np.array.
So Goa's data will be 126 ==> 63, 2
"""
# x,y的数值要按照逐渐变大或变小的规律有序排列,不然图像会乱
xyz1_list = [[x,y,z] for x,y,z in zip(x1_list, y1_list, z1_list)]
# sort by y, then sort by x
xyz1_list.sort(key=lambda xyz:(xyz[1], xyz[0]), reverse=False)
x1_sorted_list = [xyz[0] for xyz in xyz1_list]
y1_sorted_list = [xyz[1] for xyz in xyz1_list]
z1_sorted_list = [xyz[2] for xyz in xyz1_list]
x1_2D_arr = np.array(x1_sorted_list[:len(x1_sorted_list)-1]).reshape((62, 2))
y1_2D_arr = np.array(y1_sorted_list[:len(y1_sorted_list)-1]).reshape((62, 2))
z1_2D_arr = np.array(z1_sorted_list[:len(z1_sorted_list)-1]).reshape((62, 2))
# x1_2D_arr = np.array(x1_list[:len(x1_list)-1]).reshape((62, 2))
# y1_2D_arr = np.array(y1_list[:len(y1_list)-1]).reshape((62, 2))
# z1_2D_arr = np.array(z1_list[:len(z1_list)-1]).reshape((62, 2))
xyz2_list = [[x,y,z] for x,y,z in zip(x2_list, y2_list, z2_list)]
xyz2_list.sort(key=lambda xyz:(xyz[1], xyz[0]), reverse=False)
x2_sorted_list = [xyz[0] for xyz in xyz2_list]
y2_sorted_list = [xyz[1] for xyz in xyz2_list]
z2_sorted_list = [xyz[2] for xyz in xyz2_list]
x2_2D_arr = np.array(x2_sorted_list).reshape((63, 2))
y2_2D_arr = np.array(y2_sorted_list).reshape((63, 2))
z2_2D_arr = np.array(z2_sorted_list).reshape((63, 2))
# x2_2D_arr = np.array(x2_list).reshape((63, 2))
# y2_2D_arr = np.array(y2_list).reshape((63, 2))
# z2_2D_arr = np.array(z2_list).reshape((63, 2))
# 将z_min ~ z_max等分成6份,每个数值区间用一个颜色表示
level_arr = np.array([10 ** i for i in range(-4, 3)])
# pick the desired colormap, sensible levels, and define a normalization
# instance which takes data values and translates those into levels.
# cmap = plt.get_cmap('PiYG')
cmap = plt.get_cmap('viridis')
norm = BoundaryNorm(level_arr, ncolors=cmap.N, clip=True)
fig, (ax1, ax2) = plt.subplots(nrows=2)
fig.suptitle('Title of two subplots')
# contours are *point* based plots, so convert our bound into point
# centers
# cf1 = ax1.contourf(x1_list, y1_list, z1_list, levels=level_arr, cmap=cmap)
# setting the log locator tells contourf to use a log scale:
# cf1 = ax1.contourf(x1_2D_arr, y1_2D_arr, z1_2D_arr, locator=ticker.LogLocator(), levels=level_arr, cmap=cmap)
# fig.colorbar(cf1, ax=ax1)
# ax1.set_title('1 Lai R(RC)_IS_lin-kk_res.txt')
# im = ax1.pcolormesh(x, y, z, cmap=cmap, norm=norm)
im = ax1.pcolormesh(x1_2D_arr, y1_2D_arr, z1_2D_arr, cmap=cmap, norm=norm)
fig.colorbar(im, ax=ax1)
ax1.set_title('pcolormesh with levels')
# cf2 = ax2.contourf(x2_list, y2_list, z2_list, levels=level_arr, cmap=cmap)
cf2 = ax2.contourf(x2_2D_arr, y2_2D_arr, z2_2D_arr, locator=ticker.LogLocator(), levels=level_arr, cmap=cmap)
fig.colorbar(cf2, ax=ax2)
# ax2.xlim(0,20) # AttributeError: 'AxesSubplot' object has no attribute 'xlim'
ax2.set_title('2 GOA R(RC)_IS_lin-kk_res.txt')
# ax1.plot(x1, y1, 'o-')
# ax1.set_ylabel('Damped oscillation')
# ax2.plot(x2, y2, '.-')
# ax2.set_xlabel('time (s)')
# ax2.set_ylabel('Undamped')
plt.xlim(0, 17)
plt.ylim(0, 2)
plt.show()
# 1- Get Lai's manual fitting R(RC)_IS_lin-kk_res.txt and GOAs R(RC)_IS_lin-kk_res.txt
# 1.1- Get Lai's manual fitting R(RC)_IS_lin-kk_res.txt
lai_manual_fit_res_dict_list = read_lai_manual_fitting_res(ex_fp='../../../datasets/experiement_data/laiZhaoGui/eis/2020-07-22-阻抗类型整理2006.xlsx',\
sheet_name='statistic')
coor_dict_list = read_lai_test_coordinate(ex_fp='../../../datasets/experiement_data/laiZhaoGui/eis/坐标.xlsx',\
sheet_name='Sheet1')
lai_manual_fit_res_dict_list = pack_lai_manual_fitting_res(lai_manual_fit_res_dict_list,\
coor_dict_list)
lai_x_list, lai_y_list, lai_z_list = wrap_lai_data_4_contour(lai_manual_fit_res_dict_list)
# 1.2- Get GOAs R(RC)_IS_lin-kk_res.txt
goa_fit_res_dict_list = get_GOAs_best_fitting_res(fp='../../playground/laiZhaoGui/goa/R(RC)_IS_lin-kk_res.txt/magNum=2_res')
goa_fit_res_dict_list = pack_GOAs_fit_res(goa_fit_res_dict_list, coor_dict_list)
goa_x_list, goa_y_list, goa_z_list = wrap_GOAs_data_4_contour(goa_fit_res_dict_list)
# 2- Plot contour
two_vertical_contour_0(x1_list=lai_x_list, y1_list=lai_y_list, z1_list=lai_z_list,\
x2_list=goa_x_list, y2_list=goa_y_list, z2_list=goa_z_list)
"""
我认为失败的原因
1- python 的contourf 要求横纵坐标都是网格状的二维数组,但是我的实际数据(测试点的坐标)不是每行元素个数一致,创建二维数组就意味着,有些位置
上的数字是瞎编的,没有意义,如果填写0,反而意味着拟合的效果非常好,这是错的
2- 之前工作(三元合金)的contour就是用origin画的,应该是可行的
""" | 48.985915 | 172 | 0.679845 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
from matplotlib import ticker, cm
from playground.laiZhaoGui.goa.get_lai_manual_fitting_res import read_lai_manual_fitting_res, read_lai_test_coordinate, pack_lai_manual_fitting_res, wrap_lai_data_4_contour
from playground.laiZhaoGui.goa.get_GOAs_fitting_res import get_GOAs_best_fitting_res, pack_GOAs_fit_res, wrap_GOAs_data_4_contour
def two_vertical_contour_0(x1_list, y1_list, z1_list, x2_list, y2_list, z2_list):
"""
Function:
我用各种ECM上最优的前五种GOA拟合的Chi-Squared误差,与赖拟合的误差,分别画在上下两个等高图上
Requirement:
1- 各自 或 一起 拥有colorbar
2- colorbar 上的刻度是对数分布的
3- the range of x-axis is: 0 ~ 17 mm; the range of y-axis is: 0 ~ 2 mm.
:return:
"""
"""
Lai
Z: Min Max
0.0004402 0.04055
GOA
Z: Min Max(1st, too big and weird, delete it) Max(2nd, better and normal) Max(3rd)
0.0033192209598534358 13891082844.471136 54.41158700914487 27.29493804319961
1- Delete the huge abnormal data in z2_list (GOAs' R(RC)_IS_lin-kk_res.txt), (x=1.3,y=0.458,z=13891082844.471136,fn=2-3,ECM-Num=9)
2- Set the value range of colorbar as 1e-4 ~ 1e2, 6 margins
"""
# 将z_min ~ z_max等分成15份,每个数值区间用一个颜色表示
# z_min = min(min(z1_list), min(z2_list))
# z_max = max(max(z1_list), max(z2_list))
# print(z_min, z_max)
# levels = MaxNLocator(nbins=15).tick_values(z.min(), z.max())
# 1- Delete the huge abnormal data in z2_list (GOAs' R(RC)_IS_lin-kk_res.txt), (x=1.3,y=0.458,z=13891082844.471136,fn=2-3,ECM-Num=9)
abnormal_index = z2_list.index(max(z2_list))
del x2_list[abnormal_index]
del y2_list[abnormal_index]
del z2_list[abnormal_index]
"""
The data format of x, y and z has to be 2D np.array
laiZhaoGui has 125(Odd) pieces of data, in order to transfer them into 2D np.array, remove the last data(can not randomly add one piece of data).
So laiZhaoGui's data will be 124 ==> 62, 2
Goa has 126(even) pieces of data, in order to transfer them into 2D np.array.
So Goa's data will be 126 ==> 63, 2
"""
# x,y的数值要按照逐渐变大或变小的规律有序排列,不然图像会乱
xyz1_list = [[x,y,z] for x,y,z in zip(x1_list, y1_list, z1_list)]
# sort by y, then sort by x
xyz1_list.sort(key=lambda xyz:(xyz[1], xyz[0]), reverse=False)
x1_sorted_list = [xyz[0] for xyz in xyz1_list]
y1_sorted_list = [xyz[1] for xyz in xyz1_list]
z1_sorted_list = [xyz[2] for xyz in xyz1_list]
x1_2D_arr = np.array(x1_sorted_list[:len(x1_sorted_list)-1]).reshape((62, 2))
y1_2D_arr = np.array(y1_sorted_list[:len(y1_sorted_list)-1]).reshape((62, 2))
z1_2D_arr = np.array(z1_sorted_list[:len(z1_sorted_list)-1]).reshape((62, 2))
# x1_2D_arr = np.array(x1_list[:len(x1_list)-1]).reshape((62, 2))
# y1_2D_arr = np.array(y1_list[:len(y1_list)-1]).reshape((62, 2))
# z1_2D_arr = np.array(z1_list[:len(z1_list)-1]).reshape((62, 2))
xyz2_list = [[x,y,z] for x,y,z in zip(x2_list, y2_list, z2_list)]
xyz2_list.sort(key=lambda xyz:(xyz[1], xyz[0]), reverse=False)
x2_sorted_list = [xyz[0] for xyz in xyz2_list]
y2_sorted_list = [xyz[1] for xyz in xyz2_list]
z2_sorted_list = [xyz[2] for xyz in xyz2_list]
x2_2D_arr = np.array(x2_sorted_list).reshape((63, 2))
y2_2D_arr = np.array(y2_sorted_list).reshape((63, 2))
z2_2D_arr = np.array(z2_sorted_list).reshape((63, 2))
# x2_2D_arr = np.array(x2_list).reshape((63, 2))
# y2_2D_arr = np.array(y2_list).reshape((63, 2))
# z2_2D_arr = np.array(z2_list).reshape((63, 2))
# 将z_min ~ z_max等分成6份,每个数值区间用一个颜色表示
level_arr = np.array([10 ** i for i in range(-4, 3)])
# pick the desired colormap, sensible levels, and define a normalization
# instance which takes data values and translates those into levels.
# cmap = plt.get_cmap('PiYG')
cmap = plt.get_cmap('viridis')
norm = BoundaryNorm(level_arr, ncolors=cmap.N, clip=True)
fig, (ax1, ax2) = plt.subplots(nrows=2)
fig.suptitle('Title of two subplots')
# contours are *point* based plots, so convert our bound into point
# centers
# cf1 = ax1.contourf(x1_list, y1_list, z1_list, levels=level_arr, cmap=cmap)
# setting the log locator tells contourf to use a log scale:
# cf1 = ax1.contourf(x1_2D_arr, y1_2D_arr, z1_2D_arr, locator=ticker.LogLocator(), levels=level_arr, cmap=cmap)
# fig.colorbar(cf1, ax=ax1)
# ax1.set_title('1 Lai R(RC)_IS_lin-kk_res.txt')
# im = ax1.pcolormesh(x, y, z, cmap=cmap, norm=norm)
im = ax1.pcolormesh(x1_2D_arr, y1_2D_arr, z1_2D_arr, cmap=cmap, norm=norm)
fig.colorbar(im, ax=ax1)
ax1.set_title('pcolormesh with levels')
# cf2 = ax2.contourf(x2_list, y2_list, z2_list, levels=level_arr, cmap=cmap)
cf2 = ax2.contourf(x2_2D_arr, y2_2D_arr, z2_2D_arr, locator=ticker.LogLocator(), levels=level_arr, cmap=cmap)
fig.colorbar(cf2, ax=ax2)
# ax2.xlim(0,20) # AttributeError: 'AxesSubplot' object has no attribute 'xlim'
ax2.set_title('2 GOA R(RC)_IS_lin-kk_res.txt')
# ax1.plot(x1, y1, 'o-')
# ax1.set_ylabel('Damped oscillation')
# ax2.plot(x2, y2, '.-')
# ax2.set_xlabel('time (s)')
# ax2.set_ylabel('Undamped')
plt.xlim(0, 17)
plt.ylim(0, 2)
plt.show()
# 1- Get Lai's manual fitting R(RC)_IS_lin-kk_res.txt and GOAs R(RC)_IS_lin-kk_res.txt
# 1.1- Get Lai's manual fitting R(RC)_IS_lin-kk_res.txt
lai_manual_fit_res_dict_list = read_lai_manual_fitting_res(ex_fp='../../../datasets/experiement_data/laiZhaoGui/eis/2020-07-22-阻抗类型整理2006.xlsx',\
sheet_name='statistic')
coor_dict_list = read_lai_test_coordinate(ex_fp='../../../datasets/experiement_data/laiZhaoGui/eis/坐标.xlsx',\
sheet_name='Sheet1')
lai_manual_fit_res_dict_list = pack_lai_manual_fitting_res(lai_manual_fit_res_dict_list,\
coor_dict_list)
lai_x_list, lai_y_list, lai_z_list = wrap_lai_data_4_contour(lai_manual_fit_res_dict_list)
# 1.2- Get GOAs R(RC)_IS_lin-kk_res.txt
goa_fit_res_dict_list = get_GOAs_best_fitting_res(fp='../../playground/laiZhaoGui/goa/R(RC)_IS_lin-kk_res.txt/magNum=2_res')
goa_fit_res_dict_list = pack_GOAs_fit_res(goa_fit_res_dict_list, coor_dict_list)
goa_x_list, goa_y_list, goa_z_list = wrap_GOAs_data_4_contour(goa_fit_res_dict_list)
# 2- Plot contour
two_vertical_contour_0(x1_list=lai_x_list, y1_list=lai_y_list, z1_list=lai_z_list,\
x2_list=goa_x_list, y2_list=goa_y_list, z2_list=goa_z_list)
"""
我认为失败的原因
1- python 的contourf 要求横纵坐标都是网格状的二维数组,但是我的实际数据(测试点的坐标)不是每行元素个数一致,创建二维数组就意味着,有些位置
上的数字是瞎编的,没有意义,如果填写0,反而意味着拟合的效果非常好,这是错的
2- 之前工作(三元合金)的contour就是用origin画的,应该是可行的
""" | 0 | 0 | 0 |
055971b4d0f7c1b8931618e17e88c3c031984606 | 384 | py | Python | use_decorater.py | silenceFei/singleton | 970aeacea3541ef215f3a92b83662f3d6243995f | [
"Apache-2.0"
] | null | null | null | use_decorater.py | silenceFei/singleton | 970aeacea3541ef215f3a92b83662f3d6243995f | [
"Apache-2.0"
] | null | null | null | use_decorater.py | silenceFei/singleton | 970aeacea3541ef215f3a92b83662f3d6243995f | [
"Apache-2.0"
] | null | null | null |
@SingleTon
test1 = TestClass()
test2 = TestClass()
print test1.a, test2.a
test1.a = 2
print test1.a, test2.a
def SingleTon(cls, *args, **kwargs):
    """Class decorator: replace *cls* with a factory returning one shared instance.

    The instance is constructed lazily, on the first call only; every later
    call hands back the same object.
    """
    cached = []
    def _get_instance():
        # Build the singleton on first use; a one-slot list is the cache.
        if not cached:
            cached.append(cls(*args, **kwargs))
        return cached[0]
    return _get_instance
# Demo (Python 2 print statements): both calls return the same cached
# instance, so the attribute change made through test1 is visible via test2.
@SingleTon
class TestClass(object):
    a = 1
test1 = TestClass()
test2 = TestClass()
print test1.a, test2.a
test1.a = 2
print test1.a, test2.a
# Identical ids confirm a single shared object.
print id(test1), id(test2)
e6e5d499d771424e99b0cd46ef0c30546e830d9b | 1,623 | py | Python | exercises/13_ransom/solution2.py | AnnieBrunton/biosystems-analytics-2020 | 219e961b07e62dae6c27675e1de94cb56c9adb8e | [
"MIT"
] | 1 | 2021-05-19T19:07:56.000Z | 2021-05-19T19:07:56.000Z | exercises/13_ransom/solution2.py | AnnieBrunton/biosystems-analytics-2020 | 219e961b07e62dae6c27675e1de94cb56c9adb8e | [
"MIT"
] | 1 | 2020-02-11T20:15:59.000Z | 2020-02-11T20:15:59.000Z | exercises/13_ransom/solution2.py | AnnieBrunton/biosystems-analytics-2020 | 219e961b07e62dae6c27675e1de94cb56c9adb8e | [
"MIT"
] | 24 | 2020-01-15T17:34:40.000Z | 2021-08-23T05:57:24.000Z | #!/usr/bin/env python3
"""Ransom note"""
import argparse
import os
import random
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Ransom Note',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('text', metavar='str', help='Input text or file')
parser.add_argument('-s',
'--seed',
help='Random seed',
metavar='int',
type=int,
default=None)
args = parser.parse_args()
if os.path.isfile(args.text):
args.text = open(args.text).read().rstrip()
return args
# --------------------------------------------------
def choose(char):
"""Randomly choose an upper or lowercase letter to return"""
return char.upper() if random.choice([0, 1]) else char.lower()
# --------------------------------------------------
def test_choose():
"""Test choose"""
random.seed(1)
assert choose('a') == 'a'
assert choose('b') == 'b'
assert choose('c') == 'C'
assert choose('d') == 'd'
random.seed(None)
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
random.seed(args.seed)
# Method 2: Iterate each character, add to a list
ransom = []
for char in args.text:
ransom += choose(char)
print(''.join(ransom))
# --------------------------------------------------
if __name__ == '__main__':
main()
| 22.859155 | 73 | 0.473814 | #!/usr/bin/env python3
"""Ransom note"""
import argparse
import os
import random
# --------------------------------------------------
def get_args():
    """Parse command-line arguments.

    If the positional *text* argument names an existing file, it is replaced
    by that file's contents with trailing whitespace stripped.
    """
    parser = argparse.ArgumentParser(
        description='Ransom Note',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('text', metavar='str', help='Input text or file')
    parser.add_argument(
        '-s', '--seed',
        metavar='int',
        type=int,
        default=None,
        help='Random seed',
    )
    args = parser.parse_args()
    # A path on disk means "use the file contents instead of the literal text".
    if os.path.isfile(args.text):
        args.text = open(args.text).read().rstrip()
    return args
# --------------------------------------------------
def choose(char):
    """Return *char* upper- or lowercased, picked by a random coin flip."""
    flip = random.choice([0, 1])
    return char.upper() if flip else char.lower()
# --------------------------------------------------
def test_choose():
    """Pin choose() for random.seed(1); expected letters follow the seeded
    random.choice stream consumed one flip per call."""
    random.seed(1)
    assert choose('a') == 'a'
    assert choose('b') == 'b'
    assert choose('c') == 'C'
    assert choose('d') == 'd'
    # Restore unseeded randomness for anything running afterwards.
    random.seed(None)
# --------------------------------------------------
def main():
    """Make a jazz noise here"""
    args = get_args()
    random.seed(args.seed)
    # Randomize the case of each character and print the assembled note.
    print(''.join(choose(ch) for ch in args.text))


# --------------------------------------------------
if __name__ == '__main__':
    main()
| 0 | 0 | 0 |
9bcde1745ec8d892e2b966bda42ec9ead442fb6b | 970 | py | Python | tests/test_yourcar.py | SebRut/python-yourcar | fa4b1e52381dd0a96f62dd633409667c6ecd10ef | [
"MIT"
] | null | null | null | tests/test_yourcar.py | SebRut/python-yourcar | fa4b1e52381dd0a96f62dd633409667c6ecd10ef | [
"MIT"
] | 8 | 2020-06-12T04:37:37.000Z | 2020-08-24T04:51:06.000Z | tests/test_yourcar.py | SebRut/python-yourcar | fa4b1e52381dd0a96f62dd633409667c6ecd10ef | [
"MIT"
] | null | null | null | import pytest
from yourcar import YourCarAPIClient, Place
LAT_ROSTOCK = 54.0833
LON_ROSTOCK = 12.1333
@pytest.fixture
@pytest.fixture
@pytest.fixture(scope="module", autouse=True)
@pytest.mark.vcr()
def test_places_successful(client):
"""Tests an API call to get places near a location in a given range"""
places = client.places(lat=LAT_ROSTOCK, lon=LON_ROSTOCK, distance=2000)
assert isinstance(places, list)
assert len(places) == 15
first = places[0]
assert isinstance(first, Place)
assert first.id == "4402"
assert first.name == "August-Bebel-Str. 71"
assert first.geo_position.latitude == 54.085047
assert first.geo_position.longitude == 12.133862
assert first.distance - 197.9 < 0.1
| 23.658537 | 75 | 0.707216 | import pytest
from yourcar import YourCarAPIClient, Place
LAT_ROSTOCK = 54.0833
LON_ROSTOCK = 12.1333
@pytest.fixture
def api_key():
    """API key from the environment, with a dummy fallback for replayed cassettes."""
    import os
    return os.environ.get("YOURCAR_API_KEY", "YEK-IPA")
@pytest.fixture
def client(api_key):
    """YourCar API client built from the api_key fixture."""
    return YourCarAPIClient(api_key)
@pytest.fixture(scope="module", autouse=True)
def vcr_config():
    """Keep the real API key out of recorded VCR cassettes."""
    return {"filter_headers": [("X-API-KEY", "DUMMY_API_KEY")]}
@pytest.mark.vcr()
def test_places_successful(client):
    """Tests an API call to get places near a location in a given range"""
    places = client.places(lat=LAT_ROSTOCK, lon=LON_ROSTOCK, distance=2000)
    assert isinstance(places, list)
    assert len(places) == 15
    # Spot-check the nearest result against the recorded VCR cassette.
    first = places[0]
    assert isinstance(first, Place)
    assert first.id == "4402"
    assert first.name == "August-Bebel-Str. 71"
    assert first.geo_position.latitude == 54.085047
    assert first.geo_position.longitude == 12.133862
    # Bug fix: the original one-sided check (first.distance - 197.9 < 0.1)
    # passed for ANY distance below ~198 m; compare the absolute deviation.
    assert abs(first.distance - 197.9) < 0.1
| 160 | 0 | 66 |
dc4ae7a785c1e22a2bfd63f37d10be3734760c8f | 487 | py | Python | pystratis/api/wallet/requestmodels/extpubkeyrequest.py | TjadenFroyda/pyStratis | 9cc7620d7506637f8a2b84003d931eceb36ac5f2 | [
"MIT"
] | 8 | 2021-06-30T20:44:22.000Z | 2021-12-07T14:42:22.000Z | pystratis/api/wallet/requestmodels/extpubkeyrequest.py | TjadenFroyda/pyStratis | 9cc7620d7506637f8a2b84003d931eceb36ac5f2 | [
"MIT"
] | 2 | 2021-07-01T11:50:18.000Z | 2022-01-25T18:39:49.000Z | pystratis/api/wallet/requestmodels/extpubkeyrequest.py | TjadenFroyda/pyStratis | 9cc7620d7506637f8a2b84003d931eceb36ac5f2 | [
"MIT"
] | 4 | 2021-07-01T04:36:42.000Z | 2021-09-17T10:54:19.000Z | from typing import Optional
from pydantic import Field
from pystratis.api import Model
# noinspection PyUnresolvedReferences
class ExtPubKeyRequest(Model):
"""A request model for the wallet/extpubkey endpoint.
Args:
wallet_name (str): The wallet name.
account_name (str, optional): The account name. Default='account 0'.
"""
wallet_name: str = Field(alias='WalletName')
account_name: Optional[str] = Field(default='account 0', alias='AccountName')
| 30.4375 | 81 | 0.722793 | from typing import Optional
from pydantic import Field
from pystratis.api import Model
# noinspection PyUnresolvedReferences
class ExtPubKeyRequest(Model):
    """A request model for the wallet/extpubkey endpoint.

    Args:
        wallet_name (str): The wallet name.
        account_name (str, optional): The account name. Default='account 0'.
    """
    # Serialized as 'WalletName' in the request payload.
    wallet_name: str = Field(alias='WalletName')
    # Serialized as 'AccountName'; defaults to the node's first account.
    account_name: Optional[str] = Field(default='account 0', alias='AccountName')
| 0 | 0 | 0 |
4b71e32eb21cadefd433af3f99d102a77a149de7 | 2,094 | py | Python | DIL/crypto/AES256.py | HanSooLim/DIL-Project | 069fa7e35a2e1edfff30dc2540d9b87f5db95dde | [
"MIT",
"BSD-3-Clause"
] | 2 | 2021-10-16T15:08:05.000Z | 2021-10-16T15:59:57.000Z | DIL/crypto/AES256.py | HanSooLim/DIL-Project | 069fa7e35a2e1edfff30dc2540d9b87f5db95dde | [
"MIT",
"BSD-3-Clause"
] | 8 | 2021-10-21T04:48:12.000Z | 2021-11-07T03:09:25.000Z | DIL/crypto/AES256.py | HanSooLim/DIL-Project | 069fa7e35a2e1edfff30dc2540d9b87f5db95dde | [
"MIT",
"BSD-3-Clause"
] | 3 | 2021-05-02T13:39:14.000Z | 2021-05-31T14:05:56.000Z | import pandas, hashlib, base64
from Crypto import Random
from Crypto.Cipher import AES
from util import DataSetting
DataFrame = pandas.DataFrame
class AES256(DataSetting):
"""
| 암호화 기술 중 양방향 암호화(AES-256)를 구현한 클래스
| 모든 메소드는 생성자에 원본 데이터를 인자 값으로 넣으면 원본 데이터를 수정한다.
Args:
datas (pandas.DataFrame) : 양방향 암호화 기술(AES-256 암호화)을 적용할 DataFrame 지정
key (str) : AES-256 암호화에 사용될 암호키 지정
"""
def encrypt(self, column: str):
"""
AES-256 암호화를 수행하는 메소드
Args:
column (str) : AES-256 암호화를 적용할 컬럼
Returns:
bool : 기술 적용 성공 시 True 리턴
"""
datas = self._toList(column)
result = []
for raw in datas:
raw = self._pad(raw)
iv = Random.new().read(AES.block_size)
cipher = AES.new(self.key, AES.MODE_CBC, iv)
cipherText = base64.b64encode(iv + cipher.encrypt(raw.encode()))
result.append(cipherText.decode())
self.datas.loc[:, column] = result
return True
def decrypt(self, column: str):
"""
AES-256 복호화를 수행하는 메소드
Args:
column (str) : AES-256 복호화를 적용할 컬럼
Returns:
bool : 기술 적용 성공 시 True 리턴
"""
datas = self._toList(column)
result = []
for enc in datas:
enc = base64.b64decode(enc.encode())
iv = enc[: AES.block_size]
cipher = AES.new(self.key, AES.MODE_CBC, iv)
plainText = self._unpad(cipher.decrypt(enc[AES.block_size :])).decode(
"utf-8"
)
result.append(plainText)
self.datas.loc[:, column] = result
return True
@staticmethod
| 24.348837 | 82 | 0.536772 | import pandas, hashlib, base64
from Crypto import Random
from Crypto.Cipher import AES
from util import DataSetting
DataFrame = pandas.DataFrame
class AES256(DataSetting):
    """Two-way (symmetric) AES-256 encryption helper for DataFrame columns.

    Every method mutates, in place, the original DataFrame handed to the
    constructor.

    Args:
        datas (pandas.DataFrame) : DataFrame the AES-256 cipher is applied to.
        key (str) : passphrase from which the AES-256 key is derived.
    """
    def __init__(self, datas: DataFrame, key: str):
        self.datas = datas
        self.bs = 32  # padding block size, in bytes
        # SHA-256 of the passphrase yields the 32-byte AES-256 key.
        self.key = hashlib.sha256(key.encode("utf-8")).digest()
    def encrypt(self, column: str):
        """Encrypt one DataFrame column with AES-256 in CBC mode.

        Args:
            column (str) : name of the column to encrypt in place.

        Returns:
            bool : True once the column has been rewritten.
        """
        # _toList comes from DataSetting; presumably returns the column as a
        # list of strings — confirm against util.DataSetting.
        datas = self._toList(column)
        result = []
        for raw in datas:
            raw = self._pad(raw)
            # Fresh random IV per value; it is prepended to the ciphertext
            # so decrypt() can recover it.
            iv = Random.new().read(AES.block_size)
            cipher = AES.new(self.key, AES.MODE_CBC, iv)
            cipherText = base64.b64encode(iv + cipher.encrypt(raw.encode()))
            result.append(cipherText.decode())
        self.datas.loc[:, column] = result
        return True
    def decrypt(self, column: str):
        """Decrypt one DataFrame column previously produced by :meth:`encrypt`.

        Args:
            column (str) : name of the column to decrypt in place.

        Returns:
            bool : True once the column has been rewritten.
        """
        datas = self._toList(column)
        result = []
        for enc in datas:
            enc = base64.b64decode(enc.encode())
            # The IV was stored as the first AES block of the payload.
            iv = enc[: AES.block_size]
            cipher = AES.new(self.key, AES.MODE_CBC, iv)
            plainText = self._unpad(cipher.decrypt(enc[AES.block_size :])).decode(
                "utf-8"
            )
            result.append(plainText)
        self.datas.loc[:, column] = result
        return True
    def _pad(self, s):
        # PKCS#7-style padding: append N copies of chr(N) up to a multiple
        # of self.bs bytes (length measured on the UTF-8 encoding).
        return s + (self.bs - len(s.encode("utf-8")) % self.bs) * chr(
            self.bs - len(s.encode("utf-8")) % self.bs
        )
    @staticmethod
    def _unpad(s):
        # The last byte encodes the pad length added by _pad.
        return s[: -s[-1]]
| 292 | 0 | 80 |
7cb58a9fccba2b4158d2532461d1075653bc4ddf | 789 | py | Python | tests/check.py | denik/cython-ifdef | 218162db1c1bd9afe4e06aac1de996fea1ba5bf3 | [
"MIT"
] | 3 | 2015-11-22T19:42:13.000Z | 2020-02-10T08:16:00.000Z | tests/check.py | denik/cython-ifdef | 218162db1c1bd9afe4e06aac1de996fea1ba5bf3 | [
"MIT"
] | null | null | null | tests/check.py | denik/cython-ifdef | 218162db1c1bd9afe4e06aac1de996fea1ba5bf3 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import sys
import testmodule
config = int(sys.argv[1])
if config == 0:
not_debug()
not_windows()
elif config == 1:
not_debug()
windows()
elif config == 2:
debug()
not_windows()
elif config == 3:
debug()
windows()
else:
sys.exit('Invalid argument')
print 'checked', config
| 18.785714 | 54 | 0.653992 | #!/usr/bin/python
import sys
import testmodule
def windows():
    """Assert testmodule was built with the WINDOWS flag: windows_only
    exists and returns 5, unix_only is absent on class and instance."""
    assert testmodule.X().windows_only() == 5
    assert not hasattr(testmodule.X, 'unix_only')
    assert not hasattr(testmodule.X(), 'unix_only')
def not_windows():
    """Assert a non-Windows build: unix_only returns 6, windows_only is absent."""
    assert testmodule.X().unix_only() == 6
    assert not hasattr(testmodule.X, 'windows_only')
    assert not hasattr(testmodule.X(), 'windows_only')
def debug():
    """Assert the DEBUG-only module attribute was compiled in."""
    assert getattr(testmodule, 'debug') == 25
def not_debug():
    """Assert the DEBUG-only module attribute was compiled out."""
    assert not hasattr(testmodule, 'debug')
# Map the CLI config index onto the (debug, windows) build combination
# that testmodule is expected to have been compiled with.
config = int(sys.argv[1])
if config == 0:
    not_debug()
    not_windows()
elif config == 1:
    not_debug()
    windows()
elif config == 2:
    debug()
    not_windows()
elif config == 3:
    debug()
    windows()
else:
    sys.exit('Invalid argument')
print 'checked', config  # Python 2 print statement: this file targets Python 2
| 365 | 0 | 92 |
e025ba47e89fa915062db3b001e151b6e658b49d | 3,362 | py | Python | hatchet/tests/node.py | slabasan/llnl-hatchet | 5fce542f435952ad93248feac8b0c47c44649b47 | [
"MIT"
] | 55 | 2017-10-06T18:50:15.000Z | 2022-03-10T19:15:52.000Z | hatchet/tests/node.py | slabasan/llnl-hatchet | 5fce542f435952ad93248feac8b0c47c44649b47 | [
"MIT"
] | 172 | 2018-02-01T01:02:04.000Z | 2022-03-11T19:15:21.000Z | hatchet/tests/node.py | slabasan/llnl-hatchet | 5fce542f435952ad93248feac8b0c47c44649b47 | [
"MIT"
] | 28 | 2017-11-22T14:27:22.000Z | 2022-02-22T00:25:33.000Z | # Copyright 2017-2021 Lawrence Livermore National Security, LLC and other
# Hatchet Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: MIT
import pytest
from hatchet.node import Node, MultiplePathError
from hatchet.frame import Frame
from hatchet.graph import Graph
| 26.896 | 85 | 0.560976 | # Copyright 2017-2021 Lawrence Livermore National Security, LLC and other
# Hatchet Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: MIT
import pytest
from hatchet.node import Node, MultiplePathError
from hatchet.frame import Frame
from hatchet.graph import Graph
def test_from_lists():
    """Node.from_lists builds a node (and nested children) from name lists."""
    node = Node.from_lists("a")
    assert node.frame == Frame(name="a")
    a = Frame(name="a")
    b = Frame(name="b")
    c = Frame(name="c")
    node = Node.from_lists(["a", ["b", "c"]])
    assert node.frame == a
    assert node.children[0].frame == b
    assert node.children[0].children[0].frame == c
def test_from_lists_value_error():
    """Input that is neither a string nor a list/tuple is rejected."""
    with pytest.raises(ValueError):
        Node.from_lists(object())
def test_traverse_pre():
    """Default traversal order is pre-order: parent before its children."""
    node = Node(Frame(name="a"))
    assert list(node.traverse(attrs="name")) == ["a"]
    node = Node.from_lists(["a", ["b", "d", "e"], ["c", "f", "g"]])
    assert list(node.traverse(attrs="name")) == ["a", "b", "d", "e", "c", "f", "g"]
def test_traverse_post():
    """order="post" visits children before their parent."""
    node = Node.from_lists(["a", ["b", "d", "e"], ["c", "f", "g"]])
    assert list(node.traverse(order="post", attrs="name")) == [
        "d",
        "e",
        "b",
        "f",
        "g",
        "c",
        "a",
    ]
def test_traverse_dag():
    """Traversal visits a shared (DAG) child only once."""
    d = Node(Frame(name="d"))
    node = Node.from_lists(["a", ["b", d], ["c", d]])
    assert list(node.traverse(attrs="name")) == ["a", "b", "d", "c"]
def test_node_repr():
    """repr shows the frame's attribute dict plus the implicit 'type' key."""
    d = Node(Frame(a=1, b=2, c=3))
    assert repr(d) == "Node({'a': 1, 'b': 2, 'c': 3, 'type': 'None'})"
def test_path():
    """path() returns the unique root-to-node tuple of nodes."""
    d = Node(Frame(name="d", type="function"))
    node = Node.from_lists(["a", ["b", d]])
    assert d.path() == (
        Node(Frame(name="a")),
        Node(Frame(name="b")),
        Node(Frame(name="d", type="function")),
    )
    assert d.parents[0].path() == (Node(Frame(name="a")), Node(Frame(name="b")))
    assert node.path() == (Node(Frame(name="a")),)
def test_paths():
    """paths() lists every root-to-node path; path() raises when ambiguous."""
    d = Node(Frame(name="d"))
    Node.from_lists(["a", ["b", d], ["c", d]])
    with pytest.raises(MultiplePathError):
        d.path()
    assert d.paths() == [
        (Node(Frame(name="a")), Node(Frame(name="b")), Node(Frame(name="d"))),
        (Node(Frame(name="a")), Node(Frame(name="c")), Node(Frame(name="d"))),
    ]
def test_traverse_paths():
    """Graph-level traversal visits a subdag shared by two roots only once."""
    d = Node(Frame(name="d"))
    diamond_subdag = Node.from_lists(("a", ("b", d), ("c", d)))
    g = Graph.from_lists(("e", "f", diamond_subdag), ("g", diamond_subdag, "h"))
    assert list(g.traverse(attrs="name")) == ["e", "a", "b", "d", "c", "f", "g", "h"]
def check_dag_equal():
    """Exercise Node.dag_equal across chain / diamond / tree shapes.

    NOTE(review): the name lacks the ``test_`` prefix, so pytest never
    collects this function — rename to ``test_dag_equal`` if it is meant
    to run as part of the suite.
    """
    chain = Node.from_lists(("a", ("b", ("c", ("d",)))))
    d = Node(Frame(name="d"))
    diamond = Node.from_lists(("a", ("b", d), ("c", d)))
    tree = Node.from_lists(
        ("a", ("b", "e", "f", "g"), ("c", "e", "f", "g"), ("d", "e", "f", "g"))
    )
    assert chain.dag_equal(chain)
    assert chain.dag_equal(chain.copy())
    assert diamond.dag_equal(diamond)
    assert diamond.dag_equal(diamond.copy())
    assert tree.dag_equal(tree)
    assert tree.dag_equal(tree.copy())
    assert not chain.dag_equal(tree)
    assert not chain.dag_equal(diamond)
    assert not tree.dag_equal(chain)
    assert not tree.dag_equal(diamond)
    assert not diamond.dag_equal(chain)
    assert not diamond.dag_equal(tree)
| 2,812 | 0 | 230 |
1285a695be87fef4d88cbfbcfbca608ee51ac83b | 7,173 | py | Python | applications/incompressible_fluid_application/python_scripts/trilinos_monolithic_solver_eulerian.py | lcirrott/Kratos | 8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea | [
"BSD-4-Clause"
] | 2 | 2019-10-25T09:28:10.000Z | 2019-11-21T12:51:46.000Z | applications/incompressible_fluid_application/python_scripts/trilinos_monolithic_solver_eulerian.py | lcirrott/Kratos | 8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea | [
"BSD-4-Clause"
] | 13 | 2019-10-07T12:06:51.000Z | 2020-02-18T08:48:33.000Z | applications/incompressible_fluid_application/python_scripts/trilinos_monolithic_solver_eulerian.py | lcirrott/Kratos | 8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea | [
"BSD-4-Clause"
] | null | null | null | from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
from KratosMultiphysics import *
from KratosMultiphysics.mpi import *
from KratosMultiphysics.MetisApplication import *
from KratosMultiphysics.IncompressibleFluidApplication import *
from KratosMultiphysics.TrilinosApplication import *
from KratosMultiphysics.FluidDynamicsApplication import *
#
#
#
#
#
| 38.358289 | 210 | 0.72048 | from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
from KratosMultiphysics import *
from KratosMultiphysics.mpi import *
from KratosMultiphysics.MetisApplication import *
from KratosMultiphysics.IncompressibleFluidApplication import *
from KratosMultiphysics.TrilinosApplication import *
from KratosMultiphysics.FluidDynamicsApplication import *
def AddVariables(model_part):
    """Register every nodal solution-step variable required by the monolithic
    fluid solver on the given model part (must be called before reading the
    mesh so the variables are allocated on each node)."""
    variables = (
        VELOCITY, ACCELERATION, MESH_VELOCITY, PRESSURE, AIR_PRESSURE,
        IS_FLUID, IS_POROUS, IS_STRUCTURE, IS_FREE_SURFACE, IS_INTERFACE,
        IS_BOUNDARY, DISPLACEMENT, VISCOSITY, DENSITY, DENSITY_AIR,
        AIR_SOUND_VELOCITY, SOUND_VELOCITY, BODY_FORCE, NODAL_AREA, NODAL_H,
        ADVPROJ, DIVPROJ, THAWONE, THAWTWO, REACTION, REACTION_WATER_PRESSURE,
        EXTERNAL_PRESSURE, WATER_PRESSURE, AIR_PRESSURE_DT, ARRHENIUS,
        FLAG_VARIABLE, PARTITION_INDEX, NORMAL,
    )
    for variable in variables:
        model_part.AddNodalSolutionStepVariable(variable)
    print("variables for the dynamic structural solution added correctly")
def AddDofs(model_part):
    """Add the velocity and pressure degrees of freedom (paired with their
    reaction variables) to every node of the model part, then synchronize
    all MPI ranks."""
    dof_reaction_pairs = (
        (VELOCITY_X, REACTION_X),
        (VELOCITY_Y, REACTION_Y),
        (VELOCITY_Z, REACTION_Z),
        (PRESSURE, REACTION_WATER_PRESSURE),
        (AIR_PRESSURE, REACTION_AIR_PRESSURE),
    )
    for node in model_part.Nodes:
        for dof, reaction in dof_reaction_pairs:
            node.AddDof(dof, reaction)
    mpi.world.barrier()
    print("dofs for the monolithic solver added correctly")
class MonolithicSolver:
    """Driver around a Trilinos-based monolithic (coupled velocity/pressure)
    Bossak time-integration strategy for the incompressible fluid problem.

    Usage: construct, then call Initialize() once, then Solve() per step.
    """
    #
    def __init__(self, model_part, domain_size):
        """Store default solver settings; `domain_size` (2 or 3) selects the
        ML pressure builder and the matrix row-size estimate."""
        self.model_part = model_part
        self.domain_size = domain_size
        self.alpha = -0.3  # Bossak alpha: adds numerical damping to the scheme
        self.move_mesh_strategy = 0
        self.Comm = CreateCommunicator()
        self.linear_solver = TrilinosLinearSolver()
        # definition of the convergence criteria (relative + absolute
        # tolerances for velocity and pressure)
        self.vel_criteria = 1e-3
        self.press_criteria = 1e-3
        self.vel_abs_criteria = 1e-9
        self.press_abs_criteria = 1e-9
        self.model_part.ProcessInfo.SetValue(DYNAMIC_TAU, 0.001)
        self.max_iter = 20
        # default settings
        self.echo_level = 1
        self.CalculateReactionFlag = False
        self.ReformDofSetAtEachStep = True
        self.CalculateNormDxFlag = True
        self.MoveMeshFlag = False
        # estimated neighbours per node drives the builder's guess of
        # non-zeros per matrix row
        if(domain_size == 2):
            estimate_neighbours = 10
            self.guess_row_size = estimate_neighbours * (self.domain_size + 1)
            self.buildertype = "ML2Dpress"
        else:
            estimate_neighbours = 25
            self.guess_row_size = estimate_neighbours * (self.domain_size + 1)
            self.buildertype = "ML3Dpress"
        # --- alternative configurations kept for reference ---
        # self.guess_row_size = 25
        # self.buildertype="standard"
        # aztec_parameters = ParameterList()
        # aztec_parameters.set("AZ_solver","AZ_gmres");
        # aztec_parameters.set("AZ_kspace",200);
        # aztec_parameters.set("AZ_output","AZ_none");
        # aztec_parameters.set("AZ_output",10);
        # preconditioner_type = "ILU"
        # preconditioner_parameters = ParameterList()
        # preconditioner_parameters.set ("fact: drop tolerance", 1e-9);
        # preconditioner_parameters.set ("fact: level-of-fill", 1);
        # overlap_level = 0
        # nit_max = 1000
        # linear_tol = 1e-9
        # self.linear_solver = AztecSolver(aztec_parameters,preconditioner_type,preconditioner_parameters,linear_tol,nit_max,overlap_level);
        # solver_parameters = ParameterList()
        # self.linear_solver = AmesosSolver("Superludist",solver_parameters);
        #
        # defining the linear solver
        # self.buildertype="standard"
        # aztec_parameters = ParameterList()
        # aztec_parameters.set("AZ_solver","AZ_gmres");
        # aztec_parameters.set("AZ_kspace",100);
        # aztec_parameters.set("AZ_output",32);
        # preconditioner_type = "Amesos"
        # preconditioner_parameters = ParameterList()
        # preconditioner_parameters.set("amesos: solver type", "Amesos_Klu");
        # preconditioner_type = "ILU"
        # preconditioner_parameters = ParameterList()
        # overlap_level = 0
        # nit_max = 500
        # tol = 1e-6
        # self.linear_solver = AztecSolver(aztec_parameters,preconditioner_type,preconditioner_parameters,tol,nit_max,overlap_level);
        # self.linear_solver.SetScalingType(AztecScalingType.LeftScaling)
        #
        #
    def Initialize(self):
        """Build the time scheme, convergence criteria and the solving
        strategy; must run once before the first Solve()."""
        self.time_scheme = TrilinosPredictorCorrectorVelocityBossakSchemeTurbulent(
            self.alpha, self.move_mesh_strategy, self.domain_size)
        self.time_scheme.Check(self.model_part)
        self.conv_criteria = TrilinosUPCriteria(
            self.vel_criteria,
            self.vel_abs_criteria,
            self.press_criteria,
            self.press_abs_criteria,
            self.Comm)
        # creating the solution strategy
        import trilinos_strategy_python
        self.solver = trilinos_strategy_python.SolvingStrategyPython(
            self.buildertype,
            self.model_part,
            self.time_scheme,
            self.linear_solver,
            self.conv_criteria,
            self.CalculateReactionFlag,
            self.ReformDofSetAtEachStep,
            self.MoveMeshFlag,
            self.Comm,
            self.guess_row_size)
        self.solver.max_iter = self.max_iter
        # self.solver = ResidualBasedNewtonRaphsonStrategy(self.model_part,self.time_scheme,self.linear_solver,self.conv_criteria,self.max_iter,self.CalculateReactionFlag, self.ReformDofSetAtEachStep,self.MoveMeshFlag)
        (self.solver).SetEchoLevel(self.echo_level)
    #
    def Solve(self):
        """Advance one nonlinear solution step (MPI-synchronized)."""
        mpi.world.barrier()
        (self.solver).Solve()
    #
    def SetEchoLevel(self, level):
        """Forward the verbosity level to the underlying strategy."""
        (self.solver).SetEchoLevel(level)
    #
| 6,530 | 2 | 174 |
d38e1cb36595014c00e80605a97ca80995c9ac7a | 777 | py | Python | tests/integration/test_github_actions.py | vikasbhatia/asyncpraw | 8b9e1bd72945023f6eb4567ff69ba13aa428a2d2 | [
"BSD-2-Clause"
] | null | null | null | tests/integration/test_github_actions.py | vikasbhatia/asyncpraw | 8b9e1bd72945023f6eb4567ff69ba13aa428a2d2 | [
"BSD-2-Clause"
] | null | null | null | tests/integration/test_github_actions.py | vikasbhatia/asyncpraw | 8b9e1bd72945023f6eb4567ff69ba13aa428a2d2 | [
"BSD-2-Clause"
] | 1 | 2020-12-13T05:10:35.000Z | 2020-12-13T05:10:35.000Z | """A test that is run only by Github Actions
This test makes real network requests, so environment variables
should be specified in Github Actions.
"""
import os
import pytest
from asyncpraw import Reddit
from asyncpraw.models import Submission
@pytest.mark.skipif(
not os.getenv("NETWORK_TEST_CLIENT_ID"),
reason="Not running from the NETWORK_TEST ci task on praw-dev/asyncpraw",
)
| 27.75 | 77 | 0.732304 | """A test that is run only by Github Actions
This test makes real network requests, so environment variables
should be specified in Github Actions.
"""
import os
import pytest
from asyncpraw import Reddit
from asyncpraw.models import Submission
@pytest.mark.skipif(
    not os.getenv("NETWORK_TEST_CLIENT_ID"),
    reason="Not running from the NETWORK_TEST ci task on praw-dev/asyncpraw",
)
async def test_github_actions():
    """Smoke-test a real Reddit round trip using CI-provided credentials.

    Fetches one hot submission from r/all and checks its type.
    """
    reddit = Reddit(
        client_id=os.getenv("NETWORK_TEST_CLIENT_ID"),
        client_secret=os.getenv("NETWORK_TEST_CLIENT_SECRET"),
        user_agent="Github Actions CI Testing",
    )
    try:
        subreddit = await reddit.subreddit("all")
        async for submission in subreddit.hot():
            assert isinstance(submission, Submission)
            break
    finally:
        # Close the underlying aiohttp session; the original leaked it and
        # asyncpraw emits "Unclosed client session" warnings otherwise.
        await reddit.close()
| 359 | 0 | 22 |
8b11b60ed5c0f29b6f6ce3c11d65a04193edc4fe | 216 | py | Python | src/tacotron/app/defaults.py | stefantaubert/tacotron | 9ac37fbf8789b4e7fe1067212a736074181b6fd8 | [
"MIT"
] | null | null | null | src/tacotron/app/defaults.py | stefantaubert/tacotron | 9ac37fbf8789b4e7fe1067212a736074181b6fd8 | [
"MIT"
] | 1 | 2021-11-11T08:50:32.000Z | 2021-11-19T12:39:06.000Z | src/tacotron/app/defaults.py | stefantaubert/tacotron | 9ac37fbf8789b4e7fe1067212a736074181b6fd8 | [
"MIT"
] | null | null | null | from pathlib import Path
# Default RNG seed; None means the seed is not fixed (non-reproducible runs).
DEFAULT_SEED = None
# Default number of repetitions (presumably per-input synthesis runs —
# confirm at the call sites).
DEFAULT_REPETITIONS = 1
# Safety cap on Tacotron decoder steps to stop runaway inference.
DEFAULT_MAX_DECODER_STEPS = 3000
# Default destination for a copy of the mel-spectrogram info JSON.
DEFAULT_SAVE_MEL_INFO_COPY_PATH = Path("/tmp/mel_out.json")
# Number of MCD coefficients per frame, taken from the original paper.
DEFAULT_MCD_NO_OF_COEFFS_PER_FRAME = 16
| 24 | 59 | 0.833333 | from pathlib import Path
# Default RNG seed; None means the seed is not fixed (non-reproducible runs).
DEFAULT_SEED = None
# Default number of repetitions (presumably per-input synthesis runs —
# confirm at the call sites).
DEFAULT_REPETITIONS = 1
# Safety cap on Tacotron decoder steps to stop runaway inference.
DEFAULT_MAX_DECODER_STEPS = 3000
# Default destination for a copy of the mel-spectrogram info JSON.
DEFAULT_SAVE_MEL_INFO_COPY_PATH = Path("/tmp/mel_out.json")
# Number of MCD coefficients per frame, taken from the original paper.
DEFAULT_MCD_NO_OF_COEFFS_PER_FRAME = 16
| 0 | 0 | 0 |
7b921be17fa34c3ac24248e5fc07fa00295c30e7 | 730 | py | Python | commong-bang-pre/submit.py | a414351664/TRAB-IKE | 3dd07221e1854c974127d7f6d0d95779a25166c0 | [
"MIT"
] | null | null | null | commong-bang-pre/submit.py | a414351664/TRAB-IKE | 3dd07221e1854c974127d7f6d0d95779a25166c0 | [
"MIT"
] | null | null | null | commong-bang-pre/submit.py | a414351664/TRAB-IKE | 3dd07221e1854c974127d7f6d0d95779a25166c0 | [
"MIT"
] | null | null | null | # coding:utf-8
import numpy as np
import json
import pandas as pd
pred_file = 'sort_hypo_glge_xsum_ck20_bsz1.txt.dedup'
sub_example = 'test_submission_example.jsonl'
save_file = 'submit.jsonl'
features = []
pred = []
example = []
# Read model predictions, one hypothesis per line.
with open(pred_file, 'r', encoding='utf-8') as infile:
    for line in infile:
        pred.append(line)
# drop_duplicates() returns a NEW frame; the original discarded its result,
# so de-duplication silently never happened.
pd_pred = pd.DataFrame({"pred": pred}).drop_duplicates()
# Read the submission template, one JSON record per line.
with open(sub_example, 'r', encoding='utf-8') as infile:
    for line in infile:
        # NOTE(review): the original executed `line[pred_scene]` here, which
        # raised NameError (`pred_scene` is undefined).  The template lines
        # are now only collected; TODO confirm how predictions should be
        # merged into each record.
        example.append(line)
# Write the submission file.  NOTE(review): `features` is never populated
# anywhere in this script, so the output is empty; presumably it should be
# built from `example` and `pd_pred` — confirm the intended record format.
with open(save_file, 'w', encoding="utf-8") as fout:
    for feature in features:
        fout.write(json.dumps(feature, ensure_ascii=False) + '\n')
import numpy as np
import json
import pandas as pd
pred_file = 'sort_hypo_glge_xsum_ck20_bsz1.txt.dedup'
sub_example = 'test_submission_example.jsonl'
save_file = 'submit.jsonl'
features = []
pred = []
example = []
with open(pred_file, 'r', encoding='utf-8') as infle:
for line in infle.readlines():
pred.append(line)
dict_pred = {"pred": pred}
pd_pred = pd.DataFrame(dict_pred)
pd_pred.drop_duplicates()
with open(sub_example, 'r', encoding='utf-8') as infle:
for line in infle.readlines():
line[pred_scene]
example.append(line)
with open(save_file, 'w', encoding="utf-8") as fout:
for feature in features:
fout.write(json.dumps(feature, ensure_ascii=False) + '\n') | 0 | 0 | 0 |
1c05a5e22add0bc09a7d9d6125322faf1956609c | 1,839 | py | Python | tests/generators.py | ppedemon/fplib | 61c221e967e924b3fd0a3014e80b331574d45f0c | [
"MIT"
] | null | null | null | tests/generators.py | ppedemon/fplib | 61c221e967e924b3fd0a3014e80b331574d45f0c | [
"MIT"
] | null | null | null | tests/generators.py | ppedemon/fplib | 61c221e967e924b3fd0a3014e80b331574d45f0c | [
"MIT"
] | null | null | null | import itertools
import operator
import random
import string
from fplib.ident import Id
from fplib.list import List
from fplib.maybe import (Just, Nothing)
from fplib.reader import Reader
from fplib.state import State
| 22.156627 | 78 | 0.685699 | import itertools
import operator
import random
import string
from fplib.ident import Id
from fplib.list import List
from fplib.maybe import (Just, Nothing)
from fplib.reader import Reader
from fplib.state import State
def repeatfunc(func, times=None, *args):
    """Repeat func(*args): forever when `times` is None, else `times` times.

    Mirrors the classic itertools 'repeatfunc' recipe.
    """
    argument_stream = (
        itertools.repeat(args) if times is None else itertools.repeat(args, times)
    )
    return itertools.starmap(func, argument_stream)
def random_ints(lo, hi):
    # Infinite stream of uniform random ints in [lo, hi] (both inclusive).
    return repeatfunc(random.randint, None, lo, hi)
def random_floats(lo, hi):
    # Infinite stream of uniform random floats in [lo, hi].
    return repeatfunc(random.uniform, None, lo, hi)
def random_word(length):
    """Return a string of `length` randomly chosen ASCII letters."""
    letters = string.ascii_letters
    return "".join([random.choice(letters) for _ in range(length)])
def random_strings(max_len):
    # Infinite stream of random words whose lengths are drawn from [0, max_len].
    return map(random_word, random_ints(0, max_len))
def random_list(gen, max_len):
    """Consume between 0 and `max_len` items from iterator `gen` into a list."""
    n_items = random.randint(0, max_len)
    return list(itertools.islice(gen, n_items))
def random_lists(gen, max_len):
    # Infinite stream of random-length lists drawn from `gen`.
    return repeatfunc(lambda: random_list(gen, max_len))
def random_Lists(gen, max_len):
    # Same as random_lists, but each list is wrapped in fplib's List type.
    return map(lambda xs: List(*xs), random_lists(gen, max_len))
def random_ids(gen):
    # Wrap each generated value in the Id functor.
    return map(Id, gen)
def random_maybes(gen):
    # Wrap values in Just ~80% of the time, otherwise yield Nothing.
    return map(lambda x: Just(x) if random.random() <= 0.8 else Nothing, gen)
def random_readers(gen):
    # Lift each generated value into the Reader monad.
    return map(Reader.unit, gen)
def random_states(gen):
    # Lift each generated value into the State monad.
    return map(State.unit, gen)
def random_functions(gen):
    # Stream of unary functions y -> y + x, one per generated x.
    return map(lambda x: lambda y: y + x, gen)
def random_state_functions(gen):
    # Stream of state transitions s -> (s + x, s), one per generated x.
    return map(lambda x: lambda s: (s + x, s), gen)
class FunEq:
    """Approximate extensional equality of two functions.

    Calling an instance with (f, g) draws `times` samples from `gen` and
    reports whether f and g agree (per `cmp_fun`) on every sample.
    """

    def __init__(self, times, gen, cmp_fun=operator.eq):
        self._times = times
        self._gen = gen
        self._cmp_fun = cmp_fun

    def __call__(self, f, g):
        for _ in range(self._times):
            sample = next(self._gen)
            if not self._cmp_fun(f(sample), g(sample)):
                return False
        return True
| 1,216 | -9 | 398 |
a06e1acc2651b527e61ef2935b70ffd5915ad8a5 | 908 | py | Python | tests/test_settings.py | Learning-and-Intelligent-Systems/predicators | 0b2e71cacf86ba2bfdc1d9059c3a78016d0a4d7e | [
"MIT"
] | 24 | 2021-11-20T16:35:41.000Z | 2022-03-30T03:49:52.000Z | tests/test_settings.py | Learning-and-Intelligent-Systems/predicators | 0b2e71cacf86ba2bfdc1d9059c3a78016d0a4d7e | [
"MIT"
] | 214 | 2021-10-12T01:17:50.000Z | 2022-03-31T20:18:36.000Z | tests/test_settings.py | Learning-and-Intelligent-Systems/predicators | 0b2e71cacf86ba2bfdc1d9059c3a78016d0a4d7e | [
"MIT"
] | 1 | 2022-02-15T20:24:17.000Z | 2022-02-15T20:24:17.000Z | """Test cases for some parts of the settings.py file."""
from predicators.src import utils
from predicators.src.settings import get_allowed_query_type_names
def test_get_allowed_query_type_names():
    """Test the get_allowed_query_type_names method."""
    # Default config: no query types are allowed.
    utils.reset_config()
    assert get_allowed_query_type_names() == set()
    # Learning options via direct behavior cloning allows path-to-state queries.
    utils.reset_config({
        "option_learner": "direct_bc",
    })
    assert get_allowed_query_type_names() == {"PathToStateQuery"}
    # The interactive-learning approach allows ground-atom queries.
    utils.reset_config({
        "option_learner": "no_learning",
        "approach": "interactive_learning"
    })
    assert get_allowed_query_type_names() == {"GroundAtomsHoldQuery"}
    # The unittest approach opens up every query type, incl. the mock one.
    utils.reset_config({
        "option_learner": "no_learning",
        "approach": "unittest"
    })
    assert get_allowed_query_type_names() == {
        "GroundAtomsHoldQuery", "DemonstrationQuery", "PathToStateQuery",
        "_MockQuery"
    }
| 32.428571 | 73 | 0.693833 | """Test cases for some parts of the settings.py file."""
from predicators.src import utils
from predicators.src.settings import get_allowed_query_type_names
def test_get_allowed_query_type_names():
    """Test the get_allowed_query_type_names method."""
    # (config dict or None for the defaults, expected query-type names)
    cases = [
        (None, set()),
        ({"option_learner": "direct_bc"}, {"PathToStateQuery"}),
        ({"option_learner": "no_learning", "approach": "interactive_learning"},
         {"GroundAtomsHoldQuery"}),
        ({"option_learner": "no_learning", "approach": "unittest"},
         {"GroundAtomsHoldQuery", "DemonstrationQuery", "PathToStateQuery",
          "_MockQuery"}),
    ]
    for config, expected in cases:
        if config is None:
            utils.reset_config()
        else:
            utils.reset_config(config)
        assert get_allowed_query_type_names() == expected
| 0 | 0 | 0 |
0219508ed6580e7eef295d7b66dd00023ec36692 | 3,421 | py | Python | data/cloudnet_dataset.py | aRI0U/cloudnet | 2bc7fd0f90898dc360b1e7ce4ff826bf79d75ec3 | [
"BSD-3-Clause"
] | null | null | null | data/cloudnet_dataset.py | aRI0U/cloudnet | 2bc7fd0f90898dc360b1e7ce4ff826bf79d75ec3 | [
"BSD-3-Clause"
] | null | null | null | data/cloudnet_dataset.py | aRI0U/cloudnet | 2bc7fd0f90898dc360b1e7ce4ff826bf79d75ec3 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import os
from PIL import Image
import torch
from torch.utils.data import Dataset
from data.base_dataset import get_posenet_transform
| 34.21 | 105 | 0.58404 | import numpy as np
import os
from PIL import Image
import torch
from torch.utils.data import Dataset
from data.base_dataset import get_posenet_transform
def qlog(q):
    """Map unit quaternions to R^3 via the quaternion logarithm.

    q: (N, 4) array of quaternions, scalar part first ([w, x, y, z]).
    Returns an (N, 3) array: arccos(w) * v / ||v||, the standard log map
    used for pose regression targets.

    Fixes two bugs in the original: `np.minimum(norm, 1e-8)` clamped the
    norm DOWN to <= 1e-8 (blowing the result up by ~1e8) instead of
    flooring it, and arccos was applied to the vector part instead of the
    scalar part.
    """
    n = np.maximum(np.linalg.norm(q[:, 1:], axis=1), 1e-8)
    log = q[:, 1:] * np.arccos(np.clip(q[:, :1], -1, 1))
    return log / n[:, np.newaxis]
class CloudNetDataset(Dataset):
    """Point-cloud (+ optional RGB image) dataset with 7-DoF pose targets.

    Reads `poses.txt` (';'-separated: frame id then 7 pose values) from
    `opt.dataroot` and serves per-frame point clouds, optionally paired
    with camera images when `opt.model == 'posepoint'`.
    """

    def initialize(self, opt):
        """Build the file lists and pose array and apply the train/val split."""
        self.opt = opt
        self.root = opt.dataroot
        driving_data = os.path.join(self.root, 'poses.txt')
        # select the folder corresponding to the right input type
        pc_path = os.path.join(self.root, 'PointCloudLocal1', 'point_cloud_%05d.npy')
        if self.opt.model == 'posepoint':
            img_path = os.path.join(self.root, 'CameraRGB1', 'image_%05d.png')
            self.mean_image = np.load(os.path.join(self.root, 'mean_image.npy'))
        frames = np.loadtxt(driving_data, dtype=int, usecols=0, delimiter=';', skiprows=1)
        poses = np.loadtxt(driving_data, dtype=float, usecols=(1,2,3,4,5,6,7), delimiter=';', skiprows=1)
        # splitting between training and test sets
        # (renamed from `set`, which shadowed the builtin)
        mask = np.ones(len(frames), dtype=bool)
        if opt.split > 0:
            if opt.isTrain or opt.phase == 'retrain':
                mask = frames % opt.split != 0
            elif opt.phase == 'val':
                mask = frames % opt.split == 0
        frames = frames[mask]
        self.pc_paths = [pc_path % f for f in frames]
        if self.opt.model == 'posepoint':
            self.img_paths = [img_path % f for f in frames]
            self.transform = get_posenet_transform(opt, self.mean_image)
        self.poses = poses[mask]
        if opt.criterion == 'log':
            # replace the quaternion by its log-map coordinates
            self.poses = np.concatenate((self.poses[:,:3], qlog(self.poses[:,3:])), axis=1)
        self.size = len(self.pc_paths)

    def __getitem__(self, index):
        """Return one sample dict; indices wrap modulo the dataset size."""
        sample_idx = index % self.size
        pc_path = self.pc_paths[sample_idx]
        pose = self.poses[sample_idx]
        pc = self._extract_file(pc_path)
        if self.opt.model == 'posepoint':
            img_path = self.img_paths[sample_idx]
            img = Image.open(img_path).convert('RGB')
            img = self.transform(img)
            return {'X_img': img, 'X_pc': pc, 'Y': pose, 'img_path': img_path, 'pc_path': pc_path}
        return {'X_pc': pc, 'Y': pose, 'pc_path': pc_path}

    def __len__(self):
        return self.size

    def name(self):
        return 'CloudNetDataset'

    def _extract_file(self, path):
        # type: (CloudNetDataset, str) -> torch.FloatTensor
        r"""
        Extracts a file and transform it into a usable tensor

        Parameters
        ----------
        path: str
            location of the file that has to be transformed

        Returns
        -------
        torch.FloatTensor
            (opt.n_points,opt.input_nc) tensor of point cloud
        """
        pc = self._sample(path, self.opt.input_nc, self.opt.n_points, self.opt.sampling)
        return torch.from_numpy(pc)

    @staticmethod
    def _sample(path, input_nc, n_points, sampling):
        """Load a .npy point cloud and keep at most n_points x input_nc values.

        'fps' takes the first n_points rows (the files are presumably already
        ordered by farthest-point sampling — confirm upstream); 'uni' takes
        every k-th row so roughly n_points rows remain.
        """
        if sampling == 'fps':
            return np.load(path, mmap_mode='r')[:n_points, :input_nc]
        if sampling == 'uni':
            data = np.load(path, mmap_mode='r')
            # guard against a zero step when the cloud has fewer points than
            # requested (the original raised "slice step cannot be zero")
            step = max(1, len(data) // n_points)
            return data[::step][:n_points, :input_nc]
        raise ValueError('Sampling [%s] does not exist' % sampling)
| 2,475 | 744 | 46 |
46bb17e9bbf8bbd95ddfa29336b52f227f47c75e | 2,515 | py | Python | common/imagebusutil.py | eusholli/imagebus | 79ef67cb5305e3bda7131d5c85a16b985b80c673 | [
"MIT"
] | 1 | 2020-01-20T11:45:57.000Z | 2020-01-20T11:45:57.000Z | common/imagebusutil.py | eusholli/imagebus | 79ef67cb5305e3bda7131d5c85a16b985b80c673 | [
"MIT"
] | 18 | 2019-12-27T01:44:12.000Z | 2022-03-12T00:07:25.000Z | common/imagebusutil.py | eusholli/imagebus | 79ef67cb5305e3bda7131d5c85a16b985b80c673 | [
"MIT"
] | 2 | 2019-12-14T04:07:07.000Z | 2019-12-26T18:19:50.000Z | from enum import Enum
import datetime
import base64
if __name__ == "__main__":
print("Welcome to imagebusutil module")
| 26.473684 | 153 | 0.555865 | from enum import Enum
import datetime
import base64
class ImagebusTopic(Enum):
    """Bus topics a frame can be published on (one per pipeline stage)."""
    SOURCE_FRAME = 1
    IMAGEAI_FRAME = 2
    REDACTION_FRAME = 3
    def __str__(self):
        # Render just the member name (e.g. "SOURCE_FRAME") instead of the
        # default "ImagebusTopic.SOURCE_FRAME" form.
        return self.name
class FrameDetails:
    """Metadata and payload for a single frame travelling on the image bus."""

    def __init__(
        self,
        name,
        topic,
        frameRate=1,
        url=None,
        parent=None,
        details=None,
        speed=None,
        average=None,
    ):
        """Create frame metadata; `name` falls back to `url` when None."""
        print(
            "Create FrameDetails: name=%s frameRate=%d url=%s, topic=%s"
            % (name, frameRate, url, topic)
        )
        if name is None:
            self.name = url
        else:
            self.name = name
        self.frameRate = frameRate
        self.url = url
        self.dateTime = datetime.datetime.now()
        self.frameReference = 0
        # Raw encoded frame bytes; populated later by setFrame().
        self.image = None
        self.topic = topic
        self.parent = parent
        self.details = details
        self.speed = speed
        self.average = average

    @staticmethod
    def datetimefilter(value, format="%H:%M:%S.%f"):
        """convert a datetime to a different format.

        Bug fix: the original unconditionally overwrote the `format`
        argument with "%H:%M:%S.%f", so callers could never choose a
        format.  That string is now the default (preserving the previous
        effective behaviour) and an explicit argument is honoured.
        """
        return value.strftime(format)

    def __str__(self):
        return "FrameDetails( \n Name={} \n url={} \n frameRate={} \n frameReference={} \n topic={} \n datetime={} \n parent={} \n details={}\n)".format(
            self.name,
            self.url,
            self.frameRate,
            self.frameReference,
            self.topic,
            self.dateTime,
            self.parent,
            self.details,
        )

    def setFrame(self, frameReference, image):
        """Attach a frame payload and stamp it with the current time."""
        self.frameReference = frameReference
        self.image = image
        self.dateTime = datetime.datetime.now()

    def setChildFrame(self, frameReference, image, details, parent, speed, average):
        """Attach a derived frame along with its provenance and timing info."""
        self.setFrame(frameReference, image)
        self.details = details
        self.parent = parent
        self.speed = speed
        self.average = average

    def createResponse(self):
        """Return a JSON-serialisable dict of this frame (image base64-encoded)."""
        encoded_image = "data:image/jpg;base64," + base64.b64encode(self.image).decode(
            "utf8"
        )
        return {
            "name": self.name,
            "image": encoded_image,
            "time": self.datetimefilter(self.dateTime),
            "frame_reference": self.frameReference,
            "details": self.details,
            "performance": {"speed": self.speed, "average": self.average},
        }
# Smoke message when the module is executed directly (no side effects on import).
if __name__ == "__main__":
    print("Welcome to imagebusutil module")
| 1,904 | 438 | 46 |
0c7b858125e1e9c10ccae3e2ccc6d902b5722252 | 894 | py | Python | azdev/operations/linter/rules/command_group_rules.py | qwordy/azure-cli-dev-tools | cb0349814fa3a32744af214a3e0e8607287104fc | [
"MIT"
] | 1 | 2020-03-24T13:33:25.000Z | 2020-03-24T13:33:25.000Z | azdev/operations/linter/rules/command_group_rules.py | qwordy/azure-cli-dev-tools | cb0349814fa3a32744af214a3e0e8607287104fc | [
"MIT"
] | null | null | null | azdev/operations/linter/rules/command_group_rules.py | qwordy/azure-cli-dev-tools | cb0349814fa3a32744af214a3e0e8607287104fc | [
"MIT"
] | 1 | 2019-06-01T15:00:27.000Z | 2019-06-01T15:00:27.000Z | # -----------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -----------------------------------------------------------------------------
from ..rule_decorators import command_group_rule
from ..linter import RuleError
@command_group_rule
@command_group_rule
| 40.636364 | 119 | 0.646532 | # -----------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -----------------------------------------------------------------------------
from ..rule_decorators import command_group_rule
from ..linter import RuleError
@command_group_rule
def missing_group_help(linter, command_group_name):
    """Flag non-root, non-expired command groups that have no help text."""
    is_root_group = command_group_name == ''
    if is_root_group or linter.command_group_expired(command_group_name):
        return
    if not linter.get_command_group_help(command_group_name):
        raise RuleError('Missing help')
@command_group_rule
def expired_command_group(linter, command_group_name):
    """Flag command groups whose deprecation period has already elapsed."""
    if not linter.command_group_expired(command_group_name):
        return
    raise RuleError("Deprecated command group is expired and should be removed.")
| 408 | 0 | 44 |
0fae0ccbf45216853e271858a6b4207da029286b | 779 | py | Python | Python/general/delegation.py | J0shu4B0y/Practice | ea6af3773520f12afcf72f25952a80614d0c13ef | [
"MIT"
] | 1 | 2021-07-18T08:40:05.000Z | 2021-07-18T08:40:05.000Z | Python/general/delegation.py | J0shu4B0y/Practice | ea6af3773520f12afcf72f25952a80614d0c13ef | [
"MIT"
] | 3 | 2020-02-21T13:43:44.000Z | 2020-02-21T13:54:57.000Z | Python/general/delegation.py | J0shu4B0y/Practice-Python | ea6af3773520f12afcf72f25952a80614d0c13ef | [
"MIT"
] | 2 | 2019-07-19T10:09:08.000Z | 2020-09-20T08:07:36.000Z | # Процесс делегирования в Python
file = File()
instance = Upcase(file)
instance.write('test')
instance.open()
print(instance._outfile)
#print(instance.hello())
#print(instance.hello())
#print(instance.write('test'))
#print(instance._outfile)
#print(instance.write('asd'))
#print(instance._outfile)
#print(instance._outfile) | 20.5 | 46 | 0.639281 | # Процесс делегирования в Python
class Upcase():
    """Wrapper that upper-cases writes and delegates every other attribute
    lookup to the wrapped object."""

    def __init__(self, out):
        self._out = out

    def write(self, s):
        # Handled locally (no delegation): store the upper-cased payload.
        upper_text = s.upper()
        self._outfile = upper_text

    def __getattr__(self, name):
        # Invoked only when normal lookup fails; forward to the wrapped object.
        print('Looking up', name)
        target = self._out
        return getattr(target, name)
class File():
    """Toy file holder used to demonstrate attribute delegation."""

    def __init__(self):
        self._outfile = 'test'

    def open(self):
        # NOTE(review): the `with` block closes the handle before it is
        # returned, so callers receive an already-closed file object.
        with open(self._outfile, 'w') as handle:
            return handle

    def write(self):
        self._outfile = 'test2'
# Build a File and wrap it in the upper-casing delegator.
file = File()
instance = Upcase(file)
# Handled by Upcase.write itself (no delegation): stores 'TEST' on the wrapper.
instance.write('test')
# Upcase has no `open`, so __getattr__ forwards to File.open, which
# creates/truncates the file named by file._outfile ('test') on disk.
instance.open()
# `_outfile` was set on the wrapper by write() above, so no delegation here.
print(instance._outfile)
#print(instance.hello())
#print(instance.hello())
#print(instance.write('test'))
#print(instance._outfile)
#print(instance.write('asd'))
#print(instance._outfile)
#print(instance._outfile)
0cfb8ed596ae8e7e5a700f718c6059fbb62623dd | 766 | py | Python | GoogleSheetLocalizationExport.py | muzle/GoogleSheetLocalizationExport | 2fdd5bce7790409b0550db9b8450f3703e7a4e04 | [
"MIT"
] | 2 | 2021-12-29T00:03:58.000Z | 2021-12-29T19:20:24.000Z | GoogleSheetLocalizationExport.py | muzle/GoogleSheetLocalizationExport | 2fdd5bce7790409b0550db9b8450f3703e7a4e04 | [
"MIT"
] | null | null | null | GoogleSheetLocalizationExport.py | muzle/GoogleSheetLocalizationExport | 2fdd5bce7790409b0550db9b8450f3703e7a4e04 | [
"MIT"
] | null | null | null | import sys
from yamlSupport.YamlConfigurationSupport import YamlConfigurationSupport
from googleSheetSupport.GoogleSheetSupport import GoogleSheetSupport
from iosSupport.iOSLocalizationSettings import iOSLocalizationSettings
if __name__ == '__main__':
args = sys.argv
if len(args) < 2:
log_menu()
elif args[1] == "cnfg":
YamlConfigurationSupport.create_default()
elif args[1] == "fetch":
config = YamlConfigurationSupport.get_config()
localizations = GoogleSheetSupport.get_localizations()
iOSLocalizationSettings.create(localizations, config)
else:
log_menu()
| 31.916667 | 100 | 0.72846 | import sys
from yamlSupport.YamlConfigurationSupport import YamlConfigurationSupport
from googleSheetSupport.GoogleSheetSupport import GoogleSheetSupport
from iosSupport.iOSLocalizationSettings import iOSLocalizationSettings
def log_menu():
    """Print the CLI usage menu."""
    print("""commands:\n1. cnfg - create default localization.yml\n2. fetch - fetch localization""")
# Simple CLI dispatcher: `cnfg` writes a default localization.yml, `fetch`
# pulls localizations and emits the iOS localization files; anything else
# (or no argument) prints the usage menu.
if __name__ == '__main__':
    args = sys.argv
    if len(args) < 2:
        log_menu()
    elif args[1] == "cnfg":
        YamlConfigurationSupport.create_default()
    elif args[1] == "fetch":
        # Read the config, fetch from the Google Sheet, write iOS files.
        config = YamlConfigurationSupport.get_config()
        localizations = GoogleSheetSupport.get_localizations()
        iOSLocalizationSettings.create(localizations, config)
    else:
        log_menu()
| 111 | 0 | 23 |
db8cdf360eb4b2f9f9181c8aab45f97a431cb628 | 3,005 | py | Python | ds_discovery/sample/map_us_phone_code.py | project-hadron/discovery-transition-ds | 08229ca3b7617b42ce2dd8e47ff93876c0843810 | [
"BSD-3-Clause"
] | 2 | 2020-09-21T17:24:16.000Z | 2021-05-28T18:02:54.000Z | ds_discovery/sample/map_us_phone_code.py | project-hadron/discovery-transition-ds | 08229ca3b7617b42ce2dd8e47ff93876c0843810 | [
"BSD-3-Clause"
] | null | null | null | ds_discovery/sample/map_us_phone_code.py | project-hadron/discovery-transition-ds | 08229ca3b7617b42ce2dd8e47ff93876c0843810 | [
"BSD-3-Clause"
] | 1 | 2021-07-23T13:52:04.000Z | 2021-07-23T13:52:04.000Z | data={'State': ['Alabama', 'Alaska', 'Arizona', 'Arkansas', 'California', 'Colorado', 'Connecticut', 'Delaware', 'Florida', 'Georgia', 'Hawaii', 'Idaho', 'Illinois', 'Indiana', 'Iowa', 'Kansas', 'Kentucky', 'Louisiana', 'Maine', 'Maryland', 'Massachusetts', 'Michigan', 'Minnesota', 'Mississippi', 'Missouri', 'Montana', 'Nebraska', 'Nevada', 'New Hampshire', 'New Jersey', 'New Mexico', 'New York', 'North Carolina', 'North Dakota', 'Ohio', 'Oklahoma', 'Oregon', 'Pennsylvania', 'Rhode Island', 'South Carolina', 'South Dakota', 'Tennessee', 'Texas', 'Utah', 'Vermont', 'Virginia', 'Washington', 'Washington DC', 'West Virginia', 'Wisconsin', 'Wyoming'], 'AreaCode': [['205', '251', '256', '334', '938'], ['907'], ['480', '520', '602', '623', '928'], ['479', '501', '870'], ['209', '213', '279', '310', '323', '408', '415', '424', '442', '510', '530', '559', '562', '619', '626', '628', '650', '657', '661', '669', '707', '714', '747', '760', '805', '818', '820', '831', '858', '909', '916', '925', '949', '951'], ['303', '719', '720', '970'], ['203', '475', '860', '959'], ['302'], ['239', '305', '321', '352', '386', '407', '561', '727', '754', '772', '786', '813', '850', '863', '904', '941', '954'], ['229', '404', '470', '478', '678', '706', '762', '770', '912'], ['808'], ['208', '986'], ['217', '224', '309', '312', '331', '618', '630', '708', '773', '779', '815', '847', '872'], ['219', '260', '317', '463', '574', '765', '812', '930'], ['319', '515', '563', '641', '712'], ['316', '620', '785', '913'], ['270', '364', '502', '606', '859'], ['225', '318', '337', '504', '985'], ['207'], ['240', '301', '410', '443', '667'], ['339', '351', '413', '508', '617', '774', '781', '857', '978'], ['231', '248', '269', '313', '517', '586', '616', '734', '810', '906', '947', '989'], ['218', '320', '507', '612', '651', '763', '952'], ['228', '601', '662', '769'], ['314', '417', '573', '636', '660', '816'], ['406'], ['308', '402', 
'531'], ['702', '725', '775'], ['603'], ['201', '551', '609', '640', '732', '848', '856', '862', '908', '973'], ['505', '575'], ['212', '315', '332', '347', '516', '518', '585', '607', '631', '646', '680', '716', '718', '838', '845', '914', '917', '929', '934'], ['252', '336', '704', '743', '828', '910', '919', '980', '984'], ['701'], ['216', '220', '234', '330', '380', '419', '440', '513', '567', '614', '740', '937'], ['405', '539', '580', '918'], ['458', '503', '541', '971'], ['215', '223', '267', '272', '412', '445', '484', '570', '610', '717', '724', '814', '878'], ['401'], ['803', '843', '854', '864'], ['605'], ['423', '615', '629', '731', '865', '901', '931'], ['210', '214', '254', '281', '325', '346', '361', '409', '430', '432', '469', '512', '682', '713', '726', '737', '806', '817', '830', '832', '903', '915', '936', '940', '956', '972', '979'], ['385', '435', '801'], ['802'], ['276', '434', '540', '571', '703', '757', '804'], ['206', '253', '360', '425', '509', '564'], ['202'], ['304', '681'], ['262', '414', '534', '608', '715', '920'], ['307']]}
| 1,502.5 | 3,004 | 0.465225 | data={'State': ['Alabama', 'Alaska', 'Arizona', 'Arkansas', 'California', 'Colorado', 'Connecticut', 'Delaware', 'Florida', 'Georgia', 'Hawaii', 'Idaho', 'Illinois', 'Indiana', 'Iowa', 'Kansas', 'Kentucky', 'Louisiana', 'Maine', 'Maryland', 'Massachusetts', 'Michigan', 'Minnesota', 'Mississippi', 'Missouri', 'Montana', 'Nebraska', 'Nevada', 'New Hampshire', 'New Jersey', 'New Mexico', 'New York', 'North Carolina', 'North Dakota', 'Ohio', 'Oklahoma', 'Oregon', 'Pennsylvania', 'Rhode Island', 'South Carolina', 'South Dakota', 'Tennessee', 'Texas', 'Utah', 'Vermont', 'Virginia', 'Washington', 'Washington DC', 'West Virginia', 'Wisconsin', 'Wyoming'], 'AreaCode': [['205', '251', '256', '334', '938'], ['907'], ['480', '520', '602', '623', '928'], ['479', '501', '870'], ['209', '213', '279', '310', '323', '408', '415', '424', '442', '510', '530', '559', '562', '619', '626', '628', '650', '657', '661', '669', '707', '714', '747', '760', '805', '818', '820', '831', '858', '909', '916', '925', '949', '951'], ['303', '719', '720', '970'], ['203', '475', '860', '959'], ['302'], ['239', '305', '321', '352', '386', '407', '561', '727', '754', '772', '786', '813', '850', '863', '904', '941', '954'], ['229', '404', '470', '478', '678', '706', '762', '770', '912'], ['808'], ['208', '986'], ['217', '224', '309', '312', '331', '618', '630', '708', '773', '779', '815', '847', '872'], ['219', '260', '317', '463', '574', '765', '812', '930'], ['319', '515', '563', '641', '712'], ['316', '620', '785', '913'], ['270', '364', '502', '606', '859'], ['225', '318', '337', '504', '985'], ['207'], ['240', '301', '410', '443', '667'], ['339', '351', '413', '508', '617', '774', '781', '857', '978'], ['231', '248', '269', '313', '517', '586', '616', '734', '810', '906', '947', '989'], ['218', '320', '507', '612', '651', '763', '952'], ['228', '601', '662', '769'], ['314', '417', '573', '636', '660', '816'], ['406'], ['308', '402', '531'], ['702', '725', '775'], 
['603'], ['201', '551', '609', '640', '732', '848', '856', '862', '908', '973'], ['505', '575'], ['212', '315', '332', '347', '516', '518', '585', '607', '631', '646', '680', '716', '718', '838', '845', '914', '917', '929', '934'], ['252', '336', '704', '743', '828', '910', '919', '980', '984'], ['701'], ['216', '220', '234', '330', '380', '419', '440', '513', '567', '614', '740', '937'], ['405', '539', '580', '918'], ['458', '503', '541', '971'], ['215', '223', '267', '272', '412', '445', '484', '570', '610', '717', '724', '814', '878'], ['401'], ['803', '843', '854', '864'], ['605'], ['423', '615', '629', '731', '865', '901', '931'], ['210', '214', '254', '281', '325', '346', '361', '409', '430', '432', '469', '512', '682', '713', '726', '737', '806', '817', '830', '832', '903', '915', '936', '940', '956', '972', '979'], ['385', '435', '801'], ['802'], ['276', '434', '540', '571', '703', '757', '804'], ['206', '253', '360', '425', '509', '564'], ['202'], ['304', '681'], ['262', '414', '534', '608', '715', '920'], ['307']]}
| 0 | 0 | 0 |
f68c5286b6e457069b84e1113796f3322531cc37 | 1,070 | py | Python | PersianSwear.py | amirshnll/Persian-Swear-Words | 8aa6df8b55262b8cb85731342e78c769d974dff9 | [
"MIT"
] | 180 | 2020-09-22T11:40:11.000Z | 2022-03-25T18:39:37.000Z | PersianSwear.py | Nimaw/Persian-Swear-Words | 8aa6df8b55262b8cb85731342e78c769d974dff9 | [
"MIT"
] | 9 | 2020-11-16T13:19:39.000Z | 2021-08-29T09:34:10.000Z | PersianSwear.py | Nimaw/Persian-Swear-Words | 8aa6df8b55262b8cb85731342e78c769d974dff9 | [
"MIT"
] | 38 | 2020-11-16T12:58:21.000Z | 2022-02-19T11:48:06.000Z | """
swearWords Class
Author : Sorosh Safari @coci
created date : 7 October, 2021
updated date : 11 October, 2021
"""
# return string
# return boolean
# return nothing
# return nothing
# return boolean
# return boolean
# return string | 19.107143 | 42 | 0.657009 | """
swearWords Class
Author : Sorosh Safari @coci
created date : 7 October, 2021
updated date : 11 October, 2021
"""
class PersianSwear(object):
    """Detect and censor Persian swear words.

    The backing store is ``self.data`` -- a dict whose ``'word'`` key holds
    the list of known swear words, loaded from ``data.json``.
    """

    def __init__(self):
        # utf-8 is explicit because the word list contains Persian text;
        # `with` guarantees the file handle is closed (the original leaked it).
        with open('data.json', encoding='utf-8') as fh:
            self.data = json.load(fh)

    # return string
    def filter_words(self, text, symbol="*"):
        """Return *text* with every whitespace-separated swear word replaced by *symbol*."""
        if self.is_empty():
            return text
        words = text.split()
        return " ".join(
            symbol if word in self.data['word'] else word for word in words
        )

    # return boolean
    def is_empty(self):
        """Return True when no swear words are loaded."""
        return len(self.data['word']) < 1

    # return nothing
    def add_word(self, text):
        """Add *text* to the swear-word list (in memory only)."""
        self.data['word'].append(text)

    # return nothing
    def remove_word(self, text):
        """Remove *text* from the swear-word list; raises ValueError if absent."""
        self.data['word'].remove(text)

    # return boolean
    def is_bad(self, text):
        """Return True when *text* itself is a known swear word."""
        return text in self.data['word']

    # return boolean
    def has_swear(self, text):
        """Return True when any whitespace-separated token of *text* is a swear word.

        Bug fix: the original returned the input string (not a boolean) when
        the word list was empty; an empty list now yields False.
        """
        if self.is_empty():
            return False
        return any(word in self.data['word'] for word in text.split())

    # return string
    def tostring(self):
        """Return all swear words joined by ' - '."""
        return ' - '.join(self.data['word'])
c1e74d087d61927f3b6d66fade602932fce137bd | 858 | py | Python | centralreport/test.py | Ninir/CentralReport | 655b060ae0aa7df404f3799824e7635f5997e662 | [
"Apache-2.0",
"ZPL-2.0"
] | 1 | 2015-11-03T14:32:01.000Z | 2015-11-03T14:32:01.000Z | centralreport/test.py | Ninir/CentralReport | 655b060ae0aa7df404f3799824e7635f5997e662 | [
"Apache-2.0",
"ZPL-2.0"
] | null | null | null | centralreport/test.py | Ninir/CentralReport | 655b060ae0aa7df404f3799824e7635f5997e662 | [
"Apache-2.0",
"ZPL-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
CentralReport - Test script
Launchs CentralReport in debug mode, without installation.
Please verify CR is not installed on your host before launch this script.
https://github.com/miniche/CentralReport/
"""
import centralreport
import sys
import os
print '--- CentralReport debug mode. ---'
print 'This tool is only for debug purpose. For running CR in production env, use python centralreport.py start instead.'
print '---------------------------------'
print ''
cr = centralreport.CentralReport('/tmp/centralreport_debug.pid')
return_value = cr.debug()
print ''
print '---------------------------------'
print 'Ending debug mode'
print '---------------------------------'
# CentralReport is now stopped, we can kill current process
os.system('kill %d' % os.getpid())
sys.exit(0)
| 26.8125 | 121 | 0.632867 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
CentralReport - Test script
Launchs CentralReport in debug mode, without installation.
Please verify CR is not installed on your host before launch this script.
https://github.com/miniche/CentralReport/
"""
# NOTE: Python 2 script (statement-form `print`).
import centralreport
import sys
import os
# Warn the operator that this entry point is for debugging only.
print '--- CentralReport debug mode. ---'
print 'This tool is only for debug purpose. For running CR in production env, use python centralreport.py start instead.'
print '---------------------------------'
print ''
# Run CentralReport in the foreground using a debug-specific PID file path.
cr = centralreport.CentralReport('/tmp/centralreport_debug.pid')
return_value = cr.debug()  # captured but not inspected below
print ''
print '---------------------------------'
print 'Ending debug mode'
print '---------------------------------'
# CentralReport is now stopped, we can kill current process
os.system('kill %d' % os.getpid())
sys.exit(0)
| 0 | 0 | 0 |
429da5a042a803a782ed7b16c9d0971a3455252e | 15,452 | py | Python | mayan/apps/sources/models/email_sources.py | O2Graphics/Mayan-EDMS | e11e6f47240f3c536764be66828dbe6428dceb41 | [
"Apache-2.0"
] | null | null | null | mayan/apps/sources/models/email_sources.py | O2Graphics/Mayan-EDMS | e11e6f47240f3c536764be66828dbe6428dceb41 | [
"Apache-2.0"
] | 5 | 2021-03-19T22:56:45.000Z | 2022-03-12T00:08:43.000Z | mayan/apps/sources/models/email_sources.py | halsten/mayan-edms | 10372daede6e6dea0bea67eb98767e3be6fbf86f | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
import imaplib
import logging
import poplib
from django.core.exceptions import ValidationError
from django.core.files.base import ContentFile
from django.db import models
from django.utils.encoding import force_bytes
from django.utils.translation import ugettext_lazy as _
from mayan.apps.common.serialization import yaml_load
from mayan.apps.documents.models import Document
from mayan.apps.metadata.api import set_bulk_metadata
from mayan.apps.metadata.models import MetadataType
from ..exceptions import SourceException
from ..literals import (
DEFAULT_IMAP_MAILBOX, DEFAULT_IMAP_SEARCH_CRITERIA,
DEFAULT_IMAP_STORE_COMMANDS, DEFAULT_METADATA_ATTACHMENT_NAME,
DEFAULT_POP3_TIMEOUT, SOURCE_CHOICE_EMAIL_IMAP, SOURCE_CHOICE_EMAIL_POP3,
SOURCE_UNCOMPRESS_CHOICE_N, SOURCE_UNCOMPRESS_CHOICE_Y,
)
from .base import IntervalBaseModel
__all__ = ('IMAPEmail', 'POP3Email')
logger = logging.getLogger(__name__)
class EmailBaseModel(IntervalBaseModel):
"""
POP3 email and IMAP email sources are non-interactive sources that
periodically fetch emails from an email account using either the POP3 or
IMAP email protocol. These sources are useful when users need to scan
documents outside their office, they can photograph a paper document with
their phones and send the image to a designated email that is setup as a
Mayan POP3 or IMAP source. Mayan will periodically download the emails
and process them as Mayan documents.
"""
host = models.CharField(max_length=128, verbose_name=_('Host'))
ssl = models.BooleanField(default=True, verbose_name=_('SSL'))
port = models.PositiveIntegerField(blank=True, null=True, help_text=_(
'Typical choices are 110 for POP3, 995 for POP3 over SSL, 143 for '
'IMAP, 993 for IMAP over SSL.'), verbose_name=_('Port')
)
username = models.CharField(max_length=96, verbose_name=_('Username'))
password = models.CharField(max_length=96, verbose_name=_('Password'))
metadata_attachment_name = models.CharField(
default=DEFAULT_METADATA_ATTACHMENT_NAME,
help_text=_(
'Name of the attachment that will contains the metadata type '
'names and value pairs to be assigned to the rest of the '
'downloaded attachments.'
), max_length=128, verbose_name=_('Metadata attachment name')
)
subject_metadata_type = models.ForeignKey(
blank=True, help_text=_(
'Select a metadata type valid for the document type selected in '
'which to store the email\'s subject.'
), on_delete=models.CASCADE, null=True, related_name='email_subject',
to=MetadataType, verbose_name=_('Subject metadata type')
)
from_metadata_type = models.ForeignKey(
blank=True, help_text=_(
'Select a metadata type valid for the document type selected in '
'which to store the email\'s "from" value.'
), on_delete=models.CASCADE, null=True, related_name='email_from',
to=MetadataType, verbose_name=_('From metadata type')
)
store_body = models.BooleanField(
default=True, help_text=_(
'Store the body of the email as a text document.'
), verbose_name=_('Store email body')
)
objects = models.Manager()
@staticmethod
@staticmethod
| 39.218274 | 120 | 0.563357 | from __future__ import unicode_literals
import imaplib
import logging
import poplib
from django.core.exceptions import ValidationError
from django.core.files.base import ContentFile
from django.db import models
from django.utils.encoding import force_bytes
from django.utils.translation import ugettext_lazy as _
from mayan.apps.common.serialization import yaml_load
from mayan.apps.documents.models import Document
from mayan.apps.metadata.api import set_bulk_metadata
from mayan.apps.metadata.models import MetadataType
from ..exceptions import SourceException
from ..literals import (
DEFAULT_IMAP_MAILBOX, DEFAULT_IMAP_SEARCH_CRITERIA,
DEFAULT_IMAP_STORE_COMMANDS, DEFAULT_METADATA_ATTACHMENT_NAME,
DEFAULT_POP3_TIMEOUT, SOURCE_CHOICE_EMAIL_IMAP, SOURCE_CHOICE_EMAIL_POP3,
SOURCE_UNCOMPRESS_CHOICE_N, SOURCE_UNCOMPRESS_CHOICE_Y,
)
from .base import IntervalBaseModel
__all__ = ('IMAPEmail', 'POP3Email')
logger = logging.getLogger(__name__)
class EmailBaseModel(IntervalBaseModel):
    """
    POP3 email and IMAP email sources are non-interactive sources that
    periodically fetch emails from an email account using either the POP3 or
    IMAP email protocol. These sources are useful when users need to scan
    documents outside their office, they can photograph a paper document with
    their phones and send the image to a designated email that is setup as a
    Mayan POP3 or IMAP source. Mayan will periodically download the emails
    and process them as Mayan documents.
    """
    # Mail server connection settings.
    host = models.CharField(max_length=128, verbose_name=_('Host'))
    ssl = models.BooleanField(default=True, verbose_name=_('SSL'))
    port = models.PositiveIntegerField(blank=True, null=True, help_text=_(
        'Typical choices are 110 for POP3, 995 for POP3 over SSL, 143 for '
        'IMAP, 993 for IMAP over SSL.'), verbose_name=_('Port')
    )
    username = models.CharField(max_length=96, verbose_name=_('Username'))
    password = models.CharField(max_length=96, verbose_name=_('Password'))
    # An attachment whose filename equals this value is not stored as a
    # document; its content is parsed as YAML and applied as metadata to the
    # other stored attachments (see _process_message).
    metadata_attachment_name = models.CharField(
        default=DEFAULT_METADATA_ATTACHMENT_NAME,
        help_text=_(
            'Name of the attachment that will contains the metadata type '
            'names and value pairs to be assigned to the rest of the '
            'downloaded attachments.'
        ), max_length=128, verbose_name=_('Metadata attachment name')
    )
    # Optional metadata types used to record the message's Subject and From
    # headers on every document created from a message.
    subject_metadata_type = models.ForeignKey(
        blank=True, help_text=_(
            'Select a metadata type valid for the document type selected in '
            'which to store the email\'s subject.'
        ), on_delete=models.CASCADE, null=True, related_name='email_subject',
        to=MetadataType, verbose_name=_('Subject metadata type')
    )
    from_metadata_type = models.ForeignKey(
        blank=True, help_text=_(
            'Select a metadata type valid for the document type selected in '
            'which to store the email\'s "from" value.'
        ), on_delete=models.CASCADE, null=True, related_name='email_from',
        to=MetadataType, verbose_name=_('From metadata type')
    )
    store_body = models.BooleanField(
        default=True, help_text=_(
            'Store the body of the email as a text document.'
        ), verbose_name=_('Store email body')
    )
    objects = models.Manager()

    class Meta:
        verbose_name = _('Email source')
        verbose_name_plural = _('Email sources')

    @staticmethod
    def process_message(source, message_text):
        """Parse one raw message, create documents from it, apply metadata.

        Parses *message_text* with flanker, optionally captures the From and
        Subject headers as metadata, walks the MIME tree via
        _process_message, then applies the collected metadata to every
        document that was created.
        """
        from flanker import mime  # imported lazily; only needed here
        metadata_dictionary = {}
        message = mime.from_string(force_bytes(message_text))
        if source.from_metadata_type:
            metadata_dictionary[
                source.from_metadata_type.name
            ] = message.headers.get('From')
        if source.subject_metadata_type:
            metadata_dictionary[
                source.subject_metadata_type.name
            ] = message.headers.get('Subject')
        document_ids, parts_metadata_dictionary = EmailBaseModel._process_message(source=source, message=message)
        metadata_dictionary.update(parts_metadata_dictionary)
        if metadata_dictionary:
            for document in Document.objects.filter(id__in=document_ids):
                set_bulk_metadata(
                    document=document,
                    metadata_dictionary=metadata_dictionary
                )

    @staticmethod
    def _process_message(source, message):
        """Depth-first walk of the MIME tree.

        Returns a tuple ``(document_ids, metadata_dictionary)``: the ids of
        documents created from this subtree and any metadata parsed from a
        metadata attachment found in it.
        """
        counter = 1
        # NOTE(review): counter is local and reset on every recursive call,
        # so the fallback label below is effectively always 'attachment-1'
        # for nameless attachments -- confirm whether unique numbering was
        # intended.
        document_ids = []
        metadata_dictionary = {}
        # Messages are tree based, do nested processing of message parts until
        # a message with no children is found, then work out way up.
        if message.parts:
            for part in message.parts:
                part_document_ids, part_metadata_dictionary = EmailBaseModel._process_message(
                    source=source, message=part,
                )
                document_ids.extend(part_document_ids)
                metadata_dictionary.update(part_metadata_dictionary)
        else:
            # Treat inlines as attachments, both are extracted and saved as
            # documents
            if message.is_attachment() or message.is_inline():
                # Reject zero length attachments
                if len(message.body) == 0:
                    return document_ids, metadata_dictionary
                label = message.detected_file_name or 'attachment-{}'.format(counter)
                counter = counter + 1
                with ContentFile(content=message.body, name=label) as file_object:
                    # The designated metadata attachment is consumed, not
                    # stored: its YAML content becomes the metadata applied
                    # to the other documents of this message.
                    if label == source.metadata_attachment_name:
                        metadata_dictionary = yaml_load(
                            stream=file_object.read()
                        )
                        logger.debug(
                            'Got metadata dictionary: %s',
                            metadata_dictionary
                        )
                    else:
                        documents = source.handle_upload(
                            document_type=source.document_type,
                            file_object=file_object, expand=(
                                source.uncompress == SOURCE_UNCOMPRESS_CHOICE_Y
                            )
                        )
                        for document in documents:
                            document_ids.append(document.pk)
            else:
                # If it is not an attachment then it should be a body message part.
                # Another option is to use message.is_body()
                if message.detected_content_type == 'text/html':
                    label = 'email_body.html'
                else:
                    label = 'email_body.txt'
                if source.store_body:
                    # Bodies are never expanded as compressed archives.
                    with ContentFile(content=force_bytes(message.body), name=label) as file_object:
                        documents = source.handle_upload(
                            document_type=source.document_type,
                            expand=SOURCE_UNCOMPRESS_CHOICE_N,
                            file_object=file_object
                        )
                        for document in documents:
                            document_ids.append(document.pk)
        return document_ids, metadata_dictionary

    def clean(self):
        # Model validation: each optional metadata type must be one of the
        # metadata types enabled for the source's document type.
        if self.subject_metadata_type:
            if self.subject_metadata_type.pk not in self.document_type.metadata.values_list('metadata_type', flat=True):
                raise ValidationError(
                    {
                        'subject_metadata_type': _(
                            'Subject metadata type "%(metadata_type)s" is not '
                            'valid for the document type: %(document_type)s'
                        ) % {
                            'metadata_type': self.subject_metadata_type,
                            'document_type': self.document_type
                        }
                    }
                )
        if self.from_metadata_type:
            if self.from_metadata_type.pk not in self.document_type.metadata.values_list('metadata_type', flat=True):
                raise ValidationError(
                    {
                        'from_metadata_type': _(
                            '"From" metadata type "%(metadata_type)s" is not '
                            'valid for the document type: %(document_type)s'
                        ) % {
                            'metadata_type': self.from_metadata_type,
                            'document_type': self.document_type
                        }
                    }
                )
class IMAPEmail(EmailBaseModel):
    """Email source that fetches messages over IMAP (optionally IMAP-over-SSL)."""
    source_type = SOURCE_CHOICE_EMAIL_IMAP

    # Mailbox that is SELECTed and searched on each run.
    mailbox = models.CharField(
        default=DEFAULT_IMAP_MAILBOX,
        help_text=_('IMAP Mailbox from which to check for messages.'),
        max_length=64, verbose_name=_('Mailbox')
    )
    search_criteria = models.TextField(
        blank=True, default=DEFAULT_IMAP_SEARCH_CRITERIA, help_text=_(
            'Criteria to use when searching for messages to process. '
            'Use the format specified in '
            'https://tools.ietf.org/html/rfc2060.html#section-6.4.4'
        ), null=True, verbose_name=_('Search criteria')
    )
    # Newline-separated STORE commands applied to each processed message.
    store_commands = models.TextField(
        blank=True, default=DEFAULT_IMAP_STORE_COMMANDS, help_text=_(
            'IMAP STORE command to execute on messages after they are '
            'processed. One command per line. Use the commands specified in '
            'https://tools.ietf.org/html/rfc2060.html#section-6.4.6 or '
            'the custom commands for your IMAP server.'
        ), null=True, verbose_name=_('Store commands')
    )
    execute_expunge = models.BooleanField(
        default=True, help_text=_(
            'Execute the IMAP expunge command after processing each email '
            'message.'
        ), verbose_name=_('Execute expunge')
    )
    mailbox_destination = models.CharField(
        blank=True, help_text=_(
            'IMAP Mailbox to which processed messages will be copied.'
        ), max_length=96, null=True, verbose_name=_('Destination mailbox')
    )
    objects = models.Manager()

    class Meta:
        verbose_name = _('IMAP email')
        verbose_name_plural = _('IMAP email')

    # http://www.doughellmann.com/PyMOTW/imaplib/
    def check_source(self, test=False):
        """Fetch and process matching messages from the configured mailbox.

        When *test* is True the messages are processed but the post-processing
        side effects on the server (STORE commands, COPY to the destination
        mailbox) are skipped. Errors in any IMAP step are re-raised as
        SourceException with context.
        """
        logger.debug(msg='Starting IMAP email fetch')
        logger.debug('host: %s', self.host)
        logger.debug('ssl: %s', self.ssl)
        # Connect, authenticate, and select the source mailbox.
        if self.ssl:
            server = imaplib.IMAP4_SSL(host=self.host, port=self.port)
        else:
            server = imaplib.IMAP4(host=self.host, port=self.port)
        server.login(user=self.username, password=self.password)
        try:
            server.select(mailbox=self.mailbox)
        except Exception as exception:
            raise SourceException(
                'Error selecting mailbox: {}; {}'.format(
                    self.mailbox, exception
                )
            )
        # UID SEARCH with the user-supplied criteria split on whitespace.
        try:
            status, data = server.uid(
                'SEARCH', None, *self.search_criteria.strip().split()
            )
        except Exception as exception:
            raise SourceException(
                'Error executing search command; {}'.format(exception)
            )
        if data:
            # data is a space separated sequence of message uids
            uids = data[0].split()
            logger.debug('messages count: %s', len(uids))
            logger.debug('message uids: %s', uids)
            for uid in uids:
                logger.debug('message uid: %s', uid)
                # Download the full raw message (RFC822) for this UID.
                try:
                    status, data = server.uid('FETCH', uid, '(RFC822)')
                except Exception as exception:
                    raise SourceException(
                        'Error fetching message uid: {}; {}'.format(
                            uid, exception
                        )
                    )
                try:
                    EmailBaseModel.process_message(
                        source=self, message_text=data[0][1]
                    )
                except Exception as exception:
                    raise SourceException(
                        'Error processing message uid: {}; {}'.format(
                            uid, exception
                        )
                    )
                # Server-side bookkeeping is skipped during test runs.
                if not test:
                    if self.store_commands:
                        for command in self.store_commands.split('\n'):
                            try:
                                args = [uid]
                                args.extend(command.strip().split(' '))
                                server.uid('STORE', *args)
                            except Exception as exception:
                                raise SourceException(
                                    'Error executing IMAP store command "{}" '
                                    'on message uid {}; {}'.format(
                                        command, uid, exception
                                    )
                                )
                    if self.mailbox_destination:
                        try:
                            server.uid(
                                'COPY', uid, self.mailbox_destination
                            )
                        except Exception as exception:
                            raise SourceException(
                                'Error copying message uid {} to mailbox {}; '
                                '{}'.format(
                                    uid, self.mailbox_destination, exception
                                )
                            )
            if self.execute_expunge:
                server.expunge()
        server.close()
        server.logout()
class POP3Email(EmailBaseModel):
    """Email source that fetches messages over POP3 (optionally POP3-over-SSL)."""
    source_type = SOURCE_CHOICE_EMAIL_POP3

    # Socket timeout in seconds; only used for the non-SSL connection below.
    timeout = models.PositiveIntegerField(
        default=DEFAULT_POP3_TIMEOUT, verbose_name=_('Timeout')
    )
    objects = models.Manager()

    class Meta:
        verbose_name = _('POP email')
        verbose_name_plural = _('POP email')

    def check_source(self, test=False):
        """Download and process every message in the POP3 mailbox.

        When *test* is False each message is deleted from the server after
        it has been processed.
        """
        logger.debug(msg='Starting POP3 email fetch')
        logger.debug('host: %s', self.host)
        logger.debug('ssl: %s', self.ssl)
        if self.ssl:
            server = poplib.POP3_SSL(host=self.host, port=self.port)
        else:
            server = poplib.POP3(
                host=self.host, port=self.port, timeout=self.timeout
            )
        server.getwelcome()
        server.user(self.username)
        server.pass_(self.password)
        # list() returns (response, ['msg_number msg_size', ...], octets).
        messages_info = server.list()
        logger.debug(msg='messages_info:')
        logger.debug(msg=messages_info)
        logger.debug('messages count: %s', len(messages_info[1]))
        for message_info in messages_info[1]:
            message_number, message_size = message_info.split()
            logger.debug('message_number: %s', message_number)
            logger.debug('message_size: %s', message_size)
            # retr() returns (response, lines, octets); rejoin the lines
            # into one raw message string.
            complete_message = '\n'.join(server.retr(message_number)[1])
            EmailBaseModel.process_message(
                source=self, message_text=complete_message
            )
            if not test:
                server.dele(which=message_number)
        server.quit()
| 9,793 | 2,131 | 152 |
c1ffdd25bfab325deac866b7520b17bf16ab3879 | 8,678 | py | Python | maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_feature_extractors.py | SIAAAAAA/MMT-PSM | 0835c01c5010d3337778f452e9d96416e0f8a11a | [
"MIT"
] | 41 | 2020-07-22T03:55:08.000Z | 2022-02-27T12:04:41.000Z | maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_feature_extractors.py | SIAAAAAA/MMT-PSM | 0835c01c5010d3337778f452e9d96416e0f8a11a | [
"MIT"
] | 5 | 2020-11-08T08:47:34.000Z | 2021-07-09T03:53:42.000Z | maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_feature_extractors.py | SIAAAAAA/MMT-PSM | 0835c01c5010d3337778f452e9d96416e0f8a11a | [
"MIT"
] | 5 | 2020-10-13T11:09:53.000Z | 2021-07-28T12:41:53.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from torch import nn
from torch.nn import functional as F
import torch
from ..box_head.roi_box_feature_extractors import ResNet50Conv5ROIFeatureExtractor
from maskrcnn_benchmark.modeling.poolers import Pooler
from maskrcnn_benchmark.layers import Conv2d
import pdb
class PRCNNFeatureExtractor(nn.Module):
'''
second stage extractor for paper: Cell Segmentation Proposal Network for Microscopy Image Analysis
'''
class MaskRCNNFPNFeatureExtractor(nn.Module):
"""
Heads for FPN for classification
"""
def __init__(self, cfg):
"""
Arguments:
num_classes (int): number of output classes
input_size (int): number of channels of the input once it's flattened
representation_size (int): size of the intermediate representation
"""
super(MaskRCNNFPNFeatureExtractor, self).__init__()
resolution = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION
scales = cfg.MODEL.ROI_MASK_HEAD.POOLER_SCALES
sampling_ratio = cfg.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO
pooler = Pooler(
output_size=(resolution, resolution),
scales=scales,
sampling_ratio=sampling_ratio,
)
input_size = cfg.MODEL.BACKBONE.OUT_CHANNELS
self.pooler = pooler
layers = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS
next_feature = input_size
self.blocks = []
for layer_idx, layer_features in enumerate(layers, 1):
layer_name = "mask_fcn{}".format(layer_idx)
module = Conv2d(next_feature, layer_features, 3, stride=1, padding=1)
# Caffe2 implementation uses MSRAFill, which in fact
# corresponds to kaiming_normal_ in PyTorch
nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
nn.init.constant_(module.bias, 0)
self.add_module(layer_name, module)
next_feature = layer_features
self.blocks.append(layer_name)
_ROI_MASK_FEATURE_EXTRACTORS = {
"ResNet50Conv5ROIFeatureExtractor": ResNet50Conv5ROIFeatureExtractor,
"MaskRCNNFPNFeatureExtractor": MaskRCNNFPNFeatureExtractor,
"PRCNNFeatureExtractor":PRCNNFeatureExtractor,
'DeeperExtractor':DeeperExtractor,
'MaskRCNNFPNAdaptor':MaskRCNNFPNAdaptor
}
| 38.229075 | 114 | 0.623992 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from torch import nn
from torch.nn import functional as F
import torch
from ..box_head.roi_box_feature_extractors import ResNet50Conv5ROIFeatureExtractor
from maskrcnn_benchmark.modeling.poolers import Pooler
from maskrcnn_benchmark.layers import Conv2d
import pdb
class PRCNNFeatureExtractor(nn.Module):
    '''
    second stage extractor for paper: Cell Segmentation Proposal Network for Microscopy Image Analysis

    Four conv pairs separated by 3x3/stride-2 max pools build a small
    pyramid (32/64/128/256 channels); each level is ROI-pooled to a fixed
    25x25 grid, the four pooled maps are concatenated per proposal
    (32+64+128+256 = 480 channels) and fused by two post convolutions.
    '''
    def __init__(self, cfg):
        super(PRCNNFeatureExtractor, self).__init__()
        # NOTE(review): resolution and scales are read from the config but
        # never used here -- the poolers below hard-code 25x25 and their
        # own scales. Confirm whether they should drive the poolers.
        resolution = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION
        scales = cfg.MODEL.ROI_MASK_HEAD.POOLER_SCALES
        sampling_ratio = cfg.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO
        # Backbone-style conv pairs; input is a 3-channel image tensor.
        self.conv1 = Conv2d(3,32, 3, stride=1, padding=1)
        self.conv2 = Conv2d(32,32, 3, stride=1, padding=1)
        self.conv3 = Conv2d(32,64, 3, stride=1, padding=1)
        self.conv4 = Conv2d(64, 64, 3, stride=1, padding=1)
        self.conv5 = Conv2d(64,128, 3, stride=1, padding=1)
        self.conv6 = Conv2d(128, 128, 3, stride=1, padding=1)
        self.conv7 = Conv2d(128, 256, 3, stride=1, padding=1)
        self.conv8 = Conv2d(256, 256, 3, stride=1, padding=1)
        # pdb.set_trace()
        # One pooler per pyramid level; the scale halves at each level to
        # match the stride-2 max pools applied between levels.
        self.pooler1 = Pooler(
            output_size=(25, 25),
            scales=(1.,),
            sampling_ratio=sampling_ratio,
        )
        self.p1 = nn.MaxPool2d(3,2,1)
        self.pooler2 =Pooler(
            output_size=(25, 25),
            scales=(0.5,),
            sampling_ratio=sampling_ratio,
        )
        self.p2 = nn.MaxPool2d(3, 2, 1)
        self.pooler3 =Pooler(
            output_size=(25, 25),
            scales=(0.25,),
            sampling_ratio=sampling_ratio,
        )
        self.p3 = nn.MaxPool2d(3, 2, 1)
        self.pooler4 =Pooler(
            output_size=(25, 25),
            scales=(0.125,),
            sampling_ratio=sampling_ratio,
        )
        # Fuse the 480-channel concatenation down to 32 channels.
        self.posconv1 = Conv2d(480,256, 3, stride=1, padding=1)
        self.posconv2 = Conv2d(256, 32, 3, stride=1, padding=1)
        for layer in [self.conv1, self.conv2,self.conv3, self.conv4,self.conv5, self.conv6,self.conv7, self.conv8,
                      self.posconv1, self.posconv2]:
            nn.init.kaiming_normal_(layer.weight, mode="fan_out", nonlinearity="relu")
            nn.init.constant_(layer.bias, 0)
    def forward(self, x, proposals):
        # Keep the untouched input around; it is returned alongside the
        # pooled features.
        pre_feature = x
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x1 = self.pooler1([x], proposals)
        x = self.p1(x)
        x = F.relu(self.conv3(x))
        x = F.relu(self.conv4(x))
        x2 = self.pooler2([x], proposals)
        x = self.p2(x)
        x = F.relu(self.conv5(x))
        x = F.relu(self.conv6(x))
        x3 = self.pooler3([x], proposals)
        x = self.p3(x)
        x = F.relu(self.conv7(x))
        x = F.relu(self.conv8(x))
        x4 = self.pooler4([x], proposals)
        # concate
        # Concatenate the four pooled maps per proposal along the channel
        # dimension, then re-stack along the batch dimension.
        num = x1.shape[0]
        concate_feature = []
        for i in range(num):
            concate_feature.append(torch.cat([x1[i], x2[i],x3[i], x4[i]], 0)[None,:,:,:])
        concate_feature = torch.cat(concate_feature, 0)
        # pdb.set_trace()
        concate_feature = self.posconv1(concate_feature)
        concate_feature = F.relu(concate_feature)
        concate_feature = self.posconv2(concate_feature)
        return concate_feature, pre_feature
class MaskRCNNFPNFeatureExtractor(nn.Module):
    """
    Heads for FPN for classification

    ROI-pools proposal features from the FPN levels, then applies the
    stack of 3x3 convs configured in cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS.
    """
    def __init__(self, cfg):
        """
        Arguments:
            num_classes (int): number of output classes
            input_size (int): number of channels of the input once it's flattened
            representation_size (int): size of the intermediate representation
        """
        super(MaskRCNNFPNFeatureExtractor, self).__init__()
        resolution = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION
        scales = cfg.MODEL.ROI_MASK_HEAD.POOLER_SCALES
        sampling_ratio = cfg.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO
        pooler = Pooler(
            output_size=(resolution, resolution),
            scales=scales,
            sampling_ratio=sampling_ratio,
        )
        input_size = cfg.MODEL.BACKBONE.OUT_CHANNELS
        self.pooler = pooler
        layers = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS
        next_feature = input_size
        # Conv layers are registered by name and replayed in order in
        # forward() via self.blocks.
        self.blocks = []
        for layer_idx, layer_features in enumerate(layers, 1):
            layer_name = "mask_fcn{}".format(layer_idx)
            module = Conv2d(next_feature, layer_features, 3, stride=1, padding=1)
            # Caffe2 implementation uses MSRAFill, which in fact
            # corresponds to kaiming_normal_ in PyTorch
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
            nn.init.constant_(module.bias, 0)
            self.add_module(layer_name, module)
            next_feature = layer_features
            self.blocks.append(layer_name)
    def forward(self, x, proposals):
        # pdb.set_trace()
        # NOTE(review): hard-coded device shuffling -- inputs not on GPU 0
        # are moved to 'cuda:0' for pooling and the result is moved to
        # 'cuda:1'. This assumes a fixed two-GPU layout; confirm intent.
        device = x[0].device.index
        if device != 0:
            x = [p.to('cuda:0') for p in x]
            proposals = [p.to('cuda:0') for p in proposals]
        x = self.pooler(x, proposals)
        if device != 0:
            x = x.to('cuda:1')
        # x = self.pooler(x, proposals)
        # pre_feature is the raw pooled ROI feature, returned alongside the
        # conv-refined feature.
        pre_feature = x
        for layer_name in self.blocks:
            x = F.relu(getattr(self, layer_name)(x))
        return x, pre_feature
class MaskRCNNFPNAdaptor(nn.Module):
    """Applies one 1x1 adaptor conv to each of the five FPN feature maps in place."""
    def __init__(self, cfg):
        super(MaskRCNNFPNAdaptor, self).__init__()
        # All five FPN levels are assumed to carry 256 channels.
        channel = [256, 256, 256, 256, 256]
        resolution = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION
        scales = cfg.MODEL.ROI_MASK_HEAD.POOLER_SCALES
        sampling_ratio = cfg.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO
        # NOTE(review): the pooler is constructed and stored but never used
        # in forward() -- confirm whether it is dead code.
        pooler = Pooler(
            output_size=(resolution, resolution),
            scales=scales,
            sampling_ratio=sampling_ratio,
        )
        input_size = cfg.MODEL.BACKBONE.OUT_CHANNELS
        self.pooler = pooler
        self.adapter_1 = self._init_adaptor(channel[0],channel[0])
        self.adapter_2 = self._init_adaptor(channel[1],channel[1])
        self.adapter_3 = self._init_adaptor(channel[2],channel[2])
        self.adapter_4 = self._init_adaptor(channel[3],channel[3])
        self.adapter_5 = self._init_adaptor(channel[4],channel[4])
    def _init_adaptor(self, s_channel, t_channel):
        # NOTE(review): positional args are (in, out, kernel=1, stride=1,
        # padding=1). A 1x1 kernel with padding=1 grows each spatial dim by
        # 2 (assuming torch Conv2d semantics) -- confirm padding=0 was not
        # intended.
        adaptor = Conv2d( s_channel, t_channel, 1, 1, 1)
        nn.init.kaiming_normal_(adaptor.weight, mode="fan_out",
                                nonlinearity="relu")
        nn.init.constant_(adaptor.bias, 0)
        return adaptor
    def forward(self, features_s):
        # adapt
        # Mutates and returns the input list: each level is passed through
        # its own adaptor conv.
        features_s[0] = self.adapter_1(features_s[0])
        features_s[1] = self.adapter_2(features_s[1])
        features_s[2] = self.adapter_3(features_s[2])
        features_s[3] = self.adapter_4(features_s[3])
        features_s[4] = self.adapter_5(features_s[4])
        # for layer_name in self.blocks:
        #     features_s = F.relu(getattr(self, layer_name)(features_s))
        return features_s
class DeeperExtractor(nn.Module):
    """Refines ROI features concatenated with a downsampled mask channel.

    Input is a (feature, mask) pair; the mask is 2x max-pooled and appended
    as one extra channel (hence the 257-channel first conv), then four 3x3
    convs produce the output feature.
    """
    def __init__(self, cfg):
        super(DeeperExtractor, self).__init__()
        # 256 feature channels + 1 pooled mask channel = 257 inputs.
        self.mask_fcn1 = Conv2d(257, 256, 3, 1, 1)
        self.mask_fcn2 = Conv2d(256, 256, 3, 1, 1)
        self.mask_fcn3 = Conv2d(256, 256, 3, 1, 1)
        self.conv5_mask = Conv2d(256, cfg.MODEL.RELATION_MASK.EXTRACTOR_CHANNEL, 3, 1, 1)
        for l in [self.mask_fcn1, self.mask_fcn2, self.mask_fcn3, self.conv5_mask, ]:
            nn.init.kaiming_normal_(l.weight, mode="fan_out", nonlinearity="relu")
            nn.init.constant_(l.bias, 0)
    def forward(self, x):
        # x is a (feature, mask) pair.
        x, mask = x
        # import pdb;pdb.set_trace()
        # Downsample the mask 2x so its spatial size matches the feature.
        mask_pool = F.max_pool2d(mask, kernel_size=2, stride=2)
        x = torch.cat((x, mask_pool), 1)
        x = F.relu(self.mask_fcn1(x))
        x = F.relu(self.mask_fcn2(x))
        x = F.relu(self.mask_fcn3(x))
        x = F.relu(self.conv5_mask(x))
        return x
# Registry mapping the cfg.MODEL.ROI_MASK_HEAD.FEATURE_EXTRACTOR config
# string to the extractor class instantiated by
# make_roi_mask_feature_extractor().
_ROI_MASK_FEATURE_EXTRACTORS = {
    "ResNet50Conv5ROIFeatureExtractor": ResNet50Conv5ROIFeatureExtractor,
    "MaskRCNNFPNFeatureExtractor": MaskRCNNFPNFeatureExtractor,
    "PRCNNFeatureExtractor":PRCNNFeatureExtractor,
    'DeeperExtractor':DeeperExtractor,
    'MaskRCNNFPNAdaptor':MaskRCNNFPNAdaptor
}
def make_roi_mask_feature_extractor(cfg):
    """Instantiate the mask-head feature extractor named in the config."""
    extractor_name = cfg.MODEL.ROI_MASK_HEAD.FEATURE_EXTRACTOR
    return _ROI_MASK_FEATURE_EXTRACTORS[extractor_name](cfg)
def make_mask_adapt_layer(cfg):
    """Build a MaskRCNNFPNAdaptor from the config."""
    return MaskRCNNFPNAdaptor(cfg)
86ef09b59f097db29d65d7707c8004ca9296f45c | 1,802 | py | Python | auditing/db/Service.py | dolfandringa/rolaguard_data-collectors | e212857a5e50ac84ded4059ff40f5c1744fdc614 | [
"Apache-2.0"
] | 1 | 2021-07-29T21:57:07.000Z | 2021-07-29T21:57:07.000Z | auditing/db/Service.py | dolfandringa/rolaguard_data-collectors | e212857a5e50ac84ded4059ff40f5c1744fdc614 | [
"Apache-2.0"
] | 1 | 2021-02-02T15:10:59.000Z | 2021-02-02T15:10:59.000Z | auditing/db/Service.py | dolfandringa/rolaguard_data-collectors | e212857a5e50ac84ded4059ff40f5c1744fdc614 | [
"Apache-2.0"
] | 1 | 2021-01-28T05:47:10.000Z | 2021-01-28T05:47:10.000Z | from auditing.db.Models import Packet
import datetime, json
import dateutil.parser as dp
| 33.37037 | 61 | 0.617647 | from auditing.db.Models import Packet
import datetime, json
import dateutil.parser as dp
def save(jsonPacket):
    """Deserialize a JSON-encoded LoRaWAN packet and persist it.

    The 'date' field is parsed into a datetime with dateutil; every other
    field is copied verbatim from the JSON payload (missing keys become
    None, as in the original per-field `data.get(..., None)` calls).
    """
    payload = json.loads(jsonPacket)
    # All Packet constructor fields that are passed through unchanged.
    field_names = (
        'topic', 'data_collector_id', 'organization_id', 'gateway',
        'tmst', 'chan', 'rfch', 'freq', 'stat', 'modu', 'datr', 'codr',
        'lsnr', 'rssi', 'size', 'data', 'm_type', 'major', 'mic',
        'join_eui', 'dev_eui', 'dev_nonce', 'dev_addr', 'adr', 'ack',
        'adr_ack_req', 'f_pending', 'class_b', 'f_count', 'f_opts',
        'f_port', 'error', 'latitude', 'longitude', 'altitude',
        'app_name', 'dev_name',
    )
    values = {name: payload.get(name) for name in field_names}
    values['date'] = dp.parse(payload.get('date'))
    new_packet = Packet(**values)
    new_packet.save_to_db()
def find_all(from_id, size):
    """Return packets by delegating paging to Packet.find_all_from(from_id, size)."""
    return Packet.find_all_from(from_id, size)
| 1,664 | 0 | 46 |
acc2fe39f661fb7f70cd2a0a0c3f78c6b23bdeb5 | 491 | py | Python | test_fmi/_nbdev.py | asvcode/test_fmi | 43111ef3e88625addd6e2609ea588f425841537d | [
"Apache-2.0"
] | null | null | null | test_fmi/_nbdev.py | asvcode/test_fmi | 43111ef3e88625addd6e2609ea588f425841537d | [
"Apache-2.0"
] | 3 | 2021-05-01T06:54:19.000Z | 2022-02-26T10:17:44.000Z | test_fmi/_nbdev.py | asvcode/test_fmi | 43111ef3e88625addd6e2609ea588f425841537d | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"instance_sort": "02_explore.ipynb",
"instance_dcmread": "02_explore.ipynb",
"instance_show": "02_explore.ipynb",
"get_dicom_image": "02_explore.ipynb"}
modules = ["one.py",
"explore.py"]
doc_url = "https://asvcode.github.io/test_fmi/"
git_url = "https://github.com/asvcode/test_fmi/tree/master/"
# AUTOGENERATED BY NBDEV! DO NOT EDIT!

__all__ = ["index", "modules", "custom_doc_links", "git_url"]

# Maps each exported symbol to the notebook it was defined in.
index = {"instance_sort": "02_explore.ipynb",
         "instance_dcmread": "02_explore.ipynb",
         "instance_show": "02_explore.ipynb",
         "get_dicom_image": "02_explore.ipynb"}

# Library modules generated from the notebooks.
modules = ["one.py",
           "explore.py"]

doc_url = "https://asvcode.github.io/test_fmi/"

git_url = "https://github.com/asvcode/test_fmi/tree/master/"
def custom_doc_links(name):
    """nbdev hook for resolving documentation links; this project defines none."""
    return None
| 18 | 0 | 23 |
9374d8166bff6d7bca40d0073aa64863e3ce4784 | 1,131 | py | Python | Q3/q3.py | Xascoria/AdventOfCode2021 | 869d6f292f6444fe6bb26bf37049a0949bf49019 | [
"MIT"
] | null | null | null | Q3/q3.py | Xascoria/AdventOfCode2021 | 869d6f292f6444fe6bb26bf37049a0949bf49019 | [
"MIT"
] | null | null | null | Q3/q3.py | Xascoria/AdventOfCode2021 | 869d6f292f6444fe6bb26bf37049a0949bf49019 | [
"MIT"
] | null | null | null | f = open("Q3/inputs.txt","r")
# --- Part 1: gamma (a) / epsilon (b) from the per-column bit majority ---
# NOTE(review): each line keeps its trailing "\n", so zip(*z) also yields a
# newline column that always fails the majority test; preserved as-is.
z = f.readlines()
f.close()
gamma_bits = []
epsilon_bits = []
for column in zip(*z):
    ones = column.count("1")
    if ones > len(column) // 2:
        gamma_bits.append("1")
        epsilon_bits.append("0")
    else:
        gamma_bits.append("0")
        epsilon_bits.append("1")
a = "".join(gamma_bits)
b = "".join(epsilon_bits)
print(int(a, 2) * int(b, 2))

# --- Part 2: oxygen generator rating (keep the majority bit, ties -> "1") ---
with open("Q3/inputs.txt") as fh:
    z = fh.readlines()
current_index = 0
while len(z) != 1:
    column = [line[current_index] for line in z]
    keep = "1" if column.count("1") >= column.count("0") else "0"
    z = [line for line in z if line[current_index] == keep]
    current_index += 1
a = int(z[0], 2)

# --- Part 3: CO2 scrubber rating (keep the minority bit, ties -> "0") ---
with open("Q3/inputs.txt") as fh:
    z = fh.readlines()
current_index = 0
while len(z) != 1:
    column = [line[current_index] for line in z]
    keep = "0" if column.count("1") >= column.count("0") else "1"
    z = [line for line in z if line[current_index] == keep]
    current_index += 1
b = int(z[0], 2)
print(a*b) | 22.62 | 53 | 0.50221 | f = open("Q3/inputs.txt","r")
# --- Part 1: gamma (a) / epsilon (b) from the per-column bit majority ---
# NOTE(review): each line keeps its trailing "\n", so zip(*z) also yields a
# newline column that always fails the majority test; preserved as-is.
z = f.readlines()
f.close()
gamma_bits = []
epsilon_bits = []
for column in zip(*z):
    ones = column.count("1")
    if ones > len(column) // 2:
        gamma_bits.append("1")
        epsilon_bits.append("0")
    else:
        gamma_bits.append("0")
        epsilon_bits.append("1")
a = "".join(gamma_bits)
b = "".join(epsilon_bits)
print(int(a, 2) * int(b, 2))

# --- Part 2: oxygen generator rating (keep the majority bit, ties -> "1") ---
with open("Q3/inputs.txt") as fh:
    z = fh.readlines()
current_index = 0
while len(z) != 1:
    column = [line[current_index] for line in z]
    keep = "1" if column.count("1") >= column.count("0") else "0"
    z = [line for line in z if line[current_index] == keep]
    current_index += 1
a = int(z[0], 2)

# --- Part 3: CO2 scrubber rating (keep the minority bit, ties -> "0") ---
with open("Q3/inputs.txt") as fh:
    z = fh.readlines()
current_index = 0
while len(z) != 1:
    column = [line[current_index] for line in z]
    keep = "0" if column.count("1") >= column.count("0") else "1"
    z = [line for line in z if line[current_index] == keep]
    current_index += 1
b = int(z[0], 2)
print(a*b) | 0 | 0 | 0 |
5412dc36e7ca007a4c859648cda01115d199a51d | 449 | py | Python | Section 4/source/cgi-bin/wav2json.py | PacktPublishing/-Deep-Learning-Projects-with-JavaScript | 6842ab1b7611b34735029df47a35790f588e2d8c | [
"MIT"
] | 11 | 2019-01-11T11:37:59.000Z | 2021-10-02T03:20:02.000Z | Section 4/source/cgi-bin/wav2json.py | PacktPublishing/-Deep-Learning-Projects-with-JavaScript | 6842ab1b7611b34735029df47a35790f588e2d8c | [
"MIT"
] | null | null | null | Section 4/source/cgi-bin/wav2json.py | PacktPublishing/-Deep-Learning-Projects-with-JavaScript | 6842ab1b7611b34735029df47a35790f588e2d8c | [
"MIT"
] | 6 | 2019-01-03T18:43:44.000Z | 2020-12-29T10:25:34.000Z | #!/usr/bin/env python3
import librosa
import sys
import numpy as np
from os import path, listdir
import json
print('Content-Type: application/json\n\n')
print(wav2json('./test.wav'))
| 26.411765 | 74 | 0.697105 | #!/usr/bin/env python3
import librosa
import sys
import numpy as np
from os import path, listdir
import json
def wav2json(wav, dtype='train'):
    """Extract averaged MFCC features from *wav* and return them as a JSON array string.

    Loads up to 3 s of audio (0.5 s offset), computes 25 MFCCs, averages them
    over the coefficient axis, negates and scales by 1/100.
    The *dtype* parameter is currently unused (kept for interface compatibility).
    """
    samples, _ = librosa.load(wav, res_type='kaiser_fast', duration=3, offset=0.5)
    mfcc_matrix = librosa.feature.mfcc(y=samples, n_mfcc=25)
    averaged = np.mean(mfcc_matrix, axis=0)
    features = (-(averaged / 100)).tolist()
    return json.dumps(features)
print('Content-Type: application/json\n\n')
print(wav2json('./test.wav'))
| 243 | 0 | 23 |
cb016f676082c29f82bfac8bc0749261eb089730 | 679 | py | Python | src/mfactcheck/models/sent_mbert.py | D-Roberts/multilingual_nli_ECIR | e7466f5e6c6b6246ae37c1c951003c13f56d84c8 | [
"Apache-2.0"
] | 2 | 2021-02-13T22:57:33.000Z | 2021-03-29T09:41:08.000Z | src/mfactcheck/models/sent_mbert.py | D-Roberts/multilingual_nli_ECIR | e7466f5e6c6b6246ae37c1c951003c13f56d84c8 | [
"Apache-2.0"
] | null | null | null | src/mfactcheck/models/sent_mbert.py | D-Roberts/multilingual_nli_ECIR | e7466f5e6c6b6246ae37c1c951003c13f56d84c8 | [
"Apache-2.0"
] | null | null | null | import os
from mfactcheck.utils.log_helper import LogHelper
from mfactcheck.multi_retriever.sentences.data import SentProcessor
from .mbert import MBert
LogHelper.setup()
logger = LogHelper.get_logger(os.path.splitext(os.path.basename(__file__))[0])
| 33.95 | 79 | 0.734904 | import os
from mfactcheck.utils.log_helper import LogHelper
from mfactcheck.multi_retriever.sentences.data import SentProcessor
from .mbert import MBert
LogHelper.setup()
logger = LogHelper.get_logger(os.path.splitext(os.path.basename(__file__))[0])
class SentMBert(MBert):
    """mBERT wrapper configured for the sentence-retrieval module (2 labels)."""

    def __init__(self, output_dir, module="sent", add_ro=False):
        # num_labels=2: sentence selection is scored as a two-class task.
        super().__init__(output_dir=output_dir, module=module, num_labels=2, add_ro=add_ro)
        sent_processor = SentProcessor()
        self.processor = sent_processor
        self.label_list = sent_processor.get_labels()
        self.num_labels = len(self.label_list)
        self.label_verification_list = sent_processor.get_labels_verification()
| 375 | 2 | 49 |
c3f66128e610e020af5a629e0b4aa0c08ad30f77 | 2,481 | py | Python | ecmdb/ecmserver.py | sethtroisi/ecm-db | 731196842c411ba12512336ac242c7b1d5ae8220 | [
"Apache-2.0"
] | null | null | null | ecmdb/ecmserver.py | sethtroisi/ecm-db | 731196842c411ba12512336ac242c7b1d5ae8220 | [
"Apache-2.0"
] | null | null | null | ecmdb/ecmserver.py | sethtroisi/ecm-db | 731196842c411ba12512336ac242c7b1d5ae8220 | [
"Apache-2.0"
] | null | null | null | import contextlib
import datetime
import os
import logging
import re
import time
from enum import Enum
import gmpy2
import sqlite3
class EcmServer:
    """ECM Server

    Responsible for recording what curves have been run.
    State is kept in a SQLite database created from ``SCHEMA_FILE``.
    """
    # SQL schema file used to initialize a fresh database.
    SCHEMA_FILE = "schema.sql"
| 24.81 | 102 | 0.580814 | import contextlib
import datetime
import os
import logging
import re
import time
from enum import Enum
import gmpy2
import sqlite3
class EcmServer:
    """ECM Server

    Responsible for recording what curves have been run. State lives in a
    SQLite database that is created from ``schema.sql`` on first use.
    """

    SCHEMA_FILE = "schema.sql"

    class Status(Enum):
        """Factorization status codes stored in the ``numbers`` table."""
        P = 1    # proven prime
        PRP = 2  # probable prime
        FF = 3   # fully factored
        CF = 4   # composite, factor(s) known
        C = 5    # composite, no factor known

    def __init__(self, db_file="./ecm-server.db"):
        self._db_file = db_file
        self._db = None
        self.init_db()

    def init_db(self):
        """Connect to the SQLite db, creating it from SCHEMA_FILE when absent/empty."""
        exists = os.path.isfile(self._db_file) and os.path.getsize(self._db_file) > 0
        self._db = sqlite3.connect(self._db_file)
        # Makes rows namedtuple-like (column access by name).
        self._db.row_factory = sqlite3.Row
        # Turn on foreign_key constraints
        self._db.execute("PRAGMA foreign_keys = 1")
        if not exists:
            schema_path = os.path.join(os.path.dirname(__file__), EcmServer.SCHEMA_FILE)
            logging.warning(f"Creating db({self._db_file}) from {schema_path}")
            with open(schema_path) as schema_f:
                schema = schema_f.read()
            with self.cursor() as cur:
                cur.executescript(schema)

    def _get_cursor(self):
        # TODO: closing cursor one day.
        return self._db.cursor()

    def cursor(self):
        """Return a context-managed cursor that is closed on exit."""
        return contextlib.closing(self._get_cursor())

    def find_number(self, n):
        """Return the row for *n*, or None; raise ValueError if *n* is stored twice."""
        with self.cursor() as cur:
            cur.execute('SELECT * from numbers where n = ?', (n,))
            records = cur.fetchall()
        if len(records) == 0:
            return None
        elif len(records) >= 2:
            raise ValueError(f"Duplicate records for {n}")
        return records[0]

    def add_number(self, expr):
        """Insert the integer literal *expr* and return its row (existing or new).

        Raises:
            ValueError: when *expr* is not an int or a plain positive-integer string.
        """
        if EcmServer._is_number(expr):
            n = int(expr)
        else:
            raise ValueError(f"Bad expr: {expr}")
        record = self.find_number(n)
        if record:
            return record
        # PRP when probably prime, otherwise composite with no known factor
        # (was the magic numbers 2 / 5 -- now tied to the Status enum).
        status = EcmServer.Status.PRP.value if gmpy2.is_prime(n) else EcmServer.Status.C.value
        with self.cursor() as cur:
            cur.execute('INSERT INTO numbers VALUES (null,?,?)', (n, status))
        self._db.commit()
        return self.find_number(n)

    @staticmethod
    def _is_number(n):
        """True for ints and for strings consisting solely of a positive decimal integer."""
        # fullmatch instead of match: re.match accepted trailing junk such as
        # "12abc", which then made int(expr) blow up inside add_number.
        return isinstance(n, int) or re.fullmatch("[1-9][0-9]*", n) is not None

    @staticmethod
    def _is_number_expr(expr):
        # TODO: support arithmetic expressions; see
        # https://stackoverflow.com/questions/2371436/evaluating-a-mathematical-expression-in-a-string
        return False
| 1,893 | 72 | 243 |
7423fea9e86c8e28e9deff5e6e14b0bb00592b33 | 1,376 | py | Python | editregions/contrib/textfiles/models.py | kezabelle/django-editregions | 961ddeffb37d30d40fb4e3e9224bc3f956b7a5b5 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2015-01-11T18:21:27.000Z | 2015-01-11T18:21:27.000Z | editregions/contrib/textfiles/models.py | kezabelle/django-editregions | 961ddeffb37d30d40fb4e3e9224bc3f956b7a5b5 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | editregions/contrib/textfiles/models.py | kezabelle/django-editregions | 961ddeffb37d30d40fb4e3e9224bc3f956b7a5b5 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from CommonMark import DocParser
from CommonMark import HTMLRenderer
from django.template import TemplateDoesNotExist
from django.template.loader import render_to_string
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from django.db.models import CharField
from .utils import valid_md_file
from editregions.models import EditRegionChunk
logger = logging.getLogger(__name__)
@python_2_unicode_compatible
| 30.577778 | 79 | 0.719477 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from CommonMark import DocParser
from CommonMark import HTMLRenderer
from django.template import TemplateDoesNotExist
from django.template.loader import render_to_string
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from django.db.models import CharField
from .utils import valid_md_file
from editregions.models import EditRegionChunk
logger = logging.getLogger(__name__)
@python_2_unicode_compatible
class Markdown(EditRegionChunk):
    """Edit-region chunk that renders a Markdown file (resolved via the
    template loader) to HTML with CommonMark."""

    # Template-relative path of the markdown file; checked by the
    # valid_md_file validator.
    filepath = CharField(max_length=255, verbose_name=_('filename'),
                         validators=[valid_md_file])

    def __str__(self):
        return self.filepath

    @cached_property
    def content(self):
        """Raw markdown text, or '' when the file has been moved/removed."""
        try:
            return render_to_string(self.filepath, {})
        except TemplateDoesNotExist:
            logger.error("Markdown file has been moved or removed", exc_info=1)
            return ''

    @cached_property
    def rendered_content(self):
        """HTML produced by CommonMark from ``content`` ('' when content is empty)."""
        if self.content:
            parsed = DocParser().parse(self.content)
            return HTMLRenderer().render(parsed)
        return ''

    class Meta:
        verbose_name = _("Markdown file")
        verbose_name_plural = _("Markdown files")
| 381 | 365 | 22 |
f6adc038e85b2f7df73be52ab4cbbdeaccb0f94a | 21,374 | py | Python | examples/my_mecrlp_problemv3.py | hdchantre/jMetalPy | dba62a865c01c9ab77465706bc498efbe59c3bc4 | [
"MIT"
] | null | null | null | examples/my_mecrlp_problemv3.py | hdchantre/jMetalPy | dba62a865c01c9ab77465706bc498efbe59c3bc4 | [
"MIT"
] | null | null | null | examples/my_mecrlp_problemv3.py | hdchantre/jMetalPy | dba62a865c01c9ab77465706bc498efbe59c3bc4 | [
"MIT"
] | null | null | null | from jmetal.core.problem import FloatProblem
from jmetal.core.solution import FloatSolution
import numpy as np
import math
import datameclpps260421v1
| 45.476596 | 300 | 0.495883 | from jmetal.core.problem import FloatProblem
from jmetal.core.solution import FloatSolution
import numpy as np
import math
import datameclpps260421v1
class MyProblem(FloatProblem):
def __init__(self, U1,V1,K1,N1,Psi_u1,Psi_u2,Phi_u1,Phi_u2,B_u1,B_u2,r_u1,tau_v1,tau_v2,sigma_v1,sigma_v2,b_v1,b_v2,L_v1,R_v1):
super().__init__()
#self.U = U # The array of MEC nodes
#self.V = V # The array of Demand points
#self.K = K # Redundancy
#self.N = N # Number of shared demand points
#self.C_u = C_u # The array of number of Slices in each MEC nodes
#self.Psi_u = Psi_u # Psi_u MEC CPU capacity in MIPS
#self.Phi_u = Phi_u # MEC RAM capacity in GB
#self.B_u = B_u #MEC total bandwidth capacity in Mbps
#self.r_u = r_u # Failure probability of the MEC u \in U
#self.tau_v = tau_v # Processing demand of the demand point v in MIPS
#self.sigma_v = sigma_v #Memory demand of the demand point v in GB = x8000 MBit
#self.b_v = b_v # Bandwidth consumed by the demand point v in Mbps
#self.L_v = L_v #Bound on the latency requirement of the demand point v in ms
#self.R_v = R_v #Bound on the reliability requirement of the demand point v
self.x = {}
self.y = {}
self.w = {}
self.psi_ui = {}
self.phi_ui = {}
self.U,self.V,self.K,self.N,self.E,self.C_u,self.Psi_u,self.Phi_u,self.B_u,self.r_u,self.tau_v,self.sigma_v,self.b_v,self.L_v,self.R_v,self.PMEC,self.PDP=datameclpps260421v1.make_data(U1,V1,K1,N1,Psi_u1,Psi_u2,Phi_u1,Phi_u2,B_u1,B_u2,r_u1,tau_v1,tau_v2,sigma_v1,sigma_v2,b_v1,b_v2,L_v1,R_v1)
self.I = sum(self.C_u)
#print(self.K)
self.number_of_variables = len(self.V)*(self.K)*(self.I) + (self.K)*(self.I) + len(self.U)
self.number_of_constraints = 11*self.I+9*len(self.V)+4*len(self.U)+2*self.K+self.K-3
"""
for u in self.U:
for i in range(self.C_u[u]):
#const +=i
#print(i)
for k in range(self.K):
for v in self.V:
self.number_of_variables = len(self.U)*len(self.V)*self.K*self.C_u[u]+ len(self.U)*self.K*self.C_u[u] + len(self.U)+1
self.number_of_constraints = len(self.U)*self.C_u[u]*2+len(self.V)*len(self.U)+1 """
#print(const)
self.number_of_objectives = 2
self.lower_bound = [0.0 for _ in range(self.number_of_variables)]
self.upper_bound = [1.0 for _ in range(self.number_of_variables)]
#print(self.upper_bound)
self.obj_directions = [self.MINIMIZE, self.MINIMIZE] # both objectives should be minimized
self.obj_labels = ['N#Slices', 'N#MECs'] # objectives' name
    def evaluate(self, solution) -> FloatSolution:
        """Decode a candidate's float variables into 0/1 decisions and set its objectives.

        variables[0:|U|] are rounded into y[u] (MEC node u opened). The
        remaining entries are rounded into w[u,i,k] (slice activity) and
        x[u,v,i,k] (demand-point assignment) via the index expressions below.
        Objective 0 totals active slices, objective 1 opened nodes; constraint
        evaluation is delegated to ``__evaluate_constraints``.
        """
        for u in self.U:
            if round(solution.variables[u])== 0:
                # Node u is closed: force all of its slice/assignment decisions to 0.
                self.y[u] = 0
                for i in range(self.C_u[u]):
                    for k in range(self.K):
                        self.w[u,i,k] = 0
                        for v in self.V:
                            self.x[u,v,i,k] = 0
            else:
                self.y[u] = round(solution.variables[u])
                for i in range(self.C_u[u]):
                    for k in range(self.K):
                        if round(solution.variables[len(self.U)+u*self.C_u[u]*self.K+i*self.K+k]) == 0:
                            # Slice i at redundancy level k is inactive: no demand point may use it.
                            self.w[u,i,k] = 0
                            for v in self.V:
                                self.x[u,v,i,k]= 0
                        else:
                            self.w[u,i,k] = round(solution.variables[len(self.U)+u*self.C_u[u]*self.K+i*self.K+k])
                            for v in self.V:
                                self.x[u,v,i,k]= round(solution.variables[len(self.U)+(self.C_u[u]*self.K)+ u*self.C_u[u] +i*(self.K) +k*(len(self.V))+v] )
        # Objectives: minimize the number of active slices (0) and opened MEC nodes (1).
        for u in self.U:
            solution.objectives[1] += self.y[u]
            for i in range(self.C_u[u]):
                for k in range(self.K):
                    solution.objectives[0] += self.w[u,i,k]
        self.__evaluate_constraints(solution)
        return solution
def __evaluate_constraints(self, solution: FloatSolution) -> None:
constraints3,constraints3tmp = 0.0,0.0 #[0.0 for _ in range(self.number_of_constraints)]
constraints4, constraints5,constraints6,constraints7 = 0.0,0.0,0.0,0.0
""" constraints2 = 0.0
constraints5 = 0.0
constraints6 = 0.0
constraints7 = 0.0
constraints8 = 0.0
constraints9, constraints10, constraints11, constraints14,constraints15, constraints16,constraints17,constraints18,constraints19,constraints20 = 0.0,0.0,0.0,0.0,0.0,0.0,0.0, 0.0, 0.0, 0.0
constraints16 = [0.0 for _ in range(len(self.U))]
constraints17 = [0.0 for _ in range(len(self.U))]
constraints18 = [0.0 for _ in range(len(self.U))] """
self.z = {}
for u in self.U:
if round(solution.variables[u]) == 0:
self.y[u] = 0
for i in range(self.C_u[u]):
for k in range(self.K):
self.w[u,i,k] = 0
for v in self.V:
#self.x[u,v,i,k]= solution.variables[len(self.U)+len(self.U)*self.C_u[u]*self.K +u*self.C_u[u]*self.K*len(self.V)+i*self.K*len(self.V)+k*len(self.V)+v]
self.x[u,v,i,k]= 0
self.z[u,v,i,k]= 0
else:
self.y[u] = round(solution.variables[u])
for i in range(self.C_u[u]):
for k in range(self.K):
if round(solution.variables[len(self.U)+u*self.C_u[u]*self.K+i*self.K+k]) == 0:
self.w[u,i,k] = 0
for v in self.V:
#self.x[u,v,i,k]= solution.variables[len(self.U)+len(self.U)*self.C_u[u]*self.K +u*self.C_u[u]*self.K*len(self.V)+i*self.K*len(self.V)+k*len(self.V)+v]
self.x[u,v,i,k]= 0
self.z[u,v,i,k]= 0
else:
self.w[u,i,k] = round(solution.variables[len(self.U)+u*self.C_u[u]*self.K+i*self.K+k])
for v in self.V:
#self.x[u,v,i,k]= solution.variables[len(self.U)+len(self.U)*self.C_u[u]*self.K +u*self.C_u[u]*self.K*len(self.V)+i*self.K*len(self.V)+k*len(self.V)+v]
self.x[u,v,i,k]= round(solution.variables[len(self.U)+(self.C_u[u]*self.K)+ u*self.C_u[u] +i*(self.K) +k*(len(self.V))+v])
self.z[u,v,i,k]= round(solution.variables[len(self.U)+(self.C_u[u]*self.K)+ u*self.C_u[u] +i*(self.K) +k*(len(self.V))+v])
""" for i in range(self.C_u[u]):
for k in range(self.K):
self.w[u,i,k] = round(solution.variables[u*self.C_u[u]+i*self.K+k])
for v in self.V:
#self.x[u,v,i,k]= solution.variables[len(self.U)+len(self.U)*self.C_u[u]*self.K +u*self.C_u[u]*self.K*len(self.V)+i*self.K*len(self.V)+k*len(self.V)+v]
self.x[u,v,i,k]= round(solution.variables[u*self.C_u[u]+i*self.K +k*(len(self.V))+v] )
self.z[u,v,i,k]= round(solution.variables[u*self.C_u[u]+i*self.K +k*(len(self.V))+v] )
for u in self.U:
if self.y[u] == 0:
for i in range(self.C_u[u]):
for k in range(self.K):
self.w[u,i,k] = 0
for v in self.V:
#self.x[u,v,i,k]= solution.variables[len(self.U)+len(self.U)*self.C_u[u]*self.K +u*self.C_u[u]*self.K*len(self.V)+i*self.K*len(self.V)+k*len(self.V)+v]
self.x[u,v,i,k]= 0
self.z[u,v,i,k]= 0 """
constraints3 = [0.0 for u in self.U for i in range(self.C_u[u])]
constraints3tmp = [0.0 for u in self.U for i in range(self.C_u[u])]
constraints4 = [0.0 for u in self.U for i in range(self.C_u[u])]
constraints4tmp = [0.0 for u in self.U for i in range(self.C_u[u])]
constraints5 = [0.0 for v in self.V for u in self.U]
constraints5tmp = [0.0 for v in self.V for u in self.U]
constraints6 = [0.0 for v in self.V]
constraints6tmp = [0.0 for v in self.V]
constraints7 = [0.0 for u in self.U for i in range(self.C_u[u]) for k in range(2,self.K)]
constraints7tmp = [0.0 for u in self.U for i in range(self.C_u[u]) for k in range(2,self.K)]
constraints8 = [0.0 for v in self.V]
constraints8tmp = [0.0 for v in self.V]
constraints9 = [0.0 for v in self.V for u in self.U for i in range(self.C_u[u])]
constraints9tmp = [0.0 for v in self.V for u in self.U for i in range(self.C_u[u])]
constraints10 = [0.0 for v in self.V for u in self.U for i in range(self.C_u[u])]
constraints11 = [0.0 for u in self.U for i in range(self.C_u[u]) for k in range(self.K)]
self.psi_ui = {(u,i):0.0 for u in self.U for i in range(self.C_u[u])}
self.psi_uitmp = {(u,i):0.0 for u in self.U for i in range(self.C_u[u])}
self.phi_uitmp = {(u,i):0.0 for u in self.U for i in range(self.C_u[u])}
self.taspsi_ui = {(u,i):1/(min(self.Psi_u)) for u in self.U for i in range(self.C_u[u])}
constraints12 = [0.0 for u in self.U for i in range(self.C_u[u])]
constraints13 = [0.0 for u in self.U for i in range(self.C_u[u])]
constraints16 = [0.0 for u in self.U ]
constraints16tmp = [0.0 for u in self.U ]
constraints17 = [0.0 for u in self.U ]
constraints17tmp = [0.0 for u in self.U ]
constraints18 = [0.0 for u in self.U]
constraints18tmp = [0.0 for u in self.U]
constraints19 = [0.0 for v in self.V for u in self.U for i in range(self.C_u[u])]
constraints19tmp1=[0.0 for v in self.V for u in self.U for i in range(self.C_u[u])]
constraints19tmp2 =[0.0 for v in self.V for u in self.U for i in range(self.C_u[u])]
constraints20 = [0.0 for v in self.V]
constraints20tmp = [0.0 for v in self.V]
constraints21 = [0.0 for u in self.U for i in range(self.C_u[u]) for k in range(self.K)]
constraints22 = [0.0 for u in self.U for i in range(self.C_u[u]) for k in range(self.K)]
constraints23 = [0.0 for u in self.U for i in range(self.C_u[u]) for k in range(self.K)]
constraints24 = [0.0 for u in self.U for i in range(self.C_u[u]) for k in range(self.K)]
""" print(constraints18tmp)
print (len(constraints3))
print(f"total de Cu {self.I}")
print (len(self.x))
print (len(self.w))
print (len(self.y)) """
#constraint 3 and 4
#sum3,sum4 = 0.0,0.0
for u in self.U:
for i in range(self.C_u[u]):
for v in self.V:
constraints3tmp[i] += self.x[u,v,i,0]
constraints4tmp[i] += self.x[u,v,i,1]
#constraints4tmp[u*self.C_u[u]+i] += self.x[u,v,i,1]
#sum4 = sum4 + self.x[u,v,i,1]
for u in self.U:
for i in range(self.C_u[u]):
constraints3[i] = 1 - constraints3tmp[i]
constraints4[i] = self.N - constraints4tmp[i]
#constraints4[i] = self.N - constraints4tmp[u*self.C_u[u]+i]
#constraint 5
for v in self.V:
for u in self.U:
for i in range(self.C_u[u]):
for k in range(self.K):
constraints5tmp[v*len(self.U)+u] += self.x[u,v,i,k]
constraints5[v*len(self.U)+u] = 1 - abs(constraints5tmp[v*len(self.U)+u])
""" for v in self.V:
for u in self.U:
constraints5[v*len(self.U)+u] = 1 - abs(constraints5tmp[v*len(self.U)+u]) """
#Constraint 6
for v in self.V:
for u in self.U:
for i in range(self.C_u[u]):
constraints6tmp[v] += self.x[u,v,i,0]
constraints6[v] = 1 - constraints6tmp[v]
""" for v in self.V:
constraints6[v] = 1 - constraints6tmp[v] """
#Constraint 7
for u in self.U:
for i in range(self.C_u[u]):
for k in range(2,self.K):
for v in self.V:
constraints7tmp[i*self.K+k] += self.x[u,v,i,k]
constraints7[i*self.K+k] = len(self.V) - constraints7tmp[i*self.K+k]
""" for u in self.U:
for i in range(self.C_u[u]):
for k in range(2,self.K):
constraints7[i*self.K+k] = len(self.V) - constraints7tmp[i*self.K+k] """
#Constraint 8
for v in self.V:
for u in self.U:
for i in range(self.C_u[u]):
for k in range(1,self.K):
constraints8tmp[v] = self.x[u,v,i,k]
for v in self.V:
for u in self.U:
for i in range(self.C_u[u]):
for k in range(self.K):
constraints8[v] =-self.K +abs(constraints8tmp[v])
#Constraint 9
for v in self.V:
for u in self.U:
for i in range(self.C_u[u]):
for k in range(self.K):
constraints9tmp[v*len(self.U)+u*self.C_u[u]] += self.x[u,v,i,k]
for v in self.V:
for u in self.U:
for i in range(self.C_u[u]):
for k in range(self.K):
constraints9[v*len(self.U)+u*self.C_u[u]] = 1 - constraints9tmp[v*len(self.U)+u*self.C_u[u]]
# Constraint 10 and 11
for v in self.V:
for u in self.U:
for i in range(self.C_u[u]):
for k in range(self.K):
constraints10[v*len(self.U)+u*self.C_u[u]+i*self.K+k] = -self.x[u,v,i,k]+self.w[u,i,k]
#Constraints11
for u in self.U:
for i in range(self.C_u[u]):
for k in range(self.K):
constraints11[u*self.C_u[u]+i*self.K +k] = self.y[u] - self.w[u,i,k]
#Constraint 12 and 13
for u in self.U:
for i in range(self.C_u[u]):
self.psi_uitmp[u,i] = max(self.tau_v[v]*self.x[u,v,i,k] for v in self.V for k in range(1,self.K) )
self.phi_uitmp[u,i] = max(self.sigma_v[v]*self.x[u,v,i,k] for v in self.V for k in range(1,self.K) )
#Constraint 14 and 15
for u in self.U:
for i in range(self.C_u[u]):
for v in self.V:
self.psi_uitmp[u,i] = self.tau_v[v]*self.x[u,v,i,0]
self.phi_uitmp[u,i] = self.sigma_v[v]*self.x[u,v,i,0]
#Constraint 16 and 17
for u in self.U:
for i in range(self.C_u[u]):
constraints16tmp [u] += self.psi_uitmp[u,i]
constraints16[u] = self.Psi_u[u] - constraints16tmp[u]
""" for u in self.U:
for i in range(self.C_u[u]):
constraints16[u] = self.Psi_u[u] - constraints16tmp[u]
"""
for u in self.U:
for i in range(self.C_u[u]):
constraints17tmp [u] += self.phi_uitmp[u,i]
constraints17[u] = self.Phi_u[u] - constraints17tmp[u]
""" for u in self.U:
for i in range(self.C_u[u]):
constraints17[u] = self.Phi_u[u] - constraints17tmp[u]
"""
#Constraint 18
for u in self.U:
for i in range(self.C_u[u]):
for v in self.V:
for k in range(self.K):
constraints18tmp[u] += self.b_v[u,v,i,k]*self.x[u,v,i,k]
constraints18[u] = self.B_u[u] - constraints18tmp[u]
""" for u in self.U:
constraints18[u] = self.B_u[u] - constraints18tmp[u] """
#constraint 19
for v in self.V:
for u in self.U:
for m in self.U:
if u!=m:
for i in range(self.C_u[m]):
for k in range(self.K):
constraints19tmp2[v*len(self.U)+u*self.C_u[m]+i] +=self.x[m,v,i,k]*math.log(1-self.r_u[m])
else:
for i in range(self.C_u[u]):
constraints19tmp1[v*len(self.U)+u*self.C_u[u]+i] +=self.x[u,v,i,0]*math.log(1-self.r_u[u])
constraints19[v] =-constraints19tmp1[v] -constraints19tmp2[v] +math.log(1-self.R_v[v])
#Constraint 20
for u in self.U:
for i in range(self.C_u[u]):
self.psi_ui[u,i]*self.taspsi_ui[u,i] == 1
for v in self.V:
for u in self.U:
for i in range(self.C_u[u]):
for k in range(self.K):
self.z[u,v,i,k] = self.taspsi_ui[u,i]*self.x[u,v,i,k]
for v in self.V:
for u in self.U:
for i in range(self.C_u[u]):
for k in range(self.K):
constraints20tmp [v] += self.z[u,v,i,k]*self.tau_v[v]
constraints20[v] = self.L_v[v] -constraints20tmp[v]
for v in self.V:
for u in self.U:
for i in range(self.C_u[u]):
for k in range(self.K):
constraints21[u*self.C_u[u]+i*self.K+k] += -self.z[u,v,i,k]+(1/min(self.Psi_u))*self.x[u,v,i,k]
for v in self.V:
for u in self.U:
for i in range(self.C_u[u]):
for k in range(self.K):
constraints22[u*self.C_u[u]+i*self.K+k] = self.z[u,v,i,k]-(1/max(self.Psi_u))*self.x[u,v,i,k]
""" for v in self.V:
for u in self.U:
for i in range(self.C_u[u]):
for k in range(self.K):
constraints20tmp[v] += (self.tau_v/self.psi_uitmp[u,i])*self.x[u,v,i,k]
constraints20[v] = self.L_v[v] - constraints20tmp[v] """
#Constraint 24
for v in self.V:
for u in self.U:
for i in range(self.C_u[u]):
for k in range(self.K):
constraints23[u*self.C_u[u]+i*self.K+k] += -self.z[u,v,i,k]+self.taspsi_ui[u,i]+(1/max(self.Psi_u))*(1-self.x[u,v,i,k])
constraints24[u*self.C_u[u]+i*self.K+k] += self.z[u,v,i,k]-self.taspsi_ui[u,i]+(1/min(self.Psi_u))*(1-self.x[u,v,i,k])
#solution.constraints = constraints2
solution.constraints = constraints3
solution.constraints = constraints4
solution.constraints = constraints5
solution.constraints = constraints6
solution.constraints = constraints7
solution.constraints = constraints8
solution.constraints = constraints9
solution.constraints = constraints10
solution.constraints = constraints11
solution.constraints = constraints16
solution.constraints = constraints17
solution.constraints = constraints18
solution.constraints = constraints19
solution.constraints = constraints20
solution.constraints = constraints21
solution.constraints = constraints22
solution.constraints = constraints23
solution.constraints = constraints24
    def get_name(self):
        """Return the human-readable name of this optimization problem."""
        return 'My_MECRLP_problem'
ebe36ecfc0a182de69b399ff3cccedb98db61cdf | 21 | py | Python | src/msdss_base_api/__init__.py | rrwen/msdss-base-api | f2b05c3064dd375762b43335adcf360d6b40eb31 | [
"MIT"
] | null | null | null | src/msdss_base_api/__init__.py | rrwen/msdss-base-api | f2b05c3064dd375762b43335adcf360d6b40eb31 | [
"MIT"
] | null | null | null | src/msdss_base_api/__init__.py | rrwen/msdss-base-api | f2b05c3064dd375762b43335adcf360d6b40eb31 | [
"MIT"
] | null | null | null | from .core import API | 21 | 21 | 0.809524 | from .core import API | 0 | 0 | 0 |
cef7b3d2334bb46f89d544ad87c82f3b738b72db | 1,007 | py | Python | tests/python/pants_test/backend/python/tasks/checkstyle/test_new_style_classes.py | qma/pants | 604f58a366b66bc5cfa83e7250cb8af8130832cf | [
"Apache-2.0"
] | null | null | null | tests/python/pants_test/backend/python/tasks/checkstyle/test_new_style_classes.py | qma/pants | 604f58a366b66bc5cfa83e7250cb8af8130832cf | [
"Apache-2.0"
] | null | null | null | tests/python/pants_test/backend/python/tasks/checkstyle/test_new_style_classes.py | qma/pants | 604f58a366b66bc5cfa83e7250cb8af8130832cf | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.python.tasks.checkstyle.new_style_classes import NewStyleClasses
from pants_test.backend.python.tasks.checkstyle.plugin_test_base import CheckstylePluginTestBase
| 27.972222 | 96 | 0.706058 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.python.tasks.checkstyle.new_style_classes import NewStyleClasses
from pants_test.backend.python.tasks.checkstyle.plugin_test_base import CheckstylePluginTestBase
class NewStyleClassesTest(CheckstylePluginTestBase):
  """Checks that the NewStyleClasses plugin flags old-style (classic) class definitions."""
  plugin_type = NewStyleClasses

  def test_new_style_classes(self):
    # A classic class (no base) is reported as T606; inheriting from object is fine.
    statement = """
    class OldStyle:
      pass
    class NewStyle(object):
      pass
    """
    self.assertNit(statement, 'T606')
    # Any explicit base class list makes the class new-style.
    statement = """
    class NewStyle(OtherThing, ThatThing, WhatAmIDoing):
      pass
    """
    self.assertNoNits(statement)
    # Empty parentheses still leave the MRO unspecified under Python 2.
    statement = """
    class OldStyle(): # unspecified mro
      pass
    """
    self.assertNit(statement, 'T606')
| 414 | 88 | 23 |
78a08b1094138b77ca2c90ed9e478ef2099e432f | 2,842 | py | Python | main.py | asa-leholland/TaskmasterDataAcquisition | 1e40f8320053ceb3c4f1119562139556f430fbcc | [
"MIT"
] | null | null | null | main.py | asa-leholland/TaskmasterDataAcquisition | 1e40f8320053ceb3c4f1119562139556f430fbcc | [
"MIT"
] | null | null | null | main.py | asa-leholland/TaskmasterDataAcquisition | 1e40f8320053ceb3c4f1119562139556f430fbcc | [
"MIT"
] | null | null | null |
import urllib.request
from pprint import pprint
from html_table_parser.parser import HTMLTableParser
import pandas as pd
import os
from parse import parse_taskmaster_csv
if __name__ == "__main__":
    # Scrape and stage the raw episode tables, then merge the per-season
    # CSVs into one combined dataset file.
    dir_path_to_temp_csvs="temp_csvs"
    dir_path_final_csvs="final_season_csvs"
    create_final_raw_csv(dir_path_to_temp_csvs, dir_path_final_csvs)
    merge_final_dataset(inpath_final_individual_csvs=dir_path_final_csvs)
import urllib.request
from pprint import pprint
from html_table_parser.parser import HTMLTableParser
import pandas as pd
import os
from parse import parse_taskmaster_csv
def url_get_contents(url):
    """Fetch *url* via HTTP GET and return the raw response body as bytes.

    The response object is used as a context manager so the underlying
    connection is closed after reading (the previous version leaked it).
    """
    req = urllib.request.Request(url=url)
    with urllib.request.urlopen(req) as response:
        return response.read()
def scrape_tm_details_to_dfs():
    """Scrape the Taskmaster wiki episode list; return its first 15 HTML tables as DataFrames."""
    page = url_get_contents('https://taskmaster.fandom.com/wiki/Episode_list').decode('utf-8')
    table_parser = HTMLTableParser()
    table_parser.feed(page)
    frames = []
    for table_index, table in enumerate(table_parser.tables):
        # Only tables 0..14 hold per-series episode data.
        if table_index < 15:
            frames.append(pd.DataFrame(table))
    return frames
def build_temp_csvs(all_dfs, temp_dir):
    """Clear *temp_dir*, then dump each DataFrame to temp_dir/df_NN.csv (zero-padded, no index)."""
    # Remove leftovers from a previous run so stale tables never get re-parsed.
    for leftover in os.listdir(temp_dir):
        os.remove(os.path.join(temp_dir, leftover))
    for position, frame in enumerate(all_dfs):
        frame.to_csv(f"{temp_dir}/df_{position:02d}.csv", index=False)
def determine_series_names(all_input_dfs):
    """Label each scraped table: regular series, Champion of Champions, or the New Year special.

    Produces "Series 1".."Series 5", then "COC 1", then the next five series,
    and so on; position 14 is always the "New Years" special. One extra label
    beyond ``len(all_input_dfs)`` positions is produced (loop is inclusive),
    matching the historical behaviour.
    """
    total = len(all_input_dfs)
    cursor = 0
    series_in_block = 1   # 1..5 position within the current run of series
    coc_number = 1        # next Champion of Champions index
    blocks_done = 0       # completed groups of five series
    names = []
    while cursor <= total:
        if cursor == 14:
            names.append("New Years")
        else:
            names.append("Series " + str(series_in_block + (5 * blocks_done)))
            series_in_block += 1
            if series_in_block >= 6:
                # A block of five series is done: insert its COC special.
                series_in_block = 1
                names.append("COC " + str(coc_number))
                cursor += 1
                blocks_done += 1
                coc_number += 1
        cursor += 1
    return names
def create_final_raw_csv(dir_path_to_temp_csvs, dir_path_final_csvs):
    """Scrape episode tables, stage them as temp CSVs, then parse each into a final per-season CSV."""
    # Start from a clean output directory.
    for stale in os.listdir(dir_path_final_csvs):
        os.remove(os.path.join(dir_path_final_csvs, stale))
    taskmaster_dfs = scrape_tm_details_to_dfs()
    build_temp_csvs(all_dfs=taskmaster_dfs, temp_dir=dir_path_to_temp_csvs)
    series_names = determine_series_names(all_input_dfs=taskmaster_dfs)
    for position, filename in enumerate(os.listdir(dir_path_to_temp_csvs)):
        print('processing', position, filename, series_names[position])
        candidate = os.path.join(dir_path_to_temp_csvs, filename)
        if os.path.isfile(candidate):
            parse_taskmaster_csv(infile=candidate, series_name=series_names[position], result_path=dir_path_final_csvs)
def merge_final_dataset(inpath_final_individual_csvs):
    """Concatenate every per-season CSV in the given directory into
    'Taskmaster Full Dataset.csv', written one level above that directory.

    FIX: uses explicit paths instead of os.chdir()/os.chdir("..") so the
    process-wide working directory is left untouched; the output lands in
    the same place the original wrote it for a relative input path.

    Args:
        inpath_final_individual_csvs (str): directory of per-season CSVs.
    """
    csv_names = [name for name in os.listdir(inpath_final_individual_csvs)
                 if name.endswith(".csv")]
    frames = (pd.read_csv(os.path.join(inpath_final_individual_csvs, name))
              for name in csv_names)
    df = pd.concat(frames, ignore_index=True)
    out_path = os.path.join(inpath_final_individual_csvs, os.pardir,
                            "Taskmaster Full Dataset.csv")
    df.to_csv(out_path)
if __name__ == "__main__":
    # End-to-end run: scrape the wiki, write per-season CSVs, then merge
    # them into the single full-dataset CSV (merge call follows below).
    dir_path_to_temp_csvs="temp_csvs"
    dir_path_final_csvs="final_season_csvs"
    create_final_raw_csv(dir_path_to_temp_csvs, dir_path_final_csvs)
merge_final_dataset(inpath_final_individual_csvs=dir_path_final_csvs) | 2,279 | 0 | 138 |
708386fb48531f9d717c745431894bfffde53abf | 9,715 | py | Python | assignments/ps01/ps1.py | jperuggia/ComputerVision | 6a36cf96dec40fe4cd5584fbc2d8e384a74a66cf | [
"MIT"
] | null | null | null | assignments/ps01/ps1.py | jperuggia/ComputerVision | 6a36cf96dec40fe4cd5584fbc2d8e384a74a66cf | [
"MIT"
] | null | null | null | assignments/ps01/ps1.py | jperuggia/ComputerVision | 6a36cf96dec40fe4cd5584fbc2d8e384a74a66cf | [
"MIT"
] | 2 | 2020-11-02T08:36:01.000Z | 2022-01-05T19:08:53.000Z | import math
import numpy as np
import cv2
import sys
# # Implement the functions below.
def extract_red(image):
    """Return a copy of the red channel of a BGR image.

    Args:
        image (numpy.array): input RGB (BGR in OpenCV) image.

    Returns:
        numpy.array: 2D array holding the red channel; independent of the
        input (mutating it does not touch *image*).
    """
    # OpenCV stores pixels as BGR, so red is the last channel.
    return image[..., 2].copy()
def extract_green(image):
    """Return a copy of the green channel of a BGR image.

    Args:
        image (numpy.array): input RGB (BGR in OpenCV) image.

    Returns:
        numpy.array: 2D array holding the green channel, detached from the
        input array.
    """
    # Green sits in the middle channel of the BGR layout.
    return image[..., 1].copy()
def extract_blue(image):
    """Return a copy of the blue channel of a BGR image.

    Args:
        image (numpy.array): input RGB (BGR in OpenCV) image.

    Returns:
        numpy.array: 2D array holding the blue channel, detached from the
        input array.
    """
    # Blue is the first channel in the BGR layout.
    return image[..., 0].copy()
def swap_green_blue(image):
    """Return a new image with the green and blue channels exchanged.

    Args:
        image (numpy.array): input RGB (BGR in OpenCV) image.

    Returns:
        numpy.array: 3D array with channels reordered to G, B, R (the red
        channel is untouched); the input array is not modified.
    """
    # Fancy indexing over the channel axis produces a fresh copy with the
    # first two (blue, green) channels swapped in one step.
    return image[:, :, [1, 0, 2]]
def copy_paste_middle(src, dst, shape):
    """Copy the centered region of size *shape* from src into the center of dst.

    Both inputs are monochrome 2D arrays and neither is modified; a patched
    copy of dst is returned.  All centers and half-sizes round down, so an
    odd-sized request loses its last row/column — e.g. copying shape (1, 1)
    from a (2, 2) src places src[0:1, 0:1] into dst[1:2, 1:2] of a (3, 3) dst.

    Args:
        src (numpy.array): 2D array the rectangle is copied from.
        dst (numpy.array): 2D array the rectangle is copied into.
        shape (tuple): (height, width) of the section to transplant.

    Returns:
        numpy.array: copy of dst with the centered patch pasted in.
    """
    half_h = int(np.floor(shape[0] / 2))
    half_w = int(np.floor(shape[1] / 2))

    # Centers of the source and destination images (floored).
    src_center_r = int(np.floor(src.shape[0] / 2))
    src_center_c = int(np.floor(src.shape[1] / 2))
    dst_center_r = int(np.floor(dst.shape[0] / 2))
    dst_center_c = int(np.floor(dst.shape[1] / 2))

    patched = np.copy(dst)
    patched[dst_center_r - half_h: dst_center_r + half_h,
            dst_center_c - half_w: dst_center_c + half_w] = \
        src[src_center_r - half_h: src_center_r + half_h,
            src_center_c - half_w: src_center_c + half_w]
    return patched
def image_stats(image):
    """Return (min, max, mean, stddev) of a monochrome image as floats.

    Args:
        image (numpy.array): input 2D image.

    Returns:
        tuple: (min, max, mean, stddev), each converted to float.
    """
    raw = (np.min(image), np.max(image), np.mean(image), np.std(image))
    # Explicit float() conversion replaces the old `1.*` coercion trick.
    return tuple(float(value) for value in raw)
def center_and_normalize(image, scale):
    """Rescale *image* so its standard deviation equals *scale* while the
    mean stays unchanged.

    No clipping is performed, so out-of-range pixel values are possible;
    convert the input to float64 first to avoid overflow surprises.

    Args:
        image (numpy.array): input 2D image.
        scale (int or float): target standard deviation.

    Returns:
        numpy.array: rescaled 2D image.
    """
    mean = np.mean(image)
    spread = np.std(image)
    # Z-score the pixels, stretch the deviation to `scale`, restore the mean.
    return ((image - mean) / spread) * scale + mean
def shift_image_left(image, shift):
    """Shift a monochrome image *shift* pixels to the left, replicating the
    rightmost remaining column to preserve the original shape.

    FIX: implemented with np.pad(mode='edge'), which reproduces the original
    cv2.copyMakeBorder(..., BORDER_REPLICATE) right-side fill, so this
    pure-numpy operation no longer requires OpenCV.

    Args:
        image (numpy.array): input 2D image.
        shift (int): number of pixels to shift; 0 means no displacement.

    Returns:
        numpy.array: shifted 2D image with the same shape as the input.
    """
    temp_image = np.copy(image)
    cropped = temp_image[:, shift:]
    # Pad only on the right; mode='edge' repeats the last column, matching
    # BORDER_REPLICATE for this one-sided fill.
    return np.pad(cropped, ((0, 0), (0, shift)), mode="edge")
def difference_image(img1, img2):
    """Return (img1 - img2) min-max normalized and scaled to [0, 255].

    Fixes two issues in the original:
      * ``np.float`` was removed in NumPy 1.24 — ``np.float64`` is used
        explicitly, so the function works on current NumPy.
      * the min-max scaling is done in plain numpy, dropping the OpenCV
        dependency for this arithmetic-only routine.

    Args:
        img1 (numpy.array): input 2D image.
        img2 (numpy.array): input 2D image.

    Returns:
        numpy.array: float 2D image spanning [0, 255]; all zeros when the
        difference is constant (e.g. identical inputs).
    """
    difference = img1.astype(np.float64) - img2.astype(np.float64)
    lo = difference.min()
    hi = difference.max()
    if hi == lo:
        # Zero dynamic range: min-max scaling is undefined, return zeros.
        return np.zeros_like(difference)
    # Same mapping as cv2.normalize(..., alpha=0, beta=255, NORM_MINMAX).
    return (difference - lo) * (255.0 / (hi - lo))
def add_noise(image, channel, sigma):
    """Return a float copy of *image* with zero-mean Gaussian noise of
    standard deviation *sigma* added to the selected channel only.

    The result is intentionally neither clipped nor rescaled, so values
    outside [0, 255] can occur; pass a float64 image to avoid surprises.

    Args:
        image (numpy.array): input RGB (BGR in OpenCV) image.
        channel (int): index of the channel to perturb (0-2).
        sigma (float): Gaussian noise standard deviation.

    Returns:
        numpy.array: float 3D array, same shape as *image*; the input
        array is left unmodified.
    """
    # Noise is drawn for the full image shape (only one channel of it is
    # consumed), keeping the RNG consumption identical to the original.
    noise = sigma * np.random.randn(*image.shape)
    noisy = image * 1.0  # float copy of the input
    noisy[:, :, channel] = noisy[:, :, channel] + noise[:, :, channel]
    return noisy
| 37.509653 | 113 | 0.68142 | import math
import numpy as np
import cv2
import sys
# # Implement the functions below.
def extract_red(image):
""" Returns the red channel of the input image. It is highly recommended to make a copy of the
input image in order to avoid modifying the original array. You can do this by calling:
temp_image = np.copy(image)
Args:
image (numpy.array): Input RGB (BGR in OpenCV) image.
Returns:
numpy.array: Output 2D array containing the red channel.
"""
# Since Red is last index, we want all rows, columns, and the last channel.
return np.copy(image[:, :, 2])
def extract_green(image):
""" Returns the green channel of the input image. It is highly recommended to make a copy of the
input image in order to avoid modifying the original array. You can do this by calling:
temp_image = np.copy(image)
Args:
image (numpy.array): Input RGB (BGR in OpenCV) image.
Returns:
numpy.array: Output 2D array containing the green channel.
"""
# Return green channel, all rows, columns
return np.copy(image[:, :, 1])
def extract_blue(image):
""" Returns the blue channel of the input image. It is highly recommended to make a copy of the
input image in order to avoid modifying the original array. You can do this by calling:
temp_image = np.copy(image)
Args:
image (numpy.array): Input RGB (BGR in OpenCV) image.
Returns:
numpy.array: Output 2D array containing the blue channel.
"""
# Since blue is the first index, get first channel.
return np.copy(image[:, :, 0])
def swap_green_blue(image):
""" Returns an image with the green and blue channels of the input image swapped. It is highly
recommended to make a copy of the input image in order to avoid modifying the original array.
You can do this by calling:
temp_image = np.copy(image)
Args:
image (numpy.array): Input RGB (BGR in OpenCV) image.
Returns:
numpy.array: Output 3D array with the green and blue channels swapped.
"""
temp_image = np.copy(image)
temp_image[:, :, 0] = extract_green(image)
temp_image[:, :, 1] = extract_blue(image)
return temp_image
def copy_paste_middle(src, dst, shape):
""" Copies the middle region of size shape from src to the middle of dst. It is
highly recommended to make a copy of the input image in order to avoid modifying the
original array. You can do this by calling:
temp_image = np.copy(image)
Note: Assumes that src and dst are monochrome images, i.e. 2d arrays.
Note: Where 'middle' is ambiguous because of any difference in the oddness
or evenness of the size of the copied region and the image size, the function
rounds downwards. E.g. in copying a shape = (1,1) from a src image of size (2,2)
into an dst image of size (3,3), the function copies the range [0:1,0:1] of
the src into the range [1:2,1:2] of the dst.
Args:
src (numpy.array): 2D array where the rectangular shape will be copied from.
dst (numpy.array): 2D array where the rectangular shape will be copied to.
shape (tuple): Tuple containing the height (int) and width (int) of the section to be
copied.
Returns:
numpy.array: Output monochrome image (2D array)
"""
src = np.copy(src)
dst = np.copy(dst)
# height is rows, width is columns
src_rows, src_cols = src.shape
dst_rows, dst_cols = dst.shape
# shape size mid points.
shape_mid_rows = int(np.floor(shape[0] / 2))
shape_mid_cols = int(np.floor(shape[1] / 2))
# mid point of the "copy" image
copy_mid_row = int(np.floor(src_rows / 2))
copy_mid_col = int(np.floor(src_cols / 2))
# mid points of the paste image.
paste_mid_row = int(np.floor(dst_rows / 2))
paste_mid_col = int(np.floor(dst_cols / 2))
# calculate the shifts to make sure copy is correct.
r1_dst, r2_dst, c1_dst, c2_dst, r1_src, r2_src, c1_src, c2_src = [
paste_mid_row - shape_mid_rows,
paste_mid_row + shape_mid_rows,
paste_mid_col - shape_mid_cols,
paste_mid_col + shape_mid_cols,
copy_mid_row - shape_mid_rows,
copy_mid_row + shape_mid_rows,
copy_mid_col - shape_mid_cols,
copy_mid_col + shape_mid_cols
]
dst[r1_dst: r2_dst, c1_dst: c2_dst] = src[r1_src: r2_src, c1_src: c2_src]
return dst
def image_stats(image):
""" Returns the tuple (min,max,mean,stddev) of statistics for the input monochrome image.
In order to become more familiar with Numpy, you should look for pre-defined functions
that do these operations i.e. numpy.min.
It is highly recommended to make a copy of the input image in order to avoid modifying
the original array. You can do this by calling:
temp_image = np.copy(image)
Args:
image (numpy.array): Input 2D image.
Returns:
tuple: Four-element tuple containing:
min (float): Input array minimum value.
max (float): Input array maximum value.
mean (float): Input array mean / average value.
stddev (float): Input array standard deviation.
"""
return 1.*np.min(image), 1.*np.max(image), 1.*np.mean(image), 1.*np.std(image)
def center_and_normalize(image, scale):
""" Returns an image with the same mean as the original but with values scaled about the
mean so as to have a standard deviation of "scale".
Note: This function makes no defense against the creation
of out-of-range pixel values. Consider converting the input image to
a float64 type before passing in an image.
It is highly recommended to make a copy of the input image in order to avoid modifying
the original array. You can do this by calling:
temp_image = np.copy(image)
Args:
image (numpy.array): Input 2D image.
scale (int or float): scale factor.
Returns:
numpy.array: Output 2D image.
"""
i_min, i_max, i_mean, i_std = image_stats(image)
# take the mean from the image, then divide by the std deviation. We then scale by the
# scale factor and then add the mean back into the image.
normal_image = (((image-i_mean) / i_std) * scale) + i_mean
return normal_image
def shift_image_left(image, shift):
""" Outputs the input monochrome image shifted shift pixels to the left.
The returned image has the same shape as the original with
the BORDER_REPLICATE rule to fill-in missing values. See
http://docs.opencv.org/2.4/doc/tutorials/imgproc/imgtrans/copyMakeBorder/copyMakeBorder.html?highlight=copy
for further explanation.
It is highly recommended to make a copy of the input image in order to avoid modifying
the original array. You can do this by calling:
temp_image = np.copy(image)
Args:
image (numpy.array): Input 2D image.
shift (int): Displacement value representing the number of pixels to shift the input image.
This parameter may be 0 representing zero displacement.
Returns:
numpy.array: Output shifted 2D image.
"""
temp_image = np.copy(image)
# take the temp image, all rows, from column defined in shift to end, move shift using border replicate.
return cv2.copyMakeBorder(temp_image[:, shift:], 0, 0, 0, shift, cv2.BORDER_REPLICATE)
def difference_image(img1, img2):
""" Returns the difference between the two input images (img1 - img2). The resulting array must be normalized
and scaled to fit [0, 255].
It is highly recommended to make a copy of the input image in order to avoid modifying
the original array. You can do this by calling:
temp_image = np.copy(image)
Args:
img1 (numpy.array): Input 2D image.
img2 (numpy.array): Input 2D image.
Returns:
numpy.array: Output 2D image containing the result of subtracting img2 from img1.
"""
difference = img1.astype(np.float) - img2.astype(np.float)
output_image = np.zeros(difference.shape)
cv2.normalize(difference, output_image, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
# print("Max Value is ", max(output_image.flatten()))
# print("Min Value is ", min(output_image.flatten()))
return output_image
def add_noise(image, channel, sigma):
""" Returns a copy of the input color image with Gaussian noise added to
channel (0-2). The Gaussian noise mean must be zero. The parameter sigma
controls the standard deviation of the noise.
The returned array values must not be clipped or normalized and scaled. This means that
there could be values that are not in [0, 255].
Note: This function makes no defense against the creation
of out-of-range pixel values. Consider converting the input image to
a float64 type before passing in an image.
It is highly recommended to make a copy of the input image in order to avoid modifying
the original array. You can do this by calling:
temp_image = np.copy(image)
Args:
image (numpy.array): input RGB (BGR in OpenCV) image.
channel (int): Channel index value.
sigma (float): Gaussian noise standard deviation.
Returns:
numpy.array: Output 3D array containing the result of adding Gaussian noise to the
specified channel.
"""
# generate random noise using the image.shape tuple as the dimensions.
gaussian_noise = np.random.randn(*image.shape) * sigma
temp_image = np.copy(image)
temp_image = (temp_image * 1.0) # make it a float
temp_image[:, :, channel] += gaussian_noise[:, :, channel]
return temp_image
| 0 | 0 | 0 |
4c4a0f16906d029c85e9f42492e9b35e756b606c | 1,287 | py | Python | src/fixed_prob_counter.py | MarioCSilva/Approximate_Counting | 77ba5f5a87755bddabb5b0a8bb16d0ab7976a5a3 | [
"MIT"
] | null | null | null | src/fixed_prob_counter.py | MarioCSilva/Approximate_Counting | 77ba5f5a87755bddabb5b0a8bb16d0ab7976a5a3 | [
"MIT"
] | null | null | null | src/fixed_prob_counter.py | MarioCSilva/Approximate_Counting | 77ba5f5a87755bddabb5b0a8bb16d0ab7976a5a3 | [
"MIT"
] | null | null | null | from collections import defaultdict
import re
from utils import open_file
from random import random, seed
class FixedProbCounter():
'''Reads file in chunks
counts the letters and stores the event
gets the dictionary with the number of occurrences of each letter
using a fixed probability of 1 / 8
'''
| 27.978261 | 92 | 0.609946 | from collections import defaultdict
import re
from utils import open_file
from random import random, seed
class FixedProbCounter():
    """Approximate letter-frequency counter: each alphabetic character is
    recorded with a fixed probability of 1/8, and the sampled counts are
    scaled back up by 8 to estimate the true totals."""

    def __init__(self, fname="../datasets/it_book.txt"):
        # Fixed RNG seed keeps the sampling reproducible across runs.
        seed(93430)
        self.fname = fname
        self.fixed_probability = 1 / 8

    def __str__(self):
        return "Fixed Probability Counter with 1 / 8"

    def count(self):
        """Read the file in 1 KiB chunks and, for every alphabetic
        character, count its uppercased form with probability 1/8,
        populating self.letter_occur."""
        self.letter_occur = defaultdict(int)
        file = open_file(self.fname, 'r')
        try:
            # reads chunk by chunk
            while chunk := file.read(1024):
                for letter in chunk:
                    # skip non-alphabetic chars; count with fixed probability
                    if letter.isalpha() and random() <= self.fixed_probability:
                        self.letter_occur[letter.upper()] += 1
        finally:
            # BUG FIX: the original had `file.close` without parentheses,
            # so the handle was never actually closed.
            file.close()

    def estimate_events(self):
        """Scale every sampled count by 1/probability to estimate the real
        number of occurrences, stored in self.estimated_letter_occur."""
        self.estimated_letter_occur = {
            letter: int(occur * (1 / self.fixed_probability))
            for letter, occur in self.letter_occur.items()
        }
| 840 | 0 | 111 |
72bbc3885b283ceb98305b72a26a6cdb3e96e915 | 1,850 | py | Python | booksim2/utils/plot_reconfig_overhead.py | jyhuang91/flyover | 952a0fffee952c9f88b93017b6bba65a84d562cb | [
"MIT"
] | 3 | 2020-11-01T08:23:10.000Z | 2021-12-21T02:53:36.000Z | booksim2/utils/plot_reconfig_overhead.py | jyhuang91/flyover | 952a0fffee952c9f88b93017b6bba65a84d562cb | [
"MIT"
] | null | null | null | booksim2/utils/plot_reconfig_overhead.py | jyhuang91/flyover | 952a0fffee952c9f88b93017b6bba65a84d562cb | [
"MIT"
] | 1 | 2020-12-07T00:57:30.000Z | 2020-12-07T00:57:30.000Z | #!/usr/bin/python
import sys
import numpy as np
import matplotlib.pyplot as plt
from easypyplot import pdf, color
from easypyplot import format as fmt
if __name__ == '__main__':
main()
| 27.205882 | 72 | 0.564324 | #!/usr/bin/python
import sys
import numpy as np
import matplotlib.pyplot as plt
from easypyplot import pdf, color
from easypyplot import format as fmt
def main():
    """Plot packet latency over time for the RP and FLOV schemes during
    reconfiguration.

    Reads '../results/reconfig/latency.txt' and writes the figure to
    'reconfig_overhead.pdf'.  FIX: the results file is opened with a
    context manager so the handle is closed even on a parse error (the
    original never closed it).
    """
    schemes = ['RP', 'FLOV']
    latencies = []
    filename = '../results/reconfig/latency.txt'
    with open(filename) as infile:
        for l, line in enumerate(infile):
            if 'time' in line:
                # Header row: cycle timestamps; first and last tokens are
                # labels/markers and are dropped.
                line = line.split()
                cycles = [int(cycle) for cycle in line[1:-1]]
            elif 'RP' in line:
                line = line.split()
                latencies.append([float(latency) for latency in line[1:-1]])
            elif 'gFLOV' in line:
                line = line.split()
                latencies.append([float(latency) for latency in line[1:-1]])
    # NOTE(review): `cycles` is only bound if a 'time' header row exists —
    # confirm the results file always contains one before the data rows.
    # figure generation
    plt.rc('font', size=14)
    plt.rc('legend', fontsize=14)
    colors = ['#f2a900', '#00a9e0']
    linestyles = ['-', '-']
    markers = ['o', '^']
    figname = 'reconfig_overhead.pdf'
    pdfpage, fig = pdf.plot_setup(figname, figsize=(8, 4), fontsize=14)
    ax = fig.gca()
    for s, scheme in enumerate(schemes):
        ax.plot(
            cycles,
            latencies[s],
            marker=markers[s],
            markersize=9,
            markeredgewidth=2,
            markeredgecolor=colors[s],
            fillstyle='none',
            color=colors[s],
            linestyle=linestyles[s],
            linewidth=2,
            label=scheme)
    ax.set_ylabel('Packet Latency (Cycles)')
    ax.set_xlabel('Timeline (Cycles)')
    ax.yaxis.grid(True, linestyle='--', color='black')
    hdls, lab = ax.get_legend_handles_labels()
    ax.legend(
        hdls,
        lab,
        loc='upper center',
        bbox_to_anchor=(0.5, 1.2),
        ncol=2,
        frameon=False)
    fig.subplots_adjust(top=0.8, bottom=0.2)
    pdf.plot_teardown(pdfpage, fig)
# Script entry point: generate the reconfiguration-overhead latency plot.
if __name__ == '__main__':
    main()
| 1,634 | 0 | 23 |
d8a8055897358a6f2081ccf724dd6e593ed1588a | 1,450 | py | Python | nipyapi/webui/nifi_web/migrations/0003_auto_20191105_1616.py | achristianson/nipyapi-ds | d027433abf0c2fd08a35b5ec95534346e205c681 | [
"Apache-2.0"
] | 3 | 2019-10-11T02:58:04.000Z | 2022-02-26T06:48:24.000Z | nipyapi/webui/nifi_web/migrations/0003_auto_20191105_1616.py | achristianson/nipyapi-ds | d027433abf0c2fd08a35b5ec95534346e205c681 | [
"Apache-2.0"
] | 2 | 2021-03-09T19:35:35.000Z | 2021-05-10T16:46:23.000Z | nipyapi/webui/nifi_web/migrations/0003_auto_20191105_1616.py | achristianson/nipyapi-ds | d027433abf0c2fd08a35b5ec95534346e205c681 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.2.6 on 2019-11-05 16:16
from django.db import migrations, models
import django.db.models.deletion
| 37.179487 | 118 | 0.590345 | # Generated by Django 2.2.6 on 2019-11-05 16:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('nifi_web', '0002_nifiinstance_hostname'),
]
operations = [
migrations.CreateModel(
name='NifiImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('git_repo', models.CharField(max_length=1000)),
('branch', models.CharField(max_length=1000)),
('tag', models.CharField(max_length=1000)),
('created_at', models.DateTimeField(auto_now_add=True)),
],
),
migrations.AddField(
model_name='nifiinstance',
name='image',
field=models.CharField(default='apache/nifi:latest', max_length=1000),
),
migrations.CreateModel(
name='NifiImageBuild',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('state', models.CharField(default='PENDING_BUILD', max_length=100)),
('created_at', models.DateTimeField(auto_now_add=True)),
('cluster', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='nifi_web.K8sCluster')),
],
),
]
| 0 | 1,303 | 23 |
7a49b5a24a8d918efad9ff2cfdfd4e6763f07d4f | 27,448 | py | Python | Scripts/build/lib.linux-x86_64-2.7/rdpy/protocol/rdp/sec.py | FurqanKhan1/Dictator | 74e29c12a8f92292ab3275661622c0632cdd0a7b | [
"Unlicense"
] | 5 | 2019-03-14T10:17:22.000Z | 2019-10-23T14:04:12.000Z | Scripts/build/lib.linux-x86_64-2.7/rdpy/protocol/rdp/sec.py | FurqanKhan1/Dictator | 74e29c12a8f92292ab3275661622c0632cdd0a7b | [
"Unlicense"
] | null | null | null | Scripts/build/lib.linux-x86_64-2.7/rdpy/protocol/rdp/sec.py | FurqanKhan1/Dictator | 74e29c12a8f92292ab3275661622c0632cdd0a7b | [
"Unlicense"
] | 14 | 2019-03-14T10:34:02.000Z | 2021-10-31T17:34:13.000Z | #
# Copyright (c) 2014-2015 Sylvain Peyrefitte
#
# This file is part of rdpy.
#
# rdpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
RDP Standard security layer
"""
import hashlib
import struct
import sha, md5

import lic, tpkt
from t125 import gcc, mcs
from rdpy.core.type import CompositeType, CallableValue, Stream, UInt32Le, UInt16Le, String, sizeof, UInt8
from rdpy.core.layer import LayerAutomata, IStreamSender
from rdpy.core.error import InvalidExpectedDataException
from rdpy.core import log
from rdpy.security import rc4
import rdpy.security.rsa_wrapper as rsa
class SecurityFlag(object):
    """
    @summary: Microsoft security flags; bitmask values combined in the
              flags field of the RDP security header.
    @see: http://msdn.microsoft.com/en-us/library/cc240579.aspx
    """
    SEC_EXCHANGE_PKT = 0x0001
    SEC_TRANSPORT_REQ = 0x0002
    RDP_SEC_TRANSPORT_RSP = 0x0004
    SEC_ENCRYPT = 0x0008
    SEC_RESET_SEQNO = 0x0010
    SEC_IGNORE_SEQNO = 0x0020
    SEC_INFO_PKT = 0x0040
    SEC_LICENSE_PKT = 0x0080
    SEC_LICENSE_ENCRYPT_CS = 0x0200
    SEC_LICENSE_ENCRYPT_SC = 0x0200
    SEC_REDIRECTION_PKT = 0x0400
    SEC_SECURE_CHECKSUM = 0x0800
    SEC_AUTODETECT_REQ = 0x1000
    SEC_AUTODETECT_RSP = 0x2000
    SEC_HEARTBEAT = 0x4000
    SEC_FLAGSHI_VALID = 0x8000
class InfoFlag(object):
    """
    @summary: Client capabilities information flags (INFO_* bitmask).
              NOTE(review): values appear to match the TS_INFO_PACKET flags
              of MS-RDPBCGR — confirm against the spec.
    """
    INFO_MOUSE = 0x00000001
    INFO_DISABLECTRLALTDEL = 0x00000002
    INFO_AUTOLOGON = 0x00000008
    INFO_UNICODE = 0x00000010
    INFO_MAXIMIZESHELL = 0x00000020
    INFO_LOGONNOTIFY = 0x00000040
    INFO_COMPRESSION = 0x00000080
    INFO_ENABLEWINDOWSKEY = 0x00000100
    INFO_REMOTECONSOLEAUDIO = 0x00002000
    INFO_FORCE_ENCRYPTED_CS_PDU = 0x00004000
    INFO_RAIL = 0x00008000
    INFO_LOGONERRORS = 0x00010000
    INFO_MOUSE_HAS_WHEEL = 0x00020000
    INFO_PASSWORD_IS_SC_PIN = 0x00040000
    INFO_NOAUDIOPLAYBACK = 0x00080000
    INFO_USING_SAVED_CREDS = 0x00100000
    INFO_AUDIOCAPTURE = 0x00200000
    INFO_VIDEO_DISABLE = 0x00400000
    INFO_CompressionTypeMask = 0x00001E00
class PerfFlag(object):
    """
    @summary: Network performance flags (PERF_* bitmask) that enable or
              disable desktop-experience features such as wallpaper,
              window-drag contents and menu animations.
    """
    PERF_DISABLE_WALLPAPER = 0x00000001
    PERF_DISABLE_FULLWINDOWDRAG = 0x00000002
    PERF_DISABLE_MENUANIMATIONS = 0x00000004
    PERF_DISABLE_THEMING = 0x00000008
    PERF_DISABLE_CURSOR_SHADOW = 0x00000020
    PERF_DISABLE_CURSORSETTINGS = 0x00000040
    PERF_ENABLE_FONT_SMOOTHING = 0x00000080
    PERF_ENABLE_DESKTOP_COMPOSITION = 0x00000100
class AfInet(object):
    """
    @summary: Address family constants: AF_INET for IPv4, AF_INET6 for IPv6.
    """
    AF_INET = 0x00002
    AF_INET6 = 0x0017
def saltedHash(inputData, salt, salt1, salt2):
    """
    @summary: Generate particular signature from combination of sha1 and md5.
              FIX: uses hashlib (available on Python 2.5+ and 3) instead of
              the long-deprecated sha/md5 modules removed in Python 3.
    @see: http://msdn.microsoft.com/en-us/library/cc241992.aspx
    @param inputData: strange input (see doc)
    @param salt: salt for context call (only its first 48 bytes are used)
    @param salt1: another salt (ex : client random)
    @param salt2: another salt (ex: server random)
    @return: MD5(Salt + SHA1(Input + Salt + Salt1 + Salt2))
    """
    sha1Digest = hashlib.sha1()
    md5Digest = hashlib.md5()

    sha1Digest.update(inputData)
    sha1Digest.update(salt[:48])
    sha1Digest.update(salt1)
    sha1Digest.update(salt2)
    sha1Sig = sha1Digest.digest()

    md5Digest.update(salt[:48])
    md5Digest.update(sha1Sig)
    return md5Digest.digest()
def finalHash(key, random1, random2):
    """
    @summary: MD5(in0[:16] + in1[:32] + in2[:32]).
              FIX: uses hashlib instead of the deprecated md5 module
              (removed in Python 3; hashlib works on Python 2 as well).
    @param key: in 16 (16-byte key material)
    @param random1: in 32 (e.g. client random)
    @param random2: in 32 (e.g. server random)
    @return: 16-byte MD5 digest
    """
    md5Digest = hashlib.md5()
    md5Digest.update(key)
    md5Digest.update(random1)
    md5Digest.update(random2)
    return md5Digest.digest()
def masterSecret(secret, random1, random2):
    """
    @summary: Generate master secret (three chained salted hashes).
              FIX: bytes literals for the "A"/"BB"/"CCC" salts — identical
              bytes on Python 2, and valid on Python 3 where mixing str
              and bytes in the hash would raise.
    @param secret: {str} secret
    @param random1: {str} client random
    @param random2: {str} server random
    @see: http://msdn.microsoft.com/en-us/library/cc241992.aspx
    @return: {str} 48-byte master secret
    """
    return (saltedHash(b"A", secret, random1, random2)
            + saltedHash(b"BB", secret, random1, random2)
            + saltedHash(b"CCC", secret, random1, random2))
def sessionKeyBlob(secret, random1, random2):
    """
    @summary: Generate the session key blob (three chained salted hashes).
              FIX: bytes literals for the "X"/"YY"/"ZZZ" salts — identical
              bytes on Python 2, valid on Python 3.
    @param secret: {str} secret
    @param random1: {str} client random
    @param random2: {str} server random
    @return: {str} 48-byte session key blob
    """
    return (saltedHash(b"X", secret, random1, random2)
            + saltedHash(b"YY", secret, random1, random2)
            + saltedHash(b"ZZZ", secret, random1, random2))
def macData(macSaltKey, data):
    """
    @summary: Compute the MAC signature over *data*.
              FIX: hashlib replaces the deprecated sha/md5 modules, and
              struct.pack('<I', ...) produces the same little-endian
              4-byte length the original Stream/UInt32Le round-trip did.
    @see: http://msdn.microsoft.com/en-us/library/cc241995.aspx
    @param macSaltKey: {str} mac key
    @param data: {str} data to sign
    @return: {str} 16-byte signature
    """
    sha1Digest = hashlib.sha1()
    md5Digest = hashlib.md5()

    # Little-endian 32-bit length prefix of the signed data.
    dataLength = struct.pack('<I', len(data))

    sha1Digest.update(macSaltKey)
    sha1Digest.update(b"\x36" * 40)
    sha1Digest.update(dataLength)
    sha1Digest.update(data)
    sha1Sig = sha1Digest.digest()

    md5Digest.update(macSaltKey)
    md5Digest.update(b"\x5c" * 48)
    md5Digest.update(sha1Sig)
    return md5Digest.digest()
def macSaltedData(macSaltKey, data, encryptionCount):
    """
    @summary: Compute the salted MAC signature (includes the encryption
              counter).  FIX: hashlib replaces the deprecated sha/md5
              modules; struct.pack('<I', ...) yields the same bytes as the
              original Stream/UInt32Le round-trips.
    @see: https://msdn.microsoft.com/en-us/library/cc240789.aspx
    @param macSaltKey: {str} mac key
    @param data: {str} data to sign
    @param encryptionCount: number of packets encrypted so far
    @return: {str} 16-byte signature
    """
    sha1Digest = hashlib.sha1()
    md5Digest = hashlib.md5()

    # Little-endian 32-bit encodings of the data length and packet counter.
    dataLengthS = struct.pack('<I', len(data))
    encryptionCountS = struct.pack('<I', encryptionCount)

    sha1Digest.update(macSaltKey)
    sha1Digest.update(b"\x36" * 40)
    sha1Digest.update(dataLengthS)
    sha1Digest.update(data)
    sha1Digest.update(encryptionCountS)
    sha1Sig = sha1Digest.digest()

    md5Digest.update(macSaltKey)
    md5Digest.update(b"\x5c" * 48)
    md5Digest.update(sha1Sig)
    return md5Digest.digest()
def tempKey(initialKey, currentKey):
    """
    @summary: Derive the temporary key used during session-key update.
              FIX: hashlib replaces the deprecated sha/md5 modules;
              bytes pad constants keep it valid on Python 3.
    @see: http://msdn.microsoft.com/en-us/library/cc240792.aspx
    @param initialKey: {str} key computed at connection time
    @param currentKey: {str} key currently in use
    @return: {str} 16-byte temp key
    """
    sha1Digest = hashlib.sha1()
    md5Digest = hashlib.md5()

    sha1Digest.update(initialKey)
    sha1Digest.update(b"\x36" * 40)
    sha1Digest.update(currentKey)
    sha1Sig = sha1Digest.digest()

    md5Digest.update(initialKey)
    md5Digest.update(b"\x5c" * 48)
    md5Digest.update(sha1Sig)
    return md5Digest.digest()
def gen40bits(data):
    """
    @summary: generate 40-bit key material from 128-bit data (fixed salt
              prefix + last 5 bytes of the first 8).
              FIX: bytes literal — identical on Python 2, and valid on
              Python 3 where str + bytes concatenation would raise.
    @param data: {str} 128 bits data
    @return: {str} 40 bits data
    @see: http://msdn.microsoft.com/en-us/library/cc240785.aspx
    """
    return b"\xd1\x26\x9e" + data[:8][-5:]
def gen56bits(data):
    """
    @summary: generate 56-bit key material from 128-bit data (fixed salt
              byte + last 7 bytes of the first 8).
              FIX: bytes literal — identical on Python 2, valid on Python 3.
    @param data: {str} 128 bits data
    @return: {str} 56 bits data
    @see: http://msdn.microsoft.com/en-us/library/cc240785.aspx
    """
    return b"\xd1" + data[:8][-7:]
def generateKeys(clientRandom, serverRandom, method):
    """
    @summary: derive the MAC key and both initial session keys from the randoms
    @param clientRandom: {str[32]} client random
    @param serverRandom: {str[32]} server random
    @param method: {gcc.EncryptionMethod} negotiated encryption method
    @see: http://msdn.microsoft.com/en-us/library/cc240785.aspx
    @return: MACKey, initialFirstKey128(ClientdecryptKey, serverEncryptKey), initialSecondKey128(ServerDecryptKey, ClientEncryptKey)
    @raise InvalidExpectedDataException: when method is unknown
    """
    preMasterHash = clientRandom[:24] + serverRandom[:24]
    masterHash = masterSecret(preMasterHash, clientRandom, serverRandom)
    sessionKey = sessionKeyBlob(masterHash, clientRandom, serverRandom)
    #full strength (128 bit) material before method dependent reduction
    macKey128 = sessionKey[:16]
    firstKey128 = finalHash(sessionKey[16:32], clientRandom, serverRandom)
    secondKey128 = finalHash(sessionKey[32:48], clientRandom, serverRandom)
    #the negotiated method selects how each 128 bit key is reduced
    reducers = {
        gcc.EncryptionMethod.ENCRYPTION_FLAG_40BIT : gen40bits,
        gcc.EncryptionMethod.ENCRYPTION_FLAG_56BIT : gen56bits,
        gcc.EncryptionMethod.ENCRYPTION_FLAG_128BIT : lambda key: key
    }
    if method not in reducers:
        raise InvalidExpectedDataException("Bad encryption method")
    reducer = reducers[method]
    return reducer(macKey128), reducer(firstKey128), reducer(secondKey128)
def updateKey(initialKey, currentKey, method):
    """
    @summary: update session key (performed every 4096 packets)
    @param initialKey: {str} Initial key
    @param currentKey: {str} Current key
    @param method: {gcc.EncryptionMethod} negotiated encryption method
    @return newKey: {str} key to use
    @raise InvalidExpectedDataException: when method is unknown
    @see: http://msdn.microsoft.com/en-us/library/cc240792.aspx
    """
    #generate valid key
    if method == gcc.EncryptionMethod.ENCRYPTION_FLAG_40BIT:
        tempKey128 = tempKey(initialKey[:8], currentKey[:8])
        return gen40bits(rc4.crypt(rc4.RC4Key(tempKey128[:8]), tempKey128[:8]))
    elif method == gcc.EncryptionMethod.ENCRYPTION_FLAG_56BIT:
        tempKey128 = tempKey(initialKey[:8], currentKey[:8])
        return gen56bits(rc4.crypt(rc4.RC4Key(tempKey128[:8]), tempKey128[:8]))
    elif method == gcc.EncryptionMethod.ENCRYPTION_FLAG_128BIT:
        tempKey128 = tempKey(initialKey, currentKey)
        return rc4.crypt(rc4.RC4Key(tempKey128), tempKey128)
    #previously fell through and returned None, which caused a cryptic
    #failure later in rc4.RC4Key; fail fast like generateKeys does
    raise InvalidExpectedDataException("Bad encryption method")
class ClientSecurityExchangePDU(CompositeType):
    """
    @summary: Security Exchange PDU carrying the client random for basic security
                (client random is encrypted with the server public key)
    @see: http://msdn.microsoft.com/en-us/library/cc240472.aspx
    """
class RDPInfo(CompositeType):
    """
    @summary: Client information packet
                Contains the credentials (sensitive packet)
    @see: http://msdn.microsoft.com/en-us/library/cc240475.aspx
    """
class RDPExtendedInfo(CompositeType):
    """
    @summary: Additional (extended) client information
    """
class SecLayer(LayerAutomata, IStreamSender, tpkt.IFastPathListener, tpkt.IFastPathSender, mcs.IGCCConfig):
    """
    @summary: Standard RDP security layer
                This layer is as transparent as possible for the upper layer
    """
    def __init__(self, presentation):
        """
        @param presentation: Layer (generally pdu layer)
        """
        LayerAutomata.__init__(self, presentation)
        #this layer is like a fastpath proxy
        self._fastPathTransport = None
        self._fastPathPresentation = None
        #credentials
        self._info = RDPInfo(extendedInfoConditional = lambda:(self.getGCCServerSettings().SC_CORE.rdpVersion.value == gcc.Version.RDP_VERSION_5_PLUS))
        #True if classic encryption is enable
        self._enableEncryption = False
        #Enable Secure Mac generation
        self._enableSecureCheckSum = False
        #initialise decrypt and encrypt keys
        self._macKey = None
        self._initialDecrytKey = None
        self._initialEncryptKey = None
        self._currentDecrytKey = None
        self._currentEncryptKey = None
        #packets processed since last key refresh (keys are updated every 4096 packets)
        self._nbEncryptedPacket = 0
        self._nbDecryptedPacket = 0
        #current rc4 tab
        self._decryptRc4 = None
        self._encryptRc4 = None
    def readEncryptedPayload(self, s, saltedMacGeneration):
        """
        @summary: decrypt basic RDP security payload
        @param s: {Stream} encrypted stream
        @param saltedMacGeneration: {bool} use salted mac generation
        @return: {Stream} decrypted
        @raise InvalidExpectedDataException: when the MAC signature doesn't match
        """
        #if update is needed (every 4096 packets)
        if self._nbDecryptedPacket == 4096:
            log.debug("update decrypt key")
            self._currentDecrytKey = updateKey( self._initialDecrytKey, self._currentDecrytKey, 
                                                self.getGCCServerSettings().SC_SECURITY.encryptionMethod.value)
            self._decryptRc4 = rc4.RC4Key(self._currentDecrytKey)
            self._nbDecryptedPacket = 0
        signature = String(readLen = CallableValue(8))
        encryptedPayload = String()
        s.readType((signature, encryptedPayload))
        decrypted = rc4.crypt(self._decryptRc4, encryptedPayload.value)
        #check signature (plain or salted MAC, depending on the flag)
        if not saltedMacGeneration and macData(self._macKey, decrypted)[:8] != signature.value:
            raise InvalidExpectedDataException("bad signature")
        if saltedMacGeneration and macSaltedData(self._macKey, decrypted, self._nbDecryptedPacket)[:8] != signature.value:
            raise InvalidExpectedDataException("bad signature")
        #count
        self._nbDecryptedPacket += 1
        return Stream(decrypted)
    def writeEncryptedPayload(self, data, saltedMacGeneration):
        """
        @summary: sign and crypt data
        @param data: {Type} raw stream
        @param saltedMacGeneration: {bool} use salted mac generation
        @return: {Tuple} (signature, encryptedData)
        """
        #refresh the encrypt key every 4096 packets
        if self._nbEncryptedPacket == 4096:
            log.debug("update encrypt key")
            self._currentEncryptKey = updateKey( self._initialEncryptKey, self._currentEncryptKey, 
                                                self.getGCCServerSettings().SC_SECURITY.encryptionMethod.value)
            self._encryptRc4 = rc4.RC4Key(self._currentEncryptKey)
            self._nbEncryptedPacket = 0
        self._nbEncryptedPacket += 1
        s = Stream()
        s.writeType(data)
        #NOTE: salted MAC uses the pre-increment counter value (hence the - 1)
        if saltedMacGeneration:
            return (String(macSaltedData(self._macKey, s.getvalue(), self._nbEncryptedPacket - 1)[:8]), String(rc4.crypt(self._encryptRc4, s.getvalue())))
        else:
            return (String(macData(self._macKey, s.getvalue())[:8]), String(rc4.crypt(self._encryptRc4, s.getvalue())))
    def recv(self, data):
        """
        @summary: if basic RDP security layer is activate decrypt
                    else pass to upper layer
        @param data : {Stream} input Stream
        """
        if not self._enableEncryption:
            self._presentation.recv(data)
            return
        securityFlag = UInt16Le()
        securityFlagHi = UInt16Le()
        data.readType((securityFlag, securityFlagHi))
        if securityFlag.value & SecurityFlag.SEC_ENCRYPT:
            data = self.readEncryptedPayload(data, securityFlag.value & SecurityFlag.SEC_SECURE_CHECKSUM)
        self._presentation.recv(data)
    def send(self, data):
        """
        @summary: if basic RDP security layer is activate encrypt
                    else pass to upper layer
        @param data: {Type | Tuple}
        """
        if not self._enableEncryption:
            self._transport.send(data)
            return
        flag = SecurityFlag.SEC_ENCRYPT
        if self._enableSecureCheckSum:
            flag |= SecurityFlag.SEC_SECURE_CHECKSUM
        self.sendFlagged(flag, data)
    def sendFlagged(self, flag, data):
        """
        @summary: explicit send flag method for particular packet
                    (info packet or license packet)
                    If encryption is enable apply it
        @param flag: {integer} security flag
        @param data: {Type | Tuple}
        """
        if flag & SecurityFlag.SEC_ENCRYPT:
            data = self.writeEncryptedPayload(data, flag & SecurityFlag.SEC_SECURE_CHECKSUM)
        self._transport.send((UInt16Le(flag), UInt16Le(), data))
    def recvFastPath(self, secFlag, fastPathS):
        """
        @summary: Call when fast path packet is received
        @param secFlag: {SecFlags}
        @param fastPathS: {Stream}
        """
        if self._enableEncryption and secFlag & tpkt.SecFlags.FASTPATH_OUTPUT_ENCRYPTED:
            fastPathS = self.readEncryptedPayload(fastPathS, secFlag & tpkt.SecFlags.FASTPATH_OUTPUT_SECURE_CHECKSUM)
        self._fastPathPresentation.recvFastPath(secFlag, fastPathS)
    def setFastPathListener(self, fastPathListener):
        """
        @param fastPathListener : {IFastPathListener}
        """
        self._fastPathPresentation = fastPathListener
    def sendFastPath(self, secFlag, fastPathS):
        """
        @summary: Send fastPathS Type as fast path packet
        @param secFlag: {SecFlags}
        @param fastPathS: {Stream} type transform to stream and send as fastpath
        """
        if self._enableEncryption:
            secFlag |= tpkt.SecFlags.FASTPATH_OUTPUT_ENCRYPTED
            if self._enableSecureCheckSum:
                secFlag |= tpkt.SecFlags.FASTPATH_OUTPUT_SECURE_CHECKSUM
            fastPathS = self.writeEncryptedPayload(fastPathS, self._enableSecureCheckSum)
        self._fastPathTransport.sendFastPath(secFlag, fastPathS)
    def setFastPathSender(self, fastPathSender):
        """
        @param fastPathSender: {tpkt.FastPathSender}
        """
        self._fastPathTransport = fastPathSender
    def getUserId(self):
        """
        @return: {integer} mcs user id
        @see: mcs.IGCCConfig
        """
        return self._transport.getUserId()
    def getChannelId(self):
        """
        @return: {integer} return channel id of proxy
        @see: mcs.IGCCConfig
        """
        return self._transport.getChannelId()
    def getGCCClientSettings(self):
        """
        @return: {gcc.Settings} mcs layer gcc client settings
        @see: mcs.IGCCConfig
        """
        return self._transport.getGCCClientSettings()
    def getGCCServerSettings(self):
        """
        @return: {gcc.Settings} mcs layer gcc server settings
        @see: mcs.IGCCConfig
        """
        return self._transport.getGCCServerSettings()
class Client(SecLayer):
    """
    @summary: Client side of security layer
    """
    def __init__(self, presentation):
        """
        @summary: Initialize client security layer and its license automata
        @param presentation: {Layer} upper layer (generally pdu layer)
        """
        SecLayer.__init__(self, presentation)
        #license negotiation handler; recvLicenceInfo dereferenced
        #self._licenceManager but it was never initialized in this class
        self._licenceManager = lic.LicenseManager(self)
    def connect(self):
        """
        @summary: send client random if needed and send info packet
        """
        #standard RDP encryption is only used when no external security protocol was selected
        self._enableEncryption = self.getGCCClientSettings().CS_CORE.serverSelectedProtocol == 0
        if self._enableEncryption:
            self.sendClientRandom()
        self.sendInfoPkt()
    def sendInfoPkt(self):
        """
        @summary: send information packet (with credentials)
                    next state -> recvLicenceInfo
        """
        secFlag = SecurityFlag.SEC_INFO_PKT
        if self._enableEncryption:
            secFlag |= SecurityFlag.SEC_ENCRYPT
        self.sendFlagged(secFlag, self._info)
        self.setNextState(self.recvLicenceInfo)
    def sendClientRandom(self):
        """
        @summary: generate and send client random and init session keys
        """
        #generate client random
        clientRandom = rsa.random(256)
        self._macKey, self._initialDecrytKey, self._initialEncryptKey = generateKeys(   clientRandom, 
                                                                                        self.getGCCServerSettings().SC_SECURITY.serverRandom.value, 
                                                                                        self.getGCCServerSettings().SC_SECURITY.encryptionMethod.value)
        #initialize keys
        self._currentDecrytKey = self._initialDecrytKey
        self._currentEncryptKey = self._initialEncryptKey
        self._decryptRc4 = rc4.RC4Key(self._currentDecrytKey)
        self._encryptRc4 = rc4.RC4Key(self._currentEncryptKey)
        #verify certificate (best effort: only warn on failure)
        if not self.getGCCServerSettings().SC_SECURITY.serverCertificate.certData.verify():
            log.warning("cannot verify server identity")
        #send client random encrypted with the server public key
        serverPublicKey = self.getGCCServerSettings().SC_SECURITY.serverCertificate.certData.getPublicKey()
        message = ClientSecurityExchangePDU()
        #reverse because bignum in little endian
        message.encryptedClientRandom.value = rsa.encrypt(clientRandom[::-1], serverPublicKey)[::-1]
        self.sendFlagged(SecurityFlag.SEC_EXCHANGE_PKT, message)
    def recvLicenceInfo(self, s):
        """
        @summary: Read license info packet and check if is a valid client info
                    Wait Demand Active PDU
        @param s: Stream
        @raise InvalidExpectedDataException: when the packet is not a license packet
        """
        #packet preambule
        securityFlag = UInt16Le()
        securityFlagHi = UInt16Le()
        s.readType((securityFlag, securityFlagHi))
        if not (securityFlag.value & SecurityFlag.SEC_LICENSE_PKT):
            raise InvalidExpectedDataException("waiting license packet")
        if self._licenceManager.recv(s):
            self.setNextState()
            #end of connection step of
            self._presentation.connect()
class Server(SecLayer):
    """
    @summary: Server side of security layer
    """
    def __init__(self, presentation):
        """
        @param presentation: {Layer}
        """
        SecLayer.__init__(self, presentation)
        #512 bits RSA key pair used for the client random exchange
        self._rsaPublicKey, self._rsaPrivateKey = rsa.newkeys(512)
    def connect(self):
        """
        @summary: init automata to wait info packet
        """
        self._enableEncryption = self.getGCCClientSettings().CS_CORE.serverSelectedProtocol == 0
        if self._enableEncryption:
            self.setNextState(self.recvClientRandom)
        else:
            self.setNextState(self.recvInfoPkt)
    def getCertificate(self):
        """
        @summary: generate proprietary certificate from rsa public key
        @return: {gcc.ServerCertificate} signed proprietary certificate
        """
        certificate = gcc.ProprietaryServerCertificate()
        #reversed because bignum are little endian on the wire
        certificate.PublicKeyBlob.modulus.value = rsa.int2bytes(self._rsaPublicKey.n)[::-1]
        certificate.PublicKeyBlob.pubExp.value = self._rsaPublicKey.e
        certificate.sign()
        return gcc.ServerCertificate(certificate)
    def recvClientRandom(self, s):
        """
        @summary: receive client random and generate session keys
        @param s: {Stream}
        @raise InvalidExpectedDataException: when the packet is not a security exchange packet
        """
        #packet preambule
        securityFlag = UInt16Le()
        securityFlagHi = UInt16Le()
        s.readType((securityFlag, securityFlagHi))
        if not (securityFlag.value & SecurityFlag.SEC_EXCHANGE_PKT):
            raise InvalidExpectedDataException("waiting client random")
        message = ClientSecurityExchangePDU()
        s.readType(message)
        #reversed because bignum are little endian on the wire
        clientRandom = rsa.decrypt(message.encryptedClientRandom.value[::-1], self._rsaPrivateKey)[::-1]
        #NOTE: encrypt/decrypt keys are swapped compared to the client side
        self._macKey, self._initialEncryptKey, self._initialDecrytKey = generateKeys(   clientRandom, 
                                                                                        self.getGCCServerSettings().SC_SECURITY.serverRandom.value, 
                                                                                        self.getGCCServerSettings().SC_SECURITY.encryptionMethod.value)
        #initialize keys
        self._currentDecrytKey = self._initialDecrytKey
        self._currentEncryptKey = self._initialEncryptKey
        self._decryptRc4 = rc4.RC4Key(self._currentDecrytKey)
        self._encryptRc4 = rc4.RC4Key(self._currentEncryptKey)
        self.setNextState(self.recvInfoPkt)
    def recvInfoPkt(self, s):
        """
        @summary: receive info packet from client
                    Client credentials
                    Send License valid error message
                    Send Demand Active PDU
                    Wait Confirm Active PDU
        @param s: {Stream}
        @raise InvalidExpectedDataException: when the packet is not an info packet
        """
        securityFlag = UInt16Le()
        securityFlagHi = UInt16Le()
        s.readType((securityFlag, securityFlagHi))
        if not (securityFlag.value & SecurityFlag.SEC_INFO_PKT):
            raise InvalidExpectedDataException("Waiting info packet")
        if securityFlag.value & SecurityFlag.SEC_ENCRYPT:
            s = self.readEncryptedPayload(s, securityFlag.value & SecurityFlag.SEC_SECURE_CHECKSUM)
        s.readType(self._info)
        #next state send error license
        self.sendLicensingErrorMessage()
        #reinit state
        self.setNextState()
        self._presentation.connect()
    def sendLicensingErrorMessage(self):
        """
        @summary: Send a licensing error data
        """
        self.sendFlagged(SecurityFlag.SEC_LICENSE_PKT, lic.createValidClientLicensingErrorMessage())
# Copyright (c) 2014-2015 Sylvain Peyrefitte
#
# This file is part of rdpy.
#
# rdpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
RDP Standard security layer
"""
import sha, md5
import lic, tpkt
from t125 import gcc, mcs
from rdpy.core.type import CompositeType, CallableValue, Stream, UInt32Le, UInt16Le, String, sizeof, UInt8
from rdpy.core.layer import LayerAutomata, IStreamSender
from rdpy.core.error import InvalidExpectedDataException
from rdpy.core import log
from rdpy.security import rc4
import rdpy.security.rsa_wrapper as rsa
class SecurityFlag(object):
    """
    @summary: Microsoft security flags (TS_SECURITY_HEADER flags field)
    @see: http://msdn.microsoft.com/en-us/library/cc240579.aspx
    """
    SEC_EXCHANGE_PKT = 0x0001
    SEC_TRANSPORT_REQ = 0x0002
    RDP_SEC_TRANSPORT_RSP = 0x0004
    SEC_ENCRYPT = 0x0008
    SEC_RESET_SEQNO = 0x0010
    SEC_IGNORE_SEQNO = 0x0020
    SEC_INFO_PKT = 0x0040
    SEC_LICENSE_PKT = 0x0080
    #NOTE(review): both license encrypt flags appear to share 0x0200 — confirm against MS-RDPBCGR
    SEC_LICENSE_ENCRYPT_CS = 0x0200
    SEC_LICENSE_ENCRYPT_SC = 0x0200
    SEC_REDIRECTION_PKT = 0x0400
    SEC_SECURE_CHECKSUM = 0x0800
    SEC_AUTODETECT_REQ = 0x1000
    SEC_AUTODETECT_RSP = 0x2000
    SEC_HEARTBEAT = 0x4000
    SEC_FLAGSHI_VALID = 0x8000
class InfoFlag(object):
    """
    @summary: Client capability information flags, carried in the info packet
    """
    INFO_MOUSE = 0x00000001
    INFO_DISABLECTRLALTDEL = 0x00000002
    INFO_AUTOLOGON = 0x00000008
    INFO_UNICODE = 0x00000010
    INFO_MAXIMIZESHELL = 0x00000020
    INFO_LOGONNOTIFY = 0x00000040
    INFO_COMPRESSION = 0x00000080
    INFO_ENABLEWINDOWSKEY = 0x00000100
    INFO_REMOTECONSOLEAUDIO = 0x00002000
    INFO_FORCE_ENCRYPTED_CS_PDU = 0x00004000
    INFO_RAIL = 0x00008000
    INFO_LOGONERRORS = 0x00010000
    INFO_MOUSE_HAS_WHEEL = 0x00020000
    INFO_PASSWORD_IS_SC_PIN = 0x00040000
    INFO_NOAUDIOPLAYBACK = 0x00080000
    INFO_USING_SAVED_CREDS = 0x00100000
    INFO_AUDIOCAPTURE = 0x00200000
    INFO_VIDEO_DISABLE = 0x00400000
    #mask over the compression type bits
    INFO_CompressionTypeMask = 0x00001E00
class PerfFlag(object):
    """
    @summary: Network performance flags (disable costly UI features on slow links)
    """
    PERF_DISABLE_WALLPAPER = 0x00000001
    PERF_DISABLE_FULLWINDOWDRAG = 0x00000002
    PERF_DISABLE_MENUANIMATIONS = 0x00000004
    PERF_DISABLE_THEMING = 0x00000008
    PERF_DISABLE_CURSOR_SHADOW = 0x00000020
    PERF_DISABLE_CURSORSETTINGS = 0x00000040
    PERF_ENABLE_FONT_SMOOTHING = 0x00000080
    PERF_ENABLE_DESKTOP_COMPOSITION = 0x00000100
class AfInet(object):
    """
    @summary: Address family of the client address (IPv4 or IPv6 style)
    """
    AF_INET = 0x00002
    AF_INET6 = 0x0017
def saltedHash(inputData, salt, salt1, salt2):
    """
    @summary: Generate particular signature from combination of sha1 and md5
    @see: http://msdn.microsoft.com/en-us/library/cc241992.aspx
    @param inputData: strange input (see doc)
    @param salt: salt for context call
    @param salt1: another salt (ex : client random)
    @param salt2: another another salt (ex: server random)
    @return : MD5(Salt + SHA1(Input + Salt + Salt1 + Salt2))
    """
    #inner digest: SHA1(Input + Salt + Salt1 + Salt2)
    innerDigest = sha.new()
    for chunk in (inputData, salt[:48], salt1, salt2):
        innerDigest.update(chunk)
    #outer digest: MD5(Salt + innerSig)
    outerDigest = md5.new()
    outerDigest.update(salt[:48])
    outerDigest.update(innerDigest.digest())
    return outerDigest.digest()
def finalHash(key, random1, random2):
    """
    @summary: MD5(in0[:16] + in1[:32] + in2[:32])
    @param key: in 16
    @param random1: in 32
    @param random2: in 32
    @return MD5(in0[:16] + in1[:32] + in2[:32])
    """
    digest = md5.new()
    for part in (key, random1, random2):
        digest.update(part)
    return digest.digest()
def masterSecret(secret, random1, random2):
    """
    @summary: Generate master secret
    @param secret: {str} secret (pre-master hash)
    @param random1: {str} client random
    @param random2: {str} server random
    @return: {str} concatenation of the "A", "BB" and "CCC" salted hashes
    @see: http://msdn.microsoft.com/en-us/library/cc241992.aspx
    """
    return saltedHash("A", secret, random1, random2) + saltedHash("BB", secret, random1, random2) + saltedHash("CCC", secret, random1, random2)
def sessionKeyBlob(secret, random1, random2):
    """
    @summary: Generate session key blob
    @param secret: {str} secret (master secret)
    @param random1: {str} client random
    @param random2: {str} server random
    @return: {str} concatenation of the "X", "YY" and "ZZZ" salted hashes
    """
    return "".join(saltedHash(label, secret, random1, random2) for label in ("X", "YY", "ZZZ"))
def macData(macSaltKey, data):
    """
    @summary: MAC signature: MD5(key + pad2 + SHA1(key + pad1 + len + data))
    @see: http://msdn.microsoft.com/en-us/library/cc241995.aspx
    @param macSaltKey: {str} mac key
    @param data: {str} data to sign
    @return: {str} signature
    """
    #payload length is serialized as 32 bit little endian
    lengthStream = Stream()
    lengthStream.writeType(UInt32Le(len(data)))
    #inner digest: SHA1 over key, ipad-like constant, length and payload
    innerDigest = sha.new()
    for chunk in (macSaltKey, "\x36" * 40, lengthStream.getvalue(), data):
        innerDigest.update(chunk)
    #outer digest: MD5 over key, opad-like constant and the inner SHA1 signature
    outerDigest = md5.new()
    for chunk in (macSaltKey, "\x5c" * 48, innerDigest.digest()):
        outerDigest.update(chunk)
    return outerDigest.digest()
def macSaltedData(macSaltKey, data, encryptionCount):
    """
    @summary: Salted MAC signature: MD5(key + pad2 + SHA1(key + pad1 + len + data + count))
    @see: https://msdn.microsoft.com/en-us/library/cc240789.aspx
    @param macSaltKey: {str} mac key
    @param data: {str} data to sign
    @param encryptionCount: {int} number of packets already encrypted
    @return: {str} signature
    """
    #both lengths are serialized as 32 bit little endian
    lengthStream = Stream()
    lengthStream.writeType(UInt32Le(len(data)))
    countStream = Stream()
    countStream.writeType(UInt32Le(encryptionCount))
    #inner digest: SHA1 over key, ipad-like constant, length, payload and packet count
    innerDigest = sha.new()
    for chunk in (macSaltKey, "\x36" * 40, lengthStream.getvalue(), data, countStream.getvalue()):
        innerDigest.update(chunk)
    #outer digest: MD5 over key, opad-like constant and the inner SHA1 signature
    outerDigest = md5.new()
    for chunk in (macSaltKey, "\x5c" * 48, innerDigest.digest()):
        outerDigest.update(chunk)
    return outerDigest.digest()
def tempKey(initialKey, currentKey):
    """
    @summary: Compute the temporary key used during session key update
    @see: http://msdn.microsoft.com/en-us/library/cc240792.aspx
    @param initialKey: {str} key computed at connection time
    @param currentKey: {str} key currently in use
    @return: {str} temp key
    """
    #inner digest: SHA1(initial + pad1 + current)
    innerDigest = sha.new()
    for chunk in (initialKey, "\x36" * 40, currentKey):
        innerDigest.update(chunk)
    #outer digest: MD5(initial + pad2 + innerSig)
    outerDigest = md5.new()
    for chunk in (initialKey, "\x5c" * 48, innerDigest.digest()):
        outerDigest.update(chunk)
    return outerDigest.digest()
def gen40bits(data):
    """
    @summary: reduce 128 bits of key material to a 40 bits key
    @param data: {str} 128 bits data
    @return: {str} 40 bits data (fixed salt + last 5 bytes of the first 8)
    @see: http://msdn.microsoft.com/en-us/library/cc240785.aspx
    """
    #40 bit keys carry a fixed 3 byte salt prefix
    salt = "\xd1\x26\x9e"
    return salt + data[:8][-5:]
def gen56bits(data):
    """
    @summary: reduce 128 bits of key material to a 56 bits key
    @param data: {str} 128 bits data
    @return: {str} 56 bits data (fixed salt + last 7 bytes of the first 8)
    @see: http://msdn.microsoft.com/en-us/library/cc240785.aspx
    """
    #56 bit keys carry a fixed 1 byte salt prefix
    salt = "\xd1"
    return salt + data[:8][-7:]
def generateKeys(clientRandom, serverRandom, method):
    """
    @summary: derive the MAC key and both initial session keys from the randoms
    @param clientRandom: {str[32]} client random
    @param serverRandom: {str[32]} server random
    @param method: {gcc.EncryptionMethod} negotiated encryption method
    @see: http://msdn.microsoft.com/en-us/library/cc240785.aspx
    @return: MACKey, initialFirstKey128(ClientdecryptKey, serverEncryptKey), initialSecondKey128(ServerDecryptKey, ClientEncryptKey)
    @raise InvalidExpectedDataException: when method is unknown
    """
    preMasterHash = clientRandom[:24] + serverRandom[:24]
    masterHash = masterSecret(preMasterHash, clientRandom, serverRandom)
    sessionKey = sessionKeyBlob(masterHash, clientRandom, serverRandom)
    #full strength (128 bit) material before method dependent reduction
    macKey128 = sessionKey[:16]
    firstKey128 = finalHash(sessionKey[16:32], clientRandom, serverRandom)
    secondKey128 = finalHash(sessionKey[32:48], clientRandom, serverRandom)
    #the negotiated method selects how each 128 bit key is reduced
    reducers = {
        gcc.EncryptionMethod.ENCRYPTION_FLAG_40BIT : gen40bits,
        gcc.EncryptionMethod.ENCRYPTION_FLAG_56BIT : gen56bits,
        gcc.EncryptionMethod.ENCRYPTION_FLAG_128BIT : lambda key: key
    }
    if method not in reducers:
        raise InvalidExpectedDataException("Bad encryption method")
    reducer = reducers[method]
    return reducer(macKey128), reducer(firstKey128), reducer(secondKey128)
def updateKey(initialKey, currentKey, method):
    """
    @summary: update session key (performed every 4096 packets)
    @param initialKey: {str} Initial key
    @param currentKey: {str} Current key
    @param method: {gcc.EncryptionMethod} negotiated encryption method
    @return newKey: {str} key to use
    @raise InvalidExpectedDataException: when method is unknown
    @see: http://msdn.microsoft.com/en-us/library/cc240792.aspx
    """
    #generate valid key
    if method == gcc.EncryptionMethod.ENCRYPTION_FLAG_40BIT:
        tempKey128 = tempKey(initialKey[:8], currentKey[:8])
        return gen40bits(rc4.crypt(rc4.RC4Key(tempKey128[:8]), tempKey128[:8]))
    elif method == gcc.EncryptionMethod.ENCRYPTION_FLAG_56BIT:
        tempKey128 = tempKey(initialKey[:8], currentKey[:8])
        return gen56bits(rc4.crypt(rc4.RC4Key(tempKey128[:8]), tempKey128[:8]))
    elif method == gcc.EncryptionMethod.ENCRYPTION_FLAG_128BIT:
        tempKey128 = tempKey(initialKey, currentKey)
        return rc4.crypt(rc4.RC4Key(tempKey128), tempKey128)
    #previously fell through and returned None, which caused a cryptic
    #failure later in rc4.RC4Key; fail fast like generateKeys does
    raise InvalidExpectedDataException("Bad encryption method")
class ClientSecurityExchangePDU(CompositeType):
    """
    @summary: contain client random for basic security
                (client random encrypted with the server public key)
    @see: http://msdn.microsoft.com/en-us/library/cc240472.aspx
    """
    def __init__(self):
        CompositeType.__init__(self)
        #size of the packet minus the length field itself
        self.length = UInt32Le(lambda:(sizeof(self) - 4))
        #8 trailing padding bytes are included in length, hence the - 8
        self.encryptedClientRandom = String(readLen = CallableValue(lambda:(self.length.value - 8)))
        self.padding = String("\x00" * 8, readLen = CallableValue(8))
class RDPInfo(CompositeType):
    """
    @summary: Client informations
                Contains credentials (very important packet)
    @see: http://msdn.microsoft.com/en-us/library/cc240475.aspx
    """
    def __init__(self, extendedInfoConditional):
        CompositeType.__init__(self)
        #code page
        self.codePage = UInt32Le()
        #support flag
        self.flag = UInt32Le(InfoFlag.INFO_MOUSE | InfoFlag.INFO_UNICODE | InfoFlag.INFO_LOGONNOTIFY | InfoFlag.INFO_LOGONERRORS | InfoFlag.INFO_DISABLECTRLALTDEL | InfoFlag.INFO_ENABLEWINDOWSKEY)
        #cb* fields are the string sizes minus 2 bytes (unicode null terminator)
        self.cbDomain = UInt16Le(lambda:sizeof(self.domain) - 2)
        self.cbUserName = UInt16Le(lambda:sizeof(self.userName) - 2)
        self.cbPassword = UInt16Le(lambda:sizeof(self.password) - 2)
        self.cbAlternateShell = UInt16Le(lambda:sizeof(self.alternateShell) - 2)
        self.cbWorkingDir = UInt16Le(lambda:sizeof(self.workingDir) - 2)
        #microsoft domain
        self.domain = String(readLen = CallableValue(lambda:self.cbDomain.value + 2), unicode = True)
        self.userName = String(readLen = CallableValue(lambda:self.cbUserName.value + 2), unicode = True)
        self.password = String(readLen = CallableValue(lambda:self.cbPassword.value + 2), unicode = True)
        #shell execute at start of session
        self.alternateShell = String(readLen = CallableValue(lambda:self.cbAlternateShell.value + 2), unicode = True)
        #working directory for session
        self.workingDir = String(readLen = CallableValue(lambda:self.cbWorkingDir.value + 2), unicode = True)
        #only present when the conditional holds (RDP 5+ servers, see caller)
        self.extendedInfo = RDPExtendedInfo(conditional = extendedInfoConditional)
class RDPExtendedInfo(CompositeType):
    """
    @summary: Add more client informations
    """
    def __init__(self, conditional):
        CompositeType.__init__(self, conditional = conditional)
        self.clientAddressFamily = UInt16Le(AfInet.AF_INET)
        self.cbClientAddress = UInt16Le(lambda:sizeof(self.clientAddress))
        self.clientAddress = String(readLen = self.cbClientAddress, unicode = True)
        self.cbClientDir = UInt16Le(lambda:sizeof(self.clientDir))
        self.clientDir = String(readLen = self.cbClientDir, unicode = True)
        #TODO: encode a real client time zone blob; currently 172 zero bytes
        self.clientTimeZone = String("\x00" * 172)
        self.clientSessionId = UInt32Le()
        self.performanceFlags = UInt32Le()
class SecLayer(LayerAutomata, IStreamSender, tpkt.IFastPathListener, tpkt.IFastPathSender, mcs.IGCCConfig):
    """
    @summary: Standard RDP security layer
                This layer is as transparent as possible for the upper layer
    """
    def __init__(self, presentation):
        """
        @param presentation: Layer (generally pdu layer)
        """
        LayerAutomata.__init__(self, presentation)
        #this layer is like a fastpath proxy
        self._fastPathTransport = None
        self._fastPathPresentation = None
        #credentials
        self._info = RDPInfo(extendedInfoConditional = lambda:(self.getGCCServerSettings().SC_CORE.rdpVersion.value == gcc.Version.RDP_VERSION_5_PLUS))
        #True if classic encryption is enable
        self._enableEncryption = False
        #Enable Secure Mac generation
        self._enableSecureCheckSum = False
        #initialise decrypt and encrypt keys
        self._macKey = None
        self._initialDecrytKey = None
        self._initialEncryptKey = None
        self._currentDecrytKey = None
        self._currentEncryptKey = None
        #packets processed since last key refresh (keys are updated every 4096 packets)
        self._nbEncryptedPacket = 0
        self._nbDecryptedPacket = 0
        #current rc4 tab
        self._decryptRc4 = None
        self._encryptRc4 = None
    def readEncryptedPayload(self, s, saltedMacGeneration):
        """
        @summary: decrypt basic RDP security payload
        @param s: {Stream} encrypted stream
        @param saltedMacGeneration: {bool} use salted mac generation
        @return: {Stream} decrypted
        @raise InvalidExpectedDataException: when the MAC signature doesn't match
        """
        #if update is needed (every 4096 packets)
        if self._nbDecryptedPacket == 4096:
            log.debug("update decrypt key")
            self._currentDecrytKey = updateKey( self._initialDecrytKey, self._currentDecrytKey, 
                                                self.getGCCServerSettings().SC_SECURITY.encryptionMethod.value)
            self._decryptRc4 = rc4.RC4Key(self._currentDecrytKey)
            self._nbDecryptedPacket = 0
        signature = String(readLen = CallableValue(8))
        encryptedPayload = String()
        s.readType((signature, encryptedPayload))
        decrypted = rc4.crypt(self._decryptRc4, encryptedPayload.value)
        #check signature (plain or salted MAC, depending on the flag)
        if not saltedMacGeneration and macData(self._macKey, decrypted)[:8] != signature.value:
            raise InvalidExpectedDataException("bad signature")
        if saltedMacGeneration and macSaltedData(self._macKey, decrypted, self._nbDecryptedPacket)[:8] != signature.value:
            raise InvalidExpectedDataException("bad signature")
        #count
        self._nbDecryptedPacket += 1
        return Stream(decrypted)
    def writeEncryptedPayload(self, data, saltedMacGeneration):
        """
        @summary: sign and crypt data
        @param data: {Type} raw stream
        @param saltedMacGeneration: {bool} use salted mac generation
        @return: {Tuple} (signature, encryptedData)
        """
        #refresh the encrypt key every 4096 packets
        if self._nbEncryptedPacket == 4096:
            log.debug("update encrypt key")
            self._currentEncryptKey = updateKey( self._initialEncryptKey, self._currentEncryptKey, 
                                                self.getGCCServerSettings().SC_SECURITY.encryptionMethod.value)
            self._encryptRc4 = rc4.RC4Key(self._currentEncryptKey)
            self._nbEncryptedPacket = 0
        self._nbEncryptedPacket += 1
        s = Stream()
        s.writeType(data)
        #NOTE: salted MAC uses the pre-increment counter value (hence the - 1)
        if saltedMacGeneration:
            return (String(macSaltedData(self._macKey, s.getvalue(), self._nbEncryptedPacket - 1)[:8]), String(rc4.crypt(self._encryptRc4, s.getvalue())))
        else:
            return (String(macData(self._macKey, s.getvalue())[:8]), String(rc4.crypt(self._encryptRc4, s.getvalue())))
    def recv(self, data):
        """
        @summary: if basic RDP security layer is activate decrypt
                    else pass to upper layer
        @param data : {Stream} input Stream
        """
        if not self._enableEncryption:
            self._presentation.recv(data)
            return
        securityFlag = UInt16Le()
        securityFlagHi = UInt16Le()
        data.readType((securityFlag, securityFlagHi))
        if securityFlag.value & SecurityFlag.SEC_ENCRYPT:
            data = self.readEncryptedPayload(data, securityFlag.value & SecurityFlag.SEC_SECURE_CHECKSUM)
        self._presentation.recv(data)
    def send(self, data):
        """
        @summary: if basic RDP security layer is activate encrypt
                    else pass to upper layer
        @param data: {Type | Tuple}
        """
        if not self._enableEncryption:
            self._transport.send(data)
            return
        flag = SecurityFlag.SEC_ENCRYPT
        if self._enableSecureCheckSum:
            flag |= SecurityFlag.SEC_SECURE_CHECKSUM
        self.sendFlagged(flag, data)
    def sendFlagged(self, flag, data):
        """
        @summary: explicit send flag method for particular packet
                    (info packet or license packet)
                    If encryption is enable apply it
        @param flag: {integer} security flag
        @param data: {Type | Tuple}
        """
        if flag & SecurityFlag.SEC_ENCRYPT:
            data = self.writeEncryptedPayload(data, flag & SecurityFlag.SEC_SECURE_CHECKSUM)
        self._transport.send((UInt16Le(flag), UInt16Le(), data))
    def recvFastPath(self, secFlag, fastPathS):
        """
        @summary: Call when fast path packet is received
        @param secFlag: {SecFlags}
        @param fastPathS: {Stream}
        """
        if self._enableEncryption and secFlag & tpkt.SecFlags.FASTPATH_OUTPUT_ENCRYPTED:
            fastPathS = self.readEncryptedPayload(fastPathS, secFlag & tpkt.SecFlags.FASTPATH_OUTPUT_SECURE_CHECKSUM)
        self._fastPathPresentation.recvFastPath(secFlag, fastPathS)
    def setFastPathListener(self, fastPathListener):
        """
        @param fastPathListener : {IFastPathListener}
        """
        self._fastPathPresentation = fastPathListener
    def sendFastPath(self, secFlag, fastPathS):
        """
        @summary: Send fastPathS Type as fast path packet
        @param secFlag: {SecFlags}
        @param fastPathS: {Stream} type transform to stream and send as fastpath
        """
        if self._enableEncryption:
            secFlag |= tpkt.SecFlags.FASTPATH_OUTPUT_ENCRYPTED
            if self._enableSecureCheckSum:
                secFlag |= tpkt.SecFlags.FASTPATH_OUTPUT_SECURE_CHECKSUM
            fastPathS = self.writeEncryptedPayload(fastPathS, self._enableSecureCheckSum)
        self._fastPathTransport.sendFastPath(secFlag, fastPathS)
    def setFastPathSender(self, fastPathSender):
        """
        @param fastPathSender: {tpkt.FastPathSender}
        """
        self._fastPathTransport = fastPathSender
    def getUserId(self):
        """
        @return: {integer} mcs user id
        @see: mcs.IGCCConfig
        """
        return self._transport.getUserId()
    def getChannelId(self):
        """
        @return: {integer} return channel id of proxy
        @see: mcs.IGCCConfig
        """
        return self._transport.getChannelId()
    def getGCCClientSettings(self):
        """
        @return: {gcc.Settings} mcs layer gcc client settings
        @see: mcs.IGCCConfig
        """
        return self._transport.getGCCClientSettings()
    def getGCCServerSettings(self):
        """
        @return: {gcc.Settings} mcs layer gcc server settings
        @see: mcs.IGCCConfig
        """
        return self._transport.getGCCServerSettings()
class Client(SecLayer):
    """
    @summary: Client side of security layer
    """
    def __init__(self, presentation):
        """
        @param presentation: {Layer} upper layer (generally pdu layer)
        """
        SecLayer.__init__(self, presentation)
        #license negotiation handler, used by recvLicenceInfo
        self._licenceManager = lic.LicenseManager(self)
    def connect(self):
        """
        @summary: send client random if needed and send info packet
        """
        #standard RDP encryption is only used when no external security protocol was selected
        self._enableEncryption = self.getGCCClientSettings().CS_CORE.serverSelectedProtocol == 0
        if self._enableEncryption:
            self.sendClientRandom()
        self.sendInfoPkt()
    def sendInfoPkt(self):
        """
        @summary: send information packet (with credentials)
                    next state -> recvLicenceInfo
        """
        secFlag = SecurityFlag.SEC_INFO_PKT
        if self._enableEncryption:
            secFlag |= SecurityFlag.SEC_ENCRYPT
        self.sendFlagged(secFlag, self._info)
        self.setNextState(self.recvLicenceInfo)
    def sendClientRandom(self):
        """
        @summary: generate and send client random and init session keys
        """
        #generate client random
        clientRandom = rsa.random(256)
        self._macKey, self._initialDecrytKey, self._initialEncryptKey = generateKeys(   clientRandom, 
                                                                                        self.getGCCServerSettings().SC_SECURITY.serverRandom.value, 
                                                                                        self.getGCCServerSettings().SC_SECURITY.encryptionMethod.value)
        #initialize keys
        self._currentDecrytKey = self._initialDecrytKey
        self._currentEncryptKey = self._initialEncryptKey
        self._decryptRc4 = rc4.RC4Key(self._currentDecrytKey)
        self._encryptRc4 = rc4.RC4Key(self._currentEncryptKey)
        #verify certificate (best effort: only warn on failure)
        if not self.getGCCServerSettings().SC_SECURITY.serverCertificate.certData.verify():
            log.warning("cannot verify server identity")
        #send client random encrypted with the server public key
        serverPublicKey = self.getGCCServerSettings().SC_SECURITY.serverCertificate.certData.getPublicKey()
        message = ClientSecurityExchangePDU()
        #reverse because bignum in little endian
        message.encryptedClientRandom.value = rsa.encrypt(clientRandom[::-1], serverPublicKey)[::-1]
        self.sendFlagged(SecurityFlag.SEC_EXCHANGE_PKT, message)
    def recvLicenceInfo(self, s):
        """
        @summary: Read license info packet and check if is a valid client info
                    Wait Demand Active PDU
        @param s: Stream
        @raise InvalidExpectedDataException: when the packet is not a license packet
        """
        #packet preambule
        securityFlag = UInt16Le()
        securityFlagHi = UInt16Le()
        s.readType((securityFlag, securityFlagHi))
        if not (securityFlag.value & SecurityFlag.SEC_LICENSE_PKT):
            raise InvalidExpectedDataException("waiting license packet")
        if self._licenceManager.recv(s):
            self.setNextState()
            #end of connection step of
            self._presentation.connect()
class Server(SecLayer):
    """
    @summary: Server side of security layer
    (original docstring incorrectly said "Client side")
    Accepts the client random, derives session keys, reads the info packet
    and answers the licensing phase with a valid-client error message.
    """
    def __init__(self, presentation):
        """
        @param presentation: {Layer}
        """
        SecLayer.__init__(self, presentation)
        # SECURITY NOTE(review): 512-bit RSA is far below modern standards;
        # acceptable only for protocol testing
        self._rsaPublicKey, self._rsaPrivateKey = rsa.newkeys(512)
    def connect(self):
        """
        @summary: init automata to wait info packet
        """
        # standard RDP security only when no enhanced protocol was negotiated
        self._enableEncryption = self.getGCCClientSettings().CS_CORE.serverSelectedProtocol == 0
        if self._enableEncryption:
            self.setNextState(self.recvClientRandom)
        else:
            self.setNextState(self.recvInfoPkt)
    def getCertificate(self):
        """
        @summary: generate proprietary certificate from rsa public key
        @return: {gcc.ServerCertificate}
        """
        certificate = gcc.ProprietaryServerCertificate()
        # modulus reversed because bignum is serialized little endian
        certificate.PublicKeyBlob.modulus.value = rsa.int2bytes(self._rsaPublicKey.n)[::-1]
        certificate.PublicKeyBlob.pubExp.value = self._rsaPublicKey.e
        certificate.sign()
        return gcc.ServerCertificate(certificate)
    def recvClientRandom(self, s):
        """
        @summary: receive client random and generate session keys
        @param s: {Stream}
        """
        #packet preambule
        securityFlag = UInt16Le()
        securityFlagHi = UInt16Le()
        s.readType((securityFlag, securityFlagHi))
        if not (securityFlag.value & SecurityFlag.SEC_EXCHANGE_PKT):
            raise InvalidExpectedDataException("waiting client random")
        message = ClientSecurityExchangePDU()
        s.readType(message)
        # reverse for little-endian bignum before/after RSA decryption
        clientRandom = rsa.decrypt(message.encryptedClientRandom.value[::-1], self._rsaPrivateKey)[::-1]
        # note: encrypt/decrypt keys are swapped relative to the client side
        self._macKey, self._initialEncryptKey, self._initialDecrytKey = generateKeys( clientRandom,
                self.getGCCServerSettings().SC_SECURITY.serverRandom.value,
                self.getGCCServerSettings().SC_SECURITY.encryptionMethod.value)
        #initialize keys
        self._currentDecrytKey = self._initialDecrytKey
        self._currentEncryptKey = self._initialEncryptKey
        self._decryptRc4 = rc4.RC4Key(self._currentDecrytKey)
        self._encryptRc4 = rc4.RC4Key(self._currentEncryptKey)
        self.setNextState(self.recvInfoPkt)
    def recvInfoPkt(self, s):
        """
        @summary: receive info packet from client
        Client credentials
        Send License valid error message
        Send Demand Active PDU
        Wait Confirm Active PDU
        @param s: {Stream}
        """
        securityFlag = UInt16Le()
        securityFlagHi = UInt16Le()
        s.readType((securityFlag, securityFlagHi))
        if not (securityFlag.value & SecurityFlag.SEC_INFO_PKT):
            raise InvalidExpectedDataException("Waiting info packet")
        if securityFlag.value & SecurityFlag.SEC_ENCRYPT:
            s = self.readEncryptedPayload(s, securityFlag.value & SecurityFlag.SEC_SECURE_CHECKSUM)
        s.readType(self._info)
        #next state send error license
        self.sendLicensingErrorMessage()
        #reinit state
        self.setNextState()
        self._presentation.connect()
    def sendLicensingErrorMessage(self):
        """
        @summary: Send a licensing error data (valid client, no license needed)
        """
        self.sendFlagged(SecurityFlag.SEC_LICENSE_PKT, lic.createValidClientLicensingErrorMessage())
25c0f6f874faa9aa47653224c9fd47dbdd6c61ee | 4,474 | py | Python | scripts/sync.py | bastionhost/dbhub | 825ce53f0b1c0e3c2e033ebe2c92bfcfbf51bd9e | [
"MIT"
] | 15 | 2018-09-13T00:53:57.000Z | 2022-03-28T02:13:01.000Z | scripts/sync.py | bastionhost/dbhub | 825ce53f0b1c0e3c2e033ebe2c92bfcfbf51bd9e | [
"MIT"
] | 1 | 2018-12-17T05:52:53.000Z | 2018-12-17T05:52:53.000Z | scripts/sync.py | huifenqi/dbhub | 825ce53f0b1c0e3c2e033ebe2c92bfcfbf51bd9e | [
"MIT"
] | 3 | 2018-11-16T08:56:24.000Z | 2019-01-26T03:51:42.000Z | import pymongo
from django.conf import settings
from sqlalchemy import create_engine, inspect
from sqlalchemy import MetaData
from pymongo_schema.extract import extract_pymongo_client_schema
from pymongo_schema.tosql import mongo_schema_to_mapping
from apps.schema.models import Database, Table, Column, Index
# entry point: run the full schema synchronization
if __name__ == '__main__':
    run()
| 37.915254 | 97 | 0.601475 | import pymongo
from django.conf import settings
from sqlalchemy import create_engine, inspect
from sqlalchemy import MetaData
from pymongo_schema.extract import extract_pymongo_client_schema
from pymongo_schema.tosql import mongo_schema_to_mapping
from apps.schema.models import Database, Table, Column, Index
class RelationalDBSync(object):
    """Mirror a relational database's schema (reflected via SQLAlchemy)
    into the Database/Table/Column/Index models."""
    @staticmethod
    def save_indexes(t, indexes):
        # persist secondary indexes; unique indexes are tagged 'UNIQUE KEY'
        for index in indexes:
            i, created = Index.objects.get_or_create(table=t, name=index.name)
            i.type = 'UNIQUE KEY' if index.unique else 'KEY'
            i.include_columns = ', '.join([c.name for c in index.columns])
            i.save()
    @staticmethod
    def save_primary_keys(t, primary_keys):
        # NOTE(review): `primary_keys` holds Column objects, so a composite
        # primary key produces one Index row per column instead of a single
        # row listing all columns — confirm this is the intended behavior
        for index in primary_keys:
            i, created = Index.objects.get_or_create(table=t, name=index.name)
            i.type = 'PRIMARY KEY'
            i.include_columns = index.name
            i.save()
    @staticmethod
    def save_columns(t, columns):
        for column in columns:
            default_value = column.server_default.arg if column.server_default else None
            c, created = Column.objects.get_or_create(table=t, name=column.name)
            try:
                # keep only the base type name (e.g. 'VARCHAR' from 'VARCHAR(255) ...')
                c.data_type = str(column.type).split(' ')[0]
            except Exception:
                # some dialect types cannot be stringified; fall back to repr
                c.data_type = repr(column.type)
            c.is_null = column.nullable
            c.default_value = default_value
            # never overwrite a manually entered comment
            if not c.comment and column.comment:
                c.comment = column.comment
            c.save()
    def build(self, database):
        """Reflect all tables of `database` and upsert their metadata."""
        engine = create_engine(database.config)
        m = MetaData()
        m.reflect(engine)
        if not database.charset:
            # fill database info
            database.charset = engine.dialect.encoding
            database.save()
        for table in m.sorted_tables:
            print(table.name)
            # dialect name is the URL scheme, e.g. 'mysql' from 'mysql://...'
            dialect = database.config.split(':')[0]
            # NOTE(review): `_non_defaults` is a private SQLAlchemy attribute
            # and may break across versions
            table_info = table.dialect_options[dialect]._non_defaults
            t, created = Table.objects.get_or_create(database=database, name=table.name)
            t.engine = table_info.get('engine', '')
            t.charset = table_info.get('default charset', '')
            if not t.comment and table.comment:
                t.comment = table.comment
            t.save()
            self.save_columns(t, table.columns)
            self.save_primary_keys(t, table.primary_key.columns)
            self.save_indexes(t, table.indexes)
class MongoDBSync(object):
    """Mirror MongoDB collection schemas (inferred via pymongo_schema)
    into the Table/Column models."""
    def __init__(self, database):
        # Split the connection string into host, port and database name.
        # NOTE(review): assumes config looks like 'host:port/dbname' with no
        # credentials or URI options — confirm against stored configs.
        self.database = database
        parts1 = database.config.rsplit(':', 1)
        parts2 = parts1[1].split('/')
        self.host = parts1[0]
        self.port = int(parts2[0])
        self.db = parts2[1]
    def build(self):
        """Infer each collection's schema and persist its columns."""
        with pymongo.MongoClient(self.host, self.port) as client:
            for collection in client[self.db].list_collection_names():
                print(collection)
                schema = extract_pymongo_client_schema(client, [self.db], [collection])
                mapping = mongo_schema_to_mapping(schema)
                t, created = Table.objects.get_or_create(database=self.database, name=collection)
                # empty/unschematized collections produce no mapping entry
                if self.db not in mapping or collection not in mapping[self.db]:
                    continue
                for column in mapping[self.db][collection].keys():
                    if column == 'pk':
                        # synthetic primary-key entry added by the mapping
                        continue
                    c, created = Column.objects.get_or_create(table=t, name=column)
                    c.data_type = mapping[self.db][collection][column]['type']
                    c.is_null = True
                    c.save()
def init_databases():
    """Discover schemas on each configured instance and upsert Database rows."""
    for instance in settings.DB_INSTANCES:
        engine = create_engine(instance)
        insp = inspect(engine)
        db_list = insp.get_schema_names()
        # skip MySQL system schemas
        dbs = set(db_list) - {'information_schema', 'performance_schema', 'mysql', 'sys'}
        for db in dbs:
            config = '{}/{}?charset=utf8'.format(instance.rstrip('/'), db)
            d, created = Database.objects.get_or_create(name=db)
            d.config = config
            d.save()
def run():
    """Sync schema metadata for every enabled database, dispatching on
    the connection scheme (mongodb vs relational)."""
    init_databases()
    databases = Database.objects.filter(enable=True)
    for database in databases:
        if database.config.startswith('mongodb'):
            MongoDBSync(database).build()
        else:
            RelationalDBSync().build(database)
# entry point
if __name__ == '__main__':
    run()
| 3,799 | 176 | 145 |
c47f4ce83c15b758edbdd15a5d99eac73f06c99d | 1,969 | py | Python | examples/pick_and_place_CGO.py | bgheneti/rai-python | 2956004853525795dfde210266d9bbf3292d27ee | [
"MIT"
] | null | null | null | examples/pick_and_place_CGO.py | bgheneti/rai-python | 2956004853525795dfde210266d9bbf3292d27ee | [
"MIT"
] | null | null | null | examples/pick_and_place_CGO.py | bgheneti/rai-python | 2956004853525795dfde210266d9bbf3292d27ee | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import sys
sys.path.append('../ry')
from libry import *
# Scene setup: kitchen world plus a PR2 robot, with three extra movable objects.
K = Config()
D = K.view()
K.addFile('../test/kitchen.g')
K.addFile('../rai-robotModels/pr2/pr2.g')
# Two graspable boxes on the sink and a tray on the stove.
K.addFrame('item1', 'sink1', 'type:ssBox Q:<t(-.1 -.1 .52)> size:[.1 .1 .25 .02] color:[1. 0. 0.], contact' )
K.addFrame('item2', 'sink1', 'type:ssBox Q:<t(.1 .1 .52)> size:[.1 .1 .25 .02] color:[1. 1. 0.], contact' )
K.addFrame("tray", "stove1", "type:ssBox Q:<t(.0 .0 .42)> size:[.2 .2 .05 .02] color:[0. 1. 0.], contact" )
obj1 = 'item2'
obj2 = 'item1'
arm='pr2L'
tray = "tray";
table='_12'
# Six-phase CGO (configuration graph optimization) problem.
T = 6
komo = K.komo_CGO(T)
#komo.makeObjectsFree([obj1, obj2])
# Global constraints: keep the two items apart at phase 3, no collisions, joint limits.
komo.addObjective(confs=[3], type=OT.ineq, feature=FS.distance, frames=[obj1, obj2], target=[-.05]) # distance between objects!
komo.addObjective(type=OT.eq, feature=FS.accumulatedCollisions)
komo.addObjective(type=OT.ineq, feature=FS.jointLimits)
# Attachment constraints: which frame each object is rigidly bound to in each phase.
komo.add_StableRelativePose(confs=[0, 1], gripper=arm, object=obj1)
komo.add_StableRelativePose(confs=[2, 3], gripper=arm, object=obj2)
komo.add_StableRelativePose(confs=[4, 5], gripper=arm, object=tray)
komo.add_StableRelativePose(confs=[1,2,3,4,5], gripper=tray, object=obj1)
komo.add_StableRelativePose(confs=[3,4,5], gripper=tray, object=obj2)
komo.add_StablePose(confs=[-1, 0], object=obj1)
komo.add_StablePose(confs=[-1, 0, 1, 2], object=obj2)
komo.add_StablePose(confs=[-1, 0, 1, 2, 3, 4], object=tray)
# Task sequence: pick both items, place them on the tray, then carry the tray to the table.
komo.add_grasp(0, arm, obj1)
komo.add_place(1, obj1, tray)
komo.add_grasp(2, arm, obj2)
komo.add_place(3, obj2, tray)
komo.add_grasp(4, arm, tray)
komo.add_place(5, tray, table)
# komo.add_resting(-1, 0, obj1)
# komo.add_restingRelative(0, 1 , obj1, arm)
# komo.add_resting(1, 2, obj1)
# komo.add_resting(2, 3, obj1)
# komo.add_resting(-1, 0, obj2)
# komo.add_resting(0, 1, obj2)
# komo.add_resting(1, 2, obj2)
# komo.add_restingRelative(2, 3 , obj2, arm)
komo.optimize()
# Step through the optimized configurations one phase at a time.
for t in range(-1, T):
    komo.getConfiguration(t)
    input("Press Enter to continue...")
| 30.292308 | 127 | 0.681056 | #!/usr/bin/python3
import sys
sys.path.append('../ry')
from libry import *
# Scene setup: kitchen world plus a PR2 robot, with three extra movable objects.
K = Config()
D = K.view()
K.addFile('../test/kitchen.g')
K.addFile('../rai-robotModels/pr2/pr2.g')
# Two graspable boxes on the sink and a tray on the stove.
K.addFrame('item1', 'sink1', 'type:ssBox Q:<t(-.1 -.1 .52)> size:[.1 .1 .25 .02] color:[1. 0. 0.], contact' )
K.addFrame('item2', 'sink1', 'type:ssBox Q:<t(.1 .1 .52)> size:[.1 .1 .25 .02] color:[1. 1. 0.], contact' )
K.addFrame("tray", "stove1", "type:ssBox Q:<t(.0 .0 .42)> size:[.2 .2 .05 .02] color:[0. 1. 0.], contact" )
obj1 = 'item2'
obj2 = 'item1'
arm='pr2L'
tray = "tray";
table='_12'
# Six-phase CGO (configuration graph optimization) problem.
T = 6
komo = K.komo_CGO(T)
#komo.makeObjectsFree([obj1, obj2])
# Global constraints: keep the two items apart at phase 3, no collisions, joint limits.
komo.addObjective(confs=[3], type=OT.ineq, feature=FS.distance, frames=[obj1, obj2], target=[-.05]) # distance between objects!
komo.addObjective(type=OT.eq, feature=FS.accumulatedCollisions)
komo.addObjective(type=OT.ineq, feature=FS.jointLimits)
# Attachment constraints: which frame each object is rigidly bound to in each phase.
komo.add_StableRelativePose(confs=[0, 1], gripper=arm, object=obj1)
komo.add_StableRelativePose(confs=[2, 3], gripper=arm, object=obj2)
komo.add_StableRelativePose(confs=[4, 5], gripper=arm, object=tray)
komo.add_StableRelativePose(confs=[1,2,3,4,5], gripper=tray, object=obj1)
komo.add_StableRelativePose(confs=[3,4,5], gripper=tray, object=obj2)
komo.add_StablePose(confs=[-1, 0], object=obj1)
komo.add_StablePose(confs=[-1, 0, 1, 2], object=obj2)
komo.add_StablePose(confs=[-1, 0, 1, 2, 3, 4], object=tray)
# Task sequence: pick both items, place them on the tray, then carry the tray to the table.
komo.add_grasp(0, arm, obj1)
komo.add_place(1, obj1, tray)
komo.add_grasp(2, arm, obj2)
komo.add_place(3, obj2, tray)
komo.add_grasp(4, arm, tray)
komo.add_place(5, tray, table)
# komo.add_resting(-1, 0, obj1)
# komo.add_restingRelative(0, 1 , obj1, arm)
# komo.add_resting(1, 2, obj1)
# komo.add_resting(2, 3, obj1)
# komo.add_resting(-1, 0, obj2)
# komo.add_resting(0, 1, obj2)
# komo.add_resting(1, 2, obj2)
# komo.add_restingRelative(2, 3 , obj2, arm)
komo.optimize()
# Step through the optimized configurations one phase at a time.
for t in range(-1, T):
    komo.getConfiguration(t)
    input("Press Enter to continue...")
| 0 | 0 | 0 |
5d0d73e2287292f6ef347ee205eff50261f050ca | 185 | py | Python | tests/sorting/conftest.py | dieb/algorithms.py | da657002cb35395bab547a9f29ca2c8a171e986d | [
"MIT"
] | null | null | null | tests/sorting/conftest.py | dieb/algorithms.py | da657002cb35395bab547a9f29ca2c8a171e986d | [
"MIT"
] | null | null | null | tests/sorting/conftest.py | dieb/algorithms.py | da657002cb35395bab547a9f29ca2c8a171e986d | [
"MIT"
] | null | null | null | import pytest
@pytest.fixture
| 20.555556 | 64 | 0.713514 | import pytest
@pytest.fixture
def assert_sorted():
    """Fixture returning a helper that checks a sort function against the
    built-in sorted().

    The candidate sort function receives a copy of the input so an in-place
    implementation cannot mutate the caller's data. The expected side needs
    no defensive copy: sorted() never mutates its argument.
    """
    def assert_fun(original, sort_function):
        assert sorted(original) == sort_function(original[:])
    return assert_fun
| 131 | 0 | 22 |
91285f0c3a7a9c77f819019ebc7ea7e3b259c995 | 5,902 | py | Python | app/__init__.py | winlongit/shop_pc_server | 376fa5c0504e50a863aad68ead67e0a149ef5dd2 | [
"Apache-2.0"
] | 1 | 2021-12-08T13:59:32.000Z | 2021-12-08T13:59:32.000Z | app/__init__.py | winlongit/shop_pc_server | 376fa5c0504e50a863aad68ead67e0a149ef5dd2 | [
"Apache-2.0"
] | null | null | null | app/__init__.py | winlongit/shop_pc_server | 376fa5c0504e50a863aad68ead67e0a149ef5dd2 | [
"Apache-2.0"
] | 1 | 2021-12-08T13:59:34.000Z | 2021-12-08T13:59:34.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
@ Author : pengj
@ date : 2019/11/25 21:07
@ IDE : PyCharm
@ GitHub : https://github.com/JackyPJB
@ Contact : pengjianbiao@hotmail.com
-------------------------------------------------
Description :
-------------------------------------------------
"""
import base64
import io
from flask import Flask, request, g, send_file
from flask_cors import CORS
from app import jsonReturn
from app.utils.jwt import JWT
from app.models.User import User, Role, Permission
from config import load_config
__author__ = 'Max_Pengjb'
| 40.986111 | 230 | 0.596069 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
@ Author : pengj
@ date : 2019/11/25 21:07
@ IDE : PyCharm
@ GitHub : https://github.com/JackyPJB
@ Contact : pengjianbiao@hotmail.com
-------------------------------------------------
Description :
-------------------------------------------------
"""
import base64
import io
from flask import Flask, request, g, send_file
from flask_cors import CORS
from app import jsonReturn
from app.utils.jwt import JWT
from app.models.User import User, Role, Permission
from config import load_config
__author__ = 'Max_Pengjb'
def create_app():
    """Application factory: build and configure the Flask app.

    Loads the config, enables CORS, initializes the DB and serialization
    extensions, registers every blueprint found in app.routes, seeds an
    admin account on first request, and installs JWT-based
    authentication/authorization hooks around each request.
    """
    # initialize the app
    config = load_config()
    app = Flask(__name__)
    app.config.from_object(config)
    # CORS(app)
    CORS(app, resources={r"/*": {"expose_headers": ["Authorization"]}})
    # Alternatively, you can specify CORS options on a resource and origin level of granularity by passing a dictionary as the resources option, mapping paths to a set of options. See the full list of options in the documentation.
    # cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
    from app.models import db, ma
    db.init_app(app)
    # serialization / deserialization plugin
    ma.init_app(app)
    # register every blueprint: the `bp` objects defined in the routes package
    from app import routes
    from flask.blueprints import Blueprint
    def _import_submodules_from_package(package):
        # knowing only the package name, import and collect all its modules
        import pkgutil
        modules = []
        for importer, modname, ispkg in pkgutil.iter_modules(package.__path__, prefix=package.__name__ + "."):
            print("{} name: {}, is_sub_package: {}".format(importer, modname, ispkg))
            modules.append(__import__(modname, fromlist="dummy"))
        return modules
    for module in _import_submodules_from_package(routes):
        bp = getattr(module, 'bp')
        if bp and isinstance(bp, Blueprint):
            # register the blueprint
            app.register_blueprint(bp)
    @app.before_first_request
    def create_admin_casual_user():
        # seed a superuser account (admin/admin) with a wildcard permission
        admin = User.objects(username='admin').first()
        if not admin:
            permission_admin = Permission(url='*', name='超级权限', description='超级管理员的权限,哪里都能访问')
            permission_admin.save()
            super_admin = Role(name='超级管理员', description='超级管理员,哪里都能访问哦')
            super_admin.permissions.append(permission_admin)
            super_admin.save()
            admin = User(username='admin')
            admin.password = 'admin'
            admin.roles.append(super_admin)
            admin.save()
        # permission_casual = Permission(url='/login', name='登录', description='谁都能访问')
        # permission_casual.save()
        # permission_casual_reg = Permission(url='/register', name='注册', description='谁都能访问')
        # permission_casual_reg.save()
        # permission_casual_role = Role(name='everyone', description='临时用户')
        # permission_casual_role.permissions.append(permission_casual)
        # permission_casual_role.permissions.append(permission_casual_reg)
        # permission_casual_role.save()
    @app.before_request
    def auth_jwt():
        # print('config.HTTP_ROOT', config.HTTP_ROOT)
        # on every request: validate the token, then decide from the request
        # path whether the caller is allowed through
        token = request.headers.get('Authorization')
        # print(token)
        if token:
            payload = JWT.decode_auth_token(token)
            # decode_auth_token returns a str on failure (expired or invalid
            # token) and a dict on success
            if not isinstance(payload, str):
                # find the user whose username matches the one in the token
                user = User.objects(username=payload['data']['username']).first()
                # TODO this existence check should be backed by redis: tokens
                # are only issued to users who have logged in
                if user is None:
                    return jsonReturn.falseReturn('', '错误的token', 403)
                username = user.username
                # stash the username for this request so after_request knows
                # the caller is logged in and refreshes the token
                g.username = username
                # RBAC authorization
                # TODO fetch permissions from redis instead, storing user->permissions directly
                permissions = set(config.ALLOWED_URL)
                for role in user.roles:
                    # print(role)
                    for permission in role.permissions:
                        # print(permission.url)
                        permissions.add(permission.url)
                # print(user.username, user.roles, permissions)
                # print(request.path)
                if request.path not in permissions and '*' not in permissions:
                    return jsonReturn.falseReturn(request.path, '没有访问权限', 403)
            else:
                # token expired or invalid: payload carries the error message
                # print(payload)
                return jsonReturn.falseReturn('', payload, 403)
        else:
            # TODO no token means an anonymous visitor whose permissions are
            # limited to config['ALLOWED_URL']
            # generate a unique id
            # uuid_id = uuid.uuid5(uuid.NAMESPACE_DNS, str(time.time()) + "".join(
            #     random.choice(string.ascii_letters + string.digits) for _ in range(16)))
            # reject paths outside the anonymous whitelist
            if request.path not in config.ALLOWED_URL:
                return jsonReturn.falseReturn(request.path, '需要登录', 403)
    # post-request processing
    @app.after_request
    def after_request(response):
        # at the end of each request, a username in g means the caller is
        # logged in: issue a refreshed token in the response headers
        if 'username' in g:
            token = JWT.encode_auth_token(g.username)
            # print("token:", token)
            print(g.username, JWT.decode_auth_token(token))
            response.headers.add('Authorization', token)
            # without this, a frontend axios client cannot read Authorization
            # from the response; can also be configured via CORS as above
            # response.headers.add('Access-Control-Expose-Headers', 'Authorization')
        return response
    return app
| 5,997 | 0 | 23 |
93e92d07bd7ae1362f4f38832f8ccfdd0576d278 | 173 | py | Python | tasks.py | MinchinWeb/prjct | 182c4b5c849c920fcc1d84fd7431aaae36bd8760 | [
"MIT"
] | 1 | 2020-05-29T00:26:52.000Z | 2020-05-29T00:26:52.000Z | tasks.py | MinchinWeb/prjct | 182c4b5c849c920fcc1d84fd7431aaae36bd8760 | [
"MIT"
] | 46 | 2016-10-18T00:57:24.000Z | 2022-01-01T17:22:29.000Z | tasks.py | MinchinWeb/prjct | 182c4b5c849c920fcc1d84fd7431aaae36bd8760 | [
"MIT"
] | 1 | 2018-09-17T14:30:09.000Z | 2018-09-17T14:30:09.000Z | import invoke
#import minchin.releaser
try:
from minchin.releaser import make_release, vendorize
except ImportError:
print("[WARN] minchin.releaser not installed")
| 21.625 | 56 | 0.780347 | import invoke
#import minchin.releaser
try:
from minchin.releaser import make_release, vendorize
except ImportError:
print("[WARN] minchin.releaser not installed")
| 0 | 0 | 0 |
7ad5d1bd4a4bcd3a0020da28df929a858e7a36db | 1,421 | py | Python | rpi-rgb-led-matrix-master/snappsvisa1.py | hammal/macapar | 05fb84b8f5e967ed6d3edb0891ac58674e6b60bc | [
"MIT"
] | null | null | null | rpi-rgb-led-matrix-master/snappsvisa1.py | hammal/macapar | 05fb84b8f5e967ed6d3edb0891ac58674e6b60bc | [
"MIT"
] | null | null | null | rpi-rgb-led-matrix-master/snappsvisa1.py | hammal/macapar | 05fb84b8f5e967ed6d3edb0891ac58674e6b60bc | [
"MIT"
] | null | null | null | #-*- coding: utf-8 -*-
from __future__ import unicode_literals
import Image
import ImageDraw
import time
import ImageFont
from rgbmatrix import Adafruit_RGBmatrix
# Rows and chain length are both required parameters:
matrix = Adafruit_RGBmatrix(32, 4)
# Load a TrueType font for the lyrics (the default bitmap font is replaced immediately).
font=ImageFont.load_default()
font=ImageFont.truetype("DejaVuSerif.ttf",size=11)
img=Image.new('RGB',(128,32))
# Lyric fragments shown on the LED matrix (Swedish song text).
text1="Fååm fååm"
text2="Fååm fååm"
text3="Fåm vi lite upp i kosi"
text4="Jengan gå jengan gå jengan gå jengan teee"
# Flash the three short lines at fixed positions, repeated twice.
for n in range(1,3,1):
    time.sleep(0.5)
    img=Image.new('RGB',(128,32))
    d=ImageDraw.Draw(img)
    d.text((-1,0),text1,fill=(0,200,200),font=font)
    matrix.SetImage(img.im.id, 0, 0)
    time.sleep(3)
    img=Image.new('RGB',(128,32))
    d=ImageDraw.Draw(img)
    d.text((-1,0),text2,fill=(200,200,0),font=font)
    matrix.SetImage(img.im.id, 65, 0)
    time.sleep(3)
    img=Image.new('RGB',(128,32))
    d=ImageDraw.Draw(img)
    d.text((-1,0),text3,fill=(0,200,0),font=font)
    matrix.SetImage(img.im.id, 0, 15)
    time.sleep(5.5)
matrix.Clear()
# Scroll the long line right-to-left across the panel in a larger font.
font=ImageFont.truetype("DejaVuSerif.ttf",size=14)
img=Image.new('RGB',(300,32))
d=ImageDraw.Draw(img)
d.text((1,0),text4,fill=(200,0,200),font=font)
for n in range(128, -img.size[0], -1): # Scroll R to L
    matrix.SetImage(img.im.id, n, 8)
    time.sleep(0.018)
# //: Fåm, fåm, fåm, fåm, fåm vi lite opp i kosa ://
# Jen gang och jen gang och jen gang och jen gang te
# Jen gang och jen gang och jen gang och jen gang te
| 26.811321 | 54 | 0.701619 | #-*- coding: utf-8 -*-
from __future__ import unicode_literals
import Image
import ImageDraw
import time
import ImageFont
from rgbmatrix import Adafruit_RGBmatrix
# Rows and chain length are both required parameters:
matrix = Adafruit_RGBmatrix(32, 4)
# Load a TrueType font for the lyrics (the default bitmap font is replaced immediately).
font=ImageFont.load_default()
font=ImageFont.truetype("DejaVuSerif.ttf",size=11)
img=Image.new('RGB',(128,32))
# Lyric fragments shown on the LED matrix (Swedish song text).
text1="Fååm fååm"
text2="Fååm fååm"
text3="Fåm vi lite upp i kosi"
text4="Jengan gå jengan gå jengan gå jengan teee"
# Flash the three short lines at fixed positions, repeated twice.
for n in range(1,3,1):
    time.sleep(0.5)
    img=Image.new('RGB',(128,32))
    d=ImageDraw.Draw(img)
    d.text((-1,0),text1,fill=(0,200,200),font=font)
    matrix.SetImage(img.im.id, 0, 0)
    time.sleep(3)
    img=Image.new('RGB',(128,32))
    d=ImageDraw.Draw(img)
    d.text((-1,0),text2,fill=(200,200,0),font=font)
    matrix.SetImage(img.im.id, 65, 0)
    time.sleep(3)
    img=Image.new('RGB',(128,32))
    d=ImageDraw.Draw(img)
    d.text((-1,0),text3,fill=(0,200,0),font=font)
    matrix.SetImage(img.im.id, 0, 15)
    time.sleep(5.5)
matrix.Clear()
# Scroll the long line right-to-left across the panel in a larger font.
font=ImageFont.truetype("DejaVuSerif.ttf",size=14)
img=Image.new('RGB',(300,32))
d=ImageDraw.Draw(img)
d.text((1,0),text4,fill=(200,0,200),font=font)
for n in range(128, -img.size[0], -1): # Scroll R to L
    matrix.SetImage(img.im.id, n, 8)
    time.sleep(0.018)
# //: Fåm, fåm, fåm, fåm, fåm vi lite opp i kosa ://
# Jen gang och jen gang och jen gang och jen gang te
# Jen gang och jen gang och jen gang och jen gang te
| 0 | 0 | 0 |
2a2404f29f3a92e6c43d59dd176c1c34c49e5597 | 1,478 | py | Python | projects/pico/deployment/plates/plates-consumer.py | henriwoodcock/developer | 7ddd7f0b56564c0c13d5505c16b6f89c0c29886a | [
"CC-BY-4.0"
] | 47 | 2020-08-10T12:58:13.000Z | 2022-03-18T18:11:44.000Z | projects/pico/deployment/plates/plates-consumer.py | henriwoodcock/developer | 7ddd7f0b56564c0c13d5505c16b6f89c0c29886a | [
"CC-BY-4.0"
] | 7 | 2021-02-13T02:13:52.000Z | 2021-08-18T14:03:49.000Z | projects/pico/deployment/plates/plates-consumer.py | henriwoodcock/developer | 7ddd7f0b56564c0c13d5505c16b6f89c0c29886a | [
"CC-BY-4.0"
] | 27 | 2020-08-07T19:44:52.000Z | 2022-03-08T19:16:46.000Z |
# coding: utf-8
# In[17]:
import datetime
from kafka import KafkaConsumer
import boto3
import json
import base64
# Fire up the Kafka Consumer
topic = "testpico"
brokers = ["35.189.130.4:9092"]
# Initialising Kafka consumer(Lambda) with topic
consumer = KafkaConsumer(
    topic,
    bootstrap_servers=brokers,
    value_deserializer=lambda m: json.loads(m.decode('utf-8')))
# In[18]:
# Initialising AWS session using Secrey Keys
# SECURITY NOTE(review): credentials are hard-coded; move them to environment
# variables or an AWS credentials profile before deploying.
session = boto3.session.Session(aws_access_key_id='XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
                                aws_secret_access_key='XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
                                region_name='us-west-2')
# Reading every message in the consumer topic(queue) followed by decoding at the producer
for msg in consumer:
    # each message value carries a base64-encoded image under 'image_bytes'
    img_bytes = base64.b64decode(msg.value['image_bytes'])
    # Initializing the AWS Rekognition System
    rekog_client = session.client('rekognition')
    # Sending the Image byte array to the AWS Rekognition System to detect the text in the image
    response = rekog_client.detect_text(Image={'Bytes':img_bytes})
    # Capturing the text detections from the AWS Rekognition System response
    textDetections=response['TextDetections']
    for text in textDetections:
        print ('Detected text:' + text['DetectedText'])
        print ('Confidence: ' + "{:.2f}".format(text['Confidence']) + "%")
        print("#"*50)
# In[ ]:
| 24.229508 | 93 | 0.675237 |
# coding: utf-8
# In[17]:
import datetime
from kafka import KafkaConsumer
import boto3
import json
import base64
# Fire up the Kafka Consumer
topic = "testpico"
brokers = ["35.189.130.4:9092"]
# Initialising Kafka consumer(Lambda) with topic
consumer = KafkaConsumer(
    topic,
    bootstrap_servers=brokers,
    value_deserializer=lambda m: json.loads(m.decode('utf-8')))
# In[18]:
# Initialising AWS session using Secrey Keys
# SECURITY NOTE(review): credentials are hard-coded; move them to environment
# variables or an AWS credentials profile before deploying.
session = boto3.session.Session(aws_access_key_id='XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
                                aws_secret_access_key='XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
                                region_name='us-west-2')
# Reading every message in the consumer topic(queue) followed by decoding at the producer
for msg in consumer:
    # each message value carries a base64-encoded image under 'image_bytes'
    img_bytes = base64.b64decode(msg.value['image_bytes'])
    # Initializing the AWS Rekognition System
    rekog_client = session.client('rekognition')
    # Sending the Image byte array to the AWS Rekognition System to detect the text in the image
    response = rekog_client.detect_text(Image={'Bytes':img_bytes})
    # Capturing the text detections from the AWS Rekognition System response
    textDetections=response['TextDetections']
    for text in textDetections:
        print ('Detected text:' + text['DetectedText'])
        print ('Confidence: ' + "{:.2f}".format(text['Confidence']) + "%")
        print("#"*50)
# In[ ]:
| 0 | 0 | 0 |
feb0301eab01f99751323c1174bb62d39c474c2d | 7,475 | py | Python | tests/test_compositealgo.py | AurelienGasser/substra-tools | 2e90f1224eb6c9bc1c620713738b645b2951693e | [
"Apache-2.0"
] | 17 | 2019-10-25T13:35:59.000Z | 2021-01-06T09:18:07.000Z | tests/test_compositealgo.py | AurelienGasser/substra-tools | 2e90f1224eb6c9bc1c620713738b645b2951693e | [
"Apache-2.0"
] | 21 | 2019-11-05T20:39:47.000Z | 2020-07-17T17:15:42.000Z | tests/test_compositealgo.py | AurelienGasser/substra-tools | 2e90f1224eb6c9bc1c620713738b645b2951693e | [
"Apache-2.0"
] | 5 | 2021-06-03T11:52:17.000Z | 2022-02-22T21:21:58.000Z | import json
import pathlib
from substratools import algo, exceptions
from substratools.workspace import CompositeAlgoWorkspace
import pytest
@pytest.fixture(autouse=True)
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.mark.parametrize("fake_data,n_fake_samples,expected_pred", [
(False, 0, []),
(True, 1, []),
])
@pytest.mark.parametrize('algo_class', (
NoSavedTrunkModelAggregateAlgo,
NoSavedHeadModelAggregateAlgo,
WrongSavedTrunkModelAggregateAlgo,
WrongSavedHeadModelAggregateAlgo
))
| 29.662698 | 89 | 0.697525 | import json
import pathlib
from substratools import algo, exceptions
from substratools.workspace import CompositeAlgoWorkspace
import pytest
@pytest.fixture(autouse=True)
def setup(valid_opener):
    """Pull in the valid_opener fixture for every test automatically."""
    pass
class DummyCompositeAlgo(algo.CompositeAlgo):
    """Composite-algo test double.

    Each train step bumps the head model's value up by one and the trunk
    model's value down by one; models are persisted as JSON dicts of the
    form {'value': int}.
    """

    def train(self, X, y, head_model, trunk_model, rank):
        """Initialise both models on the first step, then update them."""
        if head_model and trunk_model:
            head = dict(head_model)
            trunk = dict(trunk_model)
        else:
            head = {'value': 0}
            trunk = {'value': 0}
        head['value'] += 1
        trunk['value'] -= 1
        return head, trunk

    def predict(self, X, head_model, trunk_model):
        """Return the integer range spanned by the two model values."""
        return list(range(head_model['value'], trunk_model['value']))

    def load_head_model(self, path):
        return self._load_model(path)

    def save_head_model(self, model, path):
        return self._save_model(model, path)

    def load_trunk_model(self, path):
        return self._load_model(path)

    def save_trunk_model(self, model, path):
        return self._save_model(model, path)

    def _load_model(self, path):
        # head and trunk models share one JSON serialization
        with open(path) as fp:
            return json.load(fp)

    def _save_model(self, model, path):
        with open(path, 'w') as fp:
            fp.write(json.dumps(model))
class NoSavedTrunkModelAggregateAlgo(DummyCompositeAlgo):
    """Deliberately broken algo: the trunk model is never written to disk."""
    def save_trunk_model(self, model, path):
        # do not save model at all
        pass
class NoSavedHeadModelAggregateAlgo(DummyCompositeAlgo):
    """Deliberately broken algo: the head model is never written to disk."""
    def save_head_model(self, model, path):
        # do not save model at all
        pass
class WrongSavedTrunkModelAggregateAlgo(DummyCompositeAlgo):
    """Broken algo: writes the trunk model to the wrong path (extra suffix)."""
    def save_trunk_model(self, model, path):
        # simulate numpy.save behavior
        with open(path + '.npy', 'w') as f:
            json.dump(model, f)
class WrongSavedHeadModelAggregateAlgo(DummyCompositeAlgo):
    """Broken algo: writes the head model to the wrong path (extra suffix)."""
    def save_head_model(self, model, path):
        # simulate numpy.save behavior
        with open(path + '.npy', 'w') as f:
            json.dump(model, f)
@pytest.fixture
def workspace(workdir):
    """Workspace whose input-models folder lives under the test workdir."""
    input_dir = workdir / "input_models"
    input_dir.mkdir()
    return CompositeAlgoWorkspace(input_models_folder_path=str(input_dir))
@pytest.fixture
def dummy_wrapper(workspace):
    """Wrapper around a DummyCompositeAlgo bound to the test workspace."""
    return algo.CompositeAlgoWrapper(DummyCompositeAlgo(), workspace=workspace)
@pytest.fixture
def create_models(workspace):
    """Write a head and a trunk model into the workspace input folder.

    Returns ([head, trunk], input_folder_path, head_filename, trunk_filename).
    """
    head = {'value': 1}
    trunk = {'value': -1}
    folder = pathlib.Path(workspace.input_models_folder_path)

    def _dump(data, name):
        filename = name + '.json'
        (folder / filename).write_text(json.dumps(data))
        return filename

    return (
        [head, trunk],
        workspace.input_models_folder_path,
        _dump(head, 'head'),
        _dump(trunk, 'trunk')
    )
def test_create():
    """Instantiating the dummy algo class must not raise."""
    # check we can instantiate a dummy algo class
    DummyCompositeAlgo()
def test_train_no_model(dummy_wrapper):
    """First train step (no input models) initializes head=+1 and trunk=-1."""
    head_model, trunk_model = dummy_wrapper.train()
    assert head_model['value'] == 1
    assert trunk_model['value'] == -1
def test_train_input_head_trunk_models(create_models, dummy_wrapper):
    """Training from existing models increments head and decrements trunk."""
    _, _, head_filename, trunk_filename = create_models
    head_model, trunk_model = dummy_wrapper.train(head_filename, trunk_filename)
    assert head_model['value'] == 2
    assert trunk_model['value'] == -2
def test_train_fake_data(dummy_wrapper):
    """Fake-data mode still performs a regular init + train step."""
    head_model, trunk_model = dummy_wrapper.train(fake_data=True, n_fake_samples=2)
    assert head_model['value'] == 1
    assert trunk_model['value'] == -1
@pytest.mark.parametrize("fake_data,n_fake_samples,expected_pred", [
    (False, 0, []),
    (True, 1, []),
])
def test_predict(fake_data, n_fake_samples, expected_pred, create_models, dummy_wrapper):
    """Predict over head=1/trunk=-1 yields an empty range, with or without fake data."""
    _, _, head_filename, trunk_filename = create_models
    pred = dummy_wrapper.predict(
        head_filename,
        trunk_filename,
        fake_data=fake_data,
        n_fake_samples=n_fake_samples,
    )
    assert pred == expected_pred
def test_execute_train(workdir):
    """CLI 'train' with no input models writes both output model files."""
    output_models_path = workdir / 'output_models'
    output_head_model_filename = 'head_model'
    output_trunk_model_filename = 'trunk_model'
    output_head_model_path = output_models_path / output_head_model_filename
    assert not output_head_model_path.exists()
    output_trunk_model_path = output_models_path / output_trunk_model_filename
    assert not output_trunk_model_path.exists()
    common_args = [
        '--output-models-path', str(output_models_path),
        '--output-head-model-filename', output_head_model_filename,
        '--output-trunk-model-filename', output_trunk_model_filename,
    ]
    algo.execute(DummyCompositeAlgo(), sysargs=['train'] + common_args)
    assert output_head_model_path.exists()
    assert output_trunk_model_path.exists()
def test_execute_train_multiple_models(workdir, create_models):
    """`execute train` with input models writes updated outputs and no prediction."""
    _, models_dir, head_in, trunk_in = create_models
    out_dir = workdir / 'output_models'
    head_out_name = 'output_head_model'
    trunk_out_name = 'output_trunk_model'
    head_out_path = out_dir / head_out_name
    trunk_out_path = out_dir / trunk_out_name
    pred_path = workdir / 'pred' / 'pred'
    # nothing must pre-exist
    assert not head_out_path.exists()
    assert not trunk_out_path.exists()
    assert not pred_path.exists()
    algo.execute(DummyCompositeAlgo(), sysargs=[
        'train',
        '--input-models-path', str(models_dir),
        '--input-head-model-filename', head_in,
        '--input-trunk-model-filename', trunk_in,
        '--output-models-path', str(out_dir),
        '--output-head-model-filename', head_out_name,
        '--output-trunk-model-filename', trunk_out_name,
    ])
    assert head_out_path.exists()
    with open(head_out_path, 'r') as fp:
        assert json.load(fp)['value'] == 2
    assert trunk_out_path.exists()
    with open(trunk_out_path, 'r') as fp:
        assert json.load(fp)['value'] == -2
    # training must not produce a prediction file
    assert not pred_path.exists()
def test_execute_predict(workdir, create_models):
    """`execute predict` writes the (empty) prediction file."""
    _, models_dir, head_in, trunk_in = create_models
    pred_path = workdir / 'pred' / 'pred'
    assert not pred_path.exists()
    algo.execute(DummyCompositeAlgo(), sysargs=[
        'predict',
        '--input-models-path', str(models_dir),
        '--input-head-model-filename', head_in,
        '--input-trunk-model-filename', trunk_in,
    ])
    assert pred_path.exists()
    with open(pred_path, 'r') as fp:
        assert json.load(fp) == []
    pred_path.unlink()
@pytest.mark.parametrize('algo_class', (
    NoSavedTrunkModelAggregateAlgo,
    NoSavedHeadModelAggregateAlgo,
    WrongSavedTrunkModelAggregateAlgo,
    WrongSavedHeadModelAggregateAlgo
))
def test_model_check(algo_class):
    """Wrapper training must fail when a composite algo mishandles model saving."""
    wrapper = algo.CompositeAlgoWrapper(algo_class())
    with pytest.raises(exceptions.MissingFileError):
        wrapper.train([])
| 6,028 | 172 | 728 |
0a0e643e3f48f6fc458be3375ed09d7479d57e9a | 53,021 | py | Python | msgraph/cli/command_modules/devicescorpmgt/azext_devicescorpmgt/generated/action.py | microsoftgraph/msgraph-cli-archived | 489f70bf4ede1ce67b84bfb31e66da3e4db76062 | [
"MIT"
] | null | null | null | msgraph/cli/command_modules/devicescorpmgt/azext_devicescorpmgt/generated/action.py | microsoftgraph/msgraph-cli-archived | 489f70bf4ede1ce67b84bfb31e66da3e4db76062 | [
"MIT"
] | 22 | 2022-03-29T22:54:37.000Z | 2022-03-29T22:55:27.000Z | msgraph/cli/command_modules/devicescorpmgt/azext_devicescorpmgt/generated/action.py | microsoftgraph/msgraph-cli-archived | 489f70bf4ede1ce67b84bfb31e66da3e4db76062 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=protected-access
# pylint: disable=no-self-use
import argparse
from collections import defaultdict
from knack.util import CLIError
| 35.370914 | 120 | 0.534486 | # --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=protected-access
# pylint: disable=no-self-use
import argparse
from collections import defaultdict
from knack.util import CLIError
class AddMobileAppCategories(argparse._AppendAction):
    """argparse action: append one mobile-app-category dict parsed from KEY=VALUE tokens."""

    def __call__(self, parser, namespace, values, option_string=None):
        parsed = self.get_action(values, option_string)
        super(AddMobileAppCategories, self).__call__(parser, namespace, parsed, option_string)

    def get_action(self, values, option_string):
        """Turn ['KEY=VALUE', ...] into a snake_case dict; raise CLIError on bad input."""
        try:
            grouped = defaultdict(list)
            for key, value in (token.split('=', 1) for token in values):
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        # CLI key (case-insensitive) -> field name in the produced dict
        field_map = {
            'display-name': 'display_name',
            'last-modified-date-time': 'last_modified_date_time',
            'id': 'id',
        }
        parsed = {}
        for raw_key, collected in grouped.items():
            field = field_map.get(raw_key.lower())
            if field is None:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter mobile-app-categories. All possible keys are:'
                    ' display-name, last-modified-date-time, id'.format(raw_key)
                )
            # repeated keys keep only their first value
            parsed[field] = collected[0]
        return parsed
class AddVppTokens(argparse._AppendAction):
    """argparse action: append one VPP-token dict parsed from KEY=VALUE tokens."""

    def __call__(self, parser, namespace, values, option_string=None):
        parsed = self.get_action(values, option_string)
        super(AddVppTokens, self).__call__(parser, namespace, parsed, option_string)

    def get_action(self, values, option_string):
        """Turn ['KEY=VALUE', ...] into a snake_case dict; raise CLIError on bad input."""
        try:
            grouped = defaultdict(list)
            for key, value in (token.split('=', 1) for token in values):
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        # CLI key (case-insensitive) -> field name in the produced dict
        field_map = {
            'apple-id': 'apple_id',
            'automatically-update-apps': 'automatically_update_apps',
            'country-or-region': 'country_or_region',
            'expiration-date-time': 'expiration_date_time',
            'last-modified-date-time': 'last_modified_date_time',
            'last-sync-date-time': 'last_sync_date_time',
            'last-sync-status': 'last_sync_status',
            'organization-name': 'organization_name',
            'state': 'state',
            'token': 'token',
            'vpp-token-account-type': 'vpp_token_account_type',
            'id': 'id',
        }
        parsed = {}
        for raw_key, collected in grouped.items():
            field = field_map.get(raw_key.lower())
            if field is None:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter vpp-tokens. All possible keys are: apple-id,'
                    ' automatically-update-apps, country-or-region, expiration-date-time, last-modified-date-time,'
                    ' last-sync-date-time, last-sync-status, organization-name, state, token,'
                    ' vpp-token-account-type, id'.format(raw_key)
                )
            # repeated keys keep only their first value
            parsed[field] = collected[0]
        return parsed
class AddManagedAppPolicies(argparse._AppendAction):
    """argparse action: append one managed-app-policy dict parsed from KEY=VALUE tokens."""

    def __call__(self, parser, namespace, values, option_string=None):
        parsed = self.get_action(values, option_string)
        super(AddManagedAppPolicies, self).__call__(parser, namespace, parsed, option_string)

    def get_action(self, values, option_string):
        """Turn ['KEY=VALUE', ...] into a snake_case dict; raise CLIError on bad input."""
        try:
            grouped = defaultdict(list)
            for key, value in (token.split('=', 1) for token in values):
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        # CLI key (case-insensitive) -> field name in the produced dict
        field_map = {
            'created-date-time': 'created_date_time',
            'description': 'description',
            'display-name': 'display_name',
            'last-modified-date-time': 'last_modified_date_time',
            'version': 'version',
            'id': 'id',
        }
        parsed = {}
        for raw_key, collected in grouped.items():
            field = field_map.get(raw_key.lower())
            if field is None:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter managed-app-policies. All possible keys are:'
                    ' created-date-time, description, display-name, last-modified-date-time, version, id'.format(raw_key)
                )
            # repeated keys keep only their first value
            parsed[field] = collected[0]
        return parsed
class AddManagedAppStatuses(argparse._AppendAction):
    """argparse action: append one managed-app-status dict parsed from KEY=VALUE tokens."""

    def __call__(self, parser, namespace, values, option_string=None):
        parsed = self.get_action(values, option_string)
        super(AddManagedAppStatuses, self).__call__(parser, namespace, parsed, option_string)

    def get_action(self, values, option_string):
        """Turn ['KEY=VALUE', ...] into a snake_case dict; raise CLIError on bad input."""
        try:
            grouped = defaultdict(list)
            for key, value in (token.split('=', 1) for token in values):
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        # CLI key (case-insensitive) -> field name in the produced dict
        field_map = {
            'display-name': 'display_name',
            'version': 'version',
            'id': 'id',
        }
        parsed = {}
        for raw_key, collected in grouped.items():
            field = field_map.get(raw_key.lower())
            if field is None:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter managed-app-statuses. All possible keys are:'
                    ' display-name, version, id'.format(raw_key)
                )
            # repeated keys keep only their first value
            parsed[field] = collected[0]
        return parsed
class AddMdmWindowsInformationProtectionPolicies(argparse._AppendAction):
    """argparse action: append one MDM WIP-policy dict parsed from KEY=VALUE tokens."""

    def __call__(self, parser, namespace, values, option_string=None):
        parsed = self.get_action(values, option_string)
        super(AddMdmWindowsInformationProtectionPolicies, self).__call__(parser, namespace, parsed, option_string)

    def get_action(self, values, option_string):
        """Turn ['KEY=VALUE', ...] into a snake_case dict; raise CLIError on bad input.

        Keys in ``list_keys`` accumulate every occurrence into a list; all
        other keys keep only their first value.
        """
        try:
            grouped = defaultdict(list)
            for key, value in (token.split('=', 1) for token in values):
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        # CLI key (case-insensitive) -> field name in the produced dict
        field_map = {
            'azure-rights-management-services-allowed': 'azure_rights_management_services_allowed',
            'data-recovery-certificate': 'data_recovery_certificate',
            'enforcement-level': 'enforcement_level',
            'enterprise-domain': 'enterprise_domain',
            'enterprise-internal-proxy-servers': 'enterprise_internal_proxy_servers',
            'enterprise-ip-ranges': 'enterprise_ip_ranges',
            'enterprise-ip-ranges-are-authoritative': 'enterprise_ip_ranges_are_authoritative',
            'enterprise-network-domain-names': 'enterprise_network_domain_names',
            'enterprise-protected-domain-names': 'enterprise_protected_domain_names',
            'enterprise-proxied-domains': 'enterprise_proxied_domains',
            'enterprise-proxy-servers': 'enterprise_proxy_servers',
            'enterprise-proxy-servers-are-authoritative': 'enterprise_proxy_servers_are_authoritative',
            'exempt-apps': 'exempt_apps',
            'icons-visible': 'icons_visible',
            'indexing-encrypted-stores-or-items-blocked': 'indexing_encrypted_stores_or_items_blocked',
            'is-assigned': 'is_assigned',
            'neutral-domain-resources': 'neutral_domain_resources',
            'protected-apps': 'protected_apps',
            'protection-under-lock-config-required': 'protection_under_lock_config_required',
            'revoke-on-unenroll-disabled': 'revoke_on_unenroll_disabled',
            'rights-management-services-template-id': 'rights_management_services_template_id',
            'smb-auto-encrypted-file-extensions': 'smb_auto_encrypted_file_extensions',
            'assignments': 'assignments',
            'exempt-app-locker-files': 'exempt_app_locker_files',
            'protected-app-locker-files': 'protected_app_locker_files',
            'created-date-time': 'created_date_time',
            'description': 'description',
            'display-name': 'display_name',
            'last-modified-date-time': 'last_modified_date_time',
            'version': 'version',
            'id': 'id',
        }
        # these keys collect every occurrence into a list
        list_keys = {
            'enterprise-internal-proxy-servers', 'enterprise-ip-ranges',
            'enterprise-network-domain-names', 'enterprise-protected-domain-names',
            'enterprise-proxied-domains', 'enterprise-proxy-servers', 'exempt-apps',
            'neutral-domain-resources', 'protected-apps', 'smb-auto-encrypted-file-extensions',
            'assignments', 'exempt-app-locker-files', 'protected-app-locker-files',
        }
        parsed = {}
        for raw_key, collected in grouped.items():
            lowered = raw_key.lower()
            field = field_map.get(lowered)
            if field is None:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter mdm-windows-information-protection-policies. All'
                    ' possible keys are: azure-rights-management-services-allowed, data-recovery-certificate,'
                    ' enforcement-level, enterprise-domain, enterprise-internal-proxy-servers, enterprise-ip-ranges,'
                    ' enterprise-ip-ranges-are-authoritative, enterprise-network-domain-names,'
                    ' enterprise-protected-domain-names, enterprise-proxied-domains, enterprise-proxy-servers,'
                    ' enterprise-proxy-servers-are-authoritative, exempt-apps, icons-visible,'
                    ' indexing-encrypted-stores-or-items-blocked, is-assigned, neutral-domain-resources,'
                    ' protected-apps, protection-under-lock-config-required, revoke-on-unenroll-disabled,'
                    ' rights-management-services-template-id, smb-auto-encrypted-file-extensions, assignments,'
                    ' exempt-app-locker-files, protected-app-locker-files, created-date-time, description,'
                    ' display-name, last-modified-date-time, version, id'.format(raw_key)
                )
            parsed[field] = collected if lowered in list_keys else collected[0]
        return parsed
class AddWindowsInformationProtectionPolicies(argparse._AppendAction):
    """argparse action: append one WIP-policy dict parsed from KEY=VALUE tokens."""

    def __call__(self, parser, namespace, values, option_string=None):
        parsed = self.get_action(values, option_string)
        super(AddWindowsInformationProtectionPolicies, self).__call__(parser, namespace, parsed, option_string)

    def get_action(self, values, option_string):
        """Turn ['KEY=VALUE', ...] into a snake_case dict; raise CLIError on bad input.

        Keys in ``list_keys`` accumulate every occurrence into a list; all
        other keys keep only their first value.
        """
        try:
            grouped = defaultdict(list)
            for key, value in (token.split('=', 1) for token in values):
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        # CLI key (case-insensitive) -> field name in the produced dict
        field_map = {
            'days-without-contact-before-unenroll': 'days_without_contact_before_unenroll',
            'mdm-enrollment-url': 'mdm_enrollment_url',
            'minutes-of-inactivity-before-device-lock': 'minutes_of_inactivity_before_device_lock',
            'number-of-past-pins-remembered': 'number_of_past_pins_remembered',
            'password-maximum-attempt-count': 'password_maximum_attempt_count',
            'pin-expiration-days': 'pin_expiration_days',
            'pin-lowercase-letters': 'pin_lowercase_letters',
            'pin-minimum-length': 'pin_minimum_length',
            'pin-special-characters': 'pin_special_characters',
            'pin-uppercase-letters': 'pin_uppercase_letters',
            'revoke-on-mdm-handoff-disabled': 'revoke_on_mdm_handoff_disabled',
            'windows-hello-for-business-blocked': 'windows_hello_for_business_blocked',
            'azure-rights-management-services-allowed': 'azure_rights_management_services_allowed',
            'data-recovery-certificate': 'data_recovery_certificate',
            'enforcement-level': 'enforcement_level',
            'enterprise-domain': 'enterprise_domain',
            'enterprise-internal-proxy-servers': 'enterprise_internal_proxy_servers',
            'enterprise-ip-ranges': 'enterprise_ip_ranges',
            'enterprise-ip-ranges-are-authoritative': 'enterprise_ip_ranges_are_authoritative',
            'enterprise-network-domain-names': 'enterprise_network_domain_names',
            'enterprise-protected-domain-names': 'enterprise_protected_domain_names',
            'enterprise-proxied-domains': 'enterprise_proxied_domains',
            'enterprise-proxy-servers': 'enterprise_proxy_servers',
            'enterprise-proxy-servers-are-authoritative': 'enterprise_proxy_servers_are_authoritative',
            'exempt-apps': 'exempt_apps',
            'icons-visible': 'icons_visible',
            'indexing-encrypted-stores-or-items-blocked': 'indexing_encrypted_stores_or_items_blocked',
            'is-assigned': 'is_assigned',
            'neutral-domain-resources': 'neutral_domain_resources',
            'protected-apps': 'protected_apps',
            'protection-under-lock-config-required': 'protection_under_lock_config_required',
            'revoke-on-unenroll-disabled': 'revoke_on_unenroll_disabled',
            'rights-management-services-template-id': 'rights_management_services_template_id',
            'smb-auto-encrypted-file-extensions': 'smb_auto_encrypted_file_extensions',
            'assignments': 'assignments',
            'exempt-app-locker-files': 'exempt_app_locker_files',
            'protected-app-locker-files': 'protected_app_locker_files',
            'created-date-time': 'created_date_time',
            'description': 'description',
            'display-name': 'display_name',
            'last-modified-date-time': 'last_modified_date_time',
            'version': 'version',
            'id': 'id',
        }
        # these keys collect every occurrence into a list
        list_keys = {
            'enterprise-internal-proxy-servers', 'enterprise-ip-ranges',
            'enterprise-network-domain-names', 'enterprise-protected-domain-names',
            'enterprise-proxied-domains', 'enterprise-proxy-servers', 'exempt-apps',
            'neutral-domain-resources', 'protected-apps', 'smb-auto-encrypted-file-extensions',
            'assignments', 'exempt-app-locker-files', 'protected-app-locker-files',
        }
        parsed = {}
        for raw_key, collected in grouped.items():
            lowered = raw_key.lower()
            field = field_map.get(lowered)
            if field is None:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter windows-information-protection-policies. All possible'
                    ' keys are: days-without-contact-before-unenroll, mdm-enrollment-url,'
                    ' minutes-of-inactivity-before-device-lock, number-of-past-pins-remembered,'
                    ' password-maximum-attempt-count, pin-expiration-days, pin-lowercase-letters, pin-minimum-length,'
                    ' pin-special-characters, pin-uppercase-letters, revoke-on-mdm-handoff-disabled,'
                    ' windows-hello-for-business-blocked, azure-rights-management-services-allowed,'
                    ' data-recovery-certificate, enforcement-level, enterprise-domain,'
                    ' enterprise-internal-proxy-servers, enterprise-ip-ranges, enterprise-ip-ranges-are-authoritative,'
                    ' enterprise-network-domain-names, enterprise-protected-domain-names, enterprise-proxied-domains,'
                    ' enterprise-proxy-servers, enterprise-proxy-servers-are-authoritative, exempt-apps, icons-visible,'
                    ' indexing-encrypted-stores-or-items-blocked, is-assigned, neutral-domain-resources,'
                    ' protected-apps, protection-under-lock-config-required, revoke-on-unenroll-disabled,'
                    ' rights-management-services-template-id, smb-auto-encrypted-file-extensions, assignments,'
                    ' exempt-app-locker-files, protected-app-locker-files, created-date-time, description,'
                    ' display-name, last-modified-date-time, version, id'.format(raw_key)
                )
            parsed[field] = collected if lowered in list_keys else collected[0]
        return parsed
class AddAppliedPolicies(argparse._AppendAction):
    """argparse action: append one applied-policy dict parsed from KEY=VALUE tokens."""

    def __call__(self, parser, namespace, values, option_string=None):
        parsed = self.get_action(values, option_string)
        super(AddAppliedPolicies, self).__call__(parser, namespace, parsed, option_string)

    def get_action(self, values, option_string):
        """Turn ['KEY=VALUE', ...] into a snake_case dict; raise CLIError on bad input."""
        try:
            grouped = defaultdict(list)
            for key, value in (token.split('=', 1) for token in values):
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        # CLI key (case-insensitive) -> field name in the produced dict
        field_map = {
            'created-date-time': 'created_date_time',
            'description': 'description',
            'display-name': 'display_name',
            'last-modified-date-time': 'last_modified_date_time',
            'version': 'version',
            'id': 'id',
        }
        parsed = {}
        for raw_key, collected in grouped.items():
            field = field_map.get(raw_key.lower())
            if field is None:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter applied-policies. All possible keys are:'
                    ' created-date-time, description, display-name, last-modified-date-time, version, id'.format(raw_key)
                )
            # repeated keys keep only their first value
            parsed[field] = collected[0]
        return parsed
class AddIntendedPolicies(argparse._AppendAction):
    """argparse action: append one intended-policy dict parsed from KEY=VALUE tokens."""

    def __call__(self, parser, namespace, values, option_string=None):
        parsed = self.get_action(values, option_string)
        super(AddIntendedPolicies, self).__call__(parser, namespace, parsed, option_string)

    def get_action(self, values, option_string):
        """Turn ['KEY=VALUE', ...] into a snake_case dict; raise CLIError on bad input."""
        try:
            grouped = defaultdict(list)
            for key, value in (token.split('=', 1) for token in values):
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        # CLI key (case-insensitive) -> field name in the produced dict
        field_map = {
            'created-date-time': 'created_date_time',
            'description': 'description',
            'display-name': 'display_name',
            'last-modified-date-time': 'last_modified_date_time',
            'version': 'version',
            'id': 'id',
        }
        parsed = {}
        for raw_key, collected in grouped.items():
            field = field_map.get(raw_key.lower())
            if field is None:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter intended-policies. All possible keys are:'
                    ' created-date-time, description, display-name, last-modified-date-time, version, id'.format(raw_key)
                )
            # repeated keys keep only their first value
            parsed[field] = collected[0]
        return parsed
class AddOperations(argparse._AppendAction):
    """argparse action: append one operation dict parsed from KEY=VALUE tokens."""

    def __call__(self, parser, namespace, values, option_string=None):
        parsed = self.get_action(values, option_string)
        super(AddOperations, self).__call__(parser, namespace, parsed, option_string)

    def get_action(self, values, option_string):
        """Turn ['KEY=VALUE', ...] into a snake_case dict; raise CLIError on bad input."""
        try:
            grouped = defaultdict(list)
            for key, value in (token.split('=', 1) for token in values):
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        # CLI key (case-insensitive) -> field name in the produced dict
        field_map = {
            'display-name': 'display_name',
            'last-modified-date-time': 'last_modified_date_time',
            'state': 'state',
            'version': 'version',
            'id': 'id',
        }
        parsed = {}
        for raw_key, collected in grouped.items():
            field = field_map.get(raw_key.lower())
            if field is None:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter operations. All possible keys are: display-name,'
                    ' last-modified-date-time, state, version, id'.format(raw_key)
                )
            # repeated keys keep only their first value
            parsed[field] = collected[0]
        return parsed
class AddLargeCover(argparse.Action):
    """argparse action: parse KEY=VALUE tokens into one dict stored on namespace.large_cover."""

    def __call__(self, parser, namespace, values, option_string=None):
        namespace.large_cover = self.get_action(values, option_string)

    def get_action(self, values, option_string):
        """Turn ['KEY=VALUE', ...] into a dict; raise CLIError on bad input."""
        try:
            grouped = defaultdict(list)
            for key, value in (token.split('=', 1) for token in values):
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        # CLI key (case-insensitive) -> field name in the produced dict
        field_map = {
            'type': 'type',
            'value': 'value',
        }
        parsed = {}
        for raw_key, collected in grouped.items():
            field = field_map.get(raw_key.lower())
            if field is None:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter large-cover. All possible keys are: type, value'
                    .format(raw_key)
                )
            # repeated keys keep only their first value
            parsed[field] = collected[0]
        return parsed
class AddDeviceappmanagementDeviceStates(argparse._AppendAction):
    """argparse action: append one device-state dict parsed from KEY=VALUE tokens."""

    def __call__(self, parser, namespace, values, option_string=None):
        parsed = self.get_action(values, option_string)
        super(AddDeviceappmanagementDeviceStates, self).__call__(parser, namespace, parsed, option_string)

    def get_action(self, values, option_string):
        """Turn ['KEY=VALUE', ...] into a snake_case dict; raise CLIError on bad input."""
        try:
            grouped = defaultdict(list)
            for key, value in (token.split('=', 1) for token in values):
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        # CLI key (case-insensitive) -> field name in the produced dict
        field_map = {
            'device-id': 'device_id',
            'device-name': 'device_name',
            'error-code': 'error_code',
            'install-state': 'install_state',
            'last-sync-date-time': 'last_sync_date_time',
            'os-description': 'os_description',
            'os-version': 'os_version',
            'user-name': 'user_name',
            'id': 'id',
        }
        parsed = {}
        for raw_key, collected in grouped.items():
            field = field_map.get(raw_key.lower())
            if field is None:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter device-states. All possible keys are: device-id,'
                    ' device-name, error-code, install-state, last-sync-date-time, os-description, os-version,'
                    ' user-name, id'.format(raw_key)
                )
            # repeated keys keep only their first value
            parsed[field] = collected[0]
        return parsed
class AddInstallSummary(argparse.Action):
    """argparse action: parse KEY=VALUE tokens into one dict stored on namespace.install_summary."""

    def __call__(self, parser, namespace, values, option_string=None):
        namespace.install_summary = self.get_action(values, option_string)

    def get_action(self, values, option_string):
        """Turn ['KEY=VALUE', ...] into a snake_case dict; raise CLIError on bad input."""
        try:
            grouped = defaultdict(list)
            for key, value in (token.split('=', 1) for token in values):
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        # CLI key (case-insensitive) -> field name in the produced dict
        field_map = {
            'failed-device-count': 'failed_device_count',
            'failed-user-count': 'failed_user_count',
            'installed-device-count': 'installed_device_count',
            'installed-user-count': 'installed_user_count',
            'not-installed-device-count': 'not_installed_device_count',
            'not-installed-user-count': 'not_installed_user_count',
            'id': 'id',
        }
        parsed = {}
        for raw_key, collected in grouped.items():
            field = field_map.get(raw_key.lower())
            if field is None:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter install-summary. All possible keys are:'
                    ' failed-device-count, failed-user-count, installed-device-count, installed-user-count,'
                    ' not-installed-device-count, not-installed-user-count, id'.format(raw_key)
                )
            # repeated keys keep only their first value
            parsed[field] = collected[0]
        return parsed
class AddDataRecoveryCertificate(argparse.Action):
    """argparse action: parse KEY=VALUE tokens into one dict stored on namespace.data_recovery_certificate."""

    def __call__(self, parser, namespace, values, option_string=None):
        namespace.data_recovery_certificate = self.get_action(values, option_string)

    def get_action(self, values, option_string):
        """Turn ['KEY=VALUE', ...] into a snake_case dict; raise CLIError on bad input."""
        try:
            grouped = defaultdict(list)
            for key, value in (token.split('=', 1) for token in values):
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        # CLI key (case-insensitive) -> field name in the produced dict
        field_map = {
            'certificate': 'certificate',
            'description': 'description',
            'expiration-date-time': 'expiration_date_time',
            'subject-name': 'subject_name',
        }
        parsed = {}
        for raw_key, collected in grouped.items():
            field = field_map.get(raw_key.lower())
            if field is None:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter data-recovery-certificate. All possible keys are:'
                    ' certificate, description, expiration-date-time, subject-name'.format(raw_key)
                )
            # repeated keys keep only their first value
            parsed[field] = collected[0]
        return parsed
class AddEnterpriseInternalProxyServers(argparse._AppendAction):
    """argparse action: append one proxied-resource dict parsed from KEY=VALUE tokens."""

    def __call__(self, parser, namespace, values, option_string=None):
        parsed = self.get_action(values, option_string)
        super(AddEnterpriseInternalProxyServers, self).__call__(parser, namespace, parsed, option_string)

    def get_action(self, values, option_string):
        """Turn ['KEY=VALUE', ...] into a dict; 'resources' collects every value."""
        try:
            grouped = defaultdict(list)
            for key, value in (token.split('=', 1) for token in values):
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        parsed = {}
        for raw_key, collected in grouped.items():
            lowered = raw_key.lower()
            if lowered == 'display-name':
                # repeated keys keep only their first value
                parsed['display_name'] = collected[0]
            elif lowered == 'resources':
                parsed['resources'] = collected
            else:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter enterprise-internal-proxy-servers. All possible keys'
                    ' are: display-name, resources'.format(raw_key)
                )
        return parsed
class AddEnterpriseNetworkDomainNames(argparse._AppendAction):
    """argparse action: append one network-domain-name dict parsed from KEY=VALUE tokens."""

    def __call__(self, parser, namespace, values, option_string=None):
        parsed = self.get_action(values, option_string)
        super(AddEnterpriseNetworkDomainNames, self).__call__(parser, namespace, parsed, option_string)

    def get_action(self, values, option_string):
        """Turn ['KEY=VALUE', ...] into a dict; 'resources' collects every value."""
        try:
            grouped = defaultdict(list)
            for key, value in (token.split('=', 1) for token in values):
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        parsed = {}
        for raw_key, collected in grouped.items():
            lowered = raw_key.lower()
            if lowered == 'display-name':
                # repeated keys keep only their first value
                parsed['display_name'] = collected[0]
            elif lowered == 'resources':
                parsed['resources'] = collected
            else:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter enterprise-network-domain-names. All possible keys'
                    ' are: display-name, resources'.format(raw_key)
                )
        return parsed
class AddEnterpriseProtectedDomainNames(argparse._AppendAction):
    """argparse action: append one protected-domain-name dict parsed from KEY=VALUE tokens."""

    def __call__(self, parser, namespace, values, option_string=None):
        parsed = self.get_action(values, option_string)
        super(AddEnterpriseProtectedDomainNames, self).__call__(parser, namespace, parsed, option_string)

    def get_action(self, values, option_string):
        """Turn ['KEY=VALUE', ...] into a dict; 'resources' collects every value."""
        try:
            grouped = defaultdict(list)
            for key, value in (token.split('=', 1) for token in values):
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        parsed = {}
        for raw_key, collected in grouped.items():
            lowered = raw_key.lower()
            if lowered == 'display-name':
                # repeated keys keep only their first value
                parsed['display_name'] = collected[0]
            elif lowered == 'resources':
                parsed['resources'] = collected
            else:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter enterprise-protected-domain-names. All possible keys'
                    ' are: display-name, resources'.format(raw_key)
                )
        return parsed
class AddEnterpriseProxyServers(argparse._AppendAction):
    """argparse action: append one proxy-server dict parsed from KEY=VALUE tokens."""

    def __call__(self, parser, namespace, values, option_string=None):
        parsed = self.get_action(values, option_string)
        super(AddEnterpriseProxyServers, self).__call__(parser, namespace, parsed, option_string)

    def get_action(self, values, option_string):
        """Turn ['KEY=VALUE', ...] into a dict; 'resources' collects every value."""
        try:
            grouped = defaultdict(list)
            for key, value in (token.split('=', 1) for token in values):
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        parsed = {}
        for raw_key, collected in grouped.items():
            lowered = raw_key.lower()
            if lowered == 'display-name':
                # repeated keys keep only their first value
                parsed['display_name'] = collected[0]
            elif lowered == 'resources':
                parsed['resources'] = collected
            else:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter enterprise-proxy-servers. All possible keys are:'
                    ' display-name, resources'.format(raw_key)
                )
        return parsed
class AddExemptApps(argparse._AppendAction):
    """argparse action: append one exempt-app dict parsed from KEY=VALUE tokens."""

    def __call__(self, parser, namespace, values, option_string=None):
        parsed = self.get_action(values, option_string)
        super(AddExemptApps, self).__call__(parser, namespace, parsed, option_string)

    def get_action(self, values, option_string):
        """Turn ['KEY=VALUE', ...] into a snake_case dict; raise CLIError on bad input."""
        try:
            grouped = defaultdict(list)
            for key, value in (token.split('=', 1) for token in values):
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        # CLI key (case-insensitive) -> field name in the produced dict
        field_map = {
            'denied': 'denied',
            'description': 'description',
            'display-name': 'display_name',
            'product-name': 'product_name',
            'publisher-name': 'publisher_name',
        }
        parsed = {}
        for raw_key, collected in grouped.items():
            field = field_map.get(raw_key.lower())
            if field is None:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter exempt-apps. All possible keys are: denied,'
                    ' description, display-name, product-name, publisher-name'.format(raw_key)
                )
            # repeated keys keep only their first value
            parsed[field] = collected[0]
        return parsed
class AddNeutralDomainResources(argparse._AppendAction):
    """argparse action: append one neutral-domain-resource dict parsed from KEY=VALUE tokens."""

    def __call__(self, parser, namespace, values, option_string=None):
        parsed = self.get_action(values, option_string)
        super(AddNeutralDomainResources, self).__call__(parser, namespace, parsed, option_string)

    def get_action(self, values, option_string):
        """Turn ['KEY=VALUE', ...] into a dict; 'resources' collects every value."""
        try:
            grouped = defaultdict(list)
            for key, value in (token.split('=', 1) for token in values):
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        parsed = {}
        for raw_key, collected in grouped.items():
            lowered = raw_key.lower()
            if lowered == 'display-name':
                # repeated keys keep only their first value
                parsed['display_name'] = collected[0]
            elif lowered == 'resources':
                parsed['resources'] = collected
            else:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter neutral-domain-resources. All possible keys are:'
                    ' display-name, resources'.format(raw_key)
                )
        return parsed
class AddProtectedApps(argparse._AppendAction):
    """argparse action: append one protected-app dict parsed from KEY=VALUE tokens."""

    def __call__(self, parser, namespace, values, option_string=None):
        parsed = self.get_action(values, option_string)
        super(AddProtectedApps, self).__call__(parser, namespace, parsed, option_string)

    def get_action(self, values, option_string):
        """Turn ['KEY=VALUE', ...] into a snake_case dict; raise CLIError on bad input."""
        try:
            grouped = defaultdict(list)
            for key, value in (token.split('=', 1) for token in values):
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        # CLI key (case-insensitive) -> field name in the produced dict
        field_map = {
            'denied': 'denied',
            'description': 'description',
            'display-name': 'display_name',
            'product-name': 'product_name',
            'publisher-name': 'publisher_name',
        }
        parsed = {}
        for raw_key, collected in grouped.items():
            field = field_map.get(raw_key.lower())
            if field is None:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter protected-apps. All possible keys are: denied,'
                    ' description, display-name, product-name, publisher-name'.format(raw_key)
                )
            # repeated keys keep only their first value
            parsed[field] = collected[0]
        return parsed
class AddSmbAutoEncryptedFileExtensions(argparse._AppendAction):
    """argparse action: append one SMB file-extension dict parsed from KEY=VALUE tokens."""

    def __call__(self, parser, namespace, values, option_string=None):
        parsed = self.get_action(values, option_string)
        super(AddSmbAutoEncryptedFileExtensions, self).__call__(parser, namespace, parsed, option_string)

    def get_action(self, values, option_string):
        """Turn ['KEY=VALUE', ...] into a dict; 'resources' collects every value."""
        try:
            grouped = defaultdict(list)
            for key, value in (token.split('=', 1) for token in values):
                grouped[key].append(value)
            grouped = dict(grouped)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        parsed = {}
        for raw_key, collected in grouped.items():
            lowered = raw_key.lower()
            if lowered == 'display-name':
                # repeated keys keep only their first value
                parsed['display_name'] = collected[0]
            elif lowered == 'resources':
                parsed['resources'] = collected
            else:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter smb-auto-encrypted-file-extensions. All possible keys'
                    ' are: display-name, resources'.format(raw_key)
                )
        return parsed
class AddExemptAppLockerFiles(argparse._AppendAction):
    """argparse action: append one exempt-app-locker-files entry parsed from KEY=VALUE tokens."""

    # CLI key (lower-case) -> payload field name; all fields are scalar.
    _KEY_MAP = {
        'display-name': 'display_name',
        'file': 'file',
        'file-hash': 'file_hash',
        'version': 'version',
        'id': 'id',
    }

    def __call__(self, parser, namespace, values, option_string=None):
        entry = self.get_action(values, option_string)
        super().__call__(parser, namespace, entry, option_string)

    def get_action(self, values, option_string):
        """Parse ['KEY=VALUE', ...] into one dict; first value wins per key."""
        grouped = defaultdict(list)
        try:
            for token in values:
                key, value = token.split('=', 1)
                grouped[key].append(value)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        entry = {}
        for raw_key, raw_values in grouped.items():
            dest = self._KEY_MAP.get(raw_key.lower())
            if dest is None:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter exempt-app-locker-files. All possible keys are:'
                    ' display-name, file, file-hash, version, id'.format(raw_key)
                )
            entry[dest] = raw_values[0]
        return entry
class AddProtectedAppLockerFiles(argparse._AppendAction):
    """argparse action: append one protected-app-locker-files entry parsed from KEY=VALUE tokens."""

    # CLI key (lower-case) -> payload field name; all fields are scalar.
    _KEY_MAP = {
        'display-name': 'display_name',
        'file': 'file',
        'file-hash': 'file_hash',
        'version': 'version',
        'id': 'id',
    }

    def __call__(self, parser, namespace, values, option_string=None):
        entry = self.get_action(values, option_string)
        super().__call__(parser, namespace, entry, option_string)

    def get_action(self, values, option_string):
        """Parse ['KEY=VALUE', ...] into one dict; first value wins per key."""
        grouped = defaultdict(list)
        try:
            for token in values:
                key, value = token.split('=', 1)
                grouped[key].append(value)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        entry = {}
        for raw_key, raw_values in grouped.items():
            dest = self._KEY_MAP.get(raw_key.lower())
            if dest is None:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter protected-app-locker-files. All possible keys are:'
                    ' display-name, file, file-hash, version, id'.format(raw_key)
                )
            entry[dest] = raw_values[0]
        return entry
class AddDeviceStatuses(argparse._AppendAction):
    """argparse action: append one device-statuses entry parsed from KEY=VALUE tokens."""

    # CLI key (lower-case) -> payload field name; all fields are scalar.
    _KEY_MAP = {
        'compliance-grace-period-expiration-date-time': 'compliance_grace_period_expiration_date_time',
        'device-display-name': 'device_display_name',
        'device-model': 'device_model',
        'last-reported-date-time': 'last_reported_date_time',
        'status': 'status',
        'user-name': 'user_name',
        'user-principal-name': 'user_principal_name',
        'id': 'id',
    }

    def __call__(self, parser, namespace, values, option_string=None):
        entry = self.get_action(values, option_string)
        super().__call__(parser, namespace, entry, option_string)

    def get_action(self, values, option_string):
        """Parse ['KEY=VALUE', ...] into one dict; first value wins per key."""
        grouped = defaultdict(list)
        try:
            for token in values:
                key, value = token.split('=', 1)
                grouped[key].append(value)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        entry = {}
        for raw_key, raw_values in grouped.items():
            dest = self._KEY_MAP.get(raw_key.lower())
            if dest is None:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter device-statuses. All possible keys are:'
                    ' compliance-grace-period-expiration-date-time, device-display-name, device-model,'
                    ' last-reported-date-time, status, user-name, user-principal-name, id'.format(raw_key)
                )
            entry[dest] = raw_values[0]
        return entry
class AddDeviceStatusSummary(argparse.Action):
    """argparse action: store the device-status-summary dict on the namespace."""

    # CLI key (lower-case) -> payload field name; all fields are scalar.
    _KEY_MAP = {
        'configuration-version': 'configuration_version',
        'error-count': 'error_count',
        'failed-count': 'failed_count',
        'last-update-date-time': 'last_update_date_time',
        'not-applicable-count': 'not_applicable_count',
        'pending-count': 'pending_count',
        'success-count': 'success_count',
        'id': 'id',
    }

    def __call__(self, parser, namespace, values, option_string=None):
        namespace.device_status_summary = self.get_action(values, option_string)

    def get_action(self, values, option_string):
        """Parse ['KEY=VALUE', ...] into the summary dict; first value wins per key."""
        grouped = defaultdict(list)
        try:
            for token in values:
                key, value = token.split('=', 1)
                grouped[key].append(value)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        entry = {}
        for raw_key, raw_values in grouped.items():
            dest = self._KEY_MAP.get(raw_key.lower())
            if dest is None:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter device-status-summary. All possible keys are:'
                    ' configuration-version, error-count, failed-count, last-update-date-time, not-applicable-count,'
                    ' pending-count, success-count, id'.format(raw_key)
                )
            entry[dest] = raw_values[0]
        return entry
class AddUserStatuses(argparse._AppendAction):
    """argparse action: append one user-statuses entry parsed from KEY=VALUE tokens."""

    # CLI key (lower-case) -> payload field name; all fields are scalar.
    _KEY_MAP = {
        'devices-count': 'devices_count',
        'last-reported-date-time': 'last_reported_date_time',
        'status': 'status',
        'user-display-name': 'user_display_name',
        'user-principal-name': 'user_principal_name',
        'id': 'id',
    }

    def __call__(self, parser, namespace, values, option_string=None):
        entry = self.get_action(values, option_string)
        super().__call__(parser, namespace, entry, option_string)

    def get_action(self, values, option_string):
        """Parse ['KEY=VALUE', ...] into one dict; first value wins per key."""
        grouped = defaultdict(list)
        try:
            for token in values:
                key, value = token.split('=', 1)
                grouped[key].append(value)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        entry = {}
        for raw_key, raw_values in grouped.items():
            dest = self._KEY_MAP.get(raw_key.lower())
            if dest is None:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter user-statuses. All possible keys are: devices-count,'
                    ' last-reported-date-time, status, user-display-name, user-principal-name, id'.format(raw_key)
                )
            entry[dest] = raw_values[0]
        return entry
class AddUserStatusSummary(argparse.Action):
    """argparse action: store the user-status-summary dict on the namespace."""

    # CLI key (lower-case) -> payload field name; all fields are scalar.
    _KEY_MAP = {
        'configuration-version': 'configuration_version',
        'error-count': 'error_count',
        'failed-count': 'failed_count',
        'last-update-date-time': 'last_update_date_time',
        'not-applicable-count': 'not_applicable_count',
        'pending-count': 'pending_count',
        'success-count': 'success_count',
        'id': 'id',
    }

    def __call__(self, parser, namespace, values, option_string=None):
        namespace.user_status_summary = self.get_action(values, option_string)

    def get_action(self, values, option_string):
        """Parse ['KEY=VALUE', ...] into the summary dict; first value wins per key."""
        grouped = defaultdict(list)
        try:
            for token in values:
                key, value = token.split('=', 1)
                grouped[key].append(value)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        entry = {}
        for raw_key, raw_values in grouped.items():
            dest = self._KEY_MAP.get(raw_key.lower())
            if dest is None:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter user-status-summary. All possible keys are:'
                    ' configuration-version, error-count, failed-count, last-update-date-time, not-applicable-count,'
                    ' pending-count, success-count, id'.format(raw_key)
                )
            entry[dest] = raw_values[0]
        return entry
class AddCategories(argparse._AppendAction):
    """argparse action: append one categories entry parsed from KEY=VALUE tokens."""

    def __call__(self, parser, namespace, values, option_string=None):
        entry = self.get_action(values, option_string)
        super().__call__(parser, namespace, entry, option_string)

    def get_action(self, values, option_string):
        """Parse ['KEY=VALUE', ...] into one dict; first value wins per key."""
        grouped = defaultdict(list)
        try:
            for token in values:
                key, value = token.split('=', 1)
                grouped[key].append(value)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        entry = {}
        for raw_key, raw_values in grouped.items():
            lowered = raw_key.lower()
            if lowered == 'display-name':
                entry['display_name'] = raw_values[0]
            elif lowered == 'last-modified-date-time':
                entry['last_modified_date_time'] = raw_values[0]
            elif lowered == 'id':
                entry['id'] = raw_values[0]
            else:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter categories. All possible keys are: display-name,'
                    ' last-modified-date-time, id'.format(raw_key)
                )
        return entry
class AddCustomSettings(argparse._AppendAction):
    """argparse action: append one custom-settings entry parsed from KEY=VALUE tokens."""

    def __call__(self, parser, namespace, values, option_string=None):
        entry = self.get_action(values, option_string)
        super().__call__(parser, namespace, entry, option_string)

    def get_action(self, values, option_string):
        """Parse ['KEY=VALUE', ...] into one dict; first value wins per key."""
        grouped = defaultdict(list)
        try:
            for token in values:
                key, value = token.split('=', 1)
                grouped[key].append(value)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        entry = {}
        for raw_key, raw_values in grouped.items():
            lowered = raw_key.lower()
            if lowered == 'name':
                entry['name'] = raw_values[0]
            elif lowered == 'value':
                entry['value'] = raw_values[0]
            else:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter custom-settings. All possible keys are: name, value'
                    .format(raw_key)
                )
        return entry
class AddDeviceappmanagementManagedebooksDeviceStates(argparse._AppendAction):
    """argparse action: append one device-states entry parsed from KEY=VALUE tokens."""

    # CLI key (lower-case) -> payload field name; all fields are scalar.
    _KEY_MAP = {
        'device-id': 'device_id',
        'device-name': 'device_name',
        'error-code': 'error_code',
        'install-state': 'install_state',
        'last-sync-date-time': 'last_sync_date_time',
        'os-description': 'os_description',
        'os-version': 'os_version',
        'user-name': 'user_name',
        'id': 'id',
    }

    def __call__(self, parser, namespace, values, option_string=None):
        entry = self.get_action(values, option_string)
        super().__call__(parser, namespace, entry, option_string)

    def get_action(self, values, option_string):
        """Parse ['KEY=VALUE', ...] into one dict; first value wins per key."""
        grouped = defaultdict(list)
        try:
            for token in values:
                key, value = token.split('=', 1)
                grouped[key].append(value)
        except ValueError:
            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
        entry = {}
        for raw_key, raw_values in grouped.items():
            dest = self._KEY_MAP.get(raw_key.lower())
            if dest is None:
                raise CLIError(
                    'Unsupported Key {} is provided for parameter device-states. All possible keys are: device-id,'
                    ' device-name, error-code, install-state, last-sync-date-time, os-description, os-version,'
                    ' user-name, id'.format(raw_key)
                )
            entry[dest] = raw_values[0]
        return entry
| 49,128 | 971 | 2,280 |
3ed27044adeb49e0f6f0eacd3dfe8a1f38c4d1a1 | 24,701 | py | Python | Reinforcement Learning/examples/envs/mobile_robot_vrep_env.py | HusseinLezzaik/Consensus-Algorithm-for-2-Mobile-Robots | 0109c78106dff7640a8fc5601e0333b5397f5b4e | [
"MIT"
] | 9 | 2021-04-20T08:12:47.000Z | 2022-02-18T02:25:29.000Z | Reinforcement Learning/examples/envs/mobile_robot_vrep_env.py | HusseinLezzaik/Deep-Learning-for-Multi-Robotics | ecdb28793cc1f5fa6cded752908105ec37e9bfc7 | [
"MIT"
] | null | null | null | Reinforcement Learning/examples/envs/mobile_robot_vrep_env.py | HusseinLezzaik/Deep-Learning-for-Multi-Robotics | ecdb28793cc1f5fa6cded752908105ec37e9bfc7 | [
"MIT"
] | 1 | 2021-11-16T08:16:36.000Z | 2021-11-16T08:16:36.000Z | """
Defining Class of custom environment for V-Rep
@author: hussein
"""
import vrep_env
from vrep_env import vrep
import os
vrep_scenes_path = os.environ['/home/hussein/Desktop/Multi-agent-path-planning/Reinforcement Learning/examples/scenes']
import rclpy
from rclpy.node import Node
from tf2_msgs.msg import TFMessage
from std_msgs.msg import Float32
import sim
import math
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import time
L = 1 # Parameter of robot
d = 0.5 # Parameter of robot
A = np.ones(6) - np.identity(6) # Adjancency Matrix fully connected case 6x6
ux = np.zeros((6,1)) # 6x1
uy = np.zeros((6,1)) # 6x1
" Connecting to V-Rep "
sim.simxFinish(-1) # just in case, close all opened connections
clientID=sim.simxStart('127.0.0.1',19997,True,True,-500000,5) # Connect to CoppeliaSim
N_SCENES = 80
scenes = np.hstack(( np.random.uniform(-2,2,size=(N_SCENES,2)), np.random.uniform(0,np.pi,size=(N_SCENES,1)), np.random.uniform(-2,2,(N_SCENES,2)), np.random.uniform(0,np.pi,size=(N_SCENES,1)) ))
"""
Description:
Consensus environment of 6 robots, where each episode they converge towards each other. DQN applied to robot 1 and rest are controlled with the consensus algorithm.
Source:
This environment corresponds to V-Rep simulator, integrated with ROS to publish actions & subscribe to observations.
Observation:
Type: Box(4)
Num Observation Min Max
0 Mx -4.8 4.8
1 My -4.8 4.8
2 Phix -4.8 4.8
3 Phiy -4.8 4.8
Actions:
Type: Discrete(4)
Num Action
0 Move the robot upwards
1 Move the robot downwards
2 Move the robot to the left
3 Move the robot to the right
""" | 39.1458 | 271 | 0.52759 | """
Defining Class of custom environment for V-Rep
@author: hussein
"""
import vrep_env
from vrep_env import vrep
import os
vrep_scenes_path = os.environ['/home/hussein/Desktop/Multi-agent-path-planning/Reinforcement Learning/examples/scenes']
import rclpy
from rclpy.node import Node
from tf2_msgs.msg import TFMessage
from std_msgs.msg import Float32
import sim
import math
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import time
# --- Consensus-controller constants and simulator connection ---------------
L = 1 # Parameter of robot
d = 0.5 # Parameter of robot
# Fully connected communication graph over the 6 robots (ones with zero diagonal).
A = np.ones(6) - np.identity(6) # Adjancency Matrix fully connected case 6x6
# Per-robot consensus control inputs (x / y components).
ux = np.zeros((6,1)) # 6x1
uy = np.zeros((6,1)) # 6x1
" Connecting to V-Rep "
sim.simxFinish(-1) # just in case, close all opened connections
# NOTE(review): importing this module opens the remote-API connection as a side
# effect -- presumably CoppeliaSim must already be listening on port 19997; confirm.
clientID=sim.simxStart('127.0.0.1',19997,True,True,-500000,5) # Connect to CoppeliaSim
# 80 random start scenes; columns are (x1, y1, theta1, x2, y2, theta2).
N_SCENES = 80
scenes = np.hstack(( np.random.uniform(-2,2,size=(N_SCENES,2)), np.random.uniform(0,np.pi,size=(N_SCENES,1)), np.random.uniform(-2,2,(N_SCENES,2)), np.random.uniform(0,np.pi,size=(N_SCENES,1)) ))
def euler_from_quaternion(x, y, z, w):
    """Return the yaw (rotation about Z), in radians, of quaternion (x, y, z, w)."""
    siny_cosp = 2.0 * (w * z + x * y)
    cosy_cosp = 1.0 - 2.0 * (y * y + z * z)
    return math.atan2(siny_cosp, cosy_cosp)
"""
Description:
Consensus environment of 6 robots, where each episode they converge towards each other. DQN applied to robot 1 and rest are controlled with the consensus algorithm.
Source:
This environment corresponds to V-Rep simulator, integrated with ROS to publish actions & subscribe to observations.
Observation:
Type: Box(4)
Num Observation Min Max
0 Mx -4.8 4.8
1 My -4.8 4.8
2 Phix -4.8 4.8
3 Phiy -4.8 4.8
Actions:
Type: Discrete(4)
Num Action
0 Move the robot upwards
1 Move the robot downwards
2 Move the robot to the left
3 Move the robot to the right
"""
class MobileRobotVrepEnv(vrep_env.VrepEnv):
    """Gym-style consensus environment for six differential-drive robots in V-REP.

    Robot 1 is driven by the learning agent (its action selects the signs of
    v1/w1 in ``timer_callback``); robots 2-6 follow the distributed consensus
    law.  The observation is robot 1's consensus information
    (Mx1, My1, Phix1, Phiy1); the reward is the negative sum of L1 distances
    between robot 1 and the other five robots.

    Fixes over the previous revision:
    * ``step`` referenced the non-existent attribute ``self.distanced``
      (AttributeError at runtime) -- now uses ``self.distance``.
    * ``observation_space`` was ``Box(-high, -high)`` (degenerate, low == high)
      -- now ``Box(-high, high)``.
    * ``self.scene`` (used by ``reset``) and ``self.action_input1`` (used by
      ``timer_callback``) are now initialized in ``__init__``.
    """
    metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second' : 50
    }
    def __init__(self, server_addr='127.0.0.1', server_port=19997, scene_path=vrep_scenes_path+'/Scene_of_Six_Robots.ttt'):
        """Connect to V-REP and set up the ROS publishers, subscriber and timer."""
        vrep_env.VrepEnv.__init__(self, server_addr, server_port, scene_path)
        # NOTE(review): rclpy Node methods (create_publisher, create_timer, ...)
        # are used below, but this class only lists vrep_env.VrepEnv as a base --
        # presumably the runtime provides the Node API through VrepEnv; confirm.
        super().__init__('minimal_publisher1')
        " One left/right wheel-speed publisher pair per robot "
        self.publisher_l1 = self.create_publisher(Float32, '/leftMotorSpeedrobot1', 0) #Change according to topic in child script,String to Float32
        self.publisher_r1 = self.create_publisher(Float32, '/rightMotorSpeedrobot1',0) #Change according to topic in child script,String to Float32
        self.publisher_l2 = self.create_publisher(Float32, '/leftMotorSpeedrobot2', 0) #Change according to topic in child script,String to Float32
        self.publisher_r2 = self.create_publisher(Float32, '/rightMotorSpeedrobot2',0) #Change according to topic in child script,String to Float32
        self.publisher_l3 = self.create_publisher(Float32, '/leftMotorSpeedrobot3', 0) #Change according to topic in child script,String to Float32
        self.publisher_r3 = self.create_publisher(Float32, '/rightMotorSpeedrobot3',0) #Change according to topic in child script,String to Float32
        self.publisher_l4 = self.create_publisher(Float32, '/leftMotorSpeedrobot4', 0) #Change according to topic in child script,String to Float32
        self.publisher_r4 = self.create_publisher(Float32, '/rightMotorSpeedrobot4',0) #Change according to topic in child script,String to Float32
        self.publisher_l5 = self.create_publisher(Float32, '/leftMotorSpeedrobot5', 0) #Change according to topic in child script,String to Float32
        self.publisher_r5 = self.create_publisher(Float32, '/rightMotorSpeedrobot5',0) #Change according to topic in child script,String to Float32
        self.publisher_l6 = self.create_publisher(Float32, '/leftMotorSpeedrobot6', 0) #Change according to topic in child script,String to Float32
        self.publisher_r6 = self.create_publisher(Float32, '/rightMotorSpeedrobot6',0) #Change according to topic in child script,String to Float32
        # Robot poses arrive on /tf; listener_callback caches them per robot.
        self.subscription = self.create_subscription(
            TFMessage,
            '/tf',
            self.listener_callback,
            0)
        " Timer Callback "
        timer_period = 0.03 # seconds
        self.timer = self.create_timer(timer_period, self.timer_callback)
        self.i = 0
        " Parameters "
        self.t = 0 # Just to intialized Phix's and Phiy's
        " Initialize Phi's "
        self.Phix1 = 0 # 1x1
        self.Phiy1 = 0 # 1x1
        self.Phix2 = 0 # 1x1
        self.Phiy2 = 0 # 1x1
        self.Phix3 = 0 # 1x1
        self.Phiy3 = 0 # 1x1
        self.Phix4 = 0 # 1x1
        self.Phiy4 = 0 # 1x1
        self.Phix5 = 0 # 1x1
        self.Phiy5 = 0 # 1x1
        self.Phix6 = 0 # 1x1
        self.Phiy6 = 0 # 1x1
        " Mobile Robot 1 Parameters "
        self.x1 = 0
        self.y1 = 0
        self.Theta1 = 0
        self.v1 = 0
        self.w1 = 0
        self.vL1 = 0
        self.vR1 = 0
        " Mobile Robot 2 Parameters "
        self.x2 = 0
        self.y2 = 0
        self.Theta2 = 0
        self.v2 = 0
        self.w2 = 0
        self.vL2 = 0
        self.vR2 = 0
        " Mobile Robot 3 Parameters "
        self.x3 = 0
        self.y3 = 0
        self.Theta3 = 0
        self.v3 = 0
        self.w3 = 0
        self.vL3 = 0
        self.vR3 = 0
        " Mobile Robot 4 Parameters "
        self.x4 = 0
        self.y4 = 0
        self.Theta4 = 0
        self.v4 = 0
        self.w4 = 0
        self.vL4 = 0
        self.vR4 = 0
        " Mobile Robot 5 Parameters "
        self.x5 = 0
        self.y5 = 0
        self.Theta5 = 0
        self.v5 = 0
        self.w5 = 0
        self.vL5 = 0
        self.vR5 = 0
        " Mobile Robot 6 Parameters "
        self.x6 = 0
        self.y6 = 0
        self.Theta6 = 0
        self.v6 = 0
        self.w6 = 0
        self.vL6 = 0
        self.vR6 = 0
        # Last action received from the agent, consumed by timer_callback.
        # Initialized here so the timer can fire before the first step() call.
        self.action_input1 = (0, 0)
        # Index of the next start configuration in `scenes`, used by reset().
        self.scene = 0
        " Distance at which to fail the episode "
        self.distance_threshold = 2.2
        " Observation & Action Space "
        # Define Action Space
        self.action_space = spaces.Discrete(4)
        # Define Observation Space
        high_observation = np.array([4.8,
                                     4.8,
                                     4.8,
                                     4.8],
                                    dtype=np.float32)
        # Fixed: was Box(-high, -high), a degenerate box with low == high.
        self.observation_space = spaces.Box(-high_observation, high_observation, dtype=np.float32)
        self.seed()
        self.viewer = None
        self.state = None
        self.steps_beyond_done = None
    def listener_callback(self, msg):
        """Cache the latest (x, y, yaw) pose of each robot published on /tf."""
        if msg.transforms[0].child_frame_id == 'robot1' :
            self.x1 = msg.transforms[0].transform.translation.x
            self.y1 = msg.transforms[0].transform.translation.y
            self.xr1 = msg.transforms[0].transform.rotation.x
            self.yr1 = msg.transforms[0].transform.rotation.y
            self.zr1 = msg.transforms[0].transform.rotation.z
            self.wr1 = msg.transforms[0].transform.rotation.w
            self.Theta1 = euler_from_quaternion(self.xr1,self.yr1,self.zr1,self.wr1)
            self.state1 = (self.x1,self.y1,self.Theta1)
        if  msg.transforms[0].child_frame_id == 'robot2' :
            self.x2 = msg.transforms[0].transform.translation.x
            self.y2 = msg.transforms[0].transform.translation.y
            self.xr2 = msg.transforms[0].transform.rotation.x
            self.yr2 = msg.transforms[0].transform.rotation.y
            self.zr2 = msg.transforms[0].transform.rotation.z
            self.wr2 = msg.transforms[0].transform.rotation.w
            self.Theta2 = euler_from_quaternion(self.xr2,self.yr2,self.zr2,self.wr2)
            self.state2 = (self.x2,self.y2,self.Theta2)
        if  msg.transforms[0].child_frame_id == 'robot3' :
            self.x3 = msg.transforms[0].transform.translation.x
            self.y3 = msg.transforms[0].transform.translation.y
            self.xr3 = msg.transforms[0].transform.rotation.x
            self.yr3 = msg.transforms[0].transform.rotation.y
            self.zr3 = msg.transforms[0].transform.rotation.z
            self.wr3 = msg.transforms[0].transform.rotation.w
            self.Theta3 = euler_from_quaternion(self.xr3,self.yr3,self.zr3,self.wr3)
            self.state3 = (self.x3,self.y3,self.Theta3)
        if  msg.transforms[0].child_frame_id == 'robot4' :
            self.x4 = msg.transforms[0].transform.translation.x
            self.y4 = msg.transforms[0].transform.translation.y
            self.xr4 = msg.transforms[0].transform.rotation.x
            self.yr4 = msg.transforms[0].transform.rotation.y
            self.zr4 = msg.transforms[0].transform.rotation.z
            self.wr4 = msg.transforms[0].transform.rotation.w
            self.Theta4 = euler_from_quaternion(self.xr4,self.yr4,self.zr4,self.wr4)
            self.state4 = (self.x4,self.y4,self.Theta4)
        if  msg.transforms[0].child_frame_id == 'robot5' :
            self.x5 = msg.transforms[0].transform.translation.x
            self.y5 = msg.transforms[0].transform.translation.y
            self.xr5 = msg.transforms[0].transform.rotation.x
            self.yr5 = msg.transforms[0].transform.rotation.y
            self.zr5 = msg.transforms[0].transform.rotation.z
            self.wr5 = msg.transforms[0].transform.rotation.w
            self.Theta5 = euler_from_quaternion(self.xr5,self.yr5,self.zr5,self.wr5)
            self.state5 = (self.x5,self.y5,self.Theta5)
        if  msg.transforms[0].child_frame_id == 'robot6' :
            self.x6 = msg.transforms[0].transform.translation.x
            self.y6 = msg.transforms[0].transform.translation.y
            self.xr6 = msg.transforms[0].transform.rotation.x
            self.yr6 = msg.transforms[0].transform.rotation.y
            self.zr6 = msg.transforms[0].transform.rotation.z
            self.wr6 = msg.transforms[0].transform.rotation.w
            self.Theta6 = euler_from_quaternion(self.xr6,self.yr6,self.zr6,self.wr6)
            self.state6 = (self.x6,self.y6,self.Theta6)
    def timer_callback(self):
        """Compute consensus control for robots 2-6 (robot 1 uses the agent's
        action) and publish wheel speeds for all six robots."""
        A = np.ones(6) - np.identity(6) # Adjancency Matrix
        self.X = np.array([ [self.x1], [self.x2], [self.x3], [self.x4], [self.x5], [self.x6] ]) #6x1
        self.Y = np.array([ [self.y1], [self.y2], [self.y3], [self.y4], [self.y5], [self.y6] ]) #6x1
        ux = np.zeros((6,1)) # 6x1
        uy = np.zeros((6,1)) # 6x1
        for i in range(1,7):
            for j in range(1,7):
                ux[i-1] += -(A[i-1][j-1])*(self.X[i-1]-self.X[j-1]) # 1x1 each
                uy[i-1] += -(A[i-1][j-1])*(self.Y[i-1]-self.Y[j-1]) # 1x1 each
        # Robot 1: the agent's action picks the sign of v1 and w1.
        # NOTE(review): action_space is Discrete(4) (an int), yet the action is
        # indexed with [0]/[1] here -- presumably actions are decoded into a
        # 2-bit pair upstream; confirm the agent-side encoding.
        if self.action_input1[0]==0:
            self.v1 = -1.0
        else:
            self.v1 = +1.0
        if self.action_input1[1]==0:
            self.w1 = -1.0
        else:
            self.w1 = +1.0
        u2 = np.array([ [float(ux[1])], [float(uy[1])] ]) # 2x1
        u3 = np.array([ [float(ux[2])], [float(uy[2])] ]) # 2x1
        u4 = np.array([ [float(ux[3])], [float(uy[3])] ]) # 2x1
        u5 = np.array([ [float(ux[4])], [float(uy[4])] ]) # 2x1
        u6 = np.array([ [float(ux[5])], [float(uy[5])] ]) # 2x1
        " Calculate V1/W1, V2/W2, V3/W3, V4/W4, V5/W5, V6/W6 "
        S1 = np.array([[self.v1], [self.w1]]) #2x1
        S2 = np.array([[self.v2], [self.w2]]) #2x1
        G2 = np.array([[1,0], [0,1/L]]) #2x2
        R2 = np.array([[math.cos(self.Theta2),math.sin(self.Theta2)],[-math.sin(self.Theta2),math.cos(self.Theta2)]]) #2x2
        S2 = np.dot(np.dot(G2, R2), u2) # 2x1
        S3 = np.array([[self.v3], [self.w3]]) #2x1
        G3 = np.array([[1,0], [0,1/L]]) #2x2
        R3 = np.array([[math.cos(self.Theta3),math.sin(self.Theta3)],[-math.sin(self.Theta3),math.cos(self.Theta3)]]) #2x2
        S3 = np.dot(np.dot(G3, R3), u3) #2x1
        S4 = np.array([[self.v4], [self.w4]]) #2x1
        G4 = np.array([[1,0], [0,1/L]]) #2x2
        R4 = np.array([[math.cos(self.Theta4),math.sin(self.Theta4)],[-math.sin(self.Theta4),math.cos(self.Theta4)]]) #2x2
        S4 = np.dot(np.dot(G4, R4), u4) #2x1
        S5 = np.array([[self.v5], [self.w5]]) #2x1
        G5 = np.array([[1,0], [0,1/L]]) #2x2
        R5 = np.array([[math.cos(self.Theta5),math.sin(self.Theta5)],[-math.sin(self.Theta5),math.cos(self.Theta5)]]) #2x2
        S5 = np.dot(np.dot(G5, R5), u5) #2x1
        S6 = np.array([[self.v6], [self.w6]]) #2x1
        G6 = np.array([[1,0], [0,1/L]]) #2x2
        R6 = np.array([[math.cos(self.Theta6),math.sin(self.Theta6)],[-math.sin(self.Theta6),math.cos(self.Theta6)]]) #2x2
        S6 = np.dot(np.dot(G6, R6), u6) #2x1
        " Calculate VL1/VR1, VL2/VR2, VL3/VR3, VL4/VR4, VL5/VR5, VL6/VR6 "
        # Differential-drive kinematics: (v, w) -> (vL, vR) via the inverse of D.
        D = np.array([[1/2,1/2],[-1/(2*d),1/(2*d)]]) #2x2
        Di = np.linalg.inv(D) #2x2
        M1 = np.array([[S1[0]],[S1[1]]]).reshape(2,1) #2x1
        M2 = np.array([[S2[0]],[S2[1]]]).reshape(2,1) #2x1
        M3 = np.array([[S3[0]],[S3[1]]]).reshape(2,1) #2x1
        M4 = np.array([[S4[0]],[S4[1]]]).reshape(2,1) #2x1
        M5 = np.array([[S5[0]],[S5[1]]]).reshape(2,1) #2x1
        M6 = np.array([[S6[0]],[S6[1]]]).reshape(2,1) #2x1
        Speed_L1 = np.dot(Di, M1) # 2x1 (VL1, VR1)
        Speed_L2 = np.dot(Di, M2) # 2x1 (VL2, VR2)
        Speed_L3 = np.dot(Di, M3) # 2x1 (VL3, VR3)
        Speed_L4 = np.dot(Di, M4) # 2x1 (VL4, VR4)
        Speed_L5 = np.dot(Di, M5) # 2x1 (VL5, VR5)
        Speed_L6 = np.dot(Di, M6) # 2x1 (VL6, VR6)
        VL1 = float(Speed_L1[0])
        VR1 = float(Speed_L1[1])
        VL2 = float(Speed_L2[0])
        VR2 = float(Speed_L2[1])
        VL3 = float(Speed_L3[0])
        VR3 = float(Speed_L3[1])
        VL4 = float(Speed_L4[0])
        VR4 = float(Speed_L4[1])
        VL5 = float(Speed_L5[0])
        VR5 = float(Speed_L5[1])
        VL6 = float(Speed_L6[0])
        VR6 = float(Speed_L6[1])
        " Publish Speed Commands to Robot 1 "
        msgl1 = Float32()
        msgr1 = Float32()
        msgl1.data = VL1
        msgr1.data = VR1
        self.publisher_l1.publish(msgl1)
        self.publisher_r1.publish(msgr1)
        " Publish Speed Commands to Robot 2 "
        msgl2 = Float32()
        msgr2 = Float32()
        msgl2.data = VL2
        msgr2.data = VR2
        self.publisher_l2.publish(msgl2)
        self.publisher_r2.publish(msgr2)
        " Publish Speed Commands to Robot 3 "
        msgl3 = Float32()
        msgr3 = Float32()
        msgl3.data = VL3
        msgr3.data = VR3
        self.publisher_l3.publish(msgl3)
        self.publisher_r3.publish(msgr3)
        " Publish Speed Commands to Robot 4 "
        msgl4 = Float32()
        msgr4 = Float32()
        msgl4.data = VL4
        msgr4.data = VR4
        self.publisher_l4.publish(msgl4)
        self.publisher_r4.publish(msgr4)
        " Publish Speed Commands to Robot 5 "
        msgl5 = Float32()
        msgr5 = Float32()
        msgl5.data = VL5
        msgr5.data = VR5
        self.publisher_l5.publish(msgl5)
        self.publisher_r5.publish(msgr5)
        " Publish Speed Commands to Robot 6 "
        msgl6 = Float32()
        msgr6 = Float32()
        msgl6.data = VL6
        msgr6.data = VR6
        self.publisher_l6.publish(msgl6)
        self.publisher_r6.publish(msgr6)
    def seed(self, seed=None):
        """Seed the environment's random number generator; return [seed]."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]
    def step(self, action):
        """Record the agent's action, compute the consensus observation for
        robot 1 and return (observation, reward, done, info).

        Reward is the negative L1 distance sum between robot 1 and the other
        robots; the episode ends when that sum drops below the threshold.
        """
        assert self.action_space.contains(action), "%r (%s) invalid"%(action, type(action))
        " Distance Threshold "
        self.distance = abs(self.x1 - self.x2) + abs(self.y1 - self.y2) + abs(self.x1 - self.x3) + abs(self.y1 - self.y3) + abs(self.x1 - self.x4) + abs(self.y1 - self.y4) + abs(self.x1 - self.x5) + abs(self.y1 - self.y5) + abs(self.x1 - self.x6) + abs(self.y1 - self.y6)
        " Use Adjacency Matrix to find Mxy and Phi's "
        A = np.ones(6) - np.identity(6) # Adjancency Matrix
        self.X = np.array([ [self.x1], [self.x2], [self.x3], [self.x4], [self.x5], [self.x6] ]) #6x1
        self.Y = np.array([ [self.y1], [self.y2], [self.y3], [self.y4], [self.y5], [self.y6] ]) #6x1
        Mx = np.zeros((6,1)) # 6x1
        My = np.zeros((6,1)) # 6x1
        for i in range(1,7):
            for j in range(1,7):
                Mx[i-1] += (A[i-1][j-1])*(self.X[j-1] - self.X[i-1]) # 1x1 each
                My[i-1] += (A[i-1][j-1])*(self.Y[j-1] - self.Y[i-1]) # 1x1 each
        Mx1 = float(Mx[0]) / 5 # 1x1
        My1 = float(My[0]) / 5 # 1x1
        Mx2 = float(Mx[1]) / 5 # 1x1
        My2 = float(My[1]) / 5 # 1x1
        Mx3 = float(Mx[2]) / 5 # 1x1
        My3 = float(My[2]) / 5 # 1x1
        Mx4 = float(Mx[3]) / 5 # 1x1
        My4 = float(My[3]) / 5 # 1x1
        Mx5 = float(Mx[4]) / 5 # 1x1
        My5 = float(My[4]) / 5 # 1x1
        Mx6 = float(Mx[5]) / 5 # 1x1
        My6 = float(My[5]) / 5 # 1x1
        self.Phix1 = ( Mx2 + Mx3 + Mx4 + Mx5 + Mx6 ) / 5 # 1x1
        self.Phiy1 = ( My2 + My3 + My4 + My5 + My6 ) / 5 # 1x1
        self.Phix2 = ( Mx1 + Mx3 + Mx4 + Mx5 + Mx6 ) / 5 # 1x1
        self.Phiy2 = ( My1 + My3 + My4 + My5 + My6 ) / 5 # 1x1
        self.Phix3 = ( Mx1 + Mx2 + Mx4 + Mx5 + Mx6 ) / 5 # 1x1
        self.Phiy3 = ( My1 + My2 + My4 + My5 + My6 ) / 5 # 1x1
        self.Phix4 = ( Mx1 + Mx2 + Mx3 + Mx5 + Mx6 ) / 5 # 1x1
        self.Phiy4 = ( My1 + My2 + My3 + My5 + My6 ) / 5 # 1x1
        self.Phix5 = ( Mx1 + Mx2 + Mx3 + Mx4 + Mx6 ) / 5 # 1x1
        self.Phiy5 = ( My1 + My2 + My3 + My4 + My6 ) / 5 # 1x1
        self.Phix6 = ( Mx1 + Mx2 + Mx3 + Mx4 + Mx5 ) / 5 # 1x1
        self.Phiy6 = ( My1 + My2 + My3 + My4 + My5 ) / 5 # 1x1
        observation_DQN = np.array([Mx1, My1, self.Phix1, self.Phiy1])
        self.action_input1 = action
        done = self.distance < self.distance_threshold
        done = bool(done)
        # Fixed: previously referenced the non-existent `self.distanced`.
        reward = -self.distance
        return observation_DQN, reward, done, {}
    def reset(self):
        """Stop the simulation, move robots 1 and 2 to the next stored scene,
        restart the simulation and return the initial observation.

        NOTE(review): only robots 1 and 2 are repositioned here -- presumably
        robots 3-6 keep their scene-file poses; confirm this is intended.
        """
        if self.sim_running:
            self.stop_simulation()
        # Stop Simulation
        sim.simxStopSimulation(clientID, sim.simx_opmode_oneshot_wait)
        # Retrieve some handles:
        ErrLocM1,LocM1 =sim.simxGetObjectHandle(clientID, 'robot1', sim.simx_opmode_oneshot_wait)
        if (not ErrLocM1==sim.simx_return_ok):
            pass
        ErrLocM2,LocM2 =sim.simxGetObjectHandle(clientID, 'robot2#0', sim.simx_opmode_oneshot_wait)
        if (not ErrLocM2==sim.simx_return_ok):
            pass
        ErrLoc1,Loc1 =sim.simxGetObjectPosition(clientID, LocM1, -1, sim.simx_opmode_oneshot_wait)
        if (not ErrLoc1==sim.simx_return_ok):
            pass
        ErrLoc2,Loc2 =sim.simxGetObjectPosition(clientID, LocM2, -1, sim.simx_opmode_oneshot_wait)
        if (not ErrLoc2==sim.simx_return_ok):
            pass
        ErrLocO1,OriRobo1 =sim.simxGetObjectOrientation(clientID,LocM1, -1, sim.simx_opmode_oneshot_wait)
        if (not ErrLocO1==sim.simx_return_ok):
            pass
        ErrLocO2,OriRobo2 =sim.simxGetObjectOrientation(clientID,LocM2, -1, sim.simx_opmode_oneshot_wait)
        if (not ErrLocO2==sim.simx_return_ok):
            pass
        OriRobo1[2] = scenes[self.scene][2]
        OriRobo2[2] = scenes[self.scene][5]
        # Set Robot Orientation
        sim.simxSetObjectOrientation(clientID, LocM1, -1, OriRobo1, sim.simx_opmode_oneshot_wait)
        sim.simxSetObjectOrientation(clientID, LocM2, -1, OriRobo2, sim.simx_opmode_oneshot_wait)
        Loc1[0] = scenes[self.scene][0]
        Loc2[0] = scenes[self.scene][3]
        Loc1[1] = scenes[self.scene][1]
        Loc2[1] = scenes[self.scene][4]
        # Set Robot Position
        sim.simxSetObjectPosition(clientID, LocM1, -1, Loc1, sim.simx_opmode_oneshot)
        sim.simxSetObjectPosition(clientID, LocM2, -1, Loc2, sim.simx_opmode_oneshot)
        # Nb of Scene Counter (self.scene is initialized to 0 in __init__)
        self.scene += 1
        " Use Adjacency Matrix to find Mxy and Phi's "
        A = np.ones(6) - np.identity(6) # Adjancency Matrix
        self.X = np.array([ [self.x1], [self.x2], [self.x3], [self.x4], [self.x5], [self.x6] ]) #6x1
        self.Y = np.array([ [self.y1], [self.y2], [self.y3], [self.y4], [self.y5], [self.y6] ]) #6x1
        Mx = np.zeros((6,1)) # 6x1
        My = np.zeros((6,1)) # 6x1
        for i in range(1,7):
            for j in range(1,7):
                Mx[i-1] += (A[i-1][j-1])*(self.X[j-1] - self.X[i-1]) # 1x1 each
                My[i-1] += (A[i-1][j-1])*(self.Y[j-1] - self.Y[i-1]) # 1x1 each
        Mx1 = float(Mx[0]) / 5 # 1x1
        My1 = float(My[0]) / 5 # 1x1
        Mx2 = float(Mx[1]) / 5 # 1x1
        My2 = float(My[1]) / 5 # 1x1
        Mx3 = float(Mx[2]) / 5 # 1x1
        My3 = float(My[2]) / 5 # 1x1
        Mx4 = float(Mx[3]) / 5 # 1x1
        My4 = float(My[3]) / 5 # 1x1
        Mx5 = float(Mx[4]) / 5 # 1x1
        My5 = float(My[4]) / 5 # 1x1
        Mx6 = float(Mx[5]) / 5 # 1x1
        My6 = float(My[5]) / 5 # 1x1
        self.Phix1 = ( Mx2 + Mx3 + Mx4 + Mx5 + Mx6 ) / 5 # 1x1
        self.Phiy1 = ( My2 + My3 + My4 + My5 + My6 ) / 5 # 1x1
        self.Phix2 = ( Mx1 + Mx3 + Mx4 + Mx5 + Mx6 ) / 5 # 1x1
        self.Phiy2 = ( My1 + My3 + My4 + My5 + My6 ) / 5 # 1x1
        self.Phix3 = ( Mx1 + Mx2 + Mx4 + Mx5 + Mx6 ) / 5 # 1x1
        self.Phiy3 = ( My1 + My2 + My4 + My5 + My6 ) / 5 # 1x1
        self.Phix4 = ( Mx1 + Mx2 + Mx3 + Mx5 + Mx6 ) / 5 # 1x1
        self.Phiy4 = ( My1 + My2 + My3 + My5 + My6 ) / 5 # 1x1
        self.Phix5 = ( Mx1 + Mx2 + Mx3 + Mx4 + Mx6 ) / 5 # 1x1
        self.Phiy5 = ( My1 + My2 + My3 + My4 + My6 ) / 5 # 1x1
        self.Phix6 = ( Mx1 + Mx2 + Mx3 + Mx4 + Mx5 ) / 5 # 1x1
        self.Phiy6 = ( My1 + My2 + My3 + My4 + My5 ) / 5 # 1x1
        observation_DQN = np.array([Mx1, My1, self.Phix1, self.Phiy1])
        # Start Simulation
        sim.simxStartSimulation(clientID, sim.simx_opmode_oneshot_wait)
        time.sleep(5)
        return observation_DQN
    def render(self):
        """Rendering happens inside V-REP; nothing to do here."""
        pass
    def close(self):
        """Shut down the V-REP remote-API connection."""
        vrep_env.VrepEnv.close(self)
f723f8c5703335b1a0fa5a181861df8ecc2a13f8 | 6,240 | py | Python | python/istio_api/mixer/v1/config/client/service_pb2.py | mt-inside/api | 3197d4dee332beb55f830899f37091c9899833f9 | [
"Apache-2.0"
] | 3 | 2020-11-30T15:35:37.000Z | 2022-01-06T14:17:18.000Z | python/istio_api/mixer/v1/config/client/service_pb2.py | mt-inside/api | 3197d4dee332beb55f830899f37091c9899833f9 | [
"Apache-2.0"
] | 54 | 2020-06-23T17:34:04.000Z | 2022-03-31T02:04:06.000Z | python/istio_api/mixer/v1/config/client/service_pb2.py | mt-inside/api | 3197d4dee332beb55f830899f37091c9899833f9 | [
"Apache-2.0"
] | 12 | 2020-07-14T23:59:57.000Z | 2022-03-22T09:59:18.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mixer/v1/config/client/service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from gogoproto import gogo_pb2 as gogoproto_dot_gogo__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mixer/v1/config/client/service.proto',
package='istio.mixer.v1.config.client',
syntax='proto3',
serialized_options=_b('Z#istio.io/api/mixer/v1/config/client\310\341\036\000\250\342\036\000\360\341\036\000\330\342\036\001'),
serialized_pb=_b('\n$mixer/v1/config/client/service.proto\x12\x1cistio.mixer.v1.config.client\x1a\x14gogoproto/gogo.proto\"\xc7\x01\n\x0cIstioService\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\x12\x0e\n\x06\x64omain\x18\x03 \x01(\t\x12\x0f\n\x07service\x18\x04 \x01(\t\x12\x46\n\x06labels\x18\x05 \x03(\x0b\x32\x36.istio.mixer.v1.config.client.IstioService.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x35Z#istio.io/api/mixer/v1/config/client\xc8\xe1\x1e\x00\xa8\xe2\x1e\x00\xf0\xe1\x1e\x00\xd8\xe2\x1e\x01\x62\x06proto3')
,
dependencies=[gogoproto_dot_gogo__pb2.DESCRIPTOR,])
_ISTIOSERVICE_LABELSENTRY = _descriptor.Descriptor(
name='LabelsEntry',
full_name='istio.mixer.v1.config.client.IstioService.LabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='istio.mixer.v1.config.client.IstioService.LabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='istio.mixer.v1.config.client.IstioService.LabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=247,
serialized_end=292,
)
_ISTIOSERVICE = _descriptor.Descriptor(
name='IstioService',
full_name='istio.mixer.v1.config.client.IstioService',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='istio.mixer.v1.config.client.IstioService.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='namespace', full_name='istio.mixer.v1.config.client.IstioService.namespace', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='domain', full_name='istio.mixer.v1.config.client.IstioService.domain', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='service', full_name='istio.mixer.v1.config.client.IstioService.service', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels', full_name='istio.mixer.v1.config.client.IstioService.labels', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_ISTIOSERVICE_LABELSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=93,
serialized_end=292,
)
_ISTIOSERVICE_LABELSENTRY.containing_type = _ISTIOSERVICE
_ISTIOSERVICE.fields_by_name['labels'].message_type = _ISTIOSERVICE_LABELSENTRY
DESCRIPTOR.message_types_by_name['IstioService'] = _ISTIOSERVICE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
IstioService = _reflection.GeneratedProtocolMessageType('IstioService', (_message.Message,), {
'LabelsEntry' : _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), {
'DESCRIPTOR' : _ISTIOSERVICE_LABELSENTRY,
'__module__' : 'mixer.v1.config.client.service_pb2'
# @@protoc_insertion_point(class_scope:istio.mixer.v1.config.client.IstioService.LabelsEntry)
})
,
'DESCRIPTOR' : _ISTIOSERVICE,
'__module__' : 'mixer.v1.config.client.service_pb2'
# @@protoc_insertion_point(class_scope:istio.mixer.v1.config.client.IstioService)
})
_sym_db.RegisterMessage(IstioService)
_sym_db.RegisterMessage(IstioService.LabelsEntry)
DESCRIPTOR._options = None
_ISTIOSERVICE_LABELSENTRY._options = None
# @@protoc_insertion_point(module_scope)
| 41.6 | 624 | 0.747276 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mixer/v1/config/client/service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from gogoproto import gogo_pb2 as gogoproto_dot_gogo__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mixer/v1/config/client/service.proto',
package='istio.mixer.v1.config.client',
syntax='proto3',
serialized_options=_b('Z#istio.io/api/mixer/v1/config/client\310\341\036\000\250\342\036\000\360\341\036\000\330\342\036\001'),
serialized_pb=_b('\n$mixer/v1/config/client/service.proto\x12\x1cistio.mixer.v1.config.client\x1a\x14gogoproto/gogo.proto\"\xc7\x01\n\x0cIstioService\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\x12\x0e\n\x06\x64omain\x18\x03 \x01(\t\x12\x0f\n\x07service\x18\x04 \x01(\t\x12\x46\n\x06labels\x18\x05 \x03(\x0b\x32\x36.istio.mixer.v1.config.client.IstioService.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x35Z#istio.io/api/mixer/v1/config/client\xc8\xe1\x1e\x00\xa8\xe2\x1e\x00\xf0\xe1\x1e\x00\xd8\xe2\x1e\x01\x62\x06proto3')
,
dependencies=[gogoproto_dot_gogo__pb2.DESCRIPTOR,])
_ISTIOSERVICE_LABELSENTRY = _descriptor.Descriptor(
name='LabelsEntry',
full_name='istio.mixer.v1.config.client.IstioService.LabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='istio.mixer.v1.config.client.IstioService.LabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='istio.mixer.v1.config.client.IstioService.LabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=247,
serialized_end=292,
)
_ISTIOSERVICE = _descriptor.Descriptor(
name='IstioService',
full_name='istio.mixer.v1.config.client.IstioService',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='istio.mixer.v1.config.client.IstioService.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='namespace', full_name='istio.mixer.v1.config.client.IstioService.namespace', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='domain', full_name='istio.mixer.v1.config.client.IstioService.domain', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='service', full_name='istio.mixer.v1.config.client.IstioService.service', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels', full_name='istio.mixer.v1.config.client.IstioService.labels', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_ISTIOSERVICE_LABELSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=93,
serialized_end=292,
)
_ISTIOSERVICE_LABELSENTRY.containing_type = _ISTIOSERVICE
_ISTIOSERVICE.fields_by_name['labels'].message_type = _ISTIOSERVICE_LABELSENTRY
DESCRIPTOR.message_types_by_name['IstioService'] = _ISTIOSERVICE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
IstioService = _reflection.GeneratedProtocolMessageType('IstioService', (_message.Message,), {
'LabelsEntry' : _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), {
'DESCRIPTOR' : _ISTIOSERVICE_LABELSENTRY,
'__module__' : 'mixer.v1.config.client.service_pb2'
# @@protoc_insertion_point(class_scope:istio.mixer.v1.config.client.IstioService.LabelsEntry)
})
,
'DESCRIPTOR' : _ISTIOSERVICE,
'__module__' : 'mixer.v1.config.client.service_pb2'
# @@protoc_insertion_point(class_scope:istio.mixer.v1.config.client.IstioService)
})
_sym_db.RegisterMessage(IstioService)
_sym_db.RegisterMessage(IstioService.LabelsEntry)
DESCRIPTOR._options = None
_ISTIOSERVICE_LABELSENTRY._options = None
# @@protoc_insertion_point(module_scope)
| 0 | 0 | 0 |
89707121790cc25aa1f25ca1fe56ce628f54fe0a | 548 | py | Python | src/consensus/weight_verifier.py | nondejus/chia-blockchain | 67373400e7f88adff0c86e3bae2ddeadb49429ae | [
"Apache-2.0"
] | null | null | null | src/consensus/weight_verifier.py | nondejus/chia-blockchain | 67373400e7f88adff0c86e3bae2ddeadb49429ae | [
"Apache-2.0"
] | null | null | null | src/consensus/weight_verifier.py | nondejus/chia-blockchain | 67373400e7f88adff0c86e3bae2ddeadb49429ae | [
"Apache-2.0"
] | null | null | null | from typing import List
from src.types.header_block import HeaderBlock
def verify_weight(
tip: HeaderBlock, proof_blocks: List[HeaderBlock], fork_point: HeaderBlock
) -> bool:
"""
Verifies whether the weight of the tip is valid or not. Naively, looks at every block
from genesis, verifying proof of space, proof of time, and difficulty resets.
# TODO: implement
"""
for height, block in enumerate(proof_blocks):
if not block.height == height + fork_point.height + 1:
return False
return True
| 28.842105 | 89 | 0.698905 | from typing import List
from src.types.header_block import HeaderBlock
def verify_weight(
tip: HeaderBlock, proof_blocks: List[HeaderBlock], fork_point: HeaderBlock
) -> bool:
"""
Verifies whether the weight of the tip is valid or not. Naively, looks at every block
from genesis, verifying proof of space, proof of time, and difficulty resets.
# TODO: implement
"""
for height, block in enumerate(proof_blocks):
if not block.height == height + fork_point.height + 1:
return False
return True
| 0 | 0 | 0 |
86730fd27a0440e8d0df28be564156a50855e775 | 9,755 | py | Python | honeybot/lib/utils.py | RogerDeng/HoneyBot | 3843ec6d684786091ced053857d1718ef1fa495c | [
"MIT"
] | 67 | 2019-08-16T05:03:19.000Z | 2021-11-25T01:48:23.000Z | honeybot/lib/utils.py | RogerDeng/HoneyBot | 3843ec6d684786091ced053857d1718ef1fa495c | [
"MIT"
] | null | null | null | honeybot/lib/utils.py | RogerDeng/HoneyBot | 3843ec6d684786091ced053857d1718ef1fa495c | [
"MIT"
] | 16 | 2020-02-20T12:38:40.000Z | 2022-03-22T17:45:25.000Z | """
__author__: Jamin Becker (jamin@packettotal.com)
"""
import os
import sys
import time
import socket
import logging
import pathlib
import warnings
from hashlib import md5
import psutil
import pyshark
import progressbar
from magic import from_buffer
from terminaltables import AsciiTable
from packettotal_sdk.packettotal_api import PacketTotalApi
from honeybot.lib import const
def capture_on_interface(interface, name, timeout=60):
"""
:param interface: The name of the interface on which to capture traffic
:param name: The name of the capture file
:param timeout: A limit in seconds specifying how long to capture traffic
"""
if timeout < 15:
logger.error("Timeout must be over 15 seconds.")
return
if not sys.warnoptions:
warnings.simplefilter("ignore")
start = time.time()
widgets = [
progressbar.Bar(marker=progressbar.RotatingMarker()),
' ',
progressbar.FormatLabel('Packets Captured: %(value)d'),
' ',
progressbar.Timer(),
]
progress = progressbar.ProgressBar(widgets=widgets)
capture = pyshark.LiveCapture(interface=interface, output_file=os.path.join('tmp', name))
pcap_size = 0
for i, packet in enumerate(capture.sniff_continuously()):
progress.update(i)
if os.path.getsize(os.path.join('tmp', name)) != pcap_size:
pcap_size = os.path.getsize(os.path.join('tmp', name))
if not isinstance(packet, pyshark.packet.packet.Packet):
continue
if time.time() - start > timeout:
break
if pcap_size > const.PT_MAX_BYTES:
break
capture.clear()
capture.close()
return pcap_size
def get_filepath_md5_hash(file_path):
"""
:param file_path: path to the file being hashed
:return: the md5 hash of a file
"""
with open(file_path, 'rb') as afile:
return get_file_md5_hash(afile)
def get_mac_address_of_interface(interface):
"""
:param interface: The friendly name of a network interface
:return: the MAC address associated with that interface
"""
for k,v in psutil.net_if_addrs().items():
if interface == k:
for item in v:
try:
if item.family == socket.AF_LINK:
return item.address
except AttributeError:
# Linux
if item.family == socket.AF_PACKET:
return item.address
return None
def gen_unique_id(interface):
"""
Generates a unique ID based on your MAC address that will be used to tag all PCAPs uploaded to PacketTotal.com
This ID can be used to search and view PCAPs you have uploaded.
:param interface: The friendly name of a network interface
:return: A unique id
"""
mac_address = get_mac_address_of_interface(interface)
if mac_address:
return get_str_md5_hash(get_str_md5_hash(mac_address))[0:15]
return None
def get_file_md5_hash(fh):
"""
:param fh: file handle
:return: the md5 hash of the file
"""
block_size = 65536
md5_hasher = md5()
buf = fh.read(block_size)
while len(buf) > 0:
md5_hasher.update(buf)
buf = fh.read(block_size)
return md5_hasher.hexdigest()
def get_network_interfaces():
"""
:return: A list of valid interfaces and their addresses
"""
return psutil.net_if_addrs().items()
def is_packet_capture(bytes):
"""
:param bytes: raw bytes
:return: True is valid pcap or pcapng file
"""
result = from_buffer(bytes)
valid = "pcap-ng" in result or "tcpdump" in result or "NetMon" in result or 'pcap capture file' in result
return valid
def mkdir_p(path):
"""
:param path: Path to the new directory to create
"""
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
def listen_on_interface(interface, timeout=60):
"""
:param interface: The name of the interface on which to capture traffic
:return: generator containing live packets
"""
start = time.time()
capture = pyshark.LiveCapture(interface=interface)
for item in capture.sniff_continuously():
if timeout and time.time() - start > timeout:
break
yield item
def print_network_interaces():
"""
:return: Prints a human readable representation of the available network interfaces
"""
for intf, items in get_network_interfaces():
table = [["family", "address", "netmask", "broadcast", "ptp"]]
for item in items:
family, address, netmask, broadcast, ptp = item
table.append([str(family), str(address), str(netmask), str(broadcast), str(ptp)])
print(AsciiTable(table_data=table, title=intf).table)
print('\n')
def print_pt_ascii_logo():
"""
:return: Prints a PacketTotal.com (visit https://PacketTotal.com!)
"""
logo = """
,,*****, ,****,
****/**, / ,*//*,
****/**, ,,**********,. ******
.. .,*****, .. ******** ************,. .
. .,,***, ,******, .,***,,.. *****////***, ,***,
... ,******, ,******, .,******** *****////***,. ****,
.,,****, ,******, ,******, ,******** ************, .///
/////* // ////// /( ////// /( *///// ************
****,. ,,******. ,******. ,******, .******,,.,******,. *
*****, ********, ,******, ,******, .,********.,******** ,*******,
*****, ********, ,******, ,******, .,********.,******** ****//**,
*****, /*******, ,******* *//////* ********/ ,******** ****//**,
////* / .////// ((.(((((( ## (((((( (& /((((( % ////// ,******** .
,,,,,. ,,,,,,,,. ,,*****, ,******, ,*****,,,..,,,,,,,. .,,,,,,
*****, /*******, *//////* *//////* .*//////*/.*********,,*****,
*****, /*******, *//////* (. ( *////////.*********,,******
*****, /******** */////(* ..,,,,,,.. (/////// ********* *******
////* / ,/((((/ #@,####*..,**********,. /####. & /(((((. @ *////
,,,,,. ..,,,,*,, ,**, ,*****////*****,./***,. ,,,,,,,.. .,,,,,
,***, /*****//* */// .,***//(##((/****, /////,*//******,,***,
****, /*****//* *//* ,****/(%&&%(//**,, ////(,*//******,,****
****, /*****//* *//* ,***//(##(//***** *///( *//******.***,
*** ( **///// #@//((. ******////****** /(((* @ //////* **,
,,. ..,,,,,,, ,****,. ************ .,****,. ,,,,,,,.. ,,*
*, /******** *//////, ,****, ,////////.*********,,*
* /*******, *//////* ,******, .*////////.*********,,
/******** *//////* *//////* *//////*/ *********
*******,# ////////# ////////# //////* /******
,,,,,..((.,,,,,,.(#.,,,,,,.(#.,,,,,,..(..,,,,,
* *****, ,******, *//////* .,*******/.,*****
***, ,******, ,******, .,********.,*** /
* ,*, ,******, ,******, ,********.,* .
*******/ *******/ ,******* ,
,,,,,..// .,,,,..// .,,,,,
/ .****, ,******, .,****, /
/ *** ,******, ***
******** #
- honeybot: Capture and analyze network traffic; powered by PacketTotal.com
: VERSION: {}
""".format(const.VERSION)
print(logo)
# Setup Logging
mkdir_p('logs/')
mkdir_p('tmp/')
logger = logging.getLogger('honeybot.utils')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('logs/honeybot.log')
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch) | 34.348592 | 115 | 0.476371 | """
__author__: Jamin Becker (jamin@packettotal.com)
"""
import os
import sys
import time
import socket
import logging
import pathlib
import warnings
from hashlib import md5
import psutil
import pyshark
import progressbar
from magic import from_buffer
from terminaltables import AsciiTable
from packettotal_sdk.packettotal_api import PacketTotalApi
from honeybot.lib import const
def capture_on_interface(interface, name, timeout=60):
"""
:param interface: The name of the interface on which to capture traffic
:param name: The name of the capture file
:param timeout: A limit in seconds specifying how long to capture traffic
"""
if timeout < 15:
logger.error("Timeout must be over 15 seconds.")
return
if not sys.warnoptions:
warnings.simplefilter("ignore")
start = time.time()
widgets = [
progressbar.Bar(marker=progressbar.RotatingMarker()),
' ',
progressbar.FormatLabel('Packets Captured: %(value)d'),
' ',
progressbar.Timer(),
]
progress = progressbar.ProgressBar(widgets=widgets)
capture = pyshark.LiveCapture(interface=interface, output_file=os.path.join('tmp', name))
pcap_size = 0
for i, packet in enumerate(capture.sniff_continuously()):
progress.update(i)
if os.path.getsize(os.path.join('tmp', name)) != pcap_size:
pcap_size = os.path.getsize(os.path.join('tmp', name))
if not isinstance(packet, pyshark.packet.packet.Packet):
continue
if time.time() - start > timeout:
break
if pcap_size > const.PT_MAX_BYTES:
break
capture.clear()
capture.close()
return pcap_size
def check_auth():
home = str(pathlib.Path.home())
key = ''
auth_path = os.path.join(home, 'honeybot.auth')
if not os.path.exists(auth_path):
print('HoneyBot requires a PacketTotal API key.')
print('Signup at: \n\t: https://packettotal.com/api.html\n')
else:
key = open(auth_path, 'r').read()
while PacketTotalApi(key).usage().status_code == 403:
print('Invalid API Key. Try again.')
key = input('API Key: ')
open(auth_path, 'w').write(key)
return open(auth_path, 'r').read()
def get_filepath_md5_hash(file_path):
"""
:param file_path: path to the file being hashed
:return: the md5 hash of a file
"""
with open(file_path, 'rb') as afile:
return get_file_md5_hash(afile)
def get_str_md5_hash(s):
return md5(str(s).encode('utf-8')).hexdigest()
def get_mac_address_of_interface(interface):
"""
:param interface: The friendly name of a network interface
:return: the MAC address associated with that interface
"""
for k,v in psutil.net_if_addrs().items():
if interface == k:
for item in v:
try:
if item.family == socket.AF_LINK:
return item.address
except AttributeError:
# Linux
if item.family == socket.AF_PACKET:
return item.address
return None
def gen_unique_id(interface):
"""
Generates a unique ID based on your MAC address that will be used to tag all PCAPs uploaded to PacketTotal.com
This ID can be used to search and view PCAPs you have uploaded.
:param interface: The friendly name of a network interface
:return: A unique id
"""
mac_address = get_mac_address_of_interface(interface)
if mac_address:
return get_str_md5_hash(get_str_md5_hash(mac_address))[0:15]
return None
def get_file_md5_hash(fh):
"""
:param fh: file handle
:return: the md5 hash of the file
"""
block_size = 65536
md5_hasher = md5()
buf = fh.read(block_size)
while len(buf) > 0:
md5_hasher.update(buf)
buf = fh.read(block_size)
return md5_hasher.hexdigest()
def get_network_interfaces():
"""
:return: A list of valid interfaces and their addresses
"""
return psutil.net_if_addrs().items()
def is_packet_capture(bytes):
"""
:param bytes: raw bytes
:return: True is valid pcap or pcapng file
"""
result = from_buffer(bytes)
valid = "pcap-ng" in result or "tcpdump" in result or "NetMon" in result or 'pcap capture file' in result
return valid
def mkdir_p(path):
"""
:param path: Path to the new directory to create
"""
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
def listen_on_interface(interface, timeout=60):
"""
:param interface: The name of the interface on which to capture traffic
:return: generator containing live packets
"""
start = time.time()
capture = pyshark.LiveCapture(interface=interface)
for item in capture.sniff_continuously():
if timeout and time.time() - start > timeout:
break
yield item
def print_network_interaces():
"""
:return: Prints a human readable representation of the available network interfaces
"""
for intf, items in get_network_interfaces():
table = [["family", "address", "netmask", "broadcast", "ptp"]]
for item in items:
family, address, netmask, broadcast, ptp = item
table.append([str(family), str(address), str(netmask), str(broadcast), str(ptp)])
print(AsciiTable(table_data=table, title=intf).table)
print('\n')
def print_analysis_disclaimer():
print("""
WARNING: Analysis will result in the network traffic becoming public at https://packettotal.com.
ADVERTENCIA: El análisis hará que el tráfico de la red se haga público en https://packettotal.com.
WARNUNG: Die Analyse führt dazu, dass der Netzwerkverkehr unter https://packettotal.com öffentlich wird.
ПРЕДУПРЕЖДЕНИЕ. Анализ приведет к тому, что сетевой трафик станет общедоступным на https://packettotal.com.
चेतावनी: विश्लेषण का परिणाम नेटवर्क ट्रैफिक https://packettotal.com पर सार्वजनिक हो जाएगा
警告:分析将导致网络流量在https://packettotal.com上公开
警告:分析により、ネットワークトラフィックはhttps://packettotal.comで公開されます。
tahdhir: sayuadiy altahlil 'iilaa 'an tusbih harakat murur alshabakat eamat ealaa https://packettotal.com
""")
answer = input('Continue? [Y/n]: ')
if answer.lower() == 'n':
exit(0)
def print_pt_ascii_logo():
"""
:return: Prints a PacketTotal.com (visit https://PacketTotal.com!)
"""
logo = """
,,*****, ,****,
****/**, / ,*//*,
****/**, ,,**********,. ******
.. .,*****, .. ******** ************,. .
. .,,***, ,******, .,***,,.. *****////***, ,***,
... ,******, ,******, .,******** *****////***,. ****,
.,,****, ,******, ,******, ,******** ************, .///
/////* // ////// /( ////// /( *///// ************
****,. ,,******. ,******. ,******, .******,,.,******,. *
*****, ********, ,******, ,******, .,********.,******** ,*******,
*****, ********, ,******, ,******, .,********.,******** ****//**,
*****, /*******, ,******* *//////* ********/ ,******** ****//**,
////* / .////// ((.(((((( ## (((((( (& /((((( % ////// ,******** .
,,,,,. ,,,,,,,,. ,,*****, ,******, ,*****,,,..,,,,,,,. .,,,,,,
*****, /*******, *//////* *//////* .*//////*/.*********,,*****,
*****, /*******, *//////* (. ( *////////.*********,,******
*****, /******** */////(* ..,,,,,,.. (/////// ********* *******
////* / ,/((((/ #@,####*..,**********,. /####. & /(((((. @ *////
,,,,,. ..,,,,*,, ,**, ,*****////*****,./***,. ,,,,,,,.. .,,,,,
,***, /*****//* */// .,***//(##((/****, /////,*//******,,***,
****, /*****//* *//* ,****/(%&&%(//**,, ////(,*//******,,****
****, /*****//* *//* ,***//(##(//***** *///( *//******.***,
*** ( **///// #@//((. ******////****** /(((* @ //////* **,
,,. ..,,,,,,, ,****,. ************ .,****,. ,,,,,,,.. ,,*
*, /******** *//////, ,****, ,////////.*********,,*
* /*******, *//////* ,******, .*////////.*********,,
/******** *//////* *//////* *//////*/ *********
*******,# ////////# ////////# //////* /******
,,,,,..((.,,,,,,.(#.,,,,,,.(#.,,,,,,..(..,,,,,
* *****, ,******, *//////* .,*******/.,*****
***, ,******, ,******, .,********.,*** /
* ,*, ,******, ,******, ,********.,* .
*******/ *******/ ,******* ,
,,,,,..// .,,,,..// .,,,,,
/ .****, ,******, .,****, /
/ *** ,******, ***
******** #
- honeybot: Capture and analyze network traffic; powered by PacketTotal.com
: VERSION: {}
""".format(const.VERSION)
print(logo)
# Setup Logging
mkdir_p('logs/')
mkdir_p('tmp/')
logger = logging.getLogger('honeybot.utils')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('logs/honeybot.log')
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch) | 1,810 | 0 | 69 |
8a5c676db35ceafbfa87c565e021037cd2bb5456 | 5,969 | py | Python | test/test_shape_map_format.py | DaniFdezAlvarez/dbpedia-shexer | e2c0685b167810e13411b592b6bdd4acfc11e8c1 | [
"Apache-2.0"
] | 16 | 2019-03-18T21:32:36.000Z | 2022-03-28T17:53:57.000Z | test/test_shape_map_format.py | DaniFdezAlvarez/shexer | 4f4dffc95bcad038cbc1bd85b58e5558c7c0a6d1 | [
"Apache-2.0"
] | 67 | 2019-02-27T12:58:55.000Z | 2022-03-28T20:38:48.000Z | test/test_shape_map_format.py | DaniFdezAlvarez/dbpedia-shexer | e2c0685b167810e13411b592b6bdd4acfc11e8c1 | [
"Apache-2.0"
] | 2 | 2019-03-09T00:30:28.000Z | 2020-01-09T18:04:14.000Z | import unittest
from shexer.shaper import Shaper
from test.const import G1, BASE_FILES, default_namespaces
from shexer.consts import JSON, FIXED_SHAPE_MAP
from test.t_utils import file_vs_str_tunned_comparison
import os.path as pth
from shexer.consts import TURTLE
_BASE_DIR = BASE_FILES + "shape_map" + pth.sep
| 50.584746 | 118 | 0.54716 | import unittest
from shexer.shaper import Shaper
from test.const import G1, BASE_FILES, default_namespaces
from shexer.consts import JSON, FIXED_SHAPE_MAP
from test.t_utils import file_vs_str_tunned_comparison
import os.path as pth
from shexer.consts import TURTLE
_BASE_DIR = BASE_FILES + "shape_map" + pth.sep
class TestShapeMapFormat(unittest.TestCase):
    """Exercises shEx generation driven by shape maps, covering both the
    fixed shape-map syntax and the JSON shape-map format."""
    def _assert_shex_for_map(self, shape_map, shape_map_format, expected_file):
        """Build a Shaper over graph G1 for the given raw shape map and
        format, generate the shEx schema as a string, and assert that it
        matches the fixture file `expected_file` under _BASE_DIR."""
        shaper = Shaper(graph_file_input=G1,
                        namespaces_dict=default_namespaces(),
                        all_classes_mode=False,
                        input_format=TURTLE,
                        disable_comments=True,
                        shape_map_raw=shape_map,
                        shape_map_format=shape_map_format
                        )
        str_result = shaper.shex_graph(string_output=True)
        self.assertTrue(file_vs_str_tunned_comparison(file_path=_BASE_DIR + expected_file,
                                                      str_target=str_result))
    def test_some_fixed_shape_map(self):
        self._assert_shex_for_map("<http://example.org/Jimmy>@<Person>",
                                  FIXED_SHAPE_MAP,
                                  "a_node.shex")
    def test_json_node(self):
        # Equivalent fixed map: "<http://example.org/Jimmy>@<Person>"
        shape_map = '[{"nodeSelector" : "<http://example.org/Jimmy>", "shapeLabel": "<Person>"}]'
        self._assert_shex_for_map(shape_map, JSON, "a_node.shex")
    def test_json_prefixed_node(self):
        # Equivalent fixed map: "ex:Jimmy@<Person>"
        shape_map = '[{"nodeSelector" : "ex:Jimmy", "shapeLabel": "<Person>"}]'
        self._assert_shex_for_map(shape_map, JSON, "a_node.shex")
    def test_json_focus(self):
        # Equivalent fixed map: "{FOCUS a foaf:Person}@<Person>"
        shape_map = '[{"nodeSelector" : "{FOCUS a foaf:Person}", "shapeLabel": "<Person>"}]'
        self._assert_shex_for_map(shape_map, JSON, "focus_nodes.shex")
    def test_json_focus_wildcard(self):
        # Equivalent fixed map: "{FOCUS foaf:name _}@<WithName>"
        shape_map = '[{"nodeSelector" : "{FOCUS foaf:name _}", "shapeLabel": "<WithName>"}]'
        self._assert_shex_for_map(shape_map, JSON, "focus_and_wildcard.shex")
    def test_json_sparql_selector(self):
        # Equivalent fixed map: "SPARQL \"select ?p where { ?p a foaf:Person }\"@<Person>"
        shape_map = '[{"nodeSelector" : "SPARQL \'select ?p where { ?p a foaf:Person }\'", "shapeLabel": "<Person>"}]'
        self._assert_shex_for_map(shape_map, JSON, "focus_nodes.shex")
    def test_json_several_shapemap_items(self):
        # Equivalent fixed map: "{FOCUS a foaf:Person}@<Person>\n{FOCUS a foaf:Document}@<Document>"
        shape_map = '[{"nodeSelector" : "{FOCUS a foaf:Person}", "shapeLabel": "<Person>"},' \
                    '{"nodeSelector" : "{FOCUS a foaf:Document}", "shapeLabel": "<Document>"}]'
        self._assert_shex_for_map(shape_map, JSON, "several_shm_items.shex")
0fc5bc4b5bbfcf8687ba770ac9a21a8952ebdebb | 143 | py | Python | ecommerce_api/data/admin.py | khaledmontaser20/Ecommerce_Backend | fc245e2a381d0b7e7b47220fa23623c64abac997 | [
"MIT"
] | null | null | null | ecommerce_api/data/admin.py | khaledmontaser20/Ecommerce_Backend | fc245e2a381d0b7e7b47220fa23623c64abac997 | [
"MIT"
] | null | null | null | ecommerce_api/data/admin.py | khaledmontaser20/Ecommerce_Backend | fc245e2a381d0b7e7b47220fa23623c64abac997 | [
"MIT"
] | 1 | 2021-12-17T18:12:54.000Z | 2021-12-17T18:12:54.000Z | from django.contrib import admin
from .models import *
admin.site.register(Product)
admin.site.register(Category)
admin.site.register(Review)
| 20.428571 | 32 | 0.811189 | from django.contrib import admin
from .models import *
# Expose the shop models in the Django admin with default ModelAdmin options.
admin.site.register(Product)
admin.site.register(Category)
admin.site.register(Review)
| 0 | 0 | 0 |
7f4653337e7137db4574c3ae8da4f111e668e483 | 7,478 | py | Python | tests/distributions_test.py | danhey/exoplanet | bc82756dfa1b084e82cbcfa6185800833415e847 | [
"MIT"
] | 80 | 2018-10-28T09:55:34.000Z | 2020-01-19T20:54:08.000Z | tests/distributions_test.py | danhey/exoplanet | bc82756dfa1b084e82cbcfa6185800833415e847 | [
"MIT"
] | 56 | 2018-11-18T20:08:51.000Z | 2020-01-20T17:18:33.000Z | tests/distributions_test.py | danhey/exoplanet | bc82756dfa1b084e82cbcfa6185800833415e847 | [
"MIT"
] | 26 | 2018-11-30T17:07:33.000Z | 2020-01-16T04:07:52.000Z | # -*- coding: utf-8 -*-
import logging
from collections import namedtuple
import aesara_theano_fallback.tensor as tt
import numpy as np
import pymc3 as pm
import pytest
from pymc3.tests.test_distributions import R, Unit, Vector
from pymc3.tests.test_transforms import check_transform, get_values
from scipy.stats import beta, halfnorm, kstest, rayleigh
from exoplanet.distributions import transforms as tr
from exoplanet.distributions.eccentricity import kipping13, vaneylen19
from exoplanet.distributions.physical import ImpactParameter, QuadLimbDark
| 29.32549 | 76 | 0.537577 | # -*- coding: utf-8 -*-
import logging
from collections import namedtuple
import aesara_theano_fallback.tensor as tt
import numpy as np
import pymc3 as pm
import pytest
from pymc3.tests.test_distributions import R, Unit, Vector
from pymc3.tests.test_transforms import check_transform, get_values
from scipy.stats import beta, halfnorm, kstest, rayleigh
from exoplanet.distributions import transforms as tr
from exoplanet.distributions.eccentricity import kipping13, vaneylen19
from exoplanet.distributions.physical import ImpactParameter, QuadLimbDark
class _Base:
    """Shared helpers for the distribution test cases: a seeded model
    context and a quiet, small-draw-count sampler."""
    random_seed = 20160911
    def _sample(self, **kwargs):
        """Run pm.sample with pymc3 logging silenced and small defaults."""
        pymc3_logger = logging.getLogger("pymc3")
        pymc3_logger.propagate = False
        pymc3_logger.setLevel(logging.ERROR)
        kwargs.setdefault("draws", 1000)
        kwargs.setdefault("progressbar", False)
        return pm.sample(**kwargs)
    def _model(self, **kwargs):
        """Seed numpy's global RNG and return a fresh pm.Model."""
        np.random.seed(self.random_seed)
        return pm.Model(**kwargs)
class TestEccentricity(_Base):
    """Sampling tests for the kipping13 and vaneylen19 eccentricity priors:
    random-draw shapes, support in [0, 1], and (for the fixed variants)
    agreement of posterior samples with the published mixture via a KS test."""
    random_seed = 19910626
    def test_kipping13(self):
        """Free-hyperparameter variant: named vars exist, draws are in [0, 1]."""
        with self._model() as model:
            dist = kipping13("ecc", shape=(5, 2))
            assert "ecc_alpha" in model.named_vars
            assert "ecc_beta" in model.named_vars
            # Test random sampling
            samples = dist.random(size=100)
            assert np.shape(samples) == (100, 5, 2)
            assert np.all((0 <= samples) & (samples <= 1))
            trace = self._sample()
        ecc = trace["ecc"]
        assert np.all((0 <= ecc) & (ecc <= 1))
    def test_kipping13_all(self):
        """Fixed variant with default period mix matches Beta(1.12, 3.09)."""
        with self._model():
            kipping13("ecc", fixed=True, shape=2)
            trace = self._sample()
        ecc = trace["ecc"].flatten()
        assert np.all((0 <= ecc) & (ecc <= 1))
        cdf = lambda x: beta.cdf(x, 1.12, 3.09)  # NOQA
        s, p = kstest(ecc, cdf)
        assert s < 0.05
    def test_kipping13_long(self):
        """Fixed long-period variant matches Beta(1.12, 3.09)."""
        with self._model():
            kipping13("ecc", fixed=True, long=True, shape=3)
            trace = self._sample()
        ecc = trace["ecc"].flatten()
        assert np.all((0 <= ecc) & (ecc <= 1))
        cdf = lambda x: beta.cdf(x, 1.12, 3.09)  # NOQA
        s, p = kstest(ecc, cdf)
        assert s < 0.05
    def test_kipping13_short(self):
        """Fixed short-period variant matches Beta(0.697, 3.27)."""
        with self._model():
            kipping13("ecc", fixed=True, long=False, shape=4)
            trace = self._sample()
        ecc = trace["ecc"].flatten()
        assert np.all((0 <= ecc) & (ecc <= 1))
        cdf = lambda x: beta.cdf(x, 0.697, 3.27)  # NOQA
        s, p = kstest(ecc, cdf)
        assert s < 0.05
    @pytest.mark.parametrize(
        "kwargs",
        [dict(lower=0.1), dict(upper=0.5), dict(lower=0.3, upper=0.4)],
    )
    def test_kipping13_bounds(self, kwargs):
        """Explicit lower/upper bounds are respected by all posterior draws."""
        with self._model():
            kipping13("ecc", **kwargs)
            trace = self._sample()
        ecc = trace["ecc"].flatten()
        assert np.all(
            (kwargs.get("lower", 0.0) <= ecc)
            & (ecc <= kwargs.get("upper", 1.0))
        )
    @pytest.mark.parametrize("kwargs", [dict(), dict(multi=True)])
    def test_vaneylen19(self, kwargs):
        """Free-hyperparameter variant: named vars exist, draws are in [0, 1]."""
        with self._model() as model:
            dist = vaneylen19("ecc", shape=(5, 2), **kwargs)
            if not kwargs.get("fixed", False):
                assert "ecc_sigma_gauss" in model.named_vars
                assert "ecc_sigma_rayleigh" in model.named_vars
                assert "ecc_frac" in model.named_vars
            # Test random sampling
            samples = dist.random(size=100)
            assert np.shape(samples) == (100, 5, 2)
            assert np.all((0 <= samples) & (samples <= 1))
            trace = self._sample()
        ecc = trace["ecc"]
        assert np.all((0 <= ecc) & (ecc <= 1))
    def test_vaneylen19_single(self):
        """Fixed single-planet variant matches the half-normal/Rayleigh
        mixture with weight 0.76 on the Rayleigh component."""
        with self._model():
            vaneylen19("ecc", fixed=True, multi=False, shape=2)
            trace = self._sample()
        ecc = trace["ecc"].flatten()
        assert np.all((0 <= ecc) & (ecc <= 1))
        f = 0.76
        cdf = lambda x: (  # NOQA
            (1 - f) * halfnorm.cdf(x, scale=0.049)
            + f * rayleigh.cdf(x, scale=0.26)
        )
        s, p = kstest(ecc, cdf)
        assert s < 0.05
    def test_vaneylen19_multi(self):
        """Fixed multi-planet variant uses Rayleigh weight 0.08."""
        with self._model():
            vaneylen19("ecc", fixed=True, multi=True, shape=3)
            trace = self._sample()
        ecc = trace["ecc"].flatten()
        assert np.all((0 <= ecc) & (ecc <= 1))
        f = 0.08
        cdf = lambda x: (  # NOQA
            (1 - f) * halfnorm.cdf(x, scale=0.049)
            + f * rayleigh.cdf(x, scale=0.26)
        )
        s, p = kstest(ecc, cdf)
        assert s < 0.05
    @pytest.mark.parametrize(
        "kwargs",
        [dict(lower=0.1), dict(upper=0.5), dict(lower=0.3, upper=0.4)],
    )
    def test_vaneylen19_bounds(self, kwargs):
        """Explicit lower/upper bounds are respected by all posterior draws."""
        with self._model():
            vaneylen19("ecc", **kwargs)
            trace = self._sample()
        ecc = trace["ecc"].flatten()
        assert np.all(
            (kwargs.get("lower", 0.0) <= ecc)
            & (ecc <= kwargs.get("upper", 1.0))
        )
class TestPhysical(_Base):
    """Sampling tests for the physically-constrained priors QuadLimbDark
    and ImpactParameter."""
    random_seed = 19860925
    def test_quad_limb_dark(self):
        """Draws satisfy the Kipping limb-darkening constraints and the
        reparameterized q1/q2 are uniform on [0, 1]."""
        with self._model():
            dist = QuadLimbDark("u", shape=2)
            # Test random sampling
            samples = dist.random(size=100)
            assert np.shape(samples) == (100, 2)
            # logp must be finite and flat (uniform in the q parameterization).
            logp = QuadLimbDark.dist(shape=2).logp(samples).eval().flatten()
            assert np.all(np.isfinite(logp))
            assert np.allclose(logp[0], logp)
            trace = self._sample()
        u1 = trace["u"][:, 0]
        u2 = trace["u"][:, 1]
        # Make sure that the physical constraints are satisfied
        assert np.all(u1 + u2 < 1)
        assert np.all(u1 > 0)
        assert np.all(u1 + 2 * u2 > 0)
        # Make sure that the qs are uniform
        q1 = (u1 + u2) ** 2
        q2 = 0.5 * u1 / (u1 + u2)
        cdf = lambda x: np.clip(x, 0, 1)  # NOQA
        for q in (q1, q2):
            s, p = kstest(q, cdf)
            assert s < 0.05
    def test_impact(self):
        """ImpactParameter draws stay below 1 + ror and the conditioned ror
        remains uniform on [lower, upper]."""
        lower = 0.1
        upper = 1.0
        with self._model():
            ror = pm.Uniform("ror", lower=lower, upper=upper, shape=(5, 2))
            dist = ImpactParameter("b", ror=ror)
            # Test random sampling
            samples = dist.random(size=100)
            assert np.shape(samples) == (100, 5, 2)
            assert np.all((0 <= samples) & (samples <= 1 + upper))
            trace = self._sample()
        u = trace["ror"]
        u = np.reshape(u, (len(u), -1))
        cdf = lambda x: np.clip((x - lower) / (upper - lower), 0, 1)  # NOQA
        for i in range(u.shape[1]):
            s, p = kstest(u[:, i], cdf)
            assert s < 0.05
        assert np.all(trace["b"] <= 1 + trace["ror"])
def test_quad_limb_dark_transform():
    """Round-trip check of the quadratic limb-darkening transform."""
    transform = tr.quad_limb_dark
    domain_values = get_values(
        transform,
        Vector(R, 2),
        constructor=tt.vector,
        test=np.array([0.0, 0.0]),
    )
    Domain = namedtuple("Domain", ["vals"])
    check_transform(
        transform,
        Domain(domain_values),
        constructor=tt.vector,
        test=np.array([0.0, 0.0]),
    )
def test_impact_parameter_transform():
    """Round-trip check of the impact-parameter transform for a fixed ror."""
    radius_ratio = np.float64(0.03)
    transform = tr.impact_parameter(radius_ratio)
    domain = Unit * (1 + radius_ratio)
    check_transform(transform, domain, test=0.5)
| 6,047 | 756 | 115 |
41bb8f8efbe2c21cd44cc434cadb92d66fd2ca08 | 2,554 | py | Python | bambu_webhooks/forms.py | iamsteadman/bambu-webhooks | 91c2517430cb1e82e60dc1533796b60c9774faa4 | [
"Apache-2.0"
] | 1 | 2022-03-31T23:12:18.000Z | 2022-03-31T23:12:18.000Z | bambu_webhooks/forms.py | amarksteadman/bambu-webhooks | 91c2517430cb1e82e60dc1533796b60c9774faa4 | [
"Apache-2.0"
] | null | null | null | bambu_webhooks/forms.py | amarksteadman/bambu-webhooks | 91c2517430cb1e82e60dc1533796b60c9774faa4 | [
"Apache-2.0"
] | null | null | null | from django import forms
from django.core.validators import URLValidator
from django.core.exceptions import ValidationError
from bambu_webhooks.models import Receiver
from bambu_webhooks import site | 32.74359 | 84 | 0.459671 | from django import forms
from django.core.validators import URLValidator
from django.core.exceptions import ValidationError
from bambu_webhooks.models import Receiver
from bambu_webhooks import site
class ReceiverForm(forms.Form):
    """Per-user webhook subscription form.

    Builds one multi-line URL textarea per hook registered with
    ``bambu_webhooks.site`` and synchronizes the user's ``webhooks``
    relation with the submitted URLs on save.
    """
    def __init__(self, *args, **kwargs):
        """Pop the ``user`` kwarg, prefill each field with the user's current
        webhook URLs (one per line), and create one field per registered
        hook. Hooks flagged ``staff_only`` are hidden from non-staff users.
        """
        self.user = kwargs.pop('user')
        # Group the user's existing webhook URLs by hook name.
        hooks = {}
        for (hook, url) in self.user.webhooks.values_list('hook', 'url'):
            hooks[hook] = hooks.get(hook, []) + [url]
        kwargs['initial'] = dict(
            [
                (h, '\n'.join(u)) for (h, u) in hooks.items()
            ]
        )
        super(ReceiverForm, self).__init__(*args, **kwargs)
        for name, hook in site._registry.items():
            if hook.get('staff_only') and not self.user.is_staff:
                continue
            self.fields[name] = forms.CharField(
                widget = forms.Textarea(
                    attrs = {
                        'rows': 3
                    }
                ),
                label = hook.get('verbose_name', name).capitalize(),
                help_text = hook.get('description'),
                required = False
            )
            # Attach a per-field validator: Django's full_clean looks up
            # clean_<fieldname> on the form instance during validation.
            setattr(self, 'clean_%s' % name, self.clean_FIELD(name))
    def clean_FIELD(self, name):
        """Return a closure that validates every (stripped) line of the
        field named ``name`` as a URL, raising a ValidationError that
        reports the 1-based offending line number."""
        def inner():
            data = self.cleaned_data.get(name, '')
            validate = URLValidator()
            for i, url in enumerate([u.strip() for u in data.splitlines()]):
                try:
                    validate(url)
                except ValidationError:
                    raise forms.ValidationError(
                        u'Line %d contains an invalid URL' % (i + 1)
                    )
            return data
        return inner
    def save(self):
        """Create newly-added webhooks, delete removed ones, and prune
        webhooks for hooks that are no longer registered.

        NOTE(review): this iterates *all* registered hooks, including
        staff_only hooks absent from a non-staff user's cleaned_data; for
        those, ``urls`` is empty and the user's existing URLs for that hook
        are deleted -- confirm that is intended.
        """
        hooks = site._registry.keys()
        for name in hooks:
            urls = [u.strip() for u in self.cleaned_data.get(name, '').splitlines()]
            for url in urls:
                if not self.user.webhooks.filter(
                    hook = name,
                    url = url
                ).exists():
                    self.user.webhooks.create(
                        hook = name,
                        url = url
                    )
            # Remove URLs the user deleted from the textarea.
            self.user.webhooks.filter(
                hook = name
            ).exclude(
                url__in = urls
            ).delete()
        # Drop webhooks whose hook is no longer registered at all.
        self.user.webhooks.exclude(hook__in = hooks).delete()
2dd045f1f78168cc8d91f81b6b9efa0da474a6d2 | 2,886 | py | Python | roles/openshift_health_checker/openshift_checks/ovs_version.py | baileyvw/openshift-ansible | b1184a1544a1cd13fc2623bfa468759cb9bff4ba | [
"Apache-2.0"
] | null | null | null | roles/openshift_health_checker/openshift_checks/ovs_version.py | baileyvw/openshift-ansible | b1184a1544a1cd13fc2623bfa468759cb9bff4ba | [
"Apache-2.0"
] | null | null | null | roles/openshift_health_checker/openshift_checks/ovs_version.py | baileyvw/openshift-ansible | b1184a1544a1cd13fc2623bfa468759cb9bff4ba | [
"Apache-2.0"
] | null | null | null | """
Ansible module for determining if an installed version of Open vSwitch is incompatible with the
currently installed version of OpenShift.
"""
from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
from openshift_checks.mixins import NotContainerizedMixin
class OvsVersion(NotContainerizedMixin, OpenShiftCheck):
"""Check that packages in a package_list are installed on the host
and are the correct version as determined by an OpenShift installation.
"""
name = "ovs_version"
tags = ["health"]
openshift_to_ovs_version = {
"3.6": "2.6",
"3.5": "2.6",
"3.4": "2.4",
}
# map major release versions across releases
# to a common major version
openshift_major_release_version = {
"1": "3",
}
@classmethod
def is_active(cls, task_vars):
"""Skip hosts that do not have package requirements."""
group_names = get_var(task_vars, "group_names", default=[])
master_or_node = 'masters' in group_names or 'nodes' in group_names
return super(OvsVersion, cls).is_active(task_vars) and master_or_node
def get_required_ovs_version(self, task_vars):
"""Return the correct Open vSwitch version for the current OpenShift version"""
openshift_version = self._get_openshift_version(task_vars)
if float(openshift_version) < 3.5:
return self.openshift_to_ovs_version["3.4"]
ovs_version = self.openshift_to_ovs_version.get(str(openshift_version))
if ovs_version:
return self.openshift_to_ovs_version[str(openshift_version)]
msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}"
raise OpenShiftCheckException(msg.format(openshift_version))
| 36.531646 | 104 | 0.66736 | """
Ansible module for determining if an installed version of Open vSwitch is incompatible with the
currently installed version of OpenShift.
"""
from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
from openshift_checks.mixins import NotContainerizedMixin
class OvsVersion(NotContainerizedMixin, OpenShiftCheck):
    """Check that packages in a package_list are installed on the host
    and are the correct version as determined by an OpenShift installation.
    """

    name = "ovs_version"
    tags = ["health"]

    # Recommended Open vSwitch version for each supported OpenShift
    # "major.minor" release.
    openshift_to_ovs_version = {
        "3.6": "2.6",
        "3.5": "2.6",
        "3.4": "2.4",
    }

    # map major release versions across releases
    # to a common major version
    openshift_major_release_version = {
        "1": "3",
    }

    @classmethod
    def is_active(cls, task_vars):
        """Skip hosts that do not have package requirements."""
        group_names = get_var(task_vars, "group_names", default=[])
        master_or_node = 'masters' in group_names or 'nodes' in group_names
        return super(OvsVersion, cls).is_active(task_vars) and master_or_node

    def run(self, tmp, task_vars):
        """Verify the installed openvswitch package via the rpm_version module."""
        args = {
            "package_list": [
                {
                    "name": "openvswitch",
                    "version": self.get_required_ovs_version(task_vars),
                },
            ],
        }
        return self.execute_module("rpm_version", args, task_vars=task_vars)

    def get_required_ovs_version(self, task_vars):
        """Return the correct Open vSwitch version for the current OpenShift version"""
        openshift_version = self._get_openshift_version(task_vars)
        # Bug fix: compare numerically on (major, minor). The previous
        # float()-based comparison mis-orders two-digit minor releases,
        # e.g. float("3.10") == 3.1 < 3.5.
        try:
            major, minor = [int(part) for part in openshift_version.split(".")]
        except ValueError:
            msg = "An invalid version of OpenShift was found for this host: {}"
            raise OpenShiftCheckException(msg.format(openshift_version))
        if (major, minor) < (3, 5):
            return self.openshift_to_ovs_version["3.4"]
        ovs_version = self.openshift_to_ovs_version.get(openshift_version)
        if ovs_version:
            return ovs_version
        msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}"
        raise OpenShiftCheckException(msg.format(openshift_version))

    def _get_openshift_version(self, task_vars):
        """Return the host's OpenShift version as a "major.minor" string,
        stripping a leading "v" from the image tag if present."""
        openshift_version = get_var(task_vars, "openshift_image_tag")
        if openshift_version and openshift_version[0] == 'v':
            openshift_version = openshift_version[1:]
        return self._parse_version(openshift_version)

    def _parse_version(self, version):
        """Reduce a version string to "major.minor", mapping legacy origin
        major versions (e.g. "1") onto the common numbering (e.g. "3")."""
        components = version.split(".")
        if not components or len(components) < 2:
            msg = "An invalid version of OpenShift was found for this host: {}"
            raise OpenShiftCheckException(msg.format(version))
        if components[0] in self.openshift_major_release_version:
            components[0] = self.openshift_major_release_version[components[0]]
        return '.'.join(components[:2])
| 1,009 | 0 | 81 |
ca66dfc36676c0e005f6d50e7becb184769fea76 | 90 | py | Python | flask_filealchemy/__init__.py | calpt/flask-filealchemy | b3575299f0230d5a64865af8066122c2e0c485ec | [
"MIT"
] | null | null | null | flask_filealchemy/__init__.py | calpt/flask-filealchemy | b3575299f0230d5a64865af8066122c2e0c485ec | [
"MIT"
] | null | null | null | flask_filealchemy/__init__.py | calpt/flask-filealchemy | b3575299f0230d5a64865af8066122c2e0c485ec | [
"MIT"
] | null | null | null | from .filealchemy import FileAlchemy, LoadError # NOQA
from .common import ColumnMapping
| 30 | 55 | 0.822222 | from .filealchemy import FileAlchemy, LoadError # NOQA
from .common import ColumnMapping
| 0 | 0 | 0 |
0ddc35f02a21b4ce96b0c521c0282ae967dda2d7 | 1,293 | py | Python | punting/twodim/twonormalization.py | microprediction/punting | 5de62913f8eb9777df1d465db0f2d606a3d19c42 | [
"MIT"
] | null | null | null | punting/twodim/twonormalization.py | microprediction/punting | 5de62913f8eb9777df1d465db0f2d606a3d19c42 | [
"MIT"
] | null | null | null | punting/twodim/twonormalization.py | microprediction/punting | 5de62913f8eb9777df1d465db0f2d606a3d19c42 | [
"MIT"
] | null | null | null | from punting.twodim.twodimensions import to_flat_exacta,from_flat_exacta,to_flat_quinella, from_flat_quinella
from punting.onedim.onenormalization import to_normalized_dividends, to_normalized_probabilities
def to_normalized_quinella_probabilities(q, scr=-1):
    """Normalize a 2-d quinella probability representation.

    :param q: 2-d representation of quinella probabilities
    :param scr: marker value for scratched runners
    :return: 2-d representation with normalized probabilities (diagonal 0)
    """
    normalized_flat = to_normalized_probabilities(to_flat_quinella(q), scr=scr)
    return from_flat_quinella(normalized_flat, diag_val=0)
def to_normalized_quinella_dividends(q, scr=-1):
    """Convert a 2-d quinella probability representation into dividends.

    :param q: 2-d representation of quinella probabilities
    :param scr: marker value for scratched runners
    :return: 2-d representation of dividends (diagonal -1)
    """
    dividends_flat = to_normalized_dividends(to_flat_quinella(q), scr=scr)
    return from_flat_quinella(dividends_flat, diag_val=-1)
def to_normalized_exacta_probabilities(x, scr=-1):
    """Normalize a 2-d exacta probability representation.

    :param x: 2-d representation of exacta probabilities
    :param scr: marker value for scratched runners
    :return: 2-d representation with normalized probabilities (diagonal 0)
    """
    normalized_flat = to_normalized_probabilities(to_flat_exacta(x), scr=scr)
    return from_flat_exacta(normalized_flat, diag_value=0)
def to_normalized_exacta_dividends(x, scr=-1):
    """Convert a 2-d exacta probability representation into dividends.

    :param x: 2-d representation of exacta probabilities
    :param scr: marker value for scratched runners
    :return: 2-d representation of dividends (diagonal set to scr)
    """
    dividends_flat = to_normalized_dividends(to_flat_exacta(x), scr=scr)
    return from_flat_exacta(dividends_flat, diag_value=scr)
| 26.9375 | 109 | 0.721578 | from punting.twodim.twodimensions import to_flat_exacta,from_flat_exacta,to_flat_quinella, from_flat_quinella
from punting.onedim.onenormalization import to_normalized_dividends, to_normalized_probabilities
def to_normalized_quinella_probabilities(q,scr=-1):
"""
:param q: 2-d representation
:param scr:
:return: 2-d representation
"""
fq = to_normalized_probabilities( to_flat_quinella(q), scr=scr )
return from_flat_quinella(fq, diag_val=0)
def to_normalized_quinella_dividends(q,scr=-1):
""" Convert 2-d representation of probabilities to dividends
:param q:
:param scr:
:return:
"""
fd = to_normalized_dividends( to_flat_quinella(q), scr=scr )
return from_flat_quinella(fd, diag_val=-1)
def to_normalized_exacta_probabilities(x,scr=-1):
"""
:param x: 2-d representation
:param scr:
:return: 2-d representation
"""
fx = to_normalized_probabilities( to_flat_exacta(x), scr=scr )
return from_flat_exacta(fx, diag_value=0)
def to_normalized_exacta_dividends(x,scr=-1):
""" Convert 2-d representation of probabilities to dividends
:param x:
:param scr:
:return:
"""
fx = to_normalized_dividends( to_flat_exacta(x), scr=scr )
return from_flat_exacta(fx, diag_value=scr)
| 0 | 0 | 0 |
f4e18f6021386ba5dffdfdf479fa308f4411e811 | 1,939 | py | Python | AD2/FunctionGenerator.py | holla2040/ad2 | 38abe08ba0d2e65fad3a7a7914d1bf567045d614 | [
"MIT"
] | 5 | 2018-06-25T05:12:48.000Z | 2021-11-22T23:14:26.000Z | AD2/FunctionGenerator.py | holla2040/ad2 | 38abe08ba0d2e65fad3a7a7914d1bf567045d614 | [
"MIT"
] | null | null | null | AD2/FunctionGenerator.py | holla2040/ad2 | 38abe08ba0d2e65fad3a7a7914d1bf567045d614 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from ctypes import *
from dwfconstants import *
import time,sys
| 34.625 | 110 | 0.698814 | #!/usr/bin/env python
from ctypes import *
from dwfconstants import *
import time,sys
class FunctionGenerator(object):
    """One analog-output (AWG) channel of a Digilent device via the dwf API.

    frequency/amplitude/offset/function are properties that talk to the
    hardware on every get and set; construction enables the carrier node
    and initializes the channel to a 0 V, 0 Hz sine.
    """
    def __init__(self, dwf, hdwf, channel):
        """
        :param dwf: loaded WaveForms (dwf) library wrapper (ctypes)
        :param hdwf: open device handle
        :param channel: analog-out channel index
        """
        self.dwf = dwf
        self.hdwf = hdwf
        self.channel = c_int(channel)
        self.dwf.FDwfAnalogOutNodeEnableSet(self.hdwf, self.channel, AnalogOutNodeCarrier, c_bool(True))
        #self.dwf.FDwfAnalogOutNodeFunctionSet(self.hdwf, self.channel, AnalogOutNodeCarrier, funcSine)
        self.frequency = 0.00
        self.amplitude = 0.00
        self.offset = 0.00
        self.function = funcSine
    @property
    def frequency(self):
        """Carrier frequency in Hz, read back from the device."""
        v = c_double()
        self.dwf.FDwfAnalogOutNodeFrequencyGet(self.hdwf, self.channel, AnalogOutNodeCarrier, byref(v))
        return v.value
    @frequency.setter
    def frequency(self, value):
        self.dwf.FDwfAnalogOutNodeFrequencySet(self.hdwf, self.channel, AnalogOutNodeCarrier, c_double(value))
    @property
    def amplitude(self):
        """Carrier amplitude in volts, read back from the device."""
        v = c_double()
        self.dwf.FDwfAnalogOutNodeAmplitudeGet(self.hdwf, self.channel, AnalogOutNodeCarrier, byref(v))
        return v.value
    # Bug fix: this decorator was missing, so the bare method replaced the
    # amplitude property entirely -- 'fg.amplitude = x' just stored an
    # instance attribute and never reached the hardware.
    @amplitude.setter
    def amplitude(self, value):
        self.dwf.FDwfAnalogOutNodeAmplitudeSet(self.hdwf, self.channel, AnalogOutNodeCarrier, c_double(value))
    @property
    def offset(self):
        """DC offset in volts, read back from the device."""
        v = c_double()
        self.dwf.FDwfAnalogOutNodeOffsetGet(self.hdwf, self.channel, AnalogOutNodeCarrier, byref(v))
        return v.value
    @offset.setter
    def offset(self, value):
        self.dwf.FDwfAnalogOutNodeOffsetSet(self.hdwf, self.channel, AnalogOutNodeCarrier, c_double(value))
    @property #see FUNC
    def function(self):
        """Waveform function code (see the FUNC constants in dwfconstants).
        NOTE(review): returns the raw c_ubyte rather than .value like the
        other getters -- confirm callers expect that."""
        v = c_ubyte()
        self.dwf.FDwfAnalogOutNodeFunctionGet(self.hdwf, self.channel, AnalogOutNodeCarrier, byref(v))
        return v
    @function.setter
    def function(self, value):
        self.dwf.FDwfAnalogOutNodeFunctionSet(self.hdwf, self.channel, AnalogOutNodeCarrier, value)
| 1,450 | 377 | 23 |
3da7d3d604f3f675f9665b51dc88bf37cd90e5bc | 67 | py | Python | backend/consts.py | roechsli/planhub | aaf6cb6fded6d1c124ec32c03a9dc23b80c7b9aa | [
"MIT"
] | 1 | 2021-09-25T09:38:03.000Z | 2021-09-25T09:38:03.000Z | backend/consts.py | roechsli/planhub | aaf6cb6fded6d1c124ec32c03a9dc23b80c7b9aa | [
"MIT"
] | null | null | null | backend/consts.py | roechsli/planhub | aaf6cb6fded6d1c124ec32c03a9dc23b80c7b9aa | [
"MIT"
] | null | null | null | MINUTE_TIME_UNIT_MULTIPLIER = {
"hours": 60,
"minutes": 1
} | 16.75 | 31 | 0.626866 | MINUTE_TIME_UNIT_MULTIPLIER = {
"hours": 60,
"minutes": 1
} | 0 | 0 | 0 |
a621f79cb80d81894fd07f221871293abdc639f4 | 8,386 | py | Python | models/baseline.py | DoubtedSteam/MPANet | fe4f3f1d83c45485b1498786f89ace96c634f187 | [
"MIT"
] | 25 | 2021-06-25T03:37:21.000Z | 2022-03-11T02:21:06.000Z | models/baseline.py | DoubtedSteam/MPANet | fe4f3f1d83c45485b1498786f89ace96c634f187 | [
"MIT"
] | 6 | 2021-06-25T06:46:03.000Z | 2022-03-25T06:47:12.000Z | models/baseline.py | DoubtedSteam/MPANet | fe4f3f1d83c45485b1498786f89ace96c634f187 | [
"MIT"
] | 2 | 2021-12-14T07:40:59.000Z | 2021-12-20T02:40:58.000Z | import math
import torch
import torch.nn as nn
from torch.nn import init
from torch.nn import functional as F
from torch.nn import Parameter
import numpy as np
import cv2
from models.resnet import resnet50
from utils.calc_acc import calc_acc
from layers import TripletLoss
from layers import CenterTripletLoss
from layers import CenterLoss
from layers import cbam
from layers import NonLocalBlockND
| 41.721393 | 145 | 0.61543 | import math
import torch
import torch.nn as nn
from torch.nn import init
from torch.nn import functional as F
from torch.nn import Parameter
import numpy as np
import cv2
from models.resnet import resnet50
from utils.calc_acc import calc_acc
from layers import TripletLoss
from layers import CenterTripletLoss
from layers import CenterLoss
from layers import cbam
from layers import NonLocalBlockND
class Baseline(nn.Module):
    """Cross-modality (visible/infrared) person re-ID baseline.

    ResNet-50 backbone with optional part-based pattern attention, optional
    per-modality mutual learning with frozen EMA mirror classifiers, and a
    BNNeck before the classification head. Returns embedded features in
    eval mode and a (loss, metric dict) pair in training mode.
    """
    def __init__(self, num_classes=None, drop_last_stride=False, pattern_attention=False, modality_attention=0, mutual_learning=False, **kwargs):
        """Build the backbone plus the heads/losses selected via kwargs.

        :param num_classes: number of identities for the classifier heads
        :param drop_last_stride: passed through to the ResNet-50 backbone
        :param pattern_attention: enable part-based spatial attention
        :param modality_attention: modality-attention mode for the backbone
        :param mutual_learning: enable per-modality classifiers + EMA copies
        Remaining options (num_parts, weight_*, loss switches, margin,
        k_size, eval, ...) are read from kwargs with defaults below.
        """
        super(Baseline, self).__init__()
        self.drop_last_stride = drop_last_stride
        self.pattern_attention = pattern_attention
        self.modality_attention = modality_attention
        self.mutual_learning = mutual_learning
        self.backbone = resnet50(pretrained=True, drop_last_stride=drop_last_stride, modality_attention=modality_attention)
        self.base_dim = 2048
        self.dim = 0
        self.part_num = kwargs.get('num_parts', 0)
        if pattern_attention:
            # Each part contributes one 2048-d pooled feature; a 1x1 conv
            # produces one spatial attention mask per part.
            self.base_dim = 2048
            self.dim = 2048
            self.part_num = kwargs.get('num_parts', 6)
            self.spatial_attention = nn.Conv2d(self.base_dim, self.part_num, kernel_size=1, stride=1, padding=0, bias=True)
            torch.nn.init.constant_(self.spatial_attention.bias, 0.0)
            self.activation = nn.Sigmoid()
            self.weight_sep = kwargs.get('weight_sep', 0.1)
        if mutual_learning:
            # Trailing-underscore heads are frozen mirrors, updated by EMA
            # of the live per-modality classifiers in train_forward.
            self.visible_classifier = nn.Linear(self.base_dim + self.dim * self.part_num, num_classes, bias=False)
            self.infrared_classifier = nn.Linear(self.base_dim + self.dim * self.part_num, num_classes, bias=False)
            self.visible_classifier_ = nn.Linear(self.base_dim + self.dim * self.part_num, num_classes, bias=False)
            self.visible_classifier_.weight.requires_grad_(False)
            self.visible_classifier_.weight.data = self.visible_classifier.weight.data
            self.infrared_classifier_ = nn.Linear(self.base_dim + self.dim * self.part_num, num_classes, bias=False)
            self.infrared_classifier_.weight.requires_grad_(False)
            self.infrared_classifier_.weight.data = self.infrared_classifier.weight.data
            self.KLDivLoss = nn.KLDivLoss(reduction='batchmean')
            self.weight_sid = kwargs.get('weight_sid', 0.5)
            self.weight_KL = kwargs.get('weight_KL', 2.0)
            self.update_rate = kwargs.get('update_rate', 0.2)
            self.update_rate_ = self.update_rate
        print("output feat length:{}".format(self.base_dim + self.dim * self.part_num))
        # BNNeck: BatchNorm with frozen (zero) bias applied before the
        # classifier / before returning eval-mode features.
        self.bn_neck = nn.BatchNorm1d(self.base_dim + self.dim * self.part_num)
        nn.init.constant_(self.bn_neck.bias, 0)
        self.bn_neck.bias.requires_grad_(False)
        # Inference-only construction: skip building the training losses.
        if kwargs.get('eval', False):
            return
        self.classification = kwargs.get('classification', False)
        self.triplet = kwargs.get('triplet', False)
        self.center_cluster = kwargs.get('center_cluster', False)
        self.center_loss = kwargs.get('center', False)
        self.margin = kwargs.get('margin', 0.3)
        if self.classification:
            self.classifier = nn.Linear(self.base_dim + self.dim * self.part_num , num_classes, bias=False)
        if self.mutual_learning or self.classification:
            self.id_loss = nn.CrossEntropyLoss(ignore_index=-1)
        if self.triplet:
            self.triplet_loss = TripletLoss(margin=self.margin)
        if self.center_cluster:
            k_size = kwargs.get('k_size', 8)
            self.center_cluster_loss = CenterTripletLoss(k_size=k_size, margin=self.margin)
        if self.center_loss:
            self.center_loss = CenterLoss(num_classes, self.base_dim + self.dim * self.part_num)
    def forward(self, inputs, labels=None, **kwargs):
        """Extract features; in training mode, delegate to train_forward.

        kwargs must include 'cam_ids'; cameras 3 and 6 select the second
        modality (sub == 1). NOTE(review): the 3/6 split looks like the
        SYSU-MM01 infrared cameras -- confirm for other datasets.
        """
        # loss_center / modality_logits / modality_feat are currently unused.
        loss_reg = 0
        loss_center = 0
        modality_logits = None
        modality_feat = None
        cam_ids = kwargs.get('cam_ids')
        sub = (cam_ids == 3) + (cam_ids == 6)
        # CNN
        global_feat = self.backbone(inputs)
        b, c, w, h = global_feat.shape
        if self.pattern_attention:
            # One sigmoid mask per part; pool each masked feature map plus
            # the unmasked global map, then concatenate along channels.
            masks = global_feat
            masks = self.spatial_attention(masks)
            masks = self.activation(masks)
            feats = []
            for i in range(self.part_num):
                mask = masks[:, i:i+1, :, :]
                feat = mask * global_feat
                feat = F.avg_pool2d(feat, feat.size()[2:])
                feat = feat.view(feat.size(0), -1)
                feats.append(feat)
            global_feat = F.avg_pool2d(global_feat, global_feat.size()[2:])
            global_feat = global_feat.view(global_feat.size(0), -1)
            feats.append(global_feat)
            feats = torch.cat(feats, 1)
            if self.training:
                # Separation regularizer: penalize overlap between part
                # masks via the off-diagonal entries of their Gram matrix.
                masks = masks.view(b, self.part_num, w*h)
                loss_reg = torch.bmm(masks, masks.permute(0, 2, 1))
                loss_reg = torch.triu(loss_reg, diagonal = 1).sum() / (b * self.part_num * (self.part_num - 1) / 2)
        else:
            feats = F.avg_pool2d(global_feat, global_feat.size()[2:])
            feats = feats.view(feats.size(0), -1)
        if not self.training:
            feats = self.bn_neck(feats)
            return feats
        else:
            return self.train_forward(feats, labels, loss_reg, sub, **kwargs)
    def train_forward(self, feat, labels, loss_reg, sub, **kwargs):
        """Accumulate the enabled losses for one batch.

        :param feat: pre-BNNeck features from forward()
        :param labels: identity labels
        :param loss_reg: mask-separation regularizer (0 if disabled)
        :param sub: boolean-like modality indicator per sample
        :return: (total loss, dict of detached per-loss metrics)
        """
        epoch = kwargs.get('epoch')
        metric = {}
        if self.pattern_attention and loss_reg != 0 :
            loss = loss_reg.float() * self.weight_sep
            metric.update({'p-reg': loss_reg.data})
        else:
            loss = 0
        if self.triplet:
            triplet_loss, _, _ = self.triplet_loss(feat.float(), labels)
            loss += triplet_loss
            metric.update({'tri': triplet_loss.data})
        if self.center_loss:
            center_loss = self.center_loss(feat.float(), labels)
            loss += center_loss
            metric.update({'cen': center_loss.data})
        if self.center_cluster:
            center_cluster_loss, _, _ = self.center_cluster_loss(feat.float(), labels)
            loss += center_cluster_loss
            metric.update({'cc': center_cluster_loss.data})
        # Metric losses above use raw features; ID losses below use the
        # batch-normalized (BNNeck) features.
        feat = self.bn_neck(feat)
        if self.classification:
            logits = self.classifier(feat)
            cls_loss = self.id_loss(logits.float(), labels)
            loss += cls_loss
            metric.update({'acc': calc_acc(logits.data, labels), 'ce': cls_loss.data})
        if self.mutual_learning:
            # cam_ids = kwargs.get('cam_ids')
            # sub = (cam_ids == 3) + (cam_ids == 6)
            logits_v = self.visible_classifier(feat[sub == 0])
            v_cls_loss = self.id_loss(logits_v.float(), labels[sub == 0])
            loss += v_cls_loss * self.weight_sid
            logits_i = self.infrared_classifier(feat[sub == 1])
            i_cls_loss = self.id_loss(logits_i.float(), labels[sub == 1])
            loss += i_cls_loss * self.weight_sid
            logits_m = torch.cat([logits_v, logits_i], 0).float()
            with torch.no_grad():
                # EMA update of the frozen mirror classifiers.
                self.infrared_classifier_.weight.data = self.infrared_classifier_.weight.data * (1 - self.update_rate) \
                    + self.infrared_classifier.weight.data * self.update_rate
                self.visible_classifier_.weight.data = self.visible_classifier_.weight.data * (1 - self.update_rate) \
                    + self.visible_classifier.weight.data * self.update_rate
                # NOTE(review): sub == 0 features are scored by the
                # *infrared* mirror and vice versa (crossed on purpose?).
                logits_v_ = self.infrared_classifier_(feat[sub == 0])
                logits_i_ = self.visible_classifier_(feat[sub == 1])
                logits_m_ = torch.cat([logits_v_, logits_i_], 0).float()
            logits_m = F.softmax(logits_m, 1)
            logits_m_ = F.log_softmax(logits_m_, 1)
            mod_loss = self.KLDivLoss(logits_m_, logits_m)
            # NOTE(review): v/i classification losses were already added
            # above, so they are counted twice here -- confirm intended.
            loss += mod_loss * self.weight_KL + (v_cls_loss + i_cls_loss) * self.weight_sid
            metric.update({'ce-v': v_cls_loss.data})
            metric.update({'ce-i': i_cls_loss.data})
            metric.update({'KL': mod_loss.data})
        return loss, metric
| 7,876 | 5 | 103 |
c6947747816547467c9afc2ee5856aad33e82f28 | 623 | py | Python | p645_set_mismatch.py | feigaochn/leetcode | abf0877fae02aa9c2549051f0b68df0ace952512 | [
"MIT"
] | null | null | null | p645_set_mismatch.py | feigaochn/leetcode | abf0877fae02aa9c2549051f0b68df0ace952512 | [
"MIT"
] | null | null | null | p645_set_mismatch.py | feigaochn/leetcode | abf0877fae02aa9c2549051f0b68df0ace952512 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
if __name__ == '__main__':
sol = Solution()
print(sol.findErrorNums([1, 2, 2, 4]))
print(sol.findErrorNums([2, 2]))
| 24.92 | 104 | 0.499197 | #!/usr/bin/env python
# coding: utf-8
class Solution(object):
    """LeetCode 645 (Set Mismatch): one value in 1..n is duplicated and one
    is missing; recover both without sorting or extra storage."""
    def findErrorNums(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]

        With diff = miss - dup and sq_diff = miss**2 - dup**2, we get
        miss + dup = sq_diff / diff, which pins down both numbers.
        """
        n = len(nums)
        expected_sum = n * (n + 1) // 2
        expected_sq_sum = sum(i * i for i in range(1, n + 1))
        diff = expected_sum - sum(nums)
        total = (expected_sq_sum - sum(x * x for x in nums)) // diff
        duplicated = (total - diff) // 2
        missing = (total + diff) // 2
        return [duplicated, missing]
if __name__ == '__main__':
sol = Solution()
print(sol.findErrorNums([1, 2, 2, 4]))
print(sol.findErrorNums([2, 2]))
| 0 | 431 | 23 |
69b669dec472fa3555c9d25213f71d5da0489450 | 6,338 | py | Python | tests/framework_graphql/_target_application.py | lrafeei/newrelic-python-agent | 3dbf080d4104514e49ad8e1d06abac75b6914ee1 | [
"Apache-2.0"
] | null | null | null | tests/framework_graphql/_target_application.py | lrafeei/newrelic-python-agent | 3dbf080d4104514e49ad8e1d06abac75b6914ee1 | [
"Apache-2.0"
] | 1 | 2021-07-30T18:31:14.000Z | 2021-07-30T18:31:14.000Z | tests/framework_graphql/_target_application.py | lrafeei/newrelic-python-agent | 3dbf080d4104514e49ad8e1d06abac75b6914ee1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from graphql import (
GraphQLArgument,
GraphQLField,
GraphQLInt,
GraphQLList,
GraphQLNonNull,
GraphQLObjectType,
GraphQLSchema,
GraphQLString,
GraphQLUnionType,
)
authors = [
{
"first_name": "New",
"last_name": "Relic",
},
{
"first_name": "Bob",
"last_name": "Smith",
},
{
"first_name": "Leslie",
"last_name": "Jones",
},
]
books = [
{
"id": 1,
"name": "Python Agent: The Book",
"isbn": "a-fake-isbn",
"author": authors[0],
"branch": "riverside",
},
{
"id": 2,
"name": "Ollies for O11y: A Sk8er's Guide to Observability",
"isbn": "a-second-fake-isbn",
"author": authors[1],
"branch": "downtown",
},
{
"id": 3,
"name": "[Redacted]",
"isbn": "a-third-fake-isbn",
"author": authors[2],
"branch": "riverside",
},
]
magazines = [
{"id": 1, "name": "Reli Updates Weekly", "issue": 1, "branch": "riverside"},
{"id": 2, "name": "Reli Updates Weekly", "issue": 2, "branch": "downtown"},
{"id": 3, "name": "Node Weekly", "issue": 1, "branch": "riverside"},
]
libraries = ["riverside", "downtown"]
libraries = [
{
"id": i + 1,
"branch": branch,
"magazine": [m for m in magazines if m["branch"] == branch],
"book": [b for b in books if b["branch"] == branch],
}
for i, branch in enumerate(libraries)
]
storage = []
Author = GraphQLObjectType(
"Author",
{
"first_name": GraphQLField(GraphQLString),
"last_name": GraphQLField(GraphQLString),
},
)
Book = GraphQLObjectType(
"Book",
{
"id": GraphQLField(GraphQLInt),
"name": GraphQLField(GraphQLString),
"isbn": GraphQLField(GraphQLString),
"author": GraphQLField(GraphQLList(Author)),
"branch": GraphQLField(GraphQLString),
},
)
Magazine = GraphQLObjectType(
"Magazine",
{
"id": GraphQLField(GraphQLInt),
"name": GraphQLField(GraphQLString),
"issue": GraphQLField(GraphQLInt),
"branch": GraphQLField(GraphQLString),
},
)
Library = GraphQLObjectType(
"Library",
{
"id": GraphQLField(GraphQLInt),
"branch": GraphQLField(GraphQLString),
"book": GraphQLField(GraphQLList(Book)),
"magazine": GraphQLField(GraphQLList(Magazine)),
},
)
Storage = GraphQLList(GraphQLString)
try:
hello_field = GraphQLField(GraphQLString, resolver=resolve_hello)
library_field = GraphQLField(
Library,
resolver=resolve_library,
args={"index": GraphQLArgument(GraphQLNonNull(GraphQLInt))},
)
search_field = GraphQLField(
GraphQLList(
GraphQLUnionType("Item", (Book, Magazine), resolve_type=resolve_search)
),
args={"contains": GraphQLArgument(GraphQLNonNull(GraphQLString))},
)
echo_field = GraphQLField(
GraphQLString,
resolver=resolve_echo,
args={"echo": GraphQLArgument(GraphQLNonNull(GraphQLString))},
)
storage_field = GraphQLField(
Storage,
resolver=resolve_storage,
)
storage_add_field = GraphQLField(
Storage,
resolver=resolve_storage_add,
args={"string": GraphQLArgument(GraphQLNonNull(GraphQLString))},
)
error_field = GraphQLField(GraphQLString, resolver=resolve_error)
error_non_null_field = GraphQLField(
GraphQLNonNull(GraphQLString), resolver=resolve_error
)
except TypeError:
hello_field = GraphQLField(GraphQLString, resolve=resolve_hello)
library_field = GraphQLField(
Library,
resolve=resolve_library,
args={"index": GraphQLArgument(GraphQLNonNull(GraphQLInt))},
)
search_field = GraphQLField(
GraphQLList(
GraphQLUnionType("Item", (Book, Magazine), resolve_type=resolve_search)
),
args={"contains": GraphQLArgument(GraphQLNonNull(GraphQLString))},
)
echo_field = GraphQLField(
GraphQLString,
resolve=resolve_echo,
args={"echo": GraphQLArgument(GraphQLNonNull(GraphQLString))},
)
storage_field = GraphQLField(
Storage,
resolve=resolve_storage,
)
storage_add_field = GraphQLField(
GraphQLString,
resolve=resolve_storage_add,
args={"string": GraphQLArgument(GraphQLNonNull(GraphQLString))},
)
error_field = GraphQLField(GraphQLString, resolve=resolve_error)
error_non_null_field = GraphQLField(
GraphQLNonNull(GraphQLString), resolve=resolve_error
)
query = GraphQLObjectType(
name="Query",
fields={
"hello": hello_field,
"library": library_field,
"search": search_field,
"echo": echo_field,
"storage": storage_field,
"error": error_field,
"error_non_null": error_non_null_field,
},
)
mutation = GraphQLObjectType(
name="Mutation",
fields={
"storage_add": storage_add_field,
},
)
_target_application = GraphQLSchema(query=query, mutation=mutation)
| 25.869388 | 83 | 0.630956 | # Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from graphql import (
GraphQLArgument,
GraphQLField,
GraphQLInt,
GraphQLList,
GraphQLNonNull,
GraphQLObjectType,
GraphQLSchema,
GraphQLString,
GraphQLUnionType,
)
authors = [
{
"first_name": "New",
"last_name": "Relic",
},
{
"first_name": "Bob",
"last_name": "Smith",
},
{
"first_name": "Leslie",
"last_name": "Jones",
},
]
books = [
{
"id": 1,
"name": "Python Agent: The Book",
"isbn": "a-fake-isbn",
"author": authors[0],
"branch": "riverside",
},
{
"id": 2,
"name": "Ollies for O11y: A Sk8er's Guide to Observability",
"isbn": "a-second-fake-isbn",
"author": authors[1],
"branch": "downtown",
},
{
"id": 3,
"name": "[Redacted]",
"isbn": "a-third-fake-isbn",
"author": authors[2],
"branch": "riverside",
},
]
magazines = [
{"id": 1, "name": "Reli Updates Weekly", "issue": 1, "branch": "riverside"},
{"id": 2, "name": "Reli Updates Weekly", "issue": 2, "branch": "downtown"},
{"id": 3, "name": "Node Weekly", "issue": 1, "branch": "riverside"},
]
libraries = ["riverside", "downtown"]
libraries = [
{
"id": i + 1,
"branch": branch,
"magazine": [m for m in magazines if m["branch"] == branch],
"book": [b for b in books if b["branch"] == branch],
}
for i, branch in enumerate(libraries)
]
storage = []
def resolve_library(parent, info, index):
return libraries[index]
def resolve_storage_add(parent, info, string):
storage.append(string)
return string
def resolve_storage(parent, info):
return storage
def resolve_search(parent, info, contains):
search_books = [b for b in books if contains in b["name"]]
search_magazines = [m for m in magazines if contains in m["name"]]
return search_books + search_magazines
Author = GraphQLObjectType(
"Author",
{
"first_name": GraphQLField(GraphQLString),
"last_name": GraphQLField(GraphQLString),
},
)
Book = GraphQLObjectType(
"Book",
{
"id": GraphQLField(GraphQLInt),
"name": GraphQLField(GraphQLString),
"isbn": GraphQLField(GraphQLString),
"author": GraphQLField(GraphQLList(Author)),
"branch": GraphQLField(GraphQLString),
},
)
Magazine = GraphQLObjectType(
"Magazine",
{
"id": GraphQLField(GraphQLInt),
"name": GraphQLField(GraphQLString),
"issue": GraphQLField(GraphQLInt),
"branch": GraphQLField(GraphQLString),
},
)
Library = GraphQLObjectType(
"Library",
{
"id": GraphQLField(GraphQLInt),
"branch": GraphQLField(GraphQLString),
"book": GraphQLField(GraphQLList(Book)),
"magazine": GraphQLField(GraphQLList(Magazine)),
},
)
Storage = GraphQLList(GraphQLString)
def resolve_hello(root, info):
return "Hello!"
def resolve_echo(root, info, echo):
return echo
def resolve_error(root, info):
raise RuntimeError("Runtime Error!")
try:
hello_field = GraphQLField(GraphQLString, resolver=resolve_hello)
library_field = GraphQLField(
Library,
resolver=resolve_library,
args={"index": GraphQLArgument(GraphQLNonNull(GraphQLInt))},
)
search_field = GraphQLField(
GraphQLList(
GraphQLUnionType("Item", (Book, Magazine), resolve_type=resolve_search)
),
args={"contains": GraphQLArgument(GraphQLNonNull(GraphQLString))},
)
echo_field = GraphQLField(
GraphQLString,
resolver=resolve_echo,
args={"echo": GraphQLArgument(GraphQLNonNull(GraphQLString))},
)
storage_field = GraphQLField(
Storage,
resolver=resolve_storage,
)
storage_add_field = GraphQLField(
Storage,
resolver=resolve_storage_add,
args={"string": GraphQLArgument(GraphQLNonNull(GraphQLString))},
)
error_field = GraphQLField(GraphQLString, resolver=resolve_error)
error_non_null_field = GraphQLField(
GraphQLNonNull(GraphQLString), resolver=resolve_error
)
except TypeError:
hello_field = GraphQLField(GraphQLString, resolve=resolve_hello)
library_field = GraphQLField(
Library,
resolve=resolve_library,
args={"index": GraphQLArgument(GraphQLNonNull(GraphQLInt))},
)
search_field = GraphQLField(
GraphQLList(
GraphQLUnionType("Item", (Book, Magazine), resolve_type=resolve_search)
),
args={"contains": GraphQLArgument(GraphQLNonNull(GraphQLString))},
)
echo_field = GraphQLField(
GraphQLString,
resolve=resolve_echo,
args={"echo": GraphQLArgument(GraphQLNonNull(GraphQLString))},
)
storage_field = GraphQLField(
Storage,
resolve=resolve_storage,
)
storage_add_field = GraphQLField(
GraphQLString,
resolve=resolve_storage_add,
args={"string": GraphQLArgument(GraphQLNonNull(GraphQLString))},
)
error_field = GraphQLField(GraphQLString, resolve=resolve_error)
error_non_null_field = GraphQLField(
GraphQLNonNull(GraphQLString), resolve=resolve_error
)
query = GraphQLObjectType(
name="Query",
fields={
"hello": hello_field,
"library": library_field,
"search": search_field,
"echo": echo_field,
"storage": storage_field,
"error": error_field,
"error_non_null": error_non_null_field,
},
)
mutation = GraphQLObjectType(
name="Mutation",
fields={
"storage_add": storage_add_field,
},
)
_target_application = GraphQLSchema(query=query, mutation=mutation)
| 458 | 0 | 161 |
ade73391147394a210ab7e939baae2a264e0589f | 3,964 | py | Python | templatestore/models.py | ackotech/django-templatestore | ec96f9b1f5b3d129bea0b2e8f3773bfcea440f36 | [
"Apache-2.0"
] | null | null | null | templatestore/models.py | ackotech/django-templatestore | ec96f9b1f5b3d129bea0b2e8f3773bfcea440f36 | [
"Apache-2.0"
] | 2 | 2021-06-02T05:06:54.000Z | 2022-02-28T07:02:25.000Z | templatestore/models.py | ackotech/django-templatestore | ec96f9b1f5b3d129bea0b2e8f3773bfcea440f36 | [
"Apache-2.0"
] | null | null | null | from django.db import models
from django.contrib.postgres.fields import JSONField
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from templatestore import app_settings as ts_settings
import re
| 38.115385 | 88 | 0.687689 | from django.db import models
from django.contrib.postgres.fields import JSONField
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from templatestore import app_settings as ts_settings
import re
class Template(models.Model):
def attributes_default():
return {k: "" for k in ts_settings.TE_TEMPLATE_ATTRIBUTES_KEYS}
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=1000)
type = models.CharField(max_length=1000)
default_version_id = models.IntegerField(blank=True, null=True)
attributes = JSONField(default=attributes_default)
created_on = models.DateTimeField(auto_now_add=True) # TODO: Timezone support check
modified_on = models.DateTimeField(auto_now=True)
deleted_on = models.DateTimeField(blank=True, null=True)
created_by = models.IntegerField(null=True, blank=True)
user_email = models.CharField(max_length=200, null=True, blank=True)
class Meta:
db_table = "templatestore_template"
def clean(self):
# all validations here
if self.default_version_id and (
not len(
TemplateVersion.objects.filter(
id=self.default_version_id, template_id=self.id
)
)
):
raise ValidationError(
{
"default_version_id": _(
"specified id doesn't correspond to same template version"
)
}
)
# add default attributes
for k in ts_settings.TE_TEMPLATE_ATTRIBUTES.keys():
if k not in self.attributes:
self.attributes[k] = ""
def save(self, *args, **kwargs):
self.full_clean()
super(Template, self).save(*args, **kwargs)
class TemplateVersion(models.Model):
id = models.AutoField(primary_key=True)
template_id = models.ForeignKey(Template, on_delete=models.PROTECT)
version = models.CharField(max_length=50)
sample_context_data = JSONField(default=dict)
created_on = models.DateTimeField(auto_now_add=True)
modified_on = models.DateTimeField(auto_now=True)
deleted_on = models.DateTimeField(null=True, blank=True)
version_alias = models.CharField(blank=True, max_length=100)
created_by = models.IntegerField(null=True, blank=True)
user_email = models.CharField(max_length=200, null=True, blank=True)
class Meta:
db_table = "templatestore_template_version"
unique_together = ("template_id", "version")
def clean(self):
# all validations here
if self.version and not re.fullmatch("\d+\.\d+", self.version):
raise ValidationError({"version": _("version must be specified like 1.3")})
def save(self, *args, **kwargs):
self.full_clean()
super(TemplateVersion, self).save(*args, **kwargs)
class TemplateConfig(models.Model):
type = models.CharField(max_length=1000)
sub_type = models.CharField(max_length=1000)
render_mode = models.CharField(max_length=1000)
created_on = models.DateTimeField(auto_now_add=True)
modified_on = models.DateTimeField(auto_now=True)
deleted_on = models.DateTimeField(null=True, blank=True)
attributes = JSONField(default=dict, blank=True)
class Meta:
db_table = "templatestore_template_config"
unique_together = ("type", "sub_type")
class SubTemplate(models.Model):
id = models.AutoField(primary_key=True)
template_version_id = models.ForeignKey(TemplateVersion, on_delete=models.PROTECT)
config = models.ForeignKey(TemplateConfig, on_delete=models.PROTECT)
data = models.TextField(blank=True)
created_on = models.DateTimeField(auto_now_add=True)
modified_on = models.DateTimeField(auto_now=True)
deleted_on = models.DateTimeField(null=True, blank=True)
class Meta:
db_table = "templatestore_sub_template"
| 1,103 | 2,513 | 92 |
1553588cdb6125b7a8404633ab139fc78c91675e | 4,469 | py | Python | rudetox/seq2seq/predict.py | IlyaGusev/rudetox | e1c6334744bf9d28639efbb61c3605be51642ce9 | [
"Apache-2.0"
] | 1 | 2022-03-02T15:50:10.000Z | 2022-03-02T15:50:10.000Z | rudetox/seq2seq/predict.py | IlyaGusev/rudetox | e1c6334744bf9d28639efbb61c3605be51642ce9 | [
"Apache-2.0"
] | null | null | null | rudetox/seq2seq/predict.py | IlyaGusev/rudetox | e1c6334744bf9d28639efbb61c3605be51642ce9 | [
"Apache-2.0"
] | null | null | null | import argparse
import json
import copy
import torch
import razdel
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from rudetox.util.io import read_jsonl, write_jsonl
from rudetox.util.dl import gen_batch, set_random_seed
from rudetox.ranker import Ranker
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input-file", type=str, required=True)
parser.add_argument("--output-file", type=str, required=True)
parser.add_argument("--model-name", type=str, required=True)
parser.add_argument("--sample-rate", type=float, default=1.0)
parser.add_argument("--batch-size", type=int, default=1)
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--max-source-tokens-count", type=int, default=600)
parser.add_argument("--max-target-tokens-count", type=int, default=200)
parser.add_argument("--repetition-penalty", type=float, default=1.0)
parser.add_argument("--length-penalty", type=float, default=1.0)
parser.add_argument("--no-repeat-ngram-size", type=int, default=4)
parser.add_argument("--num-beams", type=int, default=5)
parser.add_argument("--num-return-sequences", type=int, default=1)
parser.add_argument("--early-stopping", action="store_true", default=False)
parser.add_argument("--style-token", type=str, default=None)
parser.add_argument("--source-field", type=str, default="source")
parser.add_argument("--ranker-config", type=str, default=None)
args = parser.parse_args()
predict(**vars(args))
| 35.468254 | 100 | 0.658984 | import argparse
import json
import copy
import torch
import razdel
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from rudetox.util.io import read_jsonl, write_jsonl
from rudetox.util.dl import gen_batch, set_random_seed
from rudetox.ranker import Ranker
def predict(
model_name,
input_file,
sample_rate,
output_file,
batch_size,
max_source_tokens_count,
max_target_tokens_count,
seed,
no_repeat_ngram_size,
repetition_penalty,
length_penalty,
num_beams,
num_return_sequences,
early_stopping,
source_field,
ranker_config,
style_token
):
set_random_seed(seed)
device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
model = model.to(device)
ranker = None
if ranker_config:
with open(ranker_config, "r") as r:
ranker_config = json.load(r)
ranker = Ranker(**ranker_config)
output_texts, scores = [], []
records = list(read_jsonl(input_file, sample_rate))
for batch in tqdm(gen_batch(records, batch_size)):
texts = []
for r in batch:
text = r[source_field]
if style_token:
text = style_token + " " + text
texts.append(text)
input_ids = tokenizer(
texts,
add_special_tokens=True,
max_length=max_source_tokens_count,
padding="max_length",
truncation=True,
return_tensors="pt",
)["input_ids"].to(device)
output_ids = model.generate(
input_ids=input_ids,
no_repeat_ngram_size=no_repeat_ngram_size,
repetition_penalty=repetition_penalty,
length_penalty=length_penalty,
early_stopping=early_stopping,
num_beams=num_beams,
num_return_sequences=num_return_sequences,
max_length=max_target_tokens_count
)
output_ids = output_ids.reshape((len(batch), num_return_sequences, output_ids.size(1)))
for text, sample_output_ids in zip(texts, output_ids):
targets = [tokenizer.decode(ids, skip_special_tokens=True) for ids in sample_output_ids]
if ranker:
best_target, best_target_scores = ranker(text, targets)
scores.append(best_target_scores)
targets = [best_target]
output_texts.extend(targets)
if not ranker:
fixed_records = []
for r in records:
for _ in range(num_return_sequences):
fixed_records.append(copy.copy(r))
records = fixed_records
for target, r in zip(output_texts, records):
r["target"] = target
for score, r in zip(scores, records):
r["scores"] = score
if ranker:
print("Style:", sum([s["style"] for s in scores]) / len(output_texts))
print("Fluency:", sum([s["fluency"] for s in scores]) / len(output_texts))
print("Sim:", sum([s["sim"] for s in scores]) / len(output_texts))
write_jsonl(records, output_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input-file", type=str, required=True)
parser.add_argument("--output-file", type=str, required=True)
parser.add_argument("--model-name", type=str, required=True)
parser.add_argument("--sample-rate", type=float, default=1.0)
parser.add_argument("--batch-size", type=int, default=1)
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--max-source-tokens-count", type=int, default=600)
parser.add_argument("--max-target-tokens-count", type=int, default=200)
parser.add_argument("--repetition-penalty", type=float, default=1.0)
parser.add_argument("--length-penalty", type=float, default=1.0)
parser.add_argument("--no-repeat-ngram-size", type=int, default=4)
parser.add_argument("--num-beams", type=int, default=5)
parser.add_argument("--num-return-sequences", type=int, default=1)
parser.add_argument("--early-stopping", action="store_true", default=False)
parser.add_argument("--style-token", type=str, default=None)
parser.add_argument("--source-field", type=str, default="source")
parser.add_argument("--ranker-config", type=str, default=None)
args = parser.parse_args()
predict(**vars(args))
| 2,869 | 0 | 23 |
8ec2d573ab0ceaac64cdb9c39985ee065ca71da5 | 1,944 | py | Python | bamboo/tests/core/test_calculator.py | pld/bamboo | a0fc77aebd6ff6b1087ba46896b0ce705fbb25a3 | [
"BSD-3-Clause"
] | 27 | 2015-01-14T15:57:54.000Z | 2020-12-27T19:34:41.000Z | bamboo/tests/core/test_calculator.py | biswapanda/bamboo | 72fc260822a27ce52cbe65de178f8fa1b60311f3 | [
"BSD-3-Clause"
] | 2 | 2015-08-06T15:23:28.000Z | 2016-01-28T00:05:25.000Z | bamboo/tests/core/test_calculator.py | biswapanda/bamboo | 72fc260822a27ce52cbe65de178f8fa1b60311f3 | [
"BSD-3-Clause"
] | 10 | 2015-08-07T01:50:39.000Z | 2019-05-15T21:41:18.000Z | from bamboo.core.parser import Parser
from bamboo.core.calculator import calculate_columns
from bamboo.lib.datetools import now, recognize_dates
from bamboo.models.calculation import Calculation
from bamboo.models.dataset import Dataset
from bamboo.tests.test_base import TestBase
| 35.345455 | 77 | 0.652778 | from bamboo.core.parser import Parser
from bamboo.core.calculator import calculate_columns
from bamboo.lib.datetools import now, recognize_dates
from bamboo.models.calculation import Calculation
from bamboo.models.dataset import Dataset
from bamboo.tests.test_base import TestBase
class TestCalculator(TestBase):
def setUp(self):
TestBase.setUp(self)
self.dataset = Dataset()
self.dataset.save(
self.test_dataset_ids['good_eats_with_calculations.csv'])
dframe = recognize_dates(
self.get_data('good_eats_with_calculations.csv'))
self.dataset.save_observations(dframe)
self.group = None
self.places = 5
def _equal_msg(self, calculated, stored, formula):
return '(calculated %s) %s != (stored %s) %s ' % (type(calculated),
calculated, type(stored), stored) +\
'(within %s places), formula: %s' % (self.places, formula)
def _test_calculator(self):
self.dframe = self.dataset.dframe()
columns = self.dframe.columns.tolist()
self.start_num_cols = len(columns)
self.added_num_cols = 0
column_labels_to_slugs = {
column_attrs[Dataset.LABEL]: (column_name) for
(column_name, column_attrs) in self.dataset.schema.items()
}
self.label_list, self.slugified_key_list = [
list(ary) for ary in zip(*column_labels_to_slugs.items())
]
for idx, formula in enumerate(self.calculations):
name = 'test-%s' % idx
Parser.validate_formula(formula, self.dataset)
calculation = Calculation()
calculation.save(self.dataset, formula, name, self.group)
self.now = now()
calculate_columns(self.dataset, [calculation])
self.column_labels_to_slugs = self.dataset.schema.labels_to_slugs
self._test_calculation_results(name, formula)
| 1,548 | 10 | 104 |
96ed8b33dab9bb069a735a68ce4463baaee67e58 | 371 | py | Python | users/urls.py | flaviofontes29/online-course-platform | 2cebacce58b7ee8ca004b4b70581d51f827869d5 | [
"MIT"
] | null | null | null | users/urls.py | flaviofontes29/online-course-platform | 2cebacce58b7ee8ca004b4b70581d51f827869d5 | [
"MIT"
] | null | null | null | users/urls.py | flaviofontes29/online-course-platform | 2cebacce58b7ee8ca004b4b70581d51f827869d5 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
urlpatterns = [
path("registration/", views.registration, name="registration"),
path("login/", views.login, name="login"),
path("valid_registry", views.valid_registry, name="valid_registry"),
path("valid_login", views.valid_login, name="valid_login"),
path("logout/", views.logout, name="logout"),
]
| 30.916667 | 72 | 0.698113 | from django.urls import path
from . import views
urlpatterns = [
path("registration/", views.registration, name="registration"),
path("login/", views.login, name="login"),
path("valid_registry", views.valid_registry, name="valid_registry"),
path("valid_login", views.valid_login, name="valid_login"),
path("logout/", views.logout, name="logout"),
]
| 0 | 0 | 0 |
f34d1f1fdb44abb6e5ea8015474e49216be4b0b3 | 2,967 | py | Python | powerstrip/utils/utils.py | keans/powerstrip | fb5ba4d19b60d4404d68fd9b4af739afc64a161c | [
"MIT"
] | null | null | null | powerstrip/utils/utils.py | keans/powerstrip | fb5ba4d19b60d4404d68fd9b4af739afc64a161c | [
"MIT"
] | null | null | null | powerstrip/utils/utils.py | keans/powerstrip | fb5ba4d19b60d4404d68fd9b4af739afc64a161c | [
"MIT"
] | null | null | null | from hashlib import sha3_256
from typing import Union, BinaryIO
from pathlib import Path
from importlib_metadata import pathlib
def ensure_path(
path: Union[str, Path], must_exist: bool = False
) -> Path:
"""
ensures that given path is of type Path
and that HOME directory is resolved
"""
path = (
path
if isinstance(path, Path) else
Path(path)
).expanduser()
if must_exist and not path.exists():
# path does not exist
raise ValueError(
f"The directory '{path}' does not exist!"
)
return path
def hash_file(
filename: Union[str, Path],
hash_func: callable = sha3_256
) -> bytes:
"""
obtain the hash of the file with given hash function
:param filename: file object on which hash is computed
:type filename: Path
:param hash_func: hash function that is used, defaults to sha3_256
:type hash_func: callable, optional
:return: hash digest
:rtype: bytes
"""
assert isinstance(filename, (str, Path))
assert callable(hash_func)
# ensure that filename does exist
filename = ensure_path(filename, must_exist=True)
# initialize hash function
h = hash_func()
# read file chunkwise and update hash function
with filename.open("rb") as f:
while True:
chunk = f.read(h.block_size)
if not chunk:
break
h.update(chunk)
return h.digest()
def hash_directory(
directory: Union[str, Path],
glob: str = "**/*",
exclude_suffixes: list = [],
exclude_filenames: list = [],
hash_func: callable = sha3_256
) -> bytes:
"""
obtain the XORed hash of all files in the given directory
with given hash function
:param directory: directory from which all files are hashed
:type directory: Path
:param glob: glob to obtain files from directory, defaults to **/*
:type glob: str, optional
:param exclude_suffixes: suffixes that are ignored
:type exclude_suffixes: list
:param exclude_filenames: filenames that are ignored
:type exclude_filenames: list
:param hash_func: hash function that is used, defaults to sha3_256
:type hash_func: callable, optional
:return: hash digest
:rtype: bytes
"""
assert isinstance(directory, (str, Path))
assert isinstance(glob, str)
assert callable(hash_func)
# ensure that filename does exist
directory = ensure_path(directory, must_exist=True)
assert directory.is_dir()
h = hash_func()
digest = b"\0" * h.digest_size
for fn in directory.glob(glob):
if (
fn.suffix in exclude_suffixes or
fn.name in exclude_filenames
):
# skip excluded files
continue
# XOR digest with file's digest
digest = bytes([
a ^ b
for a, b in zip(digest, hash_file(fn, hash_func))
])
return h.digest()
| 26.256637 | 70 | 0.634648 | from hashlib import sha3_256
from typing import Union, BinaryIO
from pathlib import Path
from importlib_metadata import pathlib
def ensure_path(
path: Union[str, Path], must_exist: bool = False
) -> Path:
"""
ensures that given path is of type Path
and that HOME directory is resolved
"""
path = (
path
if isinstance(path, Path) else
Path(path)
).expanduser()
if must_exist and not path.exists():
# path does not exist
raise ValueError(
f"The directory '{path}' does not exist!"
)
return path
def hash_file(
filename: Union[str, Path],
hash_func: callable = sha3_256
) -> bytes:
"""
obtain the hash of the file with given hash function
:param filename: file object on which hash is computed
:type filename: Path
:param hash_func: hash function that is used, defaults to sha3_256
:type hash_func: callable, optional
:return: hash digest
:rtype: bytes
"""
assert isinstance(filename, (str, Path))
assert callable(hash_func)
# ensure that filename does exist
filename = ensure_path(filename, must_exist=True)
# initialize hash function
h = hash_func()
# read file chunkwise and update hash function
with filename.open("rb") as f:
while True:
chunk = f.read(h.block_size)
if not chunk:
break
h.update(chunk)
return h.digest()
def hash_directory(
directory: Union[str, Path],
glob: str = "**/*",
exclude_suffixes: list = [],
exclude_filenames: list = [],
hash_func: callable = sha3_256
) -> bytes:
"""
obtain the XORed hash of all files in the given directory
with given hash function
:param directory: directory from which all files are hashed
:type directory: Path
:param glob: glob to obtain files from directory, defaults to **/*
:type glob: str, optional
:param exclude_suffixes: suffixes that are ignored
:type exclude_suffixes: list
:param exclude_filenames: filenames that are ignored
:type exclude_filenames: list
:param hash_func: hash function that is used, defaults to sha3_256
:type hash_func: callable, optional
:return: hash digest
:rtype: bytes
"""
assert isinstance(directory, (str, Path))
assert isinstance(glob, str)
assert callable(hash_func)
# ensure that filename does exist
directory = ensure_path(directory, must_exist=True)
assert directory.is_dir()
h = hash_func()
digest = b"\0" * h.digest_size
for fn in directory.glob(glob):
if (
fn.suffix in exclude_suffixes or
fn.name in exclude_filenames
):
# skip excluded files
continue
# XOR digest with file's digest
digest = bytes([
a ^ b
for a, b in zip(digest, hash_file(fn, hash_func))
])
return h.digest()
| 0 | 0 | 0 |
2b57936d0ac7bfbb6b3535903ca37ced53fa3380 | 20,247 | py | Python | dtr_code/shared/pt_trial_util.py | merrymercy/dtr-prototype | bf40e182453a7d8d23581ea68f32a9d7d2037d62 | [
"Linux-OpenIB"
] | 1 | 2021-08-02T02:42:58.000Z | 2021-08-02T02:42:58.000Z | dtr_code/shared/pt_trial_util.py | merrymercy/dtr-prototype | bf40e182453a7d8d23581ea68f32a9d7d2037d62 | [
"Linux-OpenIB"
] | null | null | null | dtr_code/shared/pt_trial_util.py | merrymercy/dtr-prototype | bf40e182453a7d8d23581ea68f32a9d7d2037d62 | [
"Linux-OpenIB"
] | 1 | 2021-08-05T08:58:53.000Z | 2021-08-05T08:58:53.000Z | """
Utilities for setting up PyTorch memory usage experiments.
"""
import csv
from itertools import product as iter_product
import os
import subprocess
import time
import numpy as np
from common import (check_file_exists, prepare_out_file,
read_json, render_exception, write_json)
MEASURED_KEYS = ['time', 'gpu_time', 'input_mem', 'model_mem', 'total_mem', 'sync_time',
# profiling output
'base_compute_time', 'remat_compute_time', 'search_time', 'cost_time', 'memory_budget']
def run_trials(config_dir, python_cmd,
experiment_name, model_name,
specific_params,
n_inputs,
path_prefix,
report_errors=False,
append_to_csv=False,
trial_run=False,
trial_run_outfile='',
cmd_id=0,
conf_cnt=0):
"""
Responsible for recording the time and max memory usage
from running a model (the user must provide a lambda for
actually running the model because different kinds of models
need different kinds of setup and a lambda that generates an
input for running that model)
:params:
trial_run: When set to true, no persistent experiment data will be saved. It is used to
run a baseline trial and record how much memory is used then set the memory budget
for `ratio` commands of DTR experiments
trial_run_out_file: the temporary file that stores the memory usage data of the baseline run
cmd_id: the command id for current model, starting from 0 by default
conf_cnt: the id of confguration generated from `unfold_settings`; this is used for tracking
which exact configuration that caused errors.
"""
try:
cwd = os.getcwd()
params_file = 'specific_params.json'
try:
write_json(cwd, params_file, specific_params)
if not trial_run:
filename = prepare_out_file(path_prefix,
'{}-{}.csv'.format(get_report_prefix(experiment_name, specific_params, cmd_id), model_name))
mode = 'a' if append_to_csv else 'w'
with open(filename, mode, newline='') as csvfile:
writer = create_csv_writer(csvfile, specific_params)
if not append_to_csv:
writer.writeheader()
else:
filename = ''
shared_dir = os.path.dirname(os.path.abspath(__file__))
run_script = os.path.join(shared_dir, 'run_torch_trial.py')
for i in range(n_inputs):
try:
subprocess.run(
[python_cmd, run_script,
'--config-dir', config_dir,
'--experiment-mode', experiment_name,
'--model-name', model_name,
'--input-idx', str(i),
'--params-file', params_file,
'--out-file', filename,
'--trial-run', str(trial_run),
'--trial-run-outfile', trial_run_outfile
],
check=True, timeout=specific_params.get('timeout', 60))
except (subprocess.CalledProcessError, subprocess.TimeoutExpired) as e:
if not report_errors:
raise e
if trial_run:
return (False, 'Baseline failed: {}'.format(render_exception(e)))
log_error(experiment_name, model_name, specific_params, i, render_exception(e), path_prefix)
return (False, 'successfully caught error')
time.sleep(4)
return (True, 'success')
finally:
os.remove(params_file)
except Exception as e:
return (False,
'Encountered exception on ({}, {}, {}):\n'.format(
experiment_name, model_name, specific_params) + render_exception(e))
def process_command(command: dict, config_template: dict):
'''
Generate a setting with all necessary fields according
to the default setting and the type of the commands.
:params:
command: the command provided in the config.json
config_template: default values for the settings that is required
while running an experiment
'''
result = command.copy()
if result['type'] == 'baseline':
if 'batch_size' not in result:
result['batch_size'] = config_template['batch_size']
return result
elif result['type'] == 'dtr' or result['type'] == 'simrd':
for (k, v) in config_template.items():
if k not in command:
result[k] = v
return result
else:
raise Exception('Unknown type: {}'.format(result['type']))
def validate_setting(method, exp_config):
'''
Check whether the settings for an experiment contains
all the required values
'''
return {
'dtr' : check_dtr,
'simrd': check_simrd,
'baseline': lambda: ('batch_size' in exp_config, '')
}.get(method, lambda: (False, 'unknown kind'))()
def unfold_settings(exp_config):
'''
Unfold a command and get all possible
settings. Returned as an Iterable.
The possible settings are generated by taking
a Cartesian product over list fields of the command
Note: for `ratio` command, the `memory_budget` is calculated here in order to
avoid multiple runs of baseline trial
'''
setting_heading = list()
list_fields = list()
for (k, v) in exp_config.items():
if isinstance(v, list):
setting_heading.append(k)
list_fields.append(v)
if not list_fields:
yield exp_config
else:
for combo in iter_product(*list_fields):
# necessary to copy each time
# since the old data might be used later
result = exp_config.copy()
for i in range(len(list_fields)):
result[setting_heading[i]] = combo[i]
if result.get('kind') == 'ratio':
result['memory_budget'] *= result['ratio']
yield result
def run_baseline(model, exp_config, config, config_dir, output_dir):
'''
Run a baseline triral and obtain memory usage.
This is used for getting a reference memory usage for
DTR `ratio` commands
'''
baseline_config = { 'batch_size' : exp_config['batch_size'],
'timeout': exp_config.get('timeout', 60),
# only doing a minimal number of runs because we are only getting the memory usage,
# which should be identical between runs
'n_reps': 10,
'extra_params': exp_config.get('extra_params', {})
}
if 'input_params' in exp_config:
baseline_config['input_params'] = exp_config['input_params']
filename = str(time.time()) + '.json'
temp_file = prepare_out_file(os.getcwd(), filename)
success, msg = run_trials(config_dir,
python_command('baseline', config),
'baseline', model, baseline_config,
exp_config.get('n_inputs', config['n_inputs']),
output_dir,
report_errors=config['report_errors'],
append_to_csv=False,
trial_run=True,
trial_run_outfile=temp_file)
if not success:
return False, 'Error while running baseline trial: \n{}'.format(msg)
mem_usage = read_json(output_dir, temp_file)
os.remove(temp_file)
if 'mem' not in mem_usage:
return False, 'failed to get baseline memory usage'
return True, mem_usage['mem']
def parse_commands(model, config):
'''
Parse a command and return a processed command, which
can be used to generate settings for experiments
:params:
model: the name of the model
config: the top-level config
'''
if not config['dtr_settings'].get(model):
yield False, 'No settings for {}'.format(model), None
default_setting = config['dtr_settings'].get('default')
model_commands = config['dtr_settings'].get(model)
if default_setting is not None:
config_template = default_setting.copy()
else:
config_template = dict()
for command in model_commands:
exp_config = process_command(command, config_template)
if exp_config.get('kind') in ('ratio',):
exp_config['memory_budget'] = -1.0
success, msg = validate_setting(exp_config['type'], exp_config)
if not success:
yield False, 'Malformat configuration for {}-{}: {}'.format(model, exp_config['type'], msg), None
else:
yield True, 'Success', exp_config
def bootstrap_conf_intervals(data, stat, bootstrap_iters=10000, confidence=95, measure='mean'):
"""
Given an array of floats, performs bootstrap resampling for the specified number
of iterations to estimate confidence intervals.
"""
summary_stat = None
if measure == 'mean':
summary_stat = np.mean
elif measure == 'median':
summary_stat = np.median
else:
raise Exception(f'Invalid measure, must be mean or median but received {measure}')
assert summary_stat is not None
estimates = [
summary_stat(np.random.choice(data, replace=True, size=len(data)))
for i in range(bootstrap_iters)
]
# To get C% confidence intervals, we exclude the bottom (100-C)/2 % and the top (100-C)/2 %
conf_span = (100 - confidence) / 2
return (np.percentile(estimates, conf_span), np.percentile(estimates, 100 - conf_span))
def collect_raw_measurements(experiment_name, model, specific_params, path_prefix, cmd_id):
"""
Reads the raw data for the given experiment name and params and returns a tuple (metrics dictionary, memory budget if applicable, error message if there is no data file).
The first two fields will be None if there is no data file.
"""
filename = '{}-{}.csv'.format(get_report_prefix(experiment_name, specific_params, cmd_id), model)
if not check_file_exists(path_prefix, filename):
return (None, None, 'Data file {} does not exist at {}'.format(filename, path_prefix))
full_path = os.path.join(path_prefix, filename)
metrics = {}
memory_budget = None
with open(full_path, 'r', newline='') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
# In case there are commands for the same model
# that have the same values for all configurations
idx = int(row['input'])
measured = {
key: float(row[key]) for key in MEASURED_KEYS
}
if memory_budget is None and specific_params.get('kind') == 'ratio':
memory_budget = float(row['memory_budget'])
if idx not in metrics.keys():
metrics[idx] = {
key: [] for key in MEASURED_KEYS
}
for key in MEASURED_KEYS:
metrics[idx][key].append(measured[key])
return (metrics, memory_budget, 'success')
def compute_slowdowns(exp_times, baseline_times):
"""
Given arrays of prototype times and baseline times of the same length,
returns an array of slowdowns
"""
return [exp_times[i]/baseline_times[i] for i in range(len(exp_times))]
def compute_throughputs(batch_size, gpu_times):
"""
Given a batch size and an array of time running on GPU,
returns an array of throughputs
"""
return [batch_size / gpu_times[i] * 1000 for i in range(len(gpu_times))]
def parse_data_file(experiment_name, model, config, specific_params, path_prefix, cmd_id=0, baseline_params=None):
"""
Given an experiment name, model name, directory, and number of inputs,
parses the corresponding data file if it exists and computes
summary statistics for the (wall-clock) time, GPU time, and memory used in that data file for choice of specific settings
baseline_params: If the command is a ratio command, this will use
the baseline to compute the slowdown per data point
in order to better measure its distribution.
Returns None and an error message if it fails
"""
try:
report_errors = config['report_errors']
metrics, budget, msg = collect_raw_measurements(experiment_name, model, specific_params, path_prefix, cmd_id)
if metrics is None:
return (None, msg)
if budget is not None and specific_params.get('kind') == 'ratio':
specific_params['memory_budget'] = float(budget)
summary = {
'specific_params': specific_params
}
# in case everything errored out, this ensure that we will have a record of the error
if report_errors:
if check_error(experiment_name, model, specific_params, path_prefix):
summary['summary'] = 'error'
return summary, 'success'
# if this was a ratio experiment
# and we have a baseline available, let's compute
# the slowdown per data point, head to head
# and bootstrap confidence intervals
if (specific_params.get('type') != 'baseline'
and specific_params.get('kind') == 'ratio'
and baseline_params is not None):
baseline_metrics, _, baseline_msg = collect_raw_measurements(baseline_params['type'], model, baseline_params['specific_params'], path_prefix, baseline_params['cmd_id'])
if baseline_metrics is None:
return (None, baseline_msg)
# compute slowdown in metrics
for i in range(config['n_inputs']):
dtr_times = metrics[i]['gpu_time']
baseline_times = baseline_metrics[i]['gpu_time']
assert len(dtr_times) == len(baseline_times)
metrics[i]['slowdown'] = compute_slowdowns(dtr_times, baseline_times)
# Compute throughputs for baseline param_sweep commands
if specific_params.get('kind') == 'param_sweep' or specific_params.get('type') == 'baseline':
for i in range(config['n_inputs']):
metrics[i]['throughput'] = compute_throughputs(specific_params['batch_size'], metrics[i]['gpu_time'])
summary_stats = []
for (_, stat) in metrics.items():
summary_dict = {
key: compute_summary_stats(stat[key], bootstrap=('time' in key))
for key in MEASURED_KEYS
}
if 'slowdown' in stat:
summary_dict['slowdown'] = compute_summary_stats(stat['slowdown'], bootstrap=True)
if 'throughput' in stat:
summary_dict['throughput'] = compute_summary_stats(stat['throughput'], bootstrap=True)
summary_stats.append(summary_dict)
summary['summary'] = summary_stats
return (summary, 'success')
except Exception as e:
return (None, 'Encountered exception on ({}, {}): '.format(experiment_name, model) + render_exception(e))
| 40.332669 | 180 | 0.604089 | """
Utilities for setting up PyTorch memory usage experiments.
"""
import csv
from itertools import product as iter_product
import os
import subprocess
import time
import numpy as np
from common import (check_file_exists, prepare_out_file,
read_json, render_exception, write_json)
MEASURED_KEYS = ['time', 'gpu_time', 'input_mem', 'model_mem', 'total_mem', 'sync_time',
# profiling output
'base_compute_time', 'remat_compute_time', 'search_time', 'cost_time', 'memory_budget']
def create_csv_writer(csvfile, specific_params):
    """Build a csv.DictWriter whose columns are the trial-index fields,
    every measured metric, and each key of ``specific_params``."""
    columns = ['input', 'rep']
    columns.extend(MEASURED_KEYS)
    columns.extend(specific_params.keys())
    return csv.DictWriter(csvfile, fieldnames=columns)
def python_command(setting, config):
    """Return the interpreter command for the given experiment *setting*.

    DTR trials run under the custom PyTorch build named by the config's
    ``dtr_torch_cmd`` entry (with ``~`` expanded); every other setting
    uses the system ``python3``.
    """
    if setting != 'dtr':
        return 'python3'
    return os.path.expanduser(config['dtr_torch_cmd'])
def log_error(experiment_name, model_name, specific_params, inp, err_msg, path_prefix):
    """Append a trial failure to the persistent ``errors.json`` log.

    The log is a nested mapping experiment -> model -> list of records;
    each record carries the failing input index, the rendered error
    message, and the full parameter set of the failing trial.
    """
    err_info = {
        'input': inp,
        'msg': err_msg
    }
    # Read-modify-write the existing log, or start a fresh one.
    logged_errors = {}
    if check_file_exists(path_prefix, 'errors.json'):
        logged_errors = read_json(path_prefix, 'errors.json')
    if experiment_name not in logged_errors:
        logged_errors[experiment_name] = {}
    if model_name not in logged_errors[experiment_name]:
        logged_errors[experiment_name][model_name] = []
    # The record is the flattened params plus the nested error info.
    logged_errors[experiment_name][model_name].append({
        'err_info': err_info,
        **specific_params
    })
    write_json(path_prefix, 'errors.json', logged_errors)
def check_error(experiment_name, model_name, specific_params, path_prefix):
    """Return True when ``errors.json`` records a failure for this exact
    (experiment, model, parameter set) combination.

    For ``ratio`` commands the ``memory_budget`` field is ignored while
    matching, because the budget is derived at run time from a baseline
    trial and may differ between runs.
    """
    if not check_file_exists(path_prefix, 'errors.json'):
        return False
    logged_errors = read_json(path_prefix, 'errors.json')
    if experiment_name not in logged_errors:
        return False
    if model_name not in logged_errors[experiment_name]:
        return False
    errors = logged_errors[experiment_name][model_name]
    # check_func(err) builds a predicate over one (key, value) pair of
    # specific_params, testing whether the logged record agrees on it.
    check_func = lambda err: lambda kv: err.get(kv[0]) == kv[1]
    if specific_params.get('kind') == 'ratio':
        check_func = lambda err: lambda kv: err.get(kv[0]) == kv[1] if kv[0] != 'memory_budget' else True
    # Match if ANY logged record agrees on ALL parameters.
    return any(map(lambda err: all(map(check_func(err), specific_params.items())), errors))
def get_report_prefix(experiment_name, specific_params, cmd_id=0):
    """Build the filename prefix of a command's report CSV, encoding the
    command id, the experiment kind, and its identifying parameters.

    NOTE(review): any (experiment, kind) pair not listed here (e.g. a
    simrd experiment) falls through to an implicit ``None``, exactly as
    the original did.
    """
    if experiment_name == 'baseline':
        return 'cmd-{}-baseline-{}'.format(cmd_id, specific_params['batch_size'])
    if experiment_name == 'dtr':
        kind = specific_params.get('kind')
        if kind == 'ratio':
            return 'cmd-{}-dtr-ratio-{}'.format(cmd_id, specific_params['ratio'])
        if kind == 'fixed':
            return 'cmd-{}-dtr-fixed-{}-{}'.format(
                cmd_id, specific_params['batch_size'], specific_params['memory_budget'])
        if kind == 'param_sweep':
            return 'cmd-{}-dtr-sweep-{}-{}'.format(
                cmd_id, specific_params['batch_size'], specific_params['memory_budget'])
def run_trials(config_dir, python_cmd,
               experiment_name, model_name,
               specific_params,
               n_inputs,
               path_prefix,
               report_errors=False,
               append_to_csv=False,
               trial_run=False,
               trial_run_outfile='',
               cmd_id=0,
               conf_cnt=0):
    """
    Responsible for recording the time and max memory usage
    from running a model (the user must provide a lambda for
    actually running the model because different kinds of models
    need different kinds of setup and a lambda that generates an
    input for running that model)
    :params:
        trial_run: When set to true, no persistent experiment data will be saved. It is used to
                run a baseline trial and record how much memory is used then set the memory budget
                for `ratio` commands of DTR experiments
        trial_run_outfile: the temporary file that stores the memory usage data of the baseline run
        cmd_id: the command id for current model, starting from 0 by default
        conf_cnt: the id of configuration generated from `unfold_settings`; this is used for tracking
                which exact configuration that caused errors.
    Returns a (success, message) pair.
    """
    try:
        cwd = os.getcwd()
        # Parameters are handed to the child process via a JSON file in the CWD.
        params_file = 'specific_params.json'
        try:
            write_json(cwd, params_file, specific_params)
            if not trial_run:
                filename = prepare_out_file(path_prefix,
                                            '{}-{}.csv'.format(get_report_prefix(experiment_name, specific_params, cmd_id), model_name))
                mode = 'a' if append_to_csv else 'w'
                # Create the CSV (and its header) up front; the child process
                # appends one row per repetition.
                with open(filename, mode, newline='') as csvfile:
                    writer = create_csv_writer(csvfile, specific_params)
                    if not append_to_csv:
                        writer.writeheader()
            else:
                filename = ''
            # Each input is measured in a separate subprocess running
            # run_torch_trial.py from the directory of this file.
            shared_dir = os.path.dirname(os.path.abspath(__file__))
            run_script = os.path.join(shared_dir, 'run_torch_trial.py')
            for i in range(n_inputs):
                try:
                    subprocess.run(
                        [python_cmd, run_script,
                         '--config-dir', config_dir,
                         '--experiment-mode', experiment_name,
                         '--model-name', model_name,
                         '--input-idx', str(i),
                         '--params-file', params_file,
                         '--out-file', filename,
                         '--trial-run', str(trial_run),
                         '--trial-run-outfile', trial_run_outfile
                        ],
                        check=True, timeout=specific_params.get('timeout', 60))
                except (subprocess.CalledProcessError, subprocess.TimeoutExpired) as e:
                    if not report_errors:
                        raise e
                    if trial_run:
                        return (False, 'Baseline failed: {}'.format(render_exception(e)))
                    # Persist the failure so reports can show it, then stop this command.
                    log_error(experiment_name, model_name, specific_params, i, render_exception(e), path_prefix)
                    return (False, 'successfully caught error')
                # Fixed pause between subprocess runs.
                time.sleep(4)
            return (True, 'success')
        finally:
            os.remove(params_file)
    except Exception as e:
        return (False,
                'Encountered exception on ({}, {}, {}):\n'.format(
                    experiment_name, model_name, specific_params) + render_exception(e))
def process_command(command: dict, config_template: dict):
    """Expand *command* into a complete experiment setting.

    Baseline commands only need a batch size, filled in from the template
    when absent.  dtr/simrd commands inherit every template field they do
    not already override.  Any other command type is rejected.

    :param command: the command as written in config.json
    :param config_template: default values for required settings
    """
    result = dict(command)
    cmd_type = result['type']
    if cmd_type == 'baseline':
        if 'batch_size' not in result:
            result['batch_size'] = config_template['batch_size']
        return result
    if cmd_type in ('dtr', 'simrd'):
        for key, default in config_template.items():
            if key not in command:
                result[key] = default
        return result
    raise Exception('Unknown type: {}'.format(cmd_type))
def validate_setting(method, exp_config):
    """Check that *exp_config* carries every field needed to run an
    experiment of kind *method* ('dtr', 'simrd' or 'baseline').

    Returns an (ok, message) pair; unknown methods give
    ``(False, 'unknown kind')``.
    """
    def check_dtr():
        # DTR needs both a batch size and a memory budget.
        for required in ('batch_size', 'memory_budget'):
            if required not in exp_config:
                return False, 'Missing {}'.format(required)
        return True, ''

    def check_simrd():
        kind = exp_config['kind']
        if kind == 'use_log':
            return 'file_path' in exp_config and 'config' in exp_config, ''
        if kind == 'get_log':
            # Producing a log runs a real DTR trial, so the same fields apply.
            return check_dtr()
        raise Exception(f'unknown kind: {kind}')

    if method == 'dtr':
        return check_dtr()
    if method == 'simrd':
        return check_simrd()
    if method == 'baseline':
        return 'batch_size' in exp_config, ''
    return False, 'unknown kind'
def unfold_settings(exp_config):
    """Yield every concrete setting described by *exp_config*.

    List-valued fields are swept: one setting is yielded per element of
    the Cartesian product over them.  For ``ratio`` commands the memory
    budget is scaled by the chosen ratio here, so the baseline trial only
    has to run once.  A config with no list fields is yielded unchanged
    (and uncopied).
    """
    swept_keys = [k for k, v in exp_config.items() if isinstance(v, list)]
    if not swept_keys:
        yield exp_config
        return
    swept_values = [exp_config[k] for k in swept_keys]
    for combo in iter_product(*swept_values):
        # Copy per combination: callers may hold on to earlier settings.
        setting = exp_config.copy()
        setting.update(zip(swept_keys, combo))
        if setting.get('kind') == 'ratio':
            setting['memory_budget'] *= setting['ratio']
        yield setting
def run_baseline(model, exp_config, config, config_dir, output_dir):
    '''
    Run a baseline trial and obtain its memory usage.
    This is used for getting a reference memory usage for
    DTR `ratio` commands.
    Returns (True, memory_used) on success, (False, error_message) otherwise.
    '''
    baseline_config = { 'batch_size' : exp_config['batch_size'],
                        'timeout': exp_config.get('timeout', 60),
                        # only doing a minimal number of runs because we are only getting the memory usage,
                        # which should be identical between runs
                        'n_reps': 10,
                        'extra_params': exp_config.get('extra_params', {})
    }
    if 'input_params' in exp_config:
        baseline_config['input_params'] = exp_config['input_params']
    # Timestamp-named temp file collects the trial-run memory report.
    filename = str(time.time()) + '.json'
    temp_file = prepare_out_file(os.getcwd(), filename)
    success, msg = run_trials(config_dir,
                              python_command('baseline', config),
                              'baseline', model, baseline_config,
                              exp_config.get('n_inputs', config['n_inputs']),
                              output_dir,
                              report_errors=config['report_errors'],
                              append_to_csv=False,
                              trial_run=True,
                              trial_run_outfile=temp_file)
    if not success:
        return False, 'Error while running baseline trial: \n{}'.format(msg)
    mem_usage = read_json(output_dir, temp_file)
    os.remove(temp_file)
    if 'mem' not in mem_usage:
        return False, 'failed to get baseline memory usage'
    return True, mem_usage['mem']
def eval_command(model, exp_config, config, config_dir, output_dir, cmd_id):
    """Run one configured command for *model*.

    For `ratio` commands the reference memory usage is obtained from a
    baseline trial first; then one trial is run for each concrete setting
    produced by `unfold_settings`.  Returns a (success, message) pair.
    """
    try:
        if exp_config.get('kind') == 'ratio':
            success, result = run_baseline(model, exp_config, config, config_dir, output_dir)
            if not success:
                return False, result
            # the actual memory budget calculation is
            # in `unfold_settings`
            exp_config['memory_budget'] = result
        # conf_cnt tracks which unfolded configuration is running, so
        # errors can be traced back to an exact setting.
        conf_cnt = 0
        for combo in unfold_settings(exp_config):
            success, msg = run_trials(config_dir,
                                      python_command(combo['type'], config),
                                      combo['type'], model, combo,
                                      config['n_inputs'],
                                      output_dir,
                                      report_errors=config['report_errors'],
                                      append_to_csv=False,
                                      trial_run=False,
                                      cmd_id=cmd_id,
                                      conf_cnt=conf_cnt)
            if not success:
                return False, msg
            conf_cnt += 1
        return True, 'success'
    except Exception as e:
        return (False,
                'Encountered outer iteration exception:\n' + render_exception(e))
def parse_commands(model, config):
    '''
    Parse the commands configured for *model* and yield processed commands
    that can be used to generate experiment settings.

    :params:
        model: the name of the model
        config: the top-level config

    Yields (success, message, exp_config) triples; exp_config is None on
    failure.
    '''
    if not config['dtr_settings'].get(model):
        yield False, 'No settings for {}'.format(model), None
        # BUG FIX: the original fell through after reporting the error and
        # iterated over the missing (None) command list, raising TypeError;
        # stop the generator here instead.
        return
    default_setting = config['dtr_settings'].get('default')
    model_commands = config['dtr_settings'].get(model)
    if default_setting is not None:
        config_template = default_setting.copy()
    else:
        config_template = dict()
    for command in model_commands:
        exp_config = process_command(command, config_template)
        # ratio commands get a placeholder budget; the real value is
        # computed later from a baseline trial (see unfold_settings).
        if exp_config.get('kind') in ('ratio',):
            exp_config['memory_budget'] = -1.0
        success, msg = validate_setting(exp_config['type'], exp_config)
        if not success:
            yield False, 'Malformat configuration for {}-{}: {}'.format(model, exp_config['type'], msg), None
        else:
            yield True, 'Success', exp_config
def bootstrap_conf_intervals(data, stat, bootstrap_iters=10000, confidence=95, measure='mean'):
    """Estimate confidence intervals for *data* via bootstrap resampling.

    Resamples ``data`` with replacement ``bootstrap_iters`` times,
    summarising each resample with the mean or median, and returns the
    (lower, upper) percentile bounds for the requested confidence level.

    NOTE(review): the ``stat`` argument is accepted but never used; kept
    for call-site compatibility.
    """
    if measure == 'mean':
        summary_stat = np.mean
    elif measure == 'median':
        summary_stat = np.median
    else:
        raise Exception(f'Invalid measure, must be mean or median but received {measure}')
    sample_size = len(data)
    estimates = [
        summary_stat(np.random.choice(data, replace=True, size=sample_size))
        for _ in range(bootstrap_iters)
    ]
    # A C% confidence interval excludes (100-C)/2 % from each tail.
    tail = (100 - confidence) / 2
    return (np.percentile(estimates, tail), np.percentile(estimates, 100 - tail))
def compute_summary_stats(l, bootstrap=False):
    """Summarise the sample *l* as mean/median/std; when *bootstrap* is
    true, also attach bootstrap confidence intervals for mean and median."""
    stats = {'mean': np.mean(l), 'median': np.median(l), 'std': np.std(l)}
    if bootstrap:
        stats['mean_conf'] = bootstrap_conf_intervals(l, stats['mean'], measure='mean')
        stats['median_conf'] = bootstrap_conf_intervals(l, stats['median'], measure='median')
    return stats
def collect_raw_measurements(experiment_name, model, specific_params, path_prefix, cmd_id):
    """
    Reads the raw data for the given experiment name and params and returns a tuple (metrics dictionary, memory budget if applicable, error message if there is no data file).
    The first two fields will be None if there is no data file.
    The metrics dictionary maps input index -> {measured key -> list of values,
    one per repetition}.
    """
    filename = '{}-{}.csv'.format(get_report_prefix(experiment_name, specific_params, cmd_id), model)
    if not check_file_exists(path_prefix, filename):
        return (None, None, 'Data file {} does not exist at {}'.format(filename, path_prefix))
    full_path = os.path.join(path_prefix, filename)
    metrics = {}
    memory_budget = None
    with open(full_path, 'r', newline='') as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            # In case there are commands for the same model
            # that have the same values for all configurations
            idx = int(row['input'])
            measured = {
                key: float(row[key]) for key in MEASURED_KEYS
            }
            # ratio commands compute their budget at run time; recover it
            # from the first data row so callers can report it.
            if memory_budget is None and specific_params.get('kind') == 'ratio':
                memory_budget = float(row['memory_budget'])
            if idx not in metrics.keys():
                metrics[idx] = {
                    key: [] for key in MEASURED_KEYS
                }
            # Accumulate one value per repetition, grouped by input index.
            for key in MEASURED_KEYS:
                metrics[idx][key].append(measured[key])
    return (metrics, memory_budget, 'success')
def compute_slowdowns(exp_times, baseline_times):
    """Element-wise slowdown of *exp_times* relative to *baseline_times*.

    Both sequences must have the same length.
    """
    return [exp / base for exp, base in zip(exp_times, baseline_times)]
def compute_throughputs(batch_size, gpu_times):
    """Per-repetition throughput (samples/second) for the given batch
    size, from GPU times measured in milliseconds."""
    return [batch_size / t * 1000 for t in gpu_times]
def parse_data_file(experiment_name, model, config, specific_params, path_prefix, cmd_id=0, baseline_params=None):
    """
    Given an experiment name, model name, directory, and number of inputs,
    parses the corresponding data file if it exists and computes
    summary statistics for the (wall-clock) time, GPU time, and memory used in that data file for choice of specific settings
    baseline_params: If the command is a ratio command, this will use
                     the baseline to compute the slowdown per data point
                     in order to better measure its distribution.
    Returns None and an error message if it fails
    """
    try:
        report_errors = config['report_errors']
        metrics, budget, msg = collect_raw_measurements(experiment_name, model, specific_params, path_prefix, cmd_id)
        if metrics is None:
            return (None, msg)
        # For ratio commands, record the concrete budget recovered from the
        # data file back into the params (mutates the caller's dict).
        if budget is not None and specific_params.get('kind') == 'ratio':
            specific_params['memory_budget'] = float(budget)
        summary = {
            'specific_params': specific_params
        }
        # in case everything errored out, this ensure that we will have a record of the error
        if report_errors:
            if check_error(experiment_name, model, specific_params, path_prefix):
                summary['summary'] = 'error'
                return summary, 'success'
        # if this was a ratio experiment
        # and we have a baseline available, let's compute
        # the slowdown per data point, head to head
        # and bootstrap confidence intervals
        if (specific_params.get('type') != 'baseline'
                and specific_params.get('kind') == 'ratio'
                and baseline_params is not None):
            baseline_metrics, _, baseline_msg = collect_raw_measurements(baseline_params['type'], model, baseline_params['specific_params'], path_prefix, baseline_params['cmd_id'])
            if baseline_metrics is None:
                return (None, baseline_msg)
            # compute slowdown in metrics
            for i in range(config['n_inputs']):
                dtr_times = metrics[i]['gpu_time']
                baseline_times = baseline_metrics[i]['gpu_time']
                assert len(dtr_times) == len(baseline_times)
                metrics[i]['slowdown'] = compute_slowdowns(dtr_times, baseline_times)
        # Compute throughputs for baseline param_sweep commands
        if specific_params.get('kind') == 'param_sweep' or specific_params.get('type') == 'baseline':
            for i in range(config['n_inputs']):
                metrics[i]['throughput'] = compute_throughputs(specific_params['batch_size'], metrics[i]['gpu_time'])
        # One summary dict per input; time-like metrics (and any derived
        # slowdown/throughput series) also get bootstrap confidence intervals.
        summary_stats = []
        for (_, stat) in metrics.items():
            summary_dict = {
                key: compute_summary_stats(stat[key], bootstrap=('time' in key))
                for key in MEASURED_KEYS
            }
            if 'slowdown' in stat:
                summary_dict['slowdown'] = compute_summary_stats(stat['slowdown'], bootstrap=True)
            if 'throughput' in stat:
                summary_dict['throughput'] = compute_summary_stats(stat['throughput'], bootstrap=True)
            summary_stats.append(summary_dict)
        summary['summary'] = summary_stats
        return (summary, 'success')
    except Exception as e:
        return (None, 'Encountered exception on ({}, {}): '.format(experiment_name, model) + render_exception(e))
| 4,478 | 0 | 214 |
7060bd715ab72e7b9f69d15367cada6ec0d11942 | 2,857 | py | Python | sac_release/scripts/plot_traces.py | anonymouscode114/iclr2021_rlreg | 142566f4316bfed9e3858b92f87a792b7c7b8b1b | [
"MIT"
] | 1 | 2021-05-17T16:40:59.000Z | 2021-05-17T16:40:59.000Z | sac_release/scripts/plot_traces.py | anonymouscode114/iclr2021_rlreg | 142566f4316bfed9e3858b92f87a792b7c7b8b1b | [
"MIT"
] | null | null | null | sac_release/scripts/plot_traces.py | anonymouscode114/iclr2021_rlreg | 142566f4316bfed9e3858b92f87a792b7c7b8b1b | [
"MIT"
] | null | null | null | import matplotlib
# Plot 2-D traces of rollouts for every learned skill of a saved SAC
# hierarchical policy, one colour per skill, and save the figure next to
# the snapshot file.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import argparse
import numpy as np
import joblib
import tensorflow as tf
import os
from sac.misc import utils
from sac.policies.hierarchical_policy import FixedOptionPolicy
from sac.misc.sampler import rollouts
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('file', type=str, help='Path to the snapshot file.')
    parser.add_argument('--max-path-length', '-l', type=int, default=100)
    parser.add_argument('--n_paths', type=int, default=1)
    # Which two observation (or qpos/action) dimensions to plot.
    parser.add_argument('--dim_0', type=int, default=0)
    parser.add_argument('--dim_1', type=int, default=1)
    parser.add_argument('--use_qpos', type=bool, default=False)
    parser.add_argument('--use_action', type=bool, default=False)
    parser.add_argument('--deterministic', '-d', dest='deterministic',
                        action='store_true')
    parser.add_argument('--no-deterministic', '-nd', dest='deterministic',
                        action='store_false')
    parser.set_defaults(deterministic=True)
    args = parser.parse_args()
    # Output image is named after the snapshot plus the plotted dimensions.
    filename = '{}_{}_{}_trace.png'.format(os.path.splitext(args.file)[0],
                                           args.dim_0, args.dim_1)
    with tf.Session() as sess:
        data = joblib.load(args.file)
        policy = data['policy']
        env = data['env']
        # The policy observes env state + a one-hot skill id; the size
        # difference between the two spaces gives the number of skills.
        num_skills = data['policy'].observation_space.flat_dim - data['env'].spec.observation_space.flat_dim
        plt.figure(figsize=(6, 6))
        palette = sns.color_palette('hls', num_skills)
        with policy.deterministic(args.deterministic):
            for z in range(num_skills):
                # Condition the policy on a fixed skill z for whole rollouts.
                fixed_z_policy = FixedOptionPolicy(policy, num_skills, z)
                for path_index in range(args.n_paths):
                    obs = env.reset()
                    if args.use_qpos:
                        qpos = env.wrapped_env.env.model.data.qpos[:, 0]
                        obs_vec = [qpos]
                    else:
                        obs_vec = [obs]
                    for t in range(args.max_path_length):
                        action, _ = fixed_z_policy.get_action(obs)
                        (obs, _, _, _) = env.step(action)
                        # Record qpos, the action, or the raw observation,
                        # depending on the flags.
                        if args.use_qpos:
                            qpos = env.wrapped_env.env.model.data.qpos[:, 0]
                            obs_vec.append(qpos)
                        elif args.use_action:
                            obs_vec.append(action)
                        else:
                            obs_vec.append(obs)
                    obs_vec = np.array(obs_vec)
                    x = obs_vec[:, args.dim_0]
                    y = obs_vec[:, args.dim_1]
                    plt.plot(x, y, c=palette[z])
        plt.savefig(filename)
        plt.close()
| 40.239437 | 108 | 0.561078 | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import argparse
import numpy as np
import joblib
import tensorflow as tf
import os
from sac.misc import utils
from sac.policies.hierarchical_policy import FixedOptionPolicy
from sac.misc.sampler import rollouts
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('file', type=str, help='Path to the snapshot file.')
parser.add_argument('--max-path-length', '-l', type=int, default=100)
parser.add_argument('--n_paths', type=int, default=1)
parser.add_argument('--dim_0', type=int, default=0)
parser.add_argument('--dim_1', type=int, default=1)
parser.add_argument('--use_qpos', type=bool, default=False)
parser.add_argument('--use_action', type=bool, default=False)
parser.add_argument('--deterministic', '-d', dest='deterministic',
action='store_true')
parser.add_argument('--no-deterministic', '-nd', dest='deterministic',
action='store_false')
parser.set_defaults(deterministic=True)
args = parser.parse_args()
filename = '{}_{}_{}_trace.png'.format(os.path.splitext(args.file)[0],
args.dim_0, args.dim_1)
with tf.Session() as sess:
data = joblib.load(args.file)
policy = data['policy']
env = data['env']
num_skills = data['policy'].observation_space.flat_dim - data['env'].spec.observation_space.flat_dim
plt.figure(figsize=(6, 6))
palette = sns.color_palette('hls', num_skills)
with policy.deterministic(args.deterministic):
for z in range(num_skills):
fixed_z_policy = FixedOptionPolicy(policy, num_skills, z)
for path_index in range(args.n_paths):
obs = env.reset()
if args.use_qpos:
qpos = env.wrapped_env.env.model.data.qpos[:, 0]
obs_vec = [qpos]
else:
obs_vec = [obs]
for t in range(args.max_path_length):
action, _ = fixed_z_policy.get_action(obs)
(obs, _, _, _) = env.step(action)
if args.use_qpos:
qpos = env.wrapped_env.env.model.data.qpos[:, 0]
obs_vec.append(qpos)
elif args.use_action:
obs_vec.append(action)
else:
obs_vec.append(obs)
obs_vec = np.array(obs_vec)
x = obs_vec[:, args.dim_0]
y = obs_vec[:, args.dim_1]
plt.plot(x, y, c=palette[z])
plt.savefig(filename)
plt.close()
| 0 | 0 | 0 |
1b35a6a437b8cb176fd083149154bbbac97c60bb | 4,657 | py | Python | src/pyams_layer/interfaces.py | Py-AMS/pyams-layer | 815652091bb137d3b6bf48c476a17e7ae9c4bbe9 | [
"ZPL-2.1"
] | null | null | null | src/pyams_layer/interfaces.py | Py-AMS/pyams-layer | 815652091bb137d3b6bf48c476a17e7ae9c4bbe9 | [
"ZPL-2.1"
] | null | null | null | src/pyams_layer/interfaces.py | Py-AMS/pyams-layer | 815652091bb137d3b6bf48c476a17e7ae9c4bbe9 | [
"ZPL-2.1"
] | null | null | null | #
# Copyright (c) 2015-2019 Thierry Florac <tflorac AT ulthar.net>
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
"""PyAMS_layer.interfaces module
This module provides all layers and skins related interfaces.
"""
from pyramid.interfaces import IRequest
from zope.configuration.fields import GlobalInterface
from zope.interface import Attribute, Interface, implementer
from zope.interface.interfaces import IObjectEvent, ObjectEvent
from zope.schema import Bool, Choice, TextLine
from pyams_file.schema import FileField
__docformat__ = 'restructuredtext'
from pyams_layer import _
MANAGE_SKIN_PERMISSION = 'pyams.ManageSkin'
'''Permission required to manage rendering skin'''
class IResources(Interface):
    """Get list of CSS and Javascript resources associated with given context"""
    resources = Attribute("Resources to include")
    """Iterable of resources to include into page
    The best way to handle resources is to use Fanstatic to automatically
    include CSS and Javascript tags.
    """
# Name of the default skin shipped with PyAMS
PYAMS_BASE_SKIN_NAME = 'PyAMS base skin'
class IBaseLayer(IRequest):
    """Base layer marker interface"""
class IFormLayer(Interface):
    """Custom layer for forms management"""
class IPyAMSLayer(IBaseLayer, IFormLayer):
    """PyAMS default layer"""
class IPyAMSUserLayer(IPyAMSLayer):
    """PyAMS custom user layer
    This layer is the base for all custom skins.
    Any component should provide a look and feel for this layer.
    """
# Vocabulary names used by the skin selection fields below
BASE_SKINS_VOCABULARY_NAME = 'pyams_layer.skins'
USER_SKINS_VOCABULARY_NAME = 'pyams_layer.skin.user'
class ISkin(Interface):
    """Skin interface
    Skins are registered as utilities implementing this interface
    and defining request layer as attribute.
    """
    # Human-readable skin name
    label = TextLine(title="Skin name")
    # Marker interface used to tag the request when this skin is applied
    layer = GlobalInterface(title="Request layer",
                            description="This interface will be used to tag request layer",
                            required=True)
class ISkinChangedEvent(IObjectEvent):
    """Skin changed event"""
@implementer(ISkinChangedEvent)
class SkinChangedEvent(ObjectEvent):
    """Request skin changed event"""
class ISkinnable(Interface):
    """Skinnable content interface"""
    can_inherit_skin = Attribute("Check if skin can be inherited")
    inherit_skin = Bool(title=_("Inherit parent skin?"),
                        description=_("Should we reuse parent skin?"),
                        required=True,
                        default=False)
    # Mirror flag of `inherit_skin`; presumably kept in sync by the
    # implementation -- TODO confirm in the adapter/form code
    override_skin = Bool(title=_("Don't inherit parent skin?"),
                         description=_("Should we override parent skin?"),
                         required=True,
                         default=True)
    skin_parent = Attribute("Skin parent (local or inherited)")
    skin = Choice(title=_("Custom graphic theme"),
                  description=_("This theme will be used to handle graphic design (colors and "
                                "images)"),
                  vocabulary=USER_SKINS_VOCABULARY_NAME,
                  required=False)
    # NOTE(review): interface method declared with an explicit `self`
    # argument, unlike the usual zope.interface convention -- verify callers
    def get_skin(self, request=None):
        """Get skin matching this content"""
    container_class = TextLine(title=_("Container class"),
                               description=_("CSS class given to main page container element"),
                               required=False)
    custom_stylesheet = FileField(title=_("Custom stylesheet"),
                                  description=_("This custom stylesheet will be used to override "
                                                "selected theme styles"),
                                  required=False)
    editor_stylesheet = FileField(title=_("Editor stylesheet"),
                                  description=_("Styles defined into this stylesheet will be "
                                                "available into HTML editor"),
                                  required=False)
    custom_script = FileField(title=_("Custom script"),
                              description=_("This custom javascript file will be used to add "
                                            "dynamic features to selected theme"),
                              required=False)
class IUserSkinnable(ISkinnable):
    """User skinnable content interface"""
| 32.117241 | 98 | 0.640971 | #
# Copyright (c) 2015-2019 Thierry Florac <tflorac AT ulthar.net>
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
"""PyAMS_layer.interfaces module
This module provides all layers and skins related interfaces.
"""
from pyramid.interfaces import IRequest
from zope.configuration.fields import GlobalInterface
from zope.interface import Attribute, Interface, implementer
from zope.interface.interfaces import IObjectEvent, ObjectEvent
from zope.schema import Bool, Choice, TextLine
from pyams_file.schema import FileField
__docformat__ = 'restructuredtext'
from pyams_layer import _
MANAGE_SKIN_PERMISSION = 'pyams.ManageSkin'
'''Permission required to manage rendering skin'''
class IResources(Interface):
"""Get list of CSS and Javascript resources associated with given context"""
resources = Attribute("Resources to include")
"""Iterable of resources to include into page
The best way to handle resources is to use Fanstatic to automatically
include CSS and Javascript tags.
"""
PYAMS_BASE_SKIN_NAME = 'PyAMS base skin'
class IBaseLayer(IRequest):
"""Base layer marker interface"""
class IFormLayer(Interface):
"""Custom layer for forms management"""
class IPyAMSLayer(IBaseLayer, IFormLayer):
"""PyAMS default layer"""
class IPyAMSUserLayer(IPyAMSLayer):
"""PyAMS custom user layer
This layer is the base for all custom skins.
Any component should provide a look and feel for this layer.
"""
BASE_SKINS_VOCABULARY_NAME = 'pyams_layer.skins'
USER_SKINS_VOCABULARY_NAME = 'pyams_layer.skin.user'
class ISkin(Interface):
"""Skin interface
Skins are registered as utilities implementing this interface
and defining request layer as attribute.
"""
label = TextLine(title="Skin name")
layer = GlobalInterface(title="Request layer",
description="This interface will be used to tag request layer",
required=True)
class ISkinChangedEvent(IObjectEvent):
"""Skin changed event"""
@implementer(ISkinChangedEvent)
class SkinChangedEvent(ObjectEvent):
"""Request skin changed event"""
class ISkinnable(Interface):
"""Skinnable content interface"""
can_inherit_skin = Attribute("Check if skin can be inherited")
inherit_skin = Bool(title=_("Inherit parent skin?"),
description=_("Should we reuse parent skin?"),
required=True,
default=False)
override_skin = Bool(title=_("Don't inherit parent skin?"),
description=_("Should we override parent skin?"),
required=True,
default=True)
skin_parent = Attribute("Skin parent (local or inherited)")
skin = Choice(title=_("Custom graphic theme"),
description=_("This theme will be used to handle graphic design (colors and "
"images)"),
vocabulary=USER_SKINS_VOCABULARY_NAME,
required=False)
def get_skin(self, request=None):
"""Get skin matching this content"""
container_class = TextLine(title=_("Container class"),
description=_("CSS class given to main page container element"),
required=False)
custom_stylesheet = FileField(title=_("Custom stylesheet"),
description=_("This custom stylesheet will be used to override "
"selected theme styles"),
required=False)
editor_stylesheet = FileField(title=_("Editor stylesheet"),
description=_("Styles defined into this stylesheet will be "
"available into HTML editor"),
required=False)
custom_script = FileField(title=_("Custom script"),
description=_("This custom javascript file will be used to add "
"dynamic features to selected theme"),
required=False)
class IUserSkinnable(ISkinnable):
"""User skinnable content interface"""
| 0 | 0 | 0 |
cb86014120f9b3abdecf38c0897bbbd5da882129 | 383 | py | Python | juno/resources/routes/plan_routes.py | leogregianin/juno-python | 0be2b70516b0dde713ff36cdb40888f06cc538f5 | [
"MIT"
] | 2 | 2022-03-25T21:08:46.000Z | 2022-03-31T21:10:17.000Z | juno/resources/routes/plan_routes.py | leogregianin/juno-python | 0be2b70516b0dde713ff36cdb40888f06cc538f5 | [
"MIT"
] | null | null | null | juno/resources/routes/plan_routes.py | leogregianin/juno-python | 0be2b70516b0dde713ff36cdb40888f06cc538f5 | [
"MIT"
] | null | null | null | from ..handler_request import get_resource_url
| 21.277778 | 53 | 0.754569 | from ..handler_request import get_resource_url
def get_base_url():
    """Return the base URL of the Juno plans resource."""
    return f"{get_resource_url()}/plans"
def get_specific_plan_by_id_url(plan_id):
    """Return the URL of the single plan identified by *plan_id*."""
    return f"{get_base_url()}/{plan_id}"
def get_deactivation_plan_url(plan_id):
    """Return the URL used to deactivate the plan *plan_id*."""
    return f"{get_base_url()}/{plan_id}/deactivation"
def get_activation_plan_url(plan_id):
    """Return the URL used to activate the plan *plan_id*."""
    return f"{get_base_url()}/{plan_id}/activation"
| 240 | 0 | 92 |
8517208114b4255485d8d12bb384924a3f34b6b4 | 1,080 | py | Python | ex045.py | natancordeiro/python | a4b82847640bd7d2f8ed9aff83a9afabe57bc37d | [
"MIT"
] | null | null | null | ex045.py | natancordeiro/python | a4b82847640bd7d2f8ed9aff83a9afabe57bc37d | [
"MIT"
] | null | null | null | ex045.py | natancordeiro/python | a4b82847640bd7d2f8ed9aff83a9afabe57bc37d | [
"MIT"
] | null | null | null | from random import randint
# Jokenpo (rock-paper-scissors): the computer draws a random move and the
# user types theirs; index i of `itens` names move i.
itens = ('Pedra', 'Papel', 'Tesoura')
pc = randint(0, 2)  # computer's move: 0, 1 or 2
print('''Suas opções:
[ 0 ] PEDRA
[ 1 ] PAPEL
[ 2 ] TESOURA ''')
op = int(input('Qual é a sua jogada: '))
print('-=' * 11)
print('Computador jogou {}'.format(itens[pc]))
# Only index `itens` with a valid move: previously an option > 2 raised
# IndexError here (and a negative one indexed from the end) before the
# "JOGADA INVÁLIDA" message below could ever be reached.
if 0 <= op <= 2:
    print('jogador jogou {}'.format(itens[op]))
print('-=' * 11)
# One comparison block per possible computer move.
if pc == 0:  # computer played Pedra
    if op == 0:
        print('\033[33mEMPATE!')
    elif op == 1:
        print('\033[32mJOGADOR VENCE!')
    elif op == 2:
        print('\033[31mCOMPUTADOR VENCE!')
    else:
        print('\033[31mJOGADA INVÁLIDA!')
elif pc == 1:  # computer played Papel
    if op == 0:
        print('\033[31mCOMPUTADOR VENCE!')
    elif op == 1:
        print('\033[33mEMPATE!')
    elif op == 2:
        print('\033[32mJOGADOR VENCE!')
    else:
        print('\033[31mJOGADA INVÁLIDA!')
elif pc == 2:  # computer played Tesoura
    if op == 0:
        print('\033[32mJOGADOR VENCE!')
    elif op == 1:
        print('\033[31mCOMPUTADOR VENCE!')
    elif op == 2:
        print('\033[33mEMPATE!')
    else:
        # Fixed typo: message previously read 'JOGADA INVÁLDA!'
        print('\033[31mJOGADA INVÁLIDA!')
else:
    print('\033[31mNÚMERO INVÁLIDO! Tente novamente....')
| 25.714286 | 57 | 0.551852 | from random import randint
itens = ('Pedra', 'Papel', 'Tesoura')
pc = randint(0, 2)
print('''Suas opções:
[ 0 ] PEDRA
[ 1 ] PAPEL
[ 2 ] TESOURA ''')
op = int(input('Qual é a sua jogada: '))
print('-=' * 11)
print('Computador jogou {}'.format(itens[pc]))
print('jogador jogou {}'.format(itens[op]))
print('-=' * 11)
if pc == 0:
if op == 0:
print('\033[33mEMPATE!')
elif op == 1:
print('\033[32mJOGADOR VENCE!')
elif op == 2:
print('\033[31mCOMPUTADOR VENCE!')
else:
print('\033[31mJOGADA INVÁLIDA!')
elif pc == 1:
if op == 0:
print('\033[31mCOMPUTADOR VENCE!')
elif op == 1:
print('\033[33mEMPATE!')
elif op == 2:
print('\033[32mJOGADOR VENCE!')
else:
print('\033[31mJOGADA INVÁLIDA!')
elif pc == 2:
if op == 0:
print('\033[32mJOGADOR VENCE!')
elif op == 1:
print('\033[31mCOMPUTADOR VENCE!')
elif op == 2:
print('\033[33mEMPATE!')
else:
print('\033[31mJOGADA INVÁLDA!')
else:
print('\033[31mNÚMERO INVÁLIDO! Tente novamente....')
| 0 | 0 | 0 |
1cb755c1b899160b38a043a452f7aa868561f139 | 7,609 | py | Python | biostar/pdf_layer/migrations/0003_auto__add_unique_publication_cluster_id.py | AshwinParanjape/mugap | 753c61c537bcc36ed6d6ccf593fb0d91015275e4 | [
"MIT"
] | null | null | null | biostar/pdf_layer/migrations/0003_auto__add_unique_publication_cluster_id.py | AshwinParanjape/mugap | 753c61c537bcc36ed6d6ccf593fb0d91015275e4 | [
"MIT"
] | 6 | 2020-02-11T21:45:21.000Z | 2021-06-01T21:41:38.000Z | biostar/pdf_layer/migrations/0003_auto__add_unique_publication_cluster_id.py | AshwinParanjape/mugap | 753c61c537bcc36ed6d6ccf593fb0d91015275e4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
| 71.783019 | 168 | 0.554212 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add the unique constraint on cluster_id."""
        # Adding unique constraint on 'Publication', fields ['cluster_id']
        db.create_unique(u'pdf_layer_publication', ['cluster_id'])
    def backwards(self, orm):
        """Revert the migration: drop the unique constraint on cluster_id."""
        # Removing unique constraint on 'Publication', fields ['cluster_id']
        db.delete_unique(u'pdf_layer_publication', ['cluster_id'])
models = {
u'pdf_layer.annotation': {
'Meta': {'object_name': 'Annotation'},
'annotated_text': ('django.db.models.fields.TextField', [], {}),
'cluster': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pdf_layer.Publication']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['posts.Post']"}),
'seralized_version': ('django.db.models.fields.TextField', [], {})
},
u'pdf_layer.publication': {
'Meta': {'object_name': 'Publication'},
'cluster_id': ('django.db.models.fields.TextField', [], {'unique': 'True', 'max_length': '50'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num_citations': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'num_versions': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pdf_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'url': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'url_citations': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'url_pdf': ('django.db.models.fields.TextField', [], {}),
'url_versions': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
u'posts.post': {
'Meta': {'object_name': 'Post'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.User']"}),
'book_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'changed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'comment_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'default': "u''"}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'has_accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'html': ('django.db.models.fields.TextField', [], {'default': "u''"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastedit_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'lastedit_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'editor'", 'to': u"orm['users.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'to': u"orm['posts.Post']"}),
'rank': ('django.db.models.fields.FloatField', [], {'default': '0', 'blank': 'True'}),
'reply_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'root': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'descendants'", 'null': 'True', 'to': u"orm['posts.Post']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']", 'null': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'sticky': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'subs_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tag_set': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['posts.Tag']", 'symmetrical': 'False', 'blank': 'True'}),
'tag_val': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'blank': 'True'}),
'thread_score': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'type': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'view_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'vote_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True', 'blank': 'True'})
},
u'posts.tag': {
'Meta': {'object_name': 'Tag'},
'count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {'max_length': '50', 'db_index': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'users.user': {
'Meta': {'object_name': 'User'},
'activity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'badges': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'flair': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '15'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
'new_messages': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']", 'null': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
}
}
complete_apps = ['pdf_layer'] | 293 | 7,128 | 23 |
3312115a01e53b52755173edd27063a999926ad7 | 660 | py | Python | examples/dagster_examples_tests/pyspark_pagerank_tests/test_original.py | shahvineet98/dagster | 2471d39c52f660e23e8c0d8e8ded873ddc3df036 | [
"Apache-2.0"
] | 3 | 2020-09-09T04:10:23.000Z | 2021-11-08T02:10:42.000Z | examples/dagster_examples_tests/pyspark_pagerank_tests/test_original.py | shahvineet98/dagster | 2471d39c52f660e23e8c0d8e8ded873ddc3df036 | [
"Apache-2.0"
] | 2 | 2021-05-11T13:36:27.000Z | 2021-09-03T01:53:11.000Z | examples/dagster_examples_tests/pyspark_pagerank_tests/test_original.py | shahvineet98/dagster | 2471d39c52f660e23e8c0d8e8ded873ddc3df036 | [
"Apache-2.0"
] | 1 | 2021-02-21T12:16:47.000Z | 2021-02-21T12:16:47.000Z | import os
from dagster_examples.pyspark_pagerank.original import (
computeContribs,
execute_pagerank,
parseNeighbors,
)
from .util import checks_for_helper_functions
| 25.384615 | 72 | 0.721212 | import os
from dagster_examples.pyspark_pagerank.original import (
computeContribs,
execute_pagerank,
parseNeighbors,
)
from .util import checks_for_helper_functions
def test_helpers():
    """Run the shared sanity checks on the pagerank helper functions."""
    checks_for_helper_functions(computeContribs, parseNeighbors)
def test_execute_pagerank():
    """Run two pagerank iterations on the bundled fixture file and compare ranks."""
    cwd = os.path.dirname(__file__)
    result = execute_pagerank(os.path.join(cwd, 'pagerank_data.txt'), 2)
    # Exact float literals expected after 2 iterations on this fixture;
    # presumably stable because the iteration order is deterministic.
    assert set(result) == {
        ('anotherlessimportantsite.com', 0.9149999999999999),
        ('whatdoesitallmeananyways.com', 0.9149999999999999),
        ('importantsite.com', 1.255),
        ('alessimportantsite.com', 0.9149999999999999),
    }
| 432 | 0 | 46 |
753157c552d6a43cec2d453e8e22e61d47e8c0b1 | 3,587 | py | Python | pygtfs/schedule.py | olalid/pygtfs | 2f097effe9bb31ca9699d95eabf39d1496ea5c10 | [
"MIT"
] | 49 | 2015-01-12T17:49:25.000Z | 2022-03-27T10:48:01.000Z | pygtfs/schedule.py | olalid/pygtfs | 2f097effe9bb31ca9699d95eabf39d1496ea5c10 | [
"MIT"
] | 43 | 2015-07-15T06:32:54.000Z | 2022-02-19T17:42:04.000Z | pygtfs/schedule.py | olalid/pygtfs | 2f097effe9bb31ca9699d95eabf39d1496ea5c10 | [
"MIT"
] | 46 | 2015-02-13T18:33:08.000Z | 2022-02-19T17:16:46.000Z | from __future__ import (division, absolute_import, print_function,
unicode_literals)
import sqlalchemy
import sqlalchemy.orm
from .gtfs_entities import gtfs_all, Feed, Base
class Schedule:
"""Represents the full database.
The schedule is the most important object in pygtfs. It represents the
entire dataset. Most of the properties come straight from the gtfs
reference. Two of them were renamed: calendar is called `services`, and
calendar_dates `service_exceptions`. One addition is the `feeds` table,
which is here to support more than one feed in a database.
Each of the properties is a list created upon access by sqlalchemy. Then,
each element of the list as attributes following the gtfs reference. In
addition, if they are related to another table, this can also be accessed
by attribute.
:param db_conection: Either a sqlalchemy database url or a filename to be used with sqlite.
"""
def drop_feed(self, feed_id):
""" Delete a feed from a database by feed id"""
# the following does not cascade unfortunatly.
# self.session.query(Feed).filter(Feed.feed_id == feed_id).delete()
feed = self.session.query(Feed).get(feed_id)
self.session.delete(feed)
self.session.commit()
for entity in (gtfs_all + [Feed]):
entity_doc = "A list of :py:class:`pygtfs.gtfs_entities.{0}` objects".format(entity.__name__)
entity_raw_doc = ("A :py:class:`sqlalchemy.orm.Query` object to fetch "
":py:class:`pygtfs.gtfs_entities.{0}` objects"
.format(entity.__name__))
entity_by_id_doc = "A list of :py:class:`pygtfs.gtfs_entities.{0}` objects with matching id".format(entity.__name__)
setattr(Schedule, entity._plural_name_, _meta_query_all(entity, entity_doc))
setattr(Schedule, entity._plural_name_ + "_query",
_meta_query_raw(entity, entity_raw_doc))
if hasattr(entity, 'id'):
setattr(Schedule, entity._plural_name_ + "_by_id", _meta_query_by_id(entity, entity_by_id_doc))
| 38.98913 | 120 | 0.686367 | from __future__ import (division, absolute_import, print_function,
unicode_literals)
import sqlalchemy
import sqlalchemy.orm
from .gtfs_entities import gtfs_all, Feed, Base
class Schedule:
    """Represents the full database.
    The schedule is the most important object in pygtfs. It represents the
    entire dataset. Most of the properties come straight from the gtfs
    reference. Two of them were renamed: calendar is called `services`, and
    calendar_dates `service_exceptions`. One addition is the `feeds` table,
    which is here to support more than one feed in a database.
    Each of the properties is a list created upon access by sqlalchemy. Then,
    each element of the list as attributes following the gtfs reference. In
    addition, if they are related to another table, this can also be accessed
    by attribute.
    :param db_conection: Either a sqlalchemy database url or a filename to be used with sqlite.
    """
    def __init__(self, db_connection):
        # A bare file path (no '://') is promoted to an sqlite URL.
        self.db_connection = db_connection
        self.db_filename = None
        if '://' not in db_connection:
            self.db_connection = 'sqlite:///%s' % self.db_connection
        if self.db_connection.startswith('sqlite'):
            # NOTE(review): stores the full 'sqlite:///...' URL, not a plain
            # file path -- confirm against users of `db_filename`.
            self.db_filename = self.db_connection
        self.engine = sqlalchemy.create_engine(self.db_connection)
        Session = sqlalchemy.orm.sessionmaker(bind=self.engine)
        self.session = Session()
        # Create all tables declared on Base if they do not exist yet.
        Base.metadata.create_all(self.engine)
    def drop_feed(self, feed_id):
        """ Delete a feed from a database by feed id"""
        # the following does not cascade unfortunately.
        # self.session.query(Feed).filter(Feed.feed_id == feed_id).delete()
        feed = self.session.query(Feed).get(feed_id)
        self.session.delete(feed)
        self.session.commit()
def _meta_query_all(entity, docstring=None):
def _query_all(instance_self):
""" A list generated on access """
return instance_self.session.query(entity).all()
if docstring is not None:
_query_all.__doc__ = docstring
return property(_query_all)
def _meta_query_by_id(entity, docstring=None):
def _query_by_id(self, id):
""" A function that returns a list of entries with matching ids """
return self.session.query(entity).filter(entity.id == id).all()
if docstring is not None:
_query_by_id.__doc__ = docstring
return _query_by_id
def _meta_query_raw(entity, docstring=None):
def _query_raw(instance_self):
"""
A raw sqlalchemy query object that the user can then manipulate
manually
"""
return instance_self.session.query(entity)
if docstring is not None:
_query_raw.__doc__ = docstring
return property(_query_raw)
# Attach generated accessors for every GTFS entity (plus Feed) to the
# Schedule class: `<plural>` (list property), `<plural>_query` (raw query
# property) and, when the entity has an `id` column, `<plural>_by_id`.
for entity in (gtfs_all + [Feed]):
    entity_doc = "A list of :py:class:`pygtfs.gtfs_entities.{0}` objects".format(entity.__name__)
    entity_raw_doc = ("A :py:class:`sqlalchemy.orm.Query` object to fetch "
                      ":py:class:`pygtfs.gtfs_entities.{0}` objects"
                      .format(entity.__name__))
    entity_by_id_doc = "A list of :py:class:`pygtfs.gtfs_entities.{0}` objects with matching id".format(entity.__name__)
    setattr(Schedule, entity._plural_name_, _meta_query_all(entity, entity_doc))
    setattr(Schedule, entity._plural_name_ + "_query",
            _meta_query_raw(entity, entity_raw_doc))
    # Only entities exposing an `id` attribute get the by-id lookup helper.
    if hasattr(entity, 'id'):
        setattr(Schedule, entity._plural_name_ + "_by_id", _meta_query_by_id(entity, entity_by_id_doc))
| 1,400 | 0 | 96 |
c2a6137a992f38d7a2604a9015b1f9156504fe6a | 644 | py | Python | hexa/plugins/connector_airflow/migrations/0012_remove_airflow_run_message.py | qgerome/openhexa-app | 8c9377b2ad972121d8e9575f5d52420212b52ed4 | [
"MIT"
] | 4 | 2021-07-19T12:53:21.000Z | 2022-01-26T17:45:02.000Z | hexa/plugins/connector_airflow/migrations/0012_remove_airflow_run_message.py | qgerome/openhexa-app | 8c9377b2ad972121d8e9575f5d52420212b52ed4 | [
"MIT"
] | 20 | 2021-05-17T12:27:06.000Z | 2022-03-30T11:35:26.000Z | hexa/plugins/connector_airflow/migrations/0012_remove_airflow_run_message.py | qgerome/openhexa-app | 8c9377b2ad972121d8e9575f5d52420212b52ed4 | [
"MIT"
] | 2 | 2021-09-07T04:19:59.000Z | 2022-02-08T15:33:29.000Z | # Generated by Django 3.2.7 on 2021-10-11 12:43
from django.db import migrations
| 24.769231 | 73 | 0.569876 | # Generated by Django 3.2.7 on 2021-10-11 12:43
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: refresh DAGConfig/DAGRun model options and drop DAGRun.message."""
    dependencies = [
        ("connector_airflow", "0011_alter_dagrun_options"),
    ]
    operations = [
        migrations.AlterModelOptions(
            name="dagconfig",
            options={"ordering": ["name"], "verbose_name": "DAG config"},
        ),
        migrations.AlterModelOptions(
            name="dagrun",
            # NOTE(review): "-execution_date" appears twice; presumably one
            # entry was meant to be a different field. Verify against the
            # DAGRun model's Meta.ordering before changing migration history.
            options={"ordering": ("-execution_date", "-execution_date")},
        ),
        migrations.RemoveField(
            model_name="dagrun",
            name="message",
        ),
    ]
| 0 | 538 | 23 |
bae54090a3727a76745b4b316ac38f161ed1caa4 | 3,683 | py | Python | matrix/plugin.program.openwizard/resources/libs/test.py | nzmodbox/repo.modbox | 5a5d77089f94f2fdde755ccc2e5f93e81f54f261 | [
"Apache-2.0"
] | null | null | null | matrix/plugin.program.openwizard/resources/libs/test.py | nzmodbox/repo.modbox | 5a5d77089f94f2fdde755ccc2e5f93e81f54f261 | [
"Apache-2.0"
] | null | null | null | matrix/plugin.program.openwizard/resources/libs/test.py | nzmodbox/repo.modbox | 5a5d77089f94f2fdde755ccc2e5f93e81f54f261 | [
"Apache-2.0"
] | null | null | null | ################################################################################
# Copyright (C) 2019 drinfernoo #
# #
# This Program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2, or (at your option) #
# any later version. #
# #
# This Program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with XBMC; see the file COPYING. If not, write to #
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. #
# http://www.gnu.org/copyleft/gpl.html #
################################################################################
import xbmc
try: # Python 3
import zipfile
except ImportError: # Python 2
from resources.libs import zipfile
from resources.libs.common.config import CONFIG
| 37.20202 | 184 | 0.553353 | ################################################################################
# Copyright (C) 2019 drinfernoo #
# #
# This Program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2, or (at your option) #
# any later version. #
# #
# This Program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with XBMC; see the file COPYING. If not, write to #
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. #
# http://www.gnu.org/copyleft/gpl.html #
################################################################################
import xbmc
try: # Python 3
import zipfile
except ImportError: # Python 2
from resources.libs import zipfile
from resources.libs.common.config import CONFIG
def str_test(teststr):
    """Return True if 'test' appears as a space-separated word in *teststr*.

    Matching is case-insensitive; substrings such as 'testing' or
    'contest' do not count.
    """
    # Collapsed the original if/else returning True/False into a single
    # boolean expression; behavior is unchanged.
    return 'test' in teststr.lower().split(' ')
def test_theme(path):
    """Return True when the zip at *path* contains a '/settings.xml' member.

    Every member name is logged while scanning; the scan stops at the
    first match.
    """
    from resources.libs.common import logging
    archive = zipfile.ZipFile(path, allowZip64=True)
    result = False
    for member in archive.infolist():
        logging.log(str(member.filename))
        if '/settings.xml' in member.filename:
            result = True
            break
    return result
def test_gui(path):
zfile = zipfile.ZipFile(path, allowZip64=True)
for item in zfile.infolist():
if '/guisettings.xml' in item.filename:
return True
return False
def test_notify():
    """Preview the wizard's notification window from the configured URL.

    Fetches CONFIG.NOTIFICATION, splits it into an id/message pair and
    shows the notification dialog in test mode; parse errors are logged
    and bad URLs reported via a toast instead of raising.
    """
    from resources.libs.common import logging
    from resources.libs.common import tools
    from resources.libs.gui import window
    # check=True -- presumably only validates that the URL is reachable;
    # see tools.open_url for the exact contract.
    response = tools.open_url(CONFIG.NOTIFICATION, check=True)
    if response:
        try:
            id, msg = window.split_notify(CONFIG.NOTIFICATION)
            if not id:
                logging.log_notify(CONFIG.ADDONTITLE,
                                   "[COLOR {0}]Notification: Not Formatted Correctly[/COLOR]".format(CONFIG.COLOR2))
                return
            window.show_notification(msg, test=True)
        except Exception as e:
            logging.log("Error on Notifications Window: {0}".format(str(e)), level=xbmc.LOGERROR)
    else:
        logging.log_notify(CONFIG.ADDONTITLE,
                           "[COLOR {0}]Invalid URL for Notification[/COLOR]".format(CONFIG.COLOR2))
def test_update():
    """Preview the build-update window (empty variant when no build name is set)."""
    from resources.libs import check
    from resources.libs.gui import window
    if CONFIG.BUILDNAME == "":
        window.show_update_window()
    else:
        window.show_update_window(CONFIG.BUILDNAME, CONFIG.BUILDVERSION, CONFIG.BUILDLATEST, check.check_build(CONFIG.BUILDNAME, 'icon'), check.check_build(CONFIG.BUILDNAME, 'fanart'))
def test_first_run():
    """Preview the first-run build selection prompt."""
    from resources.libs.gui import window
    window.show_build_prompt()
def test_save_data_settings():
    """Preview the save-data settings window."""
    from resources.libs.gui import window
    window.show_save_data_settings()
| 1,887 | 0 | 161 |
2f756ce131e3d8539e389b9a570b87b3a3b29ccb | 2,905 | py | Python | 2018/day07/part2.py | zagura/aoc-2017 | bfd38fb6fbe4211017a306d218b32ecff741e006 | [
"MIT"
] | 2 | 2018-12-09T16:00:09.000Z | 2018-12-09T17:56:15.000Z | 2018/day07/part2.py | zagura/aoc-2017 | bfd38fb6fbe4211017a306d218b32ecff741e006 | [
"MIT"
] | null | null | null | 2018/day07/part2.py | zagura/aoc-2017 | bfd38fb6fbe4211017a306d218b32ecff741e006 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# Example line: Step A must be finished before step L can begin.
# Parse each instruction into a (prerequisite, dependent) pair of 0-based
# letter indices: word 1 is the prerequisite step, word 7 the dependent one.
edges = [(ord(x[1]) - ord('A'), ord(x[7]) - ord('A')) for x in
         map(lambda x: x.split(), open('input.in').readlines())]
# NOTE(review): dead assignment -- shadowed by the `workers` list below.
workers = 5
for e in edges:
    print('{} → {}'.format(chr(ord('A') + e[0]),chr(ord('A') + e[1])))
# Dependency graph: node id -> Node, wired up from the parsed edges.
graph = {}
# for l in range(ord('Z') - ord('A') + 1):
#     graph[l] = Node(l)
for source, target in edges:
    if source not in graph:
        graph[source] = Node(source)
    if target not in graph:
        graph[target] = Node(target)
    graph[source].insert_target(target, graph[target])
    graph[target].insert_source(source, graph[source])
output = []            # nodes in completion order
nodes_to_insert = []
graph_len = len(graph)
time_point = 0
# Worker pool: slot value is the assigned node id, -1 when idle.
# Slot 0 is never assigned work (see `w != 0` below); it serves as the
# completion-handling pass, so 5 real workers remain.
workers = [ -1 for i in range(6)]
# Simulate one time unit per outer iteration until every node finished.
while(len(output) < graph_len):
    # print(len(output))
    # print(len(graph))
    for w in range(len(workers)):
        # Ready set: nodes whose prerequisites have all been removed.
        nodes_to_insert = []
        for node in graph:
            # print('{} : {} → {}'.format(node, len(graph[node].inputs), len(graph[node].outputs)))
            # print('{}: {}'.format(node, graph[node]))
            if len(graph[node].inputs) == 0:
                nodes_to_insert.append(node)
        #print(nodes_to_insert)
        if len(nodes_to_insert) == 0:
            print('Total time: {} .'.format(time_point))
            break
        # Alphabetical tie-break, as the puzzle requires.
        nodes_to_insert.sort()
        limit = min(len(workers), len(nodes_to_insert))
        processed_nodes = nodes_to_insert[:limit]
        for n in processed_nodes:
            if n in graph:
                # Assign an idle worker (slots 1..5) to an unclaimed node;
                # task duration is 60 + (step index + 1) seconds.
                if w != 0 and workers[w] == -1 and graph[n].busy == -1:
                    print('Assigning {} to worker {} at time point: {}'.format(chr(n+ord('A')), w, time_point))
                    graph[n].begin_time = time_point
                    graph[n].end_time = time_point + n + 1 + 60
                    workers[w] = n
                    graph[n].busy = w
                # Completion pass (only on slot 0): release finished nodes,
                # unblocking their dependents and freeing their worker.
                if time_point == graph[n].end_time and graph[n].busy >= 0 and w == 0:
                    for k in graph[n].outputs:
                        out = graph[n].outputs[k]
                        del out.inputs[n]
                    print("Removing {} TP {}.".format(n, time_point))
                    output.append(n)
                    workers[graph[n].busy] = -1
                    graph[n].busy = -1
                    del graph[n]
    time_point += 1
print('Total time: {} .'.format(time_point))
| 35.426829 | 111 | 0.532186 | #!/usr/bin/python3
# AoC 2018 day 7 part 2: schedule dependent steps across parallel workers.
# Example line: Step A must be finished before step L can begin.
# Token 1 is the prerequisite step letter, token 7 the dependent step letter;
# both are converted to 0-based indices ('A' == 0).
edges = [(ord(x[1]) - ord('A'), ord(x[7]) - ord('A')) for x in
        map(lambda x: x.split(), open('input.in').readlines())]
workers = 5  # NOTE(review): shadowed by the worker-slot list defined later
for e in edges:
    # Debug dump of the parsed dependency edges.
    print('{} → {}'.format(chr(ord('A') + e[0]),chr(ord('A') + e[1])))
class Node(object):
    """A step vertex in the dependency graph.

    Attributes:
        id: 0-based letter index of the step ('A' == 0).
        inputs: prerequisite nodes, keyed by their id.
        outputs: dependent nodes, keyed by their id.
        begin_time / end_time: seconds a worker spends on this step.
        busy: index of the worker processing this step, or -1 when idle.
    """

    def __init__(self, no):
        self.id = no
        # Adjacency maps: step id -> Node.
        self.inputs = {}
        self.outputs = {}
        # Scheduling state, filled in by the simulation loop.
        self.begin_time = 0
        self.end_time = -1
        self.busy = -1

    def insert_source(self, source_id, source):
        """Record *source* as a prerequisite of this node."""
        self.inputs[source_id] = source

    def insert_target(self, target_id, target):
        """Record *target* as depending on this node."""
        self.outputs[target_id] = target

    def __repr__(self):
        state = { 'in': self.inputs.keys(), 'out': self.outputs.keys(), 'id': [self.id]}
        return str(state)
# Build one Node per step and wire prerequisite -> dependent links.
graph = {}
# for l in range(ord('Z') - ord('A') + 1):
#     graph[l] = Node(l)
for source, target in edges:
    if source not in graph:
        graph[source] = Node(source)
    if target not in graph:
        graph[target] = Node(target)
    graph[source].insert_target(target, graph[target])
    graph[target].insert_source(source, graph[source])
# Completed step ids in completion order; simulation ends when all are done.
output = []
nodes_to_insert = []
graph_len = len(graph)
time_point = 0  # current second of the simulation
# Worker slots (-1 == idle).  Slot 0 is never assigned work: the w == 0 pass
# of the loop below is used only to retire steps that finish this second.
workers = [ -1 for i in range(6)]
# One outer iteration per simulated second.
while(len(output) < graph_len):
    # print(len(output))
    # print(len(graph))
    for w in range(len(workers)):
        # Collect steps whose prerequisites are all satisfied.
        nodes_to_insert = []
        for node in graph:
            # print('{} : {} → {}'.format(node, len(graph[node].inputs), len(graph[node].outputs)))
            # print('{}: {}'.format(node, graph[node]))
            if len(graph[node].inputs) == 0:
                nodes_to_insert.append(node)
        #print(nodes_to_insert)
        if len(nodes_to_insert) == 0:
            print('Total time: {} .'.format(time_point))
            break
        # Alphabetical tie-breaking, per the puzzle rules.
        nodes_to_insert.sort()
        limit = min(len(workers), len(nodes_to_insert))
        processed_nodes = nodes_to_insert[:limit]
        for n in processed_nodes:
            if n in graph:
                # Assign an idle worker (slots 1..5) to an unclaimed ready step.
                if w != 0 and workers[w] == -1 and graph[n].busy == -1:
                    print('Assigning {} to worker {} at time point: {}'.format(chr(n+ord('A')), w, time_point))
                    graph[n].begin_time = time_point
                    # Step cost: 60 seconds plus the step's 1-based letter index.
                    graph[n].end_time = time_point + n + 1 + 60
                    workers[w] = n
                    graph[n].busy = w
                # On the w == 0 pass, retire steps finishing this second:
                # unlock dependents, free the worker, drop the node.
                if time_point == graph[n].end_time and graph[n].busy >= 0 and w == 0:
                    for k in graph[n].outputs:
                        out = graph[n].outputs[k]
                        del out.inputs[n]
                    print("Removing {} TP {}.".format(n, time_point))
                    output.append(n)
                    workers[graph[n].busy] = -1
                    graph[n].busy = -1
                    del graph[n]
    time_point += 1
print('Total time: {} .'.format(time_point))
| 369 | -2 | 129 |
00d19d569ee8948072fd77d26286654501cb8321 | 569 | py | Python | backend_rest/tracking/migrations/0006_auto_20200620_1535.py | ezrankayamba/twiga_distribution | ac4fd3d4f6b111e734a932398be564c863582be2 | [
"MIT"
] | null | null | null | backend_rest/tracking/migrations/0006_auto_20200620_1535.py | ezrankayamba/twiga_distribution | ac4fd3d4f6b111e734a932398be564c863582be2 | [
"MIT"
] | 16 | 2020-03-23T13:24:11.000Z | 2022-03-12T00:17:58.000Z | backend_rest/tracking/migrations/0006_auto_20200620_1535.py | ezrankayamba/twiga_distribution | ac4fd3d4f6b111e734a932398be564c863582be2 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.2 on 2020-06-20 12:35
from django.db import migrations, models
| 23.708333 | 62 | 0.585237 | # Generated by Django 3.0.2 on 2020-06-20 12:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tracking', '0005_auto_20200620_1521'),
]
operations = [
migrations.AddField(
model_name='contact',
name='email_alt',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='contact',
name='mobile_alt',
field=models.CharField(max_length=100, null=True),
),
]
| 0 | 455 | 23 |
708616fb52ff53ea202e769ba39d75481910a2d1 | 3,812 | py | Python | utilities/prune_infreq.py | ShadenSmith/parse-tools | dc6e52821184729bf694e26e45a4b3eace5d4bc8 | [
"MIT"
] | null | null | null | utilities/prune_infreq.py | ShadenSmith/parse-tools | dc6e52821184729bf694e26e45a4b3eace5d4bc8 | [
"MIT"
] | 2 | 2017-01-13T03:27:25.000Z | 2017-03-23T15:50:03.000Z | utilities/prune_infreq.py | ShadenSmith/parse-tools | dc6e52821184729bf694e26e45a4b3eace5d4bc8 | [
"MIT"
] | 1 | 2017-07-06T17:40:17.000Z | 2017-07-06T17:40:17.000Z | #!/usr/bin/env python3
import sys
import argparse
from collections import Counter
my_description = '''
Prune empty (or infrequent) slices from a tensor. With default options, this
script scans a tensor file and removes empty slices. Those slices are then
specified with "mode-X-gaps.map" files which map the old dimensionality into
the new one.
Infrequent items can also be pruned by specifying "--mode=MODE,FREQ"
options. For example, "--mode=3,5" will remove any slices in the third mode
with less than five non-zeros.
NOTE: since this process removes non-zeros, it can cause slices in other modes
to become infrequent or empty. If any non-zeros are pruned, this script should
be re-run until no additional empty/infrequent slices are present. Map files
should be merged with `merge_gap_keys.py` after each run, as they will be
overwritten if a mode is pruned additional times.
'''
# Command line: input tensor, output tensor, optional per-mode minimums.
parser = argparse.ArgumentParser(description=my_description,
    formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('tensor', type=str, help='tensor to prune')
parser.add_argument('output', type=str, help='output tensor')
# --mode may be given repeatedly, once per mode to constrain (e.g. --mode=3,5).
parser.add_argument('--mode', metavar='MODE,MIN-FREQ', action='append',
    help='min. frequency for a mode (default: 1)')
args = parser.parse_args()
# First get the number of modes
# (inferred from the first line: one index per mode plus a trailing value).
nmodes = 0
with open(args.tensor, 'r') as fin:
    line = fin.readline()
    nmodes = len(line.split()[:-1]) # skip the val at the end
# Get user-specified minimum frequencies
mode_mins = [1] * nmodes
if args.mode:
    for mode_tup in args.mode:
        # Each --mode argument is "MODE,MIN-FREQ" with MODE 1-based.
        mode_tup = mode_tup.split(',')
        m = int(mode_tup[0]) - 1
        freq = int(mode_tup[1])
        mode_mins[m] = freq
print('minimum frequencies: {}'.format(mode_mins))
def read_tensor(fname):
    '''
    Read each line of the tensor and return a list of indices and the value.

    Yields ([index, ...], value_string) tuples, one per non-zero line.
    Comment lines (starting with '#') and blank lines are skipped.

    Fix: the old check `line[0] == '#' or line is ''` never matched a blank
    line (file lines end in '\\n', and `is` is an identity test, not
    equality), so a blank line fell through and crashed with an IndexError
    on `line[-1]`.
    '''
    with open(fname, 'r') as fin:
        for line in fin:
            stripped = line.strip()
            # skip comments and blank lines
            if not stripped or stripped.startswith('#'):
                continue
            # convert to integers and return list
            parts = stripped.split()
            yield [int(x) for x in parts[:-1]], parts[-1]
# Count appearances in each mode.
ind_counts = [Counter() for x in range(nmodes)]
for inds, val in read_tensor(args.tensor):
    for m in range(nmodes):
        ind_counts[m][inds[m]] += 1
# indmaps[m][i] gives the NEW index for original slice i
ind_maps = [dict() for m in range(nmodes)]
# Go over counts and prune infrequent slices
gapped_modes = []
for m in range(nmodes):
    # Zero out counts below the minimum frequency so they are dropped below.
    for index in ind_counts[m].keys():
        if ind_counts[m][index] < mode_mins[m]:
            ind_counts[m][index] = 0
    # prune
    keep = [x for x in sorted(ind_counts[m]) if ind_counts[m][x] >= mode_mins[m]]
    # NOTE(review): max(...) is the largest slice index seen, so gaplen counts
    # pruned/empty slices assuming 1-based contiguous indexing -- confirm.
    gaplen = max(ind_counts[m]) - len(keep)
    # Have we pruned any slices?
    if gaplen > 0:
        gapped_modes.append(m)
        print('mode-{}: {} empty slices'.format(m+1, gaplen))
        # assign new IDs and write map file
        with open('mode-{}-gaps.map'.format(m+1), 'w') as mapfile:
            for i in keep:
                # New indices are assigned densely, starting at 1.
                ind_maps[m][i] = len(ind_maps[m]) + 1
                # invert map and write to file
                print('{}'.format(i), file=mapfile)
if len(gapped_modes) == 0:
    print('no empty slices')
    sys.exit(0)
# Go back over the tensor and map indices
nnz = 0
pruned_nnz = 0
with open(args.output, 'w') as fout:
    for inds, val in read_tensor(args.tensor):
        pruned = False
        # map indices and check for pruned nnz
        for m in gapped_modes:
            if inds[m] in ind_maps[m]:
                inds[m] = ind_maps[m][inds[m]]
            else:
                # This non-zero lives in a pruned slice; drop it.
                pruned = True
                pruned_nnz += 1
        # write non-zero
        if not pruned:
            print('{} {}'.format(' '.join(map(str, inds)), val), file=fout)
            nnz += 1
print('pruned nnz: {:,d} new nnz: {:,d}'.format(pruned_nnz, nnz))
import sys
import argparse
from collections import Counter
my_description = '''
Prune empty (or infrequent) slices from a tensor. With default options, this
script scans a tensor file and removes empty slices. Those slices are then
specified with "mode-X-gaps.map" files which map the old dimensionality into
the new one.
Infrequent items can also be pruned by specifying "--mode=MODE,FREQ"
options. For example, "--mode=3,5" will remove any slices in the third mode
with less than five non-zeros.
NOTE: since this process removes non-zeros, it can cause slices in other modes
to become infrequent or empty. If any non-zeros are pruned, this script should
be re-run until no additional empty/infrequent slices are present. Map files
should be merged with `merge_gap_keys.py` after each run, as they will be
overwritten if a mode is pruned additional times.
'''
# Command line: input tensor, output tensor, optional per-mode minimums.
parser = argparse.ArgumentParser(description=my_description,
    formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('tensor', type=str, help='tensor to prune')
parser.add_argument('output', type=str, help='output tensor')
# --mode may be given repeatedly, once per mode to constrain (e.g. --mode=3,5).
parser.add_argument('--mode', metavar='MODE,MIN-FREQ', action='append',
    help='min. frequency for a mode (default: 1)')
args = parser.parse_args()
# First get the number of modes
# (inferred from the first line: one index per mode plus a trailing value).
nmodes = 0
with open(args.tensor, 'r') as fin:
    line = fin.readline()
    nmodes = len(line.split()[:-1]) # skip the val at the end
# Get user-specified minimum frequencies
mode_mins = [1] * nmodes
if args.mode:
    for mode_tup in args.mode:
        # Each --mode argument is "MODE,MIN-FREQ" with MODE 1-based.
        mode_tup = mode_tup.split(',')
        m = int(mode_tup[0]) - 1
        freq = int(mode_tup[1])
        mode_mins[m] = freq
print('minimum frequencies: {}'.format(mode_mins))
def read_tensor(fname):
    '''
    Read each line of the tensor and return a list of indices and the value.

    Yields ([index, ...], value_string) tuples, one per non-zero line.
    Comment lines (starting with '#') and blank lines are skipped.

    Fix: the old check `line[0] == '#' or line is ''` never matched a blank
    line (file lines end in '\\n', and `is` is an identity test, not
    equality), so a blank line fell through and crashed with an IndexError
    on `line[-1]`.
    '''
    with open(fname, 'r') as fin:
        for line in fin:
            stripped = line.strip()
            # skip comments and blank lines
            if not stripped or stripped.startswith('#'):
                continue
            # convert to integers and return list
            parts = stripped.split()
            yield [int(x) for x in parts[:-1]], parts[-1]
# Count appearances in each mode.
ind_counts = [Counter() for x in range(nmodes)]
for inds, val in read_tensor(args.tensor):
    for m in range(nmodes):
        ind_counts[m][inds[m]] += 1
# indmaps[m][i] gives the NEW index for original slice i
ind_maps = [dict() for m in range(nmodes)]
# Go over counts and prune infrequent slices
gapped_modes = []
for m in range(nmodes):
    # Zero out counts below the minimum frequency so they are dropped below.
    for index in ind_counts[m].keys():
        if ind_counts[m][index] < mode_mins[m]:
            ind_counts[m][index] = 0
    # prune
    keep = [x for x in sorted(ind_counts[m]) if ind_counts[m][x] >= mode_mins[m]]
    # NOTE(review): max(...) is the largest slice index seen, so gaplen counts
    # pruned/empty slices assuming 1-based contiguous indexing -- confirm.
    gaplen = max(ind_counts[m]) - len(keep)
    # Have we pruned any slices?
    if gaplen > 0:
        gapped_modes.append(m)
        print('mode-{}: {} empty slices'.format(m+1, gaplen))
        # assign new IDs and write map file
        with open('mode-{}-gaps.map'.format(m+1), 'w') as mapfile:
            for i in keep:
                # New indices are assigned densely, starting at 1.
                ind_maps[m][i] = len(ind_maps[m]) + 1
                # invert map and write to file
                print('{}'.format(i), file=mapfile)
if len(gapped_modes) == 0:
    print('no empty slices')
    sys.exit(0)
# Go back over the tensor and map indices
nnz = 0
pruned_nnz = 0
with open(args.output, 'w') as fout:
    for inds, val in read_tensor(args.tensor):
        pruned = False
        # map indices and check for pruned nnz
        for m in gapped_modes:
            if inds[m] in ind_maps[m]:
                inds[m] = ind_maps[m][inds[m]]
            else:
                # This non-zero lives in a pruned slice; drop it.
                pruned = True
                pruned_nnz += 1
        # write non-zero
        if not pruned:
            print('{} {}'.format(' '.join(map(str, inds)), val), file=fout)
            nnz += 1
print('pruned nnz: {:,d} new nnz: {:,d}'.format(pruned_nnz, nnz))
755f967e61125ac5cfadb640c0e1d4eef7cfafe6 | 574 | py | Python | books/PythonCleanCode/ch4_solid/lsp_1.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | [
"MIT"
] | null | null | null | books/PythonCleanCode/ch4_solid/lsp_1.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | [
"MIT"
] | null | null | null | books/PythonCleanCode/ch4_solid/lsp_1.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | [
"MIT"
] | null | null | null | """Clean Code in Python - Chater 4, The SOLID Principles
> Liskov's Substitution Principle (LSP)
Detecting violations of LSP through tools (mypy, pylint, etc.)
"""
| 22.076923 | 88 | 0.655052 | """Clean Code in Python - Chater 4, The SOLID Principles
> Liskov's Substitution Principle (LSP)
Detecting violations of LSP through tools (mypy, pylint, etc.)
"""
class Event:
    """Base event type; subclasses are expected to honor this signature."""
    ...
    def meets_condition(self, event_data: dict) -> bool:
        # Base implementation: no event matches by default.
        return False
class LoginEvent(Event):
    # Deliberate LSP violation (for mypy/pylint to detect): the override
    # narrows the parameter type from dict to list, so LoginEvent is not
    # substitutable for Event.
    def meets_condition(self, event_data: list) -> bool: # type: ignore
        return bool(event_data)
class LogoutEvent(Event):
    # Deliberate LSP violation (for mypy/pylint to detect): the override adds
    # a required parameter, changing the signature relative to the parent.
    def meets_condition(self, event_data: dict, override: bool) -> bool: # type: ignore
        if override:
            return True
        ...
| 251 | 33 | 121 |