| hexsha (stringlengths 40) | size (int64 3-1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3-972) | max_stars_repo_name (stringlengths 6-130) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24 ⌀) | max_issues_repo_path (stringlengths 3-972) | max_issues_repo_name (stringlengths 6-130) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24 ⌀) | max_forks_repo_path (stringlengths 3-972) | max_forks_repo_name (stringlengths 6-130) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24 ⌀) | content (stringlengths 3-1.03M) | avg_line_length (float64 1.13-941k) | max_line_length (int64 2-941k) | alphanum_fraction (float64 0-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
04d4c6a6adaa5b0b5caf062aa6670d9b1d632568 | 15,525 | py | Python | tests/field_deconstruction/tests.py | brylie/django | f12b68af71efa0b061710c83364f231734d07e55 | ["BSD-3-Clause"] | 1 | 2019-03-26T02:49:39.000Z | 2019-03-26T02:49:39.000Z | tests/field_deconstruction/tests.py | radiosilence/django | 901353d87538d51b7ae97623f28569df179e05b6 | ["BSD-3-Clause"] | null | null | null | tests/field_deconstruction/tests.py | radiosilence/django | 901353d87538d51b7ae97623f28569df179e05b6 | ["BSD-3-Clause"] | null | null | null |
import warnings
from django.db import models
from django.test import TestCase, override_settings
from django.utils import six
class FieldDeconstructionTests(TestCase):
"""
Tests the deconstruct() method on all core fields.
"""
def test_name(self):
"""
Tests the outputting of the correct name if assigned one.
"""
# First try using a "normal" field
field = models.CharField(max_length=65)
name, path, args, kwargs = field.deconstruct()
self.assertIsNone(name)
field.set_attributes_from_name("is_awesome_test")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(name, "is_awesome_test")
self.assertIsInstance(name, six.text_type)
# Now try with a ForeignKey
field = models.ForeignKey("some_fake.ModelName")
name, path, args, kwargs = field.deconstruct()
self.assertIsNone(name)
field.set_attributes_from_name("author")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(name, "author")
def test_auto_field(self):
field = models.AutoField(primary_key=True)
field.set_attributes_from_name("id")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.AutoField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"primary_key": True})
def test_big_integer_field(self):
field = models.BigIntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.BigIntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_boolean_field(self):
field = models.BooleanField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.BooleanField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.BooleanField(default=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.BooleanField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"default": True})
def test_char_field(self):
field = models.CharField(max_length=65)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.CharField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 65})
field = models.CharField(max_length=65, null=True, blank=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.CharField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 65, "null": True, "blank": True})
def test_csi_field(self):
field = models.CommaSeparatedIntegerField(max_length=100)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.CommaSeparatedIntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 100})
def test_date_field(self):
field = models.DateField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DateField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.DateField(auto_now=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DateField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"auto_now": True})
def test_datetime_field(self):
field = models.DateTimeField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DateTimeField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.DateTimeField(auto_now_add=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DateTimeField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"auto_now_add": True})
# Bug #21785
field = models.DateTimeField(auto_now=True, auto_now_add=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DateTimeField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"auto_now_add": True, "auto_now": True})
def test_decimal_field(self):
field = models.DecimalField(max_digits=5, decimal_places=2)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DecimalField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_digits": 5, "decimal_places": 2})
def test_decimal_field_0_decimal_places(self):
"""
A DecimalField with decimal_places=0 should work (#22272).
"""
field = models.DecimalField(max_digits=5, decimal_places=0)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DecimalField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_digits": 5, "decimal_places": 0})
def test_email_field(self):
field = models.EmailField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.EmailField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 75})
field = models.EmailField(max_length=255)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.EmailField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 255})
def test_file_field(self):
field = models.FileField(upload_to="foo/bar")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FileField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"upload_to": "foo/bar"})
# Test max_length
field = models.FileField(upload_to="foo/bar", max_length=200)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FileField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"upload_to": "foo/bar", "max_length": 200})
def test_file_path_field(self):
field = models.FilePathField(match=".*\.txt$")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FilePathField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"match": ".*\.txt$"})
field = models.FilePathField(recursive=True, allow_folders=True, max_length=123)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FilePathField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"recursive": True, "allow_folders": True, "max_length": 123})
def test_float_field(self):
field = models.FloatField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FloatField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_foreign_key(self):
# Test basic pointing
from django.contrib.auth.models import Permission
field = models.ForeignKey("auth.Permission")
field.rel.to = Permission
field.rel.field_name = "id"
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission"})
self.assertFalse(hasattr(kwargs['to'], "setting_name"))
# Test swap detection for swappable model
field = models.ForeignKey("auth.User")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.User"})
self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
# Test nonexistent (for now) model
field = models.ForeignKey("something.Else")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "something.Else"})
# Test on_delete
field = models.ForeignKey("auth.User", on_delete=models.SET_NULL)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.User", "on_delete": models.SET_NULL})
# Test to_field preservation
field = models.ForeignKey("auth.Permission", to_field="foobar")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission", "to_field": "foobar"})
@override_settings(AUTH_USER_MODEL="auth.Permission")
def test_foreign_key_swapped(self):
# It doesn't matter that we swapped out user for permission;
# there's no validation. We just want to check the setting stuff works.
field = models.ForeignKey("auth.Permission")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission"})
self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
def test_image_field(self):
field = models.ImageField(upload_to="foo/barness", width_field="width", height_field="height")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ImageField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"upload_to": "foo/barness", "width_field": "width", "height_field": "height"})
def test_integer_field(self):
field = models.IntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.IntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_ip_address_field(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
field = models.IPAddressField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.IPAddressField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_generic_ip_address_field(self):
field = models.GenericIPAddressField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.GenericIPAddressField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.GenericIPAddressField(protocol="IPv6")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.GenericIPAddressField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"protocol": "IPv6"})
def test_many_to_many_field(self):
# Test normal
field = models.ManyToManyField("auth.Permission")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission"})
self.assertFalse(hasattr(kwargs['to'], "setting_name"))
# Test swappable
field = models.ManyToManyField("auth.User")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.User"})
self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
# Test through
field = models.ManyToManyField("auth.Permission", through="auth.Group")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission", "through": "auth.Group"})
@override_settings(AUTH_USER_MODEL="auth.Permission")
def test_many_to_many_field_swapped(self):
# It doesn't matter that we swapped out user for permission;
# there's no validation. We just want to check the setting stuff works.
field = models.ManyToManyField("auth.Permission")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission"})
self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
def test_null_boolean_field(self):
field = models.NullBooleanField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.NullBooleanField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_positive_integer_field(self):
field = models.PositiveIntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.PositiveIntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_positive_small_integer_field(self):
field = models.PositiveSmallIntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.PositiveSmallIntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_slug_field(self):
field = models.SlugField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.SlugField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.SlugField(db_index=False, max_length=231)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.SlugField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"db_index": False, "max_length": 231})
def test_small_integer_field(self):
field = models.SmallIntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.SmallIntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_text_field(self):
field = models.TextField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.TextField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_url_field(self):
field = models.URLField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.URLField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.URLField(max_length=231)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.URLField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 231})
| 45.130814 | 112 | 0.648631 |
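The tests in the row above all exercise the same contract: Field.deconstruct() returns a (name, path, args, kwargs) tuple from which an equivalent field can be rebuilt. A minimal sketch of that round trip, assuming only that Django is installed (the field and values below are illustrative, not taken from the dataset):

# Illustrative sketch of the deconstruct() round trip; not part of the dataset row above.
from django.db import models
from django.utils.module_loading import import_string

field = models.CharField(max_length=65, null=True)
name, path, args, kwargs = field.deconstruct()   # path == "django.db.models.CharField"
field_class = import_string(path)                # resolve the dotted path back to the class
clone = field_class(*args, **kwargs)             # rebuild an equivalent field, as migrations do
assert clone.max_length == field.max_length and clone.null == field.null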
3740b3c0c6f6a1546405b0f3aa6ea7dbef463117 | 9,460 | py | Python | saleor/graphql/order/schema.py | Vultik/saleor | dc8548f7ad49cc26950dbfa0fd81f02617350240 | ["CC-BY-4.0"] | null | null | null | saleor/graphql/order/schema.py | Vultik/saleor | dc8548f7ad49cc26950dbfa0fd81f02617350240 | ["CC-BY-4.0"] | null | null | null | saleor/graphql/order/schema.py | Vultik/saleor | dc8548f7ad49cc26950dbfa0fd81f02617350240 | ["CC-BY-4.0"] | null | null | null |
from typing import List, Optional
import graphene
from graphql import GraphQLError
from ...core.permissions import OrderPermissions
from ..core.connection import create_connection_slice, filter_connection_queryset
from ..core.descriptions import DEPRECATED_IN_3X_FIELD
from ..core.enums import ReportingPeriod
from ..core.fields import ConnectionField, FilterConnectionField, PermissionsField
from ..core.scalars import UUID
from ..core.types import FilterInputObjectType, TaxedMoney
from ..core.utils import from_global_id_or_error
from .bulk_mutations.draft_orders import DraftOrderBulkDelete, DraftOrderLinesBulkDelete
from .bulk_mutations.orders import OrderBulkCancel
from .filters import DraftOrderFilter, OrderFilter
from .mutations.draft_order_complete import DraftOrderComplete
from .mutations.draft_order_create import DraftOrderCreate
from .mutations.draft_order_delete import DraftOrderDelete
from .mutations.draft_order_update import DraftOrderUpdate
from .mutations.fulfillment_approve import FulfillmentApprove
from .mutations.fulfillment_cancel import FulfillmentCancel
from .mutations.fulfillment_refund_products import FulfillmentRefundProducts
from .mutations.fulfillment_return_products import FulfillmentReturnProducts
from .mutations.fulfillment_update_tracking import FulfillmentUpdateTracking
from .mutations.order_add_note import OrderAddNote
from .mutations.order_cancel import OrderCancel
from .mutations.order_capture import OrderCapture
from .mutations.order_confirm import OrderConfirm
from .mutations.order_discount_add import OrderDiscountAdd
from .mutations.order_discount_delete import OrderDiscountDelete
from .mutations.order_discount_update import OrderDiscountUpdate
from .mutations.order_fulfill import OrderFulfill
from .mutations.order_line_delete import OrderLineDelete
from .mutations.order_line_discount_remove import OrderLineDiscountRemove
from .mutations.order_line_discount_update import OrderLineDiscountUpdate
from .mutations.order_line_update import OrderLineUpdate
from .mutations.order_lines_create import OrderLinesCreate
from .mutations.order_mark_as_paid import OrderMarkAsPaid
from .mutations.order_refund import OrderRefund
from .mutations.order_update import OrderUpdate
from .mutations.order_update_shipping import OrderUpdateShipping
from .mutations.order_void import OrderVoid
from .resolvers import (
resolve_draft_orders,
resolve_homepage_events,
resolve_order,
resolve_order_by_token,
resolve_orders,
resolve_orders_total,
)
from .sorters import OrderSortField, OrderSortingInput
from .types import Order, OrderCountableConnection, OrderEventCountableConnection
def search_string_in_kwargs(kwargs: dict) -> bool:
return bool(kwargs.get("filter", {}).get("search", "").strip())
def sort_field_from_kwargs(kwargs: dict) -> Optional[List[str]]:
return kwargs.get("sort_by", {}).get("field") or None
class OrderFilterInput(FilterInputObjectType):
class Meta:
filterset_class = OrderFilter
class OrderDraftFilterInput(FilterInputObjectType):
class Meta:
filterset_class = DraftOrderFilter
class OrderQueries(graphene.ObjectType):
homepage_events = ConnectionField(
OrderEventCountableConnection,
description=(
"List of activity events to display on "
"homepage (at the moment it only contains order-events)."
),
permissions=[
OrderPermissions.MANAGE_ORDERS,
],
)
order = graphene.Field(
Order,
description="Look up an order by ID.",
id=graphene.Argument(graphene.ID, description="ID of an order.", required=True),
)
orders = FilterConnectionField(
OrderCountableConnection,
sort_by=OrderSortingInput(description="Sort orders."),
filter=OrderFilterInput(description="Filtering options for orders."),
channel=graphene.String(
description="Slug of a channel for which the data should be returned."
),
description="List of orders.",
permissions=[
OrderPermissions.MANAGE_ORDERS,
],
)
draft_orders = FilterConnectionField(
OrderCountableConnection,
sort_by=OrderSortingInput(description="Sort draft orders."),
filter=OrderDraftFilterInput(description="Filtering options for draft orders."),
description="List of draft orders.",
permissions=[
OrderPermissions.MANAGE_ORDERS,
],
)
orders_total = PermissionsField(
TaxedMoney,
description="Return the total sales amount from a specific period.",
period=graphene.Argument(ReportingPeriod, description="A period of time."),
channel=graphene.Argument(
graphene.String,
description="Slug of a channel for which the data should be returned.",
),
permissions=[
OrderPermissions.MANAGE_ORDERS,
],
)
order_by_token = graphene.Field(
Order,
description="Look up an order by token.",
deprecation_reason=DEPRECATED_IN_3X_FIELD,
token=graphene.Argument(UUID, description="The order's token.", required=True),
)
@staticmethod
def resolve_homepage_events(_root, info, **kwargs):
qs = resolve_homepage_events()
return create_connection_slice(qs, info, kwargs, OrderEventCountableConnection)
@staticmethod
def resolve_order(_root, _info, **data):
_, id = from_global_id_or_error(data.get("id"), Order)
return resolve_order(id)
@staticmethod
def resolve_orders(_root, info, *, channel=None, **kwargs):
if sort_field_from_kwargs(kwargs) == OrderSortField.RANK:
# sort by RANK can be used only with search filter
if not search_string_in_kwargs(kwargs):
raise GraphQLError(
"Sorting by RANK is available only when using a search filter."
)
if search_string_in_kwargs(kwargs) and not sort_field_from_kwargs(kwargs):
# default to sorting by RANK if search is used
# and no explicit sorting is requested
product_type = info.schema.get_type("OrderSortingInput")
kwargs["sort_by"] = product_type.create_container(
{"direction": "-", "field": ["search_rank", "id"]}
)
qs = resolve_orders(info, channel)
qs = filter_connection_queryset(qs, kwargs)
return create_connection_slice(qs, info, kwargs, OrderCountableConnection)
@staticmethod
def resolve_draft_orders(_root, info, **kwargs):
if sort_field_from_kwargs(kwargs) == OrderSortField.RANK:
# sort by RANK can be used only with search filter
if not search_string_in_kwargs(kwargs):
raise GraphQLError(
"Sorting by RANK is available only when using a search filter."
)
if search_string_in_kwargs(kwargs) and not sort_field_from_kwargs(kwargs):
# default to sorting by RANK if search is used
# and no explicit sorting is requested
product_type = info.schema.get_type("OrderSortingInput")
kwargs["sort_by"] = product_type.create_container(
{"direction": "-", "field": ["search_rank", "id"]}
)
qs = resolve_draft_orders(info)
qs = filter_connection_queryset(qs, kwargs)
return create_connection_slice(qs, info, kwargs, OrderCountableConnection)
@staticmethod
def resolve_orders_total(_root, info, *, period, channel=None):
return resolve_orders_total(info, period, channel)
@staticmethod
def resolve_order_by_token(_root, _info, *, token):
return resolve_order_by_token(token)
class OrderMutations(graphene.ObjectType):
draft_order_complete = DraftOrderComplete.Field()
draft_order_create = DraftOrderCreate.Field()
draft_order_delete = DraftOrderDelete.Field()
draft_order_bulk_delete = DraftOrderBulkDelete.Field()
draft_order_lines_bulk_delete = DraftOrderLinesBulkDelete.Field(
deprecation_reason=DEPRECATED_IN_3X_FIELD
)
draft_order_update = DraftOrderUpdate.Field()
order_add_note = OrderAddNote.Field()
order_cancel = OrderCancel.Field()
order_capture = OrderCapture.Field()
order_confirm = OrderConfirm.Field()
order_fulfill = OrderFulfill.Field()
order_fulfillment_cancel = FulfillmentCancel.Field()
order_fulfillment_approve = FulfillmentApprove.Field()
order_fulfillment_update_tracking = FulfillmentUpdateTracking.Field()
order_fulfillment_refund_products = FulfillmentRefundProducts.Field()
order_fulfillment_return_products = FulfillmentReturnProducts.Field()
order_lines_create = OrderLinesCreate.Field()
order_line_delete = OrderLineDelete.Field()
order_line_update = OrderLineUpdate.Field()
order_discount_add = OrderDiscountAdd.Field()
order_discount_update = OrderDiscountUpdate.Field()
order_discount_delete = OrderDiscountDelete.Field()
order_line_discount_update = OrderLineDiscountUpdate.Field()
order_line_discount_remove = OrderLineDiscountRemove.Field()
order_mark_as_paid = OrderMarkAsPaid.Field()
order_refund = OrderRefund.Field()
order_update = OrderUpdate.Field()
order_update_shipping = OrderUpdateShipping.Field()
order_void = OrderVoid.Field()
order_bulk_cancel = OrderBulkCancel.Field()
| 41.858407 | 88 | 0.737632 |
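The two module-level helpers near the top of schema.py encode the rule the resolvers above enforce: sorting by RANK is only allowed together with a search filter, and a search without an explicit sort falls back to RANK ordering. A self-contained sketch of the kwargs shapes those helpers expect (the sample dicts are invented for illustration, not taken from Saleor):

from typing import List, Optional

def search_string_in_kwargs(kwargs: dict) -> bool:
    # True only when a non-blank "search" string is present in the filter input.
    return bool(kwargs.get("filter", {}).get("search", "").strip())

def sort_field_from_kwargs(kwargs: dict) -> Optional[List[str]]:
    return kwargs.get("sort_by", {}).get("field") or None

with_search = {"filter": {"search": "john"}, "sort_by": {}}
rank_without_search = {"filter": {}, "sort_by": {"field": ["search_rank", "id"]}}

assert search_string_in_kwargs(with_search) is True
assert sort_field_from_kwargs(with_search) is None        # resolver defaults this case to RANK ordering
assert not search_string_in_kwargs(rank_without_search)   # resolver raises GraphQLError for this case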
c07a9a906d0cd1c2fedccfa17afe5f55b08d1426 | 1,729 | py | Python | src/util/boost_pad_tracker.py | kennytrytek/RLBotPythonExample | 40ab147dbc62da012f3618a34cd8f58b646a349d | ["MIT"] | null | null | null | src/util/boost_pad_tracker.py | kennytrytek/RLBotPythonExample | 40ab147dbc62da012f3618a34cd8f58b646a349d | ["MIT"] | null | null | null | src/util/boost_pad_tracker.py | kennytrytek/RLBotPythonExample | 40ab147dbc62da012f3618a34cd8f58b646a349d | ["MIT"] | null | null | null |
from dataclasses import dataclass
from typing import List
from rlbot.utils.structures.game_data_struct import GameTickPacket, FieldInfoPacket
from util.vec import Vec3
@dataclass
class BoostPad:
location: Vec3
is_full_boost: bool
is_active: bool # Active means it's available to be picked up
timer: float # Counts the number of seconds that the pad has been *inactive*
class BoostPadTracker:
"""
This class merges together the boost pad location info with the is_active info so you can access it
in one convenient list. For it to function correctly, you need to call initialize_boosts once when the
game has started, and then update_boost_status every frame so that it knows which pads are active.
"""
def __init__(self, field_info: FieldInfoPacket):
self.boost_pads: List[BoostPad] = []
self._full_boosts_only: List[BoostPad] = []
raw_boosts = [field_info.boost_pads[i] for i in range(field_info.num_boosts)]
self.boost_pads: List[BoostPad] = [BoostPad(Vec3(rb.location), rb.is_full_boost, False, 0) for rb in raw_boosts]
# Cache the list of full boosts since they're commonly requested.
# They reference the same objects in the boost_pads list.
self._full_boosts_only: List[BoostPad] = [bp for bp in self.boost_pads if bp.is_full_boost]
def update_boost_status(self, packet: GameTickPacket):
for i in range(packet.num_boost):
our_pad = self.boost_pads[i]
packet_pad = packet.game_boosts[i]
our_pad.is_active = packet_pad.is_active
our_pad.timer = packet_pad.timer
def get_full_boosts(self) -> List[BoostPad]:
return self._full_boosts_only
| 40.209302 | 120 | 0.713707 |
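A hedged usage sketch for the tracker above, following the pattern its docstring describes (construct once, update every frame). It assumes the standard RLBot BaseAgent callbacks; names outside this repo's file are best-effort assumptions rather than verified API.

from rlbot.agents.base_agent import BaseAgent, SimpleControllerState
from rlbot.utils.structures.game_data_struct import GameTickPacket

from util.boost_pad_tracker import BoostPadTracker

class ExampleBot(BaseAgent):
    def initialize_agent(self):
        # Build the tracker once, when field info is available.
        self.boost_pad_tracker = BoostPadTracker(self.get_field_info())

    def get_output(self, packet: GameTickPacket) -> SimpleControllerState:
        # Refresh pad activity every frame, then read the cached full-boost list.
        self.boost_pad_tracker.update_boost_status(packet)
        active_big_pads = [pad for pad in self.boost_pad_tracker.get_full_boosts() if pad.is_active]
        # ... steer toward active_big_pads[0].location when low on boost ...
        return SimpleControllerState()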
78980457f42ad54eb2423739f1657db45baffde4 | 196 | py | Python | rules_dict.py | CrEaTiiOn187/tg-management-bot | 554dbdd2134c14466baff3f3d448ac66f54ced03 | ["MIT"] | 3 | 2021-10-21T18:16:37.000Z | 2022-03-10T18:24:55.000Z | rules_dict.py | NoodleSoup/tg-management-bot | 554dbdd2134c14466baff3f3d448ac66f54ced03 | ["MIT"] | null | null | null | rules_dict.py | NoodleSoup/tg-management-bot | 554dbdd2134c14466baff3f3d448ac66f54ced03 | ["MIT"] | 1 | 2020-07-15T07:43:55.000Z | 2020-07-15T07:43:55.000Z |
def load():
    from pickle import load
    # Load the rules dict persisted in rules.db; fall back to an empty dict if
    # the file is missing or cannot be unpickled.
    try:
        with open('rules.db', 'rb') as rules_db:
            return load(rules_db)
    except Exception:
        return {}
rules_dict = load()
| 17.818182 | 35 | 0.637755 |
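The loader above implies a matching writer for rules.db. A hypothetical companion (not present in the repository row) that would produce the pickle file the loader reads:

def save(rules_dict):
    from pickle import dump
    # Persist the rules dict so the load() above can read it back on the next start.
    with open('rules.db', 'wb') as rules_db:
        dump(rules_dict, rules_db)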
19d8a442219855054e82777ba7d9c06d45591b9c | 5,713 | py | Python | docs/conf.py | CurtLH/phonefinder | 22b5d4cfbe8aded82e60b1a1c07974f6366363f3 | ["Apache-2.0"] | 2 | 2020-07-08T19:12:00.000Z | 2020-08-06T03:21:01.000Z | docs/conf.py | CurtLH/phonefinder | 22b5d4cfbe8aded82e60b1a1c07974f6366363f3 | ["Apache-2.0"] | 13 | 2020-01-04T05:54:31.000Z | 2020-06-12T02:28:56.000Z | docs/conf.py | CurtLH/phonefinder | 22b5d4cfbe8aded82e60b1a1c07974f6366363f3 | ["Apache-2.0"] | null | null | null |
{%- import '.github/ght/macros/selected.j2' as selected -%}
#!/usr/bin/env python
#
# {{ cookiecutter.project_repo }} documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
import {{cookiecutter.project_namespace}}.{{ cookiecutter.project_slug }}
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = "1.0"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
# fmt: off
"sphinx.ext.autodoc",
{%- call(cli) selected.first(ght.command_line_interface) %}
{%- if cli|lower == 'click' %}
"sphinx_click.ext",
{%- endif %}
{%- endcall %}
"sphinx.ext.viewcode",
# fmt: on
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = [".rst", ".md"]
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "{{ cookiecutter.project_name }}"
copyright = "{% now 'local', '%Y' %}, {{ cookiecutter.full_name }}"
author = "{{ cookiecutter.full_name }}"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = {{cookiecutter.project_namespace}}.{{ cookiecutter.project_slug }}.__version__
# The full version, including alpha/beta/rc tags.
release = {{cookiecutter.project_namespace}}.{{ cookiecutter.project_slug }}.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "{{ cookiecutter.project_slug }}doc"
# -- Options for LaTeX output ------------------------------------------
# fmt: off
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# fmt: on
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
# fmt: off
(
master_doc,
"{{ cookiecutter.project_slug }}.tex",
"{{ cookiecutter.project_name }} Documentation",
"{{ cookiecutter.full_name }}",
"manual",
),
# fmt: on
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
# fmt: off
(
master_doc,
"{{ cookiecutter.project_slug }}",
"{{ cookiecutter.project_name }} Documentation",
[author],
1
)
# fmt: on
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
# fmt: off
(
master_doc,
"{{ cookiecutter.project_slug }}",
"{{ cookiecutter.project_name }} Documentation",
author,
"{{ cookiecutter.project_slug }}",
"One line description of project.",
"Miscellaneous",
),
# fmt: on
]
| 30.068421 | 88 | 0.664099 |
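The conf.py above is a Jinja/cookiecutter template rather than a finished Sphinx config; it only renders once the generator supplies the variables it references. A hypothetical context dict, shown purely to make the template's inputs concrete (every value is invented; only the key names come from the template):

example_context = {
    "cookiecutter": {
        "project_name": "Phone Finder",        # invented example values
        "project_repo": "phonefinder",
        "project_slug": "phonefinder",
        "project_namespace": "curtlh",
        "full_name": "Curt Hurlbut",
    },
    # The template also consults ght.command_line_interface via the selected.j2 macro.
    "ght": {"command_line_interface": ["click"]},
}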
ecb29ff6117ed9ef2c94d3a35f287e2935a5c5a8 | 1,775 | py | Python | TestMultiplicationVisual.py | mgualti/MentalMath | be2e2ec2380b8298765962d869573c65d3d7b761 | ["MIT"] | null | null | null | TestMultiplicationVisual.py | mgualti/MentalMath | be2e2ec2380b8298765962d869573c65d3d7b761 | ["MIT"] | null | null | null | TestMultiplicationVisual.py | mgualti/MentalMath | be2e2ec2380b8298765962d869573c65d3d7b761 | ["MIT"] | null | null | null |
#!/usr/bin/env python
'''Test a human's ability to do mental multiplication when the question is visible.'''
# python
import sys
from time import sleep
from random import random
# pyttsx
import pyttsx
def main(nQuestions, nDigitsTop, nDigitsBottom, delay):
'''Entrypoint to the program.'''
# PARAMETERS =====================================================================================
rate = 120
# INITIALIZATION =================================================================================
# RUN TEST =======================================================================================
for question in xrange(nQuestions):
a = 0; b = 0
while a <= 1 or b <= 1: # prevent trivial questions
a = int(10**nDigitsTop*random())
b = int(10**nDigitsBottom*random())
c = a * b
sys.stdout.write("Question: {}. {} * {} = ".format(question+1,a,b))
sys.stdout.flush()
speechEng = pyttsx.init()
speechEng.setProperty("rate", rate)
speechEng.say(str(a) + " times " + str(b))
speechEng.runAndWait()
del speechEng
sleep(delay)
sys.stdout.write("{}\n".format(c))
sys.stdout.flush()
speechEng = pyttsx.init()
speechEng.setProperty("rate", rate)
speechEng.say(str(c))
speechEng.runAndWait()
del speechEng
raw_input("Press [Enter] to continue...")
if __name__ == "__main__":
'''Call main with the appropriate command line arguments.'''
try:
nQuestions = int(sys.argv[1])
nDigitsTop = int(sys.argv[2])
nDigitsBottom = int(sys.argv[3])
delay = int(sys.argv[4])
except:
print("Usage: TestMultiplication.py nQuestions nDigitsTop nDigitsBottom delay")
exit()
main(nQuestions, nDigitsTop, nDigitsBottom, delay)
exit()
| 29.098361 | 100 | 0.549296 |
f4d20f14043e741ec79eeacda6a31836b01fe439 | 1,287 | py | Python | apps/accounts/urls.py | BR0kEN-/admin-portal | 0c38dc0d790031f45bf07660bce690e972fe2858 | ["Apache-2.0"] | null | null | null | apps/accounts/urls.py | BR0kEN-/admin-portal | 0c38dc0d790031f45bf07660bce690e972fe2858 | ["Apache-2.0"] | null | null | null | apps/accounts/urls.py | BR0kEN-/admin-portal | 0c38dc0d790031f45bf07660bce690e972fe2858 | ["Apache-2.0"] | null | null | null |
from django.urls import path
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from apps.accounts.views import AdminRegistrationView
from apps.accounts.views import AdminActivationView
urlpatterns = [
path(
"password_reset/",
auth_views.PasswordResetView.as_view(),
name="admin_password_reset",
),
path("registration/", AdminRegistrationView.as_view(), name="registration"),
url(
r"activation/(?P<activation_key>[-:\w]+)/",
AdminActivationView.as_view(),
name="activation",
),
path(
"password_change/",
auth_views.PasswordChangeView.as_view(),
name="password_change",
),
path(
"password_change/done/",
auth_views.PasswordChangeDoneView.as_view(),
name="password_change_done",
),
path(
"password_reset/done/",
auth_views.PasswordResetDoneView.as_view(),
name="password_reset_done",
),
path(
"reset/<uidb64>/<token>/",
auth_views.PasswordResetConfirmView.as_view(),
name="password_reset_confirm",
),
path(
"reset/done/",
auth_views.PasswordResetCompleteView.as_view(),
name="password_reset_complete",
),
]
| 26.8125 | 80 | 0.643357 |
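A short sketch of how these named routes are typically consumed, assuming the URLConf above is included in the project's root URLConf without a namespace (an assumption, since the include is not shown):

from django.urls import reverse

password_reset_url = reverse("admin_password_reset")
activation_url = reverse("activation", kwargs={"activation_key": "abc-123"})
confirm_url = reverse(
    "password_reset_confirm", kwargs={"uidb64": "MQ", "token": "set-password"}
)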
27f994f09865de8a181ff91d104d10af6f24fb87 | 39,812 | py | Python | axelrod/tests/unit/test_tournament.py | dashiellfryer/Axelrod | 0d684b3273d15e3e0ecf70be8e893fffc5277c84 | ["MIT"] | null | null | null | axelrod/tests/unit/test_tournament.py | dashiellfryer/Axelrod | 0d684b3273d15e3e0ecf70be8e893fffc5277c84 | ["MIT"] | null | null | null | axelrod/tests/unit/test_tournament.py | dashiellfryer/Axelrod | 0d684b3273d15e3e0ecf70be8e893fffc5277c84 | ["MIT"] | null | null | null |
"""Tests for the main tournament class."""
import csv
import filecmp
import io
import logging
import os
import pickle
import unittest
import warnings
from multiprocessing import Queue, cpu_count
from unittest.mock import MagicMock, patch
import axelrod
import numpy as np
import pandas as pd
from axelrod.tests.property import (
prob_end_tournaments,
spatial_tournaments,
strategy_lists,
tournaments,
)
from axelrod.tournament import _close_objects
from tqdm import tqdm
from hypothesis import example, given, settings
from hypothesis.strategies import floats, integers
C, D = axelrod.Action.C, axelrod.Action.D
test_strategies = [
axelrod.Cooperator,
axelrod.TitForTat,
axelrod.Defector,
axelrod.Grudger,
axelrod.GoByMajority,
]
test_repetitions = 5
test_turns = 100
test_prob_end = 0.5
test_edges = [(0, 1), (1, 2), (3, 4)]
deterministic_strategies = [
s for s in axelrod.short_run_time_strategies if not s().classifier["stochastic"]
]
class RecordedTQDM(tqdm):
"""This is a tqdm.tqdm that keeps a record of every RecordedTQDM created.
It is used to test that progress bars were correctly created and then
closed."""
record = []
def __init__(self, *args, **kwargs):
super(RecordedTQDM, self).__init__(*args, **kwargs)
RecordedTQDM.record.append(self)
@classmethod
def reset_record(cls):
cls.record = []
class TestTournament(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.game = axelrod.Game()
cls.players = [s() for s in test_strategies]
cls.test_name = "test"
cls.test_repetitions = test_repetitions
cls.test_turns = test_turns
cls.expected_payoff = [
[600, 600, 0, 600, 600],
[600, 600, 199, 600, 600],
[1000, 204, 200, 204, 204],
[600, 600, 199, 600, 600],
[600, 600, 199, 600, 600],
]
cls.expected_cooperation = [
[200, 200, 200, 200, 200],
[200, 200, 1, 200, 200],
[0, 0, 0, 0, 0],
[200, 200, 1, 200, 200],
[200, 200, 1, 200, 200],
]
cls.filename = "test_outputs/test_tournament.csv"
def setUp(self):
self.test_tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=2,
repetitions=1,
)
def test_init(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=self.test_turns,
noise=0.2,
)
self.assertEqual(len(tournament.players), len(test_strategies))
self.assertIsInstance(
tournament.players[0].match_attributes["game"], axelrod.Game
)
self.assertEqual(tournament.game.score((C, C)), (3, 3))
self.assertEqual(tournament.turns, self.test_turns)
self.assertEqual(tournament.repetitions, 10)
self.assertEqual(tournament.name, "test")
self.assertIsInstance(tournament._logger, logging.Logger)
self.assertEqual(tournament.noise, 0.2)
anonymous_tournament = axelrod.Tournament(players=self.players)
self.assertEqual(anonymous_tournament.name, "axelrod")
def test_init_with_match_attributes(self):
tournament = axelrod.Tournament(
players=self.players, match_attributes={"length": float("inf")}
)
mg = tournament.match_generator
match_params = mg.build_single_match_params()
self.assertEqual(match_params["match_attributes"], {"length": float("inf")})
def test_warning(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=10,
repetitions=1,
)
with warnings.catch_warnings(record=True) as w:
# Check that a warning is raised if no results set is built and no
# filename is given
results = tournament.play(build_results=False, progress_bar=False)
self.assertEqual(len(w), 1)
with warnings.catch_warnings(record=True) as w:
            # Check that no warning is raised if no results set is built and a
            # filename is given
tournament.play(
build_results=False, filename=self.filename, progress_bar=False
)
self.assertEqual(len(w), 0)
def test_setup_output_with_filename(self):
self.test_tournament.setup_output(self.filename)
self.assertEqual(self.test_tournament.filename, self.filename)
self.assertIsNone(self.test_tournament._temp_file_descriptor)
self.assertFalse(hasattr(self.test_tournament, "interactions_dict"))
def test_setup_output_no_filename(self):
self.test_tournament.setup_output()
self.assertIsInstance(self.test_tournament.filename, str)
self.assertIsInstance(self.test_tournament._temp_file_descriptor, int)
self.assertFalse(hasattr(self.test_tournament, "interactions_dict"))
os.close(self.test_tournament._temp_file_descriptor)
os.remove(self.test_tournament.filename)
def test_play_resets_num_interactions(self):
self.assertEqual(self.test_tournament.num_interactions, 0)
self.test_tournament.play(progress_bar=False)
self.assertEqual(self.test_tournament.num_interactions, 15)
self.test_tournament.play(progress_bar=False)
self.assertEqual(self.test_tournament.num_interactions, 15)
def test_play_changes_use_progress_bar(self):
self.assertTrue(self.test_tournament.use_progress_bar)
self.test_tournament.play(progress_bar=False)
self.assertFalse(self.test_tournament.use_progress_bar)
self.test_tournament.play(progress_bar=True)
self.assertTrue(self.test_tournament.use_progress_bar)
def test_play_changes_temp_file_descriptor(self):
self.assertIsNone(self.test_tournament._temp_file_descriptor)
# No file descriptor for a named file.
self.test_tournament.play(filename=self.filename, progress_bar=False)
self.assertIsNone(self.test_tournament._temp_file_descriptor)
# Temp file creates file descriptor.
self.test_tournament.play(filename=None, progress_bar=False)
self.assertIsInstance(self.test_tournament._temp_file_descriptor, int)
def test_play_tempfile_removed(self):
self.test_tournament.play(filename=None, progress_bar=False)
self.assertFalse(os.path.isfile(self.test_tournament.filename))
def test_play_resets_filename_and_temp_file_descriptor_each_time(self):
self.test_tournament.play(progress_bar=False)
self.assertIsInstance(self.test_tournament._temp_file_descriptor, int)
self.assertIsInstance(self.test_tournament.filename, str)
old_filename = self.test_tournament.filename
self.test_tournament.play(filename=self.filename, progress_bar=False)
self.assertIsNone(self.test_tournament._temp_file_descriptor)
self.assertEqual(self.test_tournament.filename, self.filename)
self.assertNotEqual(old_filename, self.test_tournament.filename)
self.test_tournament.play(progress_bar=False)
self.assertIsInstance(self.test_tournament._temp_file_descriptor, int)
self.assertIsInstance(self.test_tournament.filename, str)
self.assertNotEqual(old_filename, self.test_tournament.filename)
self.assertNotEqual(self.test_tournament.filename, self.filename)
def test_get_file_objects_no_filename(self):
file, writer = self.test_tournament._get_file_objects()
self.assertIsNone(file)
self.assertIsNone(writer)
def test_get_file_object_with_filename(self):
self.test_tournament.filename = self.filename
file_object, writer = self.test_tournament._get_file_objects()
self.assertIsInstance(file_object, io.TextIOWrapper)
self.assertEqual(writer.__class__.__name__, "writer")
file_object.close()
def test_get_progress_bar(self):
self.test_tournament.use_progress_bar = False
pbar = self.test_tournament._get_progress_bar()
self.assertIsNone(pbar)
self.test_tournament.use_progress_bar = True
pbar = self.test_tournament._get_progress_bar()
self.assertIsInstance(pbar, tqdm)
self.assertEqual(pbar.desc, "Playing matches")
self.assertEqual(pbar.n, 0)
self.assertEqual(pbar.total, self.test_tournament.match_generator.size)
new_edges = [(0, 1), (1, 2), (2, 3), (3, 4)]
new_tournament = axelrod.Tournament(players=self.players, edges=new_edges)
new_tournament.use_progress_bar = True
pbar = new_tournament._get_progress_bar()
self.assertEqual(pbar.desc, "Playing matches")
self.assertEqual(pbar.n, 0)
self.assertEqual(pbar.total, len(new_edges))
def test_serial_play(self):
# Test that we get an instance of ResultSet
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=axelrod.DEFAULT_TURNS,
repetitions=self.test_repetitions,
)
results = tournament.play(progress_bar=False)
self.assertIsInstance(results, axelrod.ResultSet)
# Test that _run_serial_repetitions is called with empty matches list
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=axelrod.DEFAULT_TURNS,
repetitions=self.test_repetitions,
)
results = tournament.play(progress_bar=False)
self.assertEqual(tournament.num_interactions, 75)
def test_serial_play_with_different_game(self):
# Test that a non default game is passed to the result set
game = axelrod.Game(p=-1, r=-1, s=-1, t=-1)
tournament = axelrod.Tournament(
name=self.test_name, players=self.players, game=game, turns=1, repetitions=1
)
results = tournament.play(progress_bar=False)
self.assertLessEqual(np.max(results.scores), 0)
@patch("tqdm.tqdm", RecordedTQDM)
def test_no_progress_bar_play(self):
"""Test that progress bar is not created for progress_bar=False"""
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=axelrod.DEFAULT_TURNS,
repetitions=self.test_repetitions,
)
# Test with build results
RecordedTQDM.reset_record()
results = tournament.play(progress_bar=False)
self.assertIsInstance(results, axelrod.ResultSet)
# Check that no progress bar was created.
self.assertEqual(RecordedTQDM.record, [])
# Test without build results
RecordedTQDM.reset_record()
results = tournament.play(
progress_bar=False, build_results=False, filename=self.filename
)
self.assertIsNone(results)
self.assertEqual(RecordedTQDM.record, [])
def assert_play_pbar_correct_total_and_finished(self, pbar, total):
self.assertEqual(pbar.desc, "Playing matches")
self.assertEqual(pbar.total, total)
self.assertEqual(pbar.n, total)
self.assertTrue(pbar.disable, True)
@patch("tqdm.tqdm", RecordedTQDM)
def test_progress_bar_play(self):
"""Test that progress bar is created by default and with True argument"""
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=axelrod.DEFAULT_TURNS,
repetitions=self.test_repetitions,
)
RecordedTQDM.reset_record()
results = tournament.play()
self.assertIsInstance(results, axelrod.ResultSet)
# Check that progress bar was created, updated and closed.
self.assertEqual(len(RecordedTQDM.record), 2)
play_pbar = RecordedTQDM.record[0]
self.assert_play_pbar_correct_total_and_finished(play_pbar, total=15)
# Check all progress bars are closed.
self.assertTrue(all(pbar.disable for pbar in RecordedTQDM.record))
RecordedTQDM.reset_record()
results = tournament.play(progress_bar=True)
self.assertIsInstance(results, axelrod.ResultSet)
self.assertEqual(len(RecordedTQDM.record), 2)
play_pbar = RecordedTQDM.record[0]
self.assert_play_pbar_correct_total_and_finished(play_pbar, total=15)
# Test without build results
RecordedTQDM.reset_record()
results = tournament.play(
progress_bar=True, build_results=False, filename=self.filename
)
self.assertIsNone(results)
self.assertEqual(len(RecordedTQDM.record), 1)
play_pbar = RecordedTQDM.record[0]
self.assert_play_pbar_correct_total_and_finished(play_pbar, total=15)
@patch("tqdm.tqdm", RecordedTQDM)
def test_progress_bar_play_parallel(self):
"""Test that tournament plays when asking for progress bar for parallel
tournament and that progress bar is created."""
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=axelrod.DEFAULT_TURNS,
repetitions=self.test_repetitions,
)
# progress_bar = False
RecordedTQDM.reset_record()
results = tournament.play(progress_bar=False, processes=2)
self.assertEqual(RecordedTQDM.record, [])
self.assertIsInstance(results, axelrod.ResultSet)
# progress_bar = True
RecordedTQDM.reset_record()
results = tournament.play(progress_bar=True, processes=2)
self.assertIsInstance(results, axelrod.ResultSet)
self.assertEqual(len(RecordedTQDM.record), 2)
play_pbar = RecordedTQDM.record[0]
self.assert_play_pbar_correct_total_and_finished(play_pbar, total=15)
# progress_bar is default
RecordedTQDM.reset_record()
results = tournament.play(processes=2)
self.assertIsInstance(results, axelrod.ResultSet)
self.assertEqual(len(RecordedTQDM.record), 2)
play_pbar = RecordedTQDM.record[0]
self.assert_play_pbar_correct_total_and_finished(play_pbar, total=15)
@given(
tournament=tournaments(
min_size=2,
max_size=5,
min_turns=2,
max_turns=5,
min_repetitions=2,
max_repetitions=4,
)
)
@settings(max_examples=50)
@example(
tournament=axelrod.Tournament(
players=[s() for s in test_strategies],
turns=test_turns,
repetitions=test_repetitions,
)
)
# These two examples are to make sure #465 is fixed.
# As explained there: https://github.com/Axelrod-Python/Axelrod/issues/465,
# these two examples were identified by hypothesis.
@example(
tournament=axelrod.Tournament(
players=[axelrod.BackStabber(), axelrod.MindReader()],
turns=2,
repetitions=1,
)
)
@example(
tournament=axelrod.Tournament(
players=[axelrod.BackStabber(), axelrod.ThueMorse()], turns=2, repetitions=1
)
)
def test_property_serial_play(self, tournament):
"""Test serial play using hypothesis"""
# Test that we get an instance of ResultSet
results = tournament.play(progress_bar=False)
self.assertIsInstance(results, axelrod.ResultSet)
self.assertEqual(results.num_players, len(tournament.players))
self.assertEqual(results.players, [str(p) for p in tournament.players])
def test_parallel_play(self):
# Test that we get an instance of ResultSet
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=axelrod.DEFAULT_TURNS,
repetitions=self.test_repetitions,
)
results = tournament.play(processes=2, progress_bar=False)
self.assertIsInstance(results, axelrod.ResultSet)
self.assertEqual(tournament.num_interactions, 75)
# The following relates to #516
players = [
axelrod.Cooperator(),
axelrod.Defector(),
axelrod.BackStabber(),
axelrod.PSOGambler2_2_2(),
axelrod.ThueMorse(),
axelrod.DoubleCrosser(),
]
tournament = axelrod.Tournament(
name=self.test_name,
players=players,
game=self.game,
turns=20,
repetitions=self.test_repetitions,
)
scores = tournament.play(processes=2, progress_bar=False).scores
self.assertEqual(len(scores), len(players))
def test_parallel_play_with_writing_to_file(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=axelrod.DEFAULT_TURNS,
repetitions=self.test_repetitions,
)
results = tournament.play(
processes=2, progress_bar=False, filename=self.filename
)
self.assertIsInstance(results, axelrod.ResultSet)
self.assertEqual(tournament.num_interactions, 75)
def test_run_serial(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=axelrod.DEFAULT_TURNS,
repetitions=self.test_repetitions,
)
tournament._write_interactions_to_file = MagicMock(
name="_write_interactions_to_file"
)
self.assertTrue(tournament._run_serial())
# Get the calls made to write_interactions
calls = tournament._write_interactions_to_file.call_args_list
self.assertEqual(len(calls), 15)
def test_run_parallel(self):
class PickleableMock(MagicMock):
def __reduce__(self):
return MagicMock, ()
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=axelrod.DEFAULT_TURNS,
repetitions=self.test_repetitions,
)
tournament._write_interactions_to_file = PickleableMock(
name="_write_interactions_to_file"
)
# For test coverage purposes. This confirms PickleableMock can be
# pickled exactly once. Windows multi-processing must pickle this Mock
# exactly once during testing.
pickled = pickle.loads(pickle.dumps(tournament))
self.assertIsInstance(pickled._write_interactions_to_file, MagicMock)
self.assertRaises(pickle.PicklingError, pickle.dumps, pickled)
self.assertTrue(tournament._run_parallel())
# Get the calls made to write_interactions
calls = tournament._write_interactions_to_file.call_args_list
self.assertEqual(len(calls), 15)
def test_n_workers(self):
max_processes = cpu_count()
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=axelrod.DEFAULT_TURNS,
repetitions=self.test_repetitions,
)
self.assertEqual(tournament._n_workers(processes=1), max_processes)
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=axelrod.DEFAULT_TURNS,
repetitions=self.test_repetitions,
)
self.assertEqual(
tournament._n_workers(processes=max_processes + 2), max_processes
)
@unittest.skipIf(cpu_count() < 2, "not supported on single processor machines")
def test_2_workers(self):
# This is a separate test with a skip condition because we
# cannot guarantee that the tests will always run on a machine
# with more than one processor
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=axelrod.DEFAULT_TURNS,
repetitions=self.test_repetitions,
)
self.assertEqual(tournament._n_workers(processes=2), 2)
def test_start_workers(self):
workers = 2
work_queue = Queue()
done_queue = Queue()
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=axelrod.DEFAULT_TURNS,
repetitions=self.test_repetitions,
)
chunks = tournament.match_generator.build_match_chunks()
for chunk in chunks:
work_queue.put(chunk)
tournament._start_workers(workers, work_queue, done_queue)
stops = 0
while stops < workers:
payoffs = done_queue.get()
if payoffs == "STOP":
stops += 1
self.assertEqual(stops, workers)
def test_worker(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=axelrod.DEFAULT_TURNS,
repetitions=self.test_repetitions,
)
work_queue = Queue()
chunks = tournament.match_generator.build_match_chunks()
count = 0
for chunk in chunks:
work_queue.put(chunk)
count += 1
work_queue.put("STOP")
done_queue = Queue()
tournament._worker(work_queue, done_queue)
for r in range(count):
new_matches = done_queue.get()
for index_pair, matches in new_matches.items():
self.assertIsInstance(index_pair, tuple)
self.assertEqual(len(matches), self.test_repetitions)
queue_stop = done_queue.get()
self.assertEqual(queue_stop, "STOP")
def test_build_result_set(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=axelrod.DEFAULT_TURNS,
repetitions=self.test_repetitions,
)
results = tournament.play(progress_bar=False)
self.assertIsInstance(results, axelrod.ResultSet)
def test_no_build_result_set(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=axelrod.DEFAULT_TURNS,
repetitions=self.test_repetitions,
)
tournament._calculate_results = MagicMock(name="_calculate_results")
# Mocking this as it is called by play
self.assertIsNone(
tournament.play(
filename=self.filename, progress_bar=False, build_results=False
)
)
# Get the calls made to write_interactions
calls = tournament._calculate_results.call_args_list
self.assertEqual(len(calls), 0)
@given(turns=integers(min_value=1, max_value=200))
@settings(max_examples=5)
@example(turns=3)
@example(turns=axelrod.DEFAULT_TURNS)
def test_play_matches(self, turns):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
repetitions=self.test_repetitions,
)
def make_chunk_generator():
for player1_index in range(len(self.players)):
for player2_index in range(player1_index, len(self.players)):
index_pair = (player1_index, player2_index)
match_params = {"turns": turns, "game": self.game}
yield (index_pair, match_params, self.test_repetitions)
chunk_generator = make_chunk_generator()
interactions = {}
for chunk in chunk_generator:
result = tournament._play_matches(chunk)
for index_pair, inters in result.items():
try:
interactions[index_pair].append(inters)
except KeyError:
interactions[index_pair] = [inters]
self.assertEqual(len(interactions), 15)
for index_pair, inter in interactions.items():
self.assertEqual(len(index_pair), 2)
for plays in inter:
# Check that have the expected number of repetitions
self.assertEqual(len(plays), self.test_repetitions)
for repetition in plays:
actions, results = repetition
self.assertEqual(len(actions), turns)
self.assertEqual(len(results), 10)
# Check that matches no longer exist
self.assertEqual((len(list(chunk_generator))), 0)
def test_match_cache_is_used(self):
"""
Create two Random players that are classified as deterministic.
As they are deterministic the cache will be used.
"""
FakeRandom = axelrod.Random
FakeRandom.classifier["stochastic"] = False
p1 = FakeRandom()
p2 = FakeRandom()
tournament = axelrod.Tournament((p1, p2), turns=5, repetitions=2)
results = tournament.play(progress_bar=False)
for player_scores in results.scores:
self.assertEqual(player_scores[0], player_scores[1])
def test_write_interactions(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=2,
repetitions=2,
)
tournament._write_interactions_to_file = MagicMock(
name="_write_interactions_to_file"
)
# Mocking this as it is called by play
self.assertIsNone(
tournament.play(
filename=self.filename, progress_bar=False, build_results=False
)
)
# Get the calls made to write_interactions
calls = tournament._write_interactions_to_file.call_args_list
self.assertEqual(len(calls), 15)
def test_write_to_csv_with_results(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=2,
repetitions=2,
)
tournament.play(filename=self.filename, progress_bar=False)
df = pd.read_csv(self.filename)
expected_df = pd.read_csv("test_outputs/expected_test_tournament.csv")
self.assertTrue(df.equals(expected_df))
def test_write_to_csv_without_results(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=2,
repetitions=2,
)
tournament.play(filename=self.filename, progress_bar=False, build_results=False)
df = pd.read_csv(self.filename)
expected_df = pd.read_csv(
"test_outputs/expected_test_tournament_no_results.csv"
)
self.assertTrue(df.equals(expected_df))
class TestProbEndTournament(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.game = axelrod.Game()
cls.players = [s() for s in test_strategies]
cls.test_name = "test"
cls.test_repetitions = test_repetitions
cls.test_prob_end = test_prob_end
def test_init(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
prob_end=self.test_prob_end,
noise=0.2,
)
self.assertEqual(tournament.match_generator.prob_end, tournament.prob_end)
self.assertEqual(len(tournament.players), len(test_strategies))
self.assertEqual(tournament.game.score((C, C)), (3, 3))
self.assertIsNone(tournament.turns)
self.assertEqual(tournament.repetitions, 10)
self.assertEqual(tournament.name, "test")
self.assertIsInstance(tournament._logger, logging.Logger)
self.assertEqual(tournament.noise, 0.2)
anonymous_tournament = axelrod.Tournament(players=self.players)
self.assertEqual(anonymous_tournament.name, "axelrod")
@given(
tournament=prob_end_tournaments(
min_size=2,
max_size=5,
min_prob_end=0.1,
max_prob_end=0.9,
min_repetitions=2,
max_repetitions=4,
)
)
@settings(max_examples=5)
@example(
tournament=axelrod.Tournament(
players=[s() for s in test_strategies],
prob_end=0.2,
repetitions=test_repetitions,
)
)
# These two examples are to make sure #465 is fixed.
# As explained there: https://github.com/Axelrod-Python/Axelrod/issues/465,
# these two examples were identified by hypothesis.
@example(
tournament=axelrod.Tournament(
players=[axelrod.BackStabber(), axelrod.MindReader()],
prob_end=0.2,
repetitions=1,
)
)
@example(
tournament=axelrod.Tournament(
players=[axelrod.ThueMorse(), axelrod.MindReader()],
prob_end=0.2,
repetitions=1,
)
)
def test_property_serial_play(self, tournament):
"""Test serial play using hypothesis"""
# Test that we get an instance of ResultSet
results = tournament.play(progress_bar=False)
self.assertIsInstance(results, axelrod.ResultSet)
self.assertEqual(results.num_players, len(tournament.players))
self.assertEqual(results.players, [str(p) for p in tournament.players])
class TestSpatialTournament(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.game = axelrod.Game()
cls.players = [s() for s in test_strategies]
cls.test_name = "test"
cls.test_repetitions = test_repetitions
cls.test_turns = test_turns
cls.test_edges = test_edges
def test_init(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=self.test_turns,
edges=self.test_edges,
noise=0.2,
)
self.assertEqual(tournament.match_generator.edges, tournament.edges)
self.assertEqual(len(tournament.players), len(test_strategies))
self.assertEqual(tournament.game.score((C, C)), (3, 3))
self.assertEqual(tournament.turns, 100)
self.assertEqual(tournament.repetitions, 10)
self.assertEqual(tournament.name, "test")
self.assertIsInstance(tournament._logger, logging.Logger)
self.assertEqual(tournament.noise, 0.2)
self.assertEqual(tournament.match_generator.noise, 0.2)
anonymous_tournament = axelrod.Tournament(players=self.players)
self.assertEqual(anonymous_tournament.name, "axelrod")
@given(
strategies=strategy_lists(
strategies=deterministic_strategies, min_size=2, max_size=2
),
turns=integers(min_value=1, max_value=20),
repetitions=integers(min_value=1, max_value=5),
noise=floats(min_value=0, max_value=1),
seed=integers(min_value=0, max_value=4294967295),
)
@settings(max_examples=5)
def test_complete_tournament(self, strategies, turns, repetitions, noise, seed):
"""
A test to check that a spatial tournament on the complete multigraph
gives the same results as the round robin.
"""
players = [s() for s in strategies]
# edges
edges = []
for i in range(0, len(players)):
for j in range(i, len(players)):
edges.append((i, j))
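        # For example, with 3 players this builds the complete multigraph with
        # self-loops: [(0, 0), (0, 1), (0, 2), (1, 1), (1, 2), (2, 2)], which is
        # exactly the set of pairings a round robin tournament plays.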
# create a round robin tournament
tournament = axelrod.Tournament(
players, repetitions=repetitions, turns=turns, noise=noise
)
# create a complete spatial tournament
spatial_tournament = axelrod.Tournament(
players, repetitions=repetitions, turns=turns, noise=noise, edges=edges
)
axelrod.seed(seed)
results = tournament.play(progress_bar=False)
axelrod.seed(seed)
spatial_results = spatial_tournament.play(progress_bar=False)
self.assertEqual(results.ranked_names, spatial_results.ranked_names)
self.assertEqual(results.num_players, spatial_results.num_players)
self.assertEqual(results.repetitions, spatial_results.repetitions)
self.assertEqual(results.payoff_diffs_means, spatial_results.payoff_diffs_means)
self.assertEqual(results.payoff_matrix, spatial_results.payoff_matrix)
self.assertEqual(results.payoff_stddevs, spatial_results.payoff_stddevs)
self.assertEqual(results.payoffs, spatial_results.payoffs)
self.assertEqual(results.cooperating_rating, spatial_results.cooperating_rating)
self.assertEqual(results.cooperation, spatial_results.cooperation)
self.assertEqual(
results.normalised_cooperation, spatial_results.normalised_cooperation
)
self.assertEqual(results.normalised_scores, spatial_results.normalised_scores)
self.assertEqual(
results.good_partner_matrix, spatial_results.good_partner_matrix
)
self.assertEqual(
results.good_partner_rating, spatial_results.good_partner_rating
)
def test_particular_tournament(self):
"""A test for a tournament that has caused failures during some bug
fixing"""
players = [
axelrod.Cooperator(),
axelrod.Defector(),
axelrod.TitForTat(),
axelrod.Grudger(),
]
edges = [(0, 2), (0, 3), (1, 2), (1, 3)]
tournament = axelrod.Tournament(players, edges=edges)
results = tournament.play(progress_bar=False)
expected_ranked_names = ["Cooperator", "Tit For Tat", "Grudger", "Defector"]
self.assertEqual(results.ranked_names, expected_ranked_names)
# Check that this tournament runs with noise
tournament = axelrod.Tournament(players, edges=edges, noise=0.5)
results = tournament.play(progress_bar=False)
self.assertIsInstance(results, axelrod.ResultSet)
class TestProbEndingSpatialTournament(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.game = axelrod.Game()
cls.players = [s() for s in test_strategies]
cls.test_name = "test"
cls.test_repetitions = test_repetitions
cls.test_prob_end = test_prob_end
cls.test_edges = test_edges
def test_init(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
prob_end=self.test_prob_end,
edges=self.test_edges,
noise=0.2,
)
self.assertEqual(tournament.match_generator.edges, tournament.edges)
self.assertEqual(len(tournament.players), len(test_strategies))
self.assertEqual(tournament.game.score((C, C)), (3, 3))
self.assertIsNone(tournament.turns)
self.assertEqual(tournament.repetitions, 10)
self.assertEqual(tournament.name, "test")
self.assertIsInstance(tournament._logger, logging.Logger)
self.assertEqual(tournament.noise, 0.2)
self.assertEqual(tournament.match_generator.noise, 0.2)
self.assertEqual(tournament.prob_end, self.test_prob_end)
@given(
strategies=strategy_lists(
strategies=deterministic_strategies, min_size=2, max_size=2
),
prob_end=floats(min_value=0.1, max_value=0.9),
reps=integers(min_value=1, max_value=3),
seed=integers(min_value=0, max_value=4294967295),
)
@settings(max_examples=5)
def test_complete_tournament(self, strategies, prob_end, seed, reps):
"""
A test to check that a spatial tournament on the complete graph
gives the same results as the round robin.
"""
players = [s() for s in strategies]
# create a prob end round robin tournament
tournament = axelrod.Tournament(players, prob_end=prob_end, repetitions=reps)
axelrod.seed(seed)
results = tournament.play(progress_bar=False)
# create a complete spatial tournament
# edges
edges = [(i, j) for i in range(len(players)) for j in range(i, len(players))]
spatial_tournament = axelrod.Tournament(
players, prob_end=prob_end, repetitions=reps, edges=edges
)
axelrod.seed(seed)
spatial_results = spatial_tournament.play(progress_bar=False)
self.assertEqual(results.match_lengths, spatial_results.match_lengths)
self.assertEqual(results.ranked_names, spatial_results.ranked_names)
self.assertEqual(results.wins, spatial_results.wins)
self.assertEqual(results.scores, spatial_results.scores)
self.assertEqual(results.cooperation, spatial_results.cooperation)
@given(
tournament=spatial_tournaments(
strategies=axelrod.basic_strategies,
max_turns=1,
max_noise=0,
max_repetitions=3,
),
seed=integers(min_value=0, max_value=4294967295),
)
@settings(max_examples=5)
def test_one_turn_tournament(self, tournament, seed):
"""
        Tests that a tournament with prob_end=1 gives the same results as the
        corresponding one-turn spatial tournament (with prob_end=1 every match
        lasts exactly one turn).
"""
prob_end_tour = axelrod.Tournament(
tournament.players,
prob_end=1,
edges=tournament.edges,
repetitions=tournament.repetitions,
)
axelrod.seed(seed)
prob_end_results = prob_end_tour.play(progress_bar=False)
axelrod.seed(seed)
one_turn_results = tournament.play(progress_bar=False)
self.assertEqual(prob_end_results.scores, one_turn_results.scores)
self.assertEqual(prob_end_results.wins, one_turn_results.wins)
self.assertEqual(prob_end_results.cooperation, one_turn_results.cooperation)
class TestHelperFunctions(unittest.TestCase):
def test_close_objects_with_none(self):
self.assertIsNone(_close_objects(None, None))
def test_close_objects_with_file_objs(self):
f1 = open("to_delete_1", "w")
f2 = open("to_delete_2", "w")
f2.close()
f2 = open("to_delete_2", "r")
self.assertFalse(f1.closed)
self.assertFalse(f2.closed)
_close_objects(f1, f2)
self.assertTrue(f1.closed)
self.assertTrue(f2.closed)
os.remove("to_delete_1")
os.remove("to_delete_2")
def test_close_objects_with_tqdm(self):
pbar_1 = tqdm(range(5))
pbar_2 = tqdm(total=10, desc="hi", file=io.StringIO())
self.assertFalse(pbar_1.disable)
self.assertFalse(pbar_2.disable)
_close_objects(pbar_1, pbar_2)
self.assertTrue(pbar_1.disable)
self.assertTrue(pbar_2.disable)
def test_close_objects_with_different_objects(self):
file = open("to_delete_1", "w")
pbar = tqdm(range(5))
num = 5
empty = None
word = "hi"
_close_objects(file, pbar, num, empty, word)
self.assertTrue(pbar.disable)
self.assertTrue(file.closed)
os.remove("to_delete_1")
| 36.897127
| 88
| 0.647167
|
7ef56c5523ddf81598edf1f001dd1f28e3dab071
| 3,076
|
py
|
Python
|
fontais/table.py
|
wallarelvo/fontais
|
7baf8af531967f72820e8ccab8e162d40bd7f951
|
[
"Apache-2.0"
] | 1
|
2015-07-09T16:51:16.000Z
|
2015-07-09T16:51:16.000Z
|
fontais/table.py
|
wallarelvo/fontais
|
7baf8af531967f72820e8ccab8e162d40bd7f951
|
[
"Apache-2.0"
] | null | null | null |
fontais/table.py
|
wallarelvo/fontais
|
7baf8af531967f72820e8ccab8e162d40bd7f951
|
[
"Apache-2.0"
] | null | null | null |
import csv
import numpy as np
class Table(object):
"""
    A generic table structure that works as an in-memory database where
    every column is used as an index.
"""
def __init__(self, headers):
self.data = dict()
self.rows = list()
        # use a list (not a lazy map object) so the headers can be indexed later
        self.headers = [val.strip() for val in headers]
self.index_data = dict()
for header in self.headers:
self.data[header] = list()
self.index_data[header] = dict()
def get_headers(self):
return self.headers
def plot(self, h_x, h_y, plt, **kwargs):
plt.scatter(self(h_x), self(h_y), **kwargs)
plt.xlabel(h_x)
plt.ylabel(h_y)
def plot_func(self, h_x, h_y, func, plt, label=""):
xs = np.linspace(min(self(h_x)), max(self(h_x)), 1000)
ys = func(xs)
plt.plot(xs, ys, lw=3, label=label)
plt.xlabel(h_x)
plt.ylabel(h_y)
def splice(self, headers):
ret_dict = dict()
for header in headers:
ret_dict[header] = self.data[header]
return ret_dict
def to_matrix(self, *headers):
        mat = [list() for _ in range(len(self.rows))]
for header in headers:
for j, datum in enumerate(self.data[header]):
mat[j].append(datum)
return mat
def try_conv(self, elem):
try:
return float(elem)
except ValueError:
if elem == "NA":
raise ValueError()
else:
return elem
def get_num_rows(self):
return len(self.rows)
def add_row(self, row):
try:
            row = [self.try_conv(elem) for elem in row]
except ValueError:
return self
self.rows.append(row)
for i, elem in enumerate(row):
self.data[self.headers[i]].append(elem)
            if elem not in self.index_data[self.headers[i]]:
self.index_data[self.headers[i]][elem] = dict()
for header in self.headers:
self.index_data[self.headers[i]][elem][header] = list()
for j, inner_elem in enumerate(row):
self.index_data[self.headers[i]][elem][self.headers[j]]\
.append(inner_elem)
return self
def get_row(self, i):
return self.rows[i]
def __getitem__(self, index):
ret_dict = dict()
for i, header in enumerate(self.headers):
ret_dict[header] = self.rows[index][i]
return ret_dict
def __call__(self, index, value=None, s_index=None):
        # explicit None checks so falsy values (e.g. 0 or "") can still be used as keys
        if value is not None:
            if s_index is not None:
return self.index_data[index][value][s_index]
else:
return self.index_data[index][value]
else:
return self.data[index]
def load_csv(csv_filename):
tab = None
with open(csv_filename) as f:
reader = csv.reader(f)
for i, row in enumerate(reader):
if i == 0:
tab = Table(row)
else:
tab.add_row(row)
return tab
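
# Illustrative usage sketch (the CSV file and column names below are hypothetical,
# not part of this module): load a file and query it through the per-column
# indexes built by add_row().
#
#   tab = load_csv("measurements.csv")
#   heights = tab("height")                    # whole column
#   rows_a = tab("label", "A")                 # all columns, restricted to rows where label == "A"
#   widths_a = tab("label", "A", "width")      # one column for those rows
#   first_row = tab[0]                         # dict mapping header -> value for row 0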
| 27.963636
| 75
| 0.542263
|
d49e38928384b7729f790d609d889bf407f126fd
| 442
|
py
|
Python
|
labs/stacktrain/setup.py
|
finishertech/openstack-training-labs
|
50332026a2f00aaba3788decd7b3e0c5e8b231a7
|
[
"Apache-2.0"
] | 73
|
2015-10-18T02:57:15.000Z
|
2021-06-04T08:28:09.000Z
|
labs/stacktrain/setup.py
|
finishertech/openstack-training-labs
|
50332026a2f00aaba3788decd7b3e0c5e8b231a7
|
[
"Apache-2.0"
] | 1
|
2016-05-26T12:32:45.000Z
|
2016-05-26T12:32:45.000Z
|
labs/stacktrain/setup.py
|
finishertech/openstack-training-labs
|
50332026a2f00aaba3788decd7b3e0c5e8b231a7
|
[
"Apache-2.0"
] | 46
|
2015-11-15T14:31:27.000Z
|
2021-03-25T10:59:25.000Z
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': 'My Project',
'author': 'Roger Luethi',
'url': 'URL to get it at.',
'download_url': 'Where to download it.',
'author_email': 'rl@patchworkscience.org',
'version': '0.1',
'install_requires': ['pytest'],
'packages': ['NAME'],
'scripts': [],
'name': 'projectname'
}
setup(**config)
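
# Sketch of how a setup script like this is typically used (assumes the usual
# setuptools workflow; not specific to this project):
#   pip install .            # install the package from the source tree
#   python setup.py sdist    # build a source distribution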
| 22.1
| 46
| 0.608597
|
df55beadbe02c86ad8c1a1874abbeb56154323a9
| 1,358
|
py
|
Python
|
examples/plotting/action_plotting.py
|
ndangtt/LeadingOnesDAC
|
953747d8702f179851d7973c65779a1f830e03a1
|
[
"Apache-2.0"
] | 11
|
2020-11-09T10:50:31.000Z
|
2022-02-19T09:23:44.000Z
|
examples/plotting/action_plotting.py
|
ndangtt/LeadingOnesDAC
|
953747d8702f179851d7973c65779a1f830e03a1
|
[
"Apache-2.0"
] | 95
|
2020-11-18T09:37:30.000Z
|
2022-02-17T10:05:33.000Z
|
examples/plotting/action_plotting.py
|
ndangtt/LeadingOnesDAC
|
953747d8702f179851d7973c65779a1f830e03a1
|
[
"Apache-2.0"
] | 11
|
2020-11-15T15:24:27.000Z
|
2022-03-14T14:51:43.000Z
|
from pathlib import Path
from dacbench.logger import load_logs, log2dataframe
from dacbench.plotting import plot_action
import matplotlib.pyplot as plt
def plot_scalar_action():
"""
Plot Sigmoid actions over time by action component and by mean action component in intervals
"""
file = Path("./data/sigmoid_example/ActionFrequencyWrapper.jsonl")
logs = load_logs(file)
dataframe = log2dataframe(logs, wide=True)
Path("output").mkdir(exist_ok=True)
grid = plot_action(dataframe, interval=18, title="Sigmoid", col="seed", col_wrap=3)
grid.savefig("output/sigmoid_example_action_interval_18.pdf")
plt.show()
grid = plot_action(dataframe, title="Sigmoid", col="seed", col_wrap=3)
grid.savefig("output/sigmoid_example_action.pdf")
plt.show()
def plot_action_modea():
"""
Plot ModEA actions over time and in intervals
"""
file = Path("data/ModeaBenchmark/ActionFrequencyWrapper.jsonl")
logs = load_logs(file)
dataframe = log2dataframe(logs, wide=True)
Path("output").mkdir(exist_ok=True)
grid = plot_action(dataframe, interval=5)
grid.savefig("output/modea_action_interval_5.pdf")
plt.show()
grid = plot_action(dataframe)
grid.savefig("output/modea_action.pdf")
plt.show()
if __name__ == "__main__":
plot_action_modea()
plot_scalar_action()
| 28.893617
| 96
| 0.717231
|
73386d0f7026f8ec8785c57b0917773d28d80e8d
| 7,757
|
py
|
Python
|
slim/prune/eval.py
|
linglanfeng/PaddleDetection
|
487e6e60b2cfe0b411bba64622ca7f8fe467f3d2
|
[
"Apache-2.0"
] | 13
|
2020-09-09T12:23:36.000Z
|
2022-03-16T09:42:07.000Z
|
slim/prune/eval.py
|
rqjtsq/PaddleDetection
|
b457850659c43fbd4a26c4fc4b70a3709b9952d4
|
[
"Apache-2.0"
] | null | null | null |
slim/prune/eval.py
|
rqjtsq/PaddleDetection
|
b457850659c43fbd4a26c4fc4b70a3709b9952d4
|
[
"Apache-2.0"
] | 5
|
2020-09-14T07:35:39.000Z
|
2021-12-22T02:03:31.000Z
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, sys
# add python path of PaddleDetection to sys.path
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 3)))
if parent_path not in sys.path:
sys.path.append(parent_path)
import paddle.fluid as fluid
from paddleslim.prune import Pruner
from paddleslim.analysis import flops
from ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results, json_eval_results
import ppdet.utils.checkpoint as checkpoint
from ppdet.utils.check import check_gpu, check_version, check_config
from ppdet.data.reader import create_reader
from ppdet.core.workspace import load_config, merge_config, create
from ppdet.utils.cli import ArgsParser
import logging
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
def main():
"""
Main evaluate function
"""
cfg = load_config(FLAGS.config)
merge_config(FLAGS.opt)
check_config(cfg)
    # check whether use_gpu=True was set while running the CPU build of paddlepaddle
check_gpu(cfg.use_gpu)
# check if paddlepaddle version is satisfied
check_version()
main_arch = cfg.architecture
multi_scale_test = getattr(cfg, 'MultiScaleTEST', None)
# define executor
place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
# build program
model = create(main_arch)
startup_prog = fluid.Program()
eval_prog = fluid.Program()
with fluid.program_guard(eval_prog, startup_prog):
with fluid.unique_name.guard():
inputs_def = cfg['EvalReader']['inputs_def']
feed_vars, loader = model.build_inputs(**inputs_def)
if multi_scale_test is None:
fetches = model.eval(feed_vars)
else:
fetches = model.eval(feed_vars, multi_scale_test)
eval_prog = eval_prog.clone(True)
exe.run(startup_prog)
reader = create_reader(cfg.EvalReader)
loader.set_sample_list_generator(reader, place)
dataset = cfg['EvalReader']['dataset']
# eval already exists json file
if FLAGS.json_eval:
logger.info(
"In json_eval mode, PaddleDetection will evaluate json files in "
"output_eval directly. And proposal.json, bbox.json and mask.json "
"will be detected by default.")
json_eval_results(
cfg.metric, json_directory=FLAGS.output_eval, dataset=dataset)
return
pruned_params = FLAGS.pruned_params
assert (
FLAGS.pruned_params is not None
), "FLAGS.pruned_params is empty!!! Please set it by '--pruned_params' option."
pruned_params = FLAGS.pruned_params.strip().split(",")
logger.info("pruned params: {}".format(pruned_params))
pruned_ratios = [float(n) for n in FLAGS.pruned_ratios.strip().split(",")]
logger.info("pruned ratios: {}".format(pruned_ratios))
assert (len(pruned_params) == len(pruned_ratios)
), "The length of pruned params and pruned ratios should be equal."
    # compare element-wise; comparing whole lists would only check the first element
    assert all(0 < ratio < 1 for ratio in pruned_ratios), \
        "The elements of pruned ratios should be in range (0, 1)."
base_flops = flops(eval_prog)
pruner = Pruner()
eval_prog, _, _ = pruner.prune(
eval_prog,
fluid.global_scope(),
params=pruned_params,
ratios=pruned_ratios,
place=place,
only_graph=False)
pruned_flops = flops(eval_prog)
logger.info("pruned FLOPS: {}".format(
float(base_flops - pruned_flops) / base_flops))
compile_program = fluid.CompiledProgram(eval_prog).with_data_parallel()
    assert cfg.metric != 'OID', "eval process of OID dataset is not supported."
if cfg.metric == "WIDERFACE":
raise ValueError("metric type {} does not support in tools/eval.py, "
"please use tools/face_eval.py".format(cfg.metric))
assert cfg.metric in ['COCO', 'VOC'], \
"unknown metric type {}".format(cfg.metric)
extra_keys = []
if cfg.metric == 'COCO':
extra_keys = ['im_info', 'im_id', 'im_shape']
if cfg.metric == 'VOC':
extra_keys = ['gt_bbox', 'gt_class', 'is_difficult']
keys, values, cls = parse_fetches(fetches, eval_prog, extra_keys)
# whether output bbox is normalized in model output layer
is_bbox_normalized = False
if hasattr(model, 'is_bbox_normalized') and \
callable(model.is_bbox_normalized):
is_bbox_normalized = model.is_bbox_normalized()
sub_eval_prog = None
sub_keys = None
sub_values = None
# build sub-program
if 'Mask' in main_arch and multi_scale_test:
sub_eval_prog = fluid.Program()
with fluid.program_guard(sub_eval_prog, startup_prog):
with fluid.unique_name.guard():
inputs_def = cfg['EvalReader']['inputs_def']
inputs_def['mask_branch'] = True
feed_vars, eval_loader = model.build_inputs(**inputs_def)
sub_fetches = model.eval(
feed_vars, multi_scale_test, mask_branch=True)
assert cfg.metric == 'COCO'
extra_keys = ['im_id', 'im_shape']
sub_keys, sub_values, _ = parse_fetches(sub_fetches, sub_eval_prog,
extra_keys)
sub_eval_prog = sub_eval_prog.clone(True)
# load model
if 'weights' in cfg:
checkpoint.load_checkpoint(exe, eval_prog, cfg.weights)
resolution = None
if 'Mask' in cfg.architecture:
resolution = model.mask_head.resolution
results = eval_run(
exe,
compile_program,
loader,
keys,
values,
cls,
cfg,
sub_eval_prog,
sub_keys,
sub_values,
resolution=resolution)
# if map_type not set, use default 11point, only use in VOC eval
map_type = cfg.map_type if 'map_type' in cfg else '11point'
eval_results(
results,
cfg.metric,
cfg.num_classes,
resolution,
is_bbox_normalized,
FLAGS.output_eval,
map_type,
dataset=dataset)
if __name__ == '__main__':
parser = ArgsParser()
parser.add_argument(
"--json_eval",
action='store_true',
default=False,
help="Whether to re eval with already exists bbox.json or mask.json")
parser.add_argument(
"-f",
"--output_eval",
default=None,
type=str,
help="Evaluation file directory, default is current directory.")
parser.add_argument(
"-p",
"--pruned_params",
default=None,
type=str,
help="The parameters to be pruned when calculating sensitivities.")
parser.add_argument(
"--pruned_ratios",
default=None,
type=str,
help="The ratios pruned iteratively for each parameter when calculating sensitivities."
)
FLAGS = parser.parse_args()
main()
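
# Illustrative invocation (paths, parameter names and ratios below are placeholders,
# and the -c/--config flag is assumed to come from ppdet's ArgsParser):
#   python slim/prune/eval.py -c configs/some_model.yml \
#       --pruned_params "conv1_weights,conv2_weights" --pruned_ratios "0.2,0.3"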
| 33.873362
| 95
| 0.655537
|
addfa18e243fce8885d6e8087431c4827639d524
| 1,728
|
py
|
Python
|
sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/models/logger_update_contract.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/models/logger_update_contract.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 2
|
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/models/logger_update_contract.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 1
|
2019-06-17T22:18:23.000Z
|
2019-06-17T22:18:23.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class LoggerUpdateContract(Model):
"""Logger update contract.
:param logger_type: Logger type. Possible values include: 'azureEventHub',
'applicationInsights'
:type logger_type: str or ~azure.mgmt.apimanagement.models.LoggerType
:param description: Logger description.
:type description: str
:param credentials: Logger credentials.
:type credentials: dict[str, str]
:param is_buffered: Whether records are buffered in the logger before
publishing. Default is assumed to be true.
:type is_buffered: bool
"""
_attribute_map = {
'logger_type': {'key': 'properties.loggerType', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'credentials': {'key': 'properties.credentials', 'type': '{str}'},
'is_buffered': {'key': 'properties.isBuffered', 'type': 'bool'},
}
def __init__(self, **kwargs):
super(LoggerUpdateContract, self).__init__(**kwargs)
self.logger_type = kwargs.get('logger_type', None)
self.description = kwargs.get('description', None)
self.credentials = kwargs.get('credentials', None)
self.is_buffered = kwargs.get('is_buffered', None)
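
# Illustrative usage sketch (values are placeholders, not part of the generated SDK):
#   contract = LoggerUpdateContract(
#       logger_type="applicationInsights",
#       description="example logger",
#       credentials={"instrumentationKey": "<key>"},
#       is_buffered=True,
#   )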
| 40.186047
| 78
| 0.62963
|
5352a02b80e5a25b747278e4a431bce20e3094eb
| 1,973
|
py
|
Python
|
setup.py
|
boltnev/iktomi
|
bc92006c026f9b42e56f1af5ced2fe577673a486
|
[
"MIT"
] | null | null | null |
setup.py
|
boltnev/iktomi
|
bc92006c026f9b42e56f1af5ced2fe577673a486
|
[
"MIT"
] | null | null | null |
setup.py
|
boltnev/iktomi
|
bc92006c026f9b42e56f1af5ced2fe577673a486
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
from distutils.core import setup
install_requires = ['six']
extras_requires = {
'web': ['webob'],
'fcgi': ['flup6'],
'sqla': ['sqlalchemy'],
'memcached': ['python-memcached'],
'cleanhtml': ['lxml'],
'renderhtml': ['jinja2'],
'images': ['pillow'],
}
tests_requires = [
'pymysql',
'testalchemy==0.4',
'pytest',
'pytest-cov',
'mockcache==1.0.3_alpha',
'webtest',
]
if sys.version_info[0] < 3:
tests_requires.append('mock')
dependency_links = [
'https://github.com/ods/testalchemy/tarball/master#egg=testalchemy-0.4',
'https://github.com/lunant/mockcache/tarball/master#egg=mockcache-1.0.3_alpha',
]
extras_requires['tests'] = tests_requires
setup(
name='iktomi',
version='0.5.2',
packages=['iktomi',
'iktomi.utils',
'iktomi.forms',
'iktomi.web',
'iktomi.templates',
'iktomi.templates.jinja2',
'iktomi.db',
'iktomi.db.sqla',
'iktomi.cli',
'iktomi.unstable',
'iktomi.unstable.forms',
'iktomi.unstable.utils',
'iktomi.unstable.db',
'iktomi.unstable.db.sqla'],
package_dir={
'iktomi.templates.jinja2': 'iktomi/templates/jinja2',
},
package_data={
'iktomi.templates.jinja2': ['templates/*/*.html'],
},
install_requires=install_requires,
extras_require=extras_requires,
tests_require=tests_requires,
dependency_links=dependency_links,
author='Denis Otkidach',
author_email='denis.otkidach@gmail.com',
maintainer='Harut Dagesyan',
maintainer_email='yes@harutune.name',
description='A web tool: routing, forms, other useful things.',
# long_description=open('README').read(),
url='http://github.com/SmartTeleMax/iktomi/',
license='MIT',
keywords='web forms',
)
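
# Note: the extras defined above can be combined at install time, e.g.
#   pip install "iktomi[web,sqla,images]"
# (illustrative; pick whichever extras the deployment actually needs).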
| 27.402778
| 83
| 0.582869
|
20909c5a26c3e7b77395e01c788a9d05bb8b08b8
| 392
|
py
|
Python
|
day5/day5.py
|
alexmotoc/AoC17
|
c496b1da3a5863b370e677b87b6f14c65863227c
|
[
"MIT"
] | null | null | null |
day5/day5.py
|
alexmotoc/AoC17
|
c496b1da3a5863b370e677b87b6f14c65863227c
|
[
"MIT"
] | null | null | null |
day5/day5.py
|
alexmotoc/AoC17
|
c496b1da3a5863b370e677b87b6f14c65863227c
|
[
"MIT"
] | null | null | null |
import numpy as np
input = np.loadtxt('day5_input.txt', dtype='i', delimiter='\n')
current_index = 0
steps = 0
while current_index >= 0 and current_index < len(input):
previous_index = current_index
current_index += input[current_index]
if input[previous_index] >= 3:
input[previous_index] -= 1
else:
input[previous_index] += 1
steps += 1
print(steps)
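
# Worked example from the puzzle description (AoC 2017 day 5, part 2): with the
# offsets [0, 3, 0, 1, -3] and the rule above (decrease an offset of 3 or more,
# otherwise increase it), the exit is reached after 10 steps.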
| 21.777778
| 63
| 0.665816
|
f7047db12c36770b75c15cf10d8ae3d6e3572d9e
| 1,130
|
py
|
Python
|
automatia/main.py
|
ShadowJonathan/automatia.py
|
555d473c11372b2eec14bcd7508b4593be4fba7f
|
[
"MIT"
] | 1
|
2018-01-01T21:41:46.000Z
|
2018-01-01T21:41:46.000Z
|
automatia/main.py
|
ShadowJonathan/automatia.py
|
555d473c11372b2eec14bcd7508b4593be4fba7f
|
[
"MIT"
] | null | null | null |
automatia/main.py
|
ShadowJonathan/automatia.py
|
555d473c11372b2eec14bcd7508b4593be4fba7f
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import sys
CLI = False
DEBUG = False
AUTO = False
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def setdebug(state):
global DEBUG
DEBUG = state
Debug("Python:")
for p in sys.version.split("\n"):
Debug(p)
def setauto(state):
global AUTO
AUTO = state
def isauto():
global AUTO
return AUTO
def setcli():
global CLI
CLI = True
def Warn(*message):
"""
:param Any message:
"""
print("[WARN] " + " ".join([str(m) for m in message]))
def Inform(*message):
"""
:param Any message:
"""
print("[AUTOMATIA] " + " ".join([str(m) for m in message]))
def Debug(*message):
global DEBUG
if DEBUG:
Inform("[D] " + " ".join([str(m) for m in message]))
def Error(*message):
"""
:param Any message:
"""
eprint("[ERROR] " + " ".join([str(m) for m in message]))
class FinishFinal(Exception):
pass
class FinishNow(FinishFinal):
pass
class FinishResult(Exception):
def __init__(self, URL, m=None):
self.URL = URL
self.m = m
| 14.868421
| 63
| 0.576991
|
f3eedcdfe5af58df24078cf851ebf7f2a9831040
| 4,062
|
py
|
Python
|
examples/cqc/python/hello_noLib/aliceTest.py
|
bvdvecht/SimulaQron
|
d190ebf5c4683db5b940efa55420bbef5eecf17a
|
[
"BSD-3-Clause"
] | null | null | null |
examples/cqc/python/hello_noLib/aliceTest.py
|
bvdvecht/SimulaQron
|
d190ebf5c4683db5b940efa55420bbef5eecf17a
|
[
"BSD-3-Clause"
] | null | null | null |
examples/cqc/python/hello_noLib/aliceTest.py
|
bvdvecht/SimulaQron
|
d190ebf5c4683db5b940efa55420bbef5eecf17a
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Copyright (c) 2017, Stephanie Wehner and Axel Dahlberg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. All advertising materials mentioning features or use of this software
# must display the following acknowledgement:
# This product includes software developed by Stephanie Wehner, QuTech.
# 4. Neither the name of the QuTech organization nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import socket
from SimulaQron.general.hostConfig import networkConfig
from SimulaQron.cqc.backend.cqcHeader import CQCHeader, CQC_TP_HELLO, CQC_VERSION
#####################################################################################################
#
# init
#
def init(name, cqcFile=None):
"""
Initialize a connection to the cqc server with the name given as input.
    A path to a configuration file for the cqc network can be given;
    if it is not given, the config file '$NETSIM/config/cqcNodes.cfg' will be used.
Returns a socket object.
"""
# This file defines the network of CQC servers interfacing to virtual quantum nodes
if cqcFile is None:
cqcFile = os.environ.get("NETSIM") + "/config/cqcNodes.cfg"
# Read configuration files for the cqc network
cqcNet = networkConfig(cqcFile)
# Host data
if name in cqcNet.hostDict:
myHost = cqcNet.hostDict[name]
else:
logging.error("The name '%s' is not in the cqc network.", name)
        raise LookupError("The name '{}' is not in the cqc network.".format(name))
addr = myHost.addr
# Connect to cqc server and run protocol
cqc = None
try:
cqc = socket.socket(addr[0], addr[1], addr[2])
except socket.error:
logging.error("Could not connect to cqc server: %s", name)
try:
cqc.connect(addr[4])
except socket.error:
cqc.close()
logging.error("Could not connect to cqc server: %s", name)
return cqc
#####################################################################################################
#
# main
#
def main():
# In this example, we are Alice.
myName = "Alice"
# Initialize the connection
cqc = init(myName)
# Send Hello message
hdr = CQCHeader()
hdr.setVals(CQC_VERSION, CQC_TP_HELLO, 0, 0)
msg = hdr.pack()
cqc.send(msg)
# Receive return message
data = cqc.recv(192)
hdr = CQCHeader(data)
if hdr.tp == CQC_TP_HELLO:
print("CQC tells App {}: 'HELLO'".format(myName))
else:
print("Did not receive a hello message, but rather: {}".format(hdr.printable()))
# Close the connection
cqc.close()
##################################################################################################
main()
| 36.267857
| 101
| 0.657558
|
75a7e11f4f6e0dcc5288661aaa8cbfea928c7941
| 3,013
|
py
|
Python
|
src/compas_view2/objects/textobject.py
|
BlockResearchGroup/compas_view2
|
75ec0640d5c1c23c2d6e52b3403fd44a30967c6f
|
[
"MIT"
] | null | null | null |
src/compas_view2/objects/textobject.py
|
BlockResearchGroup/compas_view2
|
75ec0640d5c1c23c2d6e52b3403fd44a30967c6f
|
[
"MIT"
] | null | null | null |
src/compas_view2/objects/textobject.py
|
BlockResearchGroup/compas_view2
|
75ec0640d5c1c23c2d6e52b3403fd44a30967c6f
|
[
"MIT"
] | null | null | null |
from .object import Object
from compas_view2 import HOME
from OpenGL import GL
from ..buffers import make_index_buffer, make_vertex_buffer
import freetype as ft
import numpy as np
import os
class TextObject(Object):
"""Object for displaying text sprites."""
def __init__(self, data, name=None, color=None, height=10, opacity=1, font="FreeSans.ttf"):
super().__init__(data, name=name)
self.color = color or [0, 0, 0]
self.characters = []
self.buffers = []
self.height = height
self.opacity = opacity
self.font = font
def init(self):
self.make_buffers()
def make_buffers(self):
self._text_buffer = {
'positions': make_vertex_buffer(self._data.position),
'elements': make_index_buffer([0]),
'text_texture': self.make_text_texture(),
'n': 1
}
def make_text_texture(self):
# change the filename if necessary
face = ft.Face(os.path.join(HOME, "fonts", self.font))
char_width = 48
char_height = 80
# the size is specified in 1/64 pixel
face.set_char_size(64*char_width)
text = self._data.text
string_buffer = np.zeros(shape=(char_height, char_width*len(text)))
for i, c in enumerate(text):
if c == " ":
continue
face.load_char(c, ft.FT_LOAD_FLAGS['FT_LOAD_RENDER'])
glyph = face.glyph
bitmap = glyph.bitmap
char = np.array(bitmap.buffer)
char = char.reshape((bitmap.rows, bitmap.width))
            # blit the glyph bottom-aligned into this character's slot of the buffer
            string_buffer[-char.shape[0]:, i*char_width: i*char_width+char.shape[1]] = char
string_buffer = string_buffer.reshape((string_buffer.shape[0]*string_buffer.shape[1]))
# create glyph texture
texture = GL.glGenTextures(1)
GL.glBindTexture(GL.GL_TEXTURE_2D, texture)
GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_R8, char_width*len(text), char_height, 0, GL.GL_RED, GL.GL_UNSIGNED_BYTE, string_buffer)
return texture
def draw(self, shader):
"""Draw the object from its buffers"""
shader.enable_attribute('position')
shader.uniform4x4('transform', self.matrix)
shader.uniform1f('object_opacity', self.opacity)
shader.uniform1i('text_height', self._data.height)
shader.uniform1i('text_num', len(self._data.text))
shader.uniform3f('text_color', self.color)
shader.uniformTex("text_texture", self._text_buffer['text_texture'])
shader.bind_attribute('position', self._text_buffer['positions'])
shader.draw_texts(elements=self._text_buffer['elements'], n=self._text_buffer['n'])
shader.uniform1i('is_text', 0)
shader.uniform1f('object_opacity', 1)
shader.disable_attribute('position')
| 37.197531
| 139
| 0.645204
|
fc9f2ab4e668712c44d5a107c1cfd4386f8d3b58
| 6,518
|
py
|
Python
|
tools/linter/clang_tidy/__main__.py
|
edward-io/pytorch
|
04caef8e1d4f951cc380d6cebb9967b71695de13
|
[
"Intel"
] | 5
|
2021-08-17T17:44:20.000Z
|
2021-08-21T05:03:42.000Z
|
tools/linter/clang_tidy/__main__.py
|
edward-io/pytorch
|
04caef8e1d4f951cc380d6cebb9967b71695de13
|
[
"Intel"
] | 1
|
2021-09-03T09:35:27.000Z
|
2021-09-03T09:35:27.000Z
|
tools/linter/clang_tidy/__main__.py
|
edward-io/pytorch
|
04caef8e1d4f951cc380d6cebb9967b71695de13
|
[
"Intel"
] | null | null | null |
import argparse
import pathlib
import os
import shutil
import subprocess
import re
import sys
from typing import List
from tools.linter.clang_tidy.run import run
from tools.linter.clang_tidy.generate_build_files import generate_build_files
from tools.linter.install.clang_tidy import INSTALLATION_PATH
def clang_search_dirs() -> List[str]:
# Compilers are ordered based on fallback preference
# We pick the first one that is available on the system
compilers = ["clang", "gcc", "cpp", "cc"]
compilers = [c for c in compilers if shutil.which(c) is not None]
if len(compilers) == 0:
raise RuntimeError(f"None of {compilers} were found")
compiler = compilers[0]
result = subprocess.run(
[compiler, "-E", "-x", "c++", "-", "-v"],
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True,
)
stderr = result.stderr.decode().strip().split("\n")
search_start = r"#include.*search starts here:"
search_end = r"End of search list."
append_path = False
search_paths = []
for line in stderr:
if re.match(search_start, line):
if append_path:
continue
else:
append_path = True
elif re.match(search_end, line):
break
elif append_path:
search_paths.append(line.strip())
return search_paths
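
# For reference, the stderr section parsed above (from `<compiler> -E -x c++ - -v`)
# looks roughly like this; the exact paths vary by system and are shown only to
# illustrate what ends up in `search_paths`:
#
#   #include <...> search starts here:
#    /usr/lib/llvm-11/include
#    /usr/include/c++/9
#    /usr/include
#   End of search list.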
DEFAULTS = {
"glob": [
# The negative filters below are to exclude files that include onnx_pb.h or
# caffe2_pb.h, otherwise we'd have to build protos as part of this CI job.
# FunctionsManual.cpp is excluded to keep this diff clean. It will be fixed
# in a follow up PR.
# /torch/csrc/generic/*.cpp is excluded because those files aren't actually built.
        # deploy/interpreter files are excluded due to using macros and other techniques
# that are not easily converted to accepted c++
"-torch/csrc/jit/passes/onnx/helper.cpp",
"-torch/csrc/jit/passes/onnx/shape_type_inference.cpp",
"-torch/csrc/jit/serialization/onnx.cpp",
"-torch/csrc/jit/serialization/export.cpp",
"-torch/csrc/jit/serialization/import.cpp",
"-torch/csrc/jit/serialization/import_legacy.cpp",
"-torch/csrc/onnx/init.cpp",
"-torch/csrc/cuda/nccl.*",
"-torch/csrc/cuda/python_nccl.cpp",
"-torch/csrc/autograd/FunctionsManual.cpp",
"-torch/csrc/generic/*.cpp",
"-torch/csrc/jit/codegen/cuda/runtime/*",
"-torch/csrc/deploy/interpreter/interpreter.cpp",
"-torch/csrc/deploy/interpreter/interpreter.h",
"-torch/csrc/deploy/interpreter/interpreter_impl.h",
"-torch/csrc/deploy/interpreter/test_main.cpp",
],
"paths": ["torch/csrc/"],
"include-dir": ["/usr/lib/llvm-11/include/openmp"] + clang_search_dirs(),
"clang-tidy-exe": INSTALLATION_PATH,
"compile-commands-dir": "build",
"config-file": ".clang-tidy",
"disable-progress-bar": False,
}
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="clang-tidy wrapper script")
parser.add_argument(
"-e",
"--clang-tidy-exe",
default=DEFAULTS["clang-tidy-exe"],
help="Path to clang-tidy executable",
)
parser.add_argument(
"-g",
"--glob",
action="append",
default=DEFAULTS["glob"],
help="Only lint files that match these glob patterns "
"(see documentation for `fnmatch` for supported syntax)."
"If a pattern starts with a - the search is negated for that pattern.",
)
parser.add_argument(
"-x",
"--regex",
action="append",
default=[],
help="Only lint files that match these regular expressions (from the start of the filename). "
"If a pattern starts with a - the search is negated for that pattern.",
)
parser.add_argument(
"-c",
"--compile-commands-dir",
default=DEFAULTS["compile-commands-dir"],
help="Path to the folder containing compile_commands.json",
)
parser.add_argument(
"--diff-file",
help="File containing diff to use for determining files to lint and line filters",
)
parser.add_argument(
"-p",
"--paths",
nargs="+",
default=DEFAULTS["paths"],
help="Lint only the given paths (recursively)",
)
parser.add_argument(
"-n",
"--dry-run",
action="store_true",
help="Only show the command to be executed, without running it",
)
parser.add_argument("-v", "--verbose", action="store_true", help="Verbose output")
parser.add_argument("-q", "--quiet", action="store_true", help="Don't print output")
parser.add_argument(
"--config-file",
default=DEFAULTS["config-file"],
help="Path to a clang-tidy config file. Defaults to '.clang-tidy'.",
)
parser.add_argument(
"--print-include-paths",
action="store_true",
help="Print the search paths used for include directives",
)
parser.add_argument(
"-I",
"--include-dir",
action="append",
default=DEFAULTS["include-dir"],
help="Add the specified directory to the search path for include files",
)
parser.add_argument(
"-s",
"--suppress-diagnostics",
action="store_true",
help="Add NOLINT to suppress clang-tidy violations",
)
parser.add_argument(
"--disable-progress-bar",
action="store_true",
default=DEFAULTS["disable-progress-bar"],
help="Disable the progress bar",
)
parser.add_argument(
"extra_args", nargs="*", help="Extra arguments to forward to clang-tidy"
)
return parser.parse_args()
def main() -> None:
options = parse_args()
if not pathlib.Path("build").exists():
generate_build_files()
# Check if clang-tidy executable exists
exists = os.access(options.clang_tidy_exe, os.X_OK)
if not exists:
msg = (
f"Could not find '{options.clang_tidy_exe}'\n"
+ "We provide a custom build of clang-tidy that has additional checks.\n"
+ "You can install it by running:\n"
+ "$ python3 tools/linter/install/clang_tidy.py"
)
raise RuntimeError(msg)
result, _ = run(options)
sys.exit(result.returncode)
if __name__ == "__main__":
main()
| 33.255102
| 102
| 0.619822
|
9d3b94558c8815d5910a35cc2c5a4e837a35d7ea
| 7,325
|
py
|
Python
|
samples/openapi3/client/petstore/python-experimental/petstore_api/models/__init__.py
|
LeComptoirDesPharmacies/openapi-generator
|
a4325ec5205b68298fcc66b9e85927aa64d840ff
|
[
"Apache-2.0"
] | null | null | null |
samples/openapi3/client/petstore/python-experimental/petstore_api/models/__init__.py
|
LeComptoirDesPharmacies/openapi-generator
|
a4325ec5205b68298fcc66b9e85927aa64d840ff
|
[
"Apache-2.0"
] | 1
|
2022-03-31T10:08:39.000Z
|
2022-03-31T10:08:39.000Z
|
samples/openapi3/client/petstore/python-experimental/petstore_api/models/__init__.py
|
LeComptoirDesPharmacies/openapi-generator
|
a4325ec5205b68298fcc66b9e85927aa64d840ff
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# flake8: noqa
# import all models into this package
# if you have many models here with many references from one model to another this may
# raise a RecursionError
# to avoid this, import only the models that you directly need like:
# from petstore_api.model.pet import Pet
# or import this package, but before doing it, use:
# import sys
# sys.setrecursionlimit(n)
from petstore_api.model.additional_properties_class import AdditionalPropertiesClass
from petstore_api.model.additional_properties_with_array_of_enums import AdditionalPropertiesWithArrayOfEnums
from petstore_api.model.address import Address
from petstore_api.model.animal import Animal
from petstore_api.model.animal_farm import AnimalFarm
from petstore_api.model.api_response import ApiResponse
from petstore_api.model.apple import Apple
from petstore_api.model.apple_req import AppleReq
from petstore_api.model.array_holding_any_type import ArrayHoldingAnyType
from petstore_api.model.array_of_array_of_number_only import ArrayOfArrayOfNumberOnly
from petstore_api.model.array_of_enums import ArrayOfEnums
from petstore_api.model.array_of_number_only import ArrayOfNumberOnly
from petstore_api.model.array_test import ArrayTest
from petstore_api.model.array_with_validations_in_items import ArrayWithValidationsInItems
from petstore_api.model.banana import Banana
from petstore_api.model.banana_req import BananaReq
from petstore_api.model.bar import Bar
from petstore_api.model.basque_pig import BasquePig
from petstore_api.model.boolean import Boolean
from petstore_api.model.boolean_enum import BooleanEnum
from petstore_api.model.capitalization import Capitalization
from petstore_api.model.cat import Cat
from petstore_api.model.cat_all_of import CatAllOf
from petstore_api.model.category import Category
from petstore_api.model.child_cat import ChildCat
from petstore_api.model.child_cat_all_of import ChildCatAllOf
from petstore_api.model.class_model import ClassModel
from petstore_api.model.client import Client
from petstore_api.model.complex_quadrilateral import ComplexQuadrilateral
from petstore_api.model.complex_quadrilateral_all_of import ComplexQuadrilateralAllOf
from petstore_api.model.composed_any_of_different_types_no_validations import ComposedAnyOfDifferentTypesNoValidations
from petstore_api.model.composed_array import ComposedArray
from petstore_api.model.composed_bool import ComposedBool
from petstore_api.model.composed_none import ComposedNone
from petstore_api.model.composed_number import ComposedNumber
from petstore_api.model.composed_object import ComposedObject
from petstore_api.model.composed_one_of_different_types import ComposedOneOfDifferentTypes
from petstore_api.model.composed_string import ComposedString
from petstore_api.model.danish_pig import DanishPig
from petstore_api.model.date_time_test import DateTimeTest
from petstore_api.model.date_time_with_validations import DateTimeWithValidations
from petstore_api.model.date_with_validations import DateWithValidations
from petstore_api.model.dog import Dog
from petstore_api.model.dog_all_of import DogAllOf
from petstore_api.model.drawing import Drawing
from petstore_api.model.enum_arrays import EnumArrays
from petstore_api.model.enum_class import EnumClass
from petstore_api.model.enum_test import EnumTest
from petstore_api.model.equilateral_triangle import EquilateralTriangle
from petstore_api.model.equilateral_triangle_all_of import EquilateralTriangleAllOf
from petstore_api.model.file import File
from petstore_api.model.file_schema_test_class import FileSchemaTestClass
from petstore_api.model.foo import Foo
from petstore_api.model.format_test import FormatTest
from petstore_api.model.fruit import Fruit
from petstore_api.model.fruit_req import FruitReq
from petstore_api.model.gm_fruit import GmFruit
from petstore_api.model.grandparent_animal import GrandparentAnimal
from petstore_api.model.has_only_read_only import HasOnlyReadOnly
from petstore_api.model.health_check_result import HealthCheckResult
from petstore_api.model.inline_response_default import InlineResponseDefault
from petstore_api.model.integer_enum import IntegerEnum
from petstore_api.model.integer_enum_big import IntegerEnumBig
from petstore_api.model.integer_enum_one_value import IntegerEnumOneValue
from petstore_api.model.integer_enum_with_default_value import IntegerEnumWithDefaultValue
from petstore_api.model.integer_max10 import IntegerMax10
from petstore_api.model.integer_min15 import IntegerMin15
from petstore_api.model.isosceles_triangle import IsoscelesTriangle
from petstore_api.model.isosceles_triangle_all_of import IsoscelesTriangleAllOf
from petstore_api.model.mammal import Mammal
from petstore_api.model.map_test import MapTest
from petstore_api.model.mixed_properties_and_additional_properties_class import MixedPropertiesAndAdditionalPropertiesClass
from petstore_api.model.model200_response import Model200Response
from petstore_api.model.model_return import ModelReturn
from petstore_api.model.name import Name
from petstore_api.model.no_additional_properties import NoAdditionalProperties
from petstore_api.model.nullable_class import NullableClass
from petstore_api.model.nullable_shape import NullableShape
from petstore_api.model.nullable_string import NullableString
from petstore_api.model.number import Number
from petstore_api.model.number_only import NumberOnly
from petstore_api.model.number_with_validations import NumberWithValidations
from petstore_api.model.object_interface import ObjectInterface
from petstore_api.model.object_model_with_ref_props import ObjectModelWithRefProps
from petstore_api.model.object_with_difficultly_named_props import ObjectWithDifficultlyNamedProps
from petstore_api.model.object_with_validations import ObjectWithValidations
from petstore_api.model.order import Order
from petstore_api.model.parent_pet import ParentPet
from petstore_api.model.pet import Pet
from petstore_api.model.pig import Pig
from petstore_api.model.player import Player
from petstore_api.model.quadrilateral import Quadrilateral
from petstore_api.model.quadrilateral_interface import QuadrilateralInterface
from petstore_api.model.read_only_first import ReadOnlyFirst
from petstore_api.model.scalene_triangle import ScaleneTriangle
from petstore_api.model.scalene_triangle_all_of import ScaleneTriangleAllOf
from petstore_api.model.shape import Shape
from petstore_api.model.shape_or_null import ShapeOrNull
from petstore_api.model.simple_quadrilateral import SimpleQuadrilateral
from petstore_api.model.simple_quadrilateral_all_of import SimpleQuadrilateralAllOf
from petstore_api.model.some_object import SomeObject
from petstore_api.model.special_model_name import SpecialModelName
from petstore_api.model.string import String
from petstore_api.model.string_boolean_map import StringBooleanMap
from petstore_api.model.string_enum import StringEnum
from petstore_api.model.string_enum_with_default_value import StringEnumWithDefaultValue
from petstore_api.model.string_with_validation import StringWithValidation
from petstore_api.model.tag import Tag
from petstore_api.model.triangle import Triangle
from petstore_api.model.triangle_interface import TriangleInterface
from petstore_api.model.user import User
from petstore_api.model.whale import Whale
from petstore_api.model.zebra import Zebra
| 57.677165
| 123
| 0.894608
|
149265cf92bc9f9ed3248205d608faf6611b538b
| 865
|
py
|
Python
|
bridgedb/_langs.py
|
jugheadjones10/bridgedb
|
94d6bca4b22458c156898785d8f6ccedf562d884
|
[
"BSD-3-Clause-Clear"
] | 1
|
2016-09-21T12:55:21.000Z
|
2016-09-21T12:55:21.000Z
|
bridgedb/_langs.py
|
jugheadjones10/bridgedb
|
94d6bca4b22458c156898785d8f6ccedf562d884
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
bridgedb/_langs.py
|
jugheadjones10/bridgedb
|
94d6bca4b22458c156898785d8f6ccedf562d884
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2007-2013, The Tor Project, Inc.
# (c) 2007-2013, all entities within the AUTHORS file
# :license: 3-clause BSD, see included LICENSE for information
"""_langs.py - Storage for information on installed language support."""
def get_langs():
"""Return a list of two-letter country codes of translations which were
installed (if we've already been installed).
"""
return supported
#: This list will be rewritten by :func:`get_supported_langs` in setup.py at
#: install time, so that the :attr:`bridgedb.__langs__` will hold a list of
#: two-letter country codes for languages which were installed.
supported = []
| 34.6
| 76
| 0.708671
|
6b00ca5491880879563b709dc07d1ac93996a91e
| 5,412
|
py
|
Python
|
handlers/checkers/type.py
|
n0s0r0g/perfect_OSM
|
d07fef525865a337d8d9bd3e8168cf6b411a182b
|
[
"MIT"
] | 4
|
2016-04-03T21:12:57.000Z
|
2016-05-04T09:14:43.000Z
|
handlers/checkers/type.py
|
n0s0r0g/perfect_OSM
|
d07fef525865a337d8d9bd3e8168cf6b411a182b
|
[
"MIT"
] | null | null | null |
handlers/checkers/type.py
|
n0s0r0g/perfect_OSM
|
d07fef525865a337d8d9bd3e8168cf6b411a182b
|
[
"MIT"
] | null | null | null |
from handlers.simplehandler import SimpleHandler
POINT = 'p'
LINE = 'l'
AREA = 'a'
RELATION = 'r'
_BAD_TYPE = {
    # English translation of the user-facing strings below:
    # title: "Incorrect element type"
    # help_text: "For this tag (or tag=value pair) the Wiki specifies different
    # element types (node, line, polygon/multipolygon/relation)."
    'title': 'Некорректный тип элемента',
    'help_text': 'Для данного тега, либо пары тег=значение в Wiki указаны другие типы данных (точка, линия, '
                 'полигон/мультиполигон/отношение).',
}
_TAGS = {
'building': {POINT, AREA}, # TODO: wiki doesn't allow POINT on building=*; tracked in todo/building/is_node
'landuse': {POINT, AREA}, # TODO: wiki doesn't allow POINT on industrial=*
'entrance': {POINT},
'amenity': {POINT, LINE, AREA, RELATION}, # TODO: wiki doesn't allow LINE on amenity=*
}
_TAG_VALUES = {
'highway': {
'motorway': {LINE},
'trunk': {LINE},
'primary': {LINE},
'secondary': {LINE},
'tertiary': {LINE},
'unclassified': {LINE},
'residential': {LINE},
'service': {LINE, AREA},
'motorway_link': {LINE},
'trunk_link': {LINE},
'primary_link': {LINE},
'secondary_link': {LINE},
'tertiary_link': {LINE},
'living_street': {LINE},
'pedestrian': {LINE, AREA},
'track': {LINE},
'bus_guideway': {LINE},
'raceway': {LINE},
'road': {LINE},
'footway': {LINE},
'bridleway': {LINE},
'steps': {LINE},
'path': {LINE},
'cycleway': {LINE},
'proposed': {LINE},
'construction': {LINE},
'bus_stop': {POINT},
'crossing': {POINT},
'elevator': {POINT},
'emergency_access_point': {POINT},
'escape': {POINT},
'give_way': {POINT},
'mini_roundabout': {POINT},
'motorway_junction': {POINT},
'passing_place': {POINT},
'rest_area': {POINT, AREA},
'speed_camera': {POINT},
'street_lamp': {POINT},
'services': {POINT, AREA},
'stop': {POINT},
'traffic_signals': {POINT},
'turning_circle': {POINT},
},
'natural': {
'wood': {POINT, AREA},
'tree_row': {LINE},
'tree': {POINT},
'scrub': {POINT, AREA},
'heath': {POINT, AREA},
'moor': {POINT, AREA},
'grassland': {AREA},
'fell': {POINT, AREA},
'bare_rock': {AREA},
'scree': {POINT, AREA},
'shingle': {POINT, AREA},
'sand': {POINT, AREA},
'mud': {POINT, AREA},
'water': {POINT, AREA},
'wetland': {POINT, AREA},
'glacier': {POINT, AREA},
'bay': {POINT, AREA},
'beach': {POINT, AREA},
'coastline': {LINE},
'spring': {POINT},
'hot_spring': {POINT},
'geyser': {POINT},
'peak': {POINT},
'volcano': {POINT},
'valley': {POINT, LINE},
'river_terrace': {POINT, LINE},
'ridge': {LINE},
'arete': {LINE},
'cliff': {POINT, LINE, AREA},
'saddle': {POINT},
'rock': {POINT, AREA},
'stone': {POINT},
'sinkhole': {POINT, AREA},
'cave_entrance': {POINT},
},
'waterway': {
'river': {LINE, RELATION}, # TODO: relation: type=waterway - document in Wiki
'riverbank': {AREA},
'stream': {LINE, RELATION}, # TODO: relation: type=waterway - document in Wiki
'canal': {LINE, RELATION}, # TODO: relation: type=waterway - document in Wiki
'drain': {LINE, RELATION}, # TODO: relation: type=waterway - document in Wiki
'ditch': {LINE, RELATION}, # TODO: relation: type=waterway - document in Wiki
'dock': {POINT, AREA},
'boatyard': {POINT, AREA},
'dam': {LINE, AREA},
'weir': {POINT, LINE},
'waterfall': {POINT},
'lock_gate': {POINT},
'turning_point': {POINT},
'water_point': {POINT},
}
}
def _is_point(obj):
return obj['@type'] == 'node'
def _is_line(obj):
if obj['@type'] == 'way':
return True
return False
def _is_area(obj):
if obj['@type'] == 'way':
return obj['@nodes'][0] == obj['@nodes'][-1]
elif obj['@type'] == 'relation':
if obj.get('type') == 'multipolygon':
return True
return False
def _is_relation(obj):
if obj['@type'] == 'relation':
return True
return False
class TypeChecker(SimpleHandler):
    """Checks that each tagged OSM object uses an element type allowed for its tags."""
    def __init__(self):
        self._bad_type = []
    def process(self, obj):
        # Start with every element type allowed, then intersect with the sets
        # permitted for each tag (and tag=value pair) present on the object.
        allowed_type = {POINT, LINE, AREA, RELATION}
for k, v in obj.items():
if k.startswith('@'):
continue
if k in _TAGS:
allowed_type = allowed_type.intersection(_TAGS[k])
if k in _TAG_VALUES and v in _TAG_VALUES[k]:
allowed_type = allowed_type.intersection(_TAG_VALUES[k][v])
valid = False
if POINT in allowed_type and _is_point(obj):
valid = True
if not valid and LINE in allowed_type and _is_line(obj):
valid = True
if not valid and AREA in allowed_type and _is_area(obj):
valid = True
if not valid and RELATION in allowed_type and _is_relation(obj):
valid = True
if not valid:
self._bad_type.append((obj['@type'], obj['@id']))
def finish(self, issues):
issues.add_issue_type('warnings/bad_type', _BAD_TYPE)
for obj_type, obj_id in self._bad_type:
issues.add_issue_obj('warnings/bad_type', obj_type, obj_id)
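# A minimal usage sketch (the object layout mirrors what the helpers above
# expect; the ``issues`` collector is hypothetical, not part of this module):
#
#     checker = TypeChecker()
#     checker.process({'@type': 'node', '@id': 1, 'highway': 'motorway'})
#     # highway=motorway is only allowed on lines, so this node is recorded
#     checker.finish(issues)  # issues: any object with the add_issue_* methods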
| 30.576271
| 111
| 0.529379
|
c09dab34be1e9280cbdda16b5f871b896d82f986
| 24,192
|
py
|
Python
|
tests/python/relay/test_json_runtime.py
|
janifer112x/incubator-tvm
|
98c2096f4944bdbdbbb2b7b20ccd35c6c11dfbf6
|
[
"Apache-2.0"
] | 22
|
2022-03-18T07:29:31.000Z
|
2022-03-23T14:54:32.000Z
|
tests/python/relay/test_json_runtime.py
|
janifer112x/incubator-tvm
|
98c2096f4944bdbdbbb2b7b20ccd35c6c11dfbf6
|
[
"Apache-2.0"
] | null | null | null |
tests/python/relay/test_json_runtime.py
|
janifer112x/incubator-tvm
|
98c2096f4944bdbdbbb2b7b20ccd35c6c11dfbf6
|
[
"Apache-2.0"
] | 5
|
2018-03-27T01:02:13.000Z
|
2020-12-29T00:32:31.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for JSON codegen and runtime."""
import os
import sys
import numpy as np
import tvm
import tvm.relay.op as reg
import tvm.relay.testing
from tvm import relay, runtime
from tvm.contrib import util
from tvm.relay import transform
from tvm.relay.backend import compile_engine
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.op.contrib.register import get_pattern_table
def set_func_attr(func, compile_name, symbol_name):
func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
func = func.with_attr("Inline", tvm.tir.IntImm("int32", 1))
func = func.with_attr("Compiler", compile_name)
func = func.with_attr("global_symbol", symbol_name)
return func
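# The attributes set above mark a Relay function for external (BYOC) codegen:
# "Compiler" names the external backend (here "dnnl") and "global_symbol" is the
# symbol under which the generated runtime module is looked up at execution time.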
def check_result(
mod, ref_mod, map_inputs, out_shape, tol=1e-5, target="llvm", ctx=tvm.cpu(), params=None
):
if sys.platform == "win32":
print("Skip test on Windows for now")
return
# Run the reference result
compile_engine.get().clear()
with tvm.transform.PassContext(opt_level=3):
json, lib, param = relay.build(ref_mod, target=target, params=params)
rt_mod = tvm.contrib.graph_runtime.create(json, lib, ctx)
for name, data in map_inputs.items():
rt_mod.set_input(name, data)
rt_mod.set_input(**param)
rt_mod.run()
out = tvm.nd.empty(out_shape, ctx=ctx)
out = rt_mod.get_output(0, out)
ref_result = out.asnumpy()
def check_vm_result():
compile_engine.get().clear()
with relay.build_config(opt_level=3):
exe = relay.vm.compile(mod, target=target, params=params)
code, lib = exe.save()
exe = runtime.vm.Executable.load_exec(code, lib)
vm = runtime.vm.VirtualMachine(exe, ctx)
out = vm.run(**map_inputs)
tvm.testing.assert_allclose(out.asnumpy(), ref_result, rtol=tol, atol=tol)
def check_graph_runtime_result():
compile_engine.get().clear()
with relay.build_config(opt_level=3):
json, lib, param = relay.build(mod, target=target, params=params)
rt_mod = tvm.contrib.graph_runtime.create(json, lib, ctx)
for name, data in map_inputs.items():
rt_mod.set_input(name, data)
rt_mod.set_input(**param)
rt_mod.run()
out = tvm.nd.empty(out_shape, ctx=ctx)
out = rt_mod.get_output(0, out)
tvm.testing.assert_allclose(out.asnumpy(), ref_result, rtol=tol, atol=tol)
check_vm_result()
check_graph_runtime_result()
def test_conv2d():
"""Test a subgraph with a single conv2d operator."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
def conv2d_direct():
dtype = "float32"
ishape = (1, 32, 14, 14)
w1shape = (32, 32, 3, 3)
data0 = relay.var("data", shape=ishape, dtype=dtype)
weight0 = relay.var("weight", shape=w1shape, dtype=dtype)
out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1))
func = relay.Function([data0, weight0], out)
func = set_func_attr(func, "dnnl", "dnnl_0")
glb_var = relay.GlobalVar("dnnl_0")
mod = tvm.IRModule()
mod[glb_var] = func
mod = transform.InferType()(mod)
data = relay.var("data", shape=(ishape), dtype=dtype)
weight = relay.var("weight", shape=(w1shape), dtype=dtype)
main_f = relay.Function([data, weight], glb_var(data, weight))
mod["main"] = main_f
mod = transform.InferType()(mod)
data0 = relay.var("data", shape=ishape, dtype=dtype)
weight0 = relay.var("weight", shape=w1shape, dtype=dtype)
out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1))
main_f = relay.Function([data0, weight0], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = main_f
ref_mod = transform.InferType()(ref_mod)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
w1_data = np.random.uniform(0, 1, w1shape).astype(dtype)
return mod, ref_mod, {"data": i_data, "weight": w1_data}, (1, 32, 14, 14)
def group_conv2d():
dtype = "float32"
ishape = (1, 32, 14, 14)
w2shape = (32, 1, 3, 3)
data0 = relay.var("data", shape=(ishape), dtype=dtype)
weight0 = relay.var("weight", shape=(w2shape), dtype=dtype)
out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1), groups=32)
func = relay.Function([data0, weight0], out)
func = set_func_attr(func, "dnnl", "dnnl_0")
glb_var = relay.GlobalVar("dnnl_0")
mod = tvm.IRModule()
mod[glb_var] = func
mod = transform.InferType()(mod)
data = relay.var("data", shape=(ishape), dtype=dtype)
weight = relay.var("weight", shape=(w2shape), dtype=dtype)
main_f = relay.Function([data, weight], glb_var(data, weight))
mod["main"] = main_f
mod = transform.InferType()(mod)
data0 = relay.var("data", shape=(ishape), dtype=dtype)
weight0 = relay.var("weight", shape=(w2shape), dtype=dtype)
out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1), groups=32)
main_f = relay.Function([data0, weight0], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = main_f
ref_mod = transform.InferType()(ref_mod)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
w_data = np.random.uniform(0, 1, w2shape).astype(dtype)
return mod, ref_mod, {"data": i_data, "weight": w_data}, (1, 32, 14, 14)
for mod, ref_mod, map_inputs, out_shape in [conv2d_direct(), group_conv2d()]:
check_result(mod, ref_mod, map_inputs, out_shape, tol=1e-5)
def test_add():
"""Test a subgraph with a single add operator."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
shape = (10, 10)
def gen_add():
data0 = relay.var("data0", shape=shape, dtype=dtype)
data1 = relay.var("data1", shape=shape, dtype=dtype)
out = relay.add(data0, data1)
func = relay.Function([data0, data1], out)
func = set_func_attr(func, "dnnl", "dnnl_0")
glb_var = relay.GlobalVar("dnnl_0")
mod = tvm.IRModule()
mod[glb_var] = func
mod = transform.InferType()(mod)
data0 = relay.var("data0", shape=shape, dtype=dtype)
data1 = relay.var("data1", shape=shape, dtype=dtype)
main_f = relay.Function([data0, data1], glb_var(data0, data1))
mod["main"] = main_f
mod = transform.InferType()(mod)
data0 = relay.var("data0", shape=shape, dtype=dtype)
data1 = relay.var("data1", shape=shape, dtype=dtype)
out = relay.add(data0, data1)
main_f = relay.Function([data0, data1], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = main_f
ref_mod = transform.InferType()(ref_mod)
return mod, ref_mod
mod, ref_mod = gen_add()
data0 = np.random.uniform(0, 1, shape).astype(dtype)
data1 = np.random.uniform(0, 1, shape).astype(dtype)
check_result(mod, ref_mod, {"data0": data0, "data1": data1}, shape, tol=1e-5)
def test_relu():
"""Test a subgraph with a single ReLU operator."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
shape = (1, 32, 14, 14)
def gen_relu():
data0 = relay.var("data0", shape=shape, dtype=dtype)
out = relay.nn.relu(data0)
func = relay.Function([data0], out)
func = set_func_attr(func, "dnnl", "dnnl_0")
glb_var = relay.GlobalVar("dnnl_0")
mod = tvm.IRModule()
mod[glb_var] = func
mod = transform.InferType()(mod)
data0 = relay.var("data0", shape=shape, dtype=dtype)
main_f = relay.Function([data0], glb_var(data0))
mod["main"] = main_f
mod = transform.InferType()(mod)
data0 = relay.var("data0", shape=shape, dtype=dtype)
out = relay.nn.relu(data0)
main_f = relay.Function([data0], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = main_f
ref_mod = transform.InferType()(ref_mod)
return mod, ref_mod
mod, ref_mod = gen_relu()
data0 = np.random.uniform(-1, 1, shape).astype(dtype)
check_result(
mod,
ref_mod,
{
"data0": data0,
},
(1, 32, 14, 14),
tol=1e-5,
)
def test_dense():
"""Test a subgraph with a single dense operator."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
a_shape = (1, 512)
b_shape = (1024, 512)
def gen_dense():
a = relay.var("A", shape=a_shape, dtype=dtype)
b = relay.var("B", shape=b_shape, dtype=dtype)
out = relay.nn.dense(a, b)
func = relay.Function([a, b], out)
func = set_func_attr(func, "dnnl", "dnnl_0")
glb_var = relay.GlobalVar("dnnl_0")
mod = tvm.IRModule()
mod[glb_var] = func
mod = transform.InferType()(mod)
a = relay.var("A", shape=a_shape, dtype=dtype)
b = relay.var("B", shape=b_shape, dtype=dtype)
main_f = relay.Function([a, b], glb_var(a, b))
mod["main"] = main_f
mod = transform.InferType()(mod)
a = relay.var("A", shape=a_shape, dtype=dtype)
b = relay.var("B", shape=b_shape, dtype=dtype)
out = relay.nn.dense(a, b)
main_f = relay.Function([a, b], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = main_f
ref_mod = transform.InferType()(ref_mod)
return mod, ref_mod
mod, ref_mod = gen_dense()
data_a = np.random.uniform(0, 1, a_shape).astype(dtype)
data_b = np.random.uniform(0, 1, b_shape).astype(dtype)
check_result(mod, ref_mod, {"A": data_a, "B": data_b}, (1, 1024), tol=1e-5)
def test_bn():
"""Test a subgraph with a single batch_norm operator."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
d_shape = (1, 8)
c_shape = (8,)
def gen_bn():
data = relay.var("data", shape=d_shape)
gamma = relay.var("gamma", shape=c_shape)
beta = relay.var("beta", shape=c_shape)
moving_mean = relay.var("moving_mean", shape=c_shape)
moving_var = relay.var("moving_var", shape=c_shape)
bn = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var)
out = bn[0]
func = relay.Function([data, gamma, beta, moving_mean, moving_var], out)
func = set_func_attr(func, "dnnl", "dnnl_0")
glb_var = relay.GlobalVar("dnnl_0")
mod = tvm.IRModule()
mod[glb_var] = func
mod = transform.InferType()(mod)
data = relay.var("data", shape=d_shape)
gamma = relay.var("gamma", shape=c_shape)
beta = relay.var("beta", shape=c_shape)
moving_mean = relay.var("moving_mean", shape=c_shape)
moving_var = relay.var("moving_var", shape=c_shape)
main_f = relay.Function(
[data, gamma, beta, moving_mean, moving_var],
glb_var(data, gamma, beta, moving_mean, moving_var),
)
mod["main"] = main_f
mod = transform.InferType()(mod)
data = relay.var("data", shape=d_shape)
gamma = relay.var("gamma", shape=c_shape)
beta = relay.var("beta", shape=c_shape)
moving_mean = relay.var("moving_mean", shape=c_shape)
moving_var = relay.var("moving_var", shape=c_shape)
bn = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var)
out = bn[0]
main_f = relay.Function([data, gamma, beta, moving_mean, moving_var], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = main_f
ref_mod = transform.InferType()(ref_mod)
return mod, ref_mod
mod, ref_mod = gen_bn()
data = np.random.uniform(-1, 1, d_shape).astype(dtype)
gamma = np.random.uniform(-1, 1, c_shape).astype(dtype)
beta = np.random.uniform(-1, 1, c_shape).astype(dtype)
moving_mean = np.random.uniform(-1, 1, c_shape).astype(dtype)
moving_var = np.random.uniform(-1, 1, c_shape).astype(dtype)
check_result(
mod,
ref_mod,
{
"data": data,
"gamma": gamma,
"beta": beta,
"moving_mean": moving_mean,
"moving_var": moving_var,
},
d_shape,
tol=1e-5,
)
def test_multiple_ops():
"""Test a subgraph with multiple operators."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
ishape = (1, 32, 14, 14)
w1shape = (32, 32, 3, 3)
w2shape = (64, 32, 5, 5)
def get_net():
data = relay.var("data", relay.TensorType(ishape, dtype))
w1 = relay.var("w1", relay.TensorType(w1shape, dtype))
w2 = relay.var("w2", relay.TensorType(w2shape, dtype))
layer = relay.nn.conv2d(data=data, weight=w1, kernel_size=(3, 3), padding=(1, 1))
layer = relay.nn.relu(layer)
layer = relay.nn.conv2d(data=layer, weight=w2, kernel_size=(5, 5), padding=(2, 2))
layer = relay.nn.relu(layer)
main_f = relay.Function([data, w1, w2], layer)
mod = tvm.IRModule()
mod["main"] = main_f
return mod
def get_partitoned_mod(mod):
remove_bn_pass = tvm.transform.Sequential(
[
transform.InferType(),
transform.SimplifyInference(),
transform.FoldConstant(),
transform.FoldScaleAxis(),
]
)
byoc_pass = tvm.transform.Sequential(
[
remove_bn_pass,
transform.AnnotateTarget("dnnl"),
transform.MergeCompilerRegions(),
transform.PartitionGraph(),
]
)
with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
return byoc_pass(mod)
ref_mod = get_net()
mod = get_partitoned_mod(ref_mod)
data = np.random.uniform(0, 1, ishape).astype(dtype)
w1 = np.random.uniform(0, 1, w1shape).astype(dtype)
w2 = np.random.uniform(0, 1, w2shape).astype(dtype)
check_result(
mod,
ref_mod,
{
"data": data,
"w1": w1,
"w2": w2,
},
(1, 64, 14, 14),
tol=1e-5,
)
def test_composite():
"""Test DNNL patterns and there composite functions."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
def conv2d_relu():
ishape = (1, 32, 14, 14)
w1shape = (32, 32, 3, 3)
# Composite function
in_1 = relay.var("in_1", shape=ishape, dtype=dtype)
in_2 = relay.var("in_2", shape=w1shape, dtype=dtype)
conv2d = relay.nn.conv2d(in_1, in_2, kernel_size=(3, 3), padding=(1, 1))
relu = relay.nn.relu(conv2d)
func = relay.Function([in_1, in_2], relu)
func = func.with_attr("Composite", "dnnl.conv2d_relu")
func = func.with_attr("PartitionedFromPattern", "nn.conv2d_nn.relu_")
# Partition function
arg_1 = relay.var("arg_1", shape=ishape, dtype=dtype)
arg_2 = relay.var("arg_2", shape=w1shape, dtype=dtype)
call = relay.Call(func, [arg_1, arg_2])
p_func = relay.Function([arg_1, arg_2], call)
p_func = set_func_attr(p_func, "dnnl", "dnnl_0")
glb_var = relay.GlobalVar("dnnl_0")
mod = tvm.IRModule()
mod[glb_var] = p_func
mod = transform.InferType()(mod)
# Main function
data = relay.var("data", shape=ishape, dtype=dtype)
weight = relay.var("weight", shape=w1shape, dtype=dtype)
main_func = relay.Function([data, weight], glb_var(data, weight))
mod["main"] = main_func
mod = transform.InferType()(mod)
# Reference module
data = relay.var("data", shape=ishape, dtype=dtype)
weight = relay.var("weight", shape=w1shape, dtype=dtype)
conv2d = relay.nn.conv2d(data, weight, kernel_size=(3, 3), padding=(1, 1))
relu = relay.nn.relu(conv2d)
main_func = relay.Function([data, weight], relu)
ref_mod = tvm.IRModule()
ref_mod["main"] = main_func
ref_mod = transform.InferType()(ref_mod)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
w1_data = np.random.uniform(0, 1, w1shape).astype(dtype)
return mod, ref_mod, {"data": i_data, "weight": w1_data}, (1, 32, 14, 14)
def conv2d_bias_relu():
ishape = (1, 32, 14, 14)
w1shape = (32, 32, 3, 3)
bshape = (32, 1, 1)
# Composite function
in_1 = relay.var("in_1", shape=ishape, dtype=dtype)
in_2 = relay.var("in_2", shape=w1shape, dtype=dtype)
in_3 = relay.var("in_3", shape=bshape, dtype=dtype)
conv2d = relay.nn.conv2d(in_1, in_2, kernel_size=(3, 3), padding=(1, 1))
add = relay.add(conv2d, in_3)
relu = relay.nn.relu(add)
func = relay.Function([in_1, in_2, in_3], relu)
func = func.with_attr("Composite", "dnnl.conv2d_bias_relu")
func = func.with_attr("PartitionedFromPattern", "nn.conv2d_add_nn.relu_")
# Partition function
arg_1 = relay.var("arg_1", shape=ishape, dtype=dtype)
arg_2 = relay.var("arg_2", shape=w1shape, dtype=dtype)
arg_3 = relay.var("arg_3", shape=bshape, dtype=dtype)
call = relay.Call(func, [arg_1, arg_2, arg_3])
p_func = relay.Function([arg_1, arg_2, arg_3], call)
p_func = set_func_attr(p_func, "dnnl", "dnnl_0")
glb_var = relay.GlobalVar("dnnl_0")
mod = tvm.IRModule()
mod[glb_var] = p_func
mod = transform.InferType()(mod)
# Main function
data = relay.var("data", shape=ishape, dtype=dtype)
weight = relay.var("weight", shape=w1shape, dtype=dtype)
bias = relay.var("bias", shape=bshape, dtype=dtype)
main_func = relay.Function([data, weight, bias], glb_var(data, weight, bias))
mod["main"] = main_func
mod = transform.InferType()(mod)
# Reference module
data = relay.var("data", shape=ishape, dtype=dtype)
weight = relay.var("weight", shape=w1shape, dtype=dtype)
bias = relay.var("bias", shape=bshape, dtype=dtype)
conv2d = relay.nn.conv2d(data, weight, kernel_size=(3, 3), padding=(1, 1))
add = relay.add(conv2d, bias)
relu = relay.nn.relu(add)
main_func = relay.Function([data, weight, bias], relu)
ref_mod = tvm.IRModule()
ref_mod["main"] = main_func
ref_mod = transform.InferType()(ref_mod)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
w1_data = np.random.uniform(0, 1, w1shape).astype(dtype)
b_data = np.random.uniform(0, 1, bshape).astype(dtype)
return mod, ref_mod, {"data": i_data, "weight": w1_data, "bias": b_data}, (1, 32, 14, 14)
for mod, ref_mod, input_maps, out_shape in [conv2d_relu(), conv2d_bias_relu()]:
check_result(mod, ref_mod, input_maps, out_shape, tol=1e-5)
def test_constant():
"""Test the subgraph with (var, const, ...) arguments."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
ishape = (1, 32, 14, 14)
wshape = (32, 32, 3, 3)
data = relay.var("data", shape=ishape, dtype=dtype)
weight = relay.var("weight", shape=wshape, dtype=dtype)
bn_gamma = relay.var("bn_gamma")
bn_beta = relay.var("bn_beta")
bn_mmean = relay.var("bn_mean")
bn_mvar = relay.var("bn_var")
layer = relay.nn.conv2d(data=data, weight=weight, kernel_size=(3, 3), padding=(1, 1))
bn_output = relay.nn.batch_norm(layer, bn_gamma, bn_beta, bn_mmean, bn_mvar)
out = bn_output[0]
out = relay.nn.relu(out)
func = relay.Function(relay.analysis.free_vars(out), out)
ref_mod, params = tvm.relay.testing.create_workload(func)
ref_mod["main"] = bind_params_by_name(ref_mod["main"], params)
remove_bn_pass = tvm.transform.Sequential(
[
transform.InferType(),
transform.SimplifyInference(),
transform.FoldConstant(),
transform.FoldScaleAxis(),
]
)
dnnl_patterns = get_pattern_table("dnnl")
composite_partition = tvm.transform.Sequential(
[
transform.MergeComposite(dnnl_patterns),
transform.AnnotateTarget("dnnl"),
transform.PartitionGraph(),
]
)
with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
ref_mod = remove_bn_pass(ref_mod)
mod = composite_partition(ref_mod)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
check_result(mod, ref_mod, {"data": i_data}, (1, 32, 14, 14), tol=1e-5)
def test_partial_constant():
"""Test the subgraph with (const, var, const, var) arguments."""
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
print("skip because DNNL codegen is not available")
return
dtype = "float32"
ishape = (10, 10)
in_1 = relay.var("in_1", shape=ishape, dtype=dtype)
in_2 = relay.var("in_2", shape=ishape, dtype=dtype)
in_3 = relay.var("in_3", shape=ishape, dtype=dtype)
in_4 = relay.var("in_4", shape=ishape, dtype=dtype)
add1 = relay.add(in_1, in_2)
add2 = relay.add(add1, in_3)
add3 = relay.add(add2, in_3)
add4 = relay.add(add3, in_3)
func = relay.Function([in_1, in_2, in_3, in_4], add4)
ref_mod = tvm.IRModule.from_expr(func)
ref_mod = relay.transform.InferType()(ref_mod)
data1 = np.random.uniform(0, 1, ishape).astype(dtype)
data3 = np.random.uniform(0, 1, ishape).astype(dtype)
params = {
"in_1": tvm.nd.array(data1, ctx=tvm.cpu(0)),
"in_3": tvm.nd.array(data3, ctx=tvm.cpu(0)),
}
ref_mod["main"] = bind_params_by_name(ref_mod["main"], params)
opt_pass = tvm.transform.Sequential(
[
transform.InferType(),
transform.SimplifyInference(),
transform.FoldConstant(),
transform.FoldScaleAxis(),
transform.AnnotateTarget("dnnl"),
transform.MergeCompilerRegions(),
transform.PartitionGraph(),
]
)
with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
mod = opt_pass(ref_mod)
data2 = np.random.uniform(0, 1, ishape).astype(dtype)
data4 = np.random.uniform(0, 1, ishape).astype(dtype)
check_result(mod, ref_mod, {"in_2": data2, "in_4": data4}, (10, 10), tol=1e-5)
if __name__ == "__main__":
test_conv2d()
test_add()
test_relu()
test_dense()
test_bn()
test_multiple_ops()
test_composite()
test_constant()
test_partial_constant()
| 35.893175
| 97
| 0.613426
|
a2cb04ea50ab0363f3d617b3c6bdd890d606000e
| 9,395
|
py
|
Python
|
homeworks/hw5/task1_tkinter.py
|
ermekaitygulov/made-robotics
|
aefaa96627bf2d871fb925acfaab43b3deb8ee57
|
[
"Apache-2.0"
] | null | null | null |
homeworks/hw5/task1_tkinter.py
|
ermekaitygulov/made-robotics
|
aefaa96627bf2d871fb925acfaab43b3deb8ee57
|
[
"Apache-2.0"
] | null | null | null |
homeworks/hw5/task1_tkinter.py
|
ermekaitygulov/made-robotics
|
aefaa96627bf2d871fb925acfaab43b3deb8ee57
|
[
"Apache-2.0"
] | null | null | null |
from tkinter import *
import math
'''=================Your Methods================='''
class Window():
def __init__(self):
self.root = Tk()
self.root.title("")
self.width = self.root.winfo_screenwidth()
self.height = self.root.winfo_screenheight()
self.root.geometry(f'{self.width}x{self.height}')
self.canvas = Canvas(self.root, bg="#777777", height=self.height, width=self.width)
self.canvas.pack()
def go(self, event):
#Write your code here
#print("Start position:", self.get_start_position())
#print("Target position:", self.get_target_position())
#print("Obstacles:", self.get_obstacles())
return True
'''================= Task Interface Methods ================='''
def get_obstacles(self) :
obstacles = []
potential_obstacles = self.canvas.find_all()
for i in potential_obstacles:
if (i > 2) :
coords = self.canvas.coords(i)
obstacles.append(coords)
return obstacles
def get_start_position(self) :
x,y = self.get_center(2) # Purple block has id 2
yaw = self.get_yaw(2)
return x,y,yaw
def get_target_position(self) :
x,y = self.get_center(1) # Green block has id 1
yaw = self.get_yaw(1)
return x,y,yaw
def get_center(self, id_block):
coords = self.canvas.coords(id_block)
center_x, center_y = ((coords[0] + coords[4]) / 2, (coords[1] + coords[5]) / 2)
return [center_x, center_y]
def get_yaw(self, id_block):
center_x, center_y = self.get_center(id_block)
first_x = 0.0
first_y = -1.0
second_x = 1.0
second_y = 0.0
points = self.canvas.coords(id_block)
end_x = (points[0] + points[2])/2
end_y = (points[1] + points[3])/2
direction_x = end_x - center_x
direction_y = end_y - center_y
length = math.hypot(direction_x, direction_y)
unit_x = direction_x / length
unit_y = direction_y / length
cos_yaw = unit_x * first_x + unit_y * first_y
sign_yaw = unit_x * second_x + unit_y * second_y
if (sign_yaw >= 0 ) :
return math.acos(cos_yaw)
else :
return -math.acos(cos_yaw)
def get_vertices(self, id_block):
return self.canvas.coords(id_block)
'''=================================================='''
def rotate(self, points, angle, center):
angle = math.radians(angle)
cos_val = math.cos(angle)
sin_val = math.sin(angle)
cx, cy = center
new_points = []
for x_old, y_old in points:
x_old -= cx
y_old -= cy
x_new = x_old * cos_val - y_old * sin_val
y_new = x_old * sin_val + y_old * cos_val
new_points.append(x_new+cx)
new_points.append(y_new+cy)
return new_points
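    # The loop above applies a standard 2D rotation about the center (cx, cy):
    #   x_new = (x - cx) * cos(angle) - (y - cy) * sin(angle) + cx
    #   y_new = (x - cx) * sin(angle) + (y - cy) * cos(angle) + cy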
def start_block(self, event):
widget = event.widget
widget.start_x = event.x
widget.start_y = event.y
def in_rect(self, point, rect):
x_start, x_end = min(rect[::2]), max(rect[::2])
y_start, y_end = min(rect[1::2]), max(rect[1::2])
if x_start < point[0] < x_end and y_start < point[1] < y_end:
return True
def motion_block(self, event):
widget = event.widget
for i in range(1, 10):
if widget.coords(i) == []:
break
if self.in_rect([event.x, event.y], widget.coords(i)):
coords = widget.coords(i)
id = i
break
res_cords = []
try:
coords
        except NameError:  # no block was found under the cursor
return
for ii, i in enumerate(coords):
if ii % 2 == 0:
res_cords.append(i + event.x - widget.start_x)
else:
res_cords.append(i + event.y - widget.start_y)
widget.start_x = event.x
widget.start_y = event.y
widget.coords(id, res_cords)
widget.center = ((res_cords[0] + res_cords[4]) / 2, (res_cords[1] + res_cords[5]) / 2)
def draw_block(self, points, color):
x = self.canvas.create_polygon(points, fill=color)
return x
def distance(self, x1, y1, x2, y2):
return ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5
def set_id_block(self, event):
widget = event.widget
for i in range(1, 10):
if widget.coords(i) == []:
break
if self.in_rect([event.x, event.y], widget.coords(i)):
coords = widget.coords(i)
id = i
widget.id_block = i
break
widget.center = ((coords[0] + coords[4]) / 2, (coords[1] + coords[5]) / 2)
def rotate_block(self, event):
angle = 0
widget = event.widget
if widget.id_block == None:
for i in range(1, 10):
if widget.coords(i) == []:
break
                if self.in_rect([event.x, event.y], widget.coords(i)):
                    coords = widget.coords(i)
                    id = i
                    widget.id_block = i
                    break
else:
id = widget.id_block
coords = widget.coords(id)
wx, wy = event.x_root, event.y_root
try:
coords
        except NameError:  # no block was found under the cursor
return
block = coords
center = widget.center
x, y = block[2], block[3]
cat1 = self.distance(x, y, block[4], block[5])
cat2 = self.distance(wx, wy, block[4], block[5])
hyp = self.distance(x, y, wx, wy)
if wx - x > 0: angle = math.acos((cat1**2 + cat2**2 - hyp**2) / (2 * cat1 * cat2))
elif wx - x < 0: angle = -math.acos((cat1**2 + cat2**2 - hyp**2) / (2 * cat1 * cat2))
new_block = self.rotate([block[0:2], block[2:4], block[4:6], block[6:8]], angle, center)
self.canvas.coords(id, new_block)
def delete_block(self, event):
widget = event.widget.children["!canvas"]
for i in range(1, 10):
if widget.coords(i) == []:
break
if self.in_rect([event.x, event.y], widget.coords(i)):
widget.coords(i, [0,0])
break
def create_block(self, event):
block = [[0, 100], [100, 100], [100, 300], [0, 300]]
id = self.draw_block(block, "black")
self.canvas.tag_bind(id, "<Button-1>", self.start_block)
self.canvas.tag_bind(id, "<Button-3>", self.set_id_block)
self.canvas.tag_bind(id, "<B1-Motion>", self.motion_block)
self.canvas.tag_bind(id, "<B3-Motion>", self.rotate_block)
def make_draggable(self, widget):
widget.bind("<Button-1>", self.drag_start)
widget.bind("<B1-Motion>", self.drag_motion)
def drag_start(self, event):
widget = event.widget
widget.start_x = event.x
widget.start_y = event.y
def drag_motion(self, event):
widget = event.widget
x = widget.winfo_x() - widget.start_x + event.x + 200
y = widget.winfo_y() - widget.start_y + event.y + 100
widget.place(rely=0.0, relx=0.0, x=x, y=y)
def create_button_create(self):
button = Button(
text="New",
bg="#555555",
activebackground="blue",
borderwidth=0
)
button.place(rely=0.0, relx=0.0, x=200, y=100, anchor=SE, width=200, height=100)
button.bind("<Button-1>", self.create_block)
def create_green_block(self, center_x):
block = [[center_x - 50, 100],
[center_x + 50, 100],
[center_x + 50, 300],
[center_x - 50, 300]]
id = self.draw_block(block, "green")
self.canvas.tag_bind(id, "<Button-1>", self.start_block)
self.canvas.tag_bind(id, "<Button-3>", self.set_id_block)
self.canvas.tag_bind(id, "<B1-Motion>", self.motion_block)
self.canvas.tag_bind(id, "<B3-Motion>", self.rotate_block)
def create_purple_block(self, center_x, center_y):
block = [[center_x - 50, center_y - 300],
[center_x + 50, center_y - 300],
[center_x + 50, center_y - 100],
[center_x - 50, center_y - 100]]
id = self.draw_block(block, "purple")
self.canvas.tag_bind(id, "<Button-1>", self.start_block)
self.canvas.tag_bind(id, "<Button-3>", self.set_id_block)
self.canvas.tag_bind(id, "<B1-Motion>", self.motion_block)
self.canvas.tag_bind(id, "<B3-Motion>", self.rotate_block)
def create_button_go(self):
button = Button(
text="Go",
bg="#555555",
activebackground="blue",
borderwidth=0
)
button.place(rely=0.0, relx=1.0, x=0, y=200, anchor=SE, width=100, height=200)
button.bind("<Button-1>", self.go)
def run(self):
root = self.root
self.create_button_create()
self.create_button_go()
self.create_green_block(self.width/2)
self.create_purple_block(self.width/2, self.height)
root.bind("<Delete>", self.delete_block)
root.mainloop()
if __name__ == "__main__":
run = Window()
run.run()
| 31.847458
| 96
| 0.530601
|
712ffe2d631c14d7c778aafa23826e8fad2b861c
| 6,432
|
py
|
Python
|
sdk/python/pulumi_azure_native/network/v20190401/get_route_filter.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20190401/get_route_filter.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20190401/get_route_filter.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetRouteFilterResult',
'AwaitableGetRouteFilterResult',
'get_route_filter',
]
@pulumi.output_type
class GetRouteFilterResult:
"""
Route Filter Resource.
"""
def __init__(__self__, etag=None, id=None, ipv6_peerings=None, location=None, name=None, peerings=None, provisioning_state=None, rules=None, tags=None, type=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if ipv6_peerings and not isinstance(ipv6_peerings, list):
raise TypeError("Expected argument 'ipv6_peerings' to be a list")
pulumi.set(__self__, "ipv6_peerings", ipv6_peerings)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if peerings and not isinstance(peerings, list):
raise TypeError("Expected argument 'peerings' to be a list")
pulumi.set(__self__, "peerings", peerings)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if rules and not isinstance(rules, list):
raise TypeError("Expected argument 'rules' to be a list")
pulumi.set(__self__, "rules", rules)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def etag(self) -> str:
"""
Gets a unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="ipv6Peerings")
def ipv6_peerings(self) -> Optional[Sequence['outputs.ExpressRouteCircuitPeeringResponse']]:
"""
A collection of references to express route circuit ipv6 peerings.
"""
return pulumi.get(self, "ipv6_peerings")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def peerings(self) -> Optional[Sequence['outputs.ExpressRouteCircuitPeeringResponse']]:
"""
A collection of references to express route circuit peerings.
"""
return pulumi.get(self, "peerings")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', 'Succeeded' and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def rules(self) -> Optional[Sequence['outputs.RouteFilterRuleResponse']]:
"""
Collection of RouteFilterRules contained within a route filter.
"""
return pulumi.get(self, "rules")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetRouteFilterResult(GetRouteFilterResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetRouteFilterResult(
etag=self.etag,
id=self.id,
ipv6_peerings=self.ipv6_peerings,
location=self.location,
name=self.name,
peerings=self.peerings,
provisioning_state=self.provisioning_state,
rules=self.rules,
tags=self.tags,
type=self.type)
def get_route_filter(expand: Optional[str] = None,
resource_group_name: Optional[str] = None,
route_filter_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRouteFilterResult:
"""
Route Filter Resource.
:param str expand: Expands referenced express route bgp peering resources.
:param str resource_group_name: The name of the resource group.
:param str route_filter_name: The name of the route filter.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['resourceGroupName'] = resource_group_name
__args__['routeFilterName'] = route_filter_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20190401:getRouteFilter', __args__, opts=opts, typ=GetRouteFilterResult).value
return AwaitableGetRouteFilterResult(
etag=__ret__.etag,
id=__ret__.id,
ipv6_peerings=__ret__.ipv6_peerings,
location=__ret__.location,
name=__ret__.name,
peerings=__ret__.peerings,
provisioning_state=__ret__.provisioning_state,
rules=__ret__.rules,
tags=__ret__.tags,
type=__ret__.type)
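# Hypothetical usage sketch (resource names below are placeholders, not taken
# from this file):
#
#     result = get_route_filter(resource_group_name="example-rg",
#                               route_filter_name="example-filter")
#     pulumi.export("provisioningState", result.provisioning_state)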
| 34.395722
| 167
| 0.634173
|
70efe49062ab8ebed09feac4534efedad7464f61
| 6,978
|
py
|
Python
|
core/tests/performance_tests/base.py
|
anubhavsinha98/oppia
|
9a64ea2e91d2f471ce22bd39da77b43dccd5b51f
|
[
"Apache-2.0"
] | 1
|
2019-08-31T17:06:41.000Z
|
2019-08-31T17:06:41.000Z
|
core/tests/performance_tests/base.py
|
anubhavsinha98/oppia
|
9a64ea2e91d2f471ce22bd39da77b43dccd5b51f
|
[
"Apache-2.0"
] | null | null | null |
core/tests/performance_tests/base.py
|
anubhavsinha98/oppia
|
9a64ea2e91d2f471ce22bd39da77b43dccd5b51f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for performance test classes."""
from __future__ import absolute_import # pylint: disable=import-only-modules
import random
import unittest
from core.tests.performance_framework import perf_domain
from core.tests.performance_framework import perf_services
from core.tests.performance_tests import test_config
import python_utils
class TestBase(unittest.TestCase):
"""Base class for performance tests."""
# The default number of page load sessions used to collect timing metrics.
DEFAULT_SESSION_SAMPLE_COUNT = 3
BASE_URL = 'http://localhost:%d' % test_config.PERFORMANCE_TESTS_SERVER_PORT
def setUp(self):
self.data_fetcher = None
self.page_metrics = None
self.page_url = None
self.size_limit_uncached_bytes = None
self.size_limit_cached_bytes = None
self.load_time_limit_uncached_ms = None
self.load_time_limit_cached_ms = None
self.username = 'user%d' % random.randint(1, 100000)
self.preload_option = None
def _initialize_data_fetcher(self):
"""Initializes SeleniumPerformanceDataFetcher instance."""
self.data_fetcher = perf_services.SeleniumPerformanceDataFetcher(
browser=perf_services.BROWSER_CHROME, username=self.username,
preload_option=self.preload_option)
def _get_complete_url(self, base_url, page_url_short):
"""Returns the absolute URL by joining the base_url and page_url_short.
Args:
base_url: str. The base URL.
page_url_short: str. The relative page URL to be joined to base_url.
Returns:
str. The resulting joined URL.
"""
return python_utils.url_join(base_url, page_url_short)
def _load_page_to_cache_server_resources(self):
"""Loads page for server side caching."""
self.data_fetcher.load_url(self.page_url)
def _record_page_metrics_from_uncached_session(self):
"""Records the page metrics from uncached session for a given page
URL.
"""
self.page_metrics = (
self.data_fetcher.get_page_metrics_from_uncached_session(
self.page_url))
def _record_page_metrics_from_cached_session(self):
"""Records the page metrics from cached session for a given page URL."""
self.page_metrics = (
self.data_fetcher.get_page_metrics_from_cached_session(
self.page_url))
def _record_average_page_timings_from_uncached_session(
self, session_count=DEFAULT_SESSION_SAMPLE_COUNT):
"""Records average page timings from uncached session.
Args:
session_count: int. Number of page load sessions used to
collect timing metrics. Defaults to
DEFAULT_SESSION_SAMPLE_COUNT.
"""
page_session_metrics_list = []
for _ in python_utils.RANGE(session_count):
page_session_metrics_list.append(
self.data_fetcher.get_page_timings_from_uncached_session(
self.page_url))
self.page_metrics = perf_domain.MultiplePageSessionMetrics(
page_session_metrics_list)
def _record_average_page_timings_from_cached_session(
self, session_count=DEFAULT_SESSION_SAMPLE_COUNT):
"""Records average page timings from cached session.
Args:
            session_count: int. Number of page load sessions used to
collect timing metrics. Defaults to
DEFAULT_SESSION_SAMPLE_COUNT.
"""
page_session_metrics_list = []
for _ in python_utils.RANGE(session_count):
page_session_metrics_list.append(
self.data_fetcher.get_page_timings_from_cached_session(
self.page_url))
self.page_metrics = perf_domain.MultiplePageSessionMetrics(
page_session_metrics_list)
def _set_page_config(self, page_config, append_username=False):
"""Sets the page configuration parameters.
Args:
page_config: dict. The page configuration parameters.
append_username: bool. Whether to append username to the page URL.
"""
self.page_url = self._get_complete_url(
self.BASE_URL, page_config['url'])
self.size_limit_uncached_bytes = (
page_config['size_limits_mb']['uncached'] * 1024 * 1024)
self.size_limit_cached_bytes = (
page_config['size_limits_mb']['cached'] * 1024 * 1024)
self.load_time_limit_uncached_ms = (
page_config['load_time_limits_secs']['uncached'] * 1000)
self.load_time_limit_cached_ms = (
page_config['load_time_limits_secs']['cached'] * 1000)
self.preload_option = page_config['preload_options']
if append_username:
self.page_url = self._get_complete_url(
self.page_url, self.username)
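    # Illustrative shape of the page_config dict consumed above (the keys come
    # from this method; the values are hypothetical, not taken from test_config):
    # {
    #     'url': 'splash',
    #     'size_limits_mb': {'uncached': 1.2, 'cached': 0.5},
    #     'load_time_limits_secs': {'uncached': 10, 'cached': 5},
    #     'preload_options': None,
    # }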
def _test_total_page_size(self):
"""Checks whether the total page size is under the limit of
uncached session size.
"""
self._record_page_metrics_from_uncached_session()
self.assertLessEqual(
self.page_metrics.get_total_page_size_bytes(),
self.size_limit_uncached_bytes)
def _test_total_page_size_for_cached_session(self):
"""Checks whether the total page size is under the limit of
cached session size.
"""
self._record_page_metrics_from_cached_session()
self.assertLessEqual(
self.page_metrics.get_total_page_size_bytes(),
self.size_limit_cached_bytes)
def _test_page_load_time(self):
"""Checks whether the total page load time is under uncached session
time limit.
"""
self._record_average_page_timings_from_uncached_session()
self.assertLessEqual(
self.page_metrics.get_average_page_load_time_millisecs(),
self.load_time_limit_uncached_ms)
def _test_page_load_time_for_cached_session(self):
"""Checks whether the total page load time is under cached session
time limit.
"""
self._record_average_page_timings_from_cached_session()
self.assertLessEqual(
self.page_metrics.get_average_page_load_time_millisecs(),
self.load_time_limit_cached_ms)
| 36.920635
| 80
| 0.685153
|
d8c43c16e15978aa8f6eb49d204f2449b6e988bd
| 3,160
|
py
|
Python
|
lectures/scrape/lecture-scraper/tutorialscraper/settings.py
|
JiyuanLyu/PIC16B
|
1cb5c8aea797bc1f88ec5759e7f110d2ba5d2b5b
|
[
"MIT"
] | null | null | null |
lectures/scrape/lecture-scraper/tutorialscraper/settings.py
|
JiyuanLyu/PIC16B
|
1cb5c8aea797bc1f88ec5759e7f110d2ba5d2b5b
|
[
"MIT"
] | null | null | null |
lectures/scrape/lecture-scraper/tutorialscraper/settings.py
|
JiyuanLyu/PIC16B
|
1cb5c8aea797bc1f88ec5759e7f110d2ba5d2b5b
|
[
"MIT"
] | null | null | null |
# Scrapy settings for tutorialscraper project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# BOT_NAME = 'tutorialscraper'
SPIDER_MODULES = ['tutorialscraper.spiders']
NEWSPIDER_MODULE = 'tutorialscraper.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'tutorialscraper (+http://www.yourdomain.com)'
# Obey robots.txt rules
# ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'tutorialscraper.middlewares.TutorialscraperSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'tutorialscraper.middlewares.TutorialscraperDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'tutorialscraper.pipelines.TutorialscraperPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| 35.505618
| 103
| 0.782278
|
e0e6a90368397b4f87823b7d3df88414e47a4b81
| 1,015
|
py
|
Python
|
env/Lib/site-packages/plotly/validators/histogram2d/_xcalendar.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 11,750
|
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
env/Lib/site-packages/plotly/validators/histogram2d/_xcalendar.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 2,951
|
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
env/Lib/site-packages/plotly/validators/histogram2d/_xcalendar.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 2,623
|
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
import _plotly_utils.basevalidators
class XcalendarValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="xcalendar", parent_name="histogram2d", **kwargs):
super(XcalendarValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
values=kwargs.pop(
"values",
[
"chinese",
"coptic",
"discworld",
"ethiopian",
"gregorian",
"hebrew",
"islamic",
"jalali",
"julian",
"mayan",
"nanakshahi",
"nepali",
"persian",
"taiwan",
"thai",
"ummalqura",
],
),
**kwargs
)
| 30.757576
| 85
| 0.407882
|
880c5b1df4ad4eec02aa8f8b4f27903fe449784c
| 38,213
|
py
|
Python
|
pacman-contest/capture.py
|
ctdempsey/pacman-ctf-comp-site
|
63a2bbef5008df619b0f5ead7eceecb880169c8e
|
[
"Unlicense"
] | null | null | null |
pacman-contest/capture.py
|
ctdempsey/pacman-ctf-comp-site
|
63a2bbef5008df619b0f5ead7eceecb880169c8e
|
[
"Unlicense"
] | null | null | null |
pacman-contest/capture.py
|
ctdempsey/pacman-ctf-comp-site
|
63a2bbef5008df619b0f5ead7eceecb880169c8e
|
[
"Unlicense"
] | null | null | null |
# capture.py
# ----------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
# capture.py
# ----------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
"""
Capture.py holds the logic for Pacman capture the flag.
(i) Your interface to the pacman world:
Pacman is a complex environment. You probably don't want to
read through all of the code we wrote to make the game runs
correctly. This section contains the parts of the code
that you will need to understand in order to complete the
project. There is also some code in game.py that you should
understand.
(ii) The hidden secrets of pacman:
This section contains all of the logic code that the pacman
environment uses to decide who can move where, who dies when
things collide, etc. You shouldn't need to read this section
of code, but you can if you want.
(iii) Framework to start a game:
The final section contains the code for reading the command
you use to set up the game, then starting up a new game, along with
linking in all the external parts (agent functions, graphics).
Check this section out to see all the options available to you.
To play your first game, type 'python capture.py' from the command line.
The keys are
P1: 'a', 's', 'd', and 'w' to move
P2: 'l', ';', ',' and 'p' to move
"""
import datetime
from game import GameStateData
from game import Game
from game import Directions
from game import Actions
from util import nearestPoint
from util import manhattanDistance
from game import Grid
from game import Configuration
from game import Agent
from game import reconstituteGrid
import sys, util, types, time, random, imp
import keyboardAgents
# If you change these, you won't affect the server, so you can't cheat
KILL_POINTS = 0
SONAR_NOISE_RANGE = 13 # Must be odd
SONAR_NOISE_VALUES = [i - (SONAR_NOISE_RANGE - 1)/2 for i in range(SONAR_NOISE_RANGE)]
SIGHT_RANGE = 5 # Manhattan distance
MIN_FOOD = 2
TOTAL_FOOD = 60
DUMP_FOOD_ON_DEATH = True # if we have the gameplay element that dumps dots on death
SCARED_TIME = 40
def noisyDistance(pos1, pos2):
return int(util.manhattanDistance(pos1, pos2) + random.choice(SONAR_NOISE_VALUES))
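# With SONAR_NOISE_RANGE = 13, SONAR_NOISE_VALUES is the integers -6..6, so a
# reading is the true Manhattan distance plus uniform noise in [-6, 6]; each
# offset has probability 1/13 (see GameState.getDistanceProb below).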
###################################################
# YOUR INTERFACE TO THE PACMAN WORLD: A GameState #
###################################################
class GameState:
"""
A GameState specifies the full game state, including the food, capsules,
agent configurations and score changes.
GameStates are used by the Game object to capture the actual state of the game and
can be used by agents to reason about the game.
Much of the information in a GameState is stored in a GameStateData object. We
strongly suggest that you access that data via the accessor methods below rather
than referring to the GameStateData object directly.
"""
####################################################
# Accessor methods: use these to access state data #
####################################################
def getLegalActions( self, agentIndex=0 ):
"""
Returns the legal actions for the agent specified.
"""
return AgentRules.getLegalActions( self, agentIndex )
def generateSuccessor( self, agentIndex, action):
"""
Returns the successor state (a GameState object) after the specified agent takes the action.
"""
# Copy current state
state = GameState(self)
# Find appropriate rules for the agent
AgentRules.applyAction( state, action, agentIndex )
AgentRules.checkDeath(state, agentIndex)
AgentRules.decrementTimer(state.data.agentStates[agentIndex])
# Book keeping
state.data._agentMoved = agentIndex
state.data.score += state.data.scoreChange
state.data.timeleft = self.data.timeleft - 1
return state
def getAgentState(self, index):
return self.data.agentStates[index]
def getAgentPosition(self, index):
"""
Returns a location tuple if the agent with the given index is observable;
if the agent is unobservable, returns None.
"""
agentState = self.data.agentStates[index]
ret = agentState.getPosition()
if ret:
return tuple(int(x) for x in ret)
return ret
def getNumAgents( self ):
return len( self.data.agentStates )
def getScore( self ):
"""
Returns a number corresponding to the current score.
"""
return self.data.score
def getRedFood(self):
"""
Returns a matrix of food that corresponds to the food on the red team's side.
For the matrix m, m[x][y]=true if there is food in (x,y) that belongs to
red (meaning red is protecting it, blue is trying to eat it).
"""
return halfGrid(self.data.food, red = True)
def getBlueFood(self):
"""
Returns a matrix of food that corresponds to the food on the blue team's side.
For the matrix m, m[x][y]=true if there is food in (x,y) that belongs to
blue (meaning blue is protecting it, red is trying to eat it).
"""
return halfGrid(self.data.food, red = False)
def getRedCapsules(self):
return halfList(self.data.capsules, self.data.food, red = True)
def getBlueCapsules(self):
return halfList(self.data.capsules, self.data.food, red = False)
def getWalls(self):
"""
Just like getFood but for walls
"""
return self.data.layout.walls
def hasFood(self, x, y):
"""
Returns true if the location (x,y) has food, regardless of
whether it's blue team food or red team food.
"""
return self.data.food[x][y]
def hasWall(self, x, y):
"""
Returns true if (x,y) has a wall, false otherwise.
"""
return self.data.layout.walls[x][y]
def isOver( self ):
return self.data._win
def getRedTeamIndices(self):
"""
Returns a list of agent index numbers for the agents on the red team.
"""
return self.redTeam[:]
def getBlueTeamIndices(self):
"""
Returns a list of the agent index numbers for the agents on the blue team.
"""
return self.blueTeam[:]
def isOnRedTeam(self, agentIndex):
"""
Returns true if the agent with the given agentIndex is on the red team.
"""
return self.teams[agentIndex]
def getAgentDistances(self):
"""
Returns a noisy distance to each agent.
"""
if 'agentDistances' in dir(self) :
return self.agentDistances
else:
return None
def getDistanceProb(self, trueDistance, noisyDistance):
"Returns the probability of a noisy distance given the true distance"
if noisyDistance - trueDistance in SONAR_NOISE_VALUES:
return 1.0/SONAR_NOISE_RANGE
else:
return 0
def getInitialAgentPosition(self, agentIndex):
"Returns the initial position of an agent."
return self.data.layout.agentPositions[agentIndex][1]
def getCapsules(self):
"""
Returns a list of positions (x,y) of the remaining capsules.
"""
return self.data.capsules
#############################################
# Helper methods: #
# You shouldn't need to call these directly #
#############################################
def __init__( self, prevState = None ):
"""
Generates a new state by copying information from its predecessor.
"""
        if prevState != None: # Copy information from the predecessor state
self.data = GameStateData(prevState.data)
self.blueTeam = prevState.blueTeam
self.redTeam = prevState.redTeam
self.data.timeleft = prevState.data.timeleft
self.teams = prevState.teams
self.agentDistances = prevState.agentDistances
else:
self.data = GameStateData()
self.agentDistances = []
def deepCopy( self ):
state = GameState( self )
state.data = self.data.deepCopy()
state.data.timeleft = self.data.timeleft
state.blueTeam = self.blueTeam[:]
state.redTeam = self.redTeam[:]
state.teams = self.teams[:]
state.agentDistances = self.agentDistances[:]
return state
def makeObservation(self, index):
state = self.deepCopy()
# Adds the sonar signal
pos = state.getAgentPosition(index)
n = state.getNumAgents()
distances = [noisyDistance(pos, state.getAgentPosition(i)) for i in range(n)]
state.agentDistances = distances
# Remove states of distant opponents
if index in self.blueTeam:
team = self.blueTeam
otherTeam = self.redTeam
else:
otherTeam = self.blueTeam
team = self.redTeam
for enemy in otherTeam:
seen = False
enemyPos = state.getAgentPosition(enemy)
for teammate in team:
if util.manhattanDistance(enemyPos, state.getAgentPosition(teammate)) <= SIGHT_RANGE:
seen = True
if not seen: state.data.agentStates[enemy].configuration = None
return state
def __eq__( self, other ):
"""
Allows two states to be compared.
"""
if other == None: return False
return self.data == other.data
def __hash__( self ):
"""
Allows states to be keys of dictionaries.
"""
return int(hash( self.data ))
def __str__( self ):
return str(self.data)
def initialize( self, layout, numAgents):
"""
Creates an initial game state from a layout array (see layout.py).
"""
self.data.initialize(layout, numAgents)
positions = [a.configuration for a in self.data.agentStates]
self.blueTeam = [i for i,p in enumerate(positions) if not self.isRed(p)]
self.redTeam = [i for i,p in enumerate(positions) if self.isRed(p)]
self.teams = [self.isRed(p) for p in positions]
#This is usually 60 (always 60 with random maps)
#However, if layout map is specified otherwise, it could be less
global TOTAL_FOOD
TOTAL_FOOD = layout.totalFood
def isRed(self, configOrPos):
width = self.data.layout.width
if type(configOrPos) == type( (0,0) ):
return configOrPos[0] < width / 2
else:
return configOrPos.pos[0] < width / 2
def halfGrid(grid, red):
halfway = grid.width / 2
halfgrid = Grid(grid.width, grid.height, False)
if red: xrange = range(halfway)
else: xrange = range(halfway, grid.width)
for y in range(grid.height):
for x in xrange:
if grid[x][y]: halfgrid[x][y] = True
return halfgrid
def halfList(l, grid, red):
halfway = grid.width / 2
newList = []
for x,y in l:
if red and x <= halfway: newList.append((x,y))
elif not red and x > halfway: newList.append((x,y))
return newList
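# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original framework): one way an agent
# could combine getAgentDistances() with getDistanceProb() to maintain a
# belief over an opponent's position. The function name, the 'prior' dict and
# the calling convention are assumptions made for this example only.
def exampleUpdateOpponentBelief(gameState, myPos, opponentIndex, prior):
    """prior maps candidate (x, y) positions to probabilities; returns the
    renormalized belief after observing one noisy distance reading."""
    noisy = gameState.getAgentDistances()[opponentIndex]
    belief = {}
    for pos, p in prior.items():
        trueDistance = util.manhattanDistance(myPos, pos)
        belief[pos] = p * gameState.getDistanceProb(trueDistance, noisy)
    total = sum(belief.values())
    if total == 0:
        return prior  # the reading ruled out every candidate; keep the old belief
    return dict((pos, p / total) for pos, p in belief.items())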
############################################################################
# THE HIDDEN SECRETS OF PACMAN #
# #
# You shouldn't need to look through the code in this section of the file. #
############################################################################
COLLISION_TOLERANCE = 0.7 # How close ghosts must be to Pacman to kill
class CaptureRules:
"""
These game rules manage the control flow of a game, deciding when
and how the game starts and ends.
"""
def __init__(self, quiet = False):
self.quiet = quiet
def newGame( self, layout, agents, display, length, muteAgents, catchExceptions ):
initState = GameState()
initState.initialize( layout, len(agents) )
starter = random.randint(0,1)
print('%s team starts' % ['Red', 'Blue'][starter])
game = Game(agents, display, self, startingIndex=starter, muteAgents=muteAgents, catchExceptions=catchExceptions)
game.state = initState
game.length = length
game.state.data.timeleft = length
if 'drawCenterLine' in dir(display):
display.drawCenterLine()
self._initBlueFood = initState.getBlueFood().count()
self._initRedFood = initState.getRedFood().count()
return game
def process(self, state, game):
"""
Checks to see whether it is time to end the game.
"""
if 'moveHistory' in dir(game):
if len(game.moveHistory) == game.length:
state.data._win = True
if state.isOver():
game.gameOver = True
if not game.rules.quiet:
redCount = 0
blueCount = 0
foodToWin = (TOTAL_FOOD/2) - MIN_FOOD
for index in range(state.getNumAgents()):
agentState = state.data.agentStates[index]
if index in state.getRedTeamIndices():
redCount += agentState.numReturned
else:
blueCount += agentState.numReturned
if blueCount >= foodToWin:#state.getRedFood().count() == MIN_FOOD:
print 'The Blue team has returned at least %d of the opponents\' dots.' % foodToWin
elif redCount >= foodToWin:#state.getBlueFood().count() == MIN_FOOD:
print 'The Red team has returned at least %d of the opponents\' dots.' % foodToWin
else:#if state.getBlueFood().count() > MIN_FOOD and state.getRedFood().count() > MIN_FOOD:
print 'Time is up.'
if state.data.score == 0: print 'Tie game!'
else:
winner = 'Red'
if state.data.score < 0: winner = 'Blue'
print 'The %s team wins by %d points.' % (winner, abs(state.data.score))
def getProgress(self, game):
blue = 1.0 - (game.state.getBlueFood().count() / float(self._initBlueFood))
red = 1.0 - (game.state.getRedFood().count() / float(self._initRedFood))
moves = len(game.moveHistory) / float(game.length)
# return the most likely progress indicator, clamped to [0, 1]
return min(max(0.75 * max(red, blue) + 0.25 * moves, 0.0), 1.0)
def agentCrash(self, game, agentIndex):
if agentIndex % 2 == 0:
print >>sys.stderr, "Red agent crashed"
game.state.data.score = -1
else:
print >>sys.stderr, "Blue agent crashed"
game.state.data.score = 1
def getMaxTotalTime(self, agentIndex):
return 900 # Move limits should prevent this from ever happening
def getMaxStartupTime(self, agentIndex):
return 15 # 15 seconds for registerInitialState
def getMoveWarningTime(self, agentIndex):
return 1 # One second per move
def getMoveTimeout(self, agentIndex):
return 3 # Three seconds results in instant forfeit
def getMaxTimeWarnings(self, agentIndex):
return 2 # Third violation loses the game
class AgentRules:
"""
These functions govern how each agent interacts with her environment.
"""
def getLegalActions( state, agentIndex ):
"""
Returns a list of legal actions (which are both possible & allowed)
"""
agentState = state.getAgentState(agentIndex)
conf = agentState.configuration
possibleActions = Actions.getPossibleActions( conf, state.data.layout.walls )
return AgentRules.filterForAllowedActions( agentState, possibleActions)
getLegalActions = staticmethod( getLegalActions )
def filterForAllowedActions(agentState, possibleActions):
return possibleActions
filterForAllowedActions = staticmethod( filterForAllowedActions )
def applyAction( state, action, agentIndex ):
"""
Edits the state to reflect the results of the action.
"""
legal = AgentRules.getLegalActions( state, agentIndex )
if action not in legal:
raise Exception("Illegal action " + str(action))
# Update Configuration
agentState = state.data.agentStates[agentIndex]
speed = 1.0
# if agentState.isPacman: speed = 0.5
vector = Actions.directionToVector( action, speed )
oldConfig = agentState.configuration
agentState.configuration = oldConfig.generateSuccessor( vector )
# Eat
next = agentState.configuration.getPosition()
nearest = nearestPoint( next )
if next == nearest:
isRed = state.isOnRedTeam(agentIndex)
# Change agent type
agentState.isPacman = [isRed, state.isRed(agentState.configuration)].count(True) == 1
# if he's no longer pacman, he's on his own side, so reset the num carrying timer
#agentState.numCarrying *= int(agentState.isPacman)
if agentState.numCarrying > 0 and not agentState.isPacman:
score = agentState.numCarrying if isRed else -1*agentState.numCarrying
state.data.scoreChange += score
agentState.numReturned += agentState.numCarrying
agentState.numCarrying = 0
redCount = 0
blueCount = 0
for index in range(state.getNumAgents()):
agentState = state.data.agentStates[index]
if index in state.getRedTeamIndices():
redCount += agentState.numReturned
else:
blueCount += agentState.numReturned
if redCount >= (TOTAL_FOOD/2) - MIN_FOOD or blueCount >= (TOTAL_FOOD/2) - MIN_FOOD:
state.data._win = True
if agentState.isPacman and manhattanDistance( nearest, next ) <= 0.9 :
AgentRules.consume( nearest, state, state.isOnRedTeam(agentIndex) )
applyAction = staticmethod( applyAction )
def consume( position, state, isRed ):
x,y = position
# Eat food
if state.data.food[x][y]:
# blue case is the default
teamIndicesFunc = state.getBlueTeamIndices
score = -1
if isRed:
# switch if its red
score = 1
teamIndicesFunc = state.getRedTeamIndices
# go increase the variable for the pacman who ate this
agents = [state.data.agentStates[agentIndex] for agentIndex in teamIndicesFunc()]
for agent in agents:
if agent.getPosition() == position:
agent.numCarrying += 1
break # the above should only be true for one agent...
# do all the score and food grid maintenance
#state.data.scoreChange += score
state.data.food = state.data.food.copy()
state.data.food[x][y] = False
state.data._foodEaten = position
#if (isRed and state.getBlueFood().count() == MIN_FOOD) or (not isRed and state.getRedFood().count() == MIN_FOOD):
# state.data._win = True
# Eat capsule
if isRed: myCapsules = state.getBlueCapsules()
else: myCapsules = state.getRedCapsules()
if( position in myCapsules ):
state.data.capsules.remove( position )
state.data._capsuleEaten = position
# Reset all ghosts' scared timers
if isRed: otherTeam = state.getBlueTeamIndices()
else: otherTeam = state.getRedTeamIndices()
for index in otherTeam:
state.data.agentStates[index].scaredTimer = SCARED_TIME
consume = staticmethod( consume )
def decrementTimer(state):
timer = state.scaredTimer
if timer == 1:
state.configuration.pos = nearestPoint( state.configuration.pos )
state.scaredTimer = max( 0, timer - 1 )
decrementTimer = staticmethod( decrementTimer )
def dumpFoodFromDeath(state, agentState, agentIndex):
if not (DUMP_FOOD_ON_DEATH):
# this feature is not turned on
return
if not agentState.isPacman:
raise Exception('something is seriously wrong, this agent isnt a pacman!')
# ok so agentState is this:
if (agentState.numCarrying == 0):
return
# first, score changes!
# we HACK pack that ugly bug by just determining if its red based on the first position
# to die...
dummyConfig = Configuration(agentState.getPosition(), 'North')
isRed = state.isRed(dummyConfig)
# the score increases if red eats dots, so if we are refunding points,
# the direction should be -1 if the red agent died, which means he dies
# on the blue side
scoreDirection = (-1)**(int(isRed) + 1)
#state.data.scoreChange += scoreDirection * agentState.numCarrying
def onRightSide(state, x, y):
dummyConfig = Configuration((x, y), 'North')
return state.isRed(dummyConfig) == isRed
# we have food to dump
# -- expand out in BFS. Check:
# - that it's within the limits
# - that it's not a wall
# - that no other agents are there
# - that no power pellets are there
# - that it's on the right side of the grid
def allGood(state, x, y):
width, height = state.data.layout.width, state.data.layout.height
food, walls = state.data.food, state.data.layout.walls
# bounds check
if x >= width or y >= height or x <= 0 or y <= 0:
return False
if walls[x][y]:
return False
if food[x][y]:
return False
# dots need to be on the side where this agent will be a pacman :P
if not onRightSide(state, x, y):
return False
if (x,y) in state.data.capsules:
return False
# loop through agents
agentPoses = [state.getAgentPosition(i) for i in range(state.getNumAgents())]
if (x,y) in agentPoses:
return False
return True
numToDump = agentState.numCarrying
state.data.food = state.data.food.copy()
foodAdded = []
def genSuccessors(x, y):
DX = [-1, 0, 1]
DY = [-1, 0, 1]
return [(x + dx, y + dy) for dx in DX for dy in DY]
# BFS graph search
positionQueue = [agentState.getPosition()]
seen = set()
while numToDump > 0:
if not len(positionQueue):
raise Exception('Exhausted BFS! uh oh')
# pop one off, graph check
popped = positionQueue.pop(0)
if popped in seen:
continue
seen.add(popped)
x, y = popped[0], popped[1]
x = int(x)
y = int(y)
if (allGood(state, x, y)):
state.data.food[x][y] = True
foodAdded.append((x, y))
numToDump -= 1
# generate successors
positionQueue = positionQueue + genSuccessors(x, y)
state.data._foodAdded = foodAdded
# now our agentState is no longer carrying food
agentState.numCarrying = 0
pass
dumpFoodFromDeath = staticmethod(dumpFoodFromDeath)
def checkDeath( state, agentIndex):
agentState = state.data.agentStates[agentIndex]
if state.isOnRedTeam(agentIndex):
otherTeam = state.getBlueTeamIndices()
else:
otherTeam = state.getRedTeamIndices()
if agentState.isPacman:
for index in otherTeam:
otherAgentState = state.data.agentStates[index]
if otherAgentState.isPacman: continue
ghostPosition = otherAgentState.getPosition()
if ghostPosition == None: continue
if manhattanDistance( ghostPosition, agentState.getPosition() ) <= COLLISION_TOLERANCE:
# award points to the other team for killing Pacmen
if otherAgentState.scaredTimer <= 0:
AgentRules.dumpFoodFromDeath(state, agentState, agentIndex)
score = KILL_POINTS
if state.isOnRedTeam(agentIndex):
score = -score
state.data.scoreChange += score
agentState.isPacman = False
agentState.configuration = agentState.start
agentState.scaredTimer = 0
else:
score = KILL_POINTS
if state.isOnRedTeam(agentIndex):
score = -score
state.data.scoreChange += score
otherAgentState.isPacman = False
otherAgentState.configuration = otherAgentState.start
otherAgentState.scaredTimer = 0
else: # Agent is a ghost
for index in otherTeam:
otherAgentState = state.data.agentStates[index]
if not otherAgentState.isPacman: continue
pacPos = otherAgentState.getPosition()
if pacPos == None: continue
if manhattanDistance( pacPos, agentState.getPosition() ) <= COLLISION_TOLERANCE:
#award points to the other team for killing Pacmen
if agentState.scaredTimer <= 0:
AgentRules.dumpFoodFromDeath(state, otherAgentState, agentIndex)
score = KILL_POINTS
if not state.isOnRedTeam(agentIndex):
score = -score
state.data.scoreChange += score
otherAgentState.isPacman = False
otherAgentState.configuration = otherAgentState.start
otherAgentState.scaredTimer = 0
else:
score = KILL_POINTS
if state.isOnRedTeam(agentIndex):
score = -score
state.data.scoreChange += score
agentState.isPacman = False
agentState.configuration = agentState.start
agentState.scaredTimer = 0
checkDeath = staticmethod( checkDeath )
def placeGhost(state, ghostState):
ghostState.configuration = ghostState.start
placeGhost = staticmethod( placeGhost )
#############################
# FRAMEWORK TO START A GAME #
#############################
def default(str):
return str + ' [Default: %default]'
def parseAgentArgs(str):
if str == None or str == '': return {}
pieces = str.split(',')
opts = {}
for p in pieces:
if '=' in p:
key, val = p.split('=')
else:
key,val = p, 1
opts[key] = val
return opts
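# For illustration (hypothetical input): parseAgentArgs('first=keys,second=offense,verbose')
# returns {'first': 'keys', 'second': 'offense', 'verbose': 1} -- values stay strings and a
# bare token without '=' is mapped to the integer 1.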
def readCommand( argv ):
"""
Processes the command used to run pacman from the command line.
"""
from optparse import OptionParser
usageStr = """
USAGE: python pacman.py <options>
EXAMPLES: (1) python capture.py
- starts a game with two baseline agents
(2) python capture.py --keys0
- starts a two-player interactive game where the arrow keys control agent 0, and all other agents are baseline agents
(3) python capture.py -r baselineTeam -b myTeam
- starts a fully automated game where the red team is a baseline team and blue team is myTeam
"""
parser = OptionParser(usageStr)
parser.add_option('-r', '--red', help=default('Red team'),
default='baselineTeam')
parser.add_option('-b', '--blue', help=default('Blue team'),
default='baselineTeam')
parser.add_option('--red-name', help=default('Red team name'),
default='Red')
parser.add_option('--blue-name', help=default('Blue team name'),
default='Blue')
parser.add_option('--redOpts', help=default('Options for red team (e.g. first=keys)'),
default='')
parser.add_option('--blueOpts', help=default('Options for blue team (e.g. first=keys)'),
default='')
parser.add_option('--keys0', help='Make agent 0 (first red player) a keyboard agent', action='store_true',default=False)
parser.add_option('--keys1', help='Make agent 1 (second red player) a keyboard agent', action='store_true',default=False)
parser.add_option('--keys2', help='Make agent 2 (first blue player) a keyboard agent', action='store_true',default=False)
parser.add_option('--keys3', help='Make agent 3 (second blue player) a keyboard agent', action='store_true',default=False)
parser.add_option('-l', '--layout', dest='layout',
help=default('the LAYOUT_FILE from which to load the map layout; use RANDOM for a random maze; use RANDOM<seed> to use a specified random seed, e.g., RANDOM23'),
metavar='LAYOUT_FILE', default='defaultCapture')
parser.add_option('-t', '--textgraphics', action='store_true', dest='textgraphics',
help='Display output as text only', default=False)
parser.add_option('-q', '--quiet', action='store_true',
help='Display minimal output and no graphics', default=False)
parser.add_option('-Q', '--super-quiet', action='store_true', dest="super_quiet",
help='Same as -q but agent output is also suppressed', default=False)
parser.add_option('-z', '--zoom', type='float', dest='zoom',
help=default('Zoom in the graphics'), default=1)
parser.add_option('-i', '--time', type='int', dest='time',
help=default('TIME limit of a game in moves'), default=1200, metavar='TIME')
parser.add_option('-n', '--numGames', type='int',
help=default('Number of games to play'), default=1)
parser.add_option('-f', '--fixRandomSeed', action='store_true',
help='Fixes the random seed to always play the same game', default=False)
parser.add_option('--record', action='store_true',
help='Writes game histories to a file (named by the time they were played)', default=False)
parser.add_option('--replay', default=None,
help='Replays a recorded game file.')
parser.add_option('--delay-step', type='float', dest='delay_step',
help=default('Delay step in a play or replay.'), default=0.03)
parser.add_option('-x', '--numTraining', dest='numTraining', type='int',
help=default('How many episodes are training (suppresses output)'), default=0)
parser.add_option('-c', '--catchExceptions', action='store_true', default=False,
help='Catch exceptions and enforce time limits')
options, otherjunk = parser.parse_args(argv)
assert len(otherjunk) == 0, "Unrecognized options: " + str(otherjunk)
args = dict()
# Choose a display format
#if options.pygame:
# import pygameDisplay
# args['display'] = pygameDisplay.PacmanGraphics()
if options.textgraphics:
import textDisplay
args['display'] = textDisplay.PacmanGraphics()
elif options.quiet:
import textDisplay
args['display'] = textDisplay.NullGraphics()
elif options.super_quiet:
import textDisplay
args['display'] = textDisplay.NullGraphics()
args['muteAgents'] = True
else:
import captureGraphicsDisplay
# Hack for agents writing to the display
captureGraphicsDisplay.FRAME_TIME = 0
args['display'] = captureGraphicsDisplay.PacmanGraphics(options.red, options.blue, options.zoom, 0, capture=True)
import __main__
__main__.__dict__['_display'] = args['display']
args['redTeamName'] = options.red_name
args['blueTeamName'] = options.blue_name
if options.fixRandomSeed: random.seed('cs188')
# Special case: recorded games don't use the runGames method or args structure
if options.replay != None:
print 'Replaying recorded game %s.' % options.replay
import cPickle
recorded = cPickle.load(open(options.replay))
recorded['display'] = args['display']
recorded['delay'] = options.delay_step
recorded['redTeamName'] = options.red
recorded['blueTeamName'] = options.blue
replayGame(**recorded)
sys.exit(0)
# Choose a pacman agent
redArgs, blueArgs = parseAgentArgs(options.redOpts), parseAgentArgs(options.blueOpts)
if options.numTraining > 0:
redArgs['numTraining'] = options.numTraining
blueArgs['numTraining'] = options.numTraining
nokeyboard = options.textgraphics or options.quiet or options.numTraining > 0
print '\nRed team %s with %s:' % (options.red, redArgs)
redAgents = loadAgents(True, options.red, nokeyboard, redArgs)
print '\nBlue team %s with %s:' % (options.blue, blueArgs)
blueAgents = loadAgents(False, options.blue, nokeyboard, blueArgs)
args['agents'] = sum([list(el) for el in zip(redAgents, blueAgents)],[]) # list of agents
if None in blueAgents or None in redAgents:
if None in blueAgents:
print '\nBlue team failed to load!\n'
if None in redAgents:
print '\nRed team failed to load!\n'
raise Exception('No teams found!')
numKeyboardAgents = 0
for index, val in enumerate([options.keys0, options.keys1, options.keys2, options.keys3]):
if not val: continue
if numKeyboardAgents == 0:
agent = keyboardAgents.KeyboardAgent(index)
elif numKeyboardAgents == 1:
agent = keyboardAgents.KeyboardAgent2(index)
else:
raise Exception('Max of two keyboard agents supported')
numKeyboardAgents += 1
args['agents'][index] = agent
# Choose a layout
import layout
layouts = []
for i in range(options.numGames):
if options.layout == 'RANDOM':
l = layout.Layout(randomLayout().split('\n'))
elif options.layout.startswith('RANDOM'):
l = layout.Layout(randomLayout(int(options.layout[6:])).split('\n'))
elif options.layout.lower().find('capture') == -1:
raise Exception( 'You must use a capture layout with capture.py')
else:
l = layout.getLayout( options.layout )
if l == None: raise Exception("The layout " + options.layout + " cannot be found")
layouts.append(l)
args['layouts'] = layouts
args['length'] = options.time
args['numGames'] = options.numGames
args['numTraining'] = options.numTraining
args['record'] = options.record
args['catchExceptions'] = options.catchExceptions
args['delay_step'] = options.delay_step
return args
def randomLayout(seed = None):
if not seed:
seed = random.randint(0,99999999)
# layout = 'layouts/random%08dCapture.lay' % seed
# print 'Generating random layout in %s' % layout
import mazeGenerator
return mazeGenerator.generateMaze(seed)
import traceback
def loadAgents(isRed, factory, textgraphics, cmdLineArgs):
"Calls agent factories and returns lists of agents"
try:
if not factory.endswith(".py"):
factory += ".py"
print factory
module = imp.load_source('player' + str(int(isRed)), factory)
except (NameError, ImportError):
print >>sys.stderr, 'Error: The team "' + factory + '" could not be loaded! '
traceback.print_exc()
return [None for i in range(2)]
except IOError:
print >>sys.stderr, 'Error: The team "' + factory + '" could not be loaded! '
traceback.print_exc()
return [None for i in range(2)]
args = dict()
args.update(cmdLineArgs) # Add command line args with priority
print "Loading Team:", factory
print "Arguments:", args
# if textgraphics and factoryClassName.startswith('Keyboard'):
# raise Exception('Using the keyboard requires graphics (no text display, quiet or training games)')
try:
createTeamFunc = getattr(module, 'createTeam')
except AttributeError:
print >>sys.stderr, 'Error: The team "' + factory + '" could not be loaded! '
traceback.print_exc()
return [None for i in range(2)]
indexAddend = 0
if not isRed:
indexAddend = 1
indices = [2*i + indexAddend for i in range(2)]
return createTeamFunc(indices[0], indices[1], isRed, **args)
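# Illustrative sketch of the interface loadAgents() expects from a team file (a
# hypothetical myTeam.py, not part of this file). The module must expose a
# createTeam(firstIndex, secondIndex, isRed, **kwargs) function returning two agents;
# the agent class name below is an assumption made only for this example.
#
#   from captureAgents import CaptureAgent
#
#   class StopAgent(CaptureAgent):
#       def chooseAction(self, gameState):
#           return 'Stop'
#
#   def createTeam(firstIndex, secondIndex, isRed,
#                  first='StopAgent', second='StopAgent'):
#       return [eval(first)(firstIndex), eval(second)(secondIndex)]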
def replayGame( layout, agents, actions, display, length, redTeamName, blueTeamName, delay=1):
rules = CaptureRules()
game = rules.newGame( layout, agents, display, length, False, False )
state = game.state
display.redTeam = redTeamName
display.blueTeam = blueTeamName
display.initialize(state.data)
for action in actions:
# Execute the action
state = state.generateSuccessor( *action )
# Change the display
display.update( state.data )
# Allow for game specific conditions (winning, losing, etc.)
rules.process(state, game)
time.sleep(delay)
print("END")
try:
wait = input("PRESS ENTER TO CONTINUE")
except:
print("END")
display.finish()
def runGames( layouts, agents, display, length, numGames, record, numTraining, redTeamName, blueTeamName, muteAgents=False, catchExceptions=False, delay_step=0):
rules = CaptureRules()
games = []
if numTraining > 0:
print 'Playing %d training games' % numTraining
for i in range( numGames ):
beQuiet = i < numTraining
layout = layouts[i]
if beQuiet:
# Suppress output and graphics
import textDisplay
gameDisplay = textDisplay.NullGraphics()
rules.quiet = True
else:
gameDisplay = display
rules.quiet = False
g = rules.newGame( layout, agents, gameDisplay, length, muteAgents, catchExceptions )
g.run(delay=delay_step)
if not beQuiet: games.append(g)
g.record = None
if record:
import time, cPickle, game
#fname = ('recorded-game-%d' % (i + 1)) + '-'.join([str(t) for t in time.localtime()[1:6]])
#f = file(fname, 'w')
components = {'layout': layout, 'agents': [game.Agent() for a in agents], 'actions': g.moveHistory, 'length': length, 'redTeamName': redTeamName, 'blueTeamName':blueTeamName }
#f.close()
print "recorded"
g.record = cPickle.dumps(components)
log_name = datetime.datetime.now().strftime("%Y-%m-%d-%H%M%S") + "-" + redTeamName + "_vs_" + blueTeamName
with open('../logs/%s.replay'%log_name,'wb') as f:
f.write(g.record)
if numGames > 1:
scores = [game.state.data.score for game in games]
redWinRate = [s > 0 for s in scores].count(True)/ float(len(scores))
blueWinRate = [s < 0 for s in scores].count(True)/ float(len(scores))
print 'Average Score:', sum(scores) / float(len(scores))
print 'Scores: ', ', '.join([str(score) for score in scores])
print 'Red Win Rate: %d/%d (%.2f)' % ([s > 0 for s in scores].count(True), len(scores), redWinRate)
print 'Blue Win Rate: %d/%d (%.2f)' % ([s < 0 for s in scores].count(True), len(scores), blueWinRate)
print 'Record: ', ', '.join([('Blue', 'Tie', 'Red')[max(0, min(2, 1 + s))] for s in scores])
return games
def save_score(game):
with open('score', 'w') as f:
print >>f, game.state.data.score
if __name__ == '__main__':
"""
The main function called when pacman.py is run
from the command line:
> python capture.py
See the usage string for more details.
> python capture.py --help
"""
options = readCommand( sys.argv[1:] ) # Get game components based on input
games = runGames(**options)
save_score(games[0])
# import cProfile
# cProfile.run('runGames( **options )', 'profile')
| 36.05
| 181
| 0.652265
|
05798676d295906b89227eaaf7854196efa2289a
| 5,030
|
py
|
Python
|
tests/fugue/execution/test_factory.py
|
gityow/fugue
|
e975625b33766d8b9dc64c6954871569b59367ec
|
[
"Apache-2.0"
] | null | null | null |
tests/fugue/execution/test_factory.py
|
gityow/fugue
|
e975625b33766d8b9dc64c6954871569b59367ec
|
[
"Apache-2.0"
] | null | null | null |
tests/fugue/execution/test_factory.py
|
gityow/fugue
|
e975625b33766d8b9dc64c6954871569b59367ec
|
[
"Apache-2.0"
] | null | null | null |
from typing import Any
from fugue import (
NativeExecutionEngine,
SqliteEngine,
make_execution_engine,
make_sql_engine,
register_default_execution_engine,
register_default_sql_engine,
register_execution_engine,
register_sql_engine,
)
from fugue.execution.factory import _ExecutionEngineFactory
from pytest import raises
class _MockExecutionEngine(NativeExecutionEngine):
def __init__(self, conf: Any, other: int = 0):
super().__init__(conf=conf)
self.other = other
class _MockSQlEngine(SqliteEngine):
def __init__(self, execution_engine, other: int = 1):
super().__init__(execution_engine)
self.other = other
def test_execution_engine():
f = _ExecutionEngineFactory()
assert isinstance(f.make(), NativeExecutionEngine)
c = f.make(conf={"a": 2})
assert 2 == c.conf.get_or_throw("a", int)
c = f.make(_MockExecutionEngine(conf={"a": 3}, other=4))
assert isinstance(c, _MockExecutionEngine)
assert 3 == c.conf.get_or_throw("a", int)
assert 4 == c.other
raises(TypeError, lambda: f.make("xyz"))
f.register("xyz", lambda conf, **kwargs: _MockExecutionEngine(conf, **kwargs))
c = f.make("xyz")
assert isinstance(c, _MockExecutionEngine)
raises(
KeyError,
lambda: f.register(
"xyz",
lambda conf, **kwargs: _MockExecutionEngine(conf, **kwargs),
on_dup="raise",
),
)
raises(
ValueError,
lambda: f.register(
"xyz",
lambda conf, **kwargs: _MockExecutionEngine(conf, **kwargs),
on_dup="dummy",
),
)
c = f.make("xyz", conf={"a": 3}, other=4)
assert isinstance(c, _MockExecutionEngine)
assert 3 == c.conf.get_or_throw("a", int)
assert 4 == c.other
assert isinstance(f.make(), NativeExecutionEngine)
f.register_default(lambda conf, **kwargs: _MockExecutionEngine(conf, **kwargs))
assert isinstance(f.make(), _MockExecutionEngine)
c = f.make(conf={"a": 3}, other=4)
assert isinstance(c, _MockExecutionEngine)
assert 3 == c.conf.get_or_throw("a", int)
assert 4 == c.other
def test_sql_engine():
f = _ExecutionEngineFactory()
assert not isinstance(f.make_sql_engine(None, f.make()), _MockSQlEngine)
assert isinstance(f.make_sql_engine(_MockSQlEngine, f.make()), _MockSQlEngine)
f.register("a", lambda conf: _MockExecutionEngine(conf))
f.register_sql_engine("aq", lambda engine: _MockSQlEngine(engine, other=11))
e = f.make(("a", "aq"))
assert isinstance(e, _MockExecutionEngine)
assert isinstance(e.sql_engine, _MockSQlEngine)
assert 0 == e.other
assert 11 == e.sql_engine.other
f.register_default(lambda conf: _MockExecutionEngine(conf))
e = f.make()
assert isinstance(e, _MockExecutionEngine)
assert not isinstance(e.sql_engine, _MockSQlEngine)
f.register_default_sql_engine(lambda engine: _MockSQlEngine(engine))
e = f.make()
assert isinstance(e, _MockExecutionEngine)
assert isinstance(e.sql_engine, _MockSQlEngine)
# SQL Engine override
e = f.make(NativeExecutionEngine)
assert type(e) == NativeExecutionEngine
assert isinstance(e.sql_engine, _MockSQlEngine)
# conditional override
def to_sql_engine(engine):
if isinstance(engine, _MockExecutionEngine):
return _MockSQlEngine(engine)
else:
return engine.sql_engine
f.register_default_sql_engine(to_sql_engine)
e = f.make(NativeExecutionEngine)
assert type(e) == NativeExecutionEngine
assert type(e.sql_engine) != _MockSQlEngine
e = f.make(_MockExecutionEngine)
assert type(e) == _MockExecutionEngine
assert type(e.sql_engine) == _MockSQlEngine
def test_global_funcs():
assert isinstance(make_execution_engine(), NativeExecutionEngine)
register_execution_engine(
"xyz", lambda conf, **kwargs: _MockExecutionEngine(conf, **kwargs)
)
assert isinstance(make_execution_engine("xyz"), _MockExecutionEngine)
register_default_execution_engine(
lambda conf, **kwargs: _MockExecutionEngine(conf, **kwargs), on_dup="ignore"
)
assert not isinstance(make_execution_engine(), _MockExecutionEngine)
register_default_execution_engine(
lambda conf, **kwargs: _MockExecutionEngine(conf, **kwargs), on_dup="overwrite"
)
assert isinstance(make_execution_engine(), _MockExecutionEngine)
se = SqliteEngine(make_execution_engine)
assert make_sql_engine(se) is se
assert not isinstance(make_sql_engine(None, make_execution_engine()), _MockSQlEngine)
register_sql_engine("x", lambda engine: _MockSQlEngine(engine))
assert isinstance(make_sql_engine("x", make_execution_engine()), _MockSQlEngine)
register_default_sql_engine(lambda engine: _MockSQlEngine(engine, other=10))
e = make_execution_engine()
assert isinstance(e, _MockExecutionEngine)
assert isinstance(e.sql_engine, _MockSQlEngine)
assert 10 == e.sql_engine.other
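# Illustrative usage sketch outside the test suite (the engine name "my_engine" and
# the keyword argument "other" are assumptions for this example, not fugue built-ins):
#
#   register_execution_engine(
#       "my_engine", lambda conf, **kwargs: _MockExecutionEngine(conf, **kwargs)
#   )
#   engine = make_execution_engine("my_engine", conf={"a": 1}, other=5)
#   assert 1 == engine.conf.get_or_throw("a", int) and 5 == engine.other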
| 33.758389
| 89
| 0.699404
|
e92bf4474b60b454994992a6728516f13d054525
| 28,488
|
py
|
Python
|
fhirclient/models/composition.py
|
mdx-dev/client-py
|
f6c16c9bd386c5b05d69753b89c6519d568814ac
|
[
"Apache-2.0"
] | null | null | null |
fhirclient/models/composition.py
|
mdx-dev/client-py
|
f6c16c9bd386c5b05d69753b89c6519d568814ac
|
[
"Apache-2.0"
] | null | null | null |
fhirclient/models/composition.py
|
mdx-dev/client-py
|
f6c16c9bd386c5b05d69753b89c6519d568814ac
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/Composition) on 2019-01-22.
# 2019, SMART Health IT.
from . import domainresource
class Composition(domainresource.DomainResource):
"""
A
s
e
t
o
f
r
e
s
o
u
r
c
e
s
c
o
m
p
o
s
e
d
i
n
t
o
a
s
i
n
g
l
e
c
o
h
e
r
e
n
t
c
l
i
n
i
c
a
l
s
t
a
t
e
m
e
n
t
w
i
t
h
c
l
i
n
i
c
a
l
a
t
t
e
s
t
a
t
i
o
n
.
A
s
e
t
o
f
h
e
a
l
t
h
c
a
r
e
-
r
e
l
a
t
e
d
i
n
f
o
r
m
a
t
i
o
n
t
h
a
t
i
s
a
s
s
e
m
b
l
e
d
t
o
g
e
t
h
e
r
i
n
t
o
a
s
i
n
g
l
e
l
o
g
i
c
a
l
p
a
c
k
a
g
e
t
h
a
t
p
r
o
v
i
d
e
s
a
s
i
n
g
l
e
c
o
h
e
r
e
n
t
s
t
a
t
e
m
e
n
t
o
f
m
e
a
n
i
n
g
,
e
s
t
a
b
l
i
s
h
e
s
i
t
s
o
w
n
c
o
n
t
e
x
t
a
n
d
t
h
a
t
h
a
s
c
l
i
n
i
c
a
l
a
t
t
e
s
t
a
t
i
o
n
w
i
t
h
r
e
g
a
r
d
t
o
w
h
o
i
s
m
a
k
i
n
g
t
h
e
s
t
a
t
e
m
e
n
t
.
A
C
o
m
p
o
s
i
t
i
o
n
d
e
f
i
n
e
s
t
h
e
s
t
r
u
c
t
u
r
e
a
n
d
n
a
r
r
a
t
i
v
e
c
o
n
t
e
n
t
n
e
c
e
s
s
a
r
y
f
o
r
a
d
o
c
u
m
e
n
t
.
H
o
w
e
v
e
r
,
a
C
o
m
p
o
s
i
t
i
o
n
a
l
o
n
e
d
o
e
s
n
o
t
c
o
n
s
t
i
t
u
t
e
a
d
o
c
u
m
e
n
t
.
R
a
t
h
e
r
,
t
h
e
C
o
m
p
o
s
i
t
i
o
n
m
u
s
t
b
e
t
h
e
f
i
r
s
t
e
n
t
r
y
i
n
a
B
u
n
d
l
e
w
h
e
r
e
B
u
n
d
l
e
.
t
y
p
e
=
d
o
c
u
m
e
n
t
,
a
n
d
a
n
y
o
t
h
e
r
r
e
s
o
u
r
c
e
s
r
e
f
e
r
e
n
c
e
d
f
r
o
m
C
o
m
p
o
s
i
t
i
o
n
m
u
s
t
b
e
i
n
c
l
u
d
e
d
a
s
s
u
b
s
e
q
u
e
n
t
e
n
t
r
i
e
s
i
n
t
h
e
B
u
n
d
l
e
(
f
o
r
e
x
a
m
p
l
e
P
a
t
i
e
n
t
,
P
r
a
c
t
i
t
i
o
n
e
r
,
E
n
c
o
u
n
t
e
r
,
e
t
c
.
)
.
"""
resource_type = "Composition"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.attester = None
"""
A
t
t
e
s
t
s
t
o
a
c
c
u
r
a
c
y
o
f
c
o
m
p
o
s
i
t
i
o
n
.
List of `CompositionAttester` items (represented as `dict` in JSON). """
self.author = None
"""
W
h
o
a
n
d
/
o
r
w
h
a
t
a
u
t
h
o
r
e
d
t
h
e
c
o
m
p
o
s
i
t
i
o
n
.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.category = None
"""
C
a
t
e
g
o
r
i
z
a
t
i
o
n
o
f
C
o
m
p
o
s
i
t
i
o
n
.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.confidentiality = None
"""
A
s
d
e
f
i
n
e
d
b
y
a
f
f
i
n
i
t
y
d
o
m
a
i
n
.
Type `str`. """
self.custodian = None
"""
O
r
g
a
n
i
z
a
t
i
o
n
w
h
i
c
h
m
a
i
n
t
a
i
n
s
t
h
e
c
o
m
p
o
s
i
t
i
o
n
.
Type `FHIRReference` (represented as `dict` in JSON). """
self.date = None
"""
C
o
m
p
o
s
i
t
i
o
n
e
d
i
t
i
n
g
t
i
m
e
.
Type `FHIRDate` (represented as `str` in JSON). """
self.encounter = None
"""
C
o
n
t
e
x
t
o
f
t
h
e
C
o
m
p
o
s
i
t
i
o
n
.
Type `FHIRReference` (represented as `dict` in JSON). """
self.event = None
"""
T
h
e
c
l
i
n
i
c
a
l
s
e
r
v
i
c
e
(
s
)
b
e
i
n
g
d
o
c
u
m
e
n
t
e
d
.
List of `CompositionEvent` items (represented as `dict` in JSON). """
self.identifier = None
"""
V
e
r
s
i
o
n
-
i
n
d
e
p
e
n
d
e
n
t
i
d
e
n
t
i
f
i
e
r
f
o
r
t
h
e
C
o
m
p
o
s
i
t
i
o
n
.
Type `Identifier` (represented as `dict` in JSON). """
self.relatesTo = None
"""
R
e
l
a
t
i
o
n
s
h
i
p
s
t
o
o
t
h
e
r
c
o
m
p
o
s
i
t
i
o
n
s
/
d
o
c
u
m
e
n
t
s
.
List of `CompositionRelatesTo` items (represented as `dict` in JSON). """
self.section = None
"""
C
o
m
p
o
s
i
t
i
o
n
i
s
b
r
o
k
e
n
i
n
t
o
s
e
c
t
i
o
n
s
.
List of `CompositionSection` items (represented as `dict` in JSON). """
self.status = None
"""
p
r
e
l
i
m
i
n
a
r
y
|
f
i
n
a
l
|
a
m
e
n
d
e
d
|
e
n
t
e
r
e
d
-
i
n
-
e
r
r
o
r
.
Type `str`. """
self.subject = None
"""
W
h
o
a
n
d
/
o
r
w
h
a
t
t
h
e
c
o
m
p
o
s
i
t
i
o
n
i
s
a
b
o
u
t
.
Type `FHIRReference` (represented as `dict` in JSON). """
self.title = None
"""
H
u
m
a
n
R
e
a
d
a
b
l
e
n
a
m
e
/
t
i
t
l
e
.
Type `str`. """
self.type = None
"""
K
i
n
d
o
f
c
o
m
p
o
s
i
t
i
o
n
(
L
O
I
N
C
i
f
p
o
s
s
i
b
l
e
)
.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(Composition, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(Composition, self).elementProperties()
js.extend([
("attester", "attester", CompositionAttester, True, None, False),
("author", "author", fhirreference.FHIRReference, True, None, True),
("category", "category", codeableconcept.CodeableConcept, True, None, False),
("confidentiality", "confidentiality", str, False, None, False),
("custodian", "custodian", fhirreference.FHIRReference, False, None, False),
("date", "date", fhirdate.FHIRDate, False, None, True),
("encounter", "encounter", fhirreference.FHIRReference, False, None, False),
("event", "event", CompositionEvent, True, None, False),
("identifier", "identifier", identifier.Identifier, False, None, False),
("relatesTo", "relatesTo", CompositionRelatesTo, True, None, False),
("section", "section", CompositionSection, True, None, False),
("status", "status", str, False, None, True),
("subject", "subject", fhirreference.FHIRReference, False, None, False),
("title", "title", str, False, None, True),
("type", "type", codeableconcept.CodeableConcept, False, None, True),
])
return js
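# Illustrative sketch (not part of the generated model): constructing a Composition
# from a minimal JSON dictionary. The codes and references are made-up example values;
# status, type, date, author and title are the elements marked as required in
# elementProperties() above.
#
#   example = Composition(jsondict={
#       "resourceType": "Composition",
#       "status": "final",
#       "type": {"coding": [{"system": "http://loinc.org", "code": "11488-4"}]},
#       "date": "2019-01-22T00:00:00Z",
#       "author": [{"reference": "Practitioner/example"}],
#       "title": "Consultation note",
#   })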
from . import backboneelement
class CompositionAttester(backboneelement.BackboneElement):
"""
A
t
t
e
s
t
s
t
o
a
c
c
u
r
a
c
y
o
f
c
o
m
p
o
s
i
t
i
o
n
.
A
p
a
r
t
i
c
i
p
a
n
t
w
h
o
h
a
s
a
t
t
e
s
t
e
d
t
o
t
h
e
a
c
c
u
r
a
c
y
o
f
t
h
e
c
o
m
p
o
s
i
t
i
o
n
/
d
o
c
u
m
e
n
t
.
"""
resource_type = "CompositionAttester"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.mode = None
"""
p
e
r
s
o
n
a
l
|
p
r
o
f
e
s
s
i
o
n
a
l
|
l
e
g
a
l
|
o
f
f
i
c
i
a
l
.
Type `str`. """
self.party = None
"""
W
h
o
a
t
t
e
s
t
e
d
t
h
e
c
o
m
p
o
s
i
t
i
o
n
.
Type `FHIRReference` (represented as `dict` in JSON). """
self.time = None
"""
W
h
e
n
t
h
e
c
o
m
p
o
s
i
t
i
o
n
w
a
s
a
t
t
e
s
t
e
d
.
Type `FHIRDate` (represented as `str` in JSON). """
super(CompositionAttester, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(CompositionAttester, self).elementProperties()
js.extend([
("mode", "mode", str, False, None, True),
("party", "party", fhirreference.FHIRReference, False, None, False),
("time", "time", fhirdate.FHIRDate, False, None, False),
])
return js
class CompositionEvent(backboneelement.BackboneElement):
"""
T
h
e
c
l
i
n
i
c
a
l
s
e
r
v
i
c
e
(
s
)
b
e
i
n
g
d
o
c
u
m
e
n
t
e
d
.
T
h
e
c
l
i
n
i
c
a
l
s
e
r
v
i
c
e
,
s
u
c
h
a
s
a
c
o
l
o
n
o
s
c
o
p
y
o
r
a
n
a
p
p
e
n
d
e
c
t
o
m
y
,
b
e
i
n
g
d
o
c
u
m
e
n
t
e
d
.
"""
resource_type = "CompositionEvent"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.code = None
"""
C
o
d
e
(
s
)
t
h
a
t
a
p
p
l
y
t
o
t
h
e
e
v
e
n
t
b
e
i
n
g
d
o
c
u
m
e
n
t
e
d
.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.detail = None
"""
T
h
e
e
v
e
n
t
(
s
)
b
e
i
n
g
d
o
c
u
m
e
n
t
e
d
.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.period = None
"""
T
h
e
p
e
r
i
o
d
c
o
v
e
r
e
d
b
y
t
h
e
d
o
c
u
m
e
n
t
a
t
i
o
n
.
Type `Period` (represented as `dict` in JSON). """
super(CompositionEvent, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(CompositionEvent, self).elementProperties()
js.extend([
("code", "code", codeableconcept.CodeableConcept, True, None, False),
("detail", "detail", fhirreference.FHIRReference, True, None, False),
("period", "period", period.Period, False, None, False),
])
return js
class CompositionRelatesTo(backboneelement.BackboneElement):
"""
R
e
l
a
t
i
o
n
s
h
i
p
s
t
o
o
t
h
e
r
c
o
m
p
o
s
i
t
i
o
n
s
/
d
o
c
u
m
e
n
t
s
.
R
e
l
a
t
i
o
n
s
h
i
p
s
t
h
a
t
t
h
i
s
c
o
m
p
o
s
i
t
i
o
n
h
a
s
w
i
t
h
o
t
h
e
r
c
o
m
p
o
s
i
t
i
o
n
s
o
r
d
o
c
u
m
e
n
t
s
t
h
a
t
a
l
r
e
a
d
y
e
x
i
s
t
.
"""
resource_type = "CompositionRelatesTo"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.code = None
"""
r
e
p
l
a
c
e
s
|
t
r
a
n
s
f
o
r
m
s
|
s
i
g
n
s
|
a
p
p
e
n
d
s
.
Type `str`. """
self.targetIdentifier = None
"""
T
a
r
g
e
t
o
f
t
h
e
r
e
l
a
t
i
o
n
s
h
i
p
.
Type `Identifier` (represented as `dict` in JSON). """
self.targetReference = None
"""
T
a
r
g
e
t
o
f
t
h
e
r
e
l
a
t
i
o
n
s
h
i
p
.
Type `FHIRReference` (represented as `dict` in JSON). """
super(CompositionRelatesTo, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(CompositionRelatesTo, self).elementProperties()
js.extend([
("code", "code", str, False, None, True),
("targetIdentifier", "targetIdentifier", identifier.Identifier, False, "target", True),
("targetReference", "targetReference", fhirreference.FHIRReference, False, "target", True),
])
return js
class CompositionSection(backboneelement.BackboneElement):
"""
C
o
m
p
o
s
i
t
i
o
n
i
s
b
r
o
k
e
n
i
n
t
o
s
e
c
t
i
o
n
s
.
T
h
e
r
o
o
t
o
f
t
h
e
s
e
c
t
i
o
n
s
t
h
a
t
m
a
k
e
u
p
t
h
e
c
o
m
p
o
s
i
t
i
o
n
.
"""
resource_type = "CompositionSection"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.author = None
"""
W
h
o
a
n
d
/
o
r
w
h
a
t
a
u
t
h
o
r
e
d
t
h
e
s
e
c
t
i
o
n
.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.code = None
"""
C
l
a
s
s
i
f
i
c
a
t
i
o
n
o
f
s
e
c
t
i
o
n
(
r
e
c
o
m
m
e
n
d
e
d
)
.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.emptyReason = None
"""
W
h
y
t
h
e
s
e
c
t
i
o
n
i
s
e
m
p
t
y
.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.entry = None
"""
A
r
e
f
e
r
e
n
c
e
t
o
d
a
t
a
t
h
a
t
s
u
p
p
o
r
t
s
t
h
i
s
s
e
c
t
i
o
n
.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.focus = None
"""
W
h
o
/
w
h
a
t
t
h
e
s
e
c
t
i
o
n
i
s
a
b
o
u
t
,
w
h
e
n
i
t
i
s
n
o
t
a
b
o
u
t
t
h
e
s
u
b
j
e
c
t
o
f
c
o
m
p
o
s
i
t
i
o
n
.
Type `FHIRReference` (represented as `dict` in JSON). """
self.mode = None
"""
w
o
r
k
i
n
g
|
s
n
a
p
s
h
o
t
|
c
h
a
n
g
e
s
.
Type `str`. """
self.orderedBy = None
"""
O
r
d
e
r
o
f
s
e
c
t
i
o
n
e
n
t
r
i
e
s
.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.section = None
"""
N
e
s
t
e
d
S
e
c
t
i
o
n
.
List of `CompositionSection` items (represented as `dict` in JSON). """
self.text = None
"""
T
e
x
t
s
u
m
m
a
r
y
o
f
t
h
e
s
e
c
t
i
o
n
,
f
o
r
h
u
m
a
n
i
n
t
e
r
p
r
e
t
a
t
i
o
n
.
Type `Narrative` (represented as `dict` in JSON). """
self.title = None
"""
L
a
b
e
l
f
o
r
s
e
c
t
i
o
n
(
e
.
g
.
f
o
r
T
o
C
)
.
Type `str`. """
super(CompositionSection, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(CompositionSection, self).elementProperties()
js.extend([
("author", "author", fhirreference.FHIRReference, True, None, False),
("code", "code", codeableconcept.CodeableConcept, False, None, False),
("emptyReason", "emptyReason", codeableconcept.CodeableConcept, False, None, False),
("entry", "entry", fhirreference.FHIRReference, True, None, False),
("focus", "focus", fhirreference.FHIRReference, False, None, False),
("mode", "mode", str, False, None, False),
("orderedBy", "orderedBy", codeableconcept.CodeableConcept, False, None, False),
("section", "section", CompositionSection, True, None, False),
("text", "text", narrative.Narrative, False, None, False),
("title", "title", str, False, None, False),
])
return js
import sys
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import narrative
except ImportError:
narrative = sys.modules[__package__ + '.narrative']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
| 11.408891
| 108
| 0.295352
|
9925fd51561ae5bcbbc477003f9b6759e32c8908
| 1,320
|
py
|
Python
|
pandas/tests/extension/test_external_block.py
|
flaboss/pandas
|
a62102932fd73da0604aba0199cd234317652c15
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2020-04-12T21:15:09.000Z
|
2020-04-12T21:15:09.000Z
|
pandas/tests/extension/test_external_block.py
|
flaboss/pandas
|
a62102932fd73da0604aba0199cd234317652c15
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 3
|
2020-03-09T13:15:03.000Z
|
2020-03-20T10:07:10.000Z
|
pandas/tests/extension/test_external_block.py
|
flaboss/pandas
|
a62102932fd73da0604aba0199cd234317652c15
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import pytest
import pandas as pd
from pandas.core.internals import BlockManager
from pandas.core.internals.blocks import ExtensionBlock
class CustomBlock(ExtensionBlock):
_holder = np.ndarray
_can_hold_na = False
def concat_same_type(self, to_concat, placement=None):
"""
Always concatenate disregarding self.ndim as the values are
always 1D in this custom Block
"""
values = np.concatenate([blk.values for blk in to_concat])
placement = self.mgr_locs if self.ndim == 2 else slice(len(values))
return self.make_block_same_class(values, placement=placement)
@pytest.fixture
def df():
df1 = pd.DataFrame({"a": [1, 2, 3]})
blocks = df1._mgr.blocks
values = np.arange(3, dtype="int64")
custom_block = CustomBlock(values, placement=slice(1, 2))
blocks = blocks + (custom_block,)
block_manager = BlockManager(blocks, [pd.Index(["a", "b"]), df1.index])
return pd.DataFrame(block_manager)
def test_concat_dataframe(df):
# GH17728
res = pd.concat([df, df])
assert isinstance(res._mgr.blocks[1], CustomBlock)
def test_concat_axis1(df):
# GH17954
df2 = pd.DataFrame({"c": [0.1, 0.2, 0.3]})
res = pd.concat([df, df2], axis=1)
assert isinstance(res._mgr.blocks[1], CustomBlock)
| 28.695652
| 75
| 0.67803
|
f06756553fb502480d963ecdeca5e6cd8066b8c9
| 9,552
|
py
|
Python
|
models/resnet.py
|
YBZh/Label-Propagation-with-Augmented-Anchors
|
83c6d0ecaa1c7f79a7dfbbd9fe0cd367c695ead8
|
[
"MIT"
] | 18
|
2020-07-18T15:34:19.000Z
|
2021-06-07T01:32:52.000Z
|
models/resnet.py
|
Gorilla-Lab-SCUT/Label-Propagation-with-Augmented-Anchors
|
83c6d0ecaa1c7f79a7dfbbd9fe0cd367c695ead8
|
[
"MIT"
] | null | null | null |
models/resnet.py
|
Gorilla-Lab-SCUT/Label-Propagation-with-Augmented-Anchors
|
83c6d0ecaa1c7f79a7dfbbd9fe0cd367c695ead8
|
[
"MIT"
] | 4
|
2020-10-05T08:00:32.000Z
|
2021-03-26T19:29:58.000Z
|
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch
import ipdb
from torch.autograd import Function
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
class ReverseLayerF(Function):
@staticmethod
def forward(ctx, x, alpha):
ctx.alpha = alpha
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
output = grad_output.neg() * ctx.alpha
return output, None
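# Illustrative sketch (not from this file): the gradient-reversal function above is
# typically used for domain-adversarial training. 'feature_extractor' and
# 'domain_classifier' are assumed modules used only for this example.
#
#   features = feature_extractor(images)                     # (N, D) features
#   reversed_features = ReverseLayerF.apply(features, alpha) # identity forward, -alpha * grad backward
#   domain_logits = domain_classifier(reversed_features)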
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
out = self.fc(x)
return x, out
def resnet18(args, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if args.pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
# modify the structure of the model.
num_of_feature_map = model.fc.in_features
model.fc = nn.Linear(num_of_feature_map, args.num_classes)
model.fc.weight.data.normal_(0.0, 0.02)
model.fc.bias.data.normal_(0)
return model
def resnet34(args, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if args.pretrained:
print('Load ImageNet pre-trained resnet model')
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
# modify the structure of the model.
num_of_feature_map = model.fc.in_features
model.fc = nn.Linear(num_of_feature_map, args.num_classes)
return model
def resnet50(args, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if args.pretrained:
if args.pretrained_checkpoint:
# modify the structure of the model.
print('load the source data pretrained model from: ', args.pretrained_checkpoint)
num_of_feature_map = model.fc.in_features
model.fc = nn.Linear(num_of_feature_map, args.num_classes * 2)
init_dict = model.state_dict()
pretrained_dict_temp = torch.load(args.pretrained_checkpoint)['state_dict']
pretrained_dict = {k.replace('module.', ''): v for k, v in pretrained_dict_temp.items()}
pretrained_dict.pop('fc.weight')
pretrained_dict.pop('fc.bias')
init_dict.update(pretrained_dict)
model.load_state_dict(init_dict)
else:
print('load the imagenet pretrained model')
init_dict = model.state_dict()
pretrained_dict = model_zoo.load_url(model_urls['resnet50'])
pretrained_dict.pop('fc.weight')
pretrained_dict.pop('fc.bias')
init_dict.update(pretrained_dict)
model.load_state_dict(init_dict)
# modify the structure of the model.
num_of_feature_map = model.fc.in_features
model.fc = nn.Linear(num_of_feature_map, args.num_classes * 2)
return model
def resnet101(args, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if args.pretrained:
print('load the imagenet pretrained model')
init_dict = model.state_dict()
pretrained_dict = model_zoo.load_url(model_urls['resnet101'])
pretrained_dict.pop('fc.weight')
pretrained_dict.pop('fc.bias')
init_dict.update(pretrained_dict)
model.load_state_dict(init_dict)
# modify the structure of the model.
num_of_feature_map = model.fc.in_features
model.fc = nn.Linear(num_of_feature_map, args.num_classes * 2)
return model
def resnet152(args, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if args.pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
# modify the structure of the model.
num_of_feature_map = model.fc.in_features
model.fc = nn.Linear(num_of_feature_map, args.num_classes)
return model
def resnet(args, **kwargs):
print("==> creating model '{}' ".format(args.arch))
if args.arch == 'resnet18':
return resnet18(args)
elif args.arch == 'resnet34':
return resnet34(args)
elif args.arch == 'resnet50':
return resnet50(args)
elif args.arch == 'resnet101':
return resnet101(args)
elif args.arch == 'resnet152':
return resnet152(args)
else:
raise ValueError('Unrecognized model architecture', args.arch)
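# Illustrative usage sketch (the argument values are assumptions, not part of this
# file): the factory only needs an args object exposing 'arch', 'pretrained',
# 'num_classes' and, on the resnet50/resnet101 paths, 'pretrained_checkpoint'.
# Note that ResNet.forward() returns a (features, logits) tuple rather than logits alone.
#
#   import argparse
#   args = argparse.Namespace(arch='resnet18', pretrained=False, num_classes=31)
#   model = resnet(args)
#   features, logits = model(torch.randn(2, 3, 224, 224))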
| 32.937931
| 100
| 0.618405
|
716c4af65d715fc5c19592f7185ae3f2a3b3c92d
| 17,805
|
py
|
Python
|
pyVmomi/_typeinfo_ciscm.py
|
xweichu/pyvmomi
|
77aedef02974a63517a079c482e49fd9890c09a4
|
[
"Apache-2.0"
] | null | null | null |
pyVmomi/_typeinfo_ciscm.py
|
xweichu/pyvmomi
|
77aedef02974a63517a079c482e49fd9890c09a4
|
[
"Apache-2.0"
] | null | null | null |
pyVmomi/_typeinfo_ciscm.py
|
xweichu/pyvmomi
|
77aedef02974a63517a079c482e49fd9890c09a4
|
[
"Apache-2.0"
] | null | null | null |
# ******* WARNING - AUTO GENERATED CODE - DO NOT EDIT *******
from .VmomiSupport import CreateDataType, CreateManagedType
from .VmomiSupport import CreateEnumType
from .VmomiSupport import AddVersion, AddVersionParent
from .VmomiSupport import AddBreakingChangesInfo
from .VmomiSupport import F_LINK, F_LINKABLE
from .VmomiSupport import F_OPTIONAL, F_SECRET
from .VmomiSupport import newestVersions, ltsVersions
from .VmomiSupport import dottedVersions, oldestVersions
AddVersion("vmodl.version.version0", "", "", 0, "vim25")
AddVersion("vmodl.version.version1", "", "", 0, "vim25")
AddVersion("cis.metadata.version.version1", "cis.metadata", "1.0", 0, "")
AddVersion("cis.cm.version.version1", "cis.cm", "1.0", 0, "")
AddVersionParent("vmodl.version.version0", "vmodl.version.version0")
AddVersionParent("vmodl.version.version1", "vmodl.version.version0")
AddVersionParent("vmodl.version.version1", "vmodl.version.version1")
AddVersionParent("cis.metadata.version.version1", "vmodl.version.version0")
AddVersionParent("cis.metadata.version.version1", "vmodl.version.version1")
AddVersionParent("cis.metadata.version.version1", "cis.metadata.version.version1")
AddVersionParent("cis.cm.version.version1", "vmodl.version.version0")
AddVersionParent("cis.cm.version.version1", "vmodl.version.version1")
AddVersionParent("cis.cm.version.version1", "cis.metadata.version.version1")
AddVersionParent("cis.cm.version.version1", "cis.cm.version.version1")
newestVersions.Add("cis.cm.version.version1")
ltsVersions.Add("cis.cm.version.version1")
dottedVersions.Add("cis.cm.version.version1")
oldestVersions.Add("cis.cm.version.version1")
CreateManagedType("cis.cm.ComponentManager", "CmComponentManager", "vmodl.ManagedObject", "cis.cm.version.version1", None, [("loginByToken", "CmComponentManager_LoginByToken", "cis.cm.version.version1", (), (0, "void", "void"), "System.Anonymous", ["cis.cm.fault.InvalidLoginFault", ]), ("logout", "CmComponentManager_Logout", "cis.cm.version.version1", (), (0, "void", "void"), "System.Anonymous", None), ("retrieveServiceInstanceContent", "RetrieveServiceInstanceContent", "cis.cm.version.version1", (), (0, "cis.cm.ServiceInstanceContent", "cis.cm.ServiceInstanceContent"), "System.Anonymous", None)])
CreateDataType("cis.cm.SearchCriteria", "CmSearchCriteria", "vmodl.DynamicData", "cis.cm.version.version1", [("serviceType", "cis.cm.ServiceType", "cis.cm.version.version1", F_OPTIONAL), ("folder", "cis.cm.site.Folder", "cis.cm.version.version1", F_OPTIONAL), ("endPointType", "cis.cm.ServiceEndPointType", "cis.cm.version.version1", F_OPTIONAL), ("selectionMethod", "string", "cis.cm.version.version1", F_OPTIONAL), ("resultFieldList", "string[]", "cis.cm.version.version1", F_OPTIONAL)])
CreateEnumType("cis.cm.SearchCriteria.SelectionMethod", "CmSearchCriteriaSelectionMethod", "cis.cm.version.version1", ["ALL"])
CreateDataType("cis.cm.ServiceAttribute", "CmServiceAttribute", "vmodl.DynamicData", "cis.cm.version.version1", [("key", "string", "cis.cm.version.version1", 0), ("value", "string", "cis.cm.version.version1", 0)])
CreateDataType("cis.cm.ServiceBase", "CmServiceBase", "vmodl.DynamicData", "cis.cm.version.version1", [("serviceEndPoints", "cis.cm.ServiceEndPoint[]", "cis.cm.version.version1", F_OPTIONAL), ("serviceAttributes", "cis.cm.ServiceAttribute[]", "cis.cm.version.version1", F_OPTIONAL), ("serviceNameResourceKey", "string", "cis.cm.version.version1", F_OPTIONAL), ("serviceDescriptionResourceKey", "string", "cis.cm.version.version1", F_OPTIONAL), ("serviceGroupResourceKey", "string", "cis.cm.version.version1", F_OPTIONAL), ("serviceGroupInternalId", "string", "cis.cm.version.version1", F_OPTIONAL), ("hostId", "string", "cis.cm.version.version1", F_OPTIONAL), ("controlScriptPath", "string", "cis.cm.version.version1", F_OPTIONAL)])
CreateDataType("cis.cm.ServiceEndPoint", "CmServiceEndPoint", "vmodl.DynamicData", "cis.cm.version.version1", [("sslTrust", "string[]", "cis.cm.version.version1", F_OPTIONAL), ("url", "vmodl.URI", "cis.cm.version.version1", 0), ("endPointType", "cis.cm.ServiceEndPointType", "cis.cm.version.version1", 0), ("endPointData", "cis.cm.ServiceEndPointData[]", "cis.cm.version.version1", F_OPTIONAL)])
CreateDataType("cis.cm.ServiceEndPointData", "CmServiceEndPointData", "vmodl.DynamicData", "cis.cm.version.version1", [("key", "string", "cis.cm.version.version1", 0), ("value", "string", "cis.cm.version.version1", 0)])
CreateDataType("cis.cm.ServiceEndPointType", "CmServiceEndPointType", "vmodl.DynamicData", "cis.cm.version.version1", [("endPointProtocol", "string", "cis.cm.version.version1", F_OPTIONAL), ("typeId", "string", "cis.cm.version.version1", F_OPTIONAL)])
CreateEnumType("cis.cm.ServiceEndPointType.EndPointProtocol", "CmServiceEndPointTypeEndPointProtocol", "cis.cm.version.version1", ["unknown", "vmomi", "wsTrust", "rest", "http", "file", "vapi"])
CreateDataType("cis.cm.ServiceInfo", "CmServiceInfo", "cis.cm.ServiceBase", "cis.cm.version.version1", [("serviceId", "string", "cis.cm.version.version1", F_OPTIONAL), ("folder", "cis.cm.site.Folder", "cis.cm.version.version1", F_OPTIONAL), ("ownerId", "string", "cis.cm.version.version1", F_OPTIONAL), ("serviceVersion", "string", "cis.cm.version.version1", F_OPTIONAL), ("serviceType", "cis.cm.ServiceType", "cis.cm.version.version1", F_OPTIONAL)])
CreateEnumType("cis.cm.ServiceInfo.ServiceField", "CmServiceInfoServiceField", "cis.cm.version.version1", ["ALL", "VERSION", "SERVICE_TYPE", "SERVICE_END_POINTS", "OWNER_ID", "SERVICE_ATTRIBUTES", "NAME_KEY", "DESCRIPTION_KEY", "SERVICE_GROUP_INTERNAL_ID", "SERVICE_GROUP_RESOURCE_KEY", "SERVICE_ID", "FOLDER", "HOST_ID"])
CreateDataType("cis.cm.ServiceInstanceContent", "CmServiceInstanceContent", "vmodl.DynamicData", "cis.cm.version.version1", [("healthStatusManager", "cis.cm.monitor.HealthStatusManager", "cis.cm.version.version1", 0), ("serviceManager", "cis.cm.ServiceManager", "cis.cm.version.version1", 0), ("siteManager", "cis.cm.site.SiteManager", "cis.cm.version.version1", 0), ("serviceConfigurationManager", "cis.cm.scm.ServiceConfigurationManager", "cis.cm.version.version1", 0)])
CreateManagedType("cis.cm.ServiceManager", "CmServiceManager", "vmodl.ManagedObject", "cis.cm.version.version1", None, [("registerService", "RegisterService", "cis.cm.version.version1", (("serviceId", "string", "cis.cm.version.version1", 0, None),("serviceSpec", "cis.cm.ServiceSpec", "cis.cm.version.version1", 0, None),), (0, "string", "string"), "ComponentManager.Administrator", ["cis.cm.fault.ComponentManagerFault", ]), ("unregisterService", "UnregisterService", "cis.cm.version.version1", (("serviceId", "string", "cis.cm.version.version1", 0, None),), (0, "void", "void"), "ComponentManager.Owner", ["cis.cm.fault.ComponentManagerFault", ]), ("reRegisterService", "ReRegisterService", "cis.cm.version.version1", (("serviceId", "string", "cis.cm.version.version1", 0, None),("serviceSpec", "cis.cm.ServiceSpec", "cis.cm.version.version1", 0, None),), (0, "void", "void"), "ComponentManager.Owner", ["cis.cm.fault.ComponentManagerFault", ]), ("retrieveService", "RetrieveService", "cis.cm.version.version1", (("serviceId", "string", "cis.cm.version.version1", 0, None),), (0, "cis.cm.ServiceInfo", "cis.cm.ServiceInfo"), "System.Anonymous", ["cis.cm.fault.ComponentManagerFault", ]), ("search", "Search", "cis.cm.version.version1", (("searchCriteria", "cis.cm.SearchCriteria", "cis.cm.version.version1", F_OPTIONAL, None),), (F_OPTIONAL, "cis.cm.ServiceInfo[]", "cis.cm.ServiceInfo[]"), "System.Anonymous", ["cis.cm.fault.ComponentManagerFault", ]), ("startService", "StartService", "cis.cm.version.version1", (("serviceId", "string", "cis.cm.version.version1", 0, None),("timeout", "long", "cis.cm.version.version1", 0, None),), (0, "void", "void"), "ComponentManager.Administrator", ["cis.cm.fault.ComponentManagerFault", ]), ("stopService", "StopService", "cis.cm.version.version1", (("serviceId", "string", "cis.cm.version.version1", 0, None),("timeout", "long", "cis.cm.version.version1", 0, None),), (0, "void", "void"), "ComponentManager.Administrator", ["cis.cm.fault.ComponentManagerFault", ]), ("restartService", "RestartService", "cis.cm.version.version1", (("serviceId", "string", "cis.cm.version.version1", 0, None),("timeout", "long", "cis.cm.version.version1", 0, None),), (0, "void", "void"), "ComponentManager.Administrator", ["cis.cm.fault.ComponentManagerFault", ]), ("getServiceStatus", "GetServiceStatus", "cis.cm.version.version1", (("serviceId", "string", "cis.cm.version.version1", 0, None),("timeout", "long", "cis.cm.version.version1", 0, None),), (0, "string", "string"), "ComponentManager.Administrator", ["cis.cm.fault.ComponentManagerFault", ])])
CreateDataType("cis.cm.ServiceSpec", "CmServiceSpec", "cis.cm.ServiceBase", "cis.cm.version.version1", [("ownerId", "string", "cis.cm.version.version1", 0), ("serviceVersion", "string", "cis.cm.version.version1", 0), ("serviceType", "cis.cm.ServiceType", "cis.cm.version.version1", 0)])
CreateDataType("cis.cm.ServiceType", "CmServiceType", "vmodl.DynamicData", "cis.cm.version.version1", [("productId", "string", "cis.cm.version.version1", 0), ("typeId", "string", "cis.cm.version.version1", 0)])
CreateDataType("cis.cm.fault.ComponentManagerFault", "CmFaultComponentManagerFault", "vmodl.MethodFault", "cis.cm.version.version1", [("errorCode", "int", "cis.cm.version.version1", 0), ("errorMessage", "string", "cis.cm.version.version1", F_OPTIONAL)])
CreateDataType("cis.cm.fault.InvalidLoginFault", "CmFaultInvalidLoginFault", "vmodl.MethodFault", "cis.cm.version.version1", [("errorMessage", "vmodl.LocalizableMessage", "cis.cm.version.version1", 0)])
CreateDataType("cis.cm.monitor.HealthStatus", "CmHealthStatus", "vmodl.DynamicData", "cis.cm.version.version1", [("serviceId", "string", "cis.cm.version.version1", 0), ("timeReceived", "vmodl.DateTime", "cis.cm.version.version1", 0), ("statusCode", "string", "cis.cm.version.version1", 0), ("messages", "vmodl.LocalizableMessage[]", "cis.cm.version.version1", F_OPTIONAL), ("rescBundleServiceId", "string", "cis.cm.version.version1", 0)])
CreateEnumType("cis.cm.monitor.HealthStatus.Code", "CmHealthStatusCode", "cis.cm.version.version1", ["GREEN", "YELLOW", "ORANGE", "RED", "FAILED", "UNSUPPORTED", "UNKNOWN"])
CreateManagedType("cis.cm.monitor.HealthStatusManager", "CmHealthStatusManager", "vmodl.ManagedObject", "cis.cm.version.version1", None, [("fetchStatus", "FetchStatus", "cis.cm.version.version1", (("serviceId", "string[]", "cis.cm.version.version1", F_OPTIONAL, None),), (0, "cis.cm.monitor.HealthStatus[]", "cis.cm.monitor.HealthStatus[]"), "ComponentManager.Administrator", ["cis.cm.fault.ComponentManagerFault", ]), ("invalidateCacheEntry", "InvalidateCacheEntry", "cis.cm.version.version1", (("serviceId", "string", "cis.cm.version.version1", 0, None),), (0, "void", "void"), "ComponentManager.Administrator", ["cis.cm.fault.ComponentManagerFault", ])])
CreateDataType("cis.cm.scm.Configuration", "CmConfiguration", "vmodl.DynamicData", "cis.cm.version.version1", [("resourceKey", "string", "cis.cm.version.version1", 0), ("parameters", "cis.cm.scm.Parameter[]", "cis.cm.version.version1", F_OPTIONAL)])
CreateDataType("cis.cm.scm.ConfigurationDefinition", "CmConfigurationDefinition", "vmodl.DynamicData", "cis.cm.version.version1", [("resourceKey", "string", "cis.cm.version.version1", 0), ("metadata", "cis.metadata.TypeDescriptor", "cis.cm.version.version1", F_OPTIONAL)])
CreateDataType("cis.cm.scm.ConfigurationUpdate", "CmConfigurationUpdate", "vmodl.DynamicData", "cis.cm.version.version1", [("resourceKey", "string", "cis.cm.version.version1", 0), ("add", "cis.cm.scm.Parameter[]", "cis.cm.version.version1", F_OPTIONAL), ("update", "cis.cm.scm.Parameter[]", "cis.cm.version.version1", F_OPTIONAL)])
CreateDataType("cis.cm.scm.Parameter", "CmParameter", "vmodl.DynamicData", "cis.cm.version.version1", [("name", "string", "cis.cm.version.version1", 0), ("value", "string", "cis.cm.version.version1", 0)])
CreateManagedType("cis.cm.scm.ServiceConfigurationManager", "CmServiceConfigurationManager", "vmodl.ManagedObject", "cis.cm.version.version1", None, [("validate", "Validate", "cis.cm.version.version1", (("serviceId", "string", "cis.cm.version.version1", 0, None),("config", "cis.cm.scm.ConfigurationUpdate", "cis.cm.version.version1", 0, None),), (0, "cis.cm.scm.response.UpdateResponse", "cis.cm.scm.response.UpdateResponse"), "ComponentManager.Owner", ["cis.cm.fault.ComponentManagerFault", ]), ("update", "Update", "cis.cm.version.version1", (("serviceId", "string", "cis.cm.version.version1", 0, None),("config", "cis.cm.scm.ConfigurationUpdate", "cis.cm.version.version1", 0, None),), (0, "cis.cm.scm.response.UpdateResponse", "cis.cm.scm.response.UpdateResponse"), "ComponentManager.Owner", ["cis.cm.fault.ComponentManagerFault", ]), ("forceUpdate", "ForceUpdate", "cis.cm.version.version1", (("serviceId", "string", "cis.cm.version.version1", 0, None),("config", "cis.cm.scm.ConfigurationUpdate", "cis.cm.version.version1", 0, None),), (0, "cis.cm.scm.response.UpdateResponse", "cis.cm.scm.response.UpdateResponse"), "ComponentManager.Administrator", ["cis.cm.fault.ComponentManagerFault", ]), ("setDefinitions", "SetDefinitions", "cis.cm.version.version1", (("serviceId", "string", "cis.cm.version.version1", 0, None),("configDef", "cis.cm.scm.ConfigurationDefinition[]", "cis.cm.version.version1", F_OPTIONAL, None),), (0, "void", "void"), "ComponentManager.Owner", ["cis.cm.fault.ComponentManagerFault", ]), ("getResources", "GetResources", "cis.cm.version.version1", (("serviceId", "string", "cis.cm.version.version1", 0, None),), (0, "string[]", "string[]"), "System.Read", ["cis.cm.fault.ComponentManagerFault", ]), ("getDefinitions", "GetDefinitions", "cis.cm.version.version1", (("serviceId", "string", "cis.cm.version.version1", 0, None),), (0, "cis.cm.scm.ConfigurationDefinition[]", "cis.cm.scm.ConfigurationDefinition[]"), "System.Read", ["cis.cm.fault.ComponentManagerFault", ]), ("getDefinition", "GetDefinition", "cis.cm.version.version1", (("serviceId", "string", "cis.cm.version.version1", 0, None),("resourceKey", "string", "cis.cm.version.version1", 0, None),), (0, "cis.cm.scm.ConfigurationDefinition", "cis.cm.scm.ConfigurationDefinition"), "System.Read", ["cis.cm.fault.ComponentManagerFault", ]), ("getConfigurations", "GetConfigurations", "cis.cm.version.version1", (("serviceId", "string", "cis.cm.version.version1", 0, None),), (0, "cis.cm.scm.Configuration[]", "cis.cm.scm.Configuration[]"), "System.Read", ["cis.cm.fault.ComponentManagerFault", ]), ("getConfiguration", "GetConfiguration", "cis.cm.version.version1", (("serviceId", "string", "cis.cm.version.version1", 0, None),("resourceKey", "string", "cis.cm.version.version1", 0, None),), (0, "cis.cm.scm.Configuration", "cis.cm.scm.Configuration"), "System.Read", ["cis.cm.fault.ComponentManagerFault", ])])
CreateDataType("cis.cm.scm.response.ResponseError", "CmResponseResponseError", "vmodl.DynamicData", "cis.cm.version.version1", [("name", "string", "cis.cm.version.version1", F_OPTIONAL), ("error", "vmodl.LocalizableMessage", "cis.cm.version.version1", 0), ("resourceBundleId", "string", "cis.cm.version.version1", 0)])
CreateDataType("cis.cm.scm.response.UpdateResponse", "CmResponseUpdateResponse", "vmodl.DynamicData", "cis.cm.version.version1", [("success", "boolean", "cis.cm.version.version1", 0), ("errors", "cis.cm.scm.response.ResponseError[]", "cis.cm.version.version1", F_OPTIONAL)])
CreateDataType("cis.cm.site.Folder", "CmFolder", "vmodl.DynamicData", "cis.cm.version.version1", [("id", "string", "cis.cm.version.version1", 0), ("displayName", "string", "cis.cm.version.version1", F_OPTIONAL), ("parentId", "string", "cis.cm.version.version1", F_OPTIONAL)])
CreateDataType("cis.cm.site.Group", "CmGroup", "cis.cm.site.Folder", "cis.cm.version.version1", None)
CreateDataType("cis.cm.site.Site", "CmSite", "cis.cm.site.Folder", "cis.cm.version.version1", [("groups", "cis.cm.site.Group[]", "cis.cm.version.version1", F_OPTIONAL)])
CreateManagedType("cis.cm.site.SiteManager", "CmSiteManager", "vmodl.ManagedObject", "cis.cm.version.version1", None, [("retrieveLocalGroup", "RetrieveLocalGroup", "cis.cm.version.version1", (), (0, "cis.cm.site.Folder", "cis.cm.site.Folder"), "System.Read", ["cis.cm.fault.ComponentManagerFault", ]), ("retrieveLocalSite", "RetrieveLocalSite", "cis.cm.version.version1", (), (0, "cis.cm.site.Folder", "cis.cm.site.Folder"), "System.Read", ["cis.cm.fault.ComponentManagerFault", ]), ("retrieveSites", "RetrieveSites", "cis.cm.version.version1", (), (0, "cis.cm.site.Site[]", "cis.cm.site.Site[]"), "System.Read", ["cis.cm.fault.ComponentManagerFault", ]), ("attachGroup", "AttachGroup", "cis.cm.version.version1", (("siteId", "string", "cis.cm.version.version1", 0, None),), (0, "void", "void"), "ComponentManager.Administrator", ["cis.cm.fault.ComponentManagerFault", ]), ("detachGroup", "DetachGroup", "cis.cm.version.version1", (("siteId", "string", "cis.cm.version.version1", 0, None),), (0, "cis.cm.site.Folder", "cis.cm.site.Folder"), "ComponentManager.Administrator", ["cis.cm.fault.ComponentManagerFault", ]), ("deleteGroup", "DeleteGroup", "cis.cm.version.version1", (("groupId", "string", "cis.cm.version.version1", 0, None),), (0, "void", "void"), "ComponentManager.Administrator", ["cis.cm.fault.ComponentManagerFault", ]), ("deleteSite", "DeleteSite", "cis.cm.version.version1", (("siteId", "string", "cis.cm.version.version1", 0, None),), (0, "void", "void"), "ComponentManager.Administrator", ["cis.cm.fault.ComponentManagerFault", ]), ("updateFolder", "UpdateFolder", "cis.cm.version.version1", (("folder", "cis.cm.site.Folder", "cis.cm.version.version1", 0, None),), (0, "void", "void"), "ComponentManager.Administrator", ["cis.cm.fault.ComponentManagerFault", ])])
| 287.177419
| 2,897
| 0.724235
|
b31b26b28960e52231166d593a866902d4a66f31
| 454
|
py
|
Python
|
Source Code/Python API/run_livetop.py
|
D-TACQ/acq400_lv
|
684e06a294ceb865511a7568e5038b209bdc3374
|
[
"MIT"
] | null | null | null |
Source Code/Python API/run_livetop.py
|
D-TACQ/acq400_lv
|
684e06a294ceb865511a7568e5038b209bdc3374
|
[
"MIT"
] | 2
|
2018-04-23T16:37:19.000Z
|
2018-07-11T10:51:19.000Z
|
Source Code/Python API/run_livetop.py
|
D-TACQ/acq400_lv
|
684e06a294ceb865511a7568e5038b209bdc3374
|
[
"MIT"
] | 3
|
2018-04-20T11:53:29.000Z
|
2018-04-25T15:25:55.000Z
|
#!/usr/bin/env python
# run a livetop process
import acq400_hapi
import argparse
def run_shot(args):
uut = acq400_hapi.Acq400(args.uuts[0])
acq400_hapi.cleanup.init()
uut.run_livetop()
def run_main():
parser = argparse.ArgumentParser(description='acq1001 livetop demo')
parser.add_argument('uuts', nargs=1, help="uut ")
run_shot(parser.parse_args())
# execution starts here
if __name__ == '__main__':
run_main()
| 20.636364
| 72
| 0.696035
|
3218d9354259e0276f74fe13f06daafdb8bdcdd1
| 3,625
|
py
|
Python
|
public/client.py
|
danilo-p/redes-tp-1
|
c8516b500b14d1cd74a043dde06de160f172eac5
|
[
"MIT"
] | null | null | null |
public/client.py
|
danilo-p/redes-tp-1
|
c8516b500b14d1cd74a043dde06de160f172eac5
|
[
"MIT"
] | 5
|
2021-01-11T23:22:15.000Z
|
2021-01-16T02:29:47.000Z
|
public/client.py
|
danilo-p/redes-tp-1
|
c8516b500b14d1cd74a043dde06de160f172eac5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import resource
import logging
import re
import time
import select
import socket
import sys
BUFSZ = 4096
def create_parser():
desc = """Client controller for testing"""
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
"--script",
dest="scriptfn",
metavar="FILE",
type=str,
required=True,
help="File with control script",
)
return parser
class Controller:
MAX_CLIENTS = 10
RE_CONNECT = re.compile(r"^!connect (?P<cid>\d+) (?P<port>\d+) (?P<outfn>[\w.-]+)$")
RE_SLEEP = re.compile(r"^!sleep (?P<secs>\d+(?:\.\d*)?)$")
RE_SEND = re.compile(r"^!s(?P<cid>\d+) (?P<msg>.+)$")
def __init__(self, scriptfn: str):
self.scriptfd = open(scriptfn, "r")
self.sockets = [None] * Controller.MAX_CLIENTS
self.files = [None] * Controller.MAX_CLIENTS
self.joinbufs = [""] * Controller.MAX_CLIENTS
def run(self):
for line in self.scriptfd:
self._handle(line)
self.scriptfd.close()
def _handle(self, line):
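        # Handles one script line: "!connect <cid> <port> <outfn>", "!sleep <secs>", or "!s<cid> <msg>".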
if Controller.RE_CONNECT.match(line):
# Use assignment expressions instead when Python 3.8 comes
m = Controller.RE_CONNECT.match(line)
cid = int(m.group("cid"))
assert self.sockets[cid] is None
addr = "127.0.0.1"
port = int(m.group("port"))
self.sockets[cid] = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
self.sockets[cid].connect((addr, port))
self.files[cid] = open(m.group("outfn"), "w")
logging.debug("cid %d connected", cid)
if Controller.RE_SLEEP.match(line):
m = Controller.RE_SLEEP.match(line)
secs = float(m.group("secs"))
logging.debug("sleeping %f secs", secs)
time.sleep(secs)
self._dump_messages()
if Controller.RE_SEND.match(line):
m = Controller.RE_SEND.match(line)
cid = int(m.group("cid"))
assert self.sockets[cid] is not None
msg = m.group("msg") + "\n"
msg = self.joinbufs[cid] + msg
self.joinbufs[cid] = ""
logging.debug("sending [%s]", msg.rstrip())
self.sockets[cid].send(msg.encode("ascii"))
def _dump_messages(self):
logging.debug("dumping messages")
sockets = list(s for s in self.sockets if s is not None)
rs, _ws, _xs = select.select(sockets, [], sockets, 0.0)
assert not _ws and not _xs
for cid, s in enumerate(self.sockets):
if s is None or s not in rs:
continue
# This assumes we get the whole message in one go.
# This implementation is insufficient for the server.
msg = s.recv(BUFSZ)
if not msg:
self._close_client(cid)
else:
self.files[cid].write(msg.decode("ascii"))
def _close_client(self, cid):
self.sockets[cid].close()
self.sockets[cid] = None
self.files[cid].close()
self.files[cid] = None
self.joinbufs[cid] = ""
def main():
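    # Limit the process address space to 1 GiB and the output file size to 32 GiB.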
resource.setrlimit(resource.RLIMIT_AS, (1 << 30, 1 << 30))
resource.setrlimit(resource.RLIMIT_FSIZE, (1 << 35, 1 << 35))
logging.basicConfig(
filename="client.log", format="%(levelname)s %(message)s", level=logging.DEBUG
)
parser = create_parser()
opts = parser.parse_args()
controller = Controller(opts.scriptfn)
controller.run()
if __name__ == "__main__":
sys.exit(main())
| 31.521739
| 88
| 0.575172
|
2db19f81e4e98d5897729af128056bcac6c6986d
| 8,125
|
py
|
Python
|
engine.py
|
Masrik-Dahir/Rampantry
|
51df90c041e5dad644df54872d85528ab8553672
|
[
"MIT"
] | null | null | null |
engine.py
|
Masrik-Dahir/Rampantry
|
51df90c041e5dad644df54872d85528ab8553672
|
[
"MIT"
] | null | null | null |
engine.py
|
Masrik-Dahir/Rampantry
|
51df90c041e5dad644df54872d85528ab8553672
|
[
"MIT"
] | null | null | null |
from labpack.storage.google.drive import driveClient
import RPi.GPIO as GPIO # Import Raspberry Pi GPIO library
from time import sleep # Import the sleep function from the time module
import os
import time
import datetime
import urllib2
def generate():
GPIO.setwarnings(False) # Ignore warning for now
GPIO.setmode(GPIO.BOARD) # Use physical pin numbering
LEDPIN = 8
MAGNETPIN = 7
SWITCHPIN = 12
GPIO.setup(LEDPIN, GPIO.OUT, initial=GPIO.LOW) # Set pin 8 to be an output pin and set initial value to low (off)
GPIO.setup(MAGNETPIN, GPIO.IN, pull_up_down=GPIO.PUD_UP) # magnet switch. Default is CLOSED
GPIO.setup(SWITCHPIN, GPIO.IN, pull_up_down=GPIO.PUD_UP) # tactile switch. Default is OPEN
# GPIO.setup(8, GPIO.OUT, initial=GPIO.LOW) # Set pin 8 to be an output pin and set initial value to low (off)
# GPIO.setup(7, GPIO.IN, pull_up_down = GPIO.PUD_UP) #magnet switch. Default is CLOSED
# GPIO.setup(12, GPIO.IN, pull_up_down = GPIO.PUD_UP) #tactile switch. Default is OPEN
lastSensorReading = 1 # use this to stop multiple recordings for the same event
lastButtonReading = 0
    buttonPushed = 0 # make sure button push isn't registered multiple times
    stringStatus = "NA"
# on start up create a .csv file with time stamp(so you know how long it's been running)
now = datetime.datetime.now() # get time
namestr = now.strftime("%Y-%m-%d_%H-%M-%S")
filename = '/home/pi/Desktop/data/' + namestr + '.csv'
f = open(filename, 'a+')
if os.stat(filename).st_size == 0:
f.write('Date,Time,DoorStatus\r\n')
# input first reading - is door open or closed?
if GPIO.input(MAGNETPIN) == 1: # Reading is HIGH (1), so open
stringStatus = "DOOR-OPEN"
else:
stringStatus = "DOOR-CLOSED"
f.write('{0},{1},{2}%\r\n'.format(time.strftime('%m/%d/%y'), time.strftime('%H:%M'), stringStatus))
f.flush()
f.close() # close the file until time to write again
# Declare File name
# file_name = '{}.{}'.format(time.strftime('%m.%d.%y'), time.strftime('%H.%M'))
# Run forever to take the rest of the readings
while True:
sleep(0.5) # Sleep for 0.5 seconds
if buttonPushed == 0: # button has not been pushed
# check if button is pushed
if GPIO.input(SWITCHPIN) == 1: # Reading is HIGH (1), so button is NOT pushed
buttonPushed = 0
else:
if lastButtonReading != GPIO.input(SWITCHPIN):
# stop the bouncing effect so button pushed is registered ONCE
buttonPushed = 1
lastButtonReading = GPIO.input(SWITCHPIN) # update it so new reading is saved
# check if sensor status has changed
if GPIO.input(MAGNETPIN) != lastSensorReading: # current reading does not equal last reading
if GPIO.input(MAGNETPIN) == 1: # Reading is HIGH (1), so open
stringStatus = "DOOR-OPEN"
# print("Switch Open!")
GPIO.output(LEDPIN, GPIO.HIGH) # Turn on LED for testing
else:
stringStatus = "DOOR-CLOSED"
# print("Switch Closed!")
GPIO.output(LEDPIN, GPIO.LOW) # Turn off LED
lastSensorReading = GPIO.input(MAGNETPIN) # update it so new reading is saved
now = datetime.datetime.now() # get time
print(now)
# append the csv file
with open(filename, "a") as f:
f.write('{0},{1},{2}%\r\n'.format(time.strftime('%m/%d/%y'), time.strftime('%H:%M'), stringStatus))
# don't need to flush & close here because of the 'with'
# else GPIO.input(7) == lastSensorReading: # current reading equals last reading
else: # button pushed, so file is being read to USB.
# write to USB
print("Button pushed!")
buttonPushed = 0
sleep(1)
            # set button push to 0
print("******************")
'''
Access Token is permanent, so be careful where you use it!
file_path = filename
drive_space = 'drive'
'''
def migrate(file_path, access_token, drive_space='drive'):
'''
a method to save a posix file architecture to google drive
NOTE: to write to a google drive account using a non-approved app,
the oauth2 grantee account must also join this google group
https://groups.google.com/forum/#!forum/risky-access-by-unreviewed-apps
:param file_path: string with path to local file
:param access_token: string with oauth2 access token grant to write to google drive
:param drive_space: string with name of space to write to (drive, appDataFolder, photos)
:return: string with id of file on google drive
'''
# construct drive client
import httplib2
from googleapiclient import discovery
from oauth2client.client import AccessTokenCredentials
google_credentials = AccessTokenCredentials(access_token, 'my-user-agent/1.0')
google_http = httplib2.Http()
google_http = google_credentials.authorize(google_http)
google_drive = discovery.build('drive', 'v3', http=google_http)
drive_client = google_drive.files()
# prepare file body
from googleapiclient.http import MediaFileUpload
media_body = MediaFileUpload(filename=file_path, resumable=True)
# determine file modified time
import os
from datetime import datetime
modified_epoch = os.path.getmtime(file_path)
modified_time = datetime.utcfromtimestamp(modified_epoch).isoformat()
# determine path segments
path_segments = file_path.split(os.sep)
# construct upload kwargs
create_kwargs = {
'body': {
'name': path_segments.pop(),
'modifiedTime': modified_time
},
'media_body': media_body,
'fields': 'id'
}
# walk through parent directories
parent_id = ''
if path_segments:
# construct query and creation arguments
walk_folders = True
folder_kwargs = {
'body': {
'name': '',
'mimeType' : 'application/vnd.google-apps.folder'
},
'fields': 'id'
}
query_kwargs = {
'spaces': drive_space,
'fields': 'files(id, parents)'
}
while path_segments:
folder_name = path_segments.pop(0)
folder_kwargs['body']['name'] = folder_name
# search for folder id in existing hierarchy
if walk_folders:
walk_query = "name = '%s'" % folder_name
if parent_id:
walk_query += "and '%s' in parents" % parent_id
query_kwargs['q'] = walk_query
response = drive_client.list(**query_kwargs).execute()
file_list = response.get('files', [])
else:
file_list = []
if file_list:
parent_id = file_list[0].get('id')
# or create folder
# https://developers.google.com/drive/v3/web/folder
else:
if not parent_id:
if drive_space == 'appDataFolder':
folder_kwargs['body']['parents'] = [ drive_space ]
else:
del folder_kwargs['body']['parents']
else:
folder_kwargs['body']['parents'] = [parent_id]
response = drive_client.create(**folder_kwargs).execute()
parent_id = response.get('id')
walk_folders = False
# add parent id to file creation kwargs
if parent_id:
create_kwargs['body']['parents'] = [parent_id]
elif drive_space == 'appDataFolder':
create_kwargs['body']['parents'] = [drive_space]
# send create request
file = drive_client.create(**create_kwargs).execute()
file_id = file.get('id')
return file_id
# Output
file_name = generate()
access_token = "#########"
migrate(file_name,access_token)
| 38.14554
| 119
| 0.604554
|
1dfac409d745a364c0f38d9b31798df2ae33e419
| 9,528
|
py
|
Python
|
autogl/module/_feature/generators/graphlet.py
|
dedsec-9/AutoGL
|
487f2b2f798b9b1363ad5dc100fb410b12222e06
|
[
"MIT"
] | 824
|
2020-11-30T14:38:07.000Z
|
2022-03-19T10:14:04.000Z
|
autogl/module/_feature/generators/graphlet.py
|
dedsec-9/AutoGL
|
487f2b2f798b9b1363ad5dc100fb410b12222e06
|
[
"MIT"
] | 38
|
2020-12-21T12:32:57.000Z
|
2022-01-31T02:32:05.000Z
|
autogl/module/_feature/generators/graphlet.py
|
dedsec-9/AutoGL
|
487f2b2f798b9b1363ad5dc100fb410b12222e06
|
[
"MIT"
] | 85
|
2020-12-21T05:16:09.000Z
|
2022-03-28T08:44:22.000Z
|
import os
import numpy as np
import pandas as pd
import copy
from .base import BaseGenerator
from tqdm import tqdm
from ....utils import get_logger
from .. import register_feature
LOGGER = get_logger("Feature")
class Graphlet:
def __init__(self, data, sample_error=0.1, sample_confidence=0.1):
self._data = data
self._init()
self._sample_error = sample_error
self._sample_confidence = sample_confidence
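        # Per-neighbourhood sample size from a Hoeffding-style bound: dw = ceil(0.5 * eps^-2 * ln(2 / delta)).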
self._dw = int(
np.ceil(
0.5 * (self._sample_error ** -2) * np.log(2 / self._sample_confidence)
)
)
LOGGER.info(
"sample error {} , confidence {},num {}".format(
self._sample_error, self._sample_confidence, self._dw
)
)
def _init(self):
self._edges = list(self._data.edge_index)
self._edges = [self._edges[0], self._edges[1]]
self._num_nodes = self._data.x.shape[0]
self._num_edges = len(self._edges[0])
self._neighbours = [[] for _ in range(self._num_nodes)]
for i in range(len(self._edges[0])):
u, v = self._edges[0][i], self._edges[1][i]
self._neighbours[u].append(v)
LOGGER.info("nodes {} , edges {}".format(self._num_nodes, self._num_edges))
        # sorting: order nodes by ascending degree and each neighbour list by descending neighbour degree
self._node_degrees = np.array([len(x) for x in self._neighbours])
self._nodes = np.argsort(self._node_degrees)
for i in self._nodes:
self._neighbours[i] = [
x
for _, x in sorted(
zip(self._node_degrees[self._neighbours[i]], self._neighbours[i]),
reverse=True,
)
]
self._neighbours = [np.array(x) for x in self._neighbours]
def _get_gdv(self, v, u):
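        # Exact edge-centric graphlet degree vector for edge (v, u): counts of the
        # 3-node and 4-node graphlets (connected and disconnected) incident to the edge.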
if self._node_degrees[v] >= self._node_degrees[u]:
pass
else:
u, v = v, u
Sv, Su, Te = set(), set(), set()
sigma1, sigma2 = 0, 0
nb = self._neighbours
N = self._num_nodes
M = self._num_edges
phi = np.zeros(self._num_nodes, dtype=int)
c1, c2, c3, c4 = 1, 2, 3, 4
x = np.zeros(16, dtype=int)
# p1
for w in nb[v]:
if w != u:
Sv.add(w)
phi[w] = c1
# p2
for w in nb[u]:
if w != v:
if phi[w] == c1:
Te.add(w)
phi[w] = c3
Sv.remove(w)
else:
Su.add(w)
phi[w] = c2
# p3
for w in Te:
for r in nb[w]:
if phi[r] == c3:
x[5] += 1
phi[w] = c4
sigma2 = sigma2 + len(nb[w]) - 2
# p4
for w in Su:
for r in nb[w]:
if phi[r] == c1:
x[8] += 1
if phi[r] == c2:
x[7] += 1
if phi[r] == c4:
sigma1 += 1
phi[w] = 0
sigma2 = sigma2 + len(nb[w]) - 1
# p5
for w in Sv:
for r in nb[w]:
if phi[r] == c1:
x[7] += 1
if phi[r] == c4:
sigma1 += 1
phi[w] = 0
sigma2 = sigma2 + len(nb[w]) - 1
lsv, lsu, lte, du, dv = len(Sv), len(Su), len(Te), len(nb[u]), len(nb[v])
# 3-graphlet
x[1] = lte
x[2] = du + dv - 2 - 2 * x[1]
x[3] = N - x[2] - x[1] - 2
x[4] = N * (N - 1) * (N - 2) / 6 - (x[1] + x[2] + x[3])
# 4 connected graphlets
x[6] = x[1] * (x[1] - 1) / 2 - x[5]
x[10] = lsv * lsu - x[8]
x[9] = lsv * (lsv - 1) / 2 + lsu * (lsu - 1) / 2 - x[7]
        # 4 disconnected graphlets
t1 = N - (lte + lsu + lsv + 2)
x[11] = x[1] * t1
x[12] = M - (du + dv - 1) - (sigma2 - sigma1 - x[5] - x[8] - x[7])
x[13] = (lsu + lsv) * t1
x[14] = t1 * (t1 - 1) / 2 - x[12]
x[15] = N * (N - 1) * (N - 2) * (N - 3) / 24 - np.sum(x[5:15])
return x
def _get_gdv_sample(self, v, u):
if self._node_degrees[v] >= self._node_degrees[u]:
pass
else:
u, v = v, u
Sv = set()
sigma1, sigma2 = 0, 0
nb = self._neighbours
N = self._num_nodes
M = self._num_edges
phi = np.zeros(self._num_nodes, dtype=int)
c1, c2, c3, c4 = 1, 2, 3, 4
x = np.zeros(16)
dw = self._dw
# p1
Sv = set(nb[v][nb[v] != u])
phi[list(Sv)] = c1
# p2
        p2w = nb[u][nb[u] != v]  # neighbours of u other than v (mirrors the exact version)
p2w1 = p2w[phi[p2w] == c1]
p2w2 = p2w[phi[p2w] != c1]
Te = p2w1
phi[p2w1] = c3
Sv -= set(list(p2w1))
Su = p2w2
phi[p2w2] = c2
# p3
for w in Te:
if dw >= len(nb[w]):
region = nb[w]
inc = 1
else:
region = np.random.choice(nb[w], dw, replace=False)
inc = self._node_degrees[w] / dw
phir = phi[region]
x[5] += inc * np.sum(phir == c3)
phi[w] = c4
sigma2 = sigma2 + len(nb[w]) - 2
# p4
for w in Su:
if dw >= len(nb[w]):
region = nb[w]
inc = 1
else:
region = np.random.choice(nb[w], dw, replace=False)
inc = self._node_degrees[w] / dw
phir = phi[region]
x[8] += inc * np.sum(phir == c1)
x[7] += inc * np.sum(phir == c2)
sigma1 += inc * np.sum(phir == c4)
phi[w] = 0
sigma2 = sigma2 + len(nb[w]) - 1
# p5
for w in Sv:
if dw >= len(nb[w]):
region = nb[w]
inc = 1
else:
region = np.random.choice(nb[w], dw, replace=False)
inc = self._node_degrees[w] / dw
phir = phi[region]
x[7] += inc * np.sum(phir == c1)
sigma1 += inc * np.sum(phir == c4)
phi[w] = 0
sigma2 = sigma2 + len(nb[w]) - 1
lsv, lsu, lte, du, dv = len(Sv), len(Su), len(Te), len(nb[u]), len(nb[v])
# 3-graphlet
x[1] = lte
x[2] = du + dv - 2 - 2 * x[1]
x[3] = N - x[2] - x[1] - 2
x[4] = N * (N - 1) * (N - 2) / 6 - (x[1] + x[2] + x[3])
# 4 connected graphlets
x[6] = x[1] * (x[1] - 1) / 2 - x[5]
x[10] = lsv * lsu - x[8]
x[9] = lsv * (lsv - 1) / 2 + lsu * (lsu - 1) / 2 - x[7]
        # 4 disconnected graphlets
t1 = N - (lte + lsu + lsv + 2)
x[11] = x[1] * t1
x[12] = M - (du + dv - 1) - (sigma2 - sigma1 - x[5] - x[8] - x[7])
x[13] = (lsu + lsv) * t1
x[14] = t1 * (t1 - 1) / 2 - x[12]
x[15] = N * (N - 1) * (N - 2) * (N - 3) / 24 - np.sum(x[5:15])
return x
def get_gdvs(self, sample=True):
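        # Node-level GDV = mean of the edge GDVs over all edges incident to the node
        # (x[0] is unused, leaving 15 features per node).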
res = np.zeros((self._num_nodes, 15))
for u in tqdm(range(self._num_nodes)):
vs = self._neighbours[u]
if len(vs) != 0:
gdvs = []
for v in tqdm(vs, disable=len(vs) < 100):
if sample:
gdvs.append(self._get_gdv_sample(u, v))
else:
gdvs.append(self._get_gdv(u, v))
res[u, :] = np.mean(gdvs, axis=0)[1:]
return res
def get_gdvs_cp(self, workers="max"):
r"""
        C++ parallel implementation (same output as get_gdvs); requires ``pgd_path``,
        the path to the external graphlet-counting binary, to be defined.
"""
tmpfile = "tmp.mtx"
tmpmicro = "tmp.micro"
self._save(tmpfile)
os.system(
"{} -f {} --micro {} -w {}".format(pgd_path, tmpfile, tmpmicro, workers)
)
return self._load(tmpmicro)
def _save(self, filename):
with open(filename, "w") as file:
file.write(
"{} {} {}\n".format(self._num_nodes, self._num_nodes, self._num_edges)
)
for u in self._nodes:
for v in self._neighbours[u]:
file.write("{} {}\n".format(u + 1, v + 1))
def _load(self, filename):
df = pd.read_csv(filename)
edges = df[["% src", "dst"]].values
egdvs = df.values[:, 2:]
num_nodes = np.max(edges)
ngdvs = np.zeros((num_nodes, 8))
nbs = [[] for _ in range(num_nodes)]
for i, (u, v) in enumerate(edges):
u -= 1
v -= 1
nbs[u].append(i)
nbs[v].append(i)
for i in range(num_nodes):
if len(nbs[i]) != 0:
ngdvs[i, :] = np.mean(egdvs[nbs[i]], axis=0)
return ngdvs
@register_feature("graphlet")
class GeGraphlet(BaseGenerator):
r"""generate local graphlet numbers as features. The implementation refers to [#]_ .
References
----------
.. [#] Ahmed, N. K., Willke, T. L., & Rossi, R. A. (2016).
Estimation of local subgraph counts. Proceedings - 2016 IEEE International Conference on Big Data, Big Data 2016, 586–595.
https://doi.org/10.1109/BigData.2016.7840651
"""
def __init__(self, workers=1):
super(GeGraphlet, self).__init__()
self.workers = workers
def _transform(self, data):
r"""graphlet degree vectors"""
gl = Graphlet(data)
# res=gl.get_gdvs_cp(self.workers)
res = gl.get_gdvs()
data.x = np.concatenate([data.x, res], axis=1)
return data
| 32.189189
| 130
| 0.436608
|
8d96f361e8c840b06c4c33313a21659b29467db9
| 524
|
py
|
Python
|
AtCoder Beginner Contest 190/B - Magic 3.py
|
codedreamer-dg/AtCoder
|
6a4a9a2bc558bb0b21505877e00858d0c7981701
|
[
"MIT"
] | null | null | null |
AtCoder Beginner Contest 190/B - Magic 3.py
|
codedreamer-dg/AtCoder
|
6a4a9a2bc558bb0b21505877e00858d0c7981701
|
[
"MIT"
] | null | null | null |
AtCoder Beginner Contest 190/B - Magic 3.py
|
codedreamer-dg/AtCoder
|
6a4a9a2bc558bb0b21505877e00858d0c7981701
|
[
"MIT"
] | null | null | null |
import sys
from collections import defaultdict as dd
from math import *
from bisect import *
#sys.setrecursionlimit(10 ** 8)
def sinp():
return input()
def inp():
return int(input())
def minp():
return map(int, input().split())
def linp():
return list(minp())
def strl():
return list(input())
def pr(x):
print(x)
return
mod = int(1e9+7)
n, s, d = minp()
flag = False
for i in range(n):
x, y = minp()
if x < s and y > d:
flag = True
if flag:
print("Yes")
else:
print("No")
| 17.466667
| 41
| 0.59542
|
06ab59e774ccd186901e357fa564488cf9ccdab8
| 5,819
|
py
|
Python
|
salt/states/libcloud_dns.py
|
xiaowei582648206/saltx
|
1d17b030b973ce5422e0fbe7e17c98c7ca91c49b
|
[
"Apache-2.0"
] | 1
|
2022-02-09T06:40:14.000Z
|
2022-02-09T06:40:14.000Z
|
salt/states/libcloud_dns.py
|
xiaowei582648206/saltx
|
1d17b030b973ce5422e0fbe7e17c98c7ca91c49b
|
[
"Apache-2.0"
] | null | null | null |
salt/states/libcloud_dns.py
|
xiaowei582648206/saltx
|
1d17b030b973ce5422e0fbe7e17c98c7ca91c49b
|
[
"Apache-2.0"
] | 4
|
2020-11-04T06:28:05.000Z
|
2022-02-09T10:54:49.000Z
|
# -*- coding: utf-8 -*-
'''
Manage DNS records and zones using libcloud
:codeauthor: Anthony Shaw <anthonyshaw@apache.org>
.. versionadded:: 2016.11.0
Create and delete DNS records or zones through Libcloud. Libcloud's DNS system supports over 20 DNS
providers including Amazon, Google, GoDaddy, Softlayer
This module uses ``libcloud``, which can be installed via package, or pip.
:configuration:
This module uses a configuration profile for one or multiple DNS providers
.. code-block:: yaml
libcloud_dns:
profile1:
driver: godaddy
key: 2orgk34kgk34g
profile2:
driver: route53
key: blah
secret: blah
Example:
.. code-block:: yaml
webserver:
libcloud_dns.zone_present:
name: mywebsite.com
profile: profile1
libcloud_dns.record_present:
name: www
zone: mywebsite.com
type: A
data: 12.34.32.3
profile: profile1
:depends: apache-libcloud
'''
# Import Python Libs
from __future__ import absolute_import
# Import salt libs
import salt.utils
def __virtual__():
return True
def __init__(opts):
salt.utils.compat.pack_dunder(__name__)
def state_result(name, result, message):
return {
'name': name,
'result': result,
'changes': {},
'comment': message
}
def zone_present(domain, type, profile):
'''
    Ensures a zone is present.
:param domain: Zone name, i.e. the domain name
:type domain: ``str``
:param type: Zone type (master / slave), defaults to master
:type type: ``str``
:param profile: The profile key
:type profile: ``str``
'''
zones = __salt__['libcloud_dns.list_zones'](profile)
if not type:
type = 'master'
matching_zone = [z for z in zones if z.domain == domain]
if len(matching_zone) > 0:
return state_result(domain, True, 'Zone already exists')
else:
result = __salt__['libcloud_dns.create_zone'](domain, profile, type)
return state_result(domain, result, 'Created new zone')
def zone_absent(domain, profile):
'''
    Ensures a zone is absent.
:param domain: Zone name, i.e. the domain name
:type domain: ``str``
:param profile: The profile key
:type profile: ``str``
'''
zones = __salt__['libcloud_dns.list_zones'](profile)
matching_zone = [z for z in zones if z.domain == domain]
if len(matching_zone) == 0:
return state_result(domain, True, 'Zone already absent')
else:
result = __salt__['libcloud_dns.delete_zone'](matching_zone[0].id, profile)
return state_result(domain, result, 'Deleted zone')
def record_present(name, zone, type, data, profile):
'''
Ensures a record is present.
:param name: Record name without the domain name (e.g. www).
Note: If you want to create a record for a base domain
name, you should specify empty string ('') for this
argument.
:type name: ``str``
:param zone: Zone where the requested record is created, the domain name
:type zone: ``str``
:param type: DNS record type (A, AAAA, ...).
:type type: ``str``
:param data: Data for the record (depends on the record type).
:type data: ``str``
:param profile: The profile key
:type profile: ``str``
'''
zones = __salt__['libcloud_dns.list_zones'](profile)
try:
matching_zone = [z for z in zones if z.domain == zone][0]
except IndexError:
return state_result(zone, False, 'Could not locate zone')
records = __salt__['libcloud_dns.list_records'](matching_zone.id, profile)
matching_records = [record for record in records
if record.name == name and
record.type == type and
record.data == data]
if len(matching_records) == 0:
result = __salt__['libcloud_dns.create_record'](
name, matching_zone.id,
type, data, profile)
return state_result(name, result, 'Created new record')
else:
return state_result(name, True, 'Record already exists')
def record_absent(name, zone, type, data, profile):
'''
Ensures a record is absent.
:param name: Record name without the domain name (e.g. www).
Note: If you want to create a record for a base domain
name, you should specify empty string ('') for this
argument.
:type name: ``str``
:param zone: Zone where the requested record is created, the domain name
:type zone: ``str``
:param type: DNS record type (A, AAAA, ...).
:type type: ``str``
:param data: Data for the record (depends on the record type).
:type data: ``str``
:param profile: The profile key
:type profile: ``str``
'''
zones = __salt__['libcloud_dns.list_zones'](profile)
try:
matching_zone = [z for z in zones if z.domain == zone][0]
except IndexError:
return state_result(zone, False, 'Zone could not be found')
records = __salt__['libcloud_dns.list_records'](matching_zone.id, profile)
matching_records = [record for record in records
if record.name == name and
record.type == type and
record.data == data]
if len(matching_records) > 0:
result = []
for record in matching_records:
result.append(__salt__['libcloud_dns.delete_record'](
matching_zone.id,
record.id,
profile))
return state_result(name, all(result), 'Removed {0} records'.format(len(result)))
else:
return state_result(name, True, 'Records already absent')
| 29.538071
| 99
| 0.614023
|
e8911eabb7dc5cfa5ad87d007dead6f0647e02c5
| 410
|
py
|
Python
|
app/models/especialidade.py
|
RaimundoLima/Zivot
|
d11e22cccaa1e25bb11244c1178cfd374e386a79
|
[
"Apache-2.0",
"bzip2-1.0.6"
] | null | null | null |
app/models/especialidade.py
|
RaimundoLima/Zivot
|
d11e22cccaa1e25bb11244c1178cfd374e386a79
|
[
"Apache-2.0",
"bzip2-1.0.6"
] | null | null | null |
app/models/especialidade.py
|
RaimundoLima/Zivot
|
d11e22cccaa1e25bb11244c1178cfd374e386a79
|
[
"Apache-2.0",
"bzip2-1.0.6"
] | null | null | null |
from .base import Base
from sqlalchemy import DateTime,Time,ForeignKey,Column, Integer, Numeric, Binary, String,VARCHAR,Float
from sqlalchemy.orm import relationship
class Especialidade(Base):
nome = Column(VARCHAR(100), nullable=False)
medicos=relationship('Medico',backref='especialidade',lazy=True)
#usuario_especialidade=relationship('usuario_especialidade',backref='especialidade',lazy=True)
| 45.555556
| 102
| 0.802439
|
62b09ec68bae8eb0b84fd38560d028d838a27f5d
| 1,730
|
py
|
Python
|
starlette/middleware/authentication.py
|
carlodri/starlette
|
2505df422532b1e76daaf03bfd0b9dfd380cfa94
|
[
"BSD-3-Clause"
] | null | null | null |
starlette/middleware/authentication.py
|
carlodri/starlette
|
2505df422532b1e76daaf03bfd0b9dfd380cfa94
|
[
"BSD-3-Clause"
] | null | null | null |
starlette/middleware/authentication.py
|
carlodri/starlette
|
2505df422532b1e76daaf03bfd0b9dfd380cfa94
|
[
"BSD-3-Clause"
] | null | null | null |
import functools
import typing
from starlette.authentication import (
AuthCredentials,
AuthenticationBackend,
AuthenticationError,
UnauthenticatedUser,
)
from starlette.requests import Request
from starlette.responses import PlainTextResponse, Response
from starlette.types import ASGIApp, ASGIInstance, Receive, Scope, Send
class AuthenticationMiddleware:
def __init__(
self,
app: ASGIApp,
backend: AuthenticationBackend,
on_error: typing.Callable[[Request, AuthenticationError], Response] = None,
) -> None:
self.app = app
self.backend = backend
self.on_error = (
on_error if on_error is not None else self.default_on_error
) # type: typing.Callable[[Request, AuthenticationError], Response]
def __call__(self, scope: Scope) -> ASGIInstance:
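        # Only HTTP and WebSocket connections carry requests to authenticate;
        # any other scope type (e.g. "lifespan") is passed through untouched.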
        if scope["type"] in ["http", "websocket"]:
return functools.partial(self.asgi, scope=scope)
return self.app(scope)
async def asgi(self, receive: Receive, send: Send, scope: Scope) -> None:
request = Request(scope, receive=receive)
try:
auth_result = await self.backend.authenticate(request)
except AuthenticationError as exc:
response = self.on_error(request, exc)
await response(receive, send)
return
if auth_result is None:
auth_result = AuthCredentials(), UnauthenticatedUser()
scope["auth"], scope["user"] = auth_result
inner = self.app(scope)
await inner(receive, send)
@staticmethod
def default_on_error(request: Request, exc: Exception) -> Response:
return PlainTextResponse(str(exc), status_code=400)
| 33.921569
| 83
| 0.667052
|
1a9242f95cda17318e7e92ae7598f0ca613ba18d
| 2,253
|
py
|
Python
|
tests/test_skillsearch.py
|
allenai/alexafsm
|
0c2e8842ddbb4a34ac64a5139e7febee3b28889a
|
[
"Apache-2.0"
] | 108
|
2017-05-11T22:33:39.000Z
|
2022-03-04T03:04:51.000Z
|
tests/test_skillsearch.py
|
allenai/alexafsm
|
0c2e8842ddbb4a34ac64a5139e7febee3b28889a
|
[
"Apache-2.0"
] | null | null | null |
tests/test_skillsearch.py
|
allenai/alexafsm
|
0c2e8842ddbb4a34ac64a5139e7febee3b28889a
|
[
"Apache-2.0"
] | 17
|
2017-05-12T23:26:38.000Z
|
2020-04-20T19:39:54.000Z
|
import pytest
import json
from tests.skillsearch.policy import Policy
from alexafsm.utils import validate, events_states_transitions, unused_events_states_transitions
from alexafsm.test_helpers import get_requests_responses
from tests.skillsearch.skill_settings import SkillSettings
def test_validate_policy():
policy = Policy.initialize()
validate(policy=policy,
schema_file='./tests/skillsearch/speech/alexa-schema.json',
ignore_intents={'DontUnderstand'})
policy_states = policy.machine.states
policy_stop_states = \
policy.states.EXIT_ON_STOP_STATES + \
policy.states.CONTINUE_ON_STOP_STATES + \
policy.states.PROMPT_ON_STOP_STATES
# "exiting" state does not need any outgoing transitions
missing = set(policy_states) - set(policy_stop_states) - {'exiting'}
assert not missing, f'Some states do not handle STOP/CANCEL intents: {missing}'
def the_test_playback(measure_coverage: bool = False):
"""Play back recorded responses to check that the system is still behaving the same
Change to test_playback to actually run this test once a recording is made."""
policy = Policy.initialize()
SkillSettings().playback = True
record_file = SkillSettings().get_record_file()
for request, expected_response in get_requests_responses(record_file):
actual_response = json.loads(json.dumps(policy.handle(request)))
assert expected_response == actual_response
if measure_coverage:
policy = SkillSettings().get_policy()
all_events, all_states, all_transitions = events_states_transitions(policy)
unused_events, unused_states, unused_transitions = \
unused_events_states_transitions(policy, get_requests_responses(record_file))
print(f"Summary: "
f"{len(unused_events)}/{len(all_events)} unused events, "
f"{len(unused_states)}/{len(all_states)} unused states, "
f"{len(unused_transitions)}/{len(all_transitions)} unused transitions ")
print(f"Unused events: {unused_events}")
print(f"Unused states: {unused_states}")
print(f"Unused transitions: {unused_transitions}")
if __name__ == '__main__':
pytest.main([__file__])
| 42.509434
| 96
| 0.725255
|
7a1ba992b6ed0aeef26f0f10860be90d9bf8de73
| 278
|
py
|
Python
|
vet_website/www/main/inventory/products/form.py
|
rezazrna/vet_website
|
26e731cb10c31d69292f33659c49c3cfa5646c39
|
[
"MIT"
] | null | null | null |
vet_website/www/main/inventory/products/form.py
|
rezazrna/vet_website
|
26e731cb10c31d69292f33659c49c3cfa5646c39
|
[
"MIT"
] | null | null | null |
vet_website/www/main/inventory/products/form.py
|
rezazrna/vet_website
|
26e731cb10c31d69292f33659c49c3cfa5646c39
|
[
"MIT"
] | null | null | null |
import frappe
def get_context(context):
context.no_cache = True
context.alt_page_title = "Product Baru"
if frappe.session.user == 'Guest':
frappe.local.flags.redirect_location = frappe.utils.get_url('/login')
raise frappe.Redirect
return context
| 30.888889
| 77
| 0.705036
|
a566e7cc9e3f131f5e5ac70f4181ed7b217936a5
| 724
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/servicebus/v20140901/__init__.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/servicebus/v20140901/__init__.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/servicebus/v20140901/__init__.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .get_namespace import *
from .get_namespace_authorization_rule import *
from .get_queue import *
from .get_queue_authorization_rule import *
from .get_subscription import *
from .get_topic import *
from .get_topic_authorization_rule import *
from .namespace import *
from .namespace_authorization_rule import *
from .queue import *
from .queue_authorization_rule import *
from .subscription import *
from .topic import *
from .topic_authorization_rule import *
from ._inputs import *
from . import outputs
| 32.909091
| 80
| 0.780387
|
61befb73f4573651252d6f7ff27d43a0f1e216bd
| 1,412
|
py
|
Python
|
main.py
|
Mdotta/playlist-genre-graph
|
88a46afae8c99cc476550c407b6e30e33b8c34be
|
[
"MIT"
] | null | null | null |
main.py
|
Mdotta/playlist-genre-graph
|
88a46afae8c99cc476550c407b6e30e33b8c34be
|
[
"MIT"
] | null | null | null |
main.py
|
Mdotta/playlist-genre-graph
|
88a46afae8c99cc476550c407b6e30e33b8c34be
|
[
"MIT"
] | null | null | null |
from collections import Counter
import os
from dotenv import load_dotenv
import spotipy
from spotipy import client
from spotipy.oauth2 import SpotifyOAuth
load_dotenv()
CLIENT_ID = os.getenv('CLIENT_ID')
CLIENT_SECRET = os.getenv('CLIENT_SECRET')
REDIRECT_URI = os.getenv('REDIRECT_URI')
SCOPE = os.getenv('SCOPE')
PLAYLIST_ID = "0HY9RlNjxtyG7VxaeYecMT?si=8006a44fa0d64744"
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(client_id=CLIENT_ID,
client_secret=CLIENT_SECRET,
redirect_uri=REDIRECT_URI,
scope=SCOPE))
def get_artists(playlist):
playlist = sp.playlist(playlist_id=playlist)
artists = [artist for sublist in [track['track']['album']['artists'] for track in playlist['tracks']['items']] for artist in sublist]
return artists
def get_artist_genres(artist):
artist_detail = sp.artist(artist['external_urls']['spotify'])
return artist_detail['genres']
def get_list_of_percentages(l):
c = Counter(l)
return [(i, c[i] / len(l) * 100.0) for i in c]
artists = get_artists(PLAYLIST_ID)
genres = [genre for sublist in [get_artist_genres(artist) for artist in artists] for genre in sublist]
percentage = get_list_of_percentages(genres)
sorted_list = sorted(percentage,key=lambda x:x[1],reverse=True)
print([item[0] for item in sorted_list[:3]])
| 34.439024
| 137
| 0.692635
|
b7f02412cca47642900dca1a92bb66e76e8f5b1f
| 6,410
|
py
|
Python
|
src/retrieval_core/models/encoder/backbones/vgg.py
|
RImbriaco/OML
|
4998cdebc3ac553ccd53b4caacf24d8c3d8fc07b
|
[
"MIT"
] | 2
|
2021-09-08T12:33:05.000Z
|
2021-09-14T09:40:43.000Z
|
src/retrieval_core/models/encoder/backbones/vgg.py
|
RImbriaco/OML
|
4998cdebc3ac553ccd53b4caacf24d8c3d8fc07b
|
[
"MIT"
] | null | null | null |
src/retrieval_core/models/encoder/backbones/vgg.py
|
RImbriaco/OML
|
4998cdebc3ac553ccd53b4caacf24d8c3d8fc07b
|
[
"MIT"
] | 1
|
2021-09-08T12:35:10.000Z
|
2021-09-08T12:35:10.000Z
|
import torch.nn as nn
from torch.hub import load_state_dict_from_url
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19',
]
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
class VGG(nn.Module):
def __init__(self, features, in_channels, num_classes=1000, init_weights=True):
super(VGG, self).__init__()
self.features = features
self.in_channels = in_channels
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, num_classes),
)
if init_weights:
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def make_layers(cfg, in_channels=3, batch_norm=False):
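    # Build the convolutional feature extractor from a config list: integers are
    # 3x3 conv output channels (optionally followed by BatchNorm) and 'M' is 2x2 max-pooling.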
layers = []
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfgs = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def _vgg(arch, cfg, batch_norm, pretrained, progress, **kwargs):
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfgs[cfg], in_channels=kwargs['in_channels'],
batch_norm=batch_norm), **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def vgg11(pretrained=False, progress=True, **kwargs):
"""VGG 11-layer model (configuration "A")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg11', 'A', False, pretrained, progress, **kwargs)
def vgg11_bn(pretrained=False, progress=True, **kwargs):
"""VGG 11-layer model (configuration "A") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg11_bn', 'A', True, pretrained, progress, **kwargs)
def vgg13(pretrained=False, progress=True, **kwargs):
"""VGG 13-layer model (configuration "B")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg13', 'B', False, pretrained, progress, **kwargs)
def vgg13_bn(pretrained=False, progress=True, **kwargs):
"""VGG 13-layer model (configuration "B") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg13_bn', 'B', True, pretrained, progress, **kwargs)
def vgg16(pretrained=False, progress=True, **kwargs):
"""VGG 16-layer model (configuration "D")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg16', 'D', False, pretrained, progress, **kwargs)
def vgg16_bn(pretrained=False, progress=True, **kwargs):
"""VGG 16-layer model (configuration "D") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg16_bn', 'D', True, pretrained, progress, **kwargs)
def vgg19(pretrained=False, progress=True, **kwargs):
"""VGG 19-layer model (configuration "E")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg19', 'E', False, pretrained, progress, **kwargs)
def vgg19_bn(pretrained=False, progress=True, **kwargs):
"""VGG 19-layer model (configuration 'E') with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg19_bn', 'E', True, pretrained, progress, **kwargs)
| 36.420455
| 113
| 0.614041
|
1dd1e6885a72ce0c4ff106568c6b393e52c869ed
| 27,619
|
py
|
Python
|
seqapp/tabs/main_page.py
|
tlallema/essai
|
180230b2f847e09559b7bc8a32205b6ed9208415
|
[
"MIT"
] | null | null | null |
seqapp/tabs/main_page.py
|
tlallema/essai
|
180230b2f847e09559b7bc8a32205b6ed9208415
|
[
"MIT"
] | null | null | null |
seqapp/tabs/main_page.py
|
tlallema/essai
|
180230b2f847e09559b7bc8a32205b6ed9208415
|
[
"MIT"
] | 1
|
2021-11-12T04:02:22.000Z
|
2021-11-12T04:02:22.000Z
|
"""MAIN APP PAGE
Contains the principal tab of the application UI - where
the user uploads input files, launches analyses, and interprets
&/or downloads pipeline output QC results. This is the first tab
of the single-page UI (and therefore the default presentation
immediately upon loading the app), and the only one a user
will necessarily use. Remaining tabs provide supplemental info
or functionality.
Attributes:
children (list): The whole page's worth of Dash components, ordered
sequentially corresponding directly to displayed relative position
in the app, from the very top to very bottom.
    components_list (list): Abstraction of `children` used to avoid the excess
        indentation that would otherwise push the code too far to the right (i.e.,
        for code-wrap aesthetics).
updates (list): Easy-access to one of the more permanently variable
components - the app software version updates notifications displayed
at top of main page just below the log in dialog.
version (str): The config-imported automatically up-to-date software
release version as queried via subprocess call `git describe --tags`.
"""
import os
import sys
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from seqapp.config import *
from seqapp.utils import ljoin
version = VERSION
updates = UPDATES
app_logo = f'data:image/png;base64,{base64.b64encode(open(f"{APP_HOME}/{APP_NAME}/assets/images/dash-webapp-template-logo-light-web.png", "rb").read()).decode()}'
updates = [u.split("Date: ") for u in updates]
updates = [
html.Details(
[
html.Ol(
[
html.Li(
[
html.P(
commit_log[0],
style={"fontSize": "0.5rem", "fontWeight": 200},
),
html.P(commit_log[1], style={"fontWeight": 400}),
]
)
for commit_log in updates
],
className="updates-list",
reversed="reversed",
),
html.Summary(html.Code("-UPDATES-", className="updates-header")),
],
id="app-updates",
)
]
navbar = [
html.Td(html.Img(src=app_logo, style={"width": "80%"}), style={"width": "7%"}),
html.Td(
html.Div(
[
html.Div(
[
html.Span("[server/network/cmpny]: ", style={"color": "#878686", "fontSize": "0.5rem"}),
html.Span("[YOUR APP NAME]"),
], # ╦ ",
style={
"marginLeft": "-3%",
"cursor": "pointer",
"marginBottom": "0.5%",
"fontSize": "80%",
},
),
html.Span(html.A("Top⏎", href="#back-to-top")),
html.Span(" | "),
html.Span(
html.A("Step One", href="#step-one", className="navbar-select")
),
html.Span(" | "),
html.Span(
html.A(
"Step Two (⮊Run🧬Pipeline)",
href="#step-two",
className="navbar-select",
)
), # ⧉
html.Span(" | "),
html.Span(
html.A("Results", href="#mutation-calls", className="navbar-select")
),
html.Span(" | "),
html.Span(
html.A(
"Download Links",
href="#download-links",
className="navbar-select",
)
), # (🖥⇉🗐)
html.Span(" | "),
html.Span(
html.A(
"Clear",
href="#pipeline-progress-disclaimer",
className="navbar-select",
)
), # (∅🖺➡🗋)
html.Div(
children=[
html.Span(
html.A(
["⁂ Sign In"],
style={"fontSize": "80%", "color": "goldenrod", "width": "fit-content"},
href=f"#log-in-below",
)
)
],
id="user-status",
style={"marginLeft": "1%"},
),
],
style={"text-align": "left"},
)
),
]
header = [html.Table(html.Tr(navbar)), html.Div(className="menubar-divider")]
download_options = [{"label": "—Select Output Type—", "value": "None"}] + sorted(
[
{"label": "Export [Streamlined] to Geneious (GENEIOUS)", "value": "GENEIOUS"},
{
"label": "Plasmid Maps [Full De Novo-Generated Archive] {ALL Putative TCR-α/β} (FASTA)",
"value": "REF",
},
{
"label": "Plasmid References [Current Session] (FASTA) {*plus*: Post-QC Consensus Sequence(s)}",
"value": "CONSENSUS",
},
{"label": "Mutation Calls (Variant Call Format [VCF])", "value": "VCF"},
{"label": "Reference Mapping Stats (TXT)", "value": "MAPSTATS"},
{"label": "Raw Aggregated Input Sequences (ABI→FASTQ)", "value": "AGG_FQ"},
{"label": "Quality Score QC Figures (PNG)", "value": "QUAL"},
{
"label": "Reference-Mapped Reads Assembly (Sequence Alignment Map [SAM])",
"value": "SEQ_ALGN",
},
{
"label": "Annotation Files [V(D)J-Specific De Novo Full Plasmid Map Features] (BED format)",
"value": "ANNOT",
},
{
"label": "Access LOGGING Records / Audit Trail [Current Session] (LOG)",
"value": "LOGS",
},
{"label": "All Analysis Files", "value": "ALL"},
],
key=lambda d: d["label"],
)
downloads = [
dcc.Dropdown(
id="download-dropdown",
value=None,
options=download_options,
placeholder="—Select Output File Type—",
style={"width": "80%", "marginLeft": "10%"},
),
html.Button("Show Relevant Files", id="refresh-downloads-links", n_clicks=0),
]
components_list = [
# NOTE:
# -----BROWSER MEMORY CACHING-----
#
# Three types of storage (storage_type prop):
# 1 - memory: default, keep the data as long the page is not refreshed.
# 2 - local: keep the data until it is manually cleared.
# 3 - session: keep the data until the browser/tab closes.
#
# [ For more info on `dcc.Store` browser caching, see:
# https://dash.plot.ly/dash-core-components/store ]
dcc.Store(id="memory", storage_type="session"),
dcc.Store(id="local", storage_type="local"),
dcc.Store(id="session", storage_type="session"),
html.Div(id="back-to-top", style={"display": "hidden"}),
html.Header(
children=header,
className="menubar",
style={"width": "102%", "marginLeft": "-1%"},
),
html.P(
"Nex╬Gen Bi⌬informat🜾cs Web Apps",
style={
"lineHeight": "100%",
"color": "#00000025",
"textAlign": "left",
"marginLeft": "-1%",
"marginTop": "5px",
"marginBottom": "-1%",
},
),
html.Div(
[
html.H2(
"ℐ⌖ ℌ ℕ Collins — Bǁ◎IℕFO𓏢MA𓇰ICS",
style={
"fontSize": "0.67rem",
"letterSpacing": "30px",
"lineHeight": "2.0rem",
"fontFamily": "'Cinzel', serif",
"color": "#8b8b8b",
"marginBottom": "-1%",
"textAlign": "center",
"marginLeft": "2.5%",
},
className="ml-title",
),
html.Div(
[
html.Pre(
"ୡ ୡ ୡୡ ୡ ୡ ୡ ୡ ୡ ୡ ୡ",
style={
"color": "#d6d6d684",
"fontSize": "1.2rem",
"letterSpacing": "2px",
"marginBottom": "-0.3rem",
},
),
html.Pre(
"◖𓇃𓇃𓇃𓏣🜾𓏣𓇃𓇃𓉽𓇃𓐩𓋥⸶⸷𓋥𓐩𓇃𓋏𓇃˥⎡𓇃𓇃࿅𓇃𓊢ꃊ𓊢𓇃𓇃𓇃ⴶ〰⸅‖⸄〰ж𓇃𓇃𓏟𓏞𓇃𓇃𓇃𓇃𓋅𓆬𓆬𓋅𓇃𓇃𓇊𓊢𓇊𓇃𓉽𓇃ண⎤꜒𓇃𓇃࿑◗",
style={
"filter": "blur(.4pt)",
"color": "#584e00a8",
"fontSize": "1.2rem",
"opacity": "0.85",
"marginBottom": "4%",
"marginTop": "6px",
},
),
html.Pre(
"◖𓇃𓇃𓇃𓇃⸠⎫𓏉⎧⸡𓇃𓇃𓇃𓇃⸣⸠࿇⸡⸢𓇃𓇃𓇃𓇃⎨⎬𓇃𓇃𓇃𓉽𓋏𓉽𓇃𓇃ཥ⅌ཤ𓇃𓇃𓇃𓍰𓇃𓇃𓇃𓇃ཀཫ𓇃𓇃𓇃╗╔𓇃𓇃⦄༽⸶⸷༼⦃𓇃𓇃𓇃◗",
style={
"marginTop": "-1.5%",
"fontSize": "1.55rem",
"color": "#AEA46E",
},
),
],
style={
"letterSpacing": "-1px",
"fontFamily": "Roboto, sans-serif",
"textAlign": "center",
"overflow": "hidden",
"transform": "translate3d(0,0,0)",
},
),
html.Br(),
]
),
html.H1(
"Custom Web Apps — [dash-webapp-template]",
style={
"fontSize": "4rem",
"fontColor": "#0d04a5f5",
"lineHeight": "90%",
"letterSpacing": "-8px",
"marginTop": "-110px",
"mixBlendMode": "multiply",
"textAlign": "center",
"cursor": "pointer",
"zIndex": "1",
},
),
html.Div(id="log-in-below", style={"display": "hidden"}),
html.Div(id="workflow-selection"),
html.Span(className="fader-line-long"),
html.H4("—Automated [Name/Function of Your App]—", style={"lineHeight": "80%"}),
html.H5(
"[Tagline-esque description for what this app does.]",
className="title-description",
),
html.Br(),
# html.Div(
# [
# html.Video(
# src="../assets/animations/rotate_DNA_HD_HB.mp4",
# autoPlay=True,
# loop=True,
# controls=False,
# preload="true",
# muted=True,
# className="dna-login-animation"
# )
# ]
# ),
html.Div(
[
html.H4(
"To Begin, Sign In Below",
style={
"textAlign": "center",
"animation": "gradient-text-flow 40s infinite linear",
"mixBlendMode": "multiply",
"fontSize": "3.0rem",
# "marginTop": "15px!important",
"fontWeight": "300",
# "marginBottom": "1.1%",
},
),
html.Span(
className="fader-line-short",
style={"width": "112.5%", "marginLeft": "-6.25%"},
),
dcc.RadioItems(
options=[
{
"label": "[Team]",
"value": "XX-XXX | Name of process / workflow",
"disabled": True,
}
],
value="XX-XXX | Name of process / workflow",
labelStyle={
"display": "none",
"textAlign": "center",
"padding": "8px 20px 0px 20px",
},
id="workflow-id",
# className="workflow-css",
),
],
style={"position": "relative", "textAlign": "center"},
),
html.Div(
"Log In, Here!",
className="H7",
style={
"textAlign": "center",
"zIndex": "1000",
"mixBlendMode": "color-dodge",
"fontWeight": "600",
"marginTop": "1.3%",
"marginLeft": "-25%",
},
),
html.Div(
[
dcc.Dropdown(
id="input-user-name",
value="None",
clearable=True,
searchable=True,
options=USERS,
placeholder="—Select Your Name—",
),
html.Div(
[
html.Button("Sign In", id="sign-on-submit", n_clicks=0),
html.Button(
"Return",
id="log-back-in-submit",
className="submit-buttons",
n_clicks=0,
),
html.Button(
"Log Out",
id="log-out-submit",
n_clicks=0,
style={"paddingRight": "3%"},
),
],
style={"display": "flow-root"},
),
html.H6(
[
"Current Version: ",
html.Br(),
html.Span(
f"{version}",
style={
"animation": "anim-text-flow-keys 25s infinite linear",
"fontSize": "133%",
},
),
],
className="version-tag",
),
],
style={"marginLeft": "35%", "width": "30%", "marginBottom": "2px"},
),
# html.Br(),
html.Span(className="hr-style-2"),
# html.Br(),
html.Div(
id="user-login-confirmation",
children=updates,
style={"position": "relative", "padding": "1%"},
),
html.Br(),
html.Div(id="accumulate-output-hidden", style={"display": "none"}),
html.Hr(id="step-one"),
html.Hr(style={"width": "50%", "marginLeft": "25%"}),
html.H3(
"Step One (1/2): [Simple instruction/Action command]",
style={"textAlign": "center", "marginTop": "-10px"},
),
html.H6("(subtitle / subdescription)", style={"marginTop": "-20px"}),
html.H4(html.Div(["[Description of this tool]"])),
html.Hr(style={"width": "50%", "marginLeft": "25%"}),
html.H5(
["Some instructions of some kind (e.g., sample ID)"],
style={"textAlign": "center"},
),
html.Br(),
html.Div(
[
html.Table(
[
html.Tr(
[
html.P(
"Type to search.",
style={
"textAlign": "left",
"color": "#fff",
"mixBlendMode": "darken",
},
),
html.H6(
"ℹ | Clear selections before subsequent submissions.",
style={
"textAlign": "left",
"color": "#fff",
"marginTop": "-4px",
},
),
dcc.Dropdown(
id="dd1-dropdown",
value="None",
clearable=True,
searchable=True,
options=[
{
"label": "—🔍⤑Select by Schema Name/ID—",
"value": "None",
}
]
+ [
{
"label": f" 📃 : —{name}🔑{ent_id}— ",
"value": f"{ent_id}-{name}",
}
for (name, ent_id) in sorted(
zip(entity_schemas.index, entity_schemas.id),
key=lambda x: x[0], # reverse=True
)
],
style={
"textAlign": "center",
"backgroundColor": "rgba(0,0,0,0.25)",
"zIndex": "3005",
"color": "rgb(255, 255, 255)",
},
placeholder="—🔍⤑Search all Schemas—",
),
]
),
html.Tr(
[
dcc.Dropdown(
id="dd2-dropdown",
clearable=True,
searchable=True,
options=[{"label": "—Select Field—", "value": "None"}],
style={
"textAlign": "center",
"backgroundColor": "rgba(0,0,0,0.25)",
"zIndex": "3000",
"color": "#00ffb8",
"position": "relative!important",
},
placeholder="—Select Entity Field—",
)
]
),
html.Tr(
[
html.Br(),
html.Div(
[
html.Button(
"Submit", id="submit-selected-dds", n_clicks=0
),
html.Button(
"Clear Selections",
id="clear-dd-selections",
n_clicks=0,
),
],
style={
"textAlign": "center",
"marginLeft": "10%",
"width": "80%",
},
),
]
),
],
style={
"marginLeft": "50%",
"transform": "translateX(-50%)",
"width": "50%",
},
)
],
id="crispr",
),
html.Br(),
html.Span(id="fader-line-short"),
html.Br(),
html.Br(),
html.Div(id="submission-status"),
html.Br(),
html.Div(id="dd-selections", style={"textAlign": "left"}),
html.Br(),
html.Hr(),
html.Br(id="step-two"),
html.Hr(style={"width": "50%", "marginLeft": "25%"}),
html.H3(
"Step Two (2/2): Upload [insert expected file types (e.g., clinical data .xlsx files)].",
style={"textAlign": "center", "marginTop": "-10px"},
),
html.H2(
"Click “Initiate Pipeline” to launch analysis.", style={"marginTop": "-0.75%"}
),
html.H6('Uploading from multiple directories? Or after reset? ⮊ Click "✥ Append"'),
html.Hr(style={"width": "50%", "marginLeft": "25%"}),
dcc.Upload(
id="upload-data",
children=html.Div(
[
"Drag/Drop ⤓ file(s) here ",
html.Spacer(),
" —or—",
html.A(
"📂 Select from your computer",
className="hvr-float-shadow",
style={"fontWeight": "400", "marginBottom": "5px"},
),
html.H6(
"(Other Info/Note)",
style={
"textAlign": "center",
"letterSpacing": "1px",
"fontFamily": "'Cinzel', serif",
"fontSize": "70%",
},
),
html.H6(
"(...Note details / description...)",
# NOTE - Overview of file formats
# ------------------------------------
# "FASTQ, C[/T]SV (Comma[/Tab]-Separated Values), "
# "XLS[X] (Excel), B[/S]AM (NGS alignments), "
# "VCF (Variant Call Format mutation/SN[P/V] files), "
# "BED ([track-based] annotations)",
style={
"textAlign": "center",
"letterSpacing": "4px",
"fontFamily": "'Cinzel', serif",
"marginTop": "-8px",
},
),
html.Span(
html.H5(
"(Optional supplementary message...)",
style={
"width": "45%",
"marginLeft": "27.5%",
"fontSize": "80%",
},
)
),
],
style={
"borderWidth": "1px",
"borderStyle": "solid",
"borderRadius": "100px",
"textAlign": "center",
"margin": "2% 15%",
"boxShadow": "0px 1px 5px 2px rgba(0, 0, 50, 0.16)",
"borderColor": "transparent",
"padding": "0.5%",
"backgroundColor": "rgba(255,255,255,0.5)",
},
),
multiple=True, # (Allow multiple files to be uploaded)
),
html.Br(),
html.Button(
"✥ Append", id="append-uploads", className="refresh-files-button", n_clicks=0
), # (📁+📁...)
html.Button(
"(↻ Refresh Uploads List)",
id="refresh-uploads",
className="refresh-files-button",
n_clicks=0,
),
html.Button(
"❌Reset Uploads",
id="clear-uploads",
className="refresh-files-button",
n_clicks=0,
),
html.Br(),
#####################
#### NOTE: ####
## PIPELINE ##
### O U T P U T ###
##### INSERTS #####
###### HERE: ######
html.Div(id="received-upload"),
html.Div(id="saved-reports"),
html.Div(id="output-data-upload"),
#####################
html.Div(
[
html.Button(
"—Initiate Pipeline—",
id="initiate-pipeline",
n_clicks=0,
className="hvr-float-shadow",
),
html.Br(),
html.Br(),
html.H6(
"⮩[Est.] Required Execution Time ≤ ≈ X s (per input sample)",
style={"textAlign": "center", "color": "unset"},
),
html.Br(),
],
style={"textAlign": "center", "position": "relative"},
),
html.H5(
html.Div(
[
html.Blockquote(" ⚠ Cautionary notes / hints / advice / warnings - #1"),
html.Blockquote(" ⚠ Cautionary notes / hints / advice / warnings - #2"),
]
),
style={
"fontSize": "0.85rem",
"color": "#00000080",
"width": "55%",
"marginLeft": "22.5%",
"textAlign": "justify",
},
id="pipeline-progress-disclaimer",
),
html.Br(),
html.Button("—Clear Current QC Results Output—", id="clear-pipeline", n_clicks=0),
html.Br(),
html.Br(),
html.Div(
[
html.Span(
"ℹ| Hint: Check the log! It may be very informative...",
className="notes",
),
html.Br(),
html.Span(
"(⮩️🚪: Refresh [by selecting] the 'Download Output Files'⤑'Log Files' option below for live updates of all ⌁backend⌁ app execution activity.)",
className="notes",
style={"fontSize": "0.75rem", "color": "gray"},
),
],
style={"width": "33.3%", "marginLeft": "33.3%"},
),
html.Br(),
html.Div(
[
dcc.Input(
placeholder="–Enter a Previous RUN ID–",
type="text",
value="",
id="save-results-as",
disabled=True,
),
html.Div(
[
html.Span(
html.Button(
"Gather Output from Saved History",
id="save-user-results",
n_clicks=0,
disabled=True,
)
),
html.Span(
html.Button(
"(↻ 📖 Show Preview)",
id="refresh-user-history",
n_clicks=0,
disabled=True,
)
),
]
),
],
style={"display": "none"},
),
html.Hr(id="download-links"),
html.Br(),
html.Div(dash_table.DataTable(data=[{}]), style={"display": "none"}),
html.H1("Download Output Files ⬇💻"),
html.Br(),
html.Span(className="fader-line-short", style={"marginBottom": "-36px"}),
html.H4("""Choose from the listed file types to limit downloads (or view all)"""),
html.P("E.g., Select LOG to view the audit trail of your current session."),
html.Div(
downloads,
style={"width": "60%", "marginLeft": "20%"},
className="dash-custom-btn",
),
html.Br(),
html.H3("Output File(s) Download Links", style={"textAlign": "left"}),
html.Ul(id="output-file-list", style={"textAlign": "left"}),
html.Ul(id="download-all", style={"textAlign": "left"}),
html.Button(
["Download All"],
id="request-all-zipped",
n_clicks=0,
style={"fontFamily": "Roboto"},
),
html.Hr(),
html.Br(),
html.P(
f"\n\nJohn Collins | Bioinformatics\t{CURRENT_YEAR}\n",
className="copyright",
style={
"fontSize": "1.1rem",
"letterSpacing": "10px",
"lineHeight": "2.0rem",
"fontFamily": "'Cinzel', serif",
"color": "#003b51",
"marginBottom": "4.2rem",
"textAlign": "center",
},
),
html.Img(src=app_logo, style={"width": "12.5%", "mixBlendMode": "screen"}),
html.Br(),
html.Br(),
html.Hr(),
html.Br(),
]
children = [html.Div(components_list, style={"width": "98%", "marginLeft": "1%"})]
| 36.150524
| 162
| 0.381694
|
7ff648ee09a0afb4b86bc1fe1c44604e90bd16ed
| 6,783
|
py
|
Python
|
plugin.video.plexodusplayer/resources/lib/meta/play/players.py
|
MR-Unknown-Cm/addons
|
8df1ebe58c95620bb02a05dbae7bf37954915cbd
|
[
"Apache-2.0"
] | 1
|
2020-03-03T10:01:21.000Z
|
2020-03-03T10:01:21.000Z
|
plugin.video.plexodusplayer/resources/lib/meta/play/players.py
|
MR-Unknown-Cm/addons
|
8df1ebe58c95620bb02a05dbae7bf37954915cbd
|
[
"Apache-2.0"
] | null | null | null |
plugin.video.plexodusplayer/resources/lib/meta/play/players.py
|
MR-Unknown-Cm/addons
|
8df1ebe58c95620bb02a05dbae7bf37954915cbd
|
[
"Apache-2.0"
] | null | null | null |
import re
import json
from xbmcswift2 import xbmc, xbmcgui, xbmcvfs
from meta import plugin
from meta.gui import dialogs
from meta.utils.text import to_unicode
from settings import SETTING_AUTOPATCH, SETTING_AUTOPATCHES
from language import get_string as _
EXTENSION = ".boneplayer.json"
HTML_TAGS_REGEX = re.compile(r'\[/?(?:color|b|i|u).*?\]', re.I|re.UNICODE)
class AddonPlayer(object):
def __init__(self, filename, media, meta):
self.media = media
self.title = meta["name"]
self.id = meta.get("id", filename.replace(".boneplayer.json", ""))
self.clean_title = HTML_TAGS_REGEX.sub('', self.title)
self.repoid = meta.get("repository")
self.pluginid = meta.get("plugin")
self.order = meta.get("priority") or 1000
self.filters = meta.get("filters", {})
self.commands = meta.get(media, [])
self._postprocess = meta.get("postprocess")
def postprocess(self, link):
code = self._postprocess
if not code or not isinstance(code, basestring) or "__" in code:
return link
link = eval(code, {"__builtins__": {}, "link": link})
return link
def is_empty(self):
if self.pluginid and not xbmc.getCondVisibility('System.HasAddon(%s)' % self.pluginid):
return True
return not bool(self.commands)
def get_players(media, filters = {}):
assert media in ("tvshows", "movies", "musicvideos", "music", "live")
players = []
players_path = "special://profile/addon_data/{0}/players/".format(plugin.id)
files = [x for x in xbmcvfs.listdir(players_path)[1] if x.endswith(EXTENSION)]
for file in files:
path = players_path + file
try:
f = xbmcvfs.File(path)
try:
content = f.read()
meta = json.loads(content)
finally:
f.close()
player = AddonPlayer(file, media, meta)
if not player.is_empty():
players.append(player)
except Exception, e:
plugin.log.error(repr(e))
msg = "player %s is invalid" % file
xbmcgui.Dialog().ok('Invalid player', msg)
raise
return sort_players(players, filters)
def sort_players(players, filters = {}):
result = []
for player in players:
filtered = False
checked = False
for filter_key, filter_value in filters.items():
value = player.filters.get(filter_key)
if value:
checked = True
if to_unicode(value) != to_unicode(filter_value):
filtered = True
if not filtered:
needs_browsing = False
for command_group in player.commands:
for command in command_group:
if command.get('steps'):
needs_browsing = True
break
result.append((not checked, needs_browsing, player.order, player.clean_title.lower(), player))
result.sort()
return [x[-1] for x in result]
def get_needed_langs(players):
languages = set()
for player in players:
for command_group in player.commands:
for command in command_group:
command_lang = command.get("language", "en")
languages.add(command_lang)
return languages
ADDON_SELECTOR = AddonPlayer("selector", "any", meta={"name": "Selector"})
ADDON_CONTEXT = AddonPlayer("context", "any", meta={"name": "Context"})
ADDON_DEFAULT = AddonPlayer("default", "any", meta={"name": "Default"})
@plugin.route('/patch/<mode>', options = {"mode": "all"})
def patch(mode):
import xbmcaddon
adir = "special://home/addons/"
AUTOS = eval(plugin.get_setting(SETTING_AUTOPATCHES, unicode))
# try: AUTOS = plugin.get_setting(SETTING_AUTOPATCHES, unicode)
# except: AUTOS = [[], [], [], []]
# return [p for p in get_players() if p.id in AUTOS]
# xbmc.log("QQQQQ AUTOS = {0}".format(str(AUTOS)), xbmc.LOGNOTICE)
INSTALLED = [i for i in xbmcvfs.listdir(adir)[0]]
PATCHES = [[], ["resources/lib/modules/control.py", "pass", "sys.exit()"], ["default.py", "", "\n cool_down_active = kodi.get_setting('cool_down') == 'true'\n if not salts_utils.is_salts() or cool_down_active:\n kodi.notify(msg=i18n('playback_limited'))\n return False"], ["lib/dudehere/routines/scrapers/__init__.py", "", "\n\t\tif self._caller not in ALLOWED_CALLERS and self._caller: \n\t\t\tplugin.log('Caller not allowed')\n\t\t\tplugin.raise_error('Violation', 'This addon is not allowed.', 'Please do not use %s with %s' % (self._caller, ADDON_NAME))\n\t\t\tif return_sources:\n\t\t\t\treturn [], [], []\n\t\t\telse:\n\t\t\t\treturn []"]]
if mode == "auto":
if AUTOS != [[], [], [], []]:
ADDONS = AUTOS
else:
if dialogs.yesno('{0}: Patch'.format(plugin.name), '{0}.[CR]{1} & {2}'.format(_("%s not found") % 'Auto-patches', _("Enable"), _("Continue?"))): return patch("all")
else:
plugin.set_setting(SETTING_AUTOPATCH, "false")
return
else:
ADDONS = [[], [i for i in INSTALLED if i.startswith("plugin.video.") and xbmcvfs.exists("{0}{1}/{2}".format(adir, i, PATCHES[1][0]))], [i for i in INSTALLED if i.startswith("plugin.video.") and xbmcvfs.exists("{0}{1}/{2}".format(adir, i, PATCHES[2][0]))], [i for i in INSTALLED if i.startswith("script.module.") and xbmcvfs.exists("{0}{1}/{2}".format(adir, i, PATCHES[3][0]))]]
count = 0
for i in range(1, len(ADDONS)):
for a in ADDONS[i]:
count = count + 1
b = "{0}{1}/{2}".format(adir, a, PATCHES[i][0])
c = xbmcvfs.File(b)
d = c.read()
c.close()
if PATCHES[i][2] in d:
ADDON = xbmcaddon.Addon(a)
if mode == "auto" or dialogs.yesno('{0}: Patch "{1}"?'.format(plugin.name, ADDON.getAddonInfo("name")), '"{0}" {1} block-code.[CR]{2}'.format(ADDON.getAddonInfo("name"), _("contains"), _("Would you like to remove it from the library?").replace(_("Library").lower(), _("Add-on").lower()))):
h = xbmcvfs.File(b, 'w')
d = d.replace(PATCHES[i][2], PATCHES[i][1])
result = h.write(d)
h.close()
if mode != "auto" and dialogs.yesno("{0}: {1} Patch?".format(plugin.name, _("Auto")), '"{0}"[CR]{1} {2} re-patching?'.format(ADDON.getAddonInfo("name"), _("Enable"), _("Auto").lower())):
if ADDON.getAddonInfo("id") not in AUTOS[i]: AUTOS[i].append(ADDON.getAddonInfo("id"))
if AUTOS != [[], [], [], []] and AUTOS != ADDONS:
plugin.set_setting(SETTING_AUTOPATCHES, AUTOS)
| 49.875
| 669
| 0.58396
|
aba9409ccc6a2089f70481ca6d8f766e131d1e3d
| 27,496
|
py
|
Python
|
HaiQuan/Api/Cloud/Registry/Registry.py
|
haiquan5396/K59_training
|
bc6a029aae54b28a8060e7c66747b40b5398d750
|
[
"MIT"
] | null | null | null |
HaiQuan/Api/Cloud/Registry/Registry.py
|
haiquan5396/K59_training
|
bc6a029aae54b28a8060e7c66747b40b5398d750
|
[
"MIT"
] | 8
|
2017-04-08T07:44:50.000Z
|
2017-09-05T12:26:19.000Z
|
HaiQuan/Api/Cloud/Registry/Registry.py
|
haiquan5396/K59_training
|
bc6a029aae54b28a8060e7c66747b40b5398d750
|
[
"MIT"
] | 8
|
2017-04-04T15:52:29.000Z
|
2018-05-13T07:45:38.000Z
|
import json
import uuid
import time
import threading
from mysql.connector.pooling import MySQLConnectionPool
from kombu import Producer, Connection, Consumer, exceptions, Exchange, Queue
from kombu.utils.compat import nested
import sys
class Registry():
def __init__(self, broker_cloud, mode, db_config, time_inactive_platform, time_update_conf, time_check_platform_active):
self.time_update_conf = time_update_conf
self.time_check_platform_active = time_check_platform_active
self.time_inactive_platform = time_inactive_platform
self.cnxpool = MySQLConnectionPool(pool_name="mypool", pool_size=32, **db_config)
self.mode = mode
self.producer_connection = Connection(broker_cloud)
self.consumer_connection = Connection(broker_cloud)
self.exchange = Exchange("IoT", type="direct")
def update_config_changes_by_platform_id(self, platform_id):
message = {
'reply_to': 'driver.response.registry.api_check_configuration_changes',
'platform_id': platform_id
}
# send request to Driver
queue = Queue(name='driver.request.api_check_configuration_changes', exchange=self.exchange,
routing_key='driver.request.api_check_configuration_changes')
routing_key = 'driver.request.api_check_configuration_changes'
self.producer_connection.ensure_connection()
with Producer(self.producer_connection) as producer:
producer.publish(
json.dumps(message),
exchange=self.exchange.name,
routing_key=routing_key,
declare=[queue],
retry=True
)
def check_platform_active(self):
# print("Check active platform")
list_platforms = self.get_list_platforms("active")
for platform in list_platforms:
if (time.time() - platform['last_response']) > self.time_inactive_platform:
# print("Mark inactive platform: {}".format(platform['platform_id']))
self.mark_inactive(str(platform['platform_id']))
self.send_notification_to_collector()
threading.Timer(self.time_check_platform_active, self.check_platform_active).start()
def update_changes_to_db(self, new_info, platform_id):
# print("Update change of {} to database".format(platform_id))
now_info = self.get_things_by_platform_id(platform_id, "all", "all")
inactive_things = now_info[:]
new_things = new_info[:]
cnx_1 = self.get_connection_to_db()
cursor_1 = cnx_1.cursor()
for now_thing in now_info:
for new_thing in new_info:
if now_thing["thing_global_id"] == new_thing["thing_global_id"]:
if (now_thing['thing_name'] != new_thing['thing_name'] \
or now_thing['thing_type'] != new_thing['thing_type'] \
or now_thing['location'] != new_thing['location']):
cursor_1.execute(
"""UPDATE Thing SET thing_name=%s, thing_type=%s, location=%s, thing_status=%s WHERE thing_global_id=%s""",
(new_thing["thing_name"], new_thing["thing_type"], new_thing["location"], 'active',
now_thing["thing_global_id"]))
if now_thing['thing_status'] == 'inactive':
cursor_1.execute("""UPDATE Thing SET thing_status=%s WHERE thing_global_id=%s""",
('active', now_thing["thing_global_id"]))
inactive_items = now_thing["items"][:]
new_items = new_thing['items'][:]
for now_item in now_thing["items"]:
for new_item in new_thing["items"]:
if now_item["item_global_id"] == new_item["item_global_id"]:
if (now_item["item_name"] != new_item["item_name"] or
now_item["item_type"] != new_item["item_type"] or
now_item['can_set_state'] != new_item['can_set_state']):
cursor_1.execute(
"""UPDATE Item SET item_name=%s, item_type=%s, can_set_state=%s WHERE item_global_id=%s""",
(new_item["item_name"], new_item["item_type"], new_item["can_set_state"],
now_item['item_global_id']))
if now_item['item_status'] == 'inactive':
cursor_1.execute("""UPDATE Item SET item_status=%s WHERE item_global_id=%s""",
('active', now_item['item_global_id']))
inactive_items.remove(now_item)
new_items.remove(new_item)
break
if len(inactive_items) != 0:
# Item inactive
# print("Item inactive")
for item_inactive in inactive_items:
cursor_1.execute("""UPDATE Item SET item_status=%s WHERE item_global_id=%s""",
("inactive", item_inactive['item_global_id']))
if len(new_items) != 0:
# print("New Item ")
for item in new_items:
cursor_1.execute("""INSERT INTO Item VALUES (%s,%s,%s,%s,%s,%s,%s)""",
(item['item_global_id'], new_thing['thing_global_id'], item['item_name'],
item['item_type'], item['item_local_id'], item['can_set_state'],
"active"))
inactive_things.remove(now_thing)
new_things.remove(new_thing)
break
if len(inactive_things) != 0:
# Thing inactive
# print("Thing inactive")
for thing_inactive in inactive_things:
cursor_1.execute("""UPDATE Thing SET thing_status=%s WHERE thing_global_id=%s""",
("inactive", thing_inactive['thing_global_id']))
for item_inactive in thing_inactive['items']:
cursor_1.execute("""UPDATE Item SET item_status=%s WHERE item_global_id=%s""",
("inactive", item_inactive['item_global_id']))
if len(new_things) != 0:
# New things
# print("New Thing")
for thing in new_things:
cursor_1.execute("""INSERT INTO Thing VALUES (%s,%s,%s,%s,%s,%s,%s)""",
(thing['thing_global_id'], platform_id, thing['thing_name'],
thing['thing_type'], thing['thing_local_id'], thing['location'], "active"))
# print('Updated Things')
for item in thing['items']:
# print("{}".format(item['item_global_id']))
cursor_1.execute("""INSERT INTO Item VALUES (%s,%s,%s,%s,%s,%s,%s)""",
(item['item_global_id'], thing['thing_global_id'], item['item_name'],
item['item_type'], item['item_local_id'], item['can_set_state'], "active"))
# print('Updated Items')
cnx_1.commit()
cursor_1.close()
cnx_1.close()
def get_things_by_platform_id(self, platform_id, thing_status, item_status):
# print("Get things in platform_id: {}".format(platform_id))
things_in_system = self.get_things(thing_status, item_status)
things_in_platform = []
for thing in things_in_system:
if thing['platform_id'] == platform_id:
things_in_platform.append(thing)
return things_in_platform
def handle_configuration_changes(self, body, message):
cnx_2 = self.get_connection_to_db()
cursor_2 = cnx_2.cursor()
body = json.loads(body)
platform_id = body['platform_id']
if body['have_change'] == False:
# print('Platform have Id: {} no changes'.format(platform_id))
cursor_2.execute("""SELECT platform_status FROM Platform WHERE platform_id=%s""",
(str(platform_id),))
platform_status = cursor_2.fetchone()[0]
if platform_status == 'active':
pass
else:
new_info = body['new_info']
self.update_changes_to_db(new_info, platform_id)
self.send_notification_to_collector()
else:
            print('Platform with id {} changed the configuration file'.format(platform_id))
new_info = body['new_info']
self.update_changes_to_db(new_info, platform_id)
#Update last_response and status of platform
cursor_2.execute("""UPDATE Platform SET last_response=%s, platform_status=%s WHERE platform_id=%s""",
(time.time(), 'active', platform_id))
cnx_2.commit()
cursor_2.close()
cnx_2.close()
def mark_inactive(self, platform_id):
print('Mark Thing and Item inactive')
cnx_1 = self.get_connection_to_db()
cursor_1 = cnx_1.cursor()
cursor_1.execute("""SELECT thing_global_id FROM Thing WHERE platform_id = %s""", (str(platform_id),))
list_thing_global_id = cursor_1.fetchall()
# print('List_thing {}'.format(list_thing_global_id))
for thing_global_id in list_thing_global_id:
# print(thing_global_id[0])
            # thing_global_id[0] extracts the value; thing_global_id itself is still a row tuple.
            # e.g.: ('d32d30b4-8917-4eb1-a273-17f7f440b240/sensor.humidity',)
cursor_1.execute("""UPDATE Item SET item_status=%s WHERE thing_global_id=%s""",
("inactive", str(thing_global_id[0])))
cnx_1.commit()
cursor_1.execute("""UPDATE Thing SET thing_status=%s WHERE platform_id=%s""", ("inactive", str(platform_id)))
cursor_1.execute("""UPDATE Platform SET platform_status=%s WHERE platform_id=%s""",
("inactive", str(platform_id)))
cnx_1.commit()
cursor_1.close()
cnx_1.close()
def update_all_config_changes(self):
# print('Run Update All Configuration Changes')
list_platforms = self.get_list_platforms("active")
for platform in list_platforms:
self.update_config_changes_by_platform_id(platform['platform_id'])
threading.Timer(self.time_update_conf, self.update_all_config_changes).start()
def get_list_platforms(self, platform_status):
# print('Get list platforms')
list_platforms = []
cnx_1 = self.get_connection_to_db()
cursor_1 = cnx_1.cursor()
if platform_status == "active":
cursor_1.execute("""SELECT platform_id, platform_name, host, port, last_response, platform_status
FROM Platform WHERE platform_status='active'""")
elif platform_status == "inactive":
cursor_1.execute("""SELECT platform_id, platform_name, host, port, last_response, platform_status
FROM Platform WHERE platform_status='inactive'""")
elif platform_status == "all":
cursor_1.execute("""SELECT platform_id, platform_name, host, port, last_response, platform_status
FROM Platform""")
else:
return list_platforms
rows = cursor_1.fetchall()
for row in rows:
list_platforms.append({
"platform_id": row[0],
"platform_name": row[1],
"host": row[2],
"port": row[3],
"last_response": row[4],
"platform_status": row[5]
})
# print(list_platforms)
cursor_1.close()
cnx_1.close()
return list_platforms
def api_get_list_platforms(self, body, message):
print("API get list platform with platform_status")
platform_status = json.loads(body)['platform_status']
reply_to = json.loads(body)['reply_to']
message_response = {
"list_platforms": self.get_list_platforms(platform_status)
}
self.producer_connection.ensure_connection()
with Producer(self.producer_connection) as producer:
producer.publish(
json.dumps(message_response),
exchange=self.exchange.name,
routing_key=reply_to,
retry=True
)
def api_add_platform(self, body, message):
body = json.loads(body)
host = body['host']
port = body['port']
platform_name = body['platform_name']
cnx_1 = self.get_connection_to_db()
cursor_1 = cnx_1.cursor()
if "platform_id" in body:
platform_id = body['platform_id']
print("Platform {} have id: {} come back to system".format(platform_name, platform_id))
cursor_1.execute("""UPDATE Platform SET platform_status=%s, last_response=%s WHERE platform_id=%s""",
('active', time.time(), platform_id))
else:
platform_id = str(uuid.uuid4())
            print('Adding {} with address {}:{} to the system'.format(platform_name, host, port))
            print('Generated id for this platform:', platform_id)
cursor_1.execute("""INSERT INTO Platform VALUES (%s,%s,%s,%s,%s,%s)""",
(platform_id, platform_name, host, port, time.time(), "active"))
message_response = {
'platform_id': platform_id,
'host': host,
'port': port,
'platform_name': platform_name
}
# check connection and publish message
queue_response = Queue(name='registry.response.driver.api_add_platform', exchange=self.exchange,
routing_key='registry.response.driver.api_add_platform')
routing_key = 'registry.response.driver.api_add_platform'
self.producer_connection.ensure_connection()
with Producer(self.producer_connection) as producer:
producer.publish(
json.dumps(message_response),
exchange=self.exchange.name,
routing_key=routing_key,
declare=[queue_response],
retry=True
)
cnx_1.commit()
cursor_1.close()
cnx_1.close()
self.send_notification_to_collector()
def get_things(self, thing_status, item_status):
cnx_1 = self.get_connection_to_db()
cursor_1 = cnx_1.cursor()
query_thing = ""
query_item = ""
if thing_status == 'active':
query_thing = """SELECT Thing.platform_id, Thing.thing_global_id, Thing.thing_name,
Thing.thing_type, Thing.location, Thing.thing_local_id, Thing.thing_status
FROM Thing
WHERE Thing.thing_status = 'active'; """
elif thing_status == 'inactive':
query_thing = """SELECT Thing.platform_id, Thing.thing_global_id, Thing.thing_name,
Thing.thing_type, Thing.location, Thing.thing_local_id, Thing.thing_status
FROM Thing
WHERE Thing.thing_status = 'inactive'; """
elif thing_status == 'all':
query_thing = """SELECT Thing.platform_id, Thing.thing_global_id, Thing.thing_name,
Thing.thing_type, Thing.location, Thing.thing_local_id, Thing.thing_status
FROM Thing;"""
if item_status == 'active':
query_item = """SELECT Item.thing_global_id, Item.item_global_id, Item.item_name,
Item.item_type, Item.can_set_state, Item.item_local_id, Item.item_status
FROM Item
WHERE Item.item_status='active';"""
elif item_status == 'inactive':
query_item = """SELECT Item.thing_global_id, Item.item_global_id, Item.item_name,
Item.item_type, Item.can_set_state, Item.item_local_id, Item.item_status
FROM Item
WHERE Item.item_status='inactive';"""
elif item_status == 'all':
query_item = """SELECT Item.thing_global_id, Item.item_global_id, Item.item_name,
Item.item_type, Item.can_set_state, Item.item_local_id, Item.item_status
FROM Item;"""
cursor_1.execute(query_thing)
thing_rows = cursor_1.fetchall()
cursor_1.execute(query_item)
item_rows = cursor_1.fetchall()
cursor_1.close()
cnx_1.close()
things = []
for thing in thing_rows:
temp_thing = {
'platform_id': thing[0],
'thing_global_id': thing[1],
'thing_name': thing[2],
'thing_type': thing[3],
'location': thing[4],
'thing_local_id': thing[5],
'thing_status': thing[6],
'items': []
}
for item in item_rows:
if item[0] == thing[1]:
temp_item = {
'item_global_id': item[1],
'item_name': item[2],
'item_type': item[3],
'can_set_state': item[4],
'item_local_id': item[5],
'item_status': item[6]
}
temp_thing['items'].append(temp_item)
things.append(temp_thing)
return things
def get_thing_by_global_id(self, thing_global_id):
cnx_1 = self.get_connection_to_db()
cursor_1 = cnx_1.cursor()
cursor_1.execute("""SELECT Thing.platform_id, Thing.thing_global_id, Thing.thing_name,
Thing.thing_type, Thing.location, Thing.thing_local_id, Thing.thing_status
FROM Thing
WHERE Thing.thing_global_id=%s; """, (thing_global_id,))
thing_rows = cursor_1.fetchall()
cursor_1.execute("""SELECT Item.thing_global_id, Item.item_global_id, Item.item_name,
Item.item_type, Item.can_set_state, Item.item_local_id, Item.item_status
FROM Item
WHERE Item.thing_global_id=%s;""", (thing_global_id,))
item_rows = cursor_1.fetchall()
cursor_1.close()
cnx_1.close()
things = []
for thing in thing_rows:
temp_thing = {
'platform_id': thing[0],
'thing_global_id': thing[1],
'thing_name': thing[2],
'thing_type': thing[3],
'location': thing[4],
'thing_local_id': thing[5],
'thing_status': thing[6],
'items': []
}
for item in item_rows:
if item[0] == thing[1]:
temp_item = {
'item_global_id': item[1],
'item_name': item[2],
'item_type': item[3],
'can_set_state': item[4],
'item_local_id': item[5],
'item_status': item[6]
}
temp_thing['items'].append(temp_item)
things.append(temp_thing)
return things
def api_get_things(self, body, message):
print('API Get All Things')
reply_to = json.loads(body)['reply_to']
thing_status = json.loads(body)['thing_status']
item_status = json.loads(body)['item_status']
things = self.get_things(thing_status, item_status)
message_response = {
'things': things
}
self.producer_connection.ensure_connection()
with Producer(self.producer_connection) as producer:
producer.publish(
json.dumps(message_response),
exchange=self.exchange.name,
routing_key=reply_to,
retry=True
)
def api_get_thing_by_global_id(self, body, message):
print('API Get Thing by thing_global_id')
reply_to = json.loads(body)['reply_to']
thing_global_id = json.loads(body)['thing_global_id']
things = self.get_thing_by_global_id(thing_global_id)
message_response = {
'things': things
}
self.producer_connection.ensure_connection()
with Producer(self.producer_connection) as producer:
producer.publish(
json.dumps(message_response),
exchange=self.exchange.name,
routing_key=reply_to,
retry=True
)
def api_get_things_by_platform_id(self, body, message):
print('Get Thing by platform_id')
reply_to = json.loads(body)['reply_to']
platform_id = json.loads(body)['platform_id']
thing_status = json.loads(body)['thing_status']
item_status = json.loads(body)['item_status']
things = self.get_things_by_platform_id(platform_id, thing_status, item_status)
message_response = {
'things': things
}
self.producer_connection.ensure_connection()
with Producer(self.producer_connection) as producer:
producer.publish(
json.dumps(message_response),
exchange=self.exchange.name,
routing_key=reply_to,
retry=True
)
def get_connection_to_db(self):
while True:
try:
# print("Get connection DB")
connection = self.cnxpool.get_connection()
return connection
except:
# print("Can't get connection DB")
pass
def send_notification_to_collector(self):
print('Send notification to Collector')
message = {
'notification': 'Have Platform_id change'
}
queue = Queue(name='collector.request.notification', exchange=self.exchange,
routing_key='collector.request.notification')
routing_key = 'collector.request.notification'
self.producer_connection.ensure_connection()
with Producer(self.producer_connection) as producer:
producer.publish(
json.dumps(message),
exchange=self.exchange.name,
routing_key=routing_key,
declare=[queue],
retry=True
)
def run(self):
queue_get_things = Queue(name='registry.request.api_get_things', exchange=self.exchange,
routing_key='registry.request.api_get_things')
queue_get_list_platforms = Queue(name='registry.request.api_get_list_platforms', exchange=self.exchange,
routing_key='registry.request.api_get_list_platforms')
queue_add_platform = Queue(name='registry.request.api_add_platform', exchange=self.exchange,
routing_key='registry.request.api_add_platform')
queue_check_config = Queue(name='driver.response.registry.api_check_configuration_changes', exchange=self.exchange,
routing_key='driver.response.registry.api_check_configuration_changes')
queue_get_thing_by_global_id = Queue(name='registry.request.api_get_thing_by_global_id', exchange=self.exchange,
routing_key='registry.request.api_get_thing_by_global_id')
queue_get_things_by_platform_id = Queue(name='registry.request.api_get_things_by_platform_id',
exchange=self.exchange,
routing_key='registry.request.api_get_things_by_platform_id')
if self.mode == 'PULL':
self.update_all_config_changes()
self.check_platform_active()
while 1:
try:
self.consumer_connection.ensure_connection(max_retries=1)
with nested(Consumer(self.consumer_connection, queues=queue_get_things_by_platform_id,
callbacks=[self.api_get_things_by_platform_id], no_ack=True),
Consumer(self.consumer_connection, queues=queue_get_thing_by_global_id,
callbacks=[self.api_get_thing_by_global_id], no_ack=True),
Consumer(self.consumer_connection, queues=queue_add_platform, callbacks=[self.api_add_platform],
no_ack=True),
Consumer(self.consumer_connection, queues=queue_get_things, callbacks=[self.api_get_things],
no_ack=True),
Consumer(self.consumer_connection, queues=queue_get_list_platforms,
callbacks=[self.api_get_list_platforms], no_ack=True),
Consumer(self.consumer_connection, queues=queue_check_config,
callbacks=[self.handle_configuration_changes], no_ack=True)):
while True:
self.consumer_connection.drain_events()
except (ConnectionRefusedError, exceptions.OperationalError):
print('Connection lost')
except self.consumer_connection.connection_errors:
print('Connection error')
if __name__ == '__main__':
MODE_CODE = 'Develop'
# MODE_CODE = 'Deploy'
if MODE_CODE == 'Develop':
BROKER_CLOUD = 'localhost' # rabbitmq
MODE = 'PULL' # or PUSH or PULL
dbconfig = {
"database": "Registry_DB",
"user": "root",
"host": '0.0.0.0',
"passwd": "root",
"autocommit": "True"
}
TIME_INACTIVE_PLATFORM = 60 # Time when platform is marked inactive
TIME_UPDATE_CONF = 2 # Time when registry send request update conf to Driver
TIME_CHECK_PLATFORM_ACTIVE = 2 # Time when check active_platform in system
else:
BROKER_CLOUD = sys.argv[1] #rabbitmq
MODE = sys.argv[2] # or PUSH or PULL
dbconfig = {
"database": "Registry_DB",
"user": "root",
"host": sys.argv[3],
"passwd": "root",
"autocommit": "True"
}
        TIME_INACTIVE_PLATFORM = int(sys.argv[4])  # argv values are strings; cast so the time comparisons and Timer intervals work
        TIME_UPDATE_CONF = int(sys.argv[5])
        TIME_CHECK_PLATFORM_ACTIVE = int(sys.argv[6])
registry = Registry(BROKER_CLOUD, MODE, dbconfig, TIME_INACTIVE_PLATFORM, TIME_UPDATE_CONF, TIME_CHECK_PLATFORM_ACTIVE)
registry.run()
| 46.445946
| 136
| 0.55899
|
cbe424ea2a15ea1bc4e5a6bd0bce63a54c68f21c
| 979
|
py
|
Python
|
RecoParticleFlow/PFProducer/python/particleFlow_cff.py
|
Purva-Chaudhari/cmssw
|
32e5cbfe54c4d809d60022586cf200b7c3020bcf
|
[
"Apache-2.0"
] | 13
|
2015-11-30T15:49:45.000Z
|
2022-02-08T16:11:30.000Z
|
RecoParticleFlow/PFProducer/python/particleFlow_cff.py
|
Purva-Chaudhari/cmssw
|
32e5cbfe54c4d809d60022586cf200b7c3020bcf
|
[
"Apache-2.0"
] | 640
|
2015-02-11T18:55:47.000Z
|
2022-03-31T14:12:23.000Z
|
RecoParticleFlow/PFProducer/python/particleFlow_cff.py
|
Purva-Chaudhari/cmssw
|
32e5cbfe54c4d809d60022586cf200b7c3020bcf
|
[
"Apache-2.0"
] | 51
|
2015-08-11T21:01:40.000Z
|
2022-03-30T07:31:34.000Z
|
import FWCore.ParameterSet.Config as cms
#Geometry
# include used for track reconstruction
# note that tracking is redone since we need updated hits and they
# are not stored in the event!
from RecoTracker.TrackProducer.CTFFinalFitWithMaterial_cff import *
from RecoParticleFlow.PFProducer.particleFlow_cfi import *
from RecoParticleFlow.PFProducer.particleFlowTmpPtrs_cfi import *
particleFlowTmp = particleFlow.clone()
from Configuration.Eras.Modifier_pf_badHcalMitigationOff_cff import pf_badHcalMitigationOff
pf_badHcalMitigationOff.toModify(particleFlowTmp.PFEGammaFiltersParameters,
electron_protectionsForBadHcal = dict(enableProtections = False),
photon_protectionsForBadHcal = dict(enableProtections = False))
from Configuration.ProcessModifiers.egamma_lowPt_exclusive_cff import egamma_lowPt_exclusive
egamma_lowPt_exclusive.toModify(particleFlowTmp.PFEGammaFiltersParameters,photon_MinEt = 1.)
| 48.95
| 98
| 0.808989
|
c4e534a30db846b91d2d6543c5a23f431cbd0087
| 998
|
py
|
Python
|
src/client_libraries/python/microsoft/dynamics/customerinsights/api/models/odata_error_detail.py
|
microsoft/Dynamics365-CustomerInsights-Client-Libraries
|
e00632f7972717b03e0fb1a9e2667e8f9444a0fe
|
[
"MIT"
] | null | null | null |
src/client_libraries/python/microsoft/dynamics/customerinsights/api/models/odata_error_detail.py
|
microsoft/Dynamics365-CustomerInsights-Client-Libraries
|
e00632f7972717b03e0fb1a9e2667e8f9444a0fe
|
[
"MIT"
] | null | null | null |
src/client_libraries/python/microsoft/dynamics/customerinsights/api/models/odata_error_detail.py
|
microsoft/Dynamics365-CustomerInsights-Client-Libraries
|
e00632f7972717b03e0fb1a9e2667e8f9444a0fe
|
[
"MIT"
] | 7
|
2021-02-11T19:48:57.000Z
|
2021-12-17T08:00:15.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ODataErrorDetail(Model):
"""ODataErrorDetail.
:param error_code:
:type error_code: str
:param message:
:type message: str
:param target:
:type target: str
"""
_attribute_map = {
'error_code': {'key': 'errorCode', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ODataErrorDetail, self).__init__(**kwargs)
self.error_code = kwargs.get('error_code', None)
self.message = kwargs.get('message', None)
self.target = kwargs.get('target', None)
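    # Illustrative usage only (not emitted by AutoRest): the constructor above
    # accepts the fields as keyword arguments, e.g.
    #
    #   detail = ODataErrorDetail(error_code="NotFound", message="Entity not found", target="instanceId")
    #   print(detail.error_code)   # "NotFound"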
| 30.242424
| 76
| 0.528056
|
cb5dfff2a2c8889458782acda7bd1ed4c2702600
| 179
|
py
|
Python
|
main.py
|
wangweijia/Chess
|
6de1350b5a8f498005dcc914a2c27e4588a768b0
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
wangweijia/Chess
|
6de1350b5a8f498005dcc914a2c27e4588a768b0
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
wangweijia/Chess
|
6de1350b5a8f498005dcc914a2c27e4588a768b0
|
[
"Apache-2.0"
] | null | null | null |
from Checkerboard import *
def main():
checkerboard = Checkerboard()
res = checkerboard.getNextPlayer()
print('--------,', res)
if __name__ == '__main__':
main()
| 16.272727
| 38
| 0.620112
|
e6320645763b44b78c386908ee05c96ec575bf1e
| 266
|
py
|
Python
|
scripts/patch.py
|
TekBoundary/libnode
|
2064932a579ff38826f100b08588c583dc2e70b3
|
[
"MIT"
] | null | null | null |
scripts/patch.py
|
TekBoundary/libnode
|
2064932a579ff38826f100b08588c583dc2e70b3
|
[
"MIT"
] | null | null | null |
scripts/patch.py
|
TekBoundary/libnode
|
2064932a579ff38826f100b08588c583dc2e70b3
|
[
"MIT"
] | null | null | null |
assert __name__ == "__main__"
import subprocess
import os
import shutil
from . import config
os.chdir('node-{}'.format(config.nodeVersion))
subprocess.check_call(['patch', '-p1', '-i', '../uv.patch'])
shutil.copyfile('deps/uv/include/uv.h', 'include/node/uv.h')
| 20.461538
| 60
| 0.703008
|
ed1bd2050a1ef46ed289a652202df3d938302b94
| 2,854
|
py
|
Python
|
nova/api/openstack/compute/admin_password.py
|
ebalduf/nova-backports
|
6bf97ec73467de522d34ab7a17ca0e0874baa7f9
|
[
"Apache-2.0"
] | 5
|
2016-04-28T16:20:38.000Z
|
2021-04-25T11:19:03.000Z
|
nova/api/openstack/compute/admin_password.py
|
ebalduf/nova-backports
|
6bf97ec73467de522d34ab7a17ca0e0874baa7f9
|
[
"Apache-2.0"
] | 5
|
2016-07-11T20:59:47.000Z
|
2020-07-28T09:56:35.000Z
|
nova/api/openstack/compute/admin_password.py
|
ebalduf/nova-backports
|
6bf97ec73467de522d34ab7a17ca0e0874baa7f9
|
[
"Apache-2.0"
] | 5
|
2020-04-08T20:24:45.000Z
|
2020-10-05T19:02:13.000Z
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import admin_password
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova import exception
from nova.i18n import _
ALIAS = "os-admin-password"
authorize = extensions.os_compute_authorizer(ALIAS)
class AdminPasswordController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(AdminPasswordController, self).__init__(*args, **kwargs)
self.compute_api = compute.API(skip_policy_check=True)
# TODO(eliqiao): Here should be 204(No content) instead of 202 by v2.1
    # +microversions because the password has been changed when returning
# a response.
@wsgi.action('changePassword')
@wsgi.response(202)
@extensions.expected_errors((400, 404, 409, 501))
@validation.schema(admin_password.change_password)
def change_password(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
password = body['changePassword']['adminPass']
instance = common.get_instance(self.compute_api, context, id)
try:
self.compute_api.set_admin_password(context, instance, password)
except exception.InstanceUnknownCell as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.InstancePasswordSetFailed as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as e:
raise common.raise_http_conflict_for_instance_invalid_state(
e, 'changePassword', id)
except NotImplementedError:
msg = _("Unable to set password on instance")
common.raise_feature_not_supported(msg=msg)
class AdminPassword(extensions.V21APIExtensionBase):
"""Admin password management support."""
name = "AdminPassword"
alias = ALIAS
version = 1
def get_resources(self):
return []
def get_controller_extensions(self):
controller = AdminPasswordController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
| 36.589744
| 79
| 0.715137
|
9751270bfc91a8b9149afc18b79e55b4ad5fd98c
| 14,139
|
py
|
Python
|
tests/test_data_object/test_data_object.py
|
do-py-together/do-py
|
c55b5efd01dc3afd9979f74bce3a40e00e204236
|
[
"MIT"
] | 7
|
2020-07-07T02:53:44.000Z
|
2022-03-28T00:56:36.000Z
|
tests/test_data_object/test_data_object.py
|
do-py-together/do-py
|
c55b5efd01dc3afd9979f74bce3a40e00e204236
|
[
"MIT"
] | 31
|
2020-03-24T17:55:05.000Z
|
2022-03-31T04:27:14.000Z
|
tests/test_data_object/test_data_object.py
|
do-py-together/do-py
|
c55b5efd01dc3afd9979f74bce3a40e00e204236
|
[
"MIT"
] | null | null | null |
"""
Test resource base model.
:date_created: 2018-09-25
"""
import json
from builtins import object
import pytest
from do_py import DataObject
from do_py.common import R
from do_py.exceptions import DataObjectError, RestrictionError
from ..data import A, MyTestException, data, keys, short_data
def our_hasattr(instance, name):
"""
:param instance:
:param name:
:return:
"""
return name in instance.__dict__
class TestDataObject(object):
@pytest.mark.parametrize('id, name, status', data)
def test_init(self, id, name, status):
a = A.create(id=id, name=name, status=status)
assert a
assert a.id == id
assert a.name == name
assert a.status == status
assert a['id'] == id
assert a['name'] == name
assert a['status'] == status
assert a(data=a)
def test_class_namespace(self):
try:
class B(DataObject):
_restrictions = {
'x': R.INT.with_default(1)
}
x = None
B(data={'x': 1})
            raise Exception('Failed to prevent namespace clash between _restrictions and cls.x!')
except AttributeError:
assert True
except Exception as e:
assert False, str(e)
@pytest.mark.parametrize('deep', [
pytest.param(True, marks=pytest.mark.xfail(raises=DataObjectError), id='deep'),
pytest.param(False, id='!deep')
])
def test_deep_restriction(self, deep):
restric = {
'id': [0, 1, 2],
'x': R.INT.with_default(1),
'y': []
}
if deep:
restric['deep'] = {
'this': [],
'fails': R(1, 2, 3, default=1)
}
class B(DataObject):
_restrictions = restric
@pytest.mark.xfail(raises=DataObjectError)
def test_malformed_restrictions(self):
class FailsMalformed(DataObject):
_restrictions = {
'malformed': None
}
@pytest.mark.xfail(raises=RestrictionError)
def test_mixed_restrictions(self):
class FailsMixed(DataObject):
_restrictions = {
'mixed': R(int, 1, 2)
}
@pytest.mark.parametrize('restriction', [
[bool],
([bool], None)
])
@pytest.mark.xfail(raises=DataObjectError)
def test_legacy_restrictions(self, restriction):
class FailsLegacy(DataObject):
_restrictions = {
'legacy': restriction
}
@pytest.mark.xfail(raises=RestrictionError)
def test_int_default(self):
class FailsIntDefault(DataObject):
_restrictions = {
'int_default': R(default=int)
}
@pytest.mark.parametrize('d, strict, key', [
pytest.param(True, True, 'extra', marks=pytest.mark.xfail(raises=DataObjectError), id='d-strict-extra'),
pytest.param(True, True, 'missing', marks=pytest.mark.xfail(raises=DataObjectError), id='d-strict-missing'),
pytest.param(True, True, None, id='d-strict-None'),
pytest.param(True, False, 'extra', marks=pytest.mark.xfail(raises=DataObjectError), id='d-!strict-extra'),
pytest.param(True, False, 'missing', id='d-!strict-missing'),
pytest.param(True, False, None, id='d-!strict-None'),
pytest.param(False, True, None, marks=pytest.mark.xfail(raises=DataObjectError), id='!d-strict-None'),
pytest.param(False, False, None, id='!d-!strict-None')
])
def test_restrictions_runtime(self, d, strict, key):
restric = {
'id': R(0, 1, 2),
'x': R.INT.with_default(1),
'y': R()
}
class B(DataObject):
_restrictions = restric
data_ = {
'id': 0,
'x': 2,
'y': 'hi'
}
if key == 'extra':
data_['z'] = None
elif key == 'missing':
del data_['x']
if not d:
data_ = None
b = B(data=data_, strict=strict)
assert b
def test_nested_restrictions(self):
class B(DataObject):
_restrictions = {
'x': R(1, 2),
'y': R.INT.with_default(100),
}
class C(DataObject):
_restrictions = {
'a': A,
'b': B
}
data_ = {
'a': {
'id': 1,
'name': 'evil-jenkins',
'status': 0
},
'b': {
'x': 1,
'y': 23
}
}
c = C(data=data_)
assert c
assert c.get('a') == c['a'] == c.a
assert type(c.a) is A
assert c.a.id
assert type(c.b) is B
assert c.b.x
# Test nested validation
try:
c.b.x = 'invalid'
raise MyTestException('Invalid value assigned to c.b.x!')
except MyTestException as e:
assert False, str(e)
except Exception:
assert True
try:
c.b = {'invalid': 'values'}
raise MyTestException('Invalid data dict assigned to c.b!')
except MyTestException as e:
assert False, str(e)
except Exception:
assert True
# Test default value behavior
c_default = C(strict=False)
assert c_default
assert c_default.a
assert type(c_default.a) is A
assert type(c_default.b) is B
for k, v in c_default.a.items():
assert v is None, [(k, v) for k, v in c_default.a.items()]
@pytest.mark.parametrize('restrictions', [
pytest.param(R(A, type(None)), id='([A, type(None)], None)'),
A])
def test_supported_nested_restrictions_format(self, restrictions):
class B(DataObject):
_restrictions = {
'a': restrictions
}
class C(DataObject):
_restrictions = {
'b': B
}
c = C(data={
'b': {
'a': A(data={
'id': 1,
'name': 'evil-jenkins',
'status': 0
})
}
})
assert c
assert c.b
assert c.b.a
assert type(c.b.a) is A
@pytest.mark.parametrize('restrictions', [pytest.param((A, None),
marks=pytest.mark.xfail(
reason="'None' data not allowed for DO"),
id='(A, None)'),
pytest.param(A, marks=pytest.mark.xfail)])
def test_null_nested_object(self, restrictions):
class B(DataObject):
_restrictions = {
'a': restrictions
}
b = B(data={'a': None})
assert b
def test_missing_restrictions(self):
try:
class B(DataObject):
pass
B()
raise MyTestException('Error should have thrown.')
except MyTestException as e:
assert False, str(e)
except Exception:
assert True
def test_nesting_dict_restrictions(self):
try:
class B(DataObject):
_restrictions = {
'a': {
'x': [],
'y': []
}
}
B(data={'a': {'x': 1, 'y': 2}})
raise MyTestException('Error should have thrown.')
except MyTestException as e:
assert False, str(e)
except Exception:
assert True
@pytest.mark.parametrize('id, name, status', short_data)
def test_setitem(self, id, name, status):
a = A.create(id=id, name=name, status=status)
new_id = 10
a.id = new_id
assert not our_hasattr(a, 'id'), 'Restricted key should not be in attribute space'
assert a['id'] == new_id
assert a.id == new_id
newer_id = 11
a['id'] = newer_id
assert a['id'] == newer_id
assert a.id == newer_id
try:
a['invalid'] = 'something'
raise MyTestException('Able to assign a value to an unrestricted key!')
except MyTestException as e:
assert False, str(e)
except Exception:
assert True
# Attribute space can be freeform, but will not become part of restricted data schema
a.invalid = 'something'
assert our_hasattr(a, 'invalid'), 'Attribute not found'
assert 'invalid' not in a
assert a.invalid == 'something'
try:
_ = a['invalid']
raise MyTestException('Able to pull out value set in attribute namespace from keyspace!')
except MyTestException as e:
assert False, str(e)
except Exception:
assert True
@pytest.mark.parametrize('id, name, status', short_data)
def test_get(self, id, name, status):
a = A.create(id=id, name=name, status=status)
assert a.get('id') == a.id == id
assert a.get('name') == a.name == name
assert a.get('status') == a.status == status
try:
_ = a['nope']
assert False
except KeyError:
assert True
try:
_ = a.nope
assert False
except AttributeError:
assert True
@pytest.mark.parametrize('id, name, status', short_data)
@pytest.mark.parametrize('key', keys)
def test_get_2(self, id, name, status, key):
a = A.create(id=id, name=name, status=status)
assert a.get(key) is not None
@pytest.mark.parametrize('id, name, status', short_data)
def test_clear_pop(self, id, name, status):
a = A.create(id=id, name=name, status=status)
try:
a.clear()
assert False
except TypeError:
assert True
try:
a.pop('id')
assert False
except TypeError:
assert True
try:
a.popitem()
assert False
except TypeError:
assert True
try:
del a['id']
assert False
except TypeError:
assert True
try:
a.update({'id': 1})
assert False
except TypeError:
assert True
@pytest.mark.parametrize('complex', [pytest.param(True, marks=pytest.mark.xfail), False])
def test_str_repr(self, complex):
from datetime import date, datetime
class B(DataObject):
_restrictions = {
'datetime': R.DATETIME,
'date': R.DATE,
'default': R()
}
class MyObj(dict):
pass
a = B(data={'datetime': datetime.now(), 'date': date.today(), 'default': MyObj if complex else 'hello world'})
# __repr__ returns JSON
assert json.loads('%r' % a)
# __str__ returns string
assert '%s' % a
@pytest.mark.parametrize('d, strict', [
('valid', True),
pytest.param('invalid', True, marks=pytest.mark.xfail(reason='Data does not meet restrictions')),
pytest.param(None, True, marks=pytest.mark.xfail(reason='Data does not meet restrictions')),
pytest.param('partial', True, marks=pytest.mark.xfail(reason='Partial data not allowed when strict.')),
('valid', False),
pytest.param('invalid', False, marks=pytest.mark.xfail(reason='Data does not meet restrictions')),
(None, False),
('partial', False)
])
def test_strict(self, d, strict):
if d == 'valid':
d = {
'id': short_data[0][0],
'name': short_data[0][1],
'status': short_data[0][2]
}
elif d == 'invalid':
d = {
'id': None,
'name': None,
'status': None
}
elif d == 'partial':
d = {
'id': 1
}
a = A(data=d, strict=strict)
assert a, '__init__ failed!'
assert a(data=d, strict=strict), '__call__ failed!'
@pytest.mark.parametrize('id, name, status', short_data)
def test_attr_restr_mutually_exclusive(self, id, name, status):
"""
Restriction keys should not be present in attr space. Not key attributes should live in attribute space.
:return:
:rtype:
"""
a = A.create(id=id, name=name, status=status)
assert not any([our_hasattr(a, e) for e in A._restrictions.keys()])
assert all([e in a for e in A._restrictions.keys()])
a.x = 'x'
a.y = 'y'
attributes = ['x', 'y']
assert all([our_hasattr(a, e) for e in attributes])
assert not any([e in a for e in attributes])
def test_multiple_dataobjs_not_allowed(self):
class First(DataObject):
_restrictions = {'id': R.INT}
class Second(DataObject):
_restrictions = {'id': R.INT}
try:
type('Mixed',
(DataObject,),
{
'_restrictions': {'id': [First, Second]},
'__module__': 'pytest'
}
)
raise MyTestException('Mixed Data Objects should not be allowed in restrictions')
except DataObjectError:
assert True
@pytest.mark.parametrize('id, name, status', short_data)
def test_dir(self, id, name, status):
inst = A.create(id=id, name=name, status=status)
for k in A._restrictions:
assert k in dir(inst)
def test_schema(self):
schema = A.schema
for k in A._restrictions:
assert k in schema
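# --- Illustrative usage sketch (not part of the original test module) ---
# A minimal example of how a DataObject subclass like those above might be
# defined and used; it is written only against the DataObject/R behaviour
# exercised by these tests, and the class/field names are placeholders.
def _example_dataobject_usage(DataObject, R):
    """Hypothetical walk-through; pass in the real DataObject and R classes."""
    class Server(DataObject):
        _restrictions = {
            'id': R.INT,           # value must be an int
            'status': R(0, 1, 2),  # value must be one of 0, 1, 2
            'name': R(),           # any value accepted
        }
    srv = Server(data={'id': 1, 'status': 0, 'name': 'evil-jenkins'})
    srv.status = 2                      # valid reassignment via attribute access
    assert srv['name'] == srv.name      # restricted keys readable as item or attribute
    return srv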
| 30.938731
| 118
| 0.505198
|
856727eb61c62f1f078b98f142e2ab60287d37f9
| 835
|
py
|
Python
|
06_matrix/diffusion_2d/diffusion_numpy_memory.py
|
siddheshmhatre/high_performance_python
|
4c308dddb400fcd4c4ab7c4d59dc56eb5dd58d1d
|
[
"RSA-MD"
] | 698
|
2015-01-06T14:10:26.000Z
|
2022-03-29T03:05:09.000Z
|
06_matrix/diffusion_2d/diffusion_numpy_memory.py
|
siddheshmhatre/high_performance_python
|
4c308dddb400fcd4c4ab7c4d59dc56eb5dd58d1d
|
[
"RSA-MD"
] | 6
|
2015-01-12T18:03:24.000Z
|
2021-06-02T13:05:20.000Z
|
06_matrix/diffusion_2d/diffusion_numpy_memory.py
|
siddheshmhatre/high_performance_python
|
4c308dddb400fcd4c4ab7c4d59dc56eb5dd58d1d
|
[
"RSA-MD"
] | 260
|
2015-01-16T13:58:57.000Z
|
2022-03-28T16:49:34.000Z
|
#!/usr/bin/env python2.7
import numpy as np
import time
grid_shape = (512, 512)
def laplacian(grid, out):
    # Five-point Laplacian stencil with periodic boundaries: out = -4*grid
    # plus the four nearest neighbours, gathered with np.roll.
    np.copyto(out, grid)
    out *= -4
    out += np.roll(grid, +1, 0)
    out += np.roll(grid, -1, 0)
    out += np.roll(grid, +1, 1)
    out += np.roll(grid, -1, 1)
def evolve(grid, dt, out, D=1):
    # One explicit Euler step of the diffusion equation: out = grid + D*dt*laplacian(grid).
    laplacian(grid, out)
    out *= D * dt
    out += grid
def run_experiment(num_iterations):
scratch = np.zeros(grid_shape)
grid = np.zeros(grid_shape)
block_low = int(grid_shape[0] * .4)
block_high = int(grid_shape[0] * .5)
grid[block_low:block_high, block_low:block_high] = 0.005
start = time.time()
for i in range(num_iterations):
evolve(grid, 0.1, scratch)
grid, scratch = scratch, grid
return time.time() - start
if __name__ == "__main__":
run_experiment(500)
| 20.875
| 60
| 0.609581
|
f9a08ba4c9b146534fbd27361fdc2fc2a68f87d9
| 3,926
|
py
|
Python
|
python/paddle/fluid/tests/unittests/mlu/test_lookup_table_v2_op_mlu.py
|
RangeKing/Paddle
|
2d87300809ae75d76f5b0b457d8112cb88dc3e27
|
[
"Apache-2.0"
] | 8
|
2016-08-15T07:02:27.000Z
|
2016-08-24T09:34:00.000Z
|
python/paddle/fluid/tests/unittests/mlu/test_lookup_table_v2_op_mlu.py
|
RangeKing/Paddle
|
2d87300809ae75d76f5b0b457d8112cb88dc3e27
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/tests/unittests/mlu/test_lookup_table_v2_op_mlu.py
|
RangeKing/Paddle
|
2d87300809ae75d76f5b0b457d8112cb88dc3e27
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import unittest
import sys
sys.path.append("..")
from op_test import OpTest
import paddle
import paddle.fluid as fluid
paddle.enable_static()
SEED = 2022
class TestLookupTableV2(OpTest):
def setUp(self):
self.set_mlu()
self.op_type = "lookup_table_v2"
self.init_dtype()
self.init_dims()
self.init_padding_idx()
np.random.seed(SEED)
w = np.random.random([self.vocab, self.dim]).astype(self.dtype)
x = np.random.randint(
0, self.vocab, size=(self.bsz, self.seqlen)).astype(self.ids_dtype)
out = w[x]
if self.padding_idx != -1:
out[np.squeeze(x == self.padding_idx)] = np.zeros(self.dim)
self.inputs = {
'W': OpTest.np_dtype_to_fluid_dtype(w),
'Ids': OpTest.np_dtype_to_fluid_dtype(x)
}
self.attrs = {
'is_sparse': False,
'is_distributed': False,
'remote_prefetch': False,
'padding_idx': self.padding_idx
}
self.outputs = {'Out': out}
def set_mlu(self):
self.__class__.use_mlu = True
self.place = paddle.device.MLUPlace(0)
def init_dtype(self):
self.dtype = np.float32
self.ids_dtype = np.int32
def init_dims(self):
self.bsz = 6
self.seqlen = 8
self.vocab = 10
# embedding_dim is not multiple of 32
self.dim = 20
def init_padding_idx(self):
self.padding_idx = -1
def test_check_output(self):
self.check_output_with_place(self.place)
def test_check_grad(self):
if self.dtype == np.float16:
self.check_grad_with_place(
self.place, ['W'], 'Out', max_relative_error=0.01)
else:
self.check_grad_with_place(self.place, ['W'], 'Out')
class TestLookupTableV2FP16(TestLookupTableV2):
no_need_check_grad = True
def init_dtype(self):
self.dtype = np.float16
self.ids_dtype = np.int32
def set_mlu(self):
self.__class__.use_mlu = True
self.place = paddle.device.MLUPlace(0)
self.__class__.no_need_check_grad = True
class TestLookupTableV2Dim32(TestLookupTableV2):
def init_dims(self):
self.bsz = 6
self.seqlen = 8
self.vocab = 10
# embedding_dim is multiple of 32
self.dim = 64
class TestLookupTableV2Dim32FP16(TestLookupTableV2):
no_need_check_grad = True
def init_dtype(self):
self.dtype = np.float16
self.ids_dtype = np.int64
def init_dims(self):
self.bsz = 6
self.seqlen = 8
self.vocab = 10
self.dim = 64
def set_mlu(self):
self.__class__.use_mlu = True
self.place = paddle.device.MLUPlace(0)
self.__class__.no_need_check_grad = True
class TestLookupTableV2WithPadding(TestLookupTableV2):
def init_padding_idx(self):
self.padding_idx = np.random.randint(0, self.vocab)
class TestLookupTableV2WithPadding1(TestLookupTableV2):
def init_padding_idx(self):
self.padding_idx = np.random.randint(0, self.vocab)
def init_dtype(self):
self.dtype = np.float32
self.ids_dtype = np.int64
if __name__ == '__main__':
unittest.main()
| 27.454545
| 79
| 0.643658
|
e04305da5f4e723f39a937d59a8826d19561a519
| 1,807
|
py
|
Python
|
solum/conductor/api.py
|
openstack/solum
|
0a744883ca00a0ee80e1b6840ac42a78fc397450
|
[
"Apache-2.0"
] | 39
|
2015-09-26T01:30:52.000Z
|
2021-05-20T23:37:43.000Z
|
solum/conductor/api.py
|
openstack/solum
|
0a744883ca00a0ee80e1b6840ac42a78fc397450
|
[
"Apache-2.0"
] | null | null | null |
solum/conductor/api.py
|
openstack/solum
|
0a744883ca00a0ee80e1b6840ac42a78fc397450
|
[
"Apache-2.0"
] | 30
|
2015-10-25T18:06:39.000Z
|
2020-01-14T12:14:06.000Z
|
# Copyright 2014 - Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for interfacing with Solum Conductor."""
from oslo_config import cfg
from solum.common.rpc import service
class API(service.API):
def __init__(self, context=None):
cfg.CONF.import_opt('topic', 'solum.conductor.config',
group='conductor')
super(API, self).__init__(context,
topic=cfg.CONF.conductor.topic)
def build_job_update(self, build_id, status, description, created_image_id,
docker_image_name, assembly_id):
self._cast('build_job_update', build_id=build_id, status=status,
description=description, created_image_id=created_image_id,
docker_image_name=docker_image_name,
assembly_id=assembly_id)
def update_assembly(self, assembly_id, data):
self._cast('update_assembly', assembly_id=assembly_id, data=data)
def update_image(self, image_id, status, external_ref=None,
docker_image_name=None):
self._cast('update_image', image_id=image_id, status=status,
external_ref=external_ref,
docker_image_name=docker_image_name)
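# --- Illustrative usage sketch (not part of the original module) ---
# Shows how a caller might issue a one-way cast through the wrapper above;
# the context object and the status value below are placeholders.
def _example_update_assembly(ctxt, assembly_id):
    API(context=ctxt).update_assembly(assembly_id, {'status': 'READY'})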
| 41.068182
| 79
| 0.670172
|
a2d4e25a2b3990c9908cf4656117e451bfe7ec0d
| 879
|
py
|
Python
|
serverless-python-dynamodb/aliases/create.py
|
ilya40umov/aws-basics
|
925bdcce3b4d248929b8dd5536db163a83e42286
|
[
"Apache-2.0"
] | null | null | null |
serverless-python-dynamodb/aliases/create.py
|
ilya40umov/aws-basics
|
925bdcce3b4d248929b8dd5536db163a83e42286
|
[
"Apache-2.0"
] | null | null | null |
serverless-python-dynamodb/aliases/create.py
|
ilya40umov/aws-basics
|
925bdcce3b4d248929b8dd5536db163a83e42286
|
[
"Apache-2.0"
] | null | null | null |
import json
import logging
import os
import time
import boto3
localstack_hostname = os.environ['LOCALSTACK_HOSTNAME']
if localstack_hostname:
dynamodb = boto3.resource('dynamodb', endpoint_url=f"http://{localstack_hostname}:4566")
else:
dynamodb = boto3.resource('dynamodb')
def create(event, context):
data = json.loads(event['body'])
if 'alias' not in data or 'url' not in data:
logging.error("Validation Failed")
raise Exception("Couldn't create the alias.")
timestamp = str(time.time())
table = dynamodb.Table(os.environ['DYNAMODB_TABLE'])
item = {
'alias': data['alias'],
'url': data['url'],
'createdAt': timestamp,
'updatedAt': timestamp
}
table.put_item(Item=item)
response = {
"statusCode": 200,
"body": json.dumps(item)
}
return response
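# --- Illustrative local invocation sketch (not part of the handler) ---
# Shows the event shape the handler expects; it assumes DYNAMODB_TABLE (and,
# for local runs, LOCALSTACK_HOSTNAME) are set and the target table exists.
def _example_invoke():
    event = {'body': json.dumps({'alias': 'docs', 'url': 'https://example.com/docs'})}
    return create(event, None)  # expected: {'statusCode': 200, 'body': '...'}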
| 20.928571
| 92
| 0.632537
|
da9590c9acf273f792aaacd3e0bcbd83a365c997
| 10,056
|
py
|
Python
|
plugins/Factoids/test.py
|
MicrohexHQ/supybot_fixes
|
f14e51f0379eee641c8b9c80a9259628f509f832
|
[
"BSD-3-Clause"
] | 5
|
2016-06-26T01:25:21.000Z
|
2021-10-31T06:10:01.000Z
|
plugins/Factoids/test.py
|
MicrohexHQ/supybot_fixes
|
f14e51f0379eee641c8b9c80a9259628f509f832
|
[
"BSD-3-Clause"
] | 1
|
2015-02-16T16:56:10.000Z
|
2015-02-16T16:56:10.000Z
|
plugins/Factoids/test.py
|
MicrohexHQ/supybot_fixes
|
f14e51f0379eee641c8b9c80a9259628f509f832
|
[
"BSD-3-Clause"
] | 5
|
2015-08-23T22:59:26.000Z
|
2021-10-31T06:10:18.000Z
|
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2010, James Vega
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
try:
import sqlite3
except ImportError:
from pysqlite2 import dbapi2 as sqlite3 # for python2.4
class FactoidsTestCase(ChannelPluginTestCase):
plugins = ('Factoids',)
def testRandomfactoid(self):
self.assertError('random')
self.assertNotError('learn jemfinch as my primary author')
self.assertRegexp('random', 'primary author')
def testLearn(self):
self.assertError('learn as my primary author')
self.assertError('learn jemfinch as')
self.assertNotError('learn jemfinch as my primary author')
self.assertNotError('info jemfinch')
self.assertRegexp('whatis jemfinch', 'my primary author')
self.assertRegexp('whatis JEMFINCH', 'my primary author')
self.assertRegexp('whatis JEMFINCH 1', 'my primary author')
self.assertNotError('learn jemfinch as a bad assembly programmer')
self.assertRegexp('whatis jemfinch 2', 'bad assembly')
self.assertNotRegexp('whatis jemfinch 2', 'primary author')
self.assertRegexp('whatis jemfinch', r'.*primary author.*assembly')
self.assertError('forget jemfinch')
self.assertError('forget jemfinch 3')
self.assertError('forget jemfinch 0')
self.assertNotError('forget jemfinch 2')
self.assertNotError('forget jemfinch 1')
self.assertError('whatis jemfinch')
self.assertError('info jemfinch')
self.assertNotError('learn foo bar as baz')
self.assertNotError('info foo bar')
self.assertRegexp('whatis foo bar', 'baz')
self.assertNotError('learn foo bar as quux')
self.assertRegexp('whatis foo bar', '.*baz.*quux')
self.assertError('forget foo bar')
self.assertNotError('forget foo bar 2')
self.assertNotError('forget foo bar 1')
self.assertError('whatis foo bar')
self.assertError('info foo bar')
self.assertError('learn foo bar baz') # No 'as'
self.assertError('learn foo bar') # No 'as'
def testChangeFactoid(self):
self.assertNotError('learn foo as bar')
self.assertNotError('change foo 1 s/bar/baz/')
self.assertRegexp('whatis foo', 'baz')
self.assertError('change foo 2 s/bar/baz/')
self.assertError('change foo 0 s/bar/baz/')
def testSearchFactoids(self):
self.assertNotError('learn jemfinch as my primary author')
self.assertNotError('learn strike as a cool guy working on me')
self.assertNotError('learn inkedmn as another of my developers')
self.assertNotError('learn jamessan as a developer of much python')
self.assertNotError('learn bwp as author of my weather command')
self.assertRegexp('factoids search --regexp /.w./', 'bwp')
self.assertRegexp('factoids search --regexp /^.+i/',
'jemfinch.*strike')
self.assertNotRegexp('factoids search --regexp /^.+i/', 'inkedmn')
self.assertRegexp('factoids search --regexp m/j/ --regexp m/ss/',
'jamessan')
self.assertRegexp('factoids search --regexp m/^j/ *ss*',
'jamessan')
self.assertRegexp('factoids search --regexp /^j/',
'jamessan.*jemfinch')
self.assertRegexp('factoids search j*', 'jamessan.*jemfinch')
self.assertRegexp('factoids search *ke*',
'inkedmn.*strike|strike.*inkedmn')
self.assertRegexp('factoids search ke',
'inkedmn.*strike|strike.*inkedmn')
self.assertRegexp('factoids search jemfinch',
'my primary author')
self.assertRegexp('factoids search --values primary author',
'my primary author')
def testWhatisOnNumbers(self):
self.assertNotError('learn 911 as emergency number')
self.assertRegexp('whatis 911', 'emergency number')
def testNotZeroIndexed(self):
self.assertNotError('learn foo as bar')
self.assertNotRegexp('info foo', '#0')
self.assertNotRegexp('whatis foo', '#0')
self.assertNotError('learn foo as baz')
self.assertNotRegexp('info foo', '#0')
self.assertNotRegexp('whatis foo', '#0')
def testInfoReturnsRightNumber(self):
self.assertNotError('learn foo as bar')
self.assertNotRegexp('info foo', '2 factoids')
def testInfoUsageCount(self):
self.assertNotError('learn moo as cow')
self.assertRegexp('info moo', 'recalled 0 times')
self.assertNotError('whatis moo')
self.assertRegexp('info moo', 'recalled 1 time')
def testLearnSeparator(self):
self.assertError('learn foo is bar')
self.assertNotError('learn foo as bar')
self.assertRegexp('whatis foo', 'bar')
orig = conf.supybot.plugins.Factoids.learnSeparator()
try:
conf.supybot.plugins.Factoids.learnSeparator.setValue('is')
self.assertError('learn bar as baz')
self.assertNotError('learn bar is baz')
self.assertRegexp('whatis bar', 'baz')
finally:
conf.supybot.plugins.Factoids.learnSeparator.setValue(orig)
def testShowFactoidIfOnlyOneMatch(self):
m1 = self.assertNotError('factoids search m/foo|bar/')
orig = conf.supybot.plugins.Factoids.showFactoidIfOnlyOneMatch()
try:
conf.supybot.plugins.Factoids. \
showFactoidIfOnlyOneMatch.setValue(False)
m2 = self.assertNotError('factoids search m/foo/')
self.failUnless(m1.args[1].startswith(m2.args[1]))
finally:
conf.supybot.plugins.Factoids. \
showFactoidIfOnlyOneMatch.setValue(orig)
def testInvalidCommand(self):
self.assertNotError('learn foo as bar')
self.assertRegexp('foo', 'bar')
self.assertNotError('learn mooz as cowz')
self.assertRegexp('moo', 'mooz')
self.assertRegexp('mzo', 'mooz')
self.assertRegexp('moz', 'mooz')
self.assertNotError('learn moped as pretty fast')
self.assertRegexp('moe', 'mooz.*moped')
self.assertError('nosuchthing')
def testWhatis(self):
self.assertNotError('learn foo as bar')
self.assertRegexp('whatis foo', 'bar')
self.assertRegexp('whatis foob', 'foo')
self.assertNotError('learn foob as barb')
self.assertRegexp('whatis foom', 'foo.*foob')
def testStandardSubstitute(self):
self.assertNotError('learn foo as this is $channel, and hour is $hour')
self.assertRegexp('whatis foo', 'this is #test, and hour is \d{1,2}')
self.assertRegexp('whatis --raw foo', 'this is \$channel, and hour is \$hour')
self.assertNotError('learn bar as this is $$channel escaped')
self.assertRegexp('whatis bar', 'this is \$channel')
self.assertNotError('learn bar as this is $minute')
self.assertRegexp('whatis bar', '\$channel.*\d{1,2}')
def testAlias(self):
self.assertNotError('learn foo as bar')
self.assertNotError('alias foo zoog')
self.assertRegexp('whatis zoog', 'bar')
self.assertNotError('learn foo as snorp')
self.assertError('alias foo gnoop')
self.assertNotError('alias foo gnoop 2')
self.assertRegexp('whatis gnoop', 'snorp')
self.assertNotError('learn floop as meep')
self.assertNotError('learn bar as baz')
self.assertNotError('alias floop bar')
self.assertRegexp('whatis bar', 'meep.*baz')
def testRank(self):
self.assertNotError('learn foo as bar')
self.assertNotError('learn moo as cow')
self.assertRegexp('factoids rank', '#1 foo \(0\), #2 moo \(0\)')
self.assertRegexp('whatis moo', '.*cow.*')
self.assertRegexp('factoids rank', '#1 moo \(1\), #2 foo \(0\)')
self.assertRegexp('factoids rank 1', '#1 moo \(1\)')
self.assertNotRegexp('factoids rank 1', 'foo')
self.assertRegexp('factoids rank --plain', 'moo, foo')
self.assertRegexp('factoids rank --plain --alpha', 'foo, moo')
self.assertResponse('factoids rank --plain 1', 'moo')
def testQuoteHandling(self):
self.assertNotError('learn foo as "\\"bar\\""')
self.assertRegexp('whatis foo', r'"bar"')
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| 46.555556
| 86
| 0.652546
|
ec82b640fba8271491eaef8e9b992580bd2baba2
| 595
|
py
|
Python
|
tradester/feeds/static/futures.py
|
wrieg123/tradester
|
440210940f80e94fde4d43841c729f63b05f597d
|
[
"MIT"
] | 5
|
2020-11-11T14:54:59.000Z
|
2020-11-13T04:00:25.000Z
|
tradester/feeds/static/futures.py
|
wrieg123/tradester
|
440210940f80e94fde4d43841c729f63b05f597d
|
[
"MIT"
] | null | null | null |
tradester/feeds/static/futures.py
|
wrieg123/tradester
|
440210940f80e94fde4d43841c729f63b05f597d
|
[
"MIT"
] | null | null | null |
from .feed import MetaFeed, TSFeed
__all__ = ['FuturesMeta', 'FuturesTS']
class FuturesMeta(MetaFeed):
def __init__(self, identifiers, credentials = None):
super().__init__(identifiers, "*", "futures", "contract", credentials)
class FuturesTS(TSFeed):
def __init__(self, identifiers, fields = 'open, high, low, close, volume', start_date = None, end_date = None, bar = 'daily', credentials = None, force_fast = True):
super().__init__(identifiers, fields, "futures", "contract", credentials, bar, start_date, end_date, force_fast = force_fast)
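# --- Illustrative usage sketch (not part of the original module) ---
# The contract identifier and date strings below are placeholders; the exact
# identifier format depends on the backing data source.
def _example_futures_feed():
    return FuturesTS(['CLZ2020'], fields='close, volume',
                     start_date='2020-01-01', end_date='2020-06-30', bar='daily')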
| 42.5
| 170
| 0.678992
|
df8fadcd24376e5fe34151339433b1f02d885bf6
| 2,859
|
py
|
Python
|
tools/keystone_version.py
|
horion/openstacksdk
|
cbb0e12e1dc944847f2ba0e67bf35b9c7a67b3a3
|
[
"Apache-2.0"
] | 99
|
2018-03-28T15:41:45.000Z
|
2022-01-23T17:22:13.000Z
|
tools/keystone_version.py
|
horion/openstacksdk
|
cbb0e12e1dc944847f2ba0e67bf35b9c7a67b3a3
|
[
"Apache-2.0"
] | 5
|
2018-05-25T16:54:23.000Z
|
2021-11-21T02:27:16.000Z
|
tools/keystone_version.py
|
horion/openstacksdk
|
cbb0e12e1dc944847f2ba0e67bf35b9c7a67b3a3
|
[
"Apache-2.0"
] | 104
|
2018-04-06T14:33:54.000Z
|
2022-03-01T01:58:09.000Z
|
# Copyright (c) 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pprint
import sys
from urllib import parse as urlparse
import openstack.config
def print_versions(r):
if 'version' in r:
for version in r['version']:
print_version(version)
if 'values' in r:
for version in r['values']:
print_version(version)
if isinstance(r, list):
for version in r:
print_version(version)
def print_version(version):
if version['status'] in ('CURRENT', 'stable'):
print(
"\tVersion ID: {id} updated {updated}".format(
id=version.get('id'),
updated=version.get('updated')))
verbose = '-v' in sys.argv
ran = []
for cloud in openstack.config.OpenStackConfig().get_all_clouds():
if cloud.name in ran:
continue
ran.append(cloud.name)
# We don't actually need a compute client - but we'll be getting full urls
# anyway. Without this SSL cert info becomes wrong.
c = cloud.get_session_client('compute')
endpoint = cloud.config['auth']['auth_url']
try:
print(endpoint)
r = c.get(endpoint).json()
if verbose:
pprint.pprint(r)
except Exception as e:
print("Error with {cloud}: {e}".format(cloud=cloud.name, e=str(e)))
continue
if 'version' in r:
print_version(r['version'])
url = urlparse.urlparse(endpoint)
parts = url.path.split(':')
if len(parts) == 2:
path, port = parts
else:
path = url.path
port = None
stripped = path.rsplit('/', 2)[0]
if port:
stripped = '{stripped}:{port}'.format(stripped=stripped, port=port)
endpoint = urlparse.urlunsplit(
(url.scheme, url.netloc, stripped, url.params, url.query))
print(" also {endpoint}".format(endpoint=endpoint))
try:
r = c.get(endpoint).json()
if verbose:
pprint.pprint(r)
except Exception:
print("\tUnauthorized")
continue
if 'version' in r:
print_version(r)
elif 'versions' in r:
print_versions(r['versions'])
else:
print("\n\nUNKNOWN\n\n{r}".format(r=r))
else:
print_versions(r['versions'])
| 31.417582
| 79
| 0.601959
|
05c35aba0e2b6d69ad3e61c49f2a757f4f07e120
| 4,204
|
py
|
Python
|
tests/test_modules/thread_aware_modules.py
|
jleaniz/dftimewolf
|
9f4f2f3914f713e237974ce7efcb169885e5a4e6
|
[
"Apache-2.0"
] | null | null | null |
tests/test_modules/thread_aware_modules.py
|
jleaniz/dftimewolf
|
9f4f2f3914f713e237974ce7efcb169885e5a4e6
|
[
"Apache-2.0"
] | null | null | null |
tests/test_modules/thread_aware_modules.py
|
jleaniz/dftimewolf
|
9f4f2f3914f713e237974ce7efcb169885e5a4e6
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Contains dummy modules used in thread aware tests."""
from typing import Dict, Any
import threading
import time
from dftimewolf.lib import module
from dftimewolf.lib.containers import interface
from dftimewolf.lib.containers import containers
class TestContainer(interface.AttributeContainer):
"""Test attribute container."""
CONTAINER_TYPE = 'test_container'
def __init__(self, value: str) -> None:
super(TestContainer, self).__init__()
self.value = value
def __eq__(self, other: object) -> bool:
return self.value == other.value
class TestContainerTwo(interface.AttributeContainer):
"""Test attribute container."""
CONTAINER_TYPE = 'test_container_two'
def __init__(self, value: str) -> None:
super(TestContainerTwo, self).__init__()
self.value = value
class TestContainerThree(interface.AttributeContainer):
"""Test attribute container."""
CONTAINER_TYPE = 'test_container_three'
def __init__(self, value: str) -> None:
super(TestContainerThree, self).__init__()
self.value = value
class ContainerGeneratorModule(module.BaseModule):
"""This is a dummy module. Generates test containers."""
def __init__(self, state, name=None):
self.list = []
super(ContainerGeneratorModule, self).__init__(state, name)
def SetUp(self, runtime_value=None): # pylint: disable=arguments-differ
"""Dummy setup function."""
print(self.name + ' Setup!')
self.list = runtime_value.split(',')
def Process(self):
"""Dummy Process function."""
print(self.name + ' Process!')
for item in self.list:
container = TestContainer(item)
self.state.StoreContainer(container)
container = TestContainerTwo(','.join(self.list))
self.state.StoreContainer(container)
class ThreadAwareConsumerModule(module.ThreadAwareModule):
"""This is a dummy Thread Aware Module. Consumes from
ContainerGeneratorModule based on the number of containers generated."""
def __init__(self, state, name=None):
super(ThreadAwareConsumerModule, self).__init__(state, name)
self.output_values = ['one', 'two', 'three']
self.output_lock = threading.Lock()
def SetUp(self): # pylint: disable=arguments-differ
"""SetUp"""
self.logger.info('{0:s} SetUp!'.format(self.name))
def Process(self, container) -> None:
"""Process"""
self.logger.info('{0:s} Process!'.format(self.name))
time.sleep(1)
# This modifies the container passed in as a parameter.
container.value += ' appended'
# This modifies some state-stored containers, generated by previous modules.
for c in self.state.GetContainers(TestContainerTwo):
c.value += ' appended'
# This generates and stores a container in state.
with self.output_lock:
new_container = TestContainerThree('output ' + self.output_values.pop())
self.state.StoreContainer(new_container)
@staticmethod
def GetThreadOnContainerType():
return TestContainer
def GetThreadPoolSize(self):
return 2
def PreProcess(self) -> None:
self.logger.info("ThreadAwareConsumerModule Static Pre Process")
def PostProcess(self) -> None:
self.logger.info("ThreadAwareConsumerModule Static Post Process")
class Issue503Module(module.ThreadAwareModule):
"""This is a module for testing a certain pattern of container handling.
  As described in https://github.com/log2timeline/dftimewolf/issues/503, this
  module pops containers for input and uses the same container type for its output.
"""
def __init__(self, state, name=None):
super(Issue503Module, self).__init__(state, name)
def SetUp(self): # pylint: disable=arguments-differ
"""SetUp"""
self.logger.info('{0:s} SetUp!'.format(self.name))
def Process(self, container) -> None:
"""Process"""
self.logger.info('{0:s} Process!'.format(self.name))
self.state.StoreContainer(TestContainer(container.value + " Processed"))
@staticmethod
def GetThreadOnContainerType():
return TestContainer
def GetThreadPoolSize(self):
return 2
def PreProcess(self) -> None:
pass
def PostProcess(self) -> None:
pass
def KeepThreadedContainersInState(self) -> bool:
return False
| 29.605634
| 80
| 0.714795
|
e90f256b42e51a28b45ee0cc2485f5f69a84af23
| 1,240
|
py
|
Python
|
setup.py
|
m4rk3r/django-filebrowser
|
cecf6560e6e343849b699dd04e189e5ccee45ad6
|
[
"BSD-3-Clause"
] | 1
|
2019-12-22T23:44:22.000Z
|
2019-12-22T23:44:22.000Z
|
setup.py
|
m4rk3r/django-filebrowser
|
cecf6560e6e343849b699dd04e189e5ccee45ad6
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
m4rk3r/django-filebrowser
|
cecf6560e6e343849b699dd04e189e5ccee45ad6
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='django-filebrowser',
version='3.12.1',
description='Media-Management with Grappelli',
long_description=read('README.rst'),
url='http://django-filebrowser.readthedocs.org',
download_url='',
author='Patrick Kranzlmueller, Axel Swoboda (vonautomatisch)',
author_email='office@vonautomatisch.at',
license='BSD',
packages=find_packages(),
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 2.1',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
zip_safe=False,
install_requires=[
'django-grappelli>=2.13,<2.14',
'pillow',
],
)
| 30.243902
| 70
| 0.625
|
c65c17e23aea751f3da0717d25da21d1edb1aeb0
| 2,782
|
py
|
Python
|
backend/pyrogram/raw/types/input_file_location.py
|
appheap/social-media-analyzer
|
0f9da098bfb0b4f9eb38e0244aa3a168cf97d51c
|
[
"Apache-2.0"
] | 5
|
2021-09-11T22:01:15.000Z
|
2022-03-16T21:33:42.000Z
|
backend/pyrogram/raw/types/input_file_location.py
|
iamatlasss/social-media-analyzer
|
429d1d2bbd8bfce80c50c5f8edda58f87ace668d
|
[
"Apache-2.0"
] | null | null | null |
backend/pyrogram/raw/types/input_file_location.py
|
iamatlasss/social-media-analyzer
|
429d1d2bbd8bfce80c50c5f8edda58f87ace668d
|
[
"Apache-2.0"
] | 3
|
2022-01-18T11:06:22.000Z
|
2022-02-26T13:39:28.000Z
|
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class InputFileLocation(TLObject): # type: ignore
"""This object is a constructor of the base type :obj:`~pyrogram.raw.base.InputFileLocation`.
Details:
- Layer: ``123``
- ID: ``0xdfdaabe1``
Parameters:
volume_id: ``int`` ``64-bit``
local_id: ``int`` ``32-bit``
secret: ``int`` ``64-bit``
file_reference: ``bytes``
"""
__slots__: List[str] = ["volume_id", "local_id", "secret", "file_reference"]
ID = 0xdfdaabe1
QUALNAME = "types.InputFileLocation"
def __init__(self, *, volume_id: int, local_id: int, secret: int, file_reference: bytes) -> None:
self.volume_id = volume_id # long
self.local_id = local_id # int
self.secret = secret # long
self.file_reference = file_reference # bytes
@staticmethod
def read(data: BytesIO, *args: Any) -> "InputFileLocation":
# No flags
volume_id = Long.read(data)
local_id = Int.read(data)
secret = Long.read(data)
file_reference = Bytes.read(data)
return InputFileLocation(volume_id=volume_id, local_id=local_id, secret=secret, file_reference=file_reference)
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
# No flags
data.write(Long(self.volume_id))
data.write(Int(self.local_id))
data.write(Long(self.secret))
data.write(Bytes(self.file_reference))
return data.getvalue()
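# --- Illustrative serialization sketch (not part of the generated file) ---
# Builds an InputFileLocation with placeholder field values and serializes it.
# Note that write() prefixes the 4-byte constructor ID (0xdfdaabe1), while
# read() expects the stream to be positioned just after that ID.
def _example_roundtrip():
    loc = InputFileLocation(volume_id=1, local_id=2, secret=3, file_reference=b"")
    raw = loc.write()                    # TL-serialized bytes, constructor ID first
    buf = BytesIO(raw)
    buf.seek(4)                          # skip the constructor ID before read()
    return InputFileLocation.read(buf)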
| 31.977011
| 118
| 0.634436
|
a2c7594cb1f816be4ec63ec502c7f91db319c15b
| 1,392
|
py
|
Python
|
web/admintools/views.py
|
tellg/arxcode
|
f04340f9466c31f59bc13b8e1afd4f5734da4848
|
[
"MIT"
] | null | null | null |
web/admintools/views.py
|
tellg/arxcode
|
f04340f9466c31f59bc13b8e1afd4f5734da4848
|
[
"MIT"
] | null | null | null |
web/admintools/views.py
|
tellg/arxcode
|
f04340f9466c31f59bc13b8e1afd4f5734da4848
|
[
"MIT"
] | null | null | null |
from django.db.models import Q
from django.http import Http404
from django.shortcuts import render
from web.character.models import Revelation, Clue
from world.dominion.models import PlotAction
def admin_search(request):
if not request.user.is_staff:
raise Http404
search_term = request.GET.get("search_term")
if not search_term:
return render(request, 'admintools/search.html', {'page_title': 'Admin Search Tool'})
lore_qs = Revelation.objects.filter(Q(name__icontains=search_term) | Q(desc__icontains=search_term) |
Q(gm_notes__icontains=search_term))
clue_qs = Clue.objects.filter(Q(name__icontains=search_term) | Q(desc__icontains=search_term) |
Q(gm_notes__icontains=search_term) | Q(search_tags__name__icontains=search_term))
clue_qs = clue_qs.distinct()
crisis_qs = PlotAction.objects.filter(Q(actions__icontains=search_term) |
Q(assisting_actions__actions__icontains=search_term) |
Q(story__icontains=search_term))
crisis_qs = crisis_qs.distinct()
context = {
'page_title': 'Admin Search Tool',
'lore': lore_qs,
'clues': clue_qs,
'crisis_actions': crisis_qs
}
return render(request, 'admintools/search_results.html', context)
| 38.666667
| 115
| 0.66092
|
2c1c978d9e338557883a90717c5bb33760bf1446
| 4,135
|
py
|
Python
|
scripts/plot_zcta_nys.py
|
iQoo/NYState-COVID-19-Tracker
|
2c39c182e3420768bc60902d220d6b257eb7b73f
|
[
"MIT"
] | 16
|
2020-04-01T16:22:32.000Z
|
2020-08-10T20:14:19.000Z
|
scripts/plot_zcta_nys.py
|
iQoo/NYState-COVID-19-Tracker
|
2c39c182e3420768bc60902d220d6b257eb7b73f
|
[
"MIT"
] | 11
|
2020-04-02T14:21:22.000Z
|
2022-03-12T00:28:20.000Z
|
scripts/plot_zcta_nys.py
|
iQoo/NYState-COVID-19-Tracker
|
2c39c182e3420768bc60902d220d6b257eb7b73f
|
[
"MIT"
] | 8
|
2020-04-02T14:10:31.000Z
|
2020-10-24T05:06:24.000Z
|
import numpy
from matplotlib import pyplot as plt
import csv
from collections import OrderedDict
import arrow
from datetime import timedelta
from matplotlib import lines
import matplotlib.patheffects as PathEffects
if __name__ == "__main__":
with open("data/NYS-county-testing-records.csv") as fp:
reader = csv.DictReader(fp)
records = list(reader)
counties = set(r["County"] for r in records)
data_by_county = OrderedDict((c, [
r for r in records
if r["County"] == c
]) for c in counties)
for idx, (county_name, county_data) in enumerate(data_by_county.items()):
if county_name == "Total": continue
x_array = list(reversed([int(d["Daily Tested"] if d['Daily Tested'] != '' else 0) for d in county_data]))
y_array = list(reversed([int(d["Daily Positive"] if d['Daily Positive'] != '' else 0) for d in county_data]))
d_array = list(reversed([arrow.get(d["Test Date"], "MM/DD/YYYY").date() for d in county_data]))
d_diff = numpy.diff(d_array) / timedelta(days=1)
x_array = x_array[1:] / d_diff
y_array = y_array[1:] / d_diff
if max(x_array) < 20: continue
line, = plt.plot(x_array[-7:], y_array[-7:], marker="o", alpha=.6, ls="dashed", markersize=1, lw=.75)
# plt.plot(x_array[-5:], y_array[-5:], marker="o", alpha=.4, ls="", markersize=1, lw=.75, color=line.get_color())
plt.plot(x_array[-1], y_array[-1], marker="o", c=line.get_color(), markersize=6, mec="white", zorder=10)
print(y_array, x_array)
if x_array[-1] < 10 or y_array[-1] < 5: continue
if idx % 2 == 1:
text = plt.text(
x_array[-1] / 1.15, y_array[-1],
" " + county_name + f" ({int(y_array[-1])}/{int(x_array[-1])})",
# color=colors[uhf_data["borough"]],
color=line.get_color(),
va="center", ha="right",
size=6, alpha=.9,
rotation=-0,
zorder=15
)
else:
text = plt.text(
x_array[-1] * 1.15, y_array[-1],
county_name + f" ({int(y_array[-1])}/{int(x_array[-1])})" + " ",
# color=colors[uhf_data["borough"]],
color=line.get_color(),
va="center", ha="left",
size=6, alpha=.9,
weight="bold",
rotation=-0,
zorder=15
)
text.set_path_effects([PathEffects.withStroke(linewidth=2, foreground='w', alpha=.5)])
plt.loglog()
plt.xlabel("Daily tested for COVID-19")
plt.ylabel("Daily positive for COVID-19")
# plt.legend([
# lines.Line2D([], [], c=color, marker="o")
# for color in colors.values()
# ], boroughs)
plt.gca().set_aspect("equal")
plt.xlim(left=10)
plt.ylim(bottom=5, top=2500)
for percent in numpy.arange(0, 0.7, .25):
plt.plot(plt.xlim(), numpy.array(plt.xlim()) * percent, c="grey", lw=.5, ls="dotted")
plt.text(plt.xlim()[1], plt.xlim()[1]*percent, f" {percent*100:.0f}%", ha="left", va="bottom", c="k", rotation=45)
for percent in numpy.arange(.75, 1.1, .25):
plt.plot(plt.xlim(), numpy.array(plt.xlim()) * percent, c="grey", lw=.5, ls="dotted")
plt.text(plt.ylim()[1]/percent, plt.ylim()[1], f" {percent*100:.0f}%", ha="left", va="bottom", c="k", rotation=45)
plt.text(plt.xlim()[0] * 1.1, 15, "daily positive rate $\\uparrow$", rotation=45, size=8)
from matplotlib.ticker import LogLocator
from util import LogFormatterSI
plt.gca().xaxis.set_major_locator(LogLocator(subs=(1, 2, 5)))
plt.gca().xaxis.set_major_formatter(LogFormatterSI(labelOnlyBase=False, minor_thresholds=(numpy.inf, numpy.inf)))
plt.gca().yaxis.set_major_locator(LogLocator(subs=(1, 2, 5)))
plt.gca().yaxis.set_major_formatter(LogFormatterSI(labelOnlyBase=False, minor_thresholds=(numpy.inf, numpy.inf)))
plt.savefig("plots/NYS-county-positive.png")
#for neighboor, data in data_by_zcta.items():
#plot(, c=[])
| 39.380952
| 123
| 0.576542
|
c427ff930a2e1ff1a1e5bb253811baaf8df55f45
| 22,556
|
py
|
Python
|
papers/ReTraCk/retriever/entitylinking/train_ner.py
|
microsoft/KC
|
928c74073246ef932f6b80f6fe353117a6cacb55
|
[
"MIT"
] | 29
|
2021-07-27T05:48:53.000Z
|
2022-03-30T00:05:41.000Z
|
papers/ReTraCk/retriever/entitylinking/train_ner.py
|
microsoft/KC
|
928c74073246ef932f6b80f6fe353117a6cacb55
|
[
"MIT"
] | 5
|
2021-07-29T08:00:26.000Z
|
2022-03-24T02:35:15.000Z
|
papers/ReTraCk/retriever/entitylinking/train_ner.py
|
microsoft/KC
|
928c74073246ef932f6b80f6fe353117a6cacb55
|
[
"MIT"
] | 7
|
2021-07-29T07:53:52.000Z
|
2022-02-21T08:10:26.000Z
|
"""
This script is adapted from https://github.com/kamalkraj/BERT-NER
"""
from __future__ import absolute_import, division, print_function
import argparse
import csv
import json
import logging
import os
import random
import sys
import numpy as np
import torch
import torch.nn.functional as F
from pytorch_transformers import (WEIGHTS_NAME, AdamW, BertConfig,
BertForTokenClassification, BertTokenizer,
WarmupLinearSchedule)
from torch import nn
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from seqeval.metrics import classification_report
from run_ner import Ner, DataProcessor, InputExample, convert_examples_to_features
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id, valid_ids=None, label_mask=None):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.valid_ids = valid_ids
self.label_mask = label_mask
def readfile(filename):
    '''
    Read a CoNLL-style file into a list of (tokens, labels) sentence pairs.
    '''
f = open(filename, mode="r", encoding="utf-8")
data = []
sentence = []
label= []
for line in f:
if len(line)==0 or line.startswith('-DOCSTART') or line[0]=="\n":
if len(sentence) > 0:
data.append((sentence,label))
sentence = []
label = []
continue
splits = line.split(' ')
sentence.append(splits[0])
label.append(splits[-1][:-1])
if len(sentence) >0:
data.append((sentence,label))
sentence = []
label = []
return data
class GrailQANerProcessor(DataProcessor):
"""Processor for the CoNLL-2003 data set."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train_ner_datetime.txt")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_ner_datetime.txt")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_ner.txt")), "test")
def get_labels(self):
return ["O", "B-entity", "I-entity",
"B-class", "I-class",
"B-type.int", "I-type.int",
"B-XMLSchema#date", "I-XMLSchema#date",
"B-XMLSchema#gYear", "I-XMLSchema#gYear",
"B-XMLSchema#gYearMonth", "I-XMLSchema#gYearMonth",
"B-XMLSchema#dateTime", "I-XMLSchema#dateTime",
"B-type.boolean", "I-type.boolean",
"B-type.float", "I-type.float",
"[CLS]", "[SEP]"]
def _create_examples(self,lines,set_type):
examples = []
for i,(sentence,label) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = ' '.join(sentence)
text_b = None
label = label
examples.append(InputExample(guid=guid,text_a=text_a,text_b=text_b,label=label))
return examples
class WebQSPNerProcessor(DataProcessor):
"""Processor for the CoNLL-2003 data set."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "webqsp_train_distant_year.txt")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "webqsp_dev_distant_year.txt")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "webqsp_test_distant_year.txt")), "test")
def get_labels(self):
return ["O", "B-entity", "I-entity",
"B-year", "I-year",
"[CLS]", "[SEP]"]
def _create_examples(self,lines,set_type):
examples = []
for i,(sentence,label) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = ' '.join(sentence)
text_b = None
label = label
examples.append(InputExample(guid=guid,text_a=text_a,text_b=text_b,label=label))
return examples
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
"bert-base-multilingual-cased, bert-base-chinese.")
parser.add_argument("--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
action='store_true',
help="Whether to run eval or not.")
parser.add_argument("--eval_on",
default="dev",
help="Whether to run eval on the dev set or test set.")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size",
default=8,
type=int,
help="Total batch size for eval.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--weight_decay", default=0.01, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
processors = {"grailqa":GrailQANerProcessor,
"webqsp": WebQSPNerProcessor}
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
        # Initializes the distributed backend, which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
task_name = args.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
num_labels = len(label_list) + 1
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
train_examples = None
num_train_optimization_steps = 0
if args.do_train:
train_examples = processor.get_train_examples(args.data_dir)
num_train_optimization_steps = int(
len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
if args.local_rank != -1:
num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
# Prepare model
config = BertConfig.from_pretrained(args.bert_model, num_labels=num_labels, finetuning_task=args.task_name)
model = Ner.from_pretrained(args.bert_model,
from_tf = False,
config = config)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(device)
param_optimizer = list(model.named_parameters())
no_decay = ['bias','LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
warmup_steps = int(args.warmup_proportion * num_train_optimization_steps)
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=warmup_steps, t_total=num_train_optimization_steps)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
global_step = 0
nb_tr_steps = 0
tr_loss = 0
label_map = {i : label for i, label in enumerate(label_list,1)}
if args.do_train:
train_features = convert_examples_to_features(
train_examples, label_list, args.max_seq_length, tokenizer)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
all_valid_ids = torch.tensor([f.valid_ids for f in train_features], dtype=torch.long)
all_lmask_ids = torch.tensor([f.label_mask for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids,all_valid_ids,all_lmask_ids)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
model.train()
for _ in trange(int(args.num_train_epochs), desc="Epoch"):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids, valid_ids,l_mask = batch
loss = model(input_ids, segment_ids, input_mask, label_ids,valid_ids,l_mask)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
# Save a trained model and the associated configuration
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
label_map = {i : label for i, label in enumerate(label_list,1)}
model_config = {"bert_model":args.bert_model,"do_lower":args.do_lower_case,"max_seq_length":args.max_seq_length,"num_labels":len(label_list)+1,"label_map":label_map}
json.dump(model_config,open(os.path.join(args.output_dir,"model_config.json"),"w"))
# Load a trained model and config that you have fine-tuned
else:
# Load a trained model and vocabulary that you have fine-tuned
model = Ner.from_pretrained(args.output_dir)
tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
model.to(device)
if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
if args.eval_on == "dev":
eval_examples = processor.get_dev_examples(args.data_dir)
elif args.eval_on == "test":
eval_examples = processor.get_test_examples(args.data_dir)
else:
raise ValueError("eval on dev or test set only")
eval_features = convert_examples_to_features(eval_examples, label_list, args.max_seq_length, tokenizer)
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
all_valid_ids = torch.tensor([f.valid_ids for f in eval_features], dtype=torch.long)
all_lmask_ids = torch.tensor([f.label_mask for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids,all_valid_ids,all_lmask_ids)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
model.eval()
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
y_true = []
y_pred = []
label_map = {i : label for i, label in enumerate(label_list,1)}
for input_ids, input_mask, segment_ids, label_ids,valid_ids,l_mask in tqdm(eval_dataloader, desc="Evaluating"):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
valid_ids = valid_ids.to(device)
label_ids = label_ids.to(device)
l_mask = l_mask.to(device)
with torch.no_grad():
logits = model(input_ids, segment_ids, input_mask,valid_ids=valid_ids,attention_mask_label=l_mask)
logits = torch.argmax(F.log_softmax(logits,dim=2),dim=2)
logits = logits.detach().cpu().numpy()
label_ids = label_ids.to('cpu').numpy()
input_mask = input_mask.to('cpu').numpy()
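            # Align predicted labels with gold labels token by token: position 0 is
            # the [CLS] placeholder and is skipped, and the sentinel id len(label_map)
            # (the last entry of label_list, "[SEP]" in this setup) marks the end of
            # the sentence, at which point the collected sequences go to y_true/y_pred.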
for i, label in enumerate(label_ids):
temp_1 = []
temp_2 = []
for j,m in enumerate(label):
if j == 0:
continue
elif label_ids[i][j] == len(label_map):
y_true.append(temp_1)
y_pred.append(temp_2)
break
else:
temp_1.append(label_map[label_ids[i][j]])
temp_2.append(label_map[logits[i][j]])
report = classification_report(y_true, y_pred, digits=4)
logger.info("\n%s", report)
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
logger.info("\n%s", report)
writer.write(report)
if __name__ == "__main__":
main()
| 46.603306
| 173
| 0.603919
|
20e97d30c750e0fa9b50035d7b82868c86b2a9b1
| 15,401
|
py
|
Python
|
framework/utils/android.py
|
maxim-filkov/mobile-test-helper
|
46dcb27b0ee25153b697d19c17801cee35e136ce
|
[
"Apache-2.0"
] | 1
|
2018-12-14T02:13:14.000Z
|
2018-12-14T02:13:14.000Z
|
framework/utils/android.py
|
maxim-filkov/mobile-test-helper
|
46dcb27b0ee25153b697d19c17801cee35e136ce
|
[
"Apache-2.0"
] | null | null | null |
framework/utils/android.py
|
maxim-filkov/mobile-test-helper
|
46dcb27b0ee25153b697d19c17801cee35e136ce
|
[
"Apache-2.0"
] | 1
|
2018-12-14T02:13:21.000Z
|
2018-12-14T02:13:21.000Z
|
"""
This module contains a list of utilities related to Android.
"""
import framework.utils.console as console
import logging
import string
import glob
import time
import os
import re
log = logging.getLogger("mth.utils")
def take_screenshot(device, target_dir, screenshot_name):
"""
Takes screenshot from attached Android device and saves this in specified folder.
:param device: device identifier (e.g. "TA9890AMTG").
:param target_dir: string, directory where to save screenshot.
:param screenshot_name: string, screenshot name.
"""
device_path = os.path.join("/sdcard/", screenshot_name)
command = "adb -s " + device + " shell screencap -p " + device_path
local_file = os.path.join(target_dir, screenshot_name)
console.execute(command)
download_file(device, device_path, local_file)
remove_file(device, device_path)
def download_file(device, device_file_path, target_file_path):
"""
Downloads file from attached Android device.
:param device: string, unique identifier of device (optional, by default connected device).
:param device_file_path: string, path to file that should be downloaded.
:param target_file_path: path where to save the downloaded file.
"""
command = "adb -s " + device + " pull " + device_file_path + " " + target_file_path
console.execute(command)
def remove_file(device, device_file_path):
"""
Removes file from attached Android device.
:param device: string, unique identifier of device (optional, by default connected device).
:param device_file_path: string, path to file that should be removed.
"""
command = "adb -s " + device + " shell rm -f " + device_file_path
console.execute(command)
def list_devices():
"""
Lists connected android devices.
"""
command = "adb devices"
stdout = console.execute(command)
lines = string.split(stdout, '\n')
del lines[0]
lines = filter(None, lines)
devices = []
for device in lines:
devices.append(string.split(device, "\t")[0])
return devices
def record_video(device, duration=180, bitrate=8000000):
"""
Records video from attached Android device.
:param device: string, device identifier, e.g. "TA9890AMTG".
:param duration: int, maximum duration for video, seconds.
    :param bitrate: int, video bit-rate in bits per second (e.g. 8000000 for 8 Mbit/s).
:returns string, recorded video file path on the device.
"""
file_name = str(int(time.time() * 1000)) + ".mp4"
device_path = os.path.join("/sdcard/", file_name)
command = "/usr/local/bin/adb -s " + device + " shell screenrecord --time-limit " + \
str(duration) + " --bit-rate " + str(bitrate) + " " + device_path
log.info("Recording in progress... To finish press Ctrl+C")
console.execute(command)
time.sleep(1)
return device_path
def get_log(device):
"""
Gets log file from device.
:param device: device identifier (e.g. "TA9890AMTG").
"""
file_name = str(int(time.time() * 1000)) + ".txt"
target_dir = os.getcwd()
log_path = os.path.join(target_dir, file_name)
clear_log_command = "adb -s " + device + " logcat -c"
get_log_command = "adb -s " + device + " logcat -v time"
console.execute(clear_log_command)
log.info("Logging in progress to '" + log_path + "'... To finish press Ctrl+C")
console.execute(get_log_command, False, log_path)
return log_path
def get_locale(device):
"""
Returns current locale for device.
:param device: string, device identifier, e.g. "TA9890AMTG".
:returns locale: string, locale set on the device, e.g. "en-US".
"""
language_command = "adb -s {0} shell getprop persist.sys.language".format(device)
country_command = "adb -s {0} shell getprop persist.sys.country".format(device)
language = console.execute(language_command).rstrip()
country = console.execute(country_command).rstrip()
return language + "-" + country
def set_locale(device, locale):
"""
Sets locale on device.
:param device: string, device identifier, e.g. "TA9890AMTG".
:param locale: string, locale to set on the device, e.g. "en-US".
"""
adbchangelanguage = "net.sanapeli.adbchangelanguage"
language, country = string.split(locale, "-")
command = "adb -s " + device + " shell am start -n net.sanapeli.adbchangelanguage/.AdbChangeLanguage " + \
"-e language " + language + " -e country " + country
if not _is_app_installed(device, adbchangelanguage):
_open_google_play_for_app(device, adbchangelanguage)
console.prompt("Please install adbchangelanguage then press Enter: ")
_grant_permissions_to_change_config(device, adbchangelanguage)
console.execute(command)
time.sleep(3)
def install_app(device, app):
"""
    Installs an application onto the given device. Currently this always installs the newest .apk found in ~/Downloads.
:param device: device identifier, e.g. "TA9890AMTG".
    :param app: apk file name, e.g. "calc.apk" (not used by the current implementation).
"""
downloads_path = os.path.expanduser('~') + "/Downloads/"
template = "*.apk"
newest_apk = max(glob.iglob(downloads_path + template), key=os.path.getctime)
    # The -d (allow version downgrade) flag is only passed for SDK >= 17.
    command = 'adb -s {0} install -r {1}{2}'.format(device, "" if int(get_sdk_version(device)) < 17 else "-d ", newest_apk)
log.info("Installing '{0}' onto device '{1}'...".format(newest_apk, device))
console.execute(command)
def uninstall_app(device, package):
"""
Uninstalls given application by its package name.
:param device: device identifier, e.g. "TA9890AMTG".
:param package: package name (e.g. com.android.calculator2).
"""
command = "adb -s " + device + " uninstall " + package
console.execute(command)
def start_app(device, package):
"""
Launches application by the given package name and puts this to foreground.
:param device: device identifier, e.g. "TA9890AMTG".
:param package: package name (e.g. com.android.calculator2).
"""
command = "adb -s " + device + " shell am start -n " + package
console.execute(command)
def get_cpu_frequency(device):
"""
Returns CPU frequency for the given device, GHz.
:param device: Device to get its CPU frequency.
:returns string: CPU frequency, e.g. "2.27".
"""
command = 'adb -s {0} shell cat "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq"'.format(device)
stdout = console.execute(command)
return "{0:.2f}".format(round(float(stdout) / 1000000, 2))
def get_ram_size(device):
"""
Returns RAM size for the given device, GB.
:param device: Device to get its RAM size.
:returns string: RAM size, e.g. "1.90".
"""
command = 'adb -s {0} shell cat /proc/meminfo'.format(device)
stdout = console.execute(command)
regex = "(?<=MemTotal:)\s+\d+(?= kB)"
ram_size = re.findall(regex, stdout)[0].lstrip()
return "{0:.2f}".format(round(float(ram_size) / 1000000, 2))
def get_resolution(device):
"""
Returns display resolution for the given device.
:param device: Device to get its resolution.
:returns string: Device resolution, e.g. "1080x1920".
"""
command1 = "adb -s {0} shell wm size".format(device)
command2 = "adb -s {0} shell dumpsys window".format(device)
regex = "\d{3,}x\d{3,}"
stdout1 = console.execute(command1)
stdout2 = console.execute(command2)
matches1 = re.findall(regex, stdout1)
matches2 = re.findall(regex, stdout2)
return matches1[0].lstrip() if matches1 else matches2[0].lstrip() if matches2 else None
def get_android_version(device):
"""
Returns Android OS version for the given device.
:param device: Device to get its Android OS version.
:returns string: Device Android version, e.g. "4.4.2".
"""
command = "adb -s {0} shell getprop ro.build.version.release".format(device)
return console.execute(command)
def get_device_model(device):
"""
Returns device model for the given device.
:param device: Device to get its model.
:returns string: Device model name, e.g. "Nexus 5".
"""
command = "adb -s {0} shell getprop ro.product.model".format(device)
return console.execute(command)
def get_ip_address(device):
"""
Returns network IP address used by the given device.
:param device: Device to get its IP address.
:returns string: IP address, e.g. "10.218.25.173".
"""
command1 = "adb -s {0} shell ifconfig".format(device)
command2 = "adb -s {0} shell netcfg".format(device)
regex1 = "(?<=inet addr:)\d[^2]\d*\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}"
regex2 = "(?<=wlan0\s{4}UP)\s+[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}"
stdout1 = console.execute(command1)
stdout2 = console.execute(command2)
matches1 = re.findall(regex1, stdout1)
matches2 = re.findall(regex2, stdout2)
return matches1[0].lstrip() if matches1 else matches2[0].lstrip() if matches2 else None
def get_sdk_version(device):
"""
Returns Android SDK version supported by the given device.
:param device: Device to get its SDK version.
:returns string: SDK version, e.g. "19".
"""
command = "adb -s {0} shell getprop ro.build.version.sdk".format(device)
return console.execute(command)
def get_language(device):
"""
Returns current language set on the given device.
:param device: Device to get its language.
:returns string: Device language, e.g. "en".
"""
command = "adb -s {0} shell getprop persist.sys.language".format(device)
return console.execute(command)
def get_country(device):
"""
Returns current country set on the given device.
:param device: Device to get its country.
:returns string: Device country, e.g. "US".
"""
command = "adb -s {0} shell getprop persist.sys.country".format(device)
return console.execute(command)
def get_manufacturer(device):
"""
Returns manufacturer for the given device.
:param device: Device to get its manufacturer.
:returns string: device manufacturer, e.g. "motorola".
"""
command = "adb -s {0} shell getprop ro.product.manufacturer".format(device)
return console.execute(command)
def enter_text(device, text):
"""
Enters given text on the device.
:param device: string, Device identifier.
:param text: string, Text to enter.
"""
command = "adb -s {0} shell input text {1}".format(device, text)
console.execute(command)
def switch_wifi(device, state):
"""
Switches WiFi ON/OFF.
:param device: string, Device identifier.
:param state: ON to enable, OFF to disable.
:returns string: Wifi state as number (0 - disabled, 1 - enabled).
"""
current_wifi_state = _get_wifi_state(device)
if (current_wifi_state == "0" and state == "OFF") or (current_wifi_state > "0" and state == "ON"):
return log.warn("WiFi is already '{0}' on the device '{1}'".format(state, device))
_open_wifi_settings(device)
_send_key_event(device, "KEYCODE_DPAD_UP")
_send_key_event(device, "KEYCODE_DPAD_UP")
_send_key_event(device, "KEYCODE_DPAD_CENTER")
_send_key_event(device, "KEYCODE_BACK")
def switch_cellular_data(device, state):
"""
Switches cellular data ON/OFF.
:param device: string, Device identifier.
:param state: ON to enable, OFF to disable.
"""
current_cellular_state = _get_cellular_data_state(device)
if (current_cellular_state == "0" and state == "OFF") or (current_cellular_state > "0" and state == "ON"):
return log.warn("Cellular Data is already '{0}' on the device '{1}'".format(state, device))
_open_data_usage_settings(device)
_send_key_event(device, "KEYCODE_DPAD_DOWN")
# needed for certain Android 5.0 devices
if state == "OFF":
_send_key_event(device, "KEYCODE_DPAD_DOWN")
_send_key_event(device, "KEYCODE_DPAD_CENTER")
# needed for certain Android 5.0 devices
if state == "ON":
_send_key_event(device, "KEYCODE_DPAD_DOWN")
_send_key_event(device, "KEYCODE_DPAD_CENTER")
# needed for confirmation dialog.
if state == "OFF":
_send_key_event(device, "KEYCODE_TAB")
_send_key_event(device, "KEYCODE_ENTER")
_send_key_event(device, "KEYCODE_BACK")
def _get_cellular_data_state(device):
"""
    Returns current cellular data state - enabled or not (0 - disabled, 1 - enabled).
:param device: device identifier where to get Cellular Data state.
:return string: Cellular Data state.
"""
command = "adb -s {0} shell settings get global mobile_data".format(device)
return console.execute(command)
def _open_data_usage_settings(device):
"""
Opens Data Usage screen.
:param device: string, device identifier.
"""
command = 'adb -s {0} shell am start -n com.android.settings/.Settings\"\$\"DataUsageSummaryActivity'\
.format(device)
console.execute(command)
def _open_wifi_settings(device):
"""
Opens WiFi settings screen.
:param device: string, device identifier where to open WiFi settings.
"""
command = "adb -s {0} shell am start -a android.intent.action.MAIN -n com.android.settings/.wifi.WifiSettings" \
.format(device)
console.execute(command)
def _get_wifi_state(device):
"""
Returns current WiFi state - enabled or not (0 - disabled, 1 - enabled).
:param device: device identifier where to get WiFi state.
:return string: WiFi state.
"""
command = "adb -s {0} shell settings get global wifi_on".format(device)
return console.execute(command)
def _send_key_event(device, keycode):
"""
Sends given key event onto the device.
:param device: string, Device identifier to send key event to, e.g. "TA9890AMTG".
:param keycode: string Key code to send, e.g. "KEYCODE_ENDCALL" or "6".
"""
command = "adb -s {0} shell input keyevent {1}".format(device, keycode)
console.execute(command)
def _grant_permissions_to_change_config(device, package):
"""
Grants CHANGE_CONFIGURATION permissions for the given application specified by package.
:param device: string, device identifier, e.g. "TA9890AMTG".
:param package: string, application package, e.g. "com.android.calculator2".
"""
command = "adb -s " + device + " shell pm grant " + package + " android.permission.CHANGE_CONFIGURATION"
console.execute(command)
def _is_app_installed(device, package):
"""
Verifies if the given application is installed on the device.
:param device: string, device identifier, e.g. "TA9890AMTG".
:param package: string, application package, e.g. "com.android.calculator2".
:returns boolean: True if installed, otherwise False.
"""
command = "adb -s {0} shell pm list packages".format(device)
regex = "(?<=package:){0}[\r\n$]".format(package)
stdout = console.execute(command)
return True if re.findall(regex, stdout) else False
def _open_google_play_for_app(device, package):
"""
Opens Google Play to install the given application.
:param device: string, device identifier, e.g. "TA9890AMTG".
:param package: string, application package, e.g. "com.android.calculator2".
"""
command = "adb -s " + device + " shell am start -a android.intent.action.VIEW -d market://details?id=" + package
console.execute(command)
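# Illustrative usage sketch (not part of the original module). It assumes adb
# is on the PATH and at least one device is attached; the output file name is
# a placeholder.
if __name__ == "__main__":
    attached = list_devices()
    if not attached:
        raise SystemExit("No Android devices attached")
    serial = attached[0]
    print("Model: " + get_device_model(serial).strip())
    print("Android: " + get_android_version(serial).strip())
    print("Resolution: " + str(get_resolution(serial)))
    take_screenshot(serial, os.getcwd(), "example_screenshot.png")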
| 34.377232
| 120
| 0.673917
|
dc6a55f6fce4a58218e00f648d0f8921dcfd30f3
| 448
|
py
|
Python
|
trading_bot/exchanges/__init__.py
|
ArthurBernard/Strategy_Manager
|
a6c80fe1a51a300e8a612fb69e0e17d0ae06f455
|
[
"MIT"
] | 6
|
2020-02-24T02:19:30.000Z
|
2021-12-19T03:03:11.000Z
|
trading_bot/exchanges/__init__.py
|
ArthurBernard/Strategy_Manager
|
a6c80fe1a51a300e8a612fb69e0e17d0ae06f455
|
[
"MIT"
] | 1
|
2020-06-17T03:29:14.000Z
|
2020-06-17T04:45:34.000Z
|
trading_bot/exchanges/__init__.py
|
ArthurBernard/Trading_Bot
|
a6c80fe1a51a300e8a612fb69e0e17d0ae06f455
|
[
"MIT"
] | 1
|
2019-01-02T16:00:07.000Z
|
2019-01-02T16:00:07.000Z
|
#!/usr/bin/env python3
# coding: utf-8
# @Author: ArthurBernard
# @Email: arthur.bernard.92@gmail.com
# @Date: 2020-02-25 11:57:04
# @Last modified by: ArthurBernard
# @Last modified time: 2020-02-25 12:15:30
""" Module containing objects to connect to exchange client API. """
# Built-in packages
# Third party packages
# Local packages
from .API_bfx import *
from .API_kraken import *
__all__ = API_bfx.__all__
__all__ += API_kraken.__all__
| 21.333333
| 68
| 0.729911
|
45d92256480477a3ad00d51a67548d10f6500b74
| 1,338
|
py
|
Python
|
eden/converter/fasta.py
|
zaidurrehman/EDeN
|
1f29d4c9d458edb2bd62a98e57254d78a1f2093f
|
[
"MIT"
] | null | null | null |
eden/converter/fasta.py
|
zaidurrehman/EDeN
|
1f29d4c9d458edb2bd62a98e57254d78a1f2093f
|
[
"MIT"
] | null | null | null |
eden/converter/fasta.py
|
zaidurrehman/EDeN
|
1f29d4c9d458edb2bd62a98e57254d78a1f2093f
|
[
"MIT"
] | null | null | null |
import networkx as nx
from eden.modifier.fasta import fasta_to_fasta
from eden.util import is_iterable
def seq_to_networkx(header, seq, **options):
"""Convert sequence tuples to networkx graphs."""
graph = nx.Graph()
graph.graph['id'] = header
for id, character in enumerate(seq):
graph.add_node(id, label=character, position=id)
if id > 0:
graph.add_edge(id - 1, id, label='-')
assert(len(graph) > 0), 'ERROR: generated empty graph. Perhaps wrong format?'
graph.graph['sequence'] = seq
return graph
def sequence_to_eden(iterable, **options):
"""Convert sequence tuples to EDeN graphs."""
no_header = options.get('no_header', False)
assert(is_iterable(iterable)), 'Not iterable'
if no_header is True:
for seq in iterable:
graph = seq_to_networkx('NONE', seq, **options)
yield graph
else:
for header, seq in iterable:
graph = seq_to_networkx(header, seq, **options)
yield graph
def fasta_to_sequence(input, **options):
"""Load sequences tuples from fasta file."""
lines = fasta_to_fasta(input, **options)
for line in lines:
header = line
        seq = next(lines)
if len(seq) == 0:
raise Exception('ERROR: empty sequence')
yield header, seq
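# Illustrative usage sketch (not part of the original module): building EDeN
# graphs from in-memory (header, sequence) tuples; fasta_to_sequence('some.fa')
# would yield the same kind of tuples from a FASTA file. The sequences below
# are placeholders.
if __name__ == '__main__':
    toy_seqs = [('seq-1', 'ACGTAC'), ('seq-2', 'GGAUCC')]
    for graph in sequence_to_eden(toy_seqs):
        print('%s %d %s' % (graph.graph['id'], len(graph), graph.graph['sequence']))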
| 29.733333
| 81
| 0.630792
|
da20b355536b1fc56b2f004fba0144127b94c1e1
| 32,686
|
py
|
Python
|
moto/sns/models.py
|
sindrig/moto
|
6b531796c1b4885193c6b3ae3b3128fc4f8e5370
|
[
"Apache-2.0"
] | null | null | null |
moto/sns/models.py
|
sindrig/moto
|
6b531796c1b4885193c6b3ae3b3128fc4f8e5370
|
[
"Apache-2.0"
] | null | null | null |
moto/sns/models.py
|
sindrig/moto
|
6b531796c1b4885193c6b3ae3b3128fc4f8e5370
|
[
"Apache-2.0"
] | null | null | null |
import datetime
import uuid
import json
import requests
import re
from collections import OrderedDict
from moto.core import BaseBackend, BaseModel, CloudFormationModel
from moto.core.utils import (
iso_8601_datetime_with_milliseconds,
camelcase_to_underscores,
BackendDict,
)
from moto.sqs import sqs_backends
from moto.sqs.exceptions import MissingParameter
from .exceptions import (
SNSNotFoundError,
DuplicateSnsEndpointError,
SnsEndpointDisabled,
SNSInvalidParameter,
InvalidParameterValue,
InternalError,
ResourceNotFoundError,
TagLimitExceededError,
)
from .utils import make_arn_for_topic, make_arn_for_subscription, is_e164
from moto.core import ACCOUNT_ID as DEFAULT_ACCOUNT_ID
DEFAULT_PAGE_SIZE = 100
MAXIMUM_MESSAGE_LENGTH = 262144 # 256 KiB
MAXIMUM_SMS_MESSAGE_BYTES = 1600 # Amazon limit for a single publish SMS action
class Topic(CloudFormationModel):
def __init__(self, name, sns_backend):
self.name = name
self.sns_backend = sns_backend
self.account_id = DEFAULT_ACCOUNT_ID
self.display_name = ""
self.delivery_policy = ""
self.kms_master_key_id = ""
self.effective_delivery_policy = json.dumps(DEFAULT_EFFECTIVE_DELIVERY_POLICY)
self.arn = make_arn_for_topic(self.account_id, name, sns_backend.region_name)
self.subscriptions_pending = 0
self.subscriptions_confimed = 0
self.subscriptions_deleted = 0
self._policy_json = self._create_default_topic_policy(
sns_backend.region_name, self.account_id, name
)
self._tags = {}
self.fifo_topic = "false"
self.content_based_deduplication = "false"
def publish(self, message, subject=None, message_attributes=None, group_id=None):
message_id = str(uuid.uuid4())
subscriptions, _ = self.sns_backend.list_subscriptions(self.arn)
for subscription in subscriptions:
subscription.publish(
message,
message_id,
subject=subject,
message_attributes=message_attributes,
group_id=group_id,
)
return message_id
@classmethod
def has_cfn_attr(cls, attribute):
return attribute in ["TopicName"]
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "TopicName":
return self.name
raise UnformattedGetAttTemplateException()
@property
def physical_resource_id(self):
return self.arn
@property
def policy(self):
return json.dumps(self._policy_json)
@policy.setter
def policy(self, policy):
self._policy_json = json.loads(policy)
@staticmethod
def cloudformation_name_type():
return "TopicName"
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sns-topic.html
return "AWS::SNS::Topic"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name, **kwargs
):
sns_backend = sns_backends[region_name]
properties = cloudformation_json["Properties"]
topic = sns_backend.create_topic(resource_name)
for subscription in properties.get("Subscription", []):
sns_backend.subscribe(
topic.arn, subscription["Endpoint"], subscription["Protocol"]
)
return topic
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
cls.delete_from_cloudformation_json(
original_resource.name, cloudformation_json, region_name
)
return cls.create_from_cloudformation_json(
new_resource_name, cloudformation_json, region_name
)
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
sns_backend = sns_backends[region_name]
properties = cloudformation_json["Properties"]
topic_name = properties.get(cls.cloudformation_name_type()) or resource_name
topic_arn = make_arn_for_topic(
DEFAULT_ACCOUNT_ID, topic_name, sns_backend.region_name
)
subscriptions, _ = sns_backend.list_subscriptions(topic_arn)
for subscription in subscriptions:
sns_backend.unsubscribe(subscription.arn)
sns_backend.delete_topic(topic_arn)
def _create_default_topic_policy(self, region_name, account_id, name):
return {
"Version": "2008-10-17",
"Id": "__default_policy_ID",
"Statement": [
{
"Effect": "Allow",
"Sid": "__default_statement_ID",
"Principal": {"AWS": "*"},
"Action": [
"SNS:GetTopicAttributes",
"SNS:SetTopicAttributes",
"SNS:AddPermission",
"SNS:RemovePermission",
"SNS:DeleteTopic",
"SNS:Subscribe",
"SNS:ListSubscriptionsByTopic",
"SNS:Publish",
"SNS:Receive",
],
"Resource": make_arn_for_topic(self.account_id, name, region_name),
"Condition": {"StringEquals": {"AWS:SourceOwner": str(account_id)}},
}
],
}
class Subscription(BaseModel):
def __init__(self, topic, endpoint, protocol):
self.topic = topic
self.endpoint = endpoint
self.protocol = protocol
self.arn = make_arn_for_subscription(self.topic.arn)
self.attributes = {}
self._filter_policy = None # filter policy as a dict, not json.
self.confirmed = False
def publish(
self, message, message_id, subject=None, message_attributes=None, group_id=None
):
if not self._matches_filter_policy(message_attributes):
return
if self.protocol == "sqs":
queue_name = self.endpoint.split(":")[-1]
region = self.endpoint.split(":")[3]
if self.attributes.get("RawMessageDelivery") != "true":
sqs_backends[region].send_message(
queue_name,
json.dumps(
self.get_post_data(
message,
message_id,
subject,
message_attributes=message_attributes,
),
sort_keys=True,
indent=2,
separators=(",", ": "),
),
group_id=group_id,
)
else:
raw_message_attributes = {}
for key, value in message_attributes.items():
type = "string_value"
type_value = value["Value"]
if value["Type"].startswith("Binary"):
type = "binary_value"
elif value["Type"].startswith("Number"):
type_value = "{0:g}".format(value["Value"])
raw_message_attributes[key] = {
"data_type": value["Type"],
type: type_value,
}
sqs_backends[region].send_message(
queue_name,
message,
message_attributes=raw_message_attributes,
group_id=group_id,
)
elif self.protocol in ["http", "https"]:
post_data = self.get_post_data(message, message_id, subject)
requests.post(
self.endpoint,
json=post_data,
headers={"Content-Type": "text/plain; charset=UTF-8"},
)
elif self.protocol == "lambda":
# TODO: support bad function name
# http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
arr = self.endpoint.split(":")
region = arr[3]
qualifier = None
if len(arr) == 7:
assert arr[5] == "function"
function_name = arr[-1]
elif len(arr) == 8:
assert arr[5] == "function"
qualifier = arr[-1]
function_name = arr[-2]
else:
assert False
from moto.awslambda import lambda_backends
lambda_backends[region].send_sns_message(
function_name, message, subject=subject, qualifier=qualifier
)
def _matches_filter_policy(self, message_attributes):
# TODO: support Anything-but matching, prefix matching and
# numeric value matching.
if not self._filter_policy:
return True
if message_attributes is None:
message_attributes = {}
def _field_match(field, rules, message_attributes):
for rule in rules:
# TODO: boolean value matching is not supported, SNS behavior unknown
if isinstance(rule, str):
if field not in message_attributes:
return False
if message_attributes[field]["Value"] == rule:
return True
try:
json_data = json.loads(message_attributes[field]["Value"])
if rule in json_data:
return True
except (ValueError, TypeError):
pass
if isinstance(rule, (int, float)):
if field not in message_attributes:
return False
if message_attributes[field]["Type"] == "Number":
attribute_values = [message_attributes[field]["Value"]]
elif message_attributes[field]["Type"] == "String.Array":
try:
attribute_values = json.loads(
message_attributes[field]["Value"]
)
if not isinstance(attribute_values, list):
attribute_values = [attribute_values]
except (ValueError, TypeError):
return False
else:
return False
                    for attribute_value in attribute_values:
                        # Even though the official documentation states 5 digits of accuracy after the decimal point for numerics, in reality it is 6
                        # https://docs.aws.amazon.com/sns/latest/dg/sns-subscription-filter-policies.html#subscription-filter-policy-constraints
                        if int(attribute_value * 1000000) == int(rule * 1000000):
return True
if isinstance(rule, dict):
keyword = list(rule.keys())[0]
attributes = list(rule.values())[0]
if keyword == "exists":
if attributes and field in message_attributes:
return True
elif not attributes and field not in message_attributes:
return True
return False
return all(
_field_match(field, rules, message_attributes)
for field, rules in self._filter_policy.items()
)
def get_post_data(self, message, message_id, subject, message_attributes=None):
post_data = {
"Type": "Notification",
"MessageId": message_id,
"TopicArn": self.topic.arn,
"Message": message,
"Timestamp": iso_8601_datetime_with_milliseconds(
datetime.datetime.utcnow()
),
"SignatureVersion": "1",
"Signature": "EXAMPLElDMXvB8r9R83tGoNn0ecwd5UjllzsvSvbItzfaMpN2nk5HVSw7XnOn/49IkxDKz8YrlH2qJXj2iZB0Zo2O71c4qQk1fMUDi3LGpij7RCW7AW9vYYsSqIKRnFS94ilu7NFhUzLiieYr4BKHpdTmdD6c0esKEYBpabxDSc=",
"SigningCertURL": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem",
"UnsubscribeURL": "https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:{}:some-topic:2bcfbf39-05c3-41de-beaa-fcfcc21c8f55".format(
DEFAULT_ACCOUNT_ID
),
}
if subject:
post_data["Subject"] = subject
if message_attributes:
post_data["MessageAttributes"] = message_attributes
return post_data
class PlatformApplication(BaseModel):
def __init__(self, region, name, platform, attributes):
self.region = region
self.name = name
self.platform = platform
self.attributes = attributes
@property
def arn(self):
return "arn:aws:sns:{region}:{AccountId}:app/{platform}/{name}".format(
region=self.region,
platform=self.platform,
name=self.name,
AccountId=DEFAULT_ACCOUNT_ID,
)
class PlatformEndpoint(BaseModel):
def __init__(self, region, application, custom_user_data, token, attributes):
self.region = region
self.application = application
self.custom_user_data = custom_user_data
self.token = token
self.attributes = attributes
self.id = uuid.uuid4()
self.messages = OrderedDict()
self.__fixup_attributes()
def __fixup_attributes(self):
# When AWS returns the attributes dict, it always contains these two elements, so we need to
# automatically ensure they exist as well.
if "Token" not in self.attributes:
self.attributes["Token"] = self.token
if "Enabled" in self.attributes:
enabled = self.attributes["Enabled"]
self.attributes["Enabled"] = enabled.lower()
else:
self.attributes["Enabled"] = "true"
@property
def enabled(self):
return json.loads(self.attributes.get("Enabled", "true").lower())
@property
def arn(self):
return "arn:aws:sns:{region}:{AccountId}:endpoint/{platform}/{name}/{id}".format(
region=self.region,
AccountId=DEFAULT_ACCOUNT_ID,
platform=self.application.platform,
name=self.application.name,
id=self.id,
)
def publish(self, message):
if not self.enabled:
raise SnsEndpointDisabled("Endpoint %s disabled" % self.id)
# This is where we would actually send a message
message_id = str(uuid.uuid4())
self.messages[message_id] = message
return message_id
class SNSBackend(BaseBackend):
def __init__(self, region_name):
super(SNSBackend, self).__init__()
self.topics = OrderedDict()
self.subscriptions: OrderedDict[str, Subscription] = OrderedDict()
self.applications = {}
self.platform_endpoints = {}
self.region_name = region_name
self.sms_attributes = {}
self.sms_messages = OrderedDict()
self.opt_out_numbers = [
"+447420500600",
"+447420505401",
"+447632960543",
"+447632960028",
"+447700900149",
"+447700900550",
"+447700900545",
"+447700900907",
]
def reset(self):
region_name = self.region_name
self.__dict__ = {}
self.__init__(region_name)
@staticmethod
def default_vpc_endpoint_service(service_region, zones):
"""List of dicts representing default VPC endpoints for this service."""
return BaseBackend.default_vpc_endpoint_service_factory(
service_region, zones, "sns"
)
def update_sms_attributes(self, attrs):
self.sms_attributes.update(attrs)
def create_topic(self, name, attributes=None, tags=None):
if attributes is None:
attributes = {}
if (
attributes.get("FifoTopic")
and attributes.get("FifoTopic").lower() == "true"
):
fails_constraints = not re.match(r"^[a-zA-Z0-9_-]{1,256}\.fifo$", name)
msg = "Fifo Topic names must end with .fifo and must be made up of only uppercase and lowercase ASCII letters, numbers, underscores, and hyphens, and must be between 1 and 256 characters long."
else:
fails_constraints = not re.match(r"^[a-zA-Z0-9_-]{1,256}$", name)
msg = "Topic names must be made up of only uppercase and lowercase ASCII letters, numbers, underscores, and hyphens, and must be between 1 and 256 characters long."
if fails_constraints:
raise InvalidParameterValue(msg)
candidate_topic = Topic(name, self)
if attributes:
for attribute in attributes:
setattr(
candidate_topic,
camelcase_to_underscores(attribute),
attributes[attribute],
)
if tags:
candidate_topic._tags = tags
if candidate_topic.arn in self.topics:
return self.topics[candidate_topic.arn]
else:
self.topics[candidate_topic.arn] = candidate_topic
return candidate_topic
def _get_values_nexttoken(self, values_map, next_token=None):
if next_token is None or not next_token:
next_token = 0
next_token = int(next_token)
values = list(values_map.values())[next_token : next_token + DEFAULT_PAGE_SIZE]
if len(values) == DEFAULT_PAGE_SIZE:
next_token = next_token + DEFAULT_PAGE_SIZE
else:
next_token = None
return values, next_token
def _get_topic_subscriptions(self, topic):
return [sub for sub in self.subscriptions.values() if sub.topic == topic]
def list_topics(self, next_token=None):
return self._get_values_nexttoken(self.topics, next_token)
def delete_topic_subscriptions(self, topic):
for key, value in dict(self.subscriptions).items():
if value.topic == topic:
self.subscriptions.pop(key)
def delete_topic(self, arn):
try:
topic = self.get_topic(arn)
self.delete_topic_subscriptions(topic)
self.topics.pop(arn)
except KeyError:
raise SNSNotFoundError("Topic with arn {0} not found".format(arn))
def get_topic(self, arn):
try:
return self.topics[arn]
except KeyError:
raise SNSNotFoundError("Topic with arn {0} not found".format(arn))
def set_topic_attribute(self, topic_arn, attribute_name, attribute_value):
topic = self.get_topic(topic_arn)
setattr(topic, attribute_name, attribute_value)
def subscribe(self, topic_arn, endpoint, protocol):
if protocol == "sms":
if re.search(r"[./-]{2,}", endpoint) or re.search(
r"(^[./-]|[./-]$)", endpoint
):
raise SNSInvalidParameter("Invalid SMS endpoint: {}".format(endpoint))
reduced_endpoint = re.sub(r"[./-]", "", endpoint)
if not is_e164(reduced_endpoint):
raise SNSInvalidParameter("Invalid SMS endpoint: {}".format(endpoint))
# AWS doesn't create duplicates
old_subscription = self._find_subscription(topic_arn, endpoint, protocol)
if old_subscription:
return old_subscription
topic = self.get_topic(topic_arn)
subscription = Subscription(topic, endpoint, protocol)
attributes = {
"PendingConfirmation": "false",
"ConfirmationWasAuthenticated": "true",
"Endpoint": endpoint,
"TopicArn": topic_arn,
"Protocol": protocol,
"SubscriptionArn": subscription.arn,
"Owner": DEFAULT_ACCOUNT_ID,
"RawMessageDelivery": "false",
}
if protocol in ["http", "https"]:
attributes["EffectiveDeliveryPolicy"] = topic.effective_delivery_policy
subscription.attributes = attributes
self.subscriptions[subscription.arn] = subscription
return subscription
def _find_subscription(self, topic_arn, endpoint, protocol):
for subscription in self.subscriptions.values():
if (
subscription.topic.arn == topic_arn
and subscription.endpoint == endpoint
and subscription.protocol == protocol
):
return subscription
return None
def unsubscribe(self, subscription_arn):
self.subscriptions.pop(subscription_arn, None)
def list_subscriptions(self, topic_arn=None, next_token=None):
if topic_arn:
topic = self.get_topic(topic_arn)
filtered = OrderedDict(
[(sub.arn, sub) for sub in self._get_topic_subscriptions(topic)]
)
return self._get_values_nexttoken(filtered, next_token)
else:
return self._get_values_nexttoken(self.subscriptions, next_token)
def publish(
self,
message,
arn=None,
phone_number=None,
subject=None,
message_attributes=None,
group_id=None,
):
if subject is not None and len(subject) > 100:
# Note that the AWS docs around length are wrong: https://github.com/spulec/moto/issues/1503
raise ValueError("Subject must be less than 100 characters")
if phone_number:
# This is only an approximation. In fact, we should try to use GSM-7 or UCS-2 encoding to count used bytes
if len(message) > MAXIMUM_SMS_MESSAGE_BYTES:
raise ValueError("SMS message must be less than 1600 bytes")
message_id = str(uuid.uuid4())
self.sms_messages[message_id] = (phone_number, message)
return message_id
if len(message) > MAXIMUM_MESSAGE_LENGTH:
raise InvalidParameterValue(
"An error occurred (InvalidParameter) when calling the Publish operation: Invalid parameter: Message too long"
)
try:
topic = self.get_topic(arn)
fifo_topic = topic.fifo_topic == "true"
if group_id is None:
# MessageGroupId is a mandatory parameter for all
# messages in a fifo queue
if fifo_topic:
raise MissingParameter("MessageGroupId")
else:
if not fifo_topic:
msg = (
"Value {} for parameter MessageGroupId is invalid. "
"Reason: The request include parameter that is not valid for this queue type."
).format(group_id)
raise InvalidParameterValue(msg)
message_id = topic.publish(
message,
subject=subject,
message_attributes=message_attributes,
group_id=group_id,
)
except SNSNotFoundError:
endpoint = self.get_endpoint(arn)
message_id = endpoint.publish(message)
return message_id
def create_platform_application(self, region, name, platform, attributes):
application = PlatformApplication(region, name, platform, attributes)
self.applications[application.arn] = application
return application
def get_application(self, arn):
try:
return self.applications[arn]
except KeyError:
raise SNSNotFoundError("Application with arn {0} not found".format(arn))
def set_application_attributes(self, arn, attributes):
application = self.get_application(arn)
application.attributes.update(attributes)
return application
def list_platform_applications(self):
return self.applications.values()
def delete_platform_application(self, platform_arn):
self.applications.pop(platform_arn)
def create_platform_endpoint(
self, region, application, custom_user_data, token, attributes
):
for endpoint in self.platform_endpoints.values():
if token == endpoint.token:
if (
attributes.get("Enabled", "").lower()
== endpoint.attributes["Enabled"]
):
return endpoint
raise DuplicateSnsEndpointError(
"Duplicate endpoint token with different attributes: %s" % token
)
platform_endpoint = PlatformEndpoint(
region, application, custom_user_data, token, attributes
)
self.platform_endpoints[platform_endpoint.arn] = platform_endpoint
return platform_endpoint
def list_endpoints_by_platform_application(self, application_arn):
return [
endpoint
for endpoint in self.platform_endpoints.values()
if endpoint.application.arn == application_arn
]
def get_endpoint(self, arn):
try:
return self.platform_endpoints[arn]
except KeyError:
raise SNSNotFoundError("Endpoint does not exist")
def set_endpoint_attributes(self, arn, attributes):
endpoint = self.get_endpoint(arn)
if "Enabled" in attributes:
attributes["Enabled"] = attributes["Enabled"].lower()
endpoint.attributes.update(attributes)
return endpoint
def delete_endpoint(self, arn):
try:
del self.platform_endpoints[arn]
except KeyError:
raise SNSNotFoundError("Endpoint with arn {0} not found".format(arn))
def get_subscription_attributes(self, arn):
subscription = self.subscriptions.get(arn)
if not subscription:
raise SNSNotFoundError(
"Subscription does not exist", template="wrapped_single_error"
)
return subscription.attributes
def set_subscription_attributes(self, arn, name, value):
if name not in [
"RawMessageDelivery",
"DeliveryPolicy",
"FilterPolicy",
"RedrivePolicy",
]:
raise SNSInvalidParameter("AttributeName")
# TODO: should do validation
_subscription = [_ for _ in self.subscriptions.values() if _.arn == arn]
if not _subscription:
raise SNSNotFoundError("Subscription with arn {0} not found".format(arn))
subscription = _subscription[0]
subscription.attributes[name] = value
if name == "FilterPolicy":
filter_policy = json.loads(value)
self._validate_filter_policy(filter_policy)
subscription._filter_policy = filter_policy
def _validate_filter_policy(self, value):
# TODO: extend validation checks
combinations = 1
for rules in value.values():
combinations *= len(rules)
        # Even though the official documentation states the total combination of values must not exceed 100, in reality it is 150
# https://docs.aws.amazon.com/sns/latest/dg/sns-subscription-filter-policies.html#subscription-filter-policy-constraints
if combinations > 150:
raise SNSInvalidParameter(
"Invalid parameter: FilterPolicy: Filter policy is too complex"
)
for field, rules in value.items():
for rule in rules:
if rule is None:
continue
if isinstance(rule, str):
continue
if isinstance(rule, bool):
continue
if isinstance(rule, (int, float)):
if rule <= -1000000000 or rule >= 1000000000:
raise InternalError("Unknown")
continue
if isinstance(rule, dict):
keyword = list(rule.keys())[0]
attributes = list(rule.values())[0]
if keyword == "anything-but":
continue
elif keyword == "exists":
if not isinstance(attributes, bool):
raise SNSInvalidParameter(
"Invalid parameter: FilterPolicy: exists match pattern must be either true or false."
)
continue
elif keyword == "numeric":
continue
elif keyword == "prefix":
continue
else:
raise SNSInvalidParameter(
"Invalid parameter: FilterPolicy: Unrecognized match type {type}".format(
type=keyword
)
)
raise SNSInvalidParameter(
"Invalid parameter: FilterPolicy: Match value must be String, number, true, false, or null"
)
def add_permission(self, topic_arn, label, aws_account_ids, action_names):
if topic_arn not in self.topics:
raise SNSNotFoundError("Topic does not exist")
policy = self.topics[topic_arn]._policy_json
statement = next(
(
statement
for statement in policy["Statement"]
if statement["Sid"] == label
),
None,
)
if statement:
raise SNSInvalidParameter("Statement already exists")
if any(action_name not in VALID_POLICY_ACTIONS for action_name in action_names):
raise SNSInvalidParameter("Policy statement action out of service scope!")
principals = [
"arn:aws:iam::{}:root".format(account_id) for account_id in aws_account_ids
]
actions = ["SNS:{}".format(action_name) for action_name in action_names]
statement = {
"Sid": label,
"Effect": "Allow",
"Principal": {"AWS": principals[0] if len(principals) == 1 else principals},
"Action": actions[0] if len(actions) == 1 else actions,
"Resource": topic_arn,
}
self.topics[topic_arn]._policy_json["Statement"].append(statement)
def remove_permission(self, topic_arn, label):
if topic_arn not in self.topics:
raise SNSNotFoundError("Topic does not exist")
statements = self.topics[topic_arn]._policy_json["Statement"]
statements = [
statement for statement in statements if statement["Sid"] != label
]
self.topics[topic_arn]._policy_json["Statement"] = statements
def list_tags_for_resource(self, resource_arn):
if resource_arn not in self.topics:
raise ResourceNotFoundError
return self.topics[resource_arn]._tags
def tag_resource(self, resource_arn, tags):
if resource_arn not in self.topics:
raise ResourceNotFoundError
updated_tags = self.topics[resource_arn]._tags.copy()
updated_tags.update(tags)
if len(updated_tags) > 50:
raise TagLimitExceededError
self.topics[resource_arn]._tags = updated_tags
def untag_resource(self, resource_arn, tag_keys):
if resource_arn not in self.topics:
raise ResourceNotFoundError
for key in tag_keys:
self.topics[resource_arn]._tags.pop(key, None)
sns_backends = BackendDict(SNSBackend, "sns")
DEFAULT_EFFECTIVE_DELIVERY_POLICY = {
"defaultHealthyRetryPolicy": {
"numNoDelayRetries": 0,
"numMinDelayRetries": 0,
"minDelayTarget": 20,
"maxDelayTarget": 20,
"numMaxDelayRetries": 0,
"numRetries": 3,
"backoffFunction": "linear",
},
"sicklyRetryPolicy": None,
"throttlePolicy": None,
"guaranteed": False,
}
VALID_POLICY_ACTIONS = [
"GetTopicAttributes",
"SetTopicAttributes",
"AddPermission",
"RemovePermission",
"DeleteTopic",
"Subscribe",
"ListSubscriptionsByTopic",
"Publish",
"Receive",
]
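# Illustrative usage sketch (not part of the original module): exercising the
# in-memory backend directly, without going through the moto responses layer.
# The region name and phone number are placeholders.
if __name__ == "__main__":
    backend = sns_backends["us-east-1"]
    topic = backend.create_topic("demo-topic")
    backend.subscribe(topic.arn, "+15555550100", "sms")
    message_id = backend.publish("hello world", arn=topic.arn)
    print(topic.arn, message_id)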
| 37.270239
| 205
| 0.585113
|
51e695db2f33948021e0817474418643ec72c63a
| 1,224
|
py
|
Python
|
skywinder/utils/configuration.py
|
PolarMesosphericClouds/SkyWinder
|
ee136cffca167905ebcd3edf88e2c7456b56a51a
|
[
"BSD-3-Clause"
] | null | null | null |
skywinder/utils/configuration.py
|
PolarMesosphericClouds/SkyWinder
|
ee136cffca167905ebcd3edf88e2c7456b56a51a
|
[
"BSD-3-Clause"
] | null | null | null |
skywinder/utils/configuration.py
|
PolarMesosphericClouds/SkyWinder
|
ee136cffca167905ebcd3edf88e2c7456b56a51a
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from traitlets import (Bool, Int, List, Unicode)
from traitlets.config import Configurable
from skywinder import root_dir
default_config_dir = os.path.abspath(os.path.join(root_dir, '../config/balloon'))
default_ground_config_dir = os.path.abspath(os.path.join(root_dir, '../config/ground'))
camera_data_dir = os.path.abspath(os.path.join(root_dir, '../config/camera_data'))
# Currently the logging system needs its directory set up separately so that logging can happen while the system
# is being initialized
LOG_DIR = '/var/pmclogs'
class GlobalConfiguration(Configurable):
"""
General configuration parameters used throughout the balloon
"""
data_directories = List(trait=Unicode, default_value=['/data1', '/data2', '/data3', '/data4']).tag(config=True)
pipeline_pyro_port = Int(50000, min=1024, max=65535).tag(config=True)
controller_pyro_port = Int(50001, min=1024, max=65535).tag(config=True)
log_dir = Unicode('/var/pmclogs').tag(config=True)
housekeeping_dir = Unicode('/var/pmclogs/housekeeping').tag(config=True)
counters_dir = Unicode('/var/pmclogs/counters').tag(config=True)
camera_commands_dir = Unicode('/var/pmclogs/camera_commands').tag(config=True)
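# Illustrative usage sketch (not part of the original module): overriding the
# defaults through a traitlets Config object, which is how Configurable
# subclasses are normally parameterized. The port value is a placeholder.
if __name__ == '__main__':
    from traitlets.config import Config
    overrides = Config()
    overrides.GlobalConfiguration.pipeline_pyro_port = 50010
    gc = GlobalConfiguration(config=overrides)
    print('%s %s' % (gc.pipeline_pyro_port, gc.data_directories))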
| 43.714286
| 115
| 0.749183
|
e43813e2b31379445f69203e866af88cc3e0b93d
| 330
|
py
|
Python
|
cma-es/batchcompute_python_sdk/src/batchcompute/core/__init__.py
|
luzhijun/-
|
c9a7f39fd033a3ba3c57acbbd309c05ac17e6bef
|
[
"Apache-2.0"
] | 17
|
2016-11-18T03:15:14.000Z
|
2022-01-09T07:50:56.000Z
|
cma-es/batchcompute_python_sdk/src/batchcompute/core/__init__.py
|
luzhijun/-
|
c9a7f39fd033a3ba3c57acbbd309c05ac17e6bef
|
[
"Apache-2.0"
] | null | null | null |
cma-es/batchcompute_python_sdk/src/batchcompute/core/__init__.py
|
luzhijun/-
|
c9a7f39fd033a3ba3c57acbbd309c05ac17e6bef
|
[
"Apache-2.0"
] | 7
|
2016-11-20T10:20:57.000Z
|
2021-04-20T05:29:57.000Z
|
'''
Core functions, classes for batch compute service.
Including a simple api implementation of batch compute service, core
exceptions for batch compute.
'''
__all__ = [
"Api", "Clienterror", "FieldError", "ValidationError",
]
from .api import Api
from .exceptions import (
ClientError, FieldError, ValidationError,
)
| 19.411765
| 68
| 0.736364
|
2022e679a5f7992a82d2c1a5e7e7c98bbc1a5cab
| 2,783
|
py
|
Python
|
toolchain/riscv/Linux/riscv64-unknown-elf/lib/rv64imfdc/lp64d/compact/libstdc++.a-gdb.py
|
zhiqiang-hu/bl_iot_sdk
|
154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d
|
[
"Apache-2.0"
] | null | null | null |
toolchain/riscv/Linux/riscv64-unknown-elf/lib/rv64imfdc/lp64d/compact/libstdc++.a-gdb.py
|
zhiqiang-hu/bl_iot_sdk
|
154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d
|
[
"Apache-2.0"
] | null | null | null |
toolchain/riscv/Linux/riscv64-unknown-elf/lib/rv64imfdc/lp64d/compact/libstdc++.a-gdb.py
|
zhiqiang-hu/bl_iot_sdk
|
154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d
|
[
"Apache-2.0"
] | null | null | null |
# -*- python -*-
# Copyright (C) 2009-2020 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/scratch/jenkins/workspace/tpp-freedom-tools/tpp03--build-binary-packages--parameterized/obj/x86_64-linux-ubuntu14/install/riscv64-unknown-elf-gcc-10.2.0-2020.12.8-x86_64-linux-ubuntu14/share/gcc-10.2.0/python'
libdir = '/scratch/jenkins/workspace/tpp-freedom-tools/tpp03--build-binary-packages--parameterized/obj/x86_64-linux-ubuntu14/install/riscv64-unknown-elf-gcc-10.2.0-2020.12.8-x86_64-linux-ubuntu14/riscv64-unknown-elf/lib/rv64imfdc/lp64d/compact'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Call a function as a plain import would not execute body of the included file
# on repeated reloads of this object file.
from libstdcxx.v6 import register_libstdcxx_printers
register_libstdcxx_printers(gdb.current_objfile())
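# Illustrative manual-load sketch (not part of the original file): if this
# auto-load script is not picked up by gdb, the same printers can be
# registered from ~/.gdbinit (the share path below is a placeholder):
#
#   python
#   import sys
#   sys.path.insert(0, '/path/to/share/gcc-10.2.0/python')
#   from libstdcxx.v6 import register_libstdcxx_printers
#   register_libstdcxx_printers(None)
#   end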
| 44.887097
| 244
| 0.736974
|
93105606dc10c89847bee26b8cd19b41e008b324
| 2,680
|
py
|
Python
|
tests/bugs/core_4917_test.py
|
FirebirdSQL/firebird-qa
|
96af2def7f905a06f178e2a80a2c8be4a4b44782
|
[
"MIT"
] | 1
|
2022-02-05T11:37:13.000Z
|
2022-02-05T11:37:13.000Z
|
tests/bugs/core_4917_test.py
|
FirebirdSQL/firebird-qa
|
96af2def7f905a06f178e2a80a2c8be4a4b44782
|
[
"MIT"
] | 1
|
2021-09-03T11:47:00.000Z
|
2021-09-03T12:42:10.000Z
|
tests/bugs/core_4917_test.py
|
FirebirdSQL/firebird-qa
|
96af2def7f905a06f178e2a80a2c8be4a4b44782
|
[
"MIT"
] | 1
|
2021-06-30T14:14:16.000Z
|
2021-06-30T14:14:16.000Z
|
#coding:utf-8
#
# id: bugs.core_4917
# title: ALTER DOMAIN ... TO <new_name> allows to specify <new_name> matching to 'RDB$[[:DIGIT:]]*'
# description:
# tracker_id: CORE-4917
# min_versions: ['2.5.5']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
test_script_1 = """
-- CREATION:
-- #########
-- First, check that direct creation of domain with 'RDB$' prefix is disabled:
create domain rdb$1 int;
    -- This also should FAIL because the new domain name is written in UPPER case (despite quotes):
create domain "RDB$2" int;
    -- This should pass because, though the new name starts with 'rdb$', it
    -- is written in quotes and not in upper case:
create domain "rdb$1" int;
-- ALTERING:
-- #########
alter domain "rdb$1" to foo;
alter domain foo to "rdb$1";
    -- This should pass because, though the new name starts with 'rdb$', it
    -- is written in quotes and not in upper case:
alter domain "rdb$1" to "rdb$2";
-- this should FAIL:
alter domain "rdb$2" to RDB$3;
    -- this also should FAIL because the new domain name is written in UPPER case (despite quotes):
alter domain "rdb$2" to "RDB$3";
show domain;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
rdb$2
"""
expected_stderr_1 = """
Statement failed, SQLSTATE = 42000
unsuccessful metadata update
-CREATE DOMAIN RDB$1 failed
-SQL error code = -637
-Implicit domain name RDB$1 not allowed in user created domain
Statement failed, SQLSTATE = 42000
unsuccessful metadata update
-CREATE DOMAIN RDB$2 failed
-SQL error code = -637
-Implicit domain name RDB$2 not allowed in user created domain
Statement failed, SQLSTATE = 42000
unsuccessful metadata update
-ALTER DOMAIN rdb$2 failed
-SQL error code = -637
-Implicit domain name RDB$3 not allowed in user created domain
Statement failed, SQLSTATE = 42000
unsuccessful metadata update
-ALTER DOMAIN rdb$2 failed
-SQL error code = -637
-Implicit domain name RDB$3 not allowed in user created domain
"""
@pytest.mark.version('>=3.0')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.expected_stderr = expected_stderr_1
act_1.execute()
assert act_1.clean_stderr == act_1.clean_expected_stderr
assert act_1.clean_stdout == act_1.clean_expected_stdout
| 27.628866
| 106
| 0.663433
|
a121ba3a89edb0ea4b60c487b2e5843d38add5df
| 1,680
|
py
|
Python
|
mars/serialization/ray.py
|
hxri/mars
|
f7864f00911883b94800b63856f0e57648d3d9b4
|
[
"Apache-2.0"
] | 2,413
|
2018-12-06T09:37:11.000Z
|
2022-03-30T15:47:39.000Z
|
mars/serialization/ray.py
|
hxri/mars
|
f7864f00911883b94800b63856f0e57648d3d9b4
|
[
"Apache-2.0"
] | 1,335
|
2018-12-07T03:06:18.000Z
|
2022-03-31T11:45:57.000Z
|
mars/serialization/ray.py
|
hxri/mars
|
f7864f00911883b94800b63856f0e57648d3d9b4
|
[
"Apache-2.0"
] | 329
|
2018-12-07T03:12:41.000Z
|
2022-03-29T21:49:57.000Z
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Dict, Any
from ..utils import lazy_import
from .core import Serializer, buffered, PickleSerializer
from .exception import ExceptionSerializer
ray = lazy_import('ray')
class RaySerializer(Serializer):
"""Return raw object to let ray do serialization."""
serializer_name = 'ray'
@buffered
def serialize(self, obj: Any, context: Dict):
header = {'o': obj}
buffers = []
return header, buffers
def deserialize(self, header: Dict, buffers: List, context: Dict):
assert not buffers
return header['o']
def register_ray_serializers():
PickleSerializer.unregister(object)
ExceptionSerializer.unregister(Exception)
RaySerializer.register(object)
RaySerializer.register(ray.ObjectRef)
RaySerializer.register(ray.actor.ActorHandle)
def unregister_ray_serializers():
RaySerializer.unregister(ray.actor.ActorHandle)
RaySerializer.unregister(ray.ObjectRef)
RaySerializer.unregister(object)
PickleSerializer.register(object)
ExceptionSerializer.register(Exception)
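# Illustrative usage sketch (not part of the original module): installing the
# Ray serializers only for the duration of a Ray-backed call and restoring the
# default pickle/exception serializers afterwards. The wrapped callable is a
# placeholder.
def _with_ray_serializers_example(func, *args, **kwargs):
    register_ray_serializers()
    try:
        return func(*args, **kwargs)
    finally:
        unregister_ray_serializers()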
| 31.698113
| 74
| 0.743452
|
61091c67609c844f4b7f013920d5795be17cab81
| 2,381
|
py
|
Python
|
paleo/layers/base.py
|
JamieC1998/paleo
|
984bbf5d2942f28b8599db4374c0ad788efe0f6e
|
[
"Apache-2.0"
] | 78
|
2016-12-08T09:08:56.000Z
|
2022-03-17T22:49:11.000Z
|
paleo/layers/base.py
|
JamieC1998/paleo
|
984bbf5d2942f28b8599db4374c0ad788efe0f6e
|
[
"Apache-2.0"
] | 9
|
2017-02-28T02:09:21.000Z
|
2019-04-24T15:07:29.000Z
|
paleo/layers/base.py
|
JamieC1998/paleo
|
984bbf5d2942f28b8599db4374c0ad788efe0f6e
|
[
"Apache-2.0"
] | 34
|
2016-12-14T14:40:06.000Z
|
2021-11-23T17:48:30.000Z
|
"""The base class of estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import abstractmethod, abstractproperty
class BaseLayer(object):
"""Base class for estimator. """
def __init__(self, name, layertype):
self._name = name
self._layertype = layertype
self._inputs = None
self._outputs = None
self._parents = None
def __repr__(self):
return '%s\t%s %s' % (self.name, self.outputs,
self.additional_summary())
@property
def parents(self):
return self._parents
@parents.setter
def parents(self, val):
self._parents = val
@property
def batch_size(self):
assert self._inputs[0] == self._outputs[0]
return self._inputs[0]
@batch_size.setter
def batch_size(self, batch_size):
self._inputs[0] = batch_size
self._outputs[0] = batch_size
@abstractproperty
def name(self):
"""The name of this layer."""
return self._name
@abstractproperty
def layertype(self):
"""The type of this layer."""
return self._layertype
@abstractmethod
def additional_summary(self):
"""Returns the additional summary when print the layer as string."""
return ""
@abstractproperty
def inputs(self):
"""Returns the shape of input tensor of this layer."""
return self._inputs
@abstractproperty
def outputs(self):
"""Returns the shape of output tensor for this layer."""
return self._outputs
@abstractproperty
def weights_in_bytes(self):
"""Returns the size of weights in this layer in bytes."""
return 0
@abstractproperty
def num_params(self):
"""Returns the number of trainable parameters in this layer."""
return 0
class Generic(BaseLayer):
"""Estimator for Generic layers. """
def __init__(self, name, inputs, type):
"""Initialize estimator. """
super(Generic, self).__init__(name, 'generic_{}'.format(type))
self._inputs = inputs
self._outputs = list(self._inputs)
def additional_summary(self):
return 'Generic layer: %s' % self._layertype
def memory_in_bytes(self):
"""Returns weights."""
return 0
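# Hedged usage sketch (an assumption, not part of the original file): layers
# describe themselves through their input/output shapes, which is what the
# Paleo estimator reasons about. With the Generic layer defined above:
#
#     layer = Generic('pool1', inputs=[32, 112, 112, 64], type='pool')
#     print(layer)              # prints the name, output shape and generic summary
#     layer.batch_size = 64     # rewrites both inputs[0] and outputs[0]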
| 25.602151
| 76
| 0.625367
|
d9f1d947c6b92eee87f2c61db66a64e64109140e
| 405
|
py
|
Python
|
uniondrug_blog/asgi.py
|
Tareya/my_blog
|
e02906e7a2479494ef0612d3aecfc798beec7477
|
[
"Apache-2.0"
] | null | null | null |
uniondrug_blog/asgi.py
|
Tareya/my_blog
|
e02906e7a2479494ef0612d3aecfc798beec7477
|
[
"Apache-2.0"
] | null | null | null |
uniondrug_blog/asgi.py
|
Tareya/my_blog
|
e02906e7a2479494ef0612d3aecfc798beec7477
|
[
"Apache-2.0"
] | null | null | null |
"""
ASGI config for uniondrug_blog project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'uniondrug_blog.settings')
application = get_asgi_application()
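# Hedged usage note (an assumption, not part of the original file): the
# ``application`` object above is what an ASGI server imports, for example
#
#     daphne uniondrug_blog.asgi:application
#     uvicorn uniondrug_blog.asgi:application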
| 23.823529
| 78
| 0.792593
|
5d9896233333620fa309980c8b5d102c2c980433
| 2,624
|
py
|
Python
|
Scripts/core/shared_commands/help_command.py
|
velocist/TS4CheatsInfo
|
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
|
[
"Apache-2.0"
] | null | null | null |
Scripts/core/shared_commands/help_command.py
|
velocist/TS4CheatsInfo
|
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
|
[
"Apache-2.0"
] | null | null | null |
Scripts/core/shared_commands/help_command.py
|
velocist/TS4CheatsInfo
|
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
|
[
"Apache-2.0"
] | null | null | null |
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Core\shared_commands\help_command.py
# Compiled at: 2012-11-07 22:39:51
# Size of source mod 2**32: 2574 bytes
import sims4.commands
def display_command(command, output, prefix, detailed=False):
if command:
if detailed:
output(prefix + '{0} : {1}'.format(command[0], command[2]))
output(prefix + 'Usage : {0} {1}'.format(command[0], command[1]))
else:
output(prefix + command[0])
def display_tree(local_tree, output, prefix='', recurse=True):
def key_func(k):
if isinstance(local_tree.get(k), dict):
return '_' + k
return k
for k in sorted((local_tree.keys()), key=key_func):
v = local_tree[k]
if isinstance(v, dict):
if recurse:
output(prefix + k)
display_tree(v, output, prefix + ' ')
else:
output(prefix + '**' + k)
elif isinstance(v, tuple):
display_command(v, output, prefix)
@sims4.commands.Command('help')
def help_command(search_string=None, _connection=None):
output = sims4.commands.Output(_connection)
commands = sims4.commands.describe(search_string)
if len(commands) == 0:
if search_string:
output(" No commands found matching filter '{0}'".format(search_string))
else:
output(' No commands found')
else:
if len(commands) == 1:
display_command(commands[0], output, '', True)
else:
for command in commands:
if search_string == str(command[0]):
display_command(command, output, '', True)
output('')
if search_string:
output("Listing all commands matching filter '{0}'".format(search_string))
else:
output('Listing all commands')
global_tree = {}
for command in commands:
name = str(command[0])
local_tree = global_tree
components = name.split('.')
for idx in range(len(components)):
component = components[idx]
if idx < len(components) - 1:
                        local_tree = local_tree.setdefault(component, {})
else:
local_tree.setdefault(component, command)
display_tree(global_tree, output, prefix=' ')
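# Hedged illustration (an assumption, not part of the original file): for
# dotted command names the loop above builds a nested dict keyed by name
# component, with the command tuple stored at the leaf. For commands named
# "sims.add", "sims.remove" and "help" the tree would look like
#
#     {'sims': {'add': (...), 'remove': (...)}, 'help': (...)}
#
# which display_tree() then prints as indented branches, delegating the
# command tuples to display_command().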
| 36.957746
| 107
| 0.557927
|
a0a8a6c6bb6fe54cbc6ee696a51457f3d7fd2269
| 24,516
|
py
|
Python
|
fisherman.py
|
redbouk/FisherMan
|
83f9786368e51f6499c8e4552c3b4697b85f4013
|
[
"BSD-3-Clause"
] | 1
|
2021-09-04T20:13:21.000Z
|
2021-09-04T20:13:21.000Z
|
fisherman.py
|
redbouk/FisherMan
|
83f9786368e51f6499c8e4552c3b4697b85f4013
|
[
"BSD-3-Clause"
] | null | null | null |
fisherman.py
|
redbouk/FisherMan
|
83f9786368e51f6499c8e4552c3b4697b85f4013
|
[
"BSD-3-Clause"
] | null | null | null |
#! /usr/bin/env python3
import datetime
import sys
import json
from argparse import ArgumentParser
from base64 import b64decode
from os import walk, remove, getcwd
from pathlib import Path
from re import findall
from time import sleep
from typing import Callable
from zipfile import ZipFile, ZIP_DEFLATED
import colorama
import requests
import requests.exceptions
from selenium.common import exceptions
from selenium.webdriver import Firefox, FirefoxOptions, FirefoxProfile
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait
from src.form_text import color_text
from src.logo import name
from src.manager import Manager, Xpaths
module_name = 'FisherMan: Extract information from facebook profiles.'
__version__ = "3.5.0"
class Fisher:
def __init__(self):
parser = ArgumentParser(description=f'{module_name} (Version {__version__})')
exclusive_group = parser.add_mutually_exclusive_group()
exclusive_group2 = parser.add_mutually_exclusive_group()
opt_search = parser.add_argument_group("search options")
opt_profile = parser.add_argument_group("profile options")
opt_login = parser.add_argument_group("credentials")
opt_out = parser.add_argument_group("output")
exclusive_filter = opt_search.add_mutually_exclusive_group()
parser.add_argument('--version', action='version', version=f'%(prog)s {__version__}',
help='Shows the current version of the program.')
exclusive_group.add_argument('-u', '--username', nargs='+', help='Defines one or more users for the search.')
exclusive_group.add_argument("-i", "--id", nargs="+", help="Set the profile identification number.")
exclusive_group.add_argument('--use-txt', dest='txt', metavar='TXT_FILE', nargs=1,
help='Replaces the USERNAME parameter with a user list in a txt.')
exclusive_group.add_argument("-S", "--search", metavar="USER", help="It does a shallow search for the username."
" Replace the spaces with '.'(period).")
opt_profile.add_argument('-sf', '--scrape-family', action='store_true', dest='scrpfm',
help='If this parameter is passed, '
'the information from family members will be scraped if available.')
opt_profile.add_argument("--specify", nargs="+", type=int, choices=(0, 1, 2, 3, 4, 5),
help="Use the index number to return a specific part of the page. "
"about: 0, "
"about_contact_and_basic_info: 1, "
"about_family_and_relationships: 2, "
"about_details: 3, "
"about_work_and_education: 4, "
"about_places: 5.")
opt_profile.add_argument("-s", "--several", action="store_true",
help="Returns extra data like profile picture, number of followers and friends.")
opt_search.add_argument("--filters", action="store_true",
help="Shows the list of available filters.")
exclusive_filter.add_argument("-work", help="Sets the work filter.")
exclusive_filter.add_argument("-education", help="Sets the education filter.")
exclusive_filter.add_argument("-city", help="Sets the city filter.")
parser.add_argument('-b', '--browser', action='store_true', help='Opens the browser/bot.')
opt_login.add_argument('--email', metavar='EMAIL', nargs=1,
                               help='If the profile is blocked, you can use your own account, '
                                    'provided you have the searched user in your friends list.')
opt_login.add_argument('--password', metavar='PASSWORD', dest='pwd', nargs=1,
help='Set the password for your facebook account, '
'this parameter has to be used with --email.')
opt_out.add_argument('-o', '--file-output', action='store_true', dest='out',
help='Save the output data to a .txt file.')
opt_out.add_argument("-c", "--compact", action="store_true",
help="Compress all .txt files. Use together with -o.")
exclusive_group2.add_argument('-v', '-d', '--verbose', '--debug', action='store_true',
help='It shows in detail the data search process.')
exclusive_group2.add_argument("-q", "--quiet", action="store_true",
help="Eliminates and simplifies some script outputs for "
"a simpler and more discrete visualization.")
self.args = parser.parse_args()
if not self.args.quiet:
print(color_text('cyan', name))
else:
print("Starting FisherMan...")
def update():
try:
r = requests.get("https://raw.githubusercontent.com/Godofcoffe/FisherMan/main/fisherman.py")
remote_version = str(findall('__version__ = "(.*)"', r.text)[0])
local_version = __version__
if remote_version != local_version:
if not ARGS.quiet:
print(color_text('yellow', "Update Available!\n" +
f"You are running version {local_version}. Version {remote_version} "
f"is available at https://github.com/Godofcoffe/FisherMan"))
else:
print(color_text("yellow", "Update Available!"))
except Exception as error:
print(color_text('red', f"A problem occured while checking for an update: {error}"))
def show_filters():
with open("filters.json", "r") as json_file:
for tag in json.load(json_file).items():
print(f"{tag[0]}:")
for t in tag[1]:
print("\t", t)
def upload_txt_file(name_file: str):
"""
Load a file to replace the username parameter.
:param name_file: txt file name.
:return: A list with each line of the file.
"""
    if not name_file.lower().endswith(".txt"):
name_file += ".txt"
if Path(name_file).is_file():
try:
with open(name_file, 'r') as txt:
users_txt = [line.replace("\n", "") for line in txt.readlines()]
except Exception as error:
print(color_text('red', f'An error has occurred: {error}'))
else:
return users_txt
else:
raise Exception(color_text("red", "INVALID FILE!"))
def compact():
"""
    Compress all .txt and .jpeg output files, with the exception of requirements.txt.
"""
with ZipFile(f"{str(datetime.datetime.now())[:16]}.zip", "w", ZIP_DEFLATED) as zip_output:
for _, _, files in walk(getcwd()):
for archive in files:
extension = Path(archive).suffix
_file_name = archive.replace(extension, "")
if (extension.lower() == ".txt" and _file_name != "requeriments") or extension.lower() == ".jpeg":
zip_output.write(archive)
remove(archive)
print(f'[{color_text("green", "+")}] successful compression')
def check_connection():
"""
Check the internet connection.
"""
try:
requests.get("https://google.com")
except requests.exceptions.ConnectionError:
raise Exception("There is no internet connection.")
def search(brw: Firefox, user: str):
parameter = user.replace(".", "%20")
with open("filters.json", "r") as jsonfile:
filters = json.load(jsonfile)
if ARGS.work or ARGS.education or ARGS.city:
suffix = "&filters="
great_filter = ""
if ARGS.work is not None:
great_filter += filters["Work"][ARGS.work]
if ARGS.education is not None:
great_filter += filters["Education"][ARGS.education]
if ARGS.city is not None:
great_filter += filters["City"][ARGS.city]
brw.get(f"{manager.get_search_prefix()}{parameter}{suffix + great_filter}")
else:
brw.get(f"{manager.get_search_prefix()}{parameter}")
if ARGS.verbose:
print(f'[{color_text("white", "+")}] entering the search page')
sleep(2)
profiles = scrolling_by_element(browser, (By.CSS_SELECTOR, "[role='article']"))
if ARGS.verbose:
print(f'[{color_text("green", "+")}] loaded profiles: {color_text("green", len(profiles))}')
print(color_text("green", "Profiles found..."))
print()
for p in profiles:
try:
title = p.find_element_by_tag_name("h2")
except (exceptions.StaleElementReferenceException, AttributeError, exceptions.NoSuchElementException):
pass
else:
print(color_text("green", "Name:"), title.text)
try:
info = p.find_element_by_class_name("jktsbyx5").text
except (exceptions.NoSuchElementException, exceptions.StaleElementReferenceException):
pass
else:
print(color_text("green", "Info:"), str(info).replace("\n", ", "))
try:
link = str(title.find_element_by_css_selector("a[href]").get_attribute("href")).replace("\n", "")
except (AttributeError, UnboundLocalError):
pass
else:
print(color_text("green", "user|id:"), link)
print()
def extra_data(brw: Firefox, user: str):
"""
Save other data outside the about user page.
:param brw: Instance of WebDriver.
:param user: username to search.
"""
if ARGS.id:
brw.get(f"{manager.get_id_prefix() + user}")
else:
brw.get(f"{manager.get_url() + user}")
friends = None
wbw = WebDriverWait(brw, 10)
xpaths = Xpaths()
def collection_by_xpath(expected: Callable, xpath: str):
try:
wbw.until(expected((By.XPATH, xpath)))
except exceptions.NoSuchElementException:
print(f'[{color_text("red", "-")}] non-existent element')
except exceptions.TimeoutException:
if ARGS.verbose:
print(f'[{color_text("yellow", "-")}] timed out to get the extra data')
else:
print(f'[{color_text("yellow", "-")}] time limit exceeded')
else:
return brw.find_element_by_xpath(xpath)
img = collection_by_xpath(ec.element_to_be_clickable, xpaths.picture)
img.screenshot(f"{user}_profile_picture.png")
if not ARGS.quiet:
print(f'[{color_text("green", "+")}] picture saved')
element = collection_by_xpath(ec.visibility_of_element_located, xpaths.bio).text
if element:
bio = element
else:
bio = None
if collection_by_xpath(ec.visibility_of_element_located, xpaths.followers) is not None:
followers = str(collection_by_xpath(ec.visibility_of_element_located, xpaths.followers).text).split()[0]
else:
followers = None
try:
element = collection_by_xpath(ec.visibility_of_element_located, xpaths.friends)
element = element.find_elements_by_tag_name("span")[2].text
except IndexError:
print(f'[{color_text("red", "-")}] There is no number of friends to catch')
except:
friends = None
else:
friends = element
if ARGS.txt:
_file_name = rf"extraData-{user}-{str(datetime.datetime.now())[:16]}.txt"
if ARGS.compact:
_file_name = f"extraData-{user}.txt"
with open(_file_name, "w+") as extra:
extra.write(f"Bio: {bio}")
extra.write(f"Followers: {followers}")
extra.write(f"Friends: {friends}")
else:
# in the future to add more data variables, put in the dict
manager.add_extras(user, {"Bio": bio, "Followers": followers, "Friends": friends})
def scrolling_by_element(brw: Firefox, locator: tuple, n: int = 30):
"""
Scroll page by the number of elements.
:param brw: Instance of WebDriver.
:param locator: The element tuple as a "locator". Example: (By.NAME, "foo").
:param n: The number of elements you want it to return.
The page will scroll until the condition n is met, the default value of n is 30.
"""
wbw = WebDriverWait(brw, 10)
px = 0
elements = wbw.until(ec.presence_of_all_elements_located(locator))
while True:
if len(elements) > n:
break
px += 250
brw.execute_script(f"window.scroll(0, {px});")
elements = brw.find_elements(*locator)
return elements
def thin_out(user: str):
"""
Username Refiner.
:param user: user to be refined.
This function returns a username that is acceptable for the script to run correctly.
"""
if "id=" in user or user.isnumeric():
if "facebook.com" in user:
user = user[user.index("=") + 1:]
return manager.get_id_prefix(), user
else:
if "facebook.com" in user:
user = user[user.index("/", 9) + 1:]
return manager.get_url(), user
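# Hedged examples (assumptions, not part of the original file) of how
# thin_out() normalizes the accepted input forms:
#
#     thin_out('zuck')                                   -> (profile prefix, 'zuck')
#     thin_out('https://facebook.com/zuck')              -> (profile prefix, 'zuck')
#     thin_out('100044109344891')                        -> (id prefix, '100044109344891')
#     thin_out('https://facebook.com/profile.php?id=4')  -> (id prefix, '4')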
def scrape(brw: Firefox, items: list[str]):
"""
Extract certain information from the html of an item in the list provided.
:param brw: Instance of WebDriver.
:param items: List of users to apply to scrape.
All data is stored in a list for each iterable items.
"""
branch = ['/about', '/about_contact_and_basic_info', '/about_family_and_relationships', '/about_details',
'/about_work_and_education', '/about_places']
branch_id = [bn.replace("/", "&sk=") for bn in branch]
wbw = WebDriverWait(brw, 10)
for usrs in items:
prefix, usrs = thin_out(usrs)
temp_data = []
if not ARGS.quiet:
print(f'[{color_text("white", "*")}] Coming in {prefix + usrs}')
        # Modify the branch list here so that only the parts selected via --specify are iterated.
if ARGS.specify:
temp_branch = []
for index in ARGS.specify:
temp_branch.append(branch[index])
if ARGS.verbose:
print(f'[{color_text("green", "+")}] branch {index} added to url')
branch = temp_branch
# search for extra data
if ARGS.several:
if ARGS.verbose:
print(f'[{color_text("blue", "+")}] getting extra data...')
extra_data(brw, usrs)
tot = len(branch)
rest = 0
for bn in branch if not usrs.isnumeric() else branch_id:
brw.get(f'{prefix + usrs + bn}')
try:
output = wbw.until(ec.presence_of_element_located((By.CLASS_NAME, 'f7vcsfb0')))
except exceptions.TimeoutException:
print(f'[{color_text("yellow", "-")}] time limit exceeded')
except Exception as error:
print(f'[{color_text("red", "-")}] class f7vcsfb0 did not return')
if ARGS.verbose:
print(color_text("yellow", f"error details:\n{error}"))
else:
if ARGS.verbose:
print(f'[{color_text("blue", "+")}] Collecting data from: div.f7vcsfb0')
else:
if ARGS.quiet:
rest += 1
print("\033[K", f'[{color_text("blue", "+")}] collecting data ({rest}:{tot})', end="\r")
else:
print(f'[{color_text("blue", "+")}] collecting data ...')
temp_data.append(output.text)
# check to start scrape family members
if "about_family_and_relationships" in bn:
members = output.find_elements(By.TAG_NAME, "a")
if members and ARGS.scrpfm:
members_list = []
for link in members:
members_list.append(link.get_attribute('href'))
manager.add_affluent(usrs, members_list)
# this scope will only be executed if the list of "affluents" is not empty.
if manager.get_affluent():
div = "\n\n\n" + '=' * 60 + "\n\n\n"
for memb in manager.get_affluent()[usrs]:
print()
if not ARGS.quiet:
print(f'[{color_text("white", "*")}] Coming in {memb}')
temp_data.append(div)
# search for extra data
if ARGS.several:
if ARGS.verbose:
print(f'[{color_text("blue", "+")}] getting extra data...')
extra_data(brw, memb)
rest = 0
for bn in branch if not thin_out(memb)[1].isnumeric() else branch_id:
brw.get(f'{memb + bn}')
try:
output2 = wbw.until(ec.presence_of_element_located((By.CLASS_NAME,
'f7vcsfb0')))
except exceptions.TimeoutException:
print(f'[{color_text("yellow", "-")}] time limit exceeded')
except Exception as error:
print(f'[{color_text("red", "-")}] class f7vcsfb0 did not return')
if ARGS.verbose:
print(color_text("yellow", f"error details:\n{error}"))
else:
if ARGS.verbose:
print(f'[{color_text("blue", "+")}] Collecting data from: div.f7vcsfb0')
else:
if ARGS.quiet:
rest += 1
print("\033[K", f'[{color_text("blue", "+")}] collecting data ({rest}:{tot})',
end="\r")
else:
print(f'[{color_text("blue", "+")}] collecting data ...')
temp_data.append(output2.text)
# complete addition of all data
manager.add_data(usrs, temp_data)
def login(brw: Firefox):
"""
Execute the login on the page.
:param brw: Instance of WebDriver.
"""
try:
brw.get(manager.get_url())
except exceptions.WebDriverException as error:
if ARGS.verbose:
print(f'[{color_text("red", "-")}] An error occurred while loading the home page:')
print(error)
print(f'[{color_text("yellow", "*")}] clearing cookies and starting over.')
elif ARGS.quiet:
print(f'[{color_text("yellow", "*")}] An error occurred, restarting.')
brw.delete_all_cookies()
brw.get(manager.get_url())
finally:
if brw.current_url != manager.get_url():
print(color_text("red", "Unfortunately, I could not load the facebook homepage to login."))
print(color_text("yellow", "Go to the repository and create a new issue reporting the problem."))
sys.exit(1)
wbw = WebDriverWait(brw, 10)
email = wbw.until(ec.element_to_be_clickable((By.NAME, "email")))
pwd = wbw.until(ec.element_to_be_clickable((By.NAME, "pass")))
ok = wbw.until(ec.element_to_be_clickable((By.NAME, "login")))
email.clear()
pwd.clear()
# custom accounts will only be applied if both fields are not empty
    if ARGS.email is None or ARGS.pwd is None:
if ARGS.verbose:
print(f'[{color_text("white", "*")}] adding fake email: {manager.get_email()}')
email.send_keys(manager.get_email())
print(f'[{color_text("white", "*")}] adding password: ...')
pwd.send_keys(b64decode(manager.get_pass()).decode("utf-8"))
else:
print(f'[{color_text("white", "*")}] logging into the account: {manager.get_email()}')
email.send_keys(manager.get_email())
pwd.send_keys(b64decode(manager.get_pass()).decode("utf-8"))
else:
if ARGS.verbose:
print(f'adding email: {ARGS.email}')
            email.send_keys(ARGS.email)
print('adding password: ...')
pwd.send_keys(ARGS.pwd)
else:
print(f'logging into the account: {ARGS.email}')
email.send_keys(ARGS.email)
pwd.send_keys(ARGS.pwd)
ok.click()
if ARGS.verbose:
print(f'[{color_text("green", "+")}] successfully logged in')
def init():
"""
Start the webdriver.
"""
# browser settings
_profile = FirefoxProfile()
_options = FirefoxOptions()
# eliminate pop-ups
_profile.set_preference("dom.popup_maximum", 0)
_profile.set_preference("privacy.popups.showBrowserMessage", False)
# incognito
_profile.set_preference("browser.privatebrowsing.autostart", True)
_options.add_argument("--incognito")
# arguments
# _options.add_argument('--disable-blink-features=AutomationControlled')
_options.add_argument("--disable-extensions")
# _options.add_argument('--profile-directory=Default')
_options.add_argument("--disable-plugins-discovery")
configs = {"firefox_profile": _profile, "options": _options}
if not ARGS.browser:
if ARGS.verbose:
print(f'[{color_text("blue", "*")}] Starting in hidden mode')
configs["options"].add_argument("--headless")
configs["options"].add_argument("--start-maximized")
if ARGS.verbose:
print(f'[{color_text("white", "*")}] Opening browser ...')
try:
engine = Firefox(**configs)
except Exception as error:
print(color_text("red",
f'The executable "geckodriver" was not found or the browser "Firefox" is not installed.'))
print(color_text("yellow", f"error details:\n{error}"))
else:
return engine
def out_file(_input: list[str]):
"""
Create the .txt output of the -o parameter.
:param _input: The list that will be iterated over each line of the file, in this case it is the list of users.
"""
for usr in _input:
usr = thin_out(usr)[1]
file_name = rf"{usr}-{str(datetime.datetime.now())[:16]}.txt"
if ARGS.compact:
file_name = usr + ".txt"
with open(file_name, 'w+') as file:
for data_list in manager.get_data()[usr]:
file.writelines(data_list)
print(f'[{color_text("green", "+")}] .txt file(s) created')
if ARGS.compact:
if ARGS.verbose:
print(f'[{color_text("white", "*")}] preparing compaction...')
compact()
if __name__ == '__main__':
colorama.init()
check_connection()
fs = Fisher()
manager = Manager()
ARGS = fs.args
update()
if ARGS.filters:
show_filters()
sys.exit(0)
browser = init()
try:
login(browser)
if ARGS.search:
search(browser, ARGS.search)
elif ARGS.txt:
scrape(browser, upload_txt_file(ARGS.txt[0]))
elif ARGS.username:
scrape(browser, ARGS.username)
elif ARGS.id:
scrape(browser, ARGS.id)
except Exception as error:
raise error
finally:
browser.quit()
if ARGS.out: # .txt output creation
if ARGS.username:
out_file(ARGS.username)
elif ARGS.txt:
out_file(upload_txt_file(ARGS.txt[0]))
elif ARGS.id:
out_file(ARGS.id)
else:
if ARGS.id or ARGS.username or ARGS.txt:
print(color_text('green', 'Information found:'))
count_profiles = len(manager.get_all_keys()[2])
for profile in manager.get_all_keys()[2]:
for data in manager.get_data()[profile]:
print('-' * 60)
print(data)
if count_profiles > 1:
print("\n\n")
print("-" * 30, "{:^}".format("/" * 20), "-" * 28)
print("\n\n")
if ARGS.several:
print("=" * 60)
print("EXTRAS:")
for data_extra in manager.get_extras()[profile].items():
print(f"{data_extra[0]:10}: {data_extra[1]}")
| 38.729858
| 120
| 0.568894
|
c2a0ae29acc7c9680ca718b2f869eaf4170a4cf8
| 39,859
|
py
|
Python
|
research/struct2depth/model.py
|
gauss-surgical/models
|
625acf8c404feb9e1248e3ea93e16556a1d861fa
|
[
"Apache-2.0"
] | null | null | null |
research/struct2depth/model.py
|
gauss-surgical/models
|
625acf8c404feb9e1248e3ea93e16556a1d861fa
|
[
"Apache-2.0"
] | null | null | null |
research/struct2depth/model.py
|
gauss-surgical/models
|
625acf8c404feb9e1248e3ea93e16556a1d861fa
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Build model for inference or training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import nets
import project
import reader
import util
gfile = tf.gfile
slim = tf.contrib.slim
NUM_SCALES = 4
class Model(object):
"""Model code based on SfMLearner."""
def __init__(self,
data_dir=None,
file_extension='png',
is_training=True,
learning_rate=0.0002,
beta1=0.9,
reconstr_weight=0.85,
smooth_weight=0.05,
ssim_weight=0.15,
icp_weight=0.0,
batch_size=4,
img_height=128,
img_width=416,
seq_length=3,
architecture=nets.RESNET,
imagenet_norm=True,
weight_reg=0.05,
exhaustive_mode=False,
random_scale_crop=False,
flipping_mode=reader.FLIP_RANDOM,
random_color=True,
depth_upsampling=True,
depth_normalization=True,
compute_minimum_loss=True,
use_skip=True,
joint_encoder=True,
build_sum=True,
shuffle=True,
input_file='train',
handle_motion=False,
equal_weighting=False,
size_constraint_weight=0.0,
train_global_scale_var=True):
self.data_dir = data_dir
self.file_extension = file_extension
self.is_training = is_training
self.learning_rate = learning_rate
self.reconstr_weight = reconstr_weight
self.smooth_weight = smooth_weight
self.ssim_weight = ssim_weight
self.icp_weight = icp_weight
self.beta1 = beta1
self.batch_size = batch_size
self.img_height = img_height
self.img_width = img_width
self.seq_length = seq_length
self.architecture = architecture
self.imagenet_norm = imagenet_norm
self.weight_reg = weight_reg
self.exhaustive_mode = exhaustive_mode
self.random_scale_crop = random_scale_crop
self.flipping_mode = flipping_mode
self.random_color = random_color
self.depth_upsampling = depth_upsampling
self.depth_normalization = depth_normalization
self.compute_minimum_loss = compute_minimum_loss
self.use_skip = use_skip
self.joint_encoder = joint_encoder
self.build_sum = build_sum
self.shuffle = shuffle
self.input_file = input_file
self.handle_motion = handle_motion
self.equal_weighting = equal_weighting
self.size_constraint_weight = size_constraint_weight
self.train_global_scale_var = train_global_scale_var
tf.logging.info('data_dir: %s', data_dir)
tf.logging.info('file_extension: %s', file_extension)
tf.logging.info('is_training: %s', is_training)
tf.logging.info('learning_rate: %s', learning_rate)
tf.logging.info('reconstr_weight: %s', reconstr_weight)
tf.logging.info('smooth_weight: %s', smooth_weight)
tf.logging.info('ssim_weight: %s', ssim_weight)
tf.logging.info('icp_weight: %s', icp_weight)
tf.logging.info('size_constraint_weight: %s', size_constraint_weight)
tf.logging.info('beta1: %s', beta1)
tf.logging.info('batch_size: %s', batch_size)
tf.logging.info('img_height: %s', img_height)
tf.logging.info('img_width: %s', img_width)
tf.logging.info('seq_length: %s', seq_length)
tf.logging.info('architecture: %s', architecture)
tf.logging.info('imagenet_norm: %s', imagenet_norm)
tf.logging.info('weight_reg: %s', weight_reg)
tf.logging.info('exhaustive_mode: %s', exhaustive_mode)
tf.logging.info('random_scale_crop: %s', random_scale_crop)
tf.logging.info('flipping_mode: %s', flipping_mode)
tf.logging.info('random_color: %s', random_color)
tf.logging.info('depth_upsampling: %s', depth_upsampling)
tf.logging.info('depth_normalization: %s', depth_normalization)
tf.logging.info('compute_minimum_loss: %s', compute_minimum_loss)
tf.logging.info('use_skip: %s', use_skip)
tf.logging.info('joint_encoder: %s', joint_encoder)
tf.logging.info('build_sum: %s', build_sum)
tf.logging.info('shuffle: %s', shuffle)
tf.logging.info('input_file: %s', input_file)
tf.logging.info('handle_motion: %s', handle_motion)
tf.logging.info('equal_weighting: %s', equal_weighting)
tf.logging.info('train_global_scale_var: %s', train_global_scale_var)
if self.size_constraint_weight > 0 or not is_training:
self.global_scale_var = tf.Variable(
0.1, name='global_scale_var',
trainable=self.is_training and train_global_scale_var,
dtype=tf.float32,
constraint=lambda x: tf.clip_by_value(x, 0, np.infty))
if self.is_training:
self.reader = reader.DataReader(self.data_dir, self.batch_size,
self.img_height, self.img_width,
self.seq_length, NUM_SCALES,
self.file_extension,
self.random_scale_crop,
self.flipping_mode,
self.random_color,
self.imagenet_norm,
self.shuffle,
self.input_file)
self.build_train_graph()
else:
self.build_depth_test_graph()
self.build_egomotion_test_graph()
if self.handle_motion:
self.build_objectmotion_test_graph()
# At this point, the model is ready. Print some info on model params.
util.count_parameters()
def build_train_graph(self):
self.build_inference_for_training()
self.build_loss()
self.build_train_op()
if self.build_sum:
self.build_summaries()
def build_inference_for_training(self):
"""Invokes depth and ego-motion networks and computes clouds if needed."""
(self.image_stack, self.image_stack_norm, self.seg_stack,
self.intrinsic_mat, self.intrinsic_mat_inv) = self.reader.read_data()
with tf.variable_scope('depth_prediction'):
# Organized by ...[i][scale]. Note that the order is flipped in
# variables in build_loss() below.
self.disp = {}
self.depth = {}
self.depth_upsampled = {}
self.inf_loss = 0.0
# Organized by [i].
disp_bottlenecks = [None] * self.seq_length
if self.icp_weight > 0:
self.cloud = {}
for i in range(self.seq_length):
image = self.image_stack_norm[:, :, :, 3 * i:3 * (i + 1)]
multiscale_disps_i, disp_bottlenecks[i] = nets.disp_net(
self.architecture, image, self.use_skip,
self.weight_reg, True)
multiscale_depths_i = [1.0 / d for d in multiscale_disps_i]
self.disp[i] = multiscale_disps_i
self.depth[i] = multiscale_depths_i
if self.depth_upsampling:
self.depth_upsampled[i] = []
# Upsample low-resolution depth maps using differentiable bilinear
# interpolation.
for s in range(len(multiscale_depths_i)):
self.depth_upsampled[i].append(tf.image.resize_bilinear(
multiscale_depths_i[s], [self.img_height, self.img_width],
align_corners=True))
if self.icp_weight > 0:
multiscale_clouds_i = [
project.get_cloud(d,
self.intrinsic_mat_inv[:, s, :, :],
name='cloud%d_%d' % (s, i))
for (s, d) in enumerate(multiscale_depths_i)
]
self.cloud[i] = multiscale_clouds_i
# Reuse the same depth graph for all images.
tf.get_variable_scope().reuse_variables()
if self.handle_motion:
# Define egomotion network. This network can see the whole scene except
# for any moving objects as indicated by the provided segmentation masks.
# To avoid the network getting clues of motion by tracking those masks, we
# define the segmentation masks as the union temporally.
with tf.variable_scope('egomotion_prediction'):
base_input = self.image_stack_norm # (B, H, W, 9)
seg_input = self.seg_stack # (B, H, W, 9)
ref_zero = tf.constant(0, dtype=tf.uint8)
# Motion model is currently defined for three-frame sequences.
object_mask1 = tf.equal(seg_input[:, :, :, 0], ref_zero)
object_mask2 = tf.equal(seg_input[:, :, :, 3], ref_zero)
object_mask3 = tf.equal(seg_input[:, :, :, 6], ref_zero)
mask_complete = tf.expand_dims(tf.logical_and( # (B, H, W, 1)
tf.logical_and(object_mask1, object_mask2), object_mask3), axis=3)
mask_complete = tf.tile(mask_complete, (1, 1, 1, 9)) # (B, H, W, 9)
# Now mask out base_input.
self.mask_complete = tf.to_float(mask_complete)
self.base_input_masked = base_input * self.mask_complete
self.egomotion = nets.egomotion_net(
image_stack=self.base_input_masked,
disp_bottleneck_stack=None,
joint_encoder=False,
seq_length=self.seq_length,
weight_reg=self.weight_reg)
# Define object motion network for refinement. This network only sees
# one object at a time over the whole sequence, and tries to estimate its
# motion. The sequence of images are the respective warped frames.
# For each scale, contains batch_size elements of shape (N, 2, 6).
self.object_transforms = {}
# For each scale, contains batch_size elements of shape (N, H, W, 9).
self.object_masks = {}
self.object_masks_warped = {}
# For each scale, contains batch_size elements of size N.
self.object_ids = {}
self.egomotions_seq = {}
self.warped_seq = {}
self.inputs_objectmotion_net = {}
with tf.variable_scope('objectmotion_prediction'):
# First, warp raw images according to overall egomotion.
for s in range(NUM_SCALES):
self.warped_seq[s] = []
self.egomotions_seq[s] = []
for source_index in range(self.seq_length):
egomotion_mat_i_1 = project.get_transform_mat(
self.egomotion, source_index, 1)
warped_image_i_1, _ = (
project.inverse_warp(
self.image_stack[
:, :, :, source_index*3:(source_index+1)*3],
self.depth_upsampled[1][s],
egomotion_mat_i_1,
self.intrinsic_mat[:, 0, :, :],
self.intrinsic_mat_inv[:, 0, :, :]))
self.warped_seq[s].append(warped_image_i_1)
self.egomotions_seq[s].append(egomotion_mat_i_1)
# Second, for every object in the segmentation mask, take its mask and
# warp it according to the egomotion estimate. Then put a threshold to
# binarize the warped result. Use this mask to mask out background and
# other objects, and pass the filtered image to the object motion
# network.
self.object_transforms[s] = []
self.object_masks[s] = []
self.object_ids[s] = []
self.object_masks_warped[s] = []
self.inputs_objectmotion_net[s] = {}
for i in range(self.batch_size):
seg_sequence = self.seg_stack[i] # (H, W, 9=3*3)
object_ids = tf.unique(tf.reshape(seg_sequence, [-1]))[0]
self.object_ids[s].append(object_ids)
color_stack = []
mask_stack = []
mask_stack_warped = []
for j in range(self.seq_length):
current_image = self.warped_seq[s][j][i] # (H, W, 3)
current_seg = seg_sequence[:, :, j * 3:(j+1) * 3] # (H, W, 3)
def process_obj_mask_warp(obj_id):
"""Performs warping of the individual object masks."""
obj_mask = tf.to_float(tf.equal(current_seg, obj_id))
# Warp obj_mask according to overall egomotion.
obj_mask_warped, _ = (
project.inverse_warp(
tf.expand_dims(obj_mask, axis=0),
# Middle frame, highest scale, batch element i:
tf.expand_dims(self.depth_upsampled[1][s][i], axis=0),
# Matrix for warping j into middle frame, batch elem. i:
tf.expand_dims(self.egomotions_seq[s][j][i], axis=0),
tf.expand_dims(self.intrinsic_mat[i, 0, :, :], axis=0),
tf.expand_dims(self.intrinsic_mat_inv[i, 0, :, :],
axis=0)))
obj_mask_warped = tf.squeeze(obj_mask_warped)
obj_mask_binarized = tf.greater( # Threshold to binarize mask.
obj_mask_warped, tf.constant(0.5))
return tf.to_float(obj_mask_binarized)
def process_obj_mask(obj_id):
"""Returns the individual object masks separately."""
return tf.to_float(tf.equal(current_seg, obj_id))
object_masks = tf.map_fn( # (N, H, W, 3)
process_obj_mask, object_ids, dtype=tf.float32)
if self.size_constraint_weight > 0:
# The object segmentation masks are all in object_masks.
# We need to measure the height of every of them, and get the
# approximate distance.
# self.depth_upsampled of shape (seq_length, scale, B, H, W).
depth_pred = self.depth_upsampled[j][s][i] # (H, W)
def get_losses(obj_mask):
"""Get motion constraint loss."""
# Find height of segment.
coords = tf.where(tf.greater( # Shape (num_true, 2=yx)
obj_mask[:, :, 0], tf.constant(0.5, dtype=tf.float32)))
y_max = tf.reduce_max(coords[:, 0])
y_min = tf.reduce_min(coords[:, 0])
seg_height = y_max - y_min
f_y = self.intrinsic_mat[i, 0, 1, 1]
approx_depth = ((f_y * self.global_scale_var) /
tf.to_float(seg_height))
reference_pred = tf.boolean_mask(
depth_pred, tf.greater(
tf.reshape(obj_mask[:, :, 0],
(self.img_height, self.img_width, 1)),
tf.constant(0.5, dtype=tf.float32)))
# Establish loss on approx_depth, a scalar, and
# reference_pred, our dense prediction. Normalize both to
# prevent degenerative depth shrinking.
global_mean_depth_pred = tf.reduce_mean(depth_pred)
reference_pred /= global_mean_depth_pred
approx_depth /= global_mean_depth_pred
spatial_err = tf.abs(reference_pred - approx_depth)
mean_spatial_err = tf.reduce_mean(spatial_err)
return mean_spatial_err
losses = tf.map_fn(
get_losses, object_masks, dtype=tf.float32)
self.inf_loss += tf.reduce_mean(losses)
object_masks_warped = tf.map_fn( # (N, H, W, 3)
process_obj_mask_warp, object_ids, dtype=tf.float32)
filtered_images = tf.map_fn(
lambda mask: current_image * mask, object_masks_warped,
dtype=tf.float32) # (N, H, W, 3)
color_stack.append(filtered_images)
mask_stack.append(object_masks)
mask_stack_warped.append(object_masks_warped)
# For this batch-element, if there are N moving objects,
# color_stack, mask_stack and mask_stack_warped contain both
# seq_length elements of shape (N, H, W, 3).
# We can now concatenate them on the last axis, creating a tensor of
# (N, H, W, 3*3 = 9), and, assuming N does not get too large so that
# we have enough memory, pass them in a single batch to the object
# motion network.
mask_stack = tf.concat(mask_stack, axis=3) # (N, H, W, 9)
mask_stack_warped = tf.concat(mask_stack_warped, axis=3)
color_stack = tf.concat(color_stack, axis=3) # (N, H, W, 9)
all_transforms = nets.objectmotion_net(
# We cut the gradient flow here as the object motion gradient
                # should have no say in how the egomotion network behaves.
# One could try just stopping the gradient for egomotion, but
# not for the depth prediction network.
image_stack=tf.stop_gradient(color_stack),
disp_bottleneck_stack=None,
joint_encoder=False, # Joint encoder not supported.
seq_length=self.seq_length,
weight_reg=self.weight_reg)
# all_transforms of shape (N, 2, 6).
self.object_transforms[s].append(all_transforms)
self.object_masks[s].append(mask_stack)
self.object_masks_warped[s].append(mask_stack_warped)
self.inputs_objectmotion_net[s][i] = color_stack
tf.get_variable_scope().reuse_variables()
else:
# Don't handle motion, classic model formulation.
with tf.name_scope('egomotion_prediction'):
if self.joint_encoder:
# Re-arrange disp_bottleneck_stack to be of shape
# [B, h_hid, w_hid, c_hid * seq_length]. Currently, it is a list with
# seq_length elements, each of dimension [B, h_hid, w_hid, c_hid].
disp_bottleneck_stack = tf.concat(disp_bottlenecks, axis=3)
else:
disp_bottleneck_stack = None
self.egomotion = nets.egomotion_net(
image_stack=self.image_stack_norm,
disp_bottleneck_stack=disp_bottleneck_stack,
joint_encoder=self.joint_encoder,
seq_length=self.seq_length,
weight_reg=self.weight_reg)
def build_loss(self):
"""Adds ops for computing loss."""
with tf.name_scope('compute_loss'):
self.reconstr_loss = 0
self.smooth_loss = 0
self.ssim_loss = 0
self.icp_transform_loss = 0
self.icp_residual_loss = 0
# self.images is organized by ...[scale][B, h, w, seq_len * 3].
self.images = [None for _ in range(NUM_SCALES)]
# Following nested lists are organized by ...[scale][source-target].
self.warped_image = [{} for _ in range(NUM_SCALES)]
self.warp_mask = [{} for _ in range(NUM_SCALES)]
self.warp_error = [{} for _ in range(NUM_SCALES)]
self.ssim_error = [{} for _ in range(NUM_SCALES)]
self.icp_transform = [{} for _ in range(NUM_SCALES)]
self.icp_residual = [{} for _ in range(NUM_SCALES)]
self.middle_frame_index = util.get_seq_middle(self.seq_length)
# Compute losses at each scale.
for s in range(NUM_SCALES):
# Scale image stack.
if s == 0: # Just as a precaution. TF often has interpolation bugs.
self.images[s] = self.image_stack
else:
height_s = int(self.img_height / (2**s))
width_s = int(self.img_width / (2**s))
self.images[s] = tf.image.resize_bilinear(
self.image_stack, [height_s, width_s], align_corners=True)
# Smoothness.
if self.smooth_weight > 0:
for i in range(self.seq_length):
# When computing minimum loss, use the depth map from the middle
# frame only.
if not self.compute_minimum_loss or i == self.middle_frame_index:
disp_smoothing = self.disp[i][s]
if self.depth_normalization:
# Perform depth normalization, dividing by the mean.
mean_disp = tf.reduce_mean(disp_smoothing, axis=[1, 2, 3],
keep_dims=True)
disp_input = disp_smoothing / mean_disp
else:
disp_input = disp_smoothing
scaling_f = (1.0 if self.equal_weighting else 1.0 / (2**s))
self.smooth_loss += scaling_f * self.depth_smoothness(
disp_input, self.images[s][:, :, :, 3 * i:3 * (i + 1)])
self.debug_all_warped_image_batches = []
for i in range(self.seq_length):
for j in range(self.seq_length):
if i == j:
continue
# When computing minimum loss, only consider the middle frame as
# target.
if self.compute_minimum_loss and j != self.middle_frame_index:
continue
# We only consider adjacent frames, unless either
# compute_minimum_loss is on (where the middle frame is matched with
# all other frames) or exhaustive_mode is on (where all frames are
# matched with each other).
if (not self.compute_minimum_loss and not self.exhaustive_mode and
abs(i - j) != 1):
continue
selected_scale = 0 if self.depth_upsampling else s
source = self.images[selected_scale][:, :, :, 3 * i:3 * (i + 1)]
target = self.images[selected_scale][:, :, :, 3 * j:3 * (j + 1)]
if self.depth_upsampling:
target_depth = self.depth_upsampled[j][s]
else:
target_depth = self.depth[j][s]
key = '%d-%d' % (i, j)
if self.handle_motion:
# self.seg_stack of shape (B, H, W, 9).
# target_depth corresponds to middle frame, of shape (B, H, W, 1).
# Now incorporate the other warping results, performed according
# to the object motion network's predictions.
# self.object_masks batch_size elements of (N, H, W, 9).
# self.object_masks_warped batch_size elements of (N, H, W, 9).
# self.object_transforms batch_size elements of (N, 2, 6).
self.all_batches = []
for batch_s in range(self.batch_size):
# To warp i into j, first take the base warping (this is the
# full image i warped into j using only the egomotion estimate).
base_warping = self.warped_seq[s][i][batch_s]
transform_matrices_thisbatch = tf.map_fn(
lambda transform: project.get_transform_mat(
tf.expand_dims(transform, axis=0), i, j)[0],
self.object_transforms[0][batch_s])
def inverse_warp_wrapper(matrix):
"""Wrapper for inverse warping method."""
warp_image, _ = (
project.inverse_warp(
tf.expand_dims(base_warping, axis=0),
tf.expand_dims(target_depth[batch_s], axis=0),
tf.expand_dims(matrix, axis=0),
tf.expand_dims(self.intrinsic_mat[
batch_s, selected_scale, :, :], axis=0),
tf.expand_dims(self.intrinsic_mat_inv[
batch_s, selected_scale, :, :], axis=0)))
return warp_image
warped_images_thisbatch = tf.map_fn(
inverse_warp_wrapper, transform_matrices_thisbatch,
dtype=tf.float32)
warped_images_thisbatch = warped_images_thisbatch[:, 0, :, :, :]
# warped_images_thisbatch is now of shape (N, H, W, 9).
# Combine warped frames into a single one, using the object
# masks. Result should be (1, 128, 416, 3).
# Essentially, we here want to sum them all up, filtered by the
# respective object masks.
mask_base_valid_source = tf.equal(
self.seg_stack[batch_s, :, :, i*3:(i+1)*3],
tf.constant(0, dtype=tf.uint8))
mask_base_valid_target = tf.equal(
self.seg_stack[batch_s, :, :, j*3:(j+1)*3],
tf.constant(0, dtype=tf.uint8))
mask_valid = tf.logical_and(
mask_base_valid_source, mask_base_valid_target)
self.base_warping = base_warping * tf.to_float(mask_valid)
background = tf.expand_dims(self.base_warping, axis=0)
def construct_const_filter_tensor(obj_id):
return tf.fill(
dims=[self.img_height, self.img_width, 3],
value=tf.sign(obj_id)) * tf.to_float(
tf.equal(self.seg_stack[batch_s, :, :, 3:6],
tf.cast(obj_id, dtype=tf.uint8)))
filter_tensor = tf.map_fn(
construct_const_filter_tensor,
tf.to_float(self.object_ids[s][batch_s]))
filter_tensor = tf.stack(filter_tensor, axis=0)
objects_to_add = tf.reduce_sum(
tf.multiply(warped_images_thisbatch, filter_tensor),
axis=0, keepdims=True)
combined = background + objects_to_add
self.all_batches.append(combined)
# Now of shape (B, 128, 416, 3).
self.warped_image[s][key] = tf.concat(self.all_batches, axis=0)
else:
# Don't handle motion, classic model formulation.
egomotion_mat_i_j = project.get_transform_mat(
self.egomotion, i, j)
# Inverse warp the source image to the target image frame for
# photometric consistency loss.
self.warped_image[s][key], self.warp_mask[s][key] = (
project.inverse_warp(
source,
target_depth,
egomotion_mat_i_j,
self.intrinsic_mat[:, selected_scale, :, :],
self.intrinsic_mat_inv[:, selected_scale, :, :]))
# Reconstruction loss.
self.warp_error[s][key] = tf.abs(self.warped_image[s][key] - target)
if not self.compute_minimum_loss:
self.reconstr_loss += tf.reduce_mean(
self.warp_error[s][key] * self.warp_mask[s][key])
# SSIM.
if self.ssim_weight > 0:
self.ssim_error[s][key] = self.ssim(self.warped_image[s][key],
target)
# TODO(rezama): This should be min_pool2d().
if not self.compute_minimum_loss:
ssim_mask = slim.avg_pool2d(self.warp_mask[s][key], 3, 1,
'VALID')
self.ssim_loss += tf.reduce_mean(
self.ssim_error[s][key] * ssim_mask)
# If the minimum loss should be computed, the loss calculation has been
# postponed until here.
if self.compute_minimum_loss:
for frame_index in range(self.middle_frame_index):
key1 = '%d-%d' % (frame_index, self.middle_frame_index)
key2 = '%d-%d' % (self.seq_length - frame_index - 1,
self.middle_frame_index)
tf.logging.info('computing min error between %s and %s', key1, key2)
min_error = tf.minimum(self.warp_error[s][key1],
self.warp_error[s][key2])
self.reconstr_loss += tf.reduce_mean(min_error)
if self.ssim_weight > 0: # Also compute the minimum SSIM loss.
min_error_ssim = tf.minimum(self.ssim_error[s][key1],
self.ssim_error[s][key2])
self.ssim_loss += tf.reduce_mean(min_error_ssim)
# Build the total loss as composed of L1 reconstruction, SSIM, smoothing
# and object size constraint loss as appropriate.
self.reconstr_loss *= self.reconstr_weight
self.total_loss = self.reconstr_loss
if self.smooth_weight > 0:
self.smooth_loss *= self.smooth_weight
self.total_loss += self.smooth_loss
if self.ssim_weight > 0:
self.ssim_loss *= self.ssim_weight
self.total_loss += self.ssim_loss
if self.size_constraint_weight > 0:
self.inf_loss *= self.size_constraint_weight
self.total_loss += self.inf_loss
def gradient_x(self, img):
return img[:, :, :-1, :] - img[:, :, 1:, :]
def gradient_y(self, img):
return img[:, :-1, :, :] - img[:, 1:, :, :]
def depth_smoothness(self, depth, img):
"""Computes image-aware depth smoothness loss."""
depth_dx = self.gradient_x(depth)
depth_dy = self.gradient_y(depth)
image_dx = self.gradient_x(img)
image_dy = self.gradient_y(img)
weights_x = tf.exp(-tf.reduce_mean(tf.abs(image_dx), 3, keepdims=True))
weights_y = tf.exp(-tf.reduce_mean(tf.abs(image_dy), 3, keepdims=True))
smoothness_x = depth_dx * weights_x
smoothness_y = depth_dy * weights_y
return tf.reduce_mean(abs(smoothness_x)) + tf.reduce_mean(abs(smoothness_y))
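  # Hedged note (not part of the original file): depth_smoothness() above is
  # the standard edge-aware first-order smoothness term,
  #   L = mean(|dx(D)| * exp(-mean_c|dx(I)|)) + mean(|dy(D)| * exp(-mean_c|dy(I)|)),
  # so depth gradients are penalized less wherever the image itself has strong
  # gradients (object boundaries).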
def ssim(self, x, y):
"""Computes a differentiable structured image similarity measure."""
c1 = 0.01**2 # As defined in SSIM to stabilize div. by small denominator.
c2 = 0.03**2
mu_x = slim.avg_pool2d(x, 3, 1, 'VALID')
mu_y = slim.avg_pool2d(y, 3, 1, 'VALID')
sigma_x = slim.avg_pool2d(x**2, 3, 1, 'VALID') - mu_x**2
sigma_y = slim.avg_pool2d(y**2, 3, 1, 'VALID') - mu_y**2
sigma_xy = slim.avg_pool2d(x * y, 3, 1, 'VALID') - mu_x * mu_y
ssim_n = (2 * mu_x * mu_y + c1) * (2 * sigma_xy + c2)
ssim_d = (mu_x**2 + mu_y**2 + c1) * (sigma_x + sigma_y + c2)
ssim = ssim_n / ssim_d
return tf.clip_by_value((1 - ssim) / 2, 0, 1)
def build_train_op(self):
with tf.name_scope('train_op'):
optim = tf.train.AdamOptimizer(self.learning_rate, self.beta1)
self.train_op = slim.learning.create_train_op(self.total_loss, optim)
self.global_step = tf.Variable(0, name='global_step', trainable=False)
self.incr_global_step = tf.assign(
self.global_step, self.global_step + 1)
def build_summaries(self):
"""Adds scalar and image summaries for TensorBoard."""
tf.summary.scalar('total_loss', self.total_loss)
tf.summary.scalar('reconstr_loss', self.reconstr_loss)
if self.smooth_weight > 0:
tf.summary.scalar('smooth_loss', self.smooth_loss)
if self.ssim_weight > 0:
tf.summary.scalar('ssim_loss', self.ssim_loss)
if self.icp_weight > 0:
tf.summary.scalar('icp_transform_loss', self.icp_transform_loss)
tf.summary.scalar('icp_residual_loss', self.icp_residual_loss)
if self.size_constraint_weight > 0:
tf.summary.scalar('inf_loss', self.inf_loss)
tf.summary.histogram('global_scale_var', self.global_scale_var)
if self.handle_motion:
for s in range(NUM_SCALES):
for batch_s in range(self.batch_size):
whole_strip = tf.concat([self.warped_seq[s][0][batch_s],
self.warped_seq[s][1][batch_s],
self.warped_seq[s][2][batch_s]], axis=1)
tf.summary.image('base_warp_batch%s_scale%s' % (batch_s, s),
tf.expand_dims(whole_strip, axis=0))
whole_strip_input = tf.concat(
[self.inputs_objectmotion_net[s][batch_s][:, :, :, 0:3],
self.inputs_objectmotion_net[s][batch_s][:, :, :, 3:6],
self.inputs_objectmotion_net[s][batch_s][:, :, :, 6:9]], axis=2)
tf.summary.image('input_objectmotion_batch%s_scale%s' % (batch_s, s),
whole_strip_input) # (B, H, 3*W, 3)
for batch_s in range(self.batch_size):
whole_strip = tf.concat([self.base_input_masked[batch_s, :, :, 0:3],
self.base_input_masked[batch_s, :, :, 3:6],
self.base_input_masked[batch_s, :, :, 6:9]],
axis=1)
tf.summary.image('input_egomotion_batch%s' % batch_s,
tf.expand_dims(whole_strip, axis=0))
# Show transform predictions (of all objects).
for batch_s in range(self.batch_size):
for i in range(self.seq_length - 1):
# self.object_transforms contains batch_size elements of (N, 2, 6).
tf.summary.histogram('batch%d_tx%d' % (batch_s, i),
self.object_transforms[0][batch_s][:, i, 0])
tf.summary.histogram('batch%d_ty%d' % (batch_s, i),
self.object_transforms[0][batch_s][:, i, 1])
tf.summary.histogram('batch%d_tz%d' % (batch_s, i),
self.object_transforms[0][batch_s][:, i, 2])
tf.summary.histogram('batch%d_rx%d' % (batch_s, i),
self.object_transforms[0][batch_s][:, i, 3])
tf.summary.histogram('batch%d_ry%d' % (batch_s, i),
self.object_transforms[0][batch_s][:, i, 4])
tf.summary.histogram('batch%d_rz%d' % (batch_s, i),
self.object_transforms[0][batch_s][:, i, 5])
for i in range(self.seq_length - 1):
tf.summary.histogram('tx%d' % i, self.egomotion[:, i, 0])
tf.summary.histogram('ty%d' % i, self.egomotion[:, i, 1])
tf.summary.histogram('tz%d' % i, self.egomotion[:, i, 2])
tf.summary.histogram('rx%d' % i, self.egomotion[:, i, 3])
tf.summary.histogram('ry%d' % i, self.egomotion[:, i, 4])
tf.summary.histogram('rz%d' % i, self.egomotion[:, i, 5])
for s in range(NUM_SCALES):
for i in range(self.seq_length):
tf.summary.image('scale%d_image%d' % (s, i),
self.images[s][:, :, :, 3 * i:3 * (i + 1)])
if i in self.depth:
tf.summary.histogram('scale%d_depth%d' % (s, i), self.depth[i][s])
tf.summary.histogram('scale%d_disp%d' % (s, i), self.disp[i][s])
tf.summary.image('scale%d_disparity%d' % (s, i), self.disp[i][s])
for key in self.warped_image[s]:
tf.summary.image('scale%d_warped_image%s' % (s, key),
self.warped_image[s][key])
tf.summary.image('scale%d_warp_error%s' % (s, key),
self.warp_error[s][key])
if self.ssim_weight > 0:
tf.summary.image('scale%d_ssim_error%s' % (s, key),
self.ssim_error[s][key])
if self.icp_weight > 0:
tf.summary.image('scale%d_icp_residual%s' % (s, key),
self.icp_residual[s][key])
transform = self.icp_transform[s][key]
tf.summary.histogram('scale%d_icp_tx%s' % (s, key), transform[:, 0])
tf.summary.histogram('scale%d_icp_ty%s' % (s, key), transform[:, 1])
tf.summary.histogram('scale%d_icp_tz%s' % (s, key), transform[:, 2])
tf.summary.histogram('scale%d_icp_rx%s' % (s, key), transform[:, 3])
tf.summary.histogram('scale%d_icp_ry%s' % (s, key), transform[:, 4])
tf.summary.histogram('scale%d_icp_rz%s' % (s, key), transform[:, 5])
def build_depth_test_graph(self):
"""Builds depth model reading from placeholders."""
with tf.variable_scope('depth_prediction'):
input_image = tf.placeholder(
tf.float32, [self.batch_size, self.img_height, self.img_width, 3],
name='raw_input')
if self.imagenet_norm:
input_image = (input_image - reader.IMAGENET_MEAN) / reader.IMAGENET_SD
est_disp, _ = nets.disp_net(architecture=self.architecture,
image=input_image,
use_skip=self.use_skip,
weight_reg=self.weight_reg,
is_training=True)
est_depth = 1.0 / est_disp[0]
self.input_image = input_image
self.est_depth = est_depth
def build_egomotion_test_graph(self):
"""Builds egomotion model reading from placeholders."""
input_image_stack = tf.placeholder(
tf.float32,
[1, self.img_height, self.img_width, self.seq_length * 3],
name='raw_input')
input_bottleneck_stack = None
if self.imagenet_norm:
im_mean = tf.tile(
tf.constant(reader.IMAGENET_MEAN), multiples=[self.seq_length])
im_sd = tf.tile(
tf.constant(reader.IMAGENET_SD), multiples=[self.seq_length])
input_image_stack = (input_image_stack - im_mean) / im_sd
if self.joint_encoder:
# Pre-compute embeddings here.
with tf.variable_scope('depth_prediction', reuse=True):
input_bottleneck_stack = []
encoder_selected = nets.encoder(self.architecture)
for i in range(self.seq_length):
input_image = input_image_stack[:, :, :, i * 3:(i + 1) * 3]
tf.get_variable_scope().reuse_variables()
embedding, _ = encoder_selected(
target_image=input_image,
weight_reg=self.weight_reg,
is_training=True)
input_bottleneck_stack.append(embedding)
input_bottleneck_stack = tf.concat(input_bottleneck_stack, axis=3)
with tf.variable_scope('egomotion_prediction'):
est_egomotion = nets.egomotion_net(
image_stack=input_image_stack,
disp_bottleneck_stack=input_bottleneck_stack,
joint_encoder=self.joint_encoder,
seq_length=self.seq_length,
weight_reg=self.weight_reg)
self.input_image_stack = input_image_stack
self.est_egomotion = est_egomotion
def build_objectmotion_test_graph(self):
"""Builds egomotion model reading from placeholders."""
input_image_stack_om = tf.placeholder(
tf.float32,
[1, self.img_height, self.img_width, self.seq_length * 3],
name='raw_input')
if self.imagenet_norm:
im_mean = tf.tile(
tf.constant(reader.IMAGENET_MEAN), multiples=[self.seq_length])
im_sd = tf.tile(
tf.constant(reader.IMAGENET_SD), multiples=[self.seq_length])
input_image_stack_om = (input_image_stack_om - im_mean) / im_sd
with tf.variable_scope('objectmotion_prediction'):
est_objectmotion = nets.objectmotion_net(
image_stack=input_image_stack_om,
disp_bottleneck_stack=None,
joint_encoder=self.joint_encoder,
seq_length=self.seq_length,
weight_reg=self.weight_reg)
self.input_image_stack_om = input_image_stack_om
self.est_objectmotion = est_objectmotion
def inference_depth(self, inputs, sess):
return sess.run(self.est_depth, feed_dict={self.input_image: inputs})
def inference_egomotion(self, inputs, sess):
return sess.run(
self.est_egomotion, feed_dict={self.input_image_stack: inputs})
def inference_objectmotion(self, inputs, sess):
return sess.run(
self.est_objectmotion, feed_dict={self.input_image_stack_om: inputs})
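# Hedged inference sketch (assumptions, not part of the original file): with
# is_training=False the constructor builds the depth/egomotion test graphs, so
# a single-image depth query could look roughly like
#
#     model = Model(is_training=False, batch_size=1,
#                   img_height=128, img_width=416, seq_length=3)
#     saver = tf.train.Saver()
#     with tf.Session() as sess:
#         saver.restore(sess, checkpoint_path)          # checkpoint_path is assumed
#         depth = model.inference_depth(images, sess)   # images: (1, 128, 416, 3) float32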
| 47.003538
| 80
| 0.591761
|
f934ce3627cf6c1c899480b6dff76dad85d6ca7e
| 12,465
|
py
|
Python
|
GMATE/plugins/project_manager/projecttreeview.py
|
lexrupy/gmate-editor
|
7036d58a083ad6c05c1eb4cf7cb92405b369adb7
|
[
"MIT"
] | 1
|
2016-05-09T11:48:44.000Z
|
2016-05-09T11:48:44.000Z
|
GMATE/plugins/project_manager/projecttreeview.py
|
lexrupy/gmate-editor
|
7036d58a083ad6c05c1eb4cf7cb92405b369adb7
|
[
"MIT"
] | null | null | null |
GMATE/plugins/project_manager/projecttreeview.py
|
lexrupy/gmate-editor
|
7036d58a083ad6c05c1eb4cf7cb92405b369adb7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# GMate - Plugin Based Programmer's Text Editor
# Copyright © 2008-2009 Alexandre da Silva
#
# This file is part of Gmate.
#
# See LICENTE.TXT for licence information
"""A widget used to display the file/folder structure of projects."""
import re
import gtk
import gnomevfs
from GMATE import files
from pathdescriptor import PathDescriptor
from icons import Icons
from settings import Settings
from i18n import msg0002, msg0003, err0010, err0011
class ProjectTreeView(gtk.TreeView):
"""A widget for displaying the files within a repositoy."""
def __init__(self):
"""Constructor.
Creates the initial view of the project repository."""
super(ProjectTreeView, self).__init__()
self.__current_repository = None
self.__activate_file = None
self.__refresh = None
self.__settings = Settings()
self.__initialize_treeview()
self.__initialize_icons()
self.__initialize_columns()
def set_activate_file(self, afile=None):
"""Sets the method to use when activating a file."""
if afile is not None and not callable(afile):
raise ValueError, err0010
self.__activate_file = afile
def set_refresh(self, refresh=None):
"""Sets the method to use when refreshing."""
if refresh is not None and not callable(refresh):
raise ValueError, err0011
self.__refresh = refresh
def get_repository(self):
"""Gets the URI associated with the currently opened repository."""
return self.__current_repository
def refresh(self):
"""Refreshes the current view."""
current_repo = self.get_repository()
# Check to be sure we have a current repository
if current_repo is not None:
# Collection to hold all expanded rows
open_paths = []
# Append all the expanded paths to the collection
self.map_expanded_rows(self.__map_expanded_rows, open_paths)
self.__refresh()
# Expand all previously expanded paths
path_iter = self.get_model().get_iter_root()
self.__expand_previously_open_rows(path_iter, open_paths)
del open_paths[0:]
self.queue_draw()
def set_repository(self, uri):
"""Sets the repository to be viewed.
@param uri: The URI to set the repository to.
@type uri: a gnomevfs.URI
"""
self.__current_repository = uri
self.get_model().clear()
# Create the root directory within the list
parent_dir = self.__append_descriptor(uri, True, None)
# Be sure there is a loading item within the current directory
self.__append_loading_cell(parent_dir)
# Expand the current directory to show the rest of the files
iterpath = self.get_model().get_path(parent_dir)
self.expand_row(iterpath, False)
self.queue_draw()
def __expand_previously_open_rows(self, path_iter, open_paths):
"""Expands any previously opened paths after a refresh."""
while path_iter is not None:
desc = self.get_model().get_value(path_iter, 0)
# Be sure we have a PathDescriptor
if isinstance(desc, PathDescriptor):
# If the path was previously opened open it
if desc.get_uri() in open_paths:
path = self.get_model().get_path(path_iter)
self.expand_row(path, False)
# Remove it from the list
open_paths.remove(desc.get_uri())
# If the iterator has children, check to see if any should
# be open
if self.get_model().iter_has_child(path_iter):
child = self.get_model().iter_nth_child(path_iter, 0)
self.__expand_previously_open_rows(child, open_paths)
# Move to the next row
path_iter = self.get_model().iter_next(path_iter)
def __map_expanded_rows(self, widget, path, data):
"""Store previously opened paths."""
# Append URI values to track what is open
path_iter = self.get_model().get_iter(path)
if path_iter is not None:
desc = self.get_model().get_value(path_iter, 0)
if isinstance(desc, PathDescriptor):
data.append(desc.get_uri())
def __initialize_treeview(self):
"""Create the view and set its properties."""
treestore = gtk.TreeStore(object, gtk.gdk.Pixbuf, gtk.gdk.Pixbuf)
self.set_property(u'model', treestore)
self.set_property(u'enable-search', False)
self.set_property(u'headers-visible', False)
self.connect(u'test-expand-row', self.__on_expand_row)
self.connect(u'row-activated', self.__on_row_activated)
self.connect(u'row-collapsed', self.__on_collapse_row)
def __initialize_columns(self):
"""Creates the columns for the view."""
# Create the necessary widgets for the view
image_renderer = gtk.CellRendererPixbuf()
name_renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn()
        # Pack the icon renderer and the text label renderer into the view
column.pack_start(image_renderer, False)
column.pack_start(name_renderer, True)
# Set the icons for the icon renderer
column.set_attributes(image_renderer, pixbuf=1, pixbuf_expander_open=2,
pixbuf_expander_closed=1)
        # Set the text label's cell data function for retrieving the file's name
column.set_cell_data_func(name_renderer, self.__retrieve_filename)
self.append_column(column)
def __initialize_icons(self):
"""Retrieves the icons needed to display within the file view."""
self.__icons = Icons(self)
def __populate_directory(self, uri, parent=None):
"""Populates the directory list alphabetically by directory then by
file.
@param uri: the URI of the directory.
@type uri: a gnomevfs.URI
@param parent: the parent iterator to append the child to.
@type parent: a gtk.TreeIter
"""
# Retrieve directories alphabetically
directory = gnomevfs.DirectoryHandle(uri)
file_filter = self.__settings.get_file_filter()
show_file = None
if len(file_filter) > 0:
comp = re.compile(file_filter)
def __show_file(file_name):
if comp.search(file_name) is not None:
return True
return False
show_file = __show_file
for file_info in sorted(directory, cmp=self.__compare_files):
# Process folders
if files.is_visible_dir(file_info):
file_uri = uri.append_file_name(file_info.name)
cur_dir = self.__append_descriptor(file_uri, True, parent)
self.__append_loading_cell(cur_dir)
# Process Files
elif files.is_visible_file(file_info):
if show_file is not None and not show_file(file_info.name):
continue
file_uri = uri.append_file_name(file_info.name)
self.__append_descriptor(file_uri, False, parent)
def __compare_files(self, file_a, file_b):
"""Compares to files and determines which is first based on file type
and file name."""
type_a = file_a.type
type_b = file_b.type
# Make folders the most important in the list
if type_a == gnomevfs.FILE_TYPE_DIRECTORY: type_a = 0
else: type_a = 1
if type_b == gnomevfs.FILE_TYPE_DIRECTORY: type_b = 0
else: type_b = 1
type_comp = cmp(type_a, type_b)
# If the files are the same type then compare names
if type_comp == 0:
return cmp(file_a.name, file_b.name)
return type_comp
def __empty_directory(self, iterator):
"""Removes all the items within a directory on the tree."""
model = self.get_model()
# Remove each of the child nodes within the iterator
while model.iter_has_child(iterator):
child = model.iter_nth_child(iterator, 0)
model.remove(child)
def __append_descriptor(self, uri, is_dir, parent):
"""Creates a tree node with a path descriptor."""
open_icon = None
default_icon = None
# Retrieve a default and open icon if the URI is a folder, otherwise
# just a default icon
if is_dir:
open_icon = self.__icons.folder_open
default_icon = self.__icons.folder
else:
default_icon = self.__icons.retrieve_file_icon(str(uri))
# Create a descriptor and append a new node that represents that
# descriptor into the tree
desc = PathDescriptor(uri, is_dir)
parent_dir = self.get_model().append(parent, [desc, default_icon,
open_icon])
# Attach the corresponding tree iterator to the descriptor
desc.set_iter(parent_dir)
return parent_dir
def __append_empty_cell(self, iterator):
"""Creates an 'empty' cell within the tree."""
self.get_model().append(iterator, [msg0003, None, None])
def __append_loading_cell(self, iterator):
"""Creates a 'loading' cell within the tree."""
self.get_model().append(iterator, [msg0002, None, None])
def __retrieve_filename(self, column, cell, model, iterator):
"""Retrieves the filename of the PathDescriptor."""
desc = model.get_value(iterator, 0)
# Retrieve the filename of the PathDescriptor or string.
if isinstance(desc, PathDescriptor):
cell.set_property(u'text', desc.get_name())
else:
cell.set_property(u'text', desc)
def __on_expand_row(self, widget, iterator, path, data=None):
"""Empties a directory then loads in the files."""
if iterator is not None:
desc = self.get_model().get_value(iterator, 0)
if not isinstance(desc, PathDescriptor):
return
# If the object is a directory clear its contents within the tree
# and rescan it
if desc.is_dir():
self.freeze_child_notify()
# Empty the directory
self.__empty_directory(iterator)
self.__populate_directory(desc.get_uri(), iterator)
# Append an "Empty" cell if the directory is empty
if not self.get_model().iter_has_child(iterator):
self.__append_empty_cell(iterator)
self.thaw_child_notify()
self.queue_draw()
def __on_collapse_row(self, widget, iterator, path, data=None):
"""Empties a directory to conserve memory."""
if iterator is not None:
desc = self.get_model().get_value(iterator, 0)
if not isinstance(desc, PathDescriptor):
return
# If the object is a directory clear its contents within the tree
# and rescan it
if desc.is_dir():
self.freeze_child_notify()
# Empty the directory
self.__empty_directory(iterator)
# Append a loading node to be used later when expanding
self.__append_loading_cell(iterator)
self.thaw_child_notify()
self.queue_draw()
def __on_row_activated(self, widget, path, view_column, data=None):
"""Enters a directory or loads a file."""
iterator = self.get_model().get_iter(path)
if iterator is not None:
desc = self.get_model().get_value(iterator, 0)
            # Be sure we have a PathDescriptor before we try to activate the
# node.
if not isinstance(desc, PathDescriptor):
return
# Expand or collapse a directory
if desc.is_dir():
if self.row_expanded(path):
self.collapse_row(path)
else:
self.expand_row(path, False)
# Activate the file
else:
if self.__activate_file is not None:
self.__activate_file(desc)
self.queue_draw()
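# Illustrative wiring sketch (not part of the original plugin): how the view
# is typically hooked up. `open_in_editor` and the repository URI are
# hypothetical names used only for this example.
#
#     view = ProjectTreeView()
#     view.set_activate_file(lambda desc: open_in_editor(desc.get_uri()))
#     view.set_refresh(view.queue_draw)
#     view.set_repository(gnomevfs.URI('file:///home/user/project'))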
| 35.411932
| 79
| 0.616286
|
75bdefb71931a4b4d0200c650da4a7f4f7feb43c
| 1,214
|
py
|
Python
|
0x08-python-more_classes/2-rectangle.py
|
ricardo1470/holbertonschool-higher_level_programming
|
aab73c8efee665b0215958ee7b338871f13634bc
|
[
"CNRI-Python"
] | null | null | null |
0x08-python-more_classes/2-rectangle.py
|
ricardo1470/holbertonschool-higher_level_programming
|
aab73c8efee665b0215958ee7b338871f13634bc
|
[
"CNRI-Python"
] | null | null | null |
0x08-python-more_classes/2-rectangle.py
|
ricardo1470/holbertonschool-higher_level_programming
|
aab73c8efee665b0215958ee7b338871f13634bc
|
[
"CNRI-Python"
] | null | null | null |
#!/usr/bin/python3
"""Create class"""
class Rectangle():
"""Rectangle class"""
def __init__(self, width=0, height=0):
self.height = height
self.width = width
@property
def height(self):
"""height"""
return self.__height
@property
def width(self):
"""width"""
return self.__width
@width.setter
def width(self, value):
"""Initialize Rectangle width"""
if not isinstance(value, int):
raise TypeError('width must be an integer')
if value < 0:
raise ValueError('width must be >= 0')
self.__width = value
@height.setter
def height(self, value):
"""Initialize Rectangle height"""
if not isinstance(value, int):
raise TypeError('height must be an integer')
if value < 0:
raise ValueError('height must be >= 0')
self.__height = value
def area(self):
"""area of rectangle"""
return self.__height * self.__width
def perimeter(self):
"""perimeter of rectangle"""
if self.__height == 0 or self.__width == 0:
return 0
return (self.__height + self.__width) * 2
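# Illustrative usage sketch (not part of the original exercise file):
#
#     r = Rectangle(3, 4)
#     r.area()        # -> 12
#     r.perimeter()   # -> 14
#     Rectangle(-1)   # raises ValueError: width must be >= 0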
| 25.291667
| 56
| 0.560132
|
4aaf5fb0c4b7990ce6e557f45d39916608f066b0
| 26,671
|
py
|
Python
|
shadowsocks/asyncdns.py
|
xzchsia/shadowsocks-analysis
|
9d8e3e2d3b3e2b41cee51bed99231803e9b13539
|
[
"MIT"
] | 42
|
2016-09-21T09:48:48.000Z
|
2022-02-10T14:57:35.000Z
|
shadowsocks/asyncdns.py
|
xzchsia/shadowsocks-analysis
|
9d8e3e2d3b3e2b41cee51bed99231803e9b13539
|
[
"MIT"
] | null | null | null |
shadowsocks/asyncdns.py
|
xzchsia/shadowsocks-analysis
|
9d8e3e2d3b3e2b41cee51bed99231803e9b13539
|
[
"MIT"
] | 23
|
2016-11-26T09:38:20.000Z
|
2021-08-22T03:55:39.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
DNS handling.
If you are not familiar with the DNS protocol, it is worth reading up on it first.
Document rfc1035:
http://www.ccs.neu.edu/home/amislove/teaching/cs4700/fall09/handouts/project1-primer.pdf
'''
from __future__ import absolute_import, division, print_function, \
with_statement
import time
import os
import socket
import struct
import re
import logging
from shadowsocks import common, lru_cache, eventloop
CACHE_SWEEP_INTERVAL = 30
# The regular expression below works as follows:
# (?!-) zero-width negative lookahead: the label must not start with '-'
# then 1 to 63 characters taken from letters, digits and '-'
# (?<!-) zero-width negative lookbehind: the label must not end with '-'
# anchored at the end of the string
# From: http://stackoverflow.com/questions/2532053/validate-a-hostname-string
VALID_HOSTNAME = re.compile(br"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
# Compiling the regular expression is more efficient: when matching thousands of strings, the precompiled pattern is much faster
common.patch_socket()
# rfc1035 definition: DNS message format
# +---------------------+
# | Header |
# +---------------------+
# | Question | the question for the name server
# +---------------------+
# | Answer | RRs answering the question
# +---------------------+
# | Authority | RRs pointing toward an authority
# +---------------------+
# | Additional | RRs holding additional information
# +---------------------+
#
#
# Header format
# Each row below is 16 bits, i.e. two bytes
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ID |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |QR| Opcode |AA|TC|RD|RA| Z | RCODE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | QDCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ANCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | NSCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ARCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# header = struct.pack('!HBBHHHH', request_id, 1, 0, 1, 0, 0, 0)
QTYPE_ANY = 255
QTYPE_A = 1  # A record: the IPv4 address of a host
QTYPE_AAAA = 28  # AAAA record: IPv6 address
QTYPE_CNAME = 5  # CNAME (Canonical Name) record, i.e. an alias
QTYPE_NS = 2  # NS (Name Server) record
QCLASS_IN = 1  # the Internet class, defined as 0x01
# Build the QNAME for the target hostname of a DNS request, returning a byte string.
# Each dot-separated label is packed in turn and appended to the result.
def build_address(address):
    # strip() removes leading/trailing characters that appear in the given set
address = address.strip(b'.')
labels = address.split(b'.')
results = []
for label in labels:
l = len(label)
        if l > 63:  # label too long
return None
        # Append the length of the label followed by the label bytes
results.append(common.chr(l))
results.append(label)
results.append(b'\0')
return b''.join(results)
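# Illustrative sketch (not part of the original module): what build_address
# produces -- one length octet per label followed by the label bytes,
# terminated by a zero byte.
def _build_address_example():
    assert build_address(b'google.com') == b'\x06google\x03com\x00'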
# rfc1035
# dns questions
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | QNAME | -- a sequence of labels, where each label consists of a length octet followed by that number of octets.
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
#  |                     QTYPE                     | -- one of the QTYPE_* constants defined above
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | QCLASS | -- 0x01
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# Build a complete DNS query (header + question section).
# Parameters: address is the hostname, qtype the query type, request_id the query id.
def build_request(address, qtype, request_id):
    # The '!' in pack() means network (big-endian) byte order
header = struct.pack('!HBBHHHH', request_id, 1, 0, 1, 0, 0, 0)
addr = build_address(address)
qtype_qclass = struct.pack('!HH', qtype, QCLASS_IN)
return header + addr + qtype_qclass
# Parse the address from an RDATA field, returning an IP address string in dotted form
# TODO what type is the data parameter
def parse_ip(addrtype, data, length, offset):
if addrtype == QTYPE_A:
# htons() host to network short
# htonl() host to network long
# ntohs() network to host short
# ntohl() network to host long
        # socket.inet_ntop(address_family, packed_ip) converts a packed
        # binary address to its standard dotted string form.
        # socket.inet_pton(address_family, ip_string) converts an IP address
        # string to packed binary form; the family is AF_INET or AF_INET6,
        # for IPv4 and IPv6 respectively.
return socket.inet_ntop(socket.AF_INET, data[offset:offset + length])
elif addrtype == QTYPE_AAAA:
return socket.inet_ntop(socket.AF_INET6, data[offset:offset + length])
elif addrtype in [QTYPE_CNAME, QTYPE_NS]:
return parse_name(data, offset)[1]
else:
return data[offset:offset + length]
# This uses the DNS compression pointer scheme; see "Message compression" in rfc1035.
# For example, the names F.ISI.ARPA, FOO.F.ISI.ARPA and ARPA can be packed into the QNAME stream below (other sections omitted)
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# O + + +
# F + + +
# F + + +
# S + BYTE0 + BYTE1 +
# E + + +
# T + + +
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# 20 | 1 | F |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# 22 | 3 | I |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# 24 | S | I |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# 26 | 4 | A |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# 28 | R | P |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# 30 | A | 0 |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# ......
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# 40 | 3 | F |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# 42 | 0 | 0 |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# 44 | 1 1 | 20 |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# ......
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# 64 | 1 1 | 26 |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# Parse a QNAME from a DNS message. Returns a tuple
# (bytes consumed, name); recurses when a compression pointer is met.
def parse_name(data, offset):
    p = offset
    labels = []
    # l is the length of the label at the current offset in data
    l = common.ord(data[p])
    while l > 0:
        # Why 128 + 64: an uncompressed label is at most 63 bytes long, so
        # the rfc uses the two high bits (binary 11000000) as the flag for a
        # compression pointer
        if (l & (128 + 64)) == (128 + 64):
            # Extract the pointer target
            pointer = struct.unpack('!H', data[p:p + 2])[0]
            # Mask off the two high flag bits of the two pointer bytes
            pointer &= 0x3FFF
            # Recurse at the pointer target to find the earlier name record;
            # see rfc1035
            r = parse_name(data, pointer)
            # Append the name found at the pointer target to labels
            labels.append(r[1])
            # Skip the two bytes occupied by the pointer
            p += 2
            # A pointer always terminates the name, so return here
            return p - offset, b'.'.join(labels)
        # Not a compression pointer: append the label directly
        else:
            # Append the label
            labels.append(data[p + 1:p + 1 + l])
            # Advance by 1 + label length
            p += 1 + l
            l = common.ord(data[p])
    # Return the number of bytes consumed (including the final zero byte)
    return p - offset + 1, b'.'.join(labels)
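# Illustrative sketch (not part of the original module): parse_name on a tiny
# hand-built message. The second name uses a compression pointer (0xc000)
# back to offset 0, as described in the comments above.
def _parse_name_example():
    data = b'\x06google\x03com\x00' + b'\x03www\xc0\x00'
    assert parse_name(data, 0) == (12, b'google.com')
    assert parse_name(data, 12) == (6, b'www.google.com')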
# rfc1035
# resource record (answer) format
# A dns answer has the following format:
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | |
# / /
# / NAME / -- The domain name that was queried
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TYPE | -- A/AAAA/NS, etc
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | CLASS | -- Two octets which specify the class of the data in the RDATA field
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TTL | -- The number of seconds the results can be cached
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | RDLENGTH | -- The length of the RDATA field
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|
# / RDATA / -- The data of the response
# / /
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# Parse a DNS record.
# Returns the number of bytes consumed and a tuple
# (name, ip, record type, record class, ttl).
# offset is the byte position of this record within data.
def parse_record(data, offset, question = False):
    nlen, name = parse_name(data, offset)
    # An answer / authority / additional record carries TTL and RDATA
    if not question:
        record_type, record_class, record_ttl, record_rdlength = struct.unpack(
            '!HHiH', data[offset + nlen:offset + nlen + 10]
        )
        ip = parse_ip(record_type, data, record_rdlength, offset + nlen + 10)
        return nlen + 10 + record_rdlength, \
            (name, ip, record_type, record_class, record_ttl)
    # A question entry only carries NAME, TYPE and CLASS
    else:
        record_type, record_class = struct.unpack(
            '!HH', data[offset + nlen:offset + nlen + 4]
        )
        return nlen + 4, (name, None, record_type, record_class, None, None)
# DNS packets have a header that is shown below:
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ID |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |QR| Opcode |AA|TC|RD|RA| Z | RCODE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | QDCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ANCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | NSCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ARCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# Parse the DNS header, returning the raw rfc1035 header fields.
def parse_header(data):
if len(data) >= 12:
header = struct.unpack('!HBBHHHH', data[:12])
res_id = header[0]
res_qr = header[1] & 128 # 0x80
res_tc = header[1] & 2 # 0x02
res_ra = header[2] & 128 # 0x80
res_rcode = header[2] & 15 # 0x0F
# assert res_tc == 0
# assert res_rcode in [0, 3]
res_qdcount = header[3]
res_ancount = header[4]
res_nscount = header[5]
res_arcount = header[6]
return (res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount,
res_ancount, res_nscount, res_arcount)
return None
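# Illustrative sketch (not part of the original module): parsing the header of
# a typical response (id 0x1234, QR=1, RD=1, RA=1, one question, one answer).
def _parse_header_example():
    raw = struct.pack('!HBBHHHH', 0x1234, 0x81, 0x80, 1, 1, 0, 0)
    assert parse_header(raw) == (0x1234, 128, 0, 128, 0, 1, 1, 0, 0)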
# Parse a DNS response
def parse_response(data):
try:
if len(data) >= 12:
header = parse_header(data)
if not header:
return None
res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount, \
res_ancount, res_nscount, res_arcount = header
            # qds collects the parsed question entries, ans the answer records
qds = []
ans = []
offset = 12
# QDCOUNT an unsigned 16 bit integer specifying the number of entries in the question section.
# You should set this field to 1, indicating you have one question.
for i in range(0, res_qdcount):
l, r = parse_record(data, offset, True)
offset += l
if r:
qds.append(r)
# ANCOUNT an unsigned 16 bit integer specifying the number of resource records in the answer section.
# You should set this field to 0, indicating you are not providing any answers.
for i in range(0, res_ancount):
l, r = parse_record(data, offset)
offset += l
if r:
ans.append(r)
# NSCOUNT an unsigned 16 bit integer specifying the number of name server resource records in the authority records section.
# You should set this field to 0, and should ignore any response entries in this section.
for i in range(0, res_nscount):
l, r = parse_record(data, offset)
offset += l
# ARCOUNT an unsigned 16 bit integer specifying the number of resource records in the additional
# records section. You should set this field to 0, and should ignore any response entries in this section.
for i in range(0, res_arcount):
l, r = parse_record(data, offset)
offset += l
            # Build and return a DNSResponse instance
response = DNSResponse()
if qds:
response.hostname = qds[0][0]
for an in qds:
response.questions.append((an[1], an[2], an[3]))
for an in ans:
response.answers.append((an[1], an[2], an[3]))
return response
except Exception as e:
import traceback
traceback.print_exc()
logging.error(e)
return None
# Given an address (bytes or str), return the address family if it is a valid IP address, otherwise False
def is_ip(address):
for family in (socket.AF_INET, socket.AF_INET6):
try:
if type(address) != str:
address = address.decode('utf8')
socket.inet_pton(family, address)
return family
except (TypeError, ValueError, OSError, IOError):
pass
return False
# Whether the given hostname is a valid host name
def is_valid_hostname(hostname):
if len(hostname) > 255:
return False
if hostname[-1] == b'.':
hostname = hostname[:-1]
    # VALID_HOSTNAME is the compiled regex declared near the top of this file;
    # it validates each label of the hostname.
    # The expression was taken from Stack Overflow (see the link above).
return all(VALID_HOSTNAME.match(x) for x in hostname.split(b'.'))
# Class: DNSResponse.
class DNSResponse(object):
def __init__(self):
self.hostname = None
self.questions = [] # each element: (addr, type, class)
self.answers = [] # each element: (addr, type, class)
def __str__(self):
return '%s: %s' % (self.hostname, str(self.answers))
STATUS_IPV4 = 0
STATUS_IPV6 = 1
# Class: DNS resolver
class DNSResolver(object):
def __init__(self):
self._loop = None
self._request_id = 1
        # The following four attributes are all dicts
        self._hosts = {}
        # Per-hostname query status (IPv4 or IPv6 stage)
        self._hostname_status = {}
        # --------------------------------------------------
        # What is the difference between hostname-to-callback and
        # callback-to-hostname? See resolve() near the end of this file.
        # hostname_to_cb maps each hostname to its callbacks: if several
        # callers ask for the same hostname, only one request is sent to the
        # remote DNS server, avoiding duplicate queries.
        # Removing duplicate (hostname, callback) pairs needs no manual work;
        # the dict takes care of it (keys must be unique).
        self._hostname_to_cb = {}
        # --------------------------------------------------
        # cb_to_hostname maps a callback back to its hostname: one hostname
        # may be requested by several callers, so each of them has to be
        # called back individually.
        self._cb_to_hostname = {}
        # --------------------------------------------------
        # TODO: read the lru_cache source
        self._cache = lru_cache.LRUCache(timeout = 300)
self._last_time = time.time()
self._sock = None
self._servers = None
self._parse_resolv()
self._parse_hosts()
# TODO monitor hosts change and reload hosts
# TODO parse /etc/gai.conf and follow its rules
    # Read the DNS server addresses from /etc/resolv.conf and add them to this resolver's server list
def _parse_resolv(self):
self._servers = []
try:
with open('/etc/resolv.conf', 'rb') as f:
content = f.readlines()
for line in content:
# strip() returns a copy of the string with leading whitespace removed.
line = line.strip()
if line:
if line.startswith(b'nameserver'):
# split by space
parts = line.split()
if len(parts) >= 2:
server = parts[1]
if is_ip(server) == socket.AF_INET:
if type(server) != str:
server = server.decode('utf8')
self._servers.append(server)
except IOError:
pass
if not self._servers:
            # No DNS servers configured on the system; fall back to Google's
self._servers = ['8.8.4.4', '8.8.8.8']
    # Load custom name resolutions (the hosts file) into self._hosts (a dict)
def _parse_hosts(self):
etc_path = '/etc/hosts'
        # Windows keeps the hosts file elsewhere
if 'WINDIR' in os.environ:
etc_path = os.environ['WINDIR'] + '/system32/drivers/etc/hosts'
try:
with open(etc_path, 'rb') as f:
for line in f.readlines():
line = line.strip()
parts = line.split()
if len(parts) >= 2:
ip = parts[0]
if is_ip(ip):
for i in range(1, len(parts)):
hostname = parts[i]
if hostname:
self._hosts[hostname] = ip
except IOError:
self._hosts['localhost'] = '127.0.0.1'
    # Register this resolver with the event loop
def add_to_loop(self, loop, ref = False):
        # Guard against being added to a loop twice
if self._loop:
raise Exception('already add to loop')
self._loop = loop
# TODO when dns server is IPv6
        # Create a new socket for the UDP queries
        # SOCK_DGRAM is a connectionless, message-oriented socket type (UDP based)
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
                                   socket.SOL_UDP)
        # The resolver only sends requests, i.e. it acts as a client, so no bind()
        # Non-blocking
        self._sock.setblocking(False)
        # Add the socket to the loop, triggered when data is readable
        loop.add(self._sock, eventloop.POLL_IN)
        # Register the handler: the event loop calls self.handle_events when
        # the socket has activity
        loop.add_handler(self.handle_events, ref = ref)
    # Fire the callbacks registered for a hostname.
    # The callbacks belong to the callers that asked for the DNS lookup, so
    # control has to be handed back to each of them.
    # (hostname_to_cb maps a hostname to a *list* of callbacks, so nothing is
    # lost even though dict keys are unique.)
    def _call_callback(self, hostname, ip, error = None):
        # Fetch the callbacks (cb) that were stored in the dict when the
        # request was sent
        callbacks = self._hostname_to_cb.get(hostname, [])  # all callbacks registered for this hostname
        for callback in callbacks:
            # If this callback is still pending, drop it from cb_to_hostname
            # to avoid duplicate DNS queries
            if callback in self._cb_to_hostname:
                del self._cb_to_hostname[callback]
            # Invoke the callback
            if ip or error:
                # Call the callback that was registered when the request was sent
                callback((hostname, ip), error)
            else:
                callback((hostname, None),
                         Exception('unknown hostname %s' % hostname))
        # Once the callbacks are done, drop the hostname_to_cb entry and the
        # status entry
        if hostname in self._hostname_to_cb:
            del self._hostname_to_cb[hostname]
        if hostname in self._hostname_status:
            del self._hostname_status[hostname]
    # Handles the data of a DNS reply (called from handle_events).
    def _handle_data(self, data):
        response = parse_response(data)
        if response and response.hostname:
            hostname = response.hostname
            ip = None
            # Take the first A/AAAA record of class IN from the reply
            for answer in response.answers:
                if answer[1] in (QTYPE_A, QTYPE_AAAA) and \
                        answer[2] == QCLASS_IN:
                    ip = answer[0]
                    break
            # If no IP was resolved and this hostname is still in the IPv4
            # stage, move it to the IPv6 stage and retry with an AAAA query
            if not ip and self._hostname_status.get(hostname, STATUS_IPV6) \
                    == STATUS_IPV4:
                self._hostname_status[hostname] = STATUS_IPV6
                self._send_req(hostname, QTYPE_AAAA)
            else:
                # An IP was resolved
                if ip:
                    # Cache the IP
                    self._cache[hostname] = ip
                    # Fire the callbacks via _call_callback
                    self._call_callback(hostname, ip)
                # Otherwise, if the hostname is already in the IPv6 stage,
                elif self._hostname_status.get(hostname, None) == STATUS_IPV6:
                    for question in response.questions:
                        if question[1] == QTYPE_AAAA:
                            # Fire the callbacks (resolution failed)
                            self._call_callback(hostname, None)
                            break
    # Event handler registered with the event loop
def handle_events(self, events):
for sock, fd, event in events:
            # Only handle our own socket; the dns, tcp and udp servers each have their own socket
if sock != self._sock:
continue
            # On error, destroy the socket and create a new one
if event & eventloop.POLL_ERR:
logging.error('dns socket err')
self._loop.remove(self._sock)
self._sock.close()
# TODO when dns server is IPv6
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
self._sock.setblocking(False)
self._loop.add(self._sock, eventloop.POLL_IN)
            # No error: receive the data
else:
                # DNS runs over UDP, so there is no connection to handle
data, addr = sock.recvfrom(1024)
                # Reply from a server we did not query?
if addr[0] not in self._servers:
logging.warn('received a packet other than our dns')
break
                # handle_events hands the payload over to _handle_data
self._handle_data(data)
break
now = time.time()
        # Sweep the cache every CACHE_SWEEP_INTERVAL (30) seconds
if now - self._last_time > CACHE_SWEEP_INTERVAL:
self._cache.sweep()
self._last_time = now
    # Remove a callback. (It does not appear to be used anywhere in this file.)
def remove_callback(self, callback):
hostname = self._cb_to_hostname.get(callback)
if hostname:
del self._cb_to_hostname[callback]
arr = self._hostname_to_cb.get(hostname, None)
if arr:
arr.remove(callback)
if not arr:
del self._hostname_to_cb[hostname]
if hostname in self._hostname_status:
del self._hostname_status[hostname]
    # Send a DNS request
def _send_req(self, hostname, qtype):
self._request_id += 1
        if self._request_id > 32768:  # wrap around after 15 bits
self._request_id = 1
req = build_request(hostname, qtype, self._request_id)
for server in self._servers:
logging.debug('resolving %s with type %d using server %s',
hostname, qtype, server)
            # Query the remote DNS server
self._sock.sendto(req, (server, 53))
    # Resolve a hostname. Called from the tcprelay module.
    # callback is supplied by the caller one level up in the event loop.
    def resolve(self, hostname, callback):  # in tcprelay, callback points to the TCP relay's _handle_dns_resolved()
        # Make sure hostname is a byte string
if type(hostname) != bytes:
hostname = hostname.encode('utf8')
if not hostname:
callback(None, Exception('empty hostname'))
elif is_ip(hostname):
            # If it is already an IP there is nothing to resolve: call back directly
callback((hostname, hostname), None)
elif hostname in self._hosts:
            # If it is in the hosts file, call back directly
logging.debug('hit hosts: %s', hostname)
ip = self._hosts[hostname]
callback((hostname, ip), None)
elif hostname in self._cache:
            # If it is in the cache, call back directly
logging.debug('hit cache: %s', hostname)
ip = self._cache[hostname]
callback((hostname, ip), None)
        # None of the above matched: do a real DNS lookup
else:
            # Validate the hostname
if not is_valid_hostname(hostname):
callback(None, Exception('invalid hostname: %s' % hostname))
return
arr = self._hostname_to_cb.get(hostname, None)
            # Check whether a request for this hostname is already in flight
if not arr:
self._hostname_status[hostname] = STATUS_IPV4
                # Send the request
self._send_req(hostname, QTYPE_A)
                # Also register {hostname: [callback]} in _hostname_to_cb;
                # keyed by hostname because this one socket serves requests for many different hostnames
self._hostname_to_cb[hostname] = [callback]
self._cb_to_hostname[callback] = hostname
            # A request is already pending: just append this callback
else:
arr.append(callback)
# TODO send again only if waited too long
self._send_req(hostname, QTYPE_A)
    # Shut down the resolver and release the socket
def close(self):
if self._sock:
self._sock.close()
self._sock = None
def test():
dns_resolver = DNSResolver()
loop = eventloop.EventLoop()
dns_resolver.add_to_loop(loop, ref = True)
global counter
counter = 0
def make_callback():
global counter
def callback(result, error):
global counter
# TODO: what can we assert?
print(result, error)
counter += 1
            # There are 9 test cases below; adjust the count if you add more
if counter == 9:
loop.remove_handler(dns_resolver.handle_events)
dns_resolver.close()
a_callback = callback
return a_callback
assert(make_callback() != make_callback())
dns_resolver.resolve(b'google.com', make_callback())
dns_resolver.resolve('google.com', make_callback())
dns_resolver.resolve('example.com', make_callback())
dns_resolver.resolve('ipv6.google.com', make_callback())
dns_resolver.resolve('www.facebook.com', make_callback())
dns_resolver.resolve('ns2.google.com', make_callback())
dns_resolver.resolve('invalid.@!#$%^&$@.hostname', make_callback())
dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'long.hostname', make_callback())
dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'long.hostname', make_callback())
loop.run()
if __name__ == '__main__':
test()
| 38.935766
| 155
| 0.479697
|
50e1c1382b0b12649a085ffcc7adb0311b269511
| 5,858
|
py
|
Python
|
src/smartexplorer.py
|
chrishasl/SmartNodeMonitorBot
|
62a76a6d92ec62cee4b1d95b1e4a71ada1a3ec53
|
[
"MIT"
] | null | null | null |
src/smartexplorer.py
|
chrishasl/SmartNodeMonitorBot
|
62a76a6d92ec62cee4b1d95b1e4a71ada1a3ec53
|
[
"MIT"
] | null | null | null |
src/smartexplorer.py
|
chrishasl/SmartNodeMonitorBot
|
62a76a6d92ec62cee4b1d95b1e4a71ada1a3ec53
|
[
"MIT"
] | null | null | null |
##
# Part of `SmartNodeMonitorBot`
#
# Copyright 2018 dustinface
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##
import os, stat, sys
import re
import subprocess
import json
import time
from requests_futures.sessions import FuturesSession
import logging
import threading
import re
import uuid
logger = logging.getLogger("smartexplorer")
lockForever = sys.float_info.max
class Request(object):
def __init__(self,node, request, cb):
self.attempts = 0
self.node = node
self.explorer = request['explorer']
self.future = request['future']
self.future.add_done_callback(self.futureCB)
self.result = None
self.cb = cb
self.data = None
self.status = -1
self.error = None
def futureCB(self, future):
try:
self.result = self.future.result()
self.status = self.result.status_code
except:
self.error = "Could not fetch result"
try:
self.data = self.result.json()
except:
self.error = "Could not parse json {}".format(self.result)
self.cb(self.future)
class SmartExplorer(object):
def __init__(self, balancesCB):
self.balancesCB = balancesCB
def balances(self, addresses):
logger.warning("SmartExplorer balances")
class LocalExplorer(SmartExplorer):
def __init__(self,balancesCB):
super().__init__(balancesCB)
def balances(self,addresses):
logger.warning("LocalExplorer maybe later...")
class WebExplorer(SmartExplorer):
def __init__(self,balancesCB):
super().__init__(balancesCB)
self.lastUrl = 0
self.urls = {'https://explorer.smartcash.cc': lockForever,\
'https://explorer2.smartcash.cc': lockForever,\
'https://explorer3.smartcash.cc': lockForever,\
'https://explorer-webapi.azurewebsites.net': None}
self.urlLockSeconds = 3600
self.session = FuturesSession(max_workers=20)
self.checks = {}
self.results = {}
self.requestSem = threading.Lock()
def backgroundCB(self, future):
self.requestSem.acquire()
logger.info("Checks {}".format(len(self.checks)))
done = None
for check, requests in self.checks.items():
logger.info("CHECK {}, requests {}".format(check,len(requests)))
for request in requests:
if request.future == future:
if request.error != None:
logger.error("[{}] Request error {}".format(request.status, request.error))
self.balancesCB(check, None)
self.urls[request.explorer] = time.time()
done = check
break
if request.status != 200:
logger.warning("[{}] Request error {}".format(request.status, request.data))
self.urls[request.explorer] = time.time()
break
if done:
break
if not sum(map(lambda x: x.status == -1 , requests)):
done = check
self.balancesCB(check, requests)
if done:
self.checks.pop(done)
logger.info("URL states {}".format(self.urls))
self.requestSem.release()
def nextUrl(self):
def urlReady(x):
return x == None or \
(time.time() - x) >= self.urlLockSeconds
while True:
if not sum(map(lambda x: urlReady(x) ,self.urls.values())):
# If there is no unlocked url left
raise ValueError("No explorer url ready.")
nextIndex = (self.lastUrl + 1 ) % len(self.urls)
self.lastUrl = nextIndex
if urlReady(list(self.urls.values())[nextIndex]):
return list(self.urls.keys())[self.lastUrl]
def balance(self, address):
explorer = self.nextUrl()
requestUrl = "{}/api/smartexplorer/GetAddressBalance/{}".format(explorer,address)
logger.info("Add {}".format(requestUrl))
future = self.session.get(requestUrl)
return {'explorer' : explorer, 'future' : future}
def balances(self, nodes):
self.requestSem.acquire()
check = uuid.uuid4()
logger.info("Create balance check: {}".format(check))
try:
self.checks[check] = list(map(lambda x: Request(x, self.balance(x.payee), self.backgroundCB), nodes ))
except ValueError as e:
logger.warning("balances {}".format(e))
self.requestSem.release()
return None
else:
logger.info("Added balance check {}".format(check))
self.requestSem.release()
return check
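# Illustrative usage sketch (not part of the original module). The `nodes`
# argument and its `payee` attribute are assumptions taken from balances()
# above; `on_balances` is a hypothetical callback name.
#
#     def on_balances(check, requests):
#         # `requests` is None on error, otherwise the list of Request objects
#         ...
#
#     explorer = WebExplorer(on_balances)
#     check_id = explorer.balances(nodes)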
| 30.670157
| 114
| 0.608569
|
70fe581de4ea704b057b8cdbe2c6196bc21ac832
| 7,765
|
py
|
Python
|
status/views.py
|
riptano/statuspage
|
eb5b3c983f2e23b9ec2e1f30c8cfe5cc624699d4
|
[
"BSD-3-Clause"
] | null | null | null |
status/views.py
|
riptano/statuspage
|
eb5b3c983f2e23b9ec2e1f30c8cfe5cc624699d4
|
[
"BSD-3-Clause"
] | null | null | null |
status/views.py
|
riptano/statuspage
|
eb5b3c983f2e23b9ec2e1f30c8cfe5cc624699d4
|
[
"BSD-3-Clause"
] | null | null | null |
from datetime import date, timedelta
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render
from django.template import RequestContext, Template
from django.template.loader import get_template
from django.utils.decorators import method_decorator
from django.views.generic import (
MonthArchiveView, YearArchiveView, CreateView, DeleteView, DetailView, ListView, TemplateView
)
from stronghold.decorators import public
from status.models import Incident, IncidentUpdate
from status.forms import IncidentCreateForm, IncidentUpdateCreateForm
import slack
import slack.chat
import logging
logger = logging.getLogger(__name__)
def send_to_slack(message, channel='engineering', username='statusbot', emoji=':statusbot:', override_debug=False):
slack.api_token = settings.SLACK_TOKEN
if settings.DEBUG and not override_debug:
logger.info('Diverting from %s to dev while in debug mode as %s: %s' % (channel, username, message))
slack.chat.post_message('dev', 'DEBUG: ' + message, username=username, icon_emoji=emoji)
else:
logger.info('Sending to channel %s as %s: %s' % (channel, username, message))
slack.chat.post_message(channel, message, username=username, icon_emoji=emoji)
def create_incident(request):
if request.method == 'POST':
form = IncidentCreateForm(request.POST)
form2 = IncidentUpdateCreateForm(request.POST)
if form.is_valid() and form2.is_valid():
i = form.save(commit=False)
i.user = request.user
print i
i.save()
f = form2.save(commit=False)
f.incident = i
f.user = request.user
f.save()
if settings.SLACK_CHANNEL and settings.SLACK_TOKEN:
if len(f.description) > 50:
description = f.description[:50] + '...'
else:
description = f.description
try:
message = "<https://%s%s|%s> (%s): %s" % (
get_current_site(request),
reverse('status:incident_detail', args=[i.pk, ]),
i.name,
f.status.name,
description
)
send_to_slack(message, username=settings.SLACK_USERNAME, channel=settings.SLACK_CHANNEL)
except Exception as e:
logger.warn('Unable to send to slack: %s' % (e))
return HttpResponseRedirect('/')
else:
form = IncidentCreateForm()
form2 = IncidentUpdateCreateForm()
request_context = RequestContext(request)
request_context.push({'form': form, 'form2': form2})
t = get_template('status/incident_create_form.html')
rendered_template = t.render(request_context.flatten(), request)
return HttpResponse(rendered_template)
#return get_template('status/incident_create_form.html').render(request_context.flatten(), request)
#return render(request, template_name='status/incident_create_form.html', context=request_context)
class DashboardView(ListView):
model = Incident
def get_queryset(self):
return Incident.objects.exclude(hidden=True)
class HiddenDashboardView(ListView):
model = Incident
class IncidentHideView(DeleteView):
model = Incident
template_name = 'status/incident_hide.html'
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
self.object.hidden = True
self.object.save()
return HttpResponseRedirect(self.get_success_url())
def get_success_url(self):
return reverse('status:dashboard')
class IncidentDeleteView(DeleteView):
model = Incident
def get_success_url(self):
return reverse('status:dashboard')
class IncidentUpdateUpdateView(CreateView):
model = IncidentUpdate
form_class = IncidentUpdateCreateForm
template_name = 'status/incident_form.html'
def get_success_url(self):
return reverse('status:incident_detail', args=[self.kwargs['pk']])
def form_valid(self, form):
iu = form.save(commit=False)
i = Incident.objects.get(pk=self.kwargs['pk'])
i.hidden = False
i.save()
iu.incident = i
iu.incident.hidden = False
iu.incident.save()
iu.user = self.request.user
iu.save()
return HttpResponseRedirect(self.get_success_url())
class IncidentDetailView(DetailView):
model = Incident
@method_decorator(public)
def dispatch(self, *args, **kwargs):
return super(IncidentDetailView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(IncidentDetailView, self).get_context_data(**kwargs)
context.update({
'form': IncidentUpdateCreateForm(),
})
return context
class IncidentArchiveYearView(YearArchiveView):
make_object_list = True
queryset = Incident.objects.all()
date_field = 'updated'
@method_decorator(public)
def dispatch(self, *args, **kwargs):
return super(IncidentArchiveYearView, self).dispatch(*args, **kwargs)
class IncidentArchiveMonthView(MonthArchiveView):
make_object_list = True
queryset = Incident.objects.all()
date_field = 'updated'
month_format = '%m'
@method_decorator(public)
def dispatch(self, *args, **kwargs):
return super(IncidentArchiveMonthView, self).dispatch(*args, **kwargs)
class HomeView(TemplateView):
http_method_names = ['get', ]
template_name = 'status/home.html'
@method_decorator(public)
def dispatch(self, *args, **kwargs):
return super(HomeView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(HomeView, self).get_context_data(**kwargs)
incident_list = Incident.objects.filter(hidden=False).order_by('-updated')
context.update({
'incident_list': incident_list
})
if hasattr(settings, 'STATUS_TICKET_URL'):
context.update({'STATUS_TICKET_URL': settings.STATUS_TICKET_URL})
if hasattr(settings, 'STATUS_LOGO_URL'):
context.update({'STATUS_LOGO_URL': settings.STATUS_LOGO_URL})
if hasattr(settings, 'STATUS_TITLE'):
context.update({'STATUS_TITLE': settings.STATUS_TITLE})
status_level = 'success'
active_list = []
completed_list = []
for incident in incident_list:
try:
if incident.get_latest_update().status.type == 'danger':
status_level = 'danger'
active_list.append(incident)
elif incident.get_latest_update().status.type == 'warning':
if status_level != 'danger':
status_level = 'warning'
active_list.append(incident)
elif incident.get_latest_update().status.type == 'info':
if status_level not in ('warning', 'danger'):
status_level = 'info'
active_list.append(incident)
elif incident.get_latest_update().status.type == 'success':
completed_list.append(incident)
except AttributeError:
# Unable to get_latest_update(), 'None' has no .status
pass
context.update({
'status_level': status_level,
'active_list': active_list,
'completed_list': completed_list,
})
return context
| 35.135747
| 115
| 0.642112
|
815e66bd8eeab23eafc68007780b8c2c8d223e8a
| 64,066
|
py
|
Python
|
Lib/site-packages/rest_framework/serializers.py
|
funydukenus/mousemanagement
|
0c755fb624f6e0d95c594d5041261bacfc0225fd
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/rest_framework/serializers.py
|
funydukenus/mousemanagement
|
0c755fb624f6e0d95c594d5041261bacfc0225fd
|
[
"bzip2-1.0.6"
] | 10
|
2020-06-05T23:30:34.000Z
|
2021-09-22T18:56:54.000Z
|
Lib/site-packages/rest_framework/serializers.py
|
funydukenus/mousemanagement
|
0c755fb624f6e0d95c594d5041261bacfc0225fd
|
[
"bzip2-1.0.6"
] | null | null | null |
"""
Serializers and ModelSerializers are similar to Forms and ModelForms.
Unlike forms, they are not constrained to dealing with HTML output, and
form encoded input.
Serialization in REST framework is a two-phase process:
1. Serializers marshal between complex types like model instances, and
python primitives.
2. The process of marshalling between python primitives and request and
response content is handled by parsers and renderers.
"""
import copy
import inspect
import traceback
from collections import OrderedDict
from collections.abc import Mapping
from django.core.exceptions import FieldDoesNotExist, ImproperlyConfigured
from django.core.exceptions import ValidationError as DjangoValidationError
from django.db import models
from django.db.models import DurationField as ModelDurationField
from django.db.models.fields import Field as DjangoModelField
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from rest_framework.compat import postgres_fields
from rest_framework.exceptions import ErrorDetail, ValidationError
from rest_framework.fields import get_error_detail, set_value
from rest_framework.settings import api_settings
from rest_framework.utils import html, model_meta, representation
from rest_framework.utils.field_mapping import (
ClassLookupDict, get_field_kwargs, get_nested_relation_kwargs,
get_relation_kwargs, get_url_kwargs
)
from rest_framework.utils.serializer_helpers import (
BindingDict, BoundField, JSONBoundField, NestedBoundField, ReturnDict,
ReturnList
)
from rest_framework.validators import (
UniqueForDateValidator, UniqueForMonthValidator, UniqueForYearValidator,
UniqueTogetherValidator
)
# Note: We do the following so that users of the framework can use this style:
#
# example_field = serializers.CharField(...)
#
# This helps keep the separation between model fields, form fields, and
# serializer fields more explicit.
from rest_framework.fields import ( # NOQA # isort:skip
BooleanField, CharField, ChoiceField, DateField, DateTimeField, DecimalField,
DictField, DurationField, EmailField, Field, FileField, FilePathField, FloatField,
HiddenField, HStoreField, IPAddressField, ImageField, IntegerField, JSONField,
ListField, ModelField, MultipleChoiceField, NullBooleanField, ReadOnlyField,
RegexField, SerializerMethodField, SlugField, TimeField, URLField, UUIDField,
)
from rest_framework.relations import ( # NOQA # isort:skip
HyperlinkedIdentityField, HyperlinkedRelatedField, ManyRelatedField,
PrimaryKeyRelatedField, RelatedField, SlugRelatedField, StringRelatedField,
)
# Non-field imports, but public API
from rest_framework.fields import ( # NOQA # isort:skip
CreateOnlyDefault, CurrentUserDefault, SkipField, empty
)
from rest_framework.relations import Hyperlink, PKOnlyObject # NOQA # isort:skip
# We assume that 'validators' are intended for the child serializer,
# rather than the parent serializer.
LIST_SERIALIZER_KWARGS = (
'read_only', 'write_only', 'required', 'default', 'initial', 'source',
'label', 'help_text', 'style', 'error_messages', 'allow_empty',
'instance', 'data', 'partial', 'context', 'allow_null'
)
ALL_FIELDS = '__all__'
# BaseSerializer
# --------------
class BaseSerializer(Field):
"""
The BaseSerializer class provides a minimal class which may be used
for writing custom serializer implementations.
Note that we strongly restrict the ordering of operations/properties
that may be used on the serializer in order to enforce correct usage.
In particular, if a `data=` argument is passed then:
.is_valid() - Available.
.initial_data - Available.
.validated_data - Only available after calling `is_valid()`
.errors - Only available after calling `is_valid()`
.data - Only available after calling `is_valid()`
If a `data=` argument is not passed then:
.is_valid() - Not available.
.initial_data - Not available.
.validated_data - Not available.
.errors - Not available.
.data - Available.
"""
def __init__(self, instance=None, data=empty, **kwargs):
self.instance = instance
if data is not empty:
self.initial_data = data
self.partial = kwargs.pop('partial', False)
self._context = kwargs.pop('context', {})
kwargs.pop('many', None)
super().__init__(**kwargs)
def __new__(cls, *args, **kwargs):
# We override this method in order to automagically create
# `ListSerializer` classes instead when `many=True` is set.
if kwargs.pop('many', False):
return cls.many_init(*args, **kwargs)
return super().__new__(cls, *args, **kwargs)
@classmethod
def many_init(cls, *args, **kwargs):
"""
This method implements the creation of a `ListSerializer` parent
class when `many=True` is used. You can customize it if you need to
control which keyword arguments are passed to the parent, and
which are passed to the child.
Note that we're over-cautious in passing most arguments to both parent
and child classes in order to try to cover the general case. If you're
overriding this method you'll probably want something much simpler, eg:
@classmethod
def many_init(cls, *args, **kwargs):
kwargs['child'] = cls()
return CustomListSerializer(*args, **kwargs)
"""
allow_empty = kwargs.pop('allow_empty', None)
child_serializer = cls(*args, **kwargs)
list_kwargs = {
'child': child_serializer,
}
if allow_empty is not None:
list_kwargs['allow_empty'] = allow_empty
list_kwargs.update({
key: value for key, value in kwargs.items()
if key in LIST_SERIALIZER_KWARGS
})
meta = getattr(cls, 'Meta', None)
list_serializer_class = getattr(meta, 'list_serializer_class', ListSerializer)
return list_serializer_class(*args, **list_kwargs)
def to_internal_value(self, data):
raise NotImplementedError('`to_internal_value()` must be implemented.')
    def to_representation(self, instance):
raise NotImplementedError('`to_representation()` must be implemented.')
def update(self, instance, validated_data):
raise NotImplementedError('`update()` must be implemented.')
def create(self, validated_data):
raise NotImplementedError('`create()` must be implemented.')
def save(self, **kwargs):
assert not hasattr(self, 'save_object'), (
'Serializer `%s.%s` has old-style version 2 `.save_object()` '
'that is no longer compatible with REST framework 3. '
'Use the new-style `.create()` and `.update()` methods instead.' %
(self.__class__.__module__, self.__class__.__name__)
)
assert hasattr(self, '_errors'), (
'You must call `.is_valid()` before calling `.save()`.'
)
assert not self.errors, (
'You cannot call `.save()` on a serializer with invalid data.'
)
# Guard against incorrect use of `serializer.save(commit=False)`
assert 'commit' not in kwargs, (
"'commit' is not a valid keyword argument to the 'save()' method. "
"If you need to access data before committing to the database then "
"inspect 'serializer.validated_data' instead. "
"You can also pass additional keyword arguments to 'save()' if you "
"need to set extra attributes on the saved model instance. "
"For example: 'serializer.save(owner=request.user)'.'"
)
assert not hasattr(self, '_data'), (
"You cannot call `.save()` after accessing `serializer.data`."
"If you need to access data before committing to the database then "
"inspect 'serializer.validated_data' instead. "
)
validated_data = dict(
list(self.validated_data.items()) +
list(kwargs.items())
)
if self.instance is not None:
self.instance = self.update(self.instance, validated_data)
assert self.instance is not None, (
'`update()` did not return an object instance.'
)
else:
self.instance = self.create(validated_data)
assert self.instance is not None, (
'`create()` did not return an object instance.'
)
return self.instance
def is_valid(self, raise_exception=False):
assert not hasattr(self, 'restore_object'), (
'Serializer `%s.%s` has old-style version 2 `.restore_object()` '
'that is no longer compatible with REST framework 3. '
'Use the new-style `.create()` and `.update()` methods instead.' %
(self.__class__.__module__, self.__class__.__name__)
)
assert hasattr(self, 'initial_data'), (
'Cannot call `.is_valid()` as no `data=` keyword argument was '
'passed when instantiating the serializer instance.'
)
if not hasattr(self, '_validated_data'):
try:
self._validated_data = self.run_validation(self.initial_data)
except ValidationError as exc:
self._validated_data = {}
self._errors = exc.detail
else:
self._errors = {}
if self._errors and raise_exception:
raise ValidationError(self.errors)
return not bool(self._errors)
@property
def data(self):
if hasattr(self, 'initial_data') and not hasattr(self, '_validated_data'):
msg = (
'When a serializer is passed a `data` keyword argument you '
'must call `.is_valid()` before attempting to access the '
'serialized `.data` representation.\n'
'You should either call `.is_valid()` first, '
'or access `.initial_data` instead.'
)
raise AssertionError(msg)
if not hasattr(self, '_data'):
if self.instance is not None and not getattr(self, '_errors', None):
self._data = self.to_representation(self.instance)
elif hasattr(self, '_validated_data') and not getattr(self, '_errors', None):
self._data = self.to_representation(self.validated_data)
else:
self._data = self.get_initial()
return self._data
@property
def errors(self):
if not hasattr(self, '_errors'):
msg = 'You must call `.is_valid()` before accessing `.errors`.'
raise AssertionError(msg)
return self._errors
@property
def validated_data(self):
if not hasattr(self, '_validated_data'):
msg = 'You must call `.is_valid()` before accessing `.validated_data`.'
raise AssertionError(msg)
return self._validated_data
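# Illustrative sketch (not from the original source): the ordering rules
# documented on `BaseSerializer` above. `CommentSerializer` is a hypothetical
# subclass used only for this example.
#
#     serializer = CommentSerializer(data={'email': 'leila@example.com'})
#     serializer.is_valid()        # must be called first when `data=` is given
#     serializer.validated_data    # only available after `.is_valid()`
#     serializer.data              # only available after `.is_valid()`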
# Serializer & ListSerializer classes
# -----------------------------------
class SerializerMetaclass(type):
"""
This metaclass sets a dictionary named `_declared_fields` on the class.
Any instances of `Field` included as attributes on either the class
    or on any of its superclasses will be included in the
`_declared_fields` dictionary.
"""
@classmethod
def _get_declared_fields(cls, bases, attrs):
fields = [(field_name, attrs.pop(field_name))
for field_name, obj in list(attrs.items())
if isinstance(obj, Field)]
fields.sort(key=lambda x: x[1]._creation_counter)
# Ensures a base class field doesn't override cls attrs, and maintains
# field precedence when inheriting multiple parents. e.g. if there is a
# class C(A, B), and A and B both define 'field', use 'field' from A.
known = set(attrs)
def visit(name):
known.add(name)
return name
base_fields = [
(visit(name), f)
for base in bases if hasattr(base, '_declared_fields')
for name, f in base._declared_fields.items() if name not in known
]
return OrderedDict(base_fields + fields)
def __new__(cls, name, bases, attrs):
attrs['_declared_fields'] = cls._get_declared_fields(bases, attrs)
return super().__new__(cls, name, bases, attrs)
def as_serializer_error(exc):
assert isinstance(exc, (ValidationError, DjangoValidationError))
if isinstance(exc, DjangoValidationError):
detail = get_error_detail(exc)
else:
detail = exc.detail
if isinstance(detail, Mapping):
# If errors may be a dict we use the standard {key: list of values}.
# Here we ensure that all the values are *lists* of errors.
return {
key: value if isinstance(value, (list, Mapping)) else [value]
for key, value in detail.items()
}
elif isinstance(detail, list):
# Errors raised as a list are non-field errors.
return {
api_settings.NON_FIELD_ERRORS_KEY: detail
}
# Errors raised as a string are non-field errors.
return {
api_settings.NON_FIELD_ERRORS_KEY: [detail]
}
class Serializer(BaseSerializer, metaclass=SerializerMetaclass):
default_error_messages = {
'invalid': _('Invalid data. Expected a dictionary, but got {datatype}.')
}
@cached_property
def fields(self):
"""
A dictionary of {field_name: field_instance}.
"""
# `fields` is evaluated lazily. We do this to ensure that we don't
# have issues importing modules that use ModelSerializers as fields,
# even if Django's app-loading stage has not yet run.
fields = BindingDict(self)
for key, value in self.get_fields().items():
fields[key] = value
return fields
@property
def _writable_fields(self):
for field in self.fields.values():
if not field.read_only:
yield field
@property
def _readable_fields(self):
for field in self.fields.values():
if not field.write_only:
yield field
def get_fields(self):
"""
Returns a dictionary of {field_name: field_instance}.
"""
# Every new serializer is created with a clone of the field instances.
# This allows users to dynamically modify the fields on a serializer
# instance without affecting every other serializer instance.
return copy.deepcopy(self._declared_fields)
def get_validators(self):
"""
Returns a list of validator callables.
"""
# Used by the lazily-evaluated `validators` property.
meta = getattr(self, 'Meta', None)
validators = getattr(meta, 'validators', None)
return list(validators) if validators else []
def get_initial(self):
if hasattr(self, 'initial_data'):
# initial_data may not be a valid type
if not isinstance(self.initial_data, Mapping):
return OrderedDict()
return OrderedDict([
(field_name, field.get_value(self.initial_data))
for field_name, field in self.fields.items()
if (field.get_value(self.initial_data) is not empty) and
not field.read_only
])
return OrderedDict([
(field.field_name, field.get_initial())
for field in self.fields.values()
if not field.read_only
])
def get_value(self, dictionary):
# We override the default field access in order to support
# nested HTML forms.
if html.is_html_input(dictionary):
return html.parse_html_dict(dictionary, prefix=self.field_name) or empty
return dictionary.get(self.field_name, empty)
def run_validation(self, data=empty):
"""
We override the default `run_validation`, because the validation
performed by validators and the `.validate()` method should
be coerced into an error dictionary with a 'non_fields_error' key.
"""
(is_empty_value, data) = self.validate_empty_values(data)
if is_empty_value:
return data
value = self.to_internal_value(data)
try:
self.run_validators(value)
value = self.validate(value)
assert value is not None, '.validate() should return the validated data'
except (ValidationError, DjangoValidationError) as exc:
raise ValidationError(detail=as_serializer_error(exc))
return value
def _read_only_defaults(self):
fields = [
field for field in self.fields.values()
if (field.read_only) and (field.default != empty) and (field.source != '*') and ('.' not in field.source)
]
defaults = OrderedDict()
for field in fields:
try:
default = field.get_default()
except SkipField:
continue
defaults[field.source] = default
return defaults
def run_validators(self, value):
"""
Add read_only fields with defaults to value before running validators.
"""
if isinstance(value, dict):
to_validate = self._read_only_defaults()
to_validate.update(value)
else:
to_validate = value
super().run_validators(to_validate)
def to_internal_value(self, data):
"""
Dict of native values <- Dict of primitive datatypes.
"""
if not isinstance(data, Mapping):
message = self.error_messages['invalid'].format(
datatype=type(data).__name__
)
raise ValidationError({
api_settings.NON_FIELD_ERRORS_KEY: [message]
}, code='invalid')
ret = OrderedDict()
errors = OrderedDict()
fields = self._writable_fields
for field in fields:
validate_method = getattr(self, 'validate_' + field.field_name, None)
primitive_value = field.get_value(data)
try:
validated_value = field.run_validation(primitive_value)
if validate_method is not None:
validated_value = validate_method(validated_value)
except ValidationError as exc:
errors[field.field_name] = exc.detail
except DjangoValidationError as exc:
errors[field.field_name] = get_error_detail(exc)
except SkipField:
pass
else:
set_value(ret, field.source_attrs, validated_value)
if errors:
raise ValidationError(errors)
return ret
def to_representation(self, instance):
"""
Object instance -> Dict of primitive datatypes.
"""
ret = OrderedDict()
fields = self._readable_fields
for field in fields:
try:
attribute = field.get_attribute(instance)
except SkipField:
continue
# We skip `to_representation` for `None` values so that fields do
# not have to explicitly deal with that case.
#
# For related fields with `use_pk_only_optimization` we need to
# resolve the pk value.
check_for_none = attribute.pk if isinstance(attribute, PKOnlyObject) else attribute
if check_for_none is None:
ret[field.field_name] = None
else:
ret[field.field_name] = field.to_representation(attribute)
return ret
def validate(self, attrs):
return attrs
def __repr__(self):
return representation.serializer_repr(self, indent=1)
# The following are used for accessing `BoundField` instances on the
# serializer, for the purposes of presenting a form-like API onto the
# field values and field errors.
def __iter__(self):
for field in self.fields.values():
yield self[field.field_name]
def __getitem__(self, key):
field = self.fields[key]
value = self.data.get(key)
error = self.errors.get(key) if hasattr(self, '_errors') else None
if isinstance(field, Serializer):
return NestedBoundField(field, value, error)
if isinstance(field, JSONField):
return JSONBoundField(field, value, error)
return BoundField(field, value, error)
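    # Illustrative sketch (not part of the original class): the mapping access
    # above gives form-style introspection on a serializer instance that has
    # already had .is_valid() called (names here are hypothetical).
    #
    #   field = serializer['title']        # BoundField (or NestedBoundField)
    #   field.value, field.errors, field.name
    #   for bound_field in serializer:     # iterate all bound fields
    #       ...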
# Include a backlink to the serializer class on return objects.
# Allows renderers such as HTMLFormRenderer to get the full field info.
@property
def data(self):
ret = super().data
return ReturnDict(ret, serializer=self)
@property
def errors(self):
ret = super().errors
if isinstance(ret, list) and len(ret) == 1 and getattr(ret[0], 'code', None) == 'null':
# Edge case. Provide a more descriptive error than
# "this field may not be null", when no data is passed.
detail = ErrorDetail('No data provided', code='null')
ret = {api_settings.NON_FIELD_ERRORS_KEY: [detail]}
return ReturnDict(ret, serializer=self)
# There's some replication of `ListField` here,
# but that's probably better than obfuscating the call hierarchy.
class ListSerializer(BaseSerializer):
child = None
many = True
default_error_messages = {
'not_a_list': _('Expected a list of items but got type "{input_type}".'),
'empty': _('This list may not be empty.')
}
def __init__(self, *args, **kwargs):
self.child = kwargs.pop('child', copy.deepcopy(self.child))
self.allow_empty = kwargs.pop('allow_empty', True)
assert self.child is not None, '`child` is a required argument.'
assert not inspect.isclass(self.child), '`child` has not been instantiated.'
super().__init__(*args, **kwargs)
self.child.bind(field_name='', parent=self)
def get_initial(self):
if hasattr(self, 'initial_data'):
return self.to_representation(self.initial_data)
return []
def get_value(self, dictionary):
"""
Given the input dictionary, return the field value.
"""
# We override the default field access in order to support
# lists in HTML forms.
if html.is_html_input(dictionary):
return html.parse_html_list(dictionary, prefix=self.field_name, default=empty)
return dictionary.get(self.field_name, empty)
def run_validation(self, data=empty):
"""
We override the default `run_validation`, because the validation
performed by validators and the `.validate()` method should
        be coerced into an error dictionary with a 'non_field_errors' key.
"""
(is_empty_value, data) = self.validate_empty_values(data)
if is_empty_value:
return data
value = self.to_internal_value(data)
try:
self.run_validators(value)
value = self.validate(value)
assert value is not None, '.validate() should return the validated data'
except (ValidationError, DjangoValidationError) as exc:
raise ValidationError(detail=as_serializer_error(exc))
return value
def to_internal_value(self, data):
"""
List of dicts of native values <- List of dicts of primitive datatypes.
"""
if html.is_html_input(data):
data = html.parse_html_list(data, default=[])
if not isinstance(data, list):
message = self.error_messages['not_a_list'].format(
input_type=type(data).__name__
)
raise ValidationError({
api_settings.NON_FIELD_ERRORS_KEY: [message]
}, code='not_a_list')
if not self.allow_empty and len(data) == 0:
message = self.error_messages['empty']
raise ValidationError({
api_settings.NON_FIELD_ERRORS_KEY: [message]
}, code='empty')
ret = []
errors = []
for item in data:
try:
validated = self.child.run_validation(item)
except ValidationError as exc:
errors.append(exc.detail)
else:
ret.append(validated)
errors.append({})
if any(errors):
raise ValidationError(errors)
return ret
def to_representation(self, data):
"""
List of object instances -> List of dicts of primitive datatypes.
"""
# Dealing with nested relationships, data can be a Manager,
# so, first get a queryset from the Manager if needed
iterable = data.all() if isinstance(data, models.Manager) else data
return [
self.child.to_representation(item) for item in iterable
]
def validate(self, attrs):
return attrs
def update(self, instance, validated_data):
raise NotImplementedError(
"Serializers with many=True do not support multiple update by "
"default, only multiple create. For updates it is unclear how to "
"deal with insertions and deletions. If you need to support "
"multiple update, use a `ListSerializer` class and override "
"`.update()` so you can specify the behavior exactly."
)
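    # Illustrative sketch (not part of the original class): the kind of override
    # the message above asks for, assuming each incoming item carries its own
    # 'id'. BookListSerializer is a hypothetical name.
    #
    #   class BookListSerializer(ListSerializer):
    #       def update(self, instance, validated_data):
    #           existing = {book.id: book for book in instance}
    #           return [
    #               self.child.update(existing[item['id']], item)
    #               for item in validated_data
    #           ]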
def create(self, validated_data):
return [
self.child.create(attrs) for attrs in validated_data
]
def save(self, **kwargs):
"""
Save and return a list of object instances.
"""
# Guard against incorrect use of `serializer.save(commit=False)`
assert 'commit' not in kwargs, (
"'commit' is not a valid keyword argument to the 'save()' method. "
"If you need to access data before committing to the database then "
"inspect 'serializer.validated_data' instead. "
"You can also pass additional keyword arguments to 'save()' if you "
"need to set extra attributes on the saved model instance. "
"For example: 'serializer.save(owner=request.user)'.'"
)
validated_data = [
dict(list(attrs.items()) + list(kwargs.items()))
for attrs in self.validated_data
]
if self.instance is not None:
self.instance = self.update(self.instance, validated_data)
assert self.instance is not None, (
'`update()` did not return an object instance.'
)
else:
self.instance = self.create(validated_data)
assert self.instance is not None, (
'`create()` did not return an object instance.'
)
return self.instance
def is_valid(self, raise_exception=False):
# This implementation is the same as the default,
# except that we use lists, rather than dicts, as the empty case.
assert hasattr(self, 'initial_data'), (
'Cannot call `.is_valid()` as no `data=` keyword argument was '
'passed when instantiating the serializer instance.'
)
if not hasattr(self, '_validated_data'):
try:
self._validated_data = self.run_validation(self.initial_data)
except ValidationError as exc:
self._validated_data = []
self._errors = exc.detail
else:
self._errors = []
if self._errors and raise_exception:
raise ValidationError(self.errors)
return not bool(self._errors)
def __repr__(self):
return representation.list_repr(self, indent=1)
# Include a backlink to the serializer class on return objects.
# Allows renderers such as HTMLFormRenderer to get the full field info.
@property
def data(self):
ret = super().data
return ReturnList(ret, serializer=self)
@property
def errors(self):
ret = super().errors
if isinstance(ret, list) and len(ret) == 1 and getattr(ret[0], 'code', None) == 'null':
# Edge case. Provide a more descriptive error than
# "this field may not be null", when no data is passed.
detail = ErrorDetail('No data provided', code='null')
ret = {api_settings.NON_FIELD_ERRORS_KEY: [detail]}
if isinstance(ret, dict):
return ReturnDict(ret, serializer=self)
return ReturnList(ret, serializer=self)
# ModelSerializer & HyperlinkedModelSerializer
# --------------------------------------------
def raise_errors_on_nested_writes(method_name, serializer, validated_data):
"""
Give explicit errors when users attempt to pass writable nested data.
If we don't do this explicitly they'd get a less helpful error when
calling `.save()` on the serializer.
We don't *automatically* support these sorts of nested writes because
there are too many ambiguities to define a default behavior.
Eg. Suppose we have a `UserSerializer` with a nested profile. How should
we handle the case of an update, where the `profile` relationship does
not exist? Any of the following might be valid:
* Raise an application error.
* Silently ignore the nested part of the update.
* Automatically create a profile instance.
"""
ModelClass = serializer.Meta.model
model_field_info = model_meta.get_field_info(ModelClass)
# Ensure we don't have a writable nested field. For example:
#
# class UserSerializer(ModelSerializer):
# ...
# profile = ProfileSerializer()
assert not any(
isinstance(field, BaseSerializer) and
(field.source in validated_data) and
(field.source in model_field_info.relations) and
isinstance(validated_data[field.source], (list, dict))
for field in serializer._writable_fields
), (
'The `.{method_name}()` method does not support writable nested '
'fields by default.\nWrite an explicit `.{method_name}()` method for '
'serializer `{module}.{class_name}`, or set `read_only=True` on '
'nested serializer fields.'.format(
method_name=method_name,
module=serializer.__class__.__module__,
class_name=serializer.__class__.__name__
)
)
# Ensure we don't have a writable dotted-source field. For example:
#
# class UserSerializer(ModelSerializer):
# ...
# address = serializer.CharField('profile.address')
#
# Though, non-relational fields (e.g., JSONField) are acceptable. For example:
#
# class NonRelationalPersonModel(models.Model):
# profile = JSONField()
#
# class UserSerializer(ModelSerializer):
# ...
# address = serializer.CharField('profile.address')
assert not any(
len(field.source_attrs) > 1 and
(field.source_attrs[0] in validated_data) and
(field.source_attrs[0] in model_field_info.relations) and
isinstance(validated_data[field.source_attrs[0]], (list, dict))
for field in serializer._writable_fields
), (
'The `.{method_name}()` method does not support writable dotted-source '
'fields by default.\nWrite an explicit `.{method_name}()` method for '
'serializer `{module}.{class_name}`, or set `read_only=True` on '
'dotted-source serializer fields.'.format(
method_name=method_name,
module=serializer.__class__.__module__,
class_name=serializer.__class__.__name__
)
)
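# Illustrative sketch (not part of the original module): the configuration the
# assertions above guard against, and the usual fix. UserSerializer and
# ProfileSerializer are hypothetical names.
#
#   class UserSerializer(ModelSerializer):
#       profile = ProfileSerializer()                # writable nested: raises on .save()
#       profile = ProfileSerializer(read_only=True)  # accepted: read-only nested
#
# To accept nested input, write an explicit .create()/.update() on the parent
# serializer instead of relying on the default implementations.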
class ModelSerializer(Serializer):
"""
A `ModelSerializer` is just a regular `Serializer`, except that:
* A set of default fields are automatically populated.
* A set of default validators are automatically populated.
* Default `.create()` and `.update()` implementations are provided.
The process of automatically determining a set of serializer fields
based on the model fields is reasonably complex, but you almost certainly
don't need to dig into the implementation.
If the `ModelSerializer` class *doesn't* generate the set of fields that
you need you should either declare the extra/differing fields explicitly on
the serializer class, or simply use a `Serializer` class.
"""
serializer_field_mapping = {
models.AutoField: IntegerField,
models.BigIntegerField: IntegerField,
models.BooleanField: BooleanField,
models.CharField: CharField,
models.CommaSeparatedIntegerField: CharField,
models.DateField: DateField,
models.DateTimeField: DateTimeField,
models.DecimalField: DecimalField,
models.EmailField: EmailField,
models.Field: ModelField,
models.FileField: FileField,
models.FloatField: FloatField,
models.ImageField: ImageField,
models.IntegerField: IntegerField,
models.NullBooleanField: NullBooleanField,
models.PositiveIntegerField: IntegerField,
models.PositiveSmallIntegerField: IntegerField,
models.SlugField: SlugField,
models.SmallIntegerField: IntegerField,
models.TextField: CharField,
models.TimeField: TimeField,
models.URLField: URLField,
models.GenericIPAddressField: IPAddressField,
models.FilePathField: FilePathField,
}
if ModelDurationField is not None:
serializer_field_mapping[ModelDurationField] = DurationField
serializer_related_field = PrimaryKeyRelatedField
serializer_related_to_field = SlugRelatedField
serializer_url_field = HyperlinkedIdentityField
serializer_choice_field = ChoiceField
# The field name for hyperlinked identity fields. Defaults to 'url'.
# You can modify this using the API setting.
#
# Note that if you instead need modify this on a per-serializer basis,
# you'll also need to ensure you update the `create` method on any generic
# views, to correctly handle the 'Location' response header for
# "HTTP 201 Created" responses.
url_field_name = None
# Default `create` and `update` behavior...
def create(self, validated_data):
"""
We have a bit of extra checking around this in order to provide
descriptive messages when something goes wrong, but this method is
essentially just:
return ExampleModel.objects.create(**validated_data)
If there are many to many fields present on the instance then they
cannot be set until the model is instantiated, in which case the
implementation is like so:
example_relationship = validated_data.pop('example_relationship')
instance = ExampleModel.objects.create(**validated_data)
instance.example_relationship = example_relationship
return instance
The default implementation also does not handle nested relationships.
If you want to support writable nested relationships you'll need
to write an explicit `.create()` method.
"""
raise_errors_on_nested_writes('create', self, validated_data)
ModelClass = self.Meta.model
# Remove many-to-many relationships from validated_data.
# They are not valid arguments to the default `.create()` method,
# as they require that the instance has already been saved.
info = model_meta.get_field_info(ModelClass)
many_to_many = {}
for field_name, relation_info in info.relations.items():
if relation_info.to_many and (field_name in validated_data):
many_to_many[field_name] = validated_data.pop(field_name)
try:
instance = ModelClass._default_manager.create(**validated_data)
except TypeError:
tb = traceback.format_exc()
msg = (
'Got a `TypeError` when calling `%s.%s.create()`. '
'This may be because you have a writable field on the '
'serializer class that is not a valid argument to '
'`%s.%s.create()`. You may need to make the field '
'read-only, or override the %s.create() method to handle '
'this correctly.\nOriginal exception was:\n %s' %
(
ModelClass.__name__,
ModelClass._default_manager.name,
ModelClass.__name__,
ModelClass._default_manager.name,
self.__class__.__name__,
tb
)
)
raise TypeError(msg)
# Save many-to-many relationships after the instance is created.
if many_to_many:
for field_name, value in many_to_many.items():
field = getattr(instance, field_name)
field.set(value)
return instance
def update(self, instance, validated_data):
raise_errors_on_nested_writes('update', self, validated_data)
info = model_meta.get_field_info(instance)
# Simply set each attribute on the instance, and then save it.
# Note that unlike `.create()` we don't need to treat many-to-many
# relationships as being a special case. During updates we already
# have an instance pk for the relationships to be associated with.
m2m_fields = []
for attr, value in validated_data.items():
if attr in info.relations and info.relations[attr].to_many:
m2m_fields.append((attr, value))
else:
setattr(instance, attr, value)
instance.save()
# Note that many-to-many fields are set after updating instance.
# Setting m2m fields triggers signals which could potentially change
# updated instance and we do not want it to collide with .update()
for attr, value in m2m_fields:
field = getattr(instance, attr)
field.set(value)
return instance
# Determine the fields to apply...
def get_fields(self):
"""
Return the dict of field names -> field instances that should be
used for `self.fields` when instantiating the serializer.
"""
if self.url_field_name is None:
self.url_field_name = api_settings.URL_FIELD_NAME
assert hasattr(self, 'Meta'), (
'Class {serializer_class} missing "Meta" attribute'.format(
serializer_class=self.__class__.__name__
)
)
assert hasattr(self.Meta, 'model'), (
'Class {serializer_class} missing "Meta.model" attribute'.format(
serializer_class=self.__class__.__name__
)
)
if model_meta.is_abstract_model(self.Meta.model):
raise ValueError(
'Cannot use ModelSerializer with Abstract Models.'
)
declared_fields = copy.deepcopy(self._declared_fields)
model = getattr(self.Meta, 'model')
depth = getattr(self.Meta, 'depth', 0)
if depth is not None:
assert depth >= 0, "'depth' may not be negative."
assert depth <= 10, "'depth' may not be greater than 10."
# Retrieve metadata about fields & relationships on the model class.
info = model_meta.get_field_info(model)
field_names = self.get_field_names(declared_fields, info)
# Determine any extra field arguments and hidden fields that
# should be included
extra_kwargs = self.get_extra_kwargs()
extra_kwargs, hidden_fields = self.get_uniqueness_extra_kwargs(
field_names, declared_fields, extra_kwargs
)
# Determine the fields that should be included on the serializer.
fields = OrderedDict()
for field_name in field_names:
# If the field is explicitly declared on the class then use that.
if field_name in declared_fields:
fields[field_name] = declared_fields[field_name]
continue
extra_field_kwargs = extra_kwargs.get(field_name, {})
source = extra_field_kwargs.get('source', '*')
if source == '*':
source = field_name
# Determine the serializer field class and keyword arguments.
field_class, field_kwargs = self.build_field(
source, info, model, depth
)
# Include any kwargs defined in `Meta.extra_kwargs`
field_kwargs = self.include_extra_kwargs(
field_kwargs, extra_field_kwargs
)
# Create the serializer field.
fields[field_name] = field_class(**field_kwargs)
# Add in any hidden fields.
fields.update(hidden_fields)
return fields
# Methods for determining the set of field names to include...
def get_field_names(self, declared_fields, info):
"""
Returns the list of all field names that should be created when
instantiating this serializer class. This is based on the default
set of fields, but also takes into account the `Meta.fields` or
`Meta.exclude` options if they have been specified.
"""
fields = getattr(self.Meta, 'fields', None)
exclude = getattr(self.Meta, 'exclude', None)
if fields and fields != ALL_FIELDS and not isinstance(fields, (list, tuple)):
raise TypeError(
'The `fields` option must be a list or tuple or "__all__". '
'Got %s.' % type(fields).__name__
)
if exclude and not isinstance(exclude, (list, tuple)):
raise TypeError(
'The `exclude` option must be a list or tuple. Got %s.' %
type(exclude).__name__
)
assert not (fields and exclude), (
"Cannot set both 'fields' and 'exclude' options on "
"serializer {serializer_class}.".format(
serializer_class=self.__class__.__name__
)
)
assert not (fields is None and exclude is None), (
"Creating a ModelSerializer without either the 'fields' attribute "
"or the 'exclude' attribute has been deprecated since 3.3.0, "
"and is now disallowed. Add an explicit fields = '__all__' to the "
"{serializer_class} serializer.".format(
serializer_class=self.__class__.__name__
),
)
if fields == ALL_FIELDS:
fields = None
if fields is not None:
# Ensure that all declared fields have also been included in the
# `Meta.fields` option.
# Do not require any fields that are declared in a parent class,
# in order to allow serializer subclasses to only include
# a subset of fields.
required_field_names = set(declared_fields)
for cls in self.__class__.__bases__:
required_field_names -= set(getattr(cls, '_declared_fields', []))
for field_name in required_field_names:
assert field_name in fields, (
"The field '{field_name}' was declared on serializer "
"{serializer_class}, but has not been included in the "
"'fields' option.".format(
field_name=field_name,
serializer_class=self.__class__.__name__
)
)
return fields
# Use the default set of field names if `Meta.fields` is not specified.
fields = self.get_default_field_names(declared_fields, info)
if exclude is not None:
# If `Meta.exclude` is included, then remove those fields.
for field_name in exclude:
assert field_name not in self._declared_fields, (
"Cannot both declare the field '{field_name}' and include "
"it in the {serializer_class} 'exclude' option. Remove the "
"field or, if inherited from a parent serializer, disable "
"with `{field_name} = None`."
.format(
field_name=field_name,
serializer_class=self.__class__.__name__
)
)
assert field_name in fields, (
"The field '{field_name}' was included on serializer "
"{serializer_class} in the 'exclude' option, but does "
"not match any model field.".format(
field_name=field_name,
serializer_class=self.__class__.__name__
)
)
fields.remove(field_name)
return fields
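    # Illustrative sketch (not part of the original class): the mutually
    # exclusive ways of selecting fields that this method enforces, shown on a
    # hypothetical AccountSerializer.
    #
    #   class Meta:
    #       model = Account
    #       fields = ['id', 'owner', 'created']     # or fields = '__all__'
    #
    #   class Meta:
    #       model = Account
    #       exclude = ['internal_notes']            # never combined with 'fields'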
def get_default_field_names(self, declared_fields, model_info):
"""
Return the default list of field names that will be used if the
`Meta.fields` option is not specified.
"""
return (
[model_info.pk.name] +
list(declared_fields) +
list(model_info.fields) +
list(model_info.forward_relations)
)
# Methods for constructing serializer fields...
def build_field(self, field_name, info, model_class, nested_depth):
"""
Return a two tuple of (cls, kwargs) to build a serializer field with.
"""
if field_name in info.fields_and_pk:
model_field = info.fields_and_pk[field_name]
return self.build_standard_field(field_name, model_field)
elif field_name in info.relations:
relation_info = info.relations[field_name]
if not nested_depth:
return self.build_relational_field(field_name, relation_info)
else:
return self.build_nested_field(field_name, relation_info, nested_depth)
elif hasattr(model_class, field_name):
return self.build_property_field(field_name, model_class)
elif field_name == self.url_field_name:
return self.build_url_field(field_name, model_class)
return self.build_unknown_field(field_name, model_class)
def build_standard_field(self, field_name, model_field):
"""
Create regular model fields.
"""
field_mapping = ClassLookupDict(self.serializer_field_mapping)
field_class = field_mapping[model_field]
field_kwargs = get_field_kwargs(field_name, model_field)
# Special case to handle when a OneToOneField is also the primary key
if model_field.one_to_one and model_field.primary_key:
field_class = self.serializer_related_field
field_kwargs['queryset'] = model_field.related_model.objects
if 'choices' in field_kwargs:
# Fields with choices get coerced into `ChoiceField`
# instead of using their regular typed field.
field_class = self.serializer_choice_field
# Some model fields may introduce kwargs that would not be valid
# for the choice field. We need to strip these out.
# Eg. models.DecimalField(max_digits=3, decimal_places=1, choices=DECIMAL_CHOICES)
valid_kwargs = {
'read_only', 'write_only',
'required', 'default', 'initial', 'source',
'label', 'help_text', 'style',
'error_messages', 'validators', 'allow_null', 'allow_blank',
'choices'
}
for key in list(field_kwargs):
if key not in valid_kwargs:
field_kwargs.pop(key)
if not issubclass(field_class, ModelField):
# `model_field` is only valid for the fallback case of
# `ModelField`, which is used when no other typed field
# matched to the model field.
field_kwargs.pop('model_field', None)
if not issubclass(field_class, CharField) and not issubclass(field_class, ChoiceField):
# `allow_blank` is only valid for textual fields.
field_kwargs.pop('allow_blank', None)
if postgres_fields and isinstance(model_field, postgres_fields.JSONField):
# Populate the `encoder` argument of `JSONField` instances generated
# for the PostgreSQL specific `JSONField`.
field_kwargs['encoder'] = getattr(model_field, 'encoder', None)
if postgres_fields and isinstance(model_field, postgres_fields.ArrayField):
# Populate the `child` argument on `ListField` instances generated
# for the PostgreSQL specific `ArrayField`.
child_model_field = model_field.base_field
child_field_class, child_field_kwargs = self.build_standard_field(
'child', child_model_field
)
field_kwargs['child'] = child_field_class(**child_field_kwargs)
return field_class, field_kwargs
def build_relational_field(self, field_name, relation_info):
"""
Create fields for forward and reverse relationships.
"""
field_class = self.serializer_related_field
field_kwargs = get_relation_kwargs(field_name, relation_info)
to_field = field_kwargs.pop('to_field', None)
if to_field and not relation_info.reverse and not relation_info.related_model._meta.get_field(to_field).primary_key:
field_kwargs['slug_field'] = to_field
field_class = self.serializer_related_to_field
# `view_name` is only valid for hyperlinked relationships.
if not issubclass(field_class, HyperlinkedRelatedField):
field_kwargs.pop('view_name', None)
return field_class, field_kwargs
def build_nested_field(self, field_name, relation_info, nested_depth):
"""
Create nested fields for forward and reverse relationships.
"""
class NestedSerializer(ModelSerializer):
class Meta:
model = relation_info.related_model
depth = nested_depth - 1
fields = '__all__'
field_class = NestedSerializer
field_kwargs = get_nested_relation_kwargs(relation_info)
return field_class, field_kwargs
def build_property_field(self, field_name, model_class):
"""
Create a read only field for model methods and properties.
"""
field_class = ReadOnlyField
field_kwargs = {}
return field_class, field_kwargs
def build_url_field(self, field_name, model_class):
"""
Create a field representing the object's own URL.
"""
field_class = self.serializer_url_field
field_kwargs = get_url_kwargs(model_class)
return field_class, field_kwargs
def build_unknown_field(self, field_name, model_class):
"""
Raise an error on any unknown fields.
"""
raise ImproperlyConfigured(
'Field name `%s` is not valid for model `%s`.' %
(field_name, model_class.__name__)
)
def include_extra_kwargs(self, kwargs, extra_kwargs):
"""
Include any 'extra_kwargs' that have been included for this field,
possibly removing any incompatible existing keyword arguments.
"""
if extra_kwargs.get('read_only', False):
for attr in [
'required', 'default', 'allow_blank', 'allow_null',
'min_length', 'max_length', 'min_value', 'max_value',
'validators', 'queryset'
]:
kwargs.pop(attr, None)
if extra_kwargs.get('default') and kwargs.get('required') is False:
kwargs.pop('required')
if extra_kwargs.get('read_only', kwargs.get('read_only', False)):
extra_kwargs.pop('required', None) # Read only fields should always omit the 'required' argument.
kwargs.update(extra_kwargs)
return kwargs
# Methods for determining additional keyword arguments to apply...
def get_extra_kwargs(self):
"""
Return a dictionary mapping field names to a dictionary of
additional keyword arguments.
"""
extra_kwargs = copy.deepcopy(getattr(self.Meta, 'extra_kwargs', {}))
read_only_fields = getattr(self.Meta, 'read_only_fields', None)
if read_only_fields is not None:
if not isinstance(read_only_fields, (list, tuple)):
raise TypeError(
'The `read_only_fields` option must be a list or tuple. '
'Got %s.' % type(read_only_fields).__name__
)
for field_name in read_only_fields:
kwargs = extra_kwargs.get(field_name, {})
kwargs['read_only'] = True
extra_kwargs[field_name] = kwargs
else:
# Guard against the possible misspelling `readonly_fields` (used
# by the Django admin and others).
assert not hasattr(self.Meta, 'readonly_fields'), (
'Serializer `%s.%s` has field `readonly_fields`; '
'the correct spelling for the option is `read_only_fields`.' %
(self.__class__.__module__, self.__class__.__name__)
)
return extra_kwargs
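    # Illustrative sketch (not part of the original class): how the options read
    # by this method are typically declared, on a hypothetical serializer.
    #
    #   class Meta:
    #       model = Account
    #       fields = ['id', 'owner', 'password', 'created']
    #       read_only_fields = ['created']                      # merged as {'read_only': True}
    #       extra_kwargs = {'password': {'write_only': True}}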
def get_uniqueness_extra_kwargs(self, field_names, declared_fields, extra_kwargs):
"""
Return any additional field options that need to be included as a
result of uniqueness constraints on the model. This is returned as
a two-tuple of:
('dict of updated extra kwargs', 'mapping of hidden fields')
"""
if getattr(self.Meta, 'validators', None) is not None:
return (extra_kwargs, {})
model = getattr(self.Meta, 'model')
model_fields = self._get_model_fields(
field_names, declared_fields, extra_kwargs
)
# Determine if we need any additional `HiddenField` or extra keyword
# arguments to deal with `unique_for` dates that are required to
# be in the input data in order to validate it.
unique_constraint_names = set()
for model_field in model_fields.values():
# Include each of the `unique_for_*` field names.
unique_constraint_names |= {model_field.unique_for_date, model_field.unique_for_month,
model_field.unique_for_year}
unique_constraint_names -= {None}
# Include each of the `unique_together` field names,
# so long as all the field names are included on the serializer.
for parent_class in [model] + list(model._meta.parents):
for unique_together_list in parent_class._meta.unique_together:
if set(field_names).issuperset(set(unique_together_list)):
unique_constraint_names |= set(unique_together_list)
# Now we have all the field names that have uniqueness constraints
# applied, we can add the extra 'required=...' or 'default=...'
# arguments that are appropriate to these fields, or add a `HiddenField` for it.
hidden_fields = {}
uniqueness_extra_kwargs = {}
for unique_constraint_name in unique_constraint_names:
            # Get the model field that is referred to.
unique_constraint_field = model._meta.get_field(unique_constraint_name)
if getattr(unique_constraint_field, 'auto_now_add', None):
default = CreateOnlyDefault(timezone.now)
elif getattr(unique_constraint_field, 'auto_now', None):
default = timezone.now
elif unique_constraint_field.has_default():
default = unique_constraint_field.default
else:
default = empty
if unique_constraint_name in model_fields:
# The corresponding field is present in the serializer
if default is empty:
uniqueness_extra_kwargs[unique_constraint_name] = {'required': True}
else:
uniqueness_extra_kwargs[unique_constraint_name] = {'default': default}
elif default is not empty:
# The corresponding field is not present in the
# serializer. We have a default to use for it, so
# add in a hidden field that populates it.
hidden_fields[unique_constraint_name] = HiddenField(default=default)
# Update `extra_kwargs` with any new options.
for key, value in uniqueness_extra_kwargs.items():
if key in extra_kwargs:
value.update(extra_kwargs[key])
extra_kwargs[key] = value
return extra_kwargs, hidden_fields
def _get_model_fields(self, field_names, declared_fields, extra_kwargs):
"""
Returns all the model fields that are being mapped to by fields
on the serializer class.
Returned as a dict of 'model field name' -> 'model field'.
Used internally by `get_uniqueness_field_options`.
"""
model = getattr(self.Meta, 'model')
model_fields = {}
for field_name in field_names:
if field_name in declared_fields:
# If the field is declared on the serializer
field = declared_fields[field_name]
source = field.source or field_name
else:
try:
source = extra_kwargs[field_name]['source']
except KeyError:
source = field_name
if '.' in source or source == '*':
# Model fields will always have a simple source mapping,
# they can't be nested attribute lookups.
continue
try:
field = model._meta.get_field(source)
if isinstance(field, DjangoModelField):
model_fields[source] = field
except FieldDoesNotExist:
pass
return model_fields
# Determine the validators to apply...
def get_validators(self):
"""
Determine the set of validators to use when instantiating serializer.
"""
# If the validators have been declared explicitly then use that.
validators = getattr(getattr(self, 'Meta', None), 'validators', None)
if validators is not None:
return list(validators)
# Otherwise use the default set of validators.
return (
self.get_unique_together_validators() +
self.get_unique_for_date_validators()
)
def get_unique_together_validators(self):
"""
Determine a default set of validators for any unique_together constraints.
"""
model_class_inheritance_tree = (
[self.Meta.model] +
list(self.Meta.model._meta.parents)
)
        # The field names we're passing through here only include fields
# which may map onto a model field. Any dotted field name lookups
# cannot map to a field, and must be a traversal, so we're not
# including those.
field_names = {
field.source for field in self._writable_fields
if (field.source != '*') and ('.' not in field.source)
}
# Special Case: Add read_only fields with defaults.
field_names |= {
field.source for field in self.fields.values()
if (field.read_only) and (field.default != empty) and (field.source != '*') and ('.' not in field.source)
}
        # Note that we check `unique_together` both on the base model class
        # and on any parent classes.
validators = []
for parent_class in model_class_inheritance_tree:
for unique_together in parent_class._meta.unique_together:
if field_names.issuperset(set(unique_together)):
validator = UniqueTogetherValidator(
queryset=parent_class._default_manager,
fields=unique_together
)
validators.append(validator)
return validators
def get_unique_for_date_validators(self):
"""
Determine a default set of validators for the following constraints:
* unique_for_date
* unique_for_month
* unique_for_year
"""
info = model_meta.get_field_info(self.Meta.model)
default_manager = self.Meta.model._default_manager
field_names = [field.source for field in self.fields.values()]
validators = []
for field_name, field in info.fields_and_pk.items():
if field.unique_for_date and field_name in field_names:
validator = UniqueForDateValidator(
queryset=default_manager,
field=field_name,
date_field=field.unique_for_date
)
validators.append(validator)
if field.unique_for_month and field_name in field_names:
validator = UniqueForMonthValidator(
queryset=default_manager,
field=field_name,
date_field=field.unique_for_month
)
validators.append(validator)
if field.unique_for_year and field_name in field_names:
validator = UniqueForYearValidator(
queryset=default_manager,
field=field_name,
date_field=field.unique_for_year
)
validators.append(validator)
return validators
if hasattr(models, 'UUIDField'):
ModelSerializer.serializer_field_mapping[models.UUIDField] = UUIDField
# IPAddressField is deprecated in Django
if hasattr(models, 'IPAddressField'):
ModelSerializer.serializer_field_mapping[models.IPAddressField] = IPAddressField
if postgres_fields:
ModelSerializer.serializer_field_mapping[postgres_fields.HStoreField] = HStoreField
ModelSerializer.serializer_field_mapping[postgres_fields.ArrayField] = ListField
ModelSerializer.serializer_field_mapping[postgres_fields.JSONField] = JSONField
class HyperlinkedModelSerializer(ModelSerializer):
"""
A type of `ModelSerializer` that uses hyperlinked relationships instead
of primary key relationships. Specifically:
* A 'url' field is included instead of the 'id' field.
* Relationships to other instances are hyperlinks, instead of primary keys.
"""
serializer_related_field = HyperlinkedRelatedField
def get_default_field_names(self, declared_fields, model_info):
"""
Return the default list of field names that will be used if the
`Meta.fields` option is not specified.
"""
return (
[self.url_field_name] +
list(declared_fields) +
list(model_info.fields) +
list(model_info.forward_relations)
)
def build_nested_field(self, field_name, relation_info, nested_depth):
"""
Create nested fields for forward and reverse relationships.
"""
class NestedSerializer(HyperlinkedModelSerializer):
class Meta:
model = relation_info.related_model
depth = nested_depth - 1
fields = '__all__'
field_class = NestedSerializer
field_kwargs = get_nested_relation_kwargs(relation_info)
return field_class, field_kwargs
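# Illustrative sketch (not part of the original module): a hypothetical
# hyperlinked serializer. The 'url' identity field and hyperlinked relations
# are generated from the model automatically.
#
#   class SnippetSerializer(HyperlinkedModelSerializer):
#       class Meta:
#           model = Snippet
#           fields = ['url', 'title', 'owner']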
| 39.136225 | 124 | 0.625855 |
90e7b1463476525f90667af30b5ade2ffc40ad1f | 1,904 | py | Python | setup.py | mikkokotila/padma | f0b6012f1808e2a2e0f769ddbe5cc2e19dae017c | ["MIT"] | null | null | null | setup.py | mikkokotila/padma | f0b6012f1808e2a2e0f769ddbe5cc2e19dae017c | ["MIT"] | 8 | 2020-07-18T08:49:21.000Z | 2020-07-30T18:11:51.000Z | setup.py | mikkokotila/padma | f0b6012f1808e2a2e0f769ddbe5cc2e19dae017c | ["MIT"] | null | null | null |
#! /usr/bin/env python
#
# Copyright (C) 2021 Mikko Kotila
DESCRIPTION = "Padma is a next-generation Tibetan translation and learning tool."
LONG_DESCRIPTION = """\
"""
DISTNAME = 'Padma-Backend'
MAINTAINER = 'Mikko Kotila'
MAINTAINER_EMAIL = 'mailme@mikkokotila.com'
URL = 'http://padma.io'
LICENSE = 'MIT'
DOWNLOAD_URL = 'https://github.com/Lotus-King-Trust/Padma-Backend'
VERSION = '1.5'
try:
from setuptools import setup
_has_setuptools = True
except ImportError:
from distutils.core import setup
install_requires = ['pandas',
'botok',
'wget',
'sqlitedict',
'fastapi',
'uvicorn==0.17.4']
if __name__ == "__main__":
setup(name=DISTNAME,
author=MAINTAINER,
author_email=MAINTAINER_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
install_requires=install_requires,
packages=['app'],
classifiers=['Intended Audience :: Science/Research',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
'Topic :: Scientific/Engineering :: Human Machine Interfaces',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows :: Windows 10'])
| 32.827586 | 85 | 0.560399 |
d01c9d87444393d71ea1a2bbf541ec54c1b1a1a2 | 10,946 | py | Python | haiku/_src/recurrent.py | qsays/dm-haiku | 5f4a4011d666f6bdb8266797c26cc7daa1684bb4 | ["Apache-2.0"] | 7 | 2020-03-01T11:47:50.000Z | 2020-07-30T08:35:23.000Z | haiku/_src/recurrent.py | marload/dm-haiku | 8190b42cc6d9e7fae21c7d738bd3d719c144ab8a | ["Apache-2.0"] | null | null | null | haiku/_src/recurrent.py | marload/dm-haiku | 8190b42cc6d9e7fae21c7d738bd3d719c144ab8a | ["Apache-2.0"] | null | null | null |
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Haiku recurrent core."""
import abc
from haiku._src import base
from haiku._src import basic
from haiku._src import initializers
from haiku._src import module
import jax
import jax.nn
import jax.numpy as jnp
def add_batch(nest, batch_size):
broadcast = lambda x: jnp.broadcast_to(x, (batch_size,) + x.shape)
return jax.tree_map(broadcast, nest)
def static_unroll(core, inputs, state):
"""Unroll core over inputs, starting from state."""
outs = []
num_steps = jax.tree_leaves(inputs)[0].shape[0]
for t in range(num_steps):
next_input = jax.tree_map(lambda x, t=t: x[t], inputs)
out, state = core(next_input, state)
outs.append(out)
return jnp.stack(outs), state
def dynamic_unroll(core, inputs, state):
"""Unroll core over inputs, starting from state."""
# Swap the input and output of core.
def scan_f(prev_state, next_input):
out, next_state = core(next_input, prev_state)
return next_state, out
state, outs = jax.lax.scan(scan_f, state, inputs)
return outs, state
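# Illustrative usage sketch (not part of the original module), written from the
# caller's side with `import haiku as hk`; the hidden size and the [T, B, F]
# input shape below are assumptions for the example.
#
#   import haiku as hk
#   import jax
#   import jax.numpy as jnp
#
#   def unroll_fn(seq):                                   # seq: [T, B, F]
#     core = hk.LSTM(hidden_size=16)
#     state = core.initial_state(batch_size=seq.shape[1])
#     return hk.dynamic_unroll(core, seq, state)          # (outputs, final_state)
#
#   net = hk.transform(unroll_fn)
#   seq = jnp.zeros([10, 4, 8])
#   params = net.init(jax.random.PRNGKey(0), seq)
#   outs, final_state = net.apply(params, None, seq)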
class RNNCore(module.Module, metaclass=abc.ABCMeta):
"""Base class for RNN cores.
Cores can be dynamically unrolled with jax.lax.scan().
"""
@abc.abstractmethod
def __call__(self, inputs, state):
"""Run one step of the RNN.
Args:
inputs: Arbitrary nest of inputs.
state: Previous core state.
Returns:
Tuple of (output, next_state).
"""
@abc.abstractmethod
def initial_state(self, batch_size):
"""Construct an initial state for the core.
Args:
batch_size: Specifies the batch size of the initial state. Cores may
experimentally support returning an initial state without a batch
dimension if batch_size is None.
"""
class VanillaRNN(RNNCore):
"""Vanilla RNN."""
def __init__(self, hidden_size, name=None):
super(VanillaRNN, self).__init__(name=name)
self.hidden_size = hidden_size
def __call__(self, inputs, state):
in2h = basic.Linear(self.hidden_size)(inputs)
h2h = basic.Linear(self.hidden_size)(state)
output = jax.nn.relu(in2h + h2h)
new_h = output
return output, new_h
def initial_state(self, batch_size):
state = jnp.zeros([self.hidden_size])
if batch_size is not None:
state = add_batch(state, batch_size)
return state
class LSTM(RNNCore):
"""LSTM."""
def __init__(self, hidden_size, name=None):
super(LSTM, self).__init__(name=name)
self.hidden_size = hidden_size
def __call__(self, inputs, state):
if len(inputs.shape) > 2 or not inputs.shape:
raise ValueError("LSTM input must be rank-1 or rank-2.")
prev_h, prev_c = state
x_and_h = jnp.concatenate([inputs, prev_h], axis=-1)
gated = basic.Linear(4 * self.hidden_size)(x_and_h)
# i = input, g = cell_gate, f = forget_gate, o = output_gate
i, g, f, o = jnp.split(gated, indices_or_sections=4, axis=-1)
f = jax.nn.sigmoid(f + 1) # Forget bias, as in sonnet.
c = f * prev_c + jax.nn.sigmoid(i) * jnp.tanh(g)
h = jax.nn.sigmoid(o) * jnp.tanh(c)
return h, (h, c)
def initial_state(self, batch_size):
state = (jnp.zeros([self.hidden_size]), jnp.zeros([self.hidden_size]))
if batch_size is not None:
state = add_batch(state, batch_size)
return state
class GRU(RNNCore):
r"""Gated Recurrent Unit.
The implementation is based on: https://arxiv.org/pdf/1412.3555v1.pdf with
biases.
Given :math:`x_t` and the previous state :math:`h_{t-1}` the core computes
.. math::
\begin{array}{ll}
z_t &= \sigma(W_{iz} x_t + W_{hz} h_{t-1} + b_z) \\
r_t &= \sigma(W_{ir} x_t + W_{hr} h_{t-1} + b_r) \\
a_t &= \tanh(W_{ia} x_t + W_{ha} (r_t \bigodot h_{t-1}) + b_a) \\
h_t &= (1 - z_t) \bigodot h_{t-1} + z_t \bigodot a_t
\end{array}
  where :math:`z_t` and :math:`r_t` are the update and reset gates, respectively.
Warning: Backwards compatibility of GRU weights is currently unsupported.
TODO(tycai): Make policy decision/benchmark performance for GRU variants.
"""
def __init__(self,
hidden_size,
w_i_init: base.Initializer = None,
w_h_init: base.Initializer = None,
b_init: base.Initializer = None,
name=None):
super(GRU, self).__init__(name=name)
self.hidden_size = hidden_size
self._w_i_init = w_i_init or initializers.VarianceScaling()
self._w_h_init = w_h_init or initializers.VarianceScaling()
self._b_init = b_init or jnp.zeros
def __call__(self, inputs, state):
if len(inputs.shape) > 2 or not inputs.shape:
raise ValueError("GRU input must be rank-1 or rank-2.")
input_size = inputs.shape[-1]
hidden_size = self.hidden_size
w_i = base.get_parameter(
name="w_i", shape=[input_size, 3 * hidden_size], init=self._w_i_init)
w_h = base.get_parameter(
name="w_h", shape=[hidden_size, 3 * hidden_size], init=self._w_h_init)
b = base.get_parameter(
name="b",
shape=[3 * hidden_size],
dtype=inputs.dtype,
init=self._b_init)
w_h_z, w_h_a = jnp.split(w_h, indices_or_sections=[2 * hidden_size], axis=1)
b_z, b_a = jnp.split(b, indices_or_sections=[2 * hidden_size], axis=0)
gates_x = jnp.matmul(inputs, w_i)
zr_x, a_x = jnp.split(
gates_x, indices_or_sections=[2 * hidden_size], axis=-1)
zr_h = jnp.matmul(state, w_h_z)
zr = zr_x + zr_h + b_z
z, r = jnp.split(jax.nn.sigmoid(zr), indices_or_sections=2, axis=-1)
a_h = jnp.matmul(r * state, w_h_a)
a = jnp.tanh(a_x + a_h + b_a)
next_state = (1 - z) * state + z * a
return next_state, next_state
def initial_state(self, batch_size):
state = jnp.zeros([self.hidden_size])
if batch_size is not None:
state = add_batch(state, batch_size)
return state
class ResetCore(RNNCore):
"""A wrapper for managing state resets during unrolls.
When unrolling an `RNNCore` on a batch of inputs sequences it may be necessary
to reset the core's state at different timesteps for different elements of the
batch. The `ResetCore` class enables this by taking a batch of `should_reset`
booleans in addition to the batch of inputs, and conditionally resetting the
core's state for individual elements of the batch.
"""
def __init__(self, core, name=None):
super(ResetCore, self).__init__(name=name)
self._core = core
def __call__(self, inputs, state):
inputs, should_reset = inputs
# Protect against an easy, invisible error class. This should be jitted out.
# >>> np.where(np.asarray([False, True]), np.zeros([2,2]), np.ones([2,2]))
# ... array([[1., 0.], [1., 0.]])
# Using a should_reset of rank R - 1 could result in one example
# affecting another.
for x in jax.tree_leaves(state):
if len(x.shape) - 1 != len(should_reset.shape):
raise ValueError("should_reset must have rank-1 of state.")
should_reset = jnp.expand_dims(should_reset, axis=-1)
batch_size = jax.tree_leaves(inputs)[0].shape[0]
initial_state = jax.tree_map(lambda v: v.astype(inputs.dtype),
self.initial_state(batch_size))
state = jax.tree_multimap(lambda i, s: jnp.where(should_reset, i, s),
initial_state, state)
return self._core(inputs, state)
def initial_state(self, batch_size):
return self._core.initial_state(batch_size)
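# Illustrative usage sketch (not part of the original module); shapes are
# assumptions for the example.
#
#   core = ResetCore(LSTM(hidden_size=8))
#   state = core.initial_state(batch_size=4)
#   # features: [4, F] inputs, should_reset: [4] booleans for this step
#   out, state = core((features, should_reset), state)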
class _DeepRNN(RNNCore):
"""Underlying implementation of DeepRNN with skip connections."""
def __init__(self, layers, skip_connections, name=None):
super(_DeepRNN, self).__init__(name=name)
self._layers = layers
self._skip_connections = skip_connections
if skip_connections:
for layer in layers:
if not isinstance(layer, RNNCore):
raise ValueError("skip_connections requires for all layers to be "
"`hk.RNNCore`s. Layers is: {}".format(layers))
def __call__(self, inputs, state):
current_inputs = inputs
next_states = []
outputs = []
state_idx = 0
concat = lambda *args: jnp.concatenate(args, axis=-1)
for idx, layer in enumerate(self._layers):
if self._skip_connections and idx > 0:
current_inputs = jax.tree_multimap(concat, inputs, current_inputs)
if isinstance(layer, RNNCore):
current_inputs, next_state = layer(current_inputs, state[state_idx])
outputs.append(current_inputs)
next_states.append(next_state)
state_idx += 1
else:
current_inputs = layer(current_inputs)
if self._skip_connections:
output = jax.tree_multimap(concat, *outputs)
else:
output = current_inputs
return output, tuple(next_states)
def initial_state(self, batch_size):
return tuple(
layer.initial_state(batch_size)
for layer in self._layers
if isinstance(layer, RNNCore))
class DeepRNN(_DeepRNN):
"""Wraps a sequence of cores and callables as a single core.
>>> deep_rnn = hk.DeepRNN([
... hk.LSTM(hidden_size=4),
... jax.nn.relu,
... hk.LSTM(hidden_size=2),
... ])
The state of a `DeepRNN` is a tuple with one element per `RNNCore`.
If no layers are `RNNCore`s, the state is an empty tuple.
"""
def __init__(self, layers, name=None):
super(DeepRNN, self).__init__(layers, skip_connections=False, name=name)
def deep_rnn_with_skip_connections(layers, name=None):
"""Constructs a DeepRNN with skip connections.
Skip connections alter the dependency structure within a `DeepRNN`.
Specifically, input to the i-th layer (i > 0) is given by a
concatenation of the core's inputs and the outputs of the (i-1)-th layer.
The output of the `DeepRNN` is the concatenation of the outputs of all cores.
.. code-block:: python
outputs0, ... = layers[0](inputs, ...)
     outputs1, ... = layers[1](jnp.concatenate([inputs, outputs0], axis=-1), ...)
     outputs2, ... = layers[2](jnp.concatenate([inputs, outputs1], axis=-1), ...)
...
Args:
layers: List of `RNNCore`s.
name: Name of the module.
Returns:
A `_DeepRNN` with skip connections.
Raises:
ValueError: If any of the layers is not an `RNNCore`.
"""
return _DeepRNN(layers, skip_connections=True, name=name)
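# Illustrative usage sketch (not part of the original module):
#
#   core = deep_rnn_with_skip_connections([LSTM(hidden_size=4), LSTM(hidden_size=2)])
#   state = core.initial_state(batch_size=2)
#   out, state = core(inputs, state)  # `out` concatenates every layer's output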
| 33.069486 | 80 | 0.664809 |
c457e5ec692efae002828a400c3493fbb5798017 | 16,669 | py | Python | examples/scanvi/scanvi.py | sjfleming/pyro | c8dc40a75cc4ff1f43c6ff9178d91c08155d7973 | ["Apache-2.0"] | 2,827 | 2019-02-04T23:09:52.000Z | 2022-03-31T10:50:29.000Z | examples/scanvi/scanvi.py | liyunlong10/pyro | eadca9c9ed9654573037acdf4f48b34ea40037fe | ["Apache-2.0"] | 965 | 2019-02-05T09:33:27.000Z | 2022-03-25T13:09:52.000Z | examples/scanvi/scanvi.py | liyunlong10/pyro | eadca9c9ed9654573037acdf4f48b34ea40037fe | ["Apache-2.0"] | 488 | 2019-02-08T21:42:49.000Z | 2022-03-31T12:33:21.000Z |
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
"""
We use a semi-supervised deep generative model of transcriptomics data to propagate labels
from a small set of labeled cells to a larger set of unlabeled cells. In particular we
use a dataset of peripheral blood mononuclear cells (PBMC) from 10x Genomics and
(approximately) reproduce Figure 6 in reference [1].
Note that for simplicity we do not reproduce every aspect of the scANVI pipeline. For
example, we do not use dropout in our neural network encoders/decoders, nor do we include
batch/dataset annotations in our model.
References:
[1] "Harmonization and Annotation of Single-cell Transcriptomics data with Deep Generative Models,"
Chenling Xu, Romain Lopez, Edouard Mehlman, Jeffrey Regier, Michael I. Jordan, Nir Yosef.
[2] https://github.com/YosefLab/scvi-tutorials/blob/50dd3269abfe0c375ec47114f2c20725a016736f/seed_labeling.ipynb
"""
import argparse
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from data import get_data
from matplotlib.patches import Patch
from torch.distributions import constraints
from torch.nn.functional import softmax, softplus
from torch.optim import Adam
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from pyro.distributions.util import broadcast_shape
from pyro.infer import SVI, TraceEnum_ELBO, config_enumerate
from pyro.optim import MultiStepLR
# Helper for making fully-connected neural networks
def make_fc(dims):
layers = []
for in_dim, out_dim in zip(dims, dims[1:]):
layers.append(nn.Linear(in_dim, out_dim))
layers.append(nn.BatchNorm1d(out_dim))
layers.append(nn.ReLU())
return nn.Sequential(*layers[:-1]) # Exclude final ReLU non-linearity
# Splits a tensor in half along the final dimension
def split_in_half(t):
return t.reshape(t.shape[:-1] + (2, -1)).unbind(-2)
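# Illustrative sketch (not part of the original module): split_in_half turns a
# trailing dimension of size 2*D into two tensors with trailing dimension D,
# which is how the loc/scale pairs are produced below.
#
#   loc, scale = split_in_half(torch.zeros(3, 8))  # two tensors of shape [3, 4]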
# Helper for broadcasting inputs to neural net
def broadcast_inputs(input_args):
shape = broadcast_shape(*[s.shape[:-1] for s in input_args]) + (-1,)
input_args = [s.expand(shape) for s in input_args]
return input_args
# Used in parameterizing p(z2 | z1, y)
class Z2Decoder(nn.Module):
def __init__(self, z1_dim, y_dim, z2_dim, hidden_dims):
super().__init__()
dims = [z1_dim + y_dim] + hidden_dims + [2 * z2_dim]
self.fc = make_fc(dims)
def forward(self, z1, y):
z1_y = torch.cat([z1, y], dim=-1)
# We reshape the input to be two-dimensional so that nn.BatchNorm1d behaves correctly
_z1_y = z1_y.reshape(-1, z1_y.size(-1))
hidden = self.fc(_z1_y)
# If the input was three-dimensional we now restore the original shape
hidden = hidden.reshape(z1_y.shape[:-1] + hidden.shape[-1:])
loc, scale = split_in_half(hidden)
# Here and elsewhere softplus ensures that scale is positive. Note that we generally
# expect softplus to be more numerically stable than exp.
scale = softplus(scale)
return loc, scale
# Used in parameterizing p(x | z2)
class XDecoder(nn.Module):
def __init__(self, num_genes, z2_dim, hidden_dims):
super().__init__()
dims = [z2_dim] + hidden_dims + [2 * num_genes]
self.fc = make_fc(dims)
def forward(self, z2):
gate_logits, mu = split_in_half(self.fc(z2))
mu = softmax(mu, dim=-1)
return gate_logits, mu
# Used in parameterizing q(z2 | x) and q(l | x)
class Z2LEncoder(nn.Module):
def __init__(self, num_genes, z2_dim, hidden_dims):
super().__init__()
dims = [num_genes] + hidden_dims + [2 * z2_dim + 2]
self.fc = make_fc(dims)
def forward(self, x):
# Transform the counts x to log space for increased numerical stability.
# Note that we only use this transform here; in particular the observation
# distribution in the model is a proper count distribution.
x = torch.log(1 + x)
h1, h2 = split_in_half(self.fc(x))
z2_loc, z2_scale = h1[..., :-1], softplus(h2[..., :-1])
l_loc, l_scale = h1[..., -1:], softplus(h2[..., -1:])
return z2_loc, z2_scale, l_loc, l_scale
# Used in parameterizing q(z1 | z2, y)
class Z1Encoder(nn.Module):
def __init__(self, num_labels, z1_dim, z2_dim, hidden_dims):
super().__init__()
dims = [num_labels + z2_dim] + hidden_dims + [2 * z1_dim]
self.fc = make_fc(dims)
def forward(self, z2, y):
# This broadcasting is necessary since Pyro expands y during enumeration (but not z2)
z2_y = broadcast_inputs([z2, y])
z2_y = torch.cat(z2_y, dim=-1)
# We reshape the input to be two-dimensional so that nn.BatchNorm1d behaves correctly
_z2_y = z2_y.reshape(-1, z2_y.size(-1))
hidden = self.fc(_z2_y)
# If the input was three-dimensional we now restore the original shape
hidden = hidden.reshape(z2_y.shape[:-1] + hidden.shape[-1:])
loc, scale = split_in_half(hidden)
scale = softplus(scale)
return loc, scale
# Used in parameterizing q(y | z2)
class Classifier(nn.Module):
def __init__(self, z2_dim, hidden_dims, num_labels):
super().__init__()
dims = [z2_dim] + hidden_dims + [num_labels]
self.fc = make_fc(dims)
def forward(self, x):
logits = self.fc(x)
return logits
# Encompasses the scANVI model and guide as a PyTorch nn.Module
class SCANVI(nn.Module):
def __init__(
self,
num_genes,
num_labels,
l_loc,
l_scale,
latent_dim=10,
alpha=0.01,
scale_factor=1.0,
):
assert isinstance(num_genes, int)
self.num_genes = num_genes
assert isinstance(num_labels, int) and num_labels > 1
self.num_labels = num_labels
# This is the dimension of both z1 and z2
assert isinstance(latent_dim, int) and latent_dim > 0
self.latent_dim = latent_dim
# The next two hyperparameters determine the prior over the log_count latent variable `l`
assert isinstance(l_loc, float)
self.l_loc = l_loc
assert isinstance(l_scale, float) and l_scale > 0
self.l_scale = l_scale
# This hyperparameter controls the strength of the auxiliary classification loss
assert isinstance(alpha, float) and alpha > 0
self.alpha = alpha
assert isinstance(scale_factor, float) and scale_factor > 0
self.scale_factor = scale_factor
super().__init__()
# Setup the various neural networks used in the model and guide
self.z2_decoder = Z2Decoder(
z1_dim=self.latent_dim,
y_dim=self.num_labels,
z2_dim=self.latent_dim,
hidden_dims=[50],
)
self.x_decoder = XDecoder(
num_genes=num_genes, hidden_dims=[100], z2_dim=self.latent_dim
)
self.z2l_encoder = Z2LEncoder(
num_genes=num_genes, z2_dim=self.latent_dim, hidden_dims=[100]
)
self.classifier = Classifier(
z2_dim=self.latent_dim, hidden_dims=[50], num_labels=num_labels
)
self.z1_encoder = Z1Encoder(
num_labels=num_labels,
z1_dim=self.latent_dim,
z2_dim=self.latent_dim,
hidden_dims=[50],
)
self.epsilon = 5.0e-3
def model(self, x, y=None):
# Register various nn.Modules with Pyro
pyro.module("scanvi", self)
# This gene-level parameter modulates the variance of the observation distribution
theta = pyro.param(
"inverse_dispersion",
10.0 * x.new_ones(self.num_genes),
constraint=constraints.positive,
)
# We scale all sample statements by scale_factor so that the ELBO is normalized
# wrt the number of datapoints and genes
with pyro.plate("batch", len(x)), poutine.scale(scale=self.scale_factor):
z1 = pyro.sample(
"z1", dist.Normal(0, x.new_ones(self.latent_dim)).to_event(1)
)
# Note that if y is None (i.e. y is unobserved) then y will be sampled;
# otherwise y will be treated as observed.
y = pyro.sample(
"y", dist.OneHotCategorical(logits=x.new_zeros(self.num_labels)), obs=y
)
z2_loc, z2_scale = self.z2_decoder(z1, y)
z2 = pyro.sample("z2", dist.Normal(z2_loc, z2_scale).to_event(1))
l_scale = self.l_scale * x.new_ones(1)
l = pyro.sample("l", dist.LogNormal(self.l_loc, l_scale).to_event(1))
# Note that by construction mu is normalized (i.e. mu.sum(-1) == 1) and the
# total scale of counts for each cell is determined by `l`
gate_logits, mu = self.x_decoder(z2)
# TODO revisit this parameterization if torch.distributions.NegativeBinomial changes
# from failure to success parametrization;
# see https://github.com/pytorch/pytorch/issues/42449
nb_logits = (l * mu + self.epsilon).log() - (theta + self.epsilon).log()
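            # With total_count=theta and logits = log(l * mu) - log(theta), the
            # negative binomial mean works out to roughly theta * exp(logits) = l * mu,
            # i.e. the decoder output mu is scaled by the library size l (assuming
            # torch's success-probability parameterization of NegativeBinomial).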
x_dist = dist.ZeroInflatedNegativeBinomial(
gate_logits=gate_logits, total_count=theta, logits=nb_logits
)
# Observe the datapoint x using the observation distribution x_dist
pyro.sample("x", x_dist.to_event(1), obs=x)
# The guide specifies the variational distribution
def guide(self, x, y=None):
pyro.module("scanvi", self)
with pyro.plate("batch", len(x)), poutine.scale(scale=self.scale_factor):
z2_loc, z2_scale, l_loc, l_scale = self.z2l_encoder(x)
pyro.sample("l", dist.LogNormal(l_loc, l_scale).to_event(1))
z2 = pyro.sample("z2", dist.Normal(z2_loc, z2_scale).to_event(1))
y_logits = self.classifier(z2)
y_dist = dist.OneHotCategorical(logits=y_logits)
if y is None:
# x is unlabeled so sample y using q(y|z2)
y = pyro.sample("y", y_dist)
else:
# x is labeled so add a classification loss term
# (this way q(y|z2) learns from both labeled and unlabeled data)
classification_loss = y_dist.log_prob(y)
# Note that the negative sign appears because we're adding this term in the guide
# and the guide log_prob appears in the ELBO as -log q
pyro.factor("classification_loss", -self.alpha * classification_loss)
z1_loc, z1_scale = self.z1_encoder(z2, y)
pyro.sample("z1", dist.Normal(z1_loc, z1_scale).to_event(1))
def main(args):
# Fix random number seed
pyro.util.set_rng_seed(args.seed)
    # Enable optional validation warnings
    pyro.enable_validation(True)
# Load and pre-process data
dataloader, num_genes, l_mean, l_scale, anndata = get_data(
dataset=args.dataset, batch_size=args.batch_size, cuda=args.cuda
)
# Instantiate instance of model/guide and various neural networks
scanvi = SCANVI(
num_genes=num_genes,
num_labels=4,
l_loc=l_mean,
l_scale=l_scale,
scale_factor=1.0 / (args.batch_size * num_genes),
)
if args.cuda:
scanvi.cuda()
# Setup an optimizer (Adam) and learning rate scheduler.
# By default we start with a moderately high learning rate (0.005)
# and reduce by a factor of 5 after 20 epochs.
scheduler = MultiStepLR(
{
"optimizer": Adam,
"optim_args": {"lr": args.learning_rate},
"milestones": [20],
"gamma": 0.2,
}
)
# Tell Pyro to enumerate out y when y is unobserved
guide = config_enumerate(scanvi.guide, "parallel", expand=True)
# Setup a variational objective for gradient-based learning.
# Note we use TraceEnum_ELBO in order to leverage Pyro's machinery
# for automatic enumeration of the discrete latent variable y.
elbo = TraceEnum_ELBO(strict_enumeration_warning=False)
svi = SVI(scanvi.model, guide, scheduler, elbo)
# Training loop
for epoch in range(args.num_epochs):
losses = []
for x, y in dataloader:
if y is not None:
y = y.type_as(x)
loss = svi.step(x, y)
losses.append(loss)
# Tell the scheduler we've done one epoch.
scheduler.step()
print("[Epoch %04d] Loss: %.5f" % (epoch, np.mean(losses)))
# Put neural networks in eval mode (needed for batchnorm)
scanvi.eval()
# Now that we're done training we'll inspect the latent representations we've learned
if args.plot and args.dataset == "pbmc":
import scanpy as sc
# Compute latent representation (z2_loc) for each cell in the dataset
latent_rep = scanvi.z2l_encoder(dataloader.data_x)[0]
# Compute inferred cell type probabilities for each cell
y_logits = scanvi.classifier(latent_rep)
y_probs = softmax(y_logits, dim=-1).data.cpu().numpy()
# Use scanpy to compute 2-dimensional UMAP coordinates using our
# learned 10-dimensional latent representation z2
anndata.obsm["X_scANVI"] = latent_rep.data.cpu().numpy()
sc.pp.neighbors(anndata, use_rep="X_scANVI")
sc.tl.umap(anndata)
umap1, umap2 = anndata.obsm["X_umap"][:, 0], anndata.obsm["X_umap"][:, 1]
# Construct plots; all plots are scatterplots depicting the two-dimensional UMAP embedding
# and only differ in how points are colored
# The topmost plot depicts the 200 hand-curated seed labels in our dataset
fig, axes = plt.subplots(3, 2)
seed_marker_sizes = anndata.obs["seed_marker_sizes"]
axes[0, 0].scatter(
umap1,
umap2,
s=seed_marker_sizes,
c=anndata.obs["seed_colors"],
marker=".",
alpha=0.7,
)
axes[0, 0].set_title("Hand-Curated Seed Labels")
patch1 = Patch(color="lightcoral", label="CD8-Naive")
patch2 = Patch(color="limegreen", label="CD4-Naive")
patch3 = Patch(color="deepskyblue", label="CD4-Memory")
patch4 = Patch(color="mediumorchid", label="CD4-Regulatory")
axes[0, 1].legend(loc="center left", handles=[patch1, patch2, patch3, patch4])
axes[0, 1].get_xaxis().set_visible(False)
axes[0, 1].get_yaxis().set_visible(False)
axes[0, 1].set_frame_on(False)
# The remaining plots depict the inferred cell type probability for each of the four cell types
s10 = axes[1, 0].scatter(
umap1, umap2, s=1, c=y_probs[:, 0], marker=".", alpha=0.7
)
axes[1, 0].set_title("Inferred CD8-Naive probability")
fig.colorbar(s10, ax=axes[1, 0])
s11 = axes[1, 1].scatter(
umap1, umap2, s=1, c=y_probs[:, 1], marker=".", alpha=0.7
)
axes[1, 1].set_title("Inferred CD4-Naive probability")
fig.colorbar(s11, ax=axes[1, 1])
s20 = axes[2, 0].scatter(
umap1, umap2, s=1, c=y_probs[:, 2], marker=".", alpha=0.7
)
axes[2, 0].set_title("Inferred CD4-Memory probability")
fig.colorbar(s20, ax=axes[2, 0])
s21 = axes[2, 1].scatter(
umap1, umap2, s=1, c=y_probs[:, 3], marker=".", alpha=0.7
)
axes[2, 1].set_title("Inferred CD4-Regulatory probability")
fig.colorbar(s21, ax=axes[2, 1])
fig.tight_layout()
plt.savefig("scanvi.pdf")
if __name__ == "__main__":
assert pyro.__version__.startswith("1.7.0")
# Parse command line arguments
parser = argparse.ArgumentParser(
description="single-cell ANnotation using Variational Inference"
)
parser.add_argument("-s", "--seed", default=0, type=int, help="rng seed")
parser.add_argument(
"-n", "--num-epochs", default=60, type=int, help="number of training epochs"
)
parser.add_argument(
"-d",
"--dataset",
default="pbmc",
type=str,
help="which dataset to use",
choices=["pbmc", "mock"],
)
parser.add_argument(
"-bs", "--batch-size", default=100, type=int, help="mini-batch size"
)
parser.add_argument(
"-lr", "--learning-rate", default=0.005, type=float, help="learning rate"
)
parser.add_argument(
"--cuda", action="store_true", default=False, help="whether to use cuda"
)
parser.add_argument(
"--plot", action="store_true", default=False, help="whether to make a plot"
)
args = parser.parse_args()
main(args)
| 38.057078
| 112
| 0.632311
|
c9d4fced83f0f589477d21b756615a0e169894b7
| 3,390
|
py
|
Python
|
application.py
|
nasiratt92/techdegree-project-2
|
ef22fa1b22748abbebbcefb8502f4c9889d7185c
|
[
"MIT"
] | 1
|
2020-07-05T00:45:29.000Z
|
2020-07-05T00:45:29.000Z
|
application.py
|
nasiratt92/techdegree-project-2
|
ef22fa1b22748abbebbcefb8502f4c9889d7185c
|
[
"MIT"
] | null | null | null |
application.py
|
nasiratt92/techdegree-project-2
|
ef22fa1b22748abbebbcefb8502f4c9889d7185c
|
[
"MIT"
] | null | null | null |
import constants
from copy import deepcopy
PLAYERS = deepcopy(constants.PLAYERS)
TEAMS = deepcopy(constants.TEAMS)
allocated_teams = []
allocated_players = []
players_string = []
user_input = 0
def clean_data(collection):
    '''Take a collection of player dictionaries and clean it up:
    the 'height' value (a string containing a number) is converted to an
    integer, and the 'experience' value ('YES'/'NO') is converted to
    True/False.
    '''
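    # For example (illustrative record, not taken from constants.py):
    # {'height': '42 inches', 'experience': 'YES'} -> {'height': 42, 'experience': True}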
for dictionary in collection:
for key, value in dictionary.items():
if key == 'height':
# pdb.set_trace()
dictionary[key] = int(value[0:2])
elif key == 'experience':
if value == 'YES':
dictionary[key] = True
else:
dictionary[key] = False
def balance_teams(teams, players):
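    # Splits the players evenly across the teams: each team name is recorded in
    # allocated_teams and len(players) / len(teams) players are popped from a copy
    # of the player list into allocated_players.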
num_players_team = len(players) / len(teams)
copy_of_players = deepcopy(players)
for team in teams:
count = 0
        # record the team name and collect num_players_team players for this team
allocated_teams.append('{}'.format(team))
        team_players = []
        while count < num_players_team:
            team_players.append(copy_of_players.pop())
            count += 1
        allocated_players.append(team_players)
def display_team_print(option, teams, players):
option_index = option - 1
team_name = str(teams[option_index])
print(
'''
Team: {} Stats
--------------------
Total players: {}
Players on Team:
'''.format(team_name, len(players[option_index]) )
)
for player in players[option_index]:
players_string.append((player['name']))
print(', '.join(players_string))
def get_int_from_user(max_int):
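    # Prompts until the user enters an integer between 1 and max_int (inclusive)
    # and returns it.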
    # I am adding 1 to max_int so I can get the end index for the range() function
    max_int += 1
    # Use a list comprehension to generate the option strings
strings_list = [str(integer) for integer in range(1, max_int)]
options_string = ", ".join(strings_list)
valid_input = False
while not valid_input:
user_input = input("Enter an option > ")
try:
user_input = int(user_input)
except ValueError:
print("ValueError: please enter one of the following: {}".format(options_string))
else:
            # max_int was incremented above, so the valid options are 1 .. max_int - 1
            if 0 < user_input < max_int:
                valid_input = True
            else:
                print("Sorry, only numbers {} are the available options".format(options_string))
return user_input
if __name__ == "__main__":
clean_data(PLAYERS)
balance_teams(TEAMS, PLAYERS)
print('BASKETBALL TEAM STATS TOOL')
print('\n---- MENU----')
print("""
Here are your choices:
1) Display Team Stats
2) Quit
""")
user_input = get_int_from_user(2)
if user_input == 1:
list_number = 1
for team in allocated_teams:
print('{}) {}'.format(list_number, team))
list_number += 1
display_team = get_int_from_user(len(allocated_teams))
display_team_print(display_team, allocated_teams, allocated_players)
        input('Press ENTER to continue...')
elif user_input == 2:
exit()
| 28.25
| 96
| 0.606195
|
9bf0ef86422ee6571c3b72a05145a0485868ec27
| 465
|
py
|
Python
|
jogo_da_velha/venv/Scripts/easy_install-3.8-script.py
|
MateusProvensi/jogo-velha
|
0809064a1d0c41f764ac960fd06e1ad2429c861a
|
[
"MIT"
] | null | null | null |
jogo_da_velha/venv/Scripts/easy_install-3.8-script.py
|
MateusProvensi/jogo-velha
|
0809064a1d0c41f764ac960fd06e1ad2429c861a
|
[
"MIT"
] | null | null | null |
jogo_da_velha/venv/Scripts/easy_install-3.8-script.py
|
MateusProvensi/jogo-velha
|
0809064a1d0c41f764ac960fd06e1ad2429c861a
|
[
"MIT"
] | null | null | null |
#!C:\Users\Mateus\PycharmProjects\jogo_da_velha\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.8'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.8')()
)
| 35.769231
| 87
| 0.698925
|
894173c61414131c3424874962b47f8212dbda79
| 9,701
|
py
|
Python
|
venv/lib/python3.6/site-packages/Cryptodome/SelfTest/Protocol/test_SecretSharing.py
|
S-JingTao/ROS_Air_ground_simulation_model
|
393aa2c881dd6d0fe5efdb94409800c2d161832a
|
[
"MIT"
] | 4
|
2021-07-12T16:37:36.000Z
|
2021-08-06T09:42:37.000Z
|
venv/lib/python3.6/site-packages/Cryptodome/SelfTest/Protocol/test_SecretSharing.py
|
S-JingTao/ROS_Air_ground_simulation_model
|
393aa2c881dd6d0fe5efdb94409800c2d161832a
|
[
"MIT"
] | 20
|
2021-05-03T18:02:23.000Z
|
2022-03-12T12:01:04.000Z
|
Lib/site-packages/Cryptodome/SelfTest/Protocol/test_SecretSharing.py
|
fochoao/cpython
|
3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
|
[
"bzip2-1.0.6",
"0BSD"
] | 2
|
2021-03-16T12:41:29.000Z
|
2021-03-16T14:50:08.000Z
|
#
# SelfTest/Protocol/test_secret_sharing.py: Self-test for secret sharing protocols
#
# ===================================================================
#
# Copyright (c) 2014, Legrandin <helderijs@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
from unittest import main, TestCase, TestSuite
from binascii import unhexlify, hexlify
from Cryptodome.Util.py3compat import *
from Cryptodome.SelfTest.st_common import list_test_cases
from Cryptodome.Protocol.SecretSharing import Shamir, _Element, \
_mult_gf2, _div_gf2
class GF2_Tests(TestCase):
def test_mult_gf2(self):
# Prove mult by zero
x = _mult_gf2(0,0)
self.assertEqual(x, 0)
# Prove mult by unity
x = _mult_gf2(34, 1)
self.assertEqual(x, 34)
z = 3 # (x+1)
y = _mult_gf2(z, z)
self.assertEqual(y, 5) # (x+1)^2 = x^2 + 1
y = _mult_gf2(y, z)
self.assertEqual(y, 15) # (x+1)^3 = x^3 + x^2 + x + 1
y = _mult_gf2(y, z)
self.assertEqual(y, 17) # (x+1)^4 = x^4 + 1
# Prove linearity works
comps = [1, 4, 128, 2**34]
sum_comps = 1+4+128+2**34
y = 908
z = _mult_gf2(sum_comps, y)
w = 0
for x in comps:
w ^= _mult_gf2(x, y)
self.assertEqual(w, z)
def test_div_gf2(self):
from Cryptodome.Util.number import size as deg
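        # Based on the checks below: _div_gf2(a, b) returns (quotient, remainder)
        # for polynomials over GF(2), with deg(remainder) < deg(b) and
        # a == _mult_gf2(quotient, b) ^ remainder.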
x, y = _div_gf2(567, 7)
        self.assertTrue(deg(y) < deg(7))
w = _mult_gf2(x, 7) ^ y
self.assertEqual(567, w)
x, y = _div_gf2(7, 567)
self.assertEqual(x, 0)
self.assertEqual(y, 7)
class Element_Tests(TestCase):
def test1(self):
        # Test encodings
e = _Element(256)
self.assertEqual(int(e), 256)
self.assertEqual(e.encode(), bchr(0)*14 + b("\x01\x00"))
e = _Element(bchr(0)*14 + b("\x01\x10"))
self.assertEqual(int(e), 0x110)
self.assertEqual(e.encode(), bchr(0)*14 + b("\x01\x10"))
# Only 16 byte string are a valid encoding
self.assertRaises(ValueError, _Element, bchr(0))
def test2(self):
# Test addition
e = _Element(0x10)
f = _Element(0x0A)
self.assertEqual(int(e+f), 0x1A)
def test3(self):
# Test multiplication
zero = _Element(0)
one = _Element(1)
two = _Element(2)
x = _Element(6) * zero
self.assertEqual(int(x), 0)
x = _Element(6) * one
self.assertEqual(int(x), 6)
x = _Element(2**127) * two
self.assertEqual(int(x), 1 + 2 + 4 + 128)
def test4(self):
# Test inversion
one = _Element(1)
x = one.inverse()
self.assertEqual(int(x), 1)
x = _Element(82323923)
y = x.inverse()
self.assertEqual(int(x * y), 1)
class Shamir_Tests(TestCase):
def test1(self):
# Test splitting
shares = Shamir.split(2, 3, bchr(90)*16)
self.assertEqual(len(shares), 3)
for index in range(3):
self.assertEqual(shares[index][0], index+1)
self.assertEqual(len(shares[index][1]), 16)
def test2(self):
# Test recombine
from itertools import permutations
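        # Each test vector is (k, secret_hex, share_1, ..., share_n); each share is
        # encoded as "index-hexvalue" and any k of them should recover the secret.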
test_vectors = (
(2, "d9fe73909bae28b3757854c0af7ad405",
"1-594ae8964294174d95c33756d2504170",
"2-d897459d29da574eb40e93ec552ffe6e",
"3-5823de9bf0e068b054b5f07a28056b1b",
"4-db2c1f8bff46d748f795da995bd080cb"),
(2, "bf4f902d9a7efafd1f3ffd9291fd5de9",
"1-557bd3b0748064b533469722d1cc7935",
"2-6b2717164783c66d47cd28f2119f14d0",
"3-8113548ba97d58256bb4424251ae300c",
"4-179e9e5a218483ddaeda57539139cf04"),
(3, "ec96aa5c14c9faa699354cf1da74e904",
"1-64579fbf1908d66f7239bf6e2b4e41e1",
"2-6cd9428df8017b52322561e8c672ae3e",
"3-e418776ef5c0579bd9299277374806dd",
"4-ab3f77a0107398d23b323e581bb43f5d",
"5-23fe42431db2b41bd03ecdc7ea8e97ac"),
(3, "44cf249b68b80fcdc27b47be60c2c145",
"1-d6515a3905cd755119b86e311c801e31",
"2-16693d9ac9f10c254036ced5f8917fa3",
"3-84f74338a48476b99bf5e75a84d3a0d1",
"4-3fe8878dc4a5d35811cf3cbcd33dbe52",
"5-ad76f92fa9d0a9c4ca0c1533af7f6132"),
(5, "5398717c982db935d968eebe53a47f5a",
"1-be7be2dd4c068e7ef576aaa1b1c11b01",
"2-f821f5848441cb98b3eb467e2733ee21",
"3-25ee52f53e203f6e29a0297b5ab486b5",
"4-fc9fb58ef74dab947fbf9acd9d5d83cd",
"5-b1949cce46d81552e65f248d3f74cc5c",
"6-d64797f59977c4d4a7956ad916da7699",
"7-ab608a6546a8b9af8820ff832b1135c7"),
(5, "4a78db90fbf35da5545d2fb728e87596",
"1-08daf9a25d8aa184cfbf02b30a0ed6a0",
"2-dda28261e36f0b14168c2cf153fb734e",
"3-e9fdec5505d674a57f9836c417c1ecaa",
"4-4dce5636ae06dee42d2c82e65f06c735",
"5-3963dc118afc2ba798fa1d452b28ef00",
"6-6dfe6ff5b09e94d2f84c382b12f42424",
"7-6faea9d4d4a4e201bf6c90b9000630c3"),
(10, "eccbf6d66d680b49b073c4f1ddf804aa",
"01-7d8ac32fe4ae209ead1f3220fda34466",
"02-f9144e76988aad647d2e61353a6e96d5",
"03-b14c3b80179203363922d60760271c98",
"04-770bb2a8c28f6cee89e00f4d5cc7f861",
"05-6e3d7073ea368334ef67467871c66799",
"06-248792bc74a98ce024477c13c8fb5f8d",
"07-fcea4640d2db820c0604851e293d2487",
"08-2776c36fb714bb1f8525a0be36fc7dba",
"09-6ee7ac8be773e473a4bf75ee5f065762",
"10-33657fc073354cf91d4a68c735aacfc8",
"11-7645c65094a5868bf225c516fdee2d0c",
"12-840485aacb8226631ecd9c70e3018086"),
(10, "377e63bdbb5f7d4dc58a483d035212bb",
"01-32c53260103be431c843b1a633afe3bd",
"02-0107eb16cb8695084d452d2cc50bc7d6",
"03-df1e5c66cd755287fb0446faccd72a06",
"04-361bbcd5d40797f49dfa1898652da197",
"05-160d3ad1512f7dec7fd9344aed318591",
"06-659af6d95df4f25beca4fb9bfee3b7e8",
"07-37f3b208977bad50b3724566b72bfa9d",
"08-6c1de2dfc69c2986142c26a8248eb316",
"09-5e19220837a396bd4bc8cd685ff314c3",
"10-86e7b864fb0f3d628e46d50c1ba92f1c",
"11-065d0082c80b1aea18f4abe0c49df72e",
"12-84a09430c1d20ea9f388f3123c3733a3"),
)
def get_share(p):
pos = p.find('-')
return int(p[:pos]), unhexlify(p[pos + 1:])
for tv in test_vectors:
k = tv[0]
secret = unhexlify(tv[1])
max_perms = 10
for perm, shares_idx in enumerate(permutations(range(2, len(tv)), k)):
if perm > max_perms:
break
shares = [ get_share(tv[x]) for x in shares_idx ]
result = Shamir.combine(shares, True)
self.assertEqual(secret, result)
def test3(self):
# Loopback split/recombine
secret = unhexlify(b("000102030405060708090a0b0c0d0e0f"))
shares = Shamir.split(2, 3, secret)
secret2 = Shamir.combine(shares[:2])
self.assertEqual(secret, secret2)
secret3 = Shamir.combine([ shares[0], shares[2] ])
self.assertEqual(secret, secret3)
def test4(self):
# Loopback split/recombine (SSSS)
secret = unhexlify(b("000102030405060708090a0b0c0d0e0f"))
shares = Shamir.split(2, 3, secret, ssss=True)
secret2 = Shamir.combine(shares[:2], ssss=True)
self.assertEqual(secret, secret2)
def test5(self):
# Detect duplicate shares
secret = unhexlify(b("000102030405060708090a0b0c0d0e0f"))
shares = Shamir.split(2, 3, secret)
self.assertRaises(ValueError, Shamir.combine, (shares[0], shares[0]))
def get_tests(config={}):
tests = []
tests += list_test_cases(GF2_Tests)
tests += list_test_cases(Element_Tests)
tests += list_test_cases(Shamir_Tests)
return tests
if __name__ == '__main__':
suite = lambda: TestSuite(get_tests())
main(defaultTest='suite')
| 36.197761
| 83
| 0.61406
|
dea1b780252cf37fcdec6e119383bbbdc1fcb072
| 12,621
|
py
|
Python
|
prompt_toolkit2/styles/style.py
|
dgilman/python-prompt-toolkit
|
c4ff7317c995b4b032f07ba2b180c89f1de4a169
|
[
"BSD-3-Clause"
] | null | null | null |
prompt_toolkit2/styles/style.py
|
dgilman/python-prompt-toolkit
|
c4ff7317c995b4b032f07ba2b180c89f1de4a169
|
[
"BSD-3-Clause"
] | null | null | null |
prompt_toolkit2/styles/style.py
|
dgilman/python-prompt-toolkit
|
c4ff7317c995b4b032f07ba2b180c89f1de4a169
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Tool for creating styles from a dictionary.
"""
from __future__ import absolute_import, unicode_literals
import itertools
import re
import sys
from prompt_toolkit2.cache import SimpleCache
from .base import (
ANSI_COLOR_NAMES,
ANSI_COLOR_NAMES_ALIASES,
DEFAULT_ATTRS,
Attrs,
BaseStyle,
)
from .named_colors import NAMED_COLORS
__all__ = [
'Style',
'parse_color',
'Priority',
'merge_styles',
]
_named_colors_lowercase = dict(
(k.lower(), v.lstrip('#')) for k, v in NAMED_COLORS.items())
def parse_color(text):
"""
Parse/validate color format.
Like in Pygments, but also support the ANSI color names.
(These will map to the colors of the 16 color palette.)
"""
# ANSI color names.
if text in ANSI_COLOR_NAMES:
return text
if text in ANSI_COLOR_NAMES_ALIASES:
return ANSI_COLOR_NAMES_ALIASES[text]
# 140 named colors.
try:
# Replace by 'hex' value.
return _named_colors_lowercase[text.lower()]
except KeyError:
pass
# Hex codes.
if text[0:1] == '#':
col = text[1:]
# Keep this for backwards-compatibility (Pygments does it).
# I don't like the '#' prefix for named colors.
if col in ANSI_COLOR_NAMES:
return col
elif col in ANSI_COLOR_NAMES_ALIASES:
return ANSI_COLOR_NAMES_ALIASES[col]
# 6 digit hex color.
elif len(col) == 6:
return col
# 3 digit hex color.
elif len(col) == 3:
return col[0] * 2 + col[1] * 2 + col[2] * 2
# Default.
elif text in ('', 'default'):
return text
raise ValueError('Wrong color format %r' % text)
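# A couple of illustrative inputs, following the branches above:
#   parse_color('#fff') -> 'ffffff'   (3-digit hex expanded to 6 digits)
#   parse_color('')     -> ''         (the 'default' case)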
# Attributes, when they are not filled in by a style. None means that we take
# the value from the parent.
_EMPTY_ATTRS = Attrs(color=None, bgcolor=None, bold=None, underline=None,
italic=None, blink=None, reverse=None, hidden=None)
def _expand_classname(classname):
"""
Split a single class name at the `.` operator, and build a list of classes.
E.g. 'a.b.c' becomes ['a', 'a.b', 'a.b.c']
"""
result = []
parts = classname.split('.')
for i in range(1, len(parts) + 1):
result.append('.'.join(parts[:i]).lower())
return result
def _parse_style_str(style_str):
"""
Take a style string, e.g. 'bg:red #88ff00 class:title'
and return a `Attrs` instance.
"""
# Start from default Attrs.
if 'noinherit' in style_str:
attrs = DEFAULT_ATTRS
else:
attrs = _EMPTY_ATTRS
# Now update with the given attributes.
for part in style_str.split():
if part == 'noinherit':
pass
elif part == 'bold':
attrs = attrs._replace(bold=True)
elif part == 'nobold':
attrs = attrs._replace(bold=False)
elif part == 'italic':
attrs = attrs._replace(italic=True)
elif part == 'noitalic':
attrs = attrs._replace(italic=False)
elif part == 'underline':
attrs = attrs._replace(underline=True)
elif part == 'nounderline':
attrs = attrs._replace(underline=False)
# prompt_toolkit extensions. Not in Pygments.
elif part == 'blink':
attrs = attrs._replace(blink=True)
elif part == 'noblink':
attrs = attrs._replace(blink=False)
elif part == 'reverse':
attrs = attrs._replace(reverse=True)
elif part == 'noreverse':
attrs = attrs._replace(reverse=False)
elif part == 'hidden':
attrs = attrs._replace(hidden=True)
elif part == 'nohidden':
attrs = attrs._replace(hidden=False)
# Pygments properties that we ignore.
elif part in ('roman', 'sans', 'mono'):
pass
elif part.startswith('border:'):
pass
# Ignore pieces in between square brackets. This is internal stuff.
# Like '[transparent]' or '[set-cursor-position]'.
elif part.startswith('[') and part.endswith(']'):
pass
# Colors.
elif part.startswith('bg:'):
attrs = attrs._replace(bgcolor=parse_color(part[3:]))
elif part.startswith('fg:'): # The 'fg:' prefix is optional.
attrs = attrs._replace(color=parse_color(part[3:]))
else:
attrs = attrs._replace(color=parse_color(part))
return attrs
CLASS_NAMES_RE = re.compile(r'^[a-z0-9.\s_-]*$') # This one can't contain a comma!
class Priority:
"""
The priority of the rules, when a style is created from a dictionary.
In a `Style`, rules that are defined later will always override previous
defined rules, however in a dictionary, the key order was arbitrary before
Python 3.6. This means that the style could change at random between rules.
We have two options:
- `DICT_KEY_ORDER`: This means, iterate through the dictionary, and take
the key/value pairs in order as they come. This is a good option if you
have Python >3.6. Rules at the end will override rules at the beginning.
- `MOST_PRECISE`: keys that are defined with most precision will get higher
priority. (More precise means: more elements.)
"""
DICT_KEY_ORDER = 'KEY_ORDER'
MOST_PRECISE = 'MOST_PRECISE'
_ALL = [DICT_KEY_ORDER, MOST_PRECISE]
# In the latest Python versions, we take the dictionary ordering as it is.
# In older versions, we sort by precision. If you need to write code that
# runs on all Python versions, it's best to sort them manually, with the most
# precise rules at the bottom.
if sys.version_info >= (3, 6):
default_priority = Priority.DICT_KEY_ORDER
else:
default_priority = Priority.MOST_PRECISE
class Style(BaseStyle):
"""
Create a ``Style`` instance from a list of style rules.
The `style_rules` is supposed to be a list of ('classnames', 'style') tuples.
The classnames are a whitespace separated string of class names and the
style string is just like a Pygments style definition, but with a few
additions: it supports 'reverse' and 'blink'.
Later rules always override previous rules.
Usage::
Style([
('title', '#ff0000 bold underline'),
('something-else', 'reverse'),
('class1 class2', 'reverse'),
])
The ``from_dict`` classmethod is similar, but takes a dictionary as input.
"""
def __init__(self, style_rules):
assert isinstance(style_rules, list)
class_names_and_attrs = []
# Loop through the rules in the order they were defined.
# Rules that are defined later get priority.
for class_names, style_str in style_rules:
assert CLASS_NAMES_RE.match(class_names), repr(class_names)
# The order of the class names doesn't matter.
# (But the order of rules does matter.)
class_names = frozenset(class_names.lower().split())
attrs = _parse_style_str(style_str)
class_names_and_attrs.append((class_names, attrs))
self._style_rules = style_rules
self.class_names_and_attrs = class_names_and_attrs
@property
def style_rules(self):
return self._style_rules
@classmethod
def from_dict(cls, style_dict, priority=default_priority):
"""
:param style_dict: Style dictionary.
:param priority: `Priority` value.
"""
assert priority in Priority._ALL
if priority == Priority.MOST_PRECISE:
def key(item):
# Split on '.' and whitespace. Count elements.
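                # e.g. 'title.sub other' -> len(['title', 'sub']) + len(['other']) = 3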
return sum(len(i.split('.')) for i in item[0].split())
return cls(sorted(style_dict.items(), key=key))
else:
return cls(list(style_dict.items()))
def get_attrs_for_style_str(self, style_str, default=DEFAULT_ATTRS):
"""
Get `Attrs` for the given style string.
"""
list_of_attrs = [default]
class_names = set()
# Apply default styling.
for names, attr in self.class_names_and_attrs:
if not names:
list_of_attrs.append(attr)
# Go from left to right through the style string. Things on the right
# take precedence.
for part in style_str.split():
# This part represents a class.
# Do lookup of this class name in the style definition, as well
# as all class combinations that we have so far.
if part.startswith('class:'):
# Expand all class names (comma separated list).
new_class_names = []
for p in part[6:].lower().split(','):
new_class_names.extend(_expand_classname(p))
for new_name in new_class_names:
# Build a set of all possible class combinations to be applied.
combos = set()
combos.add(frozenset([new_name]))
for count in range(1, len(class_names) + 1):
for c2 in itertools.combinations(class_names, count):
combos.add(frozenset(c2 + (new_name, )))
# Apply the styles that match these class names.
for names, attr in self.class_names_and_attrs:
if names in combos:
list_of_attrs.append(attr)
class_names.add(new_name)
# Process inline style.
else:
inline_attrs = _parse_style_str(part)
list_of_attrs.append(inline_attrs)
return _merge_attrs(list_of_attrs)
def invalidation_hash(self):
return id(self.class_names_and_attrs)
def _merge_attrs(list_of_attrs):
"""
Take a list of :class:`.Attrs` instances and merge them into one.
Every `Attr` in the list can override the styling of the previous one. So,
the last one has highest priority.
"""
def _or(*values):
" Take first not-None value, starting at the end. "
for v in values[::-1]:
if v is not None:
return v
return Attrs(
color=_or('', *[a.color for a in list_of_attrs]),
bgcolor=_or('', *[a.bgcolor for a in list_of_attrs]),
bold=_or(False, *[a.bold for a in list_of_attrs]),
underline=_or(False, *[a.underline for a in list_of_attrs]),
italic=_or(False, *[a.italic for a in list_of_attrs]),
blink=_or(False, *[a.blink for a in list_of_attrs]),
reverse=_or(False, *[a.reverse for a in list_of_attrs]),
hidden=_or(False, *[a.hidden for a in list_of_attrs]))
def merge_styles(styles):
"""
Merge multiple `Style` objects.
"""
styles = [s for s in styles if s is not None]
return _MergedStyle(styles)
class _MergedStyle(BaseStyle):
"""
Merge multiple `Style` objects into one.
This is supposed to ensure consistency: if any of the given styles changes,
then this style will be updated.
"""
# NOTE: previously, we used an algorithm where we did not generate the
# combined style. Instead this was a proxy that called one style
# after the other, passing the outcome of the previous style as the
# default for the next one. This did not work, because that way, the
# priorities like described in the `Style` class don't work.
# 'class:aborted' was for instance never displayed in gray, because
# the next style specified a default color for any text. (The
# explicit styling of class:aborted should have taken priority,
# because it was more precise.)
def __init__(self, styles):
assert all(isinstance(style, BaseStyle) for style in styles)
self.styles = styles
self._style = SimpleCache(maxsize=1)
@property
def _merged_style(self):
" The `Style` object that has the other styles merged together. "
def get():
return Style(self.style_rules)
return self._style.get(self.invalidation_hash(), get)
@property
def style_rules(self):
style_rules = []
for s in self.styles:
style_rules.extend(s.style_rules)
return style_rules
def get_attrs_for_style_str(self, style_str, default=DEFAULT_ATTRS):
return self._merged_style.get_attrs_for_style_str(style_str, default)
def invalidation_hash(self):
return tuple(s.invalidation_hash() for s in self.styles)
| 33.039267
| 83
| 0.615799
|
0a63804bcd7f9ea2b19196355d8adccbed36df95
| 1,969
|
py
|
Python
|
test-driven-development-teste-design-mundo-real/cap6_Lucas.py
|
magsilva/software-testing-benchmark
|
9ca5c14cfcfa3f2fef67d27d80efc8900c7bedfd
|
[
"MIT"
] | null | null | null |
test-driven-development-teste-design-mundo-real/cap6_Lucas.py
|
magsilva/software-testing-benchmark
|
9ca5c14cfcfa3f2fef67d27d80efc8900c7bedfd
|
[
"MIT"
] | null | null | null |
test-driven-development-teste-design-mundo-real/cap6_Lucas.py
|
magsilva/software-testing-benchmark
|
9ca5c14cfcfa3f2fef67d27d80efc8900c7bedfd
|
[
"MIT"
] | null | null | null |
import unittest
class Calculadora:
def __init__(self):
self.lista = []
def adicionarNumeros(self, numeros):
for item in numeros:
self.lista.append(item)
def calculoSoma(self):
        # The result starts at 0, so if the list has no items this is what gets returned
resultado = 0
for num in self.lista:
resultado += num
return resultado
def zeraLista(self):
self.lista = []
class Testing(unittest.TestCase):
    # Works like a 'before' hook
    # It is recommended to create and configure the class here, so that if its
    # structure changes, only this part needs to be edited
def setUp(self):
self.esperado = 0
self.numeros = []
self.calculadora = Calculadora()
    # One test for each possible scenario of the sum function [Somar]
def test_soma_apenas_positivos(self):
self.numeros = [3,7,5,10]
self.calculadora.adicionarNumeros(self.numeros)
self.esperado = 25
self.assertEqual(self.calculadora.calculoSoma(), self.esperado)
        # Acts like an 'after' hook, so the next test gets a clean instance
self.calculadora.zeraLista()
def test_soma_com_negativos_e_positivos(self):
self.calculadora.adicionarNumeros([3,-7,-2,4])
self.esperado = -2
self.assertEqual(self.calculadora.calculoSoma(), self.esperado)
self.calculadora.zeraLista()
def test_soma_apenas_negativos(self):
self.calculadora.adicionarNumeros([-2,-26,-2])
self.esperado = -30
self.assertEqual(self.calculadora.calculoSoma(), self.esperado)
self.calculadora.zeraLista()
def test_soma_sem_valores(self):
#self.calculadora.adicionarNumeros()
self.esperado = 0
self.assertEqual(self.calculadora.calculoSoma(), self.esperado)
self.calculadora.zeraLista()
| 34.54386
| 86
| 0.639411
|
107cbc48ff6f1c91a79e4bab780f934c86a479f8
| 720
|
py
|
Python
|
Service_Mockup/instance/settings.py
|
TamSzaGot/mydata-sdk
|
9c8afb75077f0b993819aa534b904501a8112f76
|
[
"MIT"
] | 4
|
2018-04-21T00:46:40.000Z
|
2019-12-03T13:52:03.000Z
|
Service_Mockup/instance/settings.py
|
TamSzaGot/mydata-sdk
|
9c8afb75077f0b993819aa534b904501a8112f76
|
[
"MIT"
] | 1
|
2019-01-09T10:45:23.000Z
|
2019-01-09T10:45:23.000Z
|
Service_Mockup/instance/settings.py
|
TamSzaGot/mydata-sdk
|
9c8afb75077f0b993819aa534b904501a8112f76
|
[
"MIT"
] | 4
|
2018-04-21T01:12:12.000Z
|
2020-09-24T06:19:29.000Z
|
# -*- coding: utf-8 -*-
timeout = 8
KEYSIZE = 512
SERVICE_MGMNT_URL = "http://localhost:7000"
DATABASE_PATH = "./db_Mockup.sqlite"
# Name of host to connect to. Default: use the local host via a UNIX socket (where applicable)
MYSQL_HOST = 'localhost'
# User to authenticate as. Default: current effective user.
MYSQL_USER = 'serviceMockup'
# Password to authenticate with. Default: no password.
MYSQL_PASSWORD = 'MynorcA'
# Database to use. Default: no default database.
MYSQL_DB = 'db_Service_Mockup'
# TCP port of MySQL server. Default: 3306.
MYSQL_PORT = 3306
CERT_PATH = "./service_key.jwk"
CERT_KEY_PATH = "./service_key.jwk"
CERT_PASSWORD_PATH = "./cert_pw"
DEBUG_MODE = True
| 12.631579
| 94
| 0.709722
|
0e8a820550ecbe48992ea647691a78c82ee2150a
| 1,408
|
py
|
Python
|
accounts/views.py
|
bnorbert/debussy
|
ccfeceeead555c5d33424ff3614404f48eeabc00
|
[
"MIT"
] | 1
|
2020-07-18T10:46:25.000Z
|
2020-07-18T10:46:25.000Z
|
accounts/views.py
|
bnorbert/debussy
|
ccfeceeead555c5d33424ff3614404f48eeabc00
|
[
"MIT"
] | 14
|
2021-03-10T22:13:36.000Z
|
2022-03-12T00:34:53.000Z
|
accounts/views.py
|
bnorbert/debussy
|
ccfeceeead555c5d33424ff3614404f48eeabc00
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from rest_framework import generics, permissions
from rest_framework.response import Response
from knox.models import AuthToken
from .serializers import UserSerializer, RegisterSerializer, LoginSerializer
# Register View
class RegisterView(generics.GenericAPIView):
serializer_class = RegisterSerializer
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.save()
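        # In the knox version used here, AuthToken.objects.create(user) evidently
        # returns an (instance, token) pair; index [1] below is the token string
        # that gets sent back to the client.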
return Response({
"user": UserSerializer(user, context=self.get_serializer_context()).data,
"token": AuthToken.objects.create(user)[1]
})
# Login View
class LoginView(generics.GenericAPIView):
serializer_class = LoginSerializer
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.validated_data
return Response({
"user": UserSerializer(user, context=self.get_serializer_context()).data,
"token": AuthToken.objects.create(user)[1]
})
class UserViewRetrieve(generics.RetrieveAPIView):
    permission_classes = [
permissions.IsAuthenticated
]
serializer_class = UserSerializer
def get_object(self):
return self.request.user
| 30.608696
| 85
| 0.710938
|
f40730af342997177a928bd22cfb7259b8298539
| 19,879
|
py
|
Python
|
dbus-mongo-extractor/tests/test_filter_fields.py
|
JunZCn/DBus
|
4594da36f9838bcaba0c05a08fe4524052e2553e
|
[
"Apache-2.0"
] | 1,232
|
2017-09-04T11:23:08.000Z
|
2022-03-25T07:01:07.000Z
|
dbus-mongo-extractor/tests/test_filter_fields.py
|
JunZCn/DBus
|
4594da36f9838bcaba0c05a08fe4524052e2553e
|
[
"Apache-2.0"
] | 72
|
2017-10-11T04:43:17.000Z
|
2022-03-01T04:48:22.000Z
|
dbus-mongo-extractor/tests/test_filter_fields.py
|
JunZCn/DBus
|
4594da36f9838bcaba0c05a08fe4524052e2553e
|
[
"Apache-2.0"
] | 551
|
2017-09-05T06:55:14.000Z
|
2022-03-19T07:24:56.000Z
|
# Copyright 2016 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test include and exclude fields
"""
import sys
sys.path[0:0] = [""]
from mongo_connector import errors
from mongo_connector.doc_managers.doc_manager_simulator import DocManager
from mongo_connector.locking_dict import LockingDict
from mongo_connector.oplog_manager import OplogThread
from mongo_connector.namespace_config import NamespaceConfig
from mongo_connector.test_utils import (assert_soon,
close_client,
ReplicaSetSingle)
from tests import unittest
class TestFilterFields(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.repl_set = ReplicaSetSingle().start()
cls.primary_conn = cls.repl_set.client()
cls.oplog_coll = cls.primary_conn.local['oplog.rs']
@classmethod
def tearDownClass(cls):
cls.primary_conn.drop_database("test")
close_client(cls.primary_conn)
cls.repl_set.stop()
def setUp(self):
self.namespace_config = NamespaceConfig()
self.opman = OplogThread(
primary_client=self.primary_conn,
doc_managers=(DocManager(),),
oplog_progress_dict=LockingDict(),
namespace_config=self.namespace_config
)
def tearDown(self):
try:
self.opman.join()
except RuntimeError:
# OplogThread may not have been started
pass
def reset_include_fields(self, fields):
self.opman.namespace_config = NamespaceConfig(include_fields=fields)
def reset_exclude_fields(self, fields):
self.opman.namespace_config = NamespaceConfig(exclude_fields=fields)
def test_filter_fields(self):
docman = self.opman.doc_managers[0]
conn = self.opman.primary_client
include_fields = ["a", "b", "c"]
exclude_fields = ["d", "e", "f"]
# Set fields to care about
self.reset_include_fields(include_fields)
# Documents have more than just these fields
doc = {
"a": 1, "b": 2, "c": 3,
"d": 4, "e": 5, "f": 6,
"_id": 1
}
db = conn['test']['test']
db.insert_one(doc)
assert_soon(lambda: db.count() == 1)
self.opman.dump_collection()
result = docman._search()[0]
keys = result.keys()
for inc, exc in zip(include_fields, exclude_fields):
self.assertIn(inc, keys)
self.assertNotIn(exc, keys)
def test_filter_exclude_oplog_entry(self):
# Test oplog entries: these are callables, since
# filter_oplog_entry modifies the oplog entry in-place
insert_op = lambda: {
"op": "i",
"o": {
"_id": 0,
"a": 1,
"b": 2,
"c": 3
}
}
update_op = lambda: {
"op": "u",
"o": {
"$set": {
"a": 4,
"b": 5
},
"$unset": {
"c": True
}
},
"o2": {
"_id": 1
}
}
def filter_doc(document, fields):
if fields and '_id' in fields:
fields.remove('_id')
return self.opman.filter_oplog_entry(
document, exclude_fields=fields)
# Case 0: insert op, no fields provided
filtered = filter_doc(insert_op(), None)
self.assertEqual(filtered, insert_op())
# Case 1: insert op, fields provided
filtered = filter_doc(insert_op(), ['c'])
self.assertEqual(filtered['o'], {'_id': 0, 'a': 1, 'b': 2})
# Case 2: insert op, fields provided, doc becomes empty except for _id
filtered = filter_doc(insert_op(), ['a', 'b', 'c'])
self.assertEqual(filtered['o'], {'_id': 0})
# Case 3: update op, no fields provided
filtered = filter_doc(update_op(), None)
self.assertEqual(filtered, update_op())
# Case 4: update op, fields provided
filtered = filter_doc(update_op(), ['b'])
self.assertNotIn('b', filtered['o']['$set'])
self.assertIn('a', filtered['o']['$set'])
self.assertEqual(filtered['o']['$unset'], update_op()['o']['$unset'])
# Case 5: update op, fields provided, empty $set
filtered = filter_doc(update_op(), ['a', 'b'])
self.assertNotIn('$set', filtered['o'])
self.assertEqual(filtered['o']['$unset'], update_op()['o']['$unset'])
# Case 6: update op, fields provided, empty $unset
filtered = filter_doc(update_op(), ['c'])
self.assertNotIn('$unset', filtered['o'])
self.assertEqual(filtered['o']['$set'], update_op()['o']['$set'])
# Case 7: update op, fields provided, entry is nullified
filtered = filter_doc(update_op(), ['a', 'b', 'c'])
self.assertEqual(filtered, None)
# Case 8: update op, fields provided, replacement
filtered = filter_doc({
'op': 'u',
'o': {'a': 1, 'b': 2, 'c': 3, 'd': 4}
}, ['d', 'e', 'f'])
self.assertEqual(
filtered, {'op': 'u', 'o': {'a': 1, 'b': 2, 'c': 3}})
def test_filter_oplog_entry(self):
# Test oplog entries: these are callables, since
# filter_oplog_entry modifies the oplog entry in-place
insert_op = lambda: {
"op": "i",
"o": {
"_id": 0,
"a": 1,
"b": 2,
"c": 3
}
}
update_op = lambda: {
"op": "u",
"o": {
"$set": {
"a": 4,
"b": 5
},
"$unset": {
"c": True
}
},
"o2": {
"_id": 1
}
}
def filter_doc(document, fields):
if fields and '_id' not in fields:
fields.append('_id')
return self.opman.filter_oplog_entry(
document, include_fields=fields)
# Case 0: insert op, no fields provided
filtered = filter_doc(insert_op(), None)
self.assertEqual(filtered, insert_op())
# Case 1: insert op, fields provided
filtered = filter_doc(insert_op(), ['a', 'b'])
self.assertEqual(filtered['o'], {'_id': 0, 'a': 1, 'b': 2})
# Case 2: insert op, fields provided, doc becomes empty except for _id
filtered = filter_doc(insert_op(), ['d', 'e', 'f'])
self.assertEqual(filtered['o'], {'_id': 0})
# Case 3: update op, no fields provided
filtered = filter_doc(update_op(), None)
self.assertEqual(filtered, update_op())
# Case 4: update op, fields provided
filtered = filter_doc(update_op(), ['a', 'c'])
self.assertNotIn('b', filtered['o']['$set'])
self.assertIn('a', filtered['o']['$set'])
self.assertEqual(filtered['o']['$unset'], update_op()['o']['$unset'])
# Case 5: update op, fields provided, empty $set
filtered = filter_doc(update_op(), ['c'])
self.assertNotIn('$set', filtered['o'])
self.assertEqual(filtered['o']['$unset'], update_op()['o']['$unset'])
# Case 6: update op, fields provided, empty $unset
filtered = filter_doc(update_op(), ['a', 'b'])
self.assertNotIn('$unset', filtered['o'])
self.assertEqual(filtered['o']['$set'], update_op()['o']['$set'])
# Case 7: update op, fields provided, entry is nullified
filtered = filter_doc(update_op(), ['d', 'e', 'f'])
self.assertEqual(filtered, None)
# Case 8: update op, fields provided, replacement
filtered = filter_doc({
'op': 'u',
'o': {'a': 1, 'b': 2, 'c': 3, 'd': 4}
}, ['a', 'b', 'c'])
self.assertEqual(
filtered, {'op': 'u', 'o': {'a': 1, 'b': 2, 'c': 3}})
def test_nested_fields(self):
def check_nested(document, fields, filtered_document, op='i'):
if '_id' not in fields:
fields.append('_id')
filtered_result = self.opman.filter_oplog_entry(
{'op': op, 'o': document}, include_fields=fields)
if filtered_result is not None:
filtered_result = filtered_result['o']
self.assertEqual(filtered_result, filtered_document)
document = {'name': 'Han Solo', 'a': {'b': {}}}
fields = ['name', 'a.b.c']
filtered_document = {'name': 'Han Solo'}
check_nested(document, fields, filtered_document)
document = {'a': {'b': {'c': 2, 'e': 3}, 'e': 5},
'b': 2,
'c': {'g': 1}}
fields = ['a.b.c', 'a.e']
filtered_document = {'a': {'b': {'c': 2}, 'e': 5}}
check_nested(document, fields, filtered_document)
document = {'a': {'b': {'c': 2, 'e': 3}, 'e': 5},
'b': 2,
'c': {'g': 1},
'_id': 1}
fields = ['a.b.c', 'a.e']
filtered_document = {'a': {'b': {'c': 2}, 'e': 5}, '_id': 1}
check_nested(document, fields, filtered_document)
document = {'a': {'b': {'c': {'d': 1}}}, '-a': {'-b': {'-c': 2}}}
fields = ['a.b', '-a']
filtered_document = document.copy()
check_nested(document, fields, filtered_document)
document = {'a': {'b': {'c': {'d': 1}}}, '-a': {'-b': {'-c': 2}}}
fields = ['a', '-a.-b']
filtered_document = document.copy()
check_nested(document, fields, filtered_document)
document = {'a': {'b': {'c': {'d': 1}}}, '-a': {'-b': {'-c': 2}},
'_id': 1}
fields = ['a.b', '-a']
filtered_document = document.copy()
check_nested(document, fields, filtered_document)
fields = ['a', '-a.-b']
check_nested(document, fields, filtered_document)
document = {'test': 1}
fields = ['doesnt_exist']
filtered_document = {}
check_nested(document, fields, filtered_document)
document = {'a': {'b': 1}, 'b': {'a': 1}}
fields = ['a.b', 'b.a']
filtered_document = document.copy()
check_nested(document, fields, filtered_document)
document = {'a': {'b': {'a': {'b': 1}}}, 'c': {'a': {'b': 1}}}
fields = ['a.b']
filtered_document = {'a': {'b': {'a': {'b': 1}}}}
check_nested(document, fields, filtered_document)
document = {'name': 'anna', 'name_of_cat': 'pushkin'}
fields = ['name']
filtered_document = {'name': 'anna'}
check_nested(document, fields, filtered_document)
update = {'$set': {'a.b': 1, 'a.c': 3, 'b': 2, 'c': {'b': 3}}}
fields = ['a', 'c']
filtered_update = {'$set': {'a.b': 1, 'a.c': 3, 'c': {'b': 3}}}
check_nested(update, fields, filtered_update, op='u')
update = {'$set': {'a.b': {'c': 3, 'd': 1}, 'a.e': 1, 'a.f': 2}}
fields = ['a.b.c', 'a.e']
filtered_update = {'$set': {'a.b': {'c': 3}, 'a.e': 1}}
check_nested(update, fields, filtered_update, op='u')
update = {'$set': {'a.b.1': 1, 'a.b.2': 2, 'b': 3}}
fields = ['a.b']
filtered_update = {'$set': {'a.b.1': 1, 'a.b.2': 2}}
check_nested(update, fields, filtered_update, op='u')
update = {'$set': {'a.b': {'c': 3, 'd': 1}, 'a.e': 1}}
fields = ['a.b.e']
filtered_update = None
check_nested(update, fields, filtered_update, op='u')
def test_nested_exclude_fields(self):
def check_nested(document, exclude_fields, filtered_document, op='i'):
if '_id' in exclude_fields:
exclude_fields.remove('_id')
filtered_result = self.opman.filter_oplog_entry(
{'op': op, 'o': document}, exclude_fields=exclude_fields)
if filtered_result is not None:
filtered_result = filtered_result['o']
self.assertEqual(filtered_result, filtered_document)
document = {'a': {'b': {'c': {'d': 0, 'e': 1}}}}
exclude_fields = ['a.b.c.d']
filtered_document = {'a': {'b': {'c': {'e': 1}}}}
check_nested(document, exclude_fields, filtered_document)
document = {'a': {'b': {'c': {'-a': 0, 'd': {'e': {'f': 1}}}}}}
exclude_fields = ['a.b.c.d.e.f']
filtered_document = {'a': {'b': {'c': {'-a': 0, 'd': {'e': {}}}}}}
check_nested(document, exclude_fields, filtered_document)
document = {'a': 1}
exclude_fields = ['a']
filtered_document = {}
check_nested(document, exclude_fields, filtered_document)
document = {'a': {'b': {'c': 2, 'e': 3}, 'e': 5},
'b': 2,
'c': {'g': 1}}
exclude_fields = ['a.b.c', 'a.e']
filtered_document = {'a': {'b': {'e': 3}},
'b': 2,
'c': {'g': 1}}
check_nested(document, exclude_fields, filtered_document)
document = {'a': {'b': {'c': 2, 'e': 3}, 'e': 5},
'b': 2,
'c': {'g': 1},
'_id': 1}
exclude_fields = ['a.b.c', 'a.e', '_id']
filtered_document = {'a': {'b': {'e': 3}},
'b': 2, 'c': {'g': 1},
'_id': 1}
check_nested(document, exclude_fields, filtered_document)
document = {'a': {'b': {'c': {'d': 1}}},
'-a': {'-b': {'-c': 2}}}
exclude_fields = ['a.b', '-a']
filtered_document = {'a': {}}
check_nested(document, exclude_fields, filtered_document)
document = {'a': {'b': {'c': {'d': 1}}},
'-a': {'-b': {'-c': 2}}}
exclude_fields = ['a', '-a.-b']
filtered_document = {'-a': {}}
check_nested(document, exclude_fields, filtered_document)
document = {'a': {'b': {'c': {'d': 1}}},
'-a': {'-b': {'-c': 2}},
'_id': 1}
exclude_fields = ['a.b', '-a']
filtered_document = {'_id': 1, 'a': {}}
check_nested(document, exclude_fields, filtered_document)
document = {'test': 1}
exclude_fields = ['doesnt_exist']
filtered_document = document.copy()
check_nested(document, exclude_fields, filtered_document)
document = {'test': 1}
exclude_fields = ['test.doesnt_exist']
filtered_document = document.copy()
check_nested(document, exclude_fields, filtered_document)
document = {'a': {'b': 1}, 'b': {'a': 1}}
exclude_fields = ['a.b', 'b.a']
filtered_document = {'a': {}, 'b': {}}
check_nested(document, exclude_fields, filtered_document)
document = {'a': {'b': {'a': {'b': 1}}}, 'c': {'a': {'b': 1}}}
exclude_fields = ['a.b']
filtered_document = {'a': {}, 'c': {'a': {'b': 1}}}
check_nested(document, exclude_fields, filtered_document)
document = {'name': 'anna', 'name_of_cat': 'pushkin'}
exclude_fields = ['name']
filtered_document = {'name_of_cat': 'pushkin'}
check_nested(document, exclude_fields, filtered_document)
update = {'$set': {'a.b': 1, 'a.c': 3, 'b': 2, 'c': {'b': 3}}}
exclude_fields = ['a', 'c']
filtered_update = {'$set': {'b': 2}}
check_nested(update, exclude_fields, filtered_update, op='u')
update = {'$set': {'a.b': {'c': 3, 'd': 1}, 'a.e': 1, 'a.f': 2}}
exclude_fields = ['a.b.c', 'a.e']
filtered_update = {'$set': {'a.b': {'d': 1}, 'a.f': 2}}
check_nested(update, exclude_fields, filtered_update, op='u')
update = {'$set': {'a.b': {'c': 3, 'd': 1}, 'a.e': 1}}
exclude_fields = ['a.b.c', 'a.b.d', 'a.e']
filtered_update = {'$set': {'a.b': {}}}
check_nested(update, exclude_fields, filtered_update, op='u')
update = {'$set': {'a.b.1': 1, 'a.b.2': 2, 'b': 3}}
exclude_fields = ['a.b']
filtered_update = {'$set': {'b': 3}}
check_nested(update, exclude_fields, filtered_update, op='u')
update = {'$set': {'a.b.c': 42, 'd.e.f': 123, 'g': 456}}
exclude_fields = ['a.b', 'd']
filtered_update = {'$set': {'g': 456}}
check_nested(update, exclude_fields, filtered_update, op='u')
update = {'$set': {'a.b': {'c': 3, 'd': 1}, 'a.e': 1}}
exclude_fields = ['a.b', 'a.e']
filtered_update = None
check_nested(update, exclude_fields, filtered_update, op='u')
class TestFindFields(unittest.TestCase):
def test_find_field(self):
doc = {'a': {'b': {'c': 1}}}
self.assertEqual(OplogThread._find_field('a', doc),
[(['a'], doc['a'])])
self.assertEqual(OplogThread._find_field('a.b', doc),
[(['a', 'b'], doc['a']['b'])])
self.assertEqual(OplogThread._find_field('a.b.c', doc),
[(['a', 'b', 'c'], doc['a']['b']['c'])])
self.assertEqual(OplogThread._find_field('x', doc),
[])
self.assertEqual(OplogThread._find_field('a.b.x', doc),
[])
def test_find_update_fields(self):
doc = {'a': {'b': {'c': 1}}, 'e.f': 1, 'g.h': {'i': {'j': 1}}}
self.assertEqual(OplogThread._find_update_fields('a', doc),
[(['a'], doc['a'])])
self.assertEqual(OplogThread._find_update_fields('a.b', doc),
[(['a', 'b'], doc['a']['b'])])
self.assertEqual(OplogThread._find_update_fields('a.b.c', doc),
[(['a', 'b', 'c'], doc['a']['b']['c'])])
self.assertEqual(OplogThread._find_update_fields('x', doc),
[])
self.assertEqual(OplogThread._find_update_fields('a.b.x', doc),
[])
self.assertEqual(OplogThread._find_update_fields('e.f', doc),
[(['e.f'], doc['e.f'])])
self.assertEqual(OplogThread._find_update_fields('e', doc),
[(['e.f'], doc['e.f'])])
self.assertEqual(OplogThread._find_update_fields('g.h.i.j', doc),
[(['g.h', 'i', 'j'], doc['g.h']['i']['j'])])
# Test multiple matches
doc = {'a.b': 1, 'a.c': 2, 'e.f.h': 3, 'e.f.i': 4}
matches = OplogThread._find_update_fields('a', doc)
self.assertEqual(len(matches), 2)
self.assertIn((['a.b'], doc['a.b']), matches)
self.assertIn((['a.c'], doc['a.c']), matches)
matches = OplogThread._find_update_fields('e.f', doc)
self.assertEqual(len(matches), 2)
self.assertIn((['e.f.h'], doc['e.f.h']), matches)
self.assertIn((['e.f.i'], doc['e.f.i']), matches)
# Test updates to array fields
doc = {'a.b.1': 9, 'a.b.3': 10, 'a.b.4.c': 11}
matches = OplogThread._find_update_fields('a.b', doc)
self.assertEqual(len(matches), 3)
self.assertIn((['a.b.1'], doc['a.b.1']), matches)
self.assertIn((['a.b.3'], doc['a.b.3']), matches)
self.assertIn((['a.b.4.c'], doc['a.b.4.c']), matches)
if __name__ == "__main__":
unittest.main()
| 38.826172
| 78
| 0.506615
|