Dataset schema (one row per source file; "nullable" marks columns that may be null):

hexsha: string (length 40)
size: int64 (4 to 1.02M)
ext: string (8 classes)
lang: string (1 class)
max_stars_repo_path: string (length 4 to 209)
max_stars_repo_name: string (length 5 to 121)
max_stars_repo_head_hexsha: string (length 40)
max_stars_repo_licenses: list (length 1 to 10)
max_stars_count: int64 (1 to 191k, nullable)
max_stars_repo_stars_event_min_datetime: string (length 24, nullable)
max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
max_issues_repo_path: string (length 4 to 209)
max_issues_repo_name: string (length 5 to 121)
max_issues_repo_head_hexsha: string (length 40)
max_issues_repo_licenses: list (length 1 to 10)
max_issues_count: int64 (1 to 67k, nullable)
max_issues_repo_issues_event_min_datetime: string (length 24, nullable)
max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
max_forks_repo_path: string (length 4 to 209)
max_forks_repo_name: string (length 5 to 121)
max_forks_repo_head_hexsha: string (length 40)
max_forks_repo_licenses: list (length 1 to 10)
max_forks_count: int64 (1 to 105k, nullable)
max_forks_repo_forks_event_min_datetime: string (length 24, nullable)
max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
content: string (length 4 to 1.02M)
avg_line_length: float64 (1.07 to 66.1k)
max_line_length: int64 (4 to 266k)
alphanum_fraction: float64 (0.01 to 1)
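The schema above is the kind of per-file metadata emitted by code-corpus exports: repository paths, licenses, star/issue/fork activity, and simple content statistics. A minimal sketch of how rows with these columns could be inspected, assuming the dump has been exported to a Parquet file (the filename below is a placeholder, not named anywhere in this dump):

import pandas as pd

# Placeholder path; this dump does not identify its source file.
df = pd.read_parquet("code_corpus.parquet")

# Keep Python files below 100 kB with a reasonable alphanumeric density,
# using only columns that appear in the schema above.
subset = df[
    (df["lang"] == "Python")
    & (df["size"] < 100_000)
    & (df["alphanum_fraction"] > 0.25)
]

for _, row in subset.head(3).iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["max_stars_count"])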
hexsha: 60e7eeab52a00e19d3c68a7d17f751163b8b2396
size: 720
ext: py
lang: Python
max_stars_repo_path: tests/test_ratings.py
max_stars_repo_name: technicapital/stake-python
max_stars_repo_head_hexsha: 8d0a985923318ca7b92f23e0c9a8319a75f37ff2
max_stars_repo_licenses: ["Apache-2.0"]
max_stars_count: 47
max_stars_repo_stars_event_min_datetime: 2020-09-16T04:17:53.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-29T12:20:50.000Z
max_issues_repo_path: tests/test_ratings.py
max_issues_repo_name: technicapital/stake-python
max_issues_repo_head_hexsha: 8d0a985923318ca7b92f23e0c9a8319a75f37ff2
max_issues_repo_licenses: ["Apache-2.0"]
max_issues_count: 123
max_issues_repo_issues_event_min_datetime: 2020-09-10T05:03:43.000Z
max_issues_repo_issues_event_max_datetime: 2022-02-03T12:13:35.000Z
max_forks_repo_path: tests/test_ratings.py
max_forks_repo_name: technicapital/stake-python
max_forks_repo_head_hexsha: 8d0a985923318ca7b92f23e0c9a8319a75f37ff2
max_forks_repo_licenses: ["Apache-2.0"]
max_forks_count: 5
max_forks_repo_forks_event_min_datetime: 2021-07-24T08:53:37.000Z
max_forks_repo_forks_event_max_datetime: 2022-01-24T16:19:50.000Z

content:
import datetime
import pytest
from stake import RatingsRequest


@pytest.mark.asyncio
async def test_list_ratings(tracing_client):
    request = RatingsRequest(symbols=["AAPL", "MSFT"], limit=4)
    ratings = await tracing_client.ratings.list(request)
    assert len(ratings) == 4
    assert ratings[0].symbol in ("AAPL", "MSFT")
    assert ratings[0].rating_current == "Buy"
    assert ratings[0].updated == datetime.datetime(
        2021, 7, 16, 11, 40, 23, tzinfo=datetime.timezone.utc
    )


@pytest.mark.asyncio
async def test_list_ratings_unknown(tracing_client):
    request = RatingsRequest(symbols=["NOTEXIST"], limit=4)
    ratings = await tracing_client.ratings.list(request)
    assert len(ratings) == 0
avg_line_length: 28.8
max_line_length: 63
alphanum_fraction: 0.718056
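The three statistics recorded with each row are simple functions of the content column. Their exact definitions are not stated in this dump; a plausible computation, shown as a sketch, is:

def content_stats(content: str) -> dict:
    # Assumed interpretations of the column names; the dump itself does not
    # define how avg_line_length or alphanum_fraction were computed.
    lines = content.splitlines() or [""]
    line_lengths = [len(line) for line in lines]
    alphanum = sum(ch.isalnum() for ch in content)
    return {
        "size": len(content),
        "avg_line_length": sum(line_lengths) / len(line_lengths),
        "max_line_length": max(line_lengths),
        "alphanum_fraction": alphanum / len(content) if content else 0.0,
    }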
hexsha: a11bc4b05e8c2ebd9c2aa282ac4c66e9d679d0ad
size: 3,111
ext: py
lang: Python
max_stars_repo_path: fonts/terminus-font-4.49.1/bin/fnutil.py
max_stars_repo_name: xfnw/yaft
max_stars_repo_head_hexsha: c57e8f3014aa5cf743ca0855e543dbafc2e0db22
max_stars_repo_licenses: ["MIT"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: fonts/terminus-font-4.49.1/bin/fnutil.py
max_issues_repo_name: xfnw/yaft
max_issues_repo_head_hexsha: c57e8f3014aa5cf743ca0855e543dbafc2e0db22
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: fonts/terminus-font-4.49.1/bin/fnutil.py
max_forks_repo_name: xfnw/yaft
max_forks_repo_head_hexsha: c57e8f3014aa5cf743ca0855e543dbafc2e0db22
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null

content:
#
# Copyright (C) 2017-2020 Dimitar Toshkov Zhekov <dimitar.zhekov@gmail.com>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import sys
# -- Various --
UNICODE_MAX = 1114111 # 0x10FFFF
UNICODE_BMP_MAX = 65535 # 0xFFFF
def parse_dec(name, s, min_value=0, max_value=UNICODE_MAX):
try:
value = int(s)
except ValueError:
raise Exception('invalid %s format' % name)
if min_value is not None and value < min_value:
raise Exception('%s must be >= %d' % (name, min_value))
if max_value is not None and value > max_value:
raise Exception('%s must be <= %d' % (name, max_value))
return value
def parse_hex(name, s, min_value=0, max_value=UNICODE_MAX):
try:
value = int(s, 16)
except ValueError:
raise Exception('invalid %s format' % name)
if min_value is not None and value < min_value:
raise Exception('%s must be >= %X' % (name, min_value))
if max_value is not None and value > max_value:
raise Exception('%s must be <= %X' % (name, max_value))
return value
def quote(bstr):
return b'"%s"' % bstr.replace(b'"', b'""')
def unquote(bstr, name=None):
if len(bstr) >= 2 and bstr.startswith(b'"') and bstr.endswith(b'"'):
bstr = bstr[1 : len(bstr) - 1].replace(b'""', b'"')
elif name is not None:
raise Exception(name + ' must be quoted')
return bstr
def message(prefix, severity, text):
sys.stderr.write('%s%s%s\n' % (prefix, severity + ': ' if severity else '', text))
def warning(prefix, text):
message(prefix, 'warning', text)
def split_words(name, value, count):
words = value.split(None, count)
if len(words) != count:
raise Exception('%s must contain %d values' % (name, count))
return words
GPL2PLUS_LICENSE = ('' +
'This program is free software; you can redistribute it and/or modify it\n' +
'under the terms of the GNU General Public License as published by the Free\n' +
'Software Foundation; either version 2 of the License, or (at your option)\n' +
'any later version.\n' +
'\n' +
'This program is distributed in the hope that it will be useful, but\n' +
'WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY\n' +
'or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License\n' +
'for more details.\n' +
'\n' +
'You should have received a copy of the GNU General Public License along\n' +
'with this program; if not, write to the Free Software Foundation, Inc.,\n' +
'51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n')
avg_line_length: 31.424242
max_line_length: 83
alphanum_fraction: 0.704597
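fnutil.py above is a self-contained helper module, so its parsing and quoting helpers can be exercised directly. A short usage sketch, assuming the file is importable as fnutil:

import fnutil  # the module shown above, saved as fnutil.py on the import path

# Decimal and hexadecimal parsing with range checks; bounds default to the
# Unicode range defined by UNICODE_MAX.
assert fnutil.parse_dec('width', '12', 1, 32) == 12
assert fnutil.parse_hex('code point', '10FFFF') == fnutil.UNICODE_MAX

# quote()/unquote() double embedded quotes and round-trip byte strings.
quoted = fnutil.quote(b'say "hi"')
assert quoted == b'"say ""hi"""'
assert fnutil.unquote(quoted, 'value') == b'say "hi"'

# Out-of-range values raise a plain Exception with a descriptive message.
try:
    fnutil.parse_hex('code point', '110000')
except Exception as exc:
    print(exc)  # code point must be <= 10FFFF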
hexsha: 77e9eef19a7a4cd7db22331ed32562bf4a949ea4
size: 155,499
ext: py
lang: Python
max_stars_repo_path: src/tests/api/test_orders.py
max_stars_repo_name: NorDULaN/pretix
max_stars_repo_head_hexsha: e2b9fe8e71f3852721a42c594047d88f5181fd29
max_stars_repo_licenses: ["ECL-2.0", "Apache-2.0"]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2020-04-25T00:11:00.000Z
max_stars_repo_stars_event_max_datetime: 2020-04-25T00:11:00.000Z
max_issues_repo_path: src/tests/api/test_orders.py
max_issues_repo_name: NorDULaN/pretix
max_issues_repo_head_hexsha: e2b9fe8e71f3852721a42c594047d88f5181fd29
max_issues_repo_licenses: ["ECL-2.0", "Apache-2.0"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: src/tests/api/test_orders.py
max_forks_repo_name: NorDULaN/pretix
max_forks_repo_head_hexsha: e2b9fe8e71f3852721a42c594047d88f5181fd29
max_forks_repo_licenses: ["ECL-2.0", "Apache-2.0"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null

content:
import copy
import datetime
import json
from decimal import Decimal
from unittest import mock
import pytest
from django.core import mail as djmail
from django.dispatch import receiver
from django.utils.timezone import now
from django_countries.fields import Country
from django_scopes import scopes_disabled
from pytz import UTC
from stripe.error import APIConnectionError
from tests.plugins.stripe.test_provider import MockedCharge
from pretix.base.channels import SalesChannel
from pretix.base.models import (
InvoiceAddress, Order, OrderPosition, Question, SeatingPlan,
)
from pretix.base.models.orders import (
CartPosition, OrderFee, OrderPayment, OrderRefund, QuestionAnswer,
)
from pretix.base.services.invoices import (
generate_cancellation, generate_invoice,
)
from pretix.base.signals import register_sales_channels
class FoobarSalesChannel(SalesChannel):
identifier = "bar"
verbose_name = "Foobar"
icon = "home"
testmode_supported = False
@receiver(register_sales_channels, dispatch_uid="test_orders_register_sales_channels")
def base_sales_channels(sender, **kwargs):
return (
FoobarSalesChannel(),
)
@pytest.fixture
def item(event):
return event.items.create(name="Budget Ticket", default_price=23)
@pytest.fixture
def item2(event2):
return event2.items.create(name="Budget Ticket", default_price=23)
@pytest.fixture
def taxrule(event):
return event.tax_rules.create(rate=Decimal('19.00'))
@pytest.fixture
def question(event, item):
q = event.questions.create(question="T-Shirt size", type="S", identifier="ABC")
q.items.add(item)
q.options.create(answer="XL", identifier="LVETRWVU")
return q
@pytest.fixture
def question2(event2, item2):
q = event2.questions.create(question="T-Shirt size", type="S", identifier="ABC")
q.items.add(item2)
return q
@pytest.fixture
def quota(event, item):
q = event.quotas.create(name="Budget Quota", size=200)
q.items.add(item)
return q
@pytest.fixture
def order(event, item, taxrule, question):
testtime = datetime.datetime(2017, 12, 1, 10, 0, 0, tzinfo=UTC)
event.plugins += ",pretix.plugins.stripe"
event.save()
with mock.patch('django.utils.timezone.now') as mock_now:
mock_now.return_value = testtime
o = Order.objects.create(
code='FOO', event=event, email='dummy@dummy.test',
status=Order.STATUS_PENDING, secret="k24fiuwvu8kxz3y1",
datetime=datetime.datetime(2017, 12, 1, 10, 0, 0, tzinfo=UTC),
expires=datetime.datetime(2017, 12, 10, 10, 0, 0, tzinfo=UTC),
total=23, locale='en'
)
p1 = o.payments.create(
provider='stripe',
state='refunded',
amount=Decimal('23.00'),
payment_date=testtime,
)
o.refunds.create(
provider='stripe',
state='done',
source='admin',
amount=Decimal('23.00'),
execution_date=testtime,
payment=p1,
)
o.payments.create(
provider='banktransfer',
state='pending',
amount=Decimal('23.00'),
)
o.fees.create(fee_type=OrderFee.FEE_TYPE_PAYMENT, value=Decimal('0.25'), tax_rate=Decimal('19.00'),
tax_value=Decimal('0.05'), tax_rule=taxrule)
o.fees.create(fee_type=OrderFee.FEE_TYPE_PAYMENT, value=Decimal('0.25'), tax_rate=Decimal('19.00'),
tax_value=Decimal('0.05'), tax_rule=taxrule, canceled=True)
InvoiceAddress.objects.create(order=o, company="Sample company", country=Country('NZ'),
vat_id="DE123", vat_id_validated=True)
op = OrderPosition.objects.create(
order=o,
item=item,
variation=None,
price=Decimal("23"),
attendee_name_parts={"full_name": "Peter", "_scheme": "full"},
secret="z3fsn8jyufm5kpk768q69gkbyr5f4h6w",
pseudonymization_id="ABCDEFGHKL",
)
OrderPosition.objects.create(
order=o,
item=item,
variation=None,
price=Decimal("23"),
attendee_name_parts={"full_name": "Peter", "_scheme": "full"},
secret="YBiYJrmF5ufiTLdV1iDf",
pseudonymization_id="JKLM",
canceled=True
)
op.answers.create(question=question, answer='S')
return o
@pytest.fixture
def clist_autocheckin(event):
c = event.checkin_lists.create(name="Default", all_products=True, auto_checkin_sales_channels=['web'])
return c
TEST_ORDERPOSITION_RES = {
"id": 1,
"order": "FOO",
"positionid": 1,
"item": 1,
"variation": None,
"price": "23.00",
"attendee_name_parts": {"full_name": "Peter", "_scheme": "full"},
"attendee_name": "Peter",
"attendee_email": None,
"voucher": None,
"tax_rate": "0.00",
"tax_value": "0.00",
"tax_rule": None,
"secret": "z3fsn8jyufm5kpk768q69gkbyr5f4h6w",
"addon_to": None,
"pseudonymization_id": "ABCDEFGHKL",
"checkins": [],
"downloads": [],
"seat": None,
"company": None,
"street": None,
"zipcode": None,
"city": None,
"country": None,
"state": None,
"answers": [
{
"question": 1,
"answer": "S",
"question_identifier": "ABC",
"options": [],
"option_identifiers": []
}
],
"subevent": None,
"canceled": False,
}
TEST_PAYMENTS_RES = [
{
"local_id": 1,
"created": "2017-12-01T10:00:00Z",
"payment_date": "2017-12-01T10:00:00Z",
"provider": "stripe",
"payment_url": None,
"details": {
"id": None,
"payment_method": None
},
"state": "refunded",
"amount": "23.00"
},
{
"local_id": 2,
"created": "2017-12-01T10:00:00Z",
"payment_date": None,
"provider": "banktransfer",
"payment_url": None,
"details": {},
"state": "pending",
"amount": "23.00"
}
]
TEST_REFUNDS_RES = [
{
"local_id": 1,
"payment": 1,
"source": "admin",
"created": "2017-12-01T10:00:00Z",
"execution_date": "2017-12-01T10:00:00Z",
"provider": "stripe",
"state": "done",
"amount": "23.00"
},
]
TEST_ORDER_RES = {
"code": "FOO",
"status": "n",
"testmode": False,
"secret": "k24fiuwvu8kxz3y1",
"email": "dummy@dummy.test",
"locale": "en",
"datetime": "2017-12-01T10:00:00Z",
"expires": "2017-12-10T10:00:00Z",
"payment_date": "2017-12-01",
"sales_channel": "web",
"fees": [
{
"canceled": False,
"fee_type": "payment",
"value": "0.25",
"description": "",
"internal_type": "",
"tax_rate": "19.00",
"tax_value": "0.05"
}
],
"url": "http://example.com/dummy/dummy/order/FOO/k24fiuwvu8kxz3y1/",
"payment_provider": "banktransfer",
"total": "23.00",
"comment": "",
"checkin_attention": False,
"invoice_address": {
"last_modified": "2017-12-01T10:00:00Z",
"is_business": False,
"company": "Sample company",
"name": "",
"name_parts": {},
"street": "",
"zipcode": "",
"city": "",
"country": "NZ",
"state": "",
"internal_reference": "",
"vat_id": "DE123",
"vat_id_validated": True
},
"require_approval": False,
"positions": [TEST_ORDERPOSITION_RES],
"downloads": [],
"payments": TEST_PAYMENTS_RES,
"refunds": TEST_REFUNDS_RES,
}
@pytest.mark.django_db
def test_order_list(token_client, organizer, event, order, item, taxrule, question):
res = dict(TEST_ORDER_RES)
with scopes_disabled():
res["positions"][0]["id"] = order.positions.first().pk
res["positions"][0]["item"] = item.pk
res["positions"][0]["answers"][0]["question"] = question.pk
res["last_modified"] = order.last_modified.isoformat().replace('+00:00', 'Z')
res["fees"][0]["tax_rule"] = taxrule.pk
resp = token_client.get('/api/v1/organizers/{}/events/{}/orders/'.format(organizer.slug, event.slug))
assert resp.status_code == 200
assert [res] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/orders/?code=FOO'.format(organizer.slug, event.slug))
assert [res] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/orders/?code=BAR'.format(organizer.slug, event.slug))
assert [] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/orders/?testmode=false'.format(organizer.slug, event.slug))
assert [res] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/orders/?testmode=true'.format(organizer.slug, event.slug))
assert [] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/orders/?status=n'.format(organizer.slug, event.slug))
assert [res] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/orders/?status=p'.format(organizer.slug, event.slug))
assert [] == resp.data['results']
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orders/?email=dummy@dummy.test'.format(organizer.slug, event.slug))
assert [res] == resp.data['results']
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orders/?email=foo@example.org'.format(organizer.slug, event.slug))
assert [] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/orders/?locale=en'.format(organizer.slug, event.slug))
assert [res] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/orders/?locale=de'.format(organizer.slug, event.slug))
assert [] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/orders/?modified_since={}'.format(
organizer.slug, event.slug,
(order.last_modified - datetime.timedelta(hours=1)).isoformat().replace('+00:00', 'Z')
))
assert [res] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/orders/?modified_since={}'.format(
organizer.slug, event.slug, order.last_modified.isoformat().replace('+00:00', 'Z')
))
assert [res] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/orders/?modified_since={}'.format(
organizer.slug, event.slug,
(order.last_modified + datetime.timedelta(hours=1)).isoformat().replace('+00:00', 'Z')
))
assert [] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/orders/?include_canceled_positions=false'.format(organizer.slug, event.slug))
assert resp.status_code == 200
assert len(resp.data['results'][0]['positions']) == 1
resp = token_client.get('/api/v1/organizers/{}/events/{}/orders/?include_canceled_positions=true'.format(organizer.slug, event.slug))
assert resp.status_code == 200
assert len(resp.data['results'][0]['positions']) == 2
resp = token_client.get('/api/v1/organizers/{}/events/{}/orders/?include_canceled_fees=false'.format(organizer.slug, event.slug))
assert resp.status_code == 200
assert len(resp.data['results'][0]['fees']) == 1
resp = token_client.get('/api/v1/organizers/{}/events/{}/orders/?include_canceled_fees=true'.format(organizer.slug, event.slug))
assert resp.status_code == 200
assert len(resp.data['results'][0]['fees']) == 2
@pytest.mark.django_db
def test_order_detail(token_client, organizer, event, order, item, taxrule, question):
res = dict(TEST_ORDER_RES)
with scopes_disabled():
res["positions"][0]["id"] = order.positions.first().pk
res["positions"][0]["item"] = item.pk
res["fees"][0]["tax_rule"] = taxrule.pk
res["positions"][0]["answers"][0]["question"] = question.pk
res["last_modified"] = order.last_modified.isoformat().replace('+00:00', 'Z')
resp = token_client.get('/api/v1/organizers/{}/events/{}/orders/{}/'.format(organizer.slug, event.slug,
order.code))
assert resp.status_code == 200
assert res == resp.data
order.status = 'p'
order.save()
event.settings.ticketoutput_pdf__enabled = True
resp = token_client.get('/api/v1/organizers/{}/events/{}/orders/{}/'.format(organizer.slug, event.slug,
order.code))
assert len(resp.data['downloads']) == 1
assert len(resp.data['positions'][0]['downloads']) == 1
order.status = 'n'
order.save()
resp = token_client.get('/api/v1/organizers/{}/events/{}/orders/{}/'.format(organizer.slug, event.slug,
order.code))
assert len(resp.data['downloads']) == 0
assert len(resp.data['positions'][0]['downloads']) == 0
event.settings.ticket_download_pending = True
resp = token_client.get('/api/v1/organizers/{}/events/{}/orders/{}/'.format(organizer.slug, event.slug,
order.code))
assert len(resp.data['downloads']) == 1
assert len(resp.data['positions'][0]['downloads']) == 1
assert len(resp.data['positions']) == 1
resp = token_client.get('/api/v1/organizers/{}/events/{}/orders/{}/?include_canceled_positions=true'.format(organizer.slug, event.slug, order.code))
assert resp.status_code == 200
assert len(resp.data['positions']) == 2
assert len(resp.data['fees']) == 1
resp = token_client.get('/api/v1/organizers/{}/events/{}/orders/{}/?include_canceled_fees=true'.format(organizer.slug, event.slug, order.code))
assert resp.status_code == 200
assert len(resp.data['fees']) == 2
@pytest.mark.django_db
def test_payment_list(token_client, organizer, event, order):
resp = token_client.get('/api/v1/organizers/{}/events/{}/orders/{}/payments/'.format(organizer.slug, event.slug,
order.code))
assert resp.status_code == 200
assert TEST_PAYMENTS_RES == resp.data['results']
@pytest.mark.django_db
def test_payment_detail(token_client, organizer, event, order):
resp = token_client.get('/api/v1/organizers/{}/events/{}/orders/{}/payments/1/'.format(organizer.slug, event.slug,
order.code))
assert resp.status_code == 200
assert TEST_PAYMENTS_RES[0] == resp.data
@pytest.mark.django_db
def test_payment_create_confirmed(token_client, organizer, event, order):
resp = token_client.post('/api/v1/organizers/{}/events/{}/orders/{}/payments/'.format(
organizer.slug, event.slug, order.code
), format='json', data={
'provider': 'banktransfer',
'state': 'confirmed',
'amount': order.total,
'info': {
'foo': 'bar'
}
})
with scopes_disabled():
p = order.payments.last()
assert resp.status_code == 201
assert p.state == OrderPayment.PAYMENT_STATE_CONFIRMED
assert p.info_data == {'foo': 'bar'}
order.refresh_from_db()
assert order.status == Order.STATUS_PAID
@pytest.mark.django_db
def test_payment_create_pending(token_client, organizer, event, order):
resp = token_client.post('/api/v1/organizers/{}/events/{}/orders/{}/payments/'.format(
organizer.slug, event.slug, order.code
), format='json', data={
'provider': 'banktransfer',
'state': 'pending',
'amount': order.total,
'info': {
'foo': 'bar'
}
})
with scopes_disabled():
p = order.payments.last()
assert resp.status_code == 201
assert p.state == OrderPayment.PAYMENT_STATE_PENDING
assert p.info_data == {'foo': 'bar'}
order.refresh_from_db()
assert order.status == Order.STATUS_PENDING
@pytest.mark.django_db
def test_payment_confirm(token_client, organizer, event, order):
resp = token_client.post('/api/v1/organizers/{}/events/{}/orders/{}/payments/2/confirm/'.format(
organizer.slug, event.slug, order.code
), format='json', data={'force': True})
with scopes_disabled():
p = order.payments.get(local_id=2)
assert resp.status_code == 200
assert p.state == OrderPayment.PAYMENT_STATE_CONFIRMED
resp = token_client.post('/api/v1/organizers/{}/events/{}/orders/{}/payments/2/confirm/'.format(
organizer.slug, event.slug, order.code
), format='json', data={'force': True})
assert resp.status_code == 400
@pytest.mark.django_db
def test_payment_cancel(token_client, organizer, event, order):
resp = token_client.post('/api/v1/organizers/{}/events/{}/orders/{}/payments/2/cancel/'.format(
organizer.slug, event.slug, order.code
))
with scopes_disabled():
p = order.payments.get(local_id=2)
assert resp.status_code == 200
assert p.state == OrderPayment.PAYMENT_STATE_CANCELED
resp = token_client.post('/api/v1/organizers/{}/events/{}/orders/{}/payments/2/cancel/'.format(
organizer.slug, event.slug, order.code
))
assert resp.status_code == 400
@pytest.mark.django_db
def test_payment_refund_fail(token_client, organizer, event, order, monkeypatch):
with scopes_disabled():
order.payments.last().confirm()
resp = token_client.post('/api/v1/organizers/{}/events/{}/orders/{}/payments/2/refund/'.format(
organizer.slug, event.slug, order.code
), format='json', data={
'amount': '25.00',
'mark_canceled': False
})
assert resp.status_code == 400
assert resp.data == {'amount': ['Invalid refund amount, only 23.00 are available to refund.']}
resp = token_client.post('/api/v1/organizers/{}/events/{}/orders/{}/payments/2/refund/'.format(
organizer.slug, event.slug, order.code
), format='json', data={
'amount': '20.00',
'mark_canceled': False
})
assert resp.status_code == 400
assert resp.data == {'amount': ['Partial refund not available for this payment method.']}
resp = token_client.post('/api/v1/organizers/{}/events/{}/orders/{}/payments/2/refund/'.format(
organizer.slug, event.slug, order.code
), format='json', data={
'mark_canceled': False
})
assert resp.status_code == 400
assert resp.data == {'amount': ['Full refund not available for this payment method.']}
resp = token_client.post('/api/v1/organizers/{}/events/{}/orders/{}/payments/2/refund/'.format(
organizer.slug, event.slug, order.code
), format='json', data={
'amount': '23.00',
'mark_canceled': False
})
assert resp.status_code == 400
assert resp.data == {'amount': ['Full refund not available for this payment method.']}
resp = token_client.post('/api/v1/organizers/{}/events/{}/orders/{}/payments/1/refund/'.format(
organizer.slug, event.slug, order.code
), format='json', data={
'amount': '23.00',
'mark_canceled': False
})
assert resp.status_code == 400
assert resp.data == {'detail': 'Invalid state of payment.'}
@pytest.mark.django_db
def test_payment_refund_success(token_client, organizer, event, order, monkeypatch):
def charge_retr(*args, **kwargs):
def refund_create(amount):
r = MockedCharge()
r.id = 'foo'
r.status = 'succeeded'
return r
c = MockedCharge()
c.refunds.create = refund_create
return c
with scopes_disabled():
p1 = order.payments.create(
provider='stripe',
state='confirmed',
amount=Decimal('23.00'),
payment_date=order.datetime,
info=json.dumps({
'id': 'ch_123345345'
})
)
monkeypatch.setattr("stripe.Charge.retrieve", charge_retr)
resp = token_client.post('/api/v1/organizers/{}/events/{}/orders/{}/payments/{}/refund/'.format(
organizer.slug, event.slug, order.code, p1.local_id
), format='json', data={
'amount': '23.00',
'mark_canceled': False,
})
assert resp.status_code == 200
with scopes_disabled():
r = order.refunds.get(local_id=resp.data['local_id'])
assert r.provider == "stripe"
assert r.state == OrderRefund.REFUND_STATE_DONE
assert r.source == OrderRefund.REFUND_SOURCE_ADMIN
@pytest.mark.django_db
def test_payment_refund_unavailable(token_client, organizer, event, order, monkeypatch):
def charge_retr(*args, **kwargs):
def refund_create(amount):
raise APIConnectionError(message='Foo')
c = MockedCharge()
c.refunds.create = refund_create
return c
with scopes_disabled():
p1 = order.payments.create(
provider='stripe',
state='confirmed',
amount=Decimal('23.00'),
payment_date=order.datetime,
info=json.dumps({
'id': 'ch_123345345'
})
)
monkeypatch.setattr("stripe.Charge.retrieve", charge_retr)
resp = token_client.post('/api/v1/organizers/{}/events/{}/orders/{}/payments/{}/refund/'.format(
organizer.slug, event.slug, order.code, p1.local_id
), format='json', data={
'amount': '23.00',
'mark_canceled': False,
})
assert resp.status_code == 400
assert resp.data == {'detail': 'External error: We had trouble communicating with Stripe. Please try again and contact support if the problem persists.'}
with scopes_disabled():
r = order.refunds.last()
assert r.provider == "stripe"
assert r.state == OrderRefund.REFUND_STATE_FAILED
assert r.source == OrderRefund.REFUND_SOURCE_ADMIN
@pytest.mark.django_db
def test_refund_list(token_client, organizer, event, order):
resp = token_client.get('/api/v1/organizers/{}/events/{}/orders/{}/refunds/'.format(organizer.slug, event.slug,
order.code))
assert resp.status_code == 200
assert TEST_REFUNDS_RES == resp.data['results']
@pytest.mark.django_db
def test_refund_detail(token_client, organizer, event, order):
resp = token_client.get('/api/v1/organizers/{}/events/{}/orders/{}/refunds/1/'.format(organizer.slug, event.slug,
order.code))
assert resp.status_code == 200
assert TEST_REFUNDS_RES[0] == resp.data
@pytest.mark.django_db
def test_refund_done(token_client, organizer, event, order):
with scopes_disabled():
r = order.refunds.get(local_id=1)
r.state = 'transit'
r.save()
resp = token_client.post('/api/v1/organizers/{}/events/{}/orders/{}/refunds/1/done/'.format(
organizer.slug, event.slug, order.code
))
with scopes_disabled():
r = order.refunds.get(local_id=1)
assert resp.status_code == 200
assert r.state == OrderRefund.REFUND_STATE_DONE
resp = token_client.post('/api/v1/organizers/{}/events/{}/orders/{}/refunds/1/done/'.format(
organizer.slug, event.slug, order.code
))
assert resp.status_code == 400
@pytest.mark.django_db
def test_refund_process_mark_refunded(token_client, organizer, event, order):
with scopes_disabled():
p = order.payments.get(local_id=1)
p.create_external_refund()
resp = token_client.post('/api/v1/organizers/{}/events/{}/orders/{}/refunds/2/process/'.format(
organizer.slug, event.slug, order.code
), format='json', data={'mark_canceled': True})
with scopes_disabled():
r = order.refunds.get(local_id=1)
assert resp.status_code == 200
assert r.state == OrderRefund.REFUND_STATE_DONE
order.refresh_from_db()
assert order.status == Order.STATUS_CANCELED
resp = token_client.post('/api/v1/organizers/{}/events/{}/orders/{}/refunds/2/process/'.format(
organizer.slug, event.slug, order.code
), format='json', data={'mark_canceled': True})
assert resp.status_code == 400
@pytest.mark.django_db
def test_refund_process_mark_pending(token_client, organizer, event, order):
with scopes_disabled():
p = order.payments.get(local_id=1)
p.create_external_refund()
resp = token_client.post('/api/v1/organizers/{}/events/{}/orders/{}/refunds/2/process/'.format(
organizer.slug, event.slug, order.code
), format='json', data={'mark_canceled': False})
with scopes_disabled():
r = order.refunds.get(local_id=1)
assert resp.status_code == 200
assert r.state == OrderRefund.REFUND_STATE_DONE
order.refresh_from_db()
assert order.status == Order.STATUS_PENDING
@pytest.mark.django_db
def test_refund_cancel(token_client, organizer, event, order):
with scopes_disabled():
r = order.refunds.get(local_id=1)
r.state = 'transit'
r.save()
resp = token_client.post('/api/v1/organizers/{}/events/{}/orders/{}/refunds/1/cancel/'.format(
organizer.slug, event.slug, order.code
))
with scopes_disabled():
r = order.refunds.get(local_id=1)
assert resp.status_code == 200
assert r.state == OrderRefund.REFUND_STATE_CANCELED
resp = token_client.post('/api/v1/organizers/{}/events/{}/orders/{}/refunds/1/cancel/'.format(
organizer.slug, event.slug, order.code
))
assert resp.status_code == 400
@pytest.mark.django_db
def test_orderposition_list(token_client, organizer, event, order, item, subevent, subevent2, question):
i2 = copy.copy(item)
i2.pk = None
i2.save()
with scopes_disabled():
var = item.variations.create(value="Children")
var2 = item.variations.create(value="Children")
res = dict(TEST_ORDERPOSITION_RES)
op = order.positions.first()
op.variation = var
op.save()
res["id"] = op.pk
res["item"] = item.pk
res["variation"] = var.pk
res["answers"][0]["question"] = question.pk
resp = token_client.get('/api/v1/organizers/{}/events/{}/orderpositions/'.format(organizer.slug, event.slug))
assert resp.status_code == 200
assert [res] == resp.data['results']
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orderpositions/?order__status=n'.format(organizer.slug, event.slug))
assert [res] == resp.data['results']
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orderpositions/?order__status=p'.format(organizer.slug, event.slug))
assert [] == resp.data['results']
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orderpositions/?item={}'.format(organizer.slug, event.slug, item.pk))
assert [res] == resp.data['results']
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orderpositions/?item__in={},{}'.format(
organizer.slug, event.slug, item.pk, i2.pk
))
assert [res] == resp.data['results']
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orderpositions/?item={}'.format(organizer.slug, event.slug, i2.pk))
assert [] == resp.data['results']
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orderpositions/?variation={}'.format(organizer.slug, event.slug, var.pk))
assert [res] == resp.data['results']
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orderpositions/?variation={}'.format(organizer.slug, event.slug, var2.pk))
assert [] == resp.data['results']
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orderpositions/?attendee_name=Peter'.format(organizer.slug, event.slug))
assert [res] == resp.data['results']
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orderpositions/?attendee_name=peter'.format(organizer.slug, event.slug))
assert [res] == resp.data['results']
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orderpositions/?attendee_name=Mark'.format(organizer.slug, event.slug))
assert [] == resp.data['results']
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orderpositions/?secret=z3fsn8jyufm5kpk768q69gkbyr5f4h6w'.format(
organizer.slug, event.slug))
assert [res] == resp.data['results']
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orderpositions/?secret=abc123'.format(organizer.slug, event.slug))
assert [] == resp.data['results']
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orderpositions/?pseudonymization_id=ABCDEFGHKL'.format(
organizer.slug, event.slug))
assert [res] == resp.data['results']
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orderpositions/?pseudonymization_id=FOO'.format(organizer.slug, event.slug))
assert [] == resp.data['results']
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orderpositions/?search=FO'.format(organizer.slug, event.slug))
assert [res] == resp.data['results']
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orderpositions/?search=z3fsn8j'.format(organizer.slug, event.slug))
assert [res] == resp.data['results']
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orderpositions/?search=Peter'.format(organizer.slug, event.slug))
assert [res] == resp.data['results']
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orderpositions/?search=5f4h6w'.format(organizer.slug, event.slug))
assert [] == resp.data['results']
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orderpositions/?order=FOO'.format(organizer.slug, event.slug))
assert [res] == resp.data['results']
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orderpositions/?order=BAR'.format(organizer.slug, event.slug))
assert [] == resp.data['results']
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orderpositions/?has_checkin=false'.format(organizer.slug, event.slug))
assert [res] == resp.data['results']
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orderpositions/?has_checkin=true'.format(organizer.slug, event.slug))
assert [] == resp.data['results']
with scopes_disabled():
cl = event.checkin_lists.create(name="Default")
op.checkins.create(datetime=datetime.datetime(2017, 12, 26, 10, 0, 0, tzinfo=UTC), list=cl)
res['checkins'] = [{'datetime': '2017-12-26T10:00:00Z', 'list': cl.pk, 'auto_checked_in': False}]
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orderpositions/?has_checkin=true'.format(organizer.slug, event.slug))
assert [res] == resp.data['results']
op.subevent = subevent
op.save()
res['subevent'] = subevent.pk
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orderpositions/?subevent={}'.format(organizer.slug, event.slug, subevent.pk))
assert [res] == resp.data['results']
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orderpositions/?subevent__in={},{}'.format(organizer.slug, event.slug,
subevent.pk, subevent2.pk))
assert [res] == resp.data['results']
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orderpositions/?subevent={}'.format(organizer.slug, event.slug,
subevent.pk + 1))
assert [] == resp.data['results']
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orderpositions/?include_canceled_positions=false'.format(organizer.slug, event.slug))
assert len(resp.data['results']) == 1
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/orderpositions/?include_canceled_positions=true'.format(organizer.slug, event.slug))
assert len(resp.data['results']) == 2
@pytest.mark.django_db
def test_orderposition_detail(token_client, organizer, event, order, item, question):
res = dict(TEST_ORDERPOSITION_RES)
with scopes_disabled():
op = order.positions.first()
res["id"] = op.pk
res["item"] = item.pk
res["answers"][0]["question"] = question.pk
resp = token_client.get('/api/v1/organizers/{}/events/{}/orderpositions/{}/'.format(organizer.slug, event.slug,
op.pk))
assert resp.status_code == 200
assert res == resp.data
order.status = 'p'
order.save()
event.settings.ticketoutput_pdf__enabled = True
resp = token_client.get('/api/v1/organizers/{}/events/{}/orderpositions/{}/'.format(organizer.slug, event.slug,
op.pk))
assert len(resp.data['downloads']) == 1
@pytest.mark.django_db
def test_orderposition_detail_canceled(token_client, organizer, event, order, item, question):
with scopes_disabled():
op = order.all_positions.filter(canceled=True).first()
resp = token_client.get('/api/v1/organizers/{}/events/{}/orderpositions/{}/'.format(organizer.slug, event.slug,
op.pk))
assert resp.status_code == 404
resp = token_client.get('/api/v1/organizers/{}/events/{}/orderpositions/{}/?include_canceled_positions=true'.format(
organizer.slug, event.slug, op.pk))
assert resp.status_code == 200
@pytest.mark.django_db
def test_orderposition_delete(token_client, organizer, event, order, item, question):
with scopes_disabled():
op = order.positions.first()
resp = token_client.delete('/api/v1/organizers/{}/events/{}/orderpositions/{}/'.format(
organizer.slug, event.slug, op.pk
))
assert resp.status_code == 400
assert resp.data == ['This operation would leave the order empty. Please cancel the order itself instead.']
with scopes_disabled():
op2 = OrderPosition.objects.create(
order=order,
item=item,
variation=None,
price=Decimal("23"),
attendee_name_parts={"full_name": "Peter", "_scheme": "full"},
secret="foobar",
pseudonymization_id="BAZ",
)
order.refresh_from_db()
order.total = Decimal('46')
order.save()
assert order.positions.count() == 2
resp = token_client.delete('/api/v1/organizers/{}/events/{}/orderpositions/{}/'.format(
organizer.slug, event.slug, op2.pk
))
assert resp.status_code == 204
with scopes_disabled():
assert order.positions.count() == 1
assert order.all_positions.count() == 3
order.refresh_from_db()
assert order.total == Decimal('23.25')
@pytest.fixture
def invoice(order):
testtime = datetime.datetime(2017, 12, 10, 10, 0, 0, tzinfo=UTC)
with mock.patch('django.utils.timezone.now') as mock_now:
mock_now.return_value = testtime
return generate_invoice(order)
TEST_INVOICE_RES = {
"order": "FOO",
"number": "DUMMY-00001",
"is_cancellation": False,
"invoice_from": "",
"invoice_to": "Sample company\nNew Zealand\nVAT-ID: DE123",
"date": "2017-12-10",
"refers": None,
"locale": "en",
"introductory_text": "",
"internal_reference": "",
"additional_text": "",
"payment_provider_text": "",
"footer_text": "",
"foreign_currency_display": None,
"foreign_currency_rate": None,
"foreign_currency_rate_date": None,
"lines": [
{
"position": 1,
"description": "Budget Ticket<br />Attendee: Peter",
"gross_value": "23.00",
"tax_value": "0.00",
"tax_name": "",
"tax_rate": "0.00"
},
{
"position": 2,
"description": "Payment fee",
"gross_value": "0.25",
"tax_value": "0.05",
"tax_name": "",
"tax_rate": "19.00"
}
]
}
@pytest.mark.django_db
def test_invoice_list(token_client, organizer, event, order, invoice):
res = dict(TEST_INVOICE_RES)
resp = token_client.get('/api/v1/organizers/{}/events/{}/invoices/'.format(organizer.slug, event.slug))
assert resp.status_code == 200
assert [res] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/invoices/?order=FOO'.format(organizer.slug, event.slug))
assert [res] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/invoices/?order=BAR'.format(organizer.slug, event.slug))
assert [] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/invoices/?number={}'.format(
organizer.slug, event.slug, invoice.number))
assert [res] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/invoices/?number=XXX'.format(
organizer.slug, event.slug))
assert [] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/invoices/?locale=en'.format(
organizer.slug, event.slug))
assert [res] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/invoices/?locale=de'.format(
organizer.slug, event.slug))
assert [] == resp.data['results']
with scopes_disabled():
ic = generate_cancellation(invoice)
resp = token_client.get('/api/v1/organizers/{}/events/{}/invoices/?is_cancellation=false'.format(
organizer.slug, event.slug))
assert [res] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/invoices/?is_cancellation=true'.format(
organizer.slug, event.slug))
assert len(resp.data['results']) == 1
assert resp.data['results'][0]['number'] == ic.number
resp = token_client.get('/api/v1/organizers/{}/events/{}/invoices/?refers={}'.format(
organizer.slug, event.slug, invoice.number))
assert len(resp.data['results']) == 1
assert resp.data['results'][0]['number'] == ic.number
resp = token_client.get('/api/v1/organizers/{}/events/{}/invoices/?refers={}'.format(
organizer.slug, event.slug, ic.number))
assert [] == resp.data['results']
@pytest.mark.django_db
def test_invoice_detail(token_client, organizer, event, invoice):
res = dict(TEST_INVOICE_RES)
resp = token_client.get('/api/v1/organizers/{}/events/{}/invoices/{}/'.format(organizer.slug, event.slug,
invoice.number))
assert resp.status_code == 200
assert res == resp.data
@pytest.mark.django_db
def test_invoice_regenerate(token_client, organizer, event, invoice):
with scopes_disabled():
InvoiceAddress.objects.filter(order=invoice.order).update(company="ACME Ltd")
resp = token_client.post('/api/v1/organizers/{}/events/{}/invoices/{}/regenerate/'.format(
organizer.slug, event.slug, invoice.number
))
assert resp.status_code == 204
invoice.refresh_from_db()
assert "ACME Ltd" in invoice.invoice_to
@pytest.mark.django_db
def test_invoice_reissue(token_client, organizer, event, invoice):
with scopes_disabled():
InvoiceAddress.objects.filter(order=invoice.order).update(company="ACME Ltd")
resp = token_client.post('/api/v1/organizers/{}/events/{}/invoices/{}/reissue/'.format(
organizer.slug, event.slug, invoice.number
))
assert resp.status_code == 204
invoice.refresh_from_db()
assert "ACME Ltd" not in invoice.invoice_to
with scopes_disabled():
assert invoice.order.invoices.count() == 3
invoice = invoice.order.invoices.last()
assert "ACME Ltd" in invoice.invoice_to
@pytest.mark.django_db
def test_order_mark_paid_pending(token_client, organizer, event, order):
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/mark_paid/'.format(
organizer.slug, event.slug, order.code
)
)
assert resp.status_code == 200
assert resp.data['status'] == Order.STATUS_PAID
@pytest.mark.django_db
def test_order_mark_paid_canceled(token_client, organizer, event, order):
order.status = Order.STATUS_CANCELED
order.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/mark_paid/'.format(
organizer.slug, event.slug, order.code
)
)
assert resp.status_code == 400
order.refresh_from_db()
assert order.status == Order.STATUS_CANCELED
@pytest.mark.django_db
def test_order_mark_paid_expired_quota_free(token_client, organizer, event, order, quota):
order.status = Order.STATUS_EXPIRED
order.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/mark_paid/'.format(
organizer.slug, event.slug, order.code
)
)
assert resp.status_code == 200
order.refresh_from_db()
assert order.status == Order.STATUS_PAID
@pytest.mark.django_db
def test_order_mark_paid_expired_quota_fill(token_client, organizer, event, order, quota):
order.status = Order.STATUS_EXPIRED
order.save()
quota.size = 0
quota.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/mark_paid/'.format(
organizer.slug, event.slug, order.code
)
)
assert resp.status_code == 400
order.refresh_from_db()
assert order.status == Order.STATUS_EXPIRED
@pytest.mark.django_db
def test_order_mark_paid_locked(token_client, organizer, event, order):
order.status = Order.STATUS_EXPIRED
order.save()
with event.lock():
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/mark_paid/'.format(
organizer.slug, event.slug, order.code
)
)
assert resp.status_code == 409
order.refresh_from_db()
assert order.status == Order.STATUS_EXPIRED
@pytest.mark.django_db
def test_order_reactivate(token_client, organizer, event, order, quota):
order.status = Order.STATUS_CANCELED
order.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/reactivate/'.format(
organizer.slug, event.slug, order.code
)
)
assert resp.status_code == 200
assert resp.data['status'] == Order.STATUS_PENDING
@pytest.mark.django_db
def test_order_reactivate_invalid(token_client, organizer, event, order):
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/reactivate/'.format(
organizer.slug, event.slug, order.code
)
)
assert resp.status_code == 400
@pytest.mark.django_db
def test_order_mark_canceled_pending(token_client, organizer, event, order):
djmail.outbox = []
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/mark_canceled/'.format(
organizer.slug, event.slug, order.code
)
)
assert resp.status_code == 200
assert resp.data['status'] == Order.STATUS_CANCELED
assert len(djmail.outbox) == 1
@pytest.mark.django_db
def test_order_mark_canceled_pending_fee_not_allowed(token_client, organizer, event, order):
djmail.outbox = []
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/mark_canceled/'.format(
organizer.slug, event.slug, order.code
), data={
'cancellation_fee': '7.00'
}
)
assert resp.status_code == 400
assert resp.data == {'detail': 'The cancellation fee cannot be higher than the payment credit of this order.'}
@pytest.mark.django_db
def test_order_mark_canceled_pending_no_email(token_client, organizer, event, order):
djmail.outbox = []
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/mark_canceled/'.format(
organizer.slug, event.slug, order.code
), format='json', data={
'send_email': False
}
)
assert resp.status_code == 200
assert resp.data['status'] == Order.STATUS_CANCELED
assert len(djmail.outbox) == 0
@pytest.mark.django_db
def test_order_mark_canceled_expired(token_client, organizer, event, order):
order.status = Order.STATUS_EXPIRED
order.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/mark_canceled/'.format(
organizer.slug, event.slug, order.code
)
)
assert resp.status_code == 200
order.refresh_from_db()
assert order.status == Order.STATUS_CANCELED
@pytest.mark.django_db
def test_order_mark_paid_canceled_keep_fee(token_client, organizer, event, order):
order.status = Order.STATUS_PAID
order.save()
with scopes_disabled():
order.payments.create(state=OrderPayment.PAYMENT_STATE_CONFIRMED, amount=order.total)
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/mark_canceled/'.format(
organizer.slug, event.slug, order.code
), data={
'cancellation_fee': '6.00'
}
)
assert resp.status_code == 200
assert resp.data['status'] == Order.STATUS_PAID
order.refresh_from_db()
assert order.status == Order.STATUS_PAID
assert order.total == Decimal('6.00')
@pytest.mark.django_db
def test_order_mark_paid_refunded(token_client, organizer, event, order):
order.status = Order.STATUS_PAID
order.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/mark_refunded/'.format(
organizer.slug, event.slug, order.code
)
)
assert resp.status_code == 200
assert resp.data['status'] == Order.STATUS_CANCELED
@pytest.mark.django_db
def test_order_mark_canceled_refunded(token_client, organizer, event, order):
order.status = Order.STATUS_CANCELED
order.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/mark_refunded/'.format(
organizer.slug, event.slug, order.code
)
)
assert resp.status_code == 400
order.refresh_from_db()
assert order.status == Order.STATUS_CANCELED
@pytest.mark.django_db
def test_order_mark_paid_unpaid(token_client, organizer, event, order):
order.status = Order.STATUS_PAID
order.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/mark_pending/'.format(
organizer.slug, event.slug, order.code
)
)
assert resp.status_code == 200
assert resp.data['status'] == Order.STATUS_PENDING
@pytest.mark.django_db
def test_order_mark_canceled_unpaid(token_client, organizer, event, order):
order.status = Order.STATUS_CANCELED
order.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/mark_pending/'.format(
organizer.slug, event.slug, order.code
)
)
assert resp.status_code == 400
order.refresh_from_db()
assert order.status == Order.STATUS_CANCELED
@pytest.mark.django_db
def test_order_mark_pending_expired(token_client, organizer, event, order):
order.status = Order.STATUS_PENDING
order.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/mark_expired/'.format(
organizer.slug, event.slug, order.code
)
)
assert resp.status_code == 200
assert resp.data['status'] == Order.STATUS_EXPIRED
@pytest.mark.django_db
def test_order_mark_paid_expired(token_client, organizer, event, order):
order.status = Order.STATUS_PAID
order.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/mark_expired/'.format(
organizer.slug, event.slug, order.code
)
)
assert resp.status_code == 400
order.refresh_from_db()
assert order.status == Order.STATUS_PAID
@pytest.mark.django_db
def test_order_extend_paid(token_client, organizer, event, order):
order.status = Order.STATUS_PAID
order.save()
newdate = (now() + datetime.timedelta(days=20)).strftime("%Y-%m-%d")
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/extend/'.format(
organizer.slug, event.slug, order.code
), format='json', data={
'expires': newdate
}
)
assert resp.status_code == 400
order.refresh_from_db()
assert order.status == Order.STATUS_PAID
@pytest.mark.django_db
def test_order_extend_pending(token_client, organizer, event, order):
order.status = Order.STATUS_PENDING
order.save()
newdate = (now() + datetime.timedelta(days=20)).strftime("%Y-%m-%d")
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/extend/'.format(
organizer.slug, event.slug, order.code
), format='json', data={
'expires': newdate
}
)
assert resp.status_code == 200
order.refresh_from_db()
assert order.status == Order.STATUS_PENDING
assert order.expires.astimezone(event.timezone).strftime("%Y-%m-%d %H:%M:%S") == newdate[:10] + " 23:59:59"
@pytest.mark.django_db
def test_order_extend_expired_quota_empty(token_client, organizer, event, order, quota):
order.status = Order.STATUS_EXPIRED
order.save()
quota.size = 0
quota.save()
newdate = (now() + datetime.timedelta(days=20)).strftime("%Y-%m-%d")
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/extend/'.format(
organizer.slug, event.slug, order.code
), format='json', data={
'expires': newdate
}
)
assert resp.status_code == 400
order.refresh_from_db()
assert order.status == Order.STATUS_EXPIRED
@pytest.mark.django_db
def test_order_extend_expired_quota_ignore(token_client, organizer, event, order, quota):
order.status = Order.STATUS_EXPIRED
order.save()
quota.size = 0
quota.save()
newdate = (now() + datetime.timedelta(days=20)).strftime("%Y-%m-%d")
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/extend/'.format(
organizer.slug, event.slug, order.code
), format='json', data={
'expires': newdate,
'force': True
}
)
assert resp.status_code == 200
order.refresh_from_db()
assert order.status == Order.STATUS_PENDING
assert order.expires.astimezone(event.timezone).strftime("%Y-%m-%d %H:%M:%S") == newdate[:10] + " 23:59:59"
@pytest.mark.django_db
def test_order_extend_expired_quota_waiting_list(token_client, organizer, event, order, item, quota):
order.status = Order.STATUS_EXPIRED
order.save()
quota.size = 1
quota.save()
with scopes_disabled():
event.waitinglistentries.create(item=item, email='foo@bar.com')
newdate = (now() + datetime.timedelta(days=20)).strftime("%Y-%m-%d")
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/extend/'.format(
organizer.slug, event.slug, order.code
), format='json', data={
'expires': newdate,
}
)
assert resp.status_code == 200
order.refresh_from_db()
assert order.status == Order.STATUS_PENDING
assert order.expires.astimezone(event.timezone).strftime("%Y-%m-%d %H:%M:%S") == newdate[:10] + " 23:59:59"
@pytest.mark.django_db
def test_order_extend_expired_quota_left(token_client, organizer, event, order, quota):
order.status = Order.STATUS_EXPIRED
order.save()
quota.size = 2
quota.save()
newdate = (now() + datetime.timedelta(days=20)).strftime("%Y-%m-%d")
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/extend/'.format(
organizer.slug, event.slug, order.code
), format='json', data={
'expires': newdate,
}
)
assert resp.status_code == 200
order.refresh_from_db()
assert order.status == Order.STATUS_PENDING
assert order.expires.astimezone(event.timezone).strftime("%Y-%m-%d %H:%M:%S") == newdate[:10] + " 23:59:59"
@pytest.mark.django_db
def test_order_pending_approve(token_client, organizer, event, order):
order.require_approval = True
order.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/approve/'.format(
organizer.slug, event.slug, order.code
)
)
assert resp.status_code == 200
assert resp.data['status'] == Order.STATUS_PENDING
assert not resp.data['require_approval']
@pytest.mark.django_db
def test_order_invalid_state_approve(token_client, organizer, event, order):
order.require_approval = True
order.status = Order.STATUS_CANCELED
order.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/approve/'.format(
organizer.slug, event.slug, order.code
)
)
assert resp.status_code == 400
order.require_approval = False
order.status = Order.STATUS_PENDING
order.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/approve/'.format(
organizer.slug, event.slug, order.code
)
)
assert resp.status_code == 400
@pytest.mark.django_db
def test_order_pending_deny(token_client, organizer, event, order):
order.require_approval = True
order.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/deny/'.format(
organizer.slug, event.slug, order.code
)
)
assert resp.status_code == 200
assert resp.data['status'] == Order.STATUS_CANCELED
assert resp.data['require_approval']
@pytest.mark.django_db
def test_order_invalid_state_deny(token_client, organizer, event, order):
order.require_approval = True
order.status = Order.STATUS_CANCELED
order.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/deny/'.format(
organizer.slug, event.slug, order.code
)
)
assert resp.status_code == 400
order.require_approval = False
order.status = Order.STATUS_PENDING
order.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/deny/'.format(
organizer.slug, event.slug, order.code
)
)
assert resp.status_code == 400
ORDER_CREATE_PAYLOAD = {
"email": "dummy@dummy.test",
"locale": "en",
"sales_channel": "web",
"fees": [
{
"fee_type": "payment",
"value": "0.25",
"description": "",
"internal_type": "",
"tax_rule": None
}
],
"payment_provider": "banktransfer",
"invoice_address": {
"is_business": False,
"company": "Sample company",
"name_parts": {"full_name": "Fo"},
"street": "Bar",
"state": "",
"zipcode": "",
"city": "Sample City",
"country": "NZ",
"internal_reference": "",
"vat_id": ""
},
"positions": [
{
"positionid": 1,
"item": 1,
"variation": None,
"price": "23.00",
"attendee_name_parts": {"full_name": "Peter"},
"attendee_email": None,
"addon_to": None,
"company": "FOOCORP",
"answers": [
{
"question": 1,
"answer": "S",
"options": []
}
],
"subevent": None
}
],
}
@pytest.mark.django_db
def test_order_create(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
assert o.email == "dummy@dummy.test"
assert o.locale == "en"
assert o.total == Decimal('23.25')
assert o.status == Order.STATUS_PENDING
assert o.sales_channel == "web"
assert not o.testmode
with scopes_disabled():
p = o.payments.first()
assert p.provider == "banktransfer"
assert p.amount == o.total
assert p.state == "created"
with scopes_disabled():
fee = o.fees.first()
assert fee.fee_type == "payment"
assert fee.value == Decimal('0.25')
ia = o.invoice_address
assert ia.company == "Sample company"
assert ia.name_parts == {"full_name": "Fo", "_scheme": "full"}
assert ia.name_cached == "Fo"
with scopes_disabled():
assert o.positions.count() == 1
pos = o.positions.first()
assert pos.item == item
assert pos.price == Decimal("23.00")
assert pos.attendee_name_parts == {"full_name": "Peter", "_scheme": "full"}
assert pos.company == "FOOCORP"
with scopes_disabled():
answ = pos.answers.first()
assert answ.question == question
assert answ.answer == "S"
@pytest.mark.django_db
def test_order_create_simulate(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
question.type = Question.TYPE_CHOICE_MULTIPLE
question.save()
with scopes_disabled():
opt = question.options.create(answer="L")
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
res['positions'][0]['answers'][0]['options'] = [opt.pk]
res['simulate'] = True
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
assert Order.objects.count() == 0
assert QuestionAnswer.objects.count() == 0
assert OrderPosition.objects.count() == 0
assert OrderFee.objects.count() == 0
assert InvoiceAddress.objects.count() == 0
d = resp.data
del d['last_modified']
del d['secret']
del d['url']
del d['expires']
del d['invoice_address']['last_modified']
del d['positions'][0]['secret']
assert d == {
'code': 'PREVIEW',
'status': 'n',
'testmode': False,
'email': 'dummy@dummy.test',
'locale': 'en',
'datetime': None,
'payment_date': None,
'payment_provider': None,
'fees': [
{
'fee_type': 'payment',
'value': '0.25',
'description': '',
'internal_type': '',
'tax_rate': '0.00',
'tax_value': '0.00',
'tax_rule': None,
'canceled': False
}
],
'total': '23.25',
'comment': '',
'invoice_address': {
'is_business': False,
'company': 'Sample company',
'name': 'Fo',
'name_parts': {'full_name': 'Fo', '_scheme': 'full'},
'street': 'Bar',
'zipcode': '',
'city': 'Sample City',
'country': 'NZ',
'state': '',
'vat_id': '',
'vat_id_validated': False,
'internal_reference': ''
},
'positions': [
{
'id': 0,
'order': '',
'positionid': 1,
'item': item.pk,
'variation': None,
'price': '23.00',
'attendee_name': 'Peter',
'attendee_name_parts': {'full_name': 'Peter', '_scheme': 'full'},
'attendee_email': None,
'voucher': None,
'tax_rate': '0.00',
'tax_value': '0.00',
'addon_to': None,
'subevent': None,
'checkins': [],
'downloads': [],
'answers': [
{'question': question.pk, 'answer': 'L', 'question_identifier': 'ABC',
'options': [opt.pk],
'option_identifiers': [opt.identifier]}
],
'tax_rule': None,
'pseudonymization_id': 'PREVIEW',
'seat': None,
'company': "FOOCORP",
'street': None,
'city': None,
'zipcode': None,
'state': None,
'country': None,
'canceled': False
}
],
'downloads': [],
'checkin_attention': False,
'payments': [],
'refunds': [],
'require_approval': False,
'sales_channel': 'web',
}
@pytest.mark.django_db
def test_order_create_autocheckin(token_client, organizer, event, item, quota, question, clist_autocheckin):
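    # Positions are only auto-checked-in if the order's sales channel is listed in the
    # check-in list's auto_checkin_sales_channels.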
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
assert "web" in clist_autocheckin.auto_checkin_sales_channels
assert o.positions.first().checkins.first().auto_checked_in
clist_autocheckin.auto_checkin_sales_channels = []
clist_autocheckin.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
assert clist_autocheckin.auto_checkin_sales_channels == []
assert o.positions.first().checkins.count() == 0
@pytest.mark.django_db
def test_order_create_invoice_address_optional(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
del res['invoice_address']
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
with pytest.raises(InvoiceAddress.DoesNotExist):
o.invoice_address
@pytest.mark.django_db
def test_order_create_sales_channel_optional(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
del res['sales_channel']
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
assert o.sales_channel == "web"
@pytest.mark.django_db
def test_order_create_sales_channel_invalid(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
res['sales_channel'] = 'foo'
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {'sales_channel': ['Unknown sales channel.']}
@pytest.mark.django_db
def test_order_create_in_test_mode(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
res['testmode'] = True
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
assert o.testmode
@pytest.mark.django_db
def test_order_create_in_test_mode_saleschannel_limited(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
res['testmode'] = True
res['sales_channel'] = 'bar'
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {'testmode': ['This sales channel does not provide support for test mode.']}
@pytest.mark.django_db
def test_order_create_attendee_name_optional(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['attendee_name'] = None
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
del res['positions'][0]['attendee_name_parts']
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
assert o.positions.first().attendee_name_parts == {}
@pytest.mark.django_db
def test_order_create_legacy_attendee_name(token_client, organizer, event, item, quota, question):
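    # The legacy attendee_name field is rejected while attendee_name_parts is also given;
    # on its own it is accepted and stored as a '_legacy' name part.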
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['attendee_name'] = 'Peter'
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
del res['positions'][0]['attendee_name_parts']
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
assert o.positions.first().attendee_name_parts == {"_legacy": "Peter"}
@pytest.mark.django_db
def test_order_create_legacy_invoice_name(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['invoice_address']['name'] = 'Peter'
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
del res['invoice_address']['name_parts']
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
assert o.invoice_address.name_parts == {"_legacy": "Peter"}
@pytest.mark.django_db
def test_order_create_code_optional(token_client, organizer, event, item, quota, question):
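    # An explicit order code is accepted once, rejected when already in use, and rejected
    # when it contains characters not allowed in order codes (the lower-case 'a' below).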
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
res['code'] = 'ABCDE'
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
assert o.code == "ABCDE"
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {'code': ['This order code is already in use.']}
res['code'] = 'ABaDE'
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {'code': ['This order code contains invalid characters.']}
@pytest.mark.django_db
def test_order_email_optional(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
del res['email']
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
assert not o.email
@pytest.mark.django_db
def test_order_create_payment_provider_optional_free(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
res['positions'][0]['price'] = '0.00'
res['positions'][0]['status'] = 'p'
del res['payment_provider']
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
assert not o.payments.exists()
@pytest.mark.django_db
def test_order_create_payment_info_optional(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
res['payment_info'] = {
'foo': {
'bar': [1, 2],
'test': False
}
}
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
p = o.payments.first()
assert p.provider == "banktransfer"
assert p.amount == o.total
assert json.loads(p.info) == res['payment_info']
@pytest.mark.django_db
def test_order_create_position_secret_optional(token_client, organizer, event, item, quota, question):
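    # Position secrets are generated automatically unless supplied; supplying a secret
    # that already exists is rejected.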
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
assert o.positions.first().secret
res['positions'][0]['secret'] = "aaa"
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
assert o.positions.first().secret == "aaa"
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {'positions': [{'secret': ['You cannot assign a position secret that already exists.']}]}
@pytest.mark.django_db
def test_order_create_tax_rules(token_client, organizer, event, item, quota, question, taxrule):
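    # Both the payment fee and the position pick up the 19% tax rule; 3.67 is the 19% tax
    # contained in the gross position price of 23.00.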
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['fees'][0]['tax_rule'] = taxrule.pk
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
item.tax_rule = taxrule
item.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
fee = o.fees.first()
assert fee.fee_type == "payment"
assert fee.value == Decimal('0.25')
assert fee.tax_rate == Decimal('19.00')
assert fee.tax_rule == taxrule
ia = o.invoice_address
assert ia.company == "Sample company"
with scopes_disabled():
pos = o.positions.first()
assert pos.item == item
assert pos.tax_rate == Decimal('19.00')
assert pos.tax_value == Decimal('3.67')
assert pos.tax_rule == taxrule
@pytest.mark.django_db
def test_order_create_fee_type_validation(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['fees'][0]['fee_type'] = 'unknown'
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {'fees': [{'fee_type': ['"unknown" is not a valid choice.']}]}
@pytest.mark.django_db
def test_order_create_fee_as_percentage(token_client, organizer, event, item, quota, question):
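    # With _treat_value_as_percentage, the fee value is interpreted as a percentage of the
    # position total: 10% of 23.00 gives a 2.30 fee and a 25.30 order total.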
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['fees'][0]['_treat_value_as_percentage'] = True
res['fees'][0]['value'] = '10.00'
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
fee = o.fees.first()
assert fee.value == Decimal('2.30')
assert o.total == Decimal('25.30')
@pytest.mark.django_db
def test_order_create_fee_with_auto_tax(token_client, organizer, event, item, quota, question, taxrule):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['fees'][0]['_split_taxes_like_products'] = True
res['fees'][0]['_treat_value_as_percentage'] = True
res['fees'][0]['value'] = '10.00'
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
item.tax_rule = taxrule
item.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
fee = o.fees.first()
assert fee.value == Decimal('2.30')
assert fee.tax_rate == Decimal('19.00')
assert o.total == Decimal('25.30')
@pytest.mark.django_db
def test_order_create_tax_rule_wrong_event(token_client, organizer, event, item, quota, question, taxrule2):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['fees'][0]['tax_rule'] = taxrule2.pk
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {'fees': [{'tax_rule': ['The specified tax rate does not belong to this event.']}]}
@pytest.mark.django_db
def test_order_create_subevent_not_allowed(token_client, organizer, event, item, quota, question, subevent2):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
res['positions'][0]['subevent'] = subevent2.pk
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {'positions': [{'subevent': ['You cannot set a subevent for this event.']}]}
@pytest.mark.django_db
def test_order_create_empty(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'] = []
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {'positions': ['An order cannot be empty.']}
@pytest.mark.django_db
def test_order_create_subevent_validation(token_client, organizer, event, item, subevent, subevent2, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {'positions': [{'subevent': ['You need to set a subevent.']}]}
res['positions'][0]['subevent'] = subevent2.pk
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {'positions': [{'subevent': ['The specified subevent does not belong to this event.']}]}
@pytest.mark.django_db
def test_order_create_item_validation(token_client, organizer, event, item, item2, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
item.active = False
item.save()
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {'positions': [{'item': ['The specified item is not active.']}]}
item.active = True
item.save()
res['positions'][0]['item'] = item2.pk
res['positions'][0]['answers'][0]['question'] = question.pk
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {'positions': [{'item': ['The specified item does not belong to this event.']}]}
with scopes_disabled():
var2 = item2.variations.create(value="A")
quota.variations.add(var2)
res['positions'][0]['item'] = item.pk
res['positions'][0]['variation'] = var2.pk
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {'positions': [{'variation': ['You cannot specify a variation for this item.']}]}
with scopes_disabled():
var1 = item.variations.create(value="A")
res['positions'][0]['item'] = item.pk
res['positions'][0]['variation'] = var1.pk
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {'positions': [{'item': ['The product "Budget Ticket" is not assigned to a quota.']}]}
with scopes_disabled():
quota.variations.add(var1)
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
res['positions'][0]['variation'] = var2.pk
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {
'positions': [{'variation': ['The specified variation does not belong to the specified item.']}]}
res['positions'][0]['variation'] = None
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {'positions': [{'variation': ['You should specify a variation for this item.']}]}
@pytest.mark.django_db
def test_order_create_positionids_addons(token_client, organizer, event, item, quota):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'] = [
{
"positionid": 1,
"item": item.pk,
"variation": None,
"price": "23.00",
"attendee_name_parts": {"full_name": "Peter"},
"attendee_email": None,
"addon_to": None,
"answers": [],
"subevent": None
},
{
"positionid": 2,
"item": item.pk,
"variation": None,
"price": "23.00",
"attendee_name_parts": {"full_name": "Peter"},
"attendee_email": None,
"addon_to": 1,
"answers": [],
"subevent": None
}
]
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
pos1 = o.positions.first()
pos2 = o.positions.last()
assert pos2.addon_to == pos1
@pytest.mark.django_db
def test_order_create_positionid_validation(token_client, organizer, event, item, quota):
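    # addon_to must reference a position ID transmitted before it; position IDs must either
    # be omitted for all positions (then assigned automatically) or given for all of them
    # and be consecutive.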
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'] = [
{
"positionid": 1,
"item": item.pk,
"variation": None,
"price": "23.00",
"attendee_name_parts": {"full_name": "Peter"},
"attendee_email": None,
"addon_to": None,
"answers": [],
"subevent": None
},
{
"positionid": 2,
"item": item.pk,
"variation": None,
"price": "23.00",
"attendee_name_parts": {"full_name": "Peter"},
"attendee_email": None,
"addon_to": 2,
"answers": [],
"subevent": None
}
]
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {
'positions': [
{},
{
'addon_to': [
'If you set addon_to, you need to make sure that the '
'referenced position ID exists and is transmitted directly '
'before its add-ons.'
]
}
]
}
res['positions'] = [
{
"item": item.pk,
"variation": None,
"price": "23.00",
"attendee_name_parts": {"full_name": "Peter"},
"attendee_email": None,
"addon_to": None,
"answers": [],
"subevent": None
},
{
"item": item.pk,
"variation": None,
"price": "23.00",
"attendee_name_parts": {"full_name": "Peter"},
"attendee_email": None,
"addon_to": 2,
"answers": [],
"subevent": None
}
]
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {'positions': [
{'positionid': ["If you set addon_to on any position, you need to specify position IDs manually."]},
{'positionid': ["If you set addon_to on any position, you need to specify position IDs manually."]}
]}
res['positions'] = [
{
"positionid": 1,
"item": item.pk,
"variation": None,
"price": "23.00",
"attendee_name_parts": {"full_name": "Peter"},
"attendee_email": None,
"answers": [],
"subevent": None
},
{
"item": item.pk,
"variation": None,
"price": "23.00",
"attendee_name_parts": {"full_name": "Peter"},
"attendee_email": None,
"answers": [],
"subevent": None
}
]
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {
'positions': [
{},
{
'positionid': ['If you set position IDs manually, you need to do so for all positions.']
}
]
}
res['positions'] = [
{
"positionid": 1,
"item": item.pk,
"variation": None,
"price": "23.00",
"attendee_name_parts": {"full_name": "Peter"},
"attendee_email": None,
"answers": [],
"subevent": None
},
{
"positionid": 3,
"item": item.pk,
"variation": None,
"price": "23.00",
"attendee_name_parts": {"full_name": "Peter"},
"attendee_email": None,
"answers": [],
"subevent": None
}
]
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {
'positions': [
{},
{
'positionid': ['Position IDs need to be consecutive.']
}
]
}
res['positions'] = [
{
"item": item.pk,
"variation": None,
"price": "23.00",
"attendee_name_parts": {"full_name": "Peter"},
"attendee_email": None,
"answers": [],
"subevent": None
},
{
"item": item.pk,
"variation": None,
"price": "23.00",
"attendee_name_parts": {"full_name": "Peter"},
"attendee_email": None,
"answers": [],
"subevent": None
}
]
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
assert o.positions.first().positionid == 1
assert o.positions.last().positionid == 2
@pytest.mark.django_db
def test_order_create_answer_validation(token_client, organizer, event, item, quota, question, question2):
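    # Exercises answer validation and normalisation for each question type: choice,
    # multiple choice, file upload, number, boolean, date, datetime and time.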
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question2.pk
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {
'positions': [{'answers': [{'question': ['The specified question does not belong to this event.']}]}]}
res['positions'][0]['answers'][0]['question'] = question.pk
res['positions'][0]['answers'][0]['options'] = [question.options.first().pk]
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {'positions': [{'answers': [
{'non_field_errors': ['You should not specify options if the question is not of a choice type.']}]}]}
question.type = Question.TYPE_CHOICE
question.save()
res['positions'][0]['answers'][0]['options'] = []
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {'positions': [
{'answers': [{'non_field_errors': ['You need to specify options if the question is of a choice type.']}]}]}
with scopes_disabled():
question.options.create(answer="L")
with scopes_disabled():
res['positions'][0]['answers'][0]['options'] = [
question.options.first().pk,
question.options.last().pk,
]
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {
'positions': [{'answers': [{'non_field_errors': ['You can specify at most one option for this question.']}]}]}
question.type = Question.TYPE_FILE
question.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {
'positions': [{'answers': [{'non_field_errors': ['File uploads are currently not supported via the API.']}]}]}
question.type = Question.TYPE_CHOICE_MULTIPLE
question.save()
with scopes_disabled():
res['positions'][0]['answers'][0]['options'] = [
question.options.first().pk,
question.options.last().pk,
]
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
pos = o.positions.first()
answ = pos.answers.first()
assert answ.question == question
assert answ.answer == "XL, L"
question.type = Question.TYPE_NUMBER
question.save()
res['positions'][0]['answers'][0]['options'] = []
res['positions'][0]['answers'][0]['answer'] = '3.45'
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
pos = o.positions.first()
answ = pos.answers.first()
assert answ.answer == "3.45"
question.type = Question.TYPE_NUMBER
question.save()
res['positions'][0]['answers'][0]['options'] = []
res['positions'][0]['answers'][0]['answer'] = 'foo'
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {'positions': [{'answers': [{'non_field_errors': ['A valid number is required.']}]}]}
question.type = Question.TYPE_BOOLEAN
question.save()
res['positions'][0]['answers'][0]['options'] = []
res['positions'][0]['answers'][0]['answer'] = 'True'
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
pos = o.positions.first()
answ = pos.answers.first()
assert answ.answer == "True"
question.type = Question.TYPE_BOOLEAN
question.save()
res['positions'][0]['answers'][0]['answer'] = '0'
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
pos = o.positions.first()
answ = pos.answers.first()
assert answ.answer == "False"
question.type = Question.TYPE_BOOLEAN
question.save()
res['positions'][0]['answers'][0]['answer'] = 'bla'
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {
'positions': [{'answers': [{'non_field_errors': ['Please specify "true" or "false" for boolean questions.']}]}]}
question.type = Question.TYPE_DATE
question.save()
res['positions'][0]['answers'][0]['answer'] = '2018-05-14'
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
pos = o.positions.first()
answ = pos.answers.first()
assert answ.answer == "2018-05-14"
question.type = Question.TYPE_DATE
question.save()
res['positions'][0]['answers'][0]['answer'] = 'bla'
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {'positions': [{'answers': [
{'non_field_errors': ['Date has wrong format. Use one of these formats instead: YYYY-MM-DD.']}]}]}
question.type = Question.TYPE_DATETIME
question.save()
res['positions'][0]['answers'][0]['answer'] = '2018-05-14T13:00:00Z'
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
pos = o.positions.first()
answ = pos.answers.first()
assert answ.answer == "2018-05-14 13:00:00+00:00"
question.type = Question.TYPE_DATETIME
question.save()
res['positions'][0]['answers'][0]['answer'] = 'bla'
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {'positions': [{'answers': [{'non_field_errors': [
'Datetime has wrong format. Use one of these formats instead: '
'YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HH:MM|-HH:MM|Z].']}]}]}
question.type = Question.TYPE_TIME
question.save()
res['positions'][0]['answers'][0]['answer'] = '13:00:00'
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
pos = o.positions.first()
answ = pos.answers.first()
assert answ.answer == "13:00:00"
question.type = Question.TYPE_TIME
question.save()
res['positions'][0]['answers'][0]['answer'] = 'bla'
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {'positions': [{'answers': [
{'non_field_errors': ['Time has wrong format. Use one of these formats instead: hh:mm[:ss[.uuuuuu]].']}]}]}
@pytest.mark.django_db
def test_order_create_quota_validation(token_client, organizer, event, item, quota, question):
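    # Quota is checked per position; with 'force': True the order is created even though
    # the quota is exhausted.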
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'] = [
{
"positionid": 1,
"item": item.pk,
"variation": None,
"price": "23.00",
"attendee_name_parts": {"full_name": "Peter"},
"attendee_email": None,
"addon_to": None,
"answers": [],
"subevent": None
},
{
"positionid": 2,
"item": item.pk,
"variation": None,
"price": "23.00",
"attendee_name_parts": {"full_name": "Peter"},
"attendee_email": None,
"addon_to": 1,
"answers": [],
"subevent": None
}
]
quota.size = 0
quota.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {
'positions': [
{'item': ['There is not enough quota available on quota "Budget Quota" to perform the operation.']},
{'item': ['There is not enough quota available on quota "Budget Quota" to perform the operation.']},
]
}
quota.size = 1
quota.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {
'positions': [
{},
{'item': ['There is not enough quota available on quota "Budget Quota" to perform the operation.']},
]
}
res['force'] = True
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
@pytest.mark.django_db
def test_order_create_quota_consume_cart(token_client, organizer, event, item, quota, question):
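    # consume_carts releases quota held by the referenced cart positions and deletes them
    # once the order has been created.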
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
with scopes_disabled():
cr = CartPosition.objects.create(
event=event, cart_id="uxLJBUMEcnxOLI2EuxLYN1hWJq9GKu4yWL9FEgs2m7M0vdFi@api", item=item,
price=23,
expires=now() + datetime.timedelta(hours=3)
)
quota.size = 1
quota.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {
'positions': [
{'item': ['There is not enough quota available on quota "Budget Quota" to perform the operation.']},
]
}
res['consume_carts'] = [cr.cart_id]
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
assert not CartPosition.objects.filter(pk=cr.pk).exists()
@pytest.mark.django_db
def test_order_create_quota_consume_cart_expired(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
with scopes_disabled():
cr = CartPosition.objects.create(
event=event, cart_id="uxLJBUMEcnxOLI2EuxLYN1hWJq9GKu4yWL9FEgs2m7M0vdFi@api", item=item,
price=23,
expires=now() - datetime.timedelta(hours=3)
)
quota.size = 0
quota.save()
res['consume_carts'] = [cr.cart_id]
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {
'positions': [
{'item': ['There is not enough quota available on quota "Budget Quota" to perform the operation.']},
]
}
@pytest.mark.django_db
def test_order_create_free(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['fees'] = []
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
res['positions'][0]['price'] = '0.00'
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
assert o.total == Decimal('0.00')
assert o.status == Order.STATUS_PAID
with scopes_disabled():
p = o.payments.first()
assert p.provider == "free"
assert p.amount == o.total
assert p.state == "confirmed"
@pytest.mark.django_db
def test_order_create_invalid_payment_provider(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['payment_provider'] = 'foo'
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {'payment_provider': ['The given payment provider is not known.']}
@pytest.mark.django_db
def test_order_create_invalid_free_order(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['payment_provider'] = 'free'
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == ['You cannot use the "free" payment provider for non-free orders.']
@pytest.mark.django_db
def test_order_create_invalid_status(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['status'] = 'e'
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {'status': ['"e" is not a valid choice.']}
@pytest.mark.django_db
def test_order_create_paid_generate_invoice(token_client, organizer, event, item, quota, question):
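    # With invoice_generate='paid', creating an order as paid generates an invoice and a
    # confirmed payment carrying the supplied payment_date.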
event.settings.invoice_generate = 'paid'
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['status'] = 'p'
res['payment_date'] = '2019-04-01 08:20:00Z'
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
assert o.invoices.count() == 1
p = o.payments.first()
assert p.provider == "banktransfer"
assert p.amount == o.total
assert p.state == "confirmed"
assert p.payment_date.year == 2019
assert p.payment_date.month == 4
assert p.payment_date.day == 1
assert p.payment_date.hour == 8
assert p.payment_date.minute == 20
@pytest.fixture
def seat(event, organizer, item):
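    # Creates a seating plan, maps the ticket item to layout category 'Stalls' and returns
    # a single seat 'A1'.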
SeatingPlan.objects.create(
name="Plan", organizer=organizer, layout="{}"
)
event.seat_category_mappings.create(
layout_category='Stalls', product=item
)
return event.seats.create(name="A1", product=item, seat_guid="A1")
@pytest.mark.django_db
def test_order_create_with_seat(token_client, organizer, event, item, quota, seat, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['seat'] = seat.seat_guid
res['positions'][0]['answers'][0]['question'] = question.pk
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
p = o.positions.first()
assert p.seat == seat
@pytest.mark.django_db
def test_order_create_with_blocked_seat_allowed(token_client, organizer, event, item, quota, seat, question):
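    # A blocked seat can be booked when the order's sales channel is listed in
    # seating_allow_blocked_seats_for_channel.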
seat.blocked = True
seat.save()
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['seat'] = seat.seat_guid
res['positions'][0]['answers'][0]['question'] = question.pk
res['sales_channel'] = 'bar'
event.settings.seating_allow_blocked_seats_for_channel = ['bar']
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
@pytest.mark.django_db
def test_order_create_with_blocked_seat(token_client, organizer, event, item, quota, seat, question):
seat.blocked = True
seat.save()
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['seat'] = seat.seat_guid
res['positions'][0]['answers'][0]['question'] = question.pk
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {
'positions': [
{'seat': ['The selected seat "A1" is not available.']},
]
}
@pytest.mark.django_db
def test_order_create_with_used_seat(token_client, organizer, event, item, quota, seat, question):
CartPosition.objects.create(
event=event, cart_id='aaa', item=item,
price=21.5, expires=now() + datetime.timedelta(minutes=10), seat=seat
)
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['seat'] = seat.seat_guid
res['positions'][0]['answers'][0]['question'] = question.pk
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {
'positions': [
{'seat': ['The selected seat "A1" is not available.']},
]
}
@pytest.mark.django_db
def test_order_create_with_unknown_seat(token_client, organizer, event, item, quota, seat, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['seat'] = seat.seat_guid + '_'
res['positions'][0]['answers'][0]['question'] = question.pk
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {
'positions': [
{'seat': ['The specified seat does not exist.']},
]
}
@pytest.mark.django_db
def test_order_create_require_seat(token_client, organizer, event, item, quota, seat, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {
'positions': [
{'seat': ['The specified product requires to choose a seat.']},
]
}
@pytest.mark.django_db
def test_order_create_unseated(token_client, organizer, event, item, quota, seat, question):
with scopes_disabled():
item2 = event.items.create(name="Budget Ticket", default_price=23)
quota.items.add(item2)
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item2.pk
res['positions'][0]['answers'][0]['question'] = question.pk
res['positions'][0]['seat'] = seat.seat_guid
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {
'positions': [
{'seat': ['The specified product does not allow to choose a seat.']},
]
}
@pytest.mark.django_db
def test_order_create_with_duplicate_seat(token_client, organizer, event, item, quota, seat, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'] = [
{
"positionid": 1,
"item": item.pk,
"variation": None,
"price": "23.00",
"attendee_name_parts": {"full_name": "Peter"},
"attendee_email": None,
"addon_to": None,
"answers": [],
"subevent": None,
"seat": seat.seat_guid
},
{
"positionid": 2,
"item": item.pk,
"variation": None,
"price": "23.00",
"attendee_name_parts": {"full_name": "Peter"},
"attendee_email": None,
"addon_to": 1,
"answers": [],
"subevent": None,
"seat": seat.seat_guid
}
]
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {
'positions': [
{},
{'seat': ['The selected seat "A1" is not available.']},
]
}
@pytest.mark.django_db
def test_order_create_with_seat_consumed_from_cart(token_client, organizer, event, item, quota, seat, question):
CartPosition.objects.create(
event=event, cart_id='aaa', item=item,
price=21.5, expires=now() + datetime.timedelta(minutes=10), seat=seat
)
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['seat'] = seat.seat_guid
res['positions'][0]['answers'][0]['question'] = question.pk
res['consume_carts'] = ['aaa']
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
p = o.positions.first()
assert p.seat == seat
@pytest.mark.django_db
def test_order_create_send_no_emails(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
djmail.outbox = []
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
assert len(djmail.outbox) == 0
@pytest.mark.django_db
def test_order_create_send_emails(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
res['send_mail'] = True
djmail.outbox = []
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
assert len(djmail.outbox) == 1
assert djmail.outbox[0].subject == "Your order: {}".format(resp.data['code'])
@pytest.mark.django_db
def test_order_create_send_emails_free(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['price'] = '0.00'
res['payment_provider'] = 'free'
del res['fees']
res['positions'][0]['answers'][0]['question'] = question.pk
res['send_mail'] = True
djmail.outbox = []
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
assert len(djmail.outbox) == 1
assert djmail.outbox[0].subject == "Your order: {}".format(resp.data['code'])
@pytest.mark.django_db
def test_order_create_send_emails_paid(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
res['send_mail'] = True
res['status'] = 'p'
djmail.outbox = []
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
assert len(djmail.outbox) == 2
assert djmail.outbox[0].subject == "Your order: {}".format(resp.data['code'])
assert djmail.outbox[1].subject == "Payment received for your order: {}".format(resp.data['code'])
@pytest.mark.django_db
def test_order_paid_require_payment_method(token_client, organizer, event, item, quota, question):
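    # A paid order requires a payment provider; a pending order can be created without one,
    # in which case no payment object is created.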
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
del res['payment_provider']
res['status'] = 'p'
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == [
'You cannot create a paid order without a payment provider.'
]
res['status'] = "n"
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
assert not o.payments.exists()
@pytest.mark.django_db
def test_order_create_auto_pricing(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
del res['positions'][0]['price']
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
p = o.positions.first()
assert p.price == item.default_price
assert o.total == item.default_price + Decimal('0.25')
@pytest.mark.django_db
def test_order_create_auto_pricing_reverse_charge(token_client, organizer, event, item, quota, question, taxrule):
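    # EU reverse charge: with a validated foreign business VAT ID the 19% tax is removed
    # from the gross price, so 23.00 / 1.19 = 19.33 at a 0% tax rate.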
taxrule.eu_reverse_charge = True
taxrule.home_country = Country('DE')
taxrule.save()
item.tax_rule = taxrule
item.save()
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
res['invoice_address']['country'] = 'FR'
res['invoice_address']['is_business'] = True
res['invoice_address']['vat_id'] = 'FR12345'
res['invoice_address']['vat_id_validated'] = True
del res['positions'][0]['price']
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
p = o.positions.first()
assert p.price == Decimal('19.33')
assert p.tax_rate == Decimal('0.00')
assert p.tax_value == Decimal('0.00')
assert o.total == Decimal('19.58')
@pytest.mark.django_db
def test_order_create_auto_pricing_reverse_charge_require_valid_vatid(token_client, organizer, event, item, quota,
question, taxrule):
taxrule.eu_reverse_charge = True
taxrule.home_country = Country('DE')
taxrule.save()
item.tax_rule = taxrule
item.save()
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
res['invoice_address']['country'] = 'FR'
res['invoice_address']['is_business'] = True
res['invoice_address']['vat_id'] = 'FR12345'
del res['positions'][0]['price']
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
p = o.positions.first()
assert p.price == Decimal('23.00')
assert p.tax_rate == Decimal('19.00')
@pytest.mark.django_db
def test_order_create_autopricing_voucher_budget_partially(token_client, organizer, event, item, quota, question,
taxrule):
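    # The voucher sets the price to 21.50 (a 1.50 discount), but its budget of 2.50 only
    # covers that discount once; the second position only gets the remaining 1.00 off (22.00).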
with scopes_disabled():
voucher = event.vouchers.create(price_mode="set", value=21.50, item=item, budget=Decimal('2.50'),
max_usages=999)
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
res['positions'][0]['voucher'] = voucher.code
del res['positions'][0]['price']
del res['positions'][0]['positionid']
res['positions'].append(res['positions'][0])
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
print(resp.data)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
p = o.positions.first()
p2 = o.positions.last()
assert p.price == Decimal('21.50')
assert p2.price == Decimal('22.00')
@pytest.mark.django_db
def test_order_create_autopricing_voucher_budget_full(token_client, organizer, event, item, quota, question, taxrule):
with scopes_disabled():
voucher = event.vouchers.create(price_mode="set", value=21.50, item=item, budget=Decimal('0.50'),
max_usages=999)
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
res['positions'][0]['voucher'] = voucher.code
del res['positions'][0]['price']
del res['positions'][0]['positionid']
res['positions'].append(res['positions'][0])
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {'positions': [{}, {'voucher': ['The voucher has a remaining budget of 0.00, therefore a '
'discount of 1.50 can not be given.']}]}
@pytest.mark.django_db
def test_order_create_voucher_budget_exceeded(token_client, organizer, event, item, quota, question, taxrule):
with scopes_disabled():
voucher = event.vouchers.create(price_mode="set", value=21.50, item=item, budget=Decimal('3.00'),
max_usages=999)
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
res['positions'][0]['voucher'] = voucher.code
res['positions'][0]['price'] = '19.00'
del res['positions'][0]['positionid']
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
print(resp.data)
assert resp.status_code == 400
assert resp.data == {'positions': [{'voucher': ['The voucher has a remaining budget of 3.00, therefore a '
'discount of 4.00 can not be given.']}]}
@pytest.mark.django_db
def test_order_create_voucher_price(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
del res['positions'][0]['price']
with scopes_disabled():
voucher = event.vouchers.create(price_mode="set", value=15, item=item)
res['positions'][0]['voucher'] = voucher.code
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
o = Order.objects.get(code=resp.data['code'])
p = o.positions.first()
assert p.voucher == voucher
voucher.refresh_from_db()
assert voucher.redeemed == 1
assert p.price == Decimal('15.00')
assert o.total == Decimal('15.25')
@pytest.mark.django_db
def test_order_create_voucher_unknown_code(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
del res['positions'][0]['price']
with scopes_disabled():
event.vouchers.create(price_mode="set", value=15, item=item)
res['positions'][0]['voucher'] = "FOOBAR"
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {
'positions': [
{'voucher': ['Object with code=FOOBAR does not exist.']},
]
}
@pytest.mark.django_db
def test_order_create_voucher_redeemed(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
del res['positions'][0]['price']
res['positions'][0]['answers'][0]['question'] = question.pk
with scopes_disabled():
voucher = event.vouchers.create(price_mode="set", value=15, item=item, redeemed=1)
res['positions'][0]['voucher'] = voucher.code
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {
'positions': [
{'voucher': ['The voucher has already been used the maximum number of times.']},
]
}
@pytest.mark.django_db
def test_order_create_voucher_redeemed_partially(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['answers'][0]['question'] = question.pk
res['positions'][0]['item'] = item.pk
del res['positions'][0]['price']
del res['positions'][0]['positionid']
with scopes_disabled():
voucher = event.vouchers.create(price_mode="set", value=15, item=item, redeemed=1, max_usages=2)
res['positions'][0]['voucher'] = voucher.code
res['positions'].append(copy.copy(res['positions'][0]))
res['positions'].append(copy.copy(res['positions'][0]))
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {
'positions': [
{},
{'voucher': ['The voucher has already been used the maximum number of times.']},
{'voucher': ['The voucher has already been used the maximum number of times.']},
]
}
@pytest.mark.django_db
def test_order_create_voucher_item_mismatch(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
del res['positions'][0]['price']
with scopes_disabled():
item2 = event.items.create(name="Budget Ticket", default_price=23)
voucher = event.vouchers.create(price_mode="set", value=15, item=item2, redeemed=0)
res['positions'][0]['voucher'] = voucher.code
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {
'positions': [
{'voucher': ['This voucher is not valid for this product.']},
]
}
@pytest.mark.django_db
def test_order_create_voucher_expired(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
del res['positions'][0]['price']
with scopes_disabled():
voucher = event.vouchers.create(price_mode="set", value=15, item=item, redeemed=0,
valid_until=now() - datetime.timedelta(days=1))
res['positions'][0]['voucher'] = voucher.code
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
assert resp.data == {
'positions': [
{'voucher': ['This voucher is expired.']},
]
}
@pytest.mark.django_db
def test_order_create_voucher_block_quota(token_client, organizer, event, item, quota, question):
res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
res['positions'][0]['item'] = item.pk
res['positions'][0]['answers'][0]['question'] = question.pk
del res['positions'][0]['price']
quota.size = 0
quota.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 400
with scopes_disabled():
voucher = event.vouchers.create(price_mode="set", value=15, item=item, redeemed=0,
block_quota=True)
res['positions'][0]['voucher'] = voucher.code
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/'.format(
organizer.slug, event.slug
), format='json', data=res
)
assert resp.status_code == 201
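# Illustrative helper, not part of the original pretix test suite: the voucher
# tests above all repeat the same pattern (deep-copy ORDER_CREATE_PAYLOAD,
# point position 0 at an item and question, optionally attach a voucher code,
# POST to the orders endpoint). A hypothetical helper like the one below could
# factor that pattern out; its name and signature are illustrative only.
def _post_order_with_voucher(token_client, organizer, event, item, question, voucher_code=None):
    res = copy.deepcopy(ORDER_CREATE_PAYLOAD)
    res['positions'][0]['item'] = item.pk
    res['positions'][0]['answers'][0]['question'] = question.pk
    del res['positions'][0]['price']
    if voucher_code is not None:
        res['positions'][0]['voucher'] = voucher_code
    return token_client.post(
        '/api/v1/organizers/{}/events/{}/orders/'.format(
            organizer.slug, event.slug
        ), format='json', data=res
    )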
REFUND_CREATE_PAYLOAD = {
"state": "created",
"provider": "manual",
"amount": "23.00",
"source": "admin",
"payment": 2,
"info": {
"foo": "bar",
}
}
@pytest.mark.django_db
def test_refund_create(token_client, organizer, event, order):
res = copy.deepcopy(REFUND_CREATE_PAYLOAD)
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/refunds/'.format(
organizer.slug, event.slug, order.code
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
r = order.refunds.get(local_id=resp.data['local_id'])
assert r.provider == "manual"
assert r.amount == Decimal("23.00")
assert r.state == "created"
assert r.source == "admin"
assert r.info_data == {"foo": "bar"}
assert r.payment.local_id == 2
order.refresh_from_db()
assert order.status == Order.STATUS_PENDING
@pytest.mark.django_db
def test_refund_create_mark_refunded(token_client, organizer, event, order):
res = copy.deepcopy(REFUND_CREATE_PAYLOAD)
res['mark_canceled'] = True
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/refunds/'.format(
organizer.slug, event.slug, order.code
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
r = order.refunds.get(local_id=resp.data['local_id'])
assert r.provider == "manual"
assert r.amount == Decimal("23.00")
assert r.state == "created"
assert r.source == "admin"
assert r.info_data == {"foo": "bar"}
assert r.payment.local_id == 2
order.refresh_from_db()
assert order.status == Order.STATUS_CANCELED
@pytest.mark.django_db
def test_refund_optional_fields(token_client, organizer, event, order):
res = copy.deepcopy(REFUND_CREATE_PAYLOAD)
del res['info']
del res['payment']
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/refunds/'.format(
organizer.slug, event.slug, order.code
), format='json', data=res
)
assert resp.status_code == 201
with scopes_disabled():
r = order.refunds.get(local_id=resp.data['local_id'])
assert r.provider == "manual"
assert r.amount == Decimal("23.00")
assert r.state == "created"
assert r.source == "admin"
del res['state']
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/refunds/'.format(
organizer.slug, event.slug, order.code
), format='json', data=res
)
assert resp.status_code == 400
@pytest.mark.django_db
def test_refund_create_invalid_payment(token_client, organizer, event, order):
res = copy.deepcopy(REFUND_CREATE_PAYLOAD)
res['payment'] = 7
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/refunds/'.format(
organizer.slug, event.slug, order.code
), format='json', data=res
)
assert resp.status_code == 400
@pytest.mark.django_db
def test_order_delete(token_client, organizer, event, order):
resp = token_client.delete(
'/api/v1/organizers/{}/events/{}/orders/{}/'.format(
organizer.slug, event.slug, order.code
)
)
assert resp.status_code == 403
@pytest.mark.django_db
def test_order_delete_test_mode(token_client, organizer, event, order):
order.testmode = True
order.save()
resp = token_client.delete(
'/api/v1/organizers/{}/events/{}/orders/{}/'.format(
organizer.slug, event.slug, order.code
)
)
assert resp.status_code == 204
with scopes_disabled():
assert not Order.objects.filter(code=order.code).exists()
@pytest.mark.django_db
def test_order_delete_test_mode_voucher(token_client, organizer, event, order, item):
order.testmode = True
order.save()
with scopes_disabled():
q = event.quotas.create(name="Quota")
q.items.add(item)
voucher = event.vouchers.create(price_mode="set", value=15, quota=q, redeemed=1)
op = order.positions.first()
op.voucher = voucher
op.save()
assert voucher.redeemed == 1
resp = token_client.delete(
'/api/v1/organizers/{}/events/{}/orders/{}/'.format(
organizer.slug, event.slug, order.code
)
)
assert resp.status_code == 204
with scopes_disabled():
assert not Order.objects.filter(code=order.code).exists()
voucher.refresh_from_db()
assert voucher.redeemed == 0
@pytest.mark.django_db
def test_order_delete_test_mode_voucher_cancelled_position(token_client, organizer, event, order, item):
order.testmode = True
order.save()
with scopes_disabled():
q = event.quotas.create(name="Quota")
q.items.add(item)
voucher = event.vouchers.create(price_mode="set", value=15, quota=q, redeemed=42)
op = order.all_positions.last()
op.voucher = voucher
op.save()
resp = token_client.delete(
'/api/v1/organizers/{}/events/{}/orders/{}/'.format(
organizer.slug, event.slug, order.code
)
)
assert resp.status_code == 204
with scopes_disabled():
assert not Order.objects.filter(code=order.code).exists()
voucher.refresh_from_db()
assert voucher.redeemed == 42
@pytest.mark.django_db
def test_order_delete_test_mode_voucher_cancelled_order(token_client, organizer, event, order, item):
with scopes_disabled():
order.testmode = True
order.status = Order.STATUS_CANCELED
order.save()
q = event.quotas.create(name="Quota")
q.items.add(item)
voucher = event.vouchers.create(price_mode="set", value=15, quota=q, redeemed=42)
op = order.positions.first()
op.voucher = voucher
op.save()
resp = token_client.delete(
'/api/v1/organizers/{}/events/{}/orders/{}/'.format(
organizer.slug, event.slug, order.code
)
)
assert resp.status_code == 204
with scopes_disabled():
assert not Order.objects.filter(code=order.code).exists()
voucher.refresh_from_db()
assert voucher.redeemed == 42
@pytest.mark.django_db
def test_order_update_ignore_fields(token_client, organizer, event, order):
resp = token_client.patch(
'/api/v1/organizers/{}/events/{}/orders/{}/'.format(
organizer.slug, event.slug, order.code
), format='json', data={
'status': 'c'
}
)
assert resp.status_code == 200
order.refresh_from_db()
assert order.status == 'n'
@pytest.mark.django_db
def test_order_update_only_partial(token_client, organizer, event, order):
resp = token_client.put(
'/api/v1/organizers/{}/events/{}/orders/{}/'.format(
organizer.slug, event.slug, order.code
), format='json', data={
'status': 'c'
}
)
assert resp.status_code == 405
@pytest.mark.django_db
def test_order_update_state_validation(token_client, organizer, event, order):
resp = token_client.patch(
'/api/v1/organizers/{}/events/{}/orders/{}/'.format(
organizer.slug, event.slug, order.code
), format='json', data={
'invoice_address': {
"is_business": False,
"company": "This is my company name",
"name": "John Doe",
"name_parts": {},
"street": "",
"state": "",
"zipcode": "",
"city": "Paris",
"country": "NONEXISTANT",
"internal_reference": "",
"vat_id": "",
}
}
)
assert resp.status_code == 400
resp = token_client.patch(
'/api/v1/organizers/{}/events/{}/orders/{}/'.format(
organizer.slug, event.slug, order.code
), format='json', data={
'invoice_address': {
"is_business": False,
"company": "This is my company name",
"name": "John Doe",
"name_parts": {},
"street": "",
"state": "NONEXISTANT",
"zipcode": "",
"city": "Test",
"country": "AU",
"internal_reference": "",
"vat_id": "",
}
}
)
assert resp.status_code == 400
resp = token_client.patch(
'/api/v1/organizers/{}/events/{}/orders/{}/'.format(
organizer.slug, event.slug, order.code
), format='json', data={
'invoice_address': {
"is_business": False,
"company": "This is my company name",
"name": "John Doe",
"name_parts": {},
"street": "",
"state": "QLD",
"zipcode": "",
"city": "Test",
"country": "AU",
"internal_reference": "",
"vat_id": "",
}
}
)
assert resp.status_code == 200
order.invoice_address.refresh_from_db()
assert order.invoice_address.state == "QLD"
assert order.invoice_address.country == "AU"
@pytest.mark.django_db
def test_order_update_allowed_fields(token_client, organizer, event, order):
event.settings.locales = ['de', 'en']
resp = token_client.patch(
'/api/v1/organizers/{}/events/{}/orders/{}/'.format(
organizer.slug, event.slug, order.code
), format='json', data={
'comment': 'Here is a comment',
'checkin_attention': True,
'email': 'foo@bar.com',
'locale': 'de',
'invoice_address': {
"is_business": False,
"company": "This is my company name",
"name": "John Doe",
"name_parts": {},
"street": "",
"state": "",
"zipcode": "",
"city": "Paris",
"country": "FR",
"internal_reference": "",
"vat_id": "",
}
}
)
assert resp.status_code == 200
order.refresh_from_db()
assert order.comment == 'Here is a comment'
assert order.checkin_attention
assert order.email == 'foo@bar.com'
assert order.locale == 'de'
assert order.invoice_address.company == "This is my company name"
assert order.invoice_address.name_cached == "John Doe"
assert order.invoice_address.name_parts == {'_legacy': 'John Doe'}
assert str(order.invoice_address.country) == "FR"
assert not order.invoice_address.vat_id_validated
assert order.invoice_address.city == "Paris"
with scopes_disabled():
assert order.all_logentries().get(action_type='pretix.event.order.comment')
assert order.all_logentries().get(action_type='pretix.event.order.checkin_attention')
assert order.all_logentries().get(action_type='pretix.event.order.contact.changed')
assert order.all_logentries().get(action_type='pretix.event.order.locale.changed')
assert order.all_logentries().get(action_type='pretix.event.order.modified')
@pytest.mark.django_db
def test_order_update_validated_vat_id(token_client, organizer, event, order):
resp = token_client.patch(
'/api/v1/organizers/{}/events/{}/orders/{}/'.format(
organizer.slug, event.slug, order.code
), format='json', data={
'invoice_address': {
"is_business": False,
"company": "This is my company name",
"name": "John Doe",
"name_parts": {},
"street": "",
"state": "",
"zipcode": "",
"city": "Paris",
"country": "FR",
"internal_reference": "",
"vat_id": "FR123",
"vat_id_validated": True
}
}
)
assert resp.status_code == 200
order.refresh_from_db()
assert order.invoice_address.vat_id == "FR123"
assert order.invoice_address.vat_id_validated
@pytest.mark.django_db
def test_order_update_invoiceaddress_delete_create(token_client, organizer, event, order):
event.settings.locales = ['de', 'en']
resp = token_client.patch(
'/api/v1/organizers/{}/events/{}/orders/{}/'.format(
organizer.slug, event.slug, order.code
), format='json', data={
'invoice_address': None,
}
)
assert resp.status_code == 200
order.refresh_from_db()
with pytest.raises(InvoiceAddress.DoesNotExist):
order.invoice_address
resp = token_client.patch(
'/api/v1/organizers/{}/events/{}/orders/{}/'.format(
organizer.slug, event.slug, order.code
), format='json', data={
'invoice_address': {
"is_business": False,
"company": "This is my company name",
"name": "",
"name_parts": {},
"street": "",
"state": "",
"zipcode": "",
"city": "Paris",
"country": "Fr",
"internal_reference": "",
"vat_id": "",
}
}
)
assert resp.status_code == 200
order.refresh_from_db()
assert order.invoice_address.company == "This is my company name"
assert str(order.invoice_address.country) == "FR"
assert order.invoice_address.city == "Paris"
@pytest.mark.django_db
def test_order_update_email_to_none(token_client, organizer, event, order):
resp = token_client.patch(
'/api/v1/organizers/{}/events/{}/orders/{}/'.format(
organizer.slug, event.slug, order.code
), format='json', data={
'email': None,
}
)
assert resp.status_code == 200
order.refresh_from_db()
assert order.email is None
@pytest.mark.django_db
def test_order_update_locale_to_invalid(token_client, organizer, event, order):
resp = token_client.patch(
'/api/v1/organizers/{}/events/{}/orders/{}/'.format(
organizer.slug, event.slug, order.code
), format='json', data={
'locale': 'de',
}
)
assert resp.status_code == 400
assert resp.data == {'locale': ['"de" is not a supported locale for this event.']}
@pytest.mark.django_db
def test_order_create_invoice(token_client, organizer, event, order):
event.settings.invoice_generate = 'True'
event.settings.invoice_generate_sales_channels = []
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/create_invoice/'.format(
organizer.slug, event.slug, order.code
), format='json', data={}
)
assert resp.status_code == 400
event.settings.invoice_generate_sales_channels = ['web']
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/create_invoice/'.format(
organizer.slug, event.slug, order.code
), format='json', data={}
)
assert resp.status_code == 201
assert resp.data == {
'order': 'FOO',
'number': 'DUMMY-00001',
'is_cancellation': False,
'invoice_from': '',
'invoice_to': 'Sample company\nNew Zealand\nVAT-ID: DE123',
'date': now().date().isoformat(),
'refers': None,
'locale': 'en',
'introductory_text': '',
'additional_text': '',
'payment_provider_text': '',
'footer_text': '',
'lines': [
{
'position': 1,
'description': 'Budget Ticket<br />Attendee: Peter',
'gross_value': '23.00',
'tax_value': '0.00',
'tax_rate': '0.00',
'tax_name': ''
},
{
'position': 2,
'description': 'Payment fee',
'gross_value': '0.25',
'tax_value': '0.05',
'tax_rate': '19.00',
'tax_name': ''
}
],
'foreign_currency_display': None,
'foreign_currency_rate': None,
'foreign_currency_rate_date': None,
'internal_reference': ''
}
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/create_invoice/'.format(
organizer.slug, event.slug, order.code
), format='json', data={}
)
assert resp.data == {'detail': 'An invoice for this order already exists.'}
assert resp.status_code == 400
event.settings.invoice_generate = 'False'
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/create_invoice/'.format(
organizer.slug, event.slug, order.code
), format='json', data={}
)
assert resp.status_code == 400
assert resp.data == {'detail': 'You cannot generate an invoice for this order.'}
@pytest.mark.django_db
def test_order_regenerate_secrets(token_client, organizer, event, order):
s = order.secret
with scopes_disabled():
ps = order.positions.first().secret
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/regenerate_secrets/'.format(
organizer.slug, event.slug, order.code
), format='json', data={}
)
assert resp.status_code == 200
order.refresh_from_db()
assert s != order.secret
with scopes_disabled():
assert ps != order.positions.first().secret
@pytest.mark.django_db
def test_order_resend_link(token_client, organizer, event, order):
djmail.outbox = []
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/resend_link/'.format(
organizer.slug, event.slug, order.code
), format='json', data={}
)
assert resp.status_code == 204
assert len(djmail.outbox) == 1
order.email = None
order.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/resend_link/'.format(
organizer.slug, event.slug, order.code
), format='json', data={}
)
assert resp.status_code == 400
@pytest.mark.django_db
def test_orderposition_price_calculation(token_client, organizer, event, order, item):
with scopes_disabled():
op = order.positions.first()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orderpositions/{}/price_calc/'.format(organizer.slug, event.slug, op.pk),
data={
}
)
assert resp.status_code == 200
assert resp.data == {
'gross': Decimal('23.00'),
'gross_formatted': '23.00',
'name': '',
'net': Decimal('23.00'),
'rate': Decimal('0.00'),
'tax': Decimal('0.00')
}
@pytest.mark.django_db
def test_orderposition_price_calculation_item_with_tax(token_client, organizer, event, order, item, taxrule):
with scopes_disabled():
item2 = event.items.create(name="Budget Ticket", default_price=23, tax_rule=taxrule)
op = order.positions.first()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orderpositions/{}/price_calc/'.format(organizer.slug, event.slug, op.pk),
data={
'item': item2.pk
}
)
assert resp.status_code == 200
assert resp.data == {
'gross': Decimal('23.00'),
'gross_formatted': '23.00',
'name': '',
'net': Decimal('19.33'),
'rate': Decimal('19.00'),
'tax': Decimal('3.67')
}
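# The 19.33 / 3.67 split asserted above follows from a 19 % tax rate that is
# included in the 23.00 gross price: net = gross / 1.19 and tax = gross - net,
# rounded to cents. A small self-contained check (illustrative only; pretix's
# own rounding implementation may differ in details):
def _demo_included_tax_split(gross=Decimal('23.00'), rate=Decimal('19.00')):
    net = (gross / (1 + rate / 100)).quantize(Decimal('0.01'))
    return net, gross - net  # -> (Decimal('19.33'), Decimal('3.67'))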
@pytest.mark.django_db
def test_orderposition_price_calculation_item_with_variation(token_client, organizer, event, order):
with scopes_disabled():
item2 = event.items.create(name="Budget Ticket", default_price=23)
var = item2.variations.create(default_price=12, value="XS")
op = order.positions.first()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orderpositions/{}/price_calc/'.format(organizer.slug, event.slug, op.pk),
data={
'item': item2.pk,
'variation': var.pk
}
)
assert resp.status_code == 200
assert resp.data == {
'gross': Decimal('12.00'),
'gross_formatted': '12.00',
'name': '',
'net': Decimal('12.00'),
'rate': Decimal('0.00'),
'tax': Decimal('0.00')
}
@pytest.mark.django_db
def test_orderposition_price_calculation_subevent(token_client, organizer, event, order, subevent):
with scopes_disabled():
item2 = event.items.create(name="Budget Ticket", default_price=23)
op = order.positions.first()
op.subevent = subevent
op.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orderpositions/{}/price_calc/'.format(organizer.slug, event.slug, op.pk),
data={
'item': item2.pk,
'subevent': subevent.pk
}
)
assert resp.status_code == 200
assert resp.data == {
'gross': Decimal('23.00'),
'gross_formatted': '23.00',
'name': '',
'net': Decimal('23.00'),
'rate': Decimal('0.00'),
'tax': Decimal('0.00')
}
@pytest.mark.django_db
def test_orderposition_price_calculation_subevent_with_override(token_client, organizer, event, order, subevent):
with scopes_disabled():
item2 = event.items.create(name="Budget Ticket", default_price=23)
se2 = event.subevents.create(name="Foobar", date_from=datetime.datetime(2017, 12, 27, 10, 0, 0, tzinfo=UTC))
se2.subeventitem_set.create(item=item2, price=12)
op = order.positions.first()
op.subevent = subevent
op.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orderpositions/{}/price_calc/'.format(organizer.slug, event.slug, op.pk),
data={
'item': item2.pk,
'subevent': se2.pk
}
)
assert resp.status_code == 200
assert resp.data == {
'gross': Decimal('12.00'),
'gross_formatted': '12.00',
'name': '',
'net': Decimal('12.00'),
'rate': Decimal('0.00'),
'tax': Decimal('0.00')
}
@pytest.mark.django_db
def test_orderposition_price_calculation_voucher_matching(token_client, organizer, event, order, subevent, item):
with scopes_disabled():
item2 = event.items.create(name="Budget Ticket", default_price=23)
q = event.quotas.create(name="Quota")
q.items.add(item)
q.items.add(item2)
voucher = event.vouchers.create(price_mode="set", value=15, quota=q)
op = order.positions.first()
op.voucher = voucher
op.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orderpositions/{}/price_calc/'.format(organizer.slug, event.slug, op.pk),
data={
'item': item2.pk,
}
)
assert resp.status_code == 200
assert resp.data == {
'gross': Decimal('15.00'),
'gross_formatted': '15.00',
'name': '',
'net': Decimal('15.00'),
'rate': Decimal('0.00'),
'tax': Decimal('0.00')
}
@pytest.mark.django_db
def test_orderposition_price_calculation_voucher_not_matching(token_client, organizer, event, order, subevent, item):
with scopes_disabled():
item2 = event.items.create(name="Budget Ticket", default_price=23)
q = event.quotas.create(name="Quota")
q.items.add(item)
voucher = event.vouchers.create(price_mode="set", value=15, quota=q)
op = order.positions.first()
op.voucher = voucher
op.save()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orderpositions/{}/price_calc/'.format(organizer.slug, event.slug, op.pk),
data={
'item': item2.pk,
}
)
assert resp.status_code == 200
assert resp.data == {
'gross': Decimal('23.00'),
'gross_formatted': '23.00',
'name': '',
'net': Decimal('23.00'),
'rate': Decimal('0.00'),
'tax': Decimal('0.00')
}
@pytest.mark.django_db
def test_orderposition_price_calculation_net_price(token_client, organizer, event, order, subevent, item, taxrule):
taxrule.price_includes_tax = False
taxrule.save()
with scopes_disabled():
item2 = event.items.create(name="Budget Ticket", default_price=10, tax_rule=taxrule)
op = order.positions.first()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orderpositions/{}/price_calc/'.format(organizer.slug, event.slug, op.pk),
data={
'item': item2.pk,
}
)
assert resp.status_code == 200
assert resp.data == {
'gross': Decimal('11.90'),
'gross_formatted': '11.90',
'name': '',
'net': Decimal('10.00'),
'rate': Decimal('19.00'),
'tax': Decimal('1.90')
}
@pytest.mark.django_db
def test_orderposition_price_calculation_reverse_charge(token_client, organizer, event, order, subevent, item, taxrule):
taxrule.price_includes_tax = False
taxrule.eu_reverse_charge = True
taxrule.home_country = Country('DE')
taxrule.save()
order.invoice_address.is_business = True
order.invoice_address.vat_id = 'ATU1234567'
order.invoice_address.vat_id_validated = True
order.invoice_address.country = Country('AT')
order.invoice_address.save()
with scopes_disabled():
item2 = event.items.create(name="Budget Ticket", default_price=10, tax_rule=taxrule)
op = order.positions.first()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orderpositions/{}/price_calc/'.format(organizer.slug, event.slug, op.pk),
data={
'item': item2.pk,
}
)
assert resp.status_code == 200
assert resp.data == {
'gross': Decimal('10.00'),
'gross_formatted': '10.00',
'name': '',
'net': Decimal('10.00'),
'rate': Decimal('0.00'),
'tax': Decimal('0.00')
}
| 35.796271
| 157
| 0.604525
|
4c76cc4e4af5c696e99a31e0332a7c23955ce643
| 2,104
|
py
|
Python
|
socialserver/api/v3/block.py
|
niallasher/socialserver-neo
|
7e7d25d939133d149b56ccd54fbfa62d75cabb73
|
[
"MIT"
] | null | null | null |
socialserver/api/v3/block.py
|
niallasher/socialserver-neo
|
7e7d25d939133d149b56ccd54fbfa62d75cabb73
|
[
"MIT"
] | 11
|
2022-03-10T04:55:09.000Z
|
2022-03-30T14:24:19.000Z
|
socialserver/api/v3/block.py
|
niallasher/socialserver-neo
|
7e7d25d939133d149b56ccd54fbfa62d75cabb73
|
[
"MIT"
] | null | null | null |
# Copyright (c) Niall Asher 2022
from datetime import datetime
from flask_restful import Resource, reqparse
from socialserver.db import db
from socialserver.util.auth import get_user_from_auth_header, auth_reqd
from socialserver.constants import ErrorCodes
from pony.orm import db_session
class Block(Resource):
def __init__(self):
self.post_parser = reqparse.RequestParser()
# the username to block
self.post_parser.add_argument("username", type=str, required=True)
self.delete_parser = reqparse.RequestParser()
# the username to unblock
self.delete_parser.add_argument("username", type=str, required=True)
@db_session
@auth_reqd
def post(self):
args = self.post_parser.parse_args()
requesting_user_db = get_user_from_auth_header()
        user_to_block = db.User.get(username=args["username"])
        if user_to_block is None:
            return {"error": ErrorCodes.USERNAME_NOT_FOUND.value}, 404
        if user_to_block is requesting_user_db:
            return {"error": ErrorCodes.CANNOT_BLOCK_SELF.value}, 400
        existing_block = db.Block.get(user=requesting_user_db, blocking=user_to_block)
        if existing_block is not None:
            return {"error": ErrorCodes.BLOCK_ALREADY_EXISTS.value}, 400
        db.Block(
            user=requesting_user_db,
            blocking=user_to_block,
            creation_time=datetime.utcnow(),
        )
return {}, 201
@db_session
@auth_reqd
def delete(self):
args = self.delete_parser.parse_args()
requesting_user_db = get_user_from_auth_header()
        user_to_unblock = db.User.get(username=args["username"])
        if user_to_unblock is None:
            return {"error": ErrorCodes.USERNAME_NOT_FOUND.value}, 404
        existing_block = db.Block.get(
            user=requesting_user_db, blocking=user_to_unblock
        )
        if existing_block is None:
            return {"error": ErrorCodes.CANNOT_FIND_BLOCK_ENTRY.value}, 404
        existing_block.delete()
return {}, 204
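# Illustrative wiring sketch, not part of the original module: flask_restful
# resources such as Block are normally registered on an Api instance. The URL
# below is an assumption for demonstration only; the real socialserver routing
# may differ.
if __name__ == "__main__":  # pragma: no cover - demonstration only
    from flask import Flask
    from flask_restful import Api
    demo_app = Flask(__name__)
    demo_api = Api(demo_app)
    demo_api.add_resource(Block, "/api/v3/block/user")  # hypothetical route
    demo_app.run(debug=True)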
| 31.402985
| 88
| 0.678232
|
180f4ca07f40040ccc6ffe8a1edbb58ebbf4648e
| 4,866
|
py
|
Python
|
ark_nlp/factory/predictor/text_classification.py
|
confstantine/nlp-task
|
cb152e885bc6f6f1243a12ad90b1c715eb548736
|
[
"Apache-2.0"
] | 1
|
2021-12-27T04:48:40.000Z
|
2021-12-27T04:48:40.000Z
|
ark_nlp/factory/predictor/text_classification.py
|
confstantine/nlp-task
|
cb152e885bc6f6f1243a12ad90b1c715eb548736
|
[
"Apache-2.0"
] | null | null | null |
ark_nlp/factory/predictor/text_classification.py
|
confstantine/nlp-task
|
cb152e885bc6f6f1243a12ad90b1c715eb548736
|
[
"Apache-2.0"
] | 1
|
2021-12-27T04:49:35.000Z
|
2021-12-27T04:49:35.000Z
|
"""
# Copyright 2021 Xiang Wang, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
Author: Xiang Wang, xiangking1995@163.com
Status: Active
"""
import torch
from torch.utils.data import DataLoader
class TCPredictor(object):
def __init__(
self,
module,
        tokenizer,
cat2id
):
self.module = module
self.module.task = 'SequenceLevel'
self.cat2id = cat2id
        self.tokenizer = tokenizer
self.device = list(self.module.parameters())[0].device
self.id2cat = {}
for cat_, idx_ in self.cat2id.items():
self.id2cat[idx_] = cat_
def _convert_to_transfomer_ids(
self,
text
):
input_ids = self.tokenizer.sequence_to_ids(text)
input_ids, input_mask, segment_ids = input_ids
features = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': segment_ids
}
return features
def _convert_to_vanilla_ids(
self,
text
):
        tokens = self.tokenizer.tokenize(text)
        length = len(tokens)
        input_ids = self.tokenizer.sequence_to_ids(tokens)
        features = {
            'input_ids': input_ids,
            'length': length if length < self.tokenizer.max_seq_len else self.tokenizer.max_seq_len,
}
return features
def _get_input_ids(
self,
text
):
if self.tokenizer.tokenizer_type == 'vanilla':
return self._convert_to_vanilla_ids(text)
elif self.tokenizer.tokenizer_type == 'transfomer':
return self._convert_to_transfomer_ids(text)
elif self.tokenizer.tokenizer_type == 'customized':
            return self._convert_to_customized_ids(text)
else:
raise ValueError("The tokenizer type does not exist")
def _get_module_one_sample_inputs(
self,
features
):
return {col: torch.Tensor(features[col]).type(torch.long).unsqueeze(0).to(self.device) for col in features}
def predict_one_sample(
self,
text='',
topk=1,
return_label_name=True,
return_proba=False
):
        if topk is None:
            topk = len(self.cat2id) if len(self.cat2id) > 2 else 1
features = self._get_input_ids(text)
self.module.eval()
with torch.no_grad():
inputs = self._get_module_one_sample_inputs(features)
logit = self.module(**inputs)
logit = torch.nn.functional.softmax(logit, dim=1)
probs, indices = logit.topk(topk, dim=1, sorted=True)
preds = []
probas = []
for pred_, proba_ in zip(indices.cpu().numpy()[0], probs.cpu().numpy()[0].tolist()):
if return_label_name:
pred_ = self.id2cat[pred_]
preds.append(pred_)
if return_proba:
probas.append(proba_)
if return_proba:
return list(zip(preds, probas))
return preds
def _get_module_batch_inputs(
self,
features
):
return {col: features[col].type(torch.long).to(self.device) for col in self.inputs_cols}
def predict_batch(
self,
test_data,
batch_size=16,
shuffle=False,
return_label_name=True,
return_proba=False
):
self.inputs_cols = test_data.dataset_cols
preds = []
        probas = []
self.module.eval()
generator = DataLoader(test_data, batch_size=batch_size, shuffle=False)
with torch.no_grad():
for step, inputs in enumerate(generator):
inputs = self._get_module_batch_inputs(inputs)
logits = self.module(**inputs)
preds.extend(torch.max(logits, 1)[1].cpu().numpy())
if return_proba:
logits = torch.nn.functional.softmax(logits, dim=1)
probas.extend(logits.max(dim=1).values.cpu().detach().numpy())
if return_label_name:
preds = [self.id2cat[pred_] for pred_ in preds]
if return_proba:
return list(zip(preds, probas))
return preds
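# Self-contained sketch, not part of the original module: it mirrors the
# softmax + topk decoding step used in predict_one_sample above on a made-up
# 3-class logit tensor, so the (label, probability) output format can be seen
# without a trained model or tokenizer. All values are illustrative.
def _demo_topk_decoding(topk=2):
    id2cat = {0: 'negative', 1: 'neutral', 2: 'positive'}
    logit = torch.tensor([[0.2, 1.5, 3.0]])  # fake model output for one sample
    probs = torch.nn.functional.softmax(logit, dim=1)
    top_probs, top_indices = probs.topk(topk, dim=1, sorted=True)
    return [
        (id2cat[idx], round(proba, 4))
        for idx, proba in zip(top_indices[0].tolist(), top_probs[0].tolist())
    ]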
| 28.792899
| 115
| 0.583847
|
07c58708d2df7d60c9749cad381bcc905a005eeb
| 12,243
|
py
|
Python
|
analysis_utils/visualize.py
|
AIasd/ADFuzz
|
388d6568e1e1c0dfcd3951481268f01e2f0c2106
|
[
"MIT"
] | 5
|
2022-01-06T01:10:47.000Z
|
2022-03-18T15:39:43.000Z
|
analysis_utils/visualize.py
|
AIasd/ADFuzz
|
388d6568e1e1c0dfcd3951481268f01e2f0c2106
|
[
"MIT"
] | 15
|
2022-01-03T19:36:36.000Z
|
2022-03-30T03:57:58.000Z
|
analysis_utils/visualize.py
|
AIasd/ADFuzz
|
388d6568e1e1c0dfcd3951481268f01e2f0c2106
|
[
"MIT"
] | 3
|
2021-11-22T08:01:47.000Z
|
2022-03-11T08:53:58.000Z
|
import sys
sys.path.append('.')
sys.path.append('pymoo')
sys.path.append('fuzzing_utils')
import os
import pickle
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
sns.set_theme()
# TBD: visualize synthetic function bug distribution (2d)
def visualize_synthetic_function_bugs():
pass
# -------------------- helper functions for visualize_data --------------------
from mpl_toolkits.mplot3d.proj3d import proj_transform
from mpl_toolkits.mplot3d.axes3d import Axes3D
from matplotlib.patches import FancyArrowPatch
class Arrow3D(FancyArrowPatch):
def __init__(self, x, y, z, dx, dy, dz, *args, **kwargs):
super().__init__((0, 0), (0, 0), *args, **kwargs)
self._xyz = (x, y, z)
self._dxdydz = (dx, dy, dz)
def draw(self, renderer):
x1, y1, z1 = self._xyz
dx, dy, dz = self._dxdydz
x2, y2, z2 = (x1 + dx, y1 + dy, z1 + dz)
xs, ys, zs = proj_transform((x1, x2), (y1, y2), (z1, z2), self.axes.M)
self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
super().draw(renderer)
def do_3d_projection(self, renderer=None):
x1, y1, z1 = self._xyz
dx, dy, dz = self._dxdydz
x2, y2, z2 = (x1 + dx, y1 + dy, z1 + dz)
xs, ys, zs = proj_transform((x1, x2), (y1, y2), (z1, z2), self.axes.M)
self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
return np.min(zs)
def _arrow3D(ax, x, y, z, dx, dy, dz, *args, **kwargs):
    '''Add a 3d arrow to an `Axes3D` instance.'''
arrow = Arrow3D(x, y, z, dx, dy, dz, *args, **kwargs)
ax.add_artist(arrow)
setattr(Axes3D, 'arrow3D', _arrow3D)
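# Quick illustrative check of the Axes3D.arrow3D patch above (not part of the
# original script): draws a single arrow on a 3d axis and saves the figure.
# The output path is an arbitrary example.
def _demo_arrow3d(save_path='arrow3d_demo.jpg'):
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.arrow3D(0.1, 0.1, 0.1, 0.5, 0.4, 0.3,
               mutation_scale=20, arrowstyle='-|>', color='red')
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.set_zlim(0, 1)
    fig.savefig(save_path)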
def plot_arrow(ax, values, label, color, plot_dim, legend=False, width=0.001, head_width=0.01):
if len(values) == 2:
x, y = values
yaw = 0
length = 0
head_width = 0
if legend:
ax.scatter(x, y, color=color, label=str(label))
else:
ax.scatter(x, y, color=color)
else:
if len(values) == 3:
x, y, yaw = values
length = 0.05
else:
x, y, yaw, length = values
if plot_dim == 2:
# since yaw will be represented by orientation, its value range is different from others
yaw = yaw * 360
yaw = np.deg2rad(yaw)
dx = np.cos(yaw)*length*0.1
dy = np.sin(yaw)*length*0.1
if legend:
label = str(label)
else:
label = None
if plot_dim == 2:
ax.arrow(x, y, dx, dy, color=color, head_width=head_width, alpha=0.5, width=width, label=label)
elif plot_dim == 3:
# ax.arrow3D(x,y,0.5, x+dx,y+dy,0.7, mutation_scale=20, arrowstyle="-|>", linestyle='dashed', color=color, label=label)
ax.scatter(x, y, yaw, color=color, label=label)
ax.set_ylim(-0.1, 1.1)
ax.set_xlim(-0.1, 1.1)
if plot_dim == 3:
ax.set_zlim(-0.1, 1.1)
def plot_subplot(ax, x_list, y_list, chosen_inds, unique_y_list, legend, mode, chosen_labels, plot_dim, split_label_v_pair=()):
x_sublist = x_list[chosen_inds]
y_sublist = y_list[chosen_inds]
colors = ['black', 'red', 'gray', 'lightgray', 'brown', 'salmon', 'orange', 'yellowgreen', 'green', 'blue', 'purple', 'magenta', 'pink']
for j, y in enumerate(unique_y_list):
color = colors[j]
x_subset = x_sublist[y_sublist==y]
print('\t', 'y', y, 'len(x_subset)', len(x_subset))
for k in range(x_subset.shape[0]):
if legend and k == 0:
plot_arrow(ax, x_subset[k], y, color, plot_dim, legend=True)
else:
plot_arrow(ax, x_subset[k], y, color, plot_dim, legend=False)
if len(split_label_v_pair) > 0:
subplot_split_label, v = split_label_v_pair
ax.set_title(subplot_split_label+' = '+v, fontsize=18)
else:
ax.set_title('samples '+str(chosen_inds[0])+' to '+str(chosen_inds[1]), fontsize=18)
if legend:
ax.legend(loc='lower right', prop={'size': 16}, fancybox=True, framealpha=0.5)
if mode == 'plain':
ax.set_xlabel(chosen_labels[0])
ax.set_ylabel(chosen_labels[1])
if plot_dim == 3:
ax.set_zlabel(chosen_labels[2])
# extract data from result folder
def extract_data_from_fuzzing(folder_path):
data_path = os.path.join(folder_path, 'data.pickle')
with open(data_path, 'rb') as f_in:
data_d = pickle.load(f_in)
x_list = data_d['x_list']
y_list = data_d['y_list']
x_labels = np.array(data_d['labels'])
print('all x_labels', x_labels)
xl = data_d['xl']
xu = data_d['xu']
used_labels_inds = xu - xl > 0
x_list = x_list[:, used_labels_inds]
x_labels = x_labels[used_labels_inds]
print('used x_labels', x_labels)
return x_list, y_list, x_labels
def extract_data_from_csv(folder_path, filename, x_labels, y_label):
import pandas
df = pandas.read_csv(os.path.join(folder_path, filename))
x_list = df[x_labels].to_numpy()
y_list = df[y_label].to_numpy()
# print('x_list.shape', x_list.shape, 'y_list.shape', y_list.shape)
return x_list, y_list, np.array(x_labels)
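# Illustrative only, not part of the original script: the CSV consumed by
# extract_data_from_csv simply needs one column per entry of x_labels plus the
# target column (y_label). The file name and values below are made up.
def _write_demo_csv(path='demo_grid.csv'):
    import pandas
    pandas.DataFrame({
        'ego_pos': [0.1, 0.4, 0.8],
        'ego_init_speed': [3.0, 5.0, 7.0],
        'oob': [0, 1, 0],
    }).to_csv(path, index=False)
    return path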
# -------------------- helper functions for visualize_data --------------------
def visualize_data(save_folder_path, initial_x_list, y_list, x_labels, num_subplots, mode, dim, chosen_labels, plot_dim, subplot_split_label=''):
# normalize the data first
from sklearn.preprocessing import MinMaxScaler
transformer = MinMaxScaler().fit(initial_x_list)
x_list_normalized = transformer.transform(initial_x_list)
if plot_dim == 3:
assert dim == plot_dim
if subplot_split_label:
split_ind = np.where(x_labels==subplot_split_label)[0][0]
if mode == 'plain':
assert len(chosen_labels) == dim
inds_list = []
for chosen_label in chosen_labels:
assert chosen_label in x_labels
inds_list.append(np.where(x_labels==chosen_label)[0][0])
else:
# TBD: when applying dimensionality reduction, exclude the split label if it is used.
# dimensionality reduction is only used when input dimension is larger than the visualization dimension
assert x_list_normalized.shape[1] > dim
if mode == 'pca':
from sklearn.decomposition import PCA
pca = PCA(n_components=dim, svd_solver='full')
pca.fit(x_list_normalized)
print('dim', dim, 'pca.explained_variance_ratio_', pca.explained_variance_ratio_)
x_list_normalized = pca.transform(x_list_normalized)
inds_list = [i for i in range(dim)]
elif mode == 'tsne':
if plot_dim == 2:
assert dim == 2
elif plot_dim == 3:
assert dim == 3
from sklearn.manifold import TSNE
print('x_list_normalized.shape', x_list_normalized.shape)
x_list_normalized = TSNE(n_components=dim).fit_transform(x_list_normalized)
inds_list = [i for i in range(dim)]
else:
            print('mode', mode)
            raise ValueError('unsupported visualization mode: {}'.format(mode))
print('mode', mode, 'dim', dim, 'chosen_labels', chosen_labels)
print('x_list_normalized.shape', x_list_normalized.shape)
print('y_list.shape', y_list.shape)
x_list = x_list_normalized[:, inds_list]
unique_y_list = np.unique(y_list)
if subplot_split_label:
v_list = np.unique(x_list_normalized[:, split_ind])
num_subplots = len(v_list)
assert num_subplots >= 1
assert x_list_normalized.shape[0] >= num_subplots
num_subplots_col_num = int(np.ceil(np.sqrt(num_subplots)))
num_subplots_row_num = int(np.ceil(num_subplots / num_subplots_col_num))
if num_subplots > 1:
# for the overall plot at the first row
num_subplots_row_num += 1
if plot_dim == 2:
projection = None
else:
projection = '3d'
unit_size = 6
fig = plt.figure(figsize=(num_subplots_col_num*unit_size, num_subplots_row_num*unit_size))
# draw an overall plot
ax = fig.add_subplot(num_subplots_col_num, num_subplots_row_num, 1, projection=projection)
chosen_inds = np.arange(0, x_list.shape[0])
plot_subplot(ax, x_list, y_list, chosen_inds, unique_y_list, True, mode, chosen_labels, plot_dim, split_label_v_pair=(subplot_split_label, 'any'))
if subplot_split_label:
for i, v in enumerate(v_list):
ax = fig.add_subplot(num_subplots_col_num, num_subplots_row_num, i+num_subplots_col_num+1, projection=projection)
chosen_inds = np.where(x_list_normalized[:, split_ind]==v)[0]
print('v', v, 'len(chosen_inds)', len(chosen_inds), chosen_inds[:3])
plot_subplot(ax, x_list, y_list, chosen_inds, unique_y_list, False, mode, chosen_labels, plot_dim, split_label_v_pair=(subplot_split_label, '{:.1f}'.format(v)))
fig.suptitle(mode+' with '+str(dim)+' dimensions for different '+subplot_split_label, fontsize=25)
else:
num_per_subplot = int(np.ceil(len(y_list) / num_subplots))
# draw subplots
if num_subplots > 1:
for i in range(num_subplots):
ax = fig.add_subplot(num_subplots_col_num, num_subplots_row_num, i+num_subplots_col_num+1, projection=projection)
left, right = i*num_per_subplot, (i+1)*num_per_subplot
if i == num_subplots-1:
right = x_list.shape[0]
chosen_inds = np.arange(left, right)
plot_subplot(ax, x_list, y_list, chosen_inds, unique_y_list, False, mode, chosen_labels, plot_dim)
fig.suptitle(mode+' with '+str(dim)+' dimensions for '+str(x_list.shape[0])+' samples', fontsize=25)
fig.savefig(os.path.join(save_folder_path, mode+'_'+str(dim)+'_'+str(x_list.shape[0])+'.jpg'))
if __name__ == '__main__':
# -------------------- Dataset Visualization Parameters--------------------
folder_path = 'no_simulation_dataset_script'
filename = 'grid.csv'
# The values with these labels will be extracted
x_labels = ['ego_pos', 'ego_init_speed', 'other_pos', 'other_init_speed', 'ped_delay', 'ped_init_speed']
# The interested target's label
y_label = 'oob'
x_list, y_list, x_labels = extract_data_from_csv(folder_path, filename, x_labels, y_label)
# -------------------- Fuzzing + Visualization Parameters --------------------
# folder_path = 'no_simulation_function_script/run_results_no_simulation/nsga2/four_modes/2022_05_09_18_03_17,50_10_none_500_coeff_0_0.1_0.5_only_unique_0'
# folder_path = 'carla_lbc/run_results/nsga2/town07_front_0/go_straight_town07_one_ped/lbc/2022_05_09_23_07_38,50_10_none_500_coeff_0.0_0.1_0.5_only_unique_0'
    # x_list, y_list, labels = extract_data_from_fuzzing(folder_path)
# -------------------- Common Parameters --------------------
# The number of subsets to split all the data during fuzzing. It needs to be a positive integer and less than or equal to (usually far less than) the number of data points. When it is set to 1, only a plot with all the data points will be shown.
num_subplots = 1
# The visualization method. ['plain', 'pca', 'tsne']
mode = 'plain'
# The number of dimensions to visualize. For 'plain', 2 to 4 are supported and dim must be equal to len(chosen_labels); For 'pca', 2 to 4 are supported; for 'tsne', 2 to 3 are supported and plot_dim must be equal to dim
dim = 4
# The labels used for visualization. It is used only if mode == 'plain' and every label in the chosen_labels must be in labels
chosen_labels = ['ego_pos', 'ego_init_speed', 'other_pos', 'other_init_speed']
# The dimensionality for plotting. [2, 3]. Note if plot_dim == 3, currently only dim == 3 is supported.
plot_dim = 2
# The label used for splitting subplots (it is either None or an element in x_labels). When it is not None, num_subplots will be determined by the number of unique values of subplot_split_label in x_list. Usually this is set to be a categorical feature.
subplot_split_label = 'ped_delay'
visualize_data(folder_path, x_list, y_list, x_labels, num_subplots, mode, dim, chosen_labels, plot_dim, subplot_split_label=subplot_split_label)
| 39.493548
| 257
| 0.641264
|
e405055847775ec11f46a2cef8ba1364477690f6
| 1,201
|
py
|
Python
|
test/test_action_template_category_resource.py
|
cvent/octopus-deploy-api-client
|
0e03e842e1beb29b132776aee077df570b88366a
|
[
"Apache-2.0"
] | null | null | null |
test/test_action_template_category_resource.py
|
cvent/octopus-deploy-api-client
|
0e03e842e1beb29b132776aee077df570b88366a
|
[
"Apache-2.0"
] | null | null | null |
test/test_action_template_category_resource.py
|
cvent/octopus-deploy-api-client
|
0e03e842e1beb29b132776aee077df570b88366a
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Octopus Server API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2019.6.7+Branch.tags-2019.6.7.Sha.aa18dc6809953218c66f57eff7d26481d9b23d6a
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import octopus_deploy_swagger_client
from octopus_deploy_swagger_client.models.action_template_category_resource import ActionTemplateCategoryResource # noqa: E501
from octopus_deploy_swagger_client.rest import ApiException
class TestActionTemplateCategoryResource(unittest.TestCase):
"""ActionTemplateCategoryResource unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testActionTemplateCategoryResource(self):
"""Test ActionTemplateCategoryResource"""
# FIXME: construct object with mandatory attributes with example values
# model = octopus_deploy_swagger_client.models.action_template_category_resource.ActionTemplateCategoryResource() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 29.292683
| 135
| 0.766861
|
263cc334d16f2aafa2d72e6407cde09d0cc45cd0
| 26,636
|
py
|
Python
|
tensorflow_transform/beam/analysis_graph_builder.py
|
LaudateCorpus1/transform
|
afee306046b8f656355b0170793ee64423f30e23
|
[
"Apache-2.0"
] | 970
|
2017-02-10T04:33:46.000Z
|
2022-03-26T08:11:20.000Z
|
tensorflow_transform/beam/analysis_graph_builder.py
|
LaudateCorpus1/transform
|
afee306046b8f656355b0170793ee64423f30e23
|
[
"Apache-2.0"
] | 216
|
2017-02-23T04:50:59.000Z
|
2022-03-31T13:52:57.000Z
|
tensorflow_transform/beam/analysis_graph_builder.py
|
LaudateCorpus1/transform
|
afee306046b8f656355b0170793ee64423f30e23
|
[
"Apache-2.0"
] | 238
|
2017-02-17T16:30:55.000Z
|
2022-03-03T20:10:25.000Z
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to create the implementation graph."""
import collections
import hashlib
import tensorflow as tf
from tensorflow_transform import analyzer_nodes
from tensorflow_transform import graph_tools
from tensorflow_transform import impl_helper
from tensorflow_transform import nodes
from tensorflow_transform import tf2_utils
from tensorflow_transform import tf_utils
from tensorflow_transform.beam import analyzer_cache
from tensorflow_transform.beam import beam_nodes
from tensorflow_transform.beam import combiner_packing_util
# TODO(https://issues.apache.org/jira/browse/SPARK-22674): Switch to
# `collections.namedtuple` or `typing.NamedTuple` once the Spark issue is
# resolved.
from tfx_bsl.types import tfx_namedtuple
# Used for debugging only. This will point to the most recent graph built.
_ANALYSIS_GRAPH = None
def _tensor_name(tensor):
"""Get a name of a tensor without trailing ":0" when relevant."""
# tensor.name is unicode in Python 3 and bytes in Python 2 so convert to
# bytes here.
name = str(tensor.name)
return name[:-2] if name.endswith(':0') else name
class _ReadyVisitor(nodes.Visitor):
"""Visitor to determine if a node is ready to run."""
def __init__(self, graph_analyzer):
self._graph_analyzer = graph_analyzer
self._visited_operation_def_labels = set()
def _validate_operation_label_uniqueness(self, operation_def):
assert operation_def.label not in self._visited_operation_def_labels, (
f'An operation with label {operation_def.label} '
'already exists in the operations graph.')
self._visited_operation_def_labels.add(operation_def.label)
def visit(self, operation_def, input_values):
self._validate_operation_label_uniqueness(operation_def)
if isinstance(operation_def, analyzer_nodes.TensorSource):
is_ready = all(self._graph_analyzer.ready_to_run(tensor)
for tensor in operation_def.tensors)
else:
is_ready = all(input_values)
return (is_ready,) * operation_def.num_outputs
def validate_value(self, value):
assert isinstance(value, bool)
class _TranslateVisitor(nodes.Visitor):
"""Visitor that translates the operation graph.
The original graph is defined by the user in the preprocessing_fn. The
translated graph represents a Beam pipeline.
"""
def __init__(self):
self.phase = None
self.extracted_values_dict = None
self.intermediate_output_signature = None
def visit(self, operation_def, input_values):
if isinstance(operation_def, analyzer_nodes.TensorSource):
tensors = operation_def.tensors
label = operation_def.label
# Add tensor to signature so it gets produced by the SavedModel.
for tensor in tensors:
self.intermediate_output_signature[_tensor_name(tensor)] = tensor
keys = tuple(map(_tensor_name, tensors))
output = nodes.apply_operation(
beam_nodes.ExtractFromDict, self.extracted_values_dict,
keys=keys, label=label)
return (output,)
else:
return nodes.OperationNode(operation_def, input_values).outputs
def validate_value(self, value):
assert isinstance(value, nodes.ValueNode)
class _OptimizationView(
tfx_namedtuple.namedtuple('_OptimizationView', [
'prefer_fine_grained_view', 'flattened_view', 'fine_grained_view',
'hashed_path'
])):
"""A container for operation outputs during _OptimizeVisitor traversal.
This is used in order to maintain both a flattened view, and a fine grained
view that can be used for caching.
  `prefer_fine_grained_view` is a hint: when True, the `fine_grained_view`
  should be used. It should be set to True if the upstream view has caching
  operations that haven't been flattened yet.
"""
def __init__(self, prefer_fine_grained_view, flattened_view,
fine_grained_view, hashed_path):
if prefer_fine_grained_view and not fine_grained_view:
raise ValueError(
'Cannot prefer fine_grained_view when one is not provided')
del hashed_path
self._validate_flattened_view(flattened_view)
self._validate_fine_grained_view(fine_grained_view)
super().__init__()
def _validate_flattened_view(self, view):
assert view is self.flattened_view
assert view is not None
assert isinstance(view, nodes.ValueNode), view
def _validate_fine_grained_view(self, view):
assert view is self.fine_grained_view
if view is None:
return
assert isinstance(view, collections.OrderedDict), view
for value in view.values():
assert isinstance(value, nodes.ValueNode), value
class _OptimizeVisitor(nodes.Visitor):
"""Visitor optimizes the operation graph (see nodes.py).
This operates on the translated graph which is emitted by the
`_TranslateVisitor`, and performs optimizations.
  Namely, when enabled, this allows reading and writing analyzer accumulator
  cache in order to avoid recomputing those accumulators over already seen
  datasets.
This type of optimization requires also creating a partitioned view of the
input data, according to the `is_partitionable` annotation.
"""
def __init__(self, dataset_keys, cache_dict, tensor_keys_to_paths,
cache_output_nodes):
"""Init method for _OptimizeVisitor.
Args:
dataset_keys: An iterable of strings which are keys for a partitioned
dataset.
cache_dict: A dictionary of input cache that can be used in place of a
cacheable accumulate operation. A dictionary from dataset_keys to
dictionaries of cache keys to PCollections. This can be None if there is
no cache.
tensor_keys_to_paths: A dictionary from a tensor key to a unique TF graph
path hash.
cache_output_nodes: A dictionary from (dataset_key, cache_key) to encoded
cache ValueNode. This is the output cache for this graph.
"""
self._sorted_dataset_keys = sorted(dataset_keys)
self._cache_dict = cache_dict
self._tensor_keys_to_paths = tensor_keys_to_paths
self.cache_output_nodes = cache_output_nodes
def _validate_operation_def(self, operation_def):
if operation_def.cache_coder is not None:
if not operation_def.is_partitionable:
raise ValueError('Non partitionable OperationDefs cannot be cacheable')
if operation_def.is_partitionable or operation_def.cache_coder is not None:
if operation_def.num_outputs != 1:
raise ValueError('Cacheable OperationDefs must have exactly 1 output')
def _make_next_hashed_path(self, parent_hashed_paths, operation_def):
# Making a copy of parent_hashed_paths.
paths_to_hash = list(parent_hashed_paths)
paths_to_hash.append(tf.compat.as_bytes(operation_def.__class__.__name__))
if isinstance(operation_def, beam_nodes.ExtractFromDict):
for key in operation_def.keys:
path = self._tensor_keys_to_paths[key]
paths_to_hash.append(path)
else:
for attr in sorted(
[x for x in dir(operation_def) if x not in operation_def._fields]):
if attr.startswith('_') or callable(getattr(operation_def, attr)):
continue
paths_to_hash.append(
tf.compat.as_bytes(str((attr, getattr(operation_def, attr)))))
for field in operation_def._fields:
paths_to_hash.append(
tf.compat.as_bytes(
str((field, operation_def.get_field_str(field)))))
hash_container = hashlib.sha1()
for path in paths_to_hash:
if path is None:
return None
hash_container.update(path)
return hash_container.digest()
def visit(self, operation_def, input_values):
self._validate_operation_def(operation_def)
if (isinstance(operation_def, beam_nodes.ApplySavedModel) and
operation_def.phase == 0):
return self._visit_apply_savedmodel_operation(operation_def, input_values)
    # When self._cache_dict is None this means that we shouldn't do any caching
# for this pipeline, and so there's no need to create any fine grained
# views.
if self._cache_dict is not None and operation_def.is_partitionable:
return self._visit_partitionable_operation(operation_def, input_values)
if input_values and any(v.fine_grained_view and v.prefer_fine_grained_view
for v in input_values):
# We can 'flatten' the cached outputs of the parent operation since this
# operation doesn't support partitioning.
disaggregated_input_values = []
for view in input_values:
disaggregated_input_values.extend(view.fine_grained_view.values())
# Checking that all cache has the same size.
assert len({len(value) for value in disaggregated_input_values}) == 1
next_inputs = nodes.apply_multi_output_operation(
beam_nodes.Flatten,
*disaggregated_input_values,
label='FlattenCache[{}]'.format(operation_def.label))
else:
# Parent operation output is not cacheable, therefore we can just use
# a flattened view.
next_inputs = tuple(v.flattened_view for v in input_values)
flattened_view = nodes.OperationNode(operation_def, next_inputs).outputs
return tuple(
_OptimizationView( # pylint: disable=g-complex-comprehension
prefer_fine_grained_view=False,
flattened_view=flat,
fine_grained_view=None,
hashed_path=None) for flat in flattened_view)
def _visit_partitionable_operation(self, operation_def, upstream_views):
# This is a hint for whether or not the `fine_grained_view` should be used
# downstream. It should be set to true if either the upstream view has
    # caching operations that haven't been flattened yet, or the current
# operation is cacheable.
all_fine_grained_views_available = all(
v.fine_grained_view for v in upstream_views)
prefer_fine_grained_view = (
any(v.prefer_fine_grained_view for v in upstream_views) or
all_fine_grained_views_available and
operation_def.cache_coder is not None)
next_hashed_path = self._make_next_hashed_path(
[v.hashed_path for v in upstream_views], operation_def)
if all_fine_grained_views_available:
fine_grained_views = (self._apply_operation_on_fine_grained_view(
operation_def, tuple(v.fine_grained_view for v in upstream_views),
next_hashed_path),)
else:
fine_grained_views = (None,) * operation_def.num_outputs
flattened_views = nodes.OperationNode(
operation_def, tuple(v.flattened_view for v in upstream_views)).outputs
assert len(fine_grained_views) == len(flattened_views)
return tuple(
_OptimizationView( # pylint: disable=g-complex-comprehension
prefer_fine_grained_view=prefer_fine_grained_view,
flattened_view=flat,
fine_grained_view=fine,
hashed_path=next_hashed_path)
for flat, fine in zip(flattened_views, fine_grained_views))
def _apply_operation_on_fine_grained_view(self, operation_def,
fine_grained_views,
next_hashed_path):
"""Applies a shardable operation on a fine grained view.
This also updates `cache_output_nodes` when necessary.
Args:
operation_def: A shardable `OperationDef`.
fine_grained_views: A tuple of `_OptimizationView.fine_grained_view`s.
next_hashed_path: The hashed path for the currently processed
operation_def.
Returns:
The resulting list of `_OptimizationView.fine_grained_view`s.
"""
result_fine_grained_view = collections.OrderedDict()
cache_entry_key = analyzer_cache.make_cache_entry_key(
tf.compat.as_bytes(operation_def.label) + b'-' + next_hashed_path)
for (dataset_idx, dataset_key) in enumerate(self._sorted_dataset_keys):
# We use an index for the label in order to make beam labels more stable.
infix = 'AnalysisIndex{}'.format(dataset_idx)
if (operation_def.cache_coder and self._cache_dict.get(
dataset_key, {}).get(cache_entry_key) is not None):
decode_cache = analyzer_nodes.DecodeCache(
dataset_key,
cache_entry_key,
coder=operation_def.cache_coder,
label='DecodeCache[{}][{}]'.format(operation_def.label, infix))
(op_output,) = nodes.OperationNode(decode_cache, tuple()).outputs
else:
value_nodes = tuple(v[dataset_key] for v in fine_grained_views)
(op_output,) = nodes.OperationNode(
operation_def._replace(
label='{}[{}]'.format(operation_def.label, infix)),
value_nodes).outputs
if operation_def.cache_coder:
encode_cache = nodes.apply_operation(
analyzer_nodes.EncodeCache,
op_output,
coder=operation_def.cache_coder,
label='EncodeCache[{}][{}]'.format(operation_def.label, infix))
self.cache_output_nodes[(dataset_key, cache_entry_key)] = encode_cache
result_fine_grained_view[dataset_key] = op_output
return result_fine_grained_view
def _visit_apply_savedmodel_operation(self, operation_def, upstream_views):
if any(v.fine_grained_view for v in upstream_views):
raise ValueError(
'Was not expecting a fine_grained_view input for ApplySavedModel')
(saved_model_path_upstream_view, input_upstream_view) = upstream_views
fine_grained_view = collections.OrderedDict()
for (dataset_idx, dataset_key) in enumerate(self._sorted_dataset_keys):
infix = 'AnalysisIndex{}'.format(dataset_idx)
input_node = nodes.apply_operation(
beam_nodes.ExtractInputForSavedModel,
dataset_key=dataset_key,
label='ExtractInputForSavedModel[{}]'.format(infix))
# We use an index for the label in order to make beam labels more stable.
(fine_grained_view[dataset_key],) = (
nodes.OperationNode(
operation_def._replace(
label='{}[{}]'.format(operation_def.label, infix)),
(saved_model_path_upstream_view.flattened_view,
input_node)).outputs)
(flattened_view,) = nodes.OperationNode(
operation_def, (saved_model_path_upstream_view.flattened_view,
input_upstream_view.flattened_view)).outputs
return (_OptimizationView(
prefer_fine_grained_view=False,
flattened_view=flattened_view,
fine_grained_view=fine_grained_view,
hashed_path=b'APPLY_SAVEDMODEL'),)
def validate_value(self, value):
assert isinstance(value, _OptimizationView), value
if value.fine_grained_view:
assert set(value.fine_grained_view.keys()) == set(
self._sorted_dataset_keys), ('{} != {}'.format(
value.fine_grained_view.keys(), self._sorted_dataset_keys))
def _perform_cache_optimization(saved_model_future, dataset_keys,
tensor_keys_to_paths, cache_dict):
"""Performs cache optimization on the given graph."""
cache_output_nodes = {}
optimize_visitor = _OptimizeVisitor(dataset_keys or {}, cache_dict,
tensor_keys_to_paths, cache_output_nodes)
optimize_traverser = nodes.Traverser(optimize_visitor)
optimized = optimize_traverser.visit_value_node(
saved_model_future).flattened_view
if cache_dict is None:
assert not cache_output_nodes
cache_output_nodes = None
return optimized, cache_output_nodes
class _InspectVisitor(nodes.Visitor):
"""A visitor that inspects the graph and looks for dataset keys in use."""
def __init__(self, required_dataset_keys_output):
self._required_dataset_keys = required_dataset_keys_output
def visit(self, operation_def, input_values):
if isinstance(operation_def, beam_nodes.ExtractInputForSavedModel):
self._required_dataset_keys.add(operation_def.dataset_key)
return nodes.OperationNode(operation_def, input_values).outputs
def validate_value(self, value):
assert isinstance(value, nodes.ValueNode)
def _build_analysis_graph_for_inspection(preprocessing_fn, specs, dataset_keys,
input_cache, force_tf_compat_v1):
"""Builds the analysis graph for inspection."""
if not force_tf_compat_v1:
assert all([isinstance(s, tf.TypeSpec) for s in specs.values()]), specs
graph, structured_inputs, structured_outputs = (
impl_helper.trace_preprocessing_function(
preprocessing_fn,
specs,
use_tf_compat_v1=tf2_utils.use_tf_compat_v1(force_tf_compat_v1)))
transform_fn_future, cache_dict = build(
graph,
structured_inputs,
structured_outputs,
dataset_keys=dataset_keys,
cache_dict=input_cache)
return transform_fn_future, cache_dict
def get_analysis_dataset_keys(preprocessing_fn,
specs,
dataset_keys,
input_cache,
force_tf_compat_v1):
"""Computes the dataset keys that are required in order to perform analysis.
Args:
preprocessing_fn: A tf.transform preprocessing_fn.
specs: A dict of feature name to tf.TypeSpecs. If `force_tf_compat_v1` is
True, this can also be feature specifications.
dataset_keys: A set of strings which are dataset keys, they uniquely
identify these datasets across analysis runs.
input_cache: A cache dictionary.
force_tf_compat_v1: If `True`, use Tensorflow in compat.v1 mode.
Returns:
A set of dataset keys that are required for analysis.
"""
transform_fn_future, _ = _build_analysis_graph_for_inspection(
preprocessing_fn, specs, dataset_keys, input_cache, force_tf_compat_v1)
result = set()
inspect_visitor = _InspectVisitor(result)
inspect_traverser = nodes.Traverser(inspect_visitor)
_ = inspect_traverser.visit_value_node(transform_fn_future)
# If None is present this means that a flattened version of the entire dataset
# is required, therefore this will be returning all of the given dataset_keys.
if any(k.is_flattened_dataset_key() for k in result):
result = dataset_keys
return result
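# --- Hedged usage sketch (added for illustration; not part of the original module). ---
# The feature spec, dataset keys and preprocessing_fn below are placeholder assumptions:
#
#   specs = {'x': tf.TensorSpec(shape=[None], dtype=tf.float32)}
#   dataset_keys = {analyzer_cache.DatasetKey('span-0'), analyzer_cache.DatasetKey('span-1')}
#   required_keys = get_analysis_dataset_keys(
#       preprocessing_fn, specs, dataset_keys, input_cache=None, force_tf_compat_v1=False)
#   # Any dataset key missing from `required_keys` is fully covered by cache and need not be read.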
def get_analysis_cache_entry_keys(preprocessing_fn,
specs,
dataset_keys,
force_tf_compat_v1):
"""Computes the cache entry keys that would be useful for analysis.
Args:
preprocessing_fn: A tf.transform preprocessing_fn.
specs: A dict of feature name to tf.TypeSpecs. If `force_tf_compat_v1` is
True, this can also be feature specifications.
dataset_keys: A set of strings which are dataset keys, they uniquely
identify these datasets across analysis runs.
force_tf_compat_v1: If `True`, use Tensorflow in compat.v1 mode.
Returns:
A set of cache entry keys which would be useful for analysis.
"""
_, cache_dict = _build_analysis_graph_for_inspection(preprocessing_fn, specs,
dataset_keys, {},
force_tf_compat_v1)
return set([cache_key for _, cache_key in cache_dict.keys()])
def build(graph,
input_signature,
output_signature,
dataset_keys=None,
cache_dict=None):
"""Returns a list of `Phase`s describing how to execute the pipeline.
The default graph is assumed to contain some `Analyzer`s which must be
executed by doing a full pass over the dataset, and passing the inputs for
that analyzer into some implementation, then taking the results and replacing
the `Analyzer`s outputs with constants in the graph containing these results.
The execution plan is described by a list of `Phase`s. Each phase contains
a list of `Analyzer`s, which are the `Analyzer`s which are ready to run in
that phase, together with a list of ops, which are the table initializers that
are ready to run in that phase.
An `Analyzer` or op is ready to run when all its dependencies in the graph
have been computed. Thus if the graph is constructed by
def preprocessing_fn(inputs):
x = inputs['x']
scaled_0 = x - tft.min(x)
scaled_0_1 = scaled_0 / tft.max(scaled_0)
Then the first phase will contain the analyzer corresponding to the call to
`min`, because `x` is an input and so is ready to compute in the first phase,
while the second phase will contain the analyzer corresponding to the call to
`max` since `scaled_0` depends on the result of the call to `tft.min` which
is computed in the first phase.
More generally, we define a level for each op and each `Analyzer` by walking
the graph, assigning to each operation the max level of its inputs, to each
`Tensor` the level of its operation, unless it's the output of an `Analyzer`
in which case we assign the level of its `Analyzer` plus one.
Args:
graph: A `tf.Graph`.
input_signature: A dict whose keys are strings and values are `Tensor`s or
`SparseTensor`s.
output_signature: A dict whose keys are strings and values are `Tensor`s or
`SparseTensor`s.
dataset_keys: (Optional) A set of strings which are dataset keys, they
uniquely identify these datasets across analysis runs.
cache_dict: (Optional): A cache dictionary.
Returns:
A pair of:
* list of `Phase`s
* A dictionary of output cache `ValueNode`s.
Raises:
ValueError: if the graph cannot be analyzed.
"""
tensor_sinks = graph.get_collection(analyzer_nodes.TENSOR_REPLACEMENTS)
graph.clear_collection(analyzer_nodes.TENSOR_REPLACEMENTS)
phase = 0
tensor_bindings = []
sink_tensors_ready = {
tf_utils.hashable_tensor_or_op(tensor_sink.tensor):
False for tensor_sink in tensor_sinks
}
translate_visitor = _TranslateVisitor()
translate_traverser = nodes.Traverser(translate_visitor)
analyzers_input_signature = {}
graph_analyzer = None
extracted_input_node = nodes.apply_operation(
beam_nodes.ExtractInputForSavedModel,
dataset_key=analyzer_cache._make_flattened_dataset_key(), # pylint: disable=protected-access
label='ExtractInputForSavedModel[FlattenedDataset]')
while not all(sink_tensors_ready.values()):
infix = 'Phase{}'.format(phase)
# Determine which table init ops are ready to run in this phase
# Determine which keys of pending_tensor_replacements are ready to run
# in this phase, based in whether their dependencies are ready.
graph_analyzer = graph_tools.InitializableGraphAnalyzer(
graph, input_signature, list(sink_tensors_ready.items()),
graph_tools.describe_path_as_analyzer_cache_hash)
ready_traverser = nodes.Traverser(_ReadyVisitor(graph_analyzer))
# Now create and apply a SavedModel with all tensors in tensor_bindings
# bound, which outputs all the tensors in the required tensor tuples.
intermediate_output_signature = collections.OrderedDict()
saved_model_future = nodes.apply_operation(
beam_nodes.CreateSavedModel,
*tensor_bindings,
table_initializers=tuple(graph_analyzer.ready_table_initializers),
output_signature=intermediate_output_signature,
label='CreateSavedModelForAnalyzerInputs[{}]'.format(infix))
extracted_values_dict = nodes.apply_operation(
beam_nodes.ApplySavedModel,
saved_model_future,
extracted_input_node,
phase=phase,
label='ApplySavedModel[{}]'.format(infix))
translate_visitor.phase = phase
translate_visitor.intermediate_output_signature = (
intermediate_output_signature)
translate_visitor.extracted_values_dict = extracted_values_dict
for tensor, value_node, is_asset_filepath in tensor_sinks:
hashable_tensor = tf_utils.hashable_tensor_or_op(tensor)
# Don't compute a binding/sink/replacement that's already been computed
if sink_tensors_ready[hashable_tensor]:
continue
if not ready_traverser.visit_value_node(value_node):
continue
translated_value_node = translate_traverser.visit_value_node(value_node)
name = _tensor_name(tensor)
tensor_bindings.append(
nodes.apply_operation(
beam_nodes.CreateTensorBinding,
translated_value_node,
tensor_name=str(tensor.name),
dtype_enum=tensor.dtype.as_datatype_enum,
is_asset_filepath=is_asset_filepath,
label=analyzer_nodes.sanitize_label(
'CreateTensorBinding[{}]'.format(name))))
sink_tensors_ready[hashable_tensor] = True
analyzers_input_signature.update(intermediate_output_signature)
phase += 1
# We need to make sure that the representation of this output_signature is
# deterministic.
output_signature = collections.OrderedDict(
sorted(output_signature.items(), key=lambda t: t[0]))
# TODO(KesterTong): check all table initializers are ready, check all output
# tensors are ready.
saved_model_future = nodes.apply_operation(
beam_nodes.CreateSavedModel,
*tensor_bindings,
table_initializers=tuple(
graph.get_collection(tf.compat.v1.GraphKeys.TABLE_INITIALIZERS)),
output_signature=output_signature,
label='CreateSavedModel')
tensor_keys_to_paths = {
tensor_key:
graph_analyzer.get_unique_path(analyzers_input_signature[tensor_key])
for tensor_key in analyzers_input_signature
}
(optimized_saved_model_future,
output_cache_value_nodes) = _perform_cache_optimization(
saved_model_future, dataset_keys, tensor_keys_to_paths, cache_dict)
(optimized_saved_model_future, output_cache_value_nodes) = (
combiner_packing_util.perform_combiner_packing_optimization(
optimized_saved_model_future, output_cache_value_nodes, phase))
global _ANALYSIS_GRAPH
_ANALYSIS_GRAPH = optimized_saved_model_future
return optimized_saved_model_future, output_cache_value_nodes
| 41.16847
| 99
| 0.722631
|
165d5862a29e79da5dc40f86ec88887da6db3026
| 4,980
|
py
|
Python
|
sphinx/util/docutils.py
|
merwok-forks/sphinx
|
b7cada236f765003a73ab5dca48f975d54c0c298
|
[
"BSD-2-Clause"
] | null | null | null |
sphinx/util/docutils.py
|
merwok-forks/sphinx
|
b7cada236f765003a73ab5dca48f975d54c0c298
|
[
"BSD-2-Clause"
] | null | null | null |
sphinx/util/docutils.py
|
merwok-forks/sphinx
|
b7cada236f765003a73ab5dca48f975d54c0c298
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
sphinx.util.docutils
~~~~~~~~~~~~~~~~~~~~
Utility functions for docutils.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import absolute_import
import re
from copy import copy
from contextlib import contextmanager
import docutils
from docutils.utils import Reporter
from docutils.parsers.rst import directives, roles
from sphinx.util import logging
logger = logging.getLogger(__name__)
report_re = re.compile('^(.+?:\\d+): \\((DEBUG|INFO|WARNING|ERROR|SEVERE)/(\\d+)?\\) '
'(.+?)\n?$')
if False:
# For type annotation
from typing import Any, Callable, Iterator, List, Tuple # NOQA
from docutils import nodes # NOQA
from sphinx.environment import BuildEnvironment # NOQA
__version_info__ = tuple(map(int, docutils.__version__.split('.')))
@contextmanager
def docutils_namespace():
# type: () -> Iterator[None]
"""Create namespace for reST parsers."""
try:
_directives = copy(directives._directives)
_roles = copy(roles._roles)
yield
finally:
directives._directives = _directives
roles._roles = _roles
class ElementLookupError(Exception):
pass
class sphinx_domains(object):
"""Monkey-patch directive and role dispatch, so that domain-specific
markup takes precedence.
"""
def __init__(self, env):
# type: (BuildEnvironment) -> None
self.env = env
self.directive_func = None # type: Callable
self.roles_func = None # type: Callable
def __enter__(self):
# type: () -> None
self.enable()
def __exit__(self, type, value, traceback):
# type: (unicode, unicode, unicode) -> None
self.disable()
def enable(self):
# type: () -> None
self.directive_func = directives.directive
self.role_func = roles.role
directives.directive = self.lookup_directive
roles.role = self.lookup_role
def disable(self):
# type: () -> None
directives.directive = self.directive_func
roles.role = self.role_func
def lookup_domain_element(self, type, name):
# type: (unicode, unicode) -> Tuple[Any, List]
"""Lookup a markup element (directive or role), given its name which can
be a full name (with domain).
"""
name = name.lower()
# explicit domain given?
if ':' in name:
domain_name, name = name.split(':', 1)
if domain_name in self.env.domains:
domain = self.env.get_domain(domain_name)
element = getattr(domain, type)(name)
if element is not None:
return element, []
# else look in the default domain
else:
def_domain = self.env.temp_data.get('default_domain')
if def_domain is not None:
element = getattr(def_domain, type)(name)
if element is not None:
return element, []
# always look in the std domain
element = getattr(self.env.get_domain('std'), type)(name)
if element is not None:
return element, []
raise ElementLookupError
def lookup_directive(self, name, lang_module, document):
# type: (unicode, unicode, nodes.document) -> Tuple[Any, List]
try:
return self.lookup_domain_element('directive', name)
except ElementLookupError:
return self.directive_func(name, lang_module, document)
def lookup_role(self, name, lang_module, lineno, reporter):
# type: (unicode, unicode, int, Any) -> Tuple[Any, List]
try:
return self.lookup_domain_element('role', name)
except ElementLookupError:
return self.role_func(name, lang_module, lineno, reporter)
class WarningStream(object):
def write(self, text):
# type: (unicode) -> None
matched = report_re.search(text) # type: ignore
if not matched:
logger.warning(text.rstrip("\r\n"))
else:
location, type, level, message = matched.groups()
logger.log(type, message, location=location)
class LoggingReporter(Reporter):
def __init__(self, source, report_level, halt_level,
debug=False, error_handler='backslashreplace'):
# type: (unicode, int, int, bool, unicode) -> None
stream = WarningStream()
Reporter.__init__(self, source, report_level, halt_level,
stream, debug, error_handler=error_handler)
def set_conditions(self, category, report_level, halt_level, debug=False):
# type: (unicode, int, int, bool) -> None
Reporter.set_conditions(self, category, report_level, halt_level, debug=debug)
def is_html5_writer_available():
# type: () -> bool
return __version_info__ > (0, 13, 0)
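def _sketch_usage(env, filename='index.rst'):
    # type: (BuildEnvironment, unicode) -> None
    """Illustrative sketch added for this edit; not part of the original module.

    Combines the helpers above: a rollback namespace for docutils registrations,
    domain-aware directive/role lookup, and a reporter that forwards docutils
    messages to the Sphinx logger.
    """
    with docutils_namespace():
        with sphinx_domains(env):
            reporter = LoggingReporter(filename, Reporter.WARNING_LEVEL,
                                       Reporter.SEVERE_LEVEL)
            reporter.set_conditions('', Reporter.WARNING_LEVEL, Reporter.SEVERE_LEVEL)
            # a docutils parser would be driven here, using `reporter` for messages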
| 31.719745
| 86
| 0.618675
|
015f4fc429ef6921c158f8eef83bb97eafcb18f5
| 598
|
py
|
Python
|
jax/version.py
|
dirmeier/jax
|
9ba28d263479ed5b9cada97bf73aec92ccc69bc6
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
jax/version.py
|
dirmeier/jax
|
9ba28d263479ed5b9cada97bf73aec92ccc69bc6
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
jax/version.py
|
dirmeier/jax
|
9ba28d263479ed5b9cada97bf73aec92ccc69bc6
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-07-17T18:17:31.000Z
|
2020-07-17T18:17:31.000Z
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "0.2.4"
| 37.375
| 74
| 0.757525
|
6855aa245e370b477218d07cd03f449a26758e7c
| 396
|
py
|
Python
|
myUniSystem/venv/Lib/site-packages/pkg_resources/py2_warn.py
|
LukasKaziliunas/uniSystemDemo
|
11e8c19e6d2bd08eb0449e229dbaa2a0300d8263
|
[
"MIT"
] | 1,738
|
2017-09-21T10:59:12.000Z
|
2022-03-31T21:05:46.000Z
|
myUniSystem/venv/Lib/site-packages/pkg_resources/py2_warn.py
|
LukasKaziliunas/uniSystemDemo
|
11e8c19e6d2bd08eb0449e229dbaa2a0300d8263
|
[
"MIT"
] | 427
|
2017-09-29T22:54:36.000Z
|
2022-02-15T19:26:50.000Z
|
myUniSystem/venv/Lib/site-packages/pkg_resources/py2_warn.py
|
LukasKaziliunas/uniSystemDemo
|
11e8c19e6d2bd08eb0449e229dbaa2a0300d8263
|
[
"MIT"
] | 671
|
2017-09-21T08:04:01.000Z
|
2022-03-29T14:30:07.000Z
|
import sys
import warnings
import textwrap
msg = textwrap.dedent("""
Encountered a version of Setuptools that no longer supports
this version of Python. Please head to
https://bit.ly/setuptools-py2-sunset for support.
""")
pre = "Setuptools no longer works on Python 2\n"
if sys.version_info < (3,):
warnings.warn(pre + "*" * 60 + msg + "*" * 60)
raise SystemExit(32)
| 23.294118
| 63
| 0.676768
|
61dad4fff18ff29d771c46bc16244f38d6f16312
| 1,781
|
py
|
Python
|
fastrf/app/routes/noise_figure.py
|
TheDubliner/fastrf
|
31761f58ab588cf441eaf200fb3862beeef625b5
|
[
"MIT"
] | 4
|
2020-05-29T01:19:09.000Z
|
2021-03-16T12:05:26.000Z
|
fastrf/app/routes/noise_figure.py
|
TheDubliner/fastrf
|
31761f58ab588cf441eaf200fb3862beeef625b5
|
[
"MIT"
] | 166
|
2020-04-16T03:34:53.000Z
|
2022-01-03T16:55:14.000Z
|
fastrf/app/routes/noise_figure.py
|
TheDubliner/fastrf
|
31761f58ab588cf441eaf200fb3862beeef625b5
|
[
"MIT"
] | 2
|
2021-04-25T23:55:56.000Z
|
2022-01-10T13:06:19.000Z
|
import uuid
from typing import Dict, List, Union
from fastapi import APIRouter
from fastrf.models.noise_figure import NoiseFigure
router = APIRouter()
class NoiseFigureSpec(NoiseFigure):
id: str
NOISE_FIGURE_SPECS = [
NoiseFigureSpec(id=uuid.uuid4().hex, value=1.5),
NoiseFigureSpec(id=uuid.uuid4().hex, value=1.8),
NoiseFigureSpec(id=uuid.uuid4().hex, value=2.2),
]
@router.get(
"/noise_figure", tags=["Noise Figure"], response_model=List[NoiseFigureSpec]
)
def get_all_noise_figure_specs() -> List[Dict[str, Union[str, object]]]:
# print(NOISE_FIGURE_SPECS)
return [spec.dict(exclude={"unit"}) for spec in NOISE_FIGURE_SPECS]
@router.post("/noise_figure", tags=["Noise Figure"])
async def create_noise_figure_spec(request: NoiseFigure) -> None:
new_spec = NoiseFigureSpec(id=uuid.uuid4().hex, value=request.value)
NOISE_FIGURE_SPECS.append(new_spec)
return
def remove_noise_figure_spec(noise_figure_spec_id: str) -> bool:
# print(NOISE_FIGURE_SPECS)
for spec in NOISE_FIGURE_SPECS:
if spec.id == noise_figure_spec_id:
NOISE_FIGURE_SPECS.remove(spec)
return True
return False
@router.put(
"/noise_figure/{noise_figure_id}",
tags=["Noise Figure"],
)
def edit_single_noise_figure_spec(
noise_figure: NoiseFigure, noise_figure_id: str
) -> None:
# Delete old entry
remove_noise_figure_spec(noise_figure_id)
# Add updated entry
NOISE_FIGURE_SPECS.append(
NoiseFigureSpec(id=noise_figure_id, value=noise_figure.value)
)
return
@router.delete(
"/noise_figure/{noise_figure_id}",
tags=["Noise Figure"],
)
def remove_single_noise_figure_spec(noise_figure_id: str) -> None:
remove_noise_figure_spec(noise_figure_id)
return None
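def _sketch_client_usage() -> None:
    """Illustrative sketch added for this edit; not part of the original module.

    Exercises the routes above with FastAPI's TestClient. The app wiring and the
    assumption that ``NoiseFigure`` only requires ``value`` are editorial, not fastrf's.
    """
    from fastapi import FastAPI
    from fastapi.testclient import TestClient

    app = FastAPI()
    app.include_router(router)
    client = TestClient(app)

    client.post("/noise_figure", json={"value": 2.5})            # create a spec
    specs = client.get("/noise_figure").json()                   # list specs
    spec_id = specs[-1]["id"]
    client.put(f"/noise_figure/{spec_id}", json={"value": 1.2})  # update it
    client.delete(f"/noise_figure/{spec_id}")                    # remove it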
| 25.811594
| 80
| 0.723189
|
ff258d6504dc339b04fbc383d884f9c3f3efe54c
| 634
|
py
|
Python
|
alibaba/items.py
|
PandorAstrum/alibaba_skrapy
|
1548a354785578be1850015eeb439c368f5be4f2
|
[
"MIT"
] | null | null | null |
alibaba/items.py
|
PandorAstrum/alibaba_skrapy
|
1548a354785578be1850015eeb439c368f5be4f2
|
[
"MIT"
] | null | null | null |
alibaba/items.py
|
PandorAstrum/alibaba_skrapy
|
1548a354785578be1850015eeb439c368f5be4f2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class AlibabaItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
title = scrapy.Field()
price = scrapy.Field()
min_order = scrapy.Field()
short_description = scrapy.Field()
supply_ability = scrapy.Field()
packaging_delivery = scrapy.Field()
description = scrapy.Field()
url = scrapy.Field()
images_links = scrapy.Field()
category = scrapy.Field()
sub_category = scrapy.Field()
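# Illustrative sketch (added for this edit; not part of the original module): a spider's
# parse callback could populate the item as below. The CSS selectors are hypothetical.
#
#   def parse(self, response):
#       item = AlibabaItem()
#       item["title"] = response.css("h1::text").get()
#       item["price"] = response.css(".price::text").get()
#       item["url"] = response.url
#       yield item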
| 25.36
| 52
| 0.675079
|
c7a34d32e87338a982188ffcbc8ccf8c55c44274
| 9,133
|
py
|
Python
|
tests/ut/python/dataset/test_dataset_numpy_slices.py
|
kungfu-team/mindspore-bert
|
71501cf52ae01db9d6a73fb64bcfe68a6509dc32
|
[
"Apache-2.0"
] | null | null | null |
tests/ut/python/dataset/test_dataset_numpy_slices.py
|
kungfu-team/mindspore-bert
|
71501cf52ae01db9d6a73fb64bcfe68a6509dc32
|
[
"Apache-2.0"
] | null | null | null |
tests/ut/python/dataset/test_dataset_numpy_slices.py
|
kungfu-team/mindspore-bert
|
71501cf52ae01db9d6a73fb64bcfe68a6509dc32
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import sys
import pytest
import numpy as np
import pandas as pd
import mindspore.dataset as de
from mindspore import log as logger
import mindspore.dataset.vision.c_transforms as vision
def test_numpy_slices_list_1():
logger.info("Test Slicing a 1D list.")
np_data = [1, 2, 3]
ds = de.NumpySlicesDataset(np_data, shuffle=False)
for i, data in enumerate(ds):
assert data[0].asnumpy() == np_data[i]
def test_numpy_slices_list_2():
logger.info("Test Slicing a 2D list into 1D list.")
np_data = [[1, 2], [3, 4]]
ds = de.NumpySlicesDataset(np_data, column_names=["col1"], shuffle=False)
for i, data in enumerate(ds):
assert np.equal(data[0].asnumpy(), np_data[i]).all()
def test_numpy_slices_list_3():
logger.info("Test Slicing list in the first dimension.")
np_data = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
ds = de.NumpySlicesDataset(np_data, column_names=["col1"], shuffle=False)
for i, data in enumerate(ds):
assert np.equal(data[0].asnumpy(), np_data[i]).all()
def test_numpy_slices_list_append():
logger.info("Test reading data of image list.")
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
resize_height, resize_width = 2, 2
data1 = de.TFRecordDataset(DATA_DIR)
resize_op = vision.Resize((resize_height, resize_width))
data1 = data1.map(operations=[vision.Decode(True), resize_op], input_columns=["image"])
res = []
for data in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
res.append(data["image"])
ds = de.NumpySlicesDataset(res, column_names=["col1"], shuffle=False)
for i, data in enumerate(ds.create_tuple_iterator(output_numpy=True)):
assert np.equal(data, res[i]).all()
def test_numpy_slices_dict_1():
logger.info("Test Dictionary structure data.")
np_data = {"a": [1, 2], "b": [3, 4]}
ds = de.NumpySlicesDataset(np_data, shuffle=False)
res = [[1, 3], [2, 4]]
for i, data in enumerate(ds):
assert data[0].asnumpy() == res[i][0]
assert data[1].asnumpy() == res[i][1]
def test_numpy_slices_tuple_1():
logger.info("Test slicing a list of tuple.")
np_data = [([1, 2], [3, 4]), ([11, 12], [13, 14]), ([21, 22], [23, 24])]
ds = de.NumpySlicesDataset(np_data, shuffle=False)
for i, data in enumerate(ds.create_tuple_iterator(output_numpy=True)):
assert np.equal(data, np_data[i]).all()
assert sum([1 for _ in ds]) == 3
def test_numpy_slices_tuple_2():
logger.info("Test slicing a tuple of list.")
np_data = ([1, 2], [3, 4], [5, 6])
expected = [[1, 3, 5], [2, 4, 6]]
ds = de.NumpySlicesDataset(np_data, shuffle=False)
for i, data in enumerate(ds.create_tuple_iterator(output_numpy=True)):
assert np.equal(data, expected[i]).all()
assert sum([1 for _ in ds]) == 2
def test_numpy_slices_tuple_3():
logger.info("Test reading different dimension of tuple data.")
features, labels = np.random.sample((5, 2)), np.random.sample((5, 1))
data = (features, labels)
ds = de.NumpySlicesDataset(data, column_names=["col1", "col2"], shuffle=False)
for i, data in enumerate(ds):
assert np.equal(data[0].asnumpy(), features[i]).all()
assert data[1].asnumpy() == labels[i]
def test_numpy_slices_csv_value():
logger.info("Test loading value of csv file.")
csv_file = "../data/dataset/testNumpySlicesDataset/heart.csv"
df = pd.read_csv(csv_file)
target = df.pop("target")
df.pop("state")
np_data = (df.values, target.values)
ds = de.NumpySlicesDataset(np_data, column_names=["col1", "col2"], shuffle=False)
for i, data in enumerate(ds):
assert np.equal(np_data[0][i], data[0].asnumpy()).all()
assert np.equal(np_data[1][i], data[1].asnumpy()).all()
def test_numpy_slices_csv_dict():
logger.info("Test loading csv file as dict.")
csv_file = "../data/dataset/testNumpySlicesDataset/heart.csv"
df = pd.read_csv(csv_file)
df.pop("state")
res = df.values
ds = de.NumpySlicesDataset(dict(df), shuffle=False)
for i, data in enumerate(ds.create_tuple_iterator(output_numpy=True)):
assert np.equal(data, res[i]).all()
def test_numpy_slices_num_samplers():
logger.info("Test num_samplers.")
np_data = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]
ds = de.NumpySlicesDataset(np_data, shuffle=False, num_samples=2)
for i, data in enumerate(ds):
assert np.equal(data[0].asnumpy(), np_data[i]).all()
assert sum([1 for _ in ds]) == 2
def test_numpy_slices_distributed_sampler():
logger.info("Test distributed sampler.")
np_data = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]
ds = de.NumpySlicesDataset(np_data, shuffle=False, shard_id=0, num_shards=4)
for i, data in enumerate(ds):
assert np.equal(data[0].asnumpy(), np_data[i * 4]).all()
assert sum([1 for _ in ds]) == 2
def test_numpy_slices_distributed_shard_limit():
logger.info("Test Slicing a 1D list.")
np_data = [1, 2, 3]
num = sys.maxsize
with pytest.raises(ValueError) as err:
de.NumpySlicesDataset(np_data, num_shards=num, shard_id=0, shuffle=False)
assert "Input num_shards is not within the required interval of [1, 2147483647]." in str(err.value)
def test_numpy_slices_distributed_zero_shard():
logger.info("Test Slicing a 1D list.")
np_data = [1, 2, 3]
with pytest.raises(ValueError) as err:
de.NumpySlicesDataset(np_data, num_shards=0, shard_id=0, shuffle=False)
assert "Input num_shards is not within the required interval of [1, 2147483647]." in str(err.value)
def test_numpy_slices_sequential_sampler():
logger.info("Test numpy_slices_dataset with SequentialSampler and repeat.")
np_data = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]
ds = de.NumpySlicesDataset(np_data, sampler=de.SequentialSampler()).repeat(2)
for i, data in enumerate(ds):
assert np.equal(data[0].asnumpy(), np_data[i % 8]).all()
def test_numpy_slices_invalid_column_names_type():
logger.info("Test incorrect column_names input")
np_data = [1, 2, 3]
with pytest.raises(TypeError) as err:
de.NumpySlicesDataset(np_data, column_names=[1], shuffle=False)
assert "Argument column_names[0] with value 1 is not of type (<class 'str'>,)." in str(err.value)
def test_numpy_slices_invalid_column_names_string():
logger.info("Test incorrect column_names input")
np_data = [1, 2, 3]
with pytest.raises(ValueError) as err:
de.NumpySlicesDataset(np_data, column_names=[""], shuffle=False)
assert "column_names[0] should not be empty" in str(err.value)
def test_numpy_slices_invalid_empty_column_names():
logger.info("Test incorrect column_names input")
np_data = [1, 2, 3]
with pytest.raises(ValueError) as err:
de.NumpySlicesDataset(np_data, column_names=[], shuffle=False)
assert "column_names should not be empty" in str(err.value)
def test_numpy_slices_invalid_empty_data_column():
logger.info("Test incorrect column_names input")
np_data = []
with pytest.raises(ValueError) as err:
de.NumpySlicesDataset(np_data, shuffle=False)
assert "Argument data cannot be empty" in str(err.value)
def test_numpy_slice_empty_output_shape():
logger.info("running test_numpy_slice_empty_output_shape")
dataset = de.NumpySlicesDataset([[[1, 2], [3, 4]]], column_names=["col1"])
dataset = dataset.batch(batch_size=3, drop_remainder=True)
assert dataset.output_shapes() == []
if __name__ == "__main__":
test_numpy_slices_list_1()
test_numpy_slices_list_2()
test_numpy_slices_list_3()
test_numpy_slices_list_append()
test_numpy_slices_dict_1()
test_numpy_slices_tuple_1()
test_numpy_slices_tuple_2()
test_numpy_slices_tuple_3()
test_numpy_slices_csv_value()
test_numpy_slices_csv_dict()
test_numpy_slices_num_samplers()
test_numpy_slices_distributed_sampler()
test_numpy_slices_distributed_shard_limit()
test_numpy_slices_distributed_zero_shard()
test_numpy_slices_sequential_sampler()
test_numpy_slices_invalid_column_names_type()
test_numpy_slices_invalid_column_names_string()
test_numpy_slices_invalid_empty_column_names()
test_numpy_slices_invalid_empty_data_column()
test_numpy_slice_empty_output_shape()
| 33.825926
| 103
| 0.68258
|
84e37b1184cbc7120d934f75c429ede25acbc1d5
| 10,785
|
py
|
Python
|
compaction_separation.py
|
seanys/2D-Irregular-Packing-Algorithm
|
cc10edff2bc2631fcbcb47acf7bb3215e5c5023c
|
[
"MIT"
] | 29
|
2020-02-07T06:41:25.000Z
|
2022-03-16T18:04:07.000Z
|
compaction_separation.py
|
seanys/2D-Irregular-Packing-Algorithm
|
cc10edff2bc2631fcbcb47acf7bb3215e5c5023c
|
[
"MIT"
] | 6
|
2020-04-27T01:36:27.000Z
|
2022-01-31T11:59:05.000Z
|
compaction_separation.py
|
seanys/2D-Irregular-Packing-Algorithm
|
cc10edff2bc2631fcbcb47acf7bb3215e5c5023c
|
[
"MIT"
] | 12
|
2020-05-05T05:34:06.000Z
|
2022-03-26T07:32:46.000Z
|
"""
This file implements the Separation algorithm, which removes overlaps,
and Compaction, which compresses the current solution
-----------------------------------
Created on Wed Dec 11, 2020
@author: seanys,prinway
-----------------------------------
We will update this file soon; it
still has some issues.
"""
from tools.geofunc import GeoFunc
from tools.show import PltFunc
from tools.lp import sovleLP,problem
from tools.lp_assistant import LPAssistant
import pandas as pd
import json
from shapely.geometry import Polygon,Point,mapping,LineString
from interval import Interval
import copy
import random
import math
class LPFunction(object):
'''
Reference: Solving Irregular Strip Packing problems by hybridising simulated annealing and linear programming
Purpose: implements the Compaction and Separation routines
'''
def __init__(self,polys,poly_status,width,length,_type):
self._type=_type
self.all_nfp=pd.read_csv("/Users/sean/Documents/Projects/Data/fu_simplify.csv")
self.poly_status=copy.deepcopy(poly_status)
self.polys=copy.deepcopy(polys)
self.WIDTH=width
# print("初始高度:",LPAssistant.getLength(polys))
# self.LENGTH=LPAssistant.getLength(polys)
# print(LPAssistant.getLength(polys))
self.LENGTH=length
self.DISTANCE=400
self.main()
def main(self):
# Initialize all parameters; the decision variables are z, x1, y1, x2, y2, ...
N=len(self.polys)
if self._type=="separation":
a,b,c=[[0]*(2*N+N*N) for _ in range(8*N+N*N)],[0 for _ in range(8*N+N*N)],[0 for _ in range(N*2+N*N)]
else:
# Compaction has 2N+1 variables in total (x1..xn, y1..yn, z), plus movement-distance and boundary limits and N*N pairwise constraints
a,b,c=[[0]*(2*N+1) for _ in range(9*N+N*N)],[0 for _ in range(9*N+N*N)],[0 for _ in range(N*2+1)]
# Get the constant limits and the polygon constraints
self.getConstants()
self.getTargetEdges()
# Limit the total movement distance OK
for i in range(N):
row=i*4
a[row+0][i*2+0],b[row+0]=-1,-self.DISTANCE-self.Xi[i] # -xi>=-DISTANCE-Xi
a[row+1][i*2+1],b[row+1]=-1,-self.DISTANCE-self.Yi[i] # -yi>=-DISTANCE-Yi
a[row+2][i*2+0],b[row+2]= 1,-self.DISTANCE+self.Xi[i] # xi>=-DISTANCE+Xi
a[row+3][i*2+1],b[row+3]= 1,-self.DISTANCE+self.Yi[i] # yi>=-DISTANCE+Yi
# Keep shapes from moving outside the boundary OK
for i in range(N):
row=4*N+i*4
a[row+0][i*2+0],b[row+0]= 1,self.W_[i] # xi>=Wi*
a[row+1][i*2+1],b[row+1]= 1,self.H[i] # yi>=Hi
a[row+2][i*2+0],b[row+2]=-1,self.W[i]-self.LENGTH # -xi>=Wi-Length
a[row+3][i*2+1],b[row+3]=-1,-self.WIDTH # -yi>=-Width
# Constrain against overlaps (still slightly problematic)
for i in range(N):
for j in range(N):
row=8*N+i*N+j
if self._type=="separation":
if i!=j:
a[row][i*2+0],a[row][i*2+1],a[row][j*2+0],a[row][j*2+1],b[row]=self.getOverlapConstrain(i,j)
a[row][2*N+i*N+j],c[2*N+i*N+j]=1,1 # objective function update
else:
a[row][2*N+i*N+j],c[2*N+i*N+j],b[row]=1,1,0
else:
if i!=j:
a[row][i*2+0],a[row][i*2+1],a[row][j*2+0],a[row][j*2+1],b[row]=self.getOverlapConstrain(i,j)
if self._type=="compaction":
# Greater than every shape's position plus its extent: z-xi>=w OK
for i in range(N):
row=8*N+N*N+i
a[row][2*N],a[row][i*2],b[row]=1,-1,self.W[i]
c[2*N]=1
# Solve for the result
result,self.final_value=sovleLP(a,b,c,_type=self._type)
# Convert the result into coordinates; the Variable output order is [a00,..,ann,x1,..,xn,y1,..,yn]
placement_points=[]
if self._type=="separation":
for i in range(N*N,N*N+N):
placement_points.append([result[i],result[i+N]])
else:
for i in range(len(result)//2):
placement_points.append([result[i],result[i+N]])
# Get the final result
self.getResult(placement_points)
# Update the final result and all placements
def getResult(self,placement_points):
self.final_polys,self.final_poly_status=[],copy.deepcopy(self.poly_status)
for i,poly in enumerate(self.polys):
self.final_polys.append(GeoFunc.getSlide(poly,placement_points[i][0]-self.Xi[i],placement_points[i][1]-self.Yi[i]))
self.final_poly_status[i][1]=[placement_points[i][0],placement_points[i][1]]
# for i in range(len(self.polys)):
# PltFunc.addPolygon(self.final_polys[i])
# PltFunc.addPolygonColor(self.polys[i]) # the initial result
# PltFunc.showPlt(width=1500,height=1500)
def getOverlapConstrain(self,i,j):
# Initialize parameters
a_xi,a_yi,a_xj,a_yj,b=0,0,0,0,0
# Get the reference-point coordinates of the stationary polygon
Xi,Yi=self.Xi[i],self.Yi[i]
# Get the reference edge
edge=self.target_edges[i][j]
X1,Y1,X2,Y2=edge[0][0],edge[0][1],edge[1][0],edge[1][1]
'''
Non-overlapping case
Eq. 1: (y2-y1)*xj+(x1-x2)*yj+x2*y1-x1*y2>0 (distance on the right side is positive)
Eq. 2: (Y2-Y1)*xj+(X1-X2)*yj+X2*Y1-X1*Y2+(xi-Xi)*(Y1-Y2)+(yi-Yi)*(X2-X1)>0
Eq. 3: (Y2-Y1)*xj+(X1-X2)*yj+X2*Y1-X1*Y2+(Y1-Y2)*xi+(X2-X1)*yi-Xi*(Y1-Y2)-Yi*(X2-X1)>0
Eq. 4: (Y1-Y2)*xi+(X2-X1)*yi+(Y2-Y1)*xj+(X1-X2)*yj>-X2*Y1+X1*Y2+Xi*(Y1-Y2)+Yi*(X2-X1)
Overlapping case
Eq. 1: -((y2-y1)*xj+(x1-x2)*yj+x2*y1-x1*y2)-a_ij<0 (distance on the left side is negative)
Eq. 2: (y2-y1)*xj+(x1-x2)*yj+x2*y1-x1*y2+a_ij>0
Eq. 3: (Y1-Y2)*xi+(X2-X1)*yi+(Y2-Y1)*xj+(X1-X2)*yj+a_ij>-X2*Y1+X1*Y2+Xi*(Y1-Y2)+Yi*(X2-X1) (distance on the left side is negative)
Summary: in the overlapping case the value is negative, so only a single a_ij term is added and its coefficient is 1
'''
a_xi,a_yi,a_xj,a_yj,b=Y1-Y2,X2-X1,Y2-Y1,X1-X2,-X2*Y1+X1*Y2+Xi*(Y1-Y2)+Yi*(X2-X1)
return a_xi,a_yi,a_xj,a_yj,b
# Get all the constant limits
def getConstants(self):
self.W=[] # distance from the topmost point to the right side
self.W_=[] # distance from the topmost point to the left side
self.H=[] # height of the shape (topmost point)
self.Xi=[] # initial X position
self.Yi=[] # initial Y position
self.PLACEMENTPOINT=[]
for i,poly in enumerate(self.polys):
left,bottom,right,top=LPAssistant.getBoundPoint(poly)
self.PLACEMENTPOINT.append([top[0],top[1]])
self.Xi.append(top[0])
self.Yi.append(top[1])
self.W.append(right[0]-top[0])
self.W_.append(top[0]-left[0])
self.H.append(top[1]-bottom[1])
# print("W:",self.W)
# print("W_:",self.W_)
# print("H:",self.H)
# print("Xi:",self.Xi)
# print("Yi:",self.Yi)
# print("PLACEMENTPOINT:",self.PLACEMENTPOINT)
# print("Length:",self.LENGTH)
# Get the relevant edge relation for every pair of polygons
def getTargetEdges(self):
self.target_edges=[[0]*len(self.polys) for _ in range(len(self.polys))]
for i in range(len(self.polys)):
for j in range(len(self.polys)):
if i==j:
continue
nfp=self.getNFP(i,j)
nfp_edges=GeoFunc.getPolyEdges(nfp)
point=self.PLACEMENTPOINT[j]
if Polygon(nfp).contains(Point(point)) and self._type=="separation":
# If the point is contained and we are in separation mode, find the closest edge
min_distance=99999999999999
for edge in nfp_edges:
left_distance=-self.getRightDistance(edge,point)
if left_distance<=min_distance:
min_distance=left_distance
self.target_edges[i][j]=copy.deepcopy(edge)
else:
# If not contained, or in compaction mode, choose the farthest edge
max_distance=-0.00001
for edge in nfp_edges:
right_distance=self.getRightDistance(edge,point)
if right_distance>=max_distance:
max_distance=right_distance
self.target_edges[i][j]=copy.deepcopy(edge)
@staticmethod
def getRightDistance(edge,point):
A=edge[1][1]-edge[0][1]
B=edge[0][0]-edge[1][0]
C=edge[1][0]*edge[0][1]-edge[0][0]*edge[1][1]
D=A*point[0]+B*point[1]+C
dis=(math.fabs(A*point[0]+B*point[1]+C))/(math.pow(A*A+B*B,0.5))
if D>0:
return dis # right side: return positive
elif D==0:
return 0 # on the line: return 0
else:
return -dis # left side: return negative
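# Worked check (added for illustration): for the edge ((0,0),(1,0)), i.e. walking along
# the +x axis, A=0, B=-1, C=0, so D=-y. The point (0.5,-1) gives D=1>0 and the method
# returns +1.0 (right side); (0.5,1) gives D=-1<0 and returns -1.0 (left side).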
def getNFP(self,j,i):
# j is the fixed polygon, i is the moving polygon
row=j*192+i*16+self.poly_status[j][2]*4+self.poly_status[i][2]
bottom_pt=LPAssistant.getBottomPoint(self.polys[j])
delta_x,delta_y=bottom_pt[0],bottom_pt[1]
nfp=GeoFunc.getSlide(json.loads(self.all_nfp["nfp"][row]),delta_x,delta_y)
return nfp
def searchForBest(polys,poly_status,width,length):
# Record the best result
best_poly_status,best_polys=[],[]
cur_length=length
# Loop to search for the best placement (polys do not need to change)
while True:
print("允许高度:",cur_length)
result_polys,result_poly_status,result_value=searchOneLength(polys,poly_status,width,cur_length,"separation")
if result_value==0:
best_polys=result_polys
break
cur_length=cur_length+4
print("开始准确检索")
# Precisely search for the best result
for i in range(3):
cur_length=cur_length-1
print("允许高度:",cur_length)
result_polys,result_poly_status,result_value=searchOneLength(polys,poly_status,width,cur_length,"separation")
if result_value!=0:
break
best_polys=result_polys
best_length=cur_length+1
print("Separation最终高度:",best_length)
# Run Compaction to update the state; poly_status only needs to change in this final pass
best_polys,best_poly_status,best_length=searchOneLength(best_polys,poly_status,width,best_length,"compaction")
print("最终高度:",best_length)
return best_polys,poly_status,best_length
def searchOneLength(polys,poly_status,width,length,_type):
'''
Iterate at a fixed length until the result no longer changes
Separation: check whether a given length can reach zero overlap; if it cannot, return the final polys, status and remaining overlap
Compaction: for a given length, return the final polys, status and computed length
'''
input_polys=copy.deepcopy(polys) # shapes fed in on each iteration
last_value=99999999999
final_polys,final_poly_status=[],[]
while True:
res=LPFunction(input_polys,poly_status,width,length,_type)
# If there is no overlap, or the value equals the previous iteration
if res.final_value==0 or abs(res.final_value-last_value)<0.001:
last_value=res.final_value
final_polys=copy.deepcopy(res.final_polys)
final_poly_status=copy.deepcopy(res.final_poly_status)
break
# If it changed, update the state and try again
input_polys=copy.deepcopy(res.final_polys)
last_value=res.final_value
return final_polys,final_poly_status,last_value
if __name__ == "__main__":
blf = pd.read_csv("record/blf.csv")
index = 7
polys,poly_status,width=json.loads(blf["polys"][index]),json.loads(blf["poly_status"][index]),int(blf["width"][index])
searchForBest(polys,poly_status,width,628.1533587455999)
# LPFunction(polys,poly_status,width,628.1533587455999,"compaction")
# Compaction(polys,poly_status,width)
| 37.70979
| 127
| 0.571164
|
1aaec58aa695f734eb32540375e9156313d28750
| 62,139
|
py
|
Python
|
disco/extensions/upgrade_simulation/upgrades/common_functions.py
|
NREL/disco
|
19afa1c397c6c24e37222f6cbf027eb88833beda
|
[
"BSD-3-Clause"
] | 2
|
2022-03-11T20:04:34.000Z
|
2022-03-14T22:25:29.000Z
|
disco/extensions/upgrade_simulation/upgrades/common_functions.py
|
NREL/disco
|
19afa1c397c6c24e37222f6cbf027eb88833beda
|
[
"BSD-3-Clause"
] | 4
|
2022-03-11T17:48:50.000Z
|
2022-03-17T21:39:47.000Z
|
disco/extensions/upgrade_simulation/upgrades/common_functions.py
|
NREL/disco
|
19afa1c397c6c24e37222f6cbf027eb88833beda
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import ast
import math
import json
import logging
import pathlib
import numpy as np
import pandas as pd
import opendssdirect as dss
from .pydss_parameters import *
from jade.utils.timing_utils import track_timing, Timer
from disco import timer_stats_collector
from disco.enums import LoadMultiplierType
from disco.exceptions import (
OpenDssCompileError,
OpenDssConvergenceError,
UpgradesExternalCatalogRequired,
UpgradesExternalCatalogMissingObjectDefinition,
InvalidOpenDssElementError,
)
logger = logging.getLogger(__name__)
@track_timing(timer_stats_collector)
def reload_dss_circuit(dss_file_list, commands_list=None, **kwargs):
"""This function clears the circuit and loads dss files and commands.
Also solves the circuit and checks for convergence errors
Parameters
----------
dss_file_list
commands_list
Returns
-------
"""
logger.info("-> Reloading OpenDSS circuit")
check_dss_run_command("clear")
if dss_file_list is None:
raise Exception("No OpenDSS files have been passed to be loaded.")
for dss_file in dss_file_list:
logger.info(f"Redirecting {dss_file}.")
check_dss_run_command(f"Redirect {dss_file}")
dc_ac_ratio = kwargs.get('dc_ac_ratio', None)
if dc_ac_ratio is not None:
change_pv_pctpmpp(dc_ac_ratio=dc_ac_ratio)
if commands_list is not None:
logger.info(f"Running {len(commands_list)} dss commands")
for command_string in commands_list:
check_dss_run_command(command_string)
if "new " in command_string.lower():
check_dss_run_command("CalcVoltageBases")
enable_pydss_solve = kwargs.get("enable_pydss_solve", False)
if enable_pydss_solve:
pydss_params = define_initial_pydss_settings(**kwargs)
circuit_solve_and_check(raise_exception=True, **pydss_params)
return pydss_params
else:
circuit_solve_and_check(raise_exception=True)
return kwargs
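# Illustrative call (added for this edit; the file names and settings below are assumptions):
#
#   reload_dss_circuit(["master.dss", "upgrades.dss"],
#                      commands_list=["CalcVoltageBases"],
#                      dc_ac_ratio=1.2, enable_pydss_solve=False)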
def run_selective_master_dss(master_filepath, **kwargs):
"""This function executes master.dss file line by line and ignores some commands that Solve yearly mode,
export or plot data.
Parameters
----------
master_filepath
Returns
-------
"""
run_dir = os.getcwd()
check_dss_run_command("Clear")
# logger.info("-->Redirecting master file:")
# check_dss_run_command(f"Redirect {master_filepath}")
# do this instead of redirect master to ignore some lines (e.g., that solve for the whole year)
os.chdir(os.path.dirname(master_filepath))
logger.debug(master_filepath)
with open(master_filepath, "r") as fr:
tlines = fr.readlines()
for line in tlines:
if ('Solve'.lower() in line.lower()) or ('Export'.lower() in line.lower()) or ('Plot'.lower() in line.lower()):
logger.info(f"Skipping this line: {line}")
continue
else:
check_dss_run_command(f"{line}")
circuit_solve_and_check(raise_exception=True, **kwargs)
os.chdir(run_dir)
return
@track_timing(timer_stats_collector)
def circuit_solve_and_check(raise_exception=False, **kwargs):
"""This function solves the circuit (both OpenDSS and PyDSS-if enabled)
and can raise exception if convergence error occurs
Parameters
----------
raise_exception
kwargs
Returns
-------
"""
calcvoltagebases = kwargs.pop("calcvoltagebases", False)
if calcvoltagebases:
check_dss_run_command("CalcVoltageBases")
dss_pass_flag = dss_solve_and_check(raise_exception=raise_exception)
pass_flag = dss_pass_flag
enable_pydss_solve = kwargs.get("enable_pydss_solve", False)
if enable_pydss_solve: # if pydss solver is also to be used
pydss_pass_flag = pydss_solve_and_check(raise_exception=raise_exception, **kwargs)
pass_flag = dss_pass_flag and pydss_pass_flag
return pass_flag
def dss_solve_and_check(raise_exception=False):
"""This function solves OpenDSS and returns bool flag which shows if it has converged or not.
Parameters
----------
raise_exception
Returns
-------
bool
"""
dss.Solution.Solve()
logger.debug("Solving circuit using OpenDSS")
# check_dss_run_command('CalcVoltageBases')
dss_pass_flag = dss.Solution.Converged()
if not dss_pass_flag:
logger.info(f"OpenDSS Convergence Error")
if raise_exception:
raise OpenDssConvergenceError("OpenDSS solution did not converge")
return dss_pass_flag
def dss_run_command_list(command_list):
for command_string in command_list:
check_dss_run_command(command_string)
return
def write_text_file(string_list, text_file_path):
"""This function writes the string contents of a list to a text file
Parameters
----------
string_list
text_file_path
Returns
-------
"""
pathlib.Path(text_file_path).write_text("\n".join(string_list))
def create_upgraded_master_dss(dss_file_list, upgraded_master_dss_filepath):
"""Function to create master dss with redirects to upgrades dss file.
The redirect paths in this file are relative to the file"""
command_list = []
for filename in dss_file_list:
rel_filename = os.path.relpath(filename, os.path.dirname(upgraded_master_dss_filepath))
command_list.append(f"Redirect {rel_filename}")
return command_list
def create_dataframe_from_nested_dict(user_dict, index_names):
"""This function creates dataframe from a nested dictionary
Parameters
----------
user_dict
index_names
Returns
-------
DataFrame
"""
df = pd.DataFrame.from_dict({(i, j): user_dict[i][j]
for i in user_dict.keys()
for j in user_dict[i].keys()},
orient='index')
df.index.names = index_names
return df.reset_index()
def get_dictionary_of_duplicates(df, subset, index_field):
"""This creates a mapping dictionary of duplicate indices in a dataframe
Parameters
----------
df
subset
index_field
Returns
-------
Dictionary
"""
df.set_index(index_field, inplace=True)
df = df[df.duplicated(keep=False, subset=subset)]
tuple_list = df.groupby(subset).apply(lambda x: tuple(x.index)).tolist()
mapping_dict = {v: tup[0] for tup in tuple_list for v in tup}
return mapping_dict
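# Worked sketch (added for illustration; the data below is made up): with
#   df = pd.DataFrame({"name": ["t1", "t2", "t3"], "kVA": [1500, 1500, 750]})
# get_dictionary_of_duplicates(df, subset=["kVA"], index_field="name") returns
#   {"t1": "t1", "t2": "t1"}
# i.e. every duplicated row maps to the first row sharing the same subset values,
# so "t2" can reuse "t1"'s definition. Note the function mutates df via set_index.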
def get_scenario_name(enable_pydss_solve, pydss_volt_var_model):
"""This function determines the controller scenario
Parameters
----------
enable_pydss_solve : bool
pydss_volt_var_model
Returns
-------
str
"""
if enable_pydss_solve:
# scenario = pydss_volt_var_model.control1 # TODO can read in name instead
scenario = "control_mode"
else:
scenario = "pf1"
return scenario
@track_timing(timer_stats_collector)
def change_pv_pctpmpp(dc_ac_ratio):
"""This function changes PV system pctpmpp based on passed dc-ac ratio
newpctpmpp = oldpctpmpp / dc_ac_ratio
"""
dss.PVsystems.First()
for i in range(dss.PVsystems.Count()):
newpctpmpp = int(dss.Properties.Value('%Pmpp')) / dc_ac_ratio
command_string = f"Edit PVSystem.{dss.PVsystems.Name()} %Pmpp={newpctpmpp}"
check_dss_run_command(command_string)
dss.PVsystems.Next()
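# Worked example (added for illustration): a PV system defined with %Pmpp=100 and
# dc_ac_ratio=1.25 is edited to %Pmpp=80.0 (100 / 1.25).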
def get_feeder_stats(dss):
"""This function gives metadata stats for a feeder
Parameters
----------
dss
Returns
-------
dict
"""
load_kw = 0
load_kVABase = 0
pv_kw = 0
pv_kVARated = 0
load_df = dss.utils.loads_to_dataframe()
if len(load_df) > 0:
load_kw = load_df['kW'].sum()
load_kVABase = load_df['kVABase'].sum()
pv_df = dss.utils.pvsystems_to_dataframe()
if len(pv_df) > 0:
pv_kw = pv_df['kW'].sum()
pv_kVARated = pv_df['kVARated'].sum()
data_dict = {
'total_load(kVABase)': load_kVABase,
'total_load(kW)': load_kw,
'total_PV(kW)': pv_kw,
'total_PV(kVARated)': pv_kVARated,
}
return data_dict
def get_upgrade_stage_stats(dss, upgrade_stage, upgrade_type, xfmr_loading_df, line_loading_df, bus_voltages_df, **kwargs):
"""This function gives upgrade stage stats for a feeder
upgrade_stage can be Initial or Final
upgrade_type can be thermal or voltage
"""
final_dict = {"stage": upgrade_stage, "upgrade_type": upgrade_type}
ckt_info_dict = get_circuit_info()
final_dict["feeder_components"] = ckt_info_dict
final_dict["feeder_components"].update({
"num_nodes": dss.Circuit.NumNodes(),
"num_loads": dss.Loads.Count(),
"num_lines": dss.Lines.Count(),
"num_transformers": dss.Transformers.Count(),
"num_pv_systems": dss.PVsystems.Count(),
"num_capacitors": dss.Capacitors.Count(),
"num_regulators": dss.RegControls.Count(),
} )
equipment_dict = combine_equipment_health_stats(xfmr_loading_df, line_loading_df, bus_voltages_df, **kwargs)
final_dict.update(equipment_dict)
return final_dict
def combine_equipment_health_stats(xfmr_loading_df, line_loading_df, bus_voltages_df, **kwargs):
line_properties = kwargs.get("line_properties", None)
xfmr_properties = kwargs.get("xfmr_properties", None)
voltage_properties = kwargs.get("voltage_properties", None)
final_dict = {}
if line_properties is None:
line_properties = ['name', 'phases','normamps', 'kV', 'line_placement', 'length', 'units', 'max_amp_loading',
'max_per_unit_loading', 'status']
if xfmr_properties is None:
xfmr_properties = ['name', 'phases', 'windings', 'conns', 'kVs', 'kVAs', 'amp_limit_per_phase','max_amp_loading',
'max_per_unit_loading', 'status']
if voltage_properties is None:
voltage_properties = ['name', 'Max per unit voltage', 'Min per unit voltage', 'Overvoltage violation',
'Max voltage_deviation', 'Undervoltage violation', 'Min voltage_deviation']
# some file reformatting
if "conns" in xfmr_properties:
xfmr_loading_df["conns"] = xfmr_loading_df["conns"].apply(ast.literal_eval)
if "kVs" in xfmr_properties:
xfmr_loading_df["kVs"] = xfmr_loading_df["kVs"].apply(ast.literal_eval)
if "windings" in xfmr_properties:
xfmr_loading_df["windings"] = xfmr_loading_df["windings"].astype(int)
final_dict.update({"transformer_loading": xfmr_loading_df[xfmr_properties].to_dict(orient="records")})
final_dict.update({"line_loading": line_loading_df[line_properties].to_dict(orient="records")})
final_dict.update({"bus_voltage": bus_voltages_df[voltage_properties].to_dict(orient="records")})
return final_dict
def get_circuit_info():
"""This collects circuit information: source bus, feeder head info, substation xfmr information
Returns
-------
Dictionary
"""
data_dict = {}
dss.Vsources.First()
data_dict['source_bus'] = dss.CktElement.BusNames()[0].split(".")[0]
data_dict["feeder_head_name"] = dss.Circuit.Name()
dss.Circuit.SetActiveBus(data_dict['source_bus'])
data_dict["feeder_head_basekv"] = dss.Bus.kVBase()
data_dict["source_num_nodes"] = dss.Bus.NumNodes()
data_dict["total_num_buses_in_circuit"] = len(dss.Circuit.AllBusNames())
if data_dict["source_num_nodes"] > 1:
data_dict["feeder_head_basekv"] = round(data_dict["feeder_head_basekv"] * math.sqrt(3), 1)
data_dict["substation_xfmr"] = None
all_xfmr_df = get_thermal_equipment_info(compute_loading=False, equipment_type="transformer")
all_xfmr_df["substation_xfmr_flag"] = all_xfmr_df.apply(lambda x: int(
data_dict["source_bus"].lower() in x['bus_names_only']), axis=1)
if len(all_xfmr_df.loc[all_xfmr_df["substation_xfmr_flag"] == True]) > 0:
data_dict["substation_xfmr"] = all_xfmr_df.loc[all_xfmr_df["substation_xfmr_flag"] ==
True].to_dict(orient='records')[0]
data_dict["substation_xfmr"]["kVs"] = ast.literal_eval(data_dict["substation_xfmr"]["kVs"])
# this checks if the voltage kVs are the same for the substation transformer
data_dict["substation_xfmr"]["is_autotransformer_flag"] = len(set(data_dict["substation_xfmr"]["kVs"])) <= 1
return data_dict
def create_opendss_definition(config_definition_dict, action_type="New", property_list=None):
"""This function creates an opendss element definition for any generic equipment
Returns
-------
str
"""
command_string = f"{action_type} {config_definition_dict['equipment_type']}.{config_definition_dict['name']}"
logger.debug(f"New {config_definition_dict['equipment_type']}.{config_definition_dict['name']} being defined")
# these properties contain data (refer OpenDSS manual for more information on these parameters)
if property_list is None:
property_list = list(set(config_definition_dict.keys()) - {"name", "equipment_type"})
empty_field_values = ["----", "nan", "NaN", "None", None, np.nan]
for property_name in property_list:
if isinstance(config_definition_dict[property_name], float):
if np.isnan(config_definition_dict[property_name]):
continue
if config_definition_dict[property_name] in empty_field_values:
continue
# if the value is not empty and is not nan, only then add it into the command string
temp_s = f" {property_name}={config_definition_dict[property_name]}"
command_string = command_string + temp_s
return command_string
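# Illustrative sketch (added for this edit; the config dict below is hypothetical):
#   create_opendss_definition({"equipment_type": "linecode", "name": "lc_demo",
#                              "nphases": 3, "normamps": 530.0})
# returns something like
#   "New linecode.lc_demo nphases=3 normamps=530.0"
# (property order is not deterministic because property_list is built from a set).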
def ensure_line_config_exists(chosen_option, new_config_type, external_upgrades_technical_catalog):
"""This function check if a line config exists in the network.
If it doesn't exist, it checks the external catalog (if available) and returns a new dss definition string.
Returns
-------
str
"""
existing_config_dict = {"linecode": get_line_code(), "geometry": get_line_geometry()}
new_config_name = chosen_option[new_config_type].lower()
# if linecode or linegeometry is not present in existing network definitions
if not existing_config_dict[new_config_type]["name"].str.lower().isin([new_config_name]).any():
# add definition for linecode or linegeometry
if external_upgrades_technical_catalog is None:
raise UpgradesExternalCatalogRequired(f"External upgrades technical catalog not available to determine line config type")
external_config_df = pd.DataFrame(external_upgrades_technical_catalog[new_config_type])
if external_config_df["name"].str.lower().isin([new_config_name]).any():
config_definition_df = external_config_df.loc[external_config_df["name"] == new_config_name]
config_definition_dict = dict(config_definition_df.iloc[0])
if config_definition_dict["normamps"] != chosen_option["normamps"]:
logger.warning(f"Mismatch between noramps for linecode {new_config_name} and chosen upgrade option normamps: {chosen_option['name']}")
# check format of certain fields
matrix_fields = [s for s in config_definition_dict.keys() if 'matrix' in s]
for field in matrix_fields:
config_definition_dict[field] = config_definition_dict[field].replace("'","")
config_definition_dict[field] = config_definition_dict[field].replace("[","(")
config_definition_dict[field] = config_definition_dict[field].replace("]",")")
command_string = create_opendss_definition(config_definition_dict=config_definition_dict)
else:
raise UpgradesExternalCatalogMissingObjectDefinition(
f"{new_config_type} definition for {new_config_name} not found in external catalog."
)
else:
command_string = None
return command_string
def get_present_loading_condition():
""" Get present loading condition for all loads
Returns
-------
DataFrame
"""
load_dict = {}
dss.Circuit.SetActiveClass("Load")
flag = dss.ActiveClass.First()
while flag > 0:
# Get the name of the load
load_dict[dss.CktElement.Name()] = {
'Num_phases': float(dss.Properties.Value("phases")),
'kV': float(dss.Properties.Value("kV")),
'kVA': float(dss.Properties.Value("kVA")),
'kW': float(dss.Properties.Value("kW")),
'pf': dss.Properties.Value("pf"),
'Bus1': dss.Properties.Value("bus1"),
'Powers': dss.CktElement.Powers(),
'NetPower': sum(dss.CktElement.Powers()[::2]),
}
# Move on to the next Load...
flag = dss.ActiveClass.Next()
load_df = pd.DataFrame.from_dict(load_dict, "index")
return load_df
def get_present_storage_condition():
""" Get present operating condition for all storage
Returns
-------
DataFrame
"""
storage_dict = {}
dss.Circuit.SetActiveClass('Storage')
flag = dss.ActiveClass.First()
while flag > 0:
# Get the name of the load
storage_dict[dss.CktElement.Name()] = {
'Num_phases': float(dss.Properties.Value("phases")),
'kV': float(dss.Properties.Value("kV")),
'kVA': float(dss.Properties.Value("kVA")),
'kW': float(dss.Properties.Value("kW")),
'pf': dss.Properties.Value("pf"),
'Bus1': dss.Properties.Value("bus1"),
'Powers': dss.CktElement.Powers(),
'NetPower': sum(dss.CktElement.Powers()[::2]),
}
# Move on to the next ...
flag = dss.ActiveClass.Next()
storage_df = pd.DataFrame.from_dict(storage_dict, "index")
return storage_df
def get_present_pvgeneration():
""" Get present generation for all pv systems
Returns
-------
DataFrame
"""
pv_dict = {}
dss.Circuit.SetActiveClass("PVSystem")
flag = dss.ActiveClass.First()
while flag:
pv_dict[dss.CktElement.Name()] = {
'Num_phases': float(dss.Properties.Value("phases")),
'kV': float(dss.Properties.Value("kV")),
'kVA': float(dss.Properties.Value("kVA")),
'kvar': float(dss.Properties.Value("kvar")),
'Irradiance': float(dss.Properties.Value("Irradiance")),
'connection': dss.Properties.Value("conn"),
'Pmpp': float(dss.Properties.Value("Pmpp")),
'Powers': dss.CktElement.Powers(),
'NetPower': sum(dss.CktElement.Powers()[::2]),
'pf': dss.Properties.Value("pf"),
'Bus1': dss.Properties.Value("bus1"),
'Voltages': dss.CktElement.Voltages(),
'VoltagesMagAng': dss.CktElement.VoltagesMagAng(),
'VoltagesMag': float(dss.CktElement.VoltagesMagAng()[0]),
}
flag = dss.ActiveClass.Next() > 0
pv_df = pd.DataFrame.from_dict(pv_dict, "index")
return pv_df
def get_all_transformer_info_instance(upper_limit=None, compute_loading=True):
"""This collects transformer information
Returns
-------
DataFrame
"""
all_df = dss.utils.class_to_dataframe("transformer")
if len(all_df) == 0:
return pd.DataFrame()
all_df["name"] = all_df.index.str.split(".").str[1]
all_df["equipment_type"] = all_df.index.str.split(".").str[0]
# extract only enabled lines
all_df = all_df.loc[all_df["enabled"] == True]
all_df[["wdg", "phases"]] = all_df[["wdg", "phases"]].astype(int)
float_fields = ["kV", "kVA", "normhkVA", "emerghkVA", "%loadloss", "%noloadloss", "XHL", "XHT", "XLT", "%R",
"Rneut", "Xneut", "X12", "X13", "X23", "RdcOhms"]
all_df[float_fields] = all_df[float_fields].astype(float)
# define empty new columns
all_df['bus_names_only'] = None
all_df["amp_limit_per_phase"] = np.nan
if compute_loading:
all_df["max_amp_loading"] = np.nan
all_df["max_per_unit_loading"] = np.nan
all_df["status"] = ""
for index, row in all_df.iterrows():
# convert type from list to tuple since they are hashable objects (and can be indexed)
all_df.at[index, "kVs"] = [float(a) for a in row["kVs"]]
all_df.at[index, "kVAs"] = [float(a) for a in row["kVAs"]]
all_df.at[index, "Xscarray"] = [float(a) for a in row["Xscarray"]]
all_df.at[index, "%Rs"] = [float(a) for a in row["%Rs"]]
all_df.at[index, "bus_names_only"] = [a.split(".")[0].lower() for a in row["buses"]]
# first winding is considered primary winding
primary_kv = float(row["kVs"][0])
primary_kva = float(row["kVAs"][0])
if row["phases"] > 1:
amp_limit_per_phase = primary_kva / (primary_kv * math.sqrt(3))
elif row["phases"] == 1:
amp_limit_per_phase = primary_kva / primary_kv
else:
raise InvalidOpenDssElementError(f"Incorrect number of phases for transformer {row['name']}")
all_df.at[index, "amp_limit_per_phase"] = amp_limit_per_phase
if compute_loading:
if upper_limit is None:
raise Exception("Transformer upper limit is to be passed to function to compute transformer loading")
dss.Circuit.SetActiveElement("Transformer.{}".format(row["name"]))
extract_magang = dss.CktElement.CurrentsMagAng()[: 2 * row["phases"]] # extract elements based on num of ph
xfmr_current_magnitude = extract_magang[::2]
max_amp_loading = max(xfmr_current_magnitude)
max_per_unit_loading = round(max_amp_loading / amp_limit_per_phase, 4)
all_df.at[index, "max_amp_loading"] = max_amp_loading
all_df.at[index, "max_per_unit_loading"] = max_per_unit_loading
if max_per_unit_loading > upper_limit:
all_df.at[index, "status"] = "overloaded"
elif max_per_unit_loading == 0:
all_df.at[index, "status"] = "unloaded"
else:
all_df.at[index, "status"] = "normal"
# convert lists to string type (so they can be set as dataframe index later)
all_df[['conns', 'kVs']] = all_df[['conns', 'kVs']].astype(str)
all_df = all_df.reset_index(drop=True).set_index('name')
return all_df.reset_index()
def add_info_line_definition_type(all_df):
all_df["line_definition_type"] = "line_definition"
all_df.loc[all_df["linecode"] != "", "line_definition_type"] = "linecode"
all_df.loc[all_df["geometry"] != "", "line_definition_type"] = "geometry"
return all_df
def determine_line_placement(line_series):
""" Distinguish between overhead and underground cables
    Currently there is no way to distinguish them directly using opendssdirect/PyDSS,
    so this is done here using the 'h' (height) property of the line geometry, or by
    checking for 'oh'/'ug' substrings in the geometry or linecode name.
Parameters
----------
line_series
Returns
-------
dict
"""
info_dict = {}
info_dict["line_placement"] = None
if line_series["line_definition_type"] == "geometry":
dss.Circuit.SetActiveClass("linegeometry")
dss.ActiveClass.Name(line_series["geometry"])
h = float(dss.Properties.Value("h"))
info_dict["h"] = 0
if h >= 0:
info_dict["line_placement"] = "overhead"
else:
info_dict["line_placement"] = "underground"
else:
if ("oh" in line_series["geometry"].lower()) or ("oh" in line_series["linecode"].lower()):
info_dict["line_placement"] = "overhead"
elif ("ug" in line_series["geometry"].lower()) or ("ug" in line_series["linecode"].lower()):
info_dict["line_placement"] = "underground"
else:
info_dict["line_placement"] = None
return info_dict
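# Illustrative sketch (added for clarity, not part of the original module): for lines
# defined by a linecode rather than a geometry, determine_line_placement() only looks
# for 'oh'/'ug' substrings, so it can be exercised without an active OpenDSS circuit.
# The field values below are hypothetical; pandas is reused from the module imports.
def _example_determine_line_placement():
    example_line = pd.Series({
        "line_definition_type": "linecode",
        "geometry": "",
        "linecode": "ug_1/0_al_15kv",
    })
    # expected result: {"line_placement": "underground"}
    return determine_line_placement(example_line)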
def get_all_line_info_instance(upper_limit=None, compute_loading=True, ignore_switch=True):
"""This collects line information
Returns
-------
DataFrame
"""
all_df = dss.utils.class_to_dataframe("line")
if len(all_df) == 0:
return pd.DataFrame()
all_df["name"] = all_df.index.str.split(".").str[1]
all_df["equipment_type"] = all_df.index.str.split(".").str[0]
# extract only enabled lines
all_df = all_df.loc[all_df["enabled"] == True]
all_df["phases"] = all_df["phases"].astype(int)
all_df[["normamps", "length"]] = all_df[["normamps", "length"]].astype(float)
all_df = add_info_line_definition_type(all_df)
# define empty new columns
all_df["kV"] = np.nan
all_df["h"] = np.nan
all_df["line_placement"] = ""
if compute_loading:
all_df["max_amp_loading"] = np.nan
all_df["max_per_unit_loading"] = np.nan
all_df["status"] = ""
for index, row in all_df.iterrows():
dss.Circuit.SetActiveBus(row["bus1"])
kv_b1 = dss.Bus.kVBase()
dss.Circuit.SetActiveBus(row["bus2"])
kv_b2 = dss.Bus.kVBase()
dss.Circuit.SetActiveElement("Line.{}".format(row["name"]))
if round(kv_b1) != round(kv_b2):
raise InvalidOpenDssElementError("To and from bus voltages ({} {}) do not match for line {}".format(
kv_b2, kv_b1, row['name']))
all_df.at[index, "kV"] = kv_b1
        # Distinguish between overhead and underground cables.
        # Currently there is no way to distinguish them directly using opendssdirect/PyDSS,
        # so it is done using the geometry height property or 'oh'/'ug' substrings in the name.
placement_dict = determine_line_placement(row)
for key in placement_dict.keys():
all_df.at[index, key] = placement_dict[key]
# if line loading is to be computed
if compute_loading:
if upper_limit is None:
raise Exception("Line upper limit is to be passed to function to compute line loading")
dss.Circuit.SetActiveElement("Line.{}".format(row["name"]))
extract_magang = dss.CktElement.CurrentsMagAng()[: 2 * row["phases"]]
line_current = extract_magang[::2]
max_amp_loading = max(line_current)
max_per_unit_loading = round(max_amp_loading / row["normamps"], 4)
all_df.at[index, "max_amp_loading"] = max_amp_loading
all_df.at[index, "max_per_unit_loading"] = max_per_unit_loading
if max_per_unit_loading > upper_limit:
all_df.at[index, "status"] = "overloaded"
elif max_per_unit_loading == 0:
all_df.at[index, "status"] = "unloaded"
else:
all_df.at[index, "status"] = "normal"
all_df = all_df.reset_index(drop=True).set_index('name')
all_df["kV"] = all_df["kV"].round(5)
    # add units to switch length (needed to plot graph); switches defined without units are assigned 'm' by default
all_df.loc[(all_df.units == 'none') & (all_df.Switch == True), 'units'] = 'm'
# if switch is to be ignored
if ignore_switch:
all_df = all_df.loc[all_df['Switch'] == False]
return all_df.reset_index()
def compare_multiple_dataframes(comparison_dict, deciding_column_name, comparison_type="max"):
"""This function compares all dataframes in a given dictionary based on a deciding column name
Returns
-------
Dataframe
"""
summary_df = pd.DataFrame()
for df_name in comparison_dict.keys():
summary_df[df_name] = comparison_dict[df_name][deciding_column_name]
if comparison_type == "max":
label_df = summary_df.idxmax(axis=1) # find dataframe name that has max
elif comparison_type == "min":
        label_df = summary_df.idxmin(axis=1)  # find dataframe name that has min
else:
raise Exception(f"Unknown comparison type {comparison_type} passed.")
final_list = []
for index, label in label_df.iteritems(): # index is element name
temp_dict = dict(comparison_dict[label].loc[index])
temp_dict.update({"name": index})
final_list.append(temp_dict)
final_df = pd.DataFrame(final_list)
return final_df
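# Illustrative sketch (added for clarity, not part of the original module):
# compare_multiple_dataframes() keeps, for each element, the row from whichever
# snapshot has the worst (max) value of the deciding column. The snapshot names
# and loading values below are made up.
def _example_compare_multiple_dataframes():
    snapshot_a = pd.DataFrame({"max_per_unit_loading": [0.8, 1.2]}, index=["line1", "line2"])
    snapshot_b = pd.DataFrame({"max_per_unit_loading": [1.1, 0.9]}, index=["line1", "line2"])
    comparison_dict = {"with_pv_1.0": snapshot_a, "without_pv_1.0": snapshot_b}
    # expected: line1 is taken from snapshot_b (1.1), line2 from snapshot_a (1.2)
    return compare_multiple_dataframes(comparison_dict, "max_per_unit_loading", comparison_type="max")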
@track_timing(timer_stats_collector)
def get_thermal_equipment_info(compute_loading, equipment_type, upper_limit=None, ignore_switch=False, **kwargs):
"""This function determines the thermal equipment loading (line, transformer), based on timepoint multiplier
Returns
-------
DataFrame
"""
timepoint_multipliers = kwargs.get("timepoint_multipliers", None)
multiplier_type = kwargs.get("multiplier_type", LoadMultiplierType.ORIGINAL)
    # if there are no multipliers, run on rated load i.e. multiplier=1.0
# if compute_loading is false, then just run once (no need to check multipliers)
if (timepoint_multipliers is None) or (not compute_loading) or (multiplier_type == LoadMultiplierType.ORIGINAL):
if compute_loading and multiplier_type != LoadMultiplierType.ORIGINAL:
apply_uniform_timepoint_multipliers(multiplier_name=1, field="with_pv", **kwargs)
if equipment_type == "line":
loading_df = get_all_line_info_instance(compute_loading=compute_loading, upper_limit=upper_limit, ignore_switch=ignore_switch)
elif equipment_type == "transformer":
loading_df = get_all_transformer_info_instance(compute_loading=compute_loading, upper_limit=upper_limit)
return loading_df
if multiplier_type == LoadMultiplierType.UNIFORM:
comparison_dict = {}
for pv_field in timepoint_multipliers["load_multipliers"].keys():
logger.debug(pv_field)
for multiplier_name in timepoint_multipliers["load_multipliers"][pv_field]:
logger.debug("Multipler name: %s", multiplier_name)
# this changes the dss network load and pv
apply_uniform_timepoint_multipliers(multiplier_name=multiplier_name, field=pv_field, **kwargs)
if equipment_type.lower() == "line":
deciding_column_name = "max_per_unit_loading"
loading_df = get_all_line_info_instance(compute_loading=compute_loading, upper_limit=upper_limit, ignore_switch=ignore_switch)
elif equipment_type.lower() == "transformer":
deciding_column_name = "max_per_unit_loading"
loading_df = get_all_transformer_info_instance(compute_loading=compute_loading, upper_limit=upper_limit)
loading_df.set_index("name", inplace=True)
comparison_dict[pv_field+"_"+str(multiplier_name)] = loading_df
        # compare all dataframes, and create one that contains the worst loading conditions (across all multiplier conditions)
loading_df = compare_multiple_dataframes(comparison_dict, deciding_column_name, comparison_type="max")
else:
raise Exception(f"Undefined multiplier_type {multiplier_type} passed.")
return loading_df
def get_regcontrol_info(correct_PT_ratio=False, nominal_voltage=None):
"""This collects enabled regulator control information.
    If correcting the PT ratio, the following conventions are used (based on OpenDSS documentation):
    PT ratio: if the winding is Wye, the line-to-neutral voltage is used; else, the line-to-line voltage is used.
    (Here, bus kV is taken from Bus.kVBase.)
    Bus base kV: returns the L-L voltage for 2- and 3-phase buses; else, for 1-phase, returns the L-N voltage.
Returns
-------
DataFrame
"""
all_df = dss.utils.class_to_dataframe("regcontrol")
if len(all_df) == 0:
return pd.DataFrame()
all_df["name"] = all_df.index.str.split(".").str[1]
all_df["equipment_type"] = all_df.index.str.split(".").str[0]
float_columns = ['winding', 'vreg', 'band', 'ptratio', 'delay']
all_df[float_columns] = all_df[float_columns].astype(float)
all_df['at_substation_xfmr_flag'] = False # by default, reg control is considered to be not at substation xfmr
ckt_info_dict = get_circuit_info()
sub_xfmr_present = False
sub_xfmr_name = None
if ckt_info_dict['substation_xfmr'] is not None:
sub_xfmr_present = True
sub_xfmr_name = ckt_info_dict['substation_xfmr']['name']
if correct_PT_ratio:
if nominal_voltage is None:
raise Exception("Nominal voltage not provided to correct regcontrol PT ratio.")
all_df['old_ptratio'] = all_df['ptratio']
for index, row in all_df.iterrows():
dss.Circuit.SetActiveElement("Regcontrol.{}".format(row["name"]))
reg_bus = dss.CktElement.BusNames()[0].split(".")[0]
all_df.at[index, "reg_bus"] = reg_bus
dss.Circuit.SetActiveBus(reg_bus)
all_df.at[index, "bus_num_phases"] = dss.CktElement.NumPhases()
all_df.at[index, "bus_kv"] = dss.Bus.kVBase()
dss.Circuit.SetActiveElement("Transformer.{}".format(row["transformer"]))
all_df.at[index, "transformer_kva"] = float(dss.Properties.Value("kva"))
dss.Transformers.Wdg(1) # setting winding to 1, to get kV for winding 1
all_df.at[index, "transformer_kv"] = dss.Transformers.kV()
all_df.at[index, "transformer_conn"] = dss.Properties.Value("conn").replace(" ", "") # opendss returns conn with a space
all_df.at[index, "transformer_bus1"] = dss.CktElement.BusNames()[0].split(".")[0]
all_df.at[index, "transformer_bus2"] = dss.CktElement.BusNames()[1].split(".")[0]
if correct_PT_ratio:
if (all_df.loc[index]["bus_num_phases"] > 1) and (all_df.loc[index]["transformer_conn"].lower() == "wye"):
kV_to_be_used = all_df.loc[index]["transformer_kv"] * 1000 / math.sqrt(3)
else:
kV_to_be_used = all_df.loc[index]["transformer_kv"] * 1000
# kV_to_be_used = dss.Bus.kVBase() * 1000
all_df.at[index, "ptratio"] = kV_to_be_used / nominal_voltage
if sub_xfmr_present and (row["transformer"] == sub_xfmr_name): # if reg control is at substation xfmr
all_df.at[index, 'at_substation_xfmr_flag'] = True
all_df = all_df.reset_index(drop=True).set_index('name')
all_df = all_df.loc[all_df['enabled'] == True]
return all_df.reset_index()
def get_capacitor_info(nominal_voltage=None, correct_PT_ratio=False):
"""
This collects capacitor information.
    For correcting the PT ratio, the following definitions are used:
# cap banks are 3 phase, 2 phase or 1 phase. 1 phase caps will have LN voltage
# PT ratio: Ratio of the PT that converts the monitored voltage to the control voltage.
# If the capacitor is Wye, the 1st phase line-to-neutral voltage is monitored.
# Else, the line-to-line voltage (1st - 2nd phase) is monitored.
# Capacitor kv: Rated kV of the capacitor (not necessarily same as bus rating).
# For Phases=2 or Phases=3, it is line-to-line (phase-to-phase) rated voltage.
# For all other numbers of phases, it is actual rating. (For Delta connection this is always line-to-line rated voltage).
    This function doesn't currently check whether the object is "enabled".
Returns
-------
DataFrame
"""
all_df = dss.utils.class_to_dataframe("capacitor")
if len(all_df) == 0:
return pd.DataFrame()
all_df["capacitor_name"] = all_df.index.str.split(".").str[1]
all_df["equipment_type"] = all_df.index.str.split(".").str[0]
float_columns = ["phases", "kv"]
all_df[float_columns] = all_df[float_columns].astype(float)
all_df = all_df.reset_index(drop=True).set_index("capacitor_name")
    # collect capcontrol information to combine with the capacitors
capcontrol_df = get_cap_control_info()
capcontrol_df.rename(columns={'name': 'capcontrol_name', 'capacitor': 'capacitor_name', 'type': 'capcontrol_type',
'equipment_type': 'capcontrol_present'}, inplace=True)
capcontrol_df = capcontrol_df.set_index("capacitor_name")
# with capacitor name as index, concatenate capacitor information with cap controls
# TODO are any other checks needed before concatenating dataframes? i.e. if capacitor is not present
all_df = pd.concat([all_df, capcontrol_df], axis=1)
all_df.index.name = 'capacitor_name'
all_df = all_df.reset_index().set_index('capacitor_name')
if correct_PT_ratio and (len(capcontrol_df) > 0):
if nominal_voltage is None:
raise Exception("Nominal voltage not provided to correct capacitor bank PT ratio.")
all_df['old_PTratio'] = all_df['PTratio']
# iterate over all capacitors
for index, row in all_df.iterrows():
all_df.at[index, "kvar"] = [float(a) for a in row["kvar"]][0]
# if capcontrol type is empty, then that capacitor does not have controls
# correct PT ratios for existing cap controls
if correct_PT_ratio and (len(capcontrol_df) > 0):
if row["phases"] > 1 and row["conn"].lower() == "wye":
kv_to_be_used = (row['kv'] * 1000) / math.sqrt(3)
else:
kv_to_be_used = row['kv'] * 1000
all_df.at[index, "PTratio"] = kv_to_be_used / nominal_voltage
return all_df.reset_index()
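# Worked example (added for illustration, not part of the original module) of the
# PT-ratio correction above for a hypothetical 3-phase wye capacitor rated 12.47 kV
# line-to-line with a 120 V nominal control voltage:
#   kv_to_be_used = 12.47 kV * 1000 / sqrt(3) ~= 7199.6 V (line-to-neutral)
#   PTratio       = 7199.6 / 120 ~= 60.0
# A delta-connected or single-phase capacitor skips the sqrt(3) division.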
def get_cap_control_info():
"""This collects capacitor control information
Returns
-------
DataFrame
"""
all_df = dss.utils.class_to_dataframe("capcontrol")
if len(all_df) == 0:
capcontrol_columns = ['name', 'capacitor', 'type', 'equipment_type']
return pd.DataFrame(columns=capcontrol_columns)
all_df["name"] = all_df.index.str.split(".").str[1]
all_df["equipment_type"] = all_df.index.str.split(".").str[0]
float_columns = ["CTPhase", "CTratio", "DeadTime", "Delay", "DelayOFF", "OFFsetting", "ONsetting", "PTratio",
"Vmax", "Vmin"]
all_df[float_columns] = all_df[float_columns].astype(float)
all_df = all_df.reset_index(drop=True).set_index("name")
return all_df.reset_index()
def get_line_geometry():
"""This collects all line geometry information
Returns
-------
DataFrame
"""
active_class_name = 'linegeometry'
all_df = dss.utils.class_to_dataframe(active_class_name)
if len(all_df) == 0:
return pd.DataFrame()
all_df['name'] = all_df.index.str.split('.').str[1]
all_df['equipment_type'] = all_df.index.str.split('.').str[0]
all_df.reset_index(inplace=True, drop=True)
return all_df
def get_line_code():
"""This collects all line codes information
Returns
-------
DataFrame
"""
active_class_name = 'linecode'
all_df = dss.utils.class_to_dataframe(active_class_name)
if len(all_df) == 0:
return pd.DataFrame()
all_df['name'] = all_df.index.str.split('.').str[1]
all_df['equipment_type'] = all_df.index.str.split('.').str[0]
all_df.reset_index(inplace=True, drop=True)
return all_df
def get_wire_data():
"""This collects all wire data information
Returns
-------
DataFrame
"""
active_class_name = 'wiredata'
all_df = dss.utils.class_to_dataframe(active_class_name)
if len(all_df) == 0:
return pd.DataFrame()
all_df['name'] = all_df.index.str.split('.').str[1]
all_df['equipment_type'] = all_df.index.str.split('.').str[0]
all_df.reset_index(inplace=True, drop=True)
return all_df
def get_cn_data():
"""This collects all cn data information
Returns
-------
DataFrame
"""
active_class_name = 'cndata'
all_df = dss.utils.class_to_dataframe(active_class_name)
if len(all_df) == 0:
return pd.DataFrame()
all_df['name'] = all_df.index.str.split('.').str[1]
all_df['equipment_type'] = all_df.index.str.split('.').str[0]
all_df.reset_index(inplace=True, drop=True)
return all_df
def check_dss_run_command(command_string):
"""Runs dss command
And checks for exception
Parameters
----------
command_string : str
dss command to be run
Raises
-------
OpenDssCompileError
Raised if the command fails
"""
logger.debug(f"Running DSS command: {command_string}")
result = dss.run_command(f"{command_string}")
if result != "":
raise OpenDssCompileError(f"OpenDSS run_command failed with message: {result}. \nCommand: {command_string}")
@track_timing(timer_stats_collector)
def get_bus_voltages(voltage_upper_limit, voltage_lower_limit, raise_exception=True, **kwargs):
"""This function determines the voltages, based on timepoint multiplier
Returns
-------
DataFrame
"""
timepoint_multipliers = kwargs.get("timepoint_multipliers", None)
multiplier_type = kwargs.get("multiplier_type", LoadMultiplierType.ORIGINAL)
    # if there are no multipliers, run on rated load i.e. multiplier=1.0
    # if the multiplier type is ORIGINAL, then just run once (no need to check multipliers)
if (timepoint_multipliers is None) or (multiplier_type == LoadMultiplierType.ORIGINAL):
if multiplier_type != LoadMultiplierType.ORIGINAL:
apply_uniform_timepoint_multipliers(multiplier_name=1, field="with_pv", **kwargs)
# determine voltage violations after changes
bus_voltages_df, undervoltage_bus_list, overvoltage_bus_list, buses_with_violations = get_bus_voltages_instance(
voltage_upper_limit=voltage_upper_limit, voltage_lower_limit=voltage_lower_limit, raise_exception=raise_exception,
**kwargs)
return bus_voltages_df, undervoltage_bus_list, overvoltage_bus_list, buses_with_violations
if multiplier_type == LoadMultiplierType.UNIFORM:
comparison_dict = {}
for pv_field in timepoint_multipliers["load_multipliers"].keys():
logger.debug(pv_field)
for multiplier_name in timepoint_multipliers["load_multipliers"][pv_field]:
logger.debug("Multipler name: %s", multiplier_name)
# this changes the dss network load and pv
apply_uniform_timepoint_multipliers(multiplier_name=multiplier_name, field=pv_field, **kwargs)
bus_voltages_df, undervoltage_bus_list, overvoltage_bus_list, buses_with_violations = get_bus_voltages_instance(
voltage_upper_limit=voltage_upper_limit, voltage_lower_limit=voltage_lower_limit, raise_exception=raise_exception, **kwargs)
bus_voltages_df.set_index("name", inplace=True)
comparison_dict[pv_field+"_"+str(multiplier_name)] = bus_voltages_df
        # compare all dataframes, and create one that contains the worst voltage conditions (across all multiplier conditions)
deciding_column_dict = {"Max per unit voltage": "max", "Min per unit voltage": "min"}
bus_voltages_df, undervoltage_bus_list, overvoltage_bus_list, buses_with_violations = compare_multiple_dataframes_voltage(comparison_dict=comparison_dict,
deciding_column_dict=deciding_column_dict,
voltage_upper_limit=voltage_upper_limit,
voltage_lower_limit=voltage_lower_limit)
else:
raise Exception(f"Undefined multiplier_type {multiplier_type} passed.")
return bus_voltages_df, undervoltage_bus_list, overvoltage_bus_list, buses_with_violations
@track_timing(timer_stats_collector)
def get_bus_voltages_instance(voltage_upper_limit, voltage_lower_limit, raise_exception=True, **kwargs):
"""This computes per unit voltages for all buses in network
Returns
-------
DataFrame
"""
circuit_solve_and_check(raise_exception=raise_exception, **kwargs) # this is added as a final check for convergence
all_dict = {}
all_bus_names = dss.Circuit.AllBusNames()
for bus_name in all_bus_names:
dss.Circuit.SetActiveBus(bus_name)
data_dict = {
"name": bus_name,
"voltages": dss.Bus.puVmagAngle()[::2],
# "kvbase": dss.Bus.kVBase(),
}
data_dict["Max per unit voltage"] = max(data_dict["voltages"])
data_dict["Min per unit voltage"] = min(data_dict["voltages"])
data_dict['Phase imbalance'] = data_dict["Max per unit voltage"] - data_dict["Min per unit voltage"]
# check for overvoltage violation
if data_dict["Max per unit voltage"] > voltage_upper_limit:
data_dict['Overvoltage violation'] = True
data_dict["Max voltage_deviation"] = data_dict["Max per unit voltage"] - voltage_upper_limit
else:
data_dict['Overvoltage violation'] = False
data_dict["Max voltage_deviation"] = 0.0
# check for undervoltage violation
if data_dict["Min per unit voltage"] < voltage_lower_limit:
data_dict['Undervoltage violation'] = True
data_dict["Min voltage_deviation"] = voltage_lower_limit - data_dict["Min per unit voltage"]
else:
data_dict['Undervoltage violation'] = False
data_dict["Min voltage_deviation"] = 0.0
all_dict[data_dict["name"]] = data_dict
all_df = pd.DataFrame.from_dict(all_dict, orient='index').reset_index(drop=True)
undervoltage_bus_list = list(all_df.loc[all_df['Undervoltage violation'] == True]['name'].unique())
overvoltage_bus_list = list(all_df.loc[all_df['Overvoltage violation'] == True]['name'].unique())
buses_with_violations = list(set(undervoltage_bus_list + overvoltage_bus_list))
return all_df, undervoltage_bus_list, overvoltage_bus_list, buses_with_violations
def compare_multiple_dataframes_voltage(comparison_dict, deciding_column_dict, voltage_upper_limit, voltage_lower_limit):
"""This function compares all dataframes in a given dictionary based on a deciding column
Returns
-------
Dataframe
"""
all_df = pd.DataFrame()
for deciding_column_name in deciding_column_dict.keys():
summary_df = pd.DataFrame()
comparison_type = deciding_column_dict[deciding_column_name]
for df_name in comparison_dict.keys():
label_df = pd.DataFrame()
summary_df[df_name] = comparison_dict[df_name][deciding_column_name]
if comparison_type == "max":
label_df[deciding_column_name] = summary_df.idxmax(axis=1) # find dataframe name that has max
elif comparison_type == "min":
label_df[deciding_column_name] = summary_df.idxmin(axis=1) # find dataframe name that has min
else:
raise Exception(f"Unknown comparison type {comparison_type} passed.")
final_list = []
for index, row in label_df.iterrows(): # index is element name
label = row[deciding_column_name]
temp_dict = {deciding_column_name: comparison_dict[label].loc[index][deciding_column_name]}
temp_dict.update({"name": index})
final_list.append(temp_dict)
temp_df = pd.DataFrame(final_list)
temp_df.set_index("name", inplace=True)
all_df = pd.concat([all_df, temp_df], axis=1)
bus_voltages_df, undervoltage_bus_list, overvoltage_bus_list, buses_with_violations = get_voltage_violations(voltage_upper_limit=voltage_upper_limit,
voltage_lower_limit=voltage_lower_limit,
bus_voltages_df=all_df)
return bus_voltages_df, undervoltage_bus_list, overvoltage_bus_list, buses_with_violations
def get_voltage_violations(voltage_upper_limit, voltage_lower_limit, bus_voltages_df):
"""Function to determine voltage violations
"""
bus_voltages_df['Overvoltage violation'] = False
bus_voltages_df['Undervoltage violation'] = False
bus_voltages_df['Max voltage_deviation'] = 0.0
bus_voltages_df['Min voltage_deviation'] = 0.0
for index, row in bus_voltages_df.iterrows():
# check for overvoltage violation
if row["Max per unit voltage"] > voltage_upper_limit:
bus_voltages_df.at[index, 'Overvoltage violation'] = True
bus_voltages_df.at[index, "Max voltage_deviation"] = row["Max per unit voltage"] - voltage_upper_limit
else:
bus_voltages_df.at[index, 'Overvoltage violation'] = False
bus_voltages_df.at[index, "Max voltage_deviation"] = 0.0
# check for undervoltage violation
if row["Min per unit voltage"] < voltage_lower_limit:
bus_voltages_df.at[index, 'Undervoltage violation'] = True
bus_voltages_df.at[index, "Min voltage_deviation"] = voltage_lower_limit - row["Min per unit voltage"]
else:
bus_voltages_df.at[index, 'Undervoltage violation'] = False
bus_voltages_df.at[index, "Min voltage_deviation"] = 0.0
bus_voltages_df.reset_index(inplace=True)
undervoltage_bus_list = list(bus_voltages_df.loc[bus_voltages_df['Undervoltage violation'] == True]['name'].unique())
overvoltage_bus_list = list(bus_voltages_df.loc[bus_voltages_df['Overvoltage violation'] == True]['name'].unique())
buses_with_violations = list(set(undervoltage_bus_list + overvoltage_bus_list))
return bus_voltages_df, undervoltage_bus_list, overvoltage_bus_list, buses_with_violations
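# Illustrative sketch (added for clarity, not part of the original module):
# get_voltage_violations() only needs "name", "Max per unit voltage" and
# "Min per unit voltage" columns, so it can be exercised on made-up per-unit values.
def _example_get_voltage_violations():
    bus_voltages_df = pd.DataFrame({
        "name": ["bus1", "bus2"],
        "Max per unit voltage": [1.06, 1.02],
        "Min per unit voltage": [0.98, 0.93],
    })
    # with limits of 1.05/0.95, bus1 is flagged overvoltage and bus2 undervoltage
    return get_voltage_violations(voltage_upper_limit=1.05, voltage_lower_limit=0.95,
                                  bus_voltages_df=bus_voltages_df)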
def determine_available_line_upgrades(line_loading_df):
property_list = ['line_definition_type', 'linecode', 'phases', 'kV', 'Switch',
'normamps', 'r1', 'x1', 'r0', 'x0', 'C1', 'C0',
'rmatrix', 'xmatrix', 'cmatrix', 'Rg', 'Xg', 'rho', 'units', 'spacing',
# 'wires', 'EarthModel', 'cncables', 'tscables', 'B1', 'B0', 'emergamps',
# 'faultrate', 'pctperm', 'repair', 'basefreq', 'enabled', 'like',
'h', 'line_placement']
if 'line_definition_type' not in line_loading_df.columns: # add line_definition_type if not present
line_loading_df = add_info_line_definition_type(line_loading_df)
if 'line_placement' not in line_loading_df.columns:
for index, row in line_loading_df.iterrows(): # add line_placement and h if not present
info_dict = determine_line_placement(row)
for key in info_dict.keys():
line_loading_df.at[index, key] = info_dict[key]
line_upgrade_options = line_loading_df[property_list + ['geometry']]
# remove duplicate line upgrade options (that might have a different name, but same parameters)
line_upgrade_options = line_upgrade_options.loc[line_upgrade_options.astype(str).drop_duplicates(
subset=property_list).index]
line_upgrade_options.reset_index(drop=True, inplace=True)
line_upgrade_options = line_upgrade_options.reset_index().rename(columns={'index': 'name'})
line_upgrade_options['name'] = 'line_' + line_upgrade_options['name'].astype(str)
line_upgrade_options["kV"] = line_upgrade_options["kV"].round(5)
return line_upgrade_options
def determine_available_xfmr_upgrades(xfmr_loading_df):
"""This function creates a dataframe of available transformer upgrades by dropping duplicates from transformer dataframe passed.
Input dataframe will need to contain "amp_limit_per_phase" column. So if external catalog is supplied, ensure it contains that column.
"""
property_list = ['phases', 'windings', 'wdg', 'conn', 'kV', 'kVA',
'tap', '%R', 'Rneut', 'Xneut', 'conns', 'kVs', 'kVAs', 'taps', 'XHL', 'XHT',
'XLT', 'Xscarray', 'thermal', 'n', 'm', 'flrise', 'hsrise', '%loadloss',
'%noloadloss', 'normhkVA', 'emerghkVA', 'sub', 'MaxTap', 'MinTap',
'NumTaps', 'subname', '%imag', 'ppm_antifloat', '%Rs', 'bank',
'XfmrCode', 'XRConst', 'X12', 'X13', 'X23', 'LeadLag',
'Core', 'RdcOhms', 'normamps', 'emergamps', 'faultrate', 'pctperm',
'basefreq', 'amp_limit_per_phase']
# TODO: can add capability to add "amp_limit_per_phase" column if not present in input dataframe.
# if 'amp_limit_per_phase' not in xfmr_loading_df.columns:
xfmr_upgrade_options = xfmr_loading_df[property_list]
xfmr_upgrade_options = xfmr_upgrade_options.loc[xfmr_upgrade_options.astype(str).drop_duplicates().index]
xfmr_upgrade_options.reset_index(drop=True, inplace=True)
xfmr_upgrade_options = xfmr_upgrade_options.reset_index().rename(columns={'index': 'name'})
xfmr_upgrade_options['name'] = 'xfmr_' + xfmr_upgrade_options['name'].astype(str)
return xfmr_upgrade_options
def get_pv_buses(dss):
pv_buses = []
flag = dss.PVsystems.First()
while flag > 0:
pv_buses.append(dss.Properties.Value('bus1').split('.')[0])
flag = dss.PVsystems.Next()
return pv_buses
def get_load_buses(dss):
load_buses = []
flag = dss.Loads.First()
while flag > 0:
load_buses.append(dss.Properties.Value('bus1').split('.')[0])
flag = dss.Loads.Next()
return load_buses
def get_bus_coordinates():
"""This function creates a dataframe of all buses in the circuit with their x and y coordinates
Returns
-------
"""
all_bus_names = dss.Circuit.AllBusNames()
buses_list = []
for b in all_bus_names:
bus_dict = {}
dss.Circuit.SetActiveBus(b)
bus_dict['bus_name'] = b.lower()
bus_dict['x_coordinate'] = dss.Bus.X()
bus_dict['y_coordinate'] = dss.Bus.Y()
buses_list.append(bus_dict)
return pd.DataFrame(buses_list)
def convert_summary_dict_to_df(summary_dict):
df = pd.DataFrame.from_dict(summary_dict, orient='index')
df.index.name = "stage"
return df
def filter_dictionary(dict_data, wanted_keys):
return {k: dict_data.get(k, None) for k in wanted_keys}
def compare_dict(old, new):
"""function to compare two dictionaries with same format.
Only compares common elements present in both original and new dictionaries
"""
    change = {}
    sharedKeys = set(old.keys()).intersection(new.keys())
    for key in sharedKeys:
        field_list = []  # track changed sub-fields separately for each key
        change_flag = False
for sub_field in old[key]:
if old[key][sub_field] != new[key][sub_field]:
change_flag = True
field_list.append(sub_field)
if change_flag:
change[key] = field_list
return change
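# Illustrative sketch (added for clarity, not part of the original module):
# compare_dict() reports, per shared top-level key, which sub-fields differ between
# two equally structured dictionaries. The element names below are hypothetical.
def _example_compare_dict():
    old = {"cap1": {"kvar": 300, "kv": 12.47}, "cap2": {"kvar": 150, "kv": 12.47}}
    new = {"cap1": {"kvar": 450, "kv": 12.47}, "cap3": {"kvar": 150, "kv": 12.47}}
    # expected: {"cap1": ["kvar"]} -- "cap2"/"cap3" are not shared and "kv" is unchanged
    return compare_dict(old, new)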
def create_timepoint_multipliers_dict(timepoint_multipliers):
"""Creates a dictionary with new load rating, for every property and multiplier.
Currently, it only does this for loads. But can be modified to accommodate other elements like PV as well.
In raw_dict, value can be accessed as follows:
value = raw_dict[property_name][object_name][multiplier_name]
In reformatted_dict (which is returned from this function), value can be accessed as follows:
    value = reformatted_dict[object_name][property_name][multiplier_name]
This value will need to be assigned to the object and run.
    This hasn't been used yet.
Returns
-------
dict
"""
for field in timepoint_multipliers.keys():
if field == "load_multipliers":
property_list = ["kW"]
object_name = "Load"
multiplier_list = []
# get combined list of multipliers
for key, value in timepoint_multipliers[field].items():
multiplier_list = multiplier_list + value
df = dss.utils.class_to_dataframe(object_name)
df.reset_index(inplace=True)
df['name'] = df['index'].str.split(".", expand=True)[1]
name_list = list(df['name'].values)
del df["index"]
df.set_index('name', inplace=True)
raw_dict = {}
for property in property_list:
logger.debug(property)
df[property] = df[property].astype(float)
new_df = pd.DataFrame(index=name_list, columns=multiplier_list)
new_df.index.name = 'name'
for multiplier in multiplier_list:
logger.debug(multiplier)
new_df[multiplier] = df[property] * multiplier
raw_dict[property] = new_df.T.to_dict()
# reformat dictionary to create desired format
reformatted_dict = {}
for name in name_list:
reformatted_dict[name] = {}
for property in property_list:
reformatted_dict[name][property] = raw_dict[property][name]
else:
raise Exception(f"Timepoint multiplier has Unsupported key: {field}. Presently, key 'load_multipliers' is supported.")
return reformatted_dict
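# Illustrative sketch (added for clarity, not part of the original module) of the shapes
# this helper expects and returns, using made-up load names, base kW values and multipliers:
#   timepoint_multipliers = {"load_multipliers": {"with_pv": [0.4, 1.0], "without_pv": [0.6]}}
#   reformatted_dict      = {"load1": {"kW": {0.4: 2.0, 1.0: 5.0, 0.6: 3.0}},
#                            "load2": {"kW": {0.4: 4.0, 1.0: 10.0, 0.6: 6.0}}}
# i.e. value = reformatted_dict[load_name]["kW"][multiplier], where the value is the load's
# base kW (here load1 = 5 kW, load2 = 10 kW) scaled by that multiplier.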
@track_timing(timer_stats_collector)
def apply_timepoint_multipliers_dict(reformatted_dict, multiplier_name, property_list=None, field="load_multipliers",
**kwargs):
"""This uses a dictionary with the format of output received from create_timepoint_multipliers_dict
    Currently, it only works on loads, but it can be modified to accommodate other elements like PV as well.
In input dict: value can be accessed as follows:
value = raw_dict[object_name][property_name][multiplier_name]
In this function, value will be assigned to corresponding property and run.
    This hasn't been used yet.
Returns
-------
dict
"""
name_list = list(reformatted_dict.keys())
if property_list is None:
property_list = list(reformatted_dict[name_list[0]].keys())
if field == "load_multipliers":
        flag = dss.Loads.First()
        while flag > 0:
            name = dss.Loads.Name()
            if name in name_list:  # only update loads that are present in the dictionary keys
                for property in property_list:
                    value = reformatted_dict[name][property][multiplier_name]
                    if property == "kW":
                        dss.Loads.kW(value)
                    else:
                        raise Exception(f"Property {property} not defined in multipliers dict")
            flag = dss.Loads.Next()  # advance at the end of the body so the first load is not skipped
circuit_solve_and_check(raise_exception=True, **kwargs)
else:
raise Exception(f"Unsupported key in dictionary. Presently, load_multipliers is supported.")
return reformatted_dict
def apply_uniform_timepoint_multipliers(multiplier_name, field, **kwargs):
"""This function applies a uniform mulitplier to all elements.
Currently, the multiplier only does works on loads. But can be modified to accommodate other elements like PV as well.
It has two options, 1) all pv is enabled. 2) all pv is disabled.
Returns
-------
bool
"""
if field == "with_pv":
check_dss_run_command("BatchEdit PVSystem..* Enabled=True")
elif field == "without_pv":
check_dss_run_command("BatchEdit PVSystem..* Enabled=False")
else:
raise Exception(f"Unknown parameter {field} passed in uniform timepoint multiplier dict."
f"Acceptable values are 'with_pv', 'without_pv'")
check_dss_run_command(f"set LoadMult = {multiplier_name}")
circuit_solve_and_check(raise_exception=True, **kwargs)
return True
| 45.589875
| 173
| 0.633531
|
87370bb6e892a5c77eabbe7a1670d348678913be
| 2,784
|
py
|
Python
|
modbus_utils.py
|
davystrong/FlexFact-Tina
|
6b5b0603834160abb5fcf66b6e3a532a304790c9
|
[
"MIT"
] | null | null | null |
modbus_utils.py
|
davystrong/FlexFact-Tina
|
6b5b0603834160abb5fcf66b6e3a532a304790c9
|
[
"MIT"
] | null | null | null |
modbus_utils.py
|
davystrong/FlexFact-Tina
|
6b5b0603834160abb5fcf66b6e3a532a304790c9
|
[
"MIT"
] | null | null | null |
from typing import Any, Dict, List, Optional, Tuple, Union
from pathlib import Path
import xml.etree.cElementTree as ET
from pyModbusTCP.client import ModbusClient as mbClient
class ModbusClient(mbClient):
def __init__(self, host: str, port: int, unit_id: Optional[int] = None, timeout: Optional[float] = None, debug: Optional[bool] = None):
try:
super().__init__(host=host, port=port, unit_id=unit_id,
timeout=timeout, debug=debug, auto_open=True)
except ValueError:
print("Error with host or port params")
def __enter__(self):
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any):
self.close()
class InputEvent:
triggers: List[Tuple[int, bool]]
def __init__(self):
self.triggers = []
class OutputEvent:
actions: List[Tuple[int, bool]]
def __init__(self):
self.actions = []
def parseXMLConfig(filepath: Union[Path, str]):
"""
Parse config for virtual Modbus. The XML can be exported from (tina/flexfact)
Returns (inputs, outputs)
"""
inputs: Dict[str, InputEvent] = {}
outputs: Dict[str, OutputEvent] = {}
root = ET.parse(filepath).getroot()
for tag in root.findall('EventConfiguration/Event'):
event = {}
name = tag.get('name')
assert name is not None, 'Expected name in XML'
if tag.get('iotype') == 'input':
event = InputEvent()
raw_triggers = tag.find('Triggers')
assert raw_triggers is not None, 'Expected Triggers in XML'
triggers = [trigger for trigger in raw_triggers.iter(
) if trigger is not tag.find('Triggers')]
for element in triggers:
raw_address = element.get('address')
assert raw_address is not None, 'Expected address in XML'
address = int(raw_address)
rising = element.tag == 'PositiveEdge' # Rising edge or not
event.triggers.append((address, rising))
inputs[name] = event
else:
event = OutputEvent()
raw_actions = tag.find('Actions')
assert raw_actions is not None, 'Expected Actions in XML'
actions = [action for action in raw_actions.iter(
) if action is not tag.find('Actions')]
for element in actions:
raw_address = element.get('address')
assert raw_address is not None, 'Expected address in XML'
address = int(raw_address)
value = element.tag == 'Set' # Otherwise it's clr so it should be false
event.actions.append((address, value))
outputs[name] = event
return inputs, outputs
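# Illustrative sketch (added for clarity, not part of the original module): a minimal
# configuration in the shape parseXMLConfig() walks (EventConfiguration/Event with
# Triggers/Actions). The event names and addresses are made up; ET.parse() also accepts
# file-like objects, which keeps the example self-contained.
def _example_parse_xml_config():
    import io
    xml = (
        '<DeviceConfiguration>'
        '<EventConfiguration>'
        '<Event name="sensor_on" iotype="input">'
        '<Triggers><PositiveEdge address="3"/></Triggers>'
        '</Event>'
        '<Event name="motor_start" iotype="output">'
        '<Actions><Set address="7"/></Actions>'
        '</Event>'
        '</EventConfiguration>'
        '</DeviceConfiguration>'
    )
    inputs, outputs = parseXMLConfig(io.StringIO(xml))
    # inputs["sensor_on"].triggers == [(3, True)]; outputs["motor_start"].actions == [(7, True)]
    return inputs, outputs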
| 33.95122
| 139
| 0.600934
|
31766a8fe6d0ebd3b4f604b005bb644463ede400
| 40
|
py
|
Python
|
script/cgi/perthon/ex/syntax3b.py
|
ErikNissen/webanwendung
|
92ea306c1764f74035aa843d98eed186ea2339b4
|
[
"MIT"
] | null | null | null |
script/cgi/perthon/ex/syntax3b.py
|
ErikNissen/webanwendung
|
92ea306c1764f74035aa843d98eed186ea2339b4
|
[
"MIT"
] | null | null | null |
script/cgi/perthon/ex/syntax3b.py
|
ErikNissen/webanwendung
|
92ea306c1764f74035aa843d98eed186ea2339b4
|
[
"MIT"
] | null | null | null |
def test():
print 1
\
print 2
| 5.714286
| 11
| 0.475
|
75f668d7f72dbf403894a40fb67931fded96d5a1
| 7,222
|
py
|
Python
|
scripts/random_swaps.py
|
sandeepsoni/jca_release
|
3b9ca41fe5a1ff7074347a6720a4025018c23a88
|
[
"MIT"
] | 7
|
2021-02-01T18:44:08.000Z
|
2022-02-10T17:43:31.000Z
|
scripts/random_swaps.py
|
sandeepsoni/jca_release
|
3b9ca41fe5a1ff7074347a6720a4025018c23a88
|
[
"MIT"
] | null | null | null |
scripts/random_swaps.py
|
sandeepsoni/jca_release
|
3b9ca41fe5a1ff7074347a6720a4025018c23a88
|
[
"MIT"
] | 1
|
2021-08-21T19:01:38.000Z
|
2021-08-21T19:01:38.000Z
|
import argparse
import pandas as pd
import os
import random
from random import choices, sample
import numpy as np
from collections import defaultdict, Counter
from itertools import combinations
from helpful_functions import safe_open_w
def readArgs ():
parser = argparse.ArgumentParser (description="swapping code ")
parser.add_argument ("--src-file", type=str, required=True, help="file contains all the observed data")
parser.add_argument ("--tgt-file", type=str, required=True, help="file contains all the randomly permuted data")
parser.add_argument ("--chunk-size", type=int, required=False, default=1000, help="size of each document")
parser.add_argument ("--max-source-size", type=int, required=False, default=None, help="total overall tokens for a source-time combination")
parser.add_argument("--keep-all", dest="keep_all", action="store_true")
parser.add_argument("--no-keep-all", dest="keep_all", action="store_false")
parser.set_defaults(keep_all=True)
parser.add_argument ("--epochs", type=str, nargs="+", required=False, default=[], help="the epochs that need to be kept")
parser.add_argument("--always-activated", dest="always_activated", action="store_true")
parser.add_argument("--not-always-activated", dest="always_activated", action="store_false")
parser.set_defaults(always_activated=False)
args = parser.parse_args ()
if not args.keep_all and len(args.epochs) == 0:
        parser.error('must provide a non-zero number of --epochs when --no-keep-all is used')
return args
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
def make_chunks (text, chunk_size):
return [chunk for chunk in chunks (text, chunk_size)]
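# Small usage sketch (added for illustration): chunking a 5-token "document" into
# fixed-size pieces, as done below when per-source token streams are re-chunked.
def _example_make_chunks():
    tokens = ["t0", "t1", "t2", "t3", "t4"]
    # expected: [["t0", "t1"], ["t2", "t3"], ["t4"]]
    return make_chunks(tokens, 2)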
def read_data (filename, chunk_size=1000):
# read the data as it is
docs = list ()
with open (filename) as fin:
for line in fin:
parts = line.strip().split ("\t")
epoch = parts[1]
source = parts[2].split ("_")[1]
text = parts[3]
docs.append ([epoch, source, text])
# group all documents from one epoch and
# one source
grouped = defaultdict (list)
for doc in docs:
epoch, src, text = doc[0], doc[1], doc[2]
grouped[(epoch, src)].append (doc[2])
# coalesce all documents into one
grouped = {key: [token for text in grouped[key] for token in text.split()] for key in grouped}
# and then make document chunks
grouped = {key: make_chunks(grouped[key], chunk_size) for key in grouped}
rows = list ()
for key in grouped:
epoch, src = key
for chunk in grouped[key]:
rows.append ([epoch, src, chunk])
df = pd.DataFrame (rows, columns=["epoch", "orig_source", "text"])
return df
def transform_by_permuting (df):
    # add mod_source column
    df["mod_source"] = df["orig_source"]
    # per epoch, randomly permute the source labels across documents
    epochs = df["epoch"].unique()
sources = list ()
for epoch in epochs:
epoch_sources = df[df["epoch"] == epoch]["mod_source"].values
new_sources = np.random.permutation (epoch_sources)
sources.append (new_sources)
# assign the new sources
df["mod_source"] = pd.Series (np.concatenate (sources, axis=0))
return df
def select_docs (df, max_docs=None):
if max_docs is None:
return df
    # reorient into an epoch-source dictionary
data = dict ()
for index, row in df.iterrows():
epoch, mod_source = row["epoch"], row["mod_source"]
if epoch not in data:
data[epoch] = dict ()
if mod_source not in data[epoch]:
data[epoch][mod_source] = list ()
data[epoch][mod_source].append ((row["orig_source"], row["text"]))
# now sweep over every epoch one at a time; then every source one at a time;
# and then select a sample for each combination
modified_rows = list ()
for epoch in data:
for source in data[epoch]:
if len (data[epoch][source]) <= max_docs:
# just copy everything
for item in data[epoch][source]:
modified_rows.append ([epoch, source, item[0], item[1]])
else:
items = sample (data[epoch][source], max_docs)
for item in items:
modified_rows.append ([epoch, source, item[0], item[1]])
mod_df = pd.DataFrame (modified_rows, columns=["epoch", "mod_source", "orig_source", "text"])
return mod_df
def select_based_on_time (df, epochs, activated_throughout=False):
# reorient into an epoch-source dictionary
data = dict ()
for index, row in df.iterrows():
epoch, mod_source = row["epoch"], row["mod_source"]
if epoch not in data:
data[epoch] = dict ()
if mod_source not in data[epoch]:
data[epoch][mod_source] = list ()
data[epoch][mod_source].append ((row["orig_source"], row["text"]))
# now select only the relevant source-epoch pairs
if len(epochs) > 0 and activated_throughout:
# there are a few selected epochs and we want all sources
# that are activated throughout them
source_map = dict ()
for epoch in data:
if epoch in epochs:
for source in data[epoch]:
if source not in source_map:
source_map[source] = list ()
source_map[source].append (epoch)
relevant_sources = {key for key in source_map if len(source_map[key]) == len (epochs)}
elif len (epochs) > 0:
        # there are a few selected epochs and we select all sources within those epochs
relevant_sources = set ()
for epoch in data:
if epoch in epochs:
for source in data[epoch]:
relevant_sources.add (source)
elif activated_throughout:
        # there are no selected epochs, but we only want those sources that are activated throughout
source_map = dict ()
for epoch in data:
epochs.append (epoch)
for source in data[epoch]:
if source not in source_map:
source_map[source] = list ()
source_map[source].append (epoch)
relevant_sources = {key for key in source_map if len(source_map[key]) == len (epochs)}
else:
# select everything
relevant_sources = set ()
for epoch in data:
epochs.append (epoch)
for source in data[epoch]:
relevant_sources.add (source)
# now create a dataframe out of it
modified_rows = list ()
for epoch in data:
for source in data[epoch]:
if epoch in epochs and source in relevant_sources:
for item in data[epoch][source]:
modified_rows.append ([epoch, source, item[0], item[1]])
mod_df = pd.DataFrame (modified_rows, columns=["epoch", "mod_source", "orig_source", "text"])
return mod_df
def write_data (df, filename):
with safe_open_w (filename) as fout:
for index, row in df.iterrows():
tokens = " ".join (row["text"])
epoch = row["epoch"]
src = row["mod_source"]
orig = row["orig_source"]
intersection = f"{epoch}_{src}"
fout.write (f"{orig}\t{epoch}\t{intersection}\t{tokens}\n")
def main (args):
# load data and create per source document chunks
df = read_data (args.src_file, chunk_size=args.chunk_size)
new_df = transform_by_permuting (df)
# restrict to max number of documents per source in every epoch
    max_docs = None if args.max_source_size is None else int(args.max_source_size / args.chunk_size)
    new_df = select_docs (new_df, max_docs=max_docs)
# restrict further to small number of epochs if necessary
# and decide which source time pairs are to be kept
if not args.keep_all:
new_df = select_based_on_time (new_df, args.epochs, args.always_activated)
write_data (new_df, args.tgt_file)
if __name__ == "__main__":
main (readArgs ())
| 33.435185
| 141
| 0.705345
|
c7e25259c6ce1ef8a08d27ff5b21772326fbb298
| 842
|
py
|
Python
|
.circleci/test_examples.py
|
cloudify-cosmo/cloudify-diamond-plugin
|
2d5cd1bbb8e5b272d13b26e3ddd45759cde5e8a7
|
[
"Apache-2.0"
] | 4
|
2016-02-28T17:01:34.000Z
|
2019-07-15T08:01:19.000Z
|
.circleci/test_examples.py
|
cloudify-cosmo/cloudify-diamond-plugin
|
2d5cd1bbb8e5b272d13b26e3ddd45759cde5e8a7
|
[
"Apache-2.0"
] | 5
|
2015-10-06T14:46:24.000Z
|
2020-09-10T05:49:43.000Z
|
.circleci/test_examples.py
|
cloudify-cosmo/cloudify-diamond-plugin
|
2d5cd1bbb8e5b272d13b26e3ddd45759cde5e8a7
|
[
"Apache-2.0"
] | 10
|
2015-01-21T17:10:36.000Z
|
2019-07-22T06:30:28.000Z
|
########
# Copyright (c) 2014-2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
blueprint_list = []
@pytest.fixture(scope='function', params=blueprint_list)
def blueprint_examples(**_):
pass
def test_blueprints(blueprint_examples):
assert blueprint_examples is None
| 29.034483
| 74
| 0.752969
|
26504d233ec9c7b4ac597811b834489a07fd4d26
| 2,936
|
py
|
Python
|
steelscript/netprofiler/appfwk/reports/netprofiler_hostgroup.py
|
jkraenzle/steelscript-netprofiler
|
970a8f492203875a35cc13e94237740b31eb01b4
|
[
"MIT"
] | 5
|
2016-02-29T01:16:36.000Z
|
2019-12-08T19:04:54.000Z
|
steelscript/netprofiler/appfwk/reports/netprofiler_hostgroup.py
|
jkraenzle/steelscript-netprofiler
|
970a8f492203875a35cc13e94237740b31eb01b4
|
[
"MIT"
] | 5
|
2015-08-18T19:07:44.000Z
|
2020-06-04T15:56:38.000Z
|
steelscript/netprofiler/appfwk/reports/netprofiler_hostgroup.py
|
jkraenzle/steelscript-netprofiler
|
970a8f492203875a35cc13e94237740b31eb01b4
|
[
"MIT"
] | 3
|
2016-02-29T01:16:37.000Z
|
2020-06-04T00:43:38.000Z
|
# Copyright (c) 2019 Riverbed Technology, Inc.
#
# This software is licensed under the terms and conditions of the MIT License
# accompanying the software ("License"). This software is distributed "AS IS"
# as set forth in the License.
import steelscript.appfwk.apps.report.modules.c3 as c3
from steelscript.appfwk.apps.report.models import Report
from steelscript.netprofiler.appfwk.datasources.netprofiler import \
NetProfilerTimeSeriesTable, NetProfilerGroupbyTable, \
add_netprofiler_hostgroup_field
#
# NetProfiler report
#
report = Report.create("NetProfiler HostGroup Report - ByLocation",
position=10,
field_order=['netprofiler_device', 'endtime',
'duration', 'resolution', 'hostgroup',
'netprofiler_filterexpr'])
section = report.add_section()
add_netprofiler_hostgroup_field(report, section, 'ByLocation')
# Define an overall TimeSeries showing Avg Bytes/s
p = NetProfilerTimeSeriesTable.create('ts-overall',
duration=60, resolution="1min")
p.add_column('time', 'Time', datatype='time', iskey=True)
p.add_column('avg_bytes', 'Avg Bytes/s', units='B/s')
report.add_widget(c3.TimeSeriesWidget, p, "Overall Traffic", width=12)
# Define a Pie Chart for top ports
p = NetProfilerGroupbyTable.create('ports-bytes',
groupby='port_group', duration=60)
p.add_column('portgroup', 'Port Group', iskey=True)
p.add_column('avg_bytes', 'Avg Bytes/s', units='B/s', sortdesc=True)
report.add_widget(c3.PieWidget, p, "Port Groups by Avg Bytes")
# Define a Bar Chart for application ports
p = NetProfilerGroupbyTable.create('application-bytes',
groupby='application_port', duration=60)
p.add_column('protoport_name', 'Application Port', iskey=True)
p.add_column('avg_bytes', 'Avg Bytes/s', units='B/s', sortdesc=True)
report.add_widget(c3.BarWidget, p, "Application Ports by Avg Bytes")
# Define a TimeSeries showing Avg Bytes/s for tcp/80
p = NetProfilerTimeSeriesTable.create('ts-tcp80', duration=60,
filterexpr='tcp/80', cacheable=False)
p.add_column('time', 'Time', datatype='time', iskey=True)
p.add_column('avg_bytes', 'Avg Bytes/s', units='B/s')
p.add_column('avg_bytes_rtx', 'Avg Retrans Bytes/s', units='B/s')
report.add_widget(c3.TimeSeriesWidget, p, "Bandwidth for tcp/80",
altaxis=['avg_bytes_rtx'])
# Define a TimeSeries showing Avg Bytes/s for tcp/443
p = NetProfilerTimeSeriesTable.create('ts-tcp443',
duration=60, filterexpr='tcp/443')
p.add_column('time', 'Time', datatype='time', iskey=True)
p.add_column('avg_bytes', 'Avg Bytes/s', units='B/s')
p.add_column('avg_bytes_rtx', 'Avg Retrans Bytes/s', units='B/s')
report.add_widget(c3.TimeSeriesWidget, p, "Bandwidth for tcp/443")
| 40.219178
| 78
| 0.676771
|
8b20d446a6b0a5a69a61c5e5f0256a7502057207
| 3,368
|
py
|
Python
|
firebase/firestore-py/lib/students/reader.py
|
BraydenKO/RamLife
|
10c9bbb7338fbaf6c3d1c98bb2f559e6cc089ee6
|
[
"MIT"
] | 3
|
2021-10-03T11:37:11.000Z
|
2022-01-20T15:39:58.000Z
|
firebase/firestore-py/lib/students/reader.py
|
BraydenKO/RamLife
|
10c9bbb7338fbaf6c3d1c98bb2f559e6cc089ee6
|
[
"MIT"
] | 58
|
2020-03-10T18:48:52.000Z
|
2021-08-31T23:19:09.000Z
|
firebase/firestore-py/lib/students/reader.py
|
Ramaz-Upper-School/RamLife
|
5015c72f6e6dc53cd5dd37bd3f0f87caf40ec0c4
|
[
"MIT"
] | 8
|
2020-09-08T18:29:54.000Z
|
2021-04-20T23:11:50.000Z
|
import csv
from collections import defaultdict
import lib.data as data
import lib.utils as utils
def read_students():
with open(utils.dir.students) as file: return {
row ["ID"]: data.User(
first = row ["First Name"],
last = row ["Last Name"],
email = row ["Email"].lower(),
id = row ["ID"],
)
for row in csv.DictReader(file)
if row ["ID"] not in utils.constants.corrupted_students
}
def read_periods():
homeroom_locations = {}
periods = defaultdict(list)
with open(utils.dir.section_schedule) as file:
for row in csv.DictReader(file):
if row ["SCHOOL_ID"] != "Upper": continue
section_id = row ["SECTION_ID"]
day = row ["WEEKDAY_NAME"]
period_str = row ["BLOCK_NAME"]
room = row ["ROOM"]
# Handle homerooms
try: period_num = int(period_str)
except ValueError:
if period_str == "HOMEROOM":
homeroom_locations [section_id] = room
continue
periods [section_id].append(data.Period(
day = day,
room = room,
id = section_id,
period = period_num
))
return periods
def read_student_courses():
courses = defaultdict(list)
with open(utils.dir.schedule) as file:
for row in csv.DictReader(file):
if row ["SCHOOL_ID"] != "Upper": continue
student = row ["STUDENT_ID"]
if student in utils.constants.corrupted_students: continue
courses [student].append(row ["SECTION_ID"])
return courses
def read_semesters():
with open(utils.dir.section) as file: return {
row ["SECTION_ID"]: data.Semesters(
semester1 = row ["TERM1"] == "Y",
semester2 = row ["TERM2"] == "Y",
section_id = row ["SECTION_ID"],
)
for row in csv.DictReader(file)
if row ["SCHOOL_ID"] == "Upper"
}
def get_schedules(students, periods, student_courses, semesters):
homerooms = {}
seniors = set()
result = defaultdict(data.DayDefaultDict)
ignored = set()
for student, courses in student_courses.items():
student = students [student]
for section_id in courses:
if "UADV" in section_id:
homerooms [student] = section_id
continue
# if section_id in utils.constants.ignored_sections: continue
try: semester = semesters [section_id]
except KeyError as error:
utils.logger.error(f"Section {section_id} was in schedule.csv but not in sections.csv")
raise error from None
if (semester is not None and not (semester.semester1 if utils.constants.is_semester1 else semester.semester2)):
continue
elif section_id.startswith("12"): seniors.add(student)
if section_id not in periods: # in schedule.csv but not section_schedule.csv
ignored.add(section_id)
continue
for period in periods [section_id]:
result [student] [period.day] [period.period - 1] = period
for schedule in result.values(): schedule.populate(utils.constants.day_names)
if ignored:
utils.logger.warning(f"Ignored {len(ignored)} classes")
utils.logger.debug("Ignored classes", ignored)
return result, homerooms, seniors
def set_students_schedules(schedules, homerooms, homeroom_locations):
for student, schedule in schedules.items():
if student.id in utils.constants.ignored_students: continue
student.homeroom = "SENIOR_HOMEROOM" if student not in homerooms else homerooms [student]
student.homeroom_location = "Unavailable" if student not in homerooms else homeroom_locations [homerooms [student]]
student.schedule = schedule
| 31.185185
| 117
| 0.708432
|
394c3f4f6f79572558cb7575e8b65dc97e99fb00
| 3,548
|
py
|
Python
|
devices/sensors/sht21.py
|
boretskij/SensorsPy
|
98ebdf0ec88ff4532918ad16c925a8563780a6bf
|
[
"MIT"
] | null | null | null |
devices/sensors/sht21.py
|
boretskij/SensorsPy
|
98ebdf0ec88ff4532918ad16c925a8563780a6bf
|
[
"MIT"
] | null | null | null |
devices/sensors/sht21.py
|
boretskij/SensorsPy
|
98ebdf0ec88ff4532918ad16c925a8563780a6bf
|
[
"MIT"
] | 1
|
2019-10-27T11:38:55.000Z
|
2019-10-27T11:38:55.000Z
|
import smbus2 as smbus
import time
class SHT21:
"""Class to read temperature and humidity from SHT21.
Resources:
http://www.sensirion.com/fileadmin/user_upload/customers/sensirion/Dokumente/Humidity/Sensirion_Humidity_SHT21_Datasheet_V3.pdf
https://github.com/jaques/sht21_python/blob/master/sht21.py
Martin Steppuhn's code from http://www.emsystech.de/raspi-sht21
https://github.com/jsilence/python-i2c-sensors/blob/master/sht21.py"""
#control constants
_SOFTRESET = 0xFE
_I2C_ADDRESS = 0x40
_TRIGGER_TEMPERATURE_NO_HOLD = 0xF3
_TRIGGER_HUMIDITY_NO_HOLD = 0xF5
def __init__(self, bus=1, address=0x40):
"""According to the datasheet the soft reset takes less than 15 ms."""
self.bus = smbus.SMBus(bus)
self.bus.write_byte(self._I2C_ADDRESS, self._SOFTRESET)
time.sleep(0.015)
def get_data(self):
temperature = self.read_temperature()
humidity = self.read_humidity()
return {'temperature':temperature,'humidity':humidity}
def read_temperature(self):
"""Reads the temperature from the sensor. Not that this call blocks
for 250ms to allow the sensor to return the data"""
data = []
self.bus.write_byte(self._I2C_ADDRESS, self._TRIGGER_TEMPERATURE_NO_HOLD)
time.sleep(0.250)
data.append(self.bus.read_byte(self._I2C_ADDRESS))
data.append(self.bus.read_byte(self._I2C_ADDRESS))
return self._get_temperature_from_buffer(data)
def read_humidity(self):
"""Reads the humidity from the sensor. Not that this call blocks
for 250ms to allow the sensor to return the data"""
data = []
self.bus.write_byte(self._I2C_ADDRESS, self._TRIGGER_HUMIDITY_NO_HOLD)
time.sleep(0.250)
data.append(self.bus.read_byte(self._I2C_ADDRESS))
data.append(self.bus.read_byte(self._I2C_ADDRESS))
return self._get_humidity_from_buffer(data)
def _get_temperature_from_buffer(self, data):
"""This function reads the first two bytes of data and
returns the temperature in C by using the following function:
        T = -46.85 + (175.72 * (ST/2^16))
where ST is the value from the sensor
"""
unadjusted = (data[0] << 8) + data[1]
unadjusted *= 175.72
unadjusted /= 1 << 16 # divide by 2^16
unadjusted -= 46.85
return unadjusted
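    # Worked example (added for illustration): raw bytes 0x66, 0x4C give
    # ST = (0x66 << 8) + 0x4C = 26188, so T = -46.85 + 175.72 * (26188 / 65536) ~= 23.4 C.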
def _get_humidity_from_buffer(self, data):
"""This function reads the first two bytes of data and returns
the relative humidity in percent by using the following function:
RH = -6 + (125 * (SRH / 2 ^16))
where SRH is the value read from the sensor
"""
unadjusted = (data[0] << 8) + data[1]
unadjusted *= 125
unadjusted /= 1 << 16 # divide by 2^16
unadjusted -= 6
return unadjusted
def close(self):
"""Closes the i2c connection"""
self.bus.close()
def __enter__(self):
"""used to enable python's with statement support"""
return self
def __exit__(self, type, value, traceback):
"""with support"""
self.close()
if __name__ == "__main__":
try:
        # SHT21 opens its own SMBus from a bus number, so pass the bus index directly
        with SHT21(0) as sht21:
            print ("Temperature: %s" % sht21.read_temperature())
            print ("Humidity: %s" % sht21.read_humidity())
    except Exception as error:
        print (error)
        print ('Error creating connection to i2c.')
| 35.48
| 133
| 0.633878
|
f8dfac8a823f8fb5d407e942861cc1f11e650b09
| 2,360
|
py
|
Python
|
venv/Lib/site-packages/pyrogram/raw/types/message_entity_bank_card.py
|
D1ne2021/jjhhhjj
|
a090da30983b3ef276dfe4cef2ded4526f36002a
|
[
"MIT"
] | 2
|
2021-12-13T07:09:55.000Z
|
2022-01-12T12:15:20.000Z
|
venv/Lib/site-packages/pyrogram/raw/types/message_entity_bank_card.py
|
hoangkiet1906/Botcie_ver1
|
c133b915edde06dac690a7dc6ca160f6792fc4c8
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/pyrogram/raw/types/message_entity_bank_card.py
|
hoangkiet1906/Botcie_ver1
|
c133b915edde06dac690a7dc6ca160f6792fc4c8
|
[
"MIT"
] | null | null | null |
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class MessageEntityBankCard(TLObject): # type: ignore
"""This object is a constructor of the base type :obj:`~pyrogram.raw.base.MessageEntity`.
Details:
- Layer: ``126``
- ID: ``0x761e6af4``
Parameters:
offset: ``int`` ``32-bit``
length: ``int`` ``32-bit``
"""
__slots__: List[str] = ["offset", "length"]
ID = 0x761e6af4
QUALNAME = "types.MessageEntityBankCard"
def __init__(self, *, offset: int, length: int) -> None:
self.offset = offset # int
self.length = length # int
@staticmethod
def read(data: BytesIO, *args: Any) -> "MessageEntityBankCard":
# No flags
offset = Int.read(data)
length = Int.read(data)
return MessageEntityBankCard(offset=offset, length=length)
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
# No flags
data.write(Int(self.offset))
data.write(Int(self.length))
return data.getvalue()
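# A hedged, standalone illustration (not generated code): it shows the byte layout the write()
# method above is expected to produce, assuming the Int primitive uses the usual 4-byte
# little-endian TL encoding; the offset/length values are arbitrary examples.
def _example_bank_card_wire_bytes(offset: int = 0, length: int = 16) -> bytes:
    import struct
    constructor_id = 0x761e6af4  # same value as MessageEntityBankCard.ID
    return struct.pack("<Iii", constructor_id, offset, length)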
| 31.466667
| 103
| 0.614831
|
3b1ac44f78eddcd8fa9f36aca04dc7a7d2c36893
| 143
|
py
|
Python
|
ast-transformations-core/src/test/resources/org/jetbrains/research/ml/ast/transformations/ifRedundantLinesRemoval/data/pure/in_10_middle_common_part.py
|
JetBrains-Research/ast-transformations
|
0ab408af3275b520cc87a473f418c4b4dfcb0284
|
[
"MIT"
] | 8
|
2021-01-19T21:15:54.000Z
|
2022-02-23T19:16:25.000Z
|
ast-transformations-core/src/test/resources/org/jetbrains/research/ml/ast/transformations/ifRedundantLinesRemoval/data/pure/out_10.py
|
JetBrains-Research/ast-transformations
|
0ab408af3275b520cc87a473f418c4b4dfcb0284
|
[
"MIT"
] | 4
|
2020-11-17T14:28:25.000Z
|
2022-02-24T07:54:28.000Z
|
ast-transformations-core/src/test/resources/org/jetbrains/research/ml/ast/transformations/ifRedundantLinesRemoval/data/pure/out_10.py
|
nbirillo/ast-transformations
|
717706765a2da29087a0de768fc851698886dd65
|
[
"MIT"
] | 1
|
2022-02-23T19:16:30.000Z
|
2022-02-23T19:16:30.000Z
|
s = input()
if s:
print('foo')
a = 2
b = a + a
print('bar')
else:
print('foo1')
a = 2
b = a + a
print('bar1')
| 11
| 17
| 0.398601
|
0509437e275d99661515288fbb3ba85ec8684e52
| 883
|
py
|
Python
|
test.py
|
kmapgar01/LDHelloWorld
|
671246ae5425b4e96ecf925e37b8f68a6c08bbeb
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
kmapgar01/LDHelloWorld
|
671246ae5425b4e96ecf925e37b8f68a6c08bbeb
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
kmapgar01/LDHelloWorld
|
671246ae5425b4e96ecf925e37b8f68a6c08bbeb
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import logging
import sys
import ldclient
from ldclient.config import Config
root = logging.getLogger()
root.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
if __name__ == "__main__":
sdk_key = "YOUR_SDK_KEY"
ldclient.set_config(Config(sdk_key))
user = {
"key": "bob@example.com",
"firstName": "Bob",
"lastName": "Loblaw",
"custom": {
"groups": "beta_testers"
}
}
show_feature = ldclient.get().variation("YOUR_FLAG_KEY", user, False)
if show_feature:
print("Showing your feature")
else:
print("Not showing your feature")
ldclient.get().close() # close the client before exiting the program - ensures that all events are delivered
| 23.236842
| 110
| 0.696489
|
5270a52c40b8e4d133cec4fe91caebc80d02a22a
| 704
|
py
|
Python
|
main/at-spi2-atk/template.py
|
RoastVeg/cports
|
803c7f07af341eb32f791b6ec1f237edb2764bd5
|
[
"BSD-2-Clause"
] | 46
|
2021-06-10T02:27:32.000Z
|
2022-03-27T11:33:24.000Z
|
main/at-spi2-atk/template.py
|
RoastVeg/cports
|
803c7f07af341eb32f791b6ec1f237edb2764bd5
|
[
"BSD-2-Clause"
] | 58
|
2021-07-03T13:58:20.000Z
|
2022-03-13T16:45:35.000Z
|
main/at-spi2-atk/template.py
|
RoastVeg/cports
|
803c7f07af341eb32f791b6ec1f237edb2764bd5
|
[
"BSD-2-Clause"
] | 6
|
2021-07-04T10:46:40.000Z
|
2022-01-09T00:03:59.000Z
|
pkgname = "at-spi2-atk"
pkgver = "2.38.0"
pkgrel = 0
build_style = "meson"
hostmakedepends = [
"meson", "pkgconf", "glib-devel", "gettext-tiny",
]
makedepends = [
"libglib-devel", "atk-devel", "at-spi2-core-devel", "libxml2-devel"
]
pkgdesc = "GTK+ module that bridges ATK to D-Bus AT-SPI"
maintainer = "q66 <q66@chimera-linux.org>"
license = "LGPL-2.0-or-later"
url = "https://gitlab.gnome.org/GNOME/at-spi2-atk"
source = f"$(GNOME_SITE)/{pkgname}/{pkgver[:-2]}/{pkgname}-{pkgver}.tar.xz"
sha256 = "cfa008a5af822b36ae6287f18182c40c91dd699c55faa38605881ed175ca464f"
# non-trivial dbus setup
options = ["!check"]
@subpackage("at-spi2-atk-devel")
def _devel(self):
return self.default_devel()
| 30.608696
| 75
| 0.698864
|
692525e518fa390e8afd65f366b1fd717e37be63
| 2,736
|
py
|
Python
|
tests/test_mongodb.py
|
crim-ca/weaver
|
107fec5e19f20b77061b9405a764da911d2db8a2
|
[
"Apache-2.0"
] | 16
|
2019-03-18T12:23:05.000Z
|
2022-02-25T00:39:11.000Z
|
tests/test_mongodb.py
|
crim-ca/weaver
|
107fec5e19f20b77061b9405a764da911d2db8a2
|
[
"Apache-2.0"
] | 346
|
2019-03-06T21:05:04.000Z
|
2022-03-31T13:38:37.000Z
|
tests/test_mongodb.py
|
crim-ca/weaver
|
107fec5e19f20b77061b9405a764da911d2db8a2
|
[
"Apache-2.0"
] | 5
|
2019-03-15T01:38:28.000Z
|
2021-11-11T15:38:43.000Z
|
"""
Based on unittests in https://github.com/wndhydrnt/python-oauth2/tree/master/oauth2/test.
"""
import unittest
import mock
from pymongo.collection import Collection
from weaver.datatype import Service
from weaver.store.mongodb import MongodbServiceStore
class MongodbServiceStoreTestCase(unittest.TestCase):
def setUp(self):
self.service = dict(name="loving_flamingo", url="http://somewhere.over.the/ocean", type="wps",
public=False, auth="token")
self.service_public = dict(name="open_pingu", url="http://somewhere.in.the/deep_ocean", type="wps",
public=True, auth="token")
self.service_special = dict(url="http://wonderload", name="A special Name", type="wps", auth="token")
self.sane_name_config = {"assert_invalid": False}
def test_fetch_by_name(self):
collection_mock = mock.Mock(spec=Collection)
collection_mock.find_one.return_value = self.service
store = MongodbServiceStore(collection=collection_mock, sane_name_config=self.sane_name_config)
service = store.fetch_by_name(name=self.service["name"])
collection_mock.find_one.assert_called_with({"name": self.service["name"]})
assert isinstance(service, dict)
def test_save_service_default(self):
collection_mock = mock.Mock(spec=Collection)
collection_mock.count_documents.return_value = 0
collection_mock.find_one.return_value = self.service
store = MongodbServiceStore(collection=collection_mock, sane_name_config=self.sane_name_config)
store.save_service(Service(self.service))
collection_mock.insert_one.assert_called_with(self.service)
def test_save_service_with_special_name(self):
collection_mock = mock.Mock(spec=Collection)
collection_mock.count_documents.return_value = 0
collection_mock.find_one.return_value = self.service_special
store = MongodbServiceStore(collection=collection_mock, sane_name_config=self.sane_name_config)
store.save_service(Service(self.service_special))
collection_mock.insert_one.assert_called_with({
"url": "http://wonderload", "type": "wps", "name": "A_special_Name", "public": False, "auth": "token"})
def test_save_service_public(self):
collection_mock = mock.Mock(spec=Collection)
collection_mock.count_documents.return_value = 0
collection_mock.find_one.return_value = self.service_public
store = MongodbServiceStore(collection=collection_mock, sane_name_config=self.sane_name_config)
store.save_service(Service(self.service_public))
collection_mock.insert_one.assert_called_with(self.service_public)
| 46.372881
| 115
| 0.724415
|
546993aaf88392a64cf10aa5bbbc25d528ecbd35
| 1,359
|
py
|
Python
|
pyACA/FeatureTimeAcfCoeff.py
|
ruohoruotsi/pyACA
|
339e9395b65a217aa5965638af941b32d5c95454
|
[
"MIT"
] | 81
|
2019-07-08T15:48:03.000Z
|
2022-03-21T22:52:25.000Z
|
pyACA/FeatureTimeAcfCoeff.py
|
ruohoruotsi/pyACA
|
339e9395b65a217aa5965638af941b32d5c95454
|
[
"MIT"
] | 24
|
2019-10-03T19:20:18.000Z
|
2022-02-28T17:20:40.000Z
|
pyACA/FeatureTimeAcfCoeff.py
|
ruohoruotsi/pyACA
|
339e9395b65a217aa5965638af941b32d5c95454
|
[
"MIT"
] | 26
|
2019-07-18T23:50:52.000Z
|
2022-03-10T14:59:35.000Z
|
# -*- coding: utf-8 -*-
"""
computes the ACF coefficients of a time domain signal
Args:
x: audio signal
iBlockLength: block length in samples
iHopLength: hop length in samples
f_s: sample rate of audio data (unused)
eta: index (or vector of indices) of coeff result
Returns:
vacf autocorrelation coefficient
t time stamp
"""
import numpy as np
import pyACA
def FeatureTimeAcfCoeff(x, iBlockLength, iHopLength, f_s, eta=19):
# create blocks
xBlocks = pyACA.ToolBlockAudio(x, iBlockLength, iHopLength)
# number of results
iNumOfBlocks = xBlocks.shape[0]
if (np.isscalar(eta)):
iNumOfResultsPerBlock = 1
else:
iNumOfResultsPerBlock = eta.size
# compute time stamps
t = (np.arange(0, iNumOfBlocks) * iHopLength + (iBlockLength / 2)) / f_s
# allocate memory
vacf = np.zeros([iNumOfResultsPerBlock, iNumOfBlocks])
for n, block in enumerate(xBlocks):
# calculate the acf
if not block.sum():
vacf[np.arange(0, iNumOfResultsPerBlock), n] = np.zeros(iNumOfResultsPerBlock)
continue
else:
afCorr = np.correlate(block, block, "full") / np.dot(block, block)
# find the coefficients specified in eta
vacf[np.arange(0, iNumOfResultsPerBlock), n] = afCorr[iBlockLength + eta]
return vacf, t
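# A standalone, hedged sketch of the per-block computation above: the full autocorrelation is
# normalized by the block energy and a single lag (eta) is read out. The sine block is a toy
# signal invented for illustration, and the indexing mirrors afCorr[iBlockLength + eta] above.
def _example_acf_coeff(eta=19):
    block = np.sin(2 * np.pi * np.arange(64) / 16.0)
    afCorr = np.correlate(block, block, "full") / np.dot(block, block)
    return afCorr[block.size + eta]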
| 26.647059
| 90
| 0.657837
|
b3f7974a3a66d3e3830a9c8ed6175bee4f4ced1f
| 257
|
py
|
Python
|
plans/serializers.py
|
thestackcoder/notifao_app
|
e21ab3c0eed72a64ee24508b92045de13c8385bb
|
[
"MIT"
] | null | null | null |
plans/serializers.py
|
thestackcoder/notifao_app
|
e21ab3c0eed72a64ee24508b92045de13c8385bb
|
[
"MIT"
] | null | null | null |
plans/serializers.py
|
thestackcoder/notifao_app
|
e21ab3c0eed72a64ee24508b92045de13c8385bb
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from .models import Plan
class PlanSerializer(serializers.ModelSerializer):
class Meta:
model = Plan
fields = ['id', 'name', 'price', 'duration', 'notifications', 'emails', 'apps', 'description']
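# A hedged usage sketch: the field values below are invented and assume the Plan model accepts
# them as plain Python literals.
def _example_plan_payload():
    plan = Plan(name="Starter", price=9, duration=30, notifications=1000, emails=100,
                apps=1, description="Entry tier")
    return PlanSerializer(plan).data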
| 28.555556
| 102
| 0.688716
|
3e3a34d85236a590ad1d9419836ee765d187efe4
| 905
|
py
|
Python
|
osr2mp4/ImageProcess/Objects/Components/ScorebarBG.py
|
ADoesGit/osr2mp4-core
|
b702295998439dce39a421dbefde71c37f5ddb63
|
[
"MIT"
] | null | null | null |
osr2mp4/ImageProcess/Objects/Components/ScorebarBG.py
|
ADoesGit/osr2mp4-core
|
b702295998439dce39a421dbefde71c37f5ddb63
|
[
"MIT"
] | null | null | null |
osr2mp4/ImageProcess/Objects/Components/ScorebarBG.py
|
ADoesGit/osr2mp4-core
|
b702295998439dce39a421dbefde71c37f5ddb63
|
[
"MIT"
] | null | null | null |
from .AScorebar import AScorebar
class ScorebarBG(AScorebar):
def __init__(self, frames, start_time, settings, hasfl):
AScorebar.__init__(self, frames, settings=settings)
self.map_start = start_time
self.hasfl = hasfl
def add_to_frame(self, background, cur_time, inbreak):
AScorebar.animate(self)
if self.settings.settings["In-game interface"] or inbreak:
# use a more optimised algorithm to draw background and scorebarbg
if not self.hasfl:
# if in break then reset frame will be background's job. Otherwise it's ScorebarBG's job
animating = self.h != 0
if animating or cur_time < self.map_start:
self.frame_index = 0
super().add_to_frame(background, 0, -self.h, alpha=self.alpha, topleft=True)
elif not inbreak:
background.paste(self.frames[1], (0, -self.h))
else:
super().add_to_frame(background, 0, -self.h, alpha=self.alpha, topleft=True)
| 34.807692
| 92
| 0.723757
|
d13c31d0ed4c514c393fa3b27610947346d00789
| 12,421
|
py
|
Python
|
qa/rpc-tests/util.py
|
ToranTeam/NewToran
|
ba40d8884f6f3e0d3aa7a0eb54ada7c6a21e2642
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/util.py
|
ToranTeam/NewToran
|
ba40d8884f6f3e0d3aa7a0eb54ada7c6a21e2642
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/util.py
|
ToranTeam/NewToran
|
ba40d8884f6f3e0d3aa7a0eb54ada7c6a21e2642
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2014 The Bitcoin Core developers
# Copyright (c) 2014-2015 The Dash developers
# Copyright (c) 2015-2017 The PIVX developers
# Copyright (c) 2017 The TNX developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(1)
def sync_mempools(rpc_connections):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(1)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "TNX.conf"), 'w') as f:
f.write("regtest=1\n");
f.write("rpcuser=rt\n");
f.write("rpcpassword=rt\n");
f.write("port="+str(p2p_port(n))+"\n");
f.write("rpcport="+str(rpc_port(n))+"\n");
return datadir
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
TNXd and TNX-cli must be in search path.
"""
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
# Create cache directories, run TNXd:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("BITCOIND", "TNXd"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
subprocess.check_call([ os.getenv("BITCOINCLI", "TNX-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:rt@127.0.0.1:%d"%(rpc_port(i),)
rpcs.append(AuthServiceProxy(url))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 10 minutes apart, starting
# at 1 Jan 2014
block_time = 1388534400
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].setgenerate(True, 1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in TNX.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
    match = re.match(r'(\[[0-9a-fA-F:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None):
"""
Start a TNXd and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
args = [ os.getenv("BITCOIND", "TNXd"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
subprocess.check_call([ os.getenv("BITCOINCLI", "TNX-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
proxy = AuthServiceProxy(url)
proxy.url = url # store URL on proxy for info
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
"""
Start multiple TNXds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
return [ start_node(i, dirname, extra_args[i], rpchost) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
    then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
| 35.795389
| 104
| 0.64713
|
7dd9e2c8e1381ee5428448c4416f56c4296f9c95
| 1,564
|
py
|
Python
|
tests/test_reader_springer.py
|
OBrink/chemdataextractor2
|
152a45f6abbf069d2070232fa5c4038569ac7717
|
[
"MIT"
] | null | null | null |
tests/test_reader_springer.py
|
OBrink/chemdataextractor2
|
152a45f6abbf069d2070232fa5c4038569ac7717
|
[
"MIT"
] | null | null | null |
tests/test_reader_springer.py
|
OBrink/chemdataextractor2
|
152a45f6abbf069d2070232fa5c4038569ac7717
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
test_reader_springer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test reader for Springer.
.. codeauthor:: Shu Huang <sh2009@cam.ac.uk>
"""
import unittest
import logging
import io
import os
from chemdataextractor.doc.document import Document
from chemdataextractor.reader.springer_jats import SpringerJatsReader
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
class TestSpringerJatsReader(unittest.TestCase):
def test_detect(self):
"""Test RscXMLReader can detect an RSC document."""
r = SpringerJatsReader()
fname = 'spr_test1.xml'
f = io.open(os.path.join(os.path.dirname(__file__), 'data', 'springer', fname), 'rb')
content = f.read()
f.close()
self.assertEqual(r.detect(content, fname=fname), True)
def test_direct_usage(self):
"""Test RscXMLReader used directly to parse file."""
r = SpringerJatsReader()
fname = 'spr_test1.xml'
f = io.open(os.path.join(os.path.dirname(__file__), 'data', 'springer', fname), 'rb')
content = f.read()
d = r.readstring(content)
f.close()
self.assertEqual(len(d.elements), 307)
def test_document_usage(self):
"""Test RscXMLReader used via Document.from_file."""
fname = 'spr_test1.xml'
f = io.open(os.path.join(os.path.dirname(__file__), 'data', 'springer', fname), 'rb')
d = Document.from_file(f, readers=[SpringerJatsReader()])
self.assertEqual(len(d.elements), 307)
if __name__ == '__main__':
unittest.main()
| 30.076923
| 93
| 0.642583
|
1c128f172caf9c4955301a54a9c08032953c1b72
| 1,551
|
py
|
Python
|
shops/views_autocomplete.py
|
EDario333/minegocito
|
5dd0869fa2510bb8152f4a117f33b2a30bb6d69c
|
[
"MIT"
] | null | null | null |
shops/views_autocomplete.py
|
EDario333/minegocito
|
5dd0869fa2510bb8152f4a117f33b2a30bb6d69c
|
[
"MIT"
] | null | null | null |
shops/views_autocomplete.py
|
EDario333/minegocito
|
5dd0869fa2510bb8152f4a117f33b2a30bb6d69c
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from django.http import HttpResponse, JsonResponse
from django.utils.translation import gettext as _
from users.models import Users
import json
def my_shops_autocomplete(request):
if not request.user.is_authenticated:
return User.objects.none()
results = []
if request.is_ajax():
term = request.GET.get('term', '')
from shops.models import Shops
try:
user = Users.objects.get(pk=request.user)
users=Users.objects.filter(created_by_user=user)
my_users=[]
my_users.extend(users)
my_users.append(user)
shops = Shops.objects.filter(created_by_user__in=my_users, name__icontains=term, dropped=False)
# This will add also the objects created by users
# that I've created
for shop in shops:
label = shop.name + ' [' + _('City') + '='
label += shop.city.display_name + '; '
label += _('Address line 1') + '='
label += shop.address_line1 + '; '
label += _('Admin') + '='
label += shop.admin.first_name + ' '
label += shop.admin.last_name + ' ('
label += shop.admin.email + ')]'
if label not in results:
results.append(label)
except ObjectDoesNotExist:
return JsonResponse({'status': 'error', 'msg': _('There are not records matching your query')})
#return JsonResponse(results)
data = json.dumps(results)
mimetype = "application/json"
return HttpResponse(data, mimetype)
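# A hedged sketch of how the view above can be exercised from a test: the URL path is an
# assumption (it depends on the project's urls.py), the X-Requested-With header is what makes
# request.is_ajax() return True, and a real test would also log a user in first because the
# view returns an empty result for anonymous users.
def _example_autocomplete_request():
    from django.test import Client
    client = Client()
    return client.get("/shops/autocomplete/", {"term": "coffee"},
                      HTTP_X_REQUESTED_WITH="XMLHttpRequest")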
| 27.210526
| 99
| 0.672469
|
7a7bed78c97a031332d9243385018fd4481bc209
| 949
|
py
|
Python
|
enubeuta/apps/articles/views.py
|
Enubeuta6/nuwm-forum
|
8711619b2e37ed5ac0f4876ec18b06d0153e4571
|
[
"MIT"
] | null | null | null |
enubeuta/apps/articles/views.py
|
Enubeuta6/nuwm-forum
|
8711619b2e37ed5ac0f4876ec18b06d0153e4571
|
[
"MIT"
] | null | null | null |
enubeuta/apps/articles/views.py
|
Enubeuta6/nuwm-forum
|
8711619b2e37ed5ac0f4876ec18b06d0153e4571
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from .models import Article, Comment
from django.http import Http404, HttpResponseRedirect
from django.urls import reverse
def index(request):
latest_articles_list = Article.objects.order_by('-pub_date')[:5]
return render(request, 'articles/list.html', {'latest_articles_list': latest_articles_list})
def detail(request, article_id):
try:
        a = Article.objects.get(id=article_id)
    except Article.DoesNotExist:
raise Http404("Article not found")
latest_comments_list = a.comment_set.order_by('-id')[:10]
return render(request, 'articles/detail.html', {'article': a, 'latest_comments_list': latest_comments_list})
def leave_comment(request, article_id):
try:
        a = Article.objects.get(id=article_id)
    except Article.DoesNotExist:
raise Http404("Article not found")
a.comment_set.create(author_name = request.POST['name'], comment_text = request.POST['text'])
return HttpResponseRedirect( reverse('articles:detail', args = (a.id,)))
| 32.724138
| 109
| 0.759747
|
7f2cd476aaf37399d8e33fe6e36d7a9bc4e8684c
| 2,924
|
py
|
Python
|
runsync.py
|
CrownID/maildump
|
b8f5a264d6b9a9e5d8225c74133451293c36696e
|
[
"MIT"
] | null | null | null |
runsync.py
|
CrownID/maildump
|
b8f5a264d6b9a9e5d8225c74133451293c36696e
|
[
"MIT"
] | null | null | null |
runsync.py
|
CrownID/maildump
|
b8f5a264d6b9a9e5d8225c74133451293c36696e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import mailproc
import ConfigParser
import csv
import re
import os
from multiprocessing import Pool
from multiprocessing.dummy import Pool as ThreadPool
import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Boolean, DateTime, Sequence
from sqlalchemy.orm import scoped_session, mapper, sessionmaker
from sqlalchemy.sql import func
from config import REGBASE
Base = declarative_base()
class Register(Base):
__tablename__ = 'dumpjrnl'
id = Column(Integer, Sequence('user_id_seq'), primary_key=True)
boxname = Column(String, nullable=False)
last_status = Column(String, nullable=False)
count = Column(Integer, nullable=False, default=0)
lastrun_date = Column(String, nullable=False)
def __init__ (self, boxname, last_status, count, lastrun_date):
self.boxname = boxname
self.last_status = last_status
self.count = count
self.lastrun_date = lastrun_date
def __repr__(self):
return "<Register('%s','%s', '%s', '%s', '%s')>" \
% (self.id, self.boxname, self.last_status, self.count, \
self.lastrun_date)
sqliteng = sqlalchemy.create_engine('sqlite:///'+REGBASE)
metadata= Base.metadata.create_all(sqliteng)
session_factory=sessionmaker(bind=sqliteng)
Session=scoped_session(session_factory)
tpool = ThreadPool(4)
config=ConfigParser.ConfigParser()
#TODO: processing config exception
config.read('settings.cfg')
#TODO: add option set backing up root directory
backuproot=config.get('main', 'backup_root')
#TODO compress folder
compress=config.get('main', 'compress')
#backup mode - simulation (0) or real backup (1)
backupmode=config.get('main','backupmode')
loglevel=config.get('main','loglevel')
addresses=config.get('main','addressfile')
csv.register_dialect('addr', delimiter=';', quoting=csv.QUOTE_NONE)
reader = csv.DictReader(open(addresses), dialect="addr")
pool = {}
#TODO: add return values for registart in base
def multi_run_wrapper(args):
return mailproc.imapclones(*args)
for row in reader:
for column, value in row.iteritems():
pool.setdefault(column, []).append(value)
n=0
args=[]
protocol='pop'
#DONE: get imap server by email
#sqlalchemy
for k in pool['address']:
login=k
basedomain=re.split("@",k)[1]
if (pool['protocol'][n]=='imap'):
srvname="imap."+basedomain
protocol='imap'
else:
srvname="pop."+basedomain
passw = pool['pass'][n]
#debug
n=n+1
print srvname
print passw
if not (os.path.exists(backuproot+'/'+k)):
os.mkdir(backuproot+'/'+k)
localfolder=backuproot+'/'+k
element_session=Session()
args.append((srvname, k, 'INBOX', localfolder, passw, \
protocol, element_session))
tpool.map(multi_run_wrapper, args)
tpool.close()
tpool.join()
| 26.107143
| 77
| 0.6987
|
f9b70f01c826cadcc7e7fe985cae410f4b718216
| 2,188
|
py
|
Python
|
student-owl/utils/reader.py
|
StudentOwl/StudentOwl-Monitoreo
|
c3592a6ef2ab1234e75d2140317bed91dde45a78
|
[
"MIT"
] | null | null | null |
student-owl/utils/reader.py
|
StudentOwl/StudentOwl-Monitoreo
|
c3592a6ef2ab1234e75d2140317bed91dde45a78
|
[
"MIT"
] | 5
|
2021-01-09T17:10:41.000Z
|
2021-01-20T20:55:39.000Z
|
student-owl/utils/reader.py
|
StudentOwl/StudentOwl-Monitoreo
|
c3592a6ef2ab1234e75d2140317bed91dde45a78
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from os import path
from utils.time_utils import convertTimeToTimestamp
class ReaderLogFile(object):
"""
Reader LogFile class
Provides methods for obtaining log lines.
"""
__EXCLUDED_CLASSES = ('system', 'clipboard', 'url', 'keystrokes', 'jpg',)
    __ACCEPTED_CLASSES = ('app',)  # one-element tuple; ('app') would just be the string 'app'
def __init__(self, pathfile: str, lastLine=0):
"""
        Class constructor.
"""
self._pathfile = path.abspath(pathfile)
self.lastLine = lastLine
def getLines(self) -> list[str]:
"""
        Method that returns a list of lines.
"""
lines = []
pastLastLine = self.lastLine
with open(self._pathfile, 'r', encoding='utf8') as file:
# file.seek(self.lastLine,0)
lines = file.readlines()
self.lastLine = len(lines)
lines = lines[pastLastLine:]
return lines
def getJson(self, lines: list[str]) -> str:
"""
        Method that processes the text lines into a JSON string.
"""
logs = []
for line in lines:
if line.strip() != "":
line = line.replace(',\n', '').replace('\\', '\\\\')
try:
line: dict = json.loads(line)
if type(line) == dict:
line = self.proccessJson(line)
if line:
logs.append(line)
else:
print("[ERROR]: Not a dict")
except ValueError as err:
print("[ERROR]: Not a valid JSON")
print(f"\t{err}")
print(f"\t{line}")
return json.dumps(logs) if len(logs) > 0 else None
def proccessJson(self, jsonData: dict) -> dict:
if jsonData["class"] in self.__ACCEPTED_CLASSES:
if jsonData.get("duration"):
jsonData["duration"] = int(jsonData["duration"])
if jsonData.get("time"):
jsonData["time"] = convertTimeToTimestamp(jsonData["time"])
return jsonData
else:
return None
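# A hedged usage sketch for ReaderLogFile above; the temporary log file and its single "app"
# entry are invented for illustration and are not part of the original project.
def _example_reader_usage():
    import json
    import os
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".log", delete=False, encoding="utf8") as tmp:
        tmp.write(json.dumps({"class": "app", "duration": "5"}) + "\n")
    reader = ReaderLogFile(tmp.name)
    lines = reader.getLines()  # returns the lines added since lastLine (here: all of them)
    os.unlink(tmp.name)
    return lines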
| 29.972603
| 77
| 0.50777
|
54da9d9171744df866836e3e2740864667129885
| 142
|
py
|
Python
|
tests/test_units/test_auth/test_openid_connect.py
|
thanegill/aiogoogle
|
e398df3886b6f6b254fa5413479f503f5bcbf435
|
[
"MIT"
] | null | null | null |
tests/test_units/test_auth/test_openid_connect.py
|
thanegill/aiogoogle
|
e398df3886b6f6b254fa5413479f503f5bcbf435
|
[
"MIT"
] | null | null | null |
tests/test_units/test_auth/test_openid_connect.py
|
thanegill/aiogoogle
|
e398df3886b6f6b254fa5413479f503f5bcbf435
|
[
"MIT"
] | null | null | null |
# TODO:
def test_authorization_url():
pass
def test_decode_and_validate():
pass
def test_build_user_creds_jwt_grant():
pass
| 10.142857
| 38
| 0.71831
|
313fab183c1eb167d08f3d1a914ccd1f16f08e1b
| 2,155
|
py
|
Python
|
car/agent.py
|
CarliWasTaken/Backend
|
56f83999b1521c43b738be1856ffd8eeecf22a93
|
[
"MIT"
] | 1
|
2021-09-29T12:40:25.000Z
|
2021-09-29T12:40:25.000Z
|
car/agent.py
|
CarliWasTaken/Carli
|
56f83999b1521c43b738be1856ffd8eeecf22a93
|
[
"MIT"
] | 1
|
2021-11-15T10:01:27.000Z
|
2021-11-15T10:01:27.000Z
|
car/agent.py
|
CarliWasTaken/Backend
|
56f83999b1521c43b738be1856ffd8eeecf22a93
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append('../')
import Adafruit_PCA9685
from log.log import Log
from typing import *
log: Log = Log.get_instance()
class Servo():
def __init__(self, pwm: Adafruit_PCA9685.PCA9685, number: int, neutral: int, delta_max: int) -> None:
self.__pwm: Adafruit_PCA9685.PCA9685 = pwm
self.__number = number
self.__neutral = neutral
self.__delta_max = delta_max
pass
# sets the servo to its neutral value
def set_neutral(self) -> None:
        '''Sets the servo to its neutral value.
        This method resets the servo to its corresponding default value.
'''
self.__pwm.set_pwm(self.__number, 0, self.__neutral)
pass
# checks if the value is in the accepted range
# def check_value(self, value) -> int:
# if value > self.__neutral + self.__delta_max:
# value = self.__neutral + self.__delta_max
# if value < self.__neutral - self.__delta_max:
# value = self.__neutral - self.__delta_max
# return value
def set_value(self, value: int) -> None:
        '''Sets the given value.
This method is mainly for setting the throttle and the steering angle of the servos/motors
Parameter
---------
value
corresponding value
'''
if(self.__number == 8):
log.info(f"Throttle: {value}")
self.__pwm.set_pwm(self.__number, 0, self.__neutral + value)
pass
class AgentMoveController():
def __init__(self):
self.__pwm: Adafruit_PCA9685.PCA9685 = Adafruit_PCA9685.PCA9685()
self.servos: dict = {
"steering": Servo(self.__pwm, 0, 1200, 40),
"speed": Servo(self.__pwm, 8, 1200, 40),
}
self.reset_servos()
self.__servoMax= 100
self.__servoMin = 0
pass
def reset_servos(self) -> None:
        '''Sets all servos to their neutral value.
        This method resets the servos to their default values.
'''
self.servos["speed"].set_neutral()
self.servos["steering"].set_neutral()
pass
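# A hedged usage sketch for AgentMoveController above. It assumes a PCA9685 board is actually
# attached; on a machine without one, constructing the controller will raise.
def _example_drive():
    controller = AgentMoveController()
    controller.servos["steering"].set_value(20)  # steering pulse: neutral (1200) + 20
    controller.servos["speed"].set_value(10)     # throttle pulse: neutral (1200) + 10
    controller.reset_servos()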
| 29.121622
| 105
| 0.604176
|
51338b0e915d2cbfca661d9b921b918994742d0b
| 1,464
|
py
|
Python
|
test/test_typechecking.py
|
runtime-jupyter-safety/runtime-jupyter-safety
|
f62a24b5b4f44fed5111c31441bc6a105441e34c
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_typechecking.py
|
runtime-jupyter-safety/runtime-jupyter-safety
|
f62a24b5b4f44fed5111c31441bc6a105441e34c
|
[
"BSD-3-Clause"
] | 20
|
2020-04-17T02:32:50.000Z
|
2020-05-07T05:50:32.000Z
|
test/test_typechecking.py
|
runtime-jupyter-safety/runtime-jupyter-safety
|
f62a24b5b4f44fed5111c31441bc6a105441e34c
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import logging
from typing import Set
from nbsafety.data_model.code_cell import cells
from nbsafety.singletons import nbs
from nbsafety.types import CellId
from test.utils import make_safety_fixture, skipif_known_failing
logging.basicConfig(level=logging.ERROR)
# Reset dependency graph before each test
# _safety_fixture, run_cell_ = make_safety_fixture(trace_messages_enabled=True)
_safety_fixture, run_cell_ = make_safety_fixture(mark_typecheck_failures_unsafe=True)
def run_cell(cell, cell_id=None, **kwargs):
"""Mocks the `change active cell` portion of the comm protocol"""
if cell_id is not None:
nbs().handle({"type": "change_active_cell", "active_cell_id": cell_id})
run_cell_(cell, **kwargs)
def get_cell_ids_needing_typecheck() -> Set[CellId]:
return {
cell.cell_id
for cell in cells().all_cells_most_recently_run_for_each_id()
if cell.needs_typecheck
}
def test_int_change_to_str_triggers_typecheck():
run_cell("a = 1", 1)
assert not get_cell_ids_needing_typecheck()
run_cell("b = 2", 2)
assert not get_cell_ids_needing_typecheck()
run_cell("logging.info(a + b)", 3)
assert not get_cell_ids_needing_typecheck()
run_cell('b = "b"', 4)
assert get_cell_ids_needing_typecheck() == {3}
nbs().check_and_link_multiple_cells()
assert not get_cell_ids_needing_typecheck()
assert cells().from_id(3)._cached_typecheck_result is False
| 33.272727
| 85
| 0.746585
|
0ae2afa4a77ba578900565879dc885195429738c
| 9,460
|
py
|
Python
|
src/xgboost_gpu.py
|
gmmoliveira/xgboost_gpu
|
2878e8c4655f37796c88fbc19fd555637ea06f1a
|
[
"Apache-2.0"
] | null | null | null |
src/xgboost_gpu.py
|
gmmoliveira/xgboost_gpu
|
2878e8c4655f37796c88fbc19fd555637ea06f1a
|
[
"Apache-2.0"
] | null | null | null |
src/xgboost_gpu.py
|
gmmoliveira/xgboost_gpu
|
2878e8c4655f37796c88fbc19fd555637ea06f1a
|
[
"Apache-2.0"
] | null | null | null |
'''
Copyright 2020 Guilherme Oliveira
SPDX-License-Identifier: Apache-2.0
========================================================================================================================
Author: Guilherme Oliveira
Date: july 06, 2020
Contact: gmmoliveira1@gmail.com
License: Apache-2.0 (https://www.apache.org/licenses/LICENSE-2.0)
========================================================================================================================
This script implements functions to facilitate the execution of the XGBoost algorithm on multiple GPUs on a single-
machine.
========================================================================================================================
'''
from xgboost.dask import DaskDMatrix, train as dask_xgboost_train, predict as dask_xgboost_predict
from dask.dataframe import from_array, from_pandas
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
import numpy as np
import pandas as pd
def train_xgboost_gpu(
X, y,
data_chunksize=None,
n_gpus=None, n_threads_per_gpu=1,
params=None,
xgboost_model=None,
gpu_cluster=None, client=None
):
'''
Trains a XGBoost model on the GPU.
:param X: a 2D matrix object of either type numpy ndarray or pandas DataFrame;
:param y: a 1D array of one of the following types: numpy ndarray, pandas Series or pandas DataFrame;
:param data_chunksize: number of rows to partition input data (both X and y simultaneously) to split among multiple
GPU devices. Default value None splits evenly among devices;
:param n_gpus: number of GPUs to be used. Default value None selects all available devices.
:param n_threads_per_gpu: number of threads per GPU;
:param params: xgboost trainning params as a python dict, refer to
https://xgboost.readthedocs.io/en/latest/parameter.html
:param xgboost_model: xgbooster object to continue training, it may be either a regular XGBoost model or a
dask xgboost dict
:param gpu_cluster: an existing dask cluster object to use. This param should be used if you call this method
too many times in quick successions. Note that this function doesn't close an externally created cluster.
:param client: an existing dask client object to use. This param should be used if you call this method
too many times in quick successions. Note that this function doesn't close an externally created client.
:return:
A dictionary containing 2 keys:
* 'booster': maps to a XGBoost model
    * 'history': maps to another dict which records the history of the training process, as in the following
    example: {'train': {'logloss': ['0.48253', '0.35953']}, 'eval': {'logloss': ['0.480385', '0.357756']}}
'''
if gpu_cluster is None:
local_gpus = LocalCUDACluster(n_workers=n_gpus, threads_per_worker=n_threads_per_gpu)
else:
local_gpus = gpu_cluster
if client is None:
local_dask_client = Client(local_gpus, {'verbose': 0})
else:
local_dask_client = client
if data_chunksize is None:
data_chunksize = X.shape[0] // len(local_gpus.cuda_visible_devices)
if params is None:
params = {
'learning_rate': 0.3,
'max_depth': 8,
'objective': 'reg:squarederror',
'verbosity': 0,
'tree_method': 'gpu_hist'
}
if isinstance(X, pd.DataFrame):
X = from_pandas(X, chunksize=data_chunksize)
else:
X = from_array(X, chunksize=data_chunksize)
if isinstance(y, pd.DataFrame):
y = from_pandas(y, chunksize=data_chunksize)
else:
y = from_array(y, chunksize=data_chunksize)
dtrain = DaskDMatrix(local_dask_client, X, y)
if type(xgboost_model) is dict:
xgboost_model = xgboost_model['booster']
xgb_model = dask_xgboost_train(local_dask_client, params, dtrain, num_boost_round=100, evals=[(dtrain, 'train')], xgb_model=xgboost_model)
if client is None:
local_dask_client.close()
if gpu_cluster is None:
local_gpus.close()
return xgb_model
def predict_xgboost_gpu(
xgb_model, X,
data_chunksize=None,
n_gpus=None, n_threads_per_gpu=1,
gpu_cluster=None, client=None
):
'''
Predicts the output for the input features X using the 'xgb_model' running on the GPU.
:param xgb_model: a dask XGBoost model to use for predictions
:param X: the input features to use for predictions, must be either a numpy ndarray or a pandas DataFrame
:param data_chunksize: chunk sizes to be used on a dask dataframe, leave the default value None for auto decision
:param n_gpus: number of GPUs to be used. Default value None selects all available devices;
:param n_threads_per_gpu: number of threads per GPU;
:param gpu_cluster: an existing dask cluster object to use. This param should be used if you call this method
too many times in quick successions. Note that this function doesn't close an externally created cluster.
    :param client: an existing dask client object to use. This param should be used if you call this method
too many times in quick successions. Note that this function doesn't close an externally created client.
:return:
    If the input features X is a pandas DataFrame, returns an array-like DataFrame with a single column containing
    the predictions;
    otherwise, if the input features X is a numpy ndarray, returns a 1D ndarray containing the predictions.
'''
if gpu_cluster is None:
local_gpus = LocalCUDACluster(n_workers=n_gpus, threads_per_worker=n_threads_per_gpu)
else:
local_gpus = gpu_cluster
if client is None:
local_dask_client = Client(local_gpus)
else:
local_dask_client = client
if data_chunksize is None:
data_chunksize = X.shape[0] // len(local_gpus.cuda_visible_devices)
if isinstance(X, pd.DataFrame):
ndarray = False
X = from_pandas(X, chunksize=data_chunksize)
else:
ndarray = True
X = from_array(X, chunksize=data_chunksize)
y_predicted = dask_xgboost_predict(local_dask_client, xgb_model, X)
y_predicted = pd.DataFrame(y_predicted)
if client is None:
local_dask_client.close()
if gpu_cluster is None:
local_gpus.close()
if ndarray:
return y_predicted.to_numpy()
return y_predicted
def _example():
    # the following imports are used only within this example; they are kept local so that
    # external modules importing this one do not pay their import cost
from sklearn.metrics import accuracy_score, confusion_matrix, roc_auc_score
from sklearn.metrics import explained_variance_score, mean_squared_error, max_error
from os.path import exists
base_path = ''
if exists('../models/'):
base_path = '../models/'
# [WARNING]: choose carefully the below parameters according to your machine, avoiding, for example, consuming
# more memory than what's available
n, m = 10 ** 4, 10
rand = np.random.Generator(np.random.PCG64())
print('========== *** XGBoost Classification example *** ==========')
params = {
'learning_rate': 0.3,
'max_depth': 8,
'objective': 'binary:hinge',
'verbosity': 0,
'tree_method': 'gpu_hist'
}
class_proportion = 0.5
X = rand.random(size=(n, m))
y = np.array([1 if np.sum(X[i, :]) > class_proportion * m else 0 for i in range(X.shape[0])])
classification_xgbmodel = train_xgboost_gpu(X, y, params=params, n_gpus=1, n_threads_per_gpu=1, xgboost_model=None)
X = rand.random(size=(n, m))
y = np.array([1 if np.sum(X[i, :]) > class_proportion * m else 0 for i in range(X.shape[0])])
y_pred = predict_xgboost_gpu(classification_xgbmodel, X, n_gpus=1, n_threads_per_gpu=1)
'''
# my tests have shown that predicting over the GPU is much slower than over the CPU
# to predict using the CPU instead of the GPU, use the following example code
from xgboost import DMatrix
y_pred = classification_xgbmodel['booster'].predict(DMatrix(pd.DataFrame(X, columns=[i for i in range(m)])))
'''
acc = accuracy_score(y, y_pred)
cm = confusion_matrix(y, y_pred)
print('accuracy: {:.2f}%'.format(acc * 100))
print('confusion matrix:')
print(cm)
try:
print('ROC AUC score: {:.2f}%'.format(roc_auc_score(y, y_pred) * 100))
except:
pass
# save your model as follows
classification_xgbmodel['booster'].save_model(base_path + 'my_classf_model001.xgbmodel')
print('========== *** XGBoost Regression example *** ==========')
transformation = rand.random(size=m)
X = rand.random(size=(n, m))
y = np.matmul(X, transformation)
params = {
'learning_rate': 0.3,
'max_depth': 8,
'objective': 'reg:squarederror',
'verbosity': 0,
'tree_method': 'gpu_hist'
}
regression_xgbmodel = train_xgboost_gpu(X, y, params=params)
X = rand.random(size=(n, m))
y = np.matmul(X, transformation)
y_pred = predict_xgboost_gpu(regression_xgbmodel, X)
'''
# my tests have shown that predicting over the GPU is much slower than over the CPU
# to predict using the CPU instead of the GPU, use the following example code
from xgboost import DMatrix
y_pred = regression_xgbmodel['booster'].predict(DMatrix(pd.DataFrame(X, columns=[i for i in range(m)])))
'''
vscore = explained_variance_score(y, y_pred)
mse = mean_squared_error(y, y_pred)
me = max_error(y, y_pred)
print('Variance score: {:.2f}'.format(vscore))
print('Mean squared error: {:.2f}'.format(mse))
print('Maximum absolute error: {:.2f}'.format(me))
# save your model as follows
regression_xgbmodel['booster'].save_model(base_path + 'my_reg_model001.xgbmodel')
if __name__ == '__main__':
from time import time
t_start = time()
_example()
t_end = time() - t_start
print('executed in {:.2f} seconds'.format(t_end))
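# A hedged sketch (not part of the original module) of reloading a model saved by _example()
# above for plain CPU-side prediction; 'path' should point at the file written by _example(),
# which may live under ../models/ depending on base_path, and the random features are only
# illustrative.
def _example_reload(path='my_reg_model001.xgbmodel'):
    import xgboost as xgb
    booster = xgb.Booster()
    booster.load_model(path)
    X = np.random.random(size=(5, 10))
    return booster.predict(xgb.DMatrix(pd.DataFrame(X)))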
| 38.455285
| 139
| 0.708245
|
5ce8389d4e1de163c4206b151dbd4a9273b83f4e
| 114,484
|
py
|
Python
|
pipeline/pipeline.py
|
rhefner1/ghidonations
|
aa1b263ce30c952400a5eac8739b1ef52a2e4fed
|
[
"Apache-2.0"
] | null | null | null |
pipeline/pipeline.py
|
rhefner1/ghidonations
|
aa1b263ce30c952400a5eac8739b1ef52a2e4fed
|
[
"Apache-2.0"
] | 2
|
2015-03-11T04:59:20.000Z
|
2016-02-08T16:42:06.000Z
|
pipeline/pipeline.py
|
rhefner1/ghidonations
|
aa1b263ce30c952400a5eac8739b1ef52a2e4fed
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python2.5
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google App Engine Pipeline API for complex, asynchronous workflows."""
__all__ = [
# Public API.
'Error', 'PipelineSetupError', 'PipelineExistsError',
'PipelineRuntimeError', 'SlotNotFilledError', 'SlotNotDeclaredError',
'UnexpectedPipelineError', 'PipelineStatusError', 'Slot', 'Pipeline',
'PipelineFuture', 'After', 'InOrder', 'Retry', 'Abort', 'get_status_tree',
'get_pipeline_names', 'get_root_list', 'create_handlers_map',
'set_enforce_auth',
]
import datetime
import hashlib
import itertools
import logging
import os
import re
import sys
import threading
import time
import traceback
import urllib
import uuid
import webapp2
from google.appengine.api import mail
from google.appengine.api import files
from google.appengine.api import users
from google.appengine.api import taskqueue
from google.appengine.ext import db
# Relative imports
import models
import json
import status_ui
import util as mr_util
# For convenience
_PipelineRecord = models._PipelineRecord
_SlotRecord = models._SlotRecord
_BarrierRecord = models._BarrierRecord
_StatusRecord = models._StatusRecord
# Overall TODOs:
# - Add a human readable name for start()
# - Consider using sha1 of the UUID for user-supplied pipeline keys to ensure
# that they keys are definitely not sequential or guessable (Python's uuid1
# method generates roughly sequential IDs).
# Potential TODOs:
# - Add support for ANY N barriers.
# - Allow Pipelines to declare they are "short" and optimize the evaluate()
# function to run as many of them in quick succession.
# - Add support in all Pipelines for hold/release where up-stream
# barriers will fire but do nothing because the Pipeline is not ready.
################################################################################
class Error(Exception):
"""Base class for exceptions in this module."""
class PipelineSetupError(Error):
"""Base class for exceptions that happen before Pipeline execution."""
class PipelineExistsError(PipelineSetupError):
"""A new Pipeline with an assigned idempotence_key cannot be overwritten."""
class PipelineRuntimeError(Error):
"""Base class for exceptions that happen during Pipeline execution."""
class SlotNotFilledError(PipelineRuntimeError):
"""A slot that should have been filled already was not yet filled."""
class SlotNotDeclaredError(PipelineRuntimeError):
"""A slot that was filled or passed along was not previously declared."""
class UnexpectedPipelineError(PipelineRuntimeError):
"""An assertion failed, potentially leaving the pipeline unable to proceed."""
class PipelineUserError(Error):
"""Exceptions raised indirectly by developers to cause certain behaviors."""
class Retry(PipelineUserError):
"""The currently running pipeline should be retried at a later time."""
class Abort(PipelineUserError):
"""The currently running pipeline should be aborted up to the root."""
class PipelineStatusError(Error):
"""Exceptions raised when trying to collect pipeline status."""
################################################################################
_MAX_BARRIERS_TO_NOTIFY = 10
_MAX_ABORTS_TO_BEGIN = 10
_TEST_MODE = False
_TEST_ROOT_PIPELINE_KEY = None
_DEFAULT_BACKOFF_SECONDS = 15
_DEFAULT_BACKOFF_FACTOR = 2
_DEFAULT_MAX_ATTEMPTS = 3
_RETRY_WIGGLE_TIMEDELTA = datetime.timedelta(seconds=20)
_DEBUG = False
_MAX_JSON_SIZE = 900000
_ENFORCE_AUTH = True
################################################################################
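# A hedged, standalone sketch (not part of the Pipeline API) of the retry backoff documented in
# the Pipeline class docstring below: the delay before retry attempt N grows as
# backoff_seconds * backoff_factor**N.
def _example_backoff_delays(attempts=_DEFAULT_MAX_ATTEMPTS,
                            seconds=_DEFAULT_BACKOFF_SECONDS,
                            factor=_DEFAULT_BACKOFF_FACTOR):
  return [seconds * factor ** n for n in range(attempts)]  # defaults: [15, 30, 60]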
class Slot(object):
"""An output that is filled by a Pipeline as it executes."""
def __init__(self, name=None, slot_key=None, strict=False):
"""Initializer.
Args:
name: The name of this slot.
slot_key: The db.Key for this slot's _SlotRecord if it's already been
allocated by an up-stream pipeline.
strict: If this Slot was created as an output of a strictly defined
pipeline.
"""
if name is None:
raise UnexpectedPipelineError('Slot with key "%s" missing a name.' %
slot_key)
if slot_key is None:
slot_key = db.Key.from_path(_SlotRecord.kind(), uuid.uuid1().hex)
self._exists = _TEST_MODE
else:
self._exists = True
self._touched = False
self._strict = strict
self.name = name
self.key = slot_key
self.filled = False
self._filler_pipeline_key = None
self._fill_datetime = None
self._value = None
@property
def value(self):
"""Returns the current value of this slot.
Returns:
The value of the slot (a serializable Python type).
Raises:
SlotNotFilledError if the value hasn't been filled yet.
"""
if not self.filled:
raise SlotNotFilledError('Slot with name "%s", key "%s" not yet filled.'
% (self.name, self.key))
return self._value
@property
def filler(self):
"""Returns the pipeline ID that filled this slot's value.
Returns:
A string that is the pipeline ID.
Raises:
SlotNotFilledError if the value hasn't been filled yet.
"""
if not self.filled:
raise SlotNotFilledError('Slot with name "%s", key "%s" not yet filled.'
% (self.name, self.key))
return self._filler_pipeline_key.name()
@property
def fill_datetime(self):
"""Returns when the slot was filled.
Returns:
A datetime.datetime.
Raises:
SlotNotFilledError if the value hasn't been filled yet.
"""
if not self.filled:
raise SlotNotFilledError('Slot with name "%s", key "%s" not yet filled.'
% (self.name, self.key))
return self._fill_datetime
def _set_value(self, slot_record):
"""Sets the value of this slot based on its corresponding _SlotRecord.
Does nothing if the slot has not yet been filled.
Args:
slot_record: The _SlotRecord containing this Slot's value.
"""
if slot_record.status == _SlotRecord.FILLED:
self.filled = True
self._filler_pipeline_key = _SlotRecord.filler.get_value_for_datastore(
slot_record)
self._fill_datetime = slot_record.fill_time
self._value = slot_record.value
def _set_value_test(self, filler_pipeline_key, value):
"""Sets the value of this slot for use in testing.
Args:
filler_pipeline_key: The db.Key of the _PipelineRecord that filled
this slot.
value: The serializable value set for this slot.
"""
self.filled = True
self._filler_pipeline_key = filler_pipeline_key
self._fill_datetime = datetime.datetime.utcnow()
# Convert to JSON and back again, to simulate the behavior of production.
self._value = json.loads(json.dumps(value))
def __repr__(self):
"""Returns a string representation of this slot."""
if self.filled:
return repr(self._value)
else:
return 'Slot(name="%s", slot_key="%s")' % (self.name, self.key)
class PipelineFuture(object):
"""A future for accessing the outputs of a Pipeline."""
# NOTE: Do not, ever, add a names() method to this class. Callers cannot do
# introspection on their context of being called. Even though the runtime
# environment of the Pipeline can allow for that to happen, such behavior
  # would prevent synchronous simulation and verification, which is an
# unacceptable tradeoff.
def __init__(self, output_names, force_strict=False):
"""Initializer.
Args:
output_names: The list of require output names that will be strictly
enforced by this class.
force_strict: If True, force this future to be in strict mode.
"""
self._after_all_pipelines = set()
self._output_dict = {
'default': Slot(name='default'),
}
self._strict = len(output_names) > 0 or force_strict
if self._strict:
for name in output_names:
if name in self._output_dict:
raise UnexpectedPipelineError('Output name reserved: "%s"' % name)
self._output_dict[name] = Slot(name=name, strict=True)
def _inherit_outputs(self,
pipeline_name,
already_defined,
resolve_outputs=False):
"""Inherits outputs from a calling Pipeline.
Args:
pipeline_name: The Pipeline class name (used for debugging).
already_defined: Maps output name to stringified db.Key (of _SlotRecords)
        of any existing output slots to be inherited by this future.
resolve_outputs: When True, this method will dereference all output slots
before returning back to the caller, making those output slots' values
available.
Raises:
UnexpectedPipelineError when resolve_outputs is True and any of the output
      slots could not be retrieved from the Datastore.
"""
for name, slot_key in already_defined.iteritems():
if not isinstance(slot_key, db.Key):
slot_key = db.Key(slot_key)
slot = self._output_dict.get(name)
if slot is None:
if self._strict:
raise UnexpectedPipelineError(
'Inherited output named "%s" must be filled but '
'not declared for pipeline class "%s"' % (name, pipeline_name))
else:
self._output_dict[name] = Slot(name=name, slot_key=slot_key)
else:
slot.key = slot_key
slot._exists = True
if resolve_outputs:
slot_key_dict = dict((s.key, s) for s in self._output_dict.itervalues())
all_slots = db.get(slot_key_dict.keys())
for slot, slot_record in zip(slot_key_dict.itervalues(), all_slots):
if slot_record is None:
raise UnexpectedPipelineError(
'Inherited output named "%s" for pipeline class "%s" is '
'missing its Slot in the datastore: "%s"' %
(slot.name, pipeline_name, slot.key))
slot = slot_key_dict[slot_record.key()]
slot._set_value(slot_record)
def __getattr__(self, name):
"""Provides an output Slot instance with the given name if allowed."""
if name not in self._output_dict:
if self._strict:
raise SlotNotDeclaredError('Undeclared output with name "%s"' % name)
self._output_dict[name] = Slot(name=name)
slot = self._output_dict[name]
return slot
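# Illustrative sketch (hypothetical classes, assuming this module is imported
# as "pipeline"): a generator pipeline receives a PipelineFuture for each
# child it yields, and reads named outputs as attributes (Slots).
#
#   class Summation(pipeline.Pipeline):
#     output_names = ['count']
#     def run(self, values):
#       self.fill('count', len(values))
#       return sum(values)
#
#   class Driver(pipeline.Pipeline):
#     def run(self, values):
#       result = yield Summation(values)      # result is a PipelineFuture
#       yield Report(result.default, result.count)   # Slots, filled later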
class _PipelineMeta(type):
"""Meta-class for recording all Pipelines that have been defined."""
# List of all Pipeline classes that have been seen.
_all_classes = []
def __new__(meta, name, bases, cls_dict):
"""Initializes the class path of a Pipeline and saves it."""
cls = type.__new__(meta, name, bases, cls_dict)
meta._all_classes.append(cls)
return cls
class ClassProperty(object):
"""Descriptor that lets us have read-only class properties."""
def __init__(self, method):
self.method = method
def __get__(self, cls, obj):
return self.method(obj)
class Pipeline(object):
"""A Pipeline function-object that performs operations and has a life cycle.
Class properties (to be overridden by sub-classes):
async: When True, this Pipeline will execute asynchronously and fill the
default output slot itself using the complete() method.
output_names: List of named outputs (in addition to the default slot) that
this Pipeline must output to (no more, no less).
public_callbacks: If the callback URLs generated for this class should be
accessible by all external requests regardless of login or task queue.
admin_callbacks: If the callback URLs generated for this class should be
accessible by the task queue and externally by users logged in as admins.
class_path: String identifier for this Pipeline, which is derived from
its path in the global system modules dictionary.
Modifiable instance properties:
backoff_seconds: How many seconds to use as the constant factor in
exponential backoff; may be changed by the user
backoff_factor: Base factor to use for exponential backoff. The formula
followed is (backoff_seconds * backoff_factor^current_attempt).
max_attempts: Maximum number of retry attempts to make before failing
completely and aborting the entire pipeline up to the root.
target: The application version to use for processing this Pipeline. This
can be set to the name of a backend to direct Pipelines to run there.
Instance properties:
pipeline_id: The ID of this pipeline.
root_pipeline_id: The ID of the root of this pipeline.
queue_name: The queue this pipeline runs on or None if unknown.
current_attempt: The current attempt being tried for this pipeline.
"""
__metaclass__ = _PipelineMeta
# To be set by sub-classes
async = False
output_names = []
public_callbacks = False
admin_callbacks = False
# Internal only.
_class_path = None # Set for each class
_send_mail = mail.send_mail_to_admins # For testing
def __init__(self, *args, **kwargs):
"""Initializer.
Args:
*args: The positional arguments for this function-object.
**kwargs: The keyword arguments for this function-object.
"""
self.args = args
self.kwargs = kwargs
self.outputs = None
self.backoff_seconds = _DEFAULT_BACKOFF_SECONDS
self.backoff_factor = _DEFAULT_BACKOFF_FACTOR
self.max_attempts = _DEFAULT_MAX_ATTEMPTS
self.target = None
self.task_retry = False
self._current_attempt = 0
self._root_pipeline_key = None
self._pipeline_key = None
self._context = None
self._result_status = None
self._set_class_path()
if _TEST_MODE:
self._context = _PipelineContext('', 'default', '')
self._root_pipeline_key = _TEST_ROOT_PIPELINE_KEY
self._pipeline_key = db.Key.from_path(
_PipelineRecord.kind(), uuid.uuid1().hex)
self.outputs = PipelineFuture(self.output_names)
self._context.evaluate_test(self)
@property
def pipeline_id(self):
"""Returns the ID of this Pipeline as a string or None if unknown."""
if self._pipeline_key is None:
return None
return self._pipeline_key.name()
@property
def root_pipeline_id(self):
"""Returns root pipeline ID as a websafe string or None if unknown."""
if self._root_pipeline_key is None:
return None
return self._root_pipeline_key.name()
@property
def is_root(self):
"""Returns True if this pipeline is a root pipeline, False otherwise."""
return self._root_pipeline_key == self._pipeline_key
@property
def queue_name(self):
"""Returns the queue name this Pipeline runs on or None if unknown."""
if self._context:
return self._context.queue_name
return None
@property
def base_path(self):
"""Returns the base path for Pipeline URL handlers or None if unknown."""
if self._context:
return self._context.base_path
return None
@property
def has_finalized(self):
"""Returns True if this pipeline has completed and finalized."""
return self._result_status == _PipelineRecord.DONE
@property
def was_aborted(self):
"""Returns True if this pipeline was aborted."""
return self._result_status == _PipelineRecord.ABORTED
@property
def current_attempt(self):
"""Returns the current attempt at running this pipeline, starting at 1."""
return self._current_attempt + 1
@property
def test_mode(self):
"""Returns True if the pipeline is running in test mode."""
return _TEST_MODE
@ClassProperty
def class_path(cls):
"""Returns the unique string identifier for this Pipeline class.
Refers to how to find the Pipeline in the global modules dictionary.
"""
cls._set_class_path()
return cls._class_path
@classmethod
def from_id(cls, pipeline_id, resolve_outputs=True, _pipeline_record=None):
"""Returns an instance corresponding to an existing Pipeline.
The returned object will have the same properties a Pipeline does while
it's running synchronously (e.g., like when it's first allocated), allowing
callers to inspect caller arguments, outputs, fill slots, complete the
pipeline, abort, retry, etc.
Args:
pipeline_id: The ID of this pipeline (a string).
resolve_outputs: When True, dereference the outputs of this Pipeline
so their values can be accessed by the caller.
_pipeline_record: Internal-only. The _PipelineRecord instance to use
to instantiate this instance instead of fetching it from
the datastore.
Returns:
A Pipeline sub-class instance, or None if it could not be found.
"""
pipeline_record = _pipeline_record
# Support pipeline IDs and idempotence_keys that are not unicode.
if not isinstance(pipeline_id, unicode):
try:
pipeline_id = pipeline_id.encode('utf-8')
except UnicodeDecodeError:
pipeline_id = hashlib.sha1(pipeline_id).hexdigest()
pipeline_key = db.Key.from_path(_PipelineRecord.kind(), pipeline_id)
if pipeline_record is None:
pipeline_record = db.get(pipeline_key)
if pipeline_record is None:
return None
try:
pipeline_func_class = mr_util.for_name(pipeline_record.class_path)
except ImportError, e:
logging.warning('Tried to find Pipeline %s#%s, but class could '
'not be found. Using default Pipeline class instead.',
pipeline_record.class_path, pipeline_id)
pipeline_func_class = cls
params = pipeline_record.params
arg_list, kwarg_dict = _dereference_args(
pipeline_record.class_path, params['args'], params['kwargs'])
outputs = PipelineFuture(pipeline_func_class.output_names)
outputs._inherit_outputs(
pipeline_record.class_path,
params['output_slots'],
resolve_outputs=resolve_outputs)
stage = pipeline_func_class(*arg_list, **kwarg_dict)
stage.backoff_seconds = params['backoff_seconds']
stage.backoff_factor = params['backoff_factor']
stage.max_attempts = params['max_attempts']
stage.task_retry = params['task_retry']
stage.target = params.get('target') # May not be defined for old Pipelines
stage._current_attempt = pipeline_record.current_attempt
stage._set_values_internal(
_PipelineContext('', params['queue_name'], params['base_path']),
pipeline_key,
_PipelineRecord.root_pipeline.get_value_for_datastore(pipeline_record),
outputs,
pipeline_record.status)
return stage
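# Illustrative sketch (hypothetical pipeline ID): rehydrate an existing
# pipeline from outside the running workflow to inspect or control it.
#
#   stage = CountCharacters.from_id('a1b2c3d4e5')
#   if stage is not None and stage.has_finalized:
#     print stage.outputs.default.value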
# Methods that can be invoked on a Pipeline instance by anyone with a
# valid object (e.g., directly instantiated, retrieve via from_id).
def start(self,
idempotence_key='',
queue_name='default',
base_path='/_ah/pipeline',
return_task=False):
"""Starts a new instance of this pipeline.
Args:
idempotence_key: The ID to use for this Pipeline and throughout its
asynchronous workflow to ensure the operations are idempotent. If
empty a starting key will be automatically assigned.
queue_name: What queue this Pipeline's workflow should execute on.
base_path: The relative URL path to where the Pipeline API is
mounted for access by the taskqueue API or external requests.
return_task: When True, a task to start this pipeline will be returned
instead of submitted, allowing the caller to start off this pipeline
as part of a separate transaction (potentially leaving this newly
allocated pipeline's datastore entities in place if that separate
transaction fails for any reason).
Returns:
A taskqueue.Task instance if return_task was True. This task will *not*
have a name, thus to ensure reliable execution of your pipeline you
should add() this task as part of a separate Datastore transaction.
Raises:
PipelineExistsError if the pipeline with the given idempotence key exists.
PipelineSetupError if the pipeline could not start for any other reason.
"""
if not idempotence_key:
idempotence_key = uuid.uuid1().hex
elif not isinstance(idempotence_key, unicode):
try:
idempotence_key.encode('utf-8')
except UnicodeDecodeError:
idempotence_key = hashlib.sha1(idempotence_key).hexdigest()
pipeline_key = db.Key.from_path(_PipelineRecord.kind(), idempotence_key)
context = _PipelineContext('', queue_name, base_path)
future = PipelineFuture(self.output_names, force_strict=True)
try:
self._set_values_internal(
context, pipeline_key, pipeline_key, future, _PipelineRecord.WAITING)
return context.start(self, return_task=return_task)
except Error:
# Pass through exceptions that originate in this module.
raise
except Exception, e:
# Re-type any exceptions that were raised in dependent methods.
raise PipelineSetupError('Error starting %s#%s: %s' % (
self, idempotence_key, str(e)))
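# Illustrative sketch (hypothetical queue and key names): start a root
# pipeline directly, or defer enqueueing to the caller's own transaction.
#
#   job = CountCharacters('hello world')
#   job.start(queue_name='pipelines', idempotence_key='count-2011-06-01')
#
#   # Alternatively, enqueue the start task yourself:
#   task = job.start(return_task=True)
#   # ...later, inside the caller's own datastore transaction:
#   taskqueue.Queue('pipelines').add(task, transactional=True)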
def start_test(self, idempotence_key=None, base_path='', **kwargs):
"""Starts this pipeline in test fashion.
Args:
idempotence_key: Dummy idempotence_key to use for this root pipeline.
base_path: Dummy base URL path to use for this root pipeline.
kwargs: Ignored keyword arguments usually passed to start().
"""
if not idempotence_key:
idempotence_key = uuid.uuid1().hex
pipeline_key = db.Key.from_path(_PipelineRecord.kind(), idempotence_key)
context = _PipelineContext('', 'default', base_path)
future = PipelineFuture(self.output_names, force_strict=True)
self._set_values_internal(
context, pipeline_key, pipeline_key, future, _PipelineRecord.WAITING)
context.start_test(self)
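# Illustrative sketch: in unit tests the whole workflow runs synchronously,
# so outputs can be asserted immediately afterwards.
#
#   stage = CountCharacters('hello')
#   stage.start_test()
#   assert stage.outputs.default.value == 5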
# Pipeline control methods.
def retry(self, retry_message=''):
"""Forces a currently running asynchronous pipeline to retry.
Note this may not be called by synchronous or generator pipelines. Those
must instead raise the 'Retry' exception during execution.
Args:
retry_message: Optional message explaining why the retry happened.
Returns:
True if the Pipeline should be retried, False if it cannot be cancelled
mid-flight for some reason.
"""
if not self.async:
raise UnexpectedPipelineError(
'May only call retry() method for asynchronous pipelines.')
if self.try_cancel():
self._context.transition_retry(self._pipeline_key, retry_message)
return True
else:
return False
def abort(self, abort_message=''):
"""Mark the entire pipeline up to the root as aborted.
Note this should only be called from *outside* the context of a running
pipeline. Synchronous and generator pipelines should raise the 'Abort'
exception to cause this behavior during execution.
Args:
abort_message: Optional message explaining why the abort happened.
Returns:
True if the abort signal was sent successfully; False if the pipeline
could not be aborted for any reason.
"""
# TODO: Use thread-local variable to enforce that this is not called
# while a pipeline is executing in the current thread.
if (self.async and self._root_pipeline_key == self._pipeline_key and
not self.try_cancel()):
# Handle the special case where the root pipeline is async and thus
# cannot be aborted outright.
return False
else:
return self._context.begin_abort(
self._root_pipeline_key, abort_message=abort_message)
# Methods used by the Pipeline as it runs.
def fill(self, name_or_slot, value):
"""Fills an output slot required by this Pipeline.
Args:
name_or_slot: The name of the slot (a string) or Slot record to fill.
value: The serializable value to assign to this slot.
Raises:
UnexpectedPipelineError if the Slot no longer exists. SlotNotDeclaredError
if trying to output to a slot that was not declared ahead of time.
"""
if isinstance(name_or_slot, basestring):
slot = getattr(self.outputs, name_or_slot)
elif isinstance(name_or_slot, Slot):
slot = name_or_slot
else:
raise UnexpectedPipelineError(
'Could not fill invalid output name: %r' % name_or_slot)
if not slot._exists:
raise SlotNotDeclaredError(
'Cannot fill output with name "%s" that was just '
'declared within the Pipeline context.' % slot.name)
self._context.fill_slot(self._pipeline_key, slot, value)
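# Illustrative sketch (hypothetical subclass): a synchronous pipeline must
# fill every name in output_names during run(); either the string name or the
# Slot object from self.outputs may be passed.
#
#   class SplitWords(pipeline.Pipeline):
#     output_names = ['word_count']
#     def run(self, text):
#       words = text.split()
#       self.fill(self.outputs.word_count, len(words))
#       return words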
def set_status(self, message=None, console_url=None, status_links=None):
"""Sets the current status of this pipeline.
This method is purposefully non-transactional. Updates are written to the
datastore immediately and overwrite all existing statuses.
Args:
message: (optional) Overall status message.
console_url: (optional) Relative URL to use for the "console" of this
pipeline that displays current progress. When None, no console will
be displayed.
status_links: (optional) Dictionary of readable link names to relative
URLs that should be associated with this pipeline as it runs. These links
provide convenient access to other dashboards, consoles, etc associated
with the pipeline.
Raises:
PipelineRuntimeError if the status could not be set for any reason.
"""
if _TEST_MODE:
logging.info(
'New status for %s#%s: message=%r, console_url=%r, status_links=%r',
self, self.pipeline_id, message, console_url, status_links)
return
status_key = db.Key.from_path(_StatusRecord.kind(), self.pipeline_id)
root_pipeline_key = db.Key.from_path(
_PipelineRecord.kind(), self.root_pipeline_id)
status_record = _StatusRecord(
key=status_key, root_pipeline=root_pipeline_key)
try:
if message:
status_record.message = message
if console_url:
status_record.console_url = console_url
if status_links:
# Alphabetize the list of link names.
status_record.link_names = sorted(
db.Text(s) for s in status_links.iterkeys())
status_record.link_urls = [
db.Text(status_links[name]) for name in status_record.link_names]
status_record.status_time = datetime.datetime.utcnow()
status_record.put()
except Exception, e:
raise PipelineRuntimeError('Could not set status for %s#%s: %s' %
(self, self.pipeline_id, str(e)))
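# Illustrative sketch (placeholder URLs): report progress from inside run()
# of a long-lived pipeline.
#
#   self.set_status(message='Processed 40 of 100 records',
#                   console_url='/myapp/console',
#                   status_links={'source data': '/myapp/source'})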
def complete(self, default_output=None):
"""Marks this asynchronous Pipeline as complete.
Args:
default_output: What value the 'default' output slot should be assigned.
Raises:
UnexpectedPipelineError if the slot no longer exists or this method was
called for a pipeline that is not async.
"""
# TODO: Enforce that all outputs expected by this async pipeline were
# filled before this complete() function was called. May require all
# async functions to declare their outputs upfront.
if not self.async:
raise UnexpectedPipelineError(
'May only call complete() method for asynchronous pipelines.')
self._context.fill_slot(
self._pipeline_key, self.outputs.default, default_output)
def get_callback_url(self, **kwargs):
"""Returns a relative URL for invoking this Pipeline's callback method.
Args:
kwargs: Dictionary mapping keyword argument names to single values that
should be passed to the callback when it is invoked.
Raises:
UnexpectedPipelineError if this is invoked on pipeline that is not async.
"""
# TODO: Support positional parameters.
if not self.async:
raise UnexpectedPipelineError(
'May only call get_callback_url() method for asynchronous pipelines.')
kwargs['pipeline_id'] = self._pipeline_key.name()
params = urllib.urlencode(kwargs)
return '%s/callback?%s' % (self.base_path, params)
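# Illustrative sketch (hypothetical async subclass and helper): hand the
# callback URL to an external system, then complete the pipeline when the
# callback arrives.
#
#   class WaitForApproval(pipeline.Pipeline):
#     async = True
#     def run(self, request_id):
#       url = self.get_callback_url(choice='approved')
#       notify_reviewer(request_id, url)   # hypothetical helper function
#     def callback(self, choice):
#       self.complete(choice)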
def get_callback_task(self, *args, **kwargs):
"""Returns a task for calling back this Pipeline.
Args:
params: Keyword argument containing a dictionary of key/value pairs
that will be passed to the callback when it is executed.
args, kwargs: Passed to the taskqueue.Task constructor. Use these
arguments to set the task name (for idempotence), etc.
Returns:
A taskqueue.Task instance that must be enqueued by the caller.
"""
if not self.async:
raise UnexpectedPipelineError(
'May only call get_callback_task() method for asynchronous pipelines.')
params = kwargs.get('params', {})
kwargs['params'] = params
params['pipeline_id'] = self._pipeline_key.name()
kwargs['url'] = self.base_path + '/callback'
kwargs['method'] = 'POST'
return taskqueue.Task(*args, **kwargs)
def send_result_email(self):
"""Sends an email to admins indicating this Pipeline has completed.
For developer convenience. Automatically called from finalized for root
Pipelines that do not override the default action.
"""
status = 'successful'
if self.was_aborted:
status = 'aborted'
app_id = os.environ['APPLICATION_ID']
shard_index = app_id.find('~')
if shard_index != -1:
app_id = app_id[shard_index+1:]
param_dict = {
'status': status,
'app_id': app_id,
'class_path': self._class_path,
'pipeline_id': self.root_pipeline_id,
'base_path': '%s.appspot.com%s' % (app_id, self.base_path),
}
subject = (
'Pipeline %(status)s: App "%(app_id)s", %(class_path)s'
'#%(pipeline_id)s' % param_dict)
body = """View the pipeline results here:
http://%(base_path)s/status?root=%(pipeline_id)s
Thanks,
The Pipeline API
""" % param_dict
html = """<html><body>
<p>View the pipeline results here:</p>
<p><a href="http://%(base_path)s/status?root=%(pipeline_id)s"
>http://%(base_path)s/status?root=%(pipeline_id)s</a></p>
<p>
Thanks,
<br>
The Pipeline API
</p>
</body></html>
""" % param_dict
sender = '%s@%s.appspotmail.com' % (app_id, app_id)
try:
self._send_mail(sender, subject, body, html=html)
except (mail.InvalidSenderError, mail.InvalidEmailError):
logging.warning('Could not send result email for '
'root pipeline ID "%s" from sender "%s"',
self.root_pipeline_id, sender)
def cleanup(self):
"""Clean up this Pipeline and all Datastore records used for coordination.
Only works when called on a root pipeline. Child pipelines will ignore
calls to this method.
After this method is called, Pipeline.from_id() and related status
methods will return inconsistent or missing results. This method is
fire-and-forget and asynchronous.
"""
if self._root_pipeline_key is None:
raise UnexpectedPipelineError(
'Could not cleanup Pipeline with unknown root pipeline ID.')
if not self.is_root:
return
task = taskqueue.Task(
params=dict(root_pipeline_key=self._root_pipeline_key),
url=self.base_path + '/cleanup',
headers={'X-Ae-Pipeline-Key': self._root_pipeline_key})
taskqueue.Queue(self.queue_name).add(task)
def with_params(self, **kwargs):
"""Modify various execution parameters of a Pipeline before it runs.
This method has no effect in test mode.
Args:
kwargs: Attributes to modify on this Pipeline instance before it has
been executed.
Returns:
This Pipeline instance, for easy chaining.
"""
if _TEST_MODE:
logging.info(
'Setting runtime parameters for %s#%s: %r',
self, self.pipeline_id, kwargs)
return self
if self.pipeline_id is not None:
raise UnexpectedPipelineError(
'May only call with_params() on a Pipeline that has not yet '
'been scheduled for execution.')
ALLOWED = ('backoff_seconds', 'backoff_factor', 'max_attempts', 'target')
for name, value in kwargs.iteritems():
if name not in ALLOWED:
raise TypeError('Unexpected keyword: %s=%r' % (name, value))
setattr(self, name, value)
return self
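# Illustrative sketch (hypothetical class and backend name): tune retry and
# routing parameters before the pipeline is scheduled.
#
#   job = FetchFeeds(urls).with_params(max_attempts=5,
#                                      backoff_seconds=30,
#                                      target='worker-backend')
#   job.start()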
# Methods implemented by developers for lifecycle management. These
# must be idempotent under all circumstances.
def run(self, *args, **kwargs):
"""Runs this Pipeline."""
raise NotImplementedError('Must implement "run" in Pipeline sub-class.')
def run_test(self, *args, **kwargs):
"""Runs this Pipeline in test mode."""
raise NotImplementedError(
'Must implement "run_test" in Pipeline sub-class.')
def finalized(self):
"""Finalizes this Pipeline after execution if it's a generator.
The default action for the root pipeline is to email the admins with the status.
Implementors should check 'was_aborted' to find out whether the finalization
they are handling is for a success or an error case.
"""
if self.pipeline_id == self.root_pipeline_id:
self.send_result_email()
def finalized_test(self, *args, **kwargs):
"""Finalized this Pipeline in test mode."""
raise NotImplementedError(
'Must implement "finalized_test" in Pipeline sub-class.')
def callback(self, **kwargs):
"""This Pipeline received an asynchronous callback request."""
raise NotImplementedError(
'Must implement "callback" in Pipeline sub-class.')
def try_cancel(self):
"""This pipeline has been cancelled.
Called when a pipeline is interrupted part-way through due to some kind
of failure (an abort of the whole pipeline to the root or a forced retry on
this child pipeline).
Returns:
True to indicate that cancellation was successful and this pipeline may
go into the retry or aborted state; False to indicate that this pipeline
cannot be canceled right now and must remain as-is.
"""
return False
# Internal methods.
@classmethod
def _set_class_path(cls, module_dict=sys.modules):
"""Sets the absolute path to this class as a string.
Used by the Pipeline API to reconstruct the Pipeline sub-class object
at execution time instead of passing around a serialized function.
Args:
module_dict: Used for testing.
"""
# Do not traverse the class hierarchy fetching the class path attribute.
found = cls.__dict__.get('_class_path')
if found is not None:
return
# Do not set the _class_path for the base-class, otherwise all children's
# lookups for _class_path will fall through and return 'Pipeline' above.
# This situation can happen if users call the generic Pipeline.from_id
# to get the result of a Pipeline without knowing its specific class.
if cls is Pipeline:
return
# This is a brute-force approach to solving the module reverse-lookup
# problem, where we want to refer to a class by its stable module name
# but have no built-in facility for doing so in Python.
found = None
for name, module in module_dict.items():
if name == '__main__':
continue
found = getattr(module, cls.__name__, None)
if found is cls:
break
else:
# If all else fails, try the main module.
name = '__main__'
module = module_dict.get(name)
found = getattr(module, cls.__name__, None)
if found is not cls:
raise ImportError('Could not determine path for Pipeline '
'function/class "%s"' % cls.__name__)
cls._class_path = '%s.%s' % (name, cls.__name__)
def _set_values_internal(self,
context,
pipeline_key,
root_pipeline_key,
outputs,
result_status):
"""Sets the user-visible values provided as an API by this class.
Args:
context: The _PipelineContext used for this Pipeline.
pipeline_key: The db.Key of this pipeline.
root_pipeline_key: The db.Key of the root pipeline.
outputs: The PipelineFuture for this pipeline.
result_status: The result status of this pipeline.
"""
self._context = context
self._pipeline_key = pipeline_key
self._root_pipeline_key = root_pipeline_key
self._result_status = result_status
self.outputs = outputs
def _callback_internal(self, kwargs):
"""Used to execute callbacks on asynchronous pipelines."""
logging.debug('Callback %s(*%s, **%s)#%s with params: %r',
self._class_path, _short_repr(self.args),
_short_repr(self.kwargs), self._pipeline_key.name(), kwargs)
return self.callback(**kwargs)
def _run_internal(self,
context,
pipeline_key,
root_pipeline_key,
caller_output):
"""Used by the Pipeline evaluator to execute this Pipeline."""
self._set_values_internal(
context, pipeline_key, root_pipeline_key, caller_output,
_PipelineRecord.RUN)
logging.debug('Running %s(*%s, **%s)#%s',
self._class_path, _short_repr(self.args),
_short_repr(self.kwargs), self._pipeline_key.name())
return self.run(*self.args, **self.kwargs)
def _finalized_internal(self,
context,
pipeline_key,
root_pipeline_key,
caller_output,
aborted):
"""Used by the Pipeline evaluator to finalize this Pipeline."""
result_status = _PipelineRecord.RUN
if aborted:
result_status = _PipelineRecord.ABORTED
self._set_values_internal(
context, pipeline_key, root_pipeline_key, caller_output, result_status)
logging.debug('Finalizing %s(*%r, **%r)#%s',
self._class_path, _short_repr(self.args),
_short_repr(self.kwargs), self._pipeline_key.name())
try:
self.finalized()
except NotImplementedError:
pass
def __repr__(self):
"""Returns a string representation of this Pipeline."""
return '%s(*%s, **%s)' % (
self._class_path, _short_repr(self.args), _short_repr(self.kwargs))
# TODO: Change InOrder and After to use a common thread-local list of
# execution modifications to apply to the current evaluating pipeline.
class After(object):
"""Causes all contained Pipelines to run after the given ones complete.
Must be used in a 'with' block.
"""
_local = threading.local()
def __init__(self, *futures):
"""Initializer.
Args:
*futures: PipelineFutures that all subsequent pipelines should follow.
May be empty, in which case this statement does nothing.
"""
for f in futures:
if not isinstance(f, PipelineFuture):
raise TypeError('May only pass PipelineFuture instances to After()')
self._futures = set(futures)
def __enter__(self):
"""When entering a 'with' block."""
After._thread_init()
After._local._after_all_futures.extend(self._futures)
def __exit__(self, type, value, trace):
"""When exiting a 'with' block."""
for future in self._futures:
After._local._after_all_futures.remove(future)
return False
@classmethod
def _thread_init(cls):
"""Ensure thread local is initialized."""
if not hasattr(cls._local, '_after_all_futures'):
cls._local._after_all_futures = []
class InOrder(object):
"""Causes all contained Pipelines to run in order.
Must be used in a 'with' block.
"""
_local = threading.local()
@classmethod
def _add_future(cls, future):
"""Adds a future to the list of in-order futures thus far.
Args:
future: The future to add to the list.
"""
if cls._local._activated:
cls._local._in_order_futures.add(future)
def __init__(self):
"""Initializer."""
def __enter__(self):
"""When entering a 'with' block."""
InOrder._thread_init()
if InOrder._local._activated:
raise UnexpectedPipelineError('Already in an InOrder "with" block.')
InOrder._local._activated = True
InOrder._local._in_order_futures.clear()
def __exit__(self, type, value, trace):
"""When exiting a 'with' block."""
InOrder._local._activated = False
InOrder._local._in_order_futures.clear()
return False
@classmethod
def _thread_init(cls):
"""Ensure thread local is initialized."""
if not hasattr(cls._local, '_in_order_futures'):
cls._local._in_order_futures = set()
cls._local._activated = False
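# Illustrative sketch (hypothetical child classes, assuming this module is
# imported as "pipeline"): the ordering constructs are used inside a
# generator pipeline's run() method.
#
#   class Orchestrate(pipeline.Pipeline):
#     def run(self):
#       first = yield ProduceData()
#       with pipeline.After(first):
#         yield ConsumeData(first.default)   # waits until ProduceData is done
#       with pipeline.InOrder():
#         yield StepOne()    # StepTwo will not start until StepOne completes
#         yield StepTwo()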
################################################################################
def _short_repr(obj):
"""Helper function returns a truncated repr() of an object."""
stringified = repr(obj)
if len(stringified) > 200:
return '%s... (%d bytes)' % (stringified[:200], len(stringified))
return stringified
def _write_json_blob(encoded_value):
"""Writes a JSON encoded value to a Blobstore File.
Args:
encoded_value: The encoded JSON string.
Returns:
The blobstore.BlobKey for the file that was created.
"""
file_name = files.blobstore.create(mime_type='application/json')
handle = files.open(file_name, 'a')
try:
# Chunk the file into individual writes of less than 1MB, since the files
# API does not do buffered writes implicitly.
for start_index in xrange(0, len(encoded_value), _MAX_JSON_SIZE):
end_index = start_index + _MAX_JSON_SIZE
handle.write(encoded_value[start_index:end_index])
finally:
handle.close()
files.finalize(file_name)
return files.blobstore.get_blob_key(file_name)
def _dereference_args(pipeline_name, args, kwargs):
"""Dereference a Pipeline's arguments that are slots, validating them.
Each argument value passed in is assumed to be a dictionary with the format:
{'type': 'value', 'value': 'serializable'} # A resolved value.
{'type': 'slot', 'slot_key': 'str() on a db.Key'} # A pending Slot.
Args:
pipeline_name: The name of the pipeline class; used for debugging.
args: Iterable of positional arguments.
kwargs: Dictionary of keyword arguments.
Returns:
Tuple (args, kwargs) where:
args: A list of positional argument values, all fully dereferenced.
kwargs: A dictionary of keyword argument values, all fully dereferenced.
Raises:
SlotNotFilledError if any of the supplied 'slot_key' records are not
present in the Datastore or have not yet been filled.
UnexpectedPipelineError if an unknown parameter type was passed.
"""
lookup_slots = set()
for arg in itertools.chain(args, kwargs.itervalues()):
if arg['type'] == 'slot':
lookup_slots.add(db.Key(arg['slot_key']))
slot_dict = {}
for key, slot_record in zip(lookup_slots, db.get(lookup_slots)):
if slot_record is None or slot_record.status != _SlotRecord.FILLED:
raise SlotNotFilledError(
'Slot "%s" missing its value. From %s(*args=%s, **kwargs=%s)' %
(key, pipeline_name, _short_repr(args), _short_repr(kwargs)))
slot_dict[key] = slot_record.value
arg_list = []
for current_arg in args:
if current_arg['type'] == 'slot':
arg_list.append(slot_dict[db.Key(current_arg['slot_key'])])
elif current_arg['type'] == 'value':
arg_list.append(current_arg['value'])
else:
raise UnexpectedPipelineError('Unknown parameter type: %r' % current_arg)
kwarg_dict = {}
for key, current_arg in kwargs.iteritems():
if current_arg['type'] == 'slot':
kwarg_dict[key] = slot_dict[db.Key(current_arg['slot_key'])]
elif current_arg['type'] == 'value':
kwarg_dict[key] = current_arg['value']
else:
raise UnexpectedPipelineError('Unknown parameter type: %r' % current_arg)
return (arg_list, kwarg_dict)
def _generate_args(pipeline, future, queue_name, base_path):
"""Generate the params used to describe a Pipeline's depedencies.
The arguments passed to this method may be normal values, Slot instances
(for named outputs), or PipelineFuture instances (for referring to the
default output slot).
Args:
pipeline: The Pipeline instance to generate args for.
future: The PipelineFuture for the Pipeline these arguments correspond to.
queue_name: The queue to run the pipeline on.
base_path: Relative URL for pipeline URL handlers.
Returns:
Tuple (dependent_slots, output_slot_keys, params_text, params_blob) where:
dependent_slots: List of db.Key instances of _SlotRecords on which
this pipeline will need to block before execution (passed to
create a _BarrierRecord for running the pipeline).
output_slot_keys: List of db.Key instances of _SlotRecords that will
be filled by this pipeline during its execution (passed to create
a _BarrierRecord for finalizing the pipeline).
params_text: JSON dictionary of pipeline parameters to be serialized and
saved in a corresponding _PipelineRecord. Will be None if the params are
too big and must be saved in a blob instead.
params_blob: JSON dictionary of pipeline parameters to be serialized and
saved in a Blob file, and then attached to a _PipelineRecord. Will be
None if the params data size was small enough to fit in the entity.
"""
params = {
'args': [],
'kwargs': {},
'after_all': [],
'output_slots': {},
'class_path': pipeline._class_path,
'queue_name': queue_name,
'base_path': base_path,
'backoff_seconds': pipeline.backoff_seconds,
'backoff_factor': pipeline.backoff_factor,
'max_attempts': pipeline.max_attempts,
'task_retry': pipeline.task_retry,
'target': pipeline.target,
}
dependent_slots = set()
arg_list = params['args']
for current_arg in pipeline.args:
if isinstance(current_arg, PipelineFuture):
current_arg = current_arg.default
if isinstance(current_arg, Slot):
arg_list.append({'type': 'slot', 'slot_key': str(current_arg.key)})
dependent_slots.add(current_arg.key)
else:
arg_list.append({'type': 'value', 'value': current_arg})
kwarg_dict = params['kwargs']
for name, current_arg in pipeline.kwargs.iteritems():
if isinstance(current_arg, PipelineFuture):
current_arg = current_arg.default
if isinstance(current_arg, Slot):
kwarg_dict[name] = {'type': 'slot', 'slot_key': str(current_arg.key)}
dependent_slots.add(current_arg.key)
else:
kwarg_dict[name] = {'type': 'value', 'value': current_arg}
after_all = params['after_all']
for other_future in future._after_all_pipelines:
slot_key = other_future._output_dict['default'].key
after_all.append(str(slot_key))
dependent_slots.add(slot_key)
output_slots = params['output_slots']
output_slot_keys = set()
for name, slot in future._output_dict.iteritems():
output_slot_keys.add(slot.key)
output_slots[name] = str(slot.key)
params_encoded = json.dumps(params)
params_text = None
params_blob = None
if len(params_encoded) > _MAX_JSON_SIZE:
params_blob = _write_json_blob(params_encoded)
else:
params_text = params_encoded
return dependent_slots, output_slot_keys, params_text, params_blob
class _PipelineContext(object):
"""Internal API for interacting with Pipeline state."""
_gettime = datetime.datetime.utcnow
def __init__(self,
task_name,
queue_name,
base_path):
"""Initializer.
Args:
task_name: The name of the currently running task or empty if there
is no task running.
queue_name: The queue this pipeline should run on (may not be the
current queue this request is on).
base_path: Relative URL for the pipeline's handlers.
"""
self.task_name = task_name
self.queue_name = queue_name
self.base_path = base_path
self.barrier_handler_path = '%s/output' % base_path
self.pipeline_handler_path = '%s/run' % base_path
self.finalized_handler_path = '%s/finalized' % base_path
self.fanout_handler_path = '%s/fanout' % base_path
self.abort_handler_path = '%s/abort' % base_path
self.fanout_abort_handler_path = '%s/fanout_abort' % base_path
self.session_filled_output_names = set()
@classmethod
def from_environ(cls, environ=os.environ):
"""Constructs a _PipelineContext from the task queue environment."""
base_path, unused = (environ['PATH_INFO'].rsplit('/', 1) + [''])[:2]
return cls(
environ['HTTP_X_APPENGINE_TASKNAME'],
environ['HTTP_X_APPENGINE_QUEUENAME'],
base_path)
def fill_slot(self, filler_pipeline_key, slot, value):
"""Fills a slot, enqueueing a task to trigger pending barriers.
Args:
filler_pipeline_key: db.Key or stringified key of the _PipelineRecord
that filled this slot.
slot: The Slot instance to fill.
value: The serializable value to assign.
Raises:
UnexpectedPipelineError if the _SlotRecord for the 'slot' could not
be found in the Datastore.
"""
if not isinstance(filler_pipeline_key, db.Key):
filler_pipeline_key = db.Key(filler_pipeline_key)
if _TEST_MODE:
slot._set_value_test(filler_pipeline_key, value)
else:
encoded_value = json.dumps(value, sort_keys=True)
value_text = None
value_blob = None
if len(encoded_value) <= _MAX_JSON_SIZE:
value_text = db.Text(encoded_value)
else:
# The encoded value is too big. Save it as a blob.
value_blob = _write_json_blob(encoded_value)
def txn():
slot_record = db.get(slot.key)
if slot_record is None:
raise UnexpectedPipelineError(
'Tried to fill missing slot "%s" '
'by pipeline ID "%s" with value: %r'
% (slot.key, filler_pipeline_key.name(), value))
# NOTE: Always take the override value here. If down-stream pipelines
# need a consistent view of all up-stream outputs (meaning, all of the
# outputs came from the same retry attempt of the upstream pipeline),
# the down-stream pipeline must also wait for the 'default' output
# of these up-stream pipelines.
slot_record.filler = filler_pipeline_key
slot_record.value_text = value_text
slot_record.value_blob = value_blob
slot_record.status = _SlotRecord.FILLED
slot_record.fill_time = self._gettime()
slot_record.put()
task = taskqueue.Task(
url=self.barrier_handler_path,
params=dict(slot_key=slot.key),
headers={'X-Ae-Slot-Key': slot.key,
'X-Ae-Filler-Pipeline-Key': filler_pipeline_key})
task.add(queue_name=self.queue_name, transactional=True)
db.run_in_transaction(txn)
self.session_filled_output_names.add(slot.name)
def notify_barriers(self,
slot_key,
cursor,
max_to_notify=_MAX_BARRIERS_TO_NOTIFY):
"""Searches for barriers affected by a slot and triggers completed ones.
Args:
slot_key: db.Key or stringified key of the _SlotRecord that was filled.
cursor: Stringified Datastore cursor where the notification query
should pick up.
max_to_notify: Used for testing.
"""
if not isinstance(slot_key, db.Key):
slot_key = db.Key(slot_key)
query = (
_BarrierRecord.all(cursor=cursor)
.filter('blocking_slots =', slot_key))
results = query.fetch(max_to_notify)
# Fetch all blocking _SlotRecords for any potentially triggered barriers.
blocking_slot_keys = []
for barrier in results:
blocking_slot_keys.extend(barrier.blocking_slots)
blocking_slot_dict = {}
for slot_record in db.get(blocking_slot_keys):
if slot_record is None:
continue
blocking_slot_dict[slot_record.key()] = slot_record
task_list = []
updated_barriers = []
for barrier in results:
all_ready = True
for blocking_slot_key in barrier.blocking_slots:
slot_record = blocking_slot_dict.get(blocking_slot_key)
if slot_record is None:
logging.error('Barrier "%s" relies on Slot "%s" which is missing.',
barrier.key(), blocking_slot_key)
all_ready = False
break
if slot_record.status != _SlotRecord.FILLED:
all_ready = False
break
# When all of the blocking_slots have been filled, consider the barrier
# ready to trigger. We'll trigger it regardless of the current
# _BarrierRecord status, since there could be task queue failures at any
# point in this flow; this rolls forward the state and de-dupes using
# the task name tombstones.
if all_ready:
if barrier.status != _BarrierRecord.FIRED:
barrier.status = _BarrierRecord.FIRED
barrier.trigger_time = self._gettime()
updated_barriers.append(barrier)
purpose = barrier.key().name()
if purpose == _BarrierRecord.START:
path = self.pipeline_handler_path
countdown = None
else:
path = self.finalized_handler_path
# NOTE: Wait one second before finalization to prevent
# contention on the _PipelineRecord entity.
countdown = 1
pipeline_key = _BarrierRecord.target.get_value_for_datastore(barrier)
task_list.append(taskqueue.Task(
url=path,
countdown=countdown,
name='ae-barrier-fire-%s-%s' % (pipeline_key.name(), purpose),
params=dict(pipeline_key=pipeline_key, purpose=purpose),
headers={'X-Ae-Pipeline-Key': pipeline_key}))
# Blindly overwrite _BarrierRecords that have an updated status. This is
# acceptable because by this point all finalization barriers for
# generator children should have already had their final outputs assigned.
if updated_barriers:
db.put(updated_barriers)
# Task continuation with sequence number to prevent fork-bombs.
if len(results) == max_to_notify:
the_match = re.match('(.*)-ae-barrier-notify-([0-9]+)', self.task_name)
if the_match:
prefix = the_match.group(1)
end = int(the_match.group(2)) + 1
else:
prefix = self.task_name
end = 0
task_list.append(taskqueue.Task(
name='%s-ae-barrier-notify-%d' % (prefix, end),
url=self.barrier_handler_path,
params=dict(slot_key=slot_key, cursor=query.cursor())))
if task_list:
try:
taskqueue.Queue(self.queue_name).add(task_list)
except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError):
pass
def begin_abort(self, root_pipeline_key, abort_message):
"""Kicks off the abort process for a root pipeline and all its children.
Args:
root_pipeline_key: db.Key of the root pipeline to abort.
abort_message: Message explaining why the abort happened, only saved
into the root pipeline.
Returns:
True if the abort signal was sent successfully; False otherwise.
"""
def txn():
pipeline_record = db.get(root_pipeline_key)
if pipeline_record is None:
logging.warning(
'Tried to abort root pipeline ID "%s" but it does not exist.',
root_pipeline_key.name())
raise db.Rollback()
if pipeline_record.status == _PipelineRecord.ABORTED:
logging.warning(
'Tried to abort root pipeline ID "%s"; already in state: %s',
root_pipeline_key.name(), pipeline_record.status)
raise db.Rollback()
if pipeline_record.abort_requested:
logging.warning(
'Tried to abort root pipeline ID "%s"; abort signal already sent.',
root_pipeline_key.name())
raise db.Rollback()
pipeline_record.abort_requested = True
pipeline_record.abort_message = abort_message
pipeline_record.put()
task = taskqueue.Task(
url=self.fanout_abort_handler_path,
params=dict(root_pipeline_key=root_pipeline_key))
task.add(queue_name=self.queue_name, transactional=True)
return True
return db.run_in_transaction(txn)
def continue_abort(self,
root_pipeline_key,
cursor=None,
max_to_notify=_MAX_ABORTS_TO_BEGIN):
"""Sends the abort signal to all children for a root pipeline.
Args:
root_pipeline_key: db.Key of the root pipeline to abort.
cursor: The query cursor for enumerating _PipelineRecords when inserting
tasks to cause child pipelines to terminate.
max_to_notify: Used for testing.
"""
if not isinstance(root_pipeline_key, db.Key):
root_pipeline_key = db.Key(root_pipeline_key)
# NOTE: The results of this query may include _PipelineRecord instances
# that are not actually "reachable", meaning you cannot get to them by
# starting at the root pipeline and following "fanned_out" onward. This
# is acceptable because even these defunct _PipelineRecords will properly
# set their status to ABORTED when the signal comes, regardless of any
# other status they may have had.
#
# The only gotcha here is if a Pipeline's finalize method somehow modifies
# its inputs (like deleting an input file). In the case there are
# unreachable child pipelines, it will appear as if two finalize methods
# have been called instead of just one. The saving grace here is that
# finalize must be idempotent, so this *should* be harmless.
query = (
_PipelineRecord.all(cursor=cursor)
.filter('root_pipeline =', root_pipeline_key))
results = query.fetch(max_to_notify)
task_list = []
for pipeline_record in results:
if pipeline_record.status not in (
_PipelineRecord.RUN, _PipelineRecord.WAITING):
continue
pipeline_key = pipeline_record.key()
task_list.append(taskqueue.Task(
name='%s-%s-abort' % (self.task_name, pipeline_key.name()),
url=self.abort_handler_path,
params=dict(pipeline_key=pipeline_key, purpose=_BarrierRecord.ABORT),
headers={'X-Ae-Pipeline-Key': pipeline_key}))
# Task continuation with sequence number to prevent fork-bombs.
if len(results) == max_to_notify:
the_match = re.match('(.*)-([0-9]+)', self.task_name)
if the_match:
prefix = the_match.group(1)
end = int(the_match.group(2)) + 1
else:
prefix = self.task_name
end = 0
task_list.append(taskqueue.Task(
name='%s-%d' % (prefix, end),
url=self.fanout_abort_handler_path,
params=dict(root_pipeline_key=root_pipeline_key,
cursor=query.cursor())))
if task_list:
try:
taskqueue.Queue(self.queue_name).add(task_list)
except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError):
pass
def start(self, pipeline, return_task=True):
"""Starts a pipeline.
Args:
pipeline: Pipeline instance to run.
return_task: When True, do not submit the task to start the pipeline
but instead return it for someone else to enqueue.
Returns:
The task to start this pipeline if return_task was True.
Raises:
PipelineExistsError if the pipeline with the given ID already exists.
"""
# Adjust all pipeline output keys for this Pipeline to be children of
# the _PipelineRecord, that way we can write them all and submit in a
# single transaction.
entities_to_put = []
for name, slot in pipeline.outputs._output_dict.iteritems():
slot.key = db.Key.from_path(
*slot.key.to_path(), **dict(parent=pipeline._pipeline_key))
_, output_slots, params_text, params_blob = _generate_args(
pipeline, pipeline.outputs, self.queue_name, self.base_path)
def txn():
pipeline_record = db.get(pipeline._pipeline_key)
if pipeline_record is not None:
raise PipelineExistsError(
'Pipeline with idempotence key "%s" already exists; params=%s' %
(pipeline._pipeline_key.name(),
_short_repr(pipeline_record.params)))
entities_to_put = []
for name, slot in pipeline.outputs._output_dict.iteritems():
entities_to_put.append(_SlotRecord(
key=slot.key,
root_pipeline=pipeline._pipeline_key))
entities_to_put.append(_PipelineRecord(
key=pipeline._pipeline_key,
root_pipeline=pipeline._pipeline_key,
is_root_pipeline=True,
# Bug in DB means we need to use the storage name here,
# not the local property name.
params=params_text,
params_blob=params_blob,
start_time=self._gettime(),
class_path=pipeline._class_path,
max_attempts=pipeline.max_attempts))
entities_to_put.append(_BarrierRecord(
parent=pipeline._pipeline_key,
key_name=_BarrierRecord.FINALIZE,
target=pipeline._pipeline_key,
root_pipeline=pipeline._pipeline_key,
blocking_slots=list(output_slots)))
db.put(entities_to_put)
task = taskqueue.Task(
url=self.pipeline_handler_path,
params=dict(pipeline_key=pipeline._pipeline_key),
headers={'X-Ae-Pipeline-Key': pipeline._pipeline_key},
target=pipeline.target)
if return_task:
return task
task.add(queue_name=self.queue_name, transactional=True)
task = db.run_in_transaction(txn)
# Immediately mark the output slots as existing so they can be filled
# by asynchronous pipelines or used in test mode.
for output_slot in pipeline.outputs._output_dict.itervalues():
output_slot._exists = True
return task
def start_test(self, pipeline):
"""Starts a pipeline in the test mode.
Args:
pipeline: The Pipeline instance to test.
"""
global _TEST_MODE, _TEST_ROOT_PIPELINE_KEY
self.start(pipeline, return_task=True)
_TEST_MODE = True
_TEST_ROOT_PIPELINE_KEY = pipeline._pipeline_key
try:
self.evaluate_test(pipeline, root=True)
finally:
_TEST_MODE = False
def evaluate_test(self, stage, root=False):
"""Recursively evaluates the given pipeline in test mode.
Args:
stage: The Pipeline instance to run at this stage in the flow.
root: True if the supplied stage is the root of the pipeline.
"""
args_adjusted = []
for arg in stage.args:
if isinstance(arg, PipelineFuture):
arg = arg.default
if isinstance(arg, Slot):
value = arg.value
arg._touched = True
else:
value = arg
args_adjusted.append(value)
kwargs_adjusted = {}
for name, arg in stage.kwargs.iteritems():
if isinstance(arg, PipelineFuture):
arg = arg.default
if isinstance(arg, Slot):
value = arg.value
arg._touched = True
else:
value = arg
kwargs_adjusted[name] = value
stage.args, stage.kwargs = args_adjusted, kwargs_adjusted
pipeline_generator = mr_util.is_generator_function(stage.run)
logging.debug('Running %s(*%s, **%s)', stage._class_path,
_short_repr(stage.args), _short_repr(stage.kwargs))
if stage.async:
stage.run_test(*stage.args, **stage.kwargs)
elif pipeline_generator:
all_output_slots = set()
try:
pipeline_iter = stage.run_test(*stage.args, **stage.kwargs)
except NotImplementedError:
pipeline_iter = stage.run(*stage.args, **stage.kwargs)
all_substages = set()
next_value = None
last_sub_stage = None
while True:
try:
yielded = pipeline_iter.send(next_value)
except StopIteration:
break
if isinstance(yielded, Pipeline):
if yielded in all_substages:
raise UnexpectedPipelineError(
'Already yielded pipeline object %r' % yielded)
else:
all_substages.add(yielded)
last_sub_stage = yielded
next_value = yielded.outputs
all_output_slots.update(next_value._output_dict.itervalues())
else:
raise UnexpectedPipelineError(
'Yielded a disallowed value: %r' % yielded)
if last_sub_stage:
# Generator's outputs inherited from last running sub-stage.
# If the generator changes its mind and doesn't yield anything, this
# may not happen at all. Missing outputs will be caught when they
# are passed to the stage as inputs, or verified from the outside by
# the test runner.
for slot_name, slot in last_sub_stage.outputs._output_dict.iteritems():
stage.outputs._output_dict[slot_name] = slot
# Any inherited slots won't be checked for declaration.
all_output_slots.remove(slot)
else:
# Generator yielded no children, so treat it as a sync function.
stage.outputs.default._set_value_test(stage._pipeline_key, None)
# Enforce the policy of requiring all undeclared output slots from
# child pipelines to be consumed by their parent generator.
for slot in all_output_slots:
if slot.name == 'default':
continue
if slot.filled and not slot._strict and not slot._touched:
raise SlotNotDeclaredError(
'Undeclared output "%s"; all dynamic outputs from child '
'pipelines must be consumed.' % slot.name)
else:
try:
result = stage.run_test(*stage.args, **stage.kwargs)
except NotImplementedError:
result = stage.run(*stage.args, **stage.kwargs)
stage.outputs.default._set_value_test(stage._pipeline_key, result)
# Enforce strict output usage at the top level.
if root:
found_outputs = set()
for slot in stage.outputs._output_dict.itervalues():
if slot.filled:
found_outputs.add(slot.name)
if slot.name == 'default':
continue
if slot.name not in stage.output_names:
raise SlotNotDeclaredError(
'Undeclared output from root pipeline "%s"' % slot.name)
missing_outputs = set(stage.output_names) - found_outputs
if missing_outputs:
raise SlotNotFilledError(
'Outputs %r were never filled.' % missing_outputs)
logging.debug('Finalizing %s(*%s, **%s)', stage._class_path,
_short_repr(stage.args), _short_repr(stage.kwargs))
ran = False
try:
stage.finalized_test()
ran = True
except NotImplementedError:
pass
if not ran:
try:
stage.finalized()
except NotImplementedError:
pass
def evaluate(self, pipeline_key, purpose=None, attempt=0):
"""Evaluates the given Pipeline and enqueues sub-stages for execution.
Args:
pipeline_key: The db.Key or stringified key of the _PipelineRecord to run.
purpose: Why evaluate was called ('start', 'finalize', or 'abort').
attempt: The attempt number that should be tried.
"""
After._thread_init()
InOrder._thread_init()
InOrder._local._activated = False
if not isinstance(pipeline_key, db.Key):
pipeline_key = db.Key(pipeline_key)
pipeline_record = db.get(pipeline_key)
if pipeline_record is None:
logging.error('Pipeline ID "%s" does not exist.', pipeline_key.name())
return
if pipeline_record.status not in (
_PipelineRecord.WAITING, _PipelineRecord.RUN):
logging.error('Pipeline ID "%s" in bad state for purpose "%s": "%s"',
pipeline_key.name(), purpose or _BarrierRecord.START,
pipeline_record.status)
return
params = pipeline_record.params
root_pipeline_key = \
_PipelineRecord.root_pipeline.get_value_for_datastore(pipeline_record)
default_slot_key = db.Key(params['output_slots']['default'])
default_slot_record, root_pipeline_record = db.get([
default_slot_key, root_pipeline_key])
if default_slot_record is None:
logging.error('Pipeline ID "%s" default slot "%s" does not exist.',
pipeline_key.name(), default_slot_key)
return
if root_pipeline_record is None:
logging.error('Pipeline ID "%s" root pipeline ID "%s" is missing.',
pipeline_key.name(), root_pipeline_key.name())
return
# Always finalize if we're aborting so pipelines have a chance to cleanup
# before they terminate. Pipelines must access 'was_aborted' to find
# out how their finalization should work.
abort_signal = (
purpose == _BarrierRecord.ABORT or
root_pipeline_record.abort_requested == True)
finalize_signal = (
(default_slot_record.status == _SlotRecord.FILLED and
purpose == _BarrierRecord.FINALIZE) or abort_signal)
try:
pipeline_func_class = mr_util.for_name(pipeline_record.class_path)
except ImportError, e:
# This means something is wrong with the deployed code. Rely on the
# taskqueue system to do retries.
retry_message = '%s: %s' % (e.__class__.__name__, str(e))
logging.exception(
'Could not locate %s#%s. %s',
pipeline_record.class_path, pipeline_key.name(), retry_message)
raise
try:
pipeline_func = pipeline_func_class.from_id(
pipeline_key.name(),
resolve_outputs=finalize_signal,
_pipeline_record=pipeline_record)
except SlotNotFilledError, e:
logging.exception(
'Could not resolve arguments for %s#%s. Most likely this means there '
'is a bug in the Pipeline runtime or some intermediate data has been '
'deleted from the Datastore. Giving up.',
pipeline_record.class_path, pipeline_key.name())
self.transition_aborted(pipeline_key)
return
except Exception, e:
retry_message = '%s: %s' % (e.__class__.__name__, str(e))
logging.exception(
'Instantiating %s#%s raised exception. %s',
pipeline_record.class_path, pipeline_key.name(), retry_message)
self.transition_retry(pipeline_key, retry_message)
if pipeline_record.params['task_retry']:
raise
else:
return
else:
pipeline_generator = mr_util.is_generator_function(
pipeline_func_class.run)
caller_output = pipeline_func.outputs
if (abort_signal and pipeline_func.async and
pipeline_record.status == _PipelineRecord.RUN
and not pipeline_func.try_cancel()):
logging.warning(
'Could not cancel and abort mid-flight async pipeline: %r#%s',
pipeline_func, pipeline_key.name())
return
if finalize_signal:
try:
pipeline_func._finalized_internal(
self, pipeline_key, root_pipeline_key,
caller_output, abort_signal)
except Exception, e:
# This means something is wrong with the deployed finalization code.
# Rely on the taskqueue system to do retries.
retry_message = '%s: %s' % (e.__class__.__name__, str(e))
logging.exception('Finalizing %r#%s raised exception. %s',
pipeline_func, pipeline_key.name(), retry_message)
raise
else:
if not abort_signal:
self.transition_complete(pipeline_key)
return
if abort_signal:
logging.debug('Marking as aborted %s#%s', pipeline_func,
pipeline_key.name())
self.transition_aborted(pipeline_key)
return
if pipeline_record.current_attempt != attempt:
logging.error(
'Received evaluation task for pipeline ID "%s" attempt %d but '
'current pending attempt is %d', pipeline_key.name(), attempt,
pipeline_record.current_attempt)
return
if pipeline_record.current_attempt >= pipeline_record.max_attempts:
logging.error(
'Received evaluation task for pipeline ID "%s" on attempt %d '
'but that exceeds max attempts %d', pipeline_key.name(), attempt,
pipeline_record.max_attempts)
return
if pipeline_record.next_retry_time is not None:
retry_time = pipeline_record.next_retry_time - _RETRY_WIGGLE_TIMEDELTA
if self._gettime() <= retry_time:
detail_message = (
'Received evaluation task for pipeline ID "%s" on attempt %d, '
'which will not be ready until: %s' % (pipeline_key.name(),
pipeline_record.current_attempt, pipeline_record.next_retry_time))
logging.warning(detail_message)
raise UnexpectedPipelineError(detail_message)
if pipeline_record.status == _PipelineRecord.RUN and pipeline_generator:
if (default_slot_record.status == _SlotRecord.WAITING and
not pipeline_record.fanned_out):
# This properly handles the yield-less generator case when the
# RUN state transition worked properly but outputting to the default
# slot failed.
self.fill_slot(pipeline_key, caller_output.default, None)
return
if (pipeline_record.status == _PipelineRecord.WAITING and
pipeline_func.async):
self.transition_run(pipeline_key)
try:
result = pipeline_func._run_internal(
self, pipeline_key, root_pipeline_key, caller_output)
except Exception, e:
if self.handle_run_exception(pipeline_key, pipeline_func, e):
raise
else:
return
if pipeline_func.async:
return
if not pipeline_generator:
# Catch any exceptions that are thrown when the pipeline's return
# value is being serialized. This ensures that serialization errors
# will cause normal abort/retry behavior.
try:
self.fill_slot(pipeline_key, caller_output.default, result)
except Exception, e:
retry_message = 'Bad return value. %s: %s' % (
e.__class__.__name__, str(e))
logging.exception(
'Generator %r#%s caused exception while serializing return '
'value %r. %s', pipeline_func, pipeline_key.name(), result,
retry_message)
self.transition_retry(pipeline_key, retry_message)
if pipeline_func.task_retry:
raise
else:
return
expected_outputs = set(caller_output._output_dict.iterkeys())
found_outputs = self.session_filled_output_names
if expected_outputs != found_outputs:
exception = SlotNotFilledError(
'Outputs %r for pipeline ID "%s" were never filled by "%s".' % (
expected_outputs - found_outputs,
pipeline_key.name(), pipeline_func._class_path))
if self.handle_run_exception(pipeline_key, pipeline_func, exception):
raise exception
return
pipeline_iter = result
next_value = None
last_sub_stage = None
sub_stage = None
sub_stage_dict = {}
sub_stage_ordering = []
while True:
try:
yielded = pipeline_iter.send(next_value)
except StopIteration:
break
except Exception, e:
if self.handle_run_exception(pipeline_key, pipeline_func, e):
raise
else:
return
if isinstance(yielded, Pipeline):
if yielded in sub_stage_dict:
raise UnexpectedPipelineError(
'Already yielded pipeline object %r with pipeline ID %s' %
(yielded, yielded.pipeline_id))
last_sub_stage = yielded
next_value = PipelineFuture(yielded.output_names)
next_value._after_all_pipelines.update(After._local._after_all_futures)
next_value._after_all_pipelines.update(InOrder._local._in_order_futures)
sub_stage_dict[yielded] = next_value
sub_stage_ordering.append(yielded)
InOrder._add_future(next_value)
# To aid local testing, the task_retry flag (which instructs the
# evaluator to raise all exceptions back up to the task queue) is
# inherited by all children from the root down.
yielded.task_retry = pipeline_func.task_retry
else:
raise UnexpectedPipelineError(
'Yielded a disallowed value: %r' % yielded)
if last_sub_stage:
# Final yielded stage inherits outputs from calling pipeline that were not
# already filled during the generator's execution.
inherited_outputs = params['output_slots']
for slot_name in self.session_filled_output_names:
del inherited_outputs[slot_name]
sub_stage_dict[last_sub_stage]._inherit_outputs(
pipeline_record.class_path, inherited_outputs)
else:
# Here the generator has yielded nothing, and thus acts as a synchronous
# function. We can skip the rest of the generator steps completely and
# fill the default output slot to cause finalizing.
expected_outputs = set(caller_output._output_dict.iterkeys())
expected_outputs.remove('default')
found_outputs = self.session_filled_output_names
if expected_outputs != found_outputs:
exception = SlotNotFilledError(
'Outputs %r for pipeline ID "%s" were never filled by "%s".' % (
expected_outputs - found_outputs,
pipeline_key.name(), pipeline_func._class_path))
if self.handle_run_exception(pipeline_key, pipeline_func, exception):
raise exception
else:
self.fill_slot(pipeline_key, caller_output.default, None)
self.transition_run(pipeline_key)
return
# Allocate any SlotRecords that do not yet exist.
entities_to_put = []
for future in sub_stage_dict.itervalues():
for slot in future._output_dict.itervalues():
if not slot._exists:
entities_to_put.append(_SlotRecord(
key=slot.key, root_pipeline=root_pipeline_key))
# Allocate PipelineRecords and BarrierRecords for generator-run Pipelines.
pipelines_to_run = set()
all_children_keys = []
all_output_slots = set()
for sub_stage in sub_stage_ordering:
future = sub_stage_dict[sub_stage]
# Catch any exceptions that are thrown when the pipeline's parameters
# are being serialized. This ensures that serialization errors will
# cause normal retry/abort behavior.
try:
dependent_slots, output_slots, params_text, params_blob = \
_generate_args(sub_stage, future, self.queue_name, self.base_path)
except Exception, e:
retry_message = 'Bad child arguments. %s: %s' % (
e.__class__.__name__, str(e))
logging.exception(
'Generator %r#%s caused exception while serializing args for '
'child pipeline %r. %s', pipeline_func, pipeline_key.name(),
sub_stage, retry_message)
self.transition_retry(pipeline_key, retry_message)
if pipeline_func.task_retry:
raise
else:
return
child_pipeline_key = db.Key.from_path(
_PipelineRecord.kind(), uuid.uuid1().hex)
all_output_slots.update(output_slots)
all_children_keys.append(child_pipeline_key)
child_pipeline = _PipelineRecord(
key=child_pipeline_key,
root_pipeline=root_pipeline_key,
# Bug in DB means we need to use the storage name here,
# not the local property name.
params=params_text,
params_blob=params_blob,
class_path=sub_stage._class_path,
max_attempts=sub_stage.max_attempts)
entities_to_put.append(child_pipeline)
if not dependent_slots:
# This child pipeline will run immediately.
pipelines_to_run.add(child_pipeline_key)
child_pipeline.start_time = self._gettime()
else:
entities_to_put.append(_BarrierRecord(
parent=child_pipeline_key,
key_name=_BarrierRecord.START,
target=child_pipeline_key,
root_pipeline=root_pipeline_key,
blocking_slots=list(dependent_slots)))
entities_to_put.append(_BarrierRecord(
parent=child_pipeline_key,
key_name=_BarrierRecord.FINALIZE,
target=child_pipeline_key,
root_pipeline=root_pipeline_key,
blocking_slots=list(output_slots)))
db.put(entities_to_put)
self.transition_run(pipeline_key,
blocking_slot_keys=all_output_slots,
fanned_out_pipelines=all_children_keys,
pipelines_to_run=pipelines_to_run)
def handle_run_exception(self, pipeline_key, pipeline_func, e):
"""Handles an exception raised by a Pipeline's user code.
Args:
pipeline_key: The pipeline that raised the error.
pipeline_func: The class path name of the Pipeline that was running.
e: The exception that was raised.
Returns:
True if the exception should be re-raised up through the calling stack
by the caller of this method.
"""
if isinstance(e, Retry):
retry_message = str(e)
logging.warning('User forced retry for pipeline ID "%s" of %r: %s',
pipeline_key.name(), pipeline_func, retry_message)
self.transition_retry(pipeline_key, retry_message)
elif isinstance(e, Abort):
abort_message = str(e)
logging.warning('User forced abort for pipeline ID "%s" of %r: %s',
pipeline_key.name(), pipeline_func, abort_message)
pipeline_func.abort(abort_message)
else:
retry_message = '%s: %s' % (e.__class__.__name__, str(e))
logging.exception('Generator %r#%s raised exception. %s',
pipeline_func, pipeline_key.name(), retry_message)
self.transition_retry(pipeline_key, retry_message)
return pipeline_func.task_retry
def transition_run(self,
pipeline_key,
blocking_slot_keys=None,
fanned_out_pipelines=None,
pipelines_to_run=None):
"""Marks an asynchronous or generator pipeline as running.
Does nothing if the pipeline is no longer in a runnable state.
Args:
pipeline_key: The db.Key of the _PipelineRecord to update.
blocking_slot_keys: List of db.Key instances that this pipeline's
finalization barrier should wait on in addition to the existing one.
This is used to update the barrier to include all child outputs. When
None, the barrier will not be updated.
fanned_out_pipelines: List of db.Key instances of _PipelineRecords that
were fanned out by this generator pipeline. This is distinct from the
'pipelines_to_run' list because not all of the pipelines listed here
will be immediately ready to execute. When None, then this generator
yielded no children.
pipelines_to_run: List of db.Key instances of _PipelineRecords that should
be kicked off (fan-out) transactionally as part of this transition.
When None, no child pipelines will run. All db.Keys in this list must
also be present in the fanned_out_pipelines list.
Raises:
UnexpectedPipelineError if blocking_slot_keys was not empty and the
_BarrierRecord has gone missing.
"""
def txn():
pipeline_record = db.get(pipeline_key)
if pipeline_record is None:
logging.warning('Pipeline ID "%s" cannot be marked as run. '
'Does not exist.', pipeline_key.name())
raise db.Rollback()
if pipeline_record.status != _PipelineRecord.WAITING:
logging.warning('Pipeline ID "%s" in bad state to be marked as run: %s',
pipeline_key.name(), pipeline_record.status)
raise db.Rollback()
pipeline_record.status = _PipelineRecord.RUN
if fanned_out_pipelines:
# NOTE: We must model the pipeline relationship in a top-down manner,
# meaning each pipeline must point forward to the pipelines that it
# fanned out to. The reason is race conditions. If evaluate()
# dies early, it may create many unused _PipelineRecord and _SlotRecord
# instances that never progress. The only way we know which of these
# are valid is by traversing the graph from the root, where the
# fanned_out property refers to those pipelines that were run using a
# transactional task.
child_pipeline_list = list(fanned_out_pipelines)
pipeline_record.fanned_out = child_pipeline_list
if pipelines_to_run:
child_indexes = [
child_pipeline_list.index(p) for p in pipelines_to_run]
child_indexes.sort()
task = taskqueue.Task(
url=self.fanout_handler_path,
params=dict(parent_key=str(pipeline_key),
child_indexes=child_indexes))
task.add(queue_name=self.queue_name, transactional=True)
pipeline_record.put()
if blocking_slot_keys:
# NOTE: Always update a generator pipeline's finalization barrier to
# include all of the outputs of any pipelines that it runs, to ensure
# that finalized calls will not happen until all child pipelines have
# completed.
barrier_key = db.Key.from_path(
_BarrierRecord.kind(), _BarrierRecord.FINALIZE,
parent=pipeline_key)
finalize_barrier = db.get(barrier_key)
if finalize_barrier is None:
raise UnexpectedPipelineError(
'Pipeline ID "%s" cannot update finalize barrier. '
'Does not exist.' % pipeline_key.name())
else:
finalize_barrier.blocking_slots = list(
blocking_slot_keys.union(set(finalize_barrier.blocking_slots)))
finalize_barrier.put()
db.run_in_transaction(txn)
def transition_complete(self, pipeline_key):
"""Marks the given pipeline as complete.
Does nothing if the pipeline is no longer in a state that can be completed.
Args:
pipeline_key: db.Key of the _PipelineRecord that has completed.
"""
def txn():
pipeline_record = db.get(pipeline_key)
if pipeline_record is None:
logging.warning(
'Tried to mark pipeline ID "%s" as complete but it does not exist.',
pipeline_key.name())
raise db.Rollback()
if pipeline_record.status not in (
_PipelineRecord.WAITING, _PipelineRecord.RUN):
logging.warning(
'Tried to mark pipeline ID "%s" as complete, found bad state: %s',
pipeline_key.name(), pipeline_record.status)
raise db.Rollback()
pipeline_record.status = _PipelineRecord.DONE
pipeline_record.finalized_time = self._gettime()
pipeline_record.put()
db.run_in_transaction(txn)
def transition_retry(self, pipeline_key, retry_message):
"""Marks the given pipeline as requiring another retry.
Does nothing if all attempts have been exceeded.
Args:
pipeline_key: db.Key of the _PipelineRecord that needs to be retried.
retry_message: User-supplied message indicating the reason for the retry.
"""
def txn():
pipeline_record = db.get(pipeline_key)
if pipeline_record is None:
logging.warning(
'Tried to retry pipeline ID "%s" but it does not exist.',
pipeline_key.name())
raise db.Rollback()
if pipeline_record.status not in (
_PipelineRecord.WAITING, _PipelineRecord.RUN):
logging.warning(
'Tried to retry pipeline ID "%s", found bad state: %s',
pipeline_key.name(), pipeline_record.status)
raise db.Rollback()
params = pipeline_record.params
offset_seconds = (params['backoff_seconds'] *
(params['backoff_factor'] ** pipeline_record.current_attempt))
pipeline_record.next_retry_time = (
self._gettime() + datetime.timedelta(seconds=offset_seconds))
pipeline_record.current_attempt += 1
pipeline_record.retry_message = retry_message
pipeline_record.status = _PipelineRecord.WAITING
if pipeline_record.current_attempt >= pipeline_record.max_attempts:
root_pipeline_key = (
_PipelineRecord.root_pipeline.get_value_for_datastore(
pipeline_record))
logging.warning(
'Giving up on pipeline ID "%s" after %d attempt(s); causing abort '
'all the way to the root pipeline ID "%s"', pipeline_key.name(),
pipeline_record.current_attempt, root_pipeline_key.name())
# NOTE: We do *not* set the status to aborted here to ensure that
# this pipeline will be finalized before it has been marked as aborted.
pipeline_record.abort_message = (
'Aborting after %d attempts' % pipeline_record.current_attempt)
task = taskqueue.Task(
url=self.fanout_abort_handler_path,
params=dict(root_pipeline_key=root_pipeline_key))
task.add(queue_name=self.queue_name, transactional=True)
else:
task = taskqueue.Task(
url=self.pipeline_handler_path,
eta=pipeline_record.next_retry_time,
params=dict(pipeline_key=pipeline_key,
purpose=_BarrierRecord.START,
attempt=pipeline_record.current_attempt),
headers={'X-Ae-Pipeline-Key': pipeline_key})
task.add(queue_name=self.queue_name, transactional=True)
pipeline_record.put()
db.run_in_transaction(txn)
def transition_aborted(self, pipeline_key):
"""Marks the given pipeline as having aborted.
Does nothing if the pipeline is in a bad state.
Args:
pipeline_key: db.Key of the _PipelineRecord to mark as aborted.
"""
def txn():
pipeline_record = db.get(pipeline_key)
if pipeline_record is None:
logging.warning(
'Tried to abort pipeline ID "%s" but it does not exist.',
pipeline_key.name())
raise db.Rollback()
if pipeline_record.status not in (
_PipelineRecord.WAITING, _PipelineRecord.RUN):
logging.warning(
'Tried to abort pipeline ID "%s", found bad state: %s',
pipeline_key.name(), pipeline_record.status)
raise db.Rollback()
pipeline_record.status = _PipelineRecord.ABORTED
pipeline_record.finalized_time = self._gettime()
pipeline_record.put()
db.run_in_transaction(txn)
################################################################################
class _BarrierHandler(webapp2.RequestHandler):
"""Request handler for triggering barriers."""
def post(self):
if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ:
self.response.set_status(403)
return
context = _PipelineContext.from_environ(self.request.environ)
context.notify_barriers(
self.request.get('slot_key'),
self.request.get('cursor'))
class _PipelineHandler(webapp2.RequestHandler):
"""Request handler for running pipelines."""
def post(self):
if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ:
self.response.set_status(403)
return
context = _PipelineContext.from_environ(self.request.environ)
context.evaluate(self.request.get('pipeline_key'),
purpose=self.request.get('purpose'),
attempt=int(self.request.get('attempt', '0')))
class _FanoutAbortHandler(webapp2.RequestHandler):
"""Request handler for fanning out abort notifications."""
def post(self):
if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ:
self.response.set_status(403)
return
context = _PipelineContext.from_environ(self.request.environ)
context.continue_abort(
self.request.get('root_pipeline_key'),
self.request.get('cursor'))
class _FanoutHandler(webapp2.RequestHandler):
"""Request handler for fanning out pipeline children."""
def post(self):
if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ:
self.response.set_status(403)
return
context = _PipelineContext.from_environ(self.request.environ)
# Set of stringified db.Keys of children to run.
all_pipeline_keys = set()
# For backwards compatibility with the old style of fan-out requests.
all_pipeline_keys.update(self.request.get_all('pipeline_key'))
# Fetch the child pipelines from the parent. This works around the 10KB
# task payload limit. This get() is consistent-on-read and the fan-out
# task is enqueued in the transaction that updates the parent, so the
# fanned_out property is consistent here.
parent_key = self.request.get('parent_key')
child_indexes = [int(x) for x in self.request.get_all('child_indexes')]
if parent_key:
parent_key = db.Key(parent_key)
parent = db.get(parent_key)
for index in child_indexes:
all_pipeline_keys.add(str(parent.fanned_out[index]))
all_tasks = []
for pipeline_key in all_pipeline_keys:
all_tasks.append(taskqueue.Task(
url=context.pipeline_handler_path,
params=dict(pipeline_key=pipeline_key),
headers={'X-Ae-Pipeline-Key': pipeline_key},
name='ae-pipeline-fan-out-' + db.Key(pipeline_key).name()))
batch_size = 100 # Limit of taskqueue API bulk add.
for i in xrange(0, len(all_tasks), batch_size):
batch = all_tasks[i:i+batch_size]
try:
taskqueue.Queue(context.queue_name).add(batch)
except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError):
pass
class _CleanupHandler(webapp2.RequestHandler):
"""Request handler for cleaning up a Pipeline."""
def post(self):
if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ:
self.response.set_status(403)
return
root_pipeline_key = db.Key(self.request.get('root_pipeline_key'))
logging.debug('Cleaning up root_pipeline_key=%r', root_pipeline_key)
# TODO(user): Accumulate all BlobKeys from _PipelineRecord and
# _SlotRecord entities and delete them.
pipeline_keys = (
_PipelineRecord.all(keys_only=True)
.filter('root_pipeline =', root_pipeline_key))
db.delete(pipeline_keys)
slot_keys = (
_SlotRecord.all(keys_only=True)
.filter('root_pipeline =', root_pipeline_key))
db.delete(slot_keys)
barrier_keys = (
_BarrierRecord.all(keys_only=True)
.filter('root_pipeline =', root_pipeline_key))
db.delete(barrier_keys)
status_keys = (
_StatusRecord.all(keys_only=True)
.filter('root_pipeline =', root_pipeline_key))
db.delete(status_keys)
class _CallbackHandler(webapp2.RequestHandler):
"""Receives asynchronous callback requests from humans or tasks."""
def post(self):
self.get()
def get(self):
# NOTE: The rest of these validations and the undescriptive error code 400
# are present to address security risks of giving external users access to
# cause PipelineRecord lookups and execution. This approach is still
# vulnerable to timing attacks, since db.get() will have different latency
# depending on existence. Luckily, the key names are generally unguessable
# UUIDs, so the risk here is low.
pipeline_id = self.request.get('pipeline_id')
if not pipeline_id:
logging.error('"pipeline_id" parameter missing.')
self.response.set_status(400)
return
pipeline_key = db.Key.from_path(_PipelineRecord.kind(), pipeline_id)
pipeline_record = db.get(pipeline_key)
if pipeline_record is None:
logging.error('Pipeline ID "%s" for callback does not exist.',
pipeline_id)
self.response.set_status(400)
return
params = pipeline_record.params
real_class_path = params['class_path']
try:
pipeline_func_class = mr_util.for_name(real_class_path)
except ImportError, e:
logging.error('Cannot load class named "%s" for pipeline ID "%s".',
real_class_path, pipeline_id)
self.response.set_status(400)
return
if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ:
if pipeline_func_class.public_callbacks:
pass
elif pipeline_func_class.admin_callbacks:
if not users.is_current_user_admin():
logging.error('Unauthorized callback for admin-only pipeline ID "%s"',
pipeline_id)
self.response.set_status(400)
return
else:
logging.error('External callback for internal-only pipeline ID "%s"',
pipeline_id)
self.response.set_status(400)
return
stage = pipeline_func_class.from_id(pipeline_id)
if stage is None:
logging.error('Pipeline ID "%s" deleted during callback', pipeline_id)
self.response.set_status(400)
return
kwargs = {}
for key in self.request.arguments():
if key != 'pipeline_id':
kwargs[str(key)] = self.request.get(key)
callback_result = stage._callback_internal(kwargs)
if callback_result is not None:
status_code, content_type, content = callback_result
self.response.set_status(status_code)
self.response.headers['Content-Type'] = content_type
self.response.out.write(content)
################################################################################
def _get_timestamp_ms(when):
"""Converts a datetime.datetime to integer milliseconds since the epoch.
Requires special handling to preserve microseconds.
Args:
when: A datetime.datetime instance.
Returns:
Integer time since the epoch in milliseconds.
"""
ms_since_epoch = float(time.mktime(when.utctimetuple()) * 1000.0)
ms_since_epoch += when.microsecond / 1000.0
return int(ms_since_epoch)
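# Illustrative usage sketch; the helper below is hypothetical and not part of
# the original module. It shows that the microsecond component of a datetime
# survives the conversion as whole milliseconds, which is what the status UI
# expects for startTimeMs/endTimeMs values.
def _example_timestamp_ms():
  when = datetime.datetime(2012, 1, 1, 12, 30, 45, 250000)
  ms = _get_timestamp_ms(when)
  # 250000 microseconds contribute exactly 250 milliseconds to the result.
  return ms % 1000 == 250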
def _get_internal_status(pipeline_key=None,
pipeline_dict=None,
slot_dict=None,
barrier_dict=None,
status_dict=None):
"""Gets the UI dictionary of a pipeline from a set of status dictionaries.
Args:
pipeline_key: The key of the pipeline to lookup.
pipeline_dict: Dictionary mapping pipeline db.Key to _PipelineRecord.
Default is an empty dictionary.
slot_dict: Dictionary mapping slot db.Key to _SlotRecord.
Default is an empty dictionary.
barrier_dict: Dictionary mapping barrier db.Key to _BarrierRecord.
Default is an empty dictionary.
status_dict: Dictionary mapping status record db.Key to _StatusRecord.
Default is an empty dictionary.
Returns:
Dictionary with the keys:
classPath: The pipeline function being run.
args: List of positional argument slot dictionaries.
kwargs: Dictionary of keyword argument slot dictionaries.
outputs: Dictionary of output slot dictionaries.
children: List of child pipeline IDs.
queueName: Queue on which this pipeline is running.
afterSlotKeys: List of Slot Ids after which this pipeline runs.
currentAttempt: Number of the current attempt, starting at 1.
maxAttempts: Maximum number of attempts before aborting.
backoffSeconds: Constant factor for backoff before retrying.
backoffFactor: Exponential factor for backoff before retrying.
status: Current status of the pipeline.
startTimeMs: When this pipeline ran or will run due to retries, if present.
endTimeMs: When this pipeline finalized, if present.
lastRetryMessage: Why the pipeline failed during the last retry, if there
was a failure; may be empty.
abortMessage: For root pipelines, why the pipeline was aborted if it was
aborted; may be empty.
Dictionary will contain these keys if explicit status is set:
statusTimeMs: When the status was set as milliseconds since the epoch.
statusMessage: Status message, if present.
statusConsoleUrl: The relative URL for the console of this pipeline.
statusLinks: Dictionary mapping human-readable names to relative URLs
for related URLs to this pipeline.
Raises:
PipelineStatusError if any input is bad.
"""
if pipeline_dict is None:
pipeline_dict = {}
if slot_dict is None:
slot_dict = {}
if barrier_dict is None:
barrier_dict = {}
if status_dict is None:
status_dict = {}
pipeline_record = pipeline_dict.get(pipeline_key)
if pipeline_record is None:
raise PipelineStatusError(
'Could not find pipeline ID "%s"' % pipeline_key.name())
params = pipeline_record.params
root_pipeline_key = \
_PipelineRecord.root_pipeline.get_value_for_datastore(pipeline_record)
default_slot_key = db.Key(params['output_slots']['default'])
start_barrier_key = db.Key.from_path(
_BarrierRecord.kind(), _BarrierRecord.START, parent=pipeline_key)
finalize_barrier_key = db.Key.from_path(
_BarrierRecord.kind(), _BarrierRecord.FINALIZE, parent=pipeline_key)
status_record_key = db.Key.from_path(
_StatusRecord.kind(), pipeline_key.name())
start_barrier = barrier_dict.get(start_barrier_key)
finalize_barrier = barrier_dict.get(finalize_barrier_key)
default_slot = slot_dict.get(default_slot_key)
status_record = status_dict.get(status_record_key)
if finalize_barrier is None:
raise PipelineStatusError(
'Finalization barrier missing for pipeline ID "%s"' %
pipeline_key.name())
if default_slot is None:
raise PipelineStatusError(
'Default output slot with key=%s missing for pipeline ID "%s"' % (
default_slot_key, pipeline_key.name()))
output = {
'classPath': pipeline_record.class_path,
'args': list(params['args']),
'kwargs': params['kwargs'].copy(),
'outputs': params['output_slots'].copy(),
'children': [key.name() for key in pipeline_record.fanned_out],
'queueName': params['queue_name'],
'afterSlotKeys': [str(key) for key in params['after_all']],
'currentAttempt': pipeline_record.current_attempt + 1,
'maxAttempts': pipeline_record.max_attempts,
'backoffSeconds': pipeline_record.params['backoff_seconds'],
'backoffFactor': pipeline_record.params['backoff_factor'],
}
# TODO(user): Truncate args, kwargs, and outputs to < 1MB each so we
# can reasonably return the whole tree of pipelines and their outputs.
# Coerce each value to a string to truncate if necessary. For now if the
# params are too big it will just cause the whole status page to break.
# Fix the key names in parameters to match JavaScript style.
for value_dict in itertools.chain(
output['args'], output['kwargs'].itervalues()):
if 'slot_key' in value_dict:
value_dict['slotKey'] = value_dict.pop('slot_key')
# Figure out the pipeline's status.
if pipeline_record.status in (_PipelineRecord.WAITING, _PipelineRecord.RUN):
if default_slot.status == _SlotRecord.FILLED:
status = 'finalizing'
elif (pipeline_record.status == _PipelineRecord.WAITING and
pipeline_record.next_retry_time is not None):
status = 'retry'
elif start_barrier and start_barrier.status == _BarrierRecord.WAITING:
# start_barrier will be missing for root pipelines
status = 'waiting'
else:
status = 'run'
elif pipeline_record.status == _PipelineRecord.DONE:
status = 'done'
elif pipeline_record.status == _PipelineRecord.ABORTED:
status = 'aborted'
output['status'] = status
if status_record:
output['statusTimeMs'] = _get_timestamp_ms(status_record.status_time)
if status_record.message:
output['statusMessage'] = status_record.message
if status_record.console_url:
output['statusConsoleUrl'] = status_record.console_url
if status_record.link_names:
output['statusLinks'] = dict(
zip(status_record.link_names, status_record.link_urls))
# Populate status-dependent fields.
if status in ('run', 'finalizing', 'done', 'retry'):
if pipeline_record.next_retry_time is not None:
output['startTimeMs'] = _get_timestamp_ms(pipeline_record.next_retry_time)
elif start_barrier:
# start_barrier will be missing for root pipelines
output['startTimeMs'] = _get_timestamp_ms(start_barrier.trigger_time)
elif pipeline_record.start_time:
# Assume this pipeline ran immediately upon spawning with no
# start barrier or it's the root pipeline.
output['startTimeMs'] = _get_timestamp_ms(pipeline_record.start_time)
if status in ('finalizing',):
output['endTimeMs'] = _get_timestamp_ms(default_slot.fill_time)
if status in ('done',):
output['endTimeMs'] = _get_timestamp_ms(pipeline_record.finalized_time)
if pipeline_record.next_retry_time is not None:
output['lastRetryMessage'] = pipeline_record.retry_message
if pipeline_record.abort_message:
output['abortMessage'] = pipeline_record.abort_message
return output
def _get_internal_slot(slot_key=None,
filler_pipeline_key=None,
slot_dict=None):
"""Gets information about a _SlotRecord for display in UI.
Args:
slot_key: The db.Key of the slot to fetch.
filler_pipeline_key: In the case the slot has not yet been filled, assume
that the given db.Key (for a _PipelineRecord) will be the filler of
the slot in the future.
slot_dict: The slot JSON dictionary.
Returns:
Dictionary with the keys:
status: Slot status: 'filled' or 'waiting'
fillTimeMs: Time in milliseconds since the epoch of when it was filled.
value: The current value of the slot, which is a slot's JSON dictionary.
fillerPipelineId: The pipeline ID of what stage has or should fill
this slot.
Raises:
PipelineStatusError if any input is bad.
"""
if slot_dict is None:
slot_dict = {}
slot_record = slot_dict.get(slot_key)
if slot_record is None:
raise PipelineStatusError(
'Could not find data for output slot key "%s".' % slot_key)
output = {}
if slot_record.status == _SlotRecord.FILLED:
output['status'] = 'filled'
output['fillTimeMs'] = _get_timestamp_ms(slot_record.fill_time)
output['value'] = slot_record.value
filler_pipeline_key = \
_SlotRecord.filler.get_value_for_datastore(slot_record)
else:
output['status'] = 'waiting'
if filler_pipeline_key:
output['fillerPipelineId'] = filler_pipeline_key.name()
return output
def get_status_tree(root_pipeline_id):
"""Gets the full status tree of a pipeline.
Args:
root_pipeline_id: The root pipeline ID to get status for.
Returns:
Dictionary with the keys:
rootPipelineId: The ID of the root pipeline.
slots: Mapping of slot IDs to the result of _get_internal_slot.
pipelines: Mapping of pipeline IDs to result of _get_internal_status.
Raises:
PipelineStatusError if any input is bad.
"""
root_pipeline_key = db.Key.from_path(_PipelineRecord.kind(), root_pipeline_id)
root_pipeline_record = db.get(root_pipeline_key)
if root_pipeline_record is None:
raise PipelineStatusError(
'Could not find pipeline ID "%s"' % root_pipeline_id)
if (root_pipeline_key !=
_PipelineRecord.root_pipeline.get_value_for_datastore(
root_pipeline_record)):
raise PipelineStatusError(
'Pipeline ID "%s" is not a root pipeline!' % root_pipeline_id)
found_pipeline_dict = dict((stage.key(), stage) for stage in
_PipelineRecord.all().filter('root_pipeline =', root_pipeline_key))
found_slot_dict = dict((slot.key(), slot) for slot in
_SlotRecord.all().filter('root_pipeline =', root_pipeline_key))
found_barrier_dict = dict((barrier.key(), barrier) for barrier in
_BarrierRecord.all().filter('root_pipeline =', root_pipeline_key))
found_status_dict = dict((status.key(), status) for status in
_StatusRecord.all().filter('root_pipeline =', root_pipeline_key))
# Breadth-first traversal of _PipelineRecord instances by following
# _PipelineRecord.fanned_out property values.
valid_pipeline_keys = set([root_pipeline_key])
slot_filler_dict = {} # slot_key to pipeline_key
expand_stack = [root_pipeline_record]
while expand_stack:
old_stack = expand_stack
expand_stack = []
for pipeline_record in old_stack:
for child_pipeline_key in pipeline_record.fanned_out:
# This will let us prune off those pipelines which were allocated in
# the Datastore but were never run due to mid-flight task failures.
child_pipeline_record = found_pipeline_dict.get(child_pipeline_key)
if child_pipeline_record is None:
raise PipelineStatusError(
'Pipeline ID "%s" points to child ID "%s" which does not exist.'
% (pipeline_record.key().name(), child_pipeline_key.name()))
expand_stack.append(child_pipeline_record)
valid_pipeline_keys.add(child_pipeline_key)
# Figure out the deepest pipeline that's responsible for outputting to
# a particular _SlotRecord, so we can report which pipeline *should*
# be the filler.
child_outputs = child_pipeline_record.params['output_slots']
for output_slot_key in child_outputs.itervalues():
slot_filler_dict[db.Key(output_slot_key)] = child_pipeline_key
output = {
'rootPipelineId': root_pipeline_id,
'slots': {},
'pipelines': {},
}
for pipeline_key in found_pipeline_dict.keys():
if pipeline_key not in valid_pipeline_keys:
continue
output['pipelines'][pipeline_key.name()] = _get_internal_status(
pipeline_key=pipeline_key,
pipeline_dict=found_pipeline_dict,
slot_dict=found_slot_dict,
barrier_dict=found_barrier_dict,
status_dict=found_status_dict)
for slot_key, filler_pipeline_key in slot_filler_dict.iteritems():
output['slots'][str(slot_key)] = _get_internal_slot(
slot_key=slot_key,
filler_pipeline_key=filler_pipeline_key,
slot_dict=found_slot_dict)
return output
def get_pipeline_names():
"""Returns the class paths of all Pipelines defined in alphabetical order."""
class_path_set = set()
for cls in _PipelineMeta._all_classes:
if cls._class_path is None:
cls._set_class_path()
if cls._class_path is not None:
class_path_set.add(cls._class_path)
return sorted(class_path_set)
def get_root_list(class_path=None, cursor=None, count=50):
"""Gets a list of root Pipelines.
Args:
class_path: Optional. If supplied, only return root Pipelines with the
given class_path. By default all root pipelines are returned.
cursor: Optional. When supplied, the cursor returned from the last call to
get_root_list which indicates where to pick up.
count: How many root pipeline records to return.
Returns:
Dictionary with the keys:
pipelines: The list of Pipeline records in the same format as
returned by get_status_tree, but with only the roots listed.
cursor: Cursor to pass back to this function to resume the query. Will
only be present if there is another page of results.
Raises:
PipelineStatusError if any input is bad.
"""
query = _PipelineRecord.all(cursor=cursor)
if class_path:
query.filter('class_path =', class_path)
query.filter('is_root_pipeline =', True)
query.order('-start_time')
root_list = query.fetch(count)
fetch_list = []
for pipeline_record in root_list:
fetch_list.append(db.Key(pipeline_record.params['output_slots']['default']))
fetch_list.append(db.Key.from_path(
_BarrierRecord.kind(), _BarrierRecord.FINALIZE,
parent=pipeline_record.key()))
fetch_list.append(db.Key.from_path(
_StatusRecord.kind(), pipeline_record.key().name()))
pipeline_dict = dict((stage.key(), stage) for stage in root_list)
slot_dict = {}
barrier_dict = {}
status_dict = {}
for entity in db.get(fetch_list):
if isinstance(entity, _BarrierRecord):
barrier_dict[entity.key()] = entity
elif isinstance(entity, _SlotRecord):
slot_dict[entity.key()] = entity
elif isinstance(entity, _StatusRecord):
status_dict[entity.key()] = entity
results = []
for pipeline_record in root_list:
output = _get_internal_status(
pipeline_record.key(),
pipeline_dict=pipeline_dict,
slot_dict=slot_dict,
barrier_dict=barrier_dict,
status_dict=status_dict)
output['pipelineId'] = pipeline_record.key().name()
results.append(output)
result_dict = {}
cursor = query.cursor()
query.with_cursor(cursor)
if query.get(keys_only=True):
result_dict.update(cursor=cursor)
result_dict.update(pipelines=results)
return result_dict
################################################################################
def set_enforce_auth(new_status):
"""Sets whether Pipeline API handlers rely on app.yaml for access control.
Args:
new_status: If True, then the Pipeline API will enforce its own
access control on status and static file handlers. If False, then
it will assume app.yaml is doing the enforcement.
"""
global _ENFORCE_AUTH
_ENFORCE_AUTH = new_status
def create_handlers_map(prefix='.*'):
"""Create new handlers map.
Args:
prefix: url prefix to use.
Returns:
list of (regexp, handler) pairs for WSGIApplication constructor.
"""
return [
(prefix + '/output', _BarrierHandler),
(prefix + '/run', _PipelineHandler),
(prefix + '/finalized', _PipelineHandler),
(prefix + '/cleanup', _CleanupHandler),
(prefix + '/abort', _PipelineHandler),
(prefix + '/fanout', _FanoutHandler),
(prefix + '/fanout_abort', _FanoutAbortHandler),
(prefix + '/callback', _CallbackHandler),
(prefix + '/rpc/tree', status_ui._TreeStatusHandler),
(prefix + '/rpc/class_paths', status_ui._ClassPathListHandler),
(prefix + '/rpc/list', status_ui._RootListHandler),
(prefix + '(/.+)', status_ui._StatusUiHandler),
]
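# Illustrative wiring sketch; this assignment is hypothetical and not part of
# the original module. It shows how the handler map is typically consumed by a
# WSGI application; the '/_ah/pipeline' prefix is an assumption here, not a
# value mandated by this module.
_EXAMPLE_APP = webapp2.WSGIApplication(
    create_handlers_map(prefix='/_ah/pipeline'), debug=False)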
avg_line_length: 37.242681 | max_line_length: 81 | alphanum_fraction: 0.680191
hexsha: a862db1230101aeff3cfeaed26d27e0070f53e78 | size: 16,697 | ext: py | lang: Python
max_stars_repo_path: tbx/text.py | max_stars_repo_name: ronhanson/python-tbx | max_stars_repo_head_hexsha: 7f5015bcc231b42617bdc3537fb39e5b05d4f7af | max_stars_repo_licenses: ["MIT"] | max_stars_count: 2 | stars_event: 2016-05-27T06:21:27.000Z to 2018-12-01T15:02:42.000Z
max_issues_repo_path: tbx/text.py | max_issues_repo_name: ronhanson/python-tbx | max_issues_repo_head_hexsha: 7f5015bcc231b42617bdc3537fb39e5b05d4f7af | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | issues_event: null
max_forks_repo_path: tbx/text.py | max_forks_repo_name: ronhanson/python-tbx | max_forks_repo_head_hexsha: 7f5015bcc231b42617bdc3537fb39e5b05d4f7af | max_forks_repo_licenses: ["MIT"] | max_forks_count: 2 | forks_event: 2018-12-01T15:02:43.000Z to 2020-11-23T07:57:09.000Z
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
"""
(c) 2013 - Ronan Delacroix
Text Utils
:author: Ronan Delacroix
"""
import json
import datetime
import os
import re
import smtplib
import unicodedata
import six
import uuid as UUID
import base64
import yaml
import io
try:
import toml
except:
toml = None
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
from email import encoders
if six.PY3:
import html
else:
import cgi as html
def convert_to_snake_case(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
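# Illustrative usage only; this helper is hypothetical and not part of the
# original module. It documents the expected output of convert_to_snake_case.
def _convert_to_snake_case_example():
    assert convert_to_snake_case("CamelCaseName") == "camel_case_name"
    assert convert_to_snake_case("HTTPResponseCode") == "http_response_code"
    return True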
def normalize_text(text):
return unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf-8')
def slugify(text, delim='-'):
"""Generates a slightly worse ASCII-only slug."""
punctuation_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.:]+')
result = []
for word in punctuation_re.split(text.lower()):
word = normalize_text(word)
if word:
result.append(word)
return delim.join(result)
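# Illustrative usage only; this helper is hypothetical and not part of the
# original module. Accents are stripped by normalize_text() and punctuation
# runs become the delimiter.
def _slugify_example():
    assert slugify(u"Héllo, Wörld!") == "hello-world"
    assert slugify(u"Héllo, Wörld!", delim="_") == "hello_world"
    return True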
def slugify_bytes(b):
return base64.urlsafe_b64encode(b).decode('utf-8').strip('=').replace('-', '0').replace('_', 'A')
def uuid_to_slug(uuid):
if isinstance(uuid, str):
b = UUID.UUID(uuid).bytes
elif isinstance(uuid, UUID.UUID):
b = uuid.bytes
elif isinstance(uuid, bytes):
b = uuid
else:
b = bytes(uuid)
return slugify_bytes(b)
def random_slug():
return uuid_to_slug(UUID.uuid4().bytes)
def random_short_slug():
return uuid_to_slug(UUID.uuid4().bytes[0:8])
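# Illustrative usage only; this helper is hypothetical and not part of the
# original module. Any UUID yields a 22-character URL-safe slug
# (16 bytes -> 24 base64 characters, minus two '=' padding characters).
def _uuid_to_slug_example():
    slug = uuid_to_slug(UUID.UUID("12345678-1234-5678-1234-567812345678"))
    return len(slug) == 22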
def javascript_escape(s, quote_double_quotes=True):
"""
Escape characters for javascript strings.
"""
ustring_re = re.compile(u"([\u0080-\uffff])")
def fix(match):
return r"\u%04x" % ord(match.group(1))
if isinstance(s, bytes):
s = s.decode('utf-8')
elif not isinstance(s, six.text_type):
raise TypeError(s)
s = s.replace('\\', '\\\\')
s = s.replace('\r', '\\r')
s = s.replace('\n', '\\n')
s = s.replace('\t', '\\t')
s = s.replace("'", "\\'")
if quote_double_quotes:
s = s.replace('"', '&quot;')
return str(ustring_re.sub(fix, s))
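# Illustrative usage only; this helper is hypothetical and not part of the
# original module. Newlines and quotes come back escaped so the result can be
# embedded in a JavaScript string literal.
def _javascript_escape_example():
    assert javascript_escape(u"Don't\nstop") == "Don\\'t\\nstop"
    return True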
def send_mail(send_from, send_to, subject, text, server, mime='plain', files=None):
"""
Send an email with attachments.
:param send_from: from email adress
:param send_to: to email adress
:param subject: email subject
:param text: text of the email in html
:param server: SMTP server
:param files: files to attach
:return: None
"""
if not files:
files = []
assert type(send_to) == list
assert type(files) == list
msg = MIMEMultipart()
msg['From'] = send_from
msg['To'] = COMMASPACE.join(send_to)
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = subject
msg.attach(MIMEText(text, mime))
for f in files:
part = MIMEBase('application', "octet-stream")
fp = open(f, "rb")
file_content = fp.read()
part.set_payload(file_content)
encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(f))
msg.attach(part)
smtp = smtplib.SMTP(server)
smtp.sendmail(send_from, send_to, msg.as_string())
smtp.close()
return
def hms_to_seconds(time_string):
"""
Converts a 'hh:mm:ss.ssssss' time string to a float number of seconds.
"""
s = time_string.split(':')
hours = int(s[0])
minutes = int(s[1])
secs = float(s[2])
return hours * 3600 + minutes * 60 + secs
def seconds_to_hms_verbose(t):
"""
Converts a float number of seconds to a '1 hour 2 minutes 30 seconds' style string.
"""
hours = int((t / 3600))
mins = int((t / 60) % 60)
secs = int(t % 60)
return ' '.join([
(str(hours) + ' hour' + ('s' if hours > 1 else '')) if hours > 0 else '',
(str(mins) + ' minute' + ('s' if mins > 1 else '')) if mins > 0 else '',
(str(secs) + ' second' + ('s' if secs > 1 else '')) if secs > 0 else ''
])
def seconds_to_hms(seconds):
"""
Converts seconds float to 'hh:mm:ss.ssssss' format.
"""
hours = int(seconds / 3600.0)
minutes = int((seconds / 60.0) % 60.0)
secs = float(seconds % 60.0)
return "{0:02d}:{1:02d}:{2:02.6f}".format(hours, minutes, secs)
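# Illustrative round-trip; this helper is hypothetical and not part of the
# original module. hms_to_seconds() parses exactly what seconds_to_hms()
# produces.
def _hms_roundtrip_example():
    text = seconds_to_hms(3725.5)  # '01:02:5.500000'
    return hms_to_seconds(text) == 3725.5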
def str_to_bool(v):
return str(v).lower() in ("yes", "on", "true", "y", "t", "1")
def datetime_handler(obj):
return obj.isoformat(sep=' ') if isinstance(obj, datetime.datetime) else None
def render_xml(_dict):
return dict_to_xml_string("xml", _dict)
def render_json(_dict):
return json.dumps(_dict, sort_keys=False, indent=4, default=datetime_handler)
def render_html(_dict):
return dict_to_html(_dict)
def render_txt(_dict):
return dict_to_plaintext(_dict)
def render_yaml(_dict):
return yaml.dump(_dict, default_flow_style=False)
def render_toml(_dict):
if toml:
return toml.dumps(_dict)
import logging
logging.warning("TOML Serialisation is unavailable as toml package is not installed.")
return render_yaml(_dict)
def render_ini(_dict):
import configparser
s = io.StringIO()
p = configparser.ConfigParser()
p.read_dict(_dict)
p.write(s)
value = s.getvalue()
s.close()
return value
mime_rendering_dict = {
'text/html': render_html,
'application/html': render_html,
'application/xml': render_xml,
'application/json': render_json,
'application/yaml': render_yaml,
'application/toml': render_toml,
'text/plain': render_txt,
'text/yaml': render_yaml,
'text/toml': render_toml,
'text/ini': render_ini
}
def render_dict_from_mimetype(d, mimetype):
return mime_rendering_dict.get(mimetype, render_json)(d)
mime_shortcuts = {
'html': 'text/html',
'xml': 'application/xml',
'json': 'application/json',
'text': 'text/plain',
'txt': 'text/plain',
'yaml': 'text/yaml',
'yml': 'text/yaml',
'tml': 'text/toml',
'toml': 'text/toml',
'ini': 'text/ini'
}
def render_dict_from_format(d, format):
return mime_rendering_dict.get(mime_shortcuts.get(format, 'application/json'))(d)
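# Illustrative dispatch example; this helper is hypothetical and not part of
# the original module. Known format names map to their renderer, anything
# else falls back to JSON.
def _render_dispatch_example():
    assert render_dict_from_format({"a": 1}, "yaml") == "a: 1\n"
    assert render_dict_from_format({"a": 1}, "unknown").startswith("{")
    return True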
def pretty_render(data, format='text', indent=0):
"""
Render a dict based on a format
"""
if format == 'json':
return render_json(data)
elif format == 'html':
return render_html(data)
elif format == 'xml':
return render_xml(data)
else:
return dict_to_plaintext(data, indent=indent)
# DICT TO XML FUNCTION
def _dict_to_xml_recurse(parent, dictitem):
import lxml.etree as etree
if isinstance(dictitem, list):
dictitem = {'item': dictitem}
if isinstance(dictitem, dict):
for (tag, child) in dictitem.items():
if str(tag) == '_text':
parent.text = str(child)
elif type(child) is type([]):
# iterate through the array and convert
for listchild in child:
elem = etree.Element(tag)
parent.append(elem)
_dict_to_xml_recurse(elem, listchild)
elif len(tag) == 36 and tag[8] == '-' and tag[
13] == '-': # if uuid is name of the element we try to cook up something nice to display in xml
uuid = tag
tag = parent.tag.replace('_list', '').replace('_dict', '')
elem = etree.Element(tag, uuid=uuid)
parent.append(elem)
_dict_to_xml_recurse(elem, child)
else:
try:
elem = etree.Element(tag)
except ValueError:
elem = etree.Element("element", unrecognized=tag)
parent.append(elem)
_dict_to_xml_recurse(elem, child)
else:
parent.text = str(dictitem)
def dict_to_xml(xml_dict):
"""
Converts a dictionary to an XML ElementTree Element
"""
import lxml.etree as etree
root_tag = list(xml_dict.keys())[0]
root = etree.Element(root_tag)
_dict_to_xml_recurse(root, xml_dict[root_tag])
return root
def dict_to_xml_string(root_name, _dict):
import lxml.etree as etree
_dict = {root_name: _dict}
xml_root = dict_to_xml(_dict)
return etree.tostring(xml_root, pretty_print=True, encoding="UTF-8", xml_declaration=True)
# DICT TO TEXT FUNCTION
def dict_to_plaintext(_dict, indent=0, result=''):
if isinstance(_dict, list):
i = 0
if not _dict:
result += '\t' * indent + "<empty>\n"
for value in _dict:
i += 1
if isinstance(value, dict):
result += '\t' * indent + "[" + str(i) + "]={DICT}\n" + dict_to_plaintext(value, indent + 1)
elif isinstance(value, list):
result += '\t' * indent + "[" + str(i) + "]=<LIST>\n" + dict_to_plaintext(value, indent + 1) + "\n"
else:
result += '\t' * indent + "[" + str(i) + "]=\"" + str(value) + "\"\n"
return result
elif isinstance(_dict, dict):
for key, value in _dict.items():
if isinstance(value, dict):
result += '\t' * indent + "{" + str(key) + "}\n" + dict_to_plaintext(value, indent + 1)
elif isinstance(value, list):
result += '\t' * indent + "<" + str(key) + '>\n' + dict_to_plaintext(value, indent + 1)
else:
if "\n" in str(value):
value = ' '.join([line.strip() for line in str(value).replace("\"", "'").split("\n")])
result += '\t' * indent + str(key) + '=' + "\"" + str(value) + "\"\n"
return result
else:
return "\"" + str(_dict) + "\""
# DICT TO HTML FUNCTION
def _dict_to_html_recurse(_dict, indent=0, result=''):
if isinstance(_dict, list):
i = 0
result += ' ' * indent + "<ul>\n"
for value in _dict:
i += 1
if isinstance(value, dict):
result += ' ' * (indent + 1) + "<li class='row" + str(i % 2) + "'>\n" + _dict_to_html_recurse(value,
indent + 2) + ' ' * (
indent + 1) + "</li>\n"
elif isinstance(value, list):
result += ' ' * (indent + 1) + "<li class='row" + str(i % 2) + "'>\n" + _dict_to_html_recurse(value,
indent + 2) + ' ' * (
indent + 1) + "</li>\n"
else:
result += ' ' * (indent + 1) + "<li class='row" + str(i % 2) + "'><pre>" + html.escape(
str(value)) + "</pre></li>\n"
result += ' ' * indent + "</ul>\n"
return result
elif isinstance(_dict, dict):
result += ' ' * indent + "<table>\n"
i = 0
for key, value in _dict.items():
i += 1
if isinstance(value, dict) or isinstance(value, list):
result += ' ' * (indent + 1) + "<tr class='row" + str(i % 2) + "'>\n"
result += ' ' * (indent + 2) + "<td>" + str(key) + "</td>\n"
result += ' ' * (indent + 2) + "<td>\n" + _dict_to_html_recurse(value, indent + 3)
result += ' ' * (indent + 2) + "</td>\n"
result += ' ' * (indent + 1) + "</tr>\n"
else:
value = html.escape(str(value))
result += ' ' * (indent + 1) + "<tr class='row" + str(i % 2) + "'><td>" + str(
key) + "</td><td><pre>" + str(value) + "</pre></td></tr>\n"
result += ' ' * indent + "</table>\n"
return result
else:
return "<pre>" + html.escape(str(_dict)) + "</pre>"
def dict_to_html(_dict, title="Result"):
return """
<html>
<head>
<style>
body { font-family: monospace; }
table { display : inline-block; border-spacing: 0px; border-collapse: collapse; }
td { border : 1px solid grey; padding:3px 10px; }
li { border : 1px solid grey; padding:0px 10px 0px 10px; margin: 0px 0px 0px 5px; list-style-type : circle; }
ul { display : inline-block; padding:0px 0px 0px 10px; margin:0px;}
pre { margin:0 ; }
.row0 { background-color:#EAEAFF; }
.row1 { background-color:#FFFFFF; }
</style>
<title>""" + title + """</title>
</head>
<body>
""" + _dict_to_html_recurse(_dict, 2) + " </body>\n</html>"
def test_page(title="Result"):
result = "<table>"
docu = {}
i = 0
for func_name, doc in docu.items():
result += "<tr class='row" + str(i) + "'><td>" + doc['friendly_name'] + "</td>"
if 'parameters' in doc:
result += "<td><form action='" + func_name + "' method='" + doc[
'method_type'] + "' enctype='multipart/form-data'>"
result += "<table width='100%'>"
if 'required' in doc['parameters']:
result += "<tr><th colspan='2'>Required</th></tr>"
for param in doc['parameters']['required']:
if param == 'asset_file':
result += "<tr><td>" + str(param) + "</td><td><input type='file' name='" + str(
param) + "' value=''/></td><tr/>"
else:
result += "<tr><td>" + str(param) + "</td><td><input type='text' name='" + str(
param) + "' value=''/></td><tr/>"
if 'optionnal' in doc['parameters']:
result += "<tr><th colspan='2'>Optionnal</th></tr>"
for param, value in doc['parameters']['optionnal'].items():
if value == None:
value = ''
result += "<tr><td>" + str(param) + "</td><td><input type='text' name='" + str(
param) + "' value='" + str(value) + "'/></td><tr/>"
result += "<tr><th colspan='2'><input type='submit'/></th></tr>"
result += "</table>"
result += "</form></td>"
else:
result += "<td><a href='" + func_name + "'>" + func_name + "</a></td>"
result += "</tr>"
i += 1
i = i % 2
result += "</table>"
return """
<html>
<head>
<style>
body {font-family: monospace;}
table {display : inline-block; border-spacing: 0px; border-collapse: collapse;}
td {border: 1px solid grey; padding: 3px 10px;}
li {border: 1px solid grey; padding: 0px 10px 0px 10px; margin: 0px 0px 0px 5px; list-style-type: circle;}
ul {display: inline-block; padding: 0px 0px 0px 10px; margin:0px;}
pre {margin: 0;}
.row0 {background-color:#EAEAFF;}
.row1 {background-color:#FFFFFF;}
</style>
<title>""" + title + """</title>
</head>
<body>
""" + result + """
</body>
</html>"""
def uni(text):
"""
Tries to force the conversion of a text to unicode.
REALLY DIRTY HACK TO TRY TO DETECT ENCODINGS...
:param text: text to convert
:return: unicode text
"""
if type(text) == six.text_type:
for encoding in ['latin_1', 'ascii', 'utf-8']:
try:
strtext = text.encode(encoding)
except:
pass
else:
break
text = strtext
unitext = text
for encoding in ['utf-8', 'ascii', 'latin_1']:
try:
unitext = text.decode(encoding)
except:
pass
else:
break
return unitext
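# Illustrative behaviour; this helper is hypothetical and not part of the
# original module. Byte strings are decoded by trying utf-8, then ascii,
# then latin-1.
def _uni_example():
    return uni(b'h\xc3\xa9llo') == uni(b'h\xe9llo') == u'h\xe9llo'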
def handle_carriage_return(s: str):
"""Returns the text of s from its last carriage return onward (s unchanged if none)."""
if '\r' in s:
i = s.rfind('\r')
if i > 0:
return s[i:]
return s
def xml_get_tag(xml, tag, parent_tag=None, multi_line=False):
"""
Returns the tag data for the first instance of the named tag, or for all instances if multi_line is true.
If a parent tag is specified, then that will be required before the tag.
"""
expr_str = '[<:]' + tag + '.*?>(?P<matched_text>.+?)<'
if parent_tag:
expr_str = '[<:]' + parent_tag + '.*?>.*?' + expr_str
expr = re.compile(expr_str, re.DOTALL | re.IGNORECASE)
if multi_line:
return expr.findall(xml)
else:
if expr.search(xml):
return expr.search(xml).group('matched_text').strip()
else:
return None
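# Illustrative usage; this helper is hypothetical and not part of the original
# module. xml_get_tag() is a regex-based lookup, not a full XML parser.
def _xml_get_tag_example():
    xml = "<root><name>demo</name><name>other</name></root>"
    assert xml_get_tag(xml, "name") == "demo"
    assert xml_get_tag(xml, "name", multi_line=True) == ["demo", "other"]
    return True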
avg_line_length: 31.563327 | max_line_length: 137 | alphanum_fraction: 0.53279
hexsha: 1be8c0cd59308533dc9f3af4f4efe07d2bc4be66 | size: 1,980 | ext: py | lang: Python
max_stars_repo_path: test/test_bar3d.py | max_stars_repo_name: CharileWithZoe/pyecharts | max_stars_repo_head_hexsha: dbded9a8932cc13840b7d130802176fd88a97bf8 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 11,032 | stars_event: 2017-12-21T01:21:38.000Z to 2022-03-31T23:02:38.000Z
max_issues_repo_path: test/test_bar3d.py | max_issues_repo_name: MatteLin/pyecharts | max_issues_repo_head_hexsha: 42717ac1a5be330586bba45196cce1ed961fef54 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 1,687 | issues_event: 2017-12-21T02:10:47.000Z to 2022-03-31T14:31:45.000Z
max_forks_repo_path: test/test_bar3d.py | max_forks_repo_name: MatteLin/pyecharts | max_forks_repo_head_hexsha: 42717ac1a5be330586bba45196cce1ed961fef54 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 2,528 | forks_event: 2017-12-21T07:57:52.000Z to 2022-03-30T15:34:51.000Z
content:
import random
from unittest.mock import patch
from nose.tools import assert_equal, assert_in
from pyecharts import options as opts
from pyecharts.charts import Bar3D
from pyecharts.faker import Faker
@patch("pyecharts.render.engine.write_utf8_html_file")
def test_bar3d_base(fake_writer):
data = [(i, j, random.randint(0, 12)) for i in range(6) for j in range(24)]
c = (
Bar3D()
.add(
"",
[[d[1], d[0], d[2]] for d in data],
xaxis3d_opts=opts.Axis3DOpts(Faker.clock, type_="category"),
yaxis3d_opts=opts.Axis3DOpts(Faker.week_en, type_="category"),
zaxis3d_opts=opts.Axis3DOpts(type_="value"),
)
.set_global_opts(visualmap_opts=opts.VisualMapOpts(max_=20))
)
c.render()
_, content = fake_writer.call_args[0]
assert_equal(c.theme, "white")
assert_equal(c.renderer, "canvas")
@patch("pyecharts.render.engine.write_utf8_html_file")
def test_bar3d_stack(fake_writer):
data1 = [(i, j, random.randint(0, 12)) for i in range(6) for j in range(24)]
data2 = [(i, j, random.randint(13, 20)) for i in range(6) for j in range(24)]
c = (
Bar3D()
.add(
"1",
[[d[1], d[0], d[2]] for d in data1],
xaxis3d_opts=opts.Axis3DOpts(Faker.clock, type_="category"),
yaxis3d_opts=opts.Axis3DOpts(Faker.week_en, type_="category"),
zaxis3d_opts=opts.Axis3DOpts(type_="value"),
)
.add(
"2",
[[d[1], d[0], d[2]] for d in data2],
xaxis3d_opts=opts.Axis3DOpts(Faker.clock, type_="category"),
yaxis3d_opts=opts.Axis3DOpts(Faker.week_en, type_="category"),
zaxis3d_opts=opts.Axis3DOpts(type_="value"),
)
.set_global_opts(visualmap_opts=opts.VisualMapOpts(max_=20))
.set_series_opts(**{"stack": "stack"})
)
c.render()
_, content = fake_writer.call_args[0]
assert_in("stack", content)
avg_line_length: 34.736842 | max_line_length: 81 | alphanum_fraction: 0.615152
hexsha: 0d6a80402cfc549417a16853a19fc59fa586da24 | size: 5,356 | ext: py | lang: Python
max_stars_repo_path: test/test_basic.py | max_stars_repo_name: spalato/qtlets | max_stars_repo_head_hexsha: 506fb42823ba088f30b2d85c4f8b0c7a3f0c6cd1 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 2 | stars_event: 2020-09-22T17:44:51.000Z to 2022-01-03T22:47:49.000Z
max_issues_repo_path: test/test_basic.py | max_issues_repo_name: spalato/qtlets | max_issues_repo_head_hexsha: 506fb42823ba088f30b2d85c4f8b0c7a3f0c6cd1 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | issues_event: null
max_forks_repo_path: test/test_basic.py | max_forks_repo_name: spalato/qtlets | max_forks_repo_head_hexsha: 506fb42823ba088f30b2d85c4f8b0c7a3f0c6cd1 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | forks_event: 2020-10-02T20:17:36.000Z to 2020-10-02T20:17:36.000Z
content:
import sys
from collections import namedtuple
from functools import partial
from random import randint, choices
from string import ascii_letters, punctuation, digits
from types import SimpleNamespace
import pytest
from PySide2.QtWidgets import QWidget, QPushButton, QVBoxLayout, QCheckBox, \
QLineEdit
from PySide2.QtCore import Qt
from PySide2.QtTest import QTest
from qtlets.qtlets import HasQtlets
from qtlets.widgets import IntEdit, StrEdit
TRAITLETS_IS_AVAILABLE = False
try:
from traitlets import Integer, HasTraits, Unicode
TRAITLETS_IS_AVAILABLE = True
except ImportError:
pass
ATTR_IS_AVAILABLE = False
try:
import attr # attr is a dependency of pytest...
ATTR_IS_AVAILABLE = True
except ImportError:
pass
printable = ascii_letters + punctuation + digits
@pytest.fixture(params=[int, str])
def data_type(request):
return request.param
# we could expand our test matrix to test multiple edit_types per dtype.
dtypes = {
str: SimpleNamespace(
dtype=str,
init_value="TEST",
random_value=lambda : "".join(choices(printable, k=10)),
edit_type=StrEdit),
int: SimpleNamespace(
dtype=int,
init_value=1,
random_value=lambda : randint(0, 10),
edit_type=IntEdit),
}
if TRAITLETS_IS_AVAILABLE:
dtypes[str].traitlet = Unicode
dtypes[int].traitlet = Integer
@pytest.fixture
def dtype_config(data_type):
return dtypes[data_type]
def vanilla(dtype_config):
v = dtype_config.init_value
class Data(HasQtlets):
def __init__(self, *a, value=v, **kw):
super().__init__(*a, **kw)
self.value = value
return Data()
def properties(dtype_config):
class Data(HasQtlets):
def __init__(self, *a, value=dtype_config.init_value, **kw):
super().__init__(*a, **kw)
self._value = value
@property
def value(self):
return self._value
@value.setter
def value(self, v):
self._value = v
return Data()
def traitlets(dtype_config):
class Data(HasQtlets, HasTraits):
value = dtype_config.traitlet(default_value=dtype_config.init_value)
return Data()
def attrs(dtype_config):
@attr.s
class Base:
value: dtype_config.dtype = attr.ib(default=dtype_config.init_value)
# def __attrs_post_init__(self):
# super().__init__() # tsk tsk tsk...
class Data(HasQtlets, Base): pass
return Data()
@pytest.fixture(
params=[
vanilla,
properties,
pytest.param(traitlets,
marks=pytest.mark.skipif(not TRAITLETS_IS_AVAILABLE, reason="Requires the `traitlets` module.")
),
pytest.param(attrs,
marks=pytest.mark.skipif(not ATTR_IS_AVAILABLE, reason="Requires the `attrs` module.")
),
]
)
def data_instance(request, dtype_config):
return request.param(dtype_config)
@pytest.fixture
def new_value(dtype_config):
def f(current):
while (target := dtype_config.random_value()) == current:
pass
return target
return f
@pytest.fixture
def form(dtype_config, data_instance, new_value):
edit_cls = dtype_config.edit_type
class Form(QWidget):
def __init__(self, parent=None, data=None):
super().__init__(parent)
self.data = data
self.edit = edit_cls("...")
self.otheredit = edit_cls("???")
# self.otheredit.setEnabled(False)
self.button = QPushButton("Roll!")
layout = QVBoxLayout()
for w in [self.edit, self.otheredit, self.button]:
layout.addWidget(w)
self.setLayout(layout)
data.link_widget(self.edit, "value")
data.link_widget(self.otheredit, "value")
self.button.clicked.connect(self.on_btn_click)
self.setWindowTitle("Directional connection")
def on_btn_click(self):
self.data.value = new_value(self.data.value)
return Form(data=data_instance)
@pytest.mark.usefixtures("app")
class TestBasic:
def test_initial_sync(self, data_instance, form):
assert data_instance.value == form.edit.value()
assert data_instance.value == form.otheredit.value()
def test_external(self, data_instance, form, new_value):
data_instance.value = new_value(data_instance.value)
assert data_instance.value == form.edit.value()
assert data_instance.value == form.otheredit.value()
def test_roll(self, data_instance, form):
old = data_instance.value
#while data_instance.value == old:
QTest.mouseClick(form.button, Qt.LeftButton)
assert old != data_instance.value
assert data_instance.value == form.edit.value()
assert data_instance.value == form.otheredit.value()
def test_modify_edit(self, data_instance, form, new_value):
target = new_value(data_instance.value)
assert target != data_instance.value
for w in (form.edit, form.otheredit):
w.clear()
QTest.keyClicks(w, str(target))
QTest.keyClick(w, Qt.Key_Enter)
assert data_instance.value == form.edit.value()
assert data_instance.value == form.otheredit.value()
assert data_instance.value == target
avg_line_length: 28.338624 | max_line_length: 107 | alphanum_fraction: 0.655153
hexsha: 8862ef53228b689ac5cc22f99ba3436928c0c0fc | size: 789 | ext: py | lang: Python
max_stars_repo_path: Pyinstalled/dist/Resources/Programs/audioTest.py | max_stars_repo_name: scriptslay3r/MyRose | max_stars_repo_head_hexsha: b716969b6bb424be3125c6370b0c9f450cf76151 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | stars_event: 2020-05-10T17:59:35.000Z to 2020-05-10T17:59:35.000Z
max_issues_repo_path: Pyinstalled/dist/lib/audioTest.py | max_issues_repo_name: scriptslay3r/MyRose | max_issues_repo_head_hexsha: b716969b6bb424be3125c6370b0c9f450cf76151 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | issues_event: null
max_forks_repo_path: Pyinstalled/dist/lib/audioTest.py | max_forks_repo_name: scriptslay3r/MyRose | max_forks_repo_head_hexsha: b716969b6bb424be3125c6370b0c9f450cf76151 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | forks_event: null
content:
import pyaudio
import wave
filename = 'blue.wav'
# Set chunk size of 1024 samples per data frame
chunk = 1024
# Open the sound file
wf = wave.open(filename, 'rb')
# Create an interface to PortAudio
p = pyaudio.PyAudio()
# Open a .Stream object to write the WAV file to
# 'output = True' indicates that the sound will be played rather than recorded
stream = p.open(format = p.get_format_from_width(wf.getsampwidth()),
channels = wf.getnchannels(),
rate = wf.getframerate(),
output = True)
# Read data in chunks
data = wf.readframes(chunk)
# Play the sound by writing the audio data to the stream
while data:  # wave.readframes() returns an empty bytes object at end of file
stream.write(data)
data = wf.readframes(chunk)
# Close and terminate the stream
stream.close()
p.terminate()
avg_line_length: 23.909091 | max_line_length: 78 | alphanum_fraction: 0.679341
hexsha: 6f6d3ce3b61745cc3af1e7683fe573fd8536933d | size: 1,015 | ext: py | lang: Python
max_stars_repo_path: BaseTools/Source/Python/GenFds/OptRomFileStatement.py | max_stars_repo_name: DK519/DK_Project | max_stars_repo_head_hexsha: b562574ba2d223aff5dd3f3c3260ee1f9905e735 | max_stars_repo_licenses: ["Python-2.0", "Zlib", "BSD-2-Clause", "MIT", "BSD-2-Clause-Patent", "BSD-3-Clause"] | max_stars_count: 1 | stars_event: 2019-04-28T16:32:26.000Z to 2019-04-28T16:32:26.000Z
max_issues_repo_path: BaseTools/Source/Python/GenFds/OptRomFileStatement.py | max_issues_repo_name: DK519/DK_Project | max_issues_repo_head_hexsha: b562574ba2d223aff5dd3f3c3260ee1f9905e735 | max_issues_repo_licenses: ["Python-2.0", "Zlib", "BSD-2-Clause", "MIT", "BSD-2-Clause-Patent", "BSD-3-Clause"] | max_issues_count: null | issues_event: null
max_forks_repo_path: BaseTools/Source/Python/GenFds/OptRomFileStatement.py | max_forks_repo_name: DK519/DK_Project | max_forks_repo_head_hexsha: b562574ba2d223aff5dd3f3c3260ee1f9905e735 | max_forks_repo_licenses: ["Python-2.0", "Zlib", "BSD-2-Clause", "MIT", "BSD-2-Clause-Patent", "BSD-3-Clause"] | max_forks_count: null | forks_event: null
content:
## @file
# process OptionROM generation from FILE statement
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
import Common.LongFilePathOs as os
from .GenFdsGlobalVariable import GenFdsGlobalVariable
##
#
#
class OptRomFileStatement:
## The constructor
#
# @param self The object pointer
#
def __init__(self):
self.FileName = None
self.FileType = None
self.OverrideAttribs = None
## GenFfs() method
#
# Generate FFS
#
# @param self The object pointer
# @param Dict dictionary contains macro and value pair
# @retval string Generated FFS file name
#
def GenFfs(self, Dict = {}, IsMakefile=False):
if self.FileName is not None:
self.FileName = GenFdsGlobalVariable.ReplaceWorkspaceMacro(self.FileName)
return self.FileName
avg_line_length: 22.065217 | max_line_length: 85 | alphanum_fraction: 0.650246
hexsha: bc5f27d0a93cc2fd9690680acb42296287d91171 | size: 2,285 | ext: py | lang: Python
max_stars_repo_path: src/Honeybee_Energy Simulation Par.py | max_stars_repo_name: rdzeldenrust/Honeybee | max_stars_repo_head_hexsha: e91e58badc1c9b082596d2cf97baeccdb6d7d0af | max_stars_repo_licenses: ["CC-BY-3.0"] | max_stars_count: 1 | stars_event: 2016-03-04T09:47:42.000Z to 2016-03-04T09:47:42.000Z
max_issues_repo_path: src/Honeybee_Energy Simulation Par.py | max_issues_repo_name: rdzeldenrust/Honeybee | max_issues_repo_head_hexsha: e91e58badc1c9b082596d2cf97baeccdb6d7d0af | max_issues_repo_licenses: ["CC-BY-3.0"] | max_issues_count: null | issues_event: null
max_forks_repo_path: src/Honeybee_Energy Simulation Par.py | max_forks_repo_name: rdzeldenrust/Honeybee | max_forks_repo_head_hexsha: e91e58badc1c9b082596d2cf97baeccdb6d7d0af | max_forks_repo_licenses: ["CC-BY-3.0"] | max_forks_count: null | forks_event: null
content:
# By Mostapha Sadeghipour Roudsari
# Sadeghipour@gmail.com
# Honeybee started by Mostapha Sadeghipour Roudsari is licensed
# under a Creative Commons Attribution-ShareAlike 3.0 Unported License.
"""
EnergyPlus Simulation Parameters
-
Provided by Honeybee 0.0.55
Args:
timestep_:...
shadowCalcPar_: ...
doPlantSizingCalculation_: ...
solarDistribution_: ...
simulationControls_: ...
ddyFile_: ...
Returns:
energySimPar:...
"""
ghenv.Component.Name = "Honeybee_Energy Simulation Par"
ghenv.Component.NickName = 'EnergySimPar'
ghenv.Component.Message = 'VER 0.0.55\nSEP_11_2014'
ghenv.Component.Category = "Honeybee"
ghenv.Component.SubCategory = "09 | Energy | Energy"
#compatibleHBVersion = VER 0.0.55\nAUG_25_2014
#compatibleLBVersion = VER 0.0.58\nAUG_20_2014
try: ghenv.Component.AdditionalHelpFromDocStrings = "3"
except: pass
def main(timestep, shadowCalcPar, solarDistribution, simulationControls, ddyFile):
solarDist = {
"0" : "MinimalShadowing",
"1" : "FullExterior",
"2" : "FullInteriorAndExterior",
"3" : "FullExteriorWithReflections",
"4" : "FullInteriorAndExteriorWithReflections",
"MinimalShadowing" : "MinimalShadowing",
"FullExterior" : "FullExterior",
"FullInteriorAndExterior" : "FullInteriorAndExterior",
"FullExteriorWithReflections" : "FullExteriorWithReflections",
"FullInteriorAndExteriorWithReflections" : "FullInteriorAndExteriorWithReflections"
}
# I will add check for inputs later
if timestep == None: timestep = 6
if shadowCalcPar == []: shadowCalcPar = ["AverageOverDaysInFrequency", 30, 3000]
if solarDistribution == None:
solarDistribution = solarDist["4"]
else:
solarDistribution = solarDist[solarDistribution]
if simulationControls == []: simulationControls= [True, True, True, False, True]
return [timestep] + shadowCalcPar + [solarDistribution] + simulationControls + [ddyFile]
energySimPar = main(timestep_,
shadowCalcPar_,
solarDistribution_,
simulationControls_,
ddyFile_)
| 36.854839
| 99
| 0.655142
|
88bc94d08053a664d77418e30f3b7558713847d0
| 15,259
|
py
|
Python
|
DPPO/dppo_cont_gae_dist_gpu.py
|
CAVED123/DPPO
|
666d54fb95ce6219771a3747bcae29eb88dd8e4b
|
[
"MIT"
] | 1
|
2020-12-01T13:23:47.000Z
|
2020-12-01T13:23:47.000Z
|
DPPO/dppo_cont_gae_dist_gpu.py
|
CAVED123/DPPO
|
666d54fb95ce6219771a3747bcae29eb88dd8e4b
|
[
"MIT"
] | null | null | null |
DPPO/dppo_cont_gae_dist_gpu.py
|
CAVED123/DPPO
|
666d54fb95ce6219771a3747bcae29eb88dd8e4b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""DPPO_cont_GAE_dist_GPU.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1_GZ3wt0ydAf9Fx7YsFuuvOQduCN_NlDC
"""
"""
Distributed Proximal Policy Optimization (Distributed PPO or DPPO) continuous
version implementation with distributed Tensorflow and Python’s multiprocessing
package. This implementation uses normalized running rewards with GAE. The code
is tested with Gym’s continuous action space environment, Pendulum-v0 on Colab.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
#!pip install -q tf-nightly
import tensorflow as tf
tf.reset_default_graph()
import numpy as np
import matplotlib.pyplot as plt
import gym
import time
from multiprocessing import Process
# The following class is adapted from OpenAI's baseline:
# https://github.com/openai/baselines/blob/master/baselines/common/running_mean_std.py
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
# This class is used for the normalization of rewards in this program before GAE computation.
class RunningStats(object):
def __init__(self, epsilon=1e-4, shape=()):
self.mean = np.zeros(shape, 'float64')
self.var = np.ones(shape, 'float64')
self.std = np.ones(shape, 'float64')
self.count = epsilon
def update(self, x):
batch_mean = np.mean(x, axis=0)
batch_var = np.var(x, axis=0)
batch_count = x.shape[0]
self.update_from_moments(batch_mean, batch_var, batch_count)
def update_from_moments(self, batch_mean, batch_var, batch_count):
delta = batch_mean - self.mean
new_mean = self.mean + delta * batch_count / (self.count + batch_count)
m_a = self.var * self.count
m_b = batch_var * batch_count
M2 = m_a + m_b + np.square(delta) * self.count * batch_count / (self.count + batch_count)
new_var = M2 / (self.count + batch_count)
self.mean = new_mean
self.var = new_var
self.std = np.maximum(np.sqrt(self.var), 1e-6)
self.count = batch_count + self.count
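# A minimal sketch (not part of the original program): how RunningStats behaves
# on two arbitrary batches. Its running mean/std are what the workers later use
# to normalise the reward buffer before the GAE computation.
_rs_demo = RunningStats()
_rs_demo.update(np.array([1.0, 2.0, 3.0]))
_rs_demo.update(np.array([4.0, 5.0]))
# _rs_demo.mean is ~3.0 and _rs_demo.std is ~1.41, i.e. the mean/std of [1..5]
# (up to the small epsilon used to initialise the count)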
class PPO(object):
def __init__(self, scope, sess, env, global_PPO=None):
self.sess = sess
self.env = env
#OPT_A = tf.train.AdamOptimizer(A_LR, beta1=0.99, beta2=0.999, name='OPT_A')
#OPT_C = tf.train.AdamOptimizer(C_LR, beta1=0.99, beta2=0.999, name='OPT_C')
OPT_A = tf.train.AdamOptimizer(A_LR, name='OPT_A')
OPT_C = tf.train.AdamOptimizer(C_LR, name='OPT_C')
with tf.variable_scope(scope): # scope is either global or wid
self.state = tf.placeholder(tf.float32, [None, S_DIM], 'state')
# critic
with tf.variable_scope('critic'):
h1 = tf.layers.dense(self.state, hidden, tf.nn.relu, name='hidden', trainable=True)
self.val = tf.layers.dense(h1, 1, name='val', trainable=True)
self.critic_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
self.discounted_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
self.advantage = self.discounted_r - self.val
self.closs = tf.reduce_mean(tf.square(self.advantage))
self.ctrain_op = OPT_C.minimize(self.closs)
with tf.variable_scope('cgrads'):
self.critic_grad_op = tf.gradients(self.closs, self.critic_params)
# actor
self.pi, self.pi_params = self._build_anet(scope, 'pi', self.env, trainable=True)
self.oldpi, self.oldpi_params = self._build_anet(scope, 'oldpi', self.env, trainable=True) # originally trainable=False
with tf.variable_scope('sample_action'):
self.sample_op = tf.squeeze(self.pi.sample(1), axis=0) # choosing action
with tf.variable_scope('update_oldpi'):
self.update_oldpi_op = [oldp.assign(p) for p, oldp in zip(self.pi_params, self.oldpi_params)]
self.act = tf.placeholder(tf.float32, [None, A_DIM], 'action')
self.adv = tf.placeholder(tf.float32, [None, 1], 'advantage')
with tf.variable_scope('loss'):
with tf.variable_scope('surrogate'):
ratio = self.pi.prob(self.act) / self.oldpi.prob(self.act)
surr = ratio * self.adv
self.aloss = -tf.reduce_mean(tf.minimum(surr, tf.clip_by_value(ratio, 1.-epsilon, 1.+epsilon)*self.adv))
with tf.variable_scope('atrain'):
self.atrain_op = OPT_A.minimize(self.aloss)
with tf.variable_scope('agrads'):
self.pi_grad_op = tf.gradients(self.aloss, self.pi_params)
if scope != net_scope: # not global
with tf.name_scope('params'): # push/pull from local/worker perspective
with tf.name_scope('push_to_global'):
self.push_actor_pi_params = OPT_A.apply_gradients(zip(self.pi_grad_op, global_PPO.pi_params))
self.push_critic_params = OPT_C.apply_gradients(zip(self.critic_grad_op, global_PPO.critic_params))
with tf.name_scope('pull_fr_global'):
self.pull_actor_pi_params = [local_params.assign(global_params) for local_params, global_params in zip(self.pi_params, global_PPO.pi_params)]
self.pull_critic_params = [local_params.assign(global_params) for local_params, global_params in zip(self.critic_params, global_PPO.critic_params)]
def update(self, s, a, r, adv):
self.sess.run(self.update_oldpi_op)
for _ in range(A_EPOCH): # train actor
self.sess.run(self.atrain_op, {self.state: s, self.act: a, self.adv: adv})
# update actor
self.sess.run([self.push_actor_pi_params,
self.pull_actor_pi_params],
{self.state: s, self.act: a, self.adv: adv})
for _ in range(C_EPOCH): # train critic
# update critic
self.sess.run(self.ctrain_op, {self.state: s, self.discounted_r: r})
self.sess.run([self.push_critic_params,
self.pull_critic_params],
{self.state: s, self.discounted_r: r})
def _build_anet(self, scope, name, env, trainable):
with tf.variable_scope(name):
h1 = tf.layers.dense(self.state, hidden, tf.nn.relu, name='hidden', trainable=trainable)
mu = self.env.action_space.high * tf.layers.dense(h1, A_DIM, tf.nn.tanh, name='mu', trainable=trainable)
sigma = tf.layers.dense(h1, A_DIM, tf.nn.softplus, name='sigma', trainable=trainable)
norm_dist = tf.distributions.Normal(loc=mu, scale=sigma)
params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/' + name)
return norm_dist, params
def choose_action(self, s):
s = s[None, :]
a = self.sess.run(self.sample_op, {self.state: s})[0]
return np.clip(a, self.env.action_space.low, self.env.action_space.high)
def get_val(self, s):
if s.ndim < 2: s = s[None, :]
return self.sess.run(self.val, {self.state: s})[0, 0]
# This function is adapted from OpenAI's Baseline
# GAE computation
# returns TD lamda return & advantage
def add_vtarg_and_adv(self, R, done, V, v_s_, gamma, lam):
# Compute target value using TD(lambda) estimator, and advantage with GAE(lambda)
# last element is only used for last vtarg, but we already zeroed it if last new = 1
done = np.append(done, 0)
V_plus = np.append(V, v_s_)
T = len(R)
adv = gaelam = np.empty(T, 'float32')
lastgaelam = 0
for t in reversed(range(T)):
nonterminal = 1-done[t+1]
delta = R[t] + gamma * V_plus[t+1] * nonterminal - V_plus[t]
gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
#print("adv=", adv.shape)
#print("V=", V.shape)
#print("V_plus=", V_plus.shape)
tdlamret = np.vstack(adv) + V
#print("tdlamret=", tdlamret.shape)
return tdlamret, adv # tdlamret is critic_target or Qs
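# A minimal sketch (not part of the original program): the GAE recursion from
# add_vtarg_and_adv above, unrolled on a toy 3-step rollout with arbitrary
# rewards and value estimates to make the backward pass explicit.
def _gae_demo(gamma=0.9, lam=0.95):
    R = np.array([1.0, 0.0, 1.0])                # rewards
    done = np.append(np.array([0, 0, 1]), 0)     # terminal flags (+ padding)
    V = np.array([0.5, 0.4, 0.3])                # value estimates V(s_t)
    V_plus = np.append(V, 0.0)                   # plus bootstrap value V(s_T)
    adv = np.empty(3, 'float32')
    lastgaelam = 0.0
    for t in reversed(range(3)):
        nonterminal = 1 - done[t + 1]
        delta = R[t] + gamma * V_plus[t + 1] * nonterminal - V_plus[t]
        adv[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
    tdlamret = adv + V                           # critic targets (TD(lambda) returns)
    return tdlamret, adv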
class Worker(object):
def __init__(self, wid, GLOBAL_PPO, GLOBAL_EP, GLOBAL_RUNNING_R, sess):
self.wid = wid
self.env = gym.make(GAME).unwrapped
self.g_ppo = GLOBAL_PPO
self.ppo = PPO(wid, sess, self.env, GLOBAL_PPO)
self.running_stats_r = RunningStats()
self.sess = sess
self.GLOBAL_EP = GLOBAL_EP
self.GLOBAL_RUNNING_R = GLOBAL_RUNNING_R
def work(self):
T = 0
t = 0
SESS = self.sess
GLOBAL_EP = self.GLOBAL_EP
GLOBAL_RUNNING_R = self.GLOBAL_RUNNING_R
while SESS.run(GLOBAL_EP) < EP_MAX:
s = self.env.reset()
buffer_s, buffer_a, buffer_r, buffer_done, buffer_V = [], [], [], [], []
ep_r = 0
for t in range(EP_LEN):
a = self.ppo.choose_action(s)
s_, r, done, _ = self.env.step(a)
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append(r)
buffer_done.append(done)
v = self.ppo.get_val(s)
buffer_V.append(v)
s = s_
ep_r += r
# update ppo
if (t+1) % BATCH == 0 or t == EP_LEN-1:
self.running_stats_r.update(np.array(buffer_r))
buffer_r = np.clip( (np.array(buffer_r) - self.running_stats_r.mean) / self.running_stats_r.std, -stats_CLIP, stats_CLIP )
v_s_ = self.ppo.get_val(s_)
tdlamret, adv = self.ppo.add_vtarg_and_adv(np.vstack(buffer_r), np.vstack(buffer_done), np.vstack(buffer_V), v_s_, GAMMA, lamda)
bs, ba, br, b_adv = np.vstack(buffer_s), np.vstack(buffer_a), tdlamret, np.vstack(adv)
buffer_s, buffer_a, buffer_r, buffer_done, buffer_V = [], [], [], [], []
self.ppo.update(bs, ba, br, b_adv)
SESS.run(GLOBAL_EP.assign_add(1.0))
qe = GLOBAL_RUNNING_R.enqueue(ep_r)
SESS.run(qe)
GAME = 'Pendulum-v0'
env = gym.make(GAME).unwrapped
net_scope = 'global'
EP_MAX = 500 #500 # max number of episodes
EP_LEN = 200 # episode length
GAMMA = 0.9
lamda = 0.95 #0.95
hidden = 50 #100
A_LR = 0.0001 # actor's learning rate
C_LR = 0.0002 # critic's learning rate
BATCH = 32 # minibatch size
A_EPOCH = 10 # number of epoch
C_EPOCH = 10 # number of epoch
S_DIM, A_DIM = 3, 1 # state, action dimension
stats_CLIP = 10 # upper bound of RunningStats
epsilon=0.2
cluster = tf.train.ClusterSpec({
"worker": ["localhost:3331",
"localhost:3332",
"localhost:3333",
"localhost:3334"
],
"ps": ["localhost:3330"]
})
def parameter_server():
#tf.reset_default_graph()
server = tf.train.Server(cluster,
job_name="ps",
task_index=0)
sess = tf.Session(target=server.target)
with tf.device("/job:ps/task:0"):
GLOBAL_PPO = PPO(net_scope, sess, env, global_PPO=None) # only need its params
GLOBAL_EP = tf.Variable(0.0, name='GLOBAL_EP') # num of global episodes
# a queue of ep_r
GLOBAL_RUNNING_R = tf.FIFOQueue(EP_MAX, tf.float32, shared_name="GLOBAL_RUNNING_R")
print("Parameter server: waiting for cluster connection...")
sess.run(tf.report_uninitialized_variables())
print("Parameter server: cluster ready!")
print("Parameter server: initializing variables...")
sess.run(tf.global_variables_initializer())
print("Parameter server: variables initialized")
while True:
time.sleep(1.0)
if sess.run(GLOBAL_RUNNING_R.size()) >= EP_MAX: # GLOBAL_EP starts from 0, hence +1 to max_global_episodes
time.sleep(10.0)
GLOBAL_RUNNING_R_list = []
ep_r_prev = 0.0
for i in range(sess.run(GLOBAL_RUNNING_R.size())):
ep_r = sess.run(GLOBAL_RUNNING_R.dequeue())
if i==0:
GLOBAL_RUNNING_R_list.append(ep_r) # for display
else:
GLOBAL_RUNNING_R_list.append(GLOBAL_RUNNING_R_list[-1]*0.9 + ep_r*0.1) # for display
break
# display
plt.plot(np.arange(len(GLOBAL_RUNNING_R_list)), GLOBAL_RUNNING_R_list)
plt.xlabel('episode')
plt.ylabel('reward')
plt.show()
#print("Parameter server: blocking...")
#server.join() # currently blocks forever
print("Parameter server: ended...")
def worker(worker_n):
#tf.reset_default_graph()
server = tf.train.Server(cluster,
job_name="worker",
task_index=worker_n)
sess = tf.Session(target=server.target)
with tf.device("/job:ps/task:0"):
GLOBAL_PPO = PPO(net_scope, sess, env, global_PPO=None) # only need its params
GLOBAL_EP = tf.Variable(0.0, name='GLOBAL_EP') # num of global episodes
# a queue of ep_r
GLOBAL_RUNNING_R = tf.FIFOQueue(EP_MAX, tf.float32, shared_name="GLOBAL_RUNNING_R")
"""
with tf.device(tf.train.replica_device_setter(
worker_device='/job:worker/task:' + str(worker_n),
cluster=cluster)):
"""
print("Worker %d: waiting for cluster connection..." % worker_n)
sess.run(tf.report_uninitialized_variables())
print("Worker %d: cluster ready!" % worker_n)
#while sess.run(tf.report_uninitialized_variables()):
while (sess.run(tf.report_uninitialized_variables())).any(): # ********** .any() .all() **********
print("Worker %d: waiting for variable initialization..." % worker_n)
time.sleep(1.0)
print("Worker %d: variables initialized" % worker_n)
w = Worker(str(worker_n), GLOBAL_PPO, GLOBAL_EP, GLOBAL_RUNNING_R, sess)
print("Worker %d: created" % worker_n)
sess.run(tf.global_variables_initializer()) # got to initialize after Worker creation
w.work()
print("Worker %d: w.work()" % worker_n)
#print("Worker %d: blocking..." % worker_n)
server.join() # currently blocks forever
print("Worker %d: ended..." % worker_n)
start_time = time.time()
ps_proc = Process(target=parameter_server, daemon=True)
w1_proc = Process(target=worker, args=(0, ), daemon=True)
w2_proc = Process(target=worker, args=(1, ), daemon=True)
w3_proc = Process(target=worker, args=(2, ), daemon=True)
w4_proc = Process(target=worker, args=(3, ), daemon=True)
ps_proc.start()
w1_proc.start()
w2_proc.start()
w3_proc.start()
w4_proc.start()
# if not join, parent will terminate before children
# & children will terminate as well cuz children are daemon
ps_proc.join()
#w1_proc.join()
#w2_proc.join()
#w3_proc.join()
#w4_proc.join()
for proc in [w1_proc,
w2_proc,
w3_proc,
w4_proc,
ps_proc]:
proc.terminate() # only way to kill server is to kill it's process
print('All done.')
print("--- %s seconds ---" % (time.time() - start_time))
| 41.352304
| 171
| 0.620814
|
a5b5cd5896b78214c10adc60f01a3d9567777162
| 762
|
py
|
Python
|
binproperty/models.py
|
skyydq/GreaterWMS
|
e14014a73b36ec0f0df03712a229b0931cb388fb
|
[
"Apache-2.0"
] | 1,063
|
2020-11-15T12:55:15.000Z
|
2022-03-31T14:33:12.000Z
|
binproperty/models.py
|
ashrafali46/GreaterWMS
|
1aed14a8c26c8ac4571db5e6b07ab7e4fa3c7c72
|
[
"Apache-2.0"
] | 96
|
2020-11-18T00:06:05.000Z
|
2022-03-03T09:05:39.000Z
|
binproperty/models.py
|
ashrafali46/GreaterWMS
|
1aed14a8c26c8ac4571db5e6b07ab7e4fa3c7c72
|
[
"Apache-2.0"
] | 349
|
2020-11-15T13:15:30.000Z
|
2022-03-31T11:01:15.000Z
|
from django.db import models
class ListModel(models.Model):
bin_property = models.CharField(max_length=32, verbose_name="Bin property")
creater = models.CharField(max_length=255, verbose_name="Who created")
openid = models.CharField(max_length=255, verbose_name="Openid")
is_delete = models.BooleanField(default=False, verbose_name='Delete Label')
create_time = models.DateTimeField(auto_now_add=True, verbose_name="Create Time")
update_time = models.DateTimeField(auto_now=True, blank=True, null=True, verbose_name="Update Time")
class Meta:
db_table = 'binproperty'
verbose_name = 'data id'
verbose_name_plural = "data id"
ordering = ['bin_property']
def __str__(self):
        return str(self.pk)
| 40.105263
| 104
| 0.71916
|
8ea143ca923b68081e8b124796bb1fb1892762f9
| 1,157
|
py
|
Python
|
test/functional/p2p_mempool.py
|
satcoin-dev/satcoin
|
a68f5965a8c28cfcaf8855a661ea3f15de9ae7d5
|
[
"MIT"
] | 4
|
2021-02-28T04:34:58.000Z
|
2021-09-14T15:25:31.000Z
|
test/functional/p2p_mempool.py
|
satcoin-dev/satcoin
|
a68f5965a8c28cfcaf8855a661ea3f15de9ae7d5
|
[
"MIT"
] | null | null | null |
test/functional/p2p_mempool.py
|
satcoin-dev/satcoin
|
a68f5965a8c28cfcaf8855a661ea3f15de9ae7d5
|
[
"MIT"
] | 1
|
2021-06-18T13:13:17.000Z
|
2021-06-18T13:13:17.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test p2p mempool message.
Test that nodes are disconnected if they send mempool messages when bloom
filters are not enabled.
"""
from test_framework.messages import msg_mempool
from test_framework.mininode import P2PInterface
from test_framework.test_framework import SatcoinTestFramework
from test_framework.util import assert_equal
class P2PMempoolTests(SatcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-peerbloomfilters=0"]]
def run_test(self):
# Add a p2p connection
self.nodes[0].add_p2p_connection(P2PInterface())
#request mempool
self.nodes[0].p2p.send_message(msg_mempool())
self.nodes[0].p2p.wait_for_disconnect()
#mininode must be disconnected at this point
assert_equal(len(self.nodes[0].getpeerinfo()), 0)
if __name__ == '__main__':
P2PMempoolTests().main()
| 33.057143
| 73
| 0.736387
|
09bdf12c4d3027726439f2ff668c685fd262aea8
| 251
|
py
|
Python
|
Vicar/vicar_app/urls.py
|
cs-fullstack-fall-2018/django-form-post1-jpark1914
|
230ba02b30ef48bf86dd1f0797859af7c434fbaf
|
[
"Apache-2.0"
] | null | null | null |
Vicar/vicar_app/urls.py
|
cs-fullstack-fall-2018/django-form-post1-jpark1914
|
230ba02b30ef48bf86dd1f0797859af7c434fbaf
|
[
"Apache-2.0"
] | null | null | null |
Vicar/vicar_app/urls.py
|
cs-fullstack-fall-2018/django-form-post1-jpark1914
|
230ba02b30ef48bf86dd1f0797859af7c434fbaf
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
path('',views.home, name='home'),
path('game/', views.index, name='index'),
path('game/<int:pk>/',views.detail, name='game'),
path('game/add/', views.add, name='add')
]
| 25.1
| 53
| 0.621514
|
da43b01cc0df7ac2135421ffa3eafdbf0c7fd976
| 3,628
|
py
|
Python
|
PRL/genP.py
|
balasbk/game-theory
|
958e093e64799e2dd445d18bd9966251270f81e7
|
[
"MIT"
] | 1
|
2020-08-08T09:25:27.000Z
|
2020-08-08T09:25:27.000Z
|
PRL/genP.py
|
makgyver/PRL
|
5ac125bd0bb68978507e4a5a1f1df4e39f67442d
|
[
"MIT"
] | 1
|
2020-10-15T11:12:49.000Z
|
2020-10-15T11:12:49.000Z
|
PRL/genP.py
|
balasbk/game-theory
|
958e093e64799e2dd445d18bd9966251270f81e7
|
[
"MIT"
] | null | null | null |
import numpy as np
import random
class GenP(object):
"""Abstract class which representes a generic preference generator.
Every specific generator MUST inherit from this class."""
def __init__(self, X, y):
"""Initializes the preference generator.
:param X: training instances
:param y: training labels associated to the instances
:type X: bidimensional numpy.ndarray
:type y: numpy.ndarray
"""
self.X = X
self.y = y
self.n = X.shape[0]
self.labelset = set(np.unique(y))
def get_random_pref(self):
"""Returns the identifier of random preference.
:returns: a random preference
:rtype: tuple
"""
pass
def get_pref_value(self, p):
"""Retruns the concrete instantiation of a prefernce identifier.
:param p: preference identifier
:type p: tuple
:returns: a preference
:rtype: tuple(tuple(numpy.ndarray, int), tuple(numpy.ndarray, int))
"""
(ipos, ypos), (ineg, yneg) = p
return ((self.X[ipos], ypos), (self.X[ineg], yneg))
def get_all_prefs(self):
"""Returns the list of all possibile preferences.
:returns: the list of all possible preferences
:rtype: list
"""
pass
class GenMicroP(GenP):
"""Micro preference generator. A micro preference describes preferences like
(x_i, y_i) is preferred to (x_j, y_j), where (x_i, y_i) in X x Y, while (x_j, y_j)
not in X x Y. This kind of preferences are suitable for instance ranking tasks."""
def __init__(self, X, y):
GenP.__init__(self, X, y)
def get_random_pref(self):
ipos = random.randint(0, self.n-1)
ypos = self.y[ipos]
ineg = random.randint(0, self.n-1)
yneg = random.choice(list(self.labelset - set([self.y[ineg]])))
return ((ipos, ypos), (ineg, yneg))
def get_all_prefs(self):
lp = []
for i in range(self.n):
yp = self.y[i]
for j in range(self.n):
ypj = self.y[j]
for yn in (self.labelset - set([ypj])):
lp.append(((i, yp), (j, yn)))
return lp
    def __repr__(self):
        return "Micro preference generator"
class GenMacroP(GenP):
"""Macro preference generator. A macro preference describes preferences like
y_i is preferred to y_j for the instance x_i, where (x_i, y_i) in X x Y, while (x_i, y_j)
not in X x Y. This kind of preferences are suitable for label ranking tasks."""
def __init__(self, X, y):
GenP.__init__(self, X, y)
def get_random_pref(self):
ipos = random.randint(0,self.n-1)
ypos = self.y[ipos]
yneg = random.choice(list(self.labelset-set([self.y[ipos]])))
return ((ipos,ypos),(ipos,yneg))
def get_all_prefs(self):
lp = []
for i in range(self.n):
yp = self.y[i]
for yn in (self.labelset - set([yp])):
lp.append(((i, yp), (i, yn)))
return lp
    def __repr__(self):
        return "Macro preference generator"
class GenIP(GenP):
"""Instance-based preference generator. These are actually degenerate preferences that are
simple instances."""
def __init__(self, X):
self.X = X
self.n = X.shape[0]
def get_random_pref(self):
return random.randint(0, self.n-1)
def get_pref_value(self, p):
return self.X[p]
def get_all_prefs(self):
return range(self.n)
def __repr__(self):
return "Instance-based preference generator"
| 29.737705
| 94
| 0.592062
|
77c7f69a03e1d70bdae438b82a05b17ddf030eb1
| 20,503
|
py
|
Python
|
Lib/test/test_fileio.py
|
0xb8/cpython-mingw
|
07ed5a75ac275da2ad9b00e9158b9940ff49acbc
|
[
"0BSD"
] | 21
|
2019-11-21T03:44:53.000Z
|
2021-12-03T09:51:44.000Z
|
Lib/test/test_fileio.py
|
0xb8/cpython-mingw
|
07ed5a75ac275da2ad9b00e9158b9940ff49acbc
|
[
"0BSD"
] | 73
|
2021-06-19T11:08:53.000Z
|
2022-03-20T08:10:32.000Z
|
Lib/test/test_fileio.py
|
0xb8/cpython-mingw
|
07ed5a75ac275da2ad9b00e9158b9940ff49acbc
|
[
"0BSD"
] | 8
|
2021-07-14T21:55:18.000Z
|
2022-01-24T00:12:30.000Z
|
# Adapted from test_file.py by Daniel Stutzbach
import sys
import os
import io
import errno
import unittest
from array import array
from weakref import proxy
from functools import wraps
from test.support import (TESTFN, TESTFN_UNICODE, check_warnings, run_unittest,
make_bad_fd, cpython_only, swap_attr, gc_collect)
from collections import UserList
import _io # C implementation of io
import _pyio # Python implementation of io
class AutoFileTests:
# file tests for which a test file is automatically set up
def setUp(self):
self.f = self.FileIO(TESTFN, 'w')
def tearDown(self):
if self.f:
self.f.close()
os.remove(TESTFN)
def testWeakRefs(self):
# verify weak references
p = proxy(self.f)
p.write(bytes(range(10)))
self.assertEqual(self.f.tell(), p.tell())
self.f.close()
self.f = None
gc_collect() # For PyPy or other GCs.
self.assertRaises(ReferenceError, getattr, p, 'tell')
def testSeekTell(self):
self.f.write(bytes(range(20)))
self.assertEqual(self.f.tell(), 20)
self.f.seek(0)
self.assertEqual(self.f.tell(), 0)
self.f.seek(10)
self.assertEqual(self.f.tell(), 10)
self.f.seek(5, 1)
self.assertEqual(self.f.tell(), 15)
self.f.seek(-5, 1)
self.assertEqual(self.f.tell(), 10)
self.f.seek(-5, 2)
self.assertEqual(self.f.tell(), 15)
def testAttributes(self):
# verify expected attributes exist
f = self.f
self.assertEqual(f.mode, "wb")
self.assertEqual(f.closed, False)
# verify the attributes are readonly
for attr in 'mode', 'closed':
self.assertRaises((AttributeError, TypeError),
setattr, f, attr, 'oops')
def testBlksize(self):
# test private _blksize attribute
blksize = io.DEFAULT_BUFFER_SIZE
# try to get preferred blksize from stat.st_blksize, if available
if hasattr(os, 'fstat'):
fst = os.fstat(self.f.fileno())
blksize = getattr(fst, 'st_blksize', blksize)
self.assertEqual(self.f._blksize, blksize)
# verify readinto
def testReadintoByteArray(self):
self.f.write(bytes([1, 2, 0, 255]))
self.f.close()
ba = bytearray(b'abcdefgh')
with self.FileIO(TESTFN, 'r') as f:
n = f.readinto(ba)
self.assertEqual(ba, b'\x01\x02\x00\xffefgh')
self.assertEqual(n, 4)
def _testReadintoMemoryview(self):
self.f.write(bytes([1, 2, 0, 255]))
self.f.close()
m = memoryview(bytearray(b'abcdefgh'))
with self.FileIO(TESTFN, 'r') as f:
n = f.readinto(m)
self.assertEqual(m, b'\x01\x02\x00\xffefgh')
self.assertEqual(n, 4)
m = memoryview(bytearray(b'abcdefgh')).cast('H', shape=[2, 2])
with self.FileIO(TESTFN, 'r') as f:
n = f.readinto(m)
self.assertEqual(bytes(m), b'\x01\x02\x00\xffefgh')
self.assertEqual(n, 4)
def _testReadintoArray(self):
self.f.write(bytes([1, 2, 0, 255]))
self.f.close()
a = array('B', b'abcdefgh')
with self.FileIO(TESTFN, 'r') as f:
n = f.readinto(a)
self.assertEqual(a, array('B', [1, 2, 0, 255, 101, 102, 103, 104]))
self.assertEqual(n, 4)
a = array('b', b'abcdefgh')
with self.FileIO(TESTFN, 'r') as f:
n = f.readinto(a)
self.assertEqual(a, array('b', [1, 2, 0, -1, 101, 102, 103, 104]))
self.assertEqual(n, 4)
a = array('I', b'abcdefgh')
with self.FileIO(TESTFN, 'r') as f:
n = f.readinto(a)
self.assertEqual(a, array('I', b'\x01\x02\x00\xffefgh'))
self.assertEqual(n, 4)
def testWritelinesList(self):
l = [b'123', b'456']
self.f.writelines(l)
self.f.close()
self.f = self.FileIO(TESTFN, 'rb')
buf = self.f.read()
self.assertEqual(buf, b'123456')
def testWritelinesUserList(self):
l = UserList([b'123', b'456'])
self.f.writelines(l)
self.f.close()
self.f = self.FileIO(TESTFN, 'rb')
buf = self.f.read()
self.assertEqual(buf, b'123456')
def testWritelinesError(self):
self.assertRaises(TypeError, self.f.writelines, [1, 2, 3])
self.assertRaises(TypeError, self.f.writelines, None)
self.assertRaises(TypeError, self.f.writelines, "abc")
def test_none_args(self):
self.f.write(b"hi\nbye\nabc")
self.f.close()
self.f = self.FileIO(TESTFN, 'r')
self.assertEqual(self.f.read(None), b"hi\nbye\nabc")
self.f.seek(0)
self.assertEqual(self.f.readline(None), b"hi\n")
self.assertEqual(self.f.readlines(None), [b"bye\n", b"abc"])
def test_reject(self):
self.assertRaises(TypeError, self.f.write, "Hello!")
def testRepr(self):
self.assertEqual(repr(self.f),
"<%s.FileIO name=%r mode=%r closefd=True>" %
(self.modulename, self.f.name, self.f.mode))
del self.f.name
self.assertEqual(repr(self.f),
"<%s.FileIO fd=%r mode=%r closefd=True>" %
(self.modulename, self.f.fileno(), self.f.mode))
self.f.close()
self.assertEqual(repr(self.f),
"<%s.FileIO [closed]>" % (self.modulename,))
def testReprNoCloseFD(self):
fd = os.open(TESTFN, os.O_RDONLY)
try:
with self.FileIO(fd, 'r', closefd=False) as f:
self.assertEqual(repr(f),
"<%s.FileIO name=%r mode=%r closefd=False>" %
(self.modulename, f.name, f.mode))
finally:
os.close(fd)
def testRecursiveRepr(self):
# Issue #25455
with swap_attr(self.f, 'name', self.f):
with self.assertRaises(RuntimeError):
repr(self.f) # Should not crash
def testErrors(self):
f = self.f
self.assertFalse(f.isatty())
self.assertFalse(f.closed)
#self.assertEqual(f.name, TESTFN)
self.assertRaises(ValueError, f.read, 10) # Open for reading
f.close()
self.assertTrue(f.closed)
f = self.FileIO(TESTFN, 'r')
self.assertRaises(TypeError, f.readinto, "")
self.assertFalse(f.closed)
f.close()
self.assertTrue(f.closed)
def testMethods(self):
methods = ['fileno', 'isatty', 'seekable', 'readable', 'writable',
'read', 'readall', 'readline', 'readlines',
'tell', 'truncate', 'flush']
self.f.close()
self.assertTrue(self.f.closed)
for methodname in methods:
method = getattr(self.f, methodname)
# should raise on closed file
self.assertRaises(ValueError, method)
self.assertRaises(TypeError, self.f.readinto)
self.assertRaises(ValueError, self.f.readinto, bytearray(1))
self.assertRaises(TypeError, self.f.seek)
self.assertRaises(ValueError, self.f.seek, 0)
self.assertRaises(TypeError, self.f.write)
self.assertRaises(ValueError, self.f.write, b'')
self.assertRaises(TypeError, self.f.writelines)
self.assertRaises(ValueError, self.f.writelines, b'')
def testOpendir(self):
# Issue 3703: opening a directory should fill the errno
        # Windows always returns "[Errno 13]: Permission denied"
# Unix uses fstat and returns "[Errno 21]: Is a directory"
try:
self.FileIO('.', 'r')
except OSError as e:
self.assertNotEqual(e.errno, 0)
self.assertEqual(e.filename, ".")
else:
self.fail("Should have raised OSError")
@unittest.skipIf(os.name == 'nt', "test only works on a POSIX-like system")
def testOpenDirFD(self):
fd = os.open('.', os.O_RDONLY)
with self.assertRaises(OSError) as cm:
self.FileIO(fd, 'r')
os.close(fd)
self.assertEqual(cm.exception.errno, errno.EISDIR)
#A set of functions testing that we get expected behaviour if someone has
#manually closed the internal file descriptor. First, a decorator:
def ClosedFD(func):
@wraps(func)
def wrapper(self):
#forcibly close the fd before invoking the problem function
f = self.f
os.close(f.fileno())
try:
func(self, f)
finally:
try:
self.f.close()
except OSError:
pass
return wrapper
def ClosedFDRaises(func):
@wraps(func)
def wrapper(self):
#forcibly close the fd before invoking the problem function
f = self.f
os.close(f.fileno())
try:
func(self, f)
except OSError as e:
self.assertEqual(e.errno, errno.EBADF)
else:
self.fail("Should have raised OSError")
finally:
try:
self.f.close()
except OSError:
pass
return wrapper
@ClosedFDRaises
def testErrnoOnClose(self, f):
f.close()
@ClosedFDRaises
def testErrnoOnClosedWrite(self, f):
f.write(b'a')
@ClosedFDRaises
def testErrnoOnClosedSeek(self, f):
f.seek(0)
@ClosedFDRaises
def testErrnoOnClosedTell(self, f):
f.tell()
@ClosedFDRaises
def testErrnoOnClosedTruncate(self, f):
f.truncate(0)
@ClosedFD
def testErrnoOnClosedSeekable(self, f):
f.seekable()
@ClosedFD
def testErrnoOnClosedReadable(self, f):
f.readable()
@ClosedFD
def testErrnoOnClosedWritable(self, f):
f.writable()
@ClosedFD
def testErrnoOnClosedFileno(self, f):
f.fileno()
@ClosedFD
def testErrnoOnClosedIsatty(self, f):
self.assertEqual(f.isatty(), False)
def ReopenForRead(self):
try:
self.f.close()
except OSError:
pass
self.f = self.FileIO(TESTFN, 'r')
os.close(self.f.fileno())
return self.f
@ClosedFDRaises
def testErrnoOnClosedRead(self, f):
f = self.ReopenForRead()
f.read(1)
@ClosedFDRaises
def testErrnoOnClosedReadall(self, f):
f = self.ReopenForRead()
f.readall()
@ClosedFDRaises
def testErrnoOnClosedReadinto(self, f):
f = self.ReopenForRead()
a = array('b', b'x'*10)
f.readinto(a)
class CAutoFileTests(AutoFileTests, unittest.TestCase):
FileIO = _io.FileIO
modulename = '_io'
class PyAutoFileTests(AutoFileTests, unittest.TestCase):
FileIO = _pyio.FileIO
modulename = '_pyio'
class OtherFileTests:
def testAbles(self):
try:
f = self.FileIO(TESTFN, "w")
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
f.close()
f = self.FileIO(TESTFN, "r")
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
f.close()
f = self.FileIO(TESTFN, "a+")
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.assertEqual(f.isatty(), False)
f.close()
if sys.platform != "win32":
try:
f = self.FileIO("/dev/tty", "a")
except OSError:
# When run in a cron job there just aren't any
# ttys, so skip the test. This also handles other
# OS'es that don't support /dev/tty.
pass
else:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
if sys.platform != "darwin" and \
'bsd' not in sys.platform and \
not sys.platform.startswith(('sunos', 'aix')):
# Somehow /dev/tty appears seekable on some BSDs
self.assertEqual(f.seekable(), False)
self.assertEqual(f.isatty(), True)
f.close()
finally:
os.unlink(TESTFN)
def testInvalidModeStrings(self):
# check invalid mode strings
for mode in ("", "aU", "wU+", "rw", "rt"):
try:
f = self.FileIO(TESTFN, mode)
except ValueError:
pass
else:
f.close()
self.fail('%r is an invalid file mode' % mode)
def testModeStrings(self):
# test that the mode attribute is correct for various mode strings
# given as init args
try:
for modes in [('w', 'wb'), ('wb', 'wb'), ('wb+', 'rb+'),
('w+b', 'rb+'), ('a', 'ab'), ('ab', 'ab'),
('ab+', 'ab+'), ('a+b', 'ab+'), ('r', 'rb'),
('rb', 'rb'), ('rb+', 'rb+'), ('r+b', 'rb+')]:
# read modes are last so that TESTFN will exist first
with self.FileIO(TESTFN, modes[0]) as f:
self.assertEqual(f.mode, modes[1])
finally:
if os.path.exists(TESTFN):
os.unlink(TESTFN)
def testUnicodeOpen(self):
# verify repr works for unicode too
f = self.FileIO(str(TESTFN), "w")
f.close()
os.unlink(TESTFN)
def testBytesOpen(self):
# Opening a bytes filename
try:
fn = TESTFN.encode("ascii")
except UnicodeEncodeError:
self.skipTest('could not encode %r to ascii' % TESTFN)
f = self.FileIO(fn, "w")
try:
f.write(b"abc")
f.close()
with open(TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abc")
finally:
os.unlink(TESTFN)
@unittest.skipIf(sys.getfilesystemencoding() != 'utf-8',
"test only works for utf-8 filesystems")
def testUtf8BytesOpen(self):
# Opening a UTF-8 bytes filename
try:
fn = TESTFN_UNICODE.encode("utf-8")
except UnicodeEncodeError:
self.skipTest('could not encode %r to utf-8' % TESTFN_UNICODE)
f = self.FileIO(fn, "w")
try:
f.write(b"abc")
f.close()
with open(TESTFN_UNICODE, "rb") as f:
self.assertEqual(f.read(), b"abc")
finally:
os.unlink(TESTFN_UNICODE)
def testConstructorHandlesNULChars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(ValueError, self.FileIO, fn_with_NUL, 'w')
self.assertRaises(ValueError, self.FileIO, bytes(fn_with_NUL, 'ascii'), 'w')
def testInvalidFd(self):
self.assertRaises(ValueError, self.FileIO, -10)
self.assertRaises(OSError, self.FileIO, make_bad_fd())
if sys.platform == 'win32':
import msvcrt
self.assertRaises(OSError, msvcrt.get_osfhandle, make_bad_fd())
def testBadModeArgument(self):
# verify that we get a sensible error message for bad mode argument
bad_mode = "qwerty"
try:
f = self.FileIO(TESTFN, bad_mode)
except ValueError as msg:
if msg.args[0] != 0:
s = str(msg)
if TESTFN in s or bad_mode not in s:
self.fail("bad error message for invalid mode: %s" % s)
# if msg.args[0] == 0, we're probably on Windows where there may be
# no obvious way to discover why open() failed.
else:
f.close()
self.fail("no error for invalid mode: %s" % bad_mode)
def testTruncate(self):
f = self.FileIO(TESTFN, 'w')
f.write(bytes(bytearray(range(10))))
self.assertEqual(f.tell(), 10)
f.truncate(5)
self.assertEqual(f.tell(), 10)
self.assertEqual(f.seek(0, io.SEEK_END), 5)
f.truncate(15)
self.assertEqual(f.tell(), 5)
self.assertEqual(f.seek(0, io.SEEK_END), 15)
f.close()
def testTruncateOnWindows(self):
def bug801631():
# SF bug <http://www.python.org/sf/801631>
# "file.truncate fault on windows"
f = self.FileIO(TESTFN, 'w')
f.write(bytes(range(11)))
f.close()
f = self.FileIO(TESTFN,'r+')
data = f.read(5)
if data != bytes(range(5)):
self.fail("Read on file opened for update failed %r" % data)
if f.tell() != 5:
self.fail("File pos after read wrong %d" % f.tell())
f.truncate()
if f.tell() != 5:
self.fail("File pos after ftruncate wrong %d" % f.tell())
f.close()
size = os.path.getsize(TESTFN)
if size != 5:
self.fail("File size after ftruncate wrong %d" % size)
try:
bug801631()
finally:
os.unlink(TESTFN)
def testAppend(self):
try:
f = open(TESTFN, 'wb')
f.write(b'spam')
f.close()
f = open(TESTFN, 'ab')
f.write(b'eggs')
f.close()
f = open(TESTFN, 'rb')
d = f.read()
f.close()
self.assertEqual(d, b'spameggs')
finally:
try:
os.unlink(TESTFN)
except:
pass
def testInvalidInit(self):
self.assertRaises(TypeError, self.FileIO, "1", 0, 0)
def testWarnings(self):
with check_warnings(quiet=True) as w:
self.assertEqual(w.warnings, [])
self.assertRaises(TypeError, self.FileIO, [])
self.assertEqual(w.warnings, [])
self.assertRaises(ValueError, self.FileIO, "/some/invalid/name", "rt")
self.assertEqual(w.warnings, [])
def testUnclosedFDOnException(self):
class MyException(Exception): pass
class MyFileIO(self.FileIO):
def __setattr__(self, name, value):
if name == "name":
raise MyException("blocked setting name")
return super(MyFileIO, self).__setattr__(name, value)
fd = os.open(__file__, os.O_RDONLY)
self.assertRaises(MyException, MyFileIO, fd)
os.close(fd) # should not raise OSError(EBADF)
class COtherFileTests(OtherFileTests, unittest.TestCase):
FileIO = _io.FileIO
modulename = '_io'
@cpython_only
def testInvalidFd_overflow(self):
# Issue 15989
import _testcapi
self.assertRaises(TypeError, self.FileIO, _testcapi.INT_MAX + 1)
self.assertRaises(TypeError, self.FileIO, _testcapi.INT_MIN - 1)
def test_open_code(self):
# Check that the default behaviour of open_code matches
# open("rb")
with self.FileIO(__file__, "rb") as f:
expected = f.read()
with _io.open_code(__file__) as f:
actual = f.read()
self.assertEqual(expected, actual)
class PyOtherFileTests(OtherFileTests, unittest.TestCase):
FileIO = _pyio.FileIO
modulename = '_pyio'
def test_open_code(self):
# Check that the default behaviour of open_code matches
# open("rb")
with self.FileIO(__file__, "rb") as f:
expected = f.read()
with check_warnings(quiet=True) as w:
# Always test _open_code_with_warning
with _pyio._open_code_with_warning(__file__) as f:
actual = f.read()
self.assertEqual(expected, actual)
self.assertNotEqual(w.warnings, [])
def test_main():
# Historically, these tests have been sloppy about removing TESTFN.
# So get rid of it no matter what.
try:
run_unittest(CAutoFileTests, PyAutoFileTests,
COtherFileTests, PyOtherFileTests)
finally:
if os.path.exists(TESTFN):
os.unlink(TESTFN)
if __name__ == '__main__':
test_main()
| 33.069355
| 84
| 0.549724
|
4b7d6c6529b996ad22ab2e22202dde4e607a3606
| 545
|
py
|
Python
|
clinicaltrials/frontend/migrations/0020_auto_20180207_1355.py
|
chadmiller/clinicaltrials-act-tracker
|
d16f5ff7b1fde673e7b00cd674666a19b19bf092
|
[
"MIT"
] | 13
|
2018-02-20T12:48:42.000Z
|
2022-03-09T01:55:23.000Z
|
clinicaltrials/frontend/migrations/0020_auto_20180207_1355.py
|
chadmiller/clinicaltrials-act-tracker
|
d16f5ff7b1fde673e7b00cd674666a19b19bf092
|
[
"MIT"
] | 134
|
2018-02-19T08:42:54.000Z
|
2021-12-13T19:50:15.000Z
|
clinicaltrials/frontend/migrations/0020_auto_20180207_1355.py
|
chadmiller/clinicaltrials-act-tracker
|
d16f5ff7b1fde673e7b00cd674666a19b19bf092
|
[
"MIT"
] | 3
|
2018-03-10T19:56:27.000Z
|
2019-05-03T15:29:30.000Z
|
# Generated by Django 2.0 on 2018-02-07 13:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('frontend', '0019_auto_20180122_1147'),
]
operations = [
migrations.AlterField(
model_name='trial',
name='status',
field=models.CharField(choices=[('overdue', 'Overdue'), ('ongoing', 'Ongoing'), ('reported', 'Reported'), ('qa', 'Under QA'), ('reported-late', 'Reported (late)')], default='ongoing', max_length=20),
),
]
| 28.684211
| 211
| 0.601835
|
4764b6be7af692808a91fc22599f83c008da6aa5
| 1,208
|
py
|
Python
|
yt_p2p/particles.py
|
brittonsmith/yt_p2p
|
f85e5033cd2db8fc0bb3a2c5f7a62e3c78666d51
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
yt_p2p/particles.py
|
brittonsmith/yt_p2p
|
f85e5033cd2db8fc0bb3a2c5f7a62e3c78666d51
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
yt_p2p/particles.py
|
brittonsmith/yt_p2p
|
f85e5033cd2db8fc0bb3a2c5f7a62e3c78666d51
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
"""
particle stuff
"""
#-----------------------------------------------------------------------------
# Copyright (c) Britton Smith <brittonsmith@gmail.com>. All rights reserved.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from yt.data_objects.particle_filters import \
add_particle_filter
from yt.utilities.logger import ytLogger as mylog
def _pop3(pfilter, data):
return ((data['particle_type'] == 5) & (data['particle_mass'].in_units('Msun') < 1e-10)) \
| ((data['particle_type'] == 1) & (data['creation_time'] > 0) & \
(data['particle_mass'].in_units('Msun') > 1)) \
| ((data['particle_type'] == 5) & (data['particle_mass'].in_units('Msun') > 1e-3))
add_particle_filter(
"pop3", function=_pop3, filtered_type="all",
requires=["particle_type", "creation_time", "particle_mass"])
def add_p2p_particle_filters(ds):
pfilters = ["pop3"]
for pfilter in pfilters:
if not ds.add_particle_filter(pfilter):
mylog.warn("Failed to add filter: %s." % pfilter)
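# A minimal sketch (not part of the module; the snapshot path is hypothetical):
# typical use of the "pop3" filter registered above.
def _example_usage(snapshot_path="DD0046/DD0046"):
    import yt
    ds = yt.load(snapshot_path)
    add_p2p_particle_filters(ds)
    ad = ds.all_data()
    # masses of the filtered Pop III star particles
    return ad["pop3", "particle_mass"].to("Msun")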
| 34.514286
| 94
| 0.580298
|
53e756cf0d973541ad093d47e6b47dca95145b11
| 3,231
|
py
|
Python
|
pages/login_page.py
|
KKashpovski/test_moodle_project
|
8cd0a53fffe797c47d3b14cc3300c610467432e3
|
[
"Apache-2.0"
] | null | null | null |
pages/login_page.py
|
KKashpovski/test_moodle_project
|
8cd0a53fffe797c47d3b14cc3300c610467432e3
|
[
"Apache-2.0"
] | null | null | null |
pages/login_page.py
|
KKashpovski/test_moodle_project
|
8cd0a53fffe797c47d3b14cc3300c610467432e3
|
[
"Apache-2.0"
] | null | null | null |
"""Интерактивное поведение страницы авторизации."""
import logging
from selenium.webdriver.remote.webelement import WebElement
from locators.admin_page_locators import CoursePageLocators
from models.auth import AuthData
from pages.base_page import BasePage
from locators.login_page_locators import LoginPageLocators
from locators.personal_data_page_locators import PersonalDataPageLocators
logger = logging.getLogger("moodle")
class LoginPage(BasePage):
def is_auth(self):
self.find_element(LoginPageLocators.FORM)
element = self.find_elements(LoginPageLocators.USER_BUTTON)
if len(element) > 0:
return True
return False
def confirm_exit_window(self):
self.find_element(LoginPageLocators.FORM)
element = self.find_elements(LoginPageLocators.CONFIRM_EXIT_BUTTON)
if len(element) > 0:
return True
return False
def email_input(self) -> WebElement:
return self.find_element(LoginPageLocators.LOGIN)
def password_input(self) -> WebElement:
return self.find_element(LoginPageLocators.PASSWORD)
def submit_button(self) -> WebElement:
return self.find_element(LoginPageLocators.SUBMIT)
def user_menu(self) -> WebElement:
return self.find_element(LoginPageLocators.USER_MENU)
def exit(self) -> WebElement:
return self.find_element(LoginPageLocators.EXIT)
def confirm_exit(self):
return self.find_element(LoginPageLocators.CONFIRM_EXIT_BUTTON)
def auth(self, data: AuthData):
        logger.info(f'User email is "{data.login}", user password is "{data.password}"')
if self.is_auth():
self.click_element(self.user_menu())
self.click_element(self.exit())
if self.confirm_exit_window():
self.click_element(self.confirm_exit())
self.fill_element(self.email_input(), data.login)
self.fill_element(self.password_input(), data.password)
self.click_element(self.submit_button())
def user_menu_settings(self) -> WebElement:
return self.find_element(LoginPageLocators.USER_MENU_SETTINGS)
def go_to_editing_personal_data(self):
self.click_element(self.user_menu())
self.click_element(self.user_menu_settings())
self.click_element(self.find_element(PersonalDataPageLocators.EDIT_INFO))
def admin_menu(self) -> WebElement:
return self.find_element(LoginPageLocators.ADMIN_BUTTON)
def select_course_menu(self) -> WebElement:
return self.find_element(LoginPageLocators.ADMIN_BUTTON)
def go_to_editing_course_data(self):
self.click_element(self.admin_menu())
self.click_element(self.select_course_menu())
self.click_element(self.find_element(CoursePageLocators.COURSE_TUB))
self.click_element(self.find_element(CoursePageLocators.COURSE_CREATE_TUB))
def auth_login_error(self) -> str:
return self.find_element(LoginPageLocators.LOGIN_ERROR).text
def sign_out(self):
if self.is_auth():
self.click_element(self.user_menu())
self.click_element(self.exit())
if self.confirm_exit_window():
self.click_element(self.confirm_exit())
| 36.303371
| 83
| 0.721139
|
fb42bba5fb242dc48bc3c8645cd47c195df3e831
| 6,017
|
py
|
Python
|
acdc_nn/cli.py
|
compbiomed-unito/acdc-nn
|
0800a5904c36302f19e48e2d2f7ddae9686f3366
|
[
"MIT"
] | 2
|
2021-07-13T21:41:39.000Z
|
2022-01-27T23:51:10.000Z
|
acdc_nn/cli.py
|
compbiomed-unito/acdc-nn
|
0800a5904c36302f19e48e2d2f7ddae9686f3366
|
[
"MIT"
] | 1
|
2021-09-15T15:53:39.000Z
|
2021-09-15T15:53:39.000Z
|
acdc_nn/cli.py
|
compbiomed-unito/acdc-nn
|
0800a5904c36302f19e48e2d2f7ddae9686f3366
|
[
"MIT"
] | 4
|
2021-07-13T21:41:40.000Z
|
2022-01-27T16:41:49.000Z
|
from acdc_nn import acdc_nn
from acdc_nn import util
import ddgun
import click
from warnings import warn
import functools
class Substitution(click.ParamType):
'''Click parameter class for substitutions'''
name = 'amino acid substitution'
def convert(self, value, param, ctx):
if isinstance(value, ddgun.Substitution):
return value
try:
return ddgun.Substitution.parse(value)
except Exception as e:
self.fail(f"{value!r} is not a valid {self.name}", param, ctx)
help_notes = '''Notes:
Mutations are written as XNY, meaning that the residue X at position N changes to Y. X and Y are given as a one letter amino acid code and
N is 1-based and refers to the PDB numbering of the relevant chain, and not the position on the sequence.
PDB and profile files will be automatically decompressed (by gzip) if the paths end with ".gz".
'''
@click.group(epilog=help_notes)
def cli():
pass
@cli.command(epilog=help_notes) # TODO add option for the weights
@click.argument("sub", type=Substitution())
@click.argument("profile", type=click.Path(exists=True, readable=True)) # FIXME use File object
def seq(sub, profile):
'''Predict DDG of SUB from the protein PROFILE.
\b
SUB is an amino acid substitution (e.g. Q339P).
PROFILE is the path to a protein profile file.
Uses a trained ACDC-NN Seq that does not require protein structural information.'''
wt_prof = ddgun.Profile(profile)
net = acdc_nn.ACDCSeq()
ddg = net.predict(sub, wt_prof)
click.echo(ddg)
@cli.command(epilog=help_notes)
@click.argument("sub", type=Substitution())
@click.argument("profile", type=click.Path(exists=True, readable=True))
@click.argument("pdb", type=click.Path(exists=True, readable=True))
@click.argument("chain")
#@click.option('--inverse', type=(Substitution(), click.Path(exists=True, readable=True), click.Path(exists=True, readable=True), str), help=')
def struct(sub, profile, pdb, chain): #FIXME add inverse mut
'''Predict DDG of SUB from the protein PROFILE and PDB structure.
\b
SUB is an amino acid substitution (e.g. Q339P).
PROFILE is the path to a protein profile file.
PDB is the path to the protein structure in PDB file.
CHAIN is the PDB chain to be used.
Uses a trained ACDC-NN that requires protein structural information.'''
wt_prof = util.getProfile(profile) #FIXME use Profile
wt_struct = acdc_nn.Structure(pdb, chain)
net = acdc_nn.ACDC3D()
ddg = net.predict(str(sub), wt_prof, wt_struct)
click.echo(ddg)
@cli.command(epilog=help_notes)
@click.argument("sub", type=Substitution())
@click.argument("profile", type=click.Path(exists=True, readable=True))
@click.argument("pdb", type=click.Path(exists=True, readable=True))
@click.argument("chain")
@click.argument("isub", type=Substitution())
@click.argument("iprofile", type=click.Path(exists=True, readable=True))
@click.argument("ipdb", type=click.Path(exists=True, readable=True))
@click.argument("ichain")
#@click.option('--inverse', type=(Substitution(), click.Path(exists=True, readable=True), click.Path(exists=True, readable=True), str), help=')
def istruct(sub, profile, pdb, chain, isub, iprofile, ipdb, ichain):
'''Predict DDG using both the wild-type and mutated protein structures.
\b
SUB is an amino acid substitution (e.g. Q339P).
PROFILE is the path to a protein profile file.
PDB is the path to the protein structure in PDB file.
CHAIN is the PDB chain to be used.
ISUB, IPROFILE, IPDB and ICHAIN are the same for the mutated protein.
Uses a trained ACDC-NN that requires protein structural information.'''
wt_prof = util.getProfile(profile) #FIXME use Profile
wt_struct = acdc_nn.Structure(pdb, chain)
mt_prof = util.getProfile(iprofile) #FIXME use Profile
mt_struct = acdc_nn.Structure(ipdb, ichain)
net = acdc_nn.ACDC3D()
ddg = net.predict(str(sub), wt_prof, wt_struct, str(isub), mt_prof, mt_struct)
click.echo(ddg)
# caching for functions
@functools.lru_cache(10)
def load_nn(seq):
return acdc_nn.ACDCSeq() if seq else acdc_nn.ACDC3D()
@functools.lru_cache(100)
def load_prot_seq(profile):
return ddgun.Profile(profile)
@functools.lru_cache(100)
def load_prot_3d(profile, pdb, chain):
return util.getProfile(profile), acdc_nn.Structure(pdb, chain) #FIXME use Profile
@cli.command(epilog=help_notes)
@click.argument("subs", type=click.File())
def batch(subs):
'''Predict DDG of SUBS using available information.
SUBS is a table containing one amino acid substitution per row and paths to protein profiles and optionally protein structure.
\b
Each row can have 2, 4 or 8 fields of tab separated values that are interpreted with the following schema.
# \tPredictor \tFields
2 \tACDC-NN Seq \tSUB PROFILE
4 \tACDC-NN \tSUB PROFILE PDB CHAIN
8 \tACDC-NN \tWT-SUB WT-PROFILE WT-PDB WT-CHAIN MT-SUB MT-PROFILE MT-PDB MT-CHAIN
For rows with 2 fields, that is, without structural information, the sequence-based ACDC-NN Seq predictor is used. For rows with 4 or 8 fields, the structure-based ACDC-NN is used. Outputs one DDG value for each table row.
'''
for row, line in enumerate(subs, 1):
fields = line.rstrip('\n\r').split('\t')
# detect how many fields are available
null_fields = [f in {'', '.', 'NA', 'na'} for f in fields]
for i, nf in enumerate(null_fields):
if nf:
n = i
break
else:
n = len(fields)
if n == 2: # only profile
seq = True
pargs = ddgun.Substitution.parse(fields[0]), load_prot_seq(fields[1])
elif n == 4 or n == 8: # also structure
seq = False
pargs = (
str(ddgun.Substitution.parse(fields[0])),
*load_prot_3d(*fields[1:4]))
if n == 8: # also inverse substitution
pargs = (
*pargs,
str(ddgun.Substitution.parse(fields[4])),
*load_prot_3d(*fields[5:8]))
else:
raise ValueError(f"found {n} fields at line {row}: fields must be 2, 4 or 8")
		# check that there are no non-missing values after the first missing field
for i, nf in enumerate(null_fields[n:], n):
if not nf:
warn(f"found value at column {i} after missing value at column {n}, at line {row}")
ddg = load_nn(seq=seq).predict(*pargs)
click.echo(ddg)
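# A minimal sketch (paths and substitutions are hypothetical, not part of the
# package): writing an input table for the `batch` command above. A 2-field row
# is scored with ACDC-NN Seq, a 4-field row with the structure-based ACDC-NN.
def _write_example_batch_file(path="subs.tsv"):
	rows = (
		"Q339P\tprofiles/protein.prof\n"                              # 2 fields
		"Q339P\tprofiles/protein.prof\tstructures/protein.pdb\tA\n"   # 4 fields
	)
	with open(path, "w") as fh:
		fh.write(rows)
# The file can then be passed to the group defined above, e.g. cli(["batch", "subs.tsv"]),
# or through whatever console script the package installs for `cli`.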
| 36.689024
| 222
| 0.728436
|
078e2dd258178612182da452d7cb7c1bbac1f9a1
| 5,985
|
py
|
Python
|
openmm_ramd/examples/hsp90_ramd_example_api.py
|
seekrcentral/openmm_ramd
|
8b0f5094f5c2cfd9d6b77132cc2eaf10d3513d7e
|
[
"MIT"
] | null | null | null |
openmm_ramd/examples/hsp90_ramd_example_api.py
|
seekrcentral/openmm_ramd
|
8b0f5094f5c2cfd9d6b77132cc2eaf10d3513d7e
|
[
"MIT"
] | null | null | null |
openmm_ramd/examples/hsp90_ramd_example_api.py
|
seekrcentral/openmm_ramd
|
8b0f5094f5c2cfd9d6b77132cc2eaf10d3513d7e
|
[
"MIT"
] | null | null | null |
"""
This sample script provides a template that one can use to run their own
RAMD simulations.
"""
import time
from sys import stdout
import simtk.openmm.app as app
import simtk.openmm as mm
import simtk.unit as unit
import parmed
import mdtraj
import numpy as np
import openmm_ramd.base as base
from openmm_ramd import openmm_ramd
prmtop_filename = "../data/hsp90_INH.prmtop"
input_pdb_file = "../data/hsp90_INH.pdb"
# Output equilibration trajectory
trajectory_filename = "ramd_trajectory.pdb"
# The interval between updates to the equilibration trajectory
steps_per_trajectory_update = 50000
# Whether to minimize
minimize = True
# The total number of RAMD steps to take
num_steps = 100000000 # 200 nanoseconds
# The interval between energy printed to standard output
steps_per_energy_update = 300000
# time step of simulation
time_step = 0.002 * unit.picoseconds
# Enter the atom indices whose center of mass defines the receptor binding site
rec_indices = [569, 583, 605, 617, 1266, 1292, 1299, 1374, 1440, 1459, 1499,
1849, 1872, 1892, 2256, 2295, 2352, 2557]
# Indices for VMD selection
# 569 583 605 617 1266 1292 1299 1374 1440 1459 1499 1849 1872 1892 2256 2295 2352 2557
# Enter the atom indices of the ligand molecule
lig_indices = [3259, 3260, 3261, 3262, 3263, 3264, 3265, 3266, 3267, 3268,
3269, 3270, 3271, 3272, 3273, 3274, 3275, 3276, 3277, 3278,
3279, 3280, 3281, 3282, 3283, 3284, 3285, 3286, 3287, 3288]
# Indices for VMD selection
# 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288
# Magnitude of the random acceleration (RAMD) force applied between the
# centers of mass of the ligand and the binding site
ramd_force_magnitude = 14.0 * unit.kilocalories_per_mole / unit.angstrom
# simulation initial and target temperature
temperature = 298.15 * unit.kelvin
# If constant pressure is desired
constant_pressure = True
target_pressure = 1.0 * unit.bar
# Define which GPU to use
cuda_index = "0"
# Nonbonded cutoff
nonbonded_cutoff = 1.0 * unit.nanometer
# The interval between RAMD force evaluations and updates
steps_per_RAMD_update = 50
RAMD_cutoff_distance = 0.0025 * unit.nanometer
RAMD_max_distance = 1.5 * unit.nanometer
#starting_ligand_site_distance = get_site_ligand_distance(
# input_pdb_file, rec_indices, lig_indices)
#print("Starting ligand-site distance:", starting_ligand_site_distance)
# Modify target_distance if you want the ligand to be pulled to a different
# distance. For example:
# target_distance = 0.6 * unit.nanometers
#target_distance = starting_ligand_site_distance
########################################################
# DO NOT MODIFY BELOW UNLESS YOU KNOW WHAT YOU'RE DOING
########################################################
prmtop = app.AmberPrmtopFile(prmtop_filename)
mypdb = app.PDBFile(input_pdb_file)
pdb_parmed = parmed.load_file(input_pdb_file)
assert pdb_parmed.box_vectors is not None, "No box vectors "\
"found in {}. ".format(input_pdb_file) \
+ "Box vectors for an anchor must be defined with a CRYST "\
"line within the PDB file."
box_vectors = pdb_parmed.box_vectors
system = prmtop.createSystem(nonbondedMethod=app.PME, nonbondedCutoff=nonbonded_cutoff,
constraints=app.HBonds)
if constant_pressure:
barostat = mm.MonteCarloBarostat(target_pressure, temperature, 25)
system.addForce(barostat)
integrator = mm.LangevinIntegrator(temperature, 1/unit.picosecond, time_step)
platform = mm.Platform.getPlatformByName('CUDA')
properties = {"CudaDeviceIndex": cuda_index, "CudaPrecision": "mixed"}
#simulation = app.Simulation(prmtop.topology, system, integrator, platform, properties)
simulation = openmm_ramd.RAMDSimulation(
prmtop.topology, system, integrator, ramd_force_magnitude, lig_indices,
rec_indices, platform, properties)
simulation.context.setPositions(mypdb.positions)
simulation.context.setPeriodicBoxVectors(*box_vectors)
if minimize:
simulation.minimizeEnergy()
simulation.context.setVelocitiesToTemperature(temperature)
simulation.reporters.append(app.StateDataReporter(stdout, steps_per_energy_update, step=True,
potentialEnergy=True, temperature=True, volume=True))
pdb_reporter = app.PDBReporter(trajectory_filename, steps_per_trajectory_update)
simulation.reporters.append(pdb_reporter)
new_com = base.get_ligand_com(system, mypdb.positions, lig_indices)
start_time = time.time()
counter = 0
while counter < num_steps:
old_com = new_com
simulation.step(steps_per_RAMD_update)
state = simulation.context.getState(getPositions = True)
positions = state.getPositions()
new_com = base.get_ligand_com(system, positions, lig_indices)
com_com_distance = np.linalg.norm(old_com.value_in_unit(unit.nanometers) \
- new_com.value_in_unit(unit.nanometers))
lig_rec_distance = base.get_ligand_receptor_distance(system, positions, lig_indices, rec_indices)
if counter % 5000 == 0:
print("step:", counter, "lig_rec_distance:", lig_rec_distance)
if com_com_distance*unit.nanometers < RAMD_cutoff_distance:
print("recomputing force at step:", counter)
simulation.recompute_RAMD_force()
counter += steps_per_RAMD_update
if lig_rec_distance > RAMD_max_distance:
print("max distance exceeded at step:", counter)
break
total_time = time.time() - start_time
simulation_in_ns = counter * time_step.value_in_unit(unit.picoseconds) * 1e-3
total_time_in_days = total_time / (86400.0)
ns_per_day = simulation_in_ns / total_time_in_days
print("RAMD benchmark:", ns_per_day, "ns/day")
#end_distance = get_site_ligand_distance(output_pdb_file, rec_indices,
# lig_indices)
#print("Final ligand-site distance:", end_distance)
| 36.493902
| 151
| 0.738012
|
6dc2b66f2e19e5091513030a0aef58c85aeb6636
| 3,697
|
py
|
Python
|
tests/test_grism.py
|
ucl-exoplanets/wayne
|
48fd07588cbbab6f5a32038455e36d7fc6b89625
|
[
"MIT"
] | 7
|
2017-05-30T09:01:50.000Z
|
2019-04-05T05:46:23.000Z
|
tests/test_grism.py
|
ucl-exoplanets/wayne
|
48fd07588cbbab6f5a32038455e36d7fc6b89625
|
[
"MIT"
] | 1
|
2018-06-07T17:31:19.000Z
|
2018-06-07T19:38:27.000Z
|
tests/test_grism.py
|
ucl-exoplanets/wayne
|
48fd07588cbbab6f5a32038455e36d7fc6b89625
|
[
"MIT"
] | 2
|
2018-04-30T23:16:22.000Z
|
2020-09-30T18:12:47.000Z
|
import unittest
import numpy as np
import numpy.testing
from wayne import grism
class Test_G141_Grism(unittest.TestCase):
def setUp(self):
self.g141_grism = grism.G141()
def test__init__(self):
grism.G141() # pass if no exceptions
def test_get_pixel_wl(self):
self.assertAlmostEqual(self.g141_grism.get_pixel_wl(50, 50, 100, 50), 11222.2, 1)
self.assertAlmostEqual(self.g141_grism.get_pixel_wl(50, 50, 200, 50), 15748.6, 1)
self.assertAlmostEqual(self.g141_grism.get_pixel_wl(50, 50, 100, 51), 11222.7, 1)
self.assertAlmostEqual(self.g141_grism.get_pixel_wl(50, 60, 100, 50), 11218.8, 1)
self.assertAlmostEqual(self.g141_grism.get_pixel_wl(60, 50, 100, 50), 10770.6, 1)
    # There is no code to stop this, but perhaps in the future there should be.
    # Going beyond the detector in ref or normal pixel coordinates still gives a value, as the calculations are polynomial-based.
# def test_get_pixel_wl_beyond_limits(self):
# self.assertAlmostEqual(self.g141_grism.get_pixel_wl(2000, 2000, 100, 50), 11218.8, 1)
# self.assertAlmostEqual(self.g141_grism.get_pixel_wl(60, 50, 2000, 2000), 10770.4, 1)
def test_get_pixel_wl_per_row(self):
# TODO (ryan) should this be 1024 or 1014 in length?
wl = self.g141_grism.get_pixel_wl_per_row(50, 50, np.arange(1024))
self.assertEqual(len(wl), 1024)
self.assertAlmostEqual(wl.mean(), 29961.2, 1)
self.assertAlmostEqual(wl.min(), 8959., 1)
self.assertAlmostEqual(wl.max(), 53001.1, 1)
def test_get_pixel_wl_per_row_x_values(self):
wl = self.g141_grism.get_pixel_wl_per_row(50, 50, np.array([100, 110, 120, 150, 200]))
np.testing.assert_array_almost_equal(wl, [11222.2, 11674.8, 12127.5, 13485.4, 15748.6], 1)
def test_get_pixel_wl_per_row_y_value(self):
wl = self.g141_grism.get_pixel_wl_per_row(50, 50, np.array([100, 110, 120, 150, 200]), 51)
np.testing.assert_array_almost_equal(wl, [11222.7, 11675.3, 12127.9, 13485.9, 15749.1], 1)
def test_get_pixel_edges_wl_per_row(self):
wl = self.g141_grism.get_pixel_edges_wl_per_row(50, 50, np.array([100, 110, 120, 130]), None, 10)
np.testing.assert_array_almost_equal(wl, [10995.9, 11448.5, 11901.2, 12353.8, 12806.5], 1)
def test_bin_centers_to_limits(self):
centers = np.array([-1, 0, 1])
limits = self.g141_grism._bin_centers_to_limits(centers, 1)
numpy.testing.assert_array_equal(limits, np.arange(-1.5, 2.))
class Test_SpectrumTrace(unittest.TestCase):
def test__init__(self):
grism._SpectrumTrace(50, 50, np.zeros(9), np.zeros(9)) # pass if no exceptions
class Test_G141_Trace(unittest.TestCase):
def test__get_wavelength_calibration_coeffs_50_50(self):
        # This test was failing even though the values reported as not equal appear to be equal :-S
g141_trace = grism.G141_Trace(50, 50)
# This step is normally done at initialisation
trace_50_50 = np.array(g141_trace._get_wavelength_calibration_coeffs(50, 50))
expected_50_50 = np.array([0.0099, 1.8767, 45.2665, 8958.9896])
np.testing.assert_array_almost_equal(trace_50_50, expected_50_50, decimal=4)
trace_100_50 = np.array(g141_trace._get_wavelength_calibration_coeffs(100, 50))
expected_100_50 = np.array([0.0096, 1.8812, 45.2776, 8963.6693])
np.testing.assert_array_almost_equal(trace_100_50, expected_100_50, decimal=4)
trace_50_100 = np.array(g141_trace._get_wavelength_calibration_coeffs(50, 100))
expected_50_100 = np.array([0.0099, 1.7801, 45.3782, 8958.9896])
np.testing.assert_array_almost_equal(trace_50_100, expected_50_100, decimal=4)
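# A hedged, standalone sketch (not the wayne implementation itself) of the
# centre-to-edge conversion exercised by test_bin_centers_to_limits above:
# evenly spaced bin centres plus a bin size give edges at the midpoints,
# padded by half a bin at each end.
def bin_centers_to_limits_sketch(centers, size):
    centers = np.asarray(centers, dtype=float)
    return np.concatenate(([centers[0] - size / 2.0],
                           (centers[:-1] + centers[1:]) / 2.0,
                           [centers[-1] + size / 2.0]))
# bin_centers_to_limits_sketch([-1, 0, 1], 1) -> array([-1.5, -0.5, 0.5, 1.5])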
| 45.641975
| 109
| 0.705437
|
aab9cd307978422a7df5fd48df08cd48b6d0146f
| 254
|
py
|
Python
|
patterns/util/page_loader.py
|
constanm/selenium-py
|
38ef4300ff572014688cc3efe365822eeaea0856
|
[
"MIT"
] | null | null | null |
patterns/util/page_loader.py
|
constanm/selenium-py
|
38ef4300ff572014688cc3efe365822eeaea0856
|
[
"MIT"
] | null | null | null |
patterns/util/page_loader.py
|
constanm/selenium-py
|
38ef4300ff572014688cc3efe365822eeaea0856
|
[
"MIT"
] | null | null | null |
def require_loaded(func):
def load_page(page, *params, **kwds):
if not page.is_loaded():
page.load()
assert page.is_loaded(), "page should be loaded by now"
return func(page, *params, **kwds)
return load_page
| 28.222222
| 63
| 0.606299
|
11b6608b7114676b646efdfde5d0c0e1160099d7
| 11,295
|
py
|
Python
|
pyvista/plotting/camera.py
|
rohankumardubey/pyvista
|
ec5aa343d857d0c7e6a79aeeba340797bc868ced
|
[
"MIT"
] | 1
|
2021-05-12T07:38:46.000Z
|
2021-05-12T07:38:46.000Z
|
pyvista/plotting/camera.py
|
rohankumardubey/pyvista
|
ec5aa343d857d0c7e6a79aeeba340797bc868ced
|
[
"MIT"
] | null | null | null |
pyvista/plotting/camera.py
|
rohankumardubey/pyvista
|
ec5aa343d857d0c7e6a79aeeba340797bc868ced
|
[
"MIT"
] | null | null | null |
"""Module containing pyvista implementation of vtkCamera."""
import numpy as np
import pyvista
from pyvista import _vtk
class Camera(_vtk.vtkCamera):
"""PyVista wrapper for the VTK Camera class.
Examples
--------
Create a camera at the pyvista module level
>>> import pyvista
>>> camera = pyvista.Camera()
Access the active camera of a plotter and get the position of the
camera.
>>> pl = pyvista.Plotter()
>>> pl.camera.position
(1.0, 1.0, 1.0)
"""
def __init__(self):
"""Initialize a new camera descriptor."""
self._is_parallel_projection = False
self._elevation = 0.0
self._azimuth = 0.0
@property
def position(self):
"""Position of the camera in world coordinates.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.camera.position
(1.0, 1.0, 1.0)
"""
return self.GetPosition()
@position.setter
def position(self, value):
"""Set the position of the camera.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.camera.position = (2.0, 1.0, 1.0)
"""
self.SetPosition(value)
self._elevation = 0.0
self._azimuth = 0.0
@property
def focal_point(self):
"""Location of the camera's focus in world coordinates.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.camera.focal_point
(0.0, 0.0, 0.0)
"""
return self.GetFocalPoint()
@focal_point.setter
def focal_point(self, point):
"""Set the location of the camera's focus in world coordinates.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.camera.focal_point = (2.0, 0.0, 0.0)
"""
self.SetFocalPoint(point)
@property
def model_transform_matrix(self):
"""Return the camera's model transformation matrix.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.camera.model_transform_matrix
array([[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 1.]])
"""
vtk_matrix = self.GetModelTransformMatrix()
matrix = np.empty((4, 4))
vtk_matrix.DeepCopy(matrix.ravel(), vtk_matrix)
return matrix
@model_transform_matrix.setter
def model_transform_matrix(self, matrix):
"""Set the camera's model transformation matrix.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
        >>> import numpy as np
        >>> trans_mat = np.array([[1., 0., 0., 0.],
        ...                       [0., 1., 0., 0.],
        ...                       [0., 0., 1., 0.],
        ...                       [0., 0., 0., 1.]])
>>> pl.camera.model_transform_matrix = trans_mat
"""
vtk_matrix = _vtk.vtkMatrix4x4()
vtk_matrix.DeepCopy(matrix.ravel())
self.SetModelTransformMatrix(vtk_matrix)
@property
def is_parallel_projection(self):
"""Return True if parallel projection is set."""
return self._is_parallel_projection
@property
def distance(self):
"""Distance from the camera position to the focal point.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.camera.distance # doctest:+SKIP
1.732050807568
"""
return self.GetDistance()
@property
def thickness(self):
"""Return the distance between clipping planes.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.camera.thickness
1000.0
"""
return self.GetThickness()
@thickness.setter
def thickness(self, length):
"""Set the distance between clipping planes.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.camera.thickness = 100
"""
self.SetThickness(length)
@property
def parallel_scale(self):
"""Scaling used for a parallel projection.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.camera.parallel_scale
1.0
"""
return self.GetParallelScale()
@parallel_scale.setter
def parallel_scale(self, scale):
"""Set the scaling used for parallel projection.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.camera.parallel_scale = 2.0
"""
self.SetParallelScale(scale)
def zoom(self, value):
"""Set the zoom of the camera.
In perspective mode, decrease the view angle by the specified
factor.
In parallel mode, decrease the parallel scale by the specified
factor. A value greater than 1 is a zoom-in, a value less than
1 is a zoom-out.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.camera.zoom(2.0)
"""
self.Zoom(value)
@property
def up(self):
"""Return the "up" of the camera.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.camera.up
(0.0, 0.0, 1.0)
"""
return self.GetViewUp()
@up.setter
def up(self, vector):
"""Set the "up" of the camera.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.camera.up = (0.410018, 0.217989, 0.885644)
"""
self.SetViewUp(vector)
def enable_parallel_projection(self, flag=True):
"""Enable parallel projection.
The camera will have a parallel projection. Parallel
projection is often useful when viewing images or 2D datasets.
"""
self._is_parallel_projection = flag
self.SetParallelProjection(flag)
def disable_parallel_projection(self):
"""Disable the use of perspective projection."""
self.enable_parallel_projection(False)
@property
def clipping_range(self):
"""Return the location of the near and far clipping planes along the direction of projection.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.camera.clipping_range
(0.01, 1000.01)
"""
return self.GetClippingRange()
@clipping_range.setter
def clipping_range(self, points):
"""Set the location of the near and far clipping planes along the direction of projection.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.camera.clipping_range = (1, 10)
"""
if points[0] > points[1]:
            raise ValueError('Near point must be lower than the far point.')
self.SetClippingRange(points[0], points[1])
def __del__(self):
"""Delete the camera."""
self.RemoveAllObservers()
self.parent = None
@property
def view_angle(self):
"""Return the camera view angle.
Examples
--------
>>> import pyvista
>>> plotter = pyvista.Plotter()
>>> plotter.camera.view_angle
30.0
"""
return self.GetViewAngle()
@property
def direction(self):
"""Vector from the camera position to the focal point.
Examples
--------
>>> import pyvista
>>> plotter = pyvista.Plotter()
>>> plotter.camera.direction # doctest: +SKIP
(0.0, 0.0, -1.0)
"""
return self.GetDirectionOfProjection()
def view_frustum(self, aspect=1.0):
"""Get the view frustum.
Parameters
----------
aspect : float, optional
The aspect of the viewport to compute the planes. Defaults
to 1.0.
Returns
-------
frustum : pv.PolyData
View frustum.
Examples
--------
>>> import pyvista
>>> plotter = pyvista.Plotter()
>>> frustum = plotter.camera.view_frustum(1.0)
>>> frustum.n_points
8
>>> frustum.n_cells
6
"""
frustum_planes = [0] * 24
self.GetFrustumPlanes(aspect, frustum_planes)
planes = _vtk.vtkPlanes()
planes.SetFrustumPlanes(frustum_planes)
frustum_source = _vtk.vtkFrustumSource()
frustum_source.ShowLinesOff()
frustum_source.SetPlanes(planes)
frustum_source.Update()
frustum = pyvista.wrap(frustum_source.GetOutput())
return frustum
@property
def roll(self):
"""Rotate the camera about the direction of projection.
This will spin the camera about its axis.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.camera.roll
-120.00000000000001
"""
return self.GetRoll()
@roll.setter
def roll(self, angle):
"""Set the rotate of the camera about the direction of projection.
This will spin the camera about its axis.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.camera.roll = 45.0
"""
self.SetRoll(angle)
@property
def elevation(self):
"""Vertical rotation of the scene.
Rotate the camera about the cross product of the negative of
the direction of projection and the view up vector, using the
focal point as the center of rotation.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.camera.elevation
0.0
"""
return self._elevation
@elevation.setter
def elevation(self, angle):
"""Set the vertical rotation of the scene.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.camera.elevation = 45.0
"""
if self._elevation:
self.Elevation(-self._elevation)
self._elevation = angle
self.Elevation(angle)
@property
def azimuth(self):
"""Azimuth of the camera.
Rotate the camera about the view up vector centered at the
focal point. Note that the view up vector is whatever was set
via SetViewUp, and is not necessarily perpendicular to the
direction of projection.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.camera.azimuth
0.0
"""
return self._azimuth
@azimuth.setter
def azimuth(self, angle):
"""Set the azimuth rotation of the camera.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.camera.azimuth = 45.0
"""
if self._azimuth:
self.Azimuth(-self._azimuth)
self._azimuth = angle
self.Azimuth(angle)
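# A hedged usage sketch (illustrative only; requires VTK): the azimuth and
# elevation setters above undo the previously applied rotation before applying
# the new one, so repeated absolute assignments do not accumulate.
if __name__ == '__main__':
    _camera = Camera()
    _camera.azimuth = 30.0
    _camera.azimuth = 30.0   # still an absolute 30 degrees, not 60
    print(_camera.azimuth)   # 30.0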
| 25.554299
| 101
| 0.535812
|
f4c1ffa0dca42847cd0008bcf2d7680087ac45b4
| 1,540
|
py
|
Python
|
gfg/arrays/kth_largest_sum_contiguous_subarray.py
|
rrwt/daily-coding-challenge
|
b16fc365fd142ebab429e605cb146c8bb0bc97a2
|
[
"MIT"
] | 1
|
2019-04-18T03:29:02.000Z
|
2019-04-18T03:29:02.000Z
|
gfg/arrays/kth_largest_sum_contiguous_subarray.py
|
rrwt/daily-coding-challenge
|
b16fc365fd142ebab429e605cb146c8bb0bc97a2
|
[
"MIT"
] | null | null | null |
gfg/arrays/kth_largest_sum_contiguous_subarray.py
|
rrwt/daily-coding-challenge
|
b16fc365fd142ebab429e605cb146c8bb0bc97a2
|
[
"MIT"
] | null | null | null |
"""
Given an array of integers that contains both negative and positive numbers,
write a program to find the K-th largest sum among all contiguous subarrays.
Input: a[] = {20, -5, -1}, k = 3
Output: 14
Explanation: All sums of contiguous subarrays are (20, 15, 14, -5, -6, -1),
so the 3rd largest sum is 14.
Input: a[] = {10, -10, 20, -40}, k = 6
Output: -10
Explanation: The 6th largest sum among all contiguous subarray sums is -10.
"""
import heapq
from typing import Tuple
def largest_sum(arr: list, k: int) -> Tuple[list, int]:
"""
    The sum of elements from i to j can be calculated as sum[0:j] - sum[0:i-1].
    We keep the first k subarray sums in a min heap and replace the root
    whenever a larger sum appears. After processing all subarrays, the heap
    holds the k largest sums and its root is the answer.
    Time Complexity: O(n^2 * log(k))
    Space Complexity: O(n + k)
"""
l: int = len(arr)
sum_arr: list = [0] # for sum_arr[i-1], when i is 0
for i in range(1, l + 1):
sum_arr.append(sum_arr[i - 1] + arr[i - 1])
min_heap: list = []
for i in range(1, l + 1):
for j in range(i, l + 1):
temp_sum = sum_arr[j] - sum_arr[i - 1]
if len(min_heap) < k:
heapq.heappush(min_heap, temp_sum)
elif temp_sum > min_heap[0]:
heapq.heapreplace(min_heap, temp_sum)
return min_heap, min_heap[0]
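# A hedged brute-force reference (not part of the original solution), useful
# only for verifying largest_sum on small inputs.
def largest_sum_bruteforce(arr: list, k: int) -> int:
    """Enumerate every contiguous subarray sum in O(n^2) and sort.
    Example: largest_sum_bruteforce([20, -5, -1], 3) == 14.
    """
    sums = []
    for i in range(len(arr)):
        running = 0
        for j in range(i, len(arr)):
            running += arr[j]
            sums.append(running)
    return sorted(sums, reverse=True)[k - 1]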
if __name__ == "__main__":
print(largest_sum([20, -5, -1], 3))
print(largest_sum([10, -10, 20, -40], 6))
| 31.428571
| 88
| 0.619481
|
95e69e223c0338693b270e860ccd562255eb4384
| 3,159
|
py
|
Python
|
dataset/inat.py
|
xiaofanustc/cifar_ssl
|
23537b91469cf470cb7a92b2fe8b7e372b39f201
|
[
"MIT"
] | 5
|
2021-07-21T05:59:55.000Z
|
2022-01-08T08:43:25.000Z
|
dataset/inat.py
|
xujinglin/imbalanced-semi-self
|
7cccaa2a1415b8dac485bd520e7814ed3c2ea31d
|
[
"MIT"
] | null | null | null |
dataset/inat.py
|
xujinglin/imbalanced-semi-self
|
7cccaa2a1415b8dac485bd520e7814ed3c2ea31d
|
[
"MIT"
] | 1
|
2021-07-31T05:25:24.000Z
|
2021-07-31T05:25:24.000Z
|
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import os
from PIL import Image
RGB_statistics = {
'iNaturalist18': {
'mean': [0.466, 0.471, 0.380],
'std': [0.195, 0.194, 0.192]
}
}
def get_data_transform(split, rgb_mean, rgb_std):
    data_transforms = {
        'train': transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(rgb_mean, rgb_std)
        ]),
        'val': transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(rgb_mean, rgb_std)
        ]),
        'test': transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(rgb_mean, rgb_std)
        ])
    }
    return data_transforms[split]
class INaturalist(Dataset):
def __init__(self, root, txt, transform=None):
self.img_path = []
self.labels = []
self.transform = transform
with open(txt) as f:
for line in f:
self.img_path.append(os.path.join(root, line.split()[0]))
self.labels.append(int(line.split()[1]))
def __len__(self):
return len(self.labels)
def __getitem__(self, index):
path = self.img_path[index]
label = self.labels[index]
with open(path, 'rb') as f:
sample = Image.open(f).convert('RGB')
if self.transform is not None:
sample = self.transform(sample)
return sample, label # , index
def load_data_inat(data_root, batch_size, phase, sampler_dic=None, num_workers=4, shuffle=True):
assert phase in {'train', 'val'}
key = 'iNaturalist18'
txt = f'./imagenet_inat/data/iNaturalist18/iNaturalist18_{phase}.txt'
print(f'===> Loading iNaturalist18 data from {txt}')
rgb_mean, rgb_std = RGB_statistics[key]['mean'], RGB_statistics[key]['std']
transform = get_data_transform(phase, rgb_mean, rgb_std)
set_inat = INaturalist(data_root, txt, transform)
print(f'===> {phase} data length {len(set_inat)}')
# if phase == 'test' and test_open:
# open_txt = './data/%s/%s_open.txt' % (dataset, dataset)
# print('Testing with open sets from %s' % open_txt)
# open_set_ = INaturalist('./data/%s/%s_open' % (dataset, dataset), open_txt, transform)
# set_ = ConcatDataset([set_, open_set_])
if sampler_dic and phase == 'train':
print('Using sampler: ', sampler_dic['sampler'])
print('Sampler parameters: ', sampler_dic['params'])
return DataLoader(dataset=set_inat, batch_size=batch_size, shuffle=False,
sampler=sampler_dic['sampler'](set_inat, **sampler_dic['params']), num_workers=num_workers)
else:
print('No sampler.')
print('Shuffle is %s.' % shuffle)
return DataLoader(dataset=set_inat, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
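# A minimal, hedged sanity check of the transform pipeline above (assumes
# torchvision and Pillow are installed; the grey 300x300 image is synthetic
# and only confirms output shape and normalisation wiring).
if __name__ == '__main__':
    _stats = RGB_statistics['iNaturalist18']
    _tfm = get_data_transform('val', _stats['mean'], _stats['std'])
    _dummy = Image.new('RGB', (300, 300), color=(128, 128, 128))
    print(_tfm(_dummy).shape)  # torch.Size([3, 224, 224])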
| 34.714286
| 117
| 0.615068
|
2c799c3555b769d317c45347b87a7f5477c0b4fa
| 1,728
|
py
|
Python
|
cvnets/layers/random_layers.py
|
KelOdgSmile/ml-cvnets
|
503ec3b4ec187cfa0ed451d0f61de22f669b0081
|
[
"AML"
] | 1
|
2021-12-20T09:25:18.000Z
|
2021-12-20T09:25:18.000Z
|
cvnets/layers/random_layers.py
|
footh/ml-cvnets
|
d9064fe7e7a2d6a7a9817df936432856a0500a25
|
[
"AML"
] | null | null | null |
cvnets/layers/random_layers.py
|
footh/ml-cvnets
|
d9064fe7e7a2d6a7a9817df936432856a0500a25
|
[
"AML"
] | null | null | null |
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2020 Apple Inc. All Rights Reserved.
#
from torch import Tensor
from .base_layer import BaseLayer
import random
from utils.math_utils import bound_fn
from collections import OrderedDict
class RandomApply(BaseLayer):
"""
Apply layers randomly during training
"""
def __init__(self, module_list: list, keep_p: float = 0.8):
super(RandomApply, self).__init__()
self._modules = OrderedDict()
for idx, module in enumerate(module_list):
self._modules[str(idx)] = module
self.module_indexes = [i for i in range(1, len(self._modules))]
n_blocks = len(self.module_indexes)
k = int(round(n_blocks * keep_p))
self.keep_k = bound_fn(min_val=1, max_val=n_blocks, value=k)
def forward(self, x):
if self.training:
indexes = [0] + sorted(random.sample(self.module_indexes, k=self.keep_k))
for idx in indexes:
x = self._modules[str(idx)](x)
else:
for idx, layer in self._modules.items():
x = layer(x)
return x
def profile_module(self, x, *args, **kwargs) -> (Tensor, float, float):
params, macs = 0.0, 0.0
for idx, layer in self._modules.items():
x, p, m = layer.profile_module(x)
params += p
macs += m
return x, params, macs
def __repr__(self):
format_string = self.__class__.__name__ + ' (apply_k (N={})={}, '.format(len(self._modules), self.keep_k)
for idx, layer in self._modules.items():
format_string += '\n\t {}'.format(layer)
format_string += '\n)'
return format_string
| 33.882353
| 113
| 0.605903
|
7fb10b4d75a59e098c4c632e9382de6e77677137
| 7,518
|
py
|
Python
|
mmtrack/datasets/got10k_dataset.py
|
wenry55/mmtracking
|
89e3d4e7a0d16d56d74f9ed1fd3fb9b5b92c9f1d
|
[
"Apache-2.0"
] | null | null | null |
mmtrack/datasets/got10k_dataset.py
|
wenry55/mmtracking
|
89e3d4e7a0d16d56d74f9ed1fd3fb9b5b92c9f1d
|
[
"Apache-2.0"
] | null | null | null |
mmtrack/datasets/got10k_dataset.py
|
wenry55/mmtracking
|
89e3d4e7a0d16d56d74f9ed1fd3fb9b5b92c9f1d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
import glob
import os
import os.path as osp
import shutil
import time
import numpy as np
from mmdet.datasets import DATASETS
from .base_sot_dataset import BaseSOTDataset
@DATASETS.register_module()
class GOT10kDataset(BaseSOTDataset):
"""GOT10k Dataset of single object tracking.
The dataset can both support training and testing mode.
"""
def __init__(self, *args, **kwargs):
super(GOT10kDataset, self).__init__(*args, **kwargs)
def load_data_infos(self, split='train'):
"""Load dataset information.
Args:
split (str, optional): the split of dataset. Defaults to 'train'.
Returns:
list[dict]: the length of the list is the number of videos. The
inner dict is in the following format:
{
'video_path': the video path
'ann_path': the annotation path
'start_frame_id': the starting frame number contained
in the image name
'end_frame_id': the ending frame number contained in
the image name
'framename_template': the template of image name
}
"""
print('Loading GOT10k dataset...')
start_time = time.time()
assert split in ['train', 'val', 'test', 'val_vot', 'train_vot']
data_infos = []
if split in ['train', 'val', 'test']:
videos_list = np.loadtxt(
osp.join(self.img_prefix, split, 'list.txt'), dtype=np.str_)
else:
split = '_'.join(split.split('_')[::-1])
vids_id_list = np.loadtxt(
osp.join(self.img_prefix, 'train',
f'got10k_{split}_split.txt'),
dtype=float)
videos_list = [
'GOT-10k_Train_%06d' % (int(video_id) + 1)
for video_id in vids_id_list
]
videos_list = sorted(videos_list)
for video_name in videos_list:
if split in ['val', 'test']:
video_path = osp.join(split, video_name)
else:
video_path = osp.join('train', video_name)
ann_path = osp.join(video_path, 'groundtruth.txt')
img_names = glob.glob(
osp.join(self.img_prefix, video_path, '*.jpg'))
end_frame_name = max(
img_names, key=lambda x: int(osp.basename(x).split('.')[0]))
end_frame_id = int(osp.basename(end_frame_name).split('.')[0])
data_infos.append(
dict(
video_path=video_path,
ann_path=ann_path,
start_frame_id=1,
end_frame_id=end_frame_id,
framename_template='%08d.jpg'))
print(f'GOT10k dataset loaded! ({time.time()-start_time:.2f} s)')
return data_infos
def get_visibility_from_video(self, video_ind):
"""Get the visible information of instance in a video."""
if not self.test_mode:
absense_info_path = osp.join(
self.img_prefix, self.data_infos[video_ind]['video_path'],
'absence.label')
cover_info_path = osp.join(
self.img_prefix, self.data_infos[video_ind]['video_path'],
'cover.label')
absense_info = np.loadtxt(absense_info_path, dtype=bool)
# The values of key 'cover' are
# int numbers in range [0,8], which correspond to
# ranges of object visible ratios: 0%, (0%, 15%],
# (15%~30%], (30%, 45%], (45%, 60%],(60%, 75%],
# (75%, 90%], (90%, 100%) and 100% respectively
cover_info = np.loadtxt(cover_info_path, dtype=int)
visible = ~absense_info & (cover_info > 0)
visible_ratio = cover_info / 8.
return dict(visible=visible, visible_ratio=visible_ratio)
else:
return super(GOT10kDataset,
self).get_visibility_from_video(video_ind)
def prepare_test_data(self, video_ind, frame_ind):
"""Get testing data of one frame. We parse one video, get one frame
from it and pass the frame information to the pipeline.
Args:
video_ind (int): video index
frame_ind (int): frame index
Returns:
dict: testing data of one frame.
"""
ann_infos = self.get_ann_infos_from_video(video_ind)
img_infos = self.get_img_infos_from_video(video_ind)
img_info = dict(
filename=img_infos['filename'][frame_ind], frame_id=frame_ind)
if frame_ind == 0:
ann_info = dict(
bboxes=ann_infos['bboxes'][frame_ind], visible=True)
else:
ann_info = dict(
bboxes=np.array([0] * 4, dtype=np.float32), visible=True)
results = dict(img_info=img_info, ann_info=ann_info)
self.pre_pipeline(results)
results = self.pipeline(results)
return results
def format_results(self, results, resfile_path=None):
"""Format the results to txts (standard format for GOT10k Challenge).
Args:
results (dict(list[ndarray])): Testing results of the dataset.
resfile_path (str): Path to save the formatted results.
Defaults to None.
"""
# prepare saved dir
assert resfile_path is not None, 'Please give key-value pair \
like resfile_path=xxx in argparse'
if not osp.isdir(resfile_path):
os.makedirs(resfile_path, exist_ok=True)
# transform tracking results format
# from [bbox_1, bbox_2, ...] to {'video_1':[bbox_1, bbox_2, ...], ...}
track_bboxes = results['track_bboxes']
print('-------- There are total {} images --------'.format(
len(track_bboxes)))
start_ind = end_ind = 0
for num, video_info in zip(self.num_frames_per_video, self.data_infos):
end_ind += num
video_name = video_info['video_path'].split('/')[-1]
video_resfiles_path = osp.join(resfile_path, video_name)
if not osp.isdir(video_resfiles_path):
os.makedirs(video_resfiles_path, exist_ok=True)
video_bbox_txt = osp.join(video_resfiles_path,
'{}_001.txt'.format(video_name))
video_time_txt = osp.join(video_resfiles_path,
'{}_time.txt'.format(video_name))
with open(video_bbox_txt,
'w') as f_bbox, open(video_time_txt, 'w') as f_time:
for bbox in results['track_bboxes'][start_ind:end_ind]:
bbox = [
str(f'{bbox[0]:.4f}'),
str(f'{bbox[1]:.4f}'),
str(f'{(bbox[2] - bbox[0]):.4f}'),
str(f'{(bbox[3] - bbox[1]):.4f}')
]
line = ','.join(bbox) + '\n'
f_bbox.writelines(line)
# We don't record testing time, so we set a default
# time in order to test on the server.
f_time.writelines('0.0001\n')
start_ind += num
shutil.make_archive(resfile_path, 'zip', resfile_path)
shutil.rmtree(resfile_path)
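# A hedged, standalone helper mirroring the bbox formatting inside
# format_results above (illustrative only; not used by the class): GOT10k
# expects one comma-separated "x,y,w,h" line per frame, while the tracker
# emits (x1, y1, x2, y2, ...)-style arrays.
def bbox_xyxy_to_got10k_line(bbox):
    x1, y1, x2, y2 = bbox[:4]
    return ','.join(f'{v:.4f}' for v in (x1, y1, x2 - x1, y2 - y1)) + '\n'
# bbox_xyxy_to_got10k_line([10., 20., 110., 220.])
# -> '10.0000,20.0000,100.0000,200.0000\n'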
| 40.858696
| 79
| 0.548018
|
f902ae452b11069414a9270940293aaa4873d9ad
| 2,228
|
py
|
Python
|
testing/test_sct_label_utils.py
|
kousu-1/spinalcordtoolbox
|
9b1c2179fe31be489dab7f08c43e9bd5902931c0
|
[
"MIT"
] | null | null | null |
testing/test_sct_label_utils.py
|
kousu-1/spinalcordtoolbox
|
9b1c2179fe31be489dab7f08c43e9bd5902931c0
|
[
"MIT"
] | null | null | null |
testing/test_sct_label_utils.py
|
kousu-1/spinalcordtoolbox
|
9b1c2179fe31be489dab7f08c43e9bd5902931c0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#########################################################################################
#
# Test function sct_label_utils
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2014 Polytechnique Montreal <www.neuro.polymtl.ca>
# Author: Augustin Roux
# modified: 2014/10/30
#
# About the license: see the file LICENSE.TXT
#########################################################################################
# TODO: add test to other processes.
from __future__ import absolute_import
import os
from pandas import DataFrame
import sct_utils as sct
import sct_label_utils
def init(param_test):
"""
Initialize class: param_test
"""
# initialization
folder_data = ['t2']
file_data = ['t2_seg-manual.nii.gz', 't2_seg_labeled.nii.gz']
default_args = ['-i ' + os.path.join(folder_data[0], file_data[0]) + ' -create 1,1,1,1:2,2,2,2',
'-i ' + os.path.join(folder_data[0], file_data[0]) + ' -cubic-to-point -o test_centerofmass.nii.gz']
param_test.centers_of_mass = '31,28,25,1'
# assign default params
if not param_test.args:
param_test.args = default_args
return param_test
def test_integrity(param_test):
"""
Test integrity of function
"""
# find the test that is performed and check the integrity of the output
index_args = param_test.default_args.index(param_test.args)
# Removed because of:
# https://travis-ci.org/neuropoly/spinalcordtoolbox/jobs/482061826
param_test.output += "NOT TESTED-- SHOULD BE REACTIVATED ASAP"
# if index_args == 1:
# # compute center of mass of labeled segmentation
# centers_of_mass_image = sct_label_utils.main(['-i', 'test_centerofmass.nii.gz', '-display', '-v', '0'])
# # compare with ground truth value
# if centers_of_mass_image != param_test.centers_of_mass:
# param_test.output += 'WARNING: Center of mass different from gold-standard. \n--> Results: ' \
# + centers_of_mass_image + '\n--> Should be: ' + param_test.centers_of_mass + '\n'
# param_test.status = 99
# end test
return param_test
| 33.757576
| 120
| 0.58079
|
5d91e9ccaf9c4c621f5bc5ab89c298c3ebecce86
| 10,109
|
py
|
Python
|
nova/tests/functional/libvirt/test_reshape.py
|
nfvri/nova
|
2ce5a440c44eb512f07adacd313304e226bb56a0
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/functional/libvirt/test_reshape.py
|
nfvri/nova
|
2ce5a440c44eb512f07adacd313304e226bb56a0
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/functional/libvirt/test_reshape.py
|
nfvri/nova
|
2ce5a440c44eb512f07adacd313304e226bb56a0
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import mock
from oslo_config import cfg
from oslo_log import log as logging
from nova import context
from nova import objects
from nova.tests.functional.libvirt import base
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova.virt.libvirt import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class VGPUReshapeTests(base.ServersTestBase):
# the minimum libvirt version needed for vgpu
MIN_LIBVIRT_MDEV_SUPPORT = 3004000
def _wait_for_state_change(self, server, expected_status):
for i in range(0, 50):
server = self.api.get_server(server['id'])
if server['status'] == expected_status:
return server
time.sleep(.1)
self.assertEqual(expected_status, server['status'])
return server
def test_create_servers_with_vgpu(self):
"""Verify that vgpu reshape works with libvirt driver
1) create two servers with an old tree where the VGPU resource is on
the compute provider
2) trigger a reshape
3) check that the allocations of the servers are still valid
4) create another server now against the new tree
"""
# NOTE(gibi): We cannot simply ask the virt driver to create an old
# RP tree with vgpu on the root RP as that code path does not exist
# any more. So we have to hack a "bit". We will create a compute
# service without vgpu support to have the compute RP ready then we
# manually add the VGPU resources to that RP in placement. Also we make
# sure that during the instance claim the virt driver does not detect
# the old tree as that would be a bad time for reshape. Later when the
# compute service is restarted the driver will do the reshape.
fake_connection = self._get_connection(
# We need more RAM or the 3rd server won't be created
host_info=fakelibvirt.HostInfo(kB_mem=8192),
libvirt_version=self.MIN_LIBVIRT_MDEV_SUPPORT,
mdev_info=fakelibvirt.HostMdevDevicesInfo())
self.mock_conn.return_value = fake_connection
# start a compute with vgpu support disabled so the driver will
# ignore the content of the above HostMdevDeviceInfo
self.flags(enabled_vgpu_types='', group='devices')
self.compute = self.start_service('compute', host='compute1')
# create the VGPU resource in placement manually
compute_rp_uuid = self.placement_api.get(
'/resource_providers?name=compute1').body[
'resource_providers'][0]['uuid']
inventories = self.placement_api.get(
'/resource_providers/%s/inventories' % compute_rp_uuid).body
inventories['inventories']['VGPU'] = {
'allocation_ratio': 1.0,
'max_unit': 3,
'min_unit': 1,
'reserved': 0,
'step_size': 1,
'total': 3}
self.placement_api.put(
'/resource_providers/%s/inventories' % compute_rp_uuid,
inventories)
# now we boot two servers with vgpu
extra_spec = {"resources:VGPU": 1}
flavor_id = self._create_flavor(extra_spec=extra_spec)
server_req = self._build_server(flavor_id)
# NOTE(gibi): during instance_claim() there is a
# driver.update_provider_tree() call that would detect the old tree and
# would fail as this is not a good time to reshape. To avoid that we
# temporarily mock update_provider_tree here.
with mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'update_provider_tree'):
created_server1 = self.api.post_server({'server': server_req})
server1 = self._wait_for_state_change(created_server1, 'ACTIVE')
created_server2 = self.api.post_server({'server': server_req})
server2 = self._wait_for_state_change(created_server2, 'ACTIVE')
# Determine which device is associated with which instance
# { inst.uuid: pgpu_name }
inst_to_pgpu = {}
ctx = context.get_admin_context()
for server in (server1, server2):
inst = objects.Instance.get_by_uuid(ctx, server['id'])
mdevs = list(
self.compute.driver._get_all_assigned_mediated_devices(inst))
self.assertEqual(1, len(mdevs))
mdev_uuid = mdevs[0]
mdev_info = self.compute.driver._get_mediated_device_information(
utils.mdev_uuid2name(mdev_uuid))
inst_to_pgpu[inst.uuid] = mdev_info['parent']
# The VGPUs should have come from different pGPUs
self.assertNotEqual(*list(inst_to_pgpu.values()))
# verify that the inventory, usages and allocation are correct before
# the reshape
compute_inventory = self.placement_api.get(
'/resource_providers/%s/inventories' % compute_rp_uuid).body[
'inventories']
self.assertEqual(3, compute_inventory['VGPU']['total'])
compute_usages = self.placement_api.get(
'/resource_providers/%s/usages' % compute_rp_uuid).body[
'usages']
self.assertEqual(2, compute_usages['VGPU'])
for server in (server1, server2):
allocations = self.placement_api.get(
'/allocations/%s' % server['id']).body['allocations']
# the flavor has disk=10 and ephemeral=10
self.assertEqual(
{'DISK_GB': 20, 'MEMORY_MB': 2048, 'VCPU': 2, 'VGPU': 1},
allocations[compute_rp_uuid]['resources'])
# enabled vgpu support
self.flags(
enabled_vgpu_types=fakelibvirt.NVIDIA_11_VGPU_TYPE,
group='devices')
# restart compute which will trigger a reshape
self.compute = self.restart_compute_service(self.compute)
# verify that the inventory, usages and allocation are correct after
# the reshape
compute_inventory = self.placement_api.get(
'/resource_providers/%s/inventories' % compute_rp_uuid).body[
'inventories']
self.assertNotIn('VGPU', compute_inventory)
# NOTE(sbauza): The two instances will use two different pGPUs
# That said, we need to check all the pGPU inventories for knowing
# which ones are used.
usages = {}
pgpu_uuid_to_name = {}
for pci_device in [fakelibvirt.PGPU1_PCI_ADDR,
fakelibvirt.PGPU2_PCI_ADDR,
fakelibvirt.PGPU3_PCI_ADDR]:
gpu_rp_uuid = self.placement_api.get(
'/resource_providers?name=compute1_%s' % pci_device).body[
'resource_providers'][0]['uuid']
pgpu_uuid_to_name[gpu_rp_uuid] = pci_device
gpu_inventory = self.placement_api.get(
'/resource_providers/%s/inventories' % gpu_rp_uuid).body[
'inventories']
self.assertEqual(1, gpu_inventory['VGPU']['total'])
gpu_usages = self.placement_api.get(
'/resource_providers/%s/usages' % gpu_rp_uuid).body[
'usages']
usages[pci_device] = gpu_usages['VGPU']
# Make sure that both instances are using different pGPUs
used_devices = [dev for dev, usage in usages.items() if usage == 1]
avail_devices = list(set(usages.keys()) - set(used_devices))
self.assertEqual(2, len(used_devices))
# Make sure that both instances are using the correct pGPUs
for server in [server1, server2]:
allocations = self.placement_api.get(
'/allocations/%s' % server['id']).body[
'allocations']
self.assertEqual(
{'DISK_GB': 20, 'MEMORY_MB': 2048, 'VCPU': 2},
allocations[compute_rp_uuid]['resources'])
rp_uuids = list(allocations.keys())
# We only have two RPs, the compute RP (the root) and the child
# pGPU RP
gpu_rp_uuid = (rp_uuids[1] if rp_uuids[0] == compute_rp_uuid
else rp_uuids[0])
self.assertEqual(
{'VGPU': 1},
allocations[gpu_rp_uuid]['resources'])
# The pGPU's RP name contains the pGPU name
self.assertIn(inst_to_pgpu[server['id']],
pgpu_uuid_to_name[gpu_rp_uuid])
# now create one more instance with vgpu against the reshaped tree
created_server = self.api.post_server({'server': server_req})
server3 = self._wait_for_state_change(created_server, 'ACTIVE')
# find the pGPU that wasn't used before we created the third instance
# It should have taken the previously available pGPU
device = avail_devices[0]
gpu_rp_uuid = self.placement_api.get(
'/resource_providers?name=compute1_%s' % device).body[
'resource_providers'][0]['uuid']
gpu_usages = self.placement_api.get(
'/resource_providers/%s/usages' % gpu_rp_uuid).body[
'usages']
self.assertEqual(1, gpu_usages['VGPU'])
allocations = self.placement_api.get(
'/allocations/%s' % server3['id']).body[
'allocations']
self.assertEqual(
{'DISK_GB': 20, 'MEMORY_MB': 2048, 'VCPU': 2},
allocations[compute_rp_uuid]['resources'])
self.assertEqual(
{'VGPU': 1},
allocations[gpu_rp_uuid]['resources'])
| 44.730088
| 79
| 0.62934
|
2a5da2ceb023068a38d2e7cf71ca796c155a5286
| 11,394
|
py
|
Python
|
FCB1010/SpecialTransportComponent.py
|
gaelhuot/FCB1010-Ableton-live
|
d3cbe36a3d85d5632a09517f7137c68a2206598f
|
[
"Apache-2.0"
] | 2
|
2021-10-08T11:46:52.000Z
|
2021-12-15T20:15:53.000Z
|
FCB1010/SpecialTransportComponent.py
|
gaelhuot/FCB1010-Ableton-live
|
d3cbe36a3d85d5632a09517f7137c68a2206598f
|
[
"Apache-2.0"
] | null | null | null |
FCB1010/SpecialTransportComponent.py
|
gaelhuot/FCB1010-Ableton-live
|
d3cbe36a3d85d5632a09517f7137c68a2206598f
|
[
"Apache-2.0"
] | 2
|
2021-10-17T02:24:55.000Z
|
2022-03-31T02:41:30.000Z
|
import Live
from _Framework.TransportComponent import TransportComponent
from _Framework.ButtonElement import ButtonElement
from _Framework.EncoderElement import EncoderElement #added
from _Framework.SubjectSlot import subject_slot #added
#TEMPO_TOP = 300.0
#TEMPO_BOTTOM = 40.0
from .MIDI_Map import TEMPO_TOP
from .MIDI_Map import TEMPO_BOTTOM
class SpecialTransportComponent(TransportComponent):
__doc__ = ' TransportComponent that only uses certain buttons if a shift button is pressed '
def __init__(self):
TransportComponent.__init__(self)
#self._shift_button = None
self._quant_toggle_button = None
#self._shift_pressed = False
self._last_quant_value = Live.Song.RecordingQuantization.rec_q_eight
self.song().add_midi_recording_quantization_listener(self._on_quantisation_changed)
self._on_quantisation_changed()
self._undo_button = None #added from OpenLabs SpecialTransportComponent script
self._redo_button = None #added from OpenLabs SpecialTransportComponent script
#self._bts_button = None #added from OpenLabs SpecialTransportComponent script
self._tempo_encoder_control = None #new addition
return None
def disconnect(self):
TransportComponent.disconnect(self)
#if self._shift_button != None:
#self._shift_button.remove_value_listener(self._shift_value)
#self._shift_button = None
if self._quant_toggle_button != None:
self._quant_toggle_button.remove_value_listener(self._quant_toggle_value)
self._quant_toggle_button = None
self.song().remove_midi_recording_quantization_listener(self._on_quantisation_changed)
if (self._undo_button != None): #added from OpenLabs SpecialTransportComponent script
self._undo_button.remove_value_listener(self._undo_value)
self._undo_button = None
if (self._redo_button != None): #added from OpenLabs SpecialTransportComponent script
self._redo_button.remove_value_listener(self._redo_value)
self._redo_button = None
#if (self._bts_button != None): #added from OpenLabs SpecialTransportComponent script
#self._bts_button.remove_value_listener(self._bts_value)
#self._bts_button = None
if (self._tempo_encoder_control != None): #new addition
self._tempo_encoder_control.remove_value_listener(self._tempo_encoder_value)
self._tempo_encoder_control = None
return None
#def set_shift_button(self, button):
#if not(button == None or isinstance(button, ButtonElement) and button.is_momentary()):
#isinstance(button, ButtonElement)
#raise AssertionError
#if self._shift_button != button:
#if self._shift_button != None:
#self._shift_button.remove_value_listener(self._shift_value)
#self._shift_button = button
#if self._shift_button != None:
#self._shift_button.add_value_listener(self._shift_value)
#
#self.update()
#return None
def set_quant_toggle_button(self, button):
if not(button == None or isinstance(button, ButtonElement) and button.is_momentary()):
isinstance(button, ButtonElement)
raise AssertionError
if self._quant_toggle_button != button:
if self._quant_toggle_button != None:
self._quant_toggle_button.remove_value_listener(self._quant_toggle_value)
self._quant_toggle_button = button
if self._quant_toggle_button != None:
self._quant_toggle_button.add_value_listener(self._quant_toggle_value)
self.update()
return None
#def update(self):
#self._on_metronome_changed()
#self._on_overdub_changed()
#self._on_quantisation_changed()
#self._on_nudge_up_changed() #added
#self._on_nudge_down_changed #added
#def _shift_value(self, value):
#if not self._shift_button != None:
#raise AssertionError
#if not value in range(128):
#raise AssertionError
#self._shift_pressed = value != 0
#if self.is_enabled():
#self.is_enabled()
#self.update()
#else:
#self.is_enabled()
#return None
#def _metronome_value(self, value):
#if not self._shift_pressed:
###if self._shift_pressed:
#TransportComponent._metronome_value(self, value)
#def _overdub_value(self, value):
#if not self._shift_pressed:
#TransportComponent._overdub_value(self, value)
#def _nudge_up_value(self, value): #added
#if not self._shift_pressed:
#TransportComponent._nudge_up_value(self, value)
#def _nudge_down_value(self, value): #added
#if not self._shift_pressed:
#TransportComponent._nudge_down_value(self, value)
#def _tap_tempo_value(self, value): # Added as Shift + Tap Tempo
#if not self._shift_pressed:
##if self._shift_pressed:
#TransportComponent._tap_tempo_value(self, value)
def _quant_toggle_value(self, value):
assert (self._quant_toggle_button != None)
assert (value in range(128))
assert (self._last_quant_value != Live.Song.RecordingQuantization.rec_q_no_q)
if self.is_enabled(): # and (not self._shift_pressed):
if ((value != 0) or (not self._quant_toggle_button.is_momentary())):
quant_value = self.song().midi_recording_quantization
if (quant_value != Live.Song.RecordingQuantization.rec_q_no_q):
self._last_quant_value = quant_value
self.song().midi_recording_quantization = Live.Song.RecordingQuantization.rec_q_no_q
else:
self.song().midi_recording_quantization = self._last_quant_value
#def _on_metronome_changed(self):
#if not self._shift_pressed:
##if self._shift_pressed:
#TransportComponent._on_metronome_changed(self)
#def _on_overdub_changed(self):
#if not self._shift_pressed:
#TransportComponent._on_overdub_changed(self)
#def _on_nudge_up_changed(self): #added
#if not self._shift_pressed:
#TransportComponent._on_nudge_up_changed(self)
#def _on_nudge_down_changed(self): #added
#if not self._shift_pressed:
#TransportComponent._on_nudge_down_changed(self)
def _on_quantisation_changed(self):
if self.is_enabled():
quant_value = self.song().midi_recording_quantization
quant_on = (quant_value != Live.Song.RecordingQuantization.rec_q_no_q)
if quant_on:
self._last_quant_value = quant_value
if self._quant_toggle_button != None: #((not self._shift_pressed) and (self._quant_toggle_button != None)):
if quant_on:
self._quant_toggle_button.turn_on()
else:
self._quant_toggle_button.turn_off()
""" from OpenLabs module SpecialTransportComponent """
def set_undo_button(self, undo_button):
assert isinstance(undo_button, (ButtonElement,
type(None)))
if (undo_button != self._undo_button):
if (self._undo_button != None):
self._undo_button.remove_value_listener(self._undo_value)
self._undo_button = undo_button
if (self._undo_button != None):
self._undo_button.add_value_listener(self._undo_value)
self.update()
def set_redo_button(self, redo_button):
assert isinstance(redo_button, (ButtonElement,
type(None)))
if (redo_button != self._redo_button):
if (self._redo_button != None):
self._redo_button.remove_value_listener(self._redo_value)
self._redo_button = redo_button
if (self._redo_button != None):
self._redo_button.add_value_listener(self._redo_value)
self.update()
#def set_bts_button(self, bts_button): #"back to start" button
#assert isinstance(bts_button, (ButtonElement,
#type(None)))
#if (bts_button != self._bts_button):
#if (self._bts_button != None):
#self._bts_button.remove_value_listener(self._bts_value)
#self._bts_button = bts_button
#if (self._bts_button != None):
#self._bts_button.add_value_listener(self._bts_value)
#self.update()
def _undo_value(self, value):
#if self._shift_pressed: #added
assert (self._undo_button != None)
assert (value in range(128))
if self.is_enabled():
if ((value != 0) or (not self._undo_button.is_momentary())):
if self.song().can_undo:
self.song().undo()
def _redo_value(self, value):
#if self._shift_pressed: #added
assert (self._redo_button != None)
assert (value in range(128))
if self.is_enabled():
if ((value != 0) or (not self._redo_button.is_momentary())):
if self.song().can_redo:
self.song().redo()
#def _bts_value(self, value):
#assert (self._bts_button != None)
#assert (value in range(128))
#if self.is_enabled():
#if ((value != 0) or (not self._bts_button.is_momentary())):
#self.song().current_song_time = 0.0
def _tempo_encoder_value(self, value):
##if not self._shift_pressed:
#if self._shift_pressed:
assert (self._tempo_encoder_control != None)
assert (value in range(128))
backwards = (value >= 64)
step = 0.1 #step = 1.0 #reduce this for finer control; 1.0 is 1 bpm
if backwards:
amount = (value - 128)
else:
amount = value
tempo = max(20, min(999, (self.song().tempo + (amount * step))))
self.song().tempo = tempo
def set_tempo_encoder(self, control):
assert ((control == None) or (isinstance(control, EncoderElement) and (control.message_map_mode() is Live.MidiMap.MapMode.relative_two_compliment)))
if (self._tempo_encoder_control != None):
self._tempo_encoder_control.remove_value_listener(self._tempo_encoder_value)
self._tempo_encoder_control = control
if (self._tempo_encoder_control != None):
self._tempo_encoder_control.add_value_listener(self._tempo_encoder_value)
self.update()
@subject_slot('value')
def _tempo_value(self, value): #Override to pull tempo range from MIDI_Maps.py
assert (self._tempo_control != None)
assert (value in range(128))
if self.is_enabled():
fraction = ((TEMPO_TOP - TEMPO_BOTTOM) / 127.0)
self.song().tempo = ((fraction * value) + TEMPO_BOTTOM)
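# A hedged, standalone sketch of the relative two's-complement decoding used
# by _tempo_encoder_value above (illustrative only): MIDI values 0..63 are
# positive steps, while 64..127 encode negative steps as value - 128.
def decode_relative_encoder(value, step=0.1):
    amount = (value - 128) if value >= 64 else value
    return amount * step
# decode_relative_encoder(1) -> 0.1 bpm up; decode_relative_encoder(127) -> -0.1 bpm down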
| 42.04428
| 157
| 0.628138
|
5ffc64cf688e2e686baf3cc94801bd15d44e21d5
| 381
|
py
|
Python
|
host/control_flow_constants.py
|
laochanlam/cheetah-release
|
a836bd31f02fb9f612afaf1f90d4d2638c8294e7
|
[
"MIT"
] | 10
|
2020-06-14T15:17:19.000Z
|
2022-03-30T19:58:41.000Z
|
host/control_flow_constants.py
|
laochanlam/cheetah-release
|
a836bd31f02fb9f612afaf1f90d4d2638c8294e7
|
[
"MIT"
] | null | null | null |
host/control_flow_constants.py
|
laochanlam/cheetah-release
|
a836bd31f02fb9f612afaf1f90d4d2638c8294e7
|
[
"MIT"
] | 3
|
2020-06-25T22:47:05.000Z
|
2022-01-26T04:07:28.000Z
|
CHEETAH_MASTER_IP = '10.243.38.88'
CHEETAH_MASTER_PORT = 23456
CHEETAH_WORKER_NODES = 1
| 1.282828
| 34
| 0.186352
|
016db7b4a9ffd70a1575d693a22c4646caab0fde
| 6,405
|
py
|
Python
|
data/nyu_depth_raw_loader.py
|
linpeisensh/sim
|
e849d76caa0a20507436d4a6f9aab06e659ae6b2
|
[
"MIT"
] | 117
|
2019-11-17T04:27:58.000Z
|
2022-03-31T20:41:27.000Z
|
data/nyu_depth_raw_loader.py
|
linpeisensh/sim
|
e849d76caa0a20507436d4a6f9aab06e659ae6b2
|
[
"MIT"
] | 6
|
2019-12-04T23:08:38.000Z
|
2022-03-07T10:44:08.000Z
|
data/nyu_depth_raw_loader.py
|
linpeisensh/sim
|
e849d76caa0a20507436d4a6f9aab06e659ae6b2
|
[
"MIT"
] | 29
|
2019-11-20T06:09:23.000Z
|
2022-03-07T10:21:13.000Z
|
from __future__ import division
import argparse
import numpy as np
from path import Path
from pebble import ProcessPool
import scipy.misc
import sys
from tqdm import tqdm
from collections import Counter
import torch
parser = argparse.ArgumentParser()
parser.add_argument("dataset_dir", metavar='DIR',
help='path to original dataset')
parser.add_argument("--dump-root", type=str, default='dump', help="Where to dump the data")
parser.add_argument("--with-depth", action='store_true',
help="If available (e.g. with KITTI), will store depth ground truth along with images, for validation")
parser.add_argument("--with-pose", action='store_true',
help="If available (e.g. with KITTI), will store pose ground truth along with images, for validation")
parser.add_argument("--height", type=int, default=192, help="image height")
parser.add_argument("--width", type=int, default=640, help="image width")
parser.add_argument("--num-threads", type=int, default=4, help="number of threads to use")
args = parser.parse_args()
class NYUDepthRawLoader(object):
def __init__(self,
dataset_dir,
img_height=192,
img_width=640,
get_depth=False,
get_pose=False):
self.dataset_dir = Path(dataset_dir)
self.img_height = img_height
self.img_width = img_width
self.get_depth = get_depth
self.get_pose = get_pose
self.img_exts = '.ppm'
self.intrinsics = np.array([[518.85790, 0.00000, 325.58245, 0.00000],
[ 0.00000, 519.46961, 253.73617, 0.00000],
[ 0.00000, 0.00000, 1.00000, 0.00000],
[ 0.00000, 0.00000, 0.00000, 1.00000]], dtype=np.float32)
self.collect_train_folders()
def collect_train_folders(self):
self.scenes = []
drive_set = sorted(self.dataset_dir.dirs())
for dr in drive_set:
if dr.name == 'toolbox':
continue
self.scenes.append(dr)
def get_intrinsics(self, zoom_x, zoom_y):
intrinsics = self.intrinsics
intrinsics[0] *= zoom_x / self.img_width
intrinsics[1] *= zoom_y / self.img_height
return intrinsics
def collect_scene_data(self, drive):
scene_data = {'dir':drive, 'frame_id':[], 'pose':[], 'rel_path':drive.name}
img_files = sorted(drive.files())
for f in img_files:
if f.name[0] == 'r':
scene_data['frame_id'].append(f.name[:-(len(self.img_exts))])
sample = self.load_image(scene_data, 0)
if sample is None:
return []
scene_data['intrinsics'] = self.get_intrinsics(sample[1], sample[2])
return scene_data
def get_scene_imgs(self, scene_data):
def construct_sample(scene_data, i):
sample = {'img': self.load_image(scene_data, i)[0], 'id':scene_data['frame_id'][i]}
if self.get_depth:
sample['depth'] = self.load_depth(scene_data, i)[0]
if self.get_pose:
sample['pose'] = scene_data['pose'][i]
return sample
for (i, frame_id) in enumerate(scene_data['frame_id']):
yield construct_sample(scene_data, i)
def load_image(self, scene_data, tgt_idx):
img_file = scene_data['dir']/'{}{}'.format(scene_data['frame_id'][tgt_idx], self.img_exts)
if not img_file.isfile():
return None
img = scipy.misc.imread(img_file)
img = self.crop_image(img)
zoom_y = self.img_height / img.shape[0]
zoom_x = self.img_width / img.shape[1]
if zoom_x != 1 and zoom_y != 1:
# print("img resize")
img = scipy.misc.imresize(img, (self.img_height, self.img_width))
return img, zoom_x, zoom_y
def crop_image(self, image):
h, w = image.shape[0], image.shape[1]
bbox_h = [h//2 - self.img_height//2, h//2 + self.img_height//2]
bbox_w = [w//2 - self.img_width//2, w//2 + self.img_width//2]
image = image[bbox_h[0]:bbox_h[1], bbox_w[0]:bbox_w[1]]
# print(image.shape)
return image
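# A hedged, standalone illustration of the centre-crop arithmetic in
# crop_image above (names are illustrative): for a 480x640 NYU frame and a
# 192x640 target, the vertical bounds are [144, 336] and the horizontal
# bounds span the full width.
def center_crop_bbox(h, w, out_h, out_w):
    return ([h // 2 - out_h // 2, h // 2 + out_h // 2],
            [w // 2 - out_w // 2, w // 2 + out_w // 2])
# center_crop_bbox(480, 640, 192, 640) -> ([144, 336], [0, 640])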
def dump_example(args, scene):
scene_data = data_loader.collect_scene_data(scene)
assert len(scene_data) != 0
dump_dir = args.dump_root/scene_data['rel_path']
dump_dir.makedirs_p()
intrinsics = scene_data['intrinsics']
dump_cam_file = dump_dir/'cam.txt'
np.savetxt(dump_cam_file, intrinsics)
poses_file = dump_dir/'poses.txt'
poses = []
idx = 0
for sample in data_loader.get_scene_imgs(scene_data):
img = sample["img"]
dump_img_file = dump_dir/'{:010d}.jpg'.format(idx)
scipy.misc.imsave(dump_img_file, img)
if "pose" in sample.keys():
poses.append(sample["pose"].tolist())
if "depth" in sample.keys():
dump_depth_file = dump_dir/'{:010d}.npy'.format(idx)
np.save(dump_depth_file, sample["depth"])
idx += 1
if len(poses) != 0:
np.savetxt(poses_file, np.array(poses).reshape(-1, 12), fmt='%.6e')
if len(dump_dir.files('*.jpg')) < 3:
dump_dir.rmtree()
def main():
args.dump_root = Path(args.dump_root)
args.dump_root.mkdir_p()
global data_loader
data_loader = NYUDepthRawLoader(args.dataset_dir,
img_height=args.height,
img_width=args.width,
get_depth=args.with_depth,
get_pose=args.with_pose)
n_scenes = len(data_loader.scenes)
print('Found {} potential scenes'.format(n_scenes))
print('Retrieving frames')
if args.num_threads == 1:
for scene in tqdm(data_loader.scenes):
dump_example(args, scene)
else:
with ProcessPool(max_workers=args.num_threads) as pool:
tasks = pool.map(dump_example, [args]*n_scenes, data_loader.scenes)
try:
for _ in tqdm(tasks.result(), total=n_scenes):
pass
except KeyboardInterrupt as e:
tasks.cancel()
raise e
if __name__ == '__main__':
main()
| 37.45614
| 123
| 0.588915
|
ec09787ec5050d50d1e06cee8f138ff773a886c6
| 1,533
|
py
|
Python
|
poco/utils/simplerpc/rpcclient.py
|
felixonmars/Poco
|
f44bf05501bb54561c15ef1b7ad5e5342ba96110
|
[
"Apache-2.0"
] | null | null | null |
poco/utils/simplerpc/rpcclient.py
|
felixonmars/Poco
|
f44bf05501bb54561c15ef1b7ad5e5342ba96110
|
[
"Apache-2.0"
] | null | null | null |
poco/utils/simplerpc/rpcclient.py
|
felixonmars/Poco
|
f44bf05501bb54561c15ef1b7ad5e5342ba96110
|
[
"Apache-2.0"
] | null | null | null |
# encoding=utf-8
from simplerpc import RpcAgent
import simplerpc
import time
class RpcClient(RpcAgent):
CONNECTING, CONNECTED, CLOSED = 1, 2, 3
"""docstring for RpcClient"""
def __init__(self, conn):
super(RpcClient, self).__init__()
self.conn = conn
self.conn.connect_cb = self.on_connect
self.conn.close_cb = self.on_close
self._status = self.CONNECTING
self.conn.connect()
@property
def DEBUG(self):
return simplerpc.DEBUG
@DEBUG.setter
def DEBUG(self, value):
simplerpc.DEBUG = value
def on_connect(self):
if self._status == self.CONNECTING:
self._status = self.CONNECTED
def on_close(self):
self._status = self.CLOSED
def call(self, func, *args, **kwargs):
msg, cb = self.format_request(func, *args, **kwargs)
self.conn.send(msg)
return cb
def update(self):
if self._status != self.CONNECTED:
return
data = self.conn.recv()
if not data:
return
for msg in data:
self.handle_message(msg, self.conn)
def wait_connected(self):
for i in range(10):
print("waiting for connection...%s" % i)
if self._status == self.CONNECTED:
return True
elif self._status == self.CONNECTING:
time.sleep(0.5)
else:
raise RuntimeError("Connection Closed")
raise RuntimeError("connecting timeout")
| 25.55
| 60
| 0.582518
|
4f938719152240e3743c57c0bb7840194b6c6d46
| 633
|
py
|
Python
|
code/api/dashboard.py
|
CiscoSecurity/tr-05-docker-relay
|
8cf9cced02eb338d06d80419b35e563156ac6c9f
|
[
"MIT"
] | null | null | null |
code/api/dashboard.py
|
CiscoSecurity/tr-05-docker-relay
|
8cf9cced02eb338d06d80419b35e563156ac6c9f
|
[
"MIT"
] | null | null | null |
code/api/dashboard.py
|
CiscoSecurity/tr-05-docker-relay
|
8cf9cced02eb338d06d80419b35e563156ac6c9f
|
[
"MIT"
] | 1
|
2021-03-12T14:06:46.000Z
|
2021-03-12T14:06:46.000Z
|
from flask import Blueprint
from api.utils import jsonify_data, get_jwt, get_json
from api.schemas import DashboardTileSchema, DashboardTileDataSchema
dashboard_api = Blueprint('dashboard', __name__)
@dashboard_api.route('/tiles', methods=['POST'])
def tiles():
_ = get_jwt()
return jsonify_data([])
@dashboard_api.route('/tiles/tile', methods=['POST'])
def tile():
_ = get_jwt()
_ = get_json(DashboardTileSchema())
return jsonify_data({})
@dashboard_api.route('/tiles/tile-data', methods=['POST'])
def tile_data():
_ = get_jwt()
_ = get_json(DashboardTileDataSchema())
return jsonify_data({})
| 24.346154
| 68
| 0.709321
|
24e95fc227e4ca3903b02431eb8ddcdc2c5edec1
| 23,649
|
py
|
Python
|
tensorflow/python/ops/linalg_grad.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/ops/linalg_grad.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/ops/linalg_grad.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in linalg_ops.py.
Useful reference for derivative formulas is
An extended collection of matrix derivative results for forward and reverse
mode algorithmic differentiation by Mike Giles:
http://eprints.maths.ox.ac.uk/1079/1/NA-08-01.pdf
A detailed derivation of formulas for backpropagating through spectral layers
(SVD and Eig) by Ionescu, Vantzos & Sminchisescu:
https://arxiv.org/pdf/1509.07838v4.pdf
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as _linalg
@ops.RegisterGradient("MatrixInverse")
def _MatrixInverseGrad(op, grad):
"""Gradient for MatrixInverse."""
ainv = op.outputs[0]
return -math_ops.matmul(
ainv, math_ops.matmul(grad, ainv, adjoint_b=True), adjoint_a=True)
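

# The following check is illustrative only (not part of the original file): it
# verifies the reverse-mode identity used above, dL/dA = -inv(A)^T * G * inv(A)^T
# for L = sum(G * inv(A)), against a finite-difference approximation in NumPy.
def _check_matrix_inverse_grad_formula():
  """Finite-difference sanity check of the MatrixInverse gradient formula."""
  import numpy as np
  rng = np.random.RandomState(0)
  a = rng.randn(4, 4) + 4.0 * np.eye(4)  # well-conditioned test matrix
  g = rng.randn(4, 4)  # upstream gradient dL/d(inv(A))
  ainv = np.linalg.inv(a)
  analytic = -ainv.T.dot(g).dot(ainv.T)
  eps = 1e-6
  numeric = np.zeros_like(a)
  for i in range(4):
    for j in range(4):
      da = np.zeros_like(a)
      da[i, j] = eps
      numeric[i, j] = (np.sum(g * np.linalg.inv(a + da)) -
                       np.sum(g * np.linalg.inv(a - da))) / (2 * eps)
  assert np.allclose(analytic, numeric, atol=1e-5)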
@ops.RegisterGradient("MatrixDeterminant")
def _MatrixDeterminantGrad(op, grad):
"""Gradient for MatrixDeterminant."""
a = op.inputs[0]
c = op.outputs[0]
a_adj_inv = linalg_ops.matrix_inverse(a, adjoint=True)
multipliers = array_ops.reshape(grad * c,
array_ops.concat([array_ops.shape(c), [1, 1]],
0))
return multipliers * a_adj_inv
@ops.RegisterGradient("MatrixSquareRoot")
def _MatrixSquareRootGrad(op, grad):
"""Gradient for MatrixSquareRoot."""
# Let A be an m x m square matrix (or batch of matrices)
# Let R = sqrtm(A)
# By definition, A = RR
# Take the differential: dA = d(RR) = RdR + dRR
# Solve the resulting Sylvester equation for dR
# Used to find Kronecker products within the Sylvester equation
def _KroneckerProduct(b1, b2):
"""Computes the Kronecker product of two batches of square matrices"""
b1_shape = array_ops.shape(b1)
b2_shape = array_ops.shape(b2)
b1_order = b1_shape[-1]
b2_order = b2_shape[-1]
shape_slice_size = [math_ops.subtract(array_ops.size(b1_shape), 2)]
shape_slice = array_ops.slice(b1_shape, [0],
shape_slice_size) # Same for both batches
b1_reshape_shape = array_ops.concat(
[shape_slice, [b1_order], [1], [b1_order], [1]], 0)
b2_reshape_shape = array_ops.concat(
[shape_slice, [1], [b2_order], [1], [b2_order]], 0)
b1_reshape = array_ops.reshape(b1, b1_reshape_shape)
b2_reshape = array_ops.reshape(b2, b2_reshape_shape)
order_prod = b1_order * b2_order
kprod_shape = array_ops.concat([shape_slice, [order_prod], [order_prod]], 0)
return array_ops.reshape(b1_reshape * b2_reshape, kprod_shape)
sqrtm = op.outputs[0] # R
shape = array_ops.shape(sqrtm)
order = shape[-1] # m
matrix_count = math_ops.reduce_prod(shape[0:-2])
# Get batch of m x m identity matrices
eye = linalg_ops.eye(order, dtype=sqrtm.dtype) # m x m identity matrix
eye_flat = array_ops.reshape(eye, [-1])
eye_tiled = array_ops.tile(eye_flat, [matrix_count])
eye_batch = array_ops.reshape(eye_tiled, shape)
# The transpose of R is taken in the k1 term instead of k2 in
# order to prevent redundant transposition of R (i.e. (R')' = R)
sqrtm_transpose = array_ops.matrix_transpose(sqrtm)
k1 = _KroneckerProduct(eye_batch, sqrtm_transpose)
k2 = _KroneckerProduct(sqrtm, eye_batch)
ksum = math_ops.add(k1, k2)
# Vectorize dA
shape_slice_size = [math_ops.subtract(array_ops.size(shape), 2)]
shape_slice = array_ops.slice(shape, [0], shape_slice_size)
shape_vec_da = array_ops.concat([shape_slice, [order * order], [1]], 0)
vec_da = array_ops.reshape(array_ops.matrix_transpose(grad), shape_vec_da)
# Solve for vec(dR)
vec_dsqrtm = linalg_ops.matrix_solve(ksum, vec_da)
# Solve for dR by inverse vectorizing vec(dR)
dsqrtm_transpose = array_ops.reshape(vec_dsqrtm, shape)
return array_ops.matrix_transpose(dsqrtm_transpose)
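

# Illustrative check (not part of the original file): with R = sqrtm(A), the
# differential dR solves the Sylvester equation R*dR + dR*R = dA described above.
# SciPy is assumed here purely for the verification.
def _check_sqrtm_sylvester_relation():
  """Compares the Sylvester-equation solution for dR with a direct perturbation."""
  import numpy as np
  from scipy.linalg import sqrtm, solve_sylvester
  rng = np.random.RandomState(0)
  m = rng.randn(4, 4)
  a = m.dot(m.T) + 4.0 * np.eye(4)  # symmetric positive definite input
  da = 1e-6 * rng.randn(4, 4)  # small perturbation of A
  r = sqrtm(a)
  dr_sylvester = solve_sylvester(r, r, da)  # solves R*X + X*R = dA
  dr_direct = sqrtm(a + da) - r
  assert np.allclose(dr_sylvester, dr_direct, atol=1e-8)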
@ops.RegisterGradient("LogMatrixDeterminant")
def _LogMatrixDeterminantGrad(op, _, grad_b):
"""Gradient for LogMatrixDeterminant."""
a = op.inputs[0]
c = op.outputs[1]
a_adj_inv = linalg_ops.matrix_inverse(a, adjoint=True)
multipliers = array_ops.reshape(
grad_b, array_ops.concat([array_ops.shape(c), [1, 1]], 0))
return multipliers * a_adj_inv
@ops.RegisterGradient("Cholesky")
def _CholeskyGrad(op, grad):
"""Gradient for Cholesky."""
# Gradient is l^{-H} @ ((l^{H} @ grad) * (tril(ones)-1/2*eye)) @ l^{-1}
l = op.outputs[0]
num_rows = array_ops.shape(l)[-1]
batch_shape = array_ops.shape(l)[:-2]
l_inverse = linalg_ops.matrix_triangular_solve(l,
linalg_ops.eye(
num_rows,
batch_shape=batch_shape,
dtype=l.dtype))
middle = math_ops.matmul(l, grad, adjoint_a=True)
middle = array_ops.matrix_set_diag(middle,
0.5 * array_ops.matrix_diag_part(middle))
middle = array_ops.matrix_band_part(middle, -1, 0)
grad_a = math_ops.matmul(
math_ops.matmul(l_inverse, middle, adjoint_a=True), l_inverse)
grad_a += _linalg.adjoint(grad_a)
return grad_a * 0.5
@ops.RegisterGradient("Qr")
def _QrGrad(op, dq, dr):
"""Gradient for Qr."""
q, r = op.outputs
if q.dtype.is_complex:
raise NotImplementedError("QrGrad not implemented for dtype: %s" % q.dtype)
if (r.shape.ndims is None or r.shape.as_list()[-2] is None or
r.shape.as_list()[-1] is None):
raise NotImplementedError("QrGrad not implemented with dynamic shapes.")
if r.shape.dims[-2].value != r.shape.dims[-1].value:
raise NotImplementedError("QrGrad not implemented when ncols > nrows "
"or full_matrices is true and ncols != nrows.")
qdq = math_ops.matmul(q, dq, adjoint_a=True)
qdq_ = qdq - _linalg.adjoint(qdq)
rdr = math_ops.matmul(r, dr, adjoint_b=True)
rdr_ = rdr - _linalg.adjoint(rdr)
tril = array_ops.matrix_band_part(qdq_ + rdr_, -1, 0)
def _TriangularSolve(x, r):
"""Equiv to matmul(x, adjoint(matrix_inverse(r))) if r is upper-tri."""
return _linalg.adjoint(
linalg_ops.matrix_triangular_solve(
r, _linalg.adjoint(x), lower=False, adjoint=False))
grad_a = math_ops.matmul(q, dr + _TriangularSolve(tril, r))
grad_b = _TriangularSolve(dq - math_ops.matmul(q, qdq), r)
return grad_a + grad_b
@ops.RegisterGradient("MatrixSolve")
def _MatrixSolveGrad(op, grad):
"""Gradient for MatrixSolve."""
a = op.inputs[0]
adjoint_a = op.get_attr("adjoint")
c = op.outputs[0]
grad_b = linalg_ops.matrix_solve(a, grad, adjoint=not adjoint_a)
if adjoint_a:
grad_a = -math_ops.matmul(c, grad_b, adjoint_b=True)
else:
grad_a = -math_ops.matmul(grad_b, c, adjoint_b=True)
return (grad_a, grad_b)
@ops.RegisterGradient("MatrixSolveLs")
def _MatrixSolveLsGrad(op, grad):
"""Gradients for MatrixSolveLs."""
# TODO(rmlarsen): The implementation could be more efficient:
# a) Output the Cholesky factorization from forward op instead of
# recomputing it here.
# b) Implement a symmetric rank-k update op instead of computing
# x*z + transpose(x*z). This pattern occurs other places in TensorFlow.
def _Overdetermined(op, grad):
"""Gradients for the overdetermined case of MatrixSolveLs.
This is the backprop for the solution to the normal equations of the first
kind:
X = F(A, B) = (A^T * A + lambda * I)^{-1} * A^T * B
which solve the least squares problem
min ||A * X - B||_F^2 + lambda ||X||_F^2.
"""
a = op.inputs[0]
b = op.inputs[1]
x = op.outputs[0]
l2_regularizer = math_ops.cast(op.inputs[2], a.dtype.base_dtype)
# pylint: disable=protected-access
chol = linalg_ops._RegularizedGramianCholesky(
a, l2_regularizer=l2_regularizer, first_kind=True)
# pylint: enable=protected-access
# Temporary z = (A^T * A + lambda * I)^{-1} * grad.
z = linalg_ops.cholesky_solve(chol, grad)
xzt = math_ops.matmul(x, z, adjoint_b=True)
zx_sym = xzt + array_ops.matrix_transpose(xzt)
grad_a = -math_ops.matmul(a, zx_sym) + math_ops.matmul(b, z, adjoint_b=True)
grad_b = math_ops.matmul(a, z)
return (grad_a, grad_b, None)
def _Underdetermined(op, grad):
"""Gradients for the underdetermined case of MatrixSolveLs.
This is the backprop for the solution to the normal equations of the second
kind:
X = F(A, B) = A * (A*A^T + lambda*I)^{-1} * B
that (for lambda=0) solve the least squares problem
min ||X||_F subject to A*X = B.
"""
a = op.inputs[0]
b = op.inputs[1]
l2_regularizer = math_ops.cast(op.inputs[2], a.dtype.base_dtype)
# pylint: disable=protected-access
chol = linalg_ops._RegularizedGramianCholesky(
a, l2_regularizer=l2_regularizer, first_kind=False)
# pylint: enable=protected-access
grad_b = linalg_ops.cholesky_solve(chol, math_ops.matmul(a, grad))
# Temporary tmp = (A * A^T + lambda * I)^{-1} * B.
tmp = linalg_ops.cholesky_solve(chol, b)
a1 = math_ops.matmul(tmp, a, adjoint_a=True)
a1 = -math_ops.matmul(grad_b, a1)
a2 = grad - math_ops.matmul(a, grad_b, adjoint_a=True)
a2 = math_ops.matmul(tmp, a2, adjoint_b=True)
grad_a = a1 + a2
return (grad_a, grad_b, None)
fast = op.get_attr("fast")
if fast is False:
raise ValueError("Gradient not defined for fast=False")
matrix_shape = op.inputs[0].get_shape()[-2:]
if matrix_shape.is_fully_defined():
if matrix_shape[-2] >= matrix_shape[-1]:
return _Overdetermined(op, grad)
else:
return _Underdetermined(op, grad)
else:
# We have to defer determining the shape to runtime and use
# conditional execution of the appropriate graph.
matrix_shape = array_ops.shape(op.inputs[0])[-2:]
return control_flow_ops.cond(matrix_shape[-2] >= matrix_shape[-1],
lambda: _Overdetermined(op, grad),
lambda: _Underdetermined(op, grad))
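

# Illustrative NumPy check (not part of the original file) of the normal-equations
# solution referenced in the docstrings above: X = (A^T*A + lambda*I)^{-1} * A^T * B
# minimizes ||A*X - B||_F^2 + lambda*||X||_F^2 and reduces to ordinary least squares
# when lambda = 0.
def _check_normal_equations_reduce_to_lstsq():
  """Checks the lambda = 0 normal-equations solution against np.linalg.lstsq."""
  import numpy as np
  rng = np.random.RandomState(0)
  a = rng.randn(8, 3)  # overdetermined system, m > n
  b = rng.randn(8, 2)
  x_normal = np.linalg.solve(a.T.dot(a), a.T.dot(b))
  x_lstsq = np.linalg.lstsq(a, b, rcond=None)[0]
  assert np.allclose(x_normal, x_lstsq)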
@ops.RegisterGradient("MatrixTriangularSolve")
def _MatrixTriangularSolveGrad(op, grad):
"""Gradient for MatrixTriangularSolve."""
a = op.inputs[0]
adjoint_a = op.get_attr("adjoint")
lower_a = op.get_attr("lower")
c = op.outputs[0]
grad_b = linalg_ops.matrix_triangular_solve(
a, grad, lower=lower_a, adjoint=not adjoint_a)
if adjoint_a:
grad_a = -math_ops.matmul(c, grad_b, adjoint_b=True)
else:
grad_a = -math_ops.matmul(grad_b, c, adjoint_b=True)
if lower_a:
grad_a = array_ops.matrix_band_part(grad_a, -1, 0)
else:
grad_a = array_ops.matrix_band_part(grad_a, 0, -1)
return (grad_a, grad_b)
@ops.RegisterGradient("SelfAdjointEigV2")
def _SelfAdjointEigV2Grad(op, grad_e, grad_v):
"""Gradient for SelfAdjointEigV2."""
e = op.outputs[0]
compute_v = op.get_attr("compute_v")
# a = op.inputs[0], which satisfies
# a[...,:,:] * v[...,:,i] = e[...,i] * v[...,i]
with ops.control_dependencies([grad_e, grad_v]):
if compute_v:
v = op.outputs[1]
# Construct the matrix f(i,j) = (i != j ? 1 / (e_i - e_j) : 0).
# Notice that because of the term involving f, the gradient becomes
# infinite (or NaN in practice) when eigenvalues are not unique.
# Mathematically this should not be surprising, since for (k-fold)
# degenerate eigenvalues, the corresponding eigenvectors are only defined
# up to arbitrary rotation in a (k-dimensional) subspace.
f = array_ops.matrix_set_diag(
math_ops.reciprocal(
array_ops.expand_dims(e, -2) - array_ops.expand_dims(e, -1)),
array_ops.zeros_like(e))
grad_a = math_ops.matmul(
v,
math_ops.matmul(
array_ops.matrix_diag(grad_e) +
f * math_ops.matmul(v, grad_v, adjoint_a=True),
v,
adjoint_b=True))
else:
_, v = linalg_ops.self_adjoint_eig(op.inputs[0])
grad_a = math_ops.matmul(v,
math_ops.matmul(
array_ops.matrix_diag(grad_e),
v,
adjoint_b=True))
# The forward op only depends on the lower triangular part of a, so here we
# symmetrize and take the lower triangle
grad_a = array_ops.matrix_band_part(grad_a + _linalg.adjoint(grad_a), -1, 0)
grad_a = array_ops.matrix_set_diag(grad_a,
0.5 * array_ops.matrix_diag_part(grad_a))
return grad_a
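

# Illustrative check (not part of the original file) of the first-order perturbation
# fact behind the eigenvalue part of the gradient above: for symmetric A with
# distinct eigenvalues, d(e_i) = v_i^T * dA * v_i to first order.
def _check_eigenvalue_perturbation():
  """Compares first-order eigenvalue changes with the v_i^T*dA*v_i prediction."""
  import numpy as np
  rng = np.random.RandomState(0)
  m = rng.randn(5, 5)
  a = (m + m.T) / 2.0
  da = 1e-6 * rng.randn(5, 5)
  da = (da + da.T) / 2.0  # keep the perturbation symmetric
  e0, v0 = np.linalg.eigh(a)
  e1 = np.linalg.eigvalsh(a + da)
  predicted = np.array([v0[:, i].dot(da).dot(v0[:, i]) for i in range(5)])
  assert np.allclose(e1 - e0, predicted, atol=1e-8)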
@ops.RegisterGradient("Svd")
def _SvdGrad(op, grad_s, grad_u, grad_v):
"""Gradient for the singular value decomposition."""
# The derivation for the compute_uv=False case, and most of
# the derivation for the full_matrices=True case, are in
# Giles' paper (see reference at top of file). A derivation for
# the full_matrices=False case is available at
# https://j-towns.github.io/papers/svd-derivative.pdf
a = op.inputs[0]
a_shape = a.get_shape().with_rank_at_least(2)
grad_s_mat = array_ops.matrix_diag(grad_s)
if not op.get_attr("compute_uv"):
s, u, v = linalg_ops.svd(a, compute_uv=True)
grad_a = math_ops.matmul(u, math_ops.matmul(grad_s_mat, v, adjoint_b=True))
grad_a.set_shape(a_shape)
return grad_a
full_matrices = op.get_attr("full_matrices")
# TODO(rmlarsen): Make this work with complex types.
if a.dtype.is_complex:
raise NotImplementedError(
"SVD gradient is not implemented for complex types and "
"compute_uv=True.")
grad_u_shape = grad_u.get_shape().with_rank_at_least(2)
grad_v_shape = grad_v.get_shape().with_rank_at_least(2)
m = a_shape.dims[-2].merge_with(grad_u_shape[-2])
n = a_shape.dims[-1].merge_with(grad_v_shape[-2])
batch_shape = a_shape[:-2].merge_with(grad_u_shape[:-2]).merge_with(
grad_v_shape[:-2])
a_shape = batch_shape.concatenate([m, n])
m = a_shape.dims[-2].value
n = a_shape.dims[-1].value
# TODO(rmlarsen): Make this work with placeholders.
if m is None or n is None:
raise NotImplementedError(
"SVD gradient has not been implemented for input with unknown "
"inner matrix shape.")
s = op.outputs[0]
u = op.outputs[1]
v = op.outputs[2]
use_adjoint = False
if m > n:
# Compute the gradient for A^H = V * S^T * U^H, and (implicitly) take the
# Hermitian transpose of the gradient at the end.
use_adjoint = True
m, n = n, m
u, v = v, u
grad_u, grad_v = grad_v, grad_u
with ops.control_dependencies([grad_s, grad_u, grad_v]):
if full_matrices and abs(m - n) > 1:
raise NotImplementedError(
"svd gradient is not implemented for abs(m - n) > 1 "
"when full_matrices is True")
s_mat = array_ops.matrix_diag(s)
s2 = math_ops.square(s)
# NOTICE: Because of the term involving f, the gradient becomes
# infinite (or NaN in practice) when singular values are not unique.
# Mathematically this should not be surprising, since for (k-fold)
# degenerate singular values, the corresponding singular vectors are
    # only defined up to a (k-dimensional) subspace. In practice, this can
# lead to numerical instability when singular values are close but not
# exactly equal.
# Also, even with distinct singular values, the diagonal of f can have Inf
# values before setting to zero, which hurt when differentiating through
# this op. To avoid that, we add eye to the matrix before taking
# the reciprocal.
s_shape = array_ops.shape(s)
eye = _linalg.eye(s_shape[-1], batch_shape=s_shape[:-1], dtype=s.dtype)
f = array_ops.matrix_set_diag(
math_ops.reciprocal(
array_ops.expand_dims(s2, -2) - array_ops.expand_dims(s2, -1) +
eye), array_ops.zeros_like(s))
s_inv_mat = array_ops.matrix_diag(math_ops.reciprocal(s))
v1 = v[..., :, :m]
grad_v1 = grad_v[..., :, :m]
u_gu = math_ops.matmul(u, grad_u, adjoint_a=True)
v_gv = math_ops.matmul(v1, grad_v1, adjoint_a=True)
f_u = f * u_gu
f_v = f * v_gv
term1_nouv = (
grad_s_mat + math_ops.matmul(f_u + _linalg.adjoint(f_u), s_mat) +
math_ops.matmul(s_mat, f_v + _linalg.adjoint(f_v)))
term1 = math_ops.matmul(u, math_ops.matmul(term1_nouv, v1, adjoint_b=True))
if m == n:
grad_a_before_transpose = term1
else:
gv1t = array_ops.matrix_transpose(grad_v1)
gv1t_v1 = math_ops.matmul(gv1t, v1)
term2_nous = gv1t - math_ops.matmul(gv1t_v1, v1, adjoint_b=True)
if full_matrices:
v2 = v[..., :, m:n]
grad_v2 = grad_v[..., :, m:n]
v1t_gv2 = math_ops.matmul(v1, grad_v2, adjoint_a=True)
term2_nous -= math_ops.matmul(v1t_gv2, v2, adjoint_b=True)
u_s_inv = math_ops.matmul(u, s_inv_mat)
term2 = math_ops.matmul(u_s_inv, term2_nous)
grad_a_before_transpose = term1 + term2
if use_adjoint:
grad_a = array_ops.matrix_transpose(grad_a_before_transpose)
else:
grad_a = grad_a_before_transpose
grad_a.set_shape(a_shape)
return grad_a
def _LeftShift(x):
"""Shifts next-to-last dimension to the left, adding zero on the right."""
rank = array_ops.rank(x)
zeros = array_ops.zeros((rank - 2, 2), dtype=dtypes.int32)
pad = array_ops.concat([zeros, array_ops.constant([[0, 1], [0, 0]])], axis=0)
return array_ops.pad(x[..., 1:, :], pad)
def _RightShift(x):
"""Shifts next-to-last dimension to the right, adding zero on the left."""
rank = array_ops.rank(x)
zeros = array_ops.zeros((rank - 2, 2), dtype=dtypes.int32)
pad = array_ops.concat([zeros, array_ops.constant([[1, 0], [0, 0]])], axis=0)
return array_ops.pad(x[..., :-1, :], pad)
@ops.RegisterGradient("TridiagonalMatMul")
def _TridiagonalMatMulGrad(op, grad):
"""Gradient for TridiagonalMatMul."""
superdiag_conj = array_ops.matrix_transpose(op.inputs[0], conjugate=True)
maindiag_conj = array_ops.matrix_transpose(op.inputs[1], conjugate=True)
subdiag_conj = array_ops.matrix_transpose(op.inputs[2], conjugate=True)
rhs_conj = math_ops.conj(op.inputs[3])
superdiag_grad = math_ops.reduce_sum(_LeftShift(rhs_conj) * grad, axis=-1)
maindiag_grad = math_ops.reduce_sum(rhs_conj * grad, axis=-1)
subdiag_grad = math_ops.reduce_sum(_RightShift(rhs_conj) * grad, axis=-1)
rhs_grad = _RightShift(superdiag_conj * grad) + \
maindiag_conj * grad + _LeftShift(subdiag_conj * grad)
superdiag_grad = array_ops.expand_dims(superdiag_grad, -2)
maindiag_grad = array_ops.expand_dims(maindiag_grad, -2)
subdiag_grad = array_ops.expand_dims(subdiag_grad, -2)
return superdiag_grad, maindiag_grad, subdiag_grad, rhs_grad
@ops.RegisterGradient("TridiagonalSolve")
def _TridiagonalSolveGrad(op, grad):
"""Gradient for TridiagonalSolveGrad."""
diags = op.inputs[0]
x = op.outputs[0]
partial_pivoting = op.get_attr("partial_pivoting")
# Transposing the matrix within tridiagonal_solve kernel by interchanging
# superdiagonal and subdiagonal wouldn't work on GPU due to mismatch with
# paddings required by cusparse*gtsv routines.
# So constructing the transposed matrix in Python.
diags_transposed = _TransposeTridiagonalMatrix(diags)
grad_rhs = linalg_ops.tridiagonal_solve(diags_transposed, grad,
partial_pivoting=partial_pivoting)
grad_diags = -_MatmulExtractingThreeDiagonals(grad_rhs, x)
return grad_diags, grad_rhs
def _TransposeTridiagonalMatrix(diags):
"""Transposes a tridiagonal matrix.
Args:
diags: the diagonals of the input matrix in the compact form (see
linalg_ops.tridiagonal_solve).
Returns:
Diagonals of the transposed matrix in the compact form.
"""
diag = diags[..., 1, :]
if diags.shape.is_fully_defined():
# For fully defined tensor we can concat with a tensor of zeros, which is
# faster than using array_ops.pad().
zeros = array_ops.zeros(list(diags.shape[:-2]) + [1], dtype=diags.dtype)
superdiag = array_ops.concat((diags[..., 2, 1:], zeros), axis=-1)
subdiag = array_ops.concat((zeros, diags[..., 0, :-1]), axis=-1)
else:
rank = array_ops.rank(diags)
zeros = array_ops.zeros((rank - 2, 2), dtype=dtypes.int32)
superdiag_pad = array_ops.concat((zeros, array_ops.constant([[0, 1]])),
axis=0)
superdiag = array_ops.pad(diags[..., 2, 1:], superdiag_pad)
subdiag_pad = array_ops.concat((zeros, array_ops.constant([[1, 0]])),
axis=0)
subdiag = array_ops.pad(diags[..., 0, :-1], subdiag_pad)
return array_ops.stack([superdiag, diag, subdiag], axis=-2)
def _MatmulExtractingThreeDiagonals(x, y_tr):
"""Multiplies matrices and extracts three diagonals from the product.
With sizes M x K and K x M, this function takes O(MK) time and O(M) space,
while using math_ops.matmul, and then extracting the diagonals would take
O(M^2 K) time and O(M^2) space.
Args:
x: first matrix
y_tr: second matrix transposed
Returns:
Diagonals of the product in compact format (see
linalg_ops.tridiagonal_solve)
"""
diag = math_ops.reduce_sum(x * y_tr, axis=-1)
if y_tr.shape.is_fully_defined():
zeros = array_ops.zeros(
list(x.shape[:-2]) + [1, x.shape[-1]], dtype=x.dtype)
superdiag = math_ops.reduce_sum(
x * array_ops.concat((y_tr[..., 1:, :], zeros), axis=-2), axis=-1)
subdiag = math_ops.reduce_sum(
x * array_ops.concat((zeros, y_tr[..., :-1, :]), axis=-2), axis=-1)
else:
rank = array_ops.rank(y_tr)
zeros = array_ops.zeros((rank - 2, 2), dtype=dtypes.int32)
superdiag_pad = array_ops.concat(
(zeros, array_ops.constant([[0, 1], [0, 0]])), axis=0)
superdiag = math_ops.reduce_sum(
x * array_ops.pad(y_tr[..., 1:, :], superdiag_pad), axis=-1)
subdiag_pad = array_ops.concat(
(zeros, array_ops.constant([[1, 0], [0, 0]])), axis=0)
subdiag = math_ops.reduce_sum(
x * array_ops.pad(y_tr[..., :-1, :], subdiag_pad), axis=-1)
return array_ops.stack([superdiag, diag, subdiag], axis=-2)
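

# Illustrative NumPy check (not part of the original file; no batching) of the
# O(MK) three-diagonal extraction described above, against the naive dense matmul.
def _check_three_diagonal_extraction():
  """Compares the compact diagonal computation with np.diag of the full product."""
  import numpy as np
  rng = np.random.RandomState(0)
  x = rng.randn(6, 4)
  y = rng.randn(4, 6)
  y_tr = y.T
  full = x.dot(y)
  diag = np.sum(x * y_tr, axis=-1)
  superdiag = np.sum(x[:-1] * y_tr[1:], axis=-1)  # full[i, i + 1]
  subdiag = np.sum(x[1:] * y_tr[:-1], axis=-1)  # full[i, i - 1]
  assert np.allclose(diag, np.diag(full))
  assert np.allclose(superdiag, np.diag(full, 1))
  assert np.allclose(subdiag, np.diag(full, -1))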
| 39.746218
| 81
| 0.653854
|
e23ead7e6c3e444596f035fbf46988ec1b586e00
| 13,870
|
py
|
Python
|
byteblower/samples/wireless_endpoint/ipv6_tcp.py
|
shmir/PyByteBlower
|
fe6962a8f1c5b1fde3184bc05b4d16be7ddb628f
|
[
"BSD-3-Clause"
] | null | null | null |
byteblower/samples/wireless_endpoint/ipv6_tcp.py
|
shmir/PyByteBlower
|
fe6962a8f1c5b1fde3184bc05b4d16be7ddb628f
|
[
"BSD-3-Clause"
] | null | null | null |
byteblower/samples/wireless_endpoint/ipv6_tcp.py
|
shmir/PyByteBlower
|
fe6962a8f1c5b1fde3184bc05b4d16be7ddb628f
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import print_function
import time
import random
import datetime
from byteblower.byteblowerll.byteblower import ByteBlower, DeviceStatus_Reserved
import sys
configuration = {
# Address (IP or FQDN) of the ByteBlower server to use
'server_address': 'byteblower-tp-1300.lab.byteblower.excentis.com',
# Interface on the server to create a port on.
'server_interface': 'trunk-1-13',
# MAC address of the ByteBlower port which will be generated
'port_mac_address': '00:bb:01:00:00:01',
# IP configuration for the ByteBlower Port.
# Options are
# * DHCP
# * SLAAC
# * static
# if DHCP, use "dhcp"
# 'port_ip_address': 'dhcp',
# if SLAAC, use "slaac"
'port_ip_address': 'slaac',
# if static, use ["ipaddress", prefixlength]
# 'port_ip_address': ['3000:3128::24', '64'],
# Address (IP or FQDN) of the ByteBlower Meetingpoint to use. The wireless
# endpoint *must* be registered on this meetingpoint.
# Special value: None. When the address is set to None, the server_address
# will be used.
'meetingpoint_address': None,
# UUID of the ByteBlower WirelessEndpoint to use. This wireless endpoint
# *must* be registered to the meetingpoint configured by
# meetingpoint_address.
# Special value: None. When the UUID is set to None, the example will
# automatically select the first available wireless
# endpoint.
'wireless_endpoint_uuid': None,
# 'wireless_endpoint_uuid': 'fd9d9566-8aa3-47c3-9d4b-e597362728d1',
# TCP port for the HTTP server
'port_tcp_port': 4096,
# TCP port for the HTTP Client
'wireless_endpoint_tcp_port': 4096,
# HTTP Method
# HTTP Method can be GET or PUT
# - GET: Standard HTTP download, we retrieve data from the web server
# - PUT: Standard HTTP upload, the wireless endpoint will push data to the
# webserver
'http_method': 'GET',
# 'http_method': 'PUT',
# duration, in nanoseconds
# Duration of the session
'duration': 10000000000,
# TOS value to use on the HTTP client (and server)
'tos': 0
}
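
# For reference, a hypothetical variant of the configuration above using the static
# IPv6 addressing and HTTP upload options documented in the comments (the address
# and prefix length are example values only):
configuration_static_upload = dict(
    configuration,
    port_ip_address=['3000:3128::24', '64'],
    http_method='PUT',
)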
class Example:
def __init__(self, **kwargs):
self.server_address = kwargs['server_address']
self.server_interface = kwargs['server_interface']
self.port_mac_address = kwargs['port_mac_address']
self.port_ip_address = kwargs['port_ip_address']
self.meetingpoint_address = kwargs['meetingpoint_address']
if self.meetingpoint_address is None:
self.meetingpoint_address = self.server_address
self.wireless_endpoint_uuid = kwargs['wireless_endpoint_uuid']
self.port_tcp_port = kwargs['port_tcp_port']
self.wireless_endpoint_tcp_port = kwargs['wireless_endpoint_tcp_port']
        # Helper function: convert the HTTP method string to the
        # enumeration used by the API
from byteblower.byteblowerll import ParseHTTPRequestMethodFromString
self.http_method = ParseHTTPRequestMethodFromString(kwargs['http_method'])
self.duration = kwargs['duration']
self.tos = kwargs['tos']
self.server = None
self.port = None
self.meetingpoint = None
self.wireless_endpoint = None
def run(self):
# duration of the samples taken. (nanoseconds)
sample_duration = 100000000
# number of samples to take:
# ( test_duration / sample_duration) is just enough, so we are doubling
# this so we have more than enough
        sample_count = 2 * (self.duration // sample_duration)
instance = ByteBlower.InstanceGet()
assert isinstance(instance, ByteBlower)
# Connect to the server
self.server = instance.ServerAdd(self.server_address)
# create and configure the port.
self.port = self.server.PortCreate(self.server_interface)
# configure the MAC address on the port
port_layer2_config = self.port.Layer2EthIISet()
port_layer2_config.MacSet(self.port_mac_address)
# configure the IP addressing on the port
port_layer3_config = self.port.Layer3IPv6Set()
if type(self.port_ip_address) is str and self.port_ip_address.lower() == 'dhcp':
# DHCP is configured on the DHCP protocol
dhcp_protocol = port_layer3_config.ProtocolDhcpGet()
dhcp_protocol.Perform()
elif type(self.port_ip_address) is str and self.port_ip_address.lower() == 'slaac':
# wait for stateless autoconfiguration to complete
port_layer3_config.StatelessAutoconfiguration()
else:
# Static addressing
address = self.port_ip_address[0]
prefixlength = self.port_ip_address[1]
ip = "{}/{}".format(address, prefixlength)
port_layer3_config.IpManualAdd(ip)
print("Created port", self.port.DescriptionGet())
# Connect to the meetingpoint
self.meetingpoint = instance.MeetingPointAdd(self.meetingpoint_address)
# If no WirelessEndpoint UUID was given, search an available one.
if self.wireless_endpoint_uuid is None:
self.wireless_endpoint_uuid = self.select_wireless_endpoint_uuid()
# Get the WirelessEndpoint device
self.wireless_endpoint = self.meetingpoint.DeviceGet(self.wireless_endpoint_uuid)
print("Using wireless endpoint", self.wireless_endpoint.DescriptionGet())
# Now we have the correct information to start configuring the flow.
# Claim the wireless endpoint for ourselves. This means that nobody
# but us can use this device.
self.wireless_endpoint.Lock(True)
# Configure the HTTP server, running on the ByteBlower port.
http_server = self.port.ProtocolHttpServerAdd()
if self.port_tcp_port is None:
self.port_tcp_port = random.randint(10000, 40000)
        # Configure the TCP port on which the HTTP server will listen
http_server.PortSet(self.port_tcp_port)
# Configure the receive window.
http_server.ReceiveWindowScalingEnable(True)
http_server.ReceiveWindowScalingValueSet(7)
# Tell the ByteBlower to sample every sample_duration and keep up to
# sample_count samples (see top of this function)
http_server.HistorySamplingIntervalDurationSet(sample_duration)
http_server.HistorySamplingBufferLengthSet(sample_count)
        # An HTTP server will not listen for new connections as long as it is not
        # started. You can compare it to e.g. Apache or nginx: neither accepts
        # new connections as long as the daemon is not started.
http_server.Start()
print("HTTP server configuration:", http_server.DescriptionGet())
# Configure the client.
http_client = self.wireless_endpoint.ProtocolHttpClientAdd()
# Configure the remote endpoint to which it must connect.
# This is the IP address and port of the HTTP server configured above
port_layer3_config = self.port.Layer3IPv6Get()
ipv6_addresses = port_layer3_config.IpLinkLocalGet()
if self.port_ip_address == "dhcp":
ipv6_addresses = port_layer3_config.IpDhcpGet()
elif self.port_ip_address == "slaac":
ipv6_addresses = port_layer3_config.IpStatelessGet()
elif isinstance(self.port_ip_address, list):
ipv6_addresses = port_layer3_config.IpManualGet()
address = None
for ipv6_address in ipv6_addresses:
address = ipv6_address.split("/")[0]
http_client.RemoteAddressSet(address)
http_client.RemotePortSet(self.port_tcp_port)
http_client.RequestDurationSet(self.duration)
http_client.RequestInitialTimeToWaitSet(0)
# What will we do? HTTP Get or HTTP PUT?
http_client.HttpMethodSet(self.http_method)
http_client.TypeOfServiceSet(self.tos)
print("HTTP client configuration:", http_client.DescriptionGet())
try:
self.wireless_endpoint.Prepare()
self.wireless_endpoint.Start()
except Exception as e:
            print("Error: could not start the wireless endpoint")
            print(e)
sys.exit(-1)
# Wait until the device returns.
        # As long as the device is running, the device will be in
# - DeviceStatus_Starting
# - DeviceStatus_Running
        # As soon as the device has finished the test, it will return to
# 'DeviceStatus_Reserved', since we have a Lock on the device.
status = self.wireless_endpoint.StatusGet()
start_moment = datetime.datetime.now()
while status != DeviceStatus_Reserved:
time.sleep(1)
status = self.wireless_endpoint.StatusGet()
now = datetime.datetime.now()
print(str(now), ":: Running for", str(now - start_moment), "::",
http_server.ClientIdentifiersGet().size(), "client(s) connected")
# Wireless Endpoint has returned. Collect and process the results.
        # Since the HTTP server was freshly created, there will be only 1 client.
client_idents = http_server.ClientIdentifiersGet()
if len(client_idents) == 0:
print("Nothing connected")
sys.exit(-1)
first = client_idents[0]
http_session = http_server.HttpSessionInfoGet(first)
http_hist = http_session.ResultHistoryGet()
http_hist.Refresh()
        # collect the results so they can be written to CSV for further analysis
collected_results = self.collect_results(http_hist)
cumulative_result = http_hist.CumulativeLatestGet()
mbit_s = cumulative_result.AverageDataSpeedGet().MbpsGet()
print("Average throughput", mbit_s, "Mbps")
print("Removing the server")
self.port.ProtocolHttpServerRemove(http_server)
print("Removing the client")
self.wireless_endpoint.ProtocolHttpClientRemove(http_client)
# Cleanup
self.server.PortDestroy(self.port)
self.wireless_endpoint.Lock(False)
return collected_results
def select_wireless_endpoint_uuid(self):
"""
Walk over all known devices on the meetingpoint.
If the device has the status 'Available', return its UUID, otherwise return None
:return: a string representing the UUID or None
"""
from byteblower.byteblowerll import DeviceStatus_Available
for device in self.meetingpoint.DeviceListGet():
# is the status Available?
if device.StatusGet() == DeviceStatus_Available:
# yes, return the UUID
return device.DeviceIdentifierGet()
# No device found, return None
return None
    def collect_results(self, http_hist):
        """Collect the per-interval and cumulative results from the HTTP history.
        """
sample_duration = http_hist.SamplingIntervalDurationGet()
tx_samples = []
for tt in http_hist.IntervalGet():
tx_data = tt.TxByteCountTotalGet()
timestamp = tt.TimestampGet()
tx_samples.append((timestamp, tx_data, self.bytes_per_sample_to_mbit_s(sample_duration, tx_data)))
rx_samples = []
for tt in http_hist.IntervalGet():
rx_data = tt.RxByteCountTotalGet()
timestamp = tt.TimestampGet()
rx_samples.append((timestamp, rx_data, self.bytes_per_sample_to_mbit_s(sample_duration, rx_data)))
cumulative_samples = []
if http_hist.CumulativeLengthGet() > 0:
last_cumul = http_hist.CumulativeLatestGet()
mbit_s = last_cumul.AverageDataSpeedGet().MbpsGet()
uploaded = last_cumul.TxByteCountTotalGet()
downloaded = last_cumul.RxByteCountTotalGet()
timestamp = last_cumul.TimestampGet()
cumulative_samples.append((timestamp, mbit_s, uploaded, downloaded))
return {
'we': {
'uuid': self.wireless_endpoint.DeviceIdentifierGet(),
'givenname': self.wireless_endpoint.DeviceInfoGet().GivenNameGet()
},
'tx': tx_samples,
'rx': rx_samples,
'cumulative': cumulative_samples
}
@staticmethod
def bytes_per_sample_to_mbit_s(sample_duration, n_bytes):
"""
Utility method for conversion.
It converts bytes in a sample to Mbit/s.
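        Worked example (illustrative): 1,250,000 bytes in a 100 ms sample
        (sample_duration = 1e8 ns) give (1250000 * 8 * 1e9) / (1e6 * 1e8) = 100 Mbit/s.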
"""
return (n_bytes * 8 * 1e9) / (1e6 * sample_duration)
def human_readable_date(bb_timestamp):
return str(datetime.datetime.fromtimestamp(bb_timestamp / 1e9))
def make_csv_line(we_uuid, we_name, *items):
all_itemslist = [we_uuid, we_name] + list(items)
all_items = map(str, all_itemslist)
return (", ".join(all_items)) + "\n"
if __name__ == '__main__':
results = Example(**configuration).run()
# Write the results to CSV files, those can be analyzed later.
uuid = results['we']['uuid']
givenname = results['we']['givenname']
with open('tx_tcp_server_interval.csv', 'a') as tx_results:
for tx_sample in results['tx']:
ts = human_readable_date(int(tx_sample[0]))
tx_results.write(make_csv_line(uuid, givenname, ts, *list(tx_sample)))
with open('rx_tcp_server_interval.csv', 'a') as rx_results:
for rx_sample in results['rx']:
ts = human_readable_date(int(rx_sample[0]))
rx_results.write(make_csv_line(uuid, givenname, ts, *list(rx_sample)))
with open('cumulative_http_server.csv', 'a') as res:
for cumulative_sample in results['cumulative']:
ts = human_readable_date(int(cumulative_sample[0]))
res.write(make_csv_line(uuid, givenname, ts, *list(cumulative_sample)))
| 39.070423
| 110
| 0.661067
|
8b4ae306ce123d222d8f26a0f8932861941dd27d
| 777
|
py
|
Python
|
src/model/experiment.py
|
VinGPan/classification_model_search
|
fab7ce6fc131b858f1b79633e0f7b86d1446c93d
|
[
"MIT"
] | null | null | null |
src/model/experiment.py
|
VinGPan/classification_model_search
|
fab7ce6fc131b858f1b79633e0f7b86d1446c93d
|
[
"MIT"
] | null | null | null |
src/model/experiment.py
|
VinGPan/classification_model_search
|
fab7ce6fc131b858f1b79633e0f7b86d1446c93d
|
[
"MIT"
] | null | null | null |
from src.model.build_models import build_models
from src.model.compute_features import compute_features
from src.model.data_cleansing import cleanse
from src.model.report import report
from src.model.split import split
from src.model.utils import read_args, read_yml, makedir
from src.utils.logging import logger
def run_experiment(yml_name):
configs = read_yml(yml_name)
makedir("output/" + configs['experiment_name'])
cleanse(configs)
compute_features(configs)
split(configs)
build_models(configs)
report(configs)
if __name__ == '__main__':
exp_yml_name = read_args()
logger.info('Running experiment ' + str(exp_yml_name))
try:
run_experiment(exp_yml_name)
except Exception as e:
logger.error(e, exc_info=True)
| 28.777778
| 58
| 0.750322
|
37cec0fe22a79e72cdbdab7484adbeb3a793936b
| 9,588
|
py
|
Python
|
predict_augmented_npy_maxout2048_extradense.py
|
pcoster/kaggle-galaxies
|
bb1908d23ed80e9aeb706166007830760769daf0
|
[
"BSD-3-Clause"
] | 374
|
2015-01-05T02:18:47.000Z
|
2021-12-13T10:30:06.000Z
|
predict_augmented_npy_maxout2048_extradense.py
|
Adaydl/kaggle-galaxies
|
bb1908d23ed80e9aeb706166007830760769daf0
|
[
"BSD-3-Clause"
] | 5
|
2015-01-02T17:17:08.000Z
|
2016-01-05T18:45:38.000Z
|
predict_augmented_npy_maxout2048_extradense.py
|
Adaydl/kaggle-galaxies
|
bb1908d23ed80e9aeb706166007830760769daf0
|
[
"BSD-3-Clause"
] | 173
|
2015-01-05T14:26:37.000Z
|
2021-10-10T14:17:58.000Z
|
"""
Load an analysis file and redo the predictions on the validation set / test set,
this time with augmented data and averaging. Store them as numpy files.
"""
import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
BATCH_SIZE = 32 # 16
NUM_INPUT_FEATURES = 3
CHUNK_SIZE = 8000 # 10000 # this should be a multiple of the batch size
# ANALYSIS_PATH = "analysis/try_convnet_cc_multirot_3x69r45_untied_bias.pkl"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense.pkl"
DO_VALID = True # disable this to not bother with the validation set evaluation
DO_TEST = True # disable this to not generate predictions on the testset
target_filename = os.path.basename(ANALYSIS_PATH).replace(".pkl", ".npy.gz")
target_path_valid = os.path.join("predictions/final/augmented/valid", target_filename)
target_path_test = os.path.join("predictions/final/augmented/test", target_filename)
print "Loading model data etc."
analysis = np.load(ANALYSIS_PATH)
input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)]
num_input_representations = len(ds_transforms)
# split training data into training + a small validation set
num_train = load_data.num_train
num_valid = num_train // 10 # integer division
num_train -= num_valid
num_test = load_data.num_test
valid_ids = load_data.train_ids[num_train:]
train_ids = load_data.train_ids[:num_train]
test_ids = load_data.test_ids
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train+num_valid)
test_indices = np.arange(num_test)
y_valid = np.load("data/solutions_train.npy")[num_train:]
print "Build model"
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=6, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=5, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
j3 = layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts
l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4b = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
l4c = layers.DenseLayer(l4b, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4c, pool_size=2, feature_dim=1, implementation='reshape')
# l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
idx = T.lscalar('idx')
givens = {
l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens)
print "Load model parameters"
layers.set_param_values(l6, analysis['param_values'])
print "Create generators"
# set here which transforms to use to make predictions
augmentation_transforms = []
for zoom in [1 / 1.2, 1.0, 1.2]:
for angle in np.linspace(0, 360, 10, endpoint=False):
augmentation_transforms.append(ra.build_augmentation_transform(rotation=angle, zoom=zoom))
augmentation_transforms.append(ra.build_augmentation_transform(rotation=(angle + 180), zoom=zoom, shear=180)) # flipped
print " %d augmentation transforms." % len(augmentation_transforms)
augmented_data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms)
valid_gen = load_data.buffered_gen_mp(augmented_data_gen_valid, buffer_size=1)
augmented_data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms)
test_gen = load_data.buffered_gen_mp(augmented_data_gen_test, buffer_size=1)
approx_num_chunks_valid = int(np.ceil(num_valid * len(augmentation_transforms) / float(CHUNK_SIZE)))
approx_num_chunks_test = int(np.ceil(num_test * len(augmentation_transforms) / float(CHUNK_SIZE)))
print "Approximately %d chunks for the validation set" % approx_num_chunks_valid
print "Approximately %d chunks for the test set" % approx_num_chunks_test
if DO_VALID:
print
print "VALIDATION SET"
print "Compute predictions"
predictions_list = []
start_time = time.time()
for e, (chunk_data, chunk_length) in enumerate(valid_gen):
print "Chunk %d" % (e + 1)
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
print " load data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE)))
        # make predictions, don't forget to cut off the zeros at the end
predictions_chunk_list = []
for b in xrange(num_batches_chunk):
if b % 1000 == 0:
print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_chunk_list.append(predictions)
predictions_chunk = np.vstack(predictions_chunk_list)
predictions_chunk = predictions_chunk[:chunk_length] # cut off zeros / padding
print " compute average over transforms"
predictions_chunk_avg = predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1)
predictions_list.append(predictions_chunk_avg)
time_since_start = time.time() - start_time
print " %s since start" % load_data.hms(time_since_start)
all_predictions = np.vstack(predictions_list)
print "Write predictions to %s" % target_path_valid
load_data.save_gz(target_path_valid, all_predictions)
print "Evaluate"
rmse_valid = analysis['losses_valid'][-1]
rmse_augmented = np.sqrt(np.mean((y_valid - all_predictions)**2))
print " MSE (last iteration):\t%.6f" % rmse_valid
print " MSE (augmented):\t%.6f" % rmse_augmented
if DO_TEST:
print
print "TEST SET"
print "Compute predictions"
predictions_list = []
start_time = time.time()
for e, (chunk_data, chunk_length) in enumerate(test_gen):
print "Chunk %d" % (e + 1)
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
print " load data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE)))
        # make predictions, don't forget to cut off the zeros at the end
predictions_chunk_list = []
for b in xrange(num_batches_chunk):
if b % 1000 == 0:
print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_chunk_list.append(predictions)
predictions_chunk = np.vstack(predictions_chunk_list)
predictions_chunk = predictions_chunk[:chunk_length] # cut off zeros / padding
print " compute average over transforms"
predictions_chunk_avg = predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1)
predictions_list.append(predictions_chunk_avg)
time_since_start = time.time() - start_time
print " %s since start" % load_data.hms(time_since_start)
all_predictions = np.vstack(predictions_list)
print "Write predictions to %s" % target_path_test
load_data.save_gz(target_path_test, all_predictions)
print "Done!"
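
# Appended for illustration only: a tiny NumPy demonstration of the
# reshape-and-average trick used above for test-time augmentation. It assumes, as
# the reshape(-1, len(augmentation_transforms), 37) above does, that all augmented
# copies of an image are stored in consecutive rows.
def _tta_average_example():
    n_images, n_transforms, n_outputs = 3, 4, 37
    preds = np.random.rand(n_images * n_transforms, n_outputs)
    averaged = preds.reshape(n_images, n_transforms, n_outputs).mean(1)
    assert averaged.shape == (n_images, n_outputs)
    assert np.allclose(averaged[0], preds[:n_transforms].mean(0))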
| 39.45679
| 214
| 0.744994
|
6d7e561a529ea63ba6d2abbd47ec17d39cc7e98e
| 65,487
|
py
|
Python
|
controllers/ajax.py
|
Kandongwe/RunestoneServer
|
f555868521b3717beec0ec42dbcbcb443c64686c
|
[
"MIT"
] | null | null | null |
controllers/ajax.py
|
Kandongwe/RunestoneServer
|
f555868521b3717beec0ec42dbcbcb443c64686c
|
[
"MIT"
] | null | null | null |
controllers/ajax.py
|
Kandongwe/RunestoneServer
|
f555868521b3717beec0ec42dbcbcb443c64686c
|
[
"MIT"
] | null | null | null |
# *************************
# |docname| - Runestone API
# *************************
# This module implements the API that the Runestone Components use to communicate with a Runestone Server.
# **Most of this file is Deprecated**
# **Do not** make any changes to the following functions. They will be removed in an upcoming release.
# def compareAndUpdateCookieData(sid: str):
# def hsblog():
# def runlog():
# def gethist():
# def getuser():
# def set_tz_offset():
# def updatelastpage():
# def getCompletionStatus():
# def getAllCompletionStatus():
# def getlastpage():
# def _getCorrectStats(miscdata, event):
# def _getStudentResults(question: str):
# def getaggregateresults():
# def getpollresults():
# def gettop10Answers():
# def getassignmentgrade():
# def _canonicalize_tz(tstring):
# def getAssessResults():
# def tookTimedAssessment():
# def get_datafile():
# def _same_class(user1: str, user2: str) -> bool:
# def login_status():
# def get_question_source():
#
# TODO: Move these to a new controller file (maybe admin.py)
# def preview_question():
# def save_donate():
# def did_donate():
# def broadcast_code():
# def update_selected_question():
# #
# Imports
# =======
# These are listed in the order prescribed by `PEP 8
# <http://www.python.org/dev/peps/pep-0008/#imports>`_.
from collections import Counter
import datetime
from io import open
import json
import logging
from lxml import html
import math
import os
import random
import re
import subprocess
from textwrap import dedent
import uuid
# Third-party imports
# -------------------
from bleach import clean
from dateutil.parser import parse
# Local application imports
# -------------------------
from feedback import is_server_feedback, fitb_feedback, lp_feedback
from rs_practice import _get_qualified_questions
logger = logging.getLogger(settings.logger)
logger.setLevel(settings.log_level)
EVENT_TABLE = {
"mChoice": "mchoice_answers",
"fillb": "fitb_answers",
"dragNdrop": "dragndrop_answers",
"clickableArea": "clickablearea_answers",
"parsons": "parsons_answers",
"codelensq": "codelens_answers",
"shortanswer": "shortanswer_answers",
"fillintheblank": "fitb_answers",
"mchoice": "mchoice_answers",
"dragndrop": "dragndrop_answers",
"clickablearea": "clickablearea_answers",
"parsonsprob": "parsons_answers",
}
COMMENT_MAP = {
"sql": "--",
"python": "#",
"java": "//",
"javascript": "//",
"c": "//",
"cpp": "//",
}
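
# Illustrative helper (hypothetical; it mirrors the partner-sharing logic used in
# runlog() below) showing how COMMENT_MAP picks a language-appropriate comment
# character when code is shared with a partner:
def _shared_code_banner(lang, sid, code):
    comchar = COMMENT_MAP.get(lang, "#")
    return "{} This code was shared by {}\n\n".format(comchar, sid) + code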
def compareAndUpdateCookieData(sid: str):
if (
"ipuser" in request.cookies
and request.cookies["ipuser"].value != sid
and request.cookies["ipuser"].value.endswith("@" + request.client)
):
db.useinfo.update_or_insert(
db.useinfo.sid == request.cookies["ipuser"].value, sid=sid
)
# Endpoints
# =========
#
# .. _hsblog endpoint:
#
# hsblog endpoint
# ---------------
# Given a JSON record of a clickstream event, record the event in the ``useinfo`` table.
# If the event is an answer to a runestone question, record that answer in the database in
# one of the xxx_answers tables.
#
def hsblog():
setCookie = False
if auth.user:
if request.vars.course != auth.user.course_name:
return json.dumps(
dict(
log=False,
message="You appear to have changed courses in another tab. Please switch to this course",
)
)
sid = auth.user.username
compareAndUpdateCookieData(sid)
setCookie = True # we set our own cookie anyway to eliminate many of the extraneous anonymous
        # log entries that come from auth timing out even though the user hasn't reloaded
# the page.
# If the incoming data contains an sid then prefer that.
if request.vars.sid:
sid = request.vars.sid
else:
if request.vars.clientLoginStatus == "true":
logger.error("Session Expired")
return json.dumps(dict(log=False, message="Session Expired"))
if "ipuser" in request.cookies:
sid = request.cookies["ipuser"].value
setCookie = True
else:
sid = str(uuid.uuid1().int) + "@" + request.client
setCookie = True
act = request.vars.get("act", "")
div_id = request.vars.div_id
event = request.vars.event
course = request.vars.course
    # Get the current time, rounded to the nearest second -- this is how the time will be stored in the database.
ts = datetime.datetime.utcnow()
ts -= datetime.timedelta(microseconds=ts.microsecond)
tt = request.vars.time
if not tt:
tt = 0
try:
db.useinfo.insert(
sid=sid,
act=act[0:512],
div_id=div_id,
event=event,
timestamp=ts,
course_id=course,
)
except Exception as e:
logger.error(
"failed to insert log record for {} in {} : {} {} {}".format(
sid, course, div_id, event, act
)
)
logger.error("Details: {}".format(e))
if event == "timedExam" and (act == "finish" or act == "reset" or act == "start"):
logger.debug(act)
if act == "reset":
r = "T"
else:
r = None
try:
db.timed_exam.insert(
sid=sid,
course_name=course,
correct=int(request.vars.correct or 0),
incorrect=int(request.vars.incorrect or 0),
skipped=int(request.vars.skipped or 0),
time_taken=int(tt),
timestamp=ts,
div_id=div_id,
reset=r,
)
except Exception as e:
logger.debug(
"failed to insert a timed exam record for {} in {} : {}".format(
sid, course, div_id
)
)
logger.debug(
"correct {} incorrect {} skipped {} time {}".format(
request.vars.correct,
request.vars.incorrect,
request.vars.skipped,
request.vars.time,
)
)
logger.debug("Error: {}".format(e.message))
# Produce a default result.
res = dict(log=True, timestamp=str(ts))
try:
pct = float(request.vars.percent)
except ValueError:
pct = None
except TypeError:
pct = None
# Process this event.
if event == "mChoice" and auth.user:
answer = request.vars.answer
correct = request.vars.correct
db.mchoice_answers.insert(
sid=sid,
timestamp=ts,
div_id=div_id,
answer=answer,
correct=correct,
course_name=course,
percent=pct,
)
elif event == "fillb" and auth.user:
answer_json = request.vars.answer
correct = request.vars.correct
# Grade on the server if needed.
do_server_feedback, feedback = is_server_feedback(div_id, course)
if do_server_feedback and answer_json is not None:
correct, res_update = fitb_feedback(answer_json, feedback)
res.update(res_update)
pct = res["percent"]
# Save this data.
db.fitb_answers.insert(
sid=sid,
timestamp=ts,
div_id=div_id,
answer=answer_json,
correct=correct,
course_name=course,
percent=pct,
)
elif event == "dragNdrop" and auth.user:
answers = request.vars.answer
minHeight = request.vars.minHeight
correct = request.vars.correct
db.dragndrop_answers.insert(
sid=sid,
timestamp=ts,
div_id=div_id,
answer=answers,
correct=correct,
course_name=course,
min_height=minHeight,
percent=pct,
)
elif event == "clickableArea" and auth.user:
correct = request.vars.correct
db.clickablearea_answers.insert(
sid=sid,
timestamp=ts,
div_id=div_id,
answer=act,
correct=correct,
course_name=course,
percent=pct,
)
elif event == "parsons" and auth.user:
correct = request.vars.correct
answer = request.vars.answer
source = request.vars.source
db.parsons_answers.insert(
sid=sid,
timestamp=ts,
div_id=div_id,
answer=answer,
source=source,
correct=correct,
course_name=course,
percent=pct,
)
elif event == "codelensq" and auth.user:
correct = request.vars.correct
answer = request.vars.answer
source = request.vars.source
db.codelens_answers.insert(
sid=sid,
timestamp=ts,
div_id=div_id,
answer=answer,
source=source,
correct=correct,
course_name=course,
percent=pct,
)
elif event == "shortanswer" and auth.user:
db.shortanswer_answers.insert(
sid=sid,
answer=act,
div_id=div_id,
timestamp=ts,
course_name=course,
)
elif event == "unittest" and auth.user:
statslist = act.split(":")
if "undefined" not in act:
pct = float(statslist[1])
passed = int(statslist[3])
failed = int(statslist[5])
if math.isnan(pct):
pct = 0
else:
pct = passed = failed = 0
logger.error(f"Got undefined unittest results for {div_id} {sid}")
if pct >= 99.99999:
correct = "T"
else:
correct = "F"
db.unittest_answers.insert(
sid=sid,
timestamp=ts,
div_id=div_id,
correct=correct,
passed=passed,
failed=failed,
course_name=course,
percent=pct,
)
elif event == "lp_build" and auth.user:
ret, new_fields = db.lp_answers._validate_fields(
dict(sid=sid, timestamp=ts, div_id=div_id, course_name=course)
)
if not ret.errors:
do_server_feedback, feedback = is_server_feedback(div_id, course)
if do_server_feedback:
try:
code_snippets = json.loads(request.vars.answer)["code_snippets"]
except Exception:
code_snippets = []
result = lp_feedback(code_snippets, feedback)
# If an error occurred or we're not testing, pass the answer through.
res.update(result)
# Record the results in the database.
correct = result.get("correct")
answer = result.get("answer", {})
answer["code_snippets"] = code_snippets
ret = db.lp_answers.validate_and_insert(
sid=sid,
timestamp=ts,
div_id=div_id,
answer=json.dumps(answer),
correct=correct,
course_name=course,
)
if ret.errors:
res.setdefault("errors", []).append(ret.errors.as_dict())
else:
res["errors"] = ["No feedback provided."]
else:
res.setdefault("errors", []).append(ret.errors.as_dict())
response.headers["content-type"] = "application/json"
if setCookie:
response.cookies["ipuser"] = sid
response.cookies["ipuser"]["expires"] = 24 * 3600 * 90
response.cookies["ipuser"]["path"] = "/"
if auth.user:
response.cookies["last_course"] = auth.user.course_name
response.cookies["last_course"]["expires"] = 24 * 3600 * 90
response.cookies["last_course"]["path"] = "/"
return json.dumps(res)
# .. _runlog endpoint:
#
# runlog endpoint
# ---------------
# The `logRunEvent` client-side function calls this endpoint to record TODO...
def runlog(): # Log errors and runs with code
# response.headers['content-type'] = 'application/json'
setCookie = False
if auth.user:
if request.vars.course != auth.user.course_name:
return json.dumps(
dict(
log=False,
message="You appear to have changed courses in another tab. Please switch to this course",
)
)
if request.vars.sid:
sid = request.vars.sid
else:
sid = auth.user.username
setCookie = True
else:
if request.vars.clientLoginStatus == "true":
logger.error("Session Expired")
return json.dumps(dict(log=False, message="Session Expired"))
if "ipuser" in request.cookies:
sid = request.cookies["ipuser"].value
setCookie = True
else:
sid = str(uuid.uuid1().int) + "@" + request.client
setCookie = True
div_id = request.vars.div_id
course = request.vars.course
code = request.vars.code if request.vars.code else ""
ts = datetime.datetime.utcnow()
error_info = request.vars.errinfo
pre = request.vars.prefix if request.vars.prefix else ""
post = request.vars.suffix if request.vars.suffix else ""
if error_info != "success":
event = "ac_error"
act = str(error_info)[:512]
else:
act = "run"
if request.vars.event:
event = request.vars.event
else:
event = "activecode"
num_tries = 3
done = False
while num_tries > 0 and not done:
try:
db.useinfo.insert(
sid=sid,
act=act,
div_id=div_id,
event=event,
timestamp=ts,
course_id=course,
)
done = True
except Exception as e:
logger.error(
"probable Too Long problem trying to insert sid={} act={} div_id={} event={} timestamp={} course_id={} exception={}".format(
sid, act, div_id, event, ts, course, e
)
)
num_tries -= 1
if num_tries == 0:
raise Exception("Runlog Failed to insert into useinfo")
if auth.user:
if "to_save" in request.vars and (
request.vars.to_save == "True" or request.vars.to_save == "true"
):
num_tries = 3
done = False
dbcourse = (
db(db.courses.course_name == course).select(**SELECT_CACHE).first()
)
while num_tries > 0 and not done:
try:
db.code.insert(
sid=sid,
acid=div_id,
code=code,
emessage=error_info,
timestamp=ts,
course_id=dbcourse,
language=request.vars.lang,
)
if request.vars.partner:
if _same_class(sid, request.vars.partner):
comchar = COMMENT_MAP.get(request.vars.lang, "#")
newcode = (
"{} This code was shared by {}\n\n".format(comchar, sid)
+ code
)
db.code.insert(
sid=request.vars.partner,
acid=div_id,
code=newcode,
emessage=error_info,
timestamp=ts,
course_id=dbcourse,
language=request.vars.lang,
)
else:
res = {
"message": "You must be enrolled in the same class as your partner"
}
return json.dumps(res)
done = True
except Exception as e:
num_tries -= 1
logger.error("INSERT into code FAILED retrying -- {}".format(e))
if num_tries == 0:
raise Exception("Runlog Failed to insert into code")
res = {"log": True}
if setCookie:
response.cookies["ipuser"] = sid
response.cookies["ipuser"]["expires"] = 24 * 3600 * 90
response.cookies["ipuser"]["path"] = "/"
return json.dumps(res)
# Ajax Handlers for saving and restoring active code blocks
def gethist():
"""
return the history of saved code by this user for a particular acid
:Parameters:
- `acid`: id of the active code block
- `user`: optional identifier for the owner of the code
:Return:
- json object containing a list/array of source texts
"""
codetbl = db.code
acid = request.vars.acid
# if vars.sid then we know this is being called from the grading interface
if request.vars.sid:
sid = request.vars.sid
if auth.user and verifyInstructorStatus(
auth.user.course_name, auth.user.id
): # noqa: F405
course_id = auth.user.course_id
else:
course_id = None
elif auth.user:
sid = auth.user.username
course_id = auth.user.course_id
else:
sid = None
course_id = None
res = {}
if sid:
query = (
(codetbl.sid == sid)
& (codetbl.acid == acid)
& (codetbl.course_id == course_id)
& (codetbl.timestamp != None) # noqa: E711
)
res["acid"] = acid
res["sid"] = sid
# get the code they saved in chronological order; id order gets that for us
r = db(query).select(orderby=codetbl.id)
res["history"] = [row.code for row in r]
res["timestamps"] = [
row.timestamp.replace(tzinfo=datetime.timezone.utc).isoformat() for row in r
]
response.headers["content-type"] = "application/json"
return json.dumps(res)
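# --- Illustrative sketch (not part of the original controller) ---
# Shape of the JSON returned by gethist() above for a logged-in user; the
# values are hypothetical. History is ordered oldest to newest, and the
# timestamps list is parallel to it.
def _example_gethist_response():
    return {
        "acid": "ac_example_1",
        "sid": "some_student",
        "history": ["print('v1')", "print('v2')"],
        "timestamps": ["2021-01-01T00:00:00+00:00",
                       "2021-01-02T00:00:00+00:00"],
    }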
# @auth.requires_login()
# This function is deprecated as of June 2019
# We need to keep it in place as long as we continue to serve books
# from runestone/static/. When that period is over we can eliminate it.

def getuser():
response.headers["content-type"] = "application/json"
if auth.user:
try:
# return the list of courses that auth.user is registered for to keep them from
# accidentally wandering into courses they are not registered for.
cres = db(
(db.user_courses.user_id == auth.user.id)
& (db.user_courses.course_id == db.courses.id)
).select(db.courses.course_name)
clist = []
for row in cres:
clist.append(row.course_name)
res = {
"email": auth.user.email,
"nick": auth.user.username,
"donated": auth.user.donated,
"isInstructor": verifyInstructorStatus( # noqa: F405
auth.user.course_name, auth.user.id
),
"course_list": clist,
}
session.timezoneoffset = request.vars.timezoneoffset
logger.debug(
"setting timezone offset in session %s hours" % session.timezoneoffset
)
except Exception:
res = dict(redirect=auth.settings.login_url) # ?_next=....
else:
res = dict(redirect=auth.settings.login_url) # ?_next=....
if session.readings:
res["readings"] = session.readings
logger.debug("returning login info: %s" % res)
return json.dumps([res])
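# --- Illustrative sketch (not part of the original controller) ---
# Shape of the JSON list returned by getuser() above for a logged-in user;
# the values are hypothetical. When the user is not logged in, the single
# element is {"redirect": auth.settings.login_url} instead, and a "readings"
# key is added whenever session.readings is set.
def _example_getuser_response():
    return [{
        "email": "student@example.com",
        "nick": "some_student",
        "donated": False,
        "isInstructor": False,
        "course_list": ["somecourse"],
    }]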
def set_tz_offset():
session.timezoneoffset = request.vars.timezoneoffset
logger.debug("setting timezone offset in session %s hours" % session.timezoneoffset)
return "done"
#
# Ajax Handlers to update and retrieve the last position of the user in the course
#
def updatelastpage():
lastPageUrl = request.vars.lastPageUrl
lastPageScrollLocation = request.vars.lastPageScrollLocation
if lastPageUrl is None:
return # todo: log request.vars, request.args and request.env.path_info
course = request.vars.course
completionFlag = request.vars.completionFlag
lastPageChapter = lastPageUrl.split("/")[-2]
lastPageSubchapter = ".".join(lastPageUrl.split("/")[-1].split(".")[:-1])
if auth.user:
done = False
num_tries = 3
while not done and num_tries > 0:
try:
db(
(db.user_state.user_id == auth.user.id)
& (db.user_state.course_name == course)
).update(
last_page_url=lastPageUrl,
last_page_chapter=lastPageChapter,
last_page_subchapter=lastPageSubchapter,
last_page_scroll_location=lastPageScrollLocation,
last_page_accessed_on=datetime.datetime.utcnow(),
)
done = True
except Exception:
num_tries -= 1
if num_tries == 0:
raise Exception("Failed to save the user state in update_last_page")
done = False
num_tries = 3
while not done and num_tries > 0:
try:
db(
(db.user_sub_chapter_progress.user_id == auth.user.id)
& (db.user_sub_chapter_progress.chapter_id == lastPageChapter)
& (
db.user_sub_chapter_progress.sub_chapter_id
== lastPageSubchapter
)
& (
(db.user_sub_chapter_progress.course_name == course)
| (
db.user_sub_chapter_progress.course_name == None
) # Back fill for old entries without course
)
).update(
status=completionFlag,
end_date=datetime.datetime.utcnow(),
course_name=course,
)
done = True
except Exception:
num_tries -= 1
if num_tries == 0:
raise Exception("Failed to save sub chapter progress in update_last_page")
practice_settings = db(db.course_practice.course_name == auth.user.course_name)
if (
practice_settings.count() != 0
and practice_settings.select().first().flashcard_creation_method == 0
):
# Since each authenticated user has only one active course, we retrieve the course this way.
course = (
db(db.courses.id == auth.user.course_id).select(**SELECT_CACHE).first()
)
# We only retrieve questions to be used in flashcards if they are marked for practice purpose.
questions = _get_qualified_questions(
course.base_course, lastPageChapter, lastPageSubchapter, db
)
if len(questions) > 0:
now = datetime.datetime.utcnow()
now_local = now - datetime.timedelta(
hours=float(session.timezoneoffset)
if "timezoneoffset" in session
else 0
)
existing_flashcards = db(
(db.user_topic_practice.user_id == auth.user.id)
& (db.user_topic_practice.course_name == auth.user.course_name)
& (db.user_topic_practice.chapter_label == lastPageChapter)
& (db.user_topic_practice.sub_chapter_label == lastPageSubchapter)
& (db.user_topic_practice.question_name == questions[0].name)
)
# There is at least one qualified question in this subchapter, so insert a flashcard for the subchapter.
if completionFlag == "1" and existing_flashcards.isempty():
db.user_topic_practice.insert(
user_id=auth.user.id,
course_name=auth.user.course_name,
chapter_label=lastPageChapter,
sub_chapter_label=lastPageSubchapter,
question_name=questions[0].name,
# Treat it as if the first eligible question is the last one asked.
i_interval=0,
e_factor=2.5,
next_eligible_date=now_local.date(),
# add as if yesterday, so can practice right away
last_presented=now - datetime.timedelta(1),
last_completed=now - datetime.timedelta(1),
creation_time=now,
timezoneoffset=float(session.timezoneoffset)
if "timezoneoffset" in session
else 0,
)
if completionFlag == "0" and not existing_flashcards.isempty():
existing_flashcards.delete()
def getCompletionStatus():
if auth.user:
lastPageUrl = request.vars.lastPageUrl
lastPageChapter = lastPageUrl.split("/")[-2]
lastPageSubchapter = ".".join(lastPageUrl.split("/")[-1].split(".")[:-1])
result = db(
(db.user_sub_chapter_progress.user_id == auth.user.id)
& (db.user_sub_chapter_progress.chapter_id == lastPageChapter)
& (db.user_sub_chapter_progress.sub_chapter_id == lastPageSubchapter)
& (
(db.user_sub_chapter_progress.course_name == auth.user.course_name)
| (
db.user_sub_chapter_progress.course_name == None
) # for backward compatibility
)
).select(db.user_sub_chapter_progress.status)
rowarray_list = []
if result:
for row in result:
res = {"completionStatus": row.status}
rowarray_list.append(res)
# question: since the javascript in user-highlights.js is going to look only at the first row, shouldn't
# we be returning just the *last* status? Or is there no history of status kept anyway?
return json.dumps(rowarray_list)
else:
# haven't seen this Chapter/Subchapter before
# make the insertions into the DB as necessary
# we know the subchapter doesn't exist
db.user_sub_chapter_progress.insert(
user_id=auth.user.id,
chapter_id=lastPageChapter,
sub_chapter_id=lastPageSubchapter,
status=-1,
start_date=datetime.datetime.utcnow(),
course_name=auth.user.course_name,
)
# the chapter might exist without the subchapter
result = db(
(db.user_chapter_progress.user_id == auth.user.id)
& (db.user_chapter_progress.chapter_id == lastPageChapter)
).select()
if not result:
db.user_chapter_progress.insert(
user_id=auth.user.id, chapter_id=lastPageChapter, status=-1
)
return json.dumps([{"completionStatus": -1}])
def getAllCompletionStatus():
if auth.user:
result = db(
(db.user_sub_chapter_progress.user_id == auth.user.id)
& (db.user_sub_chapter_progress.course_name == auth.user.course_name)
).select(
db.user_sub_chapter_progress.chapter_id,
db.user_sub_chapter_progress.sub_chapter_id,
db.user_sub_chapter_progress.status,
db.user_sub_chapter_progress.end_date,
)
rowarray_list = []
if result:
for row in result:
if row.end_date is None:
endDate = 0
else:
endDate = row.end_date.strftime("%d %b, %Y")
res = {
"chapterName": row.chapter_id,
"subChapterName": row.sub_chapter_id,
"completionStatus": row.status,
"endDate": endDate,
}
rowarray_list.append(res)
return json.dumps(rowarray_list)
@auth.requires_login()
def getlastpage():
course = request.vars.course
course = db(db.courses.course_name == course).select(**SELECT_CACHE).first()
result = db(
(db.user_state.user_id == auth.user.id)
& (db.user_state.course_name == course.course_name)
& (db.chapters.course_id == course.base_course)
& (db.user_state.last_page_chapter == db.chapters.chapter_label)
& (db.sub_chapters.chapter_id == db.chapters.id)
& (db.user_state.last_page_subchapter == db.sub_chapters.sub_chapter_label)
).select(
db.user_state.last_page_url,
db.user_state.last_page_hash,
db.chapters.chapter_name,
db.user_state.last_page_scroll_location,
db.sub_chapters.sub_chapter_name,
)
rowarray_list = []
if result:
for row in result:
res = {
"lastPageUrl": row.user_state.last_page_url,
"lastPageHash": row.user_state.last_page_hash,
"lastPageChapter": row.chapters.chapter_name,
"lastPageSubchapter": row.sub_chapters.sub_chapter_name,
"lastPageScrollLocation": row.user_state.last_page_scroll_location,
}
rowarray_list.append(res)
return json.dumps(rowarray_list)
else:
db.user_state.insert(user_id=auth.user.id, course_name=course.course_name)
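# --- Illustrative sketch (not part of the original controller) ---
# Shape of the JSON returned by getlastpage() above when a last-page record
# exists; the values below are hypothetical.
def _example_getlastpage_response():
    return [{
        "lastPageUrl": "/runestone/books/published/somecourse/Chapter/Section.html",
        "lastPageHash": None,
        "lastPageChapter": "Some Chapter",
        "lastPageSubchapter": "Some Subchapter",
        "lastPageScrollLocation": 0,
    }]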
def _getCorrectStats(miscdata, event):
# TODO: update this to use the xxx_answer table
# select and count grouping by the correct column
# this version can suffer from division by zero error
sid = None
dbtable = EVENT_TABLE[event] # translate event to correct table
if auth.user:
sid = auth.user.username
else:
if "ipuser" in request.cookies:
sid = request.cookies["ipuser"].value
if sid:
course = (
db(db.courses.course_name == miscdata["course"])
.select(**SELECT_CACHE)
.first()
)
tbl = db[dbtable]
count_expr = tbl.correct.count()
rows = db((tbl.sid == sid) & (tbl.timestamp > course.term_start_date)).select(
tbl.correct, count_expr, groupby=tbl.correct
)
total = 0
correct = 0
for row in rows:
count = row[count_expr]
total += count
if row[dbtable].correct:
correct = count
if total > 0:
pctcorr = round(float(correct) / total * 100)
else:
pctcorr = "unavailable"
else:
pctcorr = "unavailable"
miscdata["yourpct"] = pctcorr
def _getStudentResults(question: str):
"""
Internal function to collect student answers
"""
cc = db(db.courses.id == auth.user.course_id).select().first()
qst = (
db(
(db.questions.name == question)
& (db.questions.base_course == cc.base_course)
)
.select()
.first()
)
tbl_name = EVENT_TABLE[qst.question_type]
tbl = db[tbl_name]
res = db(
(tbl.div_id == question)
& (tbl.course_name == cc.course_name)
& (tbl.timestamp >= cc.term_start_date)
).select(tbl.sid, tbl.answer, orderby=tbl.sid)
resultList = []
if len(res) > 0:
currentSid = res[0].sid
currentAnswers = []
for row in res:
if row.answer:
answer = clean(row.answer)
else:
answer = None
if row.sid == currentSid:
if answer is not None:
currentAnswers.append(answer)
else:
currentAnswers.sort()
resultList.append((currentSid, currentAnswers))
currentAnswers = [answer] if answer is not None else []
currentSid = row.sid
currentAnswers.sort()
resultList.append((currentSid, currentAnswers))
return resultList
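# --- Illustrative sketch (not part of the original controller) ---
# _getStudentResults() above returns a list of (sid, sorted_answers) tuples,
# one per student; the data below is hypothetical.
def _example_student_results():
    return [
        ("student_a", ["answer one", "answer two"]),
        ("student_b", ["answer one"]),
    ]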
def getaggregateresults():
course = request.vars.course
question = request.vars.div_id
# select act, count(*) from useinfo where div_id = 'question4_2_1' group by act;
response.headers["content-type"] = "application/json"
if not auth.user:
return json.dumps([dict(answerDict={}, misc={}, emess="You must be logged in")])
is_instructor = verifyInstructorStatus(course, auth.user.id) # noqa: F405
# Yes, these two things could be done as a join, but this may be better for performance
if course in (
"thinkcspy",
"pythonds",
"fopp",
"csawesome",
"apcsareview",
"StudentCSP",
):
start_date = datetime.datetime.utcnow() - datetime.timedelta(days=90)
else:
start_date = (
db(db.courses.course_name == course)
.select(db.courses.term_start_date)
.first()
.term_start_date
)
count = db.useinfo.id.count()
try:
result = db(
(db.useinfo.div_id == question)
& (db.useinfo.course_id == course)
& (db.useinfo.timestamp >= start_date)
).select(db.useinfo.act, count, groupby=db.useinfo.act)
except Exception:
return json.dumps(
[dict(answerDict={}, misc={}, emess="Sorry, the request timed out")]
)
tdata = {}
tot = 0
for row in result:
tdata[clean(row.useinfo.act)] = row[count]
tot += row[count]
tot = float(tot)
rdata = {}
miscdata = {}
correct = ""
if tot > 0:
for key in tdata:
all_a = key.split(":")
try:
answer = all_a[1]
if "correct" in key:
correct = answer
count = int(tdata[key])
if answer in rdata:
count += rdata[answer] / 100.0 * tot
pct = round(count / tot * 100.0)
if answer != "undefined" and answer != "":
rdata[answer] = pct
except Exception as e:
logger.error("Bad data for %s data is %s -- %s" % (question, key, e))
miscdata["correct"] = correct
miscdata["course"] = course
_getCorrectStats(miscdata, "mChoice")
returnDict = dict(answerDict=rdata, misc=miscdata)
if auth.user and is_instructor:
resultList = _getStudentResults(question)
returnDict["reslist"] = resultList
return json.dumps([returnDict])
def getpollresults():
course = request.vars.course
div_id = request.vars.div_id
response.headers["content-type"] = "application/json"
query = """select act from useinfo
join (select sid, max(id) mid
from useinfo where event='poll' and div_id = %s and course_id = %s group by sid) as T
on id = T.mid"""
rows = db.executesql(query, (div_id, course))
result_list = []
for row in rows:
val = row[0].split(":")[0]
result_list.append(int(val))
# maps option : count
opt_counts = Counter(result_list)
if result_list:
for i in range(max(result_list)):
if i not in opt_counts:
opt_counts[i] = 0
# opt_list holds the option numbers from smallest to largest
# count_list[i] holds the count of responses that chose option i
opt_list = sorted(opt_counts.keys())
count_list = []
for i in opt_list:
count_list.append(opt_counts[i])
user_res = None
if auth.user:
user_res = (
db(
(db.useinfo.sid == auth.user.username)
& (db.useinfo.course_id == course)
& (db.useinfo.div_id == div_id)
)
.select(db.useinfo.act, orderby=~db.useinfo.id)
.first()
)
if user_res:
my_vote = user_res.act
else:
my_vote = -1
return json.dumps([len(result_list), opt_list, count_list, div_id, my_vote])
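# --- Illustrative sketch (not part of the original controller) ---
# Shape of the JSON returned by getpollresults() above, with hypothetical
# numbers: 7 responses total, options 0-2 with their per-option counts, the
# poll's div_id, and finally the current user's most recent useinfo.act value
# (-1 when they have not voted).
def _example_getpollresults_response():
    return [7, [0, 1, 2], [3, 2, 2], "poll_example_1", -1]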
def gettop10Answers():
course = request.vars.course
question = request.vars.div_id
response.headers["content-type"] = "application/json"
rows = []
try:
dbcourse = db(db.courses.course_name == course).select(**SELECT_CACHE).first()
count_expr = db.fitb_answers.answer.count()
rows = db(
(db.fitb_answers.div_id == question)
& (db.fitb_answers.course_name == course)
& (db.fitb_answers.timestamp > dbcourse.term_start_date)
).select(
db.fitb_answers.answer,
count_expr,
groupby=db.fitb_answers.answer,
orderby=~count_expr,
limitby=(0, 10),
)
res = [
{"answer": clean(row.fitb_answers.answer), "count": row[count_expr]}
for row in rows
]
except Exception as e:
logger.debug(e)
res = "error in query"
miscdata = {"course": course}
_getCorrectStats(
miscdata, "fillb"
) # TODO: rewrite _getCorrectStats to use xxx_answers
if auth.user and verifyInstructorStatus(course, auth.user.id): # noqa: F405
resultList = _getStudentResults(question)
miscdata["reslist"] = resultList
return json.dumps([res, miscdata])
def getassignmentgrade():
response.headers["content-type"] = "application/json"
if not auth.user:
return json.dumps([dict(message="not logged in")])
divid = request.vars.div_id
ret = {
"grade": "Not graded yet",
"comment": "No Comments",
"avg": "None",
"count": "None",
"released": False,
}
# check that the assignment is released
#
a_q = (
db(
(db.assignments.course == auth.user.course_id)
& (db.assignment_questions.assignment_id == db.assignments.id)
& (db.assignment_questions.question_id == db.questions.id)
& (db.questions.name == divid)
)
.select(
db.assignments.released, db.assignments.id, db.assignment_questions.points
)
.first()
)
# If there is no assignment_question, fall back to the new way we store
# scores and comments: divid is a question, so find its question_grades row.
result = (
db(
(db.question_grades.sid == auth.user.username)
& (db.question_grades.course_name == auth.user.course_name)
& (db.question_grades.div_id == divid)
)
.select(db.question_grades.score, db.question_grades.comment)
.first()
)
logger.debug(result)
if result:
# Indicate that we're sending results back in the new format so they can be processed differently without affecting the old behavior during the transition.
ret["version"] = 2
ret["released"] = a_q.assignments.released if a_q else False
if a_q and not a_q.assignments.released:
ret["grade"] = "Not graded yet"
elif a_q and a_q.assignments.released:
ret["grade"] = result.score or "Written Feedback Only"
if a_q and a_q.assignments.released == True:
ret["max"] = a_q.assignment_questions.points
else:
ret["max"] = ""
if result.comment:
ret["comment"] = result.comment
return json.dumps([ret])
def _canonicalize_tz(tstring):
x = re.search(r"\((.*)\)", tstring)
x = x.group(1)
y = x.split()
if len(y) == 1:
return tstring
else:
zstring = "".join([i[0] for i in y])
return re.sub(r"(.*)\((.*)\)", r"\1({})".format(zstring), tstring)
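# --- Illustrative sketch (not part of the original controller) ---
# A minimal usage example for _canonicalize_tz(); the input is a hypothetical
# JavaScript Date.toString() value of the kind the client sends as a deadline.
# Multi-word timezone names are abbreviated to their initials before the
# string is handed to dateutil's parse() in getAssessResults() below.
def _example_canonicalize_tz():
    s = "Wed Jun 12 2019 10:00:00 GMT-0500 (Central Daylight Time)"
    return _canonicalize_tz(s)  # -> "Wed Jun 12 2019 10:00:00 GMT-0500 (CDT)"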
# .. _getAssessResults:
#
# getAssessResults
# ----------------
def getAssessResults():
if not auth.user:
# can't query for user's answers if we don't know who the user is, so just load from local storage
return ""
course = request.vars.course
div_id = request.vars.div_id
event = request.vars.event
if (
verifyInstructorStatus(auth.user.course_name, auth.user) and request.vars.sid
): # retrieving results for grader
sid = request.vars.sid
else:
sid = auth.user.username
# TODO This whole thing is messy - get the deadline from the assignment in the db
if request.vars.deadline:
try:
deadline = parse(_canonicalize_tz(request.vars.deadline))
tzoff = session.timezoneoffset if session.timezoneoffset else 0
deadline = deadline + datetime.timedelta(hours=float(tzoff))
deadline = deadline.replace(tzinfo=None)
except Exception:
logger.error("Bad Timezone - {}".format(request.vars.deadline))
deadline = datetime.datetime.utcnow()
else:
deadline = datetime.datetime.utcnow()
response.headers["content-type"] = "application/json"
# Identify the correct event and query the database so we can load it from the server
if event == "fillb":
rows = (
db(
(db.fitb_answers.div_id == div_id)
& (db.fitb_answers.course_name == course)
& (db.fitb_answers.sid == sid)
)
.select(
db.fitb_answers.answer,
db.fitb_answers.timestamp,
orderby=~db.fitb_answers.id,
)
.first()
)
if not rows:
return "" # server doesn't have it so we load from local storage instead
#
res = {"answer": rows.answer, "timestamp": str(rows.timestamp)}
do_server_feedback, feedback = is_server_feedback(div_id, course)
if do_server_feedback and rows.answer != None:
correct, res_update = fitb_feedback(rows.answer, feedback)
res.update(res_update)
return json.dumps(res)
elif event == "mChoice":
rows = (
db(
(db.mchoice_answers.div_id == div_id)
& (db.mchoice_answers.course_name == course)
& (db.mchoice_answers.sid == sid)
)
.select(
db.mchoice_answers.answer,
db.mchoice_answers.timestamp,
db.mchoice_answers.correct,
orderby=~db.mchoice_answers.id,
)
.first()
)
if not rows:
return ""
res = {
"answer": rows.answer,
"timestamp": str(rows.timestamp),
"correct": rows.correct,
}
return json.dumps(res)
elif event == "dragNdrop":
rows = (
db(
(db.dragndrop_answers.div_id == div_id)
& (db.dragndrop_answers.course_name == course)
& (db.dragndrop_answers.sid == sid)
)
.select(
db.dragndrop_answers.answer,
db.dragndrop_answers.timestamp,
db.dragndrop_answers.correct,
db.dragndrop_answers.min_height,
orderby=~db.dragndrop_answers.id,
)
.first()
)
if not rows:
return ""
res = {
"answer": rows.answer,
"timestamp": str(rows.timestamp),
"correct": rows.correct,
"minHeight": str(rows.min_height),
}
return json.dumps(res)
elif event == "clickableArea":
rows = (
db(
(db.clickablearea_answers.div_id == div_id)
& (db.clickablearea_answers.course_name == course)
& (db.clickablearea_answers.sid == sid)
)
.select(
db.clickablearea_answers.answer,
db.clickablearea_answers.timestamp,
db.clickablearea_answers.correct,
orderby=~db.clickablearea_answers.id,
)
.first()
)
if not rows:
return ""
res = {
"answer": rows.answer,
"timestamp": str(rows.timestamp),
"correct": rows.correct,
}
return json.dumps(res)
elif event == "timedExam":
rows = (
db(
(db.timed_exam.reset == None) # noqa: E711
& (db.timed_exam.div_id == div_id)
& (db.timed_exam.course_name == course)
& (db.timed_exam.sid == sid)
)
.select(
db.timed_exam.correct,
db.timed_exam.incorrect,
db.timed_exam.skipped,
db.timed_exam.time_taken,
db.timed_exam.timestamp,
db.timed_exam.reset,
orderby=~db.timed_exam.id,
)
.first()
)
if not rows:
return ""
res = {
"correct": rows.correct,
"incorrect": rows.incorrect,
"skipped": str(rows.skipped),
"timeTaken": str(rows.time_taken),
"timestamp": str(rows.timestamp),
"reset": str(rows.reset),
}
return json.dumps(res)
elif event == "parsons":
rows = (
db(
(db.parsons_answers.div_id == div_id)
& (db.parsons_answers.course_name == course)
& (db.parsons_answers.sid == sid)
)
.select(
db.parsons_answers.answer,
db.parsons_answers.source,
db.parsons_answers.timestamp,
orderby=~db.parsons_answers.id,
)
.first()
)
if not rows:
return ""
res = {
"answer": rows.answer,
"source": rows.source,
"timestamp": str(rows.timestamp),
}
return json.dumps(res)
elif event == "shortanswer":
logger.debug(f"Getting shortanswer: deadline is {deadline} ")
rows = db(
(db.shortanswer_answers.sid == sid)
& (db.shortanswer_answers.div_id == div_id)
& (db.shortanswer_answers.course_name == course)
).select(orderby=~db.shortanswer_answers.id)
if not rows:
return ""
last_answer = None
if not request.vars.deadline:
row = rows[0]
else:
last_answer = rows[0]
for row in rows:
if row.timestamp <= deadline:
break
if row.timestamp > deadline:
row = None
if row and row == last_answer:
res = {"answer": row.answer, "timestamp": row.timestamp.isoformat()}
else:
if row and row.timestamp <= deadline:
res = {"answer": row.answer, "timestamp": row.timestamp.isoformat()}
else:
res = {
"answer": "",
"timestamp": None,
"last_answer": last_answer.answer,
"last_timestamp": last_answer.timestamp.isoformat(),
}
srow = (
db(
(db.question_grades.sid == sid)
& (db.question_grades.div_id == div_id)
& (db.question_grades.course_name == course)
)
.select()
.first()
)
if srow:
res["score"] = srow.score
res["comment"] = srow.comment
return json.dumps(res)
elif event == "lp_build":
rows = (
db(
(db.lp_answers.div_id == div_id)
& (db.lp_answers.course_name == course)
& (db.lp_answers.sid == sid)
)
.select(
db.lp_answers.answer,
db.lp_answers.timestamp,
db.lp_answers.correct,
orderby=~db.lp_answers.id,
)
.first()
)
if not rows:
return "" # server doesn't have it so we load from local storage instead
answer = json.loads(rows.answer)
correct = rows.correct
return json.dumps(
{"answer": answer, "timestamp": str(rows.timestamp), "correct": correct}
)
def tookTimedAssessment():
if auth.user:
sid = auth.user.username
else:
return json.dumps({"tookAssessment": False})
exam_id = request.vars.div_id
course = request.vars.course_name
rows = (
db(
(db.timed_exam.div_id == exam_id)
& (db.timed_exam.sid == sid)
& (db.timed_exam.course_name == course)
)
.select(orderby=~db.timed_exam.id)
.first()
)
logger.debug(f"checking {exam_id} {sid} {course} {rows}")
if rows:
return json.dumps({"tookAssessment": True})
else:
return json.dumps({"tookAssessment": False})
# The request variable ``code`` must contain JSON-encoded RST to be rendered by Runestone. Only the HTML containing the actual Runestone component will be returned.
def preview_question():
begin = """
.. raw:: html
<begin_directive>
"""
end = """
.. raw:: html
<end_directive>
"""
try:
code = begin + dedent(json.loads(request.vars.code)) + end
with open(
"applications/{}/build/preview/_sources/index.rst".format(
request.application
),
"w",
encoding="utf-8",
) as ixf:
ixf.write(code)
# Note that ``os.environ`` isn't a dict, it's an object whose setter modifies environment variables. So, modifications of a copy/deepcopy still `modify the original environment <https://stackoverflow.com/questions/13142972/using-copy-deepcopy-on-os-environ-in-python-appears-broken>`_. Therefore, convert it to a dict, where modifications will not affect the environment.
env = dict(os.environ)
# Prevent any changes to the database when building a preview question.
env.pop("DBURL", None)
# Run a runestone build.
# We would like to use sys.executable But when we run web2py
# in uwsgi then sys.executable is uwsgi which doesn't work.
# Why not just run runestone?
if "python" not in settings.python_interpreter:
logger.error(f"Error {settings.python_interpreter} is not a valid python")
return json.dumps(
f"Error: settings.python_interpreter must be set to a valid interpreter not {settings.python_interpreter}"
)
popen_obj = subprocess.Popen(
[settings.python_interpreter, "-m", "runestone", "build"],
# The build must be run from the directory containing a ``conf.py`` and all the needed support files.
cwd="applications/{}/build/preview".format(request.application),
# Capture the build output as text in case of an error.
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
# Pass the modified environment which doesn't contain ``DBURL``.
env=env,
)
stdout, stderr = popen_obj.communicate()
# If there was an error, return stdout and stderr from the build.
if popen_obj.returncode != 0:
return json.dumps(
"Error: Runestone build failed:\n\n" + stdout + "\n" + stderr
)
with open(
"applications/{}/build/preview/build/preview/index.html".format(
request.application
),
"r",
encoding="utf-8",
) as ixf:
src = ixf.read()
tree = html.fromstring(src)
if len(tree.cssselect(".runestone")) == 0:
src = ""
result = re.search(
"<begin_directive>(.*)<end_directive>", src, flags=re.DOTALL
)
if result:
ctext = result.group(1)
else:
component = tree.cssselect(".system-message")
if len(component) > 0:
ctext = html.tostring(component[0]).decode("utf-8")
logger.debug("error - ", ctext)
else:
ctext = "Error: Runestone content missing."
return json.dumps(ctext)
except Exception as ex:
return json.dumps("Error: {}".format(ex))
def save_donate():
if auth.user:
db(db.auth_user.id == auth.user.id).update(donated=True)
def did_donate():
if auth.user:
d_status = (
db(db.auth_user.id == auth.user.id).select(db.auth_user.donated).first()
)
return json.dumps(dict(donate=d_status.donated))
return json.dumps(dict(donate=False))
def get_datafile():
"""
course_id - string, the name of the course
acid - the acid of this datafile
"""
course = request.vars.course_id # the course name
the_course = db(db.courses.course_name == course).select(**SELECT_CACHE).first()
acid = request.vars.acid
file_contents = (
db(
(db.source_code.acid == acid)
& (
(db.source_code.course_id == the_course.base_course)
| (db.source_code.course_id == course)
)
)
.select(db.source_code.main_code)
.first()
)
if file_contents:
file_contents = file_contents.main_code
else:
file_contents = None
return json.dumps(dict(data=file_contents))
@auth.requires(
lambda: verifyInstructorStatus(auth.user.course_name, auth.user),
requires_login=True,
)
def broadcast_code():
"""
Callable by an instructor to send the code in their scratch activecode
to all students in the class.
"""
the_course = (
db(db.courses.course_name == auth.user.course_name)
.select(**SELECT_CACHE)
.first()
)
cid = the_course.id
student_list = db(
(db.user_courses.course_id == cid)
& (db.auth_user.id == db.user_courses.user_id)
).select()
shared_code = (
"{} Instructor shared code on {}\n".format(
COMMENT_MAP.get(request.vars.lang, "#"), datetime.datetime.utcnow().date()
)
+ request.vars.code
)
counter = 0
for student in student_list:
if student.auth_user.id == auth.user.id:
continue
sid = student.auth_user.username
try:
db.code.insert(
sid=sid,
acid=request.vars.divid,
code=shared_code,
emessage="",
timestamp=datetime.datetime.utcnow(),
course_id=cid,
language=request.vars.lang,
comment="Instructor shared code",
)
except Exception as e:
logger.error("Failed to insert instructor code! details: {}".format(e))
return json.dumps(dict(mess="failed"))
counter += 1
return json.dumps(dict(mess="success", share_count=counter))
def _same_class(user1: str, user2: str) -> bool:
user1_course = (
db(db.auth_user.username == user1).select(db.auth_user.course_id).first()
)
user2_course = (
db(db.auth_user.username == user2).select(db.auth_user.course_id).first()
)
return user1_course == user2_course
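# --- Illustrative sketch (not part of the original controller) ---
# _same_class() above compares the auth_user.course_id rows selected for the
# two usernames; runlog() only copies shared code to a partner when this
# returns True. The usernames below are hypothetical.
def _example_same_class():
    return _same_class("student_a", "student_b")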
def login_status():
if auth.user:
return json.dumps(dict(status="loggedin", course_name=auth.user.course_name))
else:
return json.dumps(dict(status="loggedout", course_name=auth.user.course_name))
auto_gradable_q = [
"clickablearea",
"mchoice",
"parsonsprob",
"dragndrop",
"fillintheblank",
"quizly",
"khanex",
]
def get_question_source():
"""Called from the selectquestion directive
There are 4 cases:
1. If there is only 1 question in the question list then return the html source for it.
2. If there are multiple questions then choose a question at random.
3. If a proficiency is selected then select a random question that tests that proficiency.
4. If the question is an AB question then see if this student is an A or a B or assign them to one randomly.
In the last two cases, first check to see if there is a question for this student for this
component that was previously selected.
Returns:
json: html source for this question
"""
prof = False
points = request.vars.points
logger.debug(f"POINTS = {points}")
min_difficulty = request.vars.min_difficulty
max_difficulty = request.vars.max_difficulty
not_seen_ever = request.vars.not_seen_ever
autogradable = request.vars.autogradable
is_primary = request.vars.primary
is_ab = request.vars.AB
selector_id = request.vars["selector_id"]
assignment_name = request.vars["timedWrapper"]
toggle = request.vars["toggleOptions"]
questionlist = []
# If the question has a :points: option then those points are the default.
# However, sometimes questions are entered in the web UI without :points:
# and the points are assigned in the UI instead. If this is part of an
# assignment or timed exam AND the points are set in the web UI, we use
# the points from the UI over the :points: value. If the assignment or
# exam is written entirely in RST, the points in the UI will match the
# points from the assignment anyway.
if assignment_name:
ui_points = (
db(
(db.assignments.name == assignment_name)
& (db.assignments.id == db.assignment_questions.assignment_id)
& (db.assignment_questions.question_id == db.questions.id)
& (db.questions.name == selector_id)
)
.select(db.assignment_questions.points)
.first()
)
logger.debug(
f"Assignment Points for {assignment_name}, {selector_id} = {ui_points}"
)
if ui_points:
points = ui_points.points
if request.vars["questions"]:
questionlist = request.vars["questions"].split(",")
questionlist = [q.strip() for q in questionlist]
elif request.vars["proficiency"]:
prof = request.vars["proficiency"]
query = (db.competency.competency == prof) & (
db.competency.question == db.questions.id
)
if is_primary:
query = query & (db.competency.is_primary == True)
if min_difficulty:
query = query & (db.questions.difficulty >= float(min_difficulty))
if max_difficulty:
query = query & (db.questions.difficulty <= float(max_difficulty))
if autogradable:
query = query & (
(db.questions.autograde == "unittest")
| db.questions.question_type.contains(auto_gradable_q, all=False)
)
res = db(query).select(db.questions.name)
logger.debug(f"Query was {db._lastsql}")
if res:
questionlist = [row.name for row in res]
else:
questionlist = []
logger.error(f"No questions found for proficiency {prof}")
return json.dumps(f"<p>No Questions found for proficiency: {prof}</p>")
if not auth.user:
# user is not logged in so just give them a random question from questions list
# and be done with it.
if questionlist:
q = random.choice(questionlist)
res = db(db.questions.name == q).select(db.questions.htmlsrc).first()
if res:
return json.dumps(res.htmlsrc)
else:
return json.dumps(f"<p>Question {q} is not in the database.</p>")
else:
return json.dumps(f"<p>No Questions available</p>")
logger.debug(f"is_ab is {is_ab}")
if is_ab:
res = db(
(db.user_experiment.sid == auth.user.username)
& (db.user_experiment.experiment_id == is_ab)
).select(orderby=db.user_experiment.id)
if not res:
exp_group = random.randrange(2)
db.user_experiment.insert(
sid=auth.user.username, experiment_id=is_ab, exp_group=exp_group
)
logger.debug(f"added {auth.user.username} to {is_ab} group {exp_group}")
else:
exp_group = res[0].exp_group
logger.debug(f"experimental group is {exp_group}")
prev_selection = (
db(
(db.selected_questions.sid == auth.user.username)
& (db.selected_questions.selector_id == selector_id)
)
.select()
.first()
)
if prev_selection:
questionid = prev_selection.selected_id
else:
questionid = questionlist[exp_group]
if not is_ab:
poss = set()
if not_seen_ever:
seenq = db(
(db.useinfo.sid == auth.user.username)
& (db.useinfo.div_id.contains(questionlist, all=False))
).select(db.useinfo.div_id)
seen = set([x.div_id for x in seenq])
poss = set(questionlist)
questionlist = list(poss - seen)
if len(questionlist) == 0 and len(poss) > 0:
questionlist = list(poss)
htmlsrc = ""
prev_selection = (
db(
(db.selected_questions.sid == auth.user.username)
& (db.selected_questions.selector_id == selector_id)
)
.select()
.first()
)
if prev_selection:
questionid = prev_selection.selected_id
else:
# Eliminate any previous exam questions for this student
prev_questions = db(db.selected_questions.sid == auth.user.username).select(
db.selected_questions.selected_id
)
prev_questions = set([row.selected_id for row in prev_questions])
possible = set(questionlist)
questionlist = list(possible - prev_questions)
if questionlist:
questionid = random.choice(questionlist)
else:
# If there are no questions left we should still return a random question.
questionid = random.choice(list(possible))
logger.debug(f"toggle is {toggle}")
if toggle:
prev_selection = (
db(
(db.selected_questions.sid == auth.user.username)
& (db.selected_questions.selector_id == selector_id)
)
.select()
.first()
)
if prev_selection:
questionid = prev_selection.selected_id
else:
questionid = request.vars["questions"].split(",")[0]
# else:
# logger.error(
# f"Question ID '{questionid}' not found in select question list of '{selector_id}'."
# )
# return json.dumps(
# f"<p>Question ID '{questionid}' not found in select question list of '{selector_id}'.</p>"
# )
res = db((db.questions.name == questionid)).select(db.questions.htmlsrc).first()
if res and not prev_selection:
qid = db.selected_questions.insert(
selector_id=selector_id,
sid=auth.user.username,
selected_id=questionid,
points=points,
)
if not qid:
logger.error(
f"Failed to insert a selected question for {selector_id} and {auth.user.username}"
)
else:
logger.debug(
f"Did not insert a record for {selector_id}, {questionid} Conditions are {res} QL: {questionlist} PREV: {prev_selection}"
)
if res and res.htmlsrc:
htmlsrc = res.htmlsrc
else:
logger.error(
f"HTML Source not found for {questionid} in course {auth.user.course_name} for {auth.user.username}"
)
htmlsrc = "<p>No preview available</p>"
return json.dumps(htmlsrc)
@auth.requires_login()
def update_selected_question():
"""
This endpoint is used by the selectquestion problems that allow the
student to select the problem they work on. For example they may have
a programming problem that can be solved with writing code, or they
can switch to a parsons problem if necessary.
Caller must provide:
* ``metaid`` -- the id of the selectquestion
* ``selected`` -- the id of the real question chosen by the student
"""
sid = auth.user.username
selector_id = request.vars.metaid
selected_id = request.vars.selected
logger.debug(f"USQ - {selector_id} --> {selected_id} for {sid}")
db.selected_questions.update_or_insert(
(db.selected_questions.selector_id == selector_id)
& (db.selected_questions.sid == sid),
selected_id=selected_id,
selector_id=selector_id,
sid=sid,
)
| 34.340325
| 379
| 0.554873
|
aafc8d2e72cc52f6f98aa56679df42389f794787
| 7,529
|
py
|
Python
|
src/biokbase/narrative/tests/test_job.py
|
Tianhao-Gu/narrative-jupyterlab
|
94a4b4a6bbb583f65ce50c8f8343083aceafff05
|
[
"MIT"
] | 2
|
2019-05-03T10:12:56.000Z
|
2020-10-26T05:35:16.000Z
|
src/biokbase/narrative/tests/test_job.py
|
Tianhao-Gu/narrative-jupyterlab
|
94a4b4a6bbb583f65ce50c8f8343083aceafff05
|
[
"MIT"
] | 9
|
2019-05-19T04:13:55.000Z
|
2022-03-23T19:18:44.000Z
|
src/biokbase/narrative/tests/test_job.py
|
Tianhao-Gu/narrative-jupyterlab
|
94a4b4a6bbb583f65ce50c8f8343083aceafff05
|
[
"MIT"
] | 2
|
2019-03-12T17:41:10.000Z
|
2019-04-24T15:33:50.000Z
|
import unittest
import mock
import biokbase.narrative.jobs.jobmanager
from biokbase.narrative.jobs.job import Job
from .util import TestConfig
import os
from IPython.display import (
HTML,
Javascript
)
from .narrative_mock.mockclients import get_mock_client
from .narrative_mock.mockcomm import MockComm
from contextlib import contextmanager
from io import StringIO
import sys
@contextmanager
def capture_stdout():
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
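# --- Illustrative usage sketch (not part of the original tests) ---
# capture_stdout() above temporarily redirects stdout/stderr to StringIO
# buffers so a test can assert on printed output; the print call below is
# just an example.
def _example_capture_stdout():
    with capture_stdout() as (out, err):
        print("hello")
    return out.getvalue().strip()  # -> "hello"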
config = TestConfig()
test_jobs = config.load_json_file(config.get('jobs', 'job_info_file'))
class JobTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
info = test_jobs["job_info"][0]
cls.job_id = info[0]
param_info = test_jobs["job_param_info"][cls.job_id]
cls.app_id = param_info["app_id"]
cls.app_tag = param_info.get("meta", {}).get("tag", "dev")
cls.app_version = param_info.get("service_ver", "0.0.1")
cls.cell_id = info[10]["cell_id"]
cls.run_id = info[10]["run_id"]
cls.inputs = param_info["params"]
cls.owner = info[2]
cls.token_id = "temp_token"
@mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client)
def _mocked_job(self, with_version=True, with_cell_id=True, with_run_id=True, with_token_id=True):
kwargs = dict()
if with_version:
kwargs["app_version"] = self.app_version
if with_cell_id:
kwargs["cell_id"] = self.cell_id
if with_run_id:
kwargs["run_id"] = self.run_id
if with_token_id:
kwargs["token_id"] = self.token_id
job = Job(self.job_id, self.app_id, self.inputs, self.owner, tag=self.app_tag, **kwargs)
return job
def test_job_init(self):
job = self._mocked_job()
self.assertEqual(job.job_id, self.job_id)
self.assertEqual(job.app_id, self.app_id)
self.assertEqual(job.inputs, self.inputs)
self.assertEqual(job.owner, self.owner)
self.assertEqual(job.tag, self.app_tag)
self.assertEqual(job.app_version, self.app_version)
self.assertEqual(job.cell_id, self.cell_id)
self.assertEqual(job.run_id, self.run_id)
self.assertEqual(job.token_id, self.token_id)
def test_job_from_state(self):
job_info = {
"params": self.inputs,
"service_ver": self.app_version
}
job = Job.from_state(self.job_id, job_info, self.owner, self.app_id, tag=self.app_tag,
cell_id=self.cell_id, run_id=self.run_id, token_id=self.token_id)
self.assertEqual(job.job_id, self.job_id)
self.assertEqual(job.app_id, self.app_id)
self.assertEqual(job.inputs, self.inputs)
self.assertEqual(job.owner, self.owner)
self.assertEqual(job.tag, self.app_tag)
self.assertEqual(job.app_version, self.app_version)
self.assertEqual(job.cell_id, self.cell_id)
self.assertEqual(job.run_id, self.run_id)
self.assertEqual(job.token_id, self.token_id)
@mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client)
def test_job_info(self):
job = self._mocked_job()
info_str = "App name (id): Test Editor\nVersion: 0.0.1\nStatus: completed\nInputs:\n------\n["
with capture_stdout() as (out, err):
job.info()
self.assertIn(info_str, out.getvalue().strip())
def test_repr(self):
job = self._mocked_job()
job_str = job.__repr__()
self.assertIn(job.job_id, job_str)
@mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client)
def test_repr_js(self):
job = self._mocked_job()
js_out = job._repr_javascript_()
self.assertIsInstance(js_out, str)
# Spot check to make sure the core pieces are present: needs the element.html part, job_id, and widget.
self.assertIn("element.html", js_out)
self.assertIn(job.job_id, js_out)
self.assertIn("kbaseNarrativeJobStatus", js_out)
@mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client)
def test_job_finished(self):
job = self._mocked_job()
self.assertTrue(job.is_finished())
@mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client)
def test_state(self):
job = self._mocked_job()
state = job.state()
self.assertEqual(state['job_id'], job.job_id)
self.assertIn('status', state)
self.assertIn('canceled', state)
self.assertIn('job_state', state)
# to do - add a test to only fetch from _last_state if it's populated and in a final state
job.state()
job.job_id = "not_a_job_id"
job._last_state = None # force it to look up.
with self.assertRaises(Exception) as e:
job.state()
self.assertIn("Unable to fetch info for job", str(e.exception))
@mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client)
def test_show_output_widget(self):
job = self._mocked_job()
out_widget = job.show_output_widget()
@mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client)
def test_log(self):
# Things set up by the mock:
# 1. There's 100 total log lines
# 2. Each line has its line number embedded in it
total_lines = 100
job = self._mocked_job()
logs = job.log()
# we know there's 100 lines total, so roll with it that way.
self.assertEqual(logs[0], total_lines)
self.assertEqual(len(logs[1]), total_lines)
for i in range(len(logs[1])):
line = logs[1][i]
self.assertIn("is_error", line)
self.assertIn("line", line)
self.assertIn(str(i), line["line"])
# grab the last half
offset = 50
logs = job.log(first_line=offset)
self.assertEqual(logs[0], total_lines)
self.assertEqual(len(logs[1]), offset)
for i in range(total_lines - offset):
self.assertIn(str(i+offset), logs[1][i]["line"])
# grab a bite from the middle
num_fetch = 20
logs = job.log(first_line=offset, num_lines=num_fetch)
self.assertEqual(logs[0], total_lines)
self.assertEqual(len(logs[1]), num_fetch)
for i in range(num_fetch):
self.assertIn(str(i+offset), logs[1][i]["line"])
# should normalize negative numbers properly
logs = job.log(first_line=-5)
self.assertEqual(logs[0], total_lines)
self.assertEqual(len(logs[1]), total_lines)
logs = job.log(num_lines=-5)
self.assertEqual(logs[0], total_lines)
self.assertEqual(len(logs[1]), 0)
@mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client)
def test_parameters(self):
job = self._mocked_job()
params = job.parameters()
self.assertIsNotNone(params)
job.inputs = None
params2 = job.parameters()
self.assertIsNotNone(params2)
self.assertEqual(params, params2)
job.job_id = "not_a_job_id"
job.inputs = None
with self.assertRaises(Exception) as e:
job.parameters()
self.assertIn("Unable to fetch parameters for job", str(e.exception))
| 38.413265
| 110
| 0.64298
|
32e11dbb3067ab2d75bfea04e54babba970f55fd
| 662
|
py
|
Python
|
src/web/server.py
|
topinfrassi01/Cumulus
|
13ec845f8e979653a51f9fe5f424c81923fffd92
|
[
"Apache-2.0"
] | null | null | null |
src/web/server.py
|
topinfrassi01/Cumulus
|
13ec845f8e979653a51f9fe5f424c81923fffd92
|
[
"Apache-2.0"
] | null | null | null |
src/web/server.py
|
topinfrassi01/Cumulus
|
13ec845f8e979653a51f9fe5f424c81923fffd92
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask, render_template, request
from services.get_keywords_for_date import get_keywords_for_date
from services.get_articles import get_articles
from datetime import date
from urllib import parse
import json
app = Flask(__name__)
@app.route('/')
def root():
keywords = get_keywords_for_date((date(2017, 2, 3),))
return render_template('index.html', keywords=keywords[0:30])
@app.route('/keyword')
@app.route('/keyword/ids=<ids>')
def keyword(ids):
ids = json.loads(ids[1:-1])
articles = get_articles(ids)
return render_template("keywords.html", articles=articles)
if __name__ == '__main__':
app.run(debug=False)
| 22.827586
| 65
| 0.73716
|
143812cd34df00be8eb35c4da9070c598812ff8a
| 68,091
|
py
|
Python
|
kleister/api/user_api.py
|
kleister/kleister-python
|
321120b96db59e20b30853b44af3bec6b667db05
|
[
"Apache-2.0"
] | null | null | null |
kleister/api/user_api.py
|
kleister/kleister-python
|
321120b96db59e20b30853b44af3bec6b667db05
|
[
"Apache-2.0"
] | 1
|
2018-03-31T12:33:37.000Z
|
2018-03-31T12:33:37.000Z
|
kleister/api/user_api.py
|
kleister/kleister-python
|
321120b96db59e20b30853b44af3bec6b667db05
|
[
"Apache-2.0"
] | null | null | null |
"""
Kleister OpenAPI
API definition for Kleister, manage mod packs for Minecraft # noqa: E501
The version of the OpenAPI document: 1.0.0-alpha1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from kleister.api_client import ApiClient, Endpoint as _Endpoint
from kleister.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types,
)
from kleister.model.general_error import GeneralError
from kleister.model.team_user import TeamUser
from kleister.model.user import User
from kleister.model.user_mod import UserMod
from kleister.model.user_mod_params import UserModParams
from kleister.model.user_pack import UserPack
from kleister.model.user_pack_params import UserPackParams
from kleister.model.user_team_params import UserTeamParams
from kleister.model.validation_error import ValidationError
class UserApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __append_user_to_mod(self, user_id, user_mod, **kwargs):
"""Assign a mod to user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.append_user_to_mod(user_id, user_mod, async_req=True)
>>> result = thread.get()
Args:
user_id (str): A user UUID or slug
user_mod (UserModParams): The user mod data to assign
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number is provided, it will be the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
GeneralError
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
"_return_http_data_only", True
)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
kwargs["user_id"] = user_id
kwargs["user_mod"] = user_mod
return self.call_with_http_info(**kwargs)
self.append_user_to_mod = _Endpoint(
settings={
"response_type": (GeneralError,),
"auth": [],
"endpoint_path": "/users/{user_id}/mods",
"operation_id": "append_user_to_mod",
"http_method": "POST",
"servers": None,
},
params_map={
"all": [
"user_id",
"user_mod",
],
"required": [
"user_id",
"user_mod",
],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"user_id": (str,),
"user_mod": (UserModParams,),
},
"attribute_map": {
"user_id": "user_id",
},
"location_map": {
"user_id": "path",
"user_mod": "body",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": ["application/json"],
},
api_client=api_client,
callable=__append_user_to_mod,
)
def __append_user_to_pack(self, user_id, user_pack, **kwargs):
"""Assign a pack to user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.append_user_to_pack(user_id, user_pack, async_req=True)
>>> result = thread.get()
Args:
user_id (str): A user UUID or slug
user_pack (UserPackParams): The user pack data to assign
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number is provided, it will be the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
GeneralError
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
"_return_http_data_only", True
)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
kwargs["user_id"] = user_id
kwargs["user_pack"] = user_pack
return self.call_with_http_info(**kwargs)
self.append_user_to_pack = _Endpoint(
settings={
"response_type": (GeneralError,),
"auth": [],
"endpoint_path": "/users/{user_id}/packs",
"operation_id": "append_user_to_pack",
"http_method": "POST",
"servers": None,
},
params_map={
"all": [
"user_id",
"user_pack",
],
"required": [
"user_id",
"user_pack",
],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"user_id": (str,),
"user_pack": (UserPackParams,),
},
"attribute_map": {
"user_id": "user_id",
},
"location_map": {
"user_id": "path",
"user_pack": "body",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": ["application/json"],
},
api_client=api_client,
callable=__append_user_to_pack,
)
def __append_user_to_team(self, user_id, user_team, **kwargs):
"""Assign a team to user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.append_user_to_team(user_id, user_team, async_req=True)
>>> result = thread.get()
Args:
user_id (str): A user UUID or slug
user_team (UserTeamParams): The user team data to assign
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number is provided, it will be the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
GeneralError
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
"_return_http_data_only", True
)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
kwargs["user_id"] = user_id
kwargs["user_team"] = user_team
return self.call_with_http_info(**kwargs)
self.append_user_to_team = _Endpoint(
settings={
"response_type": (GeneralError,),
"auth": [],
"endpoint_path": "/users/{user_id}/teams",
"operation_id": "append_user_to_team",
"http_method": "POST",
"servers": None,
},
params_map={
"all": [
"user_id",
"user_team",
],
"required": [
"user_id",
"user_team",
],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"user_id": (str,),
"user_team": (UserTeamParams,),
},
"attribute_map": {
"user_id": "user_id",
},
"location_map": {
"user_id": "path",
"user_team": "body",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": ["application/json"],
},
api_client=api_client,
callable=__append_user_to_team,
)
def __create_user(self, user, **kwargs):
"""Create a new user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_user(user, async_req=True)
>>> result = thread.get()
Args:
user (User): The user data to create
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number is provided, it will be the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
User
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
"_return_http_data_only", True
)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
kwargs["user"] = user
return self.call_with_http_info(**kwargs)
self.create_user = _Endpoint(
settings={
"response_type": (User,),
"auth": [],
"endpoint_path": "/users",
"operation_id": "create_user",
"http_method": "POST",
"servers": None,
},
params_map={
"all": [
"user",
],
"required": [
"user",
],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"user": (User,),
},
"attribute_map": {},
"location_map": {
"user": "body",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": ["application/json"],
},
api_client=api_client,
callable=__create_user,
)
def __delete_user(self, user_id, **kwargs):
"""Delete a specific user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_user(user_id, async_req=True)
>>> result = thread.get()
Args:
user_id (str): A user UUID or slug
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
GeneralError
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
"_return_http_data_only", True
)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
kwargs["user_id"] = user_id
return self.call_with_http_info(**kwargs)
self.delete_user = _Endpoint(
settings={
"response_type": (GeneralError,),
"auth": [],
"endpoint_path": "/users/{user_id}",
"operation_id": "delete_user",
"http_method": "DELETE",
"servers": None,
},
params_map={
"all": [
"user_id",
],
"required": [
"user_id",
],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"user_id": (str,),
},
"attribute_map": {
"user_id": "user_id",
},
"location_map": {
"user_id": "path",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": [],
},
api_client=api_client,
callable=__delete_user,
)
def __delete_user_from_mod(self, user_id, user_mod, **kwargs):
"""Remove a mod from user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_user_from_mod(user_id, user_mod, async_req=True)
>>> result = thread.get()
Args:
user_id (str): A user UUID or slug
user_mod (UserModParams): The user mod data to delete
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
GeneralError
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
"_return_http_data_only", True
)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
kwargs["user_id"] = user_id
kwargs["user_mod"] = user_mod
return self.call_with_http_info(**kwargs)
self.delete_user_from_mod = _Endpoint(
settings={
"response_type": (GeneralError,),
"auth": [],
"endpoint_path": "/users/{user_id}/mods",
"operation_id": "delete_user_from_mod",
"http_method": "DELETE",
"servers": None,
},
params_map={
"all": [
"user_id",
"user_mod",
],
"required": [
"user_id",
"user_mod",
],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"user_id": (str,),
"user_mod": (UserModParams,),
},
"attribute_map": {
"user_id": "user_id",
},
"location_map": {
"user_id": "path",
"user_mod": "body",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": ["application/json"],
},
api_client=api_client,
callable=__delete_user_from_mod,
)
def __delete_user_from_pack(self, user_id, user_pack, **kwargs):
"""Remove a pack from user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_user_from_pack(user_id, user_pack, async_req=True)
>>> result = thread.get()
Args:
user_id (str): A user UUID or slug
user_pack (UserPackParams): The user pack data to delete
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
GeneralError
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
"_return_http_data_only", True
)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
kwargs["user_id"] = user_id
kwargs["user_pack"] = user_pack
return self.call_with_http_info(**kwargs)
self.delete_user_from_pack = _Endpoint(
settings={
"response_type": (GeneralError,),
"auth": [],
"endpoint_path": "/users/{user_id}/packs",
"operation_id": "delete_user_from_pack",
"http_method": "DELETE",
"servers": None,
},
params_map={
"all": [
"user_id",
"user_pack",
],
"required": [
"user_id",
"user_pack",
],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"user_id": (str,),
"user_pack": (UserPackParams,),
},
"attribute_map": {
"user_id": "user_id",
},
"location_map": {
"user_id": "path",
"user_pack": "body",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": ["application/json"],
},
api_client=api_client,
callable=__delete_user_from_pack,
)
def __delete_user_from_team(self, user_id, user_team, **kwargs):
"""Remove a team from user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_user_from_team(user_id, user_team, async_req=True)
>>> result = thread.get()
Args:
user_id (str): A user UUID or slug
user_team (UserTeamParams): The user team data to delete
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
GeneralError
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
"_return_http_data_only", True
)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
kwargs["user_id"] = user_id
kwargs["user_team"] = user_team
return self.call_with_http_info(**kwargs)
self.delete_user_from_team = _Endpoint(
settings={
"response_type": (GeneralError,),
"auth": [],
"endpoint_path": "/users/{user_id}/teams",
"operation_id": "delete_user_from_team",
"http_method": "DELETE",
"servers": None,
},
params_map={
"all": [
"user_id",
"user_team",
],
"required": [
"user_id",
"user_team",
],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"user_id": (str,),
"user_team": (UserTeamParams,),
},
"attribute_map": {
"user_id": "user_id",
},
"location_map": {
"user_id": "path",
"user_team": "body",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": ["application/json"],
},
api_client=api_client,
callable=__delete_user_from_team,
)
def __list_user_mods(self, user_id, **kwargs):
"""Fetch all mods assigned to user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_user_mods(user_id, async_req=True)
>>> result = thread.get()
Args:
user_id (str): A user UUID or slug
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[UserMod]
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
"_return_http_data_only", True
)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
kwargs["user_id"] = user_id
return self.call_with_http_info(**kwargs)
self.list_user_mods = _Endpoint(
settings={
"response_type": ([UserMod],),
"auth": [],
"endpoint_path": "/users/{user_id}/mods",
"operation_id": "list_user_mods",
"http_method": "GET",
"servers": None,
},
params_map={
"all": [
"user_id",
],
"required": [
"user_id",
],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"user_id": (str,),
},
"attribute_map": {
"user_id": "user_id",
},
"location_map": {
"user_id": "path",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": [],
},
api_client=api_client,
callable=__list_user_mods,
)
def __list_user_packs(self, user_id, **kwargs):
"""Fetch all packs assigned to user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_user_packs(user_id, async_req=True)
>>> result = thread.get()
Args:
user_id (str): A user UUID or slug
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[UserPack]
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
"_return_http_data_only", True
)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
kwargs["user_id"] = user_id
return self.call_with_http_info(**kwargs)
self.list_user_packs = _Endpoint(
settings={
"response_type": ([UserPack],),
"auth": [],
"endpoint_path": "/users/{user_id}/packs",
"operation_id": "list_user_packs",
"http_method": "GET",
"servers": None,
},
params_map={
"all": [
"user_id",
],
"required": [
"user_id",
],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"user_id": (str,),
},
"attribute_map": {
"user_id": "user_id",
},
"location_map": {
"user_id": "path",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": [],
},
api_client=api_client,
callable=__list_user_packs,
)
def __list_user_teams(self, user_id, **kwargs):
"""Fetch all teams assigned to user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_user_teams(user_id, async_req=True)
>>> result = thread.get()
Args:
user_id (str): A user UUID or slug
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[TeamUser]
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
"_return_http_data_only", True
)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
kwargs["user_id"] = user_id
return self.call_with_http_info(**kwargs)
self.list_user_teams = _Endpoint(
settings={
"response_type": ([TeamUser],),
"auth": [],
"endpoint_path": "/users/{user_id}/teams",
"operation_id": "list_user_teams",
"http_method": "GET",
"servers": None,
},
params_map={
"all": [
"user_id",
],
"required": [
"user_id",
],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"user_id": (str,),
},
"attribute_map": {
"user_id": "user_id",
},
"location_map": {
"user_id": "path",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": [],
},
api_client=api_client,
callable=__list_user_teams,
)
def __list_users(self, **kwargs):
"""Fetch all available users # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_users(async_req=True)
>>> result = thread.get()
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[User]
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
"_return_http_data_only", True
)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
return self.call_with_http_info(**kwargs)
self.list_users = _Endpoint(
settings={
"response_type": ([User],),
"auth": [],
"endpoint_path": "/users",
"operation_id": "list_users",
"http_method": "GET",
"servers": None,
},
params_map={
"all": [],
"required": [],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {},
"attribute_map": {},
"location_map": {},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": [],
},
api_client=api_client,
callable=__list_users,
)
def __permit_user_mod(self, user_id, user_mod, **kwargs):
"""Update mod perms for user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.permit_user_mod(user_id, user_mod, async_req=True)
>>> result = thread.get()
Args:
user_id (str): A user UUID or slug
user_mod (UserModParams): The user mod data to update
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
GeneralError
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
"_return_http_data_only", True
)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
kwargs["user_id"] = user_id
kwargs["user_mod"] = user_mod
return self.call_with_http_info(**kwargs)
self.permit_user_mod = _Endpoint(
settings={
"response_type": (GeneralError,),
"auth": [],
"endpoint_path": "/users/{user_id}/mods",
"operation_id": "permit_user_mod",
"http_method": "PUT",
"servers": None,
},
params_map={
"all": [
"user_id",
"user_mod",
],
"required": [
"user_id",
"user_mod",
],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"user_id": (str,),
"user_mod": (UserModParams,),
},
"attribute_map": {
"user_id": "user_id",
},
"location_map": {
"user_id": "path",
"user_mod": "body",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": ["application/json"],
},
api_client=api_client,
callable=__permit_user_mod,
)
def __permit_user_pack(self, user_id, user_pack, **kwargs):
"""Update pack perms for user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.permit_user_pack(user_id, user_pack, async_req=True)
>>> result = thread.get()
Args:
user_id (str): A user UUID or slug
user_pack (UserPackParams): The user pack data to update
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
GeneralError
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
"_return_http_data_only", True
)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
kwargs["user_id"] = user_id
kwargs["user_pack"] = user_pack
return self.call_with_http_info(**kwargs)
self.permit_user_pack = _Endpoint(
settings={
"response_type": (GeneralError,),
"auth": [],
"endpoint_path": "/users/{user_id}/packs",
"operation_id": "permit_user_pack",
"http_method": "PUT",
"servers": None,
},
params_map={
"all": [
"user_id",
"user_pack",
],
"required": [
"user_id",
"user_pack",
],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"user_id": (str,),
"user_pack": (UserPackParams,),
},
"attribute_map": {
"user_id": "user_id",
},
"location_map": {
"user_id": "path",
"user_pack": "body",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": ["application/json"],
},
api_client=api_client,
callable=__permit_user_pack,
)
def __permit_user_team(self, user_id, user_team, **kwargs):
"""Update team perms for user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.permit_user_team(user_id, user_team, async_req=True)
>>> result = thread.get()
Args:
user_id (str): A user UUID or slug
user_team (UserTeamParams): The user team data to update
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
GeneralError
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
"_return_http_data_only", True
)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
kwargs["user_id"] = user_id
kwargs["user_team"] = user_team
return self.call_with_http_info(**kwargs)
self.permit_user_team = _Endpoint(
settings={
"response_type": (GeneralError,),
"auth": [],
"endpoint_path": "/users/{user_id}/teams",
"operation_id": "permit_user_team",
"http_method": "PUT",
"servers": None,
},
params_map={
"all": [
"user_id",
"user_team",
],
"required": [
"user_id",
"user_team",
],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"user_id": (str,),
"user_team": (UserTeamParams,),
},
"attribute_map": {
"user_id": "user_id",
},
"location_map": {
"user_id": "path",
"user_team": "body",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": ["application/json"],
},
api_client=api_client,
callable=__permit_user_team,
)
def __show_user(self, user_id, **kwargs):
"""Fetch a specific user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.show_user(user_id, async_req=True)
>>> result = thread.get()
Args:
user_id (str): A user UUID or slug
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
User
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
"_return_http_data_only", True
)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
kwargs["user_id"] = user_id
return self.call_with_http_info(**kwargs)
self.show_user = _Endpoint(
settings={
"response_type": (User,),
"auth": [],
"endpoint_path": "/users/{user_id}",
"operation_id": "show_user",
"http_method": "GET",
"servers": None,
},
params_map={
"all": [
"user_id",
],
"required": [
"user_id",
],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"user_id": (str,),
},
"attribute_map": {
"user_id": "user_id",
},
"location_map": {
"user_id": "path",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": [],
},
api_client=api_client,
callable=__show_user,
)
def __update_user(self, user_id, user, **kwargs):
"""Update a specific user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_user(user_id, user, async_req=True)
>>> result = thread.get()
Args:
user_id (str): A user UUID or slug
user (User): The user data to update
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
User
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
"_return_http_data_only", True
)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index")
kwargs["user_id"] = user_id
kwargs["user"] = user
return self.call_with_http_info(**kwargs)
self.update_user = _Endpoint(
settings={
"response_type": (User,),
"auth": [],
"endpoint_path": "/users/{user_id}",
"operation_id": "update_user",
"http_method": "PUT",
"servers": None,
},
params_map={
"all": [
"user_id",
"user",
],
"required": [
"user_id",
"user",
],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"user_id": (str,),
"user": (User,),
},
"attribute_map": {
"user_id": "user_id",
},
"location_map": {
"user_id": "path",
"user": "body",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": ["application/json"],
},
api_client=api_client,
callable=__update_user,
)
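        # ------------------------------------------------------------------
        # Hedged usage sketch (added by the editor, not part of the generated
        # client above). The package and class names below are assumptions based
        # on typical openapi-generator output; only the calling conventions
        # documented in the docstrings above (async_req, thread.get(),
        # _request_timeout, _return_http_data_only) come from the source.
        #
        # from client_package import ApiClient, Configuration      # assumed names
        # from client_package.api.users_api import UsersApi        # assumed names
        #
        # configuration = Configuration(host="https://example.test/api/v1")  # hypothetical host
        # with ApiClient(configuration) as api_client:
        #     api = UsersApi(api_client)
        #
        #     # Synchronous call: returns the deserialized model directly,
        #     # because _return_http_data_only defaults to True.
        #     user = api.show_user("some-user-id")
        #
        #     # Asynchronous call: returns a thread; .get() blocks for the result.
        #     thread = api.list_users(async_req=True)
        #     users = thread.get()
        #
        #     # Per-request timeout: one float for the total timeout, or a
        #     # (connection, read) tuple.
        #     mods = api.list_user_mods("some-user-id", _request_timeout=(3.05, 27))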
| 41.142598
| 88
| 0.494397
|
2ad41f64a55f8393362e82a696f036bfa0f4d399
| 15,502
|
py
|
Python
|
python/paddle/nn/__init__.py
|
wangwin/Paddle
|
b7d185d6caf78630d228dfcb90750a21d637583d
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/nn/__init__.py
|
wangwin/Paddle
|
b7d185d6caf78630d228dfcb90750a21d637583d
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/nn/__init__.py
|
wangwin/Paddle
|
b7d185d6caf78630d228dfcb90750a21d637583d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: import all neural network related api under this directory,
# including layers, linear, conv, rnn etc.
# __all__ = []
from .layer import norm
__all__ = []
__all__ += norm.__all__
# TODO: define alias in nn directory
# from .clip import ErrorClipByValue #DEFINE_ALIAS
# from .clip import GradientClipByGlobalNorm #DEFINE_ALIAS
# from .clip import GradientClipByNorm #DEFINE_ALIAS
# from .clip import GradientClipByValue #DEFINE_ALIAS
# from .clip import set_gradient_clip #DEFINE_ALIAS
# from .clip import clip #DEFINE_ALIAS
# from .clip import clip_by_norm #DEFINE_ALIAS
# from .initalizer import Bilinear #DEFINE_ALIAS
# from .initalizer import Constant #DEFINE_ALIAS
# from .initalizer import MSRA #DEFINE_ALIAS
# from .initalizer import Normal #DEFINE_ALIAS
# from .initalizer import TruncatedNormal #DEFINE_ALIAS
# from .initalizer import Uniform #DEFINE_ALIAS
# from .initalizer import Xavier #DEFINE_ALIAS
# from .decode import BeamSearchDecoder #DEFINE_ALIAS
# from .decode import Decoder #DEFINE_ALIAS
# from .decode import beam_search #DEFINE_ALIAS
# from .decode import beam_search_decode #DEFINE_ALIAS
# from .decode import crf_decoding #DEFINE_ALIAS
# from .decode import ctc_greedy_decoder #DEFINE_ALIAS
# from .decode import dynamic_decode #DEFINE_ALIAS
# from .decode import gather_tree #DEFINE_ALIAS
# from .bin.conv import 0 #DEFINE_ALIAS
# from .control_flow import case #DEFINE_ALIAS
# from .control_flow import cond #DEFINE_ALIAS
# from .control_flow import DynamicRNN #DEFINE_ALIAS
# from .control_flow import StaticRNN #DEFINE_ALIAS
# from .control_flow import switch_case #DEFINE_ALIAS
# from .control_flow import while_loop #DEFINE_ALIAS
# from .control_flow import rnn #DEFINE_ALIAS
# from .layer.conv import Conv2D #DEFINE_ALIAS
# from .layer.conv import Conv2DTranspose #DEFINE_ALIAS
# from .layer.conv import Conv3D #DEFINE_ALIAS
# from .layer.conv import Conv3DTranspose #DEFINE_ALIAS
# from .layer.conv import TreeConv #DEFINE_ALIAS
# from .layer.conv import Conv1D #DEFINE_ALIAS
# from .layer.loss import NCELoss #DEFINE_ALIAS
from .layer.loss import CrossEntropyLoss #DEFINE_ALIAS
# from .layer.loss import MSELoss #DEFINE_ALIAS
from .layer.loss import L1Loss #DEFINE_ALIAS
from .layer import loss #DEFINE_ALIAS
from .layer import conv #DEFINE_ALIAS
from .layer.conv import Conv2D, Conv2DTranspose, Conv3D, Conv3DTranspose #DEFINE_ALIAS
from .layer.loss import NLLLoss #DEFINE_ALIAS
from .layer.loss import BCELoss #DEFINE_ALIAS
# from .layer.learning_rate import CosineDecay #DEFINE_ALIAS
# from .layer.learning_rate import ExponentialDecay #DEFINE_ALIAS
# from .layer.learning_rate import InverseTimeDecay #DEFINE_ALIAS
# from .layer.learning_rate import NaturalExpDecay #DEFINE_ALIAS
# from .layer.learning_rate import NoamDecay #DEFINE_ALIAS
# from .layer.learning_rate import PiecewiseDecay #DEFINE_ALIAS
# from .layer.learning_rate import PolynomialDecay #DEFINE_ALIAS
# from .layer.transformer import #DEFINE_ALIAS
# from .layer.norm import BatchNorm #DEFINE_ALIAS
# from .layer.norm import GroupNorm #DEFINE_ALIAS
# from .layer.norm import LayerNorm #DEFINE_ALIAS
from .layer.norm import InstanceNorm #DEFINE_ALIAS
# from .layer.norm import SpectralNorm #DEFINE_ALIAS
from .layer.activation import HSigmoid #DEFINE_ALIAS
# from .layer.activation import PReLU #DEFINE_ALIAS
from .layer.activation import ReLU #DEFINE_ALIAS
from .layer.activation import Sigmoid #DEFINE_ALIAS
# from .layer.activation import Softmax #DEFINE_ALIAS
# from .layer.activation import LogSoftmax #DEFINE_ALIAS
from .layer.extension import RowConv #DEFINE_ALIAS
from .layer.activation import LogSoftmax #DEFINE_ALIAS
# from .layer.rnn import RNNCell #DEFINE_ALIAS
# from .layer.rnn import GRUCell #DEFINE_ALIAS
# from .layer.rnn import LSTMCell #DEFINE_ALIAS
# from .layer.common import BilinearTensorProduct #DEFINE_ALIAS
# from .layer.common import Pool2D #DEFINE_ALIAS
# from .layer.common import Embedding #DEFINE_ALIAS
# from .layer.common import Linear #DEFINE_ALIAS
# from .layer.common import UpSample #DEFINE_ALIAS
from .functional.conv import conv2d #DEFINE_ALIAS
from .functional.conv import conv2d_transpose #DEFINE_ALIAS
from .functional.conv import conv3d #DEFINE_ALIAS
from .functional.conv import conv3d_transpose #DEFINE_ALIAS
# from .functional.loss import bpr_loss #DEFINE_ALIAS
# from .functional.loss import center_loss #DEFINE_ALIAS
# from .functional.loss import cross_entropy #DEFINE_ALIAS
# from .functional.loss import dice_loss #DEFINE_ALIAS
# from .functional.loss import edit_distance #DEFINE_ALIAS
# from .functional.loss import huber_loss #DEFINE_ALIAS
# from .functional.loss import iou_similarity #DEFINE_ALIAS
# from .functional.loss import kldiv_loss #DEFINE_ALIAS
# from .functional.loss import log_loss #DEFINE_ALIAS
# from .functional.loss import margin_rank_loss #DEFINE_ALIAS
# from .functional.loss import mse_loss #DEFINE_ALIAS
# from .functional.loss import nce #DEFINE_ALIAS
# from .functional.loss import npair_loss #DEFINE_ALIAS
# from .functional.loss import rank_loss #DEFINE_ALIAS
# from .functional.loss import sampled_softmax_with_cross_entropy #DEFINE_ALIAS
# from .functional.loss import sigmoid_cross_entropy_with_logits #DEFINE_ALIAS
# from .functional.loss import sigmoid_focal_loss #DEFINE_ALIAS
# from .functional.loss import smooth_l1 #DEFINE_ALIAS
# from .functional.loss import softmax_with_cross_entropy #DEFINE_ALIAS
# from .functional.loss import square_error_cost #DEFINE_ALIAS
# from .functional.loss import ssd_loss #DEFINE_ALIAS
# from .functional.loss import teacher_student_sigmoid_loss #DEFINE_ALIAS
# from .functional.learning_rate import cosine_decay #DEFINE_ALIAS
# from .functional.learning_rate import exponential_decay #DEFINE_ALIAS
# from .functional.learning_rate import inverse_time_decay #DEFINE_ALIAS
# from .functional.learning_rate import natural_exp_decay #DEFINE_ALIAS
# from .functional.learning_rate import noam_decay #DEFINE_ALIAS
# from .functional.learning_rate import piecewise_decay #DEFINE_ALIAS
# from .functional.learning_rate import polynomial_decay #DEFINE_ALIAS
# from .functional.learning_rate import linear_lr_warmup #DEFINE_ALIAS
# from .functional.transformer import #DEFINE_ALIAS
# from .functional.pooling import pool2d #DEFINE_ALIAS
# from .functional.pooling import pool3d #DEFINE_ALIAS
# from .functional.pooling import adaptive_pool2d #DEFINE_ALIAS
# from .functional.pooling import adaptive_pool3d #DEFINE_ALIAS
# from .functional.norm import batch_norm #DEFINE_ALIAS
# from .functional.norm import data_norm #DEFINE_ALIAS
# from .functional.norm import group_norm #DEFINE_ALIAS
# from .functional.norm import instance_norm #DEFINE_ALIAS
# from .functional.norm import l2_normalize #DEFINE_ALIAS
# from .functional.norm import layer_norm #DEFINE_ALIAS
# from .functional.norm import lrn #DEFINE_ALIAS
# from .functional.norm import spectral_norm #DEFINE_ALIAS
# from .functional.vision import affine_channel #DEFINE_ALIAS
# from .functional.vision import affine_grid #DEFINE_ALIAS
# from .functional.vision import anchor_generator #DEFINE_ALIAS
# from .functional.vision import bipartite_match #DEFINE_ALIAS
# from .functional.vision import box_clip #DEFINE_ALIAS
# from .functional.vision import box_coder #DEFINE_ALIAS
# from .functional.vision import box_decoder_and_assign #DEFINE_ALIAS
# from .functional.vision import collect_fpn_proposals #DEFINE_ALIAS
# from .functional.vision import deformable_conv #DEFINE_ALIAS
# from .functional.vision import deformable_roi_pooling #DEFINE_ALIAS
# from .functional.vision import density_prior_box #DEFINE_ALIAS
# from .functional.vision import detection_output #DEFINE_ALIAS
# from .functional.vision import distribute_fpn_proposals #DEFINE_ALIAS
# from .functional.vision import fsp_matrix #DEFINE_ALIAS
# from .functional.vision import generate_mask_labels #DEFINE_ALIAS
# from .functional.vision import generate_proposal_labels #DEFINE_ALIAS
# from .functional.vision import generate_proposals #DEFINE_ALIAS
# from .functional.vision import grid_sampler #DEFINE_ALIAS
# from .functional.vision import image_resize #DEFINE_ALIAS
# from .functional.vision import image_resize_short #DEFINE_ALIAS
# from .functional.vision import multi_box_head #DEFINE_ALIAS
# from .functional.vision import pixel_shuffle #DEFINE_ALIAS
# from .functional.vision import prior_box #DEFINE_ALIAS
# from .functional.vision import prroi_pool #DEFINE_ALIAS
# from .functional.vision import psroi_pool #DEFINE_ALIAS
# from .functional.vision import resize_bilinear #DEFINE_ALIAS
# from .functional.vision import resize_nearest #DEFINE_ALIAS
# from .functional.vision import resize_trilinear #DEFINE_ALIAS
# from .functional.vision import retinanet_detection_output #DEFINE_ALIAS
# from .functional.vision import retinanet_target_assign #DEFINE_ALIAS
# from .functional.vision import roi_align #DEFINE_ALIAS
# from .functional.vision import roi_perspective_transform #DEFINE_ALIAS
# from .functional.vision import roi_pool #DEFINE_ALIAS
# from .functional.vision import shuffle_channel #DEFINE_ALIAS
# from .functional.vision import space_to_depth #DEFINE_ALIAS
# from .functional.vision import yolo_box #DEFINE_ALIAS
# from .functional.vision import yolov3_loss #DEFINE_ALIAS
# from .functional.activation import brelu #DEFINE_ALIAS
# from .functional.activation import elu #DEFINE_ALIAS
# from .functional.activation import erf #DEFINE_ALIAS
# from .functional.activation import gelu #DEFINE_ALIAS
# from .functional.activation import hard_shrink #DEFINE_ALIAS
# from .functional.activation import hard_sigmoid #DEFINE_ALIAS
# from .functional.activation import hard_swish #DEFINE_ALIAS
from .functional.activation import hsigmoid #DEFINE_ALIAS
# from .functional.activation import leaky_relu #DEFINE_ALIAS
# from .functional.activation import logsigmoid #DEFINE_ALIAS
# from .functional.activation import maxout #DEFINE_ALIAS
# from .functional.activation import prelu #DEFINE_ALIAS
from .functional.activation import relu #DEFINE_ALIAS
# from .functional.activation import relu6 #DEFINE_ALIAS
# from .functional.activation import selu #DEFINE_ALIAS
from .functional.activation import sigmoid #DEFINE_ALIAS
# from .functional.activation import soft_relu #DEFINE_ALIAS
# from .functional.activation import softmax #DEFINE_ALIAS
# from .functional.activation import softplus #DEFINE_ALIAS
# from .functional.activation import softshrink #DEFINE_ALIAS
# from .functional.activation import softsign #DEFINE_ALIAS
# from .functional.activation import swish #DEFINE_ALIAS
# from .functional.activation import tanh_shrink #DEFINE_ALIAS
# from .functional.activation import thresholded_relu #DEFINE_ALIAS
from .functional.activation import log_softmax #DEFINE_ALIAS
# from .functional.extension import add_position_encoding #DEFINE_ALIAS
# from .functional.extension import autoincreased_step_counter #DEFINE_ALIAS
# from .functional.extension import continuous_value_model #DEFINE_ALIAS
# from .functional.extension import filter_by_instag #DEFINE_ALIAS
# from .functional.extension import linear_chain_crf #DEFINE_ALIAS
# from .functional.extension import merge_selected_rows #DEFINE_ALIAS
# from .functional.extension import multiclass_nms #DEFINE_ALIAS
# from .functional.extension import polygon_box_transform #DEFINE_ALIAS
# from .functional.extension import random_crop #DEFINE_ALIAS
from .functional.extension import row_conv #DEFINE_ALIAS
# from .functional.extension import rpn_target_assign #DEFINE_ALIAS
# from .functional.extension import similarity_focus #DEFINE_ALIAS
# from .functional.extension import target_assign #DEFINE_ALIAS
# from .functional.extension import temporal_shift #DEFINE_ALIAS
# from .functional.extension import warpctc #DEFINE_ALIAS
# from .functional.extension import diag_embed #DEFINE_ALIAS
# from .functional.rnn import gru_unit #DEFINE_ALIAS
# from .functional.rnn import lstm #DEFINE_ALIAS
# from .functional.rnn import lstm_unit #DEFINE_ALIAS
# from .functional.lod import sequence_concat #DEFINE_ALIAS
# from .functional.lod import sequence_conv #DEFINE_ALIAS
# from .functional.lod import sequence_enumerate #DEFINE_ALIAS
# from .functional.lod import sequence_expand_as #DEFINE_ALIAS
# from .functional.lod import sequence_expand #DEFINE_ALIAS
# from .functional.lod import sequence_first_step #DEFINE_ALIAS
# from .functional.lod import sequence_last_step #DEFINE_ALIAS
# from .functional.lod import sequence_mask #DEFINE_ALIAS
# from .functional.lod import sequence_pad #DEFINE_ALIAS
# from .functional.lod import sequence_pool #DEFINE_ALIAS
# from .functional.lod import sequence_reshape #DEFINE_ALIAS
# from .functional.lod import sequence_reverse #DEFINE_ALIAS
# from .functional.lod import sequence_scatter #DEFINE_ALIAS
# from .functional.lod import sequence_slice #DEFINE_ALIAS
# from .functional.lod import sequence_softmax #DEFINE_ALIAS
# from .functional.lod import sequence_unpad #DEFINE_ALIAS
# from .functional.lod import array_length #DEFINE_ALIAS
# from .functional.lod import array_read #DEFINE_ALIAS
# from .functional.lod import array_write #DEFINE_ALIAS
# from .functional.lod import create_array #DEFINE_ALIAS
# from .functional.lod import hash #DEFINE_ALIAS
# from .functional.lod import im2sequence #DEFINE_ALIAS
# from .functional.lod import lod_append #DEFINE_ALIAS
# from .functional.lod import lod_reset #DEFINE_ALIAS
# from .functional.lod import reorder_lod_tensor_by_rank #DEFINE_ALIAS
# from .functional.lod import tensor_array_to_tensor #DEFINE_ALIAS
# from .functional.lod import dynamic_gru #DEFINE_ALIAS
# from .functional.lod import dynamic_lstm #DEFINE_ALIAS
# from .functional.lod import dynamic_lstmp #DEFINE_ALIAS
# from .functional.common import dropout #DEFINE_ALIAS
# from .functional.common import embedding #DEFINE_ALIAS
# from .functional.common import fc #DEFINE_ALIAS
# from .functional.common import label_smooth #DEFINE_ALIAS
# from .functional.common import one_hot #DEFINE_ALIAS
# from .functional.common import pad #DEFINE_ALIAS
# from .functional.common import pad_constant_like #DEFINE_ALIAS
# from .functional.common import pad2d #DEFINE_ALIAS
# from .functional.common import unfold #DEFINE_ALIAS
# from .functional.common import bilinear_tensor_product #DEFINE_ALIAS
# from .functional.common import assign #DEFINE_ALIAS
# from .functional.common import interpolate #DEFINE_ALIAS
# from .input import data #DEFINE_ALIAS
# from .input import Input #DEFINE_ALIAS
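# Hedged usage sketch (added; not part of the original __init__.py). The active
# re-exports above make these layers reachable straight from paddle.nn; exact
# constructor signatures for this Paddle snapshot are an assumption, so the
# example stays commented out:
# import paddle
# act = paddle.nn.ReLU()          # re-exported from .layer.activation
# loss_fn = paddle.nn.L1Loss()    # re-exported from .layer.loss
# y = paddle.nn.relu(x)           # functional alias; x is a hypothetical tensor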
| 57.414815
| 87
| 0.8028
|
e3f0d92a2ec4e98ce6f7edfbb7abe366e04972bc
| 6,866
|
py
|
Python
|
lasaft/source_separation/conditioned/cunet/models/dcun_tfc_film_lasaft.py
|
roger-tseng/Conditioned-Source-Separation-LaSAFT
|
47cf2b7d11ac442f58127afb4ed5a8af360b20d9
|
[
"MIT"
] | 2
|
2022-01-03T08:22:24.000Z
|
2022-02-10T23:25:41.000Z
|
lasaft/source_separation/conditioned/cunet/models/dcun_tfc_film_lasaft.py
|
ws-choi/LightSAFT-Net-for-MDX-Challenge
|
bd38f44cad681deb7f1cf296b2efdd4c018c8212
|
[
"MIT"
] | null | null | null |
lasaft/source_separation/conditioned/cunet/models/dcun_tfc_film_lasaft.py
|
ws-choi/LightSAFT-Net-for-MDX-Challenge
|
bd38f44cad681deb7f1cf296b2efdd4c018c8212
|
[
"MIT"
] | 3
|
2021-05-27T13:25:19.000Z
|
2021-08-05T11:34:06.000Z
|
import inspect
from argparse import ArgumentParser
import torch
from torch import nn
from lasaft.source_separation.conditioned.LaSAFT import TFC_LaSAFT
from lasaft.source_separation.conditioned.cunet.dcun_film import DenseCUNet_FiLM, DenseCUNet_FiLM_Framework
from lasaft.source_separation.conditioned.loss_functions import get_conditional_loss
from lasaft.utils.functions import get_activation_by_name
class DCUN_TFC_FiLM_LaSAFT(DenseCUNet_FiLM):
def __init__(self,
n_fft,
input_channels, internal_channels,
n_blocks, n_internal_layers,
first_conv_activation, last_activation,
t_down_layers, f_down_layers,
# TFC_LaSAFT #
kernel_size_t, kernel_size_f,
bn_factor, min_bn_units,
tfc_tdf_bias, tfc_tdf_activation,
num_tdfs, dk,
# Conditional Mechanism #
control_vector_type, control_input_dim, embedding_dim,
# Conditional Model #
control_type, control_n_layer,
condition_to, film_type, gamma_activation, beta_activation
):
tfc_tdf_activation = get_activation_by_name(tfc_tdf_activation)
def mk_tfc_lasaft(in_channels, internal_channels, f):
return TFC_LaSAFT(in_channels, n_internal_layers, internal_channels,
kernel_size_t, kernel_size_f, f,
bn_factor, min_bn_units,
tfc_tdf_bias,
tfc_tdf_activation,
embedding_dim, num_tdfs, dk)
def mk_ds(internal_channels, i, f, t_down_layers):
if t_down_layers is None:
scale = (2, 2)
else:
scale = (2, 2) if i in t_down_layers else (1, 2)
ds = nn.Sequential(
nn.Conv2d(in_channels=internal_channels, out_channels=internal_channels,
kernel_size=scale, stride=scale),
nn.BatchNorm2d(internal_channels)
)
return ds, f // scale[-1]
def mk_us(internal_channels, i, f, n, t_down_layers):
if t_down_layers is None:
scale = (2, 2)
else:
scale = (2, 2) if i in [n - 1 - s for s in t_down_layers] else (1, 2)
us = nn.Sequential(
nn.ConvTranspose2d(in_channels=internal_channels, out_channels=internal_channels,
kernel_size=scale, stride=scale),
nn.BatchNorm2d(internal_channels)
)
return us, f * scale[-1]
super(DCUN_TFC_FiLM_LaSAFT, self).__init__(
n_fft,
input_channels, internal_channels,
n_blocks, n_internal_layers,
mk_tfc_lasaft, mk_ds, mk_us,
first_conv_activation, last_activation,
t_down_layers, f_down_layers,
# Conditional Mechanism #
control_vector_type, control_input_dim, embedding_dim, condition_to,
control_type, control_n_layer, film_type, gamma_activation, beta_activation
)
def forward(self, input_spec, input_condition):
condition_embedding = self.embedding(input_condition)
gammas, betas = self.condition_generator(condition_embedding)
x = self.first_conv(input_spec)
encoding_outputs = []
gammas_encoder, gammas_middle, gammas_decoder = gammas
betas_encoder, betas_middle, betas_decoder = betas
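        # Encoder path: each block runs its TFC, optionally applies FiLM with the
        # per-block (gamma, beta), then adds the condition-aware LaSAFT branch;
        # the result is cached for the U-Net skip connection before downsampling.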
for i in range(self.n):
x = self.encoders[i].tfc(x)
if self.is_encoder_conditioned:
x = self.film(x, gammas_encoder[i], betas_encoder[i])
x = x + self.encoders[i].lasaft(x,condition_embedding)
encoding_outputs.append(x)
x = self.downsamplings[i](x)
x = self.mid_block.tfc(x)
if self.is_middle_conditioned:
x = self.film(x, gammas_middle, betas_middle)
x = x + self.mid_block.lasaft(x,condition_embedding)
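        # Decoder path: upsample, concatenate the matching encoder output (U-Net
        # skip connection), then repeat the TFC / FiLM / LaSAFT pattern.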
for i in range(self.n):
x = self.upsamplings[i](x)
x = torch.cat((x, encoding_outputs[-i - 1]), 1)
x = self.decoders[i].tfc(x)
if self.is_decoder_conditioned:
x = self.film(x, gammas_decoder[i], betas_decoder[i])
x = x + self.decoders[i].lasaft(x,condition_embedding)
return self.last_conv(x)
class DCUN_TFC_FiLM_LaSAFT_Framework(DenseCUNet_FiLM_Framework):
def __init__(self, n_fft, hop_length, num_frame,
spec_type, spec_est_mode,
optimizer, lr, auto_lr_schedule,
train_loss, val_loss,
**kwargs):
valid_kwargs = inspect.signature(DCUN_TFC_FiLM_LaSAFT.__init__).parameters
tfc_net_kwargs = dict((name, kwargs[name]) for name in valid_kwargs if name in kwargs)
tfc_net_kwargs['n_fft'] = n_fft
spec2spec = DCUN_TFC_FiLM_LaSAFT(**tfc_net_kwargs)
train_loss_ = get_conditional_loss(train_loss, n_fft, hop_length, **kwargs)
val_loss_ = get_conditional_loss(val_loss, n_fft, hop_length, **kwargs)
super(DCUN_TFC_FiLM_LaSAFT_Framework, self).__init__(n_fft, hop_length, num_frame,
spec_type, spec_est_mode,
spec2spec,
optimizer, lr, auto_lr_schedule,
train_loss_, val_loss_
)
valid_kwargs = inspect.signature(DCUN_TFC_FiLM_LaSAFT_Framework.__init__).parameters
hp = [key for key in valid_kwargs.keys() if key not in ['self', 'kwargs']]
hp = hp + [key for key in kwargs if not callable(kwargs[key])]
self.save_hyperparameters(*hp)
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument('--n_internal_layers', type=int, default=5)
parser.add_argument('--kernel_size_t', type=int, default=3)
parser.add_argument('--kernel_size_f', type=int, default=3)
parser.add_argument('--bn_factor', type=int, default=16)
parser.add_argument('--min_bn_units', type=int, default=16)
parser.add_argument('--tfc_tdf_bias', type=bool, default=False)
parser.add_argument('--tfc_tdf_activation', type=str, default='relu')
parser.add_argument('--num_tdfs', type=int, default=6)
parser.add_argument('--dk', type=int, default=32)
return DenseCUNet_FiLM_Framework.add_model_specific_args(parser)
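# Hedged illustration (added; not in the original file) of the kwargs-filtering
# idiom used in DCUN_TFC_FiLM_LaSAFT_Framework.__init__ above: inspect the target
# constructor's signature and forward only the keyword arguments it declares.
def _filter_kwargs_for(target_cls, **kwargs):
    import inspect as _inspect  # local import keeps the sketch self-contained
    valid = _inspect.signature(target_cls.__init__).parameters
    return {name: kwargs[name] for name in valid if name in kwargs}

# Example with a hypothetical class: _filter_kwargs_for(SomeNet, n_fft=2048,
# verbose=True) drops "verbose" unless SomeNet.__init__ declares it.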
| 41.612121
| 107
| 0.599913
|
c3ac20d3fc9b6bc1560422184b6a8b2ba29603bf
| 1,750
|
py
|
Python
|
data/p4VQE/R4/benchmark/startCirq649.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R4/benchmark/startCirq649.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R4/benchmark/startCirq649.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=12
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=9
c.append(cirq.X.on(input_qubit[1])) # number=10
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=11
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.Y.on(input_qubit[3])) # number=5
c.append(cirq.X.on(input_qubit[3])) # number=7
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count = 2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq649.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
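# Hedged addition (not in the original script): result.histogram(key='result',
# fold_func=bitstring) above returns a collections.Counter keyed by the folded
# bitstring. A tiny, cirq-independent helper for reading the top outcomes:
from collections import Counter

def top_outcomes(frequencies, k=3):
    # Counter.most_common sorts outcomes by descending count.
    return Counter(frequencies).most_common(k)

# e.g. top_outcomes({'0000': 1200, '0100': 800}) == [('0000', 1200), ('0100', 800)]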
| 27.777778
| 77
| 0.693143
|
94576bf2550330a3bb745e959d6748e9ac809060
| 2,569
|
py
|
Python
|
examples/framework_examples/dqn_per.py
|
1abner1/machin
|
d10727b52d981c898e31cdd20b48a3d972612bb6
|
[
"MIT"
] | 1
|
2021-10-08T18:38:50.000Z
|
2021-10-08T18:38:50.000Z
|
examples/framework_examples/dqn_per.py
|
1abner1/machin
|
d10727b52d981c898e31cdd20b48a3d972612bb6
|
[
"MIT"
] | null | null | null |
examples/framework_examples/dqn_per.py
|
1abner1/machin
|
d10727b52d981c898e31cdd20b48a3d972612bb6
|
[
"MIT"
] | null | null | null |
from machin.frame.algorithms import DQNPer
from machin.utils.logging import default_logger as logger
import torch as t
import torch.nn as nn
import gym
# configurations
env = gym.make("CartPole-v0")
observe_dim = 4
action_num = 2
max_episodes = 1000
max_steps = 200
solved_reward = 190
solved_repeat = 5
# model definition
class QNet(nn.Module):
def __init__(self, state_dim, action_num):
super().__init__()
self.fc1 = nn.Linear(state_dim, 16)
self.fc2 = nn.Linear(16, 16)
self.fc3 = nn.Linear(16, action_num)
def forward(self, state):
a = t.relu(self.fc1(state))
a = t.relu(self.fc2(a))
return self.fc3(a)
if __name__ == "__main__":
q_net = QNet(observe_dim, action_num)
q_net_t = QNet(observe_dim, action_num)
dqn_per = DQNPer(q_net, q_net_t, t.optim.Adam, nn.MSELoss(reduction="sum"))
episode, step, reward_fulfilled = 0, 0, 0
smoothed_total_reward = 0
while episode < max_episodes:
episode += 1
total_reward = 0
terminal = False
step = 0
state = t.tensor(env.reset(), dtype=t.float32).view(1, observe_dim)
while not terminal and step <= max_steps:
step += 1
with t.no_grad():
old_state = state
# agent model inference
action = dqn_per.act_discrete_with_noise({"state": old_state})
state, reward, terminal, _ = env.step(action.item())
state = t.tensor(state, dtype=t.float32).view(1, observe_dim)
total_reward += reward
dqn_per.store_transition(
{
"state": {"state": old_state},
"action": {"action": action},
"next_state": {"state": state},
"reward": reward,
"terminal": terminal or step == max_steps,
}
)
# update, update more if episode is longer, else less
if episode > 100:
for _ in range(step):
dqn_per.update()
# show reward
smoothed_total_reward = smoothed_total_reward * 0.9 + total_reward * 0.1
logger.info(f"Episode {episode} total reward={smoothed_total_reward:.2f}")
if smoothed_total_reward > solved_reward:
reward_fulfilled += 1
if reward_fulfilled >= solved_repeat:
logger.info("Environment solved!")
exit(0)
else:
reward_fulfilled = 0
| 30.583333
| 82
| 0.567925
|
61f5661ef6e5ae78cf89c16163469b615f9c01c8
| 3,792
|
py
|
Python
|
ckanext/resourceproxy/blueprint.py
|
ziveo/ckan
|
f4cfe5e28789df58b2bf7e73e5989ffda00e5c5c
|
[
"Apache-2.0"
] | 58
|
2015-01-11T09:05:15.000Z
|
2022-03-17T23:44:07.000Z
|
ckanext/resourceproxy/blueprint.py
|
ziveo/ckan
|
f4cfe5e28789df58b2bf7e73e5989ffda00e5c5c
|
[
"Apache-2.0"
] | 1,467
|
2015-01-01T16:47:44.000Z
|
2022-02-28T16:51:20.000Z
|
ckanext/resourceproxy/blueprint.py
|
ziveo/ckan
|
f4cfe5e28789df58b2bf7e73e5989ffda00e5c5c
|
[
"Apache-2.0"
] | 17
|
2015-03-13T18:05:05.000Z
|
2020-11-06T13:55:32.000Z
|
# encoding: utf-8
from logging import getLogger
import requests
from six.moves.urllib.parse import urlsplit
from flask import Blueprint, make_response
import ckan.lib.base as base
import ckan.logic as logic
from ckan.common import config, _
from ckan.plugins.toolkit import (asint, abort, get_action, c)
log = getLogger(__name__)
MAX_FILE_SIZE = asint(
config.get(u'ckan.resource_proxy.max_file_size', 1024**2)
)
CHUNK_SIZE = asint(config.get(u'ckan.resource_proxy.chunk_size', 4096))
resource_proxy = Blueprint(u'resource_proxy', __name__)
def proxy_resource(context, data_dict):
u'''Chunked proxy for resources. To make sure that the file is not too
large, first, we try to get the content length from the headers.
    If the headers do not contain a content length (if it is a chunked
response), we only transfer as long as the transferred data is
less than the maximum file size.
'''
resource_id = data_dict[u'resource_id']
log.info(u'Proxify resource {id}'.format(id=resource_id))
try:
resource = get_action(u'resource_show')(context, {u'id': resource_id})
except logic.NotFound:
return abort(404, _(u'Resource not found'))
url = resource[u'url']
parts = urlsplit(url)
if not parts.scheme or not parts.netloc:
return abort(409, _(u'Invalid URL.'))
response = make_response()
try:
# first we try a HEAD request which may not be supported
did_get = False
r = requests.head(url)
# Servers can refuse HEAD requests. 405 is the appropriate
# response, but 400 with the invalid method mentioned in the
# text, or a 403 (forbidden) status is also possible (#2412,
# #2530)
if r.status_code in (400, 403, 405):
r = requests.get(url, stream=True)
did_get = True
r.raise_for_status()
cl = r.headers.get(u'content-length')
if cl and int(cl) > MAX_FILE_SIZE:
return abort(
409, (
                    u'Content is too large to be proxied. Allowed '
u'file size: {allowed}, Content-Length: {actual}.'
).format(allowed=MAX_FILE_SIZE, actual=cl)
)
if not did_get:
r = requests.get(url, stream=True)
response.headers[u'content-type'] = r.headers[u'content-type']
response.charset = r.encoding
length = 0
for chunk in r.iter_content(chunk_size=CHUNK_SIZE):
response.stream.write(chunk)
length += len(chunk)
if length >= MAX_FILE_SIZE:
return abort(
409,
headers={u'content-encoding': u''},
detail=u'Content is too large to be proxied.'
)
except requests.exceptions.HTTPError as error:
details = u'Could not proxy resource. Server responded with %s %s' % (
error.response.status_code, error.response.reason
)
return abort(409, detail=details)
except requests.exceptions.ConnectionError as error:
details = u'''Could not proxy resource because a
connection error occurred. %s''' % error
return abort(502, detail=details)
except requests.exceptions.Timeout:
details = u'Could not proxy resource because the connection timed out.'
return abort(504, detail=details)
return response
def proxy_view(id, resource_id):
data_dict = {u'resource_id': resource_id}
context = {
u'model': base.model,
u'session': base.model.Session,
u'user': c.user
}
return proxy_resource(context, data_dict)
resource_proxy.add_url_rule(
u'/dataset/<id>/resource/<resource_id>/proxy', view_func=proxy_view
)
| 33.557522
| 79
| 0.63423
|
c8a14b4587f76fa408024070978fa090eebff443
| 83
|
py
|
Python
|
xfeat/base/__init__.py
|
Drunkar/xfeat
|
7eced097072a67f06548cc778b27b2310c5e5511
|
[
"MIT"
] | 304
|
2020-06-19T05:00:14.000Z
|
2022-03-19T19:39:04.000Z
|
xfeat/base/__init__.py
|
Drunkar/xfeat
|
7eced097072a67f06548cc778b27b2310c5e5511
|
[
"MIT"
] | 4
|
2020-06-28T11:30:33.000Z
|
2022-02-17T14:31:39.000Z
|
xfeat/base/__init__.py
|
Drunkar/xfeat
|
7eced097072a67f06548cc778b27b2310c5e5511
|
[
"MIT"
] | 15
|
2020-06-19T08:34:56.000Z
|
2022-02-17T14:51:30.000Z
|
from xfeat.base._mixin import TransformerMixin, OptunaSelectorMixin, SelectorMixin
| 41.5
| 82
| 0.879518
|
352d8d933ebf483eef8d8a10f2b00b5be7060f97
| 457
|
py
|
Python
|
codes_/1010_Pairs_of_Songs_With_Total_Durations_Divisible_by_60.py
|
SaitoTsutomu/leetcode
|
4656d66ab721a5c7bc59890db9a2331c6823b2bf
|
[
"MIT"
] | null | null | null |
codes_/1010_Pairs_of_Songs_With_Total_Durations_Divisible_by_60.py
|
SaitoTsutomu/leetcode
|
4656d66ab721a5c7bc59890db9a2331c6823b2bf
|
[
"MIT"
] | null | null | null |
codes_/1010_Pairs_of_Songs_With_Total_Durations_Divisible_by_60.py
|
SaitoTsutomu/leetcode
|
4656d66ab721a5c7bc59890db9a2331c6823b2bf
|
[
"MIT"
] | null | null | null |
# %% [1010. Pairs of Songs With Total Durations Divisible by 60](https://leetcode.com/problems/pairs-of-songs-with-total-durations-divisible-by-60/)
# Problem: count the pairs of elements in "time" whose durations sum to a multiple of 60
# Approach: count each duration modulo 60 with collections.Counter and pair complementary remainders
import collections
from typing import List
class Solution:
def numPairsDivisibleBy60(self, time: List[int]) -> int:
c = collections.Counter([t % 60 for t in time])
return (
sum(n * (c.get(60 - t, 0) if t % 30 else n - 1) for t, n in c.items()) // 2
)
| 45.7
| 148
| 0.645514
|
5b2be9167156d5eea66b339c225e957ab32d2dc2
| 1,791
|
py
|
Python
|
examples/examples_perf_test.py
|
m-szalay/Cirq
|
1bd083a87fdf49212f347d88f15713e90cc72f8f
|
[
"Apache-2.0"
] | null | null | null |
examples/examples_perf_test.py
|
m-szalay/Cirq
|
1bd083a87fdf49212f347d88f15713e90cc72f8f
|
[
"Apache-2.0"
] | null | null | null |
examples/examples_perf_test.py
|
m-szalay/Cirq
|
1bd083a87fdf49212f347d88f15713e90cc72f8f
|
[
"Apache-2.0"
] | null | null | null |
import cirq
import examples.bell_inequality
import examples.bernstein_vazirani
import examples.grover
import examples.place_on_bristlecone
import examples.hello_qubit
import examples.quantum_fourier_transform
import examples.bcs_mean_field
import examples.phase_estimator
import examples.basic_arithmetic
import examples.quantum_teleportation
import examples.superdense_coding
# Standard test runs do not include performance benchmarks.
# coverage: ignore
def test_example_runs_bernstein_vazirani_perf(benchmark):
benchmark(examples.bernstein_vazirani.main, qubit_count=3)
# Check empty oracle case. Cover both biases.
a = cirq.NamedQubit('a')
assert list(examples.bernstein_vazirani.make_oracle(
[], a, [], False)) == []
assert list(examples.bernstein_vazirani.make_oracle(
[], a, [], True)) == [cirq.X(a)]
def test_example_runs_hello_line_perf(benchmark):
benchmark(examples.place_on_bristlecone.main)
def test_example_runs_hello_qubit_perf(benchmark):
benchmark(examples.hello_qubit.main)
def test_example_runs_bell_inequality_perf(benchmark):
benchmark(examples.bell_inequality.main)
def test_example_runs_quantum_fourier_transform_perf(benchmark):
benchmark(examples.quantum_fourier_transform.main)
def test_example_runs_bcs_mean_field_perf(benchmark):
benchmark(examples.bcs_mean_field.main)
def test_example_runs_grover_perf(benchmark):
benchmark(examples.grover.main)
def test_example_runs_phase_estimator_perf(benchmark):
benchmark(examples.phase_estimator.main, qnums=(2,), repetitions=2)
def test_example_runs_quantum_teleportation(benchmark):
benchmark(examples.quantum_teleportation.main)
def test_example_runs_superdense_coding(benchmark):
benchmark(examples.superdense_coding.main)
| 28.428571
| 71
| 0.815745
|
3d6f5e6828de9a3dcba2545ac8f8aa2ad031b8ac
| 10,641
|
py
|
Python
|
ssguan/ignitor/base/error.py
|
samuelbaizg/ssguan
|
97def0609d61e40472554464470758b5fb9eca35
|
[
"Apache-2.0"
] | 1
|
2015-07-14T14:24:05.000Z
|
2015-07-14T14:24:05.000Z
|
ssguan/ignitor/base/error.py
|
samuelbaizg/ssguan
|
97def0609d61e40472554464470758b5fb9eca35
|
[
"Apache-2.0"
] | null | null | null |
ssguan/ignitor/base/error.py
|
samuelbaizg/ssguan
|
97def0609d61e40472554464470758b5fb9eca35
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2015 www.suishouguan.com
#
# Licensed under the Private License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/samuelbaizg/ssguan/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
import types
CODE_UNKNOWN = 9999
"""
Error Code Range:
error: 1001 ~ 1039
orm: 1040 ~ 1049
web: 1050 ~ 1069
vfs: 1070 ~ 1079
auth: 1100 ~ 1129
asyn:1130 ~ 1139
schedule:1140 ~ 1149
cache: 1150 ~ 1160
"""
class ExceptionWrap(Exception):
def __init__(self, exc_info, **data):
if isinstance(exc_info, Exception):
exc_info = (exc_info.__class__, exc_info, None)
if exc_info is None:
raise Exception("exc_info can't be null.")
if not isinstance(exc_info, tuple) or len(exc_info) != 3:
raise Exception("exc_info must be a type of exc_info.")
self.__exc_info = exc_info
self.__data = data
@property
def exception(self):
return self.__exc_info[1]
@property
def traceback(self):
return self.__exc_info[2]
@property
def data(self):
return self.__data
@property
def message(self):
return str(self.exception)
@property
def message_tb(self):
return "".join(traceback.format_tb(self.traceback))
def __str__(self):
return self.message
class Error(Exception):
def __init__(self, message, *args, **kwargs):
self.__message = message
self.__args = args
self.__kwargs = kwargs
@property
def code(self):
"""To be implemented by sub-class"""
raise NotImplementedError("Error.code")
@property
def message(self):
message = self.__message
if message != None and (self.__args != None and len(self.__args) > 0):
message = message % self.__args
if message != None and (self.__kwargs != None and len(self.__kwargs) > 0):
for key, value in self.__kwargs.items():
message = message.replace("{{%s}}" % key, str(value))
return "%d: %s" % (self.code, message)
@property
def arguments(self):
return self.__kwargs
def get_argument(self, key):
return self.arguments[key]
def __str__(self):
return self.message
class ProgramError(Error):
"""
ProgramError is to define the error for programmer codes.
"""
def __init__(self, message, *args):
super(ProgramError, self).__init__(message, *args)
@property
def code(self):
return 1001
class RunError(Error):
"""
RunError is to define the error for runtime.
"""
def __init__(self, message, *args):
super(RunError, self).__init__(message, *args)
@property
def code(self):
return 1002
class NoDoError(Error):
def __init__(self, action, what):
super(NoDoError, self).__init__("No support to {{action}} {{what}}", action=action, what=what)
@property
def code(self):
return 1003
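# Illustration of the {{key}} template substitution performed by Error.message
# (the action/what values are hypothetical; the result follows from the code above):
#   NoDoError("edit", "post").message  ->  "1003: No support to edit post"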
class NoFoundError(Error):
def __init__(self, it, something):
super(NoFoundError, self).__init__("{{it}} {{something}} is not found.", it=str(it), something=str(something))
@property
def code(self):
return 1004
class NoSupportError(Error):
def __init__(self, it, something):
super(NoSupportError, self).__init__("{{it}} {{something}} is not supported.", it=str(it), something=str(something))
@property
def code(self):
return 1005
class InvalidError(Error):
def __init__(self, something, why):
super(InvalidError, self).__init__("{{something}} is not valid because {{why}}.", something=str(something), why=str(why))
@property
def code(self):
return 1006
class ClassCastError(Error):
def __init__(self, clazz, baseclazz):
super(ClassCastError, self).__init__("{{clazz}} is not the sub-class of {{baseClazz}}." , clazz=clazz, baseClazz=baseclazz)
@property
def code(self):
return 1007
class RequiredError(Error):
def __init__(self, label):
super(RequiredError, self).__init__("{{label}} is required.", label=label)
@property
def code(self):
return 1010
class ChoiceError(Error):
def __init__(self, label, choices):
super(ChoiceError, self).__init__("The value of {{label}} must be one of {{choices}}.", label=label, choices=",".join(map(str, choices)))
@property
def code(self):
return 1011
class LengthError(Error):
def __init__(self, label, minlength, maxlength):
super(LengthError, self).__init__("The length of {{label}} must between {{minlength}} and {{maxlength}}.", label=label, minlength=minlength, maxlength=maxlength)
@property
def code(self):
return 1012
class RangeError(Error):
def __init__(self, label, mininum, maximum):
super(RangeError, self).__init__("The value of {{label}} must between {{mininum}} and {{maximum}}.", label=label, mininum=mininum, maximum=maximum)
@property
def code(self):
return 1013
class CompareError(Error):
def __init__(self, label, operator, limitlabel, limit):
super(CompareError, self).__init__("The value of {{label}} must {{operator}} {{limitlabel}} {{limit}}.", label=label, operator=operator, limit=limit, limitlabel=limitlabel)
@property
def code(self):
return 1014
class TypeIntError(Error):
def __init__(self, label):
super(TypeIntError, self).__init__("The value of {{label}} must be an integer.", label=label)
@property
def code(self):
return 1015
class TypeFloatError(Error):
def __init__(self, label):
super(TypeFloatError, self).__init__("The value of {{label}} must be a float.", label=label)
@property
def code(self):
return 1016
class TypeDateError(Error):
def __init__(self, label, fmt=format):
        super(TypeDateError, self).__init__("The value of {{label}} must be the format {{fmt}}.", label=label, fmt=fmt)
@property
def code(self):
return 1017
class TypeDatetimeError(Error):
def __init__(self, label, fmt=format):
        super(TypeDatetimeError, self).__init__("The value of {{label}} must be the format {{fmt}}.", label=label, fmt=fmt)
@property
def code(self):
return 1018
class TypeFormatError(Error):
def __init__(self, label, fmt=format):
        super(TypeFormatError, self).__init__("The format of {{label}} must be the format {{fmt}}.", label=label, fmt=fmt)
@property
def code(self):
return 1019
class TypeBoolError(Error):
def __init__(self, label):
super(TypeBoolError, self).__init__("The value of {{label}} must be a bool.", label=label)
@property
def code(self):
return 1020
class TypeListError(Error):
def __init__(self, label):
super(TypeListError, self).__init__("The value of {{label}} must be instance of list.", label=label)
@property
def code(self):
return 1021
class TypeDictError(Error):
def __init__(self, label):
super(TypeDictError, self).__init__("The value of {{label}} must be instance of dict.", label=label)
@property
def code(self):
return 1022
class TypeFunctionError(Error):
def __init__(self, label):
super(TypeFunctionError, self).__init__("The value of {{label}} must be a function.", label=label)
@property
def code(self):
return 1023
class TypeGeneratorError(Error):
def __init__(self, label):
super(TypeGeneratorError, self).__init__("The value of {{label}} must be instance of generator.", label=label)
@property
def code(self):
return 1024
class TypeStrError(Error):
def __init__(self, label):
super(TypeStrError, self).__init__("The value of {{label}} must be a str.", label=label)
@property
def code(self):
return 1025
def assert_required(value, label):
"""
check if value is None or empty str.
"""
if value is None:
raise RequiredError(label)
if type(value) == str and len(value.strip()) == 0:
raise RequiredError(label)
def assert_type_int(value, label):
if type(value) != int:
raise TypeIntError(label)
return True
def assert_type_float(value, label):
if type(value) != float:
raise TypeFloatError(label)
return True
def assert_type_bool(value, label):
if type(value) != bool:
raise TypeBoolError(label)
def assert_type_list(value, label):
if type(value) != list:
raise TypeListError(label)
def assert_type_dict(value, label):
if type(value) != dict:
raise TypeDictError(label)
def assert_type_generator(value, label):
if type(value) != types.GeneratorType:
raise TypeGeneratorError(label)
def assert_type_function(value, label):
if type(value) != types.FunctionType:
raise TypeFunctionError(label)
def assert_type_str(value, label):
if type(value) != str:
raise TypeStrError(label)
return True
def assert_in(value, choices, label):
if value not in choices:
raise ChoiceError(label, choices)
return True
def assert_equal(value1, value2, label1, label2):
if value1 != value2:
raise CompareError(label1, "=", label2, '')
def assert_not_equal(value1, value2, label1, label2):
if value1 == value2:
raise CompareError(label1, "!=", label2, '')
def format_exc_info(exc_info):
error_class = exc_info[1]
tb_message = format_traceback(exc_info[2])
return "%s\n%s" % (str(error_class), tb_message)
def format_traceback(traceback1):
return "".join(traceback.format_tb(traceback1))
| 31.114035
| 181
| 0.620148
|
97fd1baee2671bedcf292097f58571ae06a2cdf7
| 383
|
py
|
Python
|
baselines/deepq/__init__.py
|
seungjaeryanlee/baselines-tf2
|
299c2e6fb0b1dc8dd5f25c826eb004cf276a5bfe
|
[
"MIT"
] | 10
|
2019-06-18T16:20:20.000Z
|
2021-01-10T04:18:07.000Z
|
baselines/deepq/__init__.py
|
seungjaeryanlee/baselines-tf2
|
299c2e6fb0b1dc8dd5f25c826eb004cf276a5bfe
|
[
"MIT"
] | 5
|
2019-07-02T03:11:00.000Z
|
2020-07-27T17:32:41.000Z
|
baselines/deepq/__init__.py
|
seungjaeryanlee/baselines-tf2
|
299c2e6fb0b1dc8dd5f25c826eb004cf276a5bfe
|
[
"MIT"
] | 6
|
2019-06-03T23:03:40.000Z
|
2021-03-09T06:51:28.000Z
|
from baselines.deepq import models # noqa
from baselines.deepq.deepq_learner import DEEPQ #noqa
from baselines.deepq.deepq import learn # noqa
from baselines.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer # noqa
def wrap_atari_dqn(env):
from baselines.common.atari_wrappers import wrap_deepmind
return wrap_deepmind(env, frame_stack=True, scale=False)
| 42.555556
| 87
| 0.817232
|
9e4d7281dfb66b59f488d0c889022130df4e7801
| 716
|
py
|
Python
|
src/charma/persons/directors/decorators.py
|
mononobi/charma-server
|
ed90f5ec0b5ff3996232d5fe49a4f77f96d82ced
|
[
"BSD-3-Clause"
] | 1
|
2020-01-16T23:36:10.000Z
|
2020-01-16T23:36:10.000Z
|
src/charma/persons/directors/decorators.py
|
mononobi/imovie-server
|
ed90f5ec0b5ff3996232d5fe49a4f77f96d82ced
|
[
"BSD-3-Clause"
] | 24
|
2020-06-08T18:27:04.000Z
|
2021-06-06T12:01:39.000Z
|
src/charma/persons/directors/decorators.py
|
mononobi/charma-server
|
ed90f5ec0b5ff3996232d5fe49a4f77f96d82ced
|
[
"BSD-3-Clause"
] | 1
|
2020-12-20T05:29:04.000Z
|
2020-12-20T05:29:04.000Z
|
# -*- coding: utf-8 -*-
"""
directors decorators module.
"""
import charma.persons.directors.services as director_services
def director_hook():
"""
decorator to register a director hook.
:raises InvalidDirectorHookTypeError: invalid director hook type error.
:returns: director hook class.
:rtype: type
"""
def decorator(cls):
"""
decorates the given class and registers an instance
of it into available director hooks.
:param type cls: director hook class.
:returns: director hook class.
:rtype: type
"""
instance = cls()
director_services.register_hook(instance)
return cls
return decorator
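# Illustrative usage sketch; the hook class name and its assumed base class are
# hypothetical and not defined in this module:
#
#     @director_hook()
#     class SampleDirectorHook(DirectorHookBase):
#         ...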
| 19.888889
| 75
| 0.638268
|
194667fbce715d84e883b96c1efccc1d258536af
| 305
|
py
|
Python
|
data/multilingual/Latn.KDE/Serif_12/pdf_to_json_test_Latn.KDE_Serif_12.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | 1
|
2021-09-19T19:47:35.000Z
|
2021-09-19T19:47:35.000Z
|
data/multilingual/Latn.KDE/Serif_12/pdf_to_json_test_Latn.KDE_Serif_12.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
data/multilingual/Latn.KDE/Serif_12/pdf_to_json_test_Latn.KDE_Serif_12.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.KDE/Serif_12/udhr_Latn.KDE_Serif_12.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 30.5
| 75
| 0.813115
|
112896c78c0934e782bbb95bb051cdf0272d0765
| 410
|
py
|
Python
|
webdjango/migrations/0003_address_street_address_3.py
|
myog-io/WebDjangular
|
73d3c40aa449eec5acc59d4493ee94059bddabbd
|
[
"MIT"
] | 1
|
2018-09-14T15:17:19.000Z
|
2018-09-14T15:17:19.000Z
|
webdjango/migrations/0003_address_street_address_3.py
|
MyOwnGamesLLC/WebDjangular
|
73d3c40aa449eec5acc59d4493ee94059bddabbd
|
[
"MIT"
] | 41
|
2018-12-16T16:58:54.000Z
|
2019-02-22T20:08:58.000Z
|
webdjango/migrations/0003_address_street_address_3.py
|
myog-io/WebDjangular
|
73d3c40aa449eec5acc59d4493ee94059bddabbd
|
[
"MIT"
] | 1
|
2019-12-10T09:32:49.000Z
|
2019-12-10T09:32:49.000Z
|
# Generated by Django 2.1.4 on 2019-01-22 16:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('webdjango', '0002_auto_20181222_2028'),
]
operations = [
migrations.AddField(
model_name='address',
name='street_address_3',
field=models.CharField(blank=True, max_length=256),
),
]
| 21.578947
| 63
| 0.614634
|
513cca381242917ae4dd8a31aa893f598754a716
| 30,820
|
py
|
Python
|
luigi/hadoop.py
|
mulby/luigi
|
230380cc69604550defc5db0f666e0919b015a3c
|
[
"Apache-2.0"
] | null | null | null |
luigi/hadoop.py
|
mulby/luigi
|
230380cc69604550defc5db0f666e0919b015a3c
|
[
"Apache-2.0"
] | null | null | null |
luigi/hadoop.py
|
mulby/luigi
|
230380cc69604550defc5db0f666e0919b015a3c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2012 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import random
import sys
import os
import datetime
import subprocess
import tempfile
from itertools import groupby
from operator import itemgetter
import pickle
import binascii
import logging
import StringIO
import re
import shutil
import signal
from hashlib import md5
import luigi
import luigi.hdfs
import configuration
import warnings
import mrrunner
import json
import glob
logger = logging.getLogger('luigi-interface')
_attached_packages = []
def attach(*packages):
""" Attach a python package to hadoop map reduce tarballs to make those packages available on the hadoop cluster"""
_attached_packages.extend(packages)
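# Illustrative usage; the attached module name is hypothetical:
#
#     import my_text_utils
#     luigi.hadoop.attach(my_text_utils)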
def dereference(file):
if os.path.islink(file):
#by joining with the dirname we are certain to get the absolute path
return dereference(os.path.join(os.path.dirname(file), os.readlink(file)))
else:
return file
def get_extra_files(extra_files):
result = []
for f in extra_files:
if isinstance(f, str):
src, dst = f, os.path.basename(f)
elif isinstance(f, tuple):
src, dst = f
else:
raise Exception()
if os.path.isdir(src):
src_prefix = os.path.join(src, '')
for base, dirs, files in os.walk(src):
for file in files:
f_src = os.path.join(base, file)
f_src_stripped = f_src[len(src_prefix):]
f_dst = os.path.join(dst, f_src_stripped)
result.append((f_src, f_dst))
else:
result.append((src, dst))
return result
def create_packages_archive(packages, filename):
"""Create a tar archive which will contain the files for the packages listed in packages. """
import tarfile
tar = tarfile.open(filename, "w")
def add(src, dst):
logger.debug('adding to tar: %s -> %s', src, dst)
tar.add(src, dst)
def add_files_for_package(sub_package_path, root_package_path, root_package_name):
for root, dirs, files in os.walk(sub_package_path):
if '.svn' in dirs:
dirs.remove('.svn')
for f in files:
if not f.endswith(".pyc") and not f.startswith("."):
add(dereference(root + "/" + f), root.replace(root_package_path, root_package_name) + "/" + f)
for package in packages:
# Put a submodule's entire package in the archive. This is the
# magic that usually packages everything you need without
# having to attach packages/modules explicitly
if not getattr(package, "__path__", None) and '.' in package.__name__:
package = __import__(package.__name__.rpartition('.')[0], None, None, 'non_empty')
n = package.__name__.replace(".", "/")
if getattr(package, "__path__", None):
# TODO: (BUG) picking only the first path does not
# properly deal with namespaced packages in different
# directories
p = package.__path__[0]
if p.endswith('.egg') and os.path.isfile(p):
                raise Exception('egg files not supported!!!')
# Add the entire egg file
# p = p[:p.find('.egg') + 4]
# add(dereference(p), os.path.basename(p))
else:
# include __init__ files from parent projects
root = []
for parent in package.__name__.split('.')[0:-1]:
root.append(parent)
module_name = '.'.join(root)
directory = '/'.join(root)
add(dereference(__import__(module_name, None, None, 'non_empty').__path__[0] + "/__init__.py"),
directory + "/__init__.py")
add_files_for_package(p, p, n)
# include egg-info directories that are parallel:
for egg_info_path in glob.glob(p + '*.egg-info'):
logger.debug(
'Adding package metadata to archive for "%s" found at "%s"',
package.__name__,
egg_info_path
)
add_files_for_package(egg_info_path, p, n)
else:
f = package.__file__
if f.endswith("pyc"):
f = f[:-3] + "py"
if n.find(".") == -1:
add(dereference(f), os.path.basename(f))
else:
add(dereference(f), n + ".py")
tar.close()
def flatten(sequence):
"""A simple generator which flattens a sequence.
    Only one level is flattened.
(1, (2, 3), 4) -> (1, 2, 3, 4)
"""
for item in sequence:
if hasattr(item, "__iter__"):
for i in item:
yield i
else:
yield item
class HadoopRunContext(object):
def __init__(self):
self.job_id = None
def __enter__(self):
self.__old_signal = signal.getsignal(signal.SIGTERM)
signal.signal(signal.SIGTERM, self.kill_job)
return self
def kill_job(self, captured_signal=None, stack_frame=None):
if self.job_id:
logger.info('Job interrupted, killing job %s', self.job_id)
subprocess.call(['mapred', 'job', '-kill', self.job_id])
if captured_signal is not None:
# adding 128 gives the exit code corresponding to a signal
sys.exit(128 + captured_signal)
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is KeyboardInterrupt:
self.kill_job()
signal.signal(signal.SIGTERM, self.__old_signal)
class HadoopJobError(RuntimeError):
def __init__(self, message, out=None, err=None):
super(HadoopJobError, self).__init__(message, out, err)
self.message = message
self.out = out
self.err = err
def run_and_track_hadoop_job(arglist, tracking_url_callback=None, env=None):
''' Runs the job by invoking the command from the given arglist. Finds tracking urls from the output and attempts to fetch
errors using those urls if the job fails. Throws HadoopJobError with information about the error (including stdout and stderr
from the process) on failure and returns normally otherwise.
'''
logger.info('%s', ' '.join(arglist))
def write_luigi_history(arglist, history):
'''
Writes history to a file in the job's output directory in JSON format.
Currently just for tracking the job ID in a configuration where no history is stored in the output directory by Hadoop.
'''
history_filename = configuration.get_config().get('core', 'history-filename', '')
if history_filename and '-output' in arglist:
output_dir = arglist[arglist.index('-output') + 1]
f = luigi.hdfs.HdfsTarget(os.path.join(output_dir, history_filename)).open('w')
f.write(json.dumps(history))
f.close()
def track_process(arglist, tracking_url_callback, env=None):
# Dump stdout to a temp file, poll stderr and log it
temp_stdout = tempfile.TemporaryFile()
proc = subprocess.Popen(arglist, stdout=temp_stdout, stderr=subprocess.PIPE, env=env, close_fds=True)
# We parse the output to try to find the tracking URL.
# This URL is useful for fetching the logs of the job.
tracking_url = None
job_id = None
err_lines = []
with HadoopRunContext() as hadoop_context:
while proc.poll() is None:
err_line = proc.stderr.readline()
err_lines.append(err_line)
err_line = err_line.strip()
if err_line:
logger.info('%s', err_line)
err_line = err_line.lower()
if err_line.find('tracking url') != -1:
tracking_url = err_line.split('tracking url: ')[-1]
try:
tracking_url_callback(tracking_url)
except Exception as e:
logger.error("Error in tracking_url_callback, disabling! %s", e)
tracking_url_callback = lambda x: None
if err_line.find('running job') != -1:
# hadoop jar output
job_id = err_line.split('running job: ')[-1]
if err_line.find('submitted hadoop job:') != -1:
# scalding output
job_id = err_line.split('submitted hadoop job: ')[-1]
hadoop_context.job_id = job_id
# Read the rest + stdout
err = ''.join(err_lines + [err_line for err_line in proc.stderr])
temp_stdout.seek(0)
out = ''.join(temp_stdout.readlines())
if proc.returncode == 0:
write_luigi_history(arglist, {'job_id': job_id})
return (out, err)
# Try to fetch error logs if possible
message = 'Streaming job failed with exit code %d. ' % proc.returncode
if not tracking_url:
raise HadoopJobError(message + 'Also, no tracking url found.', out, err)
try:
task_failures = fetch_task_failures(tracking_url)
except Exception, e:
raise HadoopJobError(message + 'Additionally, an error occurred when fetching data from %s: %s' %
(tracking_url, e), out, err)
if not task_failures:
raise HadoopJobError(message + 'Also, could not fetch output from tasks.', out, err)
else:
raise HadoopJobError(message + 'Output from tasks below:\n%s' % task_failures, out, err)
if tracking_url_callback is None:
tracking_url_callback = lambda x: None
return track_process(arglist, tracking_url_callback, env)
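# Illustrative call shape; the jar path, streaming options and target paths are
# hypothetical, only the arglist structure matches how HadoopJobRunner builds it:
#
#     run_and_track_hadoop_job(
#         ['hadoop', 'jar', '/usr/lib/hadoop/hadoop-streaming.jar',
#          '-mapper', 'python mrrunner.py map',
#          '-input', '/data/in', '-output', '/data/out-temp'])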
def fetch_task_failures(tracking_url):
''' Uses mechanize to fetch the actual task logs from the task tracker.
This is highly opportunistic, and we might not succeed. So we set a low timeout and hope it works.
If it does not, it's not the end of the world.
TODO: Yarn has a REST API that we should probably use instead:
http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/MapredAppMasterRest.html
'''
import mechanize
timeout = 3.0
failures_url = tracking_url.replace('jobdetails.jsp', 'jobfailures.jsp') + '&cause=failed'
logger.debug('Fetching data from %s', failures_url)
b = mechanize.Browser()
b.open(failures_url, timeout=timeout)
links = list(b.links(text_regex='Last 4KB')) # For some reason text_regex='All' doesn't work... no idea why
links = random.sample(links, min(10, len(links))) # Fetch a random subset of all failed tasks, so not to be biased towards the early fails
error_text = []
for link in links:
task_url = link.url.replace('&start=-4097', '&start=-100000') # Increase the offset
logger.debug('Fetching data from %s', task_url)
b2 = mechanize.Browser()
try:
r = b2.open(task_url, timeout=timeout)
data = r.read()
except Exception, e:
logger.debug('Error fetching data from %s: %s', task_url, e)
continue
# Try to get the hex-encoded traceback back from the output
for exc in re.findall(r'luigi-exc-hex=[0-9a-f]+', data):
error_text.append('---------- %s:' % task_url)
error_text.append(exc.split('=')[-1].decode('hex'))
return '\n'.join(error_text)
class JobRunner(object):
run_job = NotImplemented
class HadoopJobRunner(JobRunner):
''' Takes care of uploading & executing a Hadoop job using Hadoop streaming
TODO: add code to support Elastic Mapreduce (using boto) and local execution.
'''
def __init__(self, streaming_jar, modules=[], streaming_args=[], libjars=[], libjars_in_hdfs=[], jobconfs={}, input_format=None, output_format=None):
self.streaming_jar = streaming_jar
self.modules = modules
self.streaming_args = streaming_args
self.libjars = libjars
self.libjars_in_hdfs = libjars_in_hdfs
self.jobconfs = jobconfs
self.input_format = input_format
self.output_format = output_format
self.tmp_dir = False
def run_job(self, job):
packages = [luigi] + self.modules + job.extra_modules() + list(_attached_packages)
# find the module containing the job
packages.append(__import__(job.__module__, None, None, 'dummy'))
# find the path to out runner.py
runner_path = mrrunner.__file__
# assume source is next to compiled
if runner_path.endswith("pyc"):
runner_path = runner_path[:-3] + "py"
base_tmp_dir = configuration.get_config().get('core', 'tmp-dir', None)
if base_tmp_dir:
warnings.warn("The core.tmp-dir configuration item is"\
" deprecated, please use the TMPDIR"\
" environment variable if you wish"\
" to control where luigi.hadoop may"\
" create temporary files and directories.")
self.tmp_dir = os.path.join(base_tmp_dir, 'hadoop_job_%016x' % random.getrandbits(64))
os.makedirs(self.tmp_dir)
else:
self.tmp_dir = tempfile.mkdtemp()
logger.debug("Tmp dir: %s", self.tmp_dir)
# build arguments
config = configuration.get_config()
python_executable = config.get('hadoop', 'python-executable', 'python')
map_cmd = '{0} mrrunner.py map'.format(python_executable)
cmb_cmd = '{0} mrrunner.py combiner'.format(python_executable)
red_cmd = '{0} mrrunner.py reduce'.format(python_executable)
# replace output with a temporary work directory
output_final = job.output().path
output_tmp_fn = output_final + '-temp-' + datetime.datetime.now().isoformat().replace(':', '-')
tmp_target = luigi.hdfs.HdfsTarget(output_tmp_fn)
arglist = luigi.hdfs.load_hadoop_cmd() + ['jar', self.streaming_jar]
# 'libjars' is a generic option, so place it first
libjars = [libjar for libjar in self.libjars]
for libjar in self.libjars_in_hdfs:
subprocess.call(luigi.hdfs.load_hadoop_cmd() + ['fs', '-get', libjar, self.tmp_dir])
libjars.append(os.path.join(self.tmp_dir, os.path.basename(libjar)))
if libjars:
arglist += ['-libjars', ','.join(libjars)]
# Add static files and directories
extra_files = get_extra_files(job.extra_files())
files = []
for src, dst in extra_files:
dst_tmp = '%s_%09d' % (dst.replace('/', '_'), random.randint(0, 999999999))
files += ['%s#%s' % (src, dst_tmp)]
# -files doesn't support subdirectories, so we need to create the dst_tmp -> dst manually
job._add_link(dst_tmp, dst)
if files:
arglist += ['-files', ','.join(files)]
jobconfs = job.jobconfs()
for k, v in self.jobconfs.iteritems():
jobconfs.append('%s=%s' % (k, v))
for conf in jobconfs:
arglist += ['-D', conf]
arglist += self.streaming_args
arglist += ['-mapper', map_cmd]
if job.combiner != NotImplemented:
arglist += ['-combiner', cmb_cmd]
if job.reducer != NotImplemented:
arglist += ['-reducer', red_cmd]
files = [runner_path, self.tmp_dir + '/packages.tar', self.tmp_dir + '/job-instance.pickle']
for f in files:
arglist += ['-file', f]
if self.output_format:
arglist += ['-outputformat', self.output_format]
if self.input_format:
arglist += ['-inputformat', self.input_format]
for target in luigi.task.flatten(job.input_hadoop()):
assert isinstance(target, luigi.hdfs.HdfsTarget)
arglist += ['-input', target.path]
assert isinstance(job.output(), luigi.hdfs.HdfsTarget)
arglist += ['-output', output_tmp_fn]
# submit job
create_packages_archive(packages, self.tmp_dir + '/packages.tar')
job._dump(self.tmp_dir)
run_and_track_hadoop_job(arglist)
tmp_target.move_dir(output_final)
self.finish()
def finish(self):
# FIXME: check for isdir?
if self.tmp_dir and os.path.exists(self.tmp_dir):
logger.debug('Removing directory %s', self.tmp_dir)
shutil.rmtree(self.tmp_dir)
def __del__(self):
self.finish()
class DefaultHadoopJobRunner(HadoopJobRunner):
''' The default job runner just reads from config and sets stuff '''
def __init__(self):
config = configuration.get_config()
streaming_jar = config.get('hadoop', 'streaming-jar')
super(DefaultHadoopJobRunner, self).__init__(streaming_jar=streaming_jar)
# TODO: add more configurable options
class LocalJobRunner(JobRunner):
''' Will run the job locally
This is useful for debugging and also unit testing. Tries to mimic Hadoop Streaming.
TODO: integrate with JobTask
'''
def __init__(self, samplelines=None):
self.samplelines = samplelines
def sample(self, input, n, output):
for i, line in enumerate(input):
if n is not None and i >= n:
break
output.write(line)
def group(self, input):
output = StringIO.StringIO()
lines = []
for i, line in enumerate(input):
parts = line.rstrip('\n').split('\t')
blob = md5(str(i)).hexdigest() # pseudo-random blob to make sure the input isn't sorted
lines.append((parts[:-1], blob, line))
for k, _, line in sorted(lines):
output.write(line)
output.seek(0)
return output
def run_job(self, job):
map_input = StringIO.StringIO()
for i in luigi.task.flatten(job.input_hadoop()):
self.sample(i.open('r'), self.samplelines, map_input)
map_input.seek(0)
if job.reducer == NotImplemented:
# Map only job; no combiner, no reducer
map_output = job.output().open('w')
job._run_mapper(map_input, map_output)
map_output.close()
return
job.init_mapper()
# run job now...
map_output = StringIO.StringIO()
job._run_mapper(map_input, map_output)
map_output.seek(0)
if job.combiner == NotImplemented:
reduce_input = self.group(map_output)
else:
combine_input = self.group(map_output)
combine_output = StringIO.StringIO()
job._run_combiner(combine_input, combine_output)
combine_output.seek(0)
reduce_input = self.group(combine_output)
job.init_reducer()
reduce_output = job.output().open('w')
job._run_reducer(reduce_input, reduce_output)
reduce_output.close()
class BaseHadoopJobTask(luigi.Task):
pool = luigi.Parameter(is_global=True, default=None, significant=False)
# This value can be set to change the default batching increment. Default is 1 for backwards compatibility.
batch_counter_default = 1
final_mapper = NotImplemented
final_combiner = NotImplemented
final_reducer = NotImplemented
mr_priority = NotImplemented
_counter_dict = {}
task_id = None
def jobconfs(self):
jcs = []
jcs.append('mapred.job.name=%s' % self.task_id)
if self.mr_priority != NotImplemented:
jcs.append('mapred.job.priority=%s' % self.mr_priority())
pool = self.pool
if pool is not None:
# Supporting two schedulers: fair (default) and capacity using the same option
scheduler_type = configuration.get_config().get('hadoop', 'scheduler', 'fair')
if scheduler_type == 'fair':
jcs.append('mapred.fairscheduler.pool=%s' % pool)
elif scheduler_type == 'capacity':
jcs.append('mapred.job.queue.name=%s' % pool)
return jcs
def init_local(self):
''' Implement any work to setup any internal datastructure etc here.
You can add extra input using the requires_local/input_local methods.
Anything you set on the object will be pickled and available on the Hadoop nodes.
'''
pass
def init_hadoop(self):
pass
def run(self):
self.init_local()
self.job_runner().run_job(self)
def requires_local(self):
''' Default impl - override this method if you need any local input to be accessible in init() '''
return []
def requires_hadoop(self):
return self.requires() # default impl
def input_local(self):
return luigi.task.getpaths(self.requires_local())
def input_hadoop(self):
return luigi.task.getpaths(self.requires_hadoop())
def deps(self):
# Overrides the default implementation
return luigi.task.flatten(self.requires_hadoop()) + luigi.task.flatten(self.requires_local())
def on_failure(self, exception):
if isinstance(exception, HadoopJobError):
return """Hadoop job failed with message: {message}
stdout:
{stdout}
stderr:
{stderr}
""".format(message=exception.message, stdout=exception.out, stderr=exception.err)
else:
return super(BaseHadoopJobTask, self).on_failure(exception)
class JobTask(BaseHadoopJobTask):
n_reduce_tasks = 25
reducer = NotImplemented
def jobconfs(self):
jcs = super(JobTask, self).jobconfs()
if self.reducer == NotImplemented:
jcs.append('mapred.reduce.tasks=0')
else:
jcs.append('mapred.reduce.tasks=%s' % self.n_reduce_tasks)
return jcs
def init_mapper(self):
pass
def init_combiner(self):
pass
def init_reducer(self):
pass
def _setup_remote(self):
self._setup_links()
def job_runner(self):
# We recommend that you define a subclass, override this method and set up your own config
""" Get the MapReduce runner for this job
If all outputs are HdfsTargets, the DefaultHadoopJobRunner will be used. Otherwise, the LocalJobRunner which streams all data through the local machine will be used (great for testing).
"""
outputs = luigi.task.flatten(self.output())
for output in outputs:
if not isinstance(output, luigi.hdfs.HdfsTarget):
warnings.warn("Job is using one or more non-HdfsTarget outputs" +
" so it will be run in local mode")
return LocalJobRunner()
else:
return DefaultHadoopJobRunner()
def reader(self, input_stream):
"""Reader is a method which iterates over input lines and outputs records.
The default implementation yields one argument containing the line for each line in the input."""
for line in input_stream:
yield line,
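    # A minimal sketch of a custom reader override that parses tab-separated
    # input; the column names are assumptions for illustration. Each yielded
    # tuple is unpacked into the mapper's arguments by _map_input:
    #
    #     def reader(self, input_stream):
    #         for line in input_stream:
    #             user_id, count = line.rstrip('\n').split('\t')
    #             yield user_id, int(count)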
def writer(self, outputs, stdout, stderr=sys.stderr):
"""Writer format is a method which iterates over the output records from the reducer and formats
them for output.
The default implementation outputs tab separated items"""
for output in outputs:
try:
print >> stdout, "\t".join(map(str, flatten(output)))
except:
print >> stderr, output
raise
def mapper(self, item):
"""Re-define to process an input item (usually a line of input data)
Defaults to identity mapper that sends all lines to the same reducer"""
yield None, item
combiner = NotImplemented
def incr_counter(self, *args, **kwargs):
""" Increments a Hadoop counter
Since counters can be a bit slow to update, this batches the updates.
"""
threshold = kwargs.get("threshold", self.batch_counter_default)
if len(args) == 2:
# backwards compatibility with existing hadoop jobs
group_name, count = args
key = (group_name,)
else:
group, name, count = args
key = (group, name)
ct = self._counter_dict.get(key, 0)
ct += count
if ct >= threshold:
new_arg = list(key)+[ct]
self._incr_counter(*new_arg)
ct = 0
self._counter_dict[key] = ct
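    # Usage sketch inside a mapper or reducer; the group and counter names are
    # hypothetical:
    #
    #     self.incr_counter('MyJob', 'lines_seen', 1)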
def _flush_batch_incr_counter(self):
""" Increments any unflushed counter values
"""
for key, count in self._counter_dict.iteritems():
if count == 0:
continue
args = list(key) + [count]
self._incr_counter(*args)
def _incr_counter(self, *args):
""" Increments a Hadoop counter
Note that this seems to be a bit slow, ~1 ms. Don't overuse this function by updating very frequently.
"""
if len(args) == 2:
# backwards compatibility with existing hadoop jobs
group_name, count = args
print >> sys.stderr, 'reporter:counter:%s,%s' % (group_name, count)
else:
group, name, count = args
print >> sys.stderr, 'reporter:counter:%s,%s,%s' % (group, name, count)
def extra_modules(self):
return [] # can be overridden in subclass
def extra_files(self):
'''
        Can be overridden in subclass. Each element is either a string, or a pair of two strings (src, dst).
src can be a directory (in which case everything will be copied recursively).
dst can include subdirectories (foo/bar/baz.txt etc)
Uses Hadoop's -files option so that the same file is reused across tasks.
'''
return []
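    # A minimal sketch of an override; the paths are hypothetical. Each element
    # is either a plain path or a (src, dst) pair, as handled by get_extra_files():
    #
    #     def extra_files(self):
    #         return ['conf/settings.cfg', ('data/lookup.tsv', 'lookup/lookup.tsv')]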
def _add_link(self, src, dst):
if not hasattr(self, '_links'):
self._links = []
self._links.append((src, dst))
def _setup_links(self):
if hasattr(self, '_links'):
missing = []
for src, dst in self._links:
d = os.path.dirname(dst)
if d and not os.path.exists(d):
os.makedirs(d)
if not os.path.exists(src):
missing.append(src)
continue
if not os.path.exists(dst):
# If the combiner runs, the file might already exist,
# so no reason to create the link again
os.link(src, dst)
if missing:
raise HadoopJobError(
'Missing files for distributed cache: ' +
', '.join(missing))
def _dump(self, dir=''):
"""Dump instance to file."""
file_name = os.path.join(dir, 'job-instance.pickle')
if self.__module__ == '__main__':
d = pickle.dumps(self)
module_name = os.path.basename(sys.argv[0]).rsplit('.', 1)[0]
d = d.replace('(c__main__', "(c" + module_name)
open(file_name, "w").write(d)
else:
pickle.dump(self, open(file_name, "w"))
def _map_input(self, input_stream):
"""Iterate over input and call the mapper for each item.
If the job has a parser defined, the return values from the parser will
be passed as arguments to the mapper.
        If the input is coded output from a previous run, the arguments will be split into key and value."""
for record in self.reader(input_stream):
for output in self.mapper(*record):
yield output
if self.final_mapper != NotImplemented:
for output in self.final_mapper():
yield output
self._flush_batch_incr_counter()
def _reduce_input(self, inputs, reducer, final=NotImplemented):
"""Iterate over input, collect values with the same key, and call the reducer for each uniqe key."""
for key, values in groupby(inputs, key=lambda x: repr(x[0])):
for output in reducer(eval(key), (v[1] for v in values)):
yield output
if final != NotImplemented:
for output in final():
yield output
self._flush_batch_incr_counter()
def _run_mapper(self, stdin=sys.stdin, stdout=sys.stdout):
"""Run the mapper on the hadoop node."""
self.init_hadoop()
self.init_mapper()
outputs = self._map_input((line[:-1] for line in stdin))
if self.reducer == NotImplemented:
self.writer(outputs, stdout)
else:
self.internal_writer(outputs, stdout)
def _run_reducer(self, stdin=sys.stdin, stdout=sys.stdout):
"""Run the reducer on the hadoop node."""
self.init_hadoop()
self.init_reducer()
outputs = self._reduce_input(self.internal_reader((line[:-1] for line in stdin)), self.reducer, self.final_reducer)
self.writer(outputs, stdout)
def _run_combiner(self, stdin=sys.stdin, stdout=sys.stdout):
self.init_hadoop()
self.init_combiner()
outputs = self._reduce_input(self.internal_reader((line[:-1] for line in stdin)), self.combiner, self.final_combiner)
self.internal_writer(outputs, stdout)
def internal_reader(self, input_stream):
"""Reader which uses python eval on each part of a tab separated string.
Yields a tuple of python objects."""
for input in input_stream:
yield map(eval, input.split("\t"))
def internal_writer(self, outputs, stdout):
"""Writer which outputs the python repr for each item"""
for output in outputs:
print >> stdout, "\t".join(map(repr, output))
def pickle_reader(job, input_stream):
def decode(item):
return pickle.loads(binascii.a2b_base64(item))
for line in input_stream:
items = line.split('\t')
yield map(decode, items)
def pickle_writer(job, outputs, stdout):
def encode(item):
return binascii.b2a_base64(pickle.dumps(item))[:-1] # remove trailing newline
for keyval in outputs:
print >> stdout, "\t".join(map(encode, keyval))
| 37.043269
| 193
| 0.606587
|
b8b979bb7ad11dd4bfe266301a21e89e2cb7b90e
| 979
|
py
|
Python
|
Part 3/Chapter 07/Exercises/exercise_53.py
|
phuycke/Practice-of-computing-using-Python
|
9e477bcaecb0e447dfa7184d2071ca338801c86f
|
[
"MIT"
] | 1
|
2019-08-13T11:12:59.000Z
|
2019-08-13T11:12:59.000Z
|
Part 3/Chapter 07/Exercises/exercise_53.py
|
phuycke/Practice-of-computing-using-Python
|
9e477bcaecb0e447dfa7184d2071ca338801c86f
|
[
"MIT"
] | null | null | null |
Part 3/Chapter 07/Exercises/exercise_53.py
|
phuycke/Practice-of-computing-using-Python
|
9e477bcaecb0e447dfa7184d2071ca338801c86f
|
[
"MIT"
] | 1
|
2021-05-16T11:42:19.000Z
|
2021-05-16T11:42:19.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Pieter Huycke
email: pieter.huycke@ugent.be
GitHub: phuycke
"""
#%%
def hole_counter(string = str):
holes = 0
for letter in string:
if letter in ["a", "A" "b", "B", "d", "D" "e", "g", "o", "O" "p", "P", "q", "Q"]:
holes += 1
return {"Holes counted": holes,
"No holes": len(string) - holes}
res = hole_counter(string = "right")
print(res)
#%%
def words_with_holes(string = str):
word_list = string.split(" ")
word_count = 0
for words in word_list:
local_count = 0
for letter in words:
if letter in ["a", "A" "b", "B", "d", "D", "e", "g", "o", "O" "p", "P", "q", "Q"]:
local_count += 1
if local_count >= 2:
word_count += 1
break
return {"Words with 2 or more holes": word_count}
res = words_with_holes(string = "I see I need to go")
print(res)
| 21.282609
| 94
| 0.501532
|
341bb3eb29d46af5ff438987f7f34718f837653a
| 9,236
|
py
|
Python
|
Source/boost_1_33_1/libs/compatibility/generate_cpp_c_headers.py
|
spxuw/RFIM
|
32b78fbb90c7008b1106b0cff4f8023ae83c9b6d
|
[
"MIT"
] | 4
|
2021-07-31T13:56:01.000Z
|
2021-11-13T02:55:10.000Z
|
Source/boost_1_33_1/libs/compatibility/generate_cpp_c_headers.py
|
spxuw/RFIM
|
32b78fbb90c7008b1106b0cff4f8023ae83c9b6d
|
[
"MIT"
] | null | null | null |
Source/boost_1_33_1/libs/compatibility/generate_cpp_c_headers.py
|
spxuw/RFIM
|
32b78fbb90c7008b1106b0cff4f8023ae83c9b6d
|
[
"MIT"
] | 7
|
2021-08-31T14:34:23.000Z
|
2022-01-19T08:25:58.000Z
|
# This Python script creates a full set of C++ C header files that
# are missing on some platforms.
#
# Usage:
# mkdir cpp_c_headers
# cd cpp_c_headers
# python generate_cpp_c_headers.py
#
# The files created by this script are in the directory:
# root/boost/compatibility/cpp_c_headers
#
# Supported platforms:
# Compaq Alpha, RedHat 6.2 Linux, Compaq C++ V6.3 (cxx)
# Compaq Alpha, Tru64 Unix V5.0, Compaq C++ V6.2 (cxx)
# Silicon Graphics, IRIX 6.5, MIPSpro Compilers: Version 7.3.1.1m (CC)
#
# Support for additional platforms can be added by extending the
# "defines" Python dictionary below.
#
# Python is available at:
# http://www.python.org/
#
# Copyright (c) 2001 Ralf W. Grosse-Kunstleve. Permission to copy,
# use, modify, sell and distribute this script is granted provided this
# copyright notice appears in all copies. This document is provided "as
# is" without express or implied warranty, and with no claim as to its
# suitability for any purpose.
#
# Revision history:
# 16 Apr 01 moved to boost CVS tree (R.W. Grosse-Kunstleve)
# 17 Jan 01 Alpha Linux cxx V6.3 support (R.W. Grosse-Kunstleve)
# 15 Dec 00 posted to boost e-group file upload area (R.W. Grosse-Kunstleve)
# Definition of platform specific exclusion of identifiers.
defines = {
'defined(__sgi) && defined(_COMPILER_VERSION) && _COMPILER_VERSION <= 740': (
'btowc', 'fwide', 'fwprintf', 'fwscanf', 'mbrlen', 'mbrtowc',
'mbsinit', 'mbsrtowcs', 'swprintf', 'swscanf', 'towctrans', 'vfwprintf',
'vswprintf', 'vwprintf', 'wcrtomb', 'wcsrtombs', 'wctob', 'wctrans',
'wctrans_t', 'wmemchr', 'wmemcmp', 'wmemcpy', 'wmemmove', 'wmemset',
'wprintf', 'wscanf',
),
'defined(__DECCXX_VER) && __DECCXX_VER <= 60290024': (
'fwide',
),
'defined(__linux) && defined(__DECCXX_VER) && __DECCXX_VER <= 60390005': (
'getwchar', 'ungetwc', 'fgetwc', 'vfwprintf', 'fgetws', 'vswprintf',
'wcsftime', 'fputwc', 'vwprintf', 'fputws', 'fwide', 'putwc',
'wprintf', 'fwprintf', 'putwchar', 'wscanf', 'fwscanf', 'swprintf',
'getwc', 'swscanf',
),
}
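# Example of adding support for another platform by extending "defines"; the
# preprocessor test and the excluded identifiers below are hypothetical:
#
# defines['defined(__EXAMPLE_CXX) && __EXAMPLE_CXX < 60000000'] = (
#     'fwide', 'vswprintf',
# )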
# The information below was copied directly from the file:
# ISO+IEC+14882-1998.pdf
# The exact source of the information is given in the format
# PDF #, p. #, Table #
# Where
# PDF # = page number as shown by the Acrobat Reader
# p. # = page number printed at the bottom of the page
# Table # = number printed in caption of table
hfiles = {
'cassert': ( # PDF 378, p. 352, Table 25
# Macro: assert
),
'cctype': ( # PDF 431, p. 405, Table 45
# Functions:
'isalnum', 'isdigit', 'isprint', 'isupper', 'tolower',
'isalpha', 'isgraph', 'ispunct', 'isxdigit', 'toupper',
'iscntrl', 'islower', 'isspace',
),
'cerrno': ( # PDF 378, p. 352, Table 26
# Macros: EDOM ERANGE errno
),
'cfloat': ( # PDF 361, p. 335, Table 17
# Macros: DBL_DIG DBL_MIN_EXP FLT_MIN_10_EXP LDBL_MAX_10_EXP
# DBL_EPSILON FLT_DIG FLT_MIN_EXP LDBL_MAX_EXP
# DBL_MANT_DIG FLT_EPSILON FLT_RADIX LDBL_MIN
# DBL_MAX FLT_MANT_DIG FLT_ROUNDS LDBL_MIN_10_EXP
# DBL_MAX_10_EXP FLT_MAX LDBL_DIG LDBL_MIN_EXP
# DBL_MAX_EXP FLT_MAX_10_EXP LDBL_EPSILON
# DBL_MIN FLT_MAX_EXP LDBL_MANT_DIG
# DBL_MIN_10_EXP FLT_MIN LDBL_MAX
),
#'ciso646': (
#),
'climits': ( # PDF 361, p. 335, Table 16
# Macros: CHAR_BIT INT_MAX LONG_MIN SCHAR_MIN UCHAR_MAX USHRT_MAX
# CHAR_MAX INT_MIN MB_LEN_MAX SHRT_MAX UINT_MAX
# CHAR_MIN LONG_MAX SCHAR_MAX SHRT_MIN ULONG_MAX
),
'clocale': ( # PDF 483, p. 457, Table 62
# Macros: LC_ALL LC_COLLATE LC_CTYPE
# LC_MONETARY LC_NUMERIC LC_TIME
# NULL
# Struct:
'lconv',
# Functions:
'localeconv', 'setlocale',
),
'cmath': ( # PDF 622, p. 596, Table 80
# Macro: HUGE_VAL
# Functions:
'acos', 'cos', 'fmod', 'modf', 'tan',
'asin', 'cosh', 'frexp', 'pow', 'tanh',
'atan', 'exp', 'ldexp', 'sin',
'atan2', 'fabs', 'log', 'sinh',
'ceil', 'floor', 'log10', 'sqrt',
),
'csetjmp': ( # PDF 372, p. 346, Table 20
# Macro: setjmp
# Type:
'jmp_buf',
# Function:
'longjmp',
),
'csignal': ( # PDF 372, p. 346, Table 22
# Macros: SIGABRT SIGILL SIGSEGV SIG_DFL
# SIG_IGN SIGFPE SIGINT SIGTERM SIG_ERR
# Type:
'sig_atomic_t',
# Functions:
'raise', 'signal',
),
'cstdarg': ( # PDF 372, p. 346, Table 19
# Macros: va_arg va_end va_start
# Type:
'va_list',
),
'cstddef': ( # PDF 353, p. 327, Table 15
# Macros: NULL offsetof
# Types:
'ptrdiff_t', 'size_t',
),
'cstdio': ( # PDF 692, p. 666, Table 94
# Macros: BUFSIZ FOPEN_MAX SEEK_CUR TMP_MAX _IONBF stdout
# EOF L_tmpnam SEEK_END _IOFBF stderr
# FILENAME_MAX NULL <cstdio> SEEK_SET _IOLBF stdin
# Types:
'FILE', 'fpos_t', 'size_t',
# Functions:
'clearerr', 'fgets', 'fscanf', 'gets', 'rename', 'tmpfile',
'fclose', 'fopen', 'fseek', 'perror', 'rewind', 'tmpnam',
'feof', 'fprintf', 'fsetpos', 'printf', 'scanf', 'ungetc',
'ferror', 'fputc', 'ftell', 'putc', 'setbuf', 'vfprintf',
'fflush', 'fputs', 'fwrite', 'putchar', 'setvbuf', 'vprintf',
'fgetc', 'fread', 'getc', 'puts', 'sprintf', 'vsprintf',
'fgetpos', 'freopen', 'getchar', 'remove', 'sscanf',
),
'cstdlib': ( # PDF 362, p. 336, Table 18
# Macros: EXIT_FAILURE EXIT_SUCCESS
# Functions:
'abort', 'atexit', 'exit',
# PDF 373, p. 347, Table 23
# Functions:
'getenv', 'system',
# PDF 400, p. 374, Table 33
# Functions:
'calloc', 'malloc',
'free', 'realloc',
# PDF 433, p. 417, Table 49
# Macros: MB_CUR_MAX
# Functions:
'atol', 'mblen', 'strtod', 'wctomb',
'atof', 'mbstowcs', 'strtol', 'wcstombs',
'atoi', 'mbtowc', 'strtoul',
# PDF 589, p. 563, Table 78
# Functions:
'bsearch', 'qsort',
# PDF 622, p. 596, Table 81
# Macros: RAND_MAX
# Types:
'div_t', 'ldiv_t',
# Functions:
'abs', 'labs', 'srand',
'div', 'ldiv', 'rand',
),
'cstring': ( # PDF 401, p. 375, Table 34
# Macro: NULL
# Type: size_t
# Functions:
# 'memchr', 'memcmp',
# 'memcpy', 'memmove', 'memset',
# PDF 432, p. 406, Table 47
# Macro: NULL
# Type:
'size_t',
# Functions:
'memchr', 'strcat', 'strcspn', 'strncpy', 'strtok',
'memcmp', 'strchr', 'strerror', 'strpbrk', 'strxfrm',
'memcpy', 'strcmp', 'strlen', 'strrchr',
'memmove', 'strcoll', 'strncat', 'strspn',
'memset', 'strcpy', 'strncmp', 'strstr',
),
'ctime': ( # PDF 372, p. 346, Table 21
# Macros: CLOCKS_PER_SEC
# Types:
# 'clock_t',
# Functions:
# 'clock',
# PDF 401, p. 375, Table 35
# Macros: NULL
# Types:
'size_t', 'clock_t', 'time_t',
# Struct:
'tm',
# Functions:
'asctime', 'clock', 'difftime', 'localtime', 'strftime',
'ctime', 'gmtime', 'mktime', 'time',
),
'cwchar': ( # PDF 432, p. 406, Table 48
# Macros: NULL WCHAR_MAX WCHAR_MIN WEOF
# Types:
'mbstate_t', 'wint_t', 'size_t',
# Functions:
'btowc', 'getwchar', 'ungetwc', 'wcscpy', 'wcsrtombs', 'wmemchr',
'fgetwc', 'mbrlen', 'vfwprintf', 'wcscspn', 'wcsspn', 'wmemcmp',
'fgetws', 'mbrtowc', 'vswprintf', 'wcsftime', 'wcsstr', 'wmemcpy',
'fputwc', 'mbsinit', 'vwprintf', 'wcslen', 'wcstod', 'wmemmove',
'fputws', 'mbsrtowcs', 'wcrtomb', 'wcsncat', 'wcstok', 'wmemset',
'fwide', 'putwc', 'wcscat', 'wcsncmp', 'wcstol', 'wprintf',
'fwprintf', 'putwchar', 'wcschr', 'wcsncpy', 'wcstoul', 'wscanf',
'fwscanf', 'swprintf', 'wcscmp', 'wcspbrk', 'wcsxfrm',
'getwc', 'swscanf', 'wcscoll', 'wcsrchr', 'wctob',
),
'cwctype': ( # PDF 432, p. 406, Table 46
# Macro: WEOF
# Types:
'wctrans_t', 'wctype_t', 'wint_t',
# Functions:
'iswalnum', 'iswctype', 'iswlower', 'iswspace', 'towctrans', 'wctrans',
'iswalpha', 'iswdigit', 'iswprint', 'iswupper', 'towlower', 'wctype',
'iswcntrl', 'iswgraph', 'iswpunct', 'iswxdigit', 'towupper',
),
}
if (__name__ == "__main__"):
    import sys, string, time

    now = time.asctime(time.localtime(time.time())) + ' ' + str(time.tzname)

    for hfile in hfiles.keys():
        HFILE = string.upper(hfile)
        f = open(hfile, 'w')
        sys.stdout = f
        print '// This file is automatically generated. Do not edit.'
        print '//', sys.argv
        print '//', now
        print
        print '#ifndef __' + HFILE + '_HEADER'
        print '#define __' + HFILE + '_HEADER'
        print ''
        print '#include <' + hfile[1:] + '.h>'
        print ''
        if (len(hfiles[hfile]) > 0):
            print 'namespace std {'
            for s in hfiles[hfile]:
                n_endif = 0
                for d in defines.keys():
                    if (s in defines[d]):
                        print '#if !(' + d + ')'
                        n_endif = n_endif + 1
                print ' using ::' + s + ';'
                for i in xrange(n_endif): print '#endif'
            print '}'
            print ''
        print '#endif // ' + HFILE + '_HEADER'
        sys.stdout = sys.__stdout__
| 34.207407
| 79
| 0.580771
|
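The Python 2 script above redirects sys.stdout into each generated header and wraps every re-exported name in #if !(...) guards taken from a defines table declared earlier in that file (outside this excerpt). As a rough illustration only, the following self-contained Python 3 sketch restates the emission logic for a single header; the defines entries and the symbol list here are placeholders, not the script's real tables.

# Illustrative Python 3 sketch of the wrapper-header emission logic.
# The guard table and symbol list below are hypothetical samples.
defines_sample = {
    'defined(putc)': ('putc',),
    'defined(getc)': ('getc',),
}

hfiles_sample = {
    'cstdio': ('clearerr', 'fclose', 'getc', 'putc'),
}


def emit_header(hfile, names):
    """Return the text of one generated wrapper header for <hfile>."""
    guard = '__' + hfile.upper() + '_HEADER'
    lines = [
        '// This file is automatically generated. Do not edit.',
        '#ifndef ' + guard,
        '#define ' + guard,
        '',
        '#include <' + hfile[1:] + '.h>',
        '',
    ]
    if names:
        lines.append('namespace std {')
        for name in names:
            # Guard the using-declaration when the C library may provide the
            # name as a macro (per the guard table).
            guards = [macro for macro, hidden in defines_sample.items() if name in hidden]
            lines.extend('#if !(' + macro + ')' for macro in guards)
            lines.append('  using ::' + name + ';')
            lines.extend(['#endif'] * len(guards))
        lines.extend(['}', ''])
    lines.append('#endif // ' + guard)
    return '\n'.join(lines) + '\n'


if __name__ == '__main__':
    print(emit_header('cstdio', hfiles_sample['cstdio']))

Building the header text as a list of lines and joining it once avoids the stdout redirection the original script relies on, but the generated output follows the same shape.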
3c6d078fda690a62852c4b6dd4570a0e2716036f
| 432
|
py
|
Python
|
setup.py
|
wnormandin/df-backend
|
94006f66ee16cf932b960751c9d491b39dcff0d0
|
[
"MIT"
] | null | null | null |
setup.py
|
wnormandin/df-backend
|
94006f66ee16cf932b960751c9d491b39dcff0d0
|
[
"MIT"
] | 1
|
2019-08-04T20:38:19.000Z
|
2019-08-04T20:38:19.000Z
|
setup.py
|
wnormandin/df-backend
|
94006f66ee16cf932b960751c9d491b39dcff0d0
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
from df_backend import __version__
setup(
    name="df_backend",
    version=__version__,
    install_requires=[
        'django',
        'django-rest-framework',
        'django-waitress'
    ],
    author='William Normandin',
    author_email='bill@pokeybill.us',
    packages=find_packages(),
    license='MIT',
    description='Backend API for the DunderFunk game client'
)
| 24
| 60
| 0.666667
|
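This setup.py reads __version__ from the df_backend package at build time, a pattern that works only when df_backend/__init__.py can be imported before the install_requires dependencies exist in the environment. A minimal sketch of the kind of __init__.py the pattern assumes (hypothetical contents; the real module is not part of this excerpt):

# df_backend/__init__.py -- hypothetical minimal contents; only the
# __version__ attribute is needed for `from df_backend import __version__`
# in setup.py to succeed during a build.
__version__ = '0.1.0'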
844ef88e46a93cfeb1e4a8e26d40a9b1f6960df4
| 2,082
|
py
|
Python
|
src/io_graph.py
|
SergeyKuz1001/formal_languages_autumn_2020
|
0cdf5cf16cfe609c88df34b5ee47b4e980f44b69
|
[
"Apache-2.0"
] | null | null | null |
src/io_graph.py
|
SergeyKuz1001/formal_languages_autumn_2020
|
0cdf5cf16cfe609c88df34b5ee47b4e980f44b69
|
[
"Apache-2.0"
] | 5
|
2020-09-09T16:44:54.000Z
|
2020-12-17T12:15:05.000Z
|
src/io_graph.py
|
SergeyKuz1001/formal_languages_autumn_2020
|
0cdf5cf16cfe609c88df34b5ee47b4e980f44b69
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Sergey Kuzivanov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .graph import Graph
from pygraphblas import Matrix, types
from typing import Dict, List, Iterable, Tuple, Optional, Set
Vertex = int
class IOGraph(Graph):
    def __init__(self) -> None:
        super().__init__()
        self._start_Vs: Set[Vertex] = set()
        self._final_Vs: Set[Vertex] = set()
        self._other_N: Optional[int] = None

    def copy(self) -> "IOGraph":
        res = super().copy()
        res._start_Vs = self._start_Vs.copy()
        res._final_Vs = self._final_Vs.copy()
        res._other_N = self._other_N
        return res

    @property
    def start_vertexes(self) -> Set[Vertex]:
        return self._start_Vs

    @property
    def final_vertexes(self) -> Set[Vertex]:
        return self._final_Vs

    def __matmul__(self, other: "IOGraph") -> "IOGraph":
        res = super().__matmul__(other)
        res._start_Vs = {
            self_start_V * other.count_vertexes + other_start_V
            for self_start_V in self.start_vertexes
            for other_start_V in other.start_vertexes
        }
        res._final_Vs = {
            self_final_V * other.count_vertexes + other_final_V
            for self_final_V in self.final_vertexes
            for other_final_V in other.final_vertexes
        }
        res._other_N = other.count_vertexes
        return res

    def vertex_to_pair(self, vertex: Vertex) -> Tuple[Vertex, Vertex]:
        return (vertex // self._other_N, vertex % self._other_N)
| 34.131148
| 75
| 0.652738
|
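IOGraph.__matmul__ numbers each vertex of the product graph as self_vertex * other.count_vertexes + other_vertex, and vertex_to_pair inverts that encoding. The index arithmetic can be checked in isolation, without pygraphblas or the Graph base class; the sketch below is illustrative only.

# Standalone sketch of the product-vertex numbering used by IOGraph.__matmul__
# and vertex_to_pair; the graph machinery itself is omitted.

def pair_to_vertex(self_v: int, other_v: int, other_n: int) -> int:
    # Vertex (i, j) of the product graph gets index i * other_n + j.
    return self_v * other_n + other_v

def vertex_to_pair(vertex: int, other_n: int) -> tuple:
    # Inverse of pair_to_vertex.
    return vertex // other_n, vertex % other_n

# Example: a 3-vertex graph crossed with a 4-vertex graph has 12 product
# vertices; start vertex pairs {0, 2} x {1} map to indices {1, 9}.
other_n = 4
start = {pair_to_vertex(i, j, other_n) for i in {0, 2} for j in {1}}
assert start == {1, 9}
assert all(vertex_to_pair(v, other_n) in {(0, 1), (2, 1)} for v in start)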
d12698490fd13d7d8e31b7af51cdab1d6c3a659e
| 2,843
|
py
|
Python
|
tests/python/pants_test/tasks/test_ivy_resolve_integration.py
|
arloherrine/pants
|
5f98f7734590eb21a2992a4c28415f838a2e6927
|
[
"Apache-2.0"
] | null | null | null |
tests/python/pants_test/tasks/test_ivy_resolve_integration.py
|
arloherrine/pants
|
5f98f7734590eb21a2992a4c28415f838a2e6927
|
[
"Apache-2.0"
] | null | null | null |
tests/python/pants_test/tasks/test_ivy_resolve_integration.py
|
arloherrine/pants
|
5f98f7734590eb21a2992a4c28415f838a2e6927
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
from pants.util.contextutil import temporary_dir
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class IvyResolveIntegrationTest(PantsRunIntegrationTest):

  def test_ivy_resolve_gives_correct_exception_on_cycles(self):
    with temporary_dir(root_dir=self.workdir_root()) as workdir:
      pants_run = self.run_pants_with_workdir([
          'compile', 'testprojects/src/java/com/pants/testproject/cycle1'], workdir)
      self.assert_failure(pants_run)
      self.assertIn('Cycle detected', pants_run.stderr_data)

  def test_java_compile_with_ivy_report(self):
    # Ensure the ivy report file gets generated
    with temporary_dir(root_dir=self.workdir_root()) as workdir:
      ivy_report_dir = '{workdir}/ivy-report'.format(workdir=workdir)
      pants_run = self.run_pants_with_workdir([
          'compile',
          'testprojects/src/java/com/pants/testproject/unicode/main',
          '--resolve-ivy-report',
          '--resolve-ivy-outdir={reportdir}'.format(reportdir=ivy_report_dir)],
          workdir)
      self.assert_success(pants_run)

      # Find the ivy report
      found = False
      pattern = re.compile('internal-[a-f0-9]+-default\.html$')
      for f in os.listdir(ivy_report_dir):
        if os.path.isfile(os.path.join(ivy_report_dir, f)):
          if pattern.match(f):
            found = True
            break
      self.assertTrue(found,
                      msg="Couldn't find ivy report in {report_dir}"
                      .format(report_dir=ivy_report_dir))

  def test_ivy_args(self):
    pants_run = self.run_pants([
        'resolve',
        '--resolve-ivy-args=-blablabla',
        'examples/src/scala::'
    ])
    self.assert_failure(pants_run)
    self.assertIn('Unrecognized option: -blablabla', pants_run.stdout_data)

  def test_ivy_confs_success(self):
    pants_run = self.run_pants([
        'resolve',
        '--resolve-ivy-confs=default',
        '--resolve-ivy-confs=sources',
        '--resolve-ivy-confs=javadoc',
        '3rdparty:junit'
    ])
    self.assert_success(pants_run)

  def test_ivy_confs_failure(self):
    pants_run = self.run_pants([
        'resolve',
        '--resolve-ivy-confs=parampampam',
        '3rdparty:junit'
    ])
    self.assert_failure(pants_run)

  def test_ivy_confs_ini_failure(self):
    pants_ini_config = {'resolve.ivy': {'confs': 'parampampam'}}
    pants_run = self.run_pants([
        'resolve',
        '3rdparty:junit'
    ], config=pants_ini_config)
    self.assert_failure(pants_run)
| 34.670732
| 93
| 0.674991
|
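test_java_compile_with_ivy_report locates the generated ivy report by running re.match over candidate file names with the pattern internal-[a-f0-9]+-default\.html$. A standalone check of how that pattern behaves (the hash fragments below are made up for illustration):

import re

# Same pattern the test compiles; re.match anchors at the start of the
# string, and the trailing $ anchors the end.
pattern = re.compile(r'internal-[a-f0-9]+-default\.html$')

assert pattern.match('internal-0f3a9c-default.html')
assert not pattern.match('internal-0f3a9c-default.html.bak')       # rejected by the $ anchor
assert not pattern.match('resolved-internal-0f3a9c-default.html')  # match() anchors at the start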