# === angr/procedures/libc/tolower.py (repo: mariusmue/angr, license: BSD-2-Clause) ===
import angr
from angr.sim_type import SimTypeInt
import logging
l = logging.getLogger("angr.procedures.libc.tolower")
class tolower(angr.SimProcedure):
def run(self, c):
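        """Symbolic tolower(3): map ASCII 'A'-'Z' (codes 65-90) to lowercase by adding 32; leave other values unchanged."""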
self.argument_types = {0: SimTypeInt(self.state.arch, True)}
self.return_type = SimTypeInt(self.state.arch, True)
return self.state.solver.If(
self.state.solver.And(c >= 65, c <= 90), # A - Z
c + 32, c)
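# A minimal usage sketch (hypothetical binary path) for hooking this SimProcedure
# manually, assuming a standard angr Project:
#   proj = angr.Project("/bin/true", auto_load_libs=False)
#   proj.hook_symbol("tolower", tolower())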
# === tests/main/views/test_agreements.py (repo: uk-gov-mirror/alphagov.digitalmarketplace-api, license: MIT) ===
import json
from datetime import datetime
from freezegun import freeze_time
from app.models import AuditEvent, db, Framework, FrameworkAgreement, User
from tests.helpers import fixture_params
from tests.bases import BaseApplicationTest
class BaseFrameworkAgreementTest(BaseApplicationTest):
def create_agreement(self, supplier_framework, **framework_agreement_kwargs):
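        """Insert a FrameworkAgreement row for the given supplier/framework pair and return its database id."""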
framework = Framework.query.filter(Framework.slug == supplier_framework['frameworkSlug']).first()
agreement = FrameworkAgreement(
supplier_id=supplier_framework['supplierId'],
framework_id=framework.id,
**framework_agreement_kwargs)
db.session.add(agreement)
db.session.commit()
return agreement.id
class TestCreateFrameworkAgreement(BaseApplicationTest):
def post_create_agreement(self, supplier_id=None, framework_slug=None):
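        """POST a create-agreement request, omitting supplierId/frameworkSlug when they are not given."""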
agreement_data = {}
if supplier_id:
agreement_data['supplierId'] = supplier_id
if framework_slug:
agreement_data['frameworkSlug'] = framework_slug
return self.client.post(
'/agreements',
data=json.dumps(
{
'updated_by': 'interested@example.com',
'agreement': agreement_data
}),
content_type='application/json')
def test_can_create_framework_agreement(self, supplier_framework):
res = self.post_create_agreement(
supplier_id=supplier_framework['supplierId'],
framework_slug=supplier_framework['frameworkSlug']
)
assert res.status_code == 201
res_agreement_json = json.loads(res.get_data(as_text=True))['agreement']
assert res_agreement_json['id'] > 0
assert res_agreement_json['supplierId'] == supplier_framework['supplierId']
assert res_agreement_json['frameworkSlug'] == supplier_framework['frameworkSlug']
assert res_agreement_json['status'] == 'draft'
res2 = self.client.get('/agreements/{}'.format(res_agreement_json['id']))
assert res2.status_code == 200
assert json.loads(res2.get_data(as_text=True))['agreement'] == res_agreement_json
def test_create_framework_agreement_makes_an_audit_event(self, supplier_framework):
res = self.post_create_agreement(
supplier_id=supplier_framework['supplierId'],
framework_slug=supplier_framework['frameworkSlug']
)
assert res.status_code == 201
agreement_id = json.loads(res.get_data(as_text=True))['agreement']['id']
agreement = FrameworkAgreement.query.filter(
FrameworkAgreement.id == agreement_id
).first()
audit = AuditEvent.query.filter(
AuditEvent.object == agreement
).first()
assert audit.type == "create_agreement"
assert audit.user == "interested@example.com"
assert audit.data == {
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug']
}
def test_404_if_creating_framework_agreement_with_no_supplier_framework(self, supplier_framework):
res = self.post_create_agreement(
supplier_id=7,
framework_slug='dos'
)
assert res.status_code == 404
assert json.loads(res.get_data(as_text=True))['error'] == "supplier_id '7' is not on framework 'dos'"
@fixture_params('supplier_framework', {'on_framework': False})
def test_404_if_creating_framework_agreement_with_supplier_framework_not_on_framework(self, supplier_framework):
res = self.post_create_agreement(
supplier_id=supplier_framework['supplierId'],
framework_slug=supplier_framework['frameworkSlug']
)
assert res.status_code == 404
assert (
json.loads(res.get_data(as_text=True))['error'] ==
"supplier_id '{}' is not on framework '{}'".format(
supplier_framework['supplierId'], supplier_framework['frameworkSlug']
)
)
def test_can_not_create_framework_agreement_if_no_supplier_id_provided(self, supplier_framework):
res = self.post_create_agreement(
framework_slug=supplier_framework['frameworkSlug']
)
assert res.status_code == 400
assert (
json.loads(res.get_data(as_text=True))['error'] ==
"Invalid JSON must have 'supplierId' keys"
)
def test_can_not_create_framework_agreement_if_no_framework_slug_provided(self, supplier_framework):
res = self.post_create_agreement(
supplier_id=supplier_framework['supplierId']
)
assert res.status_code == 400
assert (
json.loads(res.get_data(as_text=True))['error'] ==
"Invalid JSON must have 'frameworkSlug' keys"
)
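# The retrieval tests below walk the agreement lifecycle implied by the status field:
# draft -> signed -> on-hold -> approved -> countersigned.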
class TestGetFrameworkAgreement(BaseFrameworkAgreementTest):
def test_it_gets_a_newly_created_framework_agreement_by_id(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework
)
res = self.client.get('/agreements/{}'.format(agreement_id))
assert res.status_code == 200
assert json.loads(res.get_data(as_text=True))['agreement'] == {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'draft'
}
def test_it_returns_a_framework_agreement_with_details_only(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_details={'details': 'here'}
)
res = self.client.get('/agreements/{}'.format(agreement_id))
assert res.status_code == 200
assert json.loads(res.get_data(as_text=True))['agreement'] == {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'signedAgreementDetails': {'details': 'here'},
'status': 'draft'
}
def test_it_gets_a_signed_framework_agreement_by_id(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_returned_at=datetime(2016, 10, 1, 1, 1, 1),
signed_agreement_details={'details': 'here'},
signed_agreement_path='path'
)
res = self.client.get('/agreements/{}'.format(agreement_id))
assert res.status_code == 200
assert json.loads(res.get_data(as_text=True))['agreement'] == {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'signed',
'signedAgreementDetails': {'details': 'here'},
'signedAgreementPath': 'path',
'signedAgreementReturnedAt': '2016-10-01T01:01:01.000000Z',
}
def test_it_gets_an_on_hold_framework_agreement_by_id(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_returned_at=datetime(2016, 10, 1, 1, 1, 1),
signed_agreement_details={'details': 'here'},
signed_agreement_path='path',
signed_agreement_put_on_hold_at=datetime(2016, 11, 1, 1, 1, 1),
)
res = self.client.get('/agreements/{}'.format(agreement_id))
assert res.status_code == 200
assert json.loads(res.get_data(as_text=True))['agreement'] == {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'on-hold',
'signedAgreementDetails': {'details': 'here'},
'signedAgreementPath': 'path',
'signedAgreementReturnedAt': '2016-10-01T01:01:01.000000Z',
'signedAgreementPutOnHoldAt': '2016-11-01T01:01:01.000000Z',
}
def test_it_gets_an_approved_framework_agreement_by_id(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_returned_at=datetime(2016, 10, 1, 1, 1, 1),
signed_agreement_details={'details': 'here'},
signed_agreement_path='path',
countersigned_agreement_details={'countersigneddetails': 'here'},
countersigned_agreement_returned_at=datetime(2016, 11, 1, 1, 1, 1),
)
res = self.client.get('/agreements/{}'.format(agreement_id))
assert res.status_code == 200
assert json.loads(res.get_data(as_text=True))['agreement'] == {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'approved',
'signedAgreementDetails': {'details': 'here'},
'signedAgreementPath': 'path',
'signedAgreementReturnedAt': '2016-10-01T01:01:01.000000Z',
'countersignedAgreementDetails': {'countersigneddetails': 'here'},
'countersignedAgreementReturnedAt': '2016-11-01T01:01:01.000000Z',
}
def test_it_gets_a_countersigned_framework_agreement_by_id(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_returned_at=datetime(2016, 10, 1, 1, 1, 1),
signed_agreement_details={'details': 'here'},
signed_agreement_path='path',
countersigned_agreement_details={'countersigneddetails': 'here'},
countersigned_agreement_returned_at=datetime(2016, 11, 1, 1, 1, 1),
countersigned_agreement_path='path'
)
res = self.client.get('/agreements/{}'.format(agreement_id))
assert res.status_code == 200
assert json.loads(res.get_data(as_text=True))['agreement'] == {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'countersigned',
'signedAgreementDetails': {'details': 'here'},
'signedAgreementPath': 'path',
'signedAgreementReturnedAt': '2016-10-01T01:01:01.000000Z',
'countersignedAgreementDetails': {'countersigneddetails': 'here'},
'countersignedAgreementReturnedAt': '2016-11-01T01:01:01.000000Z',
'countersignedAgreementPath': 'path'
}
def test_it_gets_a_countersigned_and_uploaded_framework_agreement_by_id(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_returned_at=datetime(2016, 10, 1, 1, 1, 1),
signed_agreement_details={'details': 'here'},
signed_agreement_path='path',
countersigned_agreement_details={'countersigneddetails': 'here'},
countersigned_agreement_returned_at=datetime(2016, 11, 1, 1, 1, 1),
countersigned_agreement_path='/example.pdf'
)
res = self.client.get('/agreements/{}'.format(agreement_id))
assert res.status_code == 200
assert json.loads(res.get_data(as_text=True))['agreement'] == {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'countersigned',
'signedAgreementDetails': {'details': 'here'},
'signedAgreementPath': 'path',
'signedAgreementReturnedAt': '2016-10-01T01:01:01.000000Z',
'countersignedAgreementDetails': {'countersigneddetails': 'here'},
'countersignedAgreementReturnedAt': '2016-11-01T01:01:01.000000Z',
'countersignedAgreementPath': '/example.pdf'
}
class TestUpdateFrameworkAgreement(BaseFrameworkAgreementTest):
def post_agreement_update(self, agreement_id, agreement):
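        """POST a partial agreement update to /agreements/<id> as 'interested@example.com'."""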
return self.client.post(
'/agreements/{}'.format(agreement_id),
data=json.dumps(
{
'updated_by': 'interested@example.com',
'agreement': agreement
}),
content_type='application/json')
@fixture_params('live_example_framework', {'framework_agreement_details': None})
def test_cant_set_agreement_details_for_framework_without_agreement_version(self, supplier_framework):
agreement_id = self.create_agreement(supplier_framework)
res = self.client.post(
'/agreements/{}'.format(agreement_id),
data=json.dumps(
{
'updated_by': 'interested@example.com',
'agreement': {
'signedAgreementDetails': {
'signerName': 'name',
'signerRole': 'role',
}
}
}),
content_type='application/json')
assert res.status_code == 400
assert (
json.loads(res.get_data(as_text=True))['error'] ==
"Can not update signedAgreementDetails for a framework agreement without a frameworkAgreementVersion"
)
@fixture_params('live_example_framework', {'framework_agreement_details': {'frameworkAgreementVersion': 'v1.0'}})
def test_can_update_framework_agreement_details(self, supplier_framework):
agreement_id = self.create_agreement(supplier_framework)
res = self.post_agreement_update(agreement_id, {
'signedAgreementDetails': {
'signerName': 'name',
'signerRole': 'role',
}
})
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
expected_agreement_json = {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'draft',
'signedAgreementDetails': {
'signerName': 'name',
'signerRole': 'role',
}
}
assert data['agreement'] == expected_agreement_json
res2 = self.client.get('/agreements/{}'.format(agreement_id))
assert res2.status_code == 200
assert json.loads(res2.get_data(as_text=True))['agreement'] == expected_agreement_json
@fixture_params('live_example_framework', {'framework_agreement_details': {'frameworkAgreementVersion': 'v1.0'}})
def test_can_update_signed_agreement_path(self, supplier_framework):
agreement_id = self.create_agreement(supplier_framework)
res = self.post_agreement_update(agreement_id, {
'signedAgreementPath': '/example.pdf'
})
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
expected_agreement_json = {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'draft',
'signedAgreementPath': '/example.pdf'
}
assert data['agreement'] == expected_agreement_json
res2 = self.client.get('/agreements/{}'.format(agreement_id))
assert res2.status_code == 200
assert json.loads(res2.get_data(as_text=True))['agreement'] == expected_agreement_json
@fixture_params('live_example_framework', {'framework_agreement_details': {'frameworkAgreementVersion': 'v1.0'}})
def test_can_update_signed_agreement_details_and_signed_agreement_path(self, supplier_framework):
agreement_id = self.create_agreement(supplier_framework)
res = self.post_agreement_update(agreement_id, {
'signedAgreementDetails': {
'signerName': 'name',
'signerRole': 'role',
},
'signedAgreementPath': '/example.pdf'
})
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
expected_agreement_json = {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'draft',
'signedAgreementPath': '/example.pdf',
'signedAgreementDetails': {
'signerName': 'name',
'signerRole': 'role',
}
}
assert data['agreement'] == expected_agreement_json
res2 = self.client.get('/agreements/{}'.format(agreement_id))
assert res2.status_code == 200
assert json.loads(res2.get_data(as_text=True))['agreement'] == expected_agreement_json
@fixture_params('live_example_framework', {'framework_agreement_details': {'frameworkAgreementVersion': 'v1.0'}})
def test_audit_event_created_when_updating_framework_agreement(self, supplier_framework):
agreement_id = self.create_agreement(supplier_framework)
res = self.post_agreement_update(agreement_id, {
'signedAgreementDetails': {
'signerName': 'name',
'signerRole': 'role',
},
'signedAgreementPath': '/example.pdf'
})
assert res.status_code == 200
agreement = FrameworkAgreement.query.filter(
FrameworkAgreement.id == agreement_id
).first()
audit = AuditEvent.query.filter(
AuditEvent.object == agreement
).first()
assert audit.type == "update_agreement"
assert audit.user == "interested@example.com"
assert audit.data == {
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'update': {
'signedAgreementDetails': {
'signerName': 'name',
'signerRole': 'role',
},
'signedAgreementPath': '/example.pdf'
}
}
@fixture_params('live_example_framework', {'framework_agreement_details': {'frameworkAgreementVersion': 'v1.0'}})
def test_can_not_set_framework_agreement_version_directly(self, supplier_framework):
agreement_id = self.create_agreement(supplier_framework)
res = self.post_agreement_update(agreement_id, {
'frameworkAgreementVersion': 'v23.4'
})
assert res.status_code == 400
assert json.loads(res.get_data(as_text=True)) == {
'error': "Invalid JSON should not have 'frameworkAgreementVersion' keys"
}
@fixture_params('live_example_framework', {'framework_agreement_details': {'frameworkAgreementVersion': 'v1.0'}})
def test_agreement_returned_at_timestamp_cannot_be_set(self, supplier_framework):
agreement_id = self.create_agreement(supplier_framework)
res = self.post_agreement_update(agreement_id, {
'signedAgreementReturnedAt': '2013-13-13T00:00:00.000000Z'
})
assert res.status_code == 400
assert json.loads(res.get_data(as_text=True)) == {
'error': "Invalid JSON should not have 'signedAgreementReturnedAt' keys"
}
@fixture_params('live_example_framework', {'framework_agreement_details': {'frameworkAgreementVersion': 'v1.0'}})
def test_400_cannot_update_signed_agreement(self, supplier_framework):
agreement_id = self.create_agreement(supplier_framework, signed_agreement_returned_at=datetime.utcnow())
res = self.post_agreement_update(agreement_id, {
'signedAgreementPath': '/example.pdf'
})
assert res.status_code == 400
assert json.loads(res.get_data(as_text=True)) == {
'error': 'Can not update signedAgreementDetails or signedAgreementPath if agreement has been signed'
}
@fixture_params('live_example_framework', {'framework_agreement_details': {'frameworkAgreementVersion': 'v1.0'}})
def test_400_if_unknown_field_present_in_update_json(self, supplier_framework):
agreement_id = self.create_agreement(supplier_framework)
res = self.post_agreement_update(agreement_id, {
'signedRandomKey': 'banana'
})
assert res.status_code == 400
assert json.loads(res.get_data(as_text=True)) == {
'error': "Invalid JSON should not have 'signedRandomKey' keys"
}
@fixture_params('live_example_framework', {'framework_agreement_details': {'frameworkAgreementVersion': 'v1.0'}})
def test_400_if_unknown_field_present_in_signed_agreement_details(self, supplier_framework):
agreement_id = self.create_agreement(supplier_framework)
res = self.post_agreement_update(agreement_id, {
'signedAgreementDetails': {
'signerName': 'name',
'randomKey': 'value',
}
})
assert res.status_code == 400
data = json.loads(res.get_data(as_text=True))
# split assertions into keyphrases due to nested unicode string in python 2
strings_we_expect_in_the_error_message = [
'Additional properties are not allowed', 'randomKey', 'was unexpected']
for error_string in strings_we_expect_in_the_error_message:
assert error_string in data['error']['_form'][0]
@fixture_params('live_example_framework', {'framework_agreement_details': {'frameworkAgreementVersion': 'v1.0'}})
def test_200_if_signed_agreement_details_is_empty_object(self, supplier_framework):
agreement_id = self.create_agreement(supplier_framework)
res = self.post_agreement_update(agreement_id, {'signedAgreementDetails': {}})
assert res.status_code == 200
@fixture_params('live_example_framework', {'framework_agreement_details': {'frameworkAgreementVersion': 'v1.0'}})
def test_400_if_signed_agreement_details_contains_empty_strings_as_values(self, supplier_framework):
agreement_id = self.create_agreement(supplier_framework)
res = self.post_agreement_update(agreement_id, {
'signedAgreementDetails': {
'signerName': '',
'signerRole': '',
}
})
assert res.status_code == 400
assert json.loads(res.get_data(as_text=True)) == {
'error': {'signerName': 'answer_required', 'signerRole': 'answer_required'}
}
@fixture_params('live_example_framework', {'framework_agreement_details': {'frameworkAgreementVersion': 'v1.0'}})
def test_can_update_countersigned_agreement_path_for_framework_with_agreement_version(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_details={
'signerName': 'name',
'signerRole': 'role',
},
signed_agreement_path='path/file.pdf',
signed_agreement_returned_at=datetime(2016, 10, 1, 1, 1, 1),
countersigned_agreement_returned_at=datetime(2016, 11, 1, 1, 1, 1)
)
res = self.post_agreement_update(agreement_id, {
'countersignedAgreementPath': 'countersigned/file.jpg'
})
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
expected_agreement_json = {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'countersigned',
'signedAgreementPath': 'path/file.pdf',
'signedAgreementDetails': {
'signerName': 'name',
'signerRole': 'role',
},
'signedAgreementReturnedAt': '2016-10-01T01:01:01.000000Z',
'countersignedAgreementReturnedAt': '2016-11-01T01:01:01.000000Z',
'countersignedAgreementPath': 'countersigned/file.jpg'
}
assert data['agreement'] == expected_agreement_json
res2 = self.client.get('/agreements/{}'.format(agreement_id))
assert res2.status_code == 200
assert json.loads(res2.get_data(as_text=True))['agreement'] == expected_agreement_json
@fixture_params('live_example_framework', {'framework_agreement_details': None})
def test_can_update_countersigned_agreement_path_for_framework_without_agreement_version(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_path='path/file.pdf',
signed_agreement_returned_at=datetime(2016, 10, 1, 1, 1, 1),
countersigned_agreement_returned_at=datetime(2016, 11, 1, 1, 1, 1)
)
res = self.post_agreement_update(agreement_id, {
'countersignedAgreementPath': 'countersigned/file.jpg'
})
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
expected_agreement_json = {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'countersigned',
'signedAgreementPath': 'path/file.pdf',
'signedAgreementReturnedAt': '2016-10-01T01:01:01.000000Z',
'countersignedAgreementReturnedAt': '2016-11-01T01:01:01.000000Z',
'countersignedAgreementPath': 'countersigned/file.jpg'
}
assert data['agreement'] == expected_agreement_json
res2 = self.client.get('/agreements/{}'.format(agreement_id))
assert res2.status_code == 200
assert json.loads(res2.get_data(as_text=True))['agreement'] == expected_agreement_json
@fixture_params(
'live_example_framework', {
'framework_agreement_details': {'frameworkAgreementVersion': 'v1.0'},
'slug': 'g-cloud-11',
}
)
def test_cannot_update_countersigned_agreement_path_if_agreement_has_not_been_approved(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_path='path/file.pdf',
signed_agreement_returned_at=datetime(2016, 10, 1, 1, 1, 1)
)
res = self.post_agreement_update(agreement_id, {
'countersignedAgreementPath': 'countersigned/file.jpg'
})
assert res.status_code == 400
assert json.loads(res.get_data(as_text=True)) == {
'error': 'Can not update countersignedAgreementPath if agreement has not been approved for countersigning'
}
@fixture_params(
'live_example_framework', {
'framework_agreement_details': {'frameworkAgreementVersion': 'v1.0'},
'slug': 'g-cloud-12',
}
)
def test_can_update_countersigned_agreement_path_without_approval_for_esignature_framework(
self, supplier_framework
):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_path='path/file.pdf',
signed_agreement_returned_at=datetime(2016, 10, 1, 1, 1, 1)
)
res = self.post_agreement_update(agreement_id, {
'countersignedAgreementPath': 'countersigned/file.jpg'
})
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
expected_agreement_json = {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'countersigned',
'signedAgreementPath': 'path/file.pdf',
'signedAgreementReturnedAt': '2016-10-01T01:01:01.000000Z',
'countersignedAgreementPath': 'countersigned/file.jpg'
}
assert data['agreement'] == expected_agreement_json
res2 = self.client.get('/agreements/{}'.format(agreement_id))
assert res2.status_code == 200
assert json.loads(res2.get_data(as_text=True))['agreement'] == expected_agreement_json
def test_can_unset_countersigned_agreement_path(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_path='path/file.pdf',
signed_agreement_returned_at=datetime(2016, 10, 1, 1, 1, 1),
countersigned_agreement_returned_at=datetime(2016, 11, 1, 1, 1, 1),
countersigned_agreement_path='countersigned/that/bad/boy.pdf'
)
res = self.post_agreement_update(agreement_id, {
'countersignedAgreementPath': None
})
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
expected_agreement_json = {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'approved',
'signedAgreementPath': 'path/file.pdf',
'signedAgreementReturnedAt': '2016-10-01T01:01:01.000000Z',
'countersignedAgreementReturnedAt': '2016-11-01T01:01:01.000000Z'
}
assert data['agreement'] == expected_agreement_json
res2 = self.client.get('/agreements/{}'.format(agreement_id))
assert res2.status_code == 200
assert json.loads(res2.get_data(as_text=True))['agreement'] == expected_agreement_json
@fixture_params('live_example_framework', {'framework_agreement_details': {'frameworkAgreementVersion': 'v1.0'}})
class TestSignFrameworkAgreementThatHasFrameworkAgreementVersion(BaseFrameworkAgreementTest):
def sign_agreement(self, agreement_id, agreement):
return self.client.post(
'/agreements/{}/sign'.format(agreement_id),
data=json.dumps(
{
'updated_by': 'interested@example.com',
'agreement': agreement
}),
content_type='application/json')
def test_can_sign_framework_agreement(self, user_role_supplier, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_details={'signerName': 'name', 'signerRole': 'role'},
signed_agreement_path='/example.pdf'
)
with freeze_time('2016-12-12'):
res = self.sign_agreement(agreement_id, {'signedAgreementDetails': {'uploaderUserId': 1}})
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
assert data['agreement'] == {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'signed',
'signedAgreementPath': '/example.pdf',
'signedAgreementDetails': {
'signerName': 'name',
'signerRole': 'role',
'uploaderUserId': user_role_supplier,
'frameworkAgreementVersion': 'v1.0'
},
'signedAgreementReturnedAt': '2016-12-12T00:00:00.000000Z'
}
def test_signing_framework_agreement_produces_audit_event(self, user_role_supplier, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_details={'signerName': 'name', 'signerRole': 'role'},
signed_agreement_path='/example.pdf'
)
res = self.sign_agreement(agreement_id, {'signedAgreementDetails': {'uploaderUserId': user_role_supplier}})
assert res.status_code == 200
agreement = FrameworkAgreement.query.filter(
FrameworkAgreement.id == agreement_id
).first()
audit = AuditEvent.query.filter(
AuditEvent.object == agreement
).first()
assert audit.type == "sign_agreement"
assert audit.user == "interested@example.com"
assert audit.data == {
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'update': {'signedAgreementDetails': {'uploaderUserId': user_role_supplier}}
}
def test_can_re_sign_framework_agreement(self, user_role_supplier, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_details={
'signerName': 'name',
'signerRole': 'role',
'uploaderUserId': 2,
'frameworkAgreementVersion': 'v1.0'
},
signed_agreement_path='/example.pdf',
signed_agreement_returned_at=datetime.utcnow()
)
with freeze_time('2016-12-12'):
res = self.sign_agreement(agreement_id, {'signedAgreementDetails': {'uploaderUserId': user_role_supplier}})
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
assert data['agreement'] == {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'signed',
'signedAgreementPath': '/example.pdf',
'signedAgreementDetails': {
'signerName': 'name',
'signerRole': 'role',
'uploaderUserId': 1,
'frameworkAgreementVersion': 'v1.0'
},
'signedAgreementReturnedAt': '2016-12-12T00:00:00.000000Z'
}
def test_can_not_sign_framework_agreement_that_has_no_signer_name(self, user_role_supplier, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_details={'signerRole': 'role'},
signed_agreement_path='/example.pdf'
)
res = self.sign_agreement(agreement_id, {'signedAgreementDetails': {'uploaderUserId': user_role_supplier}})
assert res.status_code == 400
assert (
json.loads(res.get_data(as_text=True))['error'] == {'signerName': 'answer_required'})
def test_can_not_sign_framework_agreement_that_has_no_signer_role(self, user_role_supplier, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_details={'signerName': 'name'},
signed_agreement_path='/example.pdf'
)
res = self.sign_agreement(agreement_id, {'signedAgreementDetails': {'uploaderUserId': user_role_supplier}})
assert res.status_code == 400
assert (
json.loads(res.get_data(as_text=True))['error'] == {'signerRole': 'answer_required'})
def test_400_if_user_signing_framework_agreement_does_not_exist(self, user_role_supplier, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_details={'signerName': 'name', 'signerRole': 'role'},
signed_agreement_path='/example.pdf'
)
# The user_role_supplier fixture sets up user with ID 1; there is no user with ID 20
res = self.sign_agreement(agreement_id, {'signedAgreementDetails': {'uploaderUserId': 20}})
assert res.status_code == 400
assert (
json.loads(res.get_data(as_text=True))['error'] == "No user found with id '20'")
# Frameworks prior to G-Cloud 8 do not have framework_agreement_version set, and signing these stores only the timestamp
class TestSignFrameworkAgreementThatHasNoFrameworkAgreementVersion(BaseFrameworkAgreementTest):
def sign_agreement(self, agreement_id):
return self.client.post(
'/agreements/{}/sign'.format(agreement_id),
data=json.dumps(
{
'updated_by': 'interested@example.com'
}),
content_type='application/json')
def test_can_sign_framework_agreement(self, supplier_framework):
agreement_id = self.create_agreement(supplier_framework)
with freeze_time('2016-12-12'):
res = self.sign_agreement(agreement_id)
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
assert data['agreement'] == {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'signed',
'signedAgreementReturnedAt': '2016-12-12T00:00:00.000000Z'
}
def test_signing_framework_agreement_produces_audit_event(self, supplier_framework):
agreement_id = self.create_agreement(supplier_framework)
res = self.sign_agreement(agreement_id)
assert res.status_code == 200
agreement = FrameworkAgreement.query.filter(
FrameworkAgreement.id == agreement_id
).first()
audit = AuditEvent.query.filter(
AuditEvent.object == agreement
).first()
assert audit.type == "sign_agreement"
assert audit.user == "interested@example.com"
assert audit.data == {
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
}
def test_can_re_sign_framework_agreement(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_returned_at=datetime.utcnow()
)
with freeze_time('2016-12-12'):
res = self.sign_agreement(agreement_id)
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
assert data['agreement'] == {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'signed',
'signedAgreementReturnedAt': '2016-12-12T00:00:00.000000Z'
}
class TestPutFrameworkAgreementOnHold(BaseFrameworkAgreementTest):
def put_framework_agreement_on_hold(self, agreement_id):
return self.client.post(
'/agreements/{}/on-hold'.format(agreement_id),
data=json.dumps(
{
'updated_by': 'interested@example.com'
}),
content_type='application/json')
@fixture_params('live_example_framework', {'framework_agreement_details': {'frameworkAgreementVersion': 'v1.0'}})
def test_can_put_framework_agreement_on_hold(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_returned_at=datetime(2016, 10, 1),
)
with freeze_time('2016-12-12'):
res = self.put_framework_agreement_on_hold(agreement_id)
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
assert data['agreement'] == {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'on-hold',
'signedAgreementReturnedAt': '2016-10-01T00:00:00.000000Z',
'signedAgreementPutOnHoldAt': '2016-12-12T00:00:00.000000Z'
}
agreement = FrameworkAgreement.query.filter(
FrameworkAgreement.id == agreement_id
).first()
audit = AuditEvent.query.filter(
AuditEvent.object == agreement
).first()
assert audit.type == "update_agreement"
assert audit.user == "interested@example.com"
assert audit.data == {
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'on-hold'
}
@fixture_params('live_example_framework', {'framework_agreement_details': {'frameworkAgreementVersion': 'v1.0'}})
def test_can_not_put_unsigned_framework_agreement_on_hold(self, supplier_framework):
agreement_id = self.create_agreement(supplier_framework)
res = self.put_framework_agreement_on_hold(agreement_id)
assert res.status_code == 400
error_message = json.loads(res.get_data(as_text=True))['error']
assert error_message == "Framework agreement must have status 'signed' to be put on hold"
@fixture_params('live_example_framework', {'framework_agreement_details': {'frameworkAgreementVersion': 'v1.0'}})
def test_can_not_put_countersigned_framework_agreement_on_hold(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_returned_at=datetime(2016, 9, 1),
countersigned_agreement_returned_at=datetime(2016, 10, 1)
)
res = self.put_framework_agreement_on_hold(agreement_id)
assert res.status_code == 400
error_message = json.loads(res.get_data(as_text=True))['error']
assert error_message == "Framework agreement must have status 'signed' to be put on hold"
def test_can_not_put_framework_agreement_on_hold_that_has_no_framework_agreement_version(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_returned_at=datetime(2016, 10, 1)
)
res = self.put_framework_agreement_on_hold(agreement_id)
assert res.status_code == 400
error_message = json.loads(res.get_data(as_text=True))['error']
assert error_message == "Framework agreement must have a 'frameworkAgreementVersion' to be put on hold"
class TestApproveFrameworkAgreement(BaseFrameworkAgreementTest):
def approve_framework_agreement(self, agreement_id):
return self.client.post(
'/agreements/{}/approve'.format(agreement_id),
data=json.dumps(
{
'updated_by': 'chris@example.com',
'agreement': {'userId': '1234'}
}),
content_type='application/json')
def unapprove_framework_agreement(self, agreement_id):
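        """POST an approval payload with 'unapprove': True to revert a previously approved agreement."""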
return self.client.post(
'/agreements/{}/approve'.format(agreement_id),
data=json.dumps(
{
'updated_by': 'made-a-whoopsie@example.com',
'agreement': {'userId': '1234', 'unapprove': True}
}),
content_type='application/json')
@fixture_params(
'live_example_framework', {
'framework_agreement_details': {
'frameworkAgreementVersion': 'v1.0',
'countersignerName': 'The Boss',
'countersignerRole': 'Director of Strings'
}
}
)
def test_can_approve_signed_framework_agreement(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_returned_at=datetime(2016, 10, 1),
)
with freeze_time('2016-12-12'):
res = self.approve_framework_agreement(agreement_id)
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
assert data['agreement'] == {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'approved',
'signedAgreementReturnedAt': '2016-10-01T00:00:00.000000Z',
'countersignedAgreementReturnedAt': '2016-12-12T00:00:00.000000Z',
'countersignedAgreementDetails': {
'countersignerName': 'The Boss',
'countersignerRole': 'Director of Strings',
'approvedByUserId': '1234'
}
}
agreement = FrameworkAgreement.query.filter(
FrameworkAgreement.id == agreement_id
).first()
audit = AuditEvent.query.filter(
AuditEvent.object == agreement
).first()
assert audit.type == "countersign_agreement"
assert audit.user == "chris@example.com"
assert audit.data == {
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'approved'
}
@fixture_params(
'live_example_framework', {
'framework_agreement_details': {
'frameworkAgreementVersion': 'v1.0',
'countersignerName': 'The Boss',
'countersignerRole': 'Director of Strings'
}
}
)
def test_can_approve_on_hold_framework_agreement(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_returned_at=datetime(2016, 10, 1),
)
with freeze_time('2016-10-02'):
on_hold_res = self.client.post(
'/agreements/{}/on-hold'.format(agreement_id),
data=json.dumps(
{
'updated_by': 'interested@example.com'
}),
content_type='application/json')
assert on_hold_res.status_code == 200
on_hold_data = json.loads(on_hold_res.get_data(as_text=True))['agreement']
assert on_hold_data['status'] == 'on-hold'
with freeze_time('2016-10-03'):
res = self.approve_framework_agreement(agreement_id)
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
assert 'signedAgreementPutOnHoldAt' not in data['agreement']
assert data['agreement'] == {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'approved',
'signedAgreementReturnedAt': '2016-10-01T00:00:00.000000Z',
'countersignedAgreementReturnedAt': '2016-10-03T00:00:00.000000Z',
'countersignedAgreementDetails': {
'countersignerName': 'The Boss',
'countersignerRole': 'Director of Strings',
'approvedByUserId': '1234'
}
}
@fixture_params('live_example_framework', {'framework_agreement_details': {'frameworkAgreementVersion': 'v1.0'}})
def test_can_not_approve_unsigned_framework_agreement(self, supplier_framework):
agreement_id = self.create_agreement(supplier_framework)
res = self.approve_framework_agreement(agreement_id)
assert res.status_code == 400
error_message = json.loads(res.get_data(as_text=True))['error']
assert error_message == "Framework agreement must have status 'signed' or 'on hold' to be countersigned"
def test_can_approve_framework_agreement_that_has_no_framework_agreement_version(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_returned_at=datetime(2016, 10, 1)
)
with freeze_time('2016-10-03'):
res = self.approve_framework_agreement(agreement_id)
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
assert data['agreement'] == {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'approved',
'signedAgreementReturnedAt': '2016-10-01T00:00:00.000000Z',
'countersignedAgreementReturnedAt': '2016-10-03T00:00:00.000000Z',
'countersignedAgreementDetails': {'approvedByUserId': '1234'}
}
@fixture_params('live_example_framework', {'framework_agreement_details': {'frameworkAgreementVersion': 'v1.0'}})
def test_can_approve_framework_agreement_with_agreement_version_but_no_name_or_role(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_returned_at=datetime(2016, 10, 1)
)
with freeze_time('2016-10-03'):
res = self.approve_framework_agreement(agreement_id)
assert res.status_code == 200
data = json.loads(res.get_data(as_text=True))
assert data['agreement'] == {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'approved',
'signedAgreementReturnedAt': '2016-10-01T00:00:00.000000Z',
'countersignedAgreementReturnedAt': '2016-10-03T00:00:00.000000Z',
'countersignedAgreementDetails': {'approvedByUserId': '1234'}
}
@fixture_params(
'live_example_framework', {
'framework_agreement_details': {
'frameworkAgreementVersion': 'v1.0',
'countersignerName': 'The Boss',
'countersignerRole': 'Director of Strings'
}
}
)
def test_serialized_supplier_framework_contains_updater_details_after_approval(self, supplier_framework):
user = User(
id=1234,
name='Chris',
email_address='chris@crowncommercial.gov.uk',
password='password',
active=True,
created_at=datetime.now(),
password_changed_at=datetime.now(),
role='admin-ccs-sourcing'
)
db.session.add(user)
db.session.commit()
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_returned_at=datetime(2016, 10, 1),
signed_agreement_details={},
countersigned_agreement_details={
"countersignerRole": "Director of Strings",
"approvedByUserId": 1234,
"countersignerName": "The Boss"
},
countersigned_agreement_returned_at=datetime.now()
)
agreement = FrameworkAgreement.query.filter(FrameworkAgreement.id == agreement_id).first()
supplier_framework = agreement.supplier_framework.serialize(with_users=True)
assert supplier_framework['countersignedDetails']['approvedByUserName'] == 'Chris'
assert supplier_framework['countersignedDetails']['approvedByUserEmail'] == 'chris@crowncommercial.gov.uk'
def test_can_unapprove_approved_agreement(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_returned_at=datetime(2016, 10, 1)
)
with freeze_time('2016-12-12'):
res1 = self.approve_framework_agreement(agreement_id)
agreement_before_unapprove_data = json.loads(res1.get_data(as_text=True))
# Check that the agreement is definitely approved
assert agreement_before_unapprove_data['agreement'] == {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'approved',
'signedAgreementReturnedAt': '2016-10-01T00:00:00.000000Z',
'countersignedAgreementReturnedAt': '2016-12-12T00:00:00.000000Z',
'countersignedAgreementDetails': {'approvedByUserId': '1234'}
}
res2 = self.unapprove_framework_agreement(agreement_id)
assert res2.status_code == 200
unapproved_agreement_data = json.loads(res2.get_data(as_text=True))
# Check that status is reverted to 'signed' and countersigned info has been removed
assert unapproved_agreement_data['agreement'] == {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'signed',
'signedAgreementReturnedAt': '2016-10-01T00:00:00.000000Z',
}
agreement = FrameworkAgreement.query.filter(
FrameworkAgreement.id == agreement_id
).first()
# Get the most recent audit event and check it is the "unapprove" event
audit = AuditEvent.query.filter(
AuditEvent.object == agreement
).order_by(AuditEvent.created_at.desc()).first()
assert audit.type == "countersign_agreement"
assert audit.user == "made-a-whoopsie@example.com"
assert audit.data == {
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'unapproved'
}
def test_can_not_unapprove_countersigned_agreement(self, supplier_framework):
agreement_id = self.create_agreement(
supplier_framework,
signed_agreement_returned_at=datetime(2016, 10, 1),
countersigned_agreement_returned_at=datetime(2016, 10, 2),
countersigned_agreement_path='/path/to/countersigned/document'
)
res1 = self.client.get('/agreements/{}'.format(agreement_id))
data1 = json.loads(res1.get_data(as_text=True))['agreement']
assert data1['status'] == 'countersigned'
res2 = self.unapprove_framework_agreement(agreement_id)
data2 = json.loads(res2.get_data(as_text=True))
assert res2.status_code == 400
assert data2['error'] == "Framework agreement must have status 'approved' to be unapproved"
# Check that status has not been changed
res3 = self.client.get('/agreements/{}'.format(agreement_id))
data3 = json.loads(res3.get_data(as_text=True))['agreement']
assert data3 == {
'id': agreement_id,
'supplierId': supplier_framework['supplierId'],
'frameworkSlug': supplier_framework['frameworkSlug'],
'status': 'countersigned',
'signedAgreementReturnedAt': '2016-10-01T00:00:00.000000Z',
'countersignedAgreementReturnedAt': '2016-10-02T00:00:00.000000Z',
'countersignedAgreementPath': '/path/to/countersigned/document'
}
# === linear_regression_boston_housing.py (repo: coherent17/boston_housing_linear_regression, license: MIT) ===
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
boston_dataset = load_boston()
#load the Boston data from sklearn.datasets and assign it to boston_dataset (a dict-like Bunch)
#this dictionary includes ['data', 'target', 'feature_names', 'DESCR', 'filename']
#data: contains the information for the various houses
#target: prices of the houses
#feature_names: names of the features
#DESCR: describes the dataset
#boston_dataset.DESCR describes the variables in boston_dataset.data but excludes MEDV,
#so we take MEDV from boston_dataset.target and add it to the boston DataFrame
boston=pd.DataFrame(boston_dataset.data,columns=boston_dataset.feature_names)
#add a MEDV column to boston to store the target prices from boston_dataset.target
boston['MEDV']=boston_dataset.target
#visualize the distribution of the target variable MEDV
sns.set(rc={'figure.figsize':(11.7,8.27)})
sns.distplot(boston['MEDV'], bins=30)
#distplot overlays a histogram (30 bins) with a kernel density estimate
plt.show()
#build a correlation matrix to measure the linear relationships between the variables in the data
correlation_matrix = boston.corr().round(2)
#compute pairwise correlations between the columns of boston, rounded to two decimal places
sns.heatmap(data=correlation_matrix, annot=True)
#display the correlation matrix as an annotated heatmap
plt.show()
#correlation matrix:
# CRIM ZN INDUS CHAS NOX RM AGE DIS RAD TAX PTRATIO B LSTAT MEDV
#CRIM 1.00 -0.20 0.41 -0.06 0.42 -0.22 0.35 -0.38 0.63 0.58 0.29 -0.39 0.46 -0.39
#ZN -0.20 1.00 -0.53 -0.04 -0.52 0.31 -0.57 0.66 -0.31 -0.31 -0.39 0.18 -0.41 0.36
#INDUS 0.41 -0.53 1.00 0.06 0.76 -0.39 0.64 -0.71 0.60 0.72 0.38 -0.36 0.60 -0.48
#CHAS -0.06 -0.04 0.06 1.00 0.09 0.09 0.09 -0.10 -0.01 -0.04 -0.12 0.05 -0.05 0.18
#NOX 0.42 -0.52 0.76 0.09 1.00 -0.30 0.73 -0.77 0.61 0.67 0.19 -0.38 0.59 -0.43
#RM -0.22 0.31 -0.39 0.09 -0.30 1.00 -0.24 0.21 -0.21 -0.29 -0.36 0.13 -0.61 0.70
#AGE 0.35 -0.57 0.64 0.09 0.73 -0.24 1.00 -0.75 0.46 0.51 0.26 -0.27 0.60 -0.38
#DIS -0.38 0.66 -0.71 -0.10 -0.77 0.21 -0.75 1.00 -0.49 -0.53 -0.23 0.29 -0.50 0.25
#RAD 0.63 -0.31 0.60 -0.01 0.61 -0.21 0.46 -0.49 1.00 0.91 0.46 -0.44 0.49 -0.38
#TAX 0.58 -0.31 0.72 -0.04 0.67 -0.29 0.51 -0.53 0.91 1.00 0.46 -0.44 0.54 -0.47
#PTRATIO 0.29 -0.39 0.38 -0.12 0.19 -0.36 0.26 -0.23 0.46 0.46 1.00 -0.18 0.37 -0.51
#B -0.39 0.18 -0.36 0.05 -0.38 0.13 -0.27 0.29 -0.44 -0.44 -0.18 1.00 -0.37 0.33
#LSTAT 0.46 -0.41 0.60 -0.05 0.59 -0.61 0.60 -0.50 0.49 0.54 0.37 -0.37 1.00 -0.74
#MEDV -0.39 0.36 -0.48 0.18 -0.43 0.70 -0.38 0.25 -0.38 -0.47 -0.51 0.33 -0.74 1.00
#from the correlations with MEDV, RM has the strongest positive correlation (0.70)
#and LSTAT the strongest negative correlation (-0.74)
#use scatter plots to visualize how MEDV varies with LSTAT and RM
plt.figure(figsize=(20, 5))
features = ['LSTAT', 'RM']
target = boston['MEDV']
for i, col in enumerate(features):
plt.subplot(1,2,i+1)
x = boston[col]
y = target
plt.scatter(x, y, marker='o')
plt.title(col)
plt.xlabel(col)
plt.ylabel('MEDV')
plt.show()
#prepare the data for training the model
X = pd.DataFrame(np.c_[boston['LSTAT'], boston['RM']], columns = ['LSTAT','RM'])
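#np.c_ concatenates the LSTAT and RM columns side by side into a single (n_samples, 2) feature matrix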
Y = boston['MEDV']
# Splitting the data into training and testing sets
#80% for training set and 20% for testing set
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state=5)
#X_train:[404 rows x 2 columns]
#X_test:[102 rows x 2 columns]
#Y_train:Name: MEDV, Length: 404, dtype: float64
#Y_test:Name: MEDV, Length: 102, dtype: float64
#Training and testing the model
lin_model = LinearRegression()
lin_model.fit(X_train, Y_train)
# model evaluation for training set
y_train_predict = lin_model.predict(X_train)
rmse = (np.sqrt(mean_squared_error(Y_train, y_train_predict)))
r2 = r2_score(Y_train, y_train_predict)
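#RMSE = sqrt(mean((y_true - y_pred)^2)); R2 = 1 - SS_res/SS_tot, the fraction of MEDV variance explained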
print("The model performance for training set")
print("--------------------------------------")
print('RMSE is {}'.format(rmse))
print('R2 score is {}'.format(r2))
print("\n")
# model evaluation for testing set
y_test_predict = lin_model.predict(X_test)
rmse = (np.sqrt(mean_squared_error(Y_test, y_test_predict)))
r2 = r2_score(Y_test, y_test_predict)
print("The model performance for testing set")
print("--------------------------------------")
print('RMSE is {}'.format(rmse))
print('R2 score is {}'.format(r2)) | 45.047619 | 126 | 0.658562 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,150 | 0.665962 |
ada6f90acad0fa32bb41de1376f5746034781a3d | 9,746 | py | Python | main.py | ItsTato/proctrl | 079516e859f777f8698ad9c649fc23ecc1bde48d | [
"MIT"
] | 1 | 2021-09-20T00:32:53.000Z | 2021-09-20T00:32:53.000Z | main.py | ItsTato/proctrl | 079516e859f777f8698ad9c649fc23ecc1bde48d | [
"MIT"
] | null | null | null | main.py | ItsTato/proctrl | 079516e859f777f8698ad9c649fc23ecc1bde48d | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2022 ItsTato
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import json
import os
print("Loading user config.")
if not os.path.exists("config.json"):
print("No config file found, ending.")
import sys
sys.exit(0)
with open("config.json") as json_file:
data = json.load(json_file)
you = data["username"]
prefix = data["prefix"]
token = data["token"]
status = data["status"]
guild_id = data["guild"]
client_id = data["client_id"]
PanelPort = data["dashport"]
json_file.close()
print('Importing modules 0/100%')
from discord.ext import commands
from discord.ext.commands import has_permissions
import discord,psutil,sys,threading,platform,socket,datetime
from tabulate import *
from flask import *
print('Importing modules 100/100%')
print(f'Setting prefix to {prefix}, making "client" definition, making "site" definition. 0/100%')
client = commands.Bot(command_prefix=str(prefix), intents=discord.Intents.all(),case_insensitive=True,strip_after_prefix=True)
site = Flask(__name__)
site.config["SECRET_KEY"] = "qIBcq3KHRSwwmBulnJxpmZOTD8zCcEHIAPOck2ieUHoB2mKcrY91uSoIrsGvgsLScrMf8poop1OCH5uVZsEeK2I0oio2RL933bLoK2qDMtvuwASypHxgTSZmm01I5MmRsk2oEsUCh1T03UnISLWazuGGmwoUvLx77lsYvJqcJW5VvZlY9YdgAMuzGdroNp638QlQsGYnLKJsJnr1vg0MRl6ixMpSGYop02yiQMmA4NMERd1SmkL25AuAhIHuXcaIeXPibbzNzBTejtWLMPBcustlADmKEkJMtXLtiet6MWka3iFbdApiySHw39otrc541oj3nD91gmQl0RXL6aJkp0wS7F4zE7xWSrhpmiS6VSwn1rKpmyClBRyAX5BBmGO1QBlPBzO7JxcTtXngPaWG7LQNrUCDRZE7yattXPgKZNXN3EJlV0hYwkexV7wpEaPYDDU6PRpATCyZUNWD6C2lDyxWL2aKNxIoKcKDvjIxtsly9n09PQ3qJBl1WAaLHsIvJkgK5SF2lkr0pTySh6eWes3znMqIBPN0GOfewGbXl1e6vYcu5mqVI4rnyqd7qqFdEJtMNxYoNh6pJcE5JVWdow1TG97fV8RxeEKnv1PcQTy3W3Naym4L6Z1FJqLtmJyoW6SHUBudWg6A4O3cXRvU8gkcCBpz8plNojnIhHQ5TqphJIL1IbX3tJCY72UGEbrQURnITAjZnPDV1wwdD1i8S2zlKL8LkKsehUURei2uqOgM9qAm8PGaMFsFZK6dUpiT9tlUDjmRjbKRJCKF6HwgYGkqpZlkkP32o84s2QYlgv9CncxvtbnlYoXf6Sa8LkhV0SFiGoivJHH0iZb886otY1zxBEofz06WcNj1gKUMgz0J2uEKhSM3sheVVZ6kbmIakVKcjdkEb3AeO4pGNhgtnYNPZlaC772pPhTNZkwDWM074Ede8BdavK8Ll8HZiTIyuGnWNKktN6VZ8cBwi33UywkUJHdDtZCv9VqO2VlTAC8kANQncA1U4b4E3GomrFWV1VicWyBCfdWf8HQFaZrvBhgtZSBkQNMWqfXrL1wwB3w6owcnRS424ne9qRHrKZY1juNzI18l6qJRntu0rq4LnNHe295SuyhUFywFWie6exTYiLMmHsgcrguIwNYhOqp2H7jiQH83s4G00G1E0SohU57oCqWKSNXzYg1zzhMf5i3EXdTYulFLYhawWbTg5umnyflxOkrMQnHreP1gygYj6tsYHNAIogonOKz1bQW2U5fGFo02Xop3hrEaxsF3YjSetxQakqOJjBY7wIedJYGwvLsw53ljzHTKDGYSydHGbiEcIUyt2TaelOQwsWjKO5fQH9lgJjfb4dIA83mpIcaoGYgVjV9yM0JMvVn8GjqaR1wq6HThz1tM0Q2znvnbkj9XjM4Z6D0tp6nnZxfRFJS4KZnswbwhCYpqIwZKq0dz6CyC1Dbo6XTRdJcwnXMyZJbmAi0w82BjyzGTSxjLnotRSfUWGseH3Pny5lXSsoud8Tnw459jmXop2988XqdLWsXlI55mUovzSsOnNbZVycX4169aYVZX4lSIRzdclf7KH1PGW1n3WJy9HV3bIEaeam5HHGiH25yfSGvVbhgTfUPV1yhk9SDwuWRasK74mPl55Yib7bv0PzQx1ORNLqY4DevPnOUjOk6q1IKnVvGrwMzUqfiyHCxOwfwn2D8PJS0faToJkvgsygomhm0iA4sUTxwsMYK4j7vmafZQp952X3WdTNZpZNyy9TzIOS8EcQaNDiswP4uV0JxUjFghSVbTgKUhrxK4W4UpzgFboxwyZM8ZKZBx1tk26pgstf2DGm9hUhhEM7TgFxEF7xyCD60pwIo0Csf7Q65KvaTqfiEeGMp4HmKIhiJ7BrBViP5cj2WqYAqNzFqUaXanUX6GDaiiWYCFtYgdDhg9AIxbBMJYroBNUT2UphAKnhbNAYq4Dnovhu9YqHDr7mECGsp5449POELlqTyfxlO8aLiLalKzE6OoRWbfNuzEJKuWayDwCYWYn2FIHgX1kBRxQFwNnGjnVGQe"
print(f'Setting prefix to {prefix}, making "client" definition, making "site" definition. 100/100%')
def convertSize(bytes, suffix="B"):
for unit in ["", "K", "M", "G", "T", "P"]:
if bytes < 1024:
return {"value":f"{bytes:.2f}","unit":f"{unit}{suffix}"}
bytes /= 1024
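# e.g. convertSize(3 * 1024**2) returns {"value": "3.00", "unit": "MB"}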
def restart():
	# Restart the host machine after a 1 second delay (Windows shutdown command).
	os.system("shutdown /r /t 1")
def shutdown():
	# Power off the host machine after a 1 second delay (Windows shutdown command).
	os.system("shutdown /s /t 1")
# Web Panel
@site.route('/panel', methods=['GET'])
def panelSite():
cpuData=[]
for i, percentage in enumerate(psutil.cpu_percent(percpu=True,interval=1)):
core_name = f"#{i}"
core_usage = f"{percentage}%"
cpuData.append((
core_name,
core_usage
))
return render_template(
"panel.html",
systemNickname=you,
activePythonThreads=threading.active_count(),
latency=round(client.latency*1000),
day=datetime.datetime.fromtimestamp(psutil.boot_time()).day,
month=datetime.datetime.fromtimestamp(psutil.boot_time()).month,
year=datetime.datetime.fromtimestamp(psutil.boot_time()).year,
hour=datetime.datetime.fromtimestamp(psutil.boot_time()).hour,
minute=datetime.datetime.fromtimestamp(psutil.boot_time()).minute,
second=datetime.datetime.fromtimestamp(psutil.boot_time()).second,
systemName=platform.uname().system,
nodeName=platform.uname().node,
release=platform.uname().release,
version=platform.uname().version,
machine=platform.uname().machine,
processor=platform.uname().processor,
cpuCores=psutil.cpu_count(logical=False),
cpuLogicalCores=psutil.cpu_count(logical=True),
cpuMaxFreq=f"{psutil.cpu_freq().max:.2f}",
cpuMinFreq=f"{psutil.cpu_freq().min:.2f}",
cpuCurFreq=f"{psutil.cpu_freq().current:.2f}",
cpuCoreUsage=tabulate(cpuData,headers=("Core #", "Usage %")),
cpuUsage=psutil.cpu_percent(),
memoryTotal=convertSize(psutil.virtual_memory().total)["value"],
memoryTotalUnit=convertSize(psutil.virtual_memory().total)["unit"],
memoryAvailable=convertSize(psutil.virtual_memory().available)["value"],
memoryAvailableUnit=convertSize(psutil.virtual_memory().available)["unit"],
memoryUsed=convertSize(psutil.virtual_memory().used)["value"],
memoryUsedUnit=convertSize(psutil.virtual_memory().used)["unit"],
memoryUsage=psutil.virtual_memory().percent,
swapTotal=convertSize(psutil.swap_memory().total)["value"],
swapTotalUnit=convertSize(psutil.swap_memory().total)["unit"],
swapAvailable=convertSize(psutil.swap_memory().free)["value"],
swapAvailableUnit=convertSize(psutil.swap_memory().free)["unit"],
swapUsed=convertSize(psutil.swap_memory().used)["value"],
swapUsedUnit=convertSize(psutil.swap_memory().used)["unit"],
swapUsage=psutil.swap_memory().percent
)
@site.route('/panel/controls', methods=['GET'])
def controlsSite():
return render_template(
"controls.html",
systemNickname=you
)
@site.route('/panel/controls', methods=['POST'])
def panelSite_post():
    # Run a submitted shell command, or exec() submitted Python, then redirect.
    command = request.form.get("commandVal")
    if command:
        os.system(str(command))
        return redirect('/panel/controls')
    code = request.form.get("evalpythonVal")
    if code:
        try:
            exec(str(code))
        except:
            print("Oopsie UwU")
    return redirect('/panel/controls')
@site.route('/panel/controls/shutdown')
def shutdownSite():
    shutdown()
    return redirect('/panel/controls', 302)
@site.route('/panel/controls/restart')
def restartSite():
    restart()
    return redirect('/panel/controls', 302)
def flaskThread():
global panelStatus
panelStatus = True
site.run(host="0.0.0.0",port=PanelPort,debug=False)
# Discord Bot
@client.event
async def on_ready():
await client.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=status))
print('Launching BOT 100/100%')
@client.event
async def on_command_error(ctx, error):
embedVar = discord.Embed(title="Failure", description="", color=0x2F3136)
embedVar.add_field(name=f"Failed to execute action", value=f"Error: `{error}`")
await ctx.reply(embed=embedVar, mention_author=True)
@client.command(aliases=['runBash'])
async def bash(ctx,*,cmd):
    # os.system returns the command's exit status; that status is what gets sent.
    await ctx.send(content=str(os.system(cmd)))
@client.command(aliases=['run','runCode','runEval'])
async def eval(ctx,*,cmd):
    # Despite the name, this exec()s arbitrary Python sent over Discord.
    exec(cmd)
    await ctx.send(content=f"Ran code: ```{cmd}```")
@client.command(aliases=["information","botinfo","botinformation", "sysinfo", "systeminfo", "systeminformation", "sysinformation"])
async def info(ctx):
embedVar = discord.Embed(title="ProCTRL", description=" ", color=0x2F3136)
embedVar.add_field(name=f"Python", value=f"Active Python Threads: {threading.active_count()}",inline=True)
embedVar.add_field(name=f"Discord Bot", value=f"Ping: {round(client.latency*1000)}ms",inline=True)
embedVar.add_field(name=f"Web Panel", value=f"Panel Online: {panelStatus}\nLocal IP Address: [{str(socket.gethostbyname(str(socket.gethostname())))}](http://{str(socket.gethostbyname(str(socket.gethostname())))}:{str(PanelPort)}/panel)\nPort: {str(PanelPort)}",inline=True)
await ctx.reply(embed=embedVar, mention_author=True)
print('Launching BOT 0/100%')
threading.Thread(target=flaskThread).start()
client.run(token)
| 47.77451 | 2,078 | 0.754566 | 0 | 0 | 0 | 0 | 4,912 | 0.504002 | 1,255 | 0.128771 | 4,725 | 0.484814 |
ada7202fef8ad2439af7731383fd655d6d607641 | 500 | py | Python | examples/nlp/cosmos_qa/simple.py | SebiSebi/DataMine | d2dd9ed7e2608918dd2908fa29238f600c768eb3 | [
"Apache-2.0"
] | 9 | 2020-07-01T21:53:36.000Z | 2020-12-15T08:49:08.000Z | examples/nlp/cosmos_qa/simple.py | ChewKokWah/DataMine | d2dd9ed7e2608918dd2908fa29238f600c768eb3 | [
"Apache-2.0"
] | 7 | 2020-04-04T19:30:16.000Z | 2020-06-26T12:18:10.000Z | examples/nlp/cosmos_qa/simple.py | ChewKokWah/DataMine | d2dd9ed7e2608918dd2908fa29238f600c768eb3 | [
"Apache-2.0"
] | 2 | 2020-03-21T13:55:27.000Z | 2020-07-01T21:53:38.000Z | import data_mine as dm
from data_mine.nlp.cosmos_qa import CosmosQAType
def main():
df = dm.COSMOS_QA(CosmosQAType.TRAIN)
print(df)
print("\n")
df = df.sample(n=1)
row = next(df.iterrows())[1]
print("Question: ", row.question, "\n")
print("Context: ", row.context, "\n")
for i, answer in enumerate(row.answers):
print("{}) {}".format(chr(ord('A') + i), answer))
print("\nCorrect answer: {}".format(row.correct))
if __name__ == "__main__":
main()
| 22.727273 | 57 | 0.606 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 78 | 0.156 |
ada7230865bf42e4c71fa3e36acd38626d2760ea | 365 | py | Python | egs/wsj/s5/steps/libs/nnet3/train/__init__.py | TiagoPellegrini/Kaldi | 8a7918f132c0385c9e922126ecad9c3aa48b5706 | [
"Apache-2.0"
] | null | null | null | egs/wsj/s5/steps/libs/nnet3/train/__init__.py | TiagoPellegrini/Kaldi | 8a7918f132c0385c9e922126ecad9c3aa48b5706 | [
"Apache-2.0"
] | null | null | null | egs/wsj/s5/steps/libs/nnet3/train/__init__.py | TiagoPellegrini/Kaldi | 8a7918f132c0385c9e922126ecad9c3aa48b5706 | [
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Vimal Manohar
# Apache 2.0
""" This library has classes and methods commonly used for training nnet3
neural networks.
It has separate submodules for frame-level objectives and chain objective:
frame_level_objf -- For both recurrent and non-recurrent architectures
chain_objf -- LF-MMI objective training
"""
import common
__all__ = ["common"]
| 22.8125 | 74 | 0.783562 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 331 | 0.906849 |
ada80991619b7b155d3bb022436ed577b941dc6b | 462 | py | Python | backend/connect.py | TrustedCapsules/policyBuilder | e9d7c36dcdc575b8ca5fc4c800e1b481e5bd8396 | [
"MIT"
] | null | null | null | backend/connect.py | TrustedCapsules/policyBuilder | e9d7c36dcdc575b8ca5fc4c800e1b481e5bd8396 | [
"MIT"
] | 6 | 2021-03-09T01:35:33.000Z | 2022-02-17T20:44:43.000Z | backend/connect.py | TrustedCapsules/keyserver | e9d7c36dcdc575b8ca5fc4c800e1b481e5bd8396 | [
"MIT"
] | null | null | null | # used for connecting to the trusted capsule server
import socket
TCP_IP = '127.0.0.1'
TCP_PORT = 4000
BUFFER_SIZE = 1024
MESSAGE = "Hello, World!"
def connect(ip: str, port: int, request: bytes):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, port))
s.send(request)
print("sent")
data = s.recv(BUFFER_SIZE)
s.close()
print("received data:", data)
connect(TCP_IP, TCP_PORT, bytes(MESSAGE, 'ascii'))
| 22 | 58 | 0.670996 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 121 | 0.261905 |
ada8b2005c7d589e939f2465bf207b722554b8a7 | 3,821 | py | Python | src/randonet/pytorch/transformer.py | ahgamut/randonet | b55241809318d70e97c7718b3fcdc91a7219f269 | [
"MIT"
] | null | null | null | src/randonet/pytorch/transformer.py | ahgamut/randonet | b55241809318d70e97c7718b3fcdc91a7219f269 | [
"MIT"
] | null | null | null | src/randonet/pytorch/transformer.py | ahgamut/randonet | b55241809318d70e97c7718b3fcdc91a7219f269 | [
"MIT"
] | null | null | null |
from randonet.generator.param import Param, IntParam, FloatParam, BinaryParam, ChoiceParam, TupleParam
from randonet.generator.unit import Unit, Factory as _Factory
from randonet.generator.conv import ConvFactory, ConvTransposeFactory
from collections import namedtuple
class TransformerEncoder(_Factory):
def __init__(self, **kwargs):
_Factory.__init__(self)
self.template_fn = namedtuple("TransformerEncoder", ['encoder_layer', 'num_layers', 'norm'])
self.params = self.template_fn(
encoder_layer=Param(name="encoder_layer", default=None),
num_layers=Param(name="num_layers", default=None),
norm=Param(name="norm", default=None),
)
for k,v in kwargs.items():
getattr(self.params, k).val = v
class TransformerDecoder(_Factory):
def __init__(self, **kwargs):
_Factory.__init__(self)
self.template_fn = namedtuple("TransformerDecoder", ['decoder_layer', 'num_layers', 'norm'])
self.params = self.template_fn(
decoder_layer=Param(name="decoder_layer", default=None),
num_layers=Param(name="num_layers", default=None),
norm=Param(name="norm", default=None),
)
for k,v in kwargs.items():
getattr(self.params, k).val = v
class TransformerEncoderLayer(_Factory):
def __init__(self, **kwargs):
_Factory.__init__(self)
self.template_fn = namedtuple("TransformerEncoderLayer", ['d_model', 'nhead', 'dim_feedforward', 'dropout', 'activation'])
self.params = self.template_fn(
d_model=Param(name="d_model", default=None),
nhead=Param(name="nhead", default=None),
dim_feedforward=IntParam(name="dim_feedforward", default=2048),
dropout=FloatParam(name="dropout", default=0.1),
activation=ChoiceParam(name="activation", choices=("relu",), cprobs=(1,), default="relu"),
)
for k,v in kwargs.items():
getattr(self.params, k).val = v
class TransformerDecoderLayer(_Factory):
def __init__(self, **kwargs):
_Factory.__init__(self)
self.template_fn = namedtuple("TransformerDecoderLayer", ['d_model', 'nhead', 'dim_feedforward', 'dropout', 'activation'])
self.params = self.template_fn(
d_model=Param(name="d_model", default=None),
nhead=Param(name="nhead", default=None),
dim_feedforward=IntParam(name="dim_feedforward", default=2048),
dropout=FloatParam(name="dropout", default=0.1),
activation=ChoiceParam(name="activation", choices=("relu",), cprobs=(1,), default="relu"),
)
for k,v in kwargs.items():
getattr(self.params, k).val = v
class Transformer(_Factory):
def __init__(self, **kwargs):
_Factory.__init__(self)
self.template_fn = namedtuple("Transformer", ['d_model', 'nhead', 'num_encoder_layers', 'num_decoder_layers', 'dim_feedforward', 'dropout', 'activation', 'custom_encoder', 'custom_decoder'])
self.params = self.template_fn(
d_model=IntParam(name="d_model", default=512),
nhead=IntParam(name="nhead", default=8),
num_encoder_layers=IntParam(name="num_encoder_layers", default=6),
num_decoder_layers=IntParam(name="num_decoder_layers", default=6),
dim_feedforward=IntParam(name="dim_feedforward", default=2048),
dropout=FloatParam(name="dropout", default=0.1),
activation=ChoiceParam(name="activation", choices=("relu",), cprobs=(1,), default="relu"),
custom_encoder=Param(name="custom_encoder", default=None),
custom_decoder=Param(name="custom_decoder", default=None),
)
for k,v in kwargs.items():
getattr(self.params, k).val = v
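# A minimal usage sketch (keyword names mirror the namedtuple fields defined
# above; the `.val` attribute is what __init__ assigns into):
#   factory = TransformerEncoderLayer(d_model=256, nhead=4, dropout=0.2)
#   factory.params.dropout.val  # -> 0.2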
| 46.597561 | 198 | 0.649045 | 3,534 | 0.924889 | 0 | 0 | 0 | 0 | 0 | 0 | 739 | 0.193405 |
ada8e3e91d5ceb281c58cfe996d8f629dc28e927 | 6,566 | py | Python | ooobuild/lo/rendering/x_sprite.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/rendering/x_sprite.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/rendering/x_sprite.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.rendering
import typing
from abc import abstractmethod
from ..uno.x_interface import XInterface as XInterface_8f010a43
if typing.TYPE_CHECKING:
from ..geometry.affine_matrix2_d import AffineMatrix2D as AffineMatrix2D_ff040da8
from ..geometry.real_point2_d import RealPoint2D as RealPoint2D_d6e70c78
from .render_state import RenderState as RenderState_e4490d27
from .view_state import ViewState as ViewState_cab30c62
from .x_poly_polygon2_d import XPolyPolygon2D as XPolyPolygon2D_e1b0e20
class XSprite(XInterface_8f010a43):
"""
Interface to control a sprite object.
This is the basic interface to control a sprite object on a XSpriteCanvas. Sprites are moving, back-buffered objects.
See Also:
`API XSprite <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1rendering_1_1XSprite.html>`_
"""
__ooo_ns__: str = 'com.sun.star.rendering'
__ooo_full_ns__: str = 'com.sun.star.rendering.XSprite'
__ooo_type_name__: str = 'interface'
__pyunointerface__: str = 'com.sun.star.rendering.XSprite'
@abstractmethod
def clip(self, aClip: 'XPolyPolygon2D_e1b0e20') -> None:
"""
Apply a clipping to the shape output.
The given clip poly-polygon is always interpreted in device coordinate space. As the sprite has its own local coordinate system, with its origin on screen being equal to its current position, the clip poly-polygon's origin will always coincide with the sprite's origin. Furthermore, if any sprite transformation is set via transform(), the clip is subject to this transformation, too. The implementation is free, if it has a cached representation of the sprite at hand, to clip-output only this cached representation (e.g. a bitmap), instead of re-rendering the sprite from first principles. This is usually the case for an implementation of a XCustomSprite interface, since it typically has no other cached pictorial information at hand.
Please note that if this sprite is not animated, the associated XSpriteCanvas does not update changed sprites automatically, but has to be told to do so via XSpriteCanvas.updateScreen().
Specifying an empty interface denotes no clipping, i.e. everything contained in the sprite will be visible (subject to device-dependent constraints, of course). Specifying an empty XPolyPolygon2D, i.e. a poly-polygon containing zero polygons, or an XPolyPolygon2D with any number of empty sub-polygons, denotes the NULL clip. That means, nothing from the sprite will be visible.
"""
@abstractmethod
def hide(self) -> None:
"""
Make the sprite invisible.
This method makes the sprite invisible.
"""
@abstractmethod
def move(self, aNewPos: 'RealPoint2D_d6e70c78', aViewState: 'ViewState_cab30c62', aRenderState: 'RenderState_e4490d27') -> None:
"""
Move sprite to the specified position.
The position specified here is first transformed by the combined view and render transformation. The resulting position is then used as the output position (also in device coordinates) of the rendered sprite content.
Please note that if this sprite is not animated, the associated XSpriteCanvas does not update changed sprites automatically, but has to be told to do so via XSpriteCanvas.updateScreen().
Raises:
com.sun.star.lang.IllegalArgumentException: ``IllegalArgumentException``
"""
@abstractmethod
def setAlpha(self, nAlpha: float) -> None:
"""
Set overall transparency of the sprite.
This method is useful for e.g. fading in/out of animations.
Please note that if this sprite is not animated, the associated XSpriteCanvas does not update changed sprites automatically, but has to be told to do so via XSpriteCanvas.updateScreen().
Raises:
com.sun.star.lang.IllegalArgumentException: ``IllegalArgumentException``
"""
@abstractmethod
def setPriority(self, nPriority: float) -> None:
"""
Set sprite priority.
The sprite priority determines the order of rendering relative to all other sprites of the associated canvas. The higher the priority, the later will the sprite be rendered, or, in other words, the closer to the screen surface the sprite is shown.
"""
@abstractmethod
def show(self) -> None:
"""
Make the sprite visible.
This method makes the sprite visible on the canvas it was created on.
"""
@abstractmethod
def transform(self, aTransformation: 'AffineMatrix2D_ff040da8') -> None:
"""
Apply a local transformation to the sprite.
The given transformation matrix locally transforms the sprite shape. If this transformation contains translational components, be aware that sprite content moved beyond the sprite area (a box from (0,0) to (spriteWidth,spriteHeight)) might (but need not) be clipped. Use XSprite.move() to change the sprite location on screen. The canvas implementations are free, if they have a cached representation of the sprite at hand, to transform only this cached representation (e.g. a bitmap), instead of re-rendering the sprite from first principles. This is usually the case for an implementation of a XCustomSprite interface, since it typically has no other cached pictorial information at hand.
Please note that if this sprite is not animated, the associated XSpriteCanvas does not update changed sprites automatically, but has to be told to do so via XSpriteCanvas.updateScreen().
Raises:
com.sun.star.lang.IllegalArgumentException: ``IllegalArgumentException``
"""
__all__ = ['XSprite']
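# Illustrative only -- a sketch of driving a concrete sprite through this
# interface. Obtaining `sprite`, `pos`, `view_state` and `render_state` from an
# XSpriteCanvas is outside this module and merely assumed here:
#
#   sprite.setAlpha(0.5)                        # fade to 50% opacity
#   sprite.move(pos, view_state, render_state)
#   sprite.show()  # non-animated sprites still need XSpriteCanvas.updateScreen()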
| 56.603448 | 746 | 0.730429 | 5,297 | 0.806732 | 0 | 0 | 4,692 | 0.71459 | 0 | 0 | 5,360 | 0.816327 |
adabd3e05885daf5de02dbd612d161741ede8bf7 | 548 | py | Python | my_shelter/dog_shelters/serializers.py | seajhawk/DjangoCRUD | 215317e65f6775ea06ffbadf52794b082e5cd978 | [
"CC-BY-4.0",
"MIT"
] | null | null | null | my_shelter/dog_shelters/serializers.py | seajhawk/DjangoCRUD | 215317e65f6775ea06ffbadf52794b082e5cd978 | [
"CC-BY-4.0",
"MIT"
] | null | null | null | my_shelter/dog_shelters/serializers.py | seajhawk/DjangoCRUD | 215317e65f6775ea06ffbadf52794b082e5cd978 | [
"CC-BY-4.0",
"MIT"
] | null | null | null | from rest_framework import serializers
from . import models
class ShelterSerializer(serializers.ModelSerializer):
class Meta:
model = models.Shelter
fields = ('name',
'location')
class DogSerializer(serializers.ModelSerializer):
class Meta:
model = models.Dog
fields = ('shelter',
'name',
'description',
'intake_date')
class ErrorSerializer(serializers.Serializer):
error_message = serializers.CharField(max_length=200)
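# A minimal usage sketch (standard DRF pattern; `dog` is an assumed
# models.Dog instance, not defined in this module):
#   serializer = DogSerializer(dog)
#   serializer.data  # {'shelter': ..., 'name': ..., 'description': ..., 'intake_date': ...}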
| 24.909091 | 57 | 0.614964 | 473 | 0.863139 | 0 | 0 | 0 | 0 | 0 | 0 | 57 | 0.104015 |
adad66563eecc2d8e15824bb2b7f43a81d825708 | 2,146 | py | Python | astropy/utils/tests/test_xml.py | xiaomi1122/astropy | 8876e902f5efa02a3fc27d82fe15c16001d4df5e | [
"BSD-3-Clause"
] | null | null | null | astropy/utils/tests/test_xml.py | xiaomi1122/astropy | 8876e902f5efa02a3fc27d82fe15c16001d4df5e | [
"BSD-3-Clause"
] | null | null | null | astropy/utils/tests/test_xml.py | xiaomi1122/astropy | 8876e902f5efa02a3fc27d82fe15c16001d4df5e | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ...extern import six
import io
from ..xml import check, unescaper, writer
def test_writer():
fh = io.StringIO()
w = writer.XMLWriter(fh)
with w.tag("html"):
with w.tag("body"):
w.data("This is the content")
w.comment("comment")
value = ''.join(fh.getvalue().split())
assert value == '<html><body>Thisisthecontent<!--comment--></body></html>'
def test_check_id():
assert check.check_id("Fof32")
assert check.check_id("_Fof32")
assert not check.check_id("32Fof")
def test_fix_id():
assert check.fix_id("Fof32") == "Fof32"
assert check.fix_id("@#f") == "___f"
def test_check_token():
assert check.check_token("token")
assert not check.check_token("token\rtoken")
def test_check_mime_content_type():
assert check.check_mime_content_type("image/jpeg")
assert not check.check_mime_content_type("image")
def test_check_anyuri():
assert check.check_anyuri("https://github.com/astropy/astropy")
def test_unescape_all():
# str
url_in = 'http://casu.ast.cam.ac.uk/ag/iphas-dsa/SubmitCone?' \
'DSACAT=IDR&amp;DSATAB=Emitters&amp;'
url_out = 'http://casu.ast.cam.ac.uk/ag/iphas-dsa/SubmitCone?' \
'DSACAT=IDR&DSATAB=Emitters&'
assert unescaper.unescape_all(url_in) == url_out
# bytes
url_in = b'http://casu.ast.cam.ac.uk/ag/iphas-dsa/SubmitCone?' \
b'DSACAT=IDR&amp;DSATAB=Emitters&amp;'
url_out = b'http://casu.ast.cam.ac.uk/ag/iphas-dsa/SubmitCone?' \
b'DSACAT=IDR&DSATAB=Emitters&'
assert unescaper.unescape_all(url_in) == url_out
def test_escape_xml():
s = writer.xml_escape('This & That')
assert type(s) == six.text_type
assert s == 'This & That'
s = writer.xml_escape('This & That')
assert type(s) == six.text_type
assert s == 'This & That'
s = writer.xml_escape(1)
assert type(s) == six.text_type
assert s == '1'
| 27.87013 | 78 | 0.647251 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 723 | 0.336906 |
adae47876e26fe1fa608db9baae106b2a7e103a7 | 1,688 | py | Python | demo/multimodal/offline/txt2img/index_and_export/src/indexing/milvus/pull.py | meta-soul/MetaSpore | e6fbc12c6a3139df76c87215b16f9dba65962ec7 | [
"Apache-2.0"
] | 32 | 2022-03-30T10:24:00.000Z | 2022-03-31T16:19:15.000Z | demo/multimodal/offline/QA/index_and_export/src/indexing/milvus/pull.py | meta-soul/MetaSpore | e6fbc12c6a3139df76c87215b16f9dba65962ec7 | [
"Apache-2.0"
] | null | null | null | demo/multimodal/offline/QA/index_and_export/src/indexing/milvus/pull.py | meta-soul/MetaSpore | e6fbc12c6a3139df76c87215b16f9dba65962ec7 | [
"Apache-2.0"
] | 3 | 2022-03-30T10:28:57.000Z | 2022-03-30T11:37:39.000Z | #
# Copyright 2022 DMetaSoul
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from indexing.milvus.utils import get_base_parser, get_collection
def parse_args():
parser = get_base_parser()
parser.add_argument(
"--index-field", type=str, required=True
)
parser.add_argument(
"--vector", type=str, required=True, help="The embedding vector string split by comma"
)
parser.add_argument(
"--limit", type=int, default=10
)
args = parser.parse_args()
return args
def main():
args = parse_args()
collection = get_collection(args.host, args.port, args.collection_name)
assert collection is not None, "Collection {} not exists!".format(args.collection_name)
search_params = {
"metric_type": args.ann_metric_type,
"params": {"nprobe": args.ann_param_nprobe}
}
results = collection.search(
data=[[float(v) for v in args.vector.split(',')]],
anns_field=args.index_field,
param=search_params,
limit=args.limit,
expr=None,
consistency_level="Strong"
)
return results
if __name__ == '__main__':
results = main()
print(results)
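# Example invocation (the --host/--port/--collection-name/--ann-* flags are
# defined in get_base_parser and assumed here; the vector values are made up):
#   python pull.py --host 127.0.0.1 --port 19530 --collection-name demo \
#       --index-field embedding --vector "0.1,0.2,0.3" --limit 5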
| 29.103448 | 94 | 0.680687 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 720 | 0.42654 |
adaefc69f5b4fbb153960dd99d6e8f670b047662 | 6,174 | py | Python | lab6/lab6_ttt.py | macarl08/esc180_coursework | 16f2adda1f35875b91020e72cb4180d2e45690ce | [
"MIT"
] | null | null | null | lab6/lab6_ttt.py | macarl08/esc180_coursework | 16f2adda1f35875b91020e72cb4180d2e45690ce | [
"MIT"
] | null | null | null | lab6/lab6_ttt.py | macarl08/esc180_coursework | 16f2adda1f35875b91020e72cb4180d2e45690ce | [
"MIT"
] | null | null | null | # ESC180 Lab 6
# lab6_ttt.py
# Oct 15, 2021
# Done in collaboration by:
# Ma, Carl Ka To (macarl1) and
# Xu, Shen Xiao Zhu (xushenxi)
'''
X | O | X
---+---+---
O | O | X
---+---+---
| X |
'''
import random
def print_board_and_legend(board):
for i in range(3):
line1 = " " + board[i][0] + " | " + board[i][1] + " | " + board[i][2]
line2 = " " + str(3*i+1) + " | " + str(3*i+2) + " | " + str(3*i+3)
print(line1 + " "*5 + line2)
if i < 2:
print("---+---+---" + " "*5 + "---+---+---")
print("\n\n")
def make_empty_board():
board = []
for i in range(3):
board.append([" "]*3)
return board
# ------------------- PROBLEM 1---------------------------------------
def getCoord(square_num): # Problem 1a
return [((square_num - 1) // 3), ((square_num - 1) % 3)]
def put_in_board(board, mark, square_num): # Problem 1b
board[square_num[0]][square_num[1]] = mark
# ------------------- PROBLEM 2---------------------------------------
def get_free_squares(board): # Problem 2a
free_squares = []
for i in range(len(board)):
for j in range(len(board[0])):
if board[i][j] == " ":
free_squares.append([i,j])
return free_squares
def make_random_move(board, mark): # Problem 2b
free_squares = get_free_squares(board)
n = int(len(free_squares) * random.random())
square_num = free_squares[n]
board[square_num[0]][square_num[1]] = mark
return (square_num[0])*3 + (square_num[1]+1)
# ------------------- PROBLEM 3---------------------------------------
def is_row_all_marks(board, row_i, mark): # Problem 3a
if board[row_i] == [mark] * 3:
return True
else:
return False
def is_col_all_marks(board, col_i, mark): # Problem 3b
col = []
for row in range(len(board)):
col.append(board[row][col_i])
if col == [mark] * 3:
return True
else:
return False
def is_win(board, mark): # Problem 3c
dia_win = board[0][0]==board[1][1]==board[2][2]==mark or board[0][2]==board[1][1]==board[2][0]==mark
if dia_win:
return True
for i in range(3):
if is_row_all_marks(board, i, mark) or is_col_all_marks(board, i, mark):
return True
return False
# ------------------- PROBLEM 4---------------------------------------
def make_improved_move(board, mark): # Problem 4a
free_squares = get_free_squares(board)
for square_num in free_squares:
temp_board = [inner_list[:] for inner_list in board]
temp_board[square_num[0]][square_num[1]] = mark
if is_win(temp_board,mark):
board[square_num[0]][square_num[1]] = mark
return (square_num[0])*3 + (square_num[1]+1)
return make_random_move(board, mark)
def minimax(board, to_max):
# Here we follow our assumption that the player goes first
# i.e. player's mark == "X"; computer's mark == "O"
if is_win(board, "X") or board[0][0]==board[1][1]==board[2][2]=="X":
return -10
if is_win(board, "O")or board[0][0]==board[1][1]==board[2][2]=="O":
return 10
free_squares = get_free_squares(board)
if len(free_squares)==0:
return 0
if to_max:
best = -1000
for square_num in free_squares:
board[square_num[0]][square_num[1]] = "O"
best = max(best, minimax(board, not to_max))
board[square_num[0]][square_num[1]] = " "
return best
else:
best = 1000
for square_num in free_squares:
board[square_num[0]][square_num[1]] = "X"
best = min(best, minimax(board, not to_max))
board[square_num[0]][square_num[1]] = " "
return best
def make_minimax_move(board):
best_value = -1000
best_move = None
free_squares = get_free_squares(board)
for square_num in free_squares:
board[square_num[0]][square_num[1]] = "O"
minimax_value = minimax(board, False)
board[square_num[0]][square_num[1]] = " "
if minimax_value > best_value:
best_move = square_num
best_value = minimax_value
board[best_move[0]][best_move[1]] = "O"
print("The computer has made a move!")
return (best_move[0])*3 + (best_move[1]+1)
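# A quick sanity check (not part of the lab run): with O to move on
#   [["O", "O", " "],
#    ["X", "X", "O"],
#    ["X", " ", " "]]
# placing "O" in square 3 completes the top row, so make_minimax_move returns 3.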
# ----------------------------------------------------------
if __name__ == '__main__':
tie = True
    board = make_empty_board()
    '''board = [["O", "X", "X"],
                [" ", "X", " "],
                [" ", "O", " "]]'''
    print_board_and_legend(board)
print(getCoord(6)) # Problem 1
move = ""
valid_moves = list(range(1,10))
print(valid_moves)
'''board = [["O", " ", "X"],
[" ", "X", " "],
["O", " ", " "]]
print(get_free_squares(board))'''
'''
for i in range(9):
while move not in valid_moves:
move = int(input("Enter your move: "))
valid_moves.remove(move)
if i % 2 == 0:
put_in_board(board,"X",getCoord(move))
else:
put_in_board(board,"O",getCoord(move))
print_board_and_legend(board)'''
for i in range(9):
if i % 2 == 0:
while move not in valid_moves:
move = int(input("Enter your move: "))
valid_moves.remove(move)
put_in_board(board,"X",getCoord(move))
if is_win(board,"X"):
print("X wins")
tie = False
break
else:
#temp = make_improved_move(board, "O")
temp = make_minimax_move(board)
valid_moves.remove(temp)
if is_win(board,"O"):
print("O wins")
tie = False
break
print_board_and_legend(board)
print_board_and_legend(board)
if tie:
print("Tie!")
| 26.384615 | 104 | 0.495627 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,568 | 0.253968 |
adb02e5a94ebf7c392ba77f23c61612b29f40826 | 3,503 | py | Python | sopel/modules/morestuff.py | paulmadore/funkshelper | cfe60ef4015381b6c4fb01e453523d571af0b2de | [
"EFL-2.0"
] | null | null | null | sopel/modules/morestuff.py | paulmadore/funkshelper | cfe60ef4015381b6c4fb01e453523d571af0b2de | [
"EFL-2.0"
] | null | null | null | sopel/modules/morestuff.py | paulmadore/funkshelper | cfe60ef4015381b6c4fb01e453523d571af0b2de | [
"EFL-2.0"
] | null | null | null | #!/usr/bin/python3
# coding=utf-8
"""
Chuck Norris and Other Jokes Module copyright 2015 phm.link
Licensed under Mozilla Public License Version 2.
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
import json
import time
import requests
from sopel.module import commands, rule
@commands('moviequote')
@rule('$nickname moviequote')
def movieQuote(bot, trigger):
quote = requests.post("https://andruxnet-random-famous-quotes.p.mashape.com/?cat=movies",
headers={
"X-Mashape-Key": "7RNWkrbr2ymshIHSDARCfgVApFD7p1zhK8Kjsn4gzaFXaXCIhM",
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "application/json"
}
)
quoteBody = quote.text
quoteFinal = json.loads(quoteBody)
quoteText = quoteFinal['quote']
quoteAuthor = quoteFinal['author']
bot.say(quoteText + ' (from "' + quoteAuthor + '")')
@commands('quote')
@rule('$nickname quote')
def famousQuote(bot, trigger):
quote = requests.post("https://andruxnet-random-famous-quotes.p.mashape.com/?cat=famous",
headers={
"X-Mashape-Key": "7RNWkrbr2ymshIHSDARCfgVApFD7p1zhK8Kjsn4gzaFXaXCIhM",
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "application/json"
}
)
quoteBody = quote.text
quoteFinal = json.loads(quoteBody)
quoteText = quoteFinal['quote']
quoteAuthor = quoteFinal['author']
bot.say(quoteText + ' -- ' + quoteAuthor)
@commands('mathfact')
@rule('$nickname mathfact')
def mathFact(bot, trigger):
fact = requests.get("https://numbersapi.p.mashape.com/random/trivia?fragment=true&json=true&max=100000000&min=1",
headers={
"X-Mashape-Key": "7RNWkrbr2ymshIHSDARCfgVApFD7p1zhK8Kjsn4gzaFXaXCIhM",
"Accept": "text/plain"
}
)
factBody = fact.text
factFinal = json.loads(factBody)
factText = factFinal['text']
number = factFinal['number']
bot.say(str(number) + ' is ' + factText)
@commands('factabt')
@rule('$nickname factabt')
def mathSpecificFact(bot, trigger):
fact = requests.get("https://numbersapi.p.mashape.com/" + trigger.group(2) + "/math?fragment=true&json=true",
headers={
"X-Mashape-Key": "7RNWkrbr2ymshIHSDARCfgVApFD7p1zhK8Kjsn4gzaFXaXCIhM",
"Accept": "text/plain"
}
)
factBody = fact.text
factFinal = json.loads(factBody)
factText = factFinal['text']
number = factFinal['number']
bot.say(str(number) + ' is ' + factText)
@commands('gibberish')
@rule('$nickname gibberish')
def sayGibberish(bot, trigger):
    # open(...).close() returns None, so keep the filename separately.
    gibberishFile = str(time.time()) + '.txt'
    open(gibberishFile, 'w').close()
    gibberishInput = int(trigger.group(2))
    for _ in range(gibberishInput):
        gibberish = requests.get("https://wordsapiv1.p.mashape.com/words/?random=true",
            headers={
                "X-Mashape-Key": "7RNWkrbr2ymshIHSDARCfgVApFD7p1zhK8Kjsn4gzaFXaXCIhM",
                "Accept": "application/json"
            }
        )
        gibberishOutput = json.loads(gibberish.text)
        gibText = gibberishOutput["word"]
        with open(gibberishFile, 'a') as outputFile:
            # separate the words; the with-block closes the file itself
            outputFile.write(gibText + ' ')
    with open(gibberishFile, 'r') as gibberishOutputFinal:
        actualFinal = gibberishOutputFinal.read().strip()
    bot.say(actualFinal)
| 33.04717 | 117 | 0.679989 | 0 | 0 | 0 | 0 | 3,041 | 0.868113 | 0 | 0 | 1,308 | 0.373394 |
adb03a7f7ff2aada0a21c0bb31a6b843a2c5e949 | 10,569 | py | Python | scripts/dqn_utils.py | woowonjin/Reinforcement_Leraning_for_Optimal_Sepsis_Treatment | caf78cf5406194868e40ecc5e1a30d8b060fce18 | [
"MIT"
] | null | null | null | scripts/dqn_utils.py | woowonjin/Reinforcement_Leraning_for_Optimal_Sepsis_Treatment | caf78cf5406194868e40ecc5e1a30d8b060fce18 | [
"MIT"
] | null | null | null | scripts/dqn_utils.py | woowonjin/Reinforcement_Leraning_for_Optimal_Sepsis_Treatment | caf78cf5406194868e40ecc5e1a30d8b060fce18 | [
"MIT"
] | 1 | 2022-02-04T10:42:09.000Z | 2022-02-04T10:42:09.000Z | """
The classes and methods in this file are derived or pulled directly from https://github.com/sfujim/BCQ/tree/master/discrete_BCQ
which is a discrete implementation of BCQ by Scott Fujimoto, et al. and featured in the following 2019 DRL NeurIPS workshop paper:
@article{fujimoto2019benchmarking,
title={Benchmarking Batch Deep Reinforcement Learning Algorithms},
author={Fujimoto, Scott and Conti, Edoardo and Ghavamzadeh, Mohammad and Pineau, Joelle},
journal={arXiv preprint arXiv:1910.01708},
year={2019}
}
============================================================================================================================
This code is provided under the MIT License and is meant to be helpful, but WITHOUT ANY WARRANTY;
November 2020 by Taylor Killian and Haoran Zhang; University of Toronto + Vector Institute
============================================================================================================================
Notes:
"""
import argparse
import copy
import importlib
import json
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import utils
from utils import prepare_bc_data
from torch.utils.tensorboard import SummaryWriter
# Simple full-connected supervised network for Behavior Cloning of batch data
class FC_BC(nn.Module):
def __init__(self, state_dim=33, num_actions=25, num_nodes=256):
super(FC_BC, self).__init__()
self.l1 = nn.Linear(state_dim, num_nodes)
self.bn1 = nn.BatchNorm1d(num_nodes)
self.l2 = nn.Linear(num_nodes, num_nodes)
self.bn2 = nn.BatchNorm1d(num_nodes)
self.l3 = nn.Linear(num_nodes, num_actions)
def forward(self, state):
out = F.relu(self.l1(state))
out = self.bn1(out)
out = F.relu(self.l2(out))
out = self.bn2(out)
return self.l3(out)
class BehaviorCloning(object):
def __init__(self, input_dim, num_actions, num_nodes=256, learning_rate=1e-3, weight_decay=0.1, optimizer_type='adam', device='cuda'):
'''Implement a fully-connected network that produces a supervised prediction of the actions
preserved in the collected batch of data following observations of patient health.
INPUTS:
input_dim: int, the dimension of an input array. Default: 33
num_actions: int, the number of actions available to choose from. Default: 25
num_nodes: int, the number of nodes
'''
self.device = device
self.state_shape = input_dim
self.num_actions = num_actions
self.lr = learning_rate
# Initialize the network
self.model = FC_BC(input_dim, num_actions, num_nodes).to(self.device)
self.loss_func = nn.CrossEntropyLoss()
if optimizer_type == 'adam':
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=weight_decay)
else:
self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.lr, momentum=0.9, weight_decay=weight_decay)
self.iterations = 0
def train_epoch(self, train_dataloader, dem_context):
'''Sample batches of data from training dataloader, predict actions using the network,
Update the parameters of the network using CrossEntropyLoss.'''
losses = []
# Loop through the training data
for dem, ob, ac, l, t, scores, _, _ in train_dataloader:
state, action = prepare_bc_data(dem, ob, ac, l, t, dem_context)
state = state.to(self.device)
action = action.to(self.device)
# Predict the action with the network
pred_actions = self.model(state)
            # Compute loss; skip the batch if shapes are malformed so that
            # loss.backward() below never sees an undefined loss
            try:
                loss = self.loss_func(pred_actions, action.flatten())
            except Exception as err:
                print("Skipping batch, loss computation failed:", err)
                continue
# Optimize the network
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
losses.append(loss.item())
self.iterations += 1
return np.mean(losses)
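# A minimal training-loop sketch (the dataloader tuple layout follows the
# unpacking in train_epoch above; the epoch count is arbitrary):
#   cloner = BehaviorCloning(input_dim=33, num_actions=25, device='cpu')
#   for epoch in range(50):
#       mean_loss = cloner.train_epoch(train_dataloader, dem_context=True)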
# Simple fully-connected Q-network for the policy
class FC_Q(nn.Module):
def __init__(self, state_dim, num_actions, num_nodes=256):
super(FC_Q, self).__init__()
self.q1 = nn.Linear(state_dim, num_nodes)
self.q2 = nn.Linear(num_nodes, num_nodes)
self.q3 = nn.Linear(num_nodes, num_actions)
def forward(self, state):
q = F.relu(self.q1(state))
q = F.relu(self.q2(q))
return self.q3(q)
class DQN(object):
def __init__(
self,
num_actions,
state_dim,
device,
discount=0.99,
optimizer="Adam",
optimizer_parameters={},
polyak_target_update=False,
target_update_frequency=1e3,
tau=0.005
):
self.device = device
# Determine network type
self.Q = FC_Q(state_dim, num_actions).to(self.device)
self.Q_target = copy.deepcopy(self.Q)
self.Q_optimizer = getattr(torch.optim, optimizer)(self.Q.parameters(), **optimizer_parameters)
self.discount = discount
# Target update rule
self.maybe_update_target = self.polyak_target_update if polyak_target_update else self.copy_target_update
self.target_update_frequency = target_update_frequency
self.tau = tau
# Evaluation hyper-parameters
self.state_shape = (-1, state_dim)
# self.eval_eps = eval_eps
self.num_actions = num_actions
# Number of training iterations
self.iterations = 0
def train(self, replay_buffer):
# Sample replay buffer
state, action, next_state, reward, done, obs_state, next_obs_state = replay_buffer.sample()
# Compute the target Q value
with torch.no_grad():
q_curr = self.Q(next_state)
            # Double-DQN style target: choose the action with the online
            # network, evaluate it with the target network below
            next_action = q_curr.argmax(1, keepdim=True)
q_target = self.Q_target(next_state)
#target_Q = 10*reward + done * self.discount * q_target.gather(1, next_action).reshape(-1, 1)
target_Q = reward + done * self.discount * q_target.gather(1, next_action).reshape(-1, 1)
# Get current Q estimate
current_Q = self.Q(state)
current_Q = current_Q.gather(1, action)
# Compute Q loss
Q_loss = F.smooth_l1_loss(current_Q, target_Q)
# Optimize the Q
self.Q_optimizer.zero_grad()
Q_loss.backward()
self.Q_optimizer.step()
# Update target network by polyak or full copy every X iterations.
self.iterations += 1
self.maybe_update_target()
return Q_loss.item(), target_Q
def polyak_target_update(self):
for param, target_param in zip(self.Q.parameters(), self.Q_target.parameters()):
target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
def copy_target_update(self):
if self.iterations % self.target_update_frequency == 0:
self.Q_target.load_state_dict(self.Q.state_dict())
def train_DQN(replay_buffer, num_actions, state_dim, device, parameters, behav_pol, pol_eval_dataloader, is_demog, writer):
# For saving files
pol_eval_file = parameters['pol_eval_file']
pol_file = parameters['policy_file']
buffer_dir = parameters['buffer_dir']
# Initialize and load policy
policy = DQN(
num_actions,
state_dim,
device,
parameters["discount"],
parameters["optimizer"],
parameters["optimizer_parameters"],
parameters["polyak_target_update"],
parameters["target_update_freq"],
parameters["tau"]
)
# Load replay buffer
replay_buffer.load(buffer_dir, bootstrap=True)
evaluations = []
training_iters = 0
while training_iters < parameters["max_timesteps"]:
for _ in range(int(parameters["eval_freq"])):
l, targ_q = policy.train(replay_buffer)
evaluations.append(eval_policy(policy, behav_pol, pol_eval_dataloader, parameters["discount"], is_demog, device)) # TODO Run weighted importance sampling with learned policy and behavior policy
np.save(pol_eval_file, evaluations)
torch.save({'policy_Q_function':policy.Q.state_dict(), 'policy_Q_target':policy.Q_target.state_dict()}, pol_file)
training_iters += int(parameters["eval_freq"])
print(f"Training iterations: {training_iters}")
writer.add_scalar('Loss', l, training_iters)
writer.add_scalar('Current Q value', torch.mean(targ_q), training_iters)
def eval_policy(policy, behav_policy, pol_dataloader, discount, is_demog, device):
wis_est = []
wis_returns = 0
wis_weighting = 0
# Loop through the dataloader (representations, observations, actions, demographics, rewards)
for representations, obs_state, actions, demog, rewards in pol_dataloader:
representations = representations.to(device)
obs_state = obs_state.to(device)
actions = actions.to(device)
demog = demog.to(device)
cur_obs, cur_actions = obs_state[:,:-2,:], actions[:,:-1,:].argmax(dim=-1)
cur_demog, cur_rewards = demog[:,:-2,:], rewards[:,:-2]
# Mask out the data corresponding to the padded observations
mask = (cur_obs==0).all(dim=2)
# Compute the discounted rewards for each trajectory in the minibatch
discount_array = torch.Tensor(discount**np.arange(cur_rewards.shape[1]))[None,:]
discounted_rewards = (discount_array * cur_rewards).sum(dim=-1).squeeze()
# Evaluate the probabilities of the observed action according to the trained policy and the behavior policy
with torch.no_grad():
if is_demog: # Gather the probability from the observed behavior policy
p_obs = F.softmax(behav_policy(torch.cat((cur_obs.flatten(end_dim=1), cur_demog.flatten(end_dim=1)), dim=-1)), dim=-1).gather(1, cur_actions.flatten()[:,None]).reshape(cur_obs.shape[:2])
else:
p_obs = F.softmax(behav_policy(cur_obs.flatten(end_dim=1)), dim=-1).gather(1, cur_actions.flatten()[:,None]).reshape(cur_obs.shape[:2])
q_val = policy.Q(representations) # Compute the Q values of the dBCQ policy
p_new = F.softmax(q_val, dim=-1).gather(2, cur_actions[:,:,None]).squeeze() # Gather the probabilities from the trained policy
# Check for whether there are any zero probabilities in p_obs and replace with small probability since behav_pol may mispredict actual actions...
if not (p_obs > 0).all():
p_obs[p_obs==0] = 0.1
# Eliminate spurious probabilities due to padded observations after trajectories have concluded
# We do this by forcing the probabilities for these observations to be 1 so they don't affect the product
p_obs[mask] = 1.
p_new[mask] = 1.
cum_ir = torch.clamp((p_new / p_obs).prod(axis=1), 1e-30, 1e4)
# wis_idx = (cum_ir > 0) # TODO check that wis_idx isn't empty (all zero)
# if wis_idx.sum() == 0:
# import pdb; pdb.set_trace()
# wis = (cum_ir / cum_ir.mean()).cpu() * discounted_rewards # TODO check that there aren't any nans
# wis_est.extend(wis.cpu().numpy())
wis_rewards = cum_ir.cpu() * discounted_rewards
wis_returns += wis_rewards.sum().item()
wis_weighting += cum_ir.cpu().sum().item()
wis_eval = (wis_returns / wis_weighting)
print("---------------------------------------")
print(f"Evaluation over the test set: {wis_eval:.3f}")
print("---------------------------------------")
return wis_eval
| 34.42671 | 196 | 0.713123 | 4,998 | 0.472892 | 0 | 0 | 0 | 0 | 0 | 0 | 3,876 | 0.366733 |
adb0713e16ed17691a6fc614d394bfcc1510b88b | 684 | py | Python | src/LedgerCompliance/utils.py | vchain-us/ledger-compliance-py | 30ae81e703178e8b8b4b7fb6a9b7d4467c607d53 | [
"Apache-2.0"
] | 3 | 2020-12-20T18:30:12.000Z | 2021-05-31T17:02:15.000Z | src/LedgerCompliance/utils.py | vchain-us/ledger-compliance-py | 30ae81e703178e8b8b4b7fb6a9b7d4467c607d53 | [
"Apache-2.0"
] | null | null | null | src/LedgerCompliance/utils.py | vchain-us/ledger-compliance-py | 30ae81e703178e8b8b4b7fb6a9b7d4467c607d53 | [
"Apache-2.0"
] | 1 | 2020-12-04T18:04:20.000Z | 2020-12-04T18:04:20.000Z | import struct
_SetSeparator=b"_~|IMMU|~_"
def wrap_zindex_ref(key: bytes, index) -> bytes:
fmt=">{}sQB".format(len(key))
if index!=None and index.index!=None:
ret=struct.pack(fmt,key,index.index,1)
else:
ret=struct.pack(fmt,key,0,0)
return ret
def unwrap_zindex_ref(value:bytes):
l=len(value)
fmt=">{}sQB".format(l-8-1)
key, index, flag = struct.unpack(fmt,value)
if flag==0:
index=None
return (key,index)
def build_set_key(key: bytes, zset: bytes, score: float, index) -> bytes:
ret=_SetSeparator + zset + _SetSeparator + float64_2_bytes(score) + key
ret=wrap_zindex_ref(ret,index)
return ret
def float64_2_bytes(f:float)->bytes:
return struct.pack(">d",f)
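# A concrete check of the wire format (big-endian IEEE-754 double):
#   float64_2_bytes(1.0) == b'\x3f\xf0\x00\x00\x00\x00\x00\x00'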
| 23.586207 | 73 | 0.709064 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 0.048246 |
adb2153a31727c0690dee6c70bb398a70a62d473 | 3,122 | py | Python | expertise_levels.py | erelsgl/voting | 732212df2d29d9e9ff7cb4cb83395f649541c346 | [
"MIT"
] | null | null | null | expertise_levels.py | erelsgl/voting | 732212df2d29d9e9ff7cb4cb83395f649541c346 | [
"MIT"
] | null | null | null | expertise_levels.py | erelsgl/voting | 732212df2d29d9e9ff7cb4cb83395f649541c346 | [
"MIT"
] | null | null | null | #!python3
"""
Utilities for computing random expertise levels.
"""
import numpy as np
from scipy.stats import truncnorm, beta
def fixed_expertise_levels(mean:float, size:int):
return np.array([mean]*size)
MIN_PROBABILITY=0.501
MAX_PROBABILITY=0.999
def truncnorm_expertise_levels(mean:float, std:float, size:int):
"""
Draw "size" random expertise levels.
Each level is a probability in [0.5,1],
drawn from a truncated-normal distribution with the given `mean` and `std`.
:return: an array of `size` random expretise levels, sorted from high to low.
>>> a = truncnorm_expertise_levels(mean=0.7, std=0.01, size=100)
>>> np.round(sum(a)/len(a),2)
0.7
>>> sum(a<MIN_PROBABILITY)
0
>>> sum(a>MAX_PROBABILITY)
0
"""
scale = std
loc = mean
a = (MIN_PROBABILITY - loc) / scale # MIN_PROBABILITY = a*scale + loc
b = (MAX_PROBABILITY - loc) / scale # MAX_PROBABILITY = b*scale + loc
return -np.sort(-truncnorm.rvs(a, b, loc=loc, scale=scale, size=size))
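# Note: a and b above are expressed in standard-deviation units, as scipy's
# truncnorm expects. E.g. for mean=0.7, std=0.01 the bounds become
# a = (0.501 - 0.7) / 0.01 = -19.9 and b = (0.999 - 0.7) / 0.01 = 29.9.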
def beta_expertise_levels(mean:float, std:float, size:int):
"""
Draw "size" random expertise levels.
Each level is a probability in [0.5,1],
drawn from a beta distribution with the given `mean` and `std`.
:return: an array of `size` random expretise levels, sorted from high to low.
>>> a = beta_expertise_levels(mean=0.7, std=0.01, size=100)
>>> np.round(sum(a)/len(a),2)
0.7
>>> sum(a<MIN_PROBABILITY)
0
>>> sum(a>MAX_PROBABILITY)
0
"""
loc = MIN_PROBABILITY # MIN_PROBABILITY = 0*scale + loc
scale = MAX_PROBABILITY - loc # MAX_PROBABILITY = 1*scale + loc
# from here: https://stats.stackexchange.com/a/12239/10760
# a = mean^2 * [ (1-mean)/std^2 - (1/mean) ]
# b = a*(1/mean - 1)
# Compute the mean of the standard beta (which is in [0,1]):
mean1 = (mean-loc)/scale # mean = mean1*scale + loc
std1 = std/scale # std = std1*scale
a = (mean1**2) * ( (1-mean1)/(std1**2) - (1/mean1) )
b = a*(1/mean1 - 1)
# print(f"a={a} b={b}")
try:
values = beta.rvs(a, b, loc=loc, scale=scale, size=size)
return -np.sort(-values)
except ValueError as err:
print(f"mean={mean}, std={std}, loc={loc}, scale={scale}, mean1={mean1}, std1={std1}, a={a}, b={b}")
raise err
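# Worked numbers for the mapping above (mean=0.75, std=0.14):
#   loc = 0.501, scale = 0.498 -> mean1 = 0.5, std1 ~= 0.281
#   a = 0.25 * ((1 - 0.5) / 0.281**2 - 1 / 0.5) ~= 1.08, b = a -> Beta(1.08, 1.08)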
if __name__ == "__main__":
import doctest
# (failures,tests) = doctest.testmod(report=True)
# print ("{} failures, {} tests".format(failures,tests))
print(truncnorm_expertise_levels(mean=0.6, std=0.1, size=11))
print(beta_expertise_levels(mean=0.6, std=0.1, size=11))
print(beta_expertise_levels(mean=3/4, std=1/np.sqrt(48), size=11)) # equivalent to uniform (std=0.14433)
print(beta_expertise_levels(mean=0.55, std=0.14, size=11)) # almost equivalent to uniform
print(beta_expertise_levels(mean=0.75, std=0.14, size=11)) # almost equivalent to uniform
print(beta_expertise_levels(mean=0.95, std=0.14, size=11))
# print(truncnorm_expertise_levels(mean=0.6, std=0, size=11)) # Division by zero error
| 32.520833 | 110 | 0.633568 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,714 | 0.549007 |
adb248407bb1fe608324a55851076099fa561cd8 | 1,132 | py | Python | tests/test_factory.py | mrled/interpersonal | 7282fb498ffc0b2bb9a14cf377b2cb5e4df4af56 | [
"Unlicense"
] | null | null | null | tests/test_factory.py | mrled/interpersonal | 7282fb498ffc0b2bb9a14cf377b2cb5e4df4af56 | [
"Unlicense"
] | null | null | null | tests/test_factory.py | mrled/interpersonal | 7282fb498ffc0b2bb9a14cf377b2cb5e4df4af56 | [
"Unlicense"
] | null | null | null | """Testing the Flask application factory"""
import os
import tempfile
import textwrap
from interpersonal import create_app
def test_config():
"""Test the application configuration
Make sure it works in testing mode and in normal mode.
"""
db_fd, db_path = tempfile.mkstemp()
conf_fd, conf_path = tempfile.mkstemp()
media_staging_path = tempfile.mkdtemp()
appconfig_str = textwrap.dedent(
f"""\
---
loglevel: DEBUG
database: {db_path}
password: whatever
cookie_secret_key: whocaresman
uri: http://interpersonal.example.net
mediastaging: {media_staging_path}
blogs:
- name: example
type: built-in example
uri: http://whatever.example.net
sectionmap:
default: blog
mediaprefix: /media
"""
)
os.write(conf_fd, appconfig_str.encode())
assert not create_app(configpath=conf_path).testing
assert create_app({"TESTING": True}, configpath=conf_path).testing
os.close(db_fd)
os.unlink(db_path)
os.close(conf_fd)
os.unlink(conf_path)
| 24.085106 | 70 | 0.641343 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 586 | 0.517668 |
adb376c4779c93a08c2bf6100553c99275ca06ea | 6,077 | py | Python | examples/hand_pose_estimation/processors_keypoints.py | ManuelMeder/paz | 6de73c0a99e0f9c7233c2797eebba2bb01631e6c | [
"MIT"
] | null | null | null | examples/hand_pose_estimation/processors_keypoints.py | ManuelMeder/paz | 6de73c0a99e0f9c7233c2797eebba2bb01631e6c | [
"MIT"
] | null | null | null | examples/hand_pose_estimation/processors_keypoints.py | ManuelMeder/paz | 6de73c0a99e0f9c7233c2797eebba2bb01631e6c | [
"MIT"
] | null | null | null | import numpy as np
from backend_keypoints import create_score_maps, extract_2D_keypoints
from backend_keypoints import crop_image_from_coordinates, extract_keypoints
from backend_keypoints import crop_image_from_mask, extract_hand_segment
from backend_keypoints import extract_bounding_box, find_max_location
from backend_keypoints import extract_dominant_hand_visibility
from backend_keypoints import extract_dominant_keypoints2D
from backend_keypoints import flip_right_to_left_hand
from backend_keypoints import get_hand_side_and_keypooints
from backend_keypoints import normalize_keypoints
from paz.abstract import Processor
class ExtractHandmask(Processor):
"""Extract Hand mask from the segmentation label provided. The pixels
with value greater than 1 belongs to hands
"""
def __init__(self):
super(ExtractHandmask, self).__init__()
def call(self, segmentation_label):
return extract_hand_segment(segmentation_label=segmentation_label)
class ExtractHandsideandKeypoints(Processor):
"""Extract Hand Side by counting the number of pixels belonging to each
hand.
"""
def __init__(self):
super(ExtractHandsideandKeypoints, self).__init__()
def call(self, hand_parts_mask, keypoints3D):
return get_hand_side_and_keypooints(hand_parts_mask, keypoints3D)
class NormalizeKeypoints(Processor):
"""Normalize KeyPoints.
"""
def __init__(self):
super(NormalizeKeypoints, self).__init__()
def call(self, keypoints3D):
return normalize_keypoints(keypoints3D)
class FlipRightHandToLeftHand(Processor):
"""Flip Right hand keypoints to Left hand keypoints.
"""
def __init__(self, flip_to_left=True):
super(FlipRightHandToLeftHand, self).__init__()
self.flip_to_left = flip_to_left
def call(self, keypoints3D):
return flip_right_to_left_hand(keypoints3D, self.flip_to_left)
class ExtractDominantHandVisibility(Processor):
"""Extract hand Visibility of Left or Right hand based on the
dominant_hand flag.
"""
def __init__(self):
super(ExtractDominantHandVisibility, self).__init__()
def call(self, keypoint_visibility, dominant_hand):
return extract_dominant_hand_visibility(keypoint_visibility,
dominant_hand)
class ExtractDominantKeypoints2D(Processor):
"""Extract hand keypoints of Left or Right hand based on the
dominant_hand flag.
"""
def __init__(self):
super(ExtractDominantKeypoints2D, self).__init__()
def call(self, keypoint_visibility, dominant_hand):
return extract_dominant_keypoints2D(keypoint_visibility,
dominant_hand)
class CropImageFromMask(Processor):
"""Crop Image from Mask.
"""
def __init__(self, image_size=(320, 320, 3), crop_size=256):
super(CropImageFromMask, self).__init__()
self.image_size = image_size
self.crop_size = crop_size
def call(self, keypoints, keypoint_visibility, image, camera_matrix):
return crop_image_from_mask(keypoints, keypoint_visibility, image,
self.image_size, self.crop_size,
camera_matrix)
class CreateScoremaps(Processor):
"""Create Gaussian Score maps representing 2D Keypoints.
image_size: Size of the input image
crop_size: Cropped Image size
variance: variance of the gaussian scoremap to be generated
"""
def __init__(self, image_size, crop_size, variance):
super(CreateScoremaps, self).__init__()
self.image_size = image_size
self.crop_size = crop_size
self.variance = variance
def call(self, keypoints2D, keypoints_visibility):
return create_score_maps(keypoints2D, keypoints_visibility,
self.image_size, self.crop_size, self.variance)
class Extract2DKeypoints(Processor):
""" Extract the keyppoints based on the visibility of the hand"""
def __init__(self):
super(Extract2DKeypoints, self).__init__()
def call(self, keypoint_visibility):
return extract_2D_keypoints(keypoint_visibility)
class ExtractBoundingbox(Processor):
""" Extract bounding box from a binary mask"""
def __init__(self):
super(ExtractBoundingbox, self).__init__()
def call(self, binary_hand_mask):
return extract_bounding_box(binary_hand_mask)
class AdjustCropSize(Processor):
""" Adjust the crop size with a buffer of scale 0.25 added"""
def __init__(self, crop_size=256):
super(AdjustCropSize, self).__init__()
self.crop_size = crop_size
def call(self, crop_size_best):
crop_size_best = crop_size_best.astype(dtype=np.float64)
crop_size_best = crop_size_best * 1.25
scaled_crop = np.maximum(self.crop_size / crop_size_best, 0.25)
scaled_crop = np.minimum(scaled_crop, 5.0)
return scaled_crop
class CropImage(Processor):
""" Crop the input image provided the location, output image size and the
scaling of the output image"""
def __init__(self, crop_size=256):
super(CropImage, self).__init__()
self.crop_size = crop_size
def call(self, image, crop_location, scale):
return crop_image_from_coordinates(image, crop_location, self.crop_size,
scale)
class ExtractKeypoints(Processor):
""" Extract keypoints when provided with a predicted scoremap"""
def __init__(self):
super(ExtractKeypoints, self).__init__()
def call(self, keypoint_scoremaps):
return extract_keypoints(keypoint_scoremaps)
class FindMaxLocation(Processor):
""" Find the brightest point in the score map, which is represented as a
keypoint"""
def __init__(self):
super(FindMaxLocation, self).__init__()
def call(self, scoremaps):
keypoints_2D = find_max_location(scoremaps)
return keypoints_2D
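# A minimal end-of-pipeline sketch (paz Processor instances are callable and
# dispatch to call(); the scoremap tensor itself is an assumed input):
#   extract = ExtractKeypoints()
#   keypoints_2D = extract(keypoint_scoremaps)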
| 31.984211 | 80 | 0.70413 | 5,404 | 0.889255 | 0 | 0 | 0 | 0 | 0 | 0 | 1,188 | 0.195491 |
adb4718bc3b46e088dbc8adf3af6ee95f2ddf3e1 | 954 | py | Python | pyschool/static/libs/importhooks/FileSystemHook.py | niansa/brython-in-the-classroom | a1f132898ed1afd1d1baf8170b99e58e39e87912 | [
"MIT"
] | 14 | 2016-03-16T18:01:52.000Z | 2021-09-24T14:07:20.000Z | pyschool/static/libs/importhooks/FileSystemHook.py | niansa/brython-in-the-classroom | a1f132898ed1afd1d1baf8170b99e58e39e87912 | [
"MIT"
] | 9 | 2015-03-31T00:31:26.000Z | 2021-05-28T06:24:16.000Z | pyschool/static/libs/importhooks/FileSystemHook.py | niansa/brython-in-the-classroom | a1f132898ed1afd1d1baf8170b99e58e39e87912 | [
"MIT"
] | 7 | 2015-05-04T04:06:04.000Z | 2020-10-28T09:54:07.000Z | import BaseHook
from browser import window
from javascript import JSObject
import sys
sys.path.append("../FileSystem")
import FileObject
#define my custom import hook (just to see if it get called etc).
class FileSystemHook(BaseHook.BaseHook):
def __init__(self, fullname, path):
BaseHook.BaseHook.__init__(self, fullname, path)
if not path.startswith('/pyschool'):
raise ImportError
def find_module(self):
if not self._path.startswith('/pyschool'):
raise ImportError
fs=JSObject(window._FS)
for _ext in ('.py', '/__init__.py'):
self._modpath='%s/%s%s' % (self._path, self._fullname, _ext)
_msg=fs._read_file(self._modpath)
if _msg['status'] == 'Okay':
self._module=_msg['fileobj'].get_attribute('contents')
return self
#if we got here, we couldn't find the module
raise ImportError
sys.meta_path.append(FileSystemHook)
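# Illustrative effect (assumes a /pyschool virtual filesystem is mounted in the
# browser FS): after the append above, `import mymodule` is resolved by
# FileSystemHook against /pyschool/mymodule.py or /pyschool/mymodule/__init__.py.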
| 26.5 | 70 | 0.662474 | 709 | 0.743187 | 0 | 0 | 0 | 0 | 0 | 0 | 207 | 0.216981 |
adb4f77c341d6f1aa8e130649b902fe973e3d9f3 | 15,753 | py | Python | demo_app_new.py | pnazarenko1405/final_project_data_analysis | d841d2630457bbf96b3914d27e6d82e80dc2654d | [
"MIT"
] | null | null | null | demo_app_new.py | pnazarenko1405/final_project_data_analysis | d841d2630457bbf96b3914d27e6d82e80dc2654d | [
"MIT"
] | null | null | null | demo_app_new.py | pnazarenko1405/final_project_data_analysis | d841d2630457bbf96b3914d27e6d82e80dc2654d | [
"MIT"
] | null | null | null | import sql as sql
import streamlit as st
from streamlit_folium import folium_static
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import json
import sys
import folium
import requests
from bs4 import BeautifulSoup
import csv
from tqdm import tqdm
import webbrowser
import os.path as osp
import os
from folium.plugins import MarkerCluster
import numpy as np
from numpy import genfromtxt
import sqlite3
with st.echo(code_location='below'):
import zipfile
zipFile = zipfile.ZipFile("2019-20-fullyr-data_sa_crime.csv.zip", 'r')
zipFile.extract('2019-20-fullyr-data_sa_crime.csv')
df1 = pd.read_csv('2019-20-fullyr-data_sa_crime.csv')
# from sqlite3 import Error
#
#
# def create_connection(path):
# connection = None
# try:
# connection = sqlite3.connect(path)
# print("Connection to SQLite DB successful")
# except Error as e:
# print(f"The error '{e}' occurred")
#
# return connection
st.title("Различные данные по правонарушениям в Южной Австралии за 2018-2020гг.")
xx = df1.copy()
    xx = xx.drop(columns=['Reported Date', 'Postcode - Incident', 'Offence Level 1 Description', 'Offence Level 2 Description', 'Offence Level 3 Description'])
    xx = xx.sort_values(by='Suburb - Incident', ascending=False)
groups = xx.groupby('Suburb - Incident', as_index=False).sum()
group1 = groups.sort_values('Offence count', ascending=False).head(15)
    st.write('Statistics for the suburbs with the highest offence counts, 2019-2020')
fig2, ax2 = plt.subplots(figsize=(40, 20))
sns.barplot(data=group1, x='Suburb - Incident', y='Offence count', palette='magma')
plt.xlabel('Suburb', size=20)
plt.ylabel('Offence count in the suburb', size=20)
plt.title('Total offence count of crimes in the suburbs (top 15) 2019/2020', size=36)
st.pyplot(fig2)
    if st.button('Show the suburbs with the highest offence counts for 2019-2020 as a table'):
st.dataframe(group1)
xx1 = pd.read_csv('2019-20-fullyr-data_sa_crime.csv')
    xx1 = xx1.drop(columns=['Reported Date', 'Postcode - Incident', 'Suburb - Incident', 'Offence Level 2 Description', 'Offence Level 3 Description'])
    xx1 = xx1.sort_values(by='Offence Level 1 Description', ascending=False)
groups1 = xx1.groupby('Offence Level 1 Description', as_index=False).sum()
group12 = groups1.sort_values('Offence count', ascending=False)
    st.write('Offence count statistics by level 1 classification, 2019-2020')
fig3, ax3 = plt.subplots(figsize=(40, 20))
sns.barplot(data=group12, x='Offence Level 1 Description', y='Offence count', palette='magma')
plt.xlabel('Type of crime (lev1)', size=20)
plt.ylabel('Offence count', size=20)
plt.title('Total offence count of different crimes (lev1) 2019/2020', size=36)
st.pyplot(fig3)
    if st.button('Show the offence counts by level 1 classification for 2019-2020 as a table'):
st.dataframe(group12)
xx2 = pd.read_csv('2019-20-fullyr-data_sa_crime.csv')
    xx2 = xx2.drop(columns=['Reported Date', 'Postcode - Incident', 'Suburb - Incident', 'Offence Level 1 Description', 'Offence Level 3 Description'])
    xx2 = xx2.sort_values(by='Offence Level 2 Description', ascending=False)
groups1_2 = xx2.groupby('Offence Level 2 Description', as_index=False).sum()
group123 = groups1_2.sort_values('Offence count', ascending=False)
    st.write('Offence count statistics by level 2 classification, 2019-2020')
fig4, ax4 = plt.subplots(figsize=(40, 20))
sns.barplot(data=group123, x='Offence Level 2 Description', y='Offence count', palette='magma')
plt.xlabel('Type of crime (lev2)', size=20)
plt.ylabel('Offence count', size=20)
plt.title('Total offence count of different crimes (lev2) 2019/2020', size=36)
st.pyplot(fig4)
    if st.button('Show the offence counts by level 2 classification for 2019-2020 as a table'):
st.dataframe(group123)
xx3 = pd.read_csv('2019-20-fullyr-data_sa_crime.csv')
    xx3 = xx3.drop(columns=['Reported Date', 'Postcode - Incident', 'Suburb - Incident', 'Offence Level 1 Description', 'Offence Level 2 Description'])
    xx3 = xx3.sort_values(by='Offence Level 3 Description', ascending=False)
groups1_2_3 = xx3.groupby('Offence Level 3 Description', as_index=False).sum()
group1234 = groups1_2_3.sort_values('Offence count', ascending=False)
    st.write('Offence count statistics by level 3 classification, 2019-2020')
fig5, ax5 = plt.subplots(figsize=(60, 20))
sns.barplot(data=group1234, x='Offence Level 3 Description', y='Offence count', palette='magma')
plt.xlabel('Type of crime (lev3)', size=20)
plt.ylabel('Offence count', size=20)
plt.title('Total offence count of different crimes (lev3)', size=36)
st.pyplot(fig5)
    if st.button('Show the offence counts by level 3 classification for 2019-2020 as a table'):
st.dataframe(data=group1234)
xx4 = pd.read_csv('2019-20-fullyr-data_sa_crime.csv')
    xx4 = xx4.drop(columns=['Postcode - Incident', 'Suburb - Incident', 'Offence Level 1 Description', 'Offence Level 2 Description', 'Offence Level 3 Description'])
    xx4 = xx4.sort_values(by='Reported Date')
groups1_2_3_4 = xx4.groupby('Reported Date', as_index=False).sum()
group12345 = groups1_2_3_4.sort_values('Offence count', ascending=False)
    st.write('Offence count statistics by date, 2019-2020')
fig6, ax6 = plt.subplots(figsize=(60, 20))
sns.lineplot(data=group12345, x='Reported Date', y='Offence count', color='red')
plt.xlabel('Date', size=20)
plt.ylabel('Offence count', size=20)
plt.title('Total offence count by date 01.07.19-30.06.20', size=36)
st.pyplot(fig6)
    if st.button('Show the offence counts by date for 2019-2020 as a table'):
st.dataframe(data=group12345)
    x_18_19 = pd.read_csv('2018-19-data_sa_crime.csv')
    x_18_19 = x_18_19.drop(columns=['Reported Date', 'Postcode - Incident', 'Offence Level 1 Description', 'Offence Level 2 Description', 'Offence Level 3 Description'])
    x_18_19 = x_18_19.sort_values(by='Suburb - Incident', ascending=False)
groups_18_19 = x_18_19.groupby('Suburb - Incident', as_index=False).sum()
group_18_19_1 = groups_18_19.sort_values('Offence count', ascending=False).head(15)
    st.write('Statistics for the suburbs with the highest offence counts, 2018-2019')
fig7, ax7 = plt.subplots(figsize=(40, 20))
sns.barplot(data=group_18_19_1, x='Suburb - Incident', y='Offence count', palette='magma')
plt.xlabel('Suburb', size=20)
plt.ylabel('Offence count in the suburb', size=20)
plt.title('Total offence count of crimes in the suburbs (top 15) 2018/2019', size=36)
st.pyplot(fig7)
    if st.button('Show the suburbs with the highest offence counts for 2018-2019 as a table'):
st.dataframe(group_18_19_1)
x_18_19_2 = pd.read_csv('2018-19-data_sa_crime.csv')
    x_18_19_2 = x_18_19_2.drop(columns=['Reported Date', 'Postcode - Incident', 'Suburb - Incident', 'Offence Level 2 Description', 'Offence Level 3 Description'])
    x_18_19_2 = x_18_19_2.sort_values(by='Offence Level 1 Description', ascending=False)
groups_18_19_2 = x_18_19_2.groupby('Offence Level 1 Description', as_index=False).sum()
group_18_19_2 = groups_18_19_2.sort_values('Offence count', ascending=False)
    st.write('Offence count statistics by level 1 classification, 2018-2019')
fig8, ax8 = plt.subplots(figsize=(40, 20))
sns.barplot(data=group_18_19_2, x='Offence Level 1 Description', y='Offence count', palette='magma')
plt.xlabel('Type of crime (lev1)', size=20)
plt.ylabel('Offence count', size=20)
plt.title('Total offence count of different crimes (lev1) 2018/2019', size=36)
st.pyplot(fig8)
    if st.button('Show the offence counts by level 1 classification for 2018-2019 as a table'):
st.dataframe(group_18_19_2)
x_18_19_4 = pd.read_csv('2018-19-data_sa_crime.csv')
    x_18_19_4 = x_18_19_4.drop(columns=['Reported Date', 'Postcode - Incident', 'Suburb - Incident', 'Offence Level 1 Description', 'Offence Level 3 Description'])
    x_18_19_4 = x_18_19_4.sort_values(by='Offence Level 2 Description', ascending=False)
groups_18_19_4 = x_18_19_4.groupby('Offence Level 2 Description', as_index=False).sum()
group_18_19_4 = groups_18_19_4.sort_values('Offence count', ascending=False)
    st.write('Offence count statistics by level 2 classification, 2018-2019')
fig10, ax10 = plt.subplots(figsize=(40, 20))
sns.barplot(data=group_18_19_4, x='Offence Level 2 Description', y='Offence count', palette='magma')
plt.xlabel('Type of crime (lev2)', size=20)
plt.ylabel('Offence count', size=20)
plt.title('Total offence count of different crimes (lev2) 2018/2019', size=36)
st.pyplot(fig10)
    if st.button('Show the offence counts by level 2 classification for 2018-2019 as a table'):
st.dataframe(group_18_19_4)
x_18_19_3 = pd.read_csv('2018-19-data_sa_crime.csv')
    x_18_19_3 = x_18_19_3.drop(columns=['Reported Date', 'Postcode - Incident', 'Suburb - Incident', 'Offence Level 1 Description', 'Offence Level 2 Description'])
    x_18_19_3 = x_18_19_3.sort_values(by='Offence Level 3 Description', ascending=False)
groups_18_19_3 = x_18_19_3.groupby('Offence Level 3 Description', as_index=False).sum()
group_18_19_3 = groups_18_19_3.sort_values('Offence count', ascending=False)
    st.write('Offence count statistics by level 3 classification, 2018-2019')
fig9, ax9 = plt.subplots(figsize=(60, 20))
sns.barplot(data=group_18_19_3, x='Offence Level 3 Description', y='Offence count', palette='magma')
plt.xlabel('Type of crime (lev3)', size=20)
plt.ylabel('Offence count', size=20)
plt.title('Total offence count of different crimes (lev3) 2018/2019', size=36)
st.pyplot(fig9)
    if st.button('Show the offence counts by level 3 classification for 2018-2019 as a table'):
st.dataframe(group_18_19_3)
din=pd.read_csv("Offenders, principal offence of public order offences.csv")
#din_data = genfromtxt('Offenders, principal offence of public order offences.csv', delimiter=',')
print(din)
#din.columns=["Years", 'Offenders']
#print(din)
    st.write('Offender count statistics, 2009-2019')
fig10, ax10 = plt.subplots(figsize=(40, 20))
sns.lineplot(data=din, x="Years", y='Offenders', color='red')
plt.xlabel('Year', size=40)
plt.ylabel('Offenders', size=40)
plt.title('Offenders dinamics', size=50)
st.pyplot(fig10)
    if st.button('Show the offender counts for 2009-2019 as a table'):
st.dataframe(din)
years = np.array([2019, 2020])
st.write("(Придётся немного подождать, программа обрабатывает примерно 95тыс. результатов для каждого года)")
files = ['2019-20-fullyr-data_sa_crime.csv',
             '2018-19-data_sa_crime.csv']  # run all of the map-building code below, with markers, for each file
for file in files:
locations = []
entrypoint1 = "https://nominatim.openstreetmap.org/search"
query1 = {'q': 'MORPHETT VALE australia', 'format': 'xml'}
r1 = requests.get(entrypoint1, params=query1)
soup = BeautifulSoup(r1.text, 'xml')
st.write("Визуализация количества правонарушений по пригородам на карте " + str(years[0]) + "-" + str(years[1]) + "гг.")
years = years-1
print(years)
        with open(osp.join(os.environ['HOME'], 'Downloads/first_project 2', file), newline='') as f: # if the path causes errors, just paste the full path to the folder with the csv files
reader = csv.reader(f)
for row in reader:
                place = row[1] + ' ' + row[2] # take the suburb name and postcode
locations.append(place)
        locations.pop(0) # remove the first row (the column headers)
        new_dict = {i: locations.count(i) for i in tqdm(locations)} # build a dict {location: offence count}
        sorted_values = sorted(new_dict.values(), reverse=True) # sort the dict values from largest to smallest
sorted_dict = {}
        for i in sorted_values: # build a new dict sorted by value
for k in new_dict.keys():
if new_dict[k] == i:
sorted_dict[k] = new_dict[k]
break
        # take a slice of the dict via lists
        lst_slice_key = list(sorted_dict.keys())[:27]  # take the first 27 entries (keys)
        lst_slice_val = list(sorted_dict.values())[:27]  # take the first 27 entries (values)
        new_sorted_dict = dict(zip(lst_slice_key, lst_slice_val))  # build the new sliced dict
print(new_sorted_dict)
lat_19_20 = []
lon_19_20 = []
lst_number = []
lst_place = []
        # query the geocoder and fill the lists with the required data
for name, number in tqdm(new_sorted_dict.items()):
entrypoint2 = "https://nominatim.openstreetmap.org/search"
query2 = {'q': str(name), 'format': 'xml'}
r2 = requests.get(entrypoint2, params=query2)
soup1 = BeautifulSoup(r2.text, 'xml')
for place1 in soup1.find_all("place"):
lst_place.append(place1['display_name'])
lat_19_20.append(float(place1['lat']))
lon_19_20.append(float(place1['lon']))
lst_number.append(number)
break
coord_19_20 = dict(zip(lat_19_20, lon_19_20))
a = list(coord_19_20.keys())[0]
b = coord_19_20[a]
        def color_change(count):  # change the colour depending on the number of crimes at the point
if (count < 800):
return ('green')
elif (800 <= count < 1100):
return ('orange')
else:
return ('red')
        def radius_change(count):  # change the radius depending on the number of crimes at the point
if (count < 800):
rad = 7
return rad
elif (800 <= count < 1100):
rad = 14
return rad
else:
rad = 21
return rad
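        # e.g. (illustrative): color_change(950) -> 'orange', radius_change(950) -> 14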
        crime_map = folium.Map(location=[a, b], zoom_start=8)  # create the map with a default location (avoid shadowing builtin map)
        marker_cluster = folium.plugins.MarkerCluster().add_to(crime_map)  # create marker clustering on the map
        for lat, lon, place, number in tqdm(zip(lat_19_20, lon_19_20, lst_place, lst_number)):  # create the markers on the map one by one
place_splited = place.split(',')
folium.CircleMarker(location=[lat, lon], radius=radius_change(int(number)),
                                # location - the marker coordinates, radius - taken from radius_change
popup=f'Place: {place_splited[0]}, {place_splited[1]}, {place_splited[2]}\nCrimes: {str(number)}',
                                # popup - the marker text
fill_color=color_change(int(number)), color="black", fill_opacity=0.9).add_to(
                marker_cluster)  # fill_color - taken from color_change
map.save(f"map_{file[:-4]}.html") # сохраняем карту в html формате
print(f'DONE with {file}')
url = f"map_{file[:-4]}.html"
        folium_static(crime_map)
| 42.233244 | 184 | 0.675935 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,535 | 0.529281 |
adb586800f460b888df5730bb9f1d37813ab8d42 | 1,236 | py | Python | tuxeatpi_common/initializer.py | TuxEatPi/common | 45aed6275636974f2eb94cf40e330fef5fdd0b7a | [
"Apache-2.0"
] | null | null | null | tuxeatpi_common/initializer.py | TuxEatPi/common | 45aed6275636974f2eb94cf40e330fef5fdd0b7a | [
"Apache-2.0"
] | null | null | null | tuxeatpi_common/initializer.py | TuxEatPi/common | 45aed6275636974f2eb94cf40e330fef5fdd0b7a | [
"Apache-2.0"
] | null | null | null | """Module defining the init process for TuxEatPi component"""
import logging
class Initializer(object):
"""Initializer class to run init action for a component"""
def __init__(self, component, skip_dialogs=False, skip_intents=False, skip_settings=False):
self.component = component
self.skip_dialogs = skip_dialogs
self.skip_intents = skip_intents
self.skip_settings = skip_settings
self.logger = logging.getLogger(name="tep").getChild(component.name).getChild('initializer')
def run(self):
"""Run initialization"""
self.logger.info("Starting initialize process")
        # Start WAMP client
self.component._wamp_client.run()
# Load dialogs
if not self.skip_dialogs:
self.component.dialogs.load()
# Get settings
if not self.skip_settings:
self.component.settings.read()
self.component.settings.read_global()
self.logger.info("Global and component settings received")
# Send intent files
if not self.skip_intents:
self.component.intents.save(self.component.settings.nlu_engine)
# Start subtasker
self.component._tasks_thread.start()
| 37.454545 | 100 | 0.665858 | 1,156 | 0.935275 | 0 | 0 | 0 | 0 | 0 | 0 | 313 | 0.253236 |
adb5fa9a4046aad685ec8217a10885613f798277 | 56 | py | Python | devday/sponsoring/__init__.py | jenslauterbach/devday_website | a827c9237e656842542eff07ec9fa7b39716a0ee | [
"CC-BY-4.0",
"BSD-3-Clause"
] | 6 | 2018-09-30T20:18:01.000Z | 2020-03-12T09:03:38.000Z | devday/sponsoring/__init__.py | jenslauterbach/devday_website | a827c9237e656842542eff07ec9fa7b39716a0ee | [
"CC-BY-4.0",
"BSD-3-Clause"
] | 260 | 2018-09-30T14:17:57.000Z | 2022-03-04T13:48:34.000Z | devday/sponsoring/__init__.py | jenslauterbach/devday_website | a827c9237e656842542eff07ec9fa7b39716a0ee | [
"CC-BY-4.0",
"BSD-3-Clause"
] | 9 | 2018-09-30T13:17:21.000Z | 2020-10-03T12:55:05.000Z | default_app_config = 'sponsoring.apps.SponsoringConfig'
| 28 | 55 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.607143 |
adb71119cdfc222b0935d1f0fd6885370fac21e0 | 922 | py | Python | movie/migrations/0003_auto_20200718_0759.py | edith007/The-Movie-Database | fef4aba56be66b93de5665da374ec8aab05c40f9 | [
"CC0-1.0"
] | 2 | 2020-07-12T20:15:53.000Z | 2020-07-19T12:07:48.000Z | movie/migrations/0003_auto_20200718_0759.py | edith007/The-Movie-DataBase | fef4aba56be66b93de5665da374ec8aab05c40f9 | [
"CC0-1.0"
] | 1 | 2020-07-12T07:50:55.000Z | 2020-07-12T07:50:55.000Z | movie/migrations/0003_auto_20200718_0759.py | edith007/The-Movie-DataBase | fef4aba56be66b93de5665da374ec8aab05c40f9 | [
"CC0-1.0"
] | null | null | null | # Generated by Django 2.2.12 on 2020-07-18 07:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('movie', '0002_auto_20200717_1039'),
]
operations = [
migrations.RemoveField(
model_name='show',
name='plot',
),
migrations.AddField(
model_name='show',
name='genres',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='show',
name='image',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='show',
name='name',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='userrating',
name='position',
field=models.IntegerField(default=0),
),
]
| 24.263158 | 49 | 0.520607 | 828 | 0.898048 | 0 | 0 | 0 | 0 | 0 | 0 | 159 | 0.172451 |
adb85db75ca4685e65491305d77f76efef993ae2 | 220 | py | Python | figures/styles.py | Jakob-Unfried/msc-legacy | 2c41f3f714936c25dd534bd66da802c26176fcfa | [
"MIT"
] | 1 | 2021-03-22T14:16:43.000Z | 2021-03-22T14:16:43.000Z | figures/styles.py | Jakob-Unfried/msc-legacy | 2c41f3f714936c25dd534bd66da802c26176fcfa | [
"MIT"
] | null | null | null | figures/styles.py | Jakob-Unfried/msc-legacy | 2c41f3f714936c25dd534bd66da802c26176fcfa | [
"MIT"
] | null | null | null | colors_per_chi = {2: 'green', 3: 'orange', 4: 'purple', 5: 'pink', 6: 'red'}
style_per_chi = {2: '-', 3: '-.', 4: 'dotted'}
markers_per_reason = {'converged': 'o', 'progress': 'x', 'ressources': 'v'}
linewidth = 5.31596
| 44 | 76 | 0.581818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 91 | 0.413636 |
adb871b1b9356162327f5a254f4edf184c40e44c | 2,222 | py | Python | index.py | tapanbk/compass-distance-and-bearing | 2554a39c79570cb675b8d02d1fe8de7a86a7d0f4 | [
"Apache-2.0"
] | null | null | null | index.py | tapanbk/compass-distance-and-bearing | 2554a39c79570cb675b8d02d1fe8de7a86a7d0f4 | [
"Apache-2.0"
] | null | null | null | index.py | tapanbk/compass-distance-and-bearing | 2554a39c79570cb675b8d02d1fe8de7a86a7d0f4 | [
"Apache-2.0"
] | null | null | null | def calculate_compass_distance(origin, destination):
import math
origin_latitude, origin_longitude = origin
destination_latitude, destination_longitude = destination
    # Earth radius: 3959 => miles, 6371 => km (converted to meters below)
radius = 6371*1000
dlat = math.radians(destination_latitude-origin_latitude)
dlon = math.radians(destination_longitude-origin_longitude)
a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(origin_latitude)) \
* math.cos(math.radians(destination_latitude)) * math.sin(dlon/2) * math.sin(dlon/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
return radius * c
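# The function above implements the haversine formula:
#   a = sin^2(dlat/2) + cos(lat1) * cos(lat2) * sin^2(dlon/2)
#   c = 2 * atan2(sqrt(a), sqrt(1 - a));  distance = R * c   (R in meters here)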
def calculate_initial_compass_bearing(origin, destination):
import math
if (type(origin) != tuple) or (type(destination) != tuple):
raise TypeError("Only tuples are supported as arguments")
origin_latitude, origin_longitude = origin
destination_latitude, destination_longitude = destination
origin_latitude = math.radians(origin_latitude)
destination_latitude = math.radians(destination_latitude)
diff_long = math.radians(destination_longitude - origin_longitude)
x = math.sin(diff_long) * math.cos(destination_latitude)
y = math.cos(origin_latitude) * math.sin(destination_latitude) - (
math.sin(origin_latitude) * math.cos(destination_latitude) * math.cos(diff_long))
initial_bearing = math.atan2(x, y)
initial_bearing = math.degrees(initial_bearing)
compass_bearing = (initial_bearing + 360) % 360
return compass_bearing
if __name__ == '__main__':
# Checked on
    # http://instantglobe.com/CRANES/GeoCoordTool.html
pointa = (27.672944, 85.313551)
# pointb = (27.674198, 85.313379) # 353.074198377
# pointb = (27.674312, 85.313701) # 5.54634422036
# pointb = (27.673761, 85.314173) # 33.989047
# pointb = (27.673723, 85.314581) # 49.5024397615
pointb = (27.672792, 85.315011) # distance: 144.764626263 bearing:96.7043766096
# pointb = (27.671747, 85.313615) # distance: 133.249459006 bearing: 177.288978602
distance = calculate_compass_distance(pointa, pointb)
bearing = calculate_initial_compass_bearing(pointa, pointb)
print(distance, bearing)
| 47.276596 | 93 | 0.713771 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 492 | 0.221422 |
adb94e7aa4a224fc307c81684302f11bb6f9c198 | 757 | py | Python | codes/src/util/isotime.py | CorbinFoucart/FEMexperiment | 9bad34d9ed7cbdd740e3a4b67f433779dd53b264 | [
"MIT"
] | 2 | 2018-05-26T22:09:32.000Z | 2018-06-25T21:46:32.000Z | codes/src/util/isotime.py | CorbinFoucart/FEMexperiment | 9bad34d9ed7cbdd740e3a4b67f433779dd53b264 | [
"MIT"
] | 16 | 2018-05-17T21:38:44.000Z | 2022-03-11T23:21:25.000Z | codes/src/util/isotime.py | CorbinFoucart/FEMexperiment | 9bad34d9ed7cbdd740e3a4b67f433779dd53b264 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""@package isotime
Creates a string containing the current local time in ISO 8601 basic format
@author: Chris Mirabito (mirabito@mit.edu)
"""
from datetime import datetime
#from matplotlib.dates import SEC_PER_DAY
SEC_PER_DAY = 86400
def isotime():
"""Current local time in ISO 8601 basic format
@return String containing the current local time in ISO 8601 basic format
"""
local_time = datetime.now()
utc_time = datetime.utcnow()
time_diff = local_time - utc_time
hours, minutes = divmod(
(time_diff.days * SEC_PER_DAY + time_diff.seconds + 30) // 60, 60)
return ('{}{:+03d}{:02d}'
.format(local_time.strftime('%Y%m%dT%H%M%S'),
int(hours), int(minutes)))
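# Example (illustrative): for a host in a UTC+05:45 zone, isotime() returns
# something like '20240101T174500+0545'.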
| 30.28 | 77 | 0.660502 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 371 | 0.490092 |
adba93a1fa272ea925749888b657b23f18f04ab6 | 1,980 | py | Python | src/cosmic_ray/tools/filters/filter_app.py | XD-DENG/cosmic-ray | d265dd0c7bf65484ee2ff1503129b2b16d0c7f55 | [
"MIT"
] | 1 | 2020-10-18T11:29:03.000Z | 2020-10-18T11:29:03.000Z | src/cosmic_ray/tools/filters/filter_app.py | XD-DENG/cosmic-ray | d265dd0c7bf65484ee2ff1503129b2b16d0c7f55 | [
"MIT"
] | 4 | 2020-11-21T07:36:24.000Z | 2020-11-22T03:09:39.000Z | src/cosmic_ray/tools/filters/filter_app.py | XD-DENG/cosmic-ray | d265dd0c7bf65484ee2ff1503129b2b16d0c7f55 | [
"MIT"
] | null | null | null | """A simple base for creating common types of work-db filters.
"""
import argparse
import logging
import sys
from exit_codes import ExitCode
from cosmic_ray.work_db import use_db
class FilterApp:
"""Base class for simple WorkDB filters.
This provides command-line handling for common filter options like
the session and verbosity level. Subclasses can add their own arguments
    as well. This provides a `main()` function that opens the session's WorkDB
and passes it to the subclass's `filter()` function.
"""
def add_args(self, parser: argparse.ArgumentParser):
"""Add any arguments that the subclass needs to the parser.
Args:
parser: The ArgumentParser for command-line processing.
"""
def description(self):
"""The description of the filter.
This is used for the command-line help message.
"""
return None
def main(self, argv=None):
"""The main function for the app.
Args:
            argv: Command line argument list to parse.
"""
if argv is None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(
description=self.description(),
)
parser.add_argument(
'session', help="Path to the session on which to operate")
parser.add_argument(
'--verbosity', help='Verbosity level for logging', default='WARNING')
self.add_args(parser)
args = parser.parse_args(argv)
logging.basicConfig(level=getattr(logging, args.verbosity))
with use_db(args.session) as db:
self.filter(db, args)
return ExitCode.OK
def filter(self, work_db, args):
"""Apply this filter to a WorkDB.
This should modify the WorkDB in place.
Args:
work_db: An open WorkDB instance.
args: The argparse Namespace for the command line.
"""
raise NotImplementedError()
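# Minimal illustrative subclass (hypothetical; not part of cosmic-ray itself):
#
#   class NullFilter(FilterApp):
#       def description(self):
#           return "A filter that leaves the WorkDB unchanged."
#
#       def filter(self, work_db, args):
#           pass  # no-op: keeps every work item
#
#   if __name__ == '__main__':
#       sys.exit(NullFilter().main())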
| 27.887324 | 81 | 0.629798 | 1,796 | 0.907071 | 0 | 0 | 0 | 0 | 0 | 0 | 1,087 | 0.54899 |
adbadbea54baed7c6fcc238d2ab70e4072465ec8 | 1,702 | py | Python | Crawl/Code/com.vitan.test/mao.py | ivitan/LearnPython | f7c1c8f450f5cbcbd8cabe03711c5e0d81dfdee3 | [
"MIT"
] | 1 | 2020-02-05T12:13:31.000Z | 2020-02-05T12:13:31.000Z | Crawl/Code/com.vitan.test/mao.py | ivitan/LearnPython | f7c1c8f450f5cbcbd8cabe03711c5e0d81dfdee3 | [
"MIT"
] | null | null | null | Crawl/Code/com.vitan.test/mao.py | ivitan/LearnPython | f7c1c8f450f5cbcbd8cabe03711c5e0d81dfdee3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 18-10-28 11:51 AM
# @Author : Vitan
# @File : mao.py
import requests
import re
import json
from multiprocessing import Pool
from requests.exceptions import RequestException
def get_one_page(url):
headers = {'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'}
try:
response = requests.get(url,headers = headers)
if response.status_code == 200:
html = response.text
return html
return None
except RequestException:
return None
def parse_one_page(html):
pattern = re.compile('<dd>.*?board-index.*?>(\d+)</i>'
+ '.*?<p.*?title="(.*?)".*?</p>.*?star">(.*?)</p>'
+ '.*?releasetime">(.*?)</p>.*?integer">(.*?)'
+ '<.*?fraction">(.*?)</i>',re.S)
movies = re.findall(pattern,html)
for item in movies:
        yield {
            'rank': item[0],
            'title': item[1],
            'stars': item[2].strip()[3:],  # strip the '主演:' prefix from the scraped text
            'release_date': item[3][5:],
            'score': item[4] + item[5]
}
def write_to_txt(content):
    # open in append mode with the utf8 charset
with open('movies.txt','a',encoding='utf8') as f:
        # use json.dumps to serialise the item to a string (the with-block closes the file)
        f.write(json.dumps(content, ensure_ascii=False) + '\n')
def main(offset):
url = 'http://maoyan.com/board/4?offset=' + str(offset)
html = get_one_page(url)
for item in parse_one_page(html):
print(item)
write_to_txt(item)
if __name__ == '__main__':
    pool = Pool()  # process pool for parallel scraping
pool.map(main, [i*10 for i in range(10)])
pool.close()
pool.join() | 30.392857 | 137 | 0.549941 | 0 | 0 | 510 | 0.285874 | 0 | 0 | 0 | 0 | 614 | 0.34417 |
adbb784f60c50615a64e5766faabdc65dde7543d | 446 | py | Python | setup.py | CubexX/shortest-python | 0b778ad88cc329e00ecd94236178f13735451ded | [
"MIT"
] | null | null | null | setup.py | CubexX/shortest-python | 0b778ad88cc329e00ecd94236178f13735451ded | [
"MIT"
] | null | null | null | setup.py | CubexX/shortest-python | 0b778ad88cc329e00ecd94236178f13735451ded | [
"MIT"
] | null | null | null | from distutils.core import setup
setup(
name='shortest-python',
packages=['shortest'],
version='0.1',
description='Python library for shorte.st url shortener',
long_description="More on github: https://github.com/CubexX/shortest-python",
author='CubexX',
author_email='root@cubexx.xyz',
url='https://github.com/CubexX/shortest-python',
keywords=['shortest', 'shorte.st', 'links'],
license='MIT License'
)
| 29.733333 | 81 | 0.683857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 244 | 0.547085 |
adbd54f48752ea1ae6b17c7029c2a22c69b5f6e2 | 3,381 | py | Python | 04_random_forest_exp.py | markysamson/CDSWcreditcardfraud | 31dc6d417baee4ef5736bce74059815b4f51542a | [
"Apache-2.0"
] | null | null | null | 04_random_forest_exp.py | markysamson/CDSWcreditcardfraud | 31dc6d417baee4ef5736bce74059815b4f51542a | [
"Apache-2.0"
] | null | null | null | 04_random_forest_exp.py | markysamson/CDSWcreditcardfraud | 31dc6d417baee4ef5736bce74059815b4f51542a | [
"Apache-2.0"
] | null | null | null | # # Building and Evaluating Random Forest Model
# ## Setup
# Import useful packages, modules, classes, and functions:
from __future__ import print_function
from pyspark.sql import SparkSession
from pyspark.sql.functions import col
#import numpy as np
#import pandas as pd
import matplotlib.pyplot as plt
#import seaborn as sns
import cdsw
import sys
# Create a SparkSession:
spark = SparkSession.builder.master("local").appName("creditcard_exp").getOrCreate()
param_numTrees=int(sys.argv[1])
# param_numTrees=10
# ## Preprocess the modeling data
# Read the explored data from HDFS:
df = spark.read.parquet("creditcardfraud/exploredata/")
# Now we manually select our features and label:
# Features selected
feature_selected = ["V1","V2","V3","V4","V9","V10","V11","V12","V14","V16","V17","V18","V19"]
df_selected = df.select("Time","V1","V2","V3","V4","V9","V10","V11","V12","V14","V16","V17","V18","V19","Class")
# The machine learning algorithms in Spark MLlib expect the features to be collected into
# a single column. So we use
# [VectorAssembler](http://spark.apache.org/docs/latest/api/python/pyspark.ml.html#pyspark.ml.feature.VectorAssembler)
# to assemble our feature vector:
from pyspark.ml.feature import VectorAssembler
assembler = VectorAssembler(inputCols=feature_selected, outputCol="Features")
df_assembled = assembler.transform(df_selected)
# **Note:** `features` is stored in sparse format.
# ## Create train and test datasets for machine learning (classification).
# Fit our model on the train DataFrame and evaluate our model on the test DataFrame:
# We want both train and test dataset to have equal proportion of normal and fraud transactions.
df_norm = df_assembled.filter(df_assembled.Class == 0)
df_fraud = df_assembled.filter(df_assembled.Class == 1)
(norm_train, norm_test) = df_norm.randomSplit([0.7, 0.3], 12345)
(fraud_train, fraud_test) = df_fraud.randomSplit([0.7, 0.3], 12345)
df_train = norm_train.union(fraud_train).orderBy("Time")
df_test = norm_test.union(fraud_test).orderBy("Time")
# ## Specify Random Forest model
from pyspark.ml.classification import RandomForestClassifier
rf = RandomForestClassifier(featuresCol="Features", labelCol="Class", numTrees=param_numTrees)
# ## Fit the Random Forest model
# Use the `fit` method to fit the random forest model on the train DataFrame:
%time rf_model = rf.fit(df_train)
# ## Evaluate model performance on the test dataset.
# Use the `evaluate` method of the
# [BinaryClassificationEvaluator](http://spark.apache.org/docs/latest/api/python/pyspark.ml.html#pyspark.ml.evaluation.BinaryClassificationEvaluator)
# class
# Generate predictions on the test DataFrame:
test_with_prediction = rf_model.transform(df_test)
# **Note:** The resulting DataFrame includes three types of predictions. The
# `rawPrediction` is a vector of log-odds, `probability` is a vector of
# probabilities, and `prediction` is the predicted class based on the
# probability vector.
# Create an instance of `BinaryClassificationEvaluator` class:
from pyspark.ml.evaluation import BinaryClassificationEvaluator
evaluator = BinaryClassificationEvaluator(rawPredictionCol="rawPrediction", labelCol="Class",
metricName="areaUnderROC")
auroc=evaluator.evaluate(test_with_prediction)
auroc
cdsw.track_metric("auroc", auroc)
# ## Cleanup
# Stop the SparkSession:
# spark.stop() | 37.988764 | 149 | 0.762496 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,970 | 0.582668 |
adc017e0f309fe827c4b742579b8ba5fefa70fea | 9,761 | py | Python | src/vegasflow/vflowplus.py | N3PDF/vegasflow | 21209c928d07c00ae4f789d03b83e518621f174a | [
"Apache-2.0"
] | 20 | 2020-03-02T22:01:54.000Z | 2022-03-30T11:43:48.000Z | src/vegasflow/vflowplus.py | N3PDF/vegasflow | 21209c928d07c00ae4f789d03b83e518621f174a | [
"Apache-2.0"
] | 26 | 2020-02-28T12:27:38.000Z | 2021-09-30T07:54:12.000Z | src/vegasflow/vflowplus.py | N3PDF/vegasflow | 21209c928d07c00ae4f789d03b83e518621f174a | [
"Apache-2.0"
] | 5 | 2020-07-31T08:57:56.000Z | 2021-11-05T15:09:14.000Z | """
Implementation of vegas+ algorithm:
adaptive importance sampling + adaptive stratified sampling
from https://arxiv.org/abs/2009.05112
The main interface is the `VegasFlowPlus` class.
"""
from itertools import product
import numpy as np
import tensorflow as tf
from vegasflow.configflow import (
DTYPE,
DTYPEINT,
fone,
fzero,
float_me,
int_me,
BINS_MAX,
BETA,
MAX_NEVAL_HCUBE,
)
from vegasflow.monte_carlo import wrapper, sampler, MonteCarloFlow
from vegasflow.vflow import VegasFlow, importance_sampling_digest
from vegasflow.utils import consume_array_into_indices
import logging
logger = logging.getLogger(__name__)
FBINS = float_me(BINS_MAX)
@tf.function(
input_signature=[
tf.TensorSpec(shape=[None, None], dtype=DTYPE),
tf.TensorSpec(shape=[], dtype=DTYPEINT),
tf.TensorSpec(shape=[None], dtype=DTYPEINT),
tf.TensorSpec(shape=[None, None], dtype=DTYPEINT),
tf.TensorSpec(shape=[None, None], dtype=DTYPE),
]
)
def generate_samples_in_hypercubes(rnds, n_strat, n_ev, hypercubes, divisions):
"""Receives an array of random numbers 0 and 1 and
distribute them in each hypercube according to the
number of samples in each hypercube specified by n_ev
Parameters
----------
    `rnds`: tensor of random numbers between 0 and 1
`n_strat`: tensor with number of stratifications in each dimension
`n_ev`: tensor containing number of samples per hypercube
`hypercubes`: tensor containing all different hypercube
`divisions`: vegas grid
Returns
-------
`x` : random numbers collocated in hypercubes
`w` : weight of each event
`ind`: division index in which each (n_dim) set of random numbers fall
    `segm` : segmentation for later computations
"""
    # Use the event-per-hypercube information to assign each random event to a hypercube
indices = tf.repeat(tf.range(tf.shape(hypercubes, out_type=DTYPEINT)[0]), n_ev)
points = float_me(tf.gather(hypercubes, indices))
n_evs = float_me(tf.gather(n_ev, indices))
# Compute in which division of the importance_sampling grid the points fall
xn = tf.transpose(points + rnds) * FBINS / float_me(n_strat)
ind_xn, x, weights = importance_sampling_digest(xn, divisions)
    # Reweight taking into account the number of events per hypercube
final_weights = weights / n_evs
segm = indices
return x, ind_xn, final_weights, segm
class VegasFlowPlus(VegasFlow):
"""
Implementation of the VEGAS+ algorithm
"""
def __init__(self, n_dim, n_events, train=True, adaptive=False, events_limit=None, **kwargs):
# https://github.com/N3PDF/vegasflow/issues/78
if events_limit is None:
logger.info("Events per device limit set to %d", n_events)
events_limit = n_events
elif events_limit < n_events:
logger.warning("VegasFlowPlus needs to hold all events in memory at once, "
"setting the `events_limit` to be equal to `n_events=%d`", n_events)
events_limit = n_events
super().__init__(n_dim, n_events, train, events_limit=events_limit, **kwargs)
# Save the initial number of events
self._init_calls = n_events
# Don't use adaptive if the number of dimension is too big
if n_dim > 13 and adaptive:
self._adaptive = False
logger.warning("Disabling adaptive mode from VegasFlowPlus, too many dimensions!")
else:
self._adaptive = adaptive
# Initialize stratifications
if self._adaptive:
neval_eff = int(self.n_events / 2)
self._n_strat = tf.math.floor(tf.math.pow(neval_eff / 2, 1 / n_dim))
else:
neval_eff = self.n_events
self._n_strat = tf.math.floor(tf.math.pow(neval_eff / 2, 1 / n_dim))
if tf.math.pow(self._n_strat, n_dim) > MAX_NEVAL_HCUBE:
self._n_strat = tf.math.floor(tf.math.pow(1e4, 1 / n_dim))
self._n_strat = int_me(self._n_strat)
# Initialize hypercubes
hypercubes_one_dim = np.arange(0, int(self._n_strat))
hypercubes = [list(p) for p in product(hypercubes_one_dim, repeat=int(n_dim))]
self._hypercubes = tf.convert_to_tensor(hypercubes, dtype=DTYPEINT)
if len(hypercubes) != int(tf.math.pow(self._n_strat, n_dim)):
raise ValueError("Hypercubes are not equal to n_strat^n_dim")
self.min_neval_hcube = int(neval_eff // len(hypercubes))
self.min_neval_hcube = max(self.min_neval_hcube, 2)
self.n_ev = tf.fill([1, len(hypercubes)], self.min_neval_hcube)
self.n_ev = int_me(tf.reshape(self.n_ev, [-1]))
self._n_events = int(tf.reduce_sum(self.n_ev))
self.my_xjac = float_me(1 / len(hypercubes))
if self._adaptive:
logger.warning("Variable number of events requires function signatures all across")
def redistribute_samples(self, arr_var):
"""Receives an array with the variance of the integrand in each
        hypercube and recalculates the samples per hypercube according
        to the VEGAS+ algorithm"""
damped_arr_sdev = tf.pow(arr_var, BETA / 2)
new_n_ev = tf.maximum(
self.min_neval_hcube,
damped_arr_sdev * self._init_calls / 2 / tf.reduce_sum(damped_arr_sdev),
)
self.n_ev = int_me(new_n_ev)
self.n_events = int(tf.reduce_sum(self.n_ev))
def _generate_random_array(self, n_events):
"""Interface compatible with other algorithms dropping the segmentation in hypercubes"""
x, ind, w, _ = self._generate_random_array_plus(n_events, self.n_ev)
return x, ind, w
def _generate_random_array_plus(self, n_events, n_ev):
"""Generate a random array for a given number of events divided in hypercubes"""
# Needs to skip parent and go directly to the random array generation of MonteCarloFlow
rnds, _, _ = MonteCarloFlow._generate_random_array(self, n_events)
# Get random numbers from hypercubes
x, ind, w, segm = generate_samples_in_hypercubes(
rnds,
self._n_strat,
n_ev,
self._hypercubes,
self.divisions,
)
return x, ind, w * self.my_xjac, segm
def _run_event(self, integrand, ncalls=None, n_ev=None):
"""Run one step of VegasFlowPlus
Similar to the event step for importance sampling VegasFlow
adding the n_ev argument for the segmentation into hypercubes
n_ev is a tensor containing the number of samples per hypercube
Parameters
----------
`integrand`: function to integrate
`ncalls`: how many events to run in this step
`n_ev`: number of samples per hypercube
Returns
-------
        `res`: sum of the result of the integrand for all events per segment
`res2`: sum of the result squared of the integrand for all events per segment
`arr_res2`: result of the integrand squared per dimension and grid bin
"""
# NOTE: needs to receive both ncalls and n_ev
x, ind, xjac, segm = self._generate_random_array_plus(ncalls, n_ev)
# compute integrand
tmp = xjac * integrand(x, weight=xjac)
tmp2 = tf.square(tmp)
        # tensor containing the resummed component for each hypercube
ress = tf.math.segment_sum(tmp, segm)
ress2 = tf.math.segment_sum(tmp2, segm)
fn_ev = float_me(n_ev)
arr_var = ress2 * fn_ev - tf.square(ress)
arr_res2 = self._importance_sampling_array_filling(tmp2, ind)
return ress, arr_var, arr_res2
def _iteration_content(self):
"""Steps to follow per iteration
Differently from importance-sampling Vegas, the result of the integration
is a result _per segment_ and thus the total result needs to be computed at this point
"""
ress, arr_var, arr_res2 = self.run_event(n_ev=self.n_ev)
        # Compute the error
sigmas2 = tf.maximum(arr_var, fzero)
res = tf.reduce_sum(ress)
sigma2 = tf.reduce_sum(sigmas2 / (float_me(self.n_ev) - fone))
sigma = tf.sqrt(sigma2)
# If adaptive is active redistribute the samples
if self._adaptive:
self.redistribute_samples(arr_var)
if self.train:
self.refine_grid(arr_res2)
return res, sigma
def run_event(self, tensorize_events=None, **kwargs):
"""Tensorizes the number of events
so they are not python or numpy primitives if self._adaptive=True"""
return super().run_event(tensorize_events=self._adaptive, **kwargs)
def vegasflowplus_wrapper(integrand, n_dim, n_iter, total_n_events, **kwargs):
"""Convenience wrapper
Parameters
----------
`integrand`: tf.function
`n_dim`: number of dimensions
`n_iter`: number of iterations
`n_events`: number of events per iteration
Returns
-------
`final_result`: integral value
`sigma`: monte carlo error
"""
return wrapper(VegasFlowPlus, integrand, n_dim, n_iter, total_n_events, **kwargs)
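# Illustrative usage sketch (hypothetical integrand; uses the wrapper above):
#   import tensorflow as tf
#   def my_integrand(xarr, weight=None):
#       return tf.reduce_sum(xarr, axis=1)
#   result, sigma = vegasflowplus_wrapper(my_integrand, n_dim=3, n_iter=5,
#                                         total_n_events=int(1e5))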
def vegasflowplus_sampler(*args, **kwargs):
"""Convenience wrapper for sampling random numbers
Parameters
----------
`integrand`: tf.function
`n_dim`: number of dimensions
`n_events`: number of events per iteration
`training_steps`: number of training_iterations
Returns
-------
`sampler`: a reference to the generate_random_array method of the integrator class
"""
return sampler(VegasFlowPlus, *args, **kwargs)
| 36.421642 | 97 | 0.65649 | 6,275 | 0.642864 | 0 | 0 | 1,795 | 0.183895 | 0 | 0 | 4,158 | 0.425981 |
adc14f4a93a1042eb839691caa5df56d8072eb88 | 4,259 | py | Python | data_collector/neo-wrapper.py | cardwizard/vulnerable-python-ecosystem | 582fda1f45bbe912352ed31a302d798365628713 | [
"MIT"
] | null | null | null | data_collector/neo-wrapper.py | cardwizard/vulnerable-python-ecosystem | 582fda1f45bbe912352ed31a302d798365628713 | [
"MIT"
] | null | null | null | data_collector/neo-wrapper.py | cardwizard/vulnerable-python-ecosystem | 582fda1f45bbe912352ed31a302d798365628713 | [
"MIT"
] | null | null | null | from neo4j import GraphDatabase
from tqdm import tqdm
port = 7688
data_uri = 'bolt://localhost:' + str(port)
username = 'neo4j'
password = 'abc123'
# data_creds = (username, password)
data_creds = None
driver = GraphDatabase.driver(data_uri, auth=data_creds)
def close_db():
driver.close()
def clear_db():
with driver.session() as session:
session.run("MATCH (n) DETACH DELETE n")
print("Cleared graph.")
"""
@param pkg_data : Python dictionary of package data to be inserted
"""
def push_pkg(pkg_data):
name = pkg_data['name']
    #maintainer_info = (pkg_data['main_name'], pkg_data['main_email'])
    #author_info = (pkg_data['auth_name'], pkg_data['auth_email'])
    maintainer_info = pkg_data['main_email']
    author_info = pkg_data['auth_email']
downloads = pkg_data['downloads']
lic = pkg_data['license']
    version = pkg_data.get('version', '')  # the test dicts below omit 'version'
    dependencies = pkg_data['dep_list']
with driver.session() as session:
result = session.run("MATCH (n:Package {name: $name}) RETURN id(n)", name=name).single()
        if result is None:
result = session.run("CREATE (n:Package {name: $name,"
"main_email:$main_emailauth_email:$auth_email, version: $version"
"downloads: $downloads, license: $license}) RETURN id(n)",
name=name,
main_email=maintainer_info,
auth_email=author_info, version=version,
downloads=downloads, license=lic).single().value()
else:
pkg_id = result.value()
session.run("MATCH (n:Package) WHERE id(n) = $pkg_id "
"SET n.main_email = $main_email "
"SET n.auth_email = $auth_email "
"SET n.version = $version"
"SET n.downloads = $downloads "
"SET n.license = $license",
pkg_id=pkg_id,
main_email=maintainer_info,
auth_email=author_info,
version=version,
downloads=downloads, license=lic
)
        for dep in dependencies:
if dep == "UNKNOWN":
continue
try:
#dep_id = session.run("MATCH (n:Package {name: $name}) RETURN id(n)", name=dep).single()
#session.run("MERGE (n:Package {name: $name})-[:REQURES]->(m:Package {name: $mname})", name=name, mname=dep)
session.run("MATCH (n:Package {name: $name}) "
"MERGE (m:Package {name: $depname}) "
"MERGE (n)-[:REQUIRES]->(m)",
name=name, depname=dep)
except Exception as e:
print("Exception", e.__str__())
def get_dependency_counts(packages):
dep_counts = [0] * len(packages)
with driver.session() as session:
for i, pkg_name in tqdm(enumerate(packages)):
result = session.run("match (n {name: $name})<-[*1..]-(dst) return count(distinct dst)", name=pkg_name).single()
            if result is None:
continue
else:
dep_counts[i] = result.value()
return dep_counts
if __name__ == '__main__':
print("Running tests for neo-wrapper.")
clear_db()
pkg1 = {
"name": "pack3",
"main_name": "Josh",
"main_email": "Josh@email.com",
"auth_name": "Aadesh",
"auth_email": "Aadesh@email.com",
"downloads": 30,
"license": "Apache v2.0",
"dep_list": ["packone", "packtwo"]
}
push_pkg(pkg1)
pkg2 = {
"name": "testpack",
"main_name": "Jane",
"main_email": "Jane@email.com",
"auth_name": "Aadesh",
"auth_email": "Aadesh@email.com",
"downloads": 20,
"license": "Apache v2.0",
"dep_list": ["pack3", "packtwo"]
}
push_pkg(pkg2)
pkg3 = {
"name": "packtwo",
"main_name": "Mike",
"main_email": "Jike@email.com",
"auth_name": "Heather",
"auth_email": "Heather@email.com",
"downloads": 20,
"license": "Apache v2.0",
"dep_list": []
}
push_pkg(pkg3)
close_db()
print("Done.")
| 28.972789 | 124 | 0.538154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,703 | 0.399859 |
adc27f0b84c9f11612869bc82c5ec3b7f4947bbd | 12,441 | py | Python | kopf/storage/progress.py | michaelnarodovitch/kopf | df4e51bf5ca3c1fc9e609ffeb4cc951925135db2 | [
"MIT"
] | null | null | null | kopf/storage/progress.py | michaelnarodovitch/kopf | df4e51bf5ca3c1fc9e609ffeb4cc951925135db2 | [
"MIT"
] | null | null | null | kopf/storage/progress.py | michaelnarodovitch/kopf | df4e51bf5ca3c1fc9e609ffeb4cc951925135db2 | [
"MIT"
] | null | null | null | """
State stores are used to track the handlers' states across handling cycles.
Specifically, they track which handlers are finished, which are not yet,
and how many retries were there, and some other information.
There could be more than one low-level k8s watch-event per one actual
high-level kopf-event (a cause). The handlers are called at different times,
and the overall handling routine should persist the handler status somewhere.
When the full event cycle is executed (possibly including multiple re-runs),
the state of all involved handlers is purged. The life-long persistence of state
is not intended: otherwise, multiple distinct causes will clutter the status
and collide with each other (especially critical for multiple updates).
Other unrelated handlers (e.g. from other operators) can co-exist with
the involved handlers (if stored in the resource itself), as the handler states
are independent of each other, and are purged individually, not all at once.
---
Originally, the handlers' state was persisted in ``.status.kopf.progress``.
But due to stricter Kubernetes schemas for built-in resources, they had to
move to annotations. As part of such move, any state persistence engines
are made possible by inheriting and overriding the base classes, though it is
considered an advanced use-case and is only briefly mentioned in the docs.
In all cases, the persisted state for each handler is a fixed-structure dict
with the following keys:
* ``started`` is a timestamp when the handler was first called.
* ``stopped`` is a timestamp when the handler either finished or failed.
* ``delayed`` is a timestamp when the handler should be invoked again (retried).
* ``retries`` is a number of retries so far or in total (if succeeded/failed).
* ``success`` is a boolean flag for a final success (no re-executions).
* ``failure`` is a boolean flag for a final failure (no retries).
* ``message`` is a descriptive message of the last error (an exception).
All timestamps are strings in ISO8601 format in UTC (no explicit ``Z`` suffix).
"""
import abc
import copy
import json
from typing import Optional, Collection, Mapping, Dict, Any, cast
from typing_extensions import TypedDict
from kopf.structs import bodies
from kopf.structs import dicts
from kopf.structs import handlers
from kopf.structs import patches
class ProgressRecord(TypedDict, total=True):
""" A single record stored for persistence of a single handler. """
started: Optional[str]
stopped: Optional[str]
delayed: Optional[str]
retries: Optional[int]
success: Optional[bool]
failure: Optional[bool]
message: Optional[str]
class ProgressStorage(metaclass=abc.ABCMeta):
"""
Base class and an interface for all persistent states.
The state is persisted strict per-handler, not for all handlers at once:
to support overlapping operators (assuming different handler ids) storing
their state on the same fields of the resource (e.g. ``state.kopf``).
This also ensures that no extra logic for state merges will be needed:
the handler states are atomic (i.e. state fields are not used separately)
but independent: i.e. handlers should be persisted on their own, unrelated
to other handlers; i.e. never combined to other atomic structures.
If combining is still needed with performance optimization in mind (e.g.
for relational/transactional databases), the keys can be cached in memory
for short time, and ``flush()`` can be overridden to actually store them.
"""
@abc.abstractmethod
def fetch(
self,
*,
key: handlers.HandlerId,
body: bodies.Body,
) -> Optional[ProgressRecord]:
raise NotImplementedError
@abc.abstractmethod
def store(
self,
*,
key: handlers.HandlerId,
record: ProgressRecord,
body: bodies.Body,
patch: patches.Patch,
) -> None:
raise NotImplementedError
@abc.abstractmethod
def purge(
self,
*,
key: handlers.HandlerId,
body: bodies.Body,
patch: patches.Patch,
) -> None:
raise NotImplementedError
@abc.abstractmethod
def clear(self, *, essence: bodies.BodyEssence) -> bodies.BodyEssence:
return copy.deepcopy(essence)
def flush(self) -> None:
pass
class AnnotationsProgressStorage(ProgressStorage):
"""
State storage in ``.metadata.annotations`` with JSON-serialised content.
An example without a prefix:
.. code-block: yaml
metadata:
annotations:
create_fn_1: '{"started": "2020-02-14T16:58:25.396364", "stopped":
"2020-02-14T16:58:25.401844", "retries": 1, "success": true}'
create_fn_2: '{"started": "2020-02-14T16:58:25.396421", "retries": 0}'
spec: ...
status: ...
An example with a prefix:
.. code-block: yaml
metadata:
annotations:
kopf.zalando.org/create_fn_1: '{"started": "2020-02-14T16:58:25.396364", "stopped":
"2020-02-14T16:58:25.401844", "retries": 1, "success": true}'
kopf.zalando.org/create_fn_2: '{"started": "2020-02-14T16:58:25.396421", "retries": 0}'
spec: ...
status: ...
"""
def __init__(
self,
*,
prefix: Optional[str] = 'kopf.zalando.org',
verbose: bool = False,
) -> None:
super().__init__()
self.prefix = prefix
self.verbose = verbose
def fetch(
self,
*,
key: handlers.HandlerId,
body: bodies.Body,
) -> Optional[ProgressRecord]:
safe_key = key.replace('/', '.')
full_key = f'{self.prefix}/{safe_key}' if self.prefix else safe_key
value = body.metadata.annotations.get(full_key, None)
content = json.loads(value) if value is not None else None
return cast(Optional[ProgressRecord], content)
def store(
self,
*,
key: handlers.HandlerId,
record: ProgressRecord,
body: bodies.Body,
patch: patches.Patch,
) -> None:
safe_key = key.replace('/', '.')
full_key = f'{self.prefix}/{safe_key}' if self.prefix else safe_key
clean_data = {key: val for key, val in record.items() if self.verbose or val is not None}
patch.meta.annotations[full_key] = json.dumps(clean_data)
def purge(
self,
*,
key: handlers.HandlerId,
body: bodies.Body,
patch: patches.Patch,
) -> None:
safe_key = key.replace('/', '.')
full_key = f'{self.prefix}/{safe_key}' if self.prefix else safe_key
if full_key in body.metadata.annotations or full_key in patch.meta.annotations:
patch.meta.annotations[full_key] = None
def clear(self, *, essence: bodies.BodyEssence) -> bodies.BodyEssence:
essence = super().clear(essence=essence)
annotations = essence.get('metadata', {}).get('annotations', {})
for name in list(annotations.keys()):
if name.startswith(f'{self.prefix}/'):
del annotations[name]
return essence
class StatusProgressStorage(ProgressStorage):
"""
State storage in ``.status`` stanza with deep structure.
The structure is this:
.. code-block: yaml
metadata: ...
spec: ...
status: ...
kopf:
progress:
handler1:
started: 2018-12-31T23:59:59,999999
stopped: 2018-01-01T12:34:56,789000
success: true
handler2:
started: 2018-12-31T23:59:59,999999
stopped: 2018-01-01T12:34:56,789000
failure: true
message: "Error message."
handler3:
started: 2018-12-31T23:59:59,999999
retries: 30
handler3/sub1:
started: 2018-12-31T23:59:59,999999
delayed: 2018-01-01T12:34:56,789000
retries: 10
message: "Not ready yet."
handler3/sub2:
started: 2018-12-31T23:59:59,999999
"""
def __init__(
self,
*,
name: str = 'kopf',
field: dicts.FieldSpec = 'status.{name}.progress',
) -> None:
super().__init__()
self._name = name
real_field = field.format(name=self._name) if isinstance(field, str) else field
self._field = dicts.parse_field(real_field)
@property
def field(self) -> dicts.FieldPath:
return self._field
@field.setter
def field(self, field: dicts.FieldSpec) -> None:
real_field = field.format(name=self._name) if isinstance(field, str) else field
self._field = dicts.parse_field(real_field)
def fetch(
self,
*,
key: handlers.HandlerId,
body: bodies.Body,
) -> Optional[ProgressRecord]:
container: Mapping[handlers.HandlerId, ProgressRecord]
container = dicts.resolve(body, self.field, {})
return container.get(key, None)
def store(
self,
*,
key: handlers.HandlerId,
record: ProgressRecord,
body: bodies.Body,
patch: patches.Patch,
) -> None:
# Nones are cleaned by K8s API itself.
dicts.ensure(patch, self.field + (key,), record)
def purge(
self,
*,
key: handlers.HandlerId,
body: bodies.Body,
patch: patches.Patch,
) -> None:
absent = object()
key_field = self.field + (key,)
body_value = dicts.resolve(body, key_field, absent, assume_empty=True)
patch_value = dicts.resolve(patch, key_field, absent, assume_empty=True)
if body_value is not absent:
dicts.ensure(patch, key_field, None)
elif patch_value is not absent:
dicts.remove(patch, key_field)
def clear(self, *, essence: bodies.BodyEssence) -> bodies.BodyEssence:
essence = super().clear(essence=essence)
# Work around an issue with mypy not treating TypedDicts as MutableMappings.
essence_dict = cast(Dict[Any, Any], essence)
dicts.remove(essence_dict, self.field)
return essence
class MultiProgressStorage(ProgressStorage):
def __init__(
self,
storages: Collection[ProgressStorage],
) -> None:
super().__init__()
self.storages = storages
def fetch(
self,
*,
key: handlers.HandlerId,
body: bodies.Body,
) -> Optional[ProgressRecord]:
for storage in self.storages:
content = storage.fetch(key=key, body=body)
if content is not None:
return content
return None
def store(
self,
*,
key: handlers.HandlerId,
record: ProgressRecord,
body: bodies.Body,
patch: patches.Patch,
) -> None:
for storage in self.storages:
storage.store(key=key, record=record, body=body, patch=patch)
def purge(
self,
*,
key: handlers.HandlerId,
body: bodies.Body,
patch: patches.Patch,
) -> None:
for storage in self.storages:
storage.purge(key=key, body=body, patch=patch)
def clear(self, *, essence: bodies.BodyEssence) -> bodies.BodyEssence:
for storage in self.storages:
essence = storage.clear(essence=essence)
return essence
class SmartProgressStorage(MultiProgressStorage):
def __init__(
self,
*,
name: str = 'kopf',
field: dicts.FieldSpec = 'status.{name}.progress',
prefix: str = 'kopf.zalando.org',
verbose: bool = False,
) -> None:
super().__init__([
AnnotationsProgressStorage(prefix=prefix, verbose=verbose),
StatusProgressStorage(name=name, field=field),
])
| 33.715447 | 99 | 0.600997 | 10,091 | 0.811108 | 0 | 0 | 1,090 | 0.087614 | 0 | 0 | 5,338 | 0.429065 |
adc6094a6d2837ad691dd4b73802f81c13e2f5a2 | 1,045 | py | Python | spotty/config/abstract_instance_config.py | Inculus/spotty | 56863012668a6c13ad13c2a04f900047e229fbe6 | [
"MIT"
] | 1 | 2020-07-17T07:02:09.000Z | 2020-07-17T07:02:09.000Z | spotty/config/abstract_instance_config.py | Inculus/spotty | 56863012668a6c13ad13c2a04f900047e229fbe6 | [
"MIT"
] | null | null | null | spotty/config/abstract_instance_config.py | Inculus/spotty | 56863012668a6c13ad13c2a04f900047e229fbe6 | [
"MIT"
] | null | null | null | from abc import ABC
class AbstractInstanceConfig(ABC):
def __init__(self, config: dict):
self._name = config['name']
self._provider_name = config['provider']
self._params = config['parameters']
@property
def name(self) -> str:
"""Name of the instance."""
return self._name
@property
def provider_name(self):
"""Provider name."""
return self._provider_name
@property
def volumes(self) -> list:
"""List of volume configs."""
return self._params['volumes']
@property
def docker_data_root(self) -> str:
"""Data root directory for Docker daemon."""
return self._params['dockerDataRoot']
@property
def local_ssh_port(self) -> int:
"""Local SSH port to connect to the instance (in case of a tunnel)."""
return self._params['localSshPort']
@property
def commands(self) -> str:
"""Commands that should be run once an instance is started."""
return self._params['commands']
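# Illustrative config dict a concrete subclass would receive (hypothetical values):
#   {'name': 'i1', 'provider': 'aws',
#    'parameters': {'volumes': [], 'dockerDataRoot': '/var/lib/docker',
#                   'localSshPort': 2222, 'commands': 'echo ready'}}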
| 26.125 | 78 | 0.61244 | 1,022 | 0.97799 | 0 | 0 | 784 | 0.750239 | 0 | 0 | 329 | 0.314833 |
adc70061227488de161786ea30ee1410bc56c27d | 157 | py | Python | scripts/helper.py | skurscheid/camda2019-workflows | 45b10ef9b4b44655fc4b0c59f73c20bc003f70e2 | [
"MIT"
] | 1 | 2020-04-20T16:39:49.000Z | 2020-04-20T16:39:49.000Z | scripts/helper.py | skurscheid/camda2019-workflows | 45b10ef9b4b44655fc4b0c59f73c20bc003f70e2 | [
"MIT"
] | 1 | 2019-05-03T22:31:32.000Z | 2019-05-03T22:34:01.000Z | scripts/helper.py | skurscheid/camda2019-workflows | 45b10ef9b4b44655fc4b0c59f73c20bc003f70e2 | [
"MIT"
] | null | null | null | def get_failed_ids(txt_file):
    # Rows are expected to look like "<dir>/<id>.<ext>", one path per line.
    ids = []
    with open(txt_file, 'r') as fh:  # context manager ensures the file is closed
        for row in fh:
            ids.append(row.split('/')[1].split('.')[0])
    return ids
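# Hypothetical usage: for a file containing the line "fastq/SRR123.fastq",
# get_failed_ids('failed.txt') returns ['SRR123'].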
| 19.625 | 50 | 0.528662 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.057325 |
adc761d738559e1c727ca375143dce7cda714b4b | 887 | py | Python | setup.py | lizhizhou/django_tidb | b615083c159d0c542ef1632810f3ca5c892d7175 | [
"Apache-2.0"
] | null | null | null | setup.py | lizhizhou/django_tidb | b615083c159d0c542ef1632810f3ca5c892d7175 | [
"Apache-2.0"
] | null | null | null | setup.py | lizhizhou/django_tidb | b615083c159d0c542ef1632810f3ca5c892d7175 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from distutils.core import setup
long_description = """TiDB backend for Django"""
setup(
name='django_tidb',
version='2.1',
author='Rain Li',
author_email='blacktear23@gmail.com',
url='http://github.com/blacktear23/django_tidb',
    download_url='http://github.com/blacktear23/django_tidb/archive/2.1.tar.gz',
description='TiDB backend for Django',
long_description=long_description,
keywords=['django', 'tidb'],
packages=['django_tidb', 'django_tidb.tidb'],
license='Apache2',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Software Development :: Libraries',
],
)
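# Hypothetical Django settings fragment (engine path inferred from the packages
# listed above; 4000 is TiDB's default client port -- verify against the README):
#
#   DATABASES = {
#       'default': {
#           'ENGINE': 'django_tidb.tidb',
#           'NAME': 'mydb',
#           'HOST': '127.0.0.1',
#           'PORT': 4000,
#           'USER': 'root',
#           'PASSWORD': '',
#       }
#   }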
| 31.678571 | 79 | 0.657272 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 541 | 0.609921 |
adc85f255e45341493cae7d856bac908be193a3e | 6,347 | py | Python | common/utils/utils.py | hvsuchitra/tv_tracker | 5415d177fe9a4e16ec39d9812e9502840bba5b12 | [
"MIT"
] | null | null | null | common/utils/utils.py | hvsuchitra/tv_tracker | 5415d177fe9a4e16ec39d9812e9502840bba5b12 | [
"MIT"
] | null | null | null | common/utils/utils.py | hvsuchitra/tv_tracker | 5415d177fe9a4e16ec39d9812e9502840bba5b12 | [
"MIT"
] | null | null | null | import smtplib
def get_binary(src_file):
with open(src_file, 'rb') as f:
return f.read()
def send_mail(to, username, password, message_type='account_creation'):
server = 'smtp.mail.me.com'
port = 587
    email = 'mailid'        # placeholder sender address, not a real account
    _password = 'password'  # placeholder credential; do not commit real secrets
if message_type == 'account_creation':
message = f'''Subject: Welcome to TV Tracker
From: TV Tracker Dev<{email}>
To: {to}
Thank You for registering. Your username is {username} and password is {password}.
Have a nice day 0:)'''
elif message_type == 'password_change':
message = f'''Subject: TV Track Password Change
From: TV Tracker Dev<{email}>
To: {to}
The password to your TV Tracker account {username} was changed to {password}.
If you have not made this change, reply to this email to deactivate your account.
Have a nice day 0:)'''
elif message_type == 'reset_password':
message = f'''Subject: TV Track Password Change
From: TV Tracker Dev<{email}>
To: {to}
The password to your TV Tracker account {username} was reset to {password}.
Use this password the next time to login.
Have a nice day 0:)'''
    else:
        # Guard: an unrecognised message_type would otherwise leave `message`
        # undefined and crash below with a NameError.
        raise ValueError(f'unknown message_type: {message_type}')
    with smtplib.SMTP(server, port) as server:
server.starttls()
server.login(email, _password)
server.sendmail(from_addr=email, to_addrs=to, msg=message)
from pathlib import Path
def get_path(path, to_str=True):
app_root = Path('../common').resolve()
return f'{app_root / path}' if to_str else app_root / path
from PyQt5 import QtCore
from PyQt5.QtGui import QImage, QPainter, QBrush, QColor
def make_trans(image, opaque_factor):
temp = QImage(image.size(), QImage.Format_ARGB32)
temp.fill(QtCore.Qt.transparent)
painter = QPainter(temp)
painter.setOpacity(opaque_factor)
painter.drawImage(QtCore.QRect(0, 0, image.width(), image.height()), image)
    painter.end()  # finish painting before the image is used elsewhere
    return temp
def make_dark(image, dark_factor):
painter = QPainter(image)
brush = QBrush(QColor(0, 0, 0, dark_factor))
painter.setBrush(brush)
painter.drawRect(0, 0, image.width(), image.height())
    painter.end()  # release the paint device before handing the image back
    return image
from random import choice
from json import load
def random_thought():
with open(get_path('resources/misc/quotes.json')) as file_obj:
random_quote = choice(load(file_obj))
return random_quote['text'], random_quote['author']
from string import ascii_lowercase, ascii_uppercase, digits, punctuation
from secrets import choice as secret_choice
from random import shuffle, randint
def generate_password():
characters = [ascii_lowercase, ascii_uppercase, digits, punctuation]
shuffle(characters)
random_password = [*map(secret_choice, characters)]
random_password.extend(secret_choice(secret_choice(characters)) for _ in range(randint(4, 12)))
shuffle(random_password)
return ''.join(random_password)
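# Net effect: at least one character from each of the four classes, with a
# total length between 8 and 16 (4 guaranteed picks plus 4-12 extra picks).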
from PyQt5.QtCore import pyqtSignal, Qt, QThread
from PyQt5.QtWidgets import QLabel
from PyQt5.QtGui import QPixmap
class ClickableLabel(QLabel):
clicked = pyqtSignal(str)
def __init__(self, name=None, src=None):
super().__init__()
self.setObjectName(name)
if name is not None and src is not None:
if name != 'profile':
self.setPixmap(QPixmap.fromImage(QImage(get_path(src))).scaled(100, 100, Qt.KeepAspectRatio))
else:
self.setPixmap(circle_crop(src).scaled(100, 100, Qt.KeepAspectRatio))
def mousePressEvent(self, event):
self.clicked.emit(self.objectName())
class SendMailThread(QThread):
signal = pyqtSignal('PyQt_PyObject')
    def __init__(self, to, username, password, message_type='account_creation'):
        super().__init__()
        self.to = to
        self.username = username
        self.password = password
        # run() reads self.message_type; previously it was never set here and
        # had to be assigned externally before start().
        self.message_type = message_type
def run(self):
send_mail(self.to, self.username, self.password, self.message_type)
from PyQt5.QtCore import Qt, QRect
from PyQt5.QtGui import QBrush, QImage, QPainter, QPixmap, QWindow
from PyQt5.QtWidgets import QLabel, QVBoxLayout, QWidget
def circle_crop(image):
size = 100
image = QImage.fromData(image)
    # convertToFormat returns a converted copy; the original code discarded it.
    image = image.convertToFormat(QImage.Format_ARGB32)
imgsize = min(image.width(), image.height())
    rect = QRect((image.width() - imgsize) // 2, (image.height() - imgsize) // 2, imgsize, imgsize)  # QRect takes ints
image = image.copy(rect)
out_img = QImage(image.size(), QImage.Format_ARGB32)
out_img.fill(Qt.transparent)
brush = QBrush(image)
painter = QPainter(out_img)
painter.setBrush(brush)
painter.setPen(Qt.NoPen)
painter.setRenderHint(QPainter.Antialiasing, True)
painter.drawEllipse(0, 0, imgsize, imgsize)
painter.end()
pr = QWindow().devicePixelRatio()
pm = QPixmap.fromImage(out_img)
# pm.setDevicePixelRatio(pr)
# size*=pr
# pm=pm.scaled(size,size,Qt.KeepAspectRatio,Qt.SmoothTransformation)
return pm
from PyQt5.QtCore import QTimeLine
from PyQt5.QtWidgets import QCalendarWidget, QGridLayout, QStackedWidget, QTextEdit
class FaderWidget(QWidget):
def __init__(self, old_widget, new_widget):
QWidget.__init__(self, new_widget)
self.pixmap_opacity = 1.0
self.old_pixmap = QPixmap(new_widget.size())
old_widget.render(self.old_pixmap)
self.timeline = QTimeLine()
self.timeline.valueChanged.connect(self.animate)
self.timeline.finished.connect(self.close)
self.timeline.setDuration(333)
self.timeline.start()
self.resize(new_widget.size())
self.show()
def paintEvent(self, event):
painter = QPainter(self)
painter.setOpacity(self.pixmap_opacity)
painter.drawPixmap(0, 0, self.old_pixmap)
def animate(self, value):
self.pixmap_opacity = 1.0 - value
self.update()
class StackedWidget(QStackedWidget):
clicked = pyqtSignal(str)
def __init__(self, name):
super().__init__()
self.setEnabled(True)
self.setObjectName(name)
def setCurrentIndex(self, index):
if self.currentIndex() != index:
self.fader_widget = FaderWidget(self.currentWidget(), self.widget(index))
super().setCurrentIndex(index)
def enterEvent(self, event):
self.setCurrentIndex(1)
def leaveEvent(self, event):
self.setCurrentIndex(0)
def mousePressEvent(self, QMouseEvent):
self.clicked.emit(self.objectName())
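# Usage sketch (illustrative; the page widgets and slot are hypothetical):
# a two-page StackedWidget cross-fades to page 1 on hover because enterEvent /
# leaveEvent call the animated setCurrentIndex above.
#
#   stack = StackedWidget('poster')
#   stack.addWidget(normal_page)   # index 0, shown by default
#   stack.addWidget(hover_page)    # index 1, shown while hovered
#   stack.clicked.connect(on_poster_clicked)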
| 27.595652 | 109 | 0.688357 | 2,272 | 0.357964 | 0 | 0 | 0 | 0 | 0 | 0 | 977 | 0.153931 |
adc89f4cb99225189b2a24aea5a5be1d847b7fa7 | 10,610 | py | Python | venv/lib/python3.8/site-packages/ansible_collections/community/aws/plugins/modules/lambda_alias.py | saeedya/docker-ansible | 6fb0cfc6bc4a5925b21380952a5a4502ec02119a | [
"Apache-2.0"
] | 7 | 2021-11-16T04:05:42.000Z | 2022-02-19T21:14:29.000Z | venv/lib/python3.8/site-packages/ansible_collections/community/aws/plugins/modules/lambda_alias.py | saeedya/docker-ansible | 6fb0cfc6bc4a5925b21380952a5a4502ec02119a | [
"Apache-2.0"
] | 1 | 2022-03-12T02:25:26.000Z | 2022-03-12T02:25:26.000Z | venv/lib/python3.8/site-packages/ansible_collections/community/aws/plugins/modules/lambda_alias.py | saeedya/docker-ansible | 6fb0cfc6bc4a5925b21380952a5a4502ec02119a | [
"Apache-2.0"
] | 1 | 2022-03-01T05:43:07.000Z | 2022-03-01T05:43:07.000Z | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: lambda_alias
version_added: 1.0.0
short_description: Creates, updates or deletes AWS Lambda function aliases
description:
- This module allows the management of AWS Lambda functions aliases via the Ansible
framework. It is idempotent and supports "Check" mode. Use module M(community.aws.lambda) to manage the lambda function
itself and M(community.aws.lambda_event) to manage event source mappings.
author: Pierre Jodouin (@pjodouin), Ryan Scott Brown (@ryansb)
options:
function_name:
description:
- The name of the function alias.
required: true
type: str
state:
description:
- Describes the desired state.
default: "present"
choices: ["present", "absent"]
type: str
name:
description:
- Name of the function alias.
required: true
aliases: ['alias_name']
type: str
description:
description:
- A short, user-defined function alias description.
type: str
function_version:
description:
- Version associated with the Lambda function alias.
A value of 0 (or omitted parameter) sets the alias to the $LATEST version.
aliases: ['version']
type: int
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
'''
EXAMPLES = '''
---
# Simple example to create a lambda function and publish a version
- hosts: localhost
gather_facts: no
vars:
state: present
project_folder: /path/to/deployment/package
deployment_package: lambda.zip
account: 123456789012
production_version: 5
tasks:
- name: AWS Lambda Function
lambda:
state: "{{ state | default('present') }}"
name: myLambdaFunction
publish: True
description: lambda function description
code_s3_bucket: package-bucket
code_s3_key: "lambda/{{ deployment_package }}"
local_path: "{{ project_folder }}/{{ deployment_package }}"
runtime: python2.7
timeout: 5
handler: lambda.handler
memory_size: 128
role: "arn:aws:iam::{{ account }}:role/API2LambdaExecRole"
- name: Get information
lambda_info:
name: myLambdaFunction
register: lambda_info
- name: show results
ansible.builtin.debug:
msg: "{{ lambda_info['lambda_facts'] }}"
# The following will set the Dev alias to the latest version ($LATEST) since version is omitted (or = 0)
- name: "alias 'Dev' for function {{ lambda_info.lambda_facts.FunctionName }} "
community.aws.lambda_alias:
state: "{{ state | default('present') }}"
function_name: "{{ lambda_info.lambda_facts.FunctionName }}"
name: Dev
description: Development is $LATEST version
# The QA alias will only be created when a new version is published (i.e. not = '$LATEST')
- name: "alias 'QA' for function {{ lambda_info.lambda_facts.FunctionName }} "
community.aws.lambda_alias:
state: "{{ state | default('present') }}"
function_name: "{{ lambda_info.lambda_facts.FunctionName }}"
name: QA
version: "{{ lambda_info.lambda_facts.Version }}"
description: "QA is version {{ lambda_info.lambda_facts.Version }}"
when: lambda_info.lambda_facts.Version != "$LATEST"
# The Prod alias will have a fixed version based on a variable
- name: "alias 'Prod' for function {{ lambda_info.lambda_facts.FunctionName }} "
community.aws.lambda_alias:
state: "{{ state | default('present') }}"
function_name: "{{ lambda_info.lambda_facts.FunctionName }}"
name: Prod
version: "{{ production_version }}"
description: "Production is version {{ production_version }}"
'''
RETURN = '''
---
alias_arn:
description: Full ARN of the function, including the alias
returned: success
type: str
sample: arn:aws:lambda:us-west-2:123456789012:function:myFunction:dev
description:
description: A short description of the alias
returned: success
type: str
sample: The development stage for my hot new app
function_version:
description: The qualifier that the alias refers to
returned: success
type: str
sample: $LATEST
name:
description: The name of the alias assigned
returned: success
type: str
sample: dev
revision_id:
description: A unique identifier that changes when you update the alias.
returned: success
type: str
sample: 12345678-1234-1234-1234-123456789abc
'''
import re
try:
import botocore
except ImportError:
pass # Handled by AnsibleAWSModule
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
def set_api_params(module, module_params):
"""
Sets non-None module parameters to those expected by the boto3 API.
:param module:
:param module_params:
:return:
"""
api_params = dict()
for param in module_params:
module_param = module.params.get(param, None)
if module_param:
api_params[param] = module_param
return snake_dict_to_camel_dict(api_params, capitalize_first=True)
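# Illustrative: with module.params == {'function_name': 'fn', 'name': 'dev'},
# set_api_params(module, ('function_name', 'name')) returns
# {'FunctionName': 'fn', 'Name': 'dev'} -- the CamelCase shape boto3 expects.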
def validate_params(module):
"""
Performs basic parameter validation.
:param module: AnsibleAWSModule reference
:return:
"""
function_name = module.params['function_name']
# validate function name
if not re.search(r'^[\w\-:]+$', function_name):
module.fail_json(
            msg='Function name {0} is invalid. Names may contain only alphanumeric characters, underscores, hyphens and colons.'.format(function_name)
)
if len(function_name) > 64:
module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
# if parameter 'function_version' is zero, set it to $LATEST, else convert it to a string
if module.params['function_version'] == 0:
module.params['function_version'] = '$LATEST'
else:
module.params['function_version'] = str(module.params['function_version'])
return
def get_lambda_alias(module, client):
"""
Returns the lambda function alias if it exists.
:param module: AnsibleAWSModule
:param client: (wrapped) boto3 lambda client
:return:
"""
# set API parameters
api_params = set_api_params(module, ('function_name', 'name'))
# check if alias exists and get facts
try:
results = client.get_alias(aws_retry=True, **api_params)
except is_boto3_error_code('ResourceNotFoundException'):
results = None
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg='Error retrieving function alias')
return results
def lambda_alias(module, client):
"""
Adds, updates or deletes lambda function aliases.
:param module: AnsibleAWSModule
:param client: (wrapped) boto3 lambda client
:return dict:
"""
results = dict()
changed = False
current_state = 'absent'
state = module.params['state']
facts = get_lambda_alias(module, client)
if facts:
current_state = 'present'
if state == 'present':
if current_state == 'present':
snake_facts = camel_dict_to_snake_dict(facts)
# check if alias has changed -- only version and description can change
alias_params = ('function_version', 'description')
for param in alias_params:
if module.params.get(param) is None:
continue
if module.params.get(param) != snake_facts.get(param):
changed = True
break
if changed:
api_params = set_api_params(module, ('function_name', 'name'))
api_params.update(set_api_params(module, alias_params))
if not module.check_mode:
try:
results = client.update_alias(aws_retry=True, **api_params)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Error updating function alias')
else:
# create new function alias
api_params = set_api_params(module, ('function_name', 'name', 'function_version', 'description'))
try:
if not module.check_mode:
results = client.create_alias(aws_retry=True, **api_params)
changed = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Error creating function alias')
else: # state = 'absent'
if current_state == 'present':
# delete the function
api_params = set_api_params(module, ('function_name', 'name'))
try:
if not module.check_mode:
results = client.delete_alias(aws_retry=True, **api_params)
changed = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Error deleting function alias')
return dict(changed=changed, **dict(results or facts or {}))
def main():
"""
Main entry point.
:return dict: ansible facts
"""
argument_spec = dict(
state=dict(required=False, default='present', choices=['present', 'absent']),
function_name=dict(required=True),
name=dict(required=True, aliases=['alias_name']),
function_version=dict(type='int', required=False, default=0, aliases=['version']),
description=dict(required=False, default=None),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[],
required_together=[],
)
client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff())
validate_params(module)
results = lambda_alias(module, client)
module.exit_json(**camel_dict_to_snake_dict(results))
if __name__ == '__main__':
main()
| 32.347561 | 130 | 0.669463 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,182 | 0.582658 |
adc8cf67fa5dfd7aed6e2a5df90113f994c20b3b | 1,148 | py | Python | tarkov/bots/generator/loot/_base.py | JustEmuTarkov/jet_py | 2f352b5e6f5d88594d08afc46e9458e919271788 | [
"MIT"
] | 14 | 2021-02-24T02:32:48.000Z | 2022-01-03T05:51:45.000Z | tarkov/bots/generator/loot/_base.py | JustEmuTarkov/jet_py | 2f352b5e6f5d88594d08afc46e9458e919271788 | [
"MIT"
] | 1 | 2021-03-08T09:02:29.000Z | 2021-03-08T09:02:29.000Z | tarkov/bots/generator/loot/_base.py | JustEmuTarkov/jet_py | 2f352b5e6f5d88594d08afc46e9458e919271788 | [
"MIT"
] | 4 | 2021-04-14T01:47:01.000Z | 2021-11-29T02:18:32.000Z | from __future__ import annotations
from typing import TYPE_CHECKING
from dependency_injector.wiring import Provide, inject
from server.container import AppContainer
from ._types import BotInventoryContainers, LootGenerationConfig
if TYPE_CHECKING:
# pylint: disable=cyclic-import
from tarkov.bots.bots import BotInventory
from tarkov.bots.generator.preset import BotGeneratorPreset
from tarkov.inventory.repositories import ItemTemplatesRepository
class BaseLootGenerator:
@inject
def __init__( # pylint: disable=too-many-arguments
self,
inventory_containers: BotInventoryContainers,
bot_inventory: BotInventory,
config: LootGenerationConfig,
preset: BotGeneratorPreset,
templates_repository: ItemTemplatesRepository = Provide[
AppContainer.repos.templates
],
):
self.inventory_containers = inventory_containers
self.bot_inventory = bot_inventory
self.config = config
self.preset = preset
self.templates_repository = templates_repository
def generate(self) -> None:
raise NotImplementedError
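# Illustrative subclass (hypothetical): concrete generators override generate()
# and can rely on the injected dependencies, e.g.
#
#   class PocketLootGenerator(BaseLootGenerator):
#       def generate(self) -> None:
#           ...  # fill self.inventory_containers via self.templates_repository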
| 31.027027 | 69 | 0.740418 | 677 | 0.589721 | 0 | 0 | 581 | 0.506098 | 0 | 0 | 67 | 0.058362 |
adc9d401feb2c0145e7e065ab182fb792dc27133 | 411 | py | Python | calico/datadog_checks/calico/check.py | davidlrosenblum/integrations-extras | 281864a99ae054c91c3e3ea6a8ee8f04f6d7cdf3 | [
"BSD-3-Clause"
] | 158 | 2016-06-02T16:25:31.000Z | 2022-03-16T15:55:14.000Z | calico/datadog_checks/calico/check.py | davidlrosenblum/integrations-extras | 281864a99ae054c91c3e3ea6a8ee8f04f6d7cdf3 | [
"BSD-3-Clause"
] | 554 | 2016-03-15T17:39:12.000Z | 2022-03-31T10:29:16.000Z | calico/datadog_checks/calico/check.py | davidlrosenblum/integrations-extras | 281864a99ae054c91c3e3ea6a8ee8f04f6d7cdf3 | [
"BSD-3-Clause"
] | 431 | 2016-05-13T15:33:13.000Z | 2022-03-31T10:06:46.000Z | from datadog_checks.base import OpenMetricsBaseCheckV2
from .metrics import METRIC_MAP
class CalicoCheck(OpenMetricsBaseCheckV2):
def __init__(self, name, init_config, instances=None):
super(CalicoCheck, self).__init__(
name,
init_config,
instances,
)
def get_default_config(self):
return {'namespace': 'calico', 'metrics': [METRIC_MAP]}
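# Illustrative conf.yaml instance (the endpoint is hypothetical; OpenMetrics V2
# checks are pointed at a Prometheus-format metrics endpoint):
#
#   instances:
#     - openmetrics_endpoint: http://<calico-host>:9091/metrics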
| 24.176471 | 63 | 0.664234 | 320 | 0.778589 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.068127 |
adcb4497ce1ae012191c1c8f35996119cb7174cf | 1,059 | py | Python | python/python_challenge/25/25.py | yunyu2019/blog | e4dce66504ad9b9c16d8e40ef6dff92e17ad0af0 | [
"Apache-2.0"
] | null | null | null | python/python_challenge/25/25.py | yunyu2019/blog | e4dce66504ad9b9c16d8e40ef6dff92e17ad0af0 | [
"Apache-2.0"
] | null | null | null | python/python_challenge/25/25.py | yunyu2019/blog | e4dce66504ad9b9c16d8e40ef6dff92e17ad0af0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# @Date : 2016-05-17 16:36:18
# @Author : Yunyu2019 (yunyu2010@yeah.net)
# @Link : http://www.pythonchallenge.com/pc/hex/lake.html
import os
import wave
import time
import Image
import requests
def download(urls):
filename=os.path.basename(urls)
try:
req=requests.get(urls,auth=('butter','fly'))
fp=open(filename,'wb')
fp.write(req.content)
fp.close()
print 'download:%s' % filename
except:
print 'fail download:%s' % filename
def getdatas(files):
fp=open(files,'rb')
    data=fp.read()[44:]  # skip the 44-byte WAV header; the payload is used as raw pixel bytes below
fp.close()
img=Image.new('RGB',(60,60))
img.fromstring(data)
return img
"""
for i in range(1,26):
urls='http://www.pythonchallenge.com/pc/hex/lake%s.wav' % i
download(urls)
time.sleep(1)
"""
img=Image.new('RGB',(300,300))
for i in range(25):
y,x=divmod(i,5)
files='lake{0}.wav'.format(i+1)
pices=getdatas(files)
img.paste(pices,(x*60,y*60))
img.save('lake.jpg') | 24.068182 | 64 | 0.586402 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 400 | 0.377715 |
adcc014796de1bd0000f6b7256d97c0e49e27311 | 2,645 | py | Python | scripts/initial_check.py | GenomeImmunobiology/hervk_kmers | 5ede5c34cda75cec840cb499270e5b3c70582045 | [
"MIT"
] | null | null | null | scripts/initial_check.py | GenomeImmunobiology/hervk_kmers | 5ede5c34cda75cec840cb499270e5b3c70582045 | [
"MIT"
] | null | null | null | scripts/initial_check.py | GenomeImmunobiology/hervk_kmers | 5ede5c34cda75cec840cb499270e5b3c70582045 | [
"MIT"
] | 1 | 2021-03-09T04:14:49.000Z | 2021-03-09T04:14:49.000Z | #!/usr/bin/env python
'''
Copyright (c) 2020 RIKEN
All Rights Reserved
See file LICENSE for details.
'''
import os,sys,datetime,multiprocessing
from os.path import abspath,dirname,realpath,join
import log,traceback
# http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
elif 'PATH' in os.environ:
for path in os.environ['PATH'].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def check(args, argv):
log.logger.debug('started')
try:
log.logger.debug('command line:\n'+ ' '.join(argv))
# check python version
version=sys.version_info
if (version[0] >= 3) and (version[1] >= 7):
log.logger.debug('Python version=%d.%d.%d' % (version[0], version[1], version[2]))
else:
log.logger.error('Please use Python 3.7 or later. Your Python is version %d.%d.' % (version[0], version[1]))
exit(1)
# check PATH
for i in ['blastn', 'bedtools']:
if which(i) is None:
log.logger.error('%s not found in $PATH. Please check %s is installed and added to PATH.' % (i, i))
exit(1)
# check files
if args.c is not None:
if os.path.exists(args.c) is False:
log.logger.error('CRAM file (%s) was not found.' % args.c)
exit(1)
elif args.b is not None:
if os.path.exists(args.b) is False:
log.logger.error('BAM file (%s) was not found.' % args.b)
exit(1)
else:
log.logger.error('Please specify BAM or CRAM file (-b or -c option).')
exit(1)
if args.c is not None:
if args.fa is None:
log.logger.error('Reference genome (%s) was not specified.' % args.fa)
exit(1)
elif os.path.exists(args.fa) is False:
log.logger.error('Reference genome (%s) was not found.' % args.fa)
exit(1)
# check prerequisite modules
from Bio.Seq import Seq
import gzip
from pybedtools import BedTool
import matplotlib
import pysam
except:
log.logger.error('\n'+ traceback.format_exc())
exit(1)
| 33.481013 | 120 | 0.551985 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 676 | 0.255577 |
adcc36ffc2e2e8f7a5b13fe702db8dab22da84df | 381 | py | Python | megalinter/tests/test_megalinter/linters/rust_clippy_test.py | private-forks/mega-linter | e22555d5dd18f6e7c32057b87b366d22f61a115d | [
"MIT"
] | null | null | null | megalinter/tests/test_megalinter/linters/rust_clippy_test.py | private-forks/mega-linter | e22555d5dd18f6e7c32057b87b366d22f61a115d | [
"MIT"
] | 37 | 2021-05-12T06:28:04.000Z | 2022-03-31T10:14:51.000Z | megalinter/tests/test_megalinter/linters/rust_clippy_test.py | private-forks/mega-linter | e22555d5dd18f6e7c32057b87b366d22f61a115d | [
"MIT"
] | null | null | null | # !/usr/bin/env python3
"""
Unit tests for RUST linter clippy
This class has been automatically generated by .automation/build.py, please do not update it manually
"""
from unittest import TestCase
from megalinter.tests.test_megalinter.LinterTestRoot import LinterTestRoot
class rust_clippy_test(TestCase, LinterTestRoot):
descriptor_id = "RUST"
linter_name = "clippy"
| 25.4 | 101 | 0.784777 | 103 | 0.270341 | 0 | 0 | 0 | 0 | 0 | 0 | 180 | 0.472441 |
adcca01c0f26e751ad2af6441598f765ca450604 | 868 | py | Python | tests/test_loader.py | sergeyglazyrindev/asceticcmdrunner | bde966398e8ecdcc638ac8137d4fab485bb5dce7 | [
"MIT"
] | null | null | null | tests/test_loader.py | sergeyglazyrindev/asceticcmdrunner | bde966398e8ecdcc638ac8137d4fab485bb5dce7 | [
"MIT"
] | null | null | null | tests/test_loader.py | sergeyglazyrindev/asceticcmdrunner | bde966398e8ecdcc638ac8137d4fab485bb5dce7 | [
"MIT"
] | null | null | null | import mock
from acmdrunner import Loader
import os
import tests.management.acr_commands
active_dir = os.getcwd()
cur_dir = os.path.dirname(__file__)
def test_load_from_directory():
with mock.patch(
'acmdrunner.loader.load_commands_from_directory',
autospec=True
) as commands_mock:
Loader.load_from_directory(active_dir)
commands_mock.assert_called_once_with(active_dir)
def test_load_from_package():
with mock.patch(
'acmdrunner.loader.load_commands_from_directory',
autospec=True
) as commands_mock:
Loader.load_from_package('tests')
commands_mock.assert_called_once_with(cur_dir, package='tests')
def test_integration():
Loader.load_from_directory(active_dir)
assert (tests.management.acr_commands.command_dispatcher
.is_registered('test'))
| 27.125 | 71 | 0.724654 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 116 | 0.133641 |
adcd400ff1adfb7e8690873eed1def1e2ba865b1 | 6,838 | py | Python | tests/test_from_avro.py | godatadriven/pydantic-avro | f50010dc1bd9bb33bdcbdbe443d6085884d26dd4 | [
"MIT"
] | 7 | 2021-12-20T09:00:25.000Z | 2022-01-17T13:18:39.000Z | tests/test_from_avro.py | godatadriven/pydantic-avro | f50010dc1bd9bb33bdcbdbe443d6085884d26dd4 | [
"MIT"
] | 25 | 2021-12-18T16:18:25.000Z | 2022-03-28T22:33:14.000Z | tests/test_from_avro.py | godatadriven/pydantic-avro | f50010dc1bd9bb33bdcbdbe443d6085884d26dd4 | [
"MIT"
] | 4 | 2022-01-16T11:11:19.000Z | 2022-03-26T16:17:48.000Z | from pydantic_avro.avro_to_pydantic import avsc_to_pydantic
def test_avsc_to_pydantic_empty():
pydantic_code = avsc_to_pydantic({"name": "Test", "type": "record", "fields": []})
assert "class Test(BaseModel):\n pass" in pydantic_code
def test_avsc_to_pydantic_primitive():
pydantic_code = avsc_to_pydantic(
{
"name": "Test",
"type": "record",
"fields": [
{"name": "col1", "type": "string"},
{"name": "col2", "type": "int"},
{"name": "col3", "type": "long"},
{"name": "col4", "type": "double"},
{"name": "col5", "type": "float"},
{"name": "col6", "type": "boolean"},
],
}
)
assert (
"class Test(BaseModel):\n"
" col1: str\n"
" col2: int\n"
" col3: int\n"
" col4: float\n"
" col5: float\n"
" col6: bool" in pydantic_code
)
def test_avsc_to_pydantic_map():
pydantic_code = avsc_to_pydantic(
{
"name": "Test",
"type": "record",
"fields": [
{"name": "col1", "type": {"type": "map", "values": "string", "default": {}}},
],
}
)
assert "class Test(BaseModel):\n" " col1: Dict[str, str]" in pydantic_code
def test_avsc_to_pydantic_map_nested_object():
pydantic_code = avsc_to_pydantic(
{
"name": "Test",
"type": "record",
"fields": [
{
"name": "col1",
"type": {
"type": "map",
"values": {"type": "record", "name": "Nested", "fields": [{"name": "col1", "type": "string"}]},
"default": {},
},
},
],
}
)
assert "class Test(BaseModel):\n" " col1: Dict[str, Nested]" in pydantic_code
assert "class Nested(BaseModel):\n" " col1: str" in pydantic_code
def test_avsc_to_pydantic_map_nested_array():
pydantic_code = avsc_to_pydantic(
{
"name": "Test",
"type": "record",
"fields": [
{
"name": "col1",
"type": {
"type": "map",
"values": {
"type": "array",
"items": "string",
},
"default": {},
},
},
],
}
)
assert "class Test(BaseModel):\n" " col1: Dict[str, List[str]]" in pydantic_code
def test_avsc_to_pydantic_logical():
pydantic_code = avsc_to_pydantic(
{
"name": "Test",
"type": "record",
"fields": [
{
"name": "col1",
"type": {"type": "int", "logicalType": "date"},
},
{
"name": "col2",
"type": {"type": "long", "logicalType": "time-micros"},
},
{
"name": "col3",
"type": {"type": "long", "logicalType": "time-millis"},
},
{
"name": "col4",
"type": {"type": "long", "logicalType": "timestamp-micros"},
},
{
"name": "col5",
"type": {"type": "long", "logicalType": "timestamp-millis"},
},
],
}
)
assert (
"class Test(BaseModel):\n"
" col1: date\n"
" col2: time\n"
" col3: time\n"
" col4: datetime\n"
" col5: datetime" in pydantic_code
)
def test_avsc_to_pydantic_complex():
pydantic_code = avsc_to_pydantic(
{
"name": "Test",
"type": "record",
"fields": [
{
"name": "col1",
"type": {
"name": "Nested",
"type": "record",
"fields": [],
},
},
{
"name": "col2",
"type": {
"type": "array",
"items": "int",
},
},
{
"name": "col3",
"type": {
"type": "array",
"items": "Nested",
},
},
],
}
)
assert (
"class Test(BaseModel):\n"
" col1: Nested\n"
" col2: List[int]\n"
" col3: List[Nested]\n" in pydantic_code
)
assert "class Nested(BaseModel):\n pass\n" in pydantic_code
def test_default():
pydantic_code = avsc_to_pydantic(
{
"name": "Test",
"type": "record",
"fields": [
{"name": "col1", "type": "string", "default": "test"},
{"name": "col2", "type": ["string", "null"], "default": None},
{"name": "col3", "type": {"type": "map", "values": "string"}, "default": {"key": "value"}},
{"name": "col4", "type": "boolean", "default": True},
{"name": "col5", "type": "boolean", "default": False},
],
}
)
assert (
"class Test(BaseModel):\n"
' col1: str = "test"\n'
" col2: Optional[str] = None\n"
' col3: Dict[str, str] = {"key": "value"}\n'
" col4: bool = True\n"
" col5: bool = False\n" in pydantic_code
)
def test_enums():
pydantic_code = avsc_to_pydantic(
{
"name": "Test",
"type": "record",
"fields": [
{"name": "c1", "type": {"type": "enum", "symbols": ["passed", "failed"], "name": "Status"}},
],
}
)
assert "class Test(BaseModel):\n" " c1: Status" in pydantic_code
assert "class Status(str, Enum):\n" ' passed = "passed"\n' ' failed = "failed"' in pydantic_code
def test_enums_reuse():
pydantic_code = avsc_to_pydantic(
{
"name": "Test",
"type": "record",
"fields": [
{"name": "c1", "type": {"type": "enum", "symbols": ["passed", "failed"], "name": "Status"}},
{"name": "c2", "type": "Status"},
],
}
)
assert "class Test(BaseModel):\n" " c1: Status\n" " c2: Status" in pydantic_code
assert "class Status(str, Enum):\n" ' passed = "passed"\n' ' failed = "failed"' in pydantic_code
| 30.123348 | 119 | 0.388125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,596 | 0.379643 |
adcefa1b9b892a7b65705918561ff4fdd2c1bb29 | 1,128 | py | Python | dojo/unittests/tools/test_cloudsploit_parser.py | art-tykh/django-DefectDojo | 22201f336974a99808998e98cdb3ed536bfdd85a | [
"BSD-3-Clause"
] | 1,772 | 2018-01-22T23:32:15.000Z | 2022-03-31T14:49:33.000Z | dojo/unittests/tools/test_cloudsploit_parser.py | art-tykh/django-DefectDojo | 22201f336974a99808998e98cdb3ed536bfdd85a | [
"BSD-3-Clause"
] | 3,461 | 2018-01-20T19:12:28.000Z | 2022-03-31T17:14:39.000Z | dojo/unittests/tools/test_cloudsploit_parser.py | art-tykh/django-DefectDojo | 22201f336974a99808998e98cdb3ed536bfdd85a | [
"BSD-3-Clause"
] | 1,173 | 2018-01-23T07:10:23.000Z | 2022-03-31T14:40:43.000Z | from django.test import TestCase
from dojo.models import Test
from dojo.tools.cloudsploit.parser import CloudsploitParser
class TestCloudsploitParser(TestCase):
def test_cloudsploit_parser_with_no_vuln_has_no_findings(self):
testfile = open("dojo/unittests/scans/cloudsploit/cloudsploit_zero_vul.json")
parser = CloudsploitParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(0, len(findings))
    def test_cloudsploit_parser_with_one_critical_vuln_has_one_findings(self):
testfile = open("dojo/unittests/scans/cloudsploit/cloudsploit_one_vul.json")
parser = CloudsploitParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(1, len(findings))
def test_cloudsploit_parser_with_many_vuln_has_many_findings(self):
testfile = open("dojo/unittests/scans/cloudsploit/cloudsploit_many_vul.json")
parser = CloudsploitParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(6, len(findings))
| 40.285714 | 85 | 0.734043 | 1,003 | 0.889184 | 0 | 0 | 0 | 0 | 0 | 0 | 179 | 0.158688 |
add258efeef3f956d7fb99304fe175cc5d75a991 | 1,329 | py | Python | mwaa/mwaa-cdk/mwaa_cdk/deploy_files.py | 094459/time-series-and-data-lakes | 75540661764b8bca91debf625278985ceba7b5ca | [
"MIT"
] | 9 | 2021-12-03T17:51:42.000Z | 2022-03-17T08:45:05.000Z | mwaa/mwaa-cdk/mwaa_cdk/deploy_files.py | 094459/time-series-and-data-lakes | 75540661764b8bca91debf625278985ceba7b5ca | [
"MIT"
] | null | null | null | mwaa/mwaa-cdk/mwaa_cdk/deploy_files.py | 094459/time-series-and-data-lakes | 75540661764b8bca91debf625278985ceba7b5ca | [
"MIT"
] | 1 | 2021-12-12T16:00:31.000Z | 2021-12-12T16:00:31.000Z | from aws_cdk import core
import aws_cdk.aws_ec2 as ec2
import aws_cdk.aws_s3 as s3
import aws_cdk.aws_s3_deployment as s3deploy
import aws_cdk.aws_iam as iam
class MwaaCdkStackDeployFiles(core.Stack):
def __init__(self, scope: core.Construct, id: str, vpc, mwaa_props, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
# Create MWAA S3 Bucket and upload local dags
dags_bucket = s3.Bucket(
self,
"mwaa-dags",
bucket_name=f"{mwaa_props['dagss3location'].lower()}",
versioned=True,
block_public_access=s3.BlockPublicAccess.BLOCK_ALL
)
dags = s3deploy.BucketDeployment(self, "DeployDAG",
sources=[s3deploy.Source.asset("./dags")],
destination_bucket=dags_bucket,
destination_key_prefix="dags",
prune=False,
retain_on_delete=False
)
# This uploads a requirements.txt file in the requirements
# folder. If not needed, you can comment this out
dagreqs = s3deploy.BucketDeployment(self, "DeployRequirements",
sources=[s3deploy.Source.asset("./requirements")],
destination_bucket=dags_bucket,
destination_key_prefix="requirements",
prune=False,
retain_on_delete=False
)
| 27.122449 | 90 | 0.641836 | 1,148 | 0.863807 | 0 | 0 | 0 | 0 | 0 | 0 | 280 | 0.210685 |
add2a5d0a4f10c67d63cf8daaaf3c6b9766d80f2 | 2,743 | py | Python | caloric_balance/test_main_getUserString.py | ankitsumitg/python-projects | 34a3df6fcd8544bf83aa9f3d47ec160e3838b1d1 | [
"MIT"
] | 1 | 2021-03-22T20:45:06.000Z | 2021-03-22T20:45:06.000Z | caloric_balance/test_main_getUserString.py | ankitsumitg/python-projects | 34a3df6fcd8544bf83aa9f3d47ec160e3838b1d1 | [
"MIT"
] | null | null | null | caloric_balance/test_main_getUserString.py | ankitsumitg/python-projects | 34a3df6fcd8544bf83aa9f3d47ec160e3838b1d1 | [
"MIT"
] | null | null | null | """
Do Not Edit this file. You may and are encouraged to look at it for reference.
"""
import sys
if sys.version_info.major != 3:
print('You must use Python 3.x version to run this unit test')
sys.exit(1)
import unittest
import main
class TestGetUserString(unittest.TestCase):
def input_replacement(self, prompt):
self.assertFalse(self.too_many_inputs)
self.input_given_prompt = prompt
r = self.input_response_list[self.input_response_index]
self.input_response_index += 1
if self.input_response_index >= len(self.input_response_list):
self.input_response_index = 0
self.too_many_inputs = True
return r
def print_replacement(self, *args, **kargs):
return
def setUp(self):
self.too_many_inputs = False
self.input_given_prompt = None
self.input_response_index = 0
self.input_response_list = [""]
main.input = self.input_replacement
main.print = self.print_replacement
return
def test001_getUserStringExists(self):
self.assertTrue('getUserString' in dir(main),
'Function "getUserString" is not defined, check your spelling')
return
def test002_getUserStringSendsCorrectPrompt(self):
from main import getUserString
expected_prompt = "HELLO"
expected_response = "WORLD"
self.input_response_list = [expected_response]
actual_response = getUserString(expected_prompt)
self.assertEqual(self.input_given_prompt, expected_prompt)
return
def test003_getUserStringGetsInput(self):
from main import getUserString
expected_prompt = "HELLO"
expected_response = "WORLD"
self.input_response_list = [expected_response]
actual_response = getUserString(expected_prompt)
self.assertEqual(actual_response, expected_response)
return
def test004_getUserStringStripsWhitespace(self):
from main import getUserString
expected_prompt = "HELLO"
expected_response = "WORLD"
self.input_response_list = [" \t\n" + expected_response + " \t\n"]
actual_response = getUserString(expected_prompt)
self.assertEqual(actual_response, expected_response)
return
def test005_getUserStringIgnoresBlankLines(self):
from main import getUserString
expected_prompt = "HELLO"
expected_response = "WORLD"
self.input_response_list = ["", "\n", " \t\n" + expected_response + " \t\n"]
actual_response = getUserString(expected_prompt)
self.assertEqual(actual_response, expected_response)
return
if __name__ == '__main__':
unittest.main()
| 33.048193 | 87 | 0.677725 | 2,447 | 0.892089 | 0 | 0 | 0 | 0 | 0 | 0 | 320 | 0.116661 |
add33c54e770750ea8275fe2e30292d5a6c64611 | 1,386 | py | Python | corehq/ex-submodules/phonelog/management/commands/migrate_device_entry.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 471 | 2015-01-10T02:55:01.000Z | 2022-03-29T18:07:18.000Z | corehq/ex-submodules/phonelog/management/commands/migrate_device_entry.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 14,354 | 2015-01-01T07:38:23.000Z | 2022-03-31T20:55:14.000Z | corehq/ex-submodules/phonelog/management/commands/migrate_device_entry.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 175 | 2015-01-06T07:16:47.000Z | 2022-03-29T13:27:01.000Z | from datetime import datetime, timedelta
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import connection
from phonelog.models import OldDeviceReportEntry, DeviceReportEntry
COLUMNS = (
"xform_id", "i", "msg", "type", "date", "server_date", "domain",
"device_id", "app_version", "username", "user_id"
)
class Command(BaseCommand):
help = "Migrate device reports to partitioned table"
def handle(self, *args, **options):
partitioned_table = DeviceReportEntry._meta.db_table
old_table = OldDeviceReportEntry._meta.db_table
now = datetime.utcnow()
oldest_date = now - timedelta(days=settings.DAYS_TO_KEEP_DEVICE_LOGS)
current = now
while current > oldest_date:
hour_ago = current - timedelta(hours=1)
with connection.cursor() as cursor:
cursor.execute(
"INSERT INTO " + partitioned_table +
" (" + ','.join(COLUMNS) + ") " +
"SELECT " +
','.join(COLUMNS) + " " +
"FROM " + old_table + " " +
"WHERE server_date > %s AND server_date <= %s",
[hour_ago, current]
)
print("Inserted device logs from %s to %s" % (hour_ago, current))
current = hour_ago
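# Illustrative invocation (Django derives the command name from this module's
# filename): python manage.py migrate_device_entry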
| 35.538462 | 77 | 0.584416 | 1,016 | 0.733045 | 0 | 0 | 0 | 0 | 0 | 0 | 271 | 0.195527 |
add35b0f883ca25be7c8935f80592e9b0471f49d | 5,180 | py | Python | UnB/PI2-SESC/SESCdraw.py | nauam/vuepress-next | 4f39162aa38e36b85d74bf5fc858770cbf87d1f4 | [
"MIT"
] | null | null | null | UnB/PI2-SESC/SESCdraw.py | nauam/vuepress-next | 4f39162aa38e36b85d74bf5fc858770cbf87d1f4 | [
"MIT"
] | 2 | 2021-11-02T11:37:02.000Z | 2021-11-02T11:37:53.000Z | UnB/PI2-SESC/SESCdraw.py | nauam/vuepress-next | 4f39162aa38e36b85d74bf5fc858770cbf87d1f4 | [
"MIT"
] | null | null | null | import pygame
import math
from pygame.locals import *
from OpenGL.GL import *
########################################################
#################### TABLE SETTINGS ####################
larCamp = 1160
altCamp = 770
passo = 0.53
apasso = 0.18 #deg
xJog = 30
yJog = 40
has = 10
hasGOL = 70
disGOL = 250
hasDEF = 215
disDEF = 250
hasMEI = 505
disMEI = 128
hasATA = 795
disATA = 163
########################################################
pygame.init()
pygame.display.set_mode((larCamp, altCamp), DOUBLEBUF | OPENGL)
####################### DRAWING ########################
def rect(x, y, w, l):
glBegin(GL_QUADS)
glVertex2f(x , y )
glVertex2f(x , y+l)
glVertex2f(x+w, y+l)
glVertex2f(x+w, y )
glEnd()
def F_CAM():
glColor4d(0, 255, 0, 255)
rect(0, 0, larCamp, altCamp)
glColor4d(255, 255, 255, 255)
rect(0 , (altCamp-disGOL)/2, has, disGOL)
rect((larCamp-has/4)/2, 0, has/4, altCamp)
rect(larCamp , (altCamp-disGOL)/2, -has, disGOL)
def F_HAS(xhas):
glColor4d(63, 63, 63, 255)
rect(xhas-has/2, 0, has, altCamp)
rect(larCamp-xhas-has/2, 0, has, altCamp)
def F_GOL(Y,A):
glColor4d(0, 0, 255, 126)
rect(hasGOL , altCamp/2-Y-yJog/3, math.sin(A*math.pi/180)*45, yJog*2/3)
rect(hasGOL-xJog/2 , altCamp/2-Y-yJog/2, xJog , yJog )
glColor4d(255, 0, 0, 126)
rect(larCamp-hasGOL-xJog/2, altCamp/2 -yJog/2, xJog , yJog )
def F_DEF(Y,A):
glColor4d(0, 0, 255, 126)
rect(hasDEF , altCamp/2-Y-yJog/3-disDEF/2, math.sin(A*math.pi/180)*45, yJog*2/3)
rect(hasDEF , altCamp/2-Y-yJog/3+disDEF/2, math.sin(A*math.pi/180)*45, yJog*2/3)
rect(hasDEF-xJog/2 , altCamp/2-Y-yJog/2-disDEF/2, xJog , yJog )
rect(hasDEF-xJog/2 , altCamp/2-Y-yJog/2+disDEF/2, xJog , yJog )
glColor4d(255, 0, 0, 126)
rect(larCamp-hasDEF-xJog/2, altCamp/2 -yJog/2-disDEF/2, xJog , yJog )
rect(larCamp-hasDEF-xJog/2, altCamp/2 -yJog/2+disDEF/2, xJog , yJog )
def F_MEI(Y,A):
glColor4d(0, 0, 255, 126)
rect(hasMEI , altCamp/2-Y-yJog/3-disMEI*2, math.sin(A*math.pi/180)*45, yJog*2/3)
rect(hasMEI , altCamp/2-Y-yJog/3-disMEI , math.sin(A*math.pi/180)*45, yJog*2/3)
rect(hasMEI , altCamp/2-Y-yJog/3 , math.sin(A*math.pi/180)*45, yJog*2/3)
rect(hasMEI , altCamp/2-Y-yJog/3+disMEI , math.sin(A*math.pi/180)*45, yJog*2/3)
rect(hasMEI , altCamp/2-Y-yJog/3+disMEI*2, math.sin(A*math.pi/180)*45, yJog*2/3)
rect(hasMEI-xJog/2 , altCamp/2-Y-yJog/2-disMEI*2, xJog , yJog )
rect(hasMEI-xJog/2 , altCamp/2-Y-yJog/2-disMEI , xJog , yJog )
rect(hasMEI-xJog/2 , altCamp/2-Y-yJog/2 , xJog , yJog )
rect(hasMEI-xJog/2 , altCamp/2-Y-yJog/2+disMEI , xJog , yJog )
rect(hasMEI-xJog/2 , altCamp/2-Y-yJog/2+disMEI*2, xJog , yJog )
glColor4d(255, 0, 0, 126)
rect(larCamp-hasMEI-xJog/2, altCamp/2 -yJog/2-disMEI*2, xJog , yJog )
rect(larCamp-hasMEI-xJog/2, altCamp/2 -yJog/2-disMEI , xJog , yJog )
rect(larCamp-hasMEI-xJog/2, altCamp/2 -yJog/2 , xJog , yJog )
rect(larCamp-hasMEI-xJog/2, altCamp/2 -yJog/2+disMEI , xJog , yJog )
rect(larCamp-hasMEI-xJog/2, altCamp/2 -yJog/2+disMEI*2, xJog , yJog )
def F_ATA(Y,A):
glColor4d(0, 0, 255, 126)
rect(hasATA , altCamp/2-Y-yJog/3-disATA, -math.sin(A*math.pi/180)*45, yJog*2/3)
rect(hasATA , altCamp/2-Y-yJog/3 , -math.sin(A*math.pi/180)*45, yJog*2/3)
rect(hasATA , altCamp/2-Y-yJog/3+disATA, -math.sin(A*math.pi/180)*45, yJog*2/3)
rect(hasATA-xJog/2 , altCamp/2-Y-yJog/2-disATA, xJog , yJog )
rect(hasATA-xJog/2 , altCamp/2-Y-yJog/2 , xJog , yJog )
rect(hasATA-xJog/2 , altCamp/2-Y-yJog/2+disATA, xJog , yJog )
glColor4d(255, 0, 0, 126)
rect(larCamp-hasATA-xJog/2, altCamp/2 -yJog/2-disATA, xJog , yJog )
rect(larCamp-hasATA-xJog/2, altCamp/2 -yJog/2 , xJog , yJog )
rect(larCamp-hasATA-xJog/2, altCamp/2 -yJog/2+disATA, xJog , yJog )
def _DRAW(yGOL,aGOL,yDEF,aDEF,yMEI,aMEI,yATA,aATA):
glViewport(0,0,larCamp,altCamp)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0,larCamp,0,altCamp,0,1)
glClear(GL_COLOR_BUFFER_BIT)
F_CAM()
F_HAS(hasGOL)
F_HAS(hasDEF)
F_HAS(hasMEI)
F_HAS(hasATA)
F_GOL(yGOL,aGOL)
F_DEF(yDEF,aDEF)
F_MEI(yMEI,aMEI)
F_ATA(yATA,aATA)
pygame.display.flip()
pygame.event.pump()
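# One-frame render sketch (illustrative; real offsets and angles come from the
# table's control loop elsewhere in the project):
#
#   _DRAW(0, 0, 0, 0, 0, 0, 0, 0)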
| 41.774194 | 99 | 0.509073 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 235 | 0.045349 |
add36b7db37e26004fe848c8316ce3ebda2a90b6 | 660 | py | Python | linked_list_reversal.py | Nikhilxavier/Linked-List | b934985d937edd2cd4a683d751a930aacd7f96bf | [
"BSD-3-Clause"
] | null | null | null | linked_list_reversal.py | Nikhilxavier/Linked-List | b934985d937edd2cd4a683d751a930aacd7f96bf | [
"BSD-3-Clause"
] | null | null | null | linked_list_reversal.py | Nikhilxavier/Linked-List | b934985d937edd2cd4a683d751a930aacd7f96bf | [
"BSD-3-Clause"
] | null | null | null | """
Implementation of Linked List reversal.
"""
# Author: Nikhil Xavier <nikhilxavier@yahoo.com>
# License: BSD 3 clause
class Node:
"""Node class for Singly Linked List."""
def __init__(self, value):
self.value = value
self.next_node = None
def reverse_linked_list(head):
"""Reverse linked list.
Returns reversed linked list head.
"""
current_node = head
previous_node = None
next_node = None
while current_node:
next_node = current_node.next_node
current_node.next_node = previous_node
previous_node = current_node
current_node = next_node
return previous_node
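# Illustrative check (hypothetical helper, not part of the original module):
# reversing 1 -> 2 -> 3 yields 3 -> 2 -> 1.
def _demo_reversal():
    head = Node(1)
    head.next_node = Node(2)
    head.next_node.next_node = Node(3)
    node = reverse_linked_list(head)
    values = []
    while node:
        values.append(node.value)
        node = node.next_node
    assert values == [3, 2, 1]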
| 20 | 48 | 0.668182 | 145 | 0.219697 | 0 | 0 | 0 | 0 | 0 | 0 | 229 | 0.34697 |
add538016b2beae44f41546b6a9daabd1efa1beb | 1,310 | py | Python | scripts/stats/cluster/transform_documents_2d.py | foobar999/Wikipedia-Cluster-Analysis | 4dc8166fb01f9b3ab6d7557de331cfc95298ff0c | [
"MIT"
] | null | null | null | scripts/stats/cluster/transform_documents_2d.py | foobar999/Wikipedia-Cluster-Analysis | 4dc8166fb01f9b3ab6d7557de331cfc95298ff0c | [
"MIT"
] | null | null | null | scripts/stats/cluster/transform_documents_2d.py | foobar999/Wikipedia-Cluster-Analysis | 4dc8166fb01f9b3ab6d7557de331cfc95298ff0c | [
"MIT"
] | null | null | null | import argparse
from sklearn import decomposition
from sklearn.manifold import TSNE
from scripts.utils.utils import init_logger, save_npz
from scripts.utils.documents import load_document_topics
logger = init_logger()
def main():
parser = argparse.ArgumentParser(description='maps a given high-dimensional documents to 2d document representations with t-sne')
parser.add_argument('--document-topics', type=argparse.FileType('r'), help='path to input document-topic-file (.npz)', required=True)
parser.add_argument('--documents-2d', type=argparse.FileType('w'), help='path to output document-2d-data (.npz)', required=True)
args = parser.parse_args()
input_document_topics_path = args.document_topics.name
output_documents_2d_path = args.documents_2d.name
document_topics = load_document_topics(input_document_topics_path)
#model = decomposition.PCA(n_components=2)
model = TSNE(n_components=2, verbose=1, perplexity=100, n_iter=1000)
logger.info('running 2d-transformation with model {}'.format(model))
documents_2d = model.fit_transform(document_topics)
logger.debug('2d-transformation res\n{}'.format(documents_2d))
logger.info('saving 2d-documents')
save_npz(output_documents_2d_path, documents_2d)
if __name__ == '__main__':
main()
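# Example invocation (file names are illustrative):
#   python transform_documents_2d.py --document-topics document_topics.npz \
#       --documents-2d documents_2d.npz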
| 39.69697 | 137 | 0.761832 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 347 | 0.264885 |
add5947a236430ec92e6da0ce2795473737ac226 | 551 | py | Python | 1701-1800/1711-1720/1711-countGoodMeals/countGoodMeals.py | xuychen/Leetcode | c8bf33af30569177c5276ffcd72a8d93ba4c402a | [
"MIT"
] | null | null | null | 1701-1800/1711-1720/1711-countGoodMeals/countGoodMeals.py | xuychen/Leetcode | c8bf33af30569177c5276ffcd72a8d93ba4c402a | [
"MIT"
] | null | null | null | 1701-1800/1711-1720/1711-countGoodMeals/countGoodMeals.py | xuychen/Leetcode | c8bf33af30569177c5276ffcd72a8d93ba4c402a | [
"MIT"
] | null | null | null | from collections import defaultdict
class Solution(object):
def countPairs(self, deliciousness):
"""
:type deliciousness: List[int]
:rtype: int
"""
max_sum = max(deliciousness) * 2
count = 0
dictionary = defaultdict(int)
for value in deliciousness:
summation = 1
while summation <= max_sum:
count = (count + dictionary[summation-value]) % 1000000007
summation <<= 1
dictionary[value] += 1
return count
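# Illustrative check (hypothetical helper): in [1, 3, 5, 7, 9] the pairs whose
# sums are powers of two are (1,3), (1,7), (3,5) and (7,9), so the answer is 4.
def _demo_count_pairs():
    assert Solution().countPairs([1, 3, 5, 7, 9]) == 4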
| 23.956522 | 74 | 0.548094 | 513 | 0.931034 | 0 | 0 | 0 | 0 | 0 | 0 | 74 | 0.134301 |
add70bcfae38da04abc22d36a07d4f398a5298d9 | 2,900 | py | Python | gbmk2pinb.py | tebeka/pythonwise | 56f8cb7aa792c3ad6a3dc754e15f6a04890d694a | [
"BSD-3-Clause"
] | 21 | 2016-11-16T20:08:56.000Z | 2021-12-11T23:13:05.000Z | gbmk2pinb.py | tebeka/pythonwise | 56f8cb7aa792c3ad6a3dc754e15f6a04890d694a | [
"BSD-3-Clause"
] | 1 | 2020-10-05T08:35:31.000Z | 2020-10-05T08:35:31.000Z | gbmk2pinb.py | tebeka/pythonwise | 56f8cb7aa792c3ad6a3dc754e15f6a04890d694a | [
"BSD-3-Clause"
] | 8 | 2016-11-12T22:54:55.000Z | 2021-02-10T10:46:23.000Z | #!/usr/bin/env python
'''
Port Google Bookmarks over to pinboard.in
* Export Google Bookmarks by hitting
http://www.google.com/bookmarks/?output=xml&num=10000
* Get pinboard auth_token from https://pinboard.in/settings/password
Run:
./gbmk2pinb.py bookmarks.xml --auth-token <token>
'''
import requests
from cStringIO import StringIO
from datetime import datetime
import httplib
import logging as log
import xml.etree.cElementTree as et
add_url = 'https://api.pinboard.in/v1/posts/add'
# Example XML:
# <xml_api_reply version="1">
# <bookmarks>
# <bookmark>
# <title>Finnish Doctors Are Prescribing Video Games For ADHD -
# Slashdot</title>
# <url>bit.ly/15J6NSBCustomize</url>
# <timestamp>1381590052580408</timestamp>
# <id>536897562302183779</id>
# <labels>
# <label>psychology</label>
# <label>adhd</label>
# <label>video</label>
# <label>games</label>
# </labels>
# </bookmark>
# ...
def iter_xml(fo):
tree = et.parse(fo)
for bmk in tree.iterfind('.//bookmark'):
title = bmk.find('title')
ts = int(bmk.find('timestamp').text)
yield {
'title': title.text if title is not None else 'UNKNOWN TITLE',
'url': bmk.find('url').text,
'labels': [elem.text for elem in bmk.iterfind('.//label')],
'timestamp': datetime.utcfromtimestamp(ts/1000000),
}
def bmk2params(bmk, auth_token):
return {
'url': bmk['url'],
'description': bmk['title'],
'tags': ','.join(bmk['labels']),
'dt': bmk['timestamp'].strftime('%Y-%m-%dT%H:%M:%SZ'),
'auth_token': auth_token,
'replace': 'yes',
}
def parse_reply(reply):
if reply.status_code != httplib.OK:
return False
root = et.parse(StringIO(reply.content)).getroot()
code = root.get('code')
if code != 'done':
log.error(code)
return False
return True
def post_bookmark(bmk, auth_token):
params = bmk2params(bmk, auth_token)
reply = requests.post(add_url, params=params)
return parse_reply(reply)
def main(argv=None):
import sys
from argparse import ArgumentParser
argv = argv or sys.argv
parser = ArgumentParser(description='Post Google Bookmarks to Pinboard')
parser.add_argument('filename')
parser.add_argument('--auth-token')
parser.add_argument('--start', help='start offset', type=int, default=0)
args = parser.parse_args(argv[1:])
with open(args.filename) as fo:
bmks = list(iter_xml(fo))
if args.start > 0:
bmks = bmks[args.start:]
for i, bmk in enumerate(bmks):
print(u'{}: {}'.format(args.start + i, bmk['title']))
if not post_bookmark(bmk, args.auth_token):
raise SystemExit('error: cannot post {}'.format(bmk['title']))
if __name__ == '__main__':
main()
| 26.605505 | 76 | 0.62 | 0 | 0 | 443 | 0.152759 | 0 | 0 | 0 | 0 | 1,154 | 0.397931 |
add7f9669f3e05d6508ad79cf1105d32eda37887 | 11,106 | py | Python | rover/type-ab/wheels_service.py | GamesCreatorsClub/GCC-Rover | 25a69f62a1bb01fc421924ec39f180f50d6a640b | [
"MIT"
] | 3 | 2018-02-13T21:39:55.000Z | 2018-04-26T18:17:39.000Z | rover/type-ab/wheels_service.py | GamesCreatorsClub/GCC-Rover | 25a69f62a1bb01fc421924ec39f180f50d6a640b | [
"MIT"
] | null | null | null | rover/type-ab/wheels_service.py | GamesCreatorsClub/GCC-Rover | 25a69f62a1bb01fc421924ec39f180f50d6a640b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright 2016-2017 Games Creators Club
#
# MIT License
#
import traceback
import time
import re
import copy
import pyroslib
import storagelib
import smbus
#
# wheels service
#
#
# This service is responsible for moving wheels on the rover.
# Current implementation also handles:
# - servos
# - storage map
#
DEBUG = False
DEBUG_SPEED = False
DEBUG_SPEED_VERBOSE = False
DEBUG_TURN = False
DEBUG_SERVO = False
I2C_BUS = 1
I2C_ADDRESS = 0x04
i2cBus = smbus.SMBus(I2C_BUS)
PWM = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0],
[1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
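# Each PWM row is a 12-slot duty-cycle pattern (0%, 25%, 50%, 75% and 100% of
# slots high); stepping pwmIndex through a row dithers a fractional servo
# position across successive update ticks (see the commented-out block in
# driveWheel below).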
STORAGE_MAP_FILE = "/home/pi/rover-storage.config"
SERVO_REGEX = re.compile(r"servo/(\d+)")  # raw string avoids the invalid "\d" escape
PROTOTYPE_WHEEL_CALIBRATION = {
"deg": {
"servo": "",
"90": "70",
"0": "160",
"-90": "230"
},
"speed": {
"servo": "",
"-300": "95",
"-240": "115",
"-0": "149",
"0": "155",
"240": "195",
"300": "215"
}
}
pwmIndex = 0
wheelMap = {}
wheelCalibrationMap = {}
wheelMap["servos"] = {}
servoBlasterFile = None
def moveServo(servoid, angle):
global servoBlasterFile, i2cBus
angle = int(angle)
if 12 <= int(servoid) <= 13:
i2cBus.write_byte_data(I2C_ADDRESS, servoid - 6, angle)
else:
servoLine = str(servoid) + "=" + str(angle)
if DEBUG_SERVO:
print("ServoBlaser <- " + servoLine)
try:
servoBlasterFile.write(servoLine + "\n")
servoBlasterFile.flush()
except:
try:
servoBlasterFile.close()
except:
pass
if DEBUG:
print("Lost connection to /dev/servoblaster - reopening")
servoBlasterFile = open("/dev/servoblaster", 'w')
if servoid == "0" and DEBUG_SPEED:
print(str(int(time.time() * 1000) % 10000000) + ": speed wheel 0 = " + str(angle))
def initWheel(wheelName, motorServo, steerServo):
wheelMap[wheelName] = {
"deg": 0,
"speed": 0,
"servoSpeedPos": 0,
"gen": None
}
def initWheels():
global wheelCalibrationMap
if "wheels" not in storagelib.storageMap:
storagelib.storageMap["wheels"] = {}
if "cal" not in storagelib.storageMap["wheels"]:
storagelib.storageMap["wheels"]["cal"] = {}
wheelCalibrationMap = storagelib.storageMap["wheels"]["cal"]
initWheel("fr", 0, 1)
initWheel("fl", 2, 3)
initWheel("br", 4, 5)
initWheel("bl", 6, 7)
def subscribeWheels():
storagelib.subscribeWithPrototype("wheels/cal/fl", PROTOTYPE_WHEEL_CALIBRATION)
storagelib.subscribeWithPrototype("wheels/cal/fr", PROTOTYPE_WHEEL_CALIBRATION)
storagelib.subscribeWithPrototype("wheels/cal/bl", PROTOTYPE_WHEEL_CALIBRATION)
storagelib.subscribeWithPrototype("wheels/cal/br", PROTOTYPE_WHEEL_CALIBRATION)
def ensureWheelData(name, motorServo, steerServo):
calMap = copy.deepcopy(PROTOTYPE_WHEEL_CALIBRATION)
calMap["speed"]["servo"] = str(motorServo)
calMap["deg"]["servo"] = str(steerServo)
storagelib.bulkPopulateIfEmpty("wheels/cal/" + name, calMap)
def loadStorage():
subscribeWheels()
storagelib.waitForData()
ensureWheelData("fr", 0, 1)
ensureWheelData("fl", 2, 3)
ensureWheelData("br", 4, 5)
ensureWheelData("bl", 6, 7)
print(" Storage details loaded.")
def handleServo(servoid, angle=0):
wheelMap["servos"][str(servoid)] = angle
moveServo(servoid, angle)
def handleDeg(wheel, wheelCal, degrees):
if degrees >= 0:
servoPosition = interpolate(degrees / 90.0, wheelCal["0"], wheelCal["90"])
else:
servoPosition = interpolate((degrees + 90) / 90.0, wheelCal["-90"], wheelCal["0"])
wheel["deg"] = degrees
wheel["degsServoPos"] = servoPosition
servoNumber = wheelCal["servo"]
moveServo(servoNumber, servoPosition)
def handleSpeed(wheel, wheelCal, speedStr):
servoNumber = wheelCal["servo"]
if speedStr == "0":
servoPosition = int(interpolate(0.5, wheelCal["-0"], wheelCal["0"]))
if DEBUG_SPEED_VERBOSE:
print(" got speed 0 @ " + str(servoPosition) + " for " + str(servoNumber))
speed = 0
else:
if "240" in wheelCal and "-240" in wheelCal:
if speedStr == "-0":
servoPosition = interpolate(0, wheelCal["-0"], wheelCal["-240"])
if DEBUG_SPEED_VERBOSE:
print(" got speed -0 @ " + str(servoPosition) + " for " + str(servoNumber))
speed = 0
elif speedStr == "+0":
servoPosition = interpolate(0, wheelCal["0"], wheelCal["240"])
if DEBUG_SPEED_VERBOSE:
print(" got speed +0 @ " + str(servoPosition) + " for " + str(servoNumber))
speed = 0
else:
speed = float(speedStr)
if speed >= 0:
if speed <= 240:
servoPosition = interpolate(speed / 300, wheelCal["0"], wheelCal["240"])
else:
servoPosition = interpolate((speed - 240) / 60, wheelCal["240"], wheelCal["300"])
else:
if speed >= -240:
servoPosition = interpolate(-speed / 300, wheelCal["-0"], wheelCal["-240"])
else:
servoPosition = interpolate((-speed - 240) / 60, wheelCal["-240"], wheelCal["-300"])
else:
if speedStr == "-0":
servoPosition = interpolate(0, wheelCal["-0"], wheelCal["-300"])
if DEBUG_SPEED_VERBOSE:
print(" got speed -0 @ " + str(servoPosition) + " for " + str(servoNumber))
speed = 0
elif speedStr == "+0":
servoPosition = interpolate(0, wheelCal["0"], wheelCal["300"])
if DEBUG_SPEED_VERBOSE:
print(" got speed +0 @ " + str(servoPosition) + " for " + str(servoNumber))
speed = 0
else:
speed = float(speedStr)
if speed >= 0:
servoPosition = interpolate(speed / 300, wheelCal["0"], wheelCal["300"])
else:
servoPosition = interpolate(-speed / 300, wheelCal["-0"], wheelCal["-300"])
if DEBUG_SPEED_VERBOSE:
print(" got speed " + speedStr + " @ " + str(servoPosition) + " for " + str(servoNumber))
wheel["speed"] = speedStr
if "speedServoPos" in wheel and wheel["speedServoPos"] != servoPosition:
wheel["gen"] = brakeDance([wheel["speedServoPos"], servoPosition])
wheel["speedServoPos"] = servoPosition
if speedStr == "0" or speedStr == "-0":
moveServo(servoNumber, servoPosition)
wheel["speedServoPos"] = servoPosition
else:
wheel["speedServoPos"] = servoPosition
def interpolate(value, zerostr, maxstr):
zero = float(zerostr)
maxValue = float(maxstr)
return (maxValue - zero) * value + zero
# ease between speed positions: yield the previous servo position, the new one,
# the previous one once more, then the new position forever; set up as an infinite generator
def brakeDance(vals):
# return the start value
yield vals[0]
yield vals[1]
yield vals[0]
while True:
yield vals[1]
def driveWheel(wheelName):
wheel = wheelMap[wheelName]
wheelCal = wheelCalibrationMap[wheelName]["speed"]
speedStr = wheel["speed"]
# if "speedServoPos" in wheel and "gen" in wheel:
# # servo position is not a value, but a generator
# servoPosition = wheel["speedServoPos"]
# if wheel["gen"] is not None:
# servoPosition = next(wheel["gen"])
#
# pwmPart = (int(servoPosition * 10) % 10) // 2
# servoPosition = int(servoPosition) + PWM[pwmPart][pwmIndex]
#
# servoNumber = wheelCal["servo"]
#
# if speedStr != "0" and speedStr != "-0":
# moveServo(servoNumber, servoPosition)
if "speedServoPos" in wheel:
servoPosition = wheel["speedServoPos"]
servoNumber = wheelCal["servo"]
if speedStr != "0" and speedStr != "-0":
moveServo(servoNumber, servoPosition)
def driveWheels():
global pwmIndex
driveWheel("fl")
driveWheel("fr")
driveWheel("bl")
driveWheel("br")
pwmIndex += 1
if pwmIndex >= len(PWM[0]):
pwmIndex = 0
def servoTopic(topic, payload, groups):
servo = int(groups[0])
moveServo(servo, payload)
print("servo")
def wheelDegTopic(topic, payload, groups):
wheelName = groups[0]
if wheelName in wheelMap:
wheel = wheelMap[wheelName]
wheelCal = wheelCalibrationMap[wheelName]
if DEBUG_TURN:
print(" Turning wheel: " + wheelName + " to " + str(payload) + " degs")
handleDeg(wheel, wheelCal["deg"], float(payload))
else:
print("ERROR: no wheel with name " + wheelName + " fonund.")
def wheelSpeedTopic(topic, payload, groups):
wheelName = groups[0]
if wheelName in wheelMap:
wheel = wheelMap[wheelName]
wheelCal = wheelCalibrationMap[wheelName]
if DEBUG_SPEED:
print(" Setting wheel: " + wheelName + " speed to " + str(payload))
handleSpeed(wheel, wheelCal["speed"], payload)
else:
print("ERROR: no wheel with name " + wheelName + " fonund.")
def wheelsCombined(topic, payload, groups):
if DEBUG_SPEED:
print(str(int(time.time() * 1000) % 10000000) + ": wheels " + payload)
wheelCmds = payload.split(" ")
for wheelCmd in wheelCmds:
kv = wheelCmd.split(":")
if len(kv) > 1:
wheelName = kv[0][:2]
command = kv[0][2]
value = kv[1]
if wheelName in wheelMap:
wheel = wheelMap[wheelName]
wheelCal = wheelCalibrationMap[wheelName]
if command == "s":
handleSpeed(wheel, wheelCal["speed"], value)
elif command == "d":
handleDeg(wheel, wheelCal["deg"], float(value))
if __name__ == "__main__":
try:
print("Starting wheels service...")
print(" initialising wheels...")
initWheels()
print(" opening servo blaster file...")
servoBlasterFile = open("/dev/servoblaster", 'w')
print(" sbscribing to topics...")
pyroslib.subscribe("servo/+", servoTopic)
pyroslib.subscribe("wheel/all", wheelsCombined)
pyroslib.subscribe("wheel/+/deg", wheelDegTopic)
pyroslib.subscribe("wheel/+/speed", wheelSpeedTopic)
pyroslib.init("wheels-service")
print(" Loading storage details...")
loadStorage()
print("Started wheels service.")
pyroslib.forever(0.02, driveWheels)
except Exception as ex:
print("ERROR: " + str(ex) + "\n" + ''.join(traceback.format_tb(ex.__traceback__)))
| 29.149606 | 108 | 0.572033 | 0 | 0 | 142 | 0.012786 | 0 | 0 | 0 | 0 | 2,387 | 0.214929 |
add81cb05015339a61019826e171a37c7a199581 | 5,960 | py | Python | src/hyper_prompt/segments/git.py | artbycrunk/hyper-prompt | b78d98e4e5c65647b566368ea778b205a86371de | [
"MIT"
] | 5 | 2019-11-11T15:54:09.000Z | 2022-03-23T00:00:07.000Z | src/hyper_prompt/segments/git.py | artbycrunk/hyper-prompt | b78d98e4e5c65647b566368ea778b205a86371de | [
"MIT"
] | 8 | 2019-12-19T16:03:08.000Z | 2021-10-21T01:42:08.000Z | src/hyper_prompt/segments/git.py | artbycrunk/hyper-prompt | b78d98e4e5c65647b566368ea778b205a86371de | [
"MIT"
] | 3 | 2019-08-09T12:05:19.000Z | 2020-10-06T08:16:24.000Z | import os
import re
import subprocess
from ..segment import BasicSegment
class Repo(object):
symbols = {
"detached": "\u2693",
"ahead": "\u2B06",
"behind": "\u2B07",
"staged": "\u2714",
"changed": "\u270E",
"new": "\uf128",
"conflicted": "\u273C",
"stash": "\u2398",
"git": "\uf418",
}
def __init__(self):
self.attrs = [
"new",
"changed",
"staged",
"conflicted",
"active",
"ahead",
"behind",
"conflicted",
"branch",
"remote",
]
for attr in self.attrs:
setattr(self, attr, 0)
@property
def dirty(self):
return sum([getattr(self, attr) for attr in self.attrs[:4]]) > 0
def __str__(self):
return str({attr: getattr(self, attr) for attr in self.attrs})
def subprocess(self, cmd):
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
except OSError:
return ''
data = proc.communicate()
if proc.returncode != 0:
return ''
return data[0].decode("utf-8")
def get_branch(self):
cmd = ["git", "rev-parse", "--abbrev-ref", "HEAD"]
return self.subprocess(cmd).strip() or 'master'
def get_stash(self):
cmd = ["git", "stash", "list"]
stash = self.subprocess(cmd).splitlines()
self.stash = len(stash) if stash else None
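    # Parse "git status --porcelain -b": the first line holds branch/tracking info, remaining lines are two-letter status codes.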
def status(self, show_stash=False):
cmd = ["git", "status", "--porcelain", "-b"]
status = self.subprocess(cmd).splitlines()
if show_stash:
self.get_stash()
for statusline in status[1:]:
code = statusline[:2]
if code == "??":
self.new += 1
elif code in ("DD", "AU", "UD", "UA", "DU", "AA", "UU"):
self.conflicted += 1
else:
if code[1] != " ":
self.changed += 1
if code[0] != " ":
self.staged += 1
info = re.search(
r"^## (?P<local>\S+?)"
r"(\.{3}(?P<remote>\S+?)( \[(ahead (?P<ahead>\d+)(, )?)?(behind (?P<behind>\d+))?\])?)?$",
status[0],
)
branch = info.groupdict() if info else {}
self.ahead = branch.get("ahead", 0)
self.behind = branch.get("behind", 0)
self.branch = branch.get("local", "")
self.remote = branch.get("remote", "")
self.active = True
class Segment(BasicSegment):
ATTRIBUTES = {
"skip_dirs": [],
"show_stash": False,
}
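    # Walk up the directory tree looking for a readable .git directory, then restore the original working directory.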
def is_gitdir(self, cwd):
found = False
_cwd = cwd
while cwd != "/":
if os.access(".git", os.R_OK):
found = True
self.git_dir = cwd
break
_cwd = os.getcwd()
os.chdir("..")
cwd = os.getcwd()
if cwd == _cwd:
break
os.chdir(self.hyper_prompt.cwd)
return found
def add_sub_segment(self, key, fg, bg):
segment = BasicSegment(self.hyper_prompt, self.seg_conf)
value = getattr(self.repo, key, None)
if value:
symbol = self.symbol(key, self.repo.symbols)
content = symbol + str(value)
segment.append(self.hyper_prompt._content % (content), fg, bg)
self.sub_segments.append(segment)
def activate(self):
if self.is_gitdir(self.hyper_prompt.cwd):
self.repo = Repo()
fg, bg = (
self.theme.get("REPO_CLEAN_FG", 0),
self.theme.get("REPO_CLEAN_BG", 148),
)
symbol = self.symbol("git", self.repo.symbols)
content = symbol + str(self.repo.get_branch())
# if skipped dir, only show branch name
if self.hyper_prompt.cwd in self.attr_skip_dirs:
self.append(self.hyper_prompt._content % (content), fg, bg)
return True
self.repo.status(show_stash=self.attr_show_stash)
if not self.repo.active:
return False
if self.repo.dirty:
fg, bg = (
self.theme.get("REPO_DIRTY_FG", 15),
self.theme.get("REPO_DIRTY_BG", 161),
)
self.append(self.hyper_prompt._content % (content), fg, bg)
self.add_sub_segment(
"ahead",
self.theme.get("GIT_AHEAD_FG", 250),
self.theme.get("GIT_AHEAD_BG", 240),
)
self.add_sub_segment(
"behind",
self.theme.get("GIT_BEHIND_FG", 250),
self.theme.get("GIT_BEHIND_BG", 240),
)
self.add_sub_segment(
"staged",
self.theme.get("GIT_STAGED_FG", 15),
self.theme.get("GIT_STAGED_BG", 22),
)
self.add_sub_segment(
"changed",
self.theme.get("GIT_NOTSTAGED_FG", 15),
self.theme.get("GIT_NOTSTAGED_BG", 130),
)
self.add_sub_segment(
"new",
self.theme.get("GIT_UNTRACKED_FG", 15),
self.theme.get("GIT_UNTRACKED_BG", 52),
)
self.add_sub_segment(
"conflicted",
self.theme.get("GIT_CONFLICTED_FG", 15),
self.theme.get("GIT_CONFLICTED_BG", 9),
)
self.add_sub_segment(
"stash",
self.theme.get("GIT_STASH_BG", 221),
self.theme.get("GIT_STASH_FG", 0),
        )
        return True
| 30.880829 | 103 | 0.460403 | 5,869 | 0.984732 | 0 | 0 | 105 | 0.017617 | 0 | 0 | 937 | 0.157215 |
add8774dbb279519f1395cc74a9dc7957550b2a2 | 7,520 | py | Python | edg_core/test_simple_const_prop.py | tengisd/PolymorphicBlocks | 240a11f813762c4eb5a97c9d9766a0af19cd8f3a | [
"BSD-3-Clause"
] | null | null | null | edg_core/test_simple_const_prop.py | tengisd/PolymorphicBlocks | 240a11f813762c4eb5a97c9d9766a0af19cd8f3a | [
"BSD-3-Clause"
] | null | null | null | edg_core/test_simple_const_prop.py | tengisd/PolymorphicBlocks | 240a11f813762c4eb5a97c9d9766a0af19cd8f3a | [
"BSD-3-Clause"
] | null | null | null | import unittest
from . import *
from edg_core.ScalaCompilerInterface import ScalaCompiler
class TestConstPropInternal(Block):
def __init__(self) -> None:
super().__init__()
self.float_param = self.Parameter(FloatExpr())
self.range_param = self.Parameter(RangeExpr())
class TestParameterConstProp(Block):
def __init__(self) -> None:
super().__init__()
self.float_const = self.Parameter(FloatExpr())
self.float_param = self.Parameter(FloatExpr())
self.range_const = self.Parameter(RangeExpr())
self.range_param = self.Parameter(RangeExpr())
def contents(self):
self.assign(self.float_const, 2.0)
self.assign(self.float_param, self.float_const)
self.assign(self.range_const, Range(1.0, 42.0))
self.assign(self.range_param, self.range_const)
self.block = self.Block(TestConstPropInternal())
self.assign(self.block.float_param, self.float_param)
self.assign(self.block.range_param, self.range_param)
class ConstPropTestCase(unittest.TestCase):
def setUp(self) -> None:
self.compiled = ScalaCompiler.compile(TestParameterConstProp)
def test_float_prop(self) -> None:
self.assertEqual(self.compiled.get_value(['float_const']), 2.0)
self.assertEqual(self.compiled.get_value(['block', 'float_param']), 2.0)
def test_range_prop(self) -> None:
self.assertEqual(self.compiled.get_value(['range_const']), Range(1.0, 42.0))
self.assertEqual(self.compiled.get_value(['block', 'range_param']), Range(1.0, 42.0))
class TestPortConstPropLink(Link):
def __init__(self) -> None:
super().__init__()
self.a = self.Port(TestPortConstPropPort())
self.b = self.Port(TestPortConstPropPort())
self.assign(self.b.float_param, self.a.float_param) # first connected is source
class TestPortConstPropPort(Port[TestPortConstPropLink]):
def __init__(self) -> None:
super().__init__()
self.link_type = TestPortConstPropLink
self.float_param = self.Parameter(FloatExpr())
class TestPortConstPropInnerBlock(Block):
def __init__(self) -> None:
super().__init__()
self.port = self.Port(TestPortConstPropPort(), optional=True)
class TestPortConstPropOuterBlock(Block):
def __init__(self) -> None:
super().__init__()
self.inner = self.Block(TestPortConstPropInnerBlock())
self.port = self.Port(TestPortConstPropPort())
self.connect(self.inner.port, self.port)
class TestPortConstPropTopBlock(Block):
def __init__(self) -> None:
super().__init__()
self.block1 = self.Block(TestPortConstPropInnerBlock())
self.block2 = self.Block(TestPortConstPropOuterBlock())
self.link = self.connect(self.block1.port, self.block2.port)
self.assign(self.block1.port.float_param, 3.5)
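# The value assigned on block1's port should propagate through the link to block2's port and its inner block.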
class ConstPropPortTestCase(unittest.TestCase):
def setUp(self) -> None:
self.compiled = ScalaCompiler.compile(TestPortConstPropTopBlock)
def test_port_param_prop(self) -> None:
self.assertEqual(self.compiled.get_value(['block1', 'port', 'float_param']), 3.5)
self.assertEqual(self.compiled.get_value(['link', 'a', 'float_param']), 3.5)
self.assertEqual(self.compiled.get_value(['link', 'b', 'float_param']), 3.5)
self.assertEqual(self.compiled.get_value(['block2', 'port', 'float_param']), 3.5)
self.assertEqual(self.compiled.get_value(['block2', 'inner', 'port', 'float_param']), 3.5)
def test_connected_link(self) -> None:
self.assertEqual(self.compiled.get_value(['block1', 'port', edgir.IS_CONNECTED]), True)
self.assertEqual(self.compiled.get_value(['block2', 'port', edgir.IS_CONNECTED]), True)
self.assertEqual(self.compiled.get_value(['block2', 'inner', 'port', edgir.IS_CONNECTED]), True)
class TestDisconnectedTopBlock(Block):
def __init__(self) -> None:
super().__init__()
self.block1 = self.Block(TestPortConstPropInnerBlock())
self.assign(self.block1.port.float_param, 3.5)
class DisconnectedPortTestCase(unittest.TestCase):
def setUp(self) -> None:
self.compiled = ScalaCompiler.compile(TestDisconnectedTopBlock)
def test_disconnected_link(self) -> None:
self.assertEqual(self.compiled.get_value(['block1', 'port', edgir.IS_CONNECTED]), False)
class TestPortConstPropBundleLink(Link):
def __init__(self) -> None:
super().__init__()
self.a = self.Port(TestPortConstPropBundle())
self.b = self.Port(TestPortConstPropBundle())
self.elt1_link = self.connect(self.a.elt1, self.b.elt1)
self.elt2_link = self.connect(self.a.elt2, self.b.elt2)
class TestPortConstPropBundle(Bundle[TestPortConstPropBundleLink]):
def __init__(self) -> None:
super().__init__()
self.link_type = TestPortConstPropBundleLink
self.elt1 = self.Port(TestPortConstPropPort())
self.elt2 = self.Port(TestPortConstPropPort())
class TestPortConstPropBundleInnerBlock(Block):
def __init__(self) -> None:
super().__init__()
self.port = self.Port(TestPortConstPropBundle())
class TestPortConstPropBundleTopBlock(Block):
def __init__(self) -> None:
super().__init__()
def contents(self) -> None:
self.block1 = self.Block(TestPortConstPropBundleInnerBlock())
self.block2 = self.Block(TestPortConstPropBundleInnerBlock())
self.link = self.connect(self.block1.port, self.block2.port)
self.assign(self.block1.port.elt1.float_param, 3.5)
self.assign(self.block1.port.elt2.float_param, 6.0)
class ConstPropBundleTestCase(unittest.TestCase):
def setUp(self) -> None:
self.compiled = ScalaCompiler.compile(TestPortConstPropBundleTopBlock)
def test_port_param_prop(self) -> None:
self.assertEqual(self.compiled.get_value(['block1', 'port', 'elt1', 'float_param']), 3.5)
self.assertEqual(self.compiled.get_value(['block1', 'port', 'elt2', 'float_param']), 6.0)
self.assertEqual(self.compiled.get_value(['link', 'a', 'elt1', 'float_param']), 3.5)
self.assertEqual(self.compiled.get_value(['link', 'a', 'elt2', 'float_param']), 6.0)
self.assertEqual(self.compiled.get_value(['link', 'elt1_link', 'a', 'float_param']), 3.5)
self.assertEqual(self.compiled.get_value(['link', 'elt2_link', 'a', 'float_param']), 6.0)
self.assertEqual(self.compiled.get_value(['link', 'elt1_link', 'b', 'float_param']), 3.5)
self.assertEqual(self.compiled.get_value(['link', 'elt2_link', 'b', 'float_param']), 6.0)
self.assertEqual(self.compiled.get_value(['link', 'b', 'elt1', 'float_param']), 3.5)
self.assertEqual(self.compiled.get_value(['link', 'b', 'elt2', 'float_param']), 6.0)
self.assertEqual(self.compiled.get_value(['block2', 'port', 'elt1', 'float_param']), 3.5)
self.assertEqual(self.compiled.get_value(['block2', 'port', 'elt2', 'float_param']), 6.0)
def test_connected_link(self) -> None:
self.assertEqual(self.compiled.get_value(['block1', 'port', edgir.IS_CONNECTED]), True)
self.assertEqual(self.compiled.get_value(['block2', 'port', edgir.IS_CONNECTED]), True)
# Note: inner ports IS_CONNECTED is not defined
self.assertEqual(self.compiled.get_value(['link', 'a', edgir.IS_CONNECTED]), True)
self.assertEqual(self.compiled.get_value(['link', 'b', edgir.IS_CONNECTED]), True)
self.assertEqual(self.compiled.get_value(['link', 'elt1_link', 'a', edgir.IS_CONNECTED]), True)
self.assertEqual(self.compiled.get_value(['link', 'elt1_link', 'b', edgir.IS_CONNECTED]), True)
self.assertEqual(self.compiled.get_value(['link', 'elt2_link', 'a', edgir.IS_CONNECTED]), True)
self.assertEqual(self.compiled.get_value(['link', 'elt2_link', 'b', edgir.IS_CONNECTED]), True)
| 38.367347 | 100 | 0.71742 | 7,381 | 0.981516 | 0 | 0 | 0 | 0 | 0 | 0 | 837 | 0.111303 |
adda310f115e4d135391af4cbeb41ba6e1fe7f71 | 3,061 | py | Python | get_types.py | AllanMoralesPrado/PokeAPI-project | a8ce6f49bc6b4ab395f4d2f5ef84de8d5ec17145 | [
"MIT"
] | null | null | null | get_types.py | AllanMoralesPrado/PokeAPI-project | a8ce6f49bc6b4ab395f4d2f5ef84de8d5ec17145 | [
"MIT"
] | null | null | null | get_types.py | AllanMoralesPrado/PokeAPI-project | a8ce6f49bc6b4ab395f4d2f5ef84de8d5ec17145 | [
"MIT"
] | null | null | null | #Modulo que devuelve tres valores:
#   pkmn_type_en: list of str holding the names of the pokemon's types (in English)
#   special_type: list of str holding the names of the pokemon's special types
#   pkmn_damage_rel: dict of str lists holding the names of the pokemon types (in English)
#                    that have a damage relation to this pokemon's type(s)
from get_module import get_info
# Function that collects pokemon type names and stores them in a list, skipping duplicates
def add_types_rel(damage_types, type_src):
for ts in type_src:
if ts['name'] not in damage_types:
damage_types.append(ts['name'])
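# Query PokeAPI for the pokemon's base data and species entry, then collect its types, special flags and damage relations.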
def get_types_info(pkmn_name):
pkmn_base = get_info(f'https://pokeapi.co/api/v2/pokemon/{pkmn_name}')
resultado_ = get_info(f'https://pokeapi.co/api/v2/pokemon-species/{pkmn_name}/')
    # strengths/weaknesses
    # E.g. Charizard is a fire/flying type
pkmn_damage_rel = {
'double_damage_from': [],
'double_damage_to': [],
'half_damage_from':[],
'half_damage_to': [],
'no_damage_from': [],
'no_damage_to': []
}
    # type / special type
pkmn_type_en = [tipo['type']['name'] for tipo in pkmn_base['types']]
pkmn_type_url = [tipo['type']['url'] for tipo in pkmn_base['types']]
special_type = []
    # Iterate over each type the pokemon belongs to
for i in pkmn_type_url:
        # Fetch the dictionary containing the damage relations between types
tipo = get_info(i)['damage_relations']
        # For each list in the damage-relations dictionary
for key in pkmn_damage_rel:
            # Add the pokemon types for the corresponding damage relation
add_types_rel(pkmn_damage_rel[key],tipo[key])
    if resultado_['is_baby']:
        special_type.append('Baby')
    if resultado_['is_legendary']:
        special_type.append('Legendary')
    if resultado_['is_mythical']:
        special_type.append('Mythical')
return pkmn_type_en, special_type, pkmn_damage_rel
if __name__ == '__main__':
name = 'rhydon'
pokemon_tipo, tipo_especial, pkmn_buffs_n_nerfs = get_types_info(name)
print(f'Pokemon: {name.capitalize()}')
    print('\nTYPE')
for i in pokemon_tipo:
print(i, end=' ')
    print('\n\nSuper effective against:')
for value in pkmn_buffs_n_nerfs['double_damage_to']:
print(value, end=' ')
    print('\n\nWeak against:')
for value in pkmn_buffs_n_nerfs['double_damage_from']:
print(value, end=' ')
    print('\n\nResistant to:')
for value in pkmn_buffs_n_nerfs['half_damage_from']:
print(value, end=' ')
    print('\n\nNot very effective against:')
for value in pkmn_buffs_n_nerfs['half_damage_to']:
print(value, end=' ')
    print('\n\nImmune to:')
for value in pkmn_buffs_n_nerfs['no_damage_from']:
print(value, end=' ')
    print('\n\nIneffective against:')
for value in pkmn_buffs_n_nerfs['no_damage_to']:
print(value, end=' ')
| 35.593023 | 111 | 0.665795 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,475 | 0.479987 |
addc984c604dda76024ddee4181508788307c8b0 | 422 | py | Python | pyPractise/jcp030.py | enyaooshigaolo/MyPython | 67dc3f6ff596545ab70e11a573a6031232128711 | [
"Apache-2.0"
] | null | null | null | pyPractise/jcp030.py | enyaooshigaolo/MyPython | 67dc3f6ff596545ab70e11a573a6031232128711 | [
"Apache-2.0"
] | null | null | null | pyPractise/jcp030.py | enyaooshigaolo/MyPython | 67dc3f6ff596545ab70e11a573a6031232128711 | [
"Apache-2.0"
] | null | null | null | '''
Created on 2017-01-15
@author: Think
Problem: given a 5-digit number, determine whether it is a palindrome.
E.g. 12321 is a palindrome: the ones digit equals the ten-thousands digit, and the tens digit equals the thousands digit.
1. Program analysis: same as example 29.
2. Program source code:
'''
def jcp030():
    x = int(input('input a number:\n'))
    x = str(x)
    for i in range(len(x)//2):
        if x[i] != x[-i - 1]:
            print('this number is not a palindrome')
            break
    else:
        # the else clause runs only when the loop found no mismatched digit pair
        print('this number is a palindrome')
jcp030() | 21.1 | 49 | 0.597156 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 307 | 0.568519 |
addcfbc1202ddedf82c6055c7f9b66dac55c513b | 2,821 | py | Python | scripts/anisotropy.py | jmsung/APC | 9f0e065aa748a4d041b783b07cd8078715d39625 | [
"MIT"
] | null | null | null | scripts/anisotropy.py | jmsung/APC | 9f0e065aa748a4d041b783b07cd8078715d39625 | [
"MIT"
] | null | null | null | scripts/anisotropy.py | jmsung/APC | 9f0e065aa748a4d041b783b07cd8078715d39625 | [
"MIT"
] | null | null | null | """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Created by Jongmin Sung (jongmin.sung@gmail.com)
Anisotropy data analysis
The equation for the curve as published by Marchand et al. in Nature Cell Biology in 2001 is as follows:
y = a + (b-a) / [(c(x+K)/K*d)+1], where
a is the anisotropy without protein,
b is the anisotropy with protein,
c is the Kd for ligand,
d is the total concentration of protein.
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
from __future__ import division, print_function, absolute_import
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from pathlib import Path
import os
import shutil
from timeit import default_timer as timer
from scipy.stats import norm
from scipy.optimize import curve_fit
from scipy.optimize import minimize
from inspect import currentframe, getframeinfo
fname = getframeinfo(currentframe()).filename # current file name
current_dir = Path(fname).resolve().parent
# User input ----------------------------------------------------------------
red_x = np.array([100, 50, 25, 12.5, 6.25, 3.125, 1.56, 0])
red_y = np.array([0.179, 0.186, 0.19, 0.195, 0.2, 0.212, 0.222, 0.248])
red_p = np.array([0.191, 0.248, 0.05, 1])
black_x = np.array([100, 50, 25, 18.75, 14.1, 10.5, 7.9, 5.9, 0])
black_y = np.array([0.204, 0.225, 0.248, 0.26, 0.268, 0.271, 0.274, 0.277, 0.278])
black_p = np.array([0.183, 0.278, 1.5, 16])
# ---------------------------------------------------------------------------
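# Binding curves from the docstring equation: [a, b, c, d] are fixed from red_p/black_p; K is the single fitted parameter.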
def red_anisotropy(x, K):
a = red_p[0]
b = red_p[1]
c = red_p[2]
d = red_p[3]
return a+(b-a)/((c*(x+K)/(K*d))+1)
def black_anisotropy(x, K):
a = black_p[0]
b = black_p[1]
c = black_p[2]
d = black_p[3]
return a+(b-a)/((c*(x+K)/(K*d))+1)
def main():
    # fit the dissociation constant K for each curve; red_K/black_K hold the fitted values
    red_K, _ = curve_fit(red_anisotropy, red_x, red_y, p0=[0.078])
    black_K, _ = curve_fit(black_anisotropy, black_x, black_y, p0=[0.1])
# Plot the result
fit_x = np.linspace(0, 100, 1000)
fig, (ax1, ax2) = plt.subplots(figsize=(20, 10), ncols=2, nrows=1, dpi=300)
ax1.plot(red_x, red_y, 'ro', ms=10)
    ax1.plot(fit_x, red_anisotropy(fit_x, red_K[0]), 'r', lw=2)
ax1.set_xlabel('[dark D] um')
ax1.set_ylabel('Anisotropy')
    ax1.set_title('Red K = %f' % red_K[0])
ax1.set_ylim([0.15, 0.3])
ax2.plot(black_x, black_y, 'ko', ms=10)
    ax2.plot(fit_x, black_anisotropy(fit_x, black_K[0]), 'k', lw=2)
ax2.set_xlabel('[dark D] um')
ax2.set_ylabel('Anisotropy')
    ax2.set_title('Black K = %f' % black_K[0])
ax2.set_ylim([0.15, 0.3])
fig.savefig('plot_anisotropy.png')
plt.close(fig)
if __name__ == "__main__":
main()
| 32.802326 | 105 | 0.558313 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 829 | 0.293867 |
addd60f66ac5c1f27cca6c73bae47a5f1d867f7c | 4,519 | py | Python | tensorflow/standard/reinforcement_learning/rl_on_gcp_demo/trainer/ddpg_agent.py | VanessaDo/cloudml-samples | ae6cd718e583944beef9d8a90db12091ac399432 | [
"Apache-2.0"
] | 1,552 | 2016-09-29T18:59:01.000Z | 2022-03-30T06:13:41.000Z | tensorflow/standard/reinforcement_learning/rl_on_gcp_demo/trainer/ddpg_agent.py | VanessaDo/cloudml-samples | ae6cd718e583944beef9d8a90db12091ac399432 | [
"Apache-2.0"
] | 385 | 2016-10-01T11:21:18.000Z | 2021-12-17T01:40:25.000Z | tensorflow/standard/reinforcement_learning/rl_on_gcp_demo/trainer/ddpg_agent.py | VanessaDo/cloudml-samples | ae6cd718e583944beef9d8a90db12091ac399432 | [
"Apache-2.0"
] | 983 | 2016-09-29T17:00:57.000Z | 2022-03-30T06:13:46.000Z | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of a DDPG agent.
Implementation of DDPG - Deep Deterministic Policy Gradient
Algorithm and hyperparameter details can be found here:
http://arxiv.org/pdf/1509.02971v2.pdf
"""
import agent
from common import replay_buffer
from common.actor_critic import ActorNetwork
from common.actor_critic import CriticNetwork
import numpy as np
class DDPG(agent.Agent):
"""DDPG agent."""
def __init__(self, env, sess, config):
"""Initialize members."""
state_dim = env.observation_space.shape[0]
self.env = env
self.action_dim = env.action_space.shape[0]
self.action_high = env.action_space.high
self.action_low = env.action_space.low
self.batch_size = config.batch_size
self.warmup_size = config.warmup_size
self.gamma = config.gamma
self.sigma = config.sigma
self.noise_cap = config.c
self.actor = ActorNetwork(sess=sess,
state_dim=state_dim,
action_dim=self.action_dim,
action_high=self.action_high,
action_low=self.action_low,
learning_rate=config.actor_lr,
grad_norm_clip=config.grad_norm_clip,
tau=config.tau,
batch_size=config.batch_size)
self.critic = CriticNetwork(sess=sess,
state_dim=state_dim,
action_dim=self.action_dim,
learning_rate=config.critic_lr,
tau=config.tau,
gamma=config.gamma)
self.replay_buffer = replay_buffer.ReplayBuffer(
buffer_size=config.buffer_size)
def random_action(self, observation):
"""Return a random action."""
return self.env.action_space.sample()
def action(self, observation):
"""Return an action according to the agent's policy."""
return self.actor.get_action(observation)
def action_with_noise(self, observation):
"""Return a noisy action."""
if self.replay_buffer.size > self.warmup_size:
action = self.action(observation)
else:
action = self.random_action(observation)
noise = np.clip(np.random.randn(self.action_dim) * self.sigma,
-self.noise_cap, self.noise_cap)
action_with_noise = action + noise
return (np.clip(action_with_noise, self.action_low, self.action_high),
action, noise)
def store_experience(self, s, a, r, t, s2):
"""Save experience to replay buffer."""
self.replay_buffer.add(s, a, r, t, s2)
def train(self, global_step):
"""Train the agent's policy for 1 iteration."""
if self.replay_buffer.size > self.warmup_size:
s0, a, r, t, s1 = self.replay_buffer.sample_batch(self.batch_size)
target_actions = self.actor.get_target_action(s1)
target_qval = self.get_target_qval(s1, target_actions)
t = t.astype(dtype=int)
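            # Bellman target: terminal transitions (t == 1) drop the bootstrapped target Q-value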
y = r + self.gamma * target_qval * (1 - t)
self.critic.train(s0, a, y)
actions = self.actor.get_action(s0)
grads = self.critic.get_action_gradients(s0, actions)
self.actor.train(s0, grads[0])
self.update_targets()
def update_targets(self):
"""Update all target networks."""
self.actor.update_target_network()
self.critic.update_target_network()
def get_target_qval(self, observation, action):
"""Get target Q-val."""
return self.critic.get_target_qval(observation, action)
def get_qval(self, observation, action):
"""Get Q-val."""
return self.critic.get_qval(observation, action)
| 40.348214 | 78 | 0.611861 | 3,589 | 0.794202 | 0 | 0 | 0 | 0 | 0 | 0 | 1,068 | 0.236335 |
addded900080d658ae63eb6fa0ec1ba46ebc0d17 | 492 | py | Python | setup.py | my-old-projects/syspy | f870adfa6d2839fa8c8c8d3a6c3bdcfb0c863c1d | [
"MIT"
] | null | null | null | setup.py | my-old-projects/syspy | f870adfa6d2839fa8c8c8d3a6c3bdcfb0c863c1d | [
"MIT"
] | null | null | null | setup.py | my-old-projects/syspy | f870adfa6d2839fa8c8c8d3a6c3bdcfb0c863c1d | [
"MIT"
] | 1 | 2020-11-21T10:09:30.000Z | 2020-11-21T10:09:30.000Z | from distutils.core import setup
setup(
name = 'syspy',
version = '0.2',
url = 'https://github.com/aligoren/syspy',
download_url = 'https://github.com/aligoren/syspy/archive/master.zip',
author = 'Ali GOREN <goren.ali@yandex.com>',
author_email = 'goren.ali@yandex.com',
license = 'Apache v2.0 License',
packages = ['syspy'],
    description = 'Windows System Information',
keywords = ['sys', 'util', 'system', 'info', 'information', 'windows', 'os'],
)
| 32.8 | 81 | 0.636179 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 265 | 0.538618 |
adde9423551c7dda2a8e059cbe593f3c2faacded | 1,638 | py | Python | DjangoECom/products/migrations/0003_auto_20210109_2256.py | MostafaSamyFayez/E-Commerce-Sys | 95ed3cb65b238866e336d43422dfb1737bfd7993 | [
"Unlicense"
] | 2 | 2021-04-01T00:23:44.000Z | 2021-04-01T00:23:48.000Z | DjangoECom/products/migrations/0003_auto_20210109_2256.py | MostafaSamyFayez/E-Commerce-Sys | 95ed3cb65b238866e336d43422dfb1737bfd7993 | [
"Unlicense"
] | null | null | null | DjangoECom/products/migrations/0003_auto_20210109_2256.py | MostafaSamyFayez/E-Commerce-Sys | 95ed3cb65b238866e336d43422dfb1737bfd7993 | [
"Unlicense"
] | 1 | 2021-01-23T13:06:25.000Z | 2021-01-23T13:06:25.000Z | # Generated by Django 3.1.4 on 2021-01-09 20:56
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('products', '0002_auto_20210102_1247'),
]
operations = [
migrations.RemoveField(
model_name='product',
name='review',
),
migrations.AddField(
model_name='product',
name='total_review',
field=models.FloatField(default=0),
),
migrations.CreateModel(
name='Review',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', models.TextField(blank=True, max_length=250)),
('status', models.CharField(choices=[('True', 'True'), ('False', 'False'), ('New', 'New')], default='New', max_length=20)),
('subject', models.CharField(blank=True, max_length=50)),
('ip', models.CharField(blank=True, max_length=20)),
('rate', models.IntegerField(default=1)),
('create_at', models.DateTimeField(auto_now_add=True)),
('update_at', models.DateTimeField(auto_now=True)),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products.product')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 39.95122 | 139 | 0.59707 | 1,479 | 0.90293 | 0 | 0 | 0 | 0 | 0 | 0 | 270 | 0.164835 |
ade08716a0728bd71f409c370ab1b6a0670febc4 | 1,354 | py | Python | setup.py | PiotrRadzinski/envemind | 7eb159e3a9f481b95e26df81595e5c8b7e8485c7 | [
"MIT"
] | null | null | null | setup.py | PiotrRadzinski/envemind | 7eb159e3a9f481b95e26df81595e5c8b7e8485c7 | [
"MIT"
] | null | null | null | setup.py | PiotrRadzinski/envemind | 7eb159e3a9f481b95e26df81595e5c8b7e8485c7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from distutils.core import Extension
import pathlib
here = pathlib.Path(__file__).parent.resolve()
setup(
name='envemind',
version='0.0.1',
description='Prediction of monoisotopic mass in mass spectra',
# long_description=(here / 'README.md').read_text(encoding='utf-8'),
# long_description_content_type='text/markdown',
url='https://github.com/PiotrRadzinski/envemind',
author='Piotr Radziński, Michał Piotr Startek',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
],
    keywords = 'Mass spectrometry monoisotopic mass',
packages=find_packages(),
python_requires='>=3.6',
install_requires='numpy scipy IsoSpecPy pyteomics'.split(),
# entry_points={},
# scripts=[''],
)
| 36.594595 | 71 | 0.619645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 843 | 0.621681 |
ade1b9af12ca82c1c26ca0644c6d5b378d6445ab | 5,496 | py | Python | build.py | niklas2902/py4godot | bf50624d1fc94b55faf82a3a4d322e33fbba60ce | [
"MIT"
] | 2 | 2021-12-10T21:17:57.000Z | 2021-12-17T18:54:49.000Z | build.py | niklas2902/py4godot | bf50624d1fc94b55faf82a3a4d322e33fbba60ce | [
"MIT"
] | 9 | 2021-12-21T18:35:28.000Z | 2022-03-27T20:03:50.000Z | build.py | niklas2902/py4godot | bf50624d1fc94b55faf82a3a4d322e33fbba60ce | [
"MIT"
] | 1 | 2022-03-07T08:06:57.000Z | 2022-03-07T08:06:57.000Z | import argparse
import os
import subprocess
import time
from Cython.Build import cythonize
import generate_bindings
from meson_scripts import copy_tools, download_python, generate_init_files, \
locations, platform_check, generate_godot, \
download_godot
generate_bindings.build()
def cythonize_files():
module = cythonize('py4godot/core/*/*.pyx', language_level=3)
module += cythonize("py4godot/classes/*.pyx", language_level=3)
module += cythonize("py4godot/utils/*.pyx", language_level=3)
module += cythonize("py4godot/pluginscript_api/*.pyx", language_level=3)
module += cythonize("py4godot/pluginscript_api/*/*.pyx", language_level=3)
module += cythonize("py4godot/pluginscript_api/*/*/*.pyx", language_level=3)
module += cythonize("py4godot/pluginscript_api/*/*/*/*.pyx", language_level=3)
module += cythonize("py4godot/gdnative_api/*.pyx", language_level=3)
module += cythonize("py4godot/enums/*.pyx", language_level=3)
module += cythonize("py4godot/events/*.pyx", language_level=3)
def compile_python_ver_file(platform):
"""compile python file, to find the matching python version"""
python_dir = locations.get_python_dir(platform)
godot_dir = locations.get_godot_dir(platform)
with open("platforms/binary_dirs/python_ver_temp.cross", "r") as python_temp:
file_string = python_temp.read()
# Replacing things like in a template
file_string = file_string.replace("{python_ver}", python_dir)
file_string = file_string.replace("{godot}", godot_dir)
with open("platforms/binary_dirs/python_ver_compile.cross", "w") as python_compile:
python_compile.write(file_string)
def get_compiler():
compiler_res = subprocess.run("vcvarsall", shell=True, stdout=subprocess.DEVNULL,
stderr=subprocess.STDOUT)
if compiler_res.returncode == 0:
return "msvc"
compiler_res = subprocess.run("gcc --version", shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
if compiler_res.returncode == 0:
return "gcc"
raise Exception("No compiler found")
current_platform = platform_check.get_platform()
command_separator = "&"
if "linux" in current_platform:
command_separator = ";"
my_parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
my_parser.add_argument('--compiler',
help='specify the compiler, you want to use to compile')
my_parser.add_argument('--target_platform',
help='specify the platform, you want to go build for')
my_parser.add_argument("-run_tests", help="should tests be run", default="False")
my_parser.add_argument("-download_godot", help="should tests be run", default="False")
# Execute parse_args()
args = my_parser.parse_args()
# Determining if tests should be run
should_run_tests = args.run_tests.lower() == "true"
# Determining if godot binary should be downloaded
should_download_godot = args.download_godot.lower() == "true"
build_dir = f"build_meson/{args.target_platform}"
start = time.time()
if args.compiler is None:
print("Checking for compilers")
args.compiler = get_compiler()
print(f"Got compiler:{args.compiler}")
cythonize_files()
# loading the needed python files for the target platform
download_python.download_file(args.target_platform, allow_copy=True)
# download needed python files for the current platform
download_python.download_file(current_platform, allow_copy=False)
compile_python_ver_file(current_platform)
# initializing for msvc if wanted as compiler (todo:should be improved sometime)
msvc_init = f"vcvarsall.bat {'x86_amd64'} {command_separator} cl {command_separator} " if "msvc" in args.compiler else ""
res = subprocess.Popen(msvc_init +
f"meson {build_dir} --cross-file platforms/{args.target_platform}.cross "
f"--cross-file platforms/compilers/{args.compiler}_compiler.native "
f"--cross-file platforms/binary_dirs/python_ver_compile.cross "
f"--buildtype=release {'--wipe' if os.path.isdir(build_dir) else ''}"
f"{command_separator} ninja -C build_meson/{args.target_platform}",
shell=True)
res.wait()
copy_tools.run(args.target_platform)
generate_init_files.create_init_file(args.target_platform)
copy_tools.copy_main(args.target_platform)
generate_godot.generate_lib(args.target_platform)
generate_godot.generate_gdignore()
print("=================================Build finished==================================")
print("Build took:", time.time() - start, "seconds")
if should_download_godot:
print("=================================Start download==================================")
download_godot.run(current_platform)
print("=================================Fnish download==================================")
# running tests
if should_run_tests:
print("=================================Start tests==================================")
start = time.time()
copy_tools.copy_tests(args.target_platform)
res = subprocess.Popen(
f"ninja -C build_meson/{args.target_platform} test", shell=True)
res.wait()
streamdata = res.communicate()[0]
rc = res.returncode
print("=================================Build finished==================================")
print("Running tests took:", time.time() - start, "seconds")
if rc != 0:
raise Exception("Tests failed")
| 41.014925 | 121 | 0.670852 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,163 | 0.393559 |
ade3eb7e1347229abd0cf82b50e4a088e04ffa76 | 543 | py | Python | pacote-download/Python/modulo01/python01/Aula08.py | fabiosabariego/curso-python | a4ffff53ff9e92b5ef0de637e9bcce25f7feebd9 | [
"MIT"
] | null | null | null | pacote-download/Python/modulo01/python01/Aula08.py | fabiosabariego/curso-python | a4ffff53ff9e92b5ef0de637e9bcce25f7feebd9 | [
"MIT"
] | null | null | null | pacote-download/Python/modulo01/python01/Aula08.py | fabiosabariego/curso-python | a4ffff53ff9e92b5ef0de637e9bcce25f7feebd9 | [
"MIT"
] | null | null | null | # ------------------------------- USING MODULES - LESSON 08 -------------------------------
# MATH LIBRARY
#from math import sqrt
#num = int(input('Enter a number: '))
#raiz = sqrt(num)
#print('The square root of {} is: {:.2f}'.format(num, raiz))
# RANDOM LIBRARY
#import random
# num = random.random() -> one way to generate random numbers; in this case it yields any number from 0 to 1.
#num = random.randint(1, 10)
#print(num)
import emoji
print(emoji.emojize("Olá, mundo! :purple_heart:", use_aliases=True))
| 20.884615 | 118 | 0.604052 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 471 | 0.859489 |
ade4d82b609c594b7c7b2743267ac4e2efc9393b | 227 | py | Python | tests/conftest.py | skarzi/drf-exception-dispatcher | a950f7ce8ca33551b0860fb4c78ced53afd06163 | [
"MIT"
] | null | null | null | tests/conftest.py | skarzi/drf-exception-dispatcher | a950f7ce8ca33551b0860fb4c78ced53afd06163 | [
"MIT"
] | 70 | 2021-02-05T07:20:57.000Z | 2022-03-31T05:11:20.000Z | tests/conftest.py | skarzi/drf-exception-dispatcher | a950f7ce8ca33551b0860fb4c78ced53afd06163 | [
"MIT"
] | null | null | null | import os
import django
from django.conf import settings
def pytest_configure(config):
"""Configure Django."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
settings.configure()
django.setup()
| 17.461538 | 63 | 0.726872 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 57 | 0.251101 |
ade4fdb3bcfaf121e8cc409f04834032214ff47c | 763 | py | Python | Python/p1.py | Nivedya-27/Autumn-of-Automation | 2f645b58d035d6277f7ee0ff77814be812815f6d | [
"MIT"
] | null | null | null | Python/p1.py | Nivedya-27/Autumn-of-Automation | 2f645b58d035d6277f7ee0ff77814be812815f6d | [
"MIT"
] | null | null | null | Python/p1.py | Nivedya-27/Autumn-of-Automation | 2f645b58d035d6277f7ee0ff77814be812815f6d | [
"MIT"
] | null | null | null | d=int(input("enter d"))
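# Find all d-digit primes, then collect twin primes (pairs differing by 2) and write them to a file.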
n=''
max=''
for i in range(d):
if i==0:
n=n+str(1)
    else:
n=n+str(0)
max=max+str(9)
n=int(n)+1 #smallest odd no. with d digits if d>1 or 2 if d==1
max=int(max) #largest no. with d digits
def check_prime(m_odd): #returns truth value of an odd no. or of 2 being prime
if m_odd==2:return True
i=3
while m_odd%i!=0 and i<m_odd:
i=i+2
return i==m_odd
l=[] #list of prime no.s of d digits
while n<=max:
if check_prime(n):
l.append(n)
if n==2:
n=n+1
continue
if n>2:
n=n+2
print(l)
d=[] #list of tuples with consecutive difference 2
for i in range(len(l)-1):
if (l[i+1]-l[i]==2):
d.append((l[i],l[i+1]))
f=open('myFirstFile.txt','w')
for i in range(len(d)):
f.write(str(d[i][0])+' '+str(d[i][1])+"\n")
f.close()
| 20.621622 | 78 | 0.609436 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 247 | 0.323722 |
ade56836ea955efdb495cba170a57ad630ee81e1 | 24,951 | py | Python | tests/test_03_main_balancer.py | VolumeFi/exchange-add | 0021a55557cbce43f31b21078a44f62fb14e0d56 | [
"Apache-2.0"
] | 5 | 2021-03-23T21:09:06.000Z | 2021-07-06T19:29:38.000Z | tests/test_03_main_balancer.py | VolumeFi/exchange-add | 0021a55557cbce43f31b21078a44f62fb14e0d56 | [
"Apache-2.0"
] | 4 | 2021-02-08T17:08:06.000Z | 2021-02-24T04:59:50.000Z | tests/test_03_main_balancer.py | VolumeFi/exchange-liquidity | 0021a55557cbce43f31b21078a44f62fb14e0d56 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
import pytest
from brownie.test import strategy
from hypothesis import HealthCheck
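# Stateful property test: each rule run draws a coin index, a Balancer pool and trade amounts from the strategies below.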
class StateMachine:
coin = strategy('uint16', max_value=6)
pool = strategy('uint16', max_value=4)
valueEth = strategy('uint256', min_value=9 * 10 ** 17, max_value=11 * 10 ** 17)
valueUSD6 = strategy('uint256', min_value=900 * 10 ** 6, max_value=1100 * 10 ** 6)
valueUSD18 = strategy('uint256', min_value=900 * 10 ** 18, max_value=1100 * 10 ** 18)
valueBTC = strategy('uint256', min_value=9 * 10 ** 6, max_value=11 * 10 ** 6)
def __init__(self, MyBalancerExchangeAdd, MyBalancerExchangeRemove, UniswapV2Router02, DAI, USDC, USDT, WETH, WBTC, accounts, Contract):
self.coins = [
"0x0000000000000000000000000000000000000000",
"0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE",
WETH,
USDC,
DAI,
USDT,
WBTC
]
self.MyBalancerExchangeAdd = MyBalancerExchangeAdd
self.MyBalancerExchangeRemove = MyBalancerExchangeRemove
self.accounts = accounts
self.Contract = Contract
self.WETH = WETH
self.USDT = USDT
self.USDC = USDC
UniswapV2Router02.swapETHForExactTokens(5000 * 10 ** 6, [WETH, USDC], accounts[0], 2 ** 256 - 1, {"from":accounts[0], "value": 10 * 10 ** 18})
UniswapV2Router02.swapETHForExactTokens(5000 * 10 ** 6, [WETH, USDT], accounts[0], 2 ** 256 - 1, {"from":accounts[0], "value": 10 * 10 ** 18})
UniswapV2Router02.swapETHForExactTokens(5000 * 10 ** 18, [WETH, DAI], accounts[0], 2 ** 256 - 1, {"from":accounts[0], "value": 10 * 10 ** 18})
WETH.deposit({"from":accounts[0], "value": 5 * 10 ** 18})
UniswapV2Router02.swapETHForExactTokens(5 * 10 ** 7, [WETH, WBTC], accounts[0], 2 ** 256 - 1, {"from":accounts[0], "value": 50 * 10 ** 18})
def rule_any_pair_test(self, coin, pool, valueEth, valueUSD6, valueUSD18, valueBTC):
values = [
valueEth,
valueEth,
valueEth,
valueUSD6,
valueUSD18,
valueUSD6,
valueBTC
]
pools = [
"0x1efF8aF5D577060BA4ac8A29A13525bb0Ee2A3D5", # WETH/WBTC 50/50
"0x59A19D8c652FA0284f44113D0ff9aBa70bd46fB4", # BAL/ETH 80/20
"0x8b6e6E7B5b3801FEd2CaFD4b22b8A16c2F2Db21a", # DAI/ETH 20/80
"0xE3f9cF7D44488715361581DD8B3a15379953eB4C", # SNX/ETH/xSNXa 25/25/50
"0x5B2dC8c02728e8FB6aeA03a622c3849875A48801" # wPE/GIFT/IMPACT/YFU/PIXEL/NFTS/LIFT/STR 86/2/2/2/2/2/2/2
]
accounts = self.accounts
coins = self.coins
Contract = self.Contract
MyBalancerExchangeAdd = self.MyBalancerExchangeAdd
MyBalancerExchangeRemove = self.MyBalancerExchangeRemove
print("Test coin " + str(coin))
print("Test pool " + str(pool))
pair = Contract.from_abi("BPT", pools[pool], [{"inputs":[],"payable":False,"stateMutability":"nonpayable","type":"constructor"},{"anonymous":False,"inputs":[{"indexed":True,"internalType":"address","name":"src","type":"address"},{"indexed":True,"internalType":"address","name":"dst","type":"address"},{"indexed":False,"internalType":"uint256","name":"amt","type":"uint256"}],"name":"Approval","type":"event"},{"anonymous":True,"inputs":[{"indexed":True,"internalType":"bytes4","name":"sig","type":"bytes4"},{"indexed":True,"internalType":"address","name":"caller","type":"address"},{"indexed":False,"internalType":"bytes","name":"data","type":"bytes"}],"name":"LOG_CALL","type":"event"},{"anonymous":False,"inputs":[{"indexed":True,"internalType":"address","name":"caller","type":"address"},{"indexed":True,"internalType":"address","name":"tokenOut","type":"address"},{"indexed":False,"internalType":"uint256","name":"tokenAmountOut","type":"uint256"}],"name":"LOG_EXIT","type":"event"},{"anonymous":False,"inputs":[{"indexed":True,"internalType":"address","name":"caller","type":"address"},{"indexed":True,"internalType":"address","name":"tokenIn","type":"address"},{"indexed":False,"internalType":"uint256","name":"tokenAmountIn","type":"uint256"}],"name":"LOG_JOIN","type":"event"},{"anonymous":False,"inputs":[{"indexed":True,"internalType":"address","name":"caller","type":"address"},{"indexed":True,"internalType":"address","name":"tokenIn","type":"address"},{"indexed":True,"internalType":"address","name":"tokenOut","type":"address"},{"indexed":False,"internalType":"uint256","name":"tokenAmountIn","type":"uint256"},{"indexed":False,"internalType":"uint256","name":"tokenAmountOut","type":"uint256"}],"name":"LOG_SWAP","type":"event"},{"anonymous":False,"inputs":[{"indexed":True,"internalType":"address","name":"src","type":"address"},{"indexed":True,"internalType":"address","name":"dst","type":"address"},{"indexed":False,"internalType":"uint256","name":"amt","type":"uint256"}],"name":"Transfer","type":"event"},{"constant":True,"inputs":[],"name":"BONE","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"BPOW_PRECISION","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"EXIT_FEE","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"INIT_POOL_SUPPLY","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"MAX_BOUND_TOKENS","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"MAX_BPOW_BASE","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"MAX_FEE","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"MAX_IN_RATIO","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"MAX_OUT_RATIO","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"p
ayable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"MAX_TOTAL_WEIGHT","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"MAX_WEIGHT","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"MIN_BALANCE","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"MIN_BOUND_TOKENS","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"MIN_BPOW_BASE","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"MIN_FEE","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"MIN_WEIGHT","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[{"internalType":"address","name":"src","type":"address"},{"internalType":"address","name":"dst","type":"address"}],"name":"allowance","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":False,"inputs":[{"internalType":"address","name":"dst","type":"address"},{"internalType":"uint256","name":"amt","type":"uint256"}],"name":"approve","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":True,"inputs":[{"internalType":"address","name":"whom","type":"address"}],"name":"balanceOf","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":False,"inputs":[{"internalType":"address","name":"token","type":"address"},{"internalType":"uint256","name":"balance","type":"uint256"},{"internalType":"uint256","name":"denorm","type":"uint256"}],"name":"bind","outputs":[],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":True,"inputs":[{"internalType":"uint256","name":"tokenBalanceIn","type":"uint256"},{"internalType":"uint256","name":"tokenWeightIn","type":"uint256"},{"internalType":"uint256","name":"tokenBalanceOut","type":"uint256"},{"internalType":"uint256","name":"tokenWeightOut","type":"uint256"},{"internalType":"uint256","name":"tokenAmountOut","type":"uint256"},{"internalType":"uint256","name":"swapFee","type":"uint256"}],"name":"calcInGivenOut","outputs":[{"internalType":"uint256","name":"tokenAmountIn","type":"uint256"}],"payable":False,"stateMutability":"pure","type":"function"},{"constant":True,"inputs":[{"internalType":"uint256","name":"tokenBalanceIn","type":"uint256"},{"internalType":"uint256","name":"tokenWeightIn","type":"uint256"},{"internalType":"uint256","name":"tokenBalanceOut","type":"uint256"},{"internalType":"uint256","name":"tokenWeightOut","type":"uint256"},{"internalType":"uint256","name":"tokenAmountIn","type":"uint256"},{"internalType":"uint256","name":"swapFee","type":"uint256"}],"name":"calcOutGivenIn","outputs":[{"internalType":"uint256","name":"tokenAmountOut","type":"uint256"}],"payable":False,"stateMutabil
ity":"pure","type":"function"},{"constant":True,"inputs":[{"internalType":"uint256","name":"tokenBalanceOut","type":"uint256"},{"internalType":"uint256","name":"tokenWeightOut","type":"uint256"},{"internalType":"uint256","name":"poolSupply","type":"uint256"},{"internalType":"uint256","name":"totalWeight","type":"uint256"},{"internalType":"uint256","name":"tokenAmountOut","type":"uint256"},{"internalType":"uint256","name":"swapFee","type":"uint256"}],"name":"calcPoolInGivenSingleOut","outputs":[{"internalType":"uint256","name":"poolAmountIn","type":"uint256"}],"payable":False,"stateMutability":"pure","type":"function"},{"constant":True,"inputs":[{"internalType":"uint256","name":"tokenBalanceIn","type":"uint256"},{"internalType":"uint256","name":"tokenWeightIn","type":"uint256"},{"internalType":"uint256","name":"poolSupply","type":"uint256"},{"internalType":"uint256","name":"totalWeight","type":"uint256"},{"internalType":"uint256","name":"tokenAmountIn","type":"uint256"},{"internalType":"uint256","name":"swapFee","type":"uint256"}],"name":"calcPoolOutGivenSingleIn","outputs":[{"internalType":"uint256","name":"poolAmountOut","type":"uint256"}],"payable":False,"stateMutability":"pure","type":"function"},{"constant":True,"inputs":[{"internalType":"uint256","name":"tokenBalanceIn","type":"uint256"},{"internalType":"uint256","name":"tokenWeightIn","type":"uint256"},{"internalType":"uint256","name":"poolSupply","type":"uint256"},{"internalType":"uint256","name":"totalWeight","type":"uint256"},{"internalType":"uint256","name":"poolAmountOut","type":"uint256"},{"internalType":"uint256","name":"swapFee","type":"uint256"}],"name":"calcSingleInGivenPoolOut","outputs":[{"internalType":"uint256","name":"tokenAmountIn","type":"uint256"}],"payable":False,"stateMutability":"pure","type":"function"},{"constant":True,"inputs":[{"internalType":"uint256","name":"tokenBalanceOut","type":"uint256"},{"internalType":"uint256","name":"tokenWeightOut","type":"uint256"},{"internalType":"uint256","name":"poolSupply","type":"uint256"},{"internalType":"uint256","name":"totalWeight","type":"uint256"},{"internalType":"uint256","name":"poolAmountIn","type":"uint256"},{"internalType":"uint256","name":"swapFee","type":"uint256"}],"name":"calcSingleOutGivenPoolIn","outputs":[{"internalType":"uint256","name":"tokenAmountOut","type":"uint256"}],"payable":False,"stateMutability":"pure","type":"function"},{"constant":True,"inputs":[{"internalType":"uint256","name":"tokenBalanceIn","type":"uint256"},{"internalType":"uint256","name":"tokenWeightIn","type":"uint256"},{"internalType":"uint256","name":"tokenBalanceOut","type":"uint256"},{"internalType":"uint256","name":"tokenWeightOut","type":"uint256"},{"internalType":"uint256","name":"swapFee","type":"uint256"}],"name":"calcSpotPrice","outputs":[{"internalType":"uint256","name":"spotPrice","type":"uint256"}],"payable":False,"stateMutability":"pure","type":"function"},{"constant":True,"inputs":[],"name":"decimals","outputs":[{"internalType":"uint8","name":"","type":"uint8"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":False,"inputs":[{"internalType":"address","name":"dst","type":"address"},{"internalType":"uint256","name":"amt","type":"uint256"}],"name":"decreaseApproval","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":False,"inputs":[{"internalType":"uint256","name":"poolAmountIn","type":"uint256"},{"internalType":"uint256[]","name":"minAmountsOut","type":"uint256[]"}]
,"name":"exitPool","outputs":[],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":False,"inputs":[{"internalType":"address","name":"tokenOut","type":"address"},{"internalType":"uint256","name":"tokenAmountOut","type":"uint256"},{"internalType":"uint256","name":"maxPoolAmountIn","type":"uint256"}],"name":"exitswapExternAmountOut","outputs":[{"internalType":"uint256","name":"poolAmountIn","type":"uint256"}],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":False,"inputs":[{"internalType":"address","name":"tokenOut","type":"address"},{"internalType":"uint256","name":"poolAmountIn","type":"uint256"},{"internalType":"uint256","name":"minAmountOut","type":"uint256"}],"name":"exitswapPoolAmountIn","outputs":[{"internalType":"uint256","name":"tokenAmountOut","type":"uint256"}],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":False,"inputs":[],"name":"finalize","outputs":[],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":True,"inputs":[{"internalType":"address","name":"token","type":"address"}],"name":"getBalance","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"getColor","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"getController","outputs":[{"internalType":"address","name":"","type":"address"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"getCurrentTokens","outputs":[{"internalType":"address[]","name":"tokens","type":"address[]"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[{"internalType":"address","name":"token","type":"address"}],"name":"getDenormalizedWeight","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"getFinalTokens","outputs":[{"internalType":"address[]","name":"tokens","type":"address[]"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[{"internalType":"address","name":"token","type":"address"}],"name":"getNormalizedWeight","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"getNumTokens","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[{"internalType":"address","name":"tokenIn","type":"address"},{"internalType":"address","name":"tokenOut","type":"address"}],"name":"getSpotPrice","outputs":[{"internalType":"uint256","name":"spotPrice","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[{"internalType":"address","name":"tokenIn","type":"address"},{"internalType":"address","name":"tokenOut","type":"address"}],"name":"getSpotPriceSansFee","outputs":[{"internalType":"uint256","name":"spotPrice","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"getSwapFee","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"getTotalDenormalizedWeight","outputs":[{"internalType":"uint256"
,"name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":False,"inputs":[{"internalType":"address","name":"token","type":"address"}],"name":"gulp","outputs":[],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":False,"inputs":[{"internalType":"address","name":"dst","type":"address"},{"internalType":"uint256","name":"amt","type":"uint256"}],"name":"increaseApproval","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":True,"inputs":[{"internalType":"address","name":"t","type":"address"}],"name":"isBound","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"isFinalized","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"isPublicSwap","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":False,"inputs":[{"internalType":"uint256","name":"poolAmountOut","type":"uint256"},{"internalType":"uint256[]","name":"maxAmountsIn","type":"uint256[]"}],"name":"joinPool","outputs":[],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":False,"inputs":[{"internalType":"address","name":"tokenIn","type":"address"},{"internalType":"uint256","name":"tokenAmountIn","type":"uint256"},{"internalType":"uint256","name":"minPoolAmountOut","type":"uint256"}],"name":"joinswapExternAmountIn","outputs":[{"internalType":"uint256","name":"poolAmountOut","type":"uint256"}],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":False,"inputs":[{"internalType":"address","name":"tokenIn","type":"address"},{"internalType":"uint256","name":"poolAmountOut","type":"uint256"},{"internalType":"uint256","name":"maxAmountIn","type":"uint256"}],"name":"joinswapPoolAmountOut","outputs":[{"internalType":"uint256","name":"tokenAmountIn","type":"uint256"}],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":True,"inputs":[],"name":"name","outputs":[{"internalType":"string","name":"","type":"string"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":False,"inputs":[{"internalType":"address","name":"token","type":"address"},{"internalType":"uint256","name":"balance","type":"uint256"},{"internalType":"uint256","name":"denorm","type":"uint256"}],"name":"rebind","outputs":[],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":False,"inputs":[{"internalType":"address","name":"manager","type":"address"}],"name":"setController","outputs":[],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":False,"inputs":[{"internalType":"bool","name":"public_","type":"bool"}],"name":"setPublicSwap","outputs":[],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":False,"inputs":[{"internalType":"uint256","name":"swapFee","type":"uint256"}],"name":"setSwapFee","outputs":[],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":False,"inputs":[{"internalType":"address","name":"tokenIn","type":"address"},{"internalType":"uint256","name":"tokenAmountIn","type":"uint256"},{"internalType":"address","name":"tokenOut","type":"address"},{"internalType":"uint256","name":"minAmountOut","type":"uint256"},{"internalType":"uint256","name":"maxPric
e","type":"uint256"}],"name":"swapExactAmountIn","outputs":[{"internalType":"uint256","name":"tokenAmountOut","type":"uint256"},{"internalType":"uint256","name":"spotPriceAfter","type":"uint256"}],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":False,"inputs":[{"internalType":"address","name":"tokenIn","type":"address"},{"internalType":"uint256","name":"maxAmountIn","type":"uint256"},{"internalType":"address","name":"tokenOut","type":"address"},{"internalType":"uint256","name":"tokenAmountOut","type":"uint256"},{"internalType":"uint256","name":"maxPrice","type":"uint256"}],"name":"swapExactAmountOut","outputs":[{"internalType":"uint256","name":"tokenAmountIn","type":"uint256"},{"internalType":"uint256","name":"spotPriceAfter","type":"uint256"}],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":True,"inputs":[],"name":"symbol","outputs":[{"internalType":"string","name":"","type":"string"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":True,"inputs":[],"name":"totalSupply","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":False,"stateMutability":"view","type":"function"},{"constant":False,"inputs":[{"internalType":"address","name":"dst","type":"address"},{"internalType":"uint256","name":"amt","type":"uint256"}],"name":"transfer","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":False,"inputs":[{"internalType":"address","name":"src","type":"address"},{"internalType":"address","name":"dst","type":"address"},{"internalType":"uint256","name":"amt","type":"uint256"}],"name":"transferFrom","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":False,"stateMutability":"nonpayable","type":"function"},{"constant":False,"inputs":[{"internalType":"address","name":"token","type":"address"}],"name":"unbind","outputs":[],"payable":False,"stateMutability":"nonpayable","type":"function"}])
print("-----Before----")
print(pair.balanceOf(accounts[0]))
if coin < 2:
print(accounts[0].balance())
MyBalancerExchangeAdd.investTokenForBalancerPoolToken(coins[coin], pair, values[coin], 1, {"from":accounts[0], "value": values[coin] + 5 * 10 ** 15})
print("-----Invest-----")
print(pair.balanceOf(accounts[0]))
print(accounts[0].balance())
bal = pair.balanceOf(accounts[0])
pair.approve(MyBalancerExchangeRemove, bal, {"from": accounts[0]})
MyBalancerExchangeRemove.divestBalancerPoolTokenToToken(coins[coin], pair, bal, 1, 2 ** 256 - 1, {"from": accounts[0], "value": 5 * 10 ** 15})
print("-----Divest-----")
print(pair.balanceOf(accounts[0]))
print(accounts[0].balance())
else:
print(coins[coin].balanceOf(accounts[0]))
if coins[coin].allowance(accounts[0], MyBalancerExchangeAdd) == 0:
coins[coin].approve(MyBalancerExchangeAdd, 2 ** 256 - 1, {"from": accounts[0]})
print("-----Invest(Eth)-----")
MyBalancerExchangeAdd.investTokenForBalancerPoolToken(coins[coin], pair, values[coin], 1, {"from":accounts[0], "value": 5 * 10 ** 15})
print(pair.balanceOf(accounts[0]))
print(coins[coin].balanceOf(accounts[0]))
bal = pair.balanceOf(accounts[0])
pair.approve(MyBalancerExchangeRemove, bal, {"from": accounts[0]})
MyBalancerExchangeRemove.divestBalancerPoolTokenToToken(coins[coin], pair, bal, 1, 2 ** 256 - 1, {"from": accounts[0], "value": 5 * 10 ** 15})
print("-----Divest(Eth)-----")
print(pair.balanceOf(accounts[0]))
print(coins[coin].balanceOf(accounts[0]))
print("====================")
def test_main(MyBalancerExchangeAdd, MyBalancerExchangeRemove, UniswapV2Router02, DAI, USDC, USDT, WETH, WBTC, accounts, Contract, state_machine):
settings = {"suppress_health_check": HealthCheck.all(), "max_examples": 20}
state_machine(StateMachine, MyBalancerExchangeAdd, MyBalancerExchangeRemove, UniswapV2Router02, DAI, USDC, USDT, WETH, WBTC, accounts, Contract, settings=settings)
| 252.030303 | 19,798 | 0.665945 | 24,450 | 0.979921 | 0 | 0 | 0 | 0 | 0 | 0 | 17,101 | 0.685383 |
ade5713cc5ef83db5a1136d2a28d6781e4caec62 | 6,285 | py | Python | cellxgene_schema_cli/scripts/ontology_processing.py | chanzuckerberg/single-cell-curation | 7ea0aae3b3d8c75d9717b34374a8d10e222d71ce | [
"MIT"
] | 8 | 2021-03-17T23:42:41.000Z | 2022-03-08T13:08:55.000Z | cellxgene_schema_cli/scripts/ontology_processing.py | chanzuckerberg/single-cell-curation | 7ea0aae3b3d8c75d9717b34374a8d10e222d71ce | [
"MIT"
] | 156 | 2021-02-23T18:17:42.000Z | 2022-03-31T20:49:46.000Z | cellxgene_schema_cli/scripts/ontology_processing.py | chanzuckerberg/single-cell-curation | 7ea0aae3b3d8c75d9717b34374a8d10e222d71ce | [
"MIT"
] | 8 | 2021-03-22T17:07:31.000Z | 2022-03-08T11:07:48.000Z | import owlready2
import yaml
import urllib.request
import os
import gzip
import json
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../cellxgene_schema"))
import env
from typing import List
def _download_owls(
owl_info_yml: str = env.OWL_INFO_YAML, output_dir: str = env.ONTOLOGY_DIR
):
"""
Downloads the ontology owl files specified in 'owl_info_yml' into 'output_dir'
    :param str owl_info_yml: path to yaml file with OWL information
:param str output_dir: path to writable directory where owl files will be downloaded to
:rtype None
"""
with open(owl_info_yml, "r") as owl_info_handle:
owl_info = yaml.safe_load(owl_info_handle)
for ontology, info in owl_info.items():
print(f"Downloading {ontology}")
# Get owl info
latest_version = owl_info[ontology]["latest"]
url = owl_info[ontology]["urls"][latest_version]
# Format of owl (handles cases where they are compressed)
download_format = url.split(".")[-1]
output_file = os.path.join(output_dir, ontology + ".owl")
if download_format == "gz":
urllib.request.urlretrieve(url, output_file + ".gz")
_decompress(output_file + ".gz", output_file)
os.remove(output_file + ".gz")
else:
urllib.request.urlretrieve(url, output_file)
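# Illustrative shape of the owl_info YAML consumed above (an assumption; only the
# "latest", "urls", "only" and "children_of" keys are actually read in this module):
#
#   CL:
#     latest: "2021-06-21"
#     urls:
#       "2021-06-21": "http://purl.obolibrary.org/obo/cl/releases/2021-06-21/cl.owl"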
def _decompress(infile: str, tofile: str):
"""
    Decompresses a gzipped file
    :param str infile: path to gzipped file
:param str tofile: path to output decompressed file
:rtype None
"""
with open(infile, "rb") as inf, open(tofile, "w", encoding="utf8") as tof:
decom_str = gzip.decompress(inf.read()).decode("utf-8")
tof.write(decom_str)
def _parse_owls(
working_dir: str = env.ONTOLOGY_DIR,
owl_info_yml: str = env.OWL_INFO_YAML,
output_json_file: str = env.PARSED_ONTOLOGIES_FILE,
):
"""
    Parses all owl files in working_dir. Extracts information from all classes in each owl file.
    The extracted information is written into a gzipped json file with the following structure:
    {
        "ontology_name": {
            "term_id": {
                "label": "...",
                "deprecated": True,
                "ancestors": [
                    "ancestor_term_id_1",
                    "ancestor_term_id_2"
                ]
            },
            "term_id2": {
                ...
            },
            ...
        }
    }
:param str working_dir: path to folder with owl files
    :param str owl_info_yml: path to yaml file with owl information
    :param str output_json_file: path to output json file
:rtype None
"""
with open(owl_info_yml, "r") as owl_info_handle:
owl_info = yaml.safe_load(owl_info_handle)
owl_files = []
for owl_file in os.listdir(working_dir):
if owl_file.endswith(".owl"):
owl_files.append(os.path.join(working_dir, owl_file))
# Parse owl files
onto_dict = {}
for owl_file in owl_files:
world = owlready2.World()
onto = world.get_ontology(owl_file)
onto.load()
onto_dict[onto.name] = {}
print(f"Processing {onto.name}")
for onto_class in onto.classes():
term_id = onto_class.name.replace("_", ":")
            # Skip terms that do not natively belong to this ontology
if not onto.name == term_id.split(":")[0]:
continue
# If there are specified target terms then only work with them
if onto.name in owl_info:
if "only" in owl_info[onto.name]:
if term_id not in owl_info[onto.name]["only"]:
continue
# Gets label
onto_dict[onto.name][term_id] = dict()
try:
onto_dict[onto.name][term_id]["label"] = onto_class.label[0]
except IndexError:
onto_dict[onto.name][term_id]["label"] = ""
# Add the "deprecated" status
onto_dict[onto.name][term_id]["deprecated"] = False
if onto_class.deprecated:
if onto_class.deprecated.first():
onto_dict[onto.name][term_id]["deprecated"] = True
# Gets ancestors
ancestors = _get_ancestors(onto_class, onto.name)
# If "children_of" specified in owl info then skip the current term if it is
# not a children of those indicated.
if onto.name in owl_info:
if "children_of" in owl_info[onto.name]:
if not list(set(ancestors) &
set(owl_info[onto.name]["children_of"])):
onto_dict[onto.name].pop(term_id)
continue
# only add the ancestors if it's not NCBITaxon, as this saves a lot of disk space
if onto.name == "NCBITaxon":
onto_dict[onto.name][term_id]["ancestors"] = []
else:
onto_dict[onto.name][term_id]["ancestors"] = ancestors
with gzip.open(output_json_file, "wt") as output_json:
json.dump(onto_dict, output_json, indent=2)
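# Minimal sketch of consuming the parsed output (assumes the gzipped JSON written
# by _parse_owls above; "CL:0000084" is an illustrative term id):
#
#   with gzip.open(env.PARSED_ONTOLOGIES_FILE, "rt") as handle:
#       ontologies = json.load(handle)
#   label = ontologies["CL"]["CL:0000084"]["label"]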
def _get_ancestors(onto_class: owlready2.entity.ThingClass, ontology_name: str) -> List[str]:
"""
    Returns a list of ancestor ids of the given onto class. Only those belonging to ontology_name are kept,
    and the ids are reformatted from the form CL_xxxx to CL:xxxx
:param owlready2.entity.ThingClass onto_class: the class for which ancestors will be retrieved
    :param str ontology_name: only ancestors from this ontology will be kept
    :rtype List[str]
    :return list of ancestors (term ids); it may be empty
"""
ancestors = []
for ancestor in onto_class.ancestors():
if onto_class.name == ancestor.name:
continue
        if ancestor.name.split("_")[0] == ontology_name:
ancestors.append(ancestor.name.replace("_", ":"))
return ancestors
# Download and parse owls upon execution
if __name__ == "__main__":
_download_owls()
_parse_owls()
| 31.742424 | 107 | 0.604455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,558 | 0.407001 |
ade5f5e2cfd5b36a0ae20ab962012600912b9fde | 1,337 | py | Python | backoffice/web/companies/serializers.py | uktrade/trade-access-program | 8fb565e96de7d7bb0bde31255aef0f291063e93c | [
"MIT"
] | 1 | 2021-03-04T15:24:12.000Z | 2021-03-04T15:24:12.000Z | backoffice/web/companies/serializers.py | uktrade/trade-access-program | 8fb565e96de7d7bb0bde31255aef0f291063e93c | [
"MIT"
] | 7 | 2020-08-24T13:27:02.000Z | 2021-06-09T18:42:31.000Z | backoffice/web/companies/serializers.py | uktrade/trade-access-program | 8fb565e96de7d7bb0bde31255aef0f291063e93c | [
"MIT"
] | 1 | 2021-05-20T07:40:00.000Z | 2021-05-20T07:40:00.000Z | from rest_framework import serializers
from web.companies.models import Company, DnbGetCompanyResponse
class DnbGetCompanyResponseSerializer(serializers.ModelSerializer):
class Meta:
model = DnbGetCompanyResponse
fields = ['id', 'company', 'dnb_data', 'registration_number', 'company_address']
class CompanyReadSerializer(serializers.ModelSerializer):
dnb_get_company_responses = DnbGetCompanyResponseSerializer(many=True)
class Meta:
model = Company
fields = '__all__'
class CompanyWriteSerializer(serializers.ModelSerializer):
class Meta:
model = Company
fields = '__all__'
class SearchCompaniesSerializer(serializers.Serializer):
search_term = serializers.CharField(min_length=2, max_length=60, required=False)
primary_name = serializers.CharField(min_length=2, max_length=60, required=False)
registration_numbers = serializers.ListField(
child=serializers.CharField(min_length=1, max_length=60), min_length=1, required=False
)
duns_number = serializers.CharField(required=False)
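    # Illustrative payloads (a sketch): data={"search_term": "acme"} passes the
    # validate() below, while data={} fails with "One of: ... is required."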
def validate(self, attrs):
attrs = super().validate(attrs)
if not any(field in attrs for field in self.fields):
raise serializers.ValidationError(f"One of: {', '.join(self.fields)} is required.")
return attrs
| 32.609756 | 95 | 0.732236 | 1,221 | 0.913239 | 0 | 0 | 0 | 0 | 0 | 0 | 127 | 0.094989 |
ade713c52195e4d636a5f8f37736575316cae117 | 5,648 | py | Python | src/4/lr_got.py | marmor97/cds-language-exam | d6f1aa543ba3f78d1a9f34c67a687e4f0944a665 | [
"MIT"
] | null | null | null | src/4/lr_got.py | marmor97/cds-language-exam | d6f1aa543ba3f78d1a9f34c67a687e4f0944a665 | [
"MIT"
] | null | null | null | src/4/lr_got.py | marmor97/cds-language-exam | d6f1aa543ba3f78d1a9f34c67a687e4f0944a665 | [
"MIT"
] | null | null | null | # importing modules and packages
# system tools
import os
import sys
import argparse
sys.path.append(os.path.join("..", ".."))
from contextlib import redirect_stdout
# pandas, numpy, gensim
import pandas as pd
import numpy as np
import gensim.downloader
# import my classifier utility functions - see the Github repo!
import utils.classifier_utils as clf
# Machine learning stuff
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import ShuffleSplit
from sklearn import metrics
# matplotlib
import matplotlib.pyplot as plt
class lr_classifier():
def __init__(self, args):
self.args = args
self.data = pd.read_csv(self.args["filename"])
def preprocessing(self):
'''
The preprocessing function performs various transformations to the data
1. Data is balanced to have an equal amount of label classes
2. Data is split into x and y
3. Data is further split into train and test values
4. X features are vectorized
'''
print("[INFO] Preprocessing Game of Thrones data...")
# I'm interested in seeing how many sentences there are from each season
n_sentences = []
for val in set(self.data['Season']):
length = len(self.data['Sentence'].loc[self.data['Season'] == val])
            n_sentences.append(length) # Each season has a different number of sentences, which might affect the classification, so I save all lengths and choose the minimum as n in the balance function to keep the distribution even
# Balancing data to not bias classifier
balanced_data = clf.balance(self.data, label = "Season", n=min(n_sentences))
# Splitting up to x features and y from the balanced data
x = balanced_data['Sentence'].values
        y = np.array(balanced_data['Season'].str.extract(r'(\d+)')).ravel() # Extracting only the season numbers to have cleaner output - ravel makes it a row vector instead of a column vector
        self.y = [int(numeric_string) for numeric_string in y] # Cast season labels to integers
# Splitting into train and test sets
# I am only attributing "self" to y because these are finished being preprocessed. self.X features are defined in vectorization
X_train, X_test, self.y_train, self.y_test = train_test_split(x, # Creating two lists - sentences is an array
self.y, # Labels
test_size=0.25,
random_state=42,
stratify=self.y) # This should keep an equal amount of labels in each set - keeps the original distribution, which is equal.
# .fit_transform(X) = learn feature names + .transform(X)
# Vectorization
print("[INFO] Vectorizing text...")
vectorizer = CountVectorizer()
# Fitting the vectorizer to our data
        # Transform to training features
self.X_train_feats = vectorizer.fit_transform(X_train)
#... then we do it for our test data
self.X_test_feats = vectorizer.transform(X_test)
# Create a list of the feature names.
feature_names = vectorizer.get_feature_names()
# Vectorize full dataset
self.X_vect = vectorizer.fit_transform(x)
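        # Note: this re-fits the vectorizer on the full corpus, so X_vect is
        # independent of the train/test features above; it is presumably kept for
        # dataset-wide diagnostics (ShuffleSplit is imported for such use).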
def model(self):
'''
        Function that fits a Logistic Regression to the count-vectorized X features and y labels and generates predictions
'''
print("[INFO] Defining logistic regression model...")
# Basic logistic regression
classifier = LogisticRegression(random_state=42).fit(self.X_train_feats, self.y_train)
self.y_pred = classifier.predict(self.X_test_feats)
def evaluation(self):
'''
        Evaluation function that saves a classification report to the defined output path
'''
print("[INFO] Evaluating logistic regression model...")
# Evaluation
classifier_metrics = pd.DataFrame(metrics.classification_report(self.y_test,
self.y_pred,
output_dict = True))
print(classifier_metrics)
classifier_metrics.to_csv(os.path.join(self.args['outpath'], "lr_classification_report.csv"))
def main():
# Argparse
ap = argparse.ArgumentParser(description="[INFO] LR classifier arguments")
ap.add_argument("-f",
"--filename",
required=False,
type=str,
default= os.path.join("..","..", "data", "4", "Game_of_Thrones_Script.csv"),
help="str, file name and location")
ap.add_argument("-o",
"--outpath",
required=False,
type=str,
default= os.path.join("..","..", "out","4"),
help="str, output location")
args = vars(ap.parse_args())
# Define class
lr_classifier_got = lr_classifier(args)
lr_classifier_got.preprocessing()
lr_classifier_got.model()
lr_classifier_got.evaluation()
if __name__=="__main__":
main()
| 39.774648 | 266 | 0.611898 | 4,055 | 0.717953 | 0 | 0 | 0 | 0 | 0 | 0 | 2,398 | 0.424575 |
ade8f00c2e2f3654ec06ca9013834fd8116c8f47 | 1,743 | py | Python | fight_tracker/arithmetic.py | jm-begon/fight_tracker | 82dc6bbb9e24015e1dc3edbd05ce319b3c88626d | [
"BSD-3-Clause"
] | null | null | null | fight_tracker/arithmetic.py | jm-begon/fight_tracker | 82dc6bbb9e24015e1dc3edbd05ce319b3c88626d | [
"BSD-3-Clause"
] | null | null | null | fight_tracker/arithmetic.py | jm-begon/fight_tracker | 82dc6bbb9e24015e1dc3edbd05ce319b3c88626d | [
"BSD-3-Clause"
] | null | null | null | class Boolable:
def __bool__(self):
return False
class DescriptiveTrue(Boolable):
def __init__(self, description):
self.description = description
def __bool__(self):
return True
def __str__(self):
return f"{self.description}"
def __repr__(self):
return f"{self.__class__.__name__}({repr(self.description)})"
class Intable:
def __int__(self):
return 0
def __add__(self, other):
return Addition(self, other)
def __repr__(self):
return f"{self.__class__.__name__}()"
class DescriptiveInt(Intable):
def __init__(self, value, description):
self.value = value
self.description = description
def __int__(self):
return self.value
def __str__(self):
return f"{self.value} ({self.description})"
def __repr__(self):
return f"{self.__class__.__name__}({repr(self.value)}, " \
f"{repr(self.description)})"
class Addition(Intable):
def __init__(self, *intables):
self.intables = list(intables)
def __int__(self):
return sum(int(i) for i in self.intables)
def __repr__(self):
return f"{self.__class__.__name__}(*{repr(self.intables)})"
def __str__(self):
return " + ".join(str(x) for x in self.intables)
class Subtraction(Intable):
def __init__(self, left_operand, right_operand):
self.left = left_operand
self.right = right_operand
def __int__(self):
return int(self.left) - int(self.right)
def __repr__(self):
return f"{self.__class__.__name__}({repr(self.left)}, " \
f"{repr(self.right)})"
def __str__(self):
return f"{self.left} - {self.right}" | 23.554054 | 69 | 0.619048 | 1,728 | 0.991394 | 0 | 0 | 0 | 0 | 0 | 0 | 374 | 0.214573 |
adeb17cff886b521d293c0e2174cc4837bd14430 | 1,890 | py | Python | isoprene_pumpjack/helpers/services.py | tommilligan/isoprene-pumpjack | 175c7d963b4d3378a6ad434071cad1fb6d5b8a32 | [
"Apache-2.0"
] | null | null | null | isoprene_pumpjack/helpers/services.py | tommilligan/isoprene-pumpjack | 175c7d963b4d3378a6ad434071cad1fb6d5b8a32 | [
"Apache-2.0"
] | null | null | null | isoprene_pumpjack/helpers/services.py | tommilligan/isoprene-pumpjack | 175c7d963b4d3378a6ad434071cad1fb6d5b8a32 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
'''
Central execution points for non-python services
'''
import logging
from neo4j.v1 import GraphDatabase, basic_auth
import neo4j.bolt.connection
import elasticsearch.exceptions
from isoprene_pumpjack.constants.environment import environment
from isoprene_pumpjack.utils.neo_to_d3 import neo_to_d3
from isoprene_pumpjack.exceptions import IsopumpException
logger = logging.getLogger(__name__)
def execute_cypher(cypher_statement):
'''Configure and safely execute a cypher statement'''
logger.debug("Executing cypher statement")
try:
bolt_driver = GraphDatabase.driver(
environment["ISOPRENE_PUMPJACK_BOLT_URL"],
auth=basic_auth(
environment["ISOPRENE_PUMPJACK_BOLT_USER"],
environment["ISOPRENE_PUMPJACK_BOLT_PASSWORD"]
)
)
with bolt_driver.session() as session:
result = session.run(cypher_statement)
except neo4j.bolt.connection.ServiceUnavailable as e:
logger.error(e)
raise IsopumpException("Could not reach graph server", status_code=503, payload={
"message_original": e.message
})
return result
def execute_cypher_get_d3(cypher_statement, nodeLabels=[], linkLabels=[]):
    '''In addition to safe execution, return the cypher query results in d3 dict format'''
logger.debug("Executing cypher and returning as d3 dict")
result = execute_cypher(cypher_statement)
data = neo_to_d3(result, nodeLabels, linkLabels)
return data
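# Illustrative call (a sketch; the exact label arguments depend on what neo_to_d3
# expects from the calling view):
#   graph = execute_cypher_get_d3("MATCH (n)-[r]->(m) RETURN n, r, m LIMIT 25",
#                                 nodeLabels=["name"], linkLabels=["type"])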
def execute_search(elasticsearch_dsl_search_object):
'''Execute an elasticsearch-dsl object safely'''
try:
response = elasticsearch_dsl_search_object.execute()
except elasticsearch.exceptions.ConnectionError as e:
logger.error(e)
raise IsopumpException("Could not reach document server", status_code=503)
return response
| 34.363636 | 89 | 0.724868 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 498 | 0.263492 |
aded4518a2f294727acdd53f5c2f5b29c9ac192e | 11,148 | py | Python | test/inprogress/test_ee2_api/test_EE2API.py | eapearson/kbase-skd-module-job-browser-bff | 426445f90569adac16632ef4921f174e51abd42f | [
"MIT"
] | null | null | null | test/inprogress/test_ee2_api/test_EE2API.py | eapearson/kbase-skd-module-job-browser-bff | 426445f90569adac16632ef4921f174e51abd42f | [
"MIT"
] | 6 | 2020-05-26T17:40:07.000Z | 2022-03-11T16:33:11.000Z | test/inprogress/test_ee2_api/test_EE2API.py | eapearson/kbase-skd-module-job-browser-bff | 426445f90569adac16632ef4921f174e51abd42f | [
"MIT"
] | 1 | 2020-05-26T17:12:59.000Z | 2020-05-26T17:12:59.000Z | # -*- coding: utf-8 -*-
from JobBrowserBFF.TestBase import TestBase
from JobBrowserBFF.model.EE2Api import EE2Api
from JobBrowserBFF.schemas.Schema import Schema
ENV = 'ci'
USER_CLASS = 'user'
UPSTREAM_SERVICE = 'ee2'
JOB_ID_HAPPY = '5e8285adefac56a4b4bc2b14'
JOB_ID_LOG_HAPPY = '5e8647c576f5df12d4fa6953'
JOB_ID_NOT_FOUND = '5dcb4324fdf6d14ac59ea916'
WORKSPACE_ID_HAPPY = 47458
# Note timeout in seconds since we are dealing directly with the ee2 api
TIMEOUT = 60
START_TIME_1 = 0
END_TIME_1 = 1609459200000
USER = 'kbaseuitest'
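# The ids above pin known jobs/workspaces in the CI environment; each test below
# exercises the raw ee2 client against them and validates the response with a
# schema loaded from the "ee2_api" schema directory.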
class EE2APITest(TestBase):
# Uncomment to skip this test
# @unittest.skip("skipped test_get_jobs_happy")
def test_ver(self):
self.set_config('upstream-service', UPSTREAM_SERVICE)
token = self.token_for(ENV, USER_CLASS)
# impl, context = self.impl_for(ENV, USER_CLASS)
# params = {
# 'job_ids': [JOB_ID_HAPPY],
# 'timeout': TIMEOUT
# }
try:
api = EE2Api(self.get_config('ee2-url'), token, TIMEOUT)
version = api.ver()
self.assertEqual(version, '0.0.1')
except Exception as ex:
self.assert_no_exception(ex)
def test_status(self):
self.set_config('upstream-service', UPSTREAM_SERVICE)
token = self.token_for(ENV, USER_CLASS)
try:
api = EE2Api(self.get_config('ee2-url'), token, TIMEOUT)
status = api.status()
self.assertEqual(status['version'], '0.0.1')
self.assertEqual(status['service'], 'KBase Execution Engine')
self.assertIsInstance(status['server_time'], float)
self.assertIsInstance(status['git_commit'], str)
except Exception as ex:
self.assert_no_exception(ex)
def test_list_config(self):
self.set_config('upstream-service', UPSTREAM_SERVICE)
token = self.token_for(ENV, USER_CLASS)
try:
api = EE2Api(self.get_config('ee2-url'), token, TIMEOUT)
expected = {
"external-url": "https://ci.kbase.us/services/ee2",
"kbase-endpoint": "https://ci.kbase.us/services",
"workspace-url": "https://ci.kbase.us/services/ws",
"catalog-url": "https://ci.kbase.us/services/catalog",
"shock-url": "https://ci.kbase.us/services/shock-api",
"handle-url": "https://ci.kbase.us/services/handle_service",
"srv-wiz-url": "https://ci.kbase.us/services/service_wizard",
"auth-service-url":
"https://ci.kbase.us/services/auth/api/legacy/KBase/Sessions/Login",
"auth-service-url-v2": "https://ci.kbase.us/services/auth/api/V2/token",
"auth-service-url-allow-insecure": "false",
"scratch": "/kb/module/work/tmp",
"executable": "execute_runner.sh",
"docker_timeout": "604805",
"initialdir": "/condor_shared",
"transfer_input_files": "/condor_shared/JobRunner.tgz"
}
config = api.list_config()
self.assertEqual(config, expected)
except Exception as ex:
self.assert_no_exception(ex)
def test_check_job(self):
self.set_config('upstream-service', UPSTREAM_SERVICE)
token = self.token_for(ENV, USER_CLASS)
schema = Schema(schema_dir="ee2_api", load_schemas=True)
try:
api = EE2Api(self.get_config('ee2-url'), token, TIMEOUT)
params = {
'job_id': JOB_ID_HAPPY
}
job = api.check_job(params)
schema.validate('check_job', job)
self.assertEqual(job['user'], 'kbaseuitest')
except Exception as ex:
self.assert_no_exception(ex)
def test_check_jobs(self):
self.set_config('upstream-service', UPSTREAM_SERVICE)
token = self.token_for(ENV, USER_CLASS)
schema = Schema(schema_dir="ee2_api", load_schemas=True)
try:
api = EE2Api(self.get_config('ee2-url'), token, TIMEOUT)
params = {
'job_ids': [JOB_ID_HAPPY]
}
jobs = api.check_jobs(params)
schema.validate('check_jobs', jobs)
self.assertEqual(jobs['job_states'][0]['user'], 'kbaseuitest')
except Exception as ex:
self.assert_no_exception(ex)
def test_check_workspace_jobs(self):
self.set_config('upstream-service', UPSTREAM_SERVICE)
token = self.token_for(ENV, USER_CLASS)
schema = Schema(schema_dir="ee2_api", load_schemas=True)
try:
api = EE2Api(self.get_config('ee2-url'), token, TIMEOUT)
params = {
'workspace_id': WORKSPACE_ID_HAPPY
}
jobs = api.check_workspace_jobs(params)
schema.validate('check_workspace_jobs', jobs)
self.assertEqual(jobs['job_states'][0]['user'], 'kbaseuitest')
except Exception as ex:
self.assert_no_exception(ex)
def test_cancel_job(self):
self.set_config('upstream-service', UPSTREAM_SERVICE)
token = self.token_for(ENV, USER_CLASS)
schema = Schema(schema_dir="ee2_api", load_schemas=True)
try:
api = EE2Api(self.get_config('ee2-url'), token, TIMEOUT)
params = {
'job_id': JOB_ID_HAPPY
}
result = api.cancel_job(params)
schema.validate('cancel_job', result)
self.assertIsNone(result)
except Exception as ex:
self.assert_no_exception(ex)
def test_check_job_canceled(self):
self.set_config('upstream-service', UPSTREAM_SERVICE)
token = self.token_for(ENV, USER_CLASS)
schema = Schema(schema_dir="ee2_api", load_schemas=True)
try:
api = EE2Api(self.get_config('ee2-url'), token, TIMEOUT)
params = {
'job_id': JOB_ID_HAPPY
}
result = api.check_job_canceled(params)
schema.validate('check_job_canceled', result)
self.assertEqual(result['canceled'], False)
except Exception as ex:
self.assert_no_exception(ex)
def test_check_job_status(self):
self.set_config('upstream-service', UPSTREAM_SERVICE)
token = self.token_for(ENV, USER_CLASS)
schema = Schema(schema_dir="ee2_api", load_schemas=True)
try:
api = EE2Api(self.get_config('ee2-url'), token, TIMEOUT)
params = {
'job_id': JOB_ID_HAPPY
}
result = api.get_job_status(params)
schema.validate('get_job_status', result)
self.assertEqual(result['status'], 'completed')
except Exception as ex:
self.assert_no_exception(ex)
def test_get_job_logs(self):
self.set_config('upstream-service', UPSTREAM_SERVICE)
token = self.token_for(ENV, USER_CLASS)
schema = Schema(schema_dir="ee2_api", load_schemas=True)
try:
api = EE2Api(self.get_config('ee2-url'), token, TIMEOUT)
params = {
'job_id': JOB_ID_LOG_HAPPY
}
result = api.get_job_logs(params)
schema.validate('get_job_logs', result)
self.assertEqual(result['lines'][0]['ts'], 1585858534288)
except Exception as ex:
self.assert_no_exception(ex)
def test_check_jobs_date_range_for_user(self):
self.set_config('upstream-service', UPSTREAM_SERVICE)
token = self.token_for(ENV, USER_CLASS)
schema = Schema(schema_dir="ee2_api", load_schemas=True)
try:
api = EE2Api(self.get_config('ee2-url'), token, TIMEOUT)
params = {
'start_time': START_TIME_1,
'end_time': END_TIME_1,
'user': USER,
'offset': 0,
'limit': 10
}
result = api.check_jobs_date_range_for_user(params)
schema.validate('check_jobs_date_range_for_user', result)
self.assertEqual(result['jobs'][0]['job_id'], '5e8285adefac56a4b4bc2b14')
except Exception as ex:
self.assert_no_exception(ex)
def test_check_jobs_date_range_for_all(self):
self.set_config('upstream-service', UPSTREAM_SERVICE)
token = self.token_for(ENV, 'admin')
schema = Schema(schema_dir="ee2_api", load_schemas=True)
try:
api = EE2Api(self.get_config('ee2-url'), token, TIMEOUT)
params = {
'start_time': START_TIME_1,
'end_time': END_TIME_1,
'user': USER,
'offset': 0,
'limit': 10
}
result = api.check_jobs_date_range_for_all(params)
schema.validate('check_jobs_date_range_for_all', result)
self.assertEqual(result['jobs'][0]['job_id'], '54b02da8e4b06e6b5555476d')
except Exception as ex:
self.assert_no_exception(ex)
def test_is_admin_is_not(self):
self.set_config('upstream-service', UPSTREAM_SERVICE)
token = self.token_for(ENV, USER_CLASS)
schema = Schema(schema_dir="ee2_api", load_schemas=True)
try:
api = EE2Api(self.get_config('ee2-url'), token, TIMEOUT)
result = api.is_admin()
schema.validate('is_admin', result)
self.assertEqual(result, False)
except Exception as ex:
self.assert_no_exception(ex)
def test_is_admin_is(self):
self.set_config('upstream-service', UPSTREAM_SERVICE)
token = self.token_for(ENV, 'admin')
schema = Schema(schema_dir="ee2_api", load_schemas=True)
try:
api = EE2Api(self.get_config('ee2-url'), token, TIMEOUT)
result = api.is_admin()
schema.validate('is_admin', result)
self.assertEqual(result, True)
except Exception as ex:
self.assert_no_exception(ex)
def test_get_admin_permission_not_admin(self):
self.set_config('upstream-service', UPSTREAM_SERVICE)
token = self.token_for(ENV, USER_CLASS)
schema = Schema(schema_dir="ee2_api", load_schemas=True)
try:
api = EE2Api(self.get_config('ee2-url'), token, TIMEOUT)
result = api.get_admin_permission()
schema.validate('get_admin_permission', result)
self.assertEqual(result['permission'], 'n')
except Exception as ex:
self.assert_no_exception(ex)
def test_get_admin_permission_admin(self):
self.set_config('upstream-service', UPSTREAM_SERVICE)
token = self.token_for(ENV, 'admin')
schema = Schema(schema_dir="ee2_api", load_schemas=True)
try:
api = EE2Api(self.get_config('ee2-url'), token, TIMEOUT)
result = api.get_admin_permission()
schema.validate('get_admin_permission', result)
self.assertEqual(result['permission'], 'w')
except Exception as ex:
self.assert_no_exception(ex)
| 39.392226 | 88 | 0.600646 | 10,613 | 0.952009 | 0 | 0 | 0 | 0 | 0 | 0 | 2,379 | 0.213402 |
adeda8e4831bfdd84daf5ffee1aa3f3be1652de6 | 529 | py | Python | ldeep/views/activedirectory.py | podjackel/ldeep | dac9b6d92b905b66868897da4bcc2ec4fa1d0999 | [
"MIT"
] | 41 | 2018-10-24T12:10:34.000Z | 2022-03-29T08:21:30.000Z | ldeep/views/activedirectory.py | podjackel/ldeep | dac9b6d92b905b66868897da4bcc2ec4fa1d0999 | [
"MIT"
] | 7 | 2018-10-25T16:34:25.000Z | 2021-04-14T01:00:14.000Z | ldeep/views/activedirectory.py | podjackel/ldeep | dac9b6d92b905b66868897da4bcc2ec4fa1d0999 | [
"MIT"
] | 17 | 2019-01-08T15:06:15.000Z | 2022-01-21T00:20:53.000Z |
from ldap3 import ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES
from ldap3.protocol.formatters.validators import validate_sid, validate_guid
ALL_ATTRIBUTES = ALL_ATTRIBUTES
ALL_OPERATIONAL_ATTRIBUTES = ALL_OPERATIONAL_ATTRIBUTES
ALL = [ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES]
validate_sid = validate_sid
validate_guid = validate_guid
class ActiveDirectoryView(object):
"""
    Manage a view of an Active Directory.
"""
class ActiveDirectoryInvalidSID(Exception):
pass
class ActiveDirectoryInvalidGUID(Exception):
pass
| 24.045455 | 76 | 0.835539 | 189 | 0.357278 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.086957 |
adee84920a6fd9a1311346aec3afbb03a615787d | 1,237 | py | Python | sdk/python/pulumi_azure_native/automation/v20200113preview/__init__.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/automation/v20200113preview/__init__.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/automation/v20200113preview/__init__.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from ... import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .automation_account import *
from .certificate import *
from .connection import *
from .connection_type import *
from .credential import *
from .dsc_node_configuration import *
from .get_automation_account import *
from .get_certificate import *
from .get_connection import *
from .get_connection_type import *
from .get_credential import *
from .get_dsc_node_configuration import *
from .get_job_schedule import *
from .get_module import *
from .get_private_endpoint_connection import *
from .get_python2_package import *
from .get_schedule import *
from .get_source_control import *
from .get_variable import *
from .get_watcher import *
from .job_schedule import *
from .list_key_by_automation_account import *
from .module import *
from .private_endpoint_connection import *
from .python2_package import *
from .schedule import *
from .source_control import *
from .variable import *
from .watcher import *
from ._inputs import *
from . import outputs
| 30.925 | 80 | 0.78658 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 208 | 0.168149 |
adf09047622763f9323976214e678f90e9ed8ff9 | 1,708 | py | Python | project_name/urls.py | rafael-rpa/django-auth-extension-boilerplate | 2fba1f4dc6a14da525c93303e86934682f4bfcbc | [
"MIT"
] | 1 | 2017-04-26T10:13:34.000Z | 2017-04-26T10:13:34.000Z | project_name/urls.py | rafael-rpa/django-auth-extension-boilerplate | 2fba1f4dc6a14da525c93303e86934682f4bfcbc | [
"MIT"
] | null | null | null | project_name/urls.py | rafael-rpa/django-auth-extension-boilerplate | 2fba1f4dc6a14da525c93303e86934682f4bfcbc | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.conf.urls import include, url
from django.contrib import admin
from auth_extension import views as auth_views
from django.contrib.auth.views import login, logout, password_reset, password_reset_done, password_reset_confirm, password_reset_complete
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url(r'^$', auth_views.dashboard, name='dashboard'),
url(r'^register/$', auth_views.register, name='register'),
url(r'^login/$', login, {'template_name': 'login.html'}, name='login'),
url(r'^logout/$', logout, {'next_page': '/'}, name='logout'),
url(r'^account/edit/$', auth_views.account_edit, name='account-edit'),
url(r'^account/change-password/$', auth_views.change_password, name='change-password'),
url(r'^password/reset/$', password_reset, {'template_name': 'password-reset-form.html', 'subject_template_name': 'password_reset_subject.txt', 'post_reset_redirect': '/password/reset/done/', 'html_email_template_name': 'password-reset-email.html'}, name='password-reset'),
url(r'^password/reset/done/$', password_reset_done, {'template_name': 'password-reset-done.html'}),
url(r'^password/reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', password_reset_confirm, {'template_name': 'password-reset-confirm.html', 'post_reset_redirect': '/password/done/'}, name='password_reset_confirm'),
url(r'^password/done/$', password_reset_complete, {'template_name': 'password-reset-complete.html'}),
url(r'^admin/', admin.site.urls),
]
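# Note: the password-reset URLs above follow Django's four-step flow
# (request form -> done -> token confirm -> complete); the post_reset_redirect
# options chain the steps together.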
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 55.096774 | 276 | 0.730094 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 763 | 0.446721 |
adf16cd52d01a8a986221348b55fd5f12bd7c761 | 826 | py | Python | test_day01.py | clfs/aoc2019 | 940fdbdd7bbb69c4a3a6c947c37bf7b60a201e88 | [
"MIT"
] | null | null | null | test_day01.py | clfs/aoc2019 | 940fdbdd7bbb69c4a3a6c947c37bf7b60a201e88 | [
"MIT"
] | null | null | null | test_day01.py | clfs/aoc2019 | 940fdbdd7bbb69c4a3a6c947c37bf7b60a201e88 | [
"MIT"
] | null | null | null | def fuel_required(weight: int) -> int:
return weight // 3 - 2
def fuel_required_accurate(weight: int) -> int:
fuel = 0
while weight > 0:
weight = max(0, weight // 3 - 2)
fuel += weight
return fuel
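# Worked example: fuel_required_accurate(1969) iterates
# 654 -> 216 -> 70 -> 21 -> 5 -> 0, summing to 966 (matching the test below).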
def test_fuel_required() -> None:
cases = [(12, 2), (14, 2), (1969, 654), (100756, 33583)]
for x, y in cases:
assert fuel_required(x) == y
def test_fuel_required_accurate() -> None:
cases = [(14, 2), (1969, 966), (100756, 50346)]
for x, y in cases:
assert fuel_required_accurate(x) == y
def test_solutions() -> None:
with open("input/01.txt") as f:
modules = [int(line) for line in f]
part_1 = sum(map(fuel_required, modules))
part_2 = sum(map(fuel_required_accurate, modules))
assert part_1 == 3375962
assert part_2 == 5061072
| 25.030303 | 60 | 0.605327 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.016949 |
adf4bfc1de4b0c89ad4020a842811facae16d88a | 499 | py | Python | Easy/After 157/175.Modified Kaprekar Numbers.py | sherryx080/CPTango | c7491156202fa7517c96b96dab27c867b949bb63 | [
"MIT"
] | null | null | null | Easy/After 157/175.Modified Kaprekar Numbers.py | sherryx080/CPTango | c7491156202fa7517c96b96dab27c867b949bb63 | [
"MIT"
] | null | null | null | Easy/After 157/175.Modified Kaprekar Numbers.py | sherryx080/CPTango | c7491156202fa7517c96b96dab27c867b949bb63 | [
"MIT"
] | null | null | null | import sys
p = int(sys.stdin.readline())
q = int(sys.stdin.readline())
result = []
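# Sanity check: for p=1, q=100 the modified Kaprekar numbers printed below
# should be: 1 9 45 55 99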
for i in range(p,q+1):
square = i * i
l_num = 0
temp = list(str(square))
#print(temp[:len(temp)//2])
#print(temp[len(temp)//2:])
    if square > 10:
        l_num = int(''.join(temp[:len(temp)//2]))
        r_num = int(''.join(temp[len(temp)//2:]))
    else:
        # single-digit squares have no left part to split off
        r_num = square
if l_num+r_num == i:
result.append(i)
if len(result)==0:
print("INVALID RANGE")
else:
for i in result:
print(i,end=" ") | 20.791667 | 49 | 0.54509 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 0.152305 |
adf6daa9b589cfc3315f30adb8aabf6d767de08b | 808 | py | Python | catalyst/dl/callbacks/metrics/__init__.py | TeAmP0is0N/catalyst | 445e366266196a2f1076cc7fa2438d66dfa58c14 | [
"Apache-2.0"
] | 1 | 2020-09-24T00:34:06.000Z | 2020-09-24T00:34:06.000Z | catalyst/dl/callbacks/metrics/__init__.py | TeAmP0is0N/catalyst | 445e366266196a2f1076cc7fa2438d66dfa58c14 | [
"Apache-2.0"
] | null | null | null | catalyst/dl/callbacks/metrics/__init__.py | TeAmP0is0N/catalyst | 445e366266196a2f1076cc7fa2438d66dfa58c14 | [
"Apache-2.0"
] | 1 | 2020-09-24T00:34:07.000Z | 2020-09-24T00:34:07.000Z | # flake8: noqa
from catalyst.dl.callbacks.metrics.accuracy import (
AccuracyCallback,
MultiLabelAccuracyCallback,
)
from catalyst.dl.callbacks.metrics.auc import AUCCallback
from catalyst.dl.callbacks.metrics.cmc import CMCScoreCallback
from catalyst.dl.callbacks.metrics.dice import (
DiceCallback,
MultiClassDiceMetricCallback,
MulticlassDiceMetricCallback,
)
from catalyst.dl.callbacks.metrics.f1_score import F1ScoreCallback
from catalyst.dl.callbacks.metrics.iou import (
ClasswiseIouCallback,
ClasswiseJaccardCallback,
IouCallback,
JaccardCallback,
)
from catalyst.dl.callbacks.metrics.ppv_tpr_f1 import (
PrecisionRecallF1ScoreCallback,
)
from catalyst.dl.callbacks.metrics.precision import (
AveragePrecisionCallback,
MeanAveragePrecisionCallback,
)
| 27.862069 | 66 | 0.804455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.017327 |
adf797c04c5912626a52449a6704952383c10673 | 451 | py | Python | login/migrations/0003_user_token.py | yuxiaoYX/xiaoshuo | 5652703521aa99774e8e0667c5e6b9f24a6d90ac | [
"MIT"
] | null | null | null | login/migrations/0003_user_token.py | yuxiaoYX/xiaoshuo | 5652703521aa99774e8e0667c5e6b9f24a6d90ac | [
"MIT"
] | null | null | null | login/migrations/0003_user_token.py | yuxiaoYX/xiaoshuo | 5652703521aa99774e8e0667c5e6b9f24a6d90ac | [
"MIT"
] | null | null | null | # Generated by Django 2.2.1 on 2019-07-28 08:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('login', '0002_auto_20190720_1846'),
]
operations = [
migrations.AddField(
model_name='user',
name='token',
field=models.CharField(default=1, max_length=100, verbose_name='token验证'),
preserve_default=False,
),
]
| 22.55 | 86 | 0.605322 | 362 | 0.795604 | 0 | 0 | 0 | 0 | 0 | 0 | 105 | 0.230769 |
adfa006844d8a11fe82113a8c42d8b2a11a4d2bc | 10,777 | py | Python | usersystem/views.py | sergioruizdavila/asanni-backend | b2da3f3a97dbd1ef46d65f13ee9b2098124d4fc4 | [
"MIT"
] | 8 | 2018-05-24T04:46:58.000Z | 2021-06-11T04:41:49.000Z | usersystem/views.py | sergioruizdavila/asanni-backend | b2da3f3a97dbd1ef46d65f13ee9b2098124d4fc4 | [
"MIT"
] | null | null | null | usersystem/views.py | sergioruizdavila/asanni-backend | b2da3f3a97dbd1ef46d65f13ee9b2098124d4fc4 | [
"MIT"
] | 4 | 2020-01-24T13:35:42.000Z | 2021-06-15T07:38:06.000Z | from allauth.account.utils import setup_user_email, send_email_confirmation
from rest_framework.response import Response
from usersystem.serializers import UserSerializer, UserRegisterSerializer
from rest_framework.views import APIView
from rest_framework.status import HTTP_200_OK, HTTP_400_BAD_REQUEST, HTTP_201_CREATED, HTTP_404_NOT_FOUND
from rest_framework.permissions import AllowAny
from django.contrib.auth.models import User
from usersystem.settings import PASSWORD_MAX_LENGTH, PASSWORD_MIN_LENGTH, LOCAL_OAUTH2_KEY
import requests as makerequest
from usersystem.secrets import SOCIAL_AUTH_GOOGLE_OAUTH2_KEY, SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET
from social.apps.django_app.default.models import UserSocialAuth
# Create your views here.
class AccountView(APIView):
"""
An API endpoint for managing the current user.
GET returns basic information about the current user.
POST expects at least one of 'email', 'first_name' or 'last_name' fields.
DELETE deletes the current user.
"""
def get(self, request):
serializer = UserSerializer(request.user, context={'request': request})
return Response(serializer.data)
def post(self, request):
if not request.data:
return Response(status=HTTP_400_BAD_REQUEST)
serializer = UserSerializer(data=request.data, partial=True)
# Return a 400 response if the data was invalid.
serializer.is_valid(raise_exception=True)
request.user.email = serializer.validated_data.get(
'email', request.user.email)
request.user.first_name = serializer.validated_data.get(
'first_name', request.user.first_name)
request.user.last_name = serializer.validated_data.get(
'last_name', request.user.last_name)
request.user.save()
return Response(status=HTTP_200_OK)
def delete(self, request):
# If this is a Google social account, revoke its Google tokens
socAuth = next(
iter(UserSocialAuth.get_social_auth_for_user(request.user)), None)
if socAuth and socAuth.provider == 'google-oauth2':
refresh_token = socAuth.extra_data.get(
'refresh_token', socAuth.extra_data['access_token'])
makerequest.post(
'https://accounts.google.com/o/oauth2/revoke?token=' + refresh_token)
request.user.delete()
return Response(status=HTTP_200_OK)
class AccountUsernameView(APIView):
"""
A simple API endpoint for getting an username with a given email.
POST must contain 'email' field. Server returns 400 if email is already used or 200 otherwise.
"""
permission_classes = (AllowAny,)
def post(self, request):
email = request.data.get('email', None)
if email is None:
return Response({"message": "'email' field is missing"}, status=HTTP_400_BAD_REQUEST)
try:
data = User.objects.get(email=email)
except User.DoesNotExist:
data = None
if data:
return Response({"userExist": True, "username": data.username}, status=HTTP_200_OK)
return Response({"userExist": False}, status=HTTP_200_OK)
class AccountPasswordView(APIView):
"""
An API endpoint for password management (for the current user)
GET returns 200 if user has a password or 404 otherwise
POST must contain 'newPassword' field ( and 'oldPassword' if user already has a password )
"""
def post(self, request):
newpass = request.data.get('newPassword', None)
if newpass is None:
return Response({"message": "Missing 'newPassword' field"}, status=HTTP_400_BAD_REQUEST)
if len(newpass) < PASSWORD_MIN_LENGTH or len(newpass) > PASSWORD_MAX_LENGTH:
return Response({"message": "New password doesn't match length requirements"}, status=HTTP_400_BAD_REQUEST)
if request.user.has_usable_password():
oldpass = request.data.get('oldPassword', None)
if oldpass is None:
return Response({"message": "Missing 'oldPassword' field"}, status=HTTP_400_BAD_REQUEST)
if not request.user.check_password(oldpass):
return Response({"message": "'oldPassword' is invalid"}, status=HTTP_400_BAD_REQUEST)
if oldpass == newpass:
return Response({"message": "oldPassword and newPassword are identical"}, status=HTTP_400_BAD_REQUEST)
request.user.set_password(newpass)
request.user.save()
return Response(status=HTTP_200_OK)
def get(self, request):
if request.user.has_usable_password():
return Response(status=HTTP_200_OK)
return Response(status=HTTP_404_NOT_FOUND)
class AccountSocialView(APIView):
"""
A simple API endpoint for checking if user has connected social account
GET returns 200 and the name of the social auth provider if user has connected social account or 404 otherwise.
"""
def get(self, request):
socAuth = next(
iter(UserSocialAuth.get_social_auth_for_user(request.user)), None)
if not socAuth:
return Response(status=HTTP_404_NOT_FOUND)
else:
return Response({"social_provider": socAuth.provider}, status=HTTP_200_OK)
class RegisterView(APIView):
"""
An API endpoint for user registration.
POST must contain 'username', 'email', 'first_name', 'last_name' and 'password' fields.
"""
permission_classes = (AllowAny,)
def post(self, request):
serializer = UserRegisterSerializer(
data=request.data, context={'request': request})
# Return a 400 response if the data was invalid.
serializer.is_valid(raise_exception=True)
validated_data = serializer.validated_data
user = User.objects.create(
username=validated_data['username'],
email=validated_data['email'],
first_name=validated_data['first_name'],
last_name=validated_data['last_name']
)
user.set_password(validated_data['password'])
user.save()
setup_user_email(request, user, [])
# send_email_confirmation(request, user, signup=True)
return Response(status=HTTP_201_CREATED)
class RegisterCheckEmailView(APIView):
"""
A simple API endpoint for checking if an user with a given email exists.
POST must contain 'email' field. Server returns 400 if email is already used or 200 otherwise.
"""
permission_classes = (AllowAny,)
def post(self, request):
email = request.data.get('email', None)
if email is None:
return Response({"message": "'email' field is missing"}, status=HTTP_400_BAD_REQUEST)
if User.objects.filter(email=email):
return Response({"emailExist": True}, status=HTTP_400_BAD_REQUEST)
return Response(status=HTTP_200_OK)
class RegisterCheckUsernameView(APIView):
"""
An API endpoint for checking if an username is taken.
POST must contain 'username' field. Server returns 400 if username is already used or 200 is it is available
"""
permission_classes = (AllowAny,)
def post(self, request):
username = request.data.get('username', None)
if username is None:
return Response({"message": "'username' field is missing"}, status=HTTP_400_BAD_REQUEST)
if User.objects.filter(username=username):
return Response(status=HTTP_400_BAD_REQUEST)
return Response(status=HTTP_200_OK)
class GoogleAuthCodeView(APIView):
"""
An API endpoint which expects a google auth code, which is then used for social login.
POST must contain a 'code' field with the authorization code. This code is
exchanged for google's access and refresh tokens, which are stored on server.
Afterwards local access and refresh tokens are generated and returned, which are
then used to communicate with our API.
Go to https://developers.google.com/identity/sign-in/web/server-side-flow
for more information on the google server-side auth flow implemented here.
"""
permission_classes = (AllowAny,)
def post(self, request):
code = request.data.get('code', None)
if not code:
return Response({"message": "Authorization code missing"}, status=HTTP_400_BAD_REQUEST)
# Exchange auth code for tokens
googleurl = 'https://accounts.google.com/o/oauth2/token'
exchangeCodeRequest = makerequest.post(
googleurl,
data={
'code': code,
'redirect_uri': 'postmessage',
'client_id': SOCIAL_AUTH_GOOGLE_OAUTH2_KEY,
'client_secret': SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET,
'grant_type': 'authorization_code'
})
# We can now exchange the external token for a token linked to *OUR*
# OAuth2 provider
exchangeExternalTokenUrl = 'http://' + \
request.META['HTTP_HOST'] + '/social-auth/convert-token'
externalToken = exchangeCodeRequest.json().get('access_token', None)
if externalToken is None:
return Response({"message": "Server could not retrieve external tokens"}, status=HTTP_400_BAD_REQUEST)
exchangeExternalTokenRequest = makerequest.post(exchangeExternalTokenUrl, data={
'grant_type': 'convert_token',
'client_id': LOCAL_OAUTH2_KEY,
'backend': 'google-oauth2',
'token': externalToken}
)
# Get user and add exchangeCodeRequest's (Google's) refresh token to UserSocialAuth extra_data
# This is a bit hacky, @TODO use python-social-auth's pipeline
# mechanism instead
if exchangeExternalTokenRequest.status_code is not makerequest.codes.ok:
# If the social account's email is already used in another account,
# throw an error
return Response({"message": "User with that email already exists!"}, status=HTTP_400_BAD_REQUEST)
getUserUrl = 'http://' + request.META['HTTP_HOST'] + '/account/'
getUserRequest = makerequest.get(getUserUrl, data={}, headers={
'Authorization': 'Bearer ' + exchangeExternalTokenRequest.json()['access_token']})
refreshToken = exchangeCodeRequest.json().get('refresh_token', None)
if refreshToken is not None:
user = User.objects.all().filter(
username=getUserRequest.json()['username'])[0]
userSocial = user.social_auth.get(provider='google-oauth2')
userSocial.extra_data['refresh_token'] = refreshToken
userSocial.save()
return Response(exchangeExternalTokenRequest.json())
| 39.47619 | 119 | 0.675049 | 10,011 | 0.928923 | 0 | 0 | 0 | 0 | 0 | 0 | 3,700 | 0.343324 |
adfa93c02e8adb449c205b08b98c00c078beda51 | 13,966 | py | Python | splitgraph/commandline/image_creation.py | Trase/splitgraph | ef8332b29640230f4eebcbb350a37c67285064b1 | [
"Apache-2.0"
] | 1 | 2020-06-24T23:42:11.000Z | 2020-06-24T23:42:11.000Z | splitgraph/commandline/image_creation.py | Trase/splitgraph | ef8332b29640230f4eebcbb350a37c67285064b1 | [
"Apache-2.0"
] | null | null | null | splitgraph/commandline/image_creation.py | Trase/splitgraph | ef8332b29640230f4eebcbb350a37c67285064b1 | [
"Apache-2.0"
] | null | null | null | """
sgr commands related to creating and checking out images
"""
import sys
from collections import defaultdict
import click
from splitgraph.commandline.common import ImageType, RepositoryType, JsonType, remote_switch_option
from splitgraph.config import get_singleton, CONFIG
from splitgraph.exceptions import TableNotFoundError
@click.command(name="checkout")
@click.argument("image_spec", type=ImageType(default="HEAD", get_image=True))
@click.option(
"-f", "--force", help="Discard all pending changes to the schema", is_flag=True, default=False
)
@click.option(
"-u", "--uncheckout", help="Delete the checked out copy instead", is_flag=True, default=False
)
@click.option(
"-l",
"--layered",
help="Don't materialize the tables, use layered querying instead.",
is_flag=True,
default=False,
)
def checkout_c(image_spec, force, uncheckout, layered):
"""
Check out a Splitgraph image into a Postgres schema.
This downloads the required physical objects and materializes all tables, unless ``-l`` or ``--layered`` is passed,
in which case the objects are downloaded and a foreign data wrapper is set up on the engine to satisfy read-only
queries by combining results from each table's fragments.
Tables checked out in this way are still presented as normal Postgres tables and can queried in the same way.
Since the tables aren't materialized, layered querying is faster to set up, but since each query now results in a
subquery to each object comprising the table, actual query execution is slower than to materialized Postgres tables.
Layered querying is only supported for read-only queries.
Image spec must be of the format ``[NAMESPACE/]REPOSITORY[:HASH_OR_TAG]``. Note that currently, the schema that the
image is checked out into has to have the same name as the repository. If no image hash or tag is passed,
"HEAD" is assumed.
If ``-u`` or ``--uncheckout`` is passed, this instead deletes the checked out schema (assuming there are no pending
changes) and removes the HEAD pointer.
If ``--force`` isn't passed and the schema has pending changes, this will fail.
"""
repository, image = image_spec
if uncheckout:
repository.uncheckout(force=force)
click.echo("Unchecked out %s." % (str(repository),))
else:
image.checkout(force=force, layered=layered)
click.echo("Checked out %s:%s." % (str(repository), image.image_hash[:12]))
@click.command(name="commit")
@click.argument("repository", type=RepositoryType(exists=True))
@click.option(
"-s",
"--snap",
default=False,
is_flag=True,
help="Do not delta compress the changes and instead store the whole table again. "
"This consumes more space, but makes checkouts faster.",
)
@click.option(
"-c",
"--chunk-size",
default=int(get_singleton(CONFIG, "SG_COMMIT_CHUNK_SIZE")),
type=int,
help="Split new tables into chunks of this many rows (by primary key). The default "
"value is governed by the SG_COMMIT_CHUNK_SIZE configuration parameter.",
)
@click.option(
"-k",
"--chunk-sort-keys",
default=None,
type=JsonType(),
help="Sort the data inside each chunk by this/these key(s)",
)
@click.option(
"-t",
"--split-changesets",
default=False,
is_flag=True,
help="Split changesets for existing tables across original chunk boundaries.",
)
@click.option(
"-i",
"--index-options",
type=JsonType(),
help="JSON dictionary of extra indexes to calculate on the new objects.",
)
@click.option("-m", "--message", help="Optional commit message")
@click.option(
"-o", "--overwrite", is_flag=True, help="Overwrite physical objects that already exist"
)
def commit_c(
repository,
snap,
chunk_size,
chunk_sort_keys,
split_changesets,
index_options,
message,
overwrite,
):
"""
Commit changes to a checked-out Splitgraph repository.
This packages up all changes into a new image. Where a table hasn't been created or had its schema changed,
this will delta compress the changes. For all other tables (or if ``-s`` has been passed), this will
store them as full table snapshots.
When a table is stored as a full snapshot, `--chunk-size` sets the maximum size, in rows, of the fragments
that the table will be split into (default is no splitting). The splitting is done by the
table's primary key.
If `--split-changesets` is passed, delta-compressed changes will also be split up according to the original
table chunk boundaries. For example, if there's a change to the first and the 20000th row of a table that was
originally committed with `--chunk-size=10000`, this will create 2 fragments: one based on the first chunk
and one on the second chunk of the table.
If `--chunk-sort-keys` is passed, data inside the chunk is sorted by this key (or multiple keys).
    This helps speed up queries on those keys for storage layers that can leverage it (e.g. CStore). The expected format is JSON, e.g. `{table_1: [col_1, col_2]}`
`--index-options` expects a JSON-serialized dictionary of `{table: index_type: column: index_specific_kwargs}`.
Indexes are used to narrow down the amount of chunks to scan through when running a query. By default, each column
has a range index (minimum and maximum values) and it's possible to add bloom filtering to speed up queries that
involve equalities.
    Bloom filtering lets you trade off between the space overhead of the index and the probability of a false
positive (claiming that an object contains a record when it actually doesn't, leading to extra scans).
An example `index-options` dictionary:
\b
```
{
"table": {
"bloom": {
"column_1": {
"probability": 0.01, # Only one of probability
"size": 10000 # or size can be specified.
}
},
# Only compute the range index on these columns. By default,
# it's computed on all columns and is always computed on the
# primary key no matter what.
"range": ["column_2", "column_3"]
}
}
```
"""
new_hash = repository.commit(
comment=message,
snap_only=snap,
chunk_size=chunk_size,
split_changeset=split_changesets,
extra_indexes=index_options,
in_fragment_order=chunk_sort_keys,
overwrite=overwrite,
).image_hash
click.echo("Committed %s as %s." % (str(repository), new_hash[:12]))
@click.command(name="tag")
@click.argument("image_spec", type=ImageType(default=None))
@click.argument("tag", required=False)
@click.option("-d", "--delete", is_flag=True, help="Delete the tag instead.")
@remote_switch_option()
def tag_c(image_spec, tag, delete):
"""
Manage tags on images.
Depending on the exact invocation, this command can tag a Splitgraph image,
list all tags in a repository or delete a tag.
Examples:
``sgr tag noaa/climate``
List all tagged images in the ``noaa/climate`` repository and their tags.
``sgr tag noaa/climate:abcdef1234567890``
List all tags assigned to the image ``noaa/climate:abcdef1234567890...``
``sgr tag noaa/climate:abcdef1234567890 my_new_tag``
Tag the image ``noaa/climate:abcdef1234567890...`` with ``my_new_tag``. If the tag already exists, this will
overwrite the tag.
``sgr tag noaa/climate my_new_tag``
Tag the current ``HEAD`` of ``noaa/climate`` with ``my_new_tag``.
``sgr tag --delete noaa/climate:my_new_tag``
Delete the tag ``my_new_tag`` from ``noaa/climate``.
"""
repository, image = image_spec
if delete:
# In this case the tag must be a part of the image spec.
if tag is not None or image is None:
raise click.BadArgumentUsage(
"Use sgr tag --delete %s:TAG_TO_DELETE" % repository.to_schema()
)
if image in ("latest", "HEAD"):
raise click.BadArgumentUsage("%s is a reserved tag!" % image)
repository.images[image].delete_tag(image)
return
if tag is None:
# List all tags
tag_dict = defaultdict(list)
for img, img_tag in repository.get_all_hashes_tags():
tag_dict[img].append(img_tag)
if image is None:
for img, tags in tag_dict.items():
# Sometimes HEAD is none (if we've just cloned the repo)
if img:
click.echo("%s: %s" % (img[:12], ", ".join(sorted(tags))))
else:
click.echo(", ".join(tag_dict[repository.images[image].image_hash]))
return
if tag == "HEAD":
raise click.BadArgumentUsage("HEAD is a reserved tag!")
if image is None:
image = repository.head
else:
image = repository.images[image]
image.tag(tag)
click.echo("Tagged %s:%s with %s." % (str(repository), image.image_hash, tag))
@click.command(name="import")
@click.argument("image_spec", type=ImageType())
@click.argument("table_or_query")
@click.argument("target_repository", type=RepositoryType())
@click.argument("target_table", required=False)
def import_c(image_spec, table_or_query, target_repository, target_table):
"""
Import tables into a Splitgraph repository.
Imports a table or a result of a query from a local Splitgraph repository or a Postgres schema into another
Splitgraph repository.
Examples:
``sgr import noaa/climate:my_tag climate_data my/repository``
Create a new image in ``my/repository`` with the ``climate_data`` table included. This links the new image to
the physical object, meaning that the history of the ``climate_data`` table is preserved.
If no tag is specified, the 'latest' (not the HEAD image or current state of the checked out image)
image is used.
``sgr import noaa/climate:my_tag "SELECT * FROM climate_data" my/repository climate_data``
Create a new image in ``my/repository`` with the result of the query stored in the ``climate_data`` table. This
creates a new physical object without any linkage to the original data, so the history of the ``climate_data``
table isn't preserved. The SQL query can interact with multiple tables in the source image.
``sgr import other_schema other_table my/repository``
Since other_schema isn't a Splitgraph repository, this will copy ``other_schema.other_table``
into a new Splitgraph object and add the ``other_table`` table to a new image in ``my/repository``.
Note that importing doesn't discard or commit pending changes in the target Splitgraph repository: a new image
is created with the new table added, the new table is materialized in the repository and the HEAD pointer is moved.
"""
from splitgraph.core.engine import repository_exists
repository, image = image_spec
if repository_exists(repository):
foreign_table = False
image = repository.images[image]
# If the source table doesn't exist in the image, we'll treat it as a query instead.
try:
image.get_table(table_or_query)
is_query = False
except TableNotFoundError:
is_query = True
else:
# If the source schema isn't actually a Splitgraph repo, we'll be copying the table verbatim.
foreign_table = True
is_query = table_or_query not in repository.engine.get_all_tables(repository.to_schema())
image = None
if is_query and not target_table:
click.echo("TARGET_TABLE is required when the source is a query!")
sys.exit(1)
target_repository.import_tables(
[target_table] if target_table else [],
repository,
[table_or_query],
image_hash=image.image_hash if image else None,
foreign_tables=foreign_table,
table_queries=[] if not is_query else [True],
)
click.echo(
"%s:%s has been imported from %s:%s%s"
% (
str(target_repository),
target_table,
str(repository),
table_or_query,
(" (%s)" % image.image_hash[:12] if image else ""),
)
)
@click.command(name="reindex")
@click.argument("image_spec", type=ImageType(default="HEAD", get_image=True))
@click.argument("table_name", type=str)
@click.option(
"-i",
"--index-options",
type=JsonType(),
required=True,
help="JSON dictionary of extra indexes to calculate, e.g. "
'\'{"bloom": {"column_1": {"probability": 0.01}}}\'',
)
@click.option(
"-o",
"--ignore-patch-objects",
type=bool,
is_flag=True,
default=False,
help="Ignore objects that change other objects' rows instead of raising an error",
)
def reindex_c(image_spec, table_name, index_options, ignore_patch_objects):
"""
Run extra indexes on a table. This will merge the indexing results for all objects
that a table is formed from with the current object indexes. For explanation of
what indexes do, see the documentation for `sgr commit`.
If the objects haven't been downloaded yet, this will download them.
Currently reindexing objects that change other objects is unsupported and will raise
an error. Pass `-o` to ignore these objects and only reindex supported objects.
Image spec must be of the format ``[NAMESPACE/]REPOSITORY[:HASH_OR_TAG]``. If no tag is specified, ``HEAD`` is used.
"""
from splitgraph.core.output import pluralise
repository, image = image_spec
table = image.get_table(table_name)
click.echo("Reindexing table %s:%s/%s" % (repository.to_schema(), image.image_hash, table_name))
reindexed = table.reindex(
extra_indexes=index_options, raise_on_patch_objects=not ignore_patch_objects
)
click.echo("Reindexed %s" % pluralise("object", len(reindexed)))
| 37.951087 | 164 | 0.680009 | 0 | 0 | 0 | 0 | 13,619 | 0.975154 | 0 | 0 | 8,706 | 0.623371 |
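The `--index-options` dictionary documented in the `sgr commit` help above is easiest to build programmatically. A minimal sketch that shells out to the CLI with `json.dumps`; the repository and column names are placeholders, not from the original file:

```python
import json
import subprocess

# Hypothetical table/column names; the JSON shape follows the commit docstring above.
index_options = {
    "table": {
        "bloom": {"column_1": {"probability": 0.01}},
        "range": ["column_2", "column_3"],
    }
}

# Equivalent to: sgr commit -i '<json>' -m '<message>' some/repository
subprocess.run(
    ["sgr", "commit", "-i", json.dumps(index_options),
     "-m", "commit with extra indexes", "some/repository"],
    check=True,
)
```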
adfb427ed9c8aea967913aed0f52cd6bbf4bc8fe | 1,661 | py | Python | tests/libtests/geocoords/data/ConvertDataApp.py | jedbrown/spatialdata | f18d34d92253986e8018f393201bf901e9667c2a | ["MIT"] | null | null | null | tests/libtests/geocoords/data/ConvertDataApp.py | jedbrown/spatialdata | f18d34d92253986e8018f393201bf901e9667c2a | ["MIT"] | null | null | null | tests/libtests/geocoords/data/ConvertDataApp.py | jedbrown/spatialdata | f18d34d92253986e8018f393201bf901e9667c2a | ["MIT"] | null | null | null | #!/usr/bin/env python
#
# ======================================================================
#
# Brad T. Aagaard, U.S. Geological Survey
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ======================================================================
#
## @file geocoords/tests/libtests/data/ConvertDataApp.py
## @brief Python application to generate data for coordinate conversion tests.
from pyre.applications.Script import Script
# ConvertDataApp class
class ConvertDataApp(Script):
"""Python application to generate data for coordinate conversion tests."""
def main(self, *args, **kwds):
"""Run application."""
data = self.inventory.data
data.calculate()
data.dump(self.inventory.dumper)
return
def __init__(self):
"""Constructor."""
Script.__init__(self, 'convertdataapp')
return
class Inventory(Script.Inventory):
## @class Inventory
## Python object for managing ConvertDataApp facilities and properties.
##
## \b Properties
## @li None
##
## \b Facilities
## @li \b data Data generator for coordinate transformation test
## @li \b dumper Dump data to file
import pyre.inventory
from spatialdata.utils.CppData import CppData
from ConvertData import ConvertData
data = pyre.inventory.facility('data', factory=ConvertData)
dumper = pyre.inventory.facility('dumper', factory=CppData)
# main
if __name__ == '__main__':
app = ConvertDataApp()
app.run()
# End of file
| 27.229508 | 78 | 0.64419 | 932 | 0.561108 | 0 | 0 | 0 | 0 | 0 | 0 | 985 | 0.593016 |
adfb5713c6c3ab922ee55b1c8b7f49f69297f607 | 381 | py | Python | index.py | FunctionX/validator_queries | 842d0a75ee07f48b972d1bb18d292cadc730fa8b | ["MIT"] | null | null | null | index.py | FunctionX/validator_queries | 842d0a75ee07f48b972d1bb18d292cadc730fa8b | ["MIT"] | null | null | null | index.py | FunctionX/validator_queries | 842d0a75ee07f48b972d1bb18d292cadc730fa8b | ["MIT"] | null | null | null | import Data
import File
def main():
    Data.val_earnings_w_sum_columns()
    dataframe = Data.get_val_token_info()
dataframe.to_csv(File._generate_file_name("fxcored_status"), index=False)
if __name__ == '__main__':
main()
| 14.111111 | 77 | 0.76378 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.068241 |
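`File._generate_file_name` is defined elsewhere in that repository; a plausible stand-in, purely as an assumption about its behaviour (prefix plus a timestamp):

```python
from datetime import datetime

def generate_file_name(prefix: str) -> str:
    # Assumed behaviour: '<prefix>_<timestamp>.csv', e.g. fxcored_status_20220101_120000.csv
    return "{}_{}.csv".format(prefix, datetime.now().strftime("%Y%m%d_%H%M%S"))

print(generate_file_name("fxcored_status"))
```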
adfc25dd827903785cf0dc2f054806e377cdcb01 | 1,000 | py | Python | py/py_0067_maximum_path_sum_ii.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | ["MIT"] | null | null | null | py/py_0067_maximum_path_sum_ii.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | ["MIT"] | null | null | null | py/py_0067_maximum_path_sum_ii.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | ["MIT"] | null | null | null | # Solution of;
# Project Euler Problem 67: Maximum path sum II
# https://projecteuler.net/problem=67
#
# By starting at the top of the triangle below and moving to adjacent numbers
# on the row below, the maximum total from top to bottom is 23.
#        3
#       7 4
#      2 4 6
#     8 5 9 3
# That is, 3 + 7 + 4 + 9 = 23. Find the maximum total from top to bottom in
# triangle.txt (right click and 'Save Link/Target As...'), a 15K text file
# containing a triangle with one-hundred rows. NOTE: This is a much more
# difficult version of Problem 18. It is not possible to try every route to
# solve this problem, as there are 2^99 altogether! If you could check one
# trillion (10^12) routes every second it would take over twenty billion years
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 67
timed.caller(dummy, n, i, prob_id)
| 35.714286 | 79 | 0.706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 845 | 0.845 |
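The efficient algorithm the comment alludes to is the classic bottom-up reduction: starting from the second-to-last row, add to each cell the larger of its two children, so the apex ends up holding the maximum path sum. A sketch, checked against the 4-row example triangle:

```python
def max_path_sum(triangle):
    rows = [row[:] for row in triangle]  # work on a copy
    # Fold each row into the one above it, keeping the better child.
    for i in range(len(rows) - 2, -1, -1):
        for j in range(len(rows[i])):
            rows[i][j] += max(rows[i + 1][j], rows[i + 1][j + 1])
    return rows[0][0]

assert max_path_sum([[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]) == 23
```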
adfd589bdaa5f2bed3080ab8be5e570ad08ea48a | 1,457 | py | Python | sistemas_lineares.py | lucaspompeun/metodos-matematicos-aplicados-nas-engenharias-via-sistemas-computacionais | 008d397f76a935af1aba530cc0134b9dd326d3ac | ["MIT"] | 16 | 2019-09-27T03:08:44.000Z | 2020-10-16T18:43:45.000Z | primeira-edicao/sistemas_lineares.py | gm2sc-ifpa/metodos-matematicos-aplicados-nas-engenharias-via-sistemas-computacionais-master | f435c366e08dc14b0557f2172ad3b841ddb7ef2e | ["MIT"] | null | null | null | primeira-edicao/sistemas_lineares.py | gm2sc-ifpa/metodos-matematicos-aplicados-nas-engenharias-via-sistemas-computacionais-master | f435c366e08dc14b0557f2172ad3b841ddb7ef2e | ["MIT"] | 5 | 2019-09-13T20:00:38.000Z | 2020-09-19T03:04:00.000Z | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 27 18:19:25 2019
INSTITUTO FEDERAL DE EDUCAÇÃO, CIÊNCIA E TECNOLOGIA DO PÁRA - IFPA ANANINDEUA
@author:
Prof. Dr. Denis C. L. Costa
Discentes:
Heictor Alves de Oliveira Costa
Lucas Pompeu Neves
Grupo de Pesquisa:
Gradiente de Modelagem Matemática e
Simulação Computacional - GM²SC
Assunto:
Resolução de Sistemas Lineares
Nome do sript: sistemas_lineares
Disponível em:
https://github.com/GM2SC/DEVELOPMENT-OF-MATHEMATICAL-METHODS-IN-
COMPUTATIONAL-ENVIRONMENT/blob/master/SINEPEM_2019/sistemas_lineares.py
"""
# Biblioteca: numpy
import numpy as np
print('')
print('=======================================')
# Resolução de Sistemas Lineares
print('Resolução de Sistemas Lineares')
print('')
# Declarando a Matriz dos Coeficientes: A
A = np.array([[1,1,1], [1,-1,-1], [2,-1,1]])
print('Matriz dos Coeficientes:' )
print('A =',"\n", A,"\n")
# Declarando a Matriz dos Termos Independentes: B
B = np.array([[6], [-4], [1]])
print('Matriz dos Termos Independentes:' )
print('B =',"\n", B,"\n")
# Matriz Solução: X = inv(A)*B
X = np.linalg.solve(A, B)
print('Matriz solução:')
print('X =')
print(X)
print('')
print('=======================================')
print(' ---> Fim do Programa sistemas_lineares <---') | 26.981481 | 78 | 0.572409 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,197 | 0.810976 |
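A quick sanity check on the solver output: multiplying A by the computed X should reproduce B up to rounding.

```python
import numpy as np

A = np.array([[1, 1, 1], [1, -1, -1], [2, -1, 1]])
B = np.array([[6], [-4], [1]])
X = np.linalg.solve(A, B)
assert np.allclose(A @ X, B)  # residual check: A.X == B
```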
adfdf98fe1afda40d1e086b86ceaf5056842822c | 1,956 | py | Python | observations/r/unemp_dur.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | ["Apache-2.0"] | 199 | 2017-07-24T01:34:27.000Z | 2022-01-29T00:50:55.000Z | observations/r/unemp_dur.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | ["Apache-2.0"] | 46 | 2017-09-05T19:27:20.000Z | 2019-01-07T09:47:26.000Z | observations/r/unemp_dur.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | ["Apache-2.0"] | 45 | 2017-07-26T00:10:44.000Z | 2022-03-16T20:44:59.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def unemp_dur(path):
"""Unemployment Duration
Journal of Business Economics and Statistics web site :
http://amstat.tandfonline.com/loi/ubes20
*number of observations* : 3343
  A time series containing:
spell
length of spell in number of two-week intervals
censor1
= 1 if re-employed at full-time job
censor2
= 1 if re-employed at part-time job
censor3
1 if re-employed but left job: pt-ft status unknown
censor4
1 if still jobless
age
age
ui
= 1 if filed UI claim
reprate
eligible replacement rate
disrate
eligible disregard rate
logwage
log weekly earnings in lost job (1985\\$)
tenure
years tenure in lost job
McCall, B.P. (1996) “Unemployment Insurance Rules, Joblessness, and
Part-time Work”, *Econometrica*, **64**, 647–682.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `unemp_dur.csv`.
Returns:
Tuple of np.ndarray `x_train` with 3343 rows and 11 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'unemp_dur.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/Ecdat/UnempDur.csv'
maybe_download_and_extract(path, url,
save_file_name='unemp_dur.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
| 22.744186 | 71 | 0.674847 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,260 | 0.642202 |
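Typical use of the loader above; the path is a placeholder, and the shape simply restates what the docstring promises:

```python
# assuming: from observations.r.unemp_dur import unemp_dur
x_train, metadata = unemp_dur("~/data")
print(x_train.shape)              # (3343, 11) per the docstring
print(list(metadata["columns"]))  # spell, censor1, ..., tenure
```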
adff3e929b131603bc3d8d375940f959a87b03cb | 138 | py | Python | Exe22.py | flavioUENP/aula1 | 8deba1de0ac54109c54d7e44d2852a8ca639e625 | ["Apache-2.0"] | null | null | null | Exe22.py | flavioUENP/aula1 | 8deba1de0ac54109c54d7e44d2852a8ca639e625 | ["Apache-2.0"] | null | null | null | Exe22.py | flavioUENP/aula1 | 8deba1de0ac54109c54d7e44d2852a8ca639e625 | ["Apache-2.0"] | null | null | null | f = float(input("Enter the temperature in degrees Fahrenheit: "))
celsius = 5 / 9 * (f - 32)
print("The temperature", f, "ºF is", celsius, "ºC in Celsius")
| 46 | 60 | 0.702899 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 87 | 0.612676 |
bc0331e695df64f1d609fc39793c9079fcfd83e0 | 1,289 | py | Python | Exercicios/Mundo 2/ex044.py | EdsonRomao/CursoEmVideo | caf99f5a54c001069572a0318dfcaa50d028d362 | ["MIT"] | null | null | null | Exercicios/Mundo 2/ex044.py | EdsonRomao/CursoEmVideo | caf99f5a54c001069572a0318dfcaa50d028d362 | ["MIT"] | null | null | null | Exercicios/Mundo 2/ex044.py | EdsonRomao/CursoEmVideo | caf99f5a54c001069572a0318dfcaa50d028d362 | ["MIT"] | null | null | null | """
Write a program that computes the amount to be paid for a product,
given its REGULAR PRICE and the PAYMENT METHOD:
- Cash/check up front: 10% discount
- Card, paid in full: 5% discount
- Card, up to 2 installments: regular price
- Card, 3 or more installments: 20% SURCHARGE
"""
preco = float(input('What is the product price? '))
condicao = str(input('How would you like to pay? Options below.\n'
                     '(1) Cash/check up front.\n'
                     '(2) Card, paid in full.\n'
                     '(3) Card, up to 2 installments.\n'
                     '(4) Card, 3 or more installments.\n'
                     'Enter the desired option: '))
desconto10 = (preco / 100) * 10
desconto5 = (preco / 100) * 5
juros20 = (preco / 100) * 20
if condicao == '1':
    print(f'The product that cost R${preco} will be R${preco - desconto10} with a 10% discount')
elif condicao == '2':
    print(f'The product that cost R${preco} will be R${preco - desconto5} with a 5% discount')
elif condicao == '3':
    print(f'Paying in up to 2 card installments has no surcharge: R${preco} in 2x of R${preco / 2}')
elif condicao == '4':
    print(f'Paying in 3 or more installments adds a 20% surcharge: R${preco} goes up to R${preco + juros20}')
else:
    print('The chosen option is not valid, check the correct number!')
| 40.28125 | 119 | 0.619085 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 928 | 0.703563 |
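Each payment option above just scales the price by a fixed factor, so the if/elif chain can also be written as a dictionary dispatch; a sketch with the same percentages:

```python
import math

MULTIPLIERS = {"1": 0.90, "2": 0.95, "3": 1.00, "4": 1.20}

def final_price(price: float, option: str) -> float:
    if option not in MULTIPLIERS:
        raise ValueError("invalid option: " + option)
    return price * MULTIPLIERS[option]

assert math.isclose(final_price(100.0, "1"), 90.0)
assert math.isclose(final_price(100.0, "4"), 120.0)
```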
bc03aad972f5e963ed741a11cdefd802a8d37393 | 11,558 | py | Python | Sampling/gp/GPy_wrapper.py | josephhic/AutoDot | 9acd0ddab9191b8a90afc6f1f6373cf711b40b89 | ["MIT"] | 7 | 2020-09-16T23:50:01.000Z | 2022-01-29T13:31:35.000Z | Sampling/gp/GPy_wrapper.py | josephhic/AutoDot | 9acd0ddab9191b8a90afc6f1f6373cf711b40b89 | ["MIT"] | 1 | 2022-01-15T14:50:16.000Z | 2022-01-15T14:50:16.000Z | Sampling/gp/GPy_wrapper.py | josephhic/AutoDot | 9acd0ddab9191b8a90afc6f1f6373cf711b40b89 | ["MIT"] | 6 | 2020-08-20T11:52:51.000Z | 2021-03-12T08:04:35.000Z | import numpy as np
import GPy
from .GP_interface import GPInterface, convert_lengthscale, convert_2D_format
class GPyWrapper(GPInterface):
def __init__(self):
# GPy settings
GPy.plotting.change_plotting_library("matplotlib") # use matpoltlib for drawing
super().__init__()
self.center = 0.0
def create_kernel(self, ndim, kernel_name, var_f=1.0, lengthscale=1.0, const_kernel=False):
if kernel_name == 'Matern52':
l = convert_lengthscale(ndim, lengthscale)
kernel = GPy.kern.Matern52(input_dim=ndim, ARD=True, variance=var_f, lengthscale=l, name='basic')
elif kernel_name == 'RBF':
l = convert_lengthscale(ndim, lengthscale)
kernel = GPy.kern.RBF(input_dim=ndim, ARD=True, variance=var_f, lengthscale=l, name='basic')
else:
raise ValueError('Unsupported kernel: '+ kernel_name)
self.ndim = ndim
self.kernel = kernel
if const_kernel:
self.kernel += GPy.kern.Bias(1.0)
self.stat_kernel = self.kernel.basic
else:
self.stat_kernel = self.kernel
def set_kernel_length_prior(self, prior_mean, prior_var):
if self.ndim != len(prior_mean) or self.ndim != len(prior_var):
raise ValueError('Incorrect kernel prior parameters.')
if self.kernel is None:
raise ValueError('Kernel should be defined first.')
for i in range(self.ndim):
self.stat_kernel.lengthscale[[i]].set_prior(GPy.priors.Gamma.from_EV(prior_mean[i],prior_var[i])) # don't know why, but [i] does not work
def set_kernel_var_prior(self, prior_mean, prior_var):
self.stat_kernel.variance.set_prior(GPy.priors.Gamma.from_EV(prior_mean,prior_var))
def fix_kernel_lengthscale(self):
self.stat_kernel.lengthscale.fix()
def fix_kernel_var(self):
self.stat_kernel.variance.fix()
def create_model(self, x, y, noise_var, noise_prior='fixed'):
x = convert_2D_format(x)
y = convert_2D_format(y) - self.center
self.outdim = y.shape[1]
noise_var = np.array(noise_var)
if noise_var.ndim == 0:
self.model = GPy.models.GPRegression(x, y, self.kernel, noise_var=noise_var)
noise = self.model.Gaussian_noise
else:
assert noise_var.shape == y.shape
self.model = GPy.models.GPHeteroscedasticRegression(x, y, self.kernel)
self.model['.*het_Gauss.variance'] = noise_var
noise = self.model.het_Gauss.variance
if noise_prior == 'fixed':
noise.fix()
else:
raise ValueError('Not Implemented yet.')
def predict_f(self, x, full_cov=False):
'''
Returns:
posterior mean, posterior variance
'''
x = convert_2D_format(x)
post_mean, post_var = self.model.predict_noiseless(x, full_cov=full_cov)
if self.outdim > 1:
post_var = np.concatenate([post_var]*self.outdim, axis=-1)
return post_mean + self.center, post_var
def predict_withGradients(self, x):
'''
Borrowed from https://github.com/SheffieldML/GPyOpt/blob/master/GPyOpt/models/gpmodel.py
Returns the mean, standard deviation, mean gradient and standard deviation gradient at X.
'''
x = convert_2D_format(x)
m, v = self.model.predict(x)
v = np.clip(v, 1e-10, np.inf)
dmdx, dvdx = self.model.predictive_gradients(x)
dmdx = dmdx[:,:,0]
dsdx = dvdx / (2*np.sqrt(v))
return m + self.center, np.sqrt(v), dmdx, dsdx
def posterior_sample_f(self, x, size = 10):
'''
Parameters
x: (Nnew x input_dim)
Returns
(Nnew x output_dim x samples)
'''
return self.model.posterior_samples_f(x, size) + self.center
def optimize(self, num_restarts=30, opt_messages=False, print_result=True, parallel=False):
        self.model.optimize_restarts(num_restarts=num_restarts, robust=True, parallel=parallel, messages=opt_messages)
if print_result:
print(self.kernel)
print(self.stat_kernel.lengthscale)
print(self.stat_kernel.variance)
class GPyWrapper_Classifier(GPyWrapper):
def create_model(self, x, y):
assert self.center == 0.0
x = convert_2D_format(x)
y = convert_2D_format(y)
self.outdim = y.shape[1]
self.model = GPy.models.GPClassification(x, y, self.kernel)
def predict_prob(self, x):
x = convert_2D_format(x)
prob = self.model.predict(x, full_cov=False)[0]
return prob
def optimize(self, maxiter=1000, opt_messages=False, print_result=True):
for i in range(5):
self.model.optimize(max_iters=int(maxiter/5), messages=opt_messages)
if print_result:
print(self.kernel)
print(self.stat_kernel.lengthscale)
class GPyWrapper_MultiSeparate(object):
def create_kernel(self, ndim, outdim, kernel_name, var_f=1.0, lengthscale=1.0, const_kernel=False):
if isinstance(kernel_name, str):
kernel_name = [kernel_name]*outdim
if np.isscalar(var_f):
var_f = np.ones(outdim) * var_f
if np.isscalar(lengthscale):
            lengthscale = np.ones(outdim) * lengthscale
if isinstance(const_kernel, bool):
const_kernel = [const_kernel]*outdim
self.gp_list = list()
for i in range(outdim):
gp = GPyWrapper()
gp.create_kernel(ndim, kernel_name[i], var_f[i], lengthscale[i], const_kernel[i])
self.gp_list.append(gp)
self.outdim = outdim
def set_kernel_length_prior(self, prior_mean, prior_var):
# Apply same prior for all outputs
for i in range(self.outdim):
self.gp_list[i].set_kernel_length_prior(prior_mean, prior_var)
def set_kernel_var_prior(self, prior_mean, prior_var):
# Apply same prior for all outputs
for i in range(self.outdim):
self.gp_list[i].set_kernel_var_prior(prior_mean, prior_var)
def fix_kernel_lengthscale(self):
for i in range(self.outdim):
self.gp_list[i].fix_kernel_lengthscale()
def fix_kernel_var(self):
for i in range(self.outdim):
self.gp_list[i].fix_kernel_var()
def create_model(self, x, y, noise_var, noise_prior='fixed'):
if not (y.ndim == 2 and y.shape[1] == self.outdim):
raise ValueError('Incorrect data shape.')
noise_var = np.array(noise_var)
for i in range(self.outdim):
if noise_var.ndim == 2 and noise_var.shape[1] == self.outdim:
noise_var_i = noise_var[:, i:i+1]
else:
noise_var_i = noise_var
gp = self.gp_list[i]
gp.create_model(x, y[:,i:i+1], noise_var_i, noise_prior)
def predict_f(self, x, full_cov=False):
post_mean_all = list()
post_var_all = list()
for i in range(self.outdim):
post_mean, post_var = self.gp_list[i].predict_f(x, full_cov)
post_mean_all.append(post_mean)
post_var_all.append(post_var)
return np.concatenate(post_mean_all,axis=-1), np.concatenate(post_var_all,axis=-1)
def posterior_sample_f(self, x, size = 10):
post_samples_all = list()
for i in range(self.outdim):
            post_samples = self.gp_list[i].posterior_sample_f(x, size)
post_samples_all.append(post_samples)
return np.concatenate(post_samples_all,axis=1)
def optimize(self, num_restarts=30, opt_messages=False, print_result=False):
for i in range(self.outdim):
self.gp_list[i].optimize(num_restarts, opt_messages, print_result)
def predict_withGradients(self, x):
'''
Borrowed from https://github.com/SheffieldML/GPyOpt/blob/master/GPyOpt/models/gpmodel.py
Returns the mean, standard deviation, mean gradient and standard deviation gradient at X.
m_all: (num_x, outdim)
std_all: (num_x, outdim)
dmdx_all: (num_x, outdim, n_dim)
dsdx_all: (num_x, outdim, n_dim)
'''
m_all, std_all, dmdx_all, dsdx_all = [], [], [], []
for i in range(self.outdim):
m, std, dmdx, dsdx = self.gp_list[i].predict_withGradients(x)
m_all.append(m)
std_all.append(std)
dmdx_all.append(dmdx)
dsdx_all.append(dsdx)
return np.concatenate(m_all,axis=-1), np.concatenate(std_all,axis=-1), np.stack(dmdx_all,axis=1), np.stack(dsdx_all,axis=1)
class GPyWrapper_MultiIndep(GPyWrapper):
def create_kernel(self, ndim, outdim, kernel_name, var_f=1.0, lengthscale=1.0, const_kernel=False):
super().create_kernel(ndim, kernel_name, var_f, lengthscale, const_kernel)
k_multi = GPy.kern.IndependentOutputs([self.kernel, self.kernel.copy()])
#icm = GPy.util.multioutput.ICM(input_dim=ndim, num_outputs=outdim, kernel=self.kernel)
#icm.B.W.constrain_fixed(0) # fix W matrix to 0
if const_kernel:
self.stat_kernel = k_multi.sum.basic
else:
self.stat_kernel = k_multi.basic
self.kernel = k_multi
print(self.kernel)
def create_model(self, x, y, noise_var, noise_prior='fixed'):
x = convert_2D_format(x)
y = convert_2D_format(y) - self.center
numdata = x.shape[0]
outdim = y.shape[1]
indim = x.shape[1]
yy = y.transpose().ravel()
ind = np.concatenate([ o*np.ones(numdata) for o in range(outdim)])
xx = np.concatenate([x]*outdim)
xx = np.concatenate((xx,ind[:,np.newaxis]), axis=1)
print(xx.shape, yy.shape)
        # Fit on the stacked inputs/outputs that carry the output-index column
        self.model = GPy.models.GPRegression(xx, yy[:, np.newaxis], self.kernel, noise_var=noise_var)
if noise_prior == 'fixed':
self.model.Gaussian_noise.fix()
else:
raise ValueError('Not Implemented yet.')
def create_GP(num_active_gates, outdim, k_name='Matern52', var_f=1.0, lengthscale=1.0, center=0.0):
if np.isscalar(lengthscale):
lengthscale = np.ones(num_active_gates)
gp = GPyWrapper() # initialize GP environment
#gp = GPyWrapper_MultiIndep() # initialize GP environment
gp.center = center
# GP kernels
gp.create_kernel(num_active_gates, k_name, var_f, lengthscale)
#gp.create_kernel(num_active_gates, outdim, k_name, var_f, lengthscale)
return gp
def main():
X = np.arange(1,6).reshape((5,1))
f = lambda x : np.square(x-4.0)
#Y = np.concatenate([f(X), -f(X)], axis=1)
Y = np.concatenate([f(X)], axis=1)
#noise_var = 0.01**2
#noise_var = np.concatenate([np.square(X / 10.)]*2, axis=1)
noise_var = np.square(X / 10.)
print(X.shape, Y.shape)
gp = create_GP(1, 2, 'Matern52', 2.0, 1.0, 0.0)
gp.create_model(X, Y, noise_var, noise_prior='fixed')
gp.optimize()
X_pred = np.linspace(1.,5.,10).reshape((-1,1))
mean, cov = gp.predict_f(X_pred)
print(mean)
#print(cov)
'''
###
# GP Classification test
###
X = np.arange(1,6).reshape((5,1))
Y = np.array([1.0, 1.0, 1.0, 0.0, 0.0]).reshape((5,1))
gpc = GPyWrapper_Classifier()
gpc.create_kernel(1, 'RBF', 1.0, 1.0)
gpc.create_model(X, Y)
X_pred = np.linspace(1.,5.,10).reshape((-1,1))
print(gpc.predict_prob(X_pred))
print(gpc.model)
gpc.optimize()
print(gpc.predict_prob(X_pred))
print(gpc.model)
'''
if __name__ == '__main__':
main()
| 37.771242 | 149 | 0.626752 | 9,886 | 0.855338 | 0 | 0 | 0 | 0 | 0 | 0 | 2,101 | 0.181779 |
bc03d7e1e8567f7bd294e43c4cbf61eccfd77084 | 128 | py | Python | Deploying_Models/deploying_sentiment_classifier/SAGunicorn.py | oke-aditya/Machine_Learning | 3dd40ae2b9cba1890e7060448e75c14194b27775 | ["MIT"] | 15 | 2019-11-16T11:09:24.000Z | 2022-01-09T01:58:03.000Z | Deploying_Models/deploying_sentiment_classifier/SAGunicorn.py | oke-aditya/Machine_Learning | 3dd40ae2b9cba1890e7060448e75c14194b27775 | ["MIT"] | 1 | 2021-11-10T19:46:00.000Z | 2021-11-10T19:46:00.000Z | Deploying_Models/deploying_sentiment_classifier/SAGunicorn.py | oke-aditya/Machine_Learning | 3dd40ae2b9cba1890e7060448e75c14194b27775 | ["MIT"] | null | null | null | from flask_adv_deploy import app
# Note: Gunicorn is not supported on Windows machines.
if __name__ == "__main__":
app.run()
| 21.333333 | 52 | 0.742188 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 62 | 0.484375 |
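Since the note says Gunicorn does not run on Windows, one portable alternative is the pure-Python waitress server; a sketch assuming the same `app` object from `flask_adv_deploy`:

```python
from flask_adv_deploy import app
from waitress import serve  # pip install waitress; runs on Windows too

if __name__ == "__main__":
    serve(app, host="0.0.0.0", port=8000)
```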
bc05ff0b37a8954f4cefbbcdd8fe6e02c5cb64be | 3,938 | py | Python | 12/12.py | Hegemege/advent-of-code-2018 | 298e1dc95486df6888c3fb5b73d92dfc49703f8a | ["MIT"] | null | null | null | 12/12.py | Hegemege/advent-of-code-2018 | 298e1dc95486df6888c3fb5b73d92dfc49703f8a | ["MIT"] | null | null | null | 12/12.py | Hegemege/advent-of-code-2018 | 298e1dc95486df6888c3fb5b73d92dfc49703f8a | ["MIT"] | null | null | null |
class Node:
def __init__(self, value, index, next, previous):
self.value = value
self.next_value = value
self.index = index
self.next = next
self.previous = previous
def main():
input_data = read_input()
initial_row = input_data.pop(0) # Extract the initial state
input_data.pop(0) # Remove the empty row
rules = list(map(lambda x: x.split(" => "), input_data))
initial_row = initial_row[15:]
# Build the initial state
current = None
for i in range(len(initial_row)):
previous = None
if current is not None:
previous = current
current = Node(initial_row[0], i, None, None)
initial_row = initial_row[1:]
if previous is not None:
previous.next = current
current.previous = previous
# When growing - add 3 more to both ends, and in the end remove the non-grown nodes from both ends
# Current node is always some node in the hierarchy
generation_number = 0
#debug(current, True, True)
for i in range(20):
generation_number += 1
current = grow(current, rules)
#debug(current, True, True)
leftmost = get_leftmost(current)
index_sum = 0
while leftmost is not None:
if leftmost.value == '#':
index_sum += leftmost.index
leftmost = leftmost.next
print(index_sum)
def grow(node, rules):
'''Take the current state described by one node'''
# Find the leftmost node and add the 3 nodes
leftmost = get_leftmost(node)
for i in range(3):
new_node = Node('.', leftmost.index - 1, None, None)
leftmost.previous = new_node
new_node.next = leftmost
leftmost = new_node
# Find the rightmost and add 3 nodes
rightmost = get_rightmost(node)
for i in range(3):
new_node = Node('.', rightmost.index + 1, None, None)
rightmost.next = new_node
new_node.previous = rightmost
rightmost = new_node
# Go through the nodes and test all rules
current = leftmost.next.next
while current.next.next is not None:
pp = current.previous.previous
p = current.previous
n = current.next
nn = current.next.next
for rule in rules:
if rule[0][0] == pp.value and rule[0][1] == p.value and rule[0][2] == current.value and rule[0][3] == n.value and rule[0][4] == nn.value:
current.next_value = rule[1]
# Assumes that every combination is in the rules
current = current.next
# Remove the ungrown nodes from both ends
leftmost = get_leftmost(node)
while leftmost.next_value == '.':
leftmost.next.previous = None
leftmost = leftmost.next
rightmost = get_rightmost(leftmost)
while rightmost.next_value == '.':
rightmost.previous.next = None
rightmost = rightmost.previous
# Finally update the state for all nodes
current = get_leftmost(rightmost)
while current is not None:
current.value = current.next_value
current = current.next
return rightmost # Return any valid node - in this case rightmost was updated last
def get_leftmost(node):
leftmost = node
while leftmost.previous is not None:
leftmost = leftmost.previous
return leftmost
def get_rightmost(node):
rightmost = node
while rightmost.next is not None:
rightmost = rightmost.next
return rightmost
def debug(node, p, n):
if p and node.previous is not None:
debug(node.previous, True, False)
print(node.value, end="")
if n and node.next is not None:
debug(node.next, False, True)
def read_input():
'''Read the file and remove trailing new line characters'''
f = open('input.txt', 'r')
data = list(map(lambda x: x[:-1], f.readlines()))
f.close()
return data
if __name__ == '__main__':
main()
| 28.744526 | 149 | 0.624937 | 209 | 0.053073 | 0 | 0 | 0 | 0 | 0 | 0 | 748 | 0.189944 |
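The doubly linked list above is one way to represent the pots; the same generation step can also be expressed over a set of live pot indices, which makes the five-character rule lookup explicit. A sketch, assuming rules as a dict from pattern strings to '#'/'.':

```python
def step(plants, rules):
    """One generation: plants is a set of indices whose pot holds '#'."""
    lo, hi = min(plants), max(plants)
    nxt = set()
    for i in range(lo - 2, hi + 3):
        pattern = "".join("#" if j in plants else "." for j in range(i - 2, i + 3))
        if rules.get(pattern, ".") == "#":
            nxt.add(i)
    return nxt

# e.g. plants = {i for i, c in enumerate(initial_row) if c == "#"}
```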
bc068191a843e9f2423e9bfc3bb78487a28b39fc | 2,243 | py | Python | vissl/models/heads/__init__.py | blazejdolicki/vissl | 9c10748a19fb1c637f32687142c8cd685f2410ff | ["MIT"] | 2,512 | 2021-01-27T18:44:44.000Z | 2022-03-31T19:33:49.000Z | vissl/models/heads/__init__.py | blazejdolicki/vissl | 9c10748a19fb1c637f32687142c8cd685f2410ff | ["MIT"] | 361 | 2021-01-27T20:12:09.000Z | 2022-03-31T12:39:34.000Z | vissl/models/heads/__init__.py | blazejdolicki/vissl | 9c10748a19fb1c637f32687142c8cd685f2410ff | ["MIT"] | 277 | 2021-01-29T08:09:02.000Z | 2022-03-31T07:57:35.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from typing import Callable
from classy_vision.generic.registry_utils import import_all_modules
FILE_ROOT = Path(__file__).parent
MODEL_HEADS_REGISTRY = {}
MODEL_HEADS_NAMES = set()
def register_model_head(name: str):
"""Registers Self-Supervision Model Heads.
This decorator allows VISSL to add custom model heads, even if the
model head itself is not part of VISSL. To use it, apply this decorator
to a model head class, like this:
.. code-block:: python
@register_model_head('my_model_head_name')
def my_model_head():
...
To get a model head from a configuration file, see :func:`get_model_head`."""
def register_model_head_cls(cls: Callable[..., Callable]):
if name in MODEL_HEADS_REGISTRY:
raise ValueError("Cannot register duplicate model head ({})".format(name))
if cls.__name__ in MODEL_HEADS_NAMES:
raise ValueError(
"Cannot register task with duplicate model head name ({})".format(
cls.__name__
)
)
MODEL_HEADS_REGISTRY[name] = cls
MODEL_HEADS_NAMES.add(cls.__name__)
return cls
return register_model_head_cls
def get_model_head(name: str):
"""
Given the model head name, construct the head if it's registered
with VISSL.
"""
assert name in MODEL_HEADS_REGISTRY, "Unknown model head"
return MODEL_HEADS_REGISTRY[name]
# automatically import any Python files in the heads/ directory
import_all_modules(FILE_ROOT, "vissl.models.heads")
from vissl.models.heads.linear_eval_mlp import LinearEvalMLP # isort:skip # noqa
from vissl.models.heads.mlp import MLP # isort:skip # noqa
from vissl.models.heads.siamese_concat_view import ( # isort:skip # noqa
SiameseConcatView,
)
from vissl.models.heads.swav_prototypes_head import ( # isort:skip # noqa
SwAVPrototypesHead,
)
__all__ = [
"get_model_head",
"LinearEvalMLP",
"MLP",
"SiameseConcatView",
"SwAVPrototypesHead",
]
| 28.0375 | 86 | 0.695497 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,061 | 0.473027 |
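Usage of the registry above follows its own docstring; the head class and name here are invented for illustration (requires torch):

```python
import torch.nn as nn
# assuming: from vissl.models.heads import register_model_head, get_model_head

@register_model_head("my_pooling_head")  # hypothetical head name
class MyPoolingHead(nn.Module):
    def forward(self, x):
        return x.mean(dim=-1)

assert get_model_head("my_pooling_head") is MyPoolingHead
```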
bc06f93db51ec5a28da1f833b4d4dd387dfb4c40 | 751 | py | Python | users/tests.py | tonyguthiga/instagram | 4d49e8c8cf0efb8ac875d2986956ddcce756864b | ["Unlicense"] | null | null | null | users/tests.py | tonyguthiga/instagram | 4d49e8c8cf0efb8ac875d2986956ddcce756864b | ["Unlicense"] | null | null | null | users/tests.py | tonyguthiga/instagram | 4d49e8c8cf0efb8ac875d2986956ddcce756864b | ["Unlicense"] | null | null | null | from django.test import TestCase
# Create your tests here.
from django.contrib.auth.models import User
from .models import Profile
class ProfileTestClass(TestCase):
'''
test class for Profile model
'''
def setUp(self):
        self.user = User.objects.create_user(username="testuser", password="secret")
self.profile_test = Profile(image='https://ucarecdn.com/620ac26e-19f7-4c0a-86d1-2b4e4b195fa8/-/crop/610x452/15,0/-/preview/',
bio="this is a test bio",
owner=self.user)
self.profile_test.save()
def test_instance_true(self):
self.profile_test.save()
        self.assertTrue(isinstance(self.profile_test, Profile))
| 34.136364 | 133 | 0.643142 | 584 | 0.77763 | 0 | 0 | 0 | 0 | 0 | 0 | 197 | 0.262317 |
bc0762cc61dc9010fbb5ce8a4cca396976aadaf0 | 800 | py | Python | tests/strangenames.py | DasSkelett/AVC-VersionFileValidator | f31bab0cf5e273cbb675ffaf921741c32b3a2e15 | ["MIT"] | 2 | 2019-12-18T16:34:06.000Z | 2020-03-13T03:31:26.000Z | tests/strangenames.py | DasSkelett/AVC-VersionFileValidator | f31bab0cf5e273cbb675ffaf921741c32b3a2e15 | ["MIT"] | 4 | 2019-12-22T18:40:31.000Z | 2020-05-07T00:52:48.000Z | tests/strangenames.py | DasSkelett/AVC-VersionFileValidator | f31bab0cf5e273cbb675ffaf921741c32b3a2e15 | ["MIT"] | null | null | null | import os
from pathlib import Path
from unittest import TestCase
import validator.validator as validator
from .test_utils import schema, build_map
class TestStrangeNames(TestCase):
old_cwd = os.getcwd()
@classmethod
def setUpClass(cls):
os.chdir('./tests/workspaces/strange-names')
@classmethod
def tearDownClass(cls):
os.chdir(cls.old_cwd)
def test_findsAll(self):
(status, successful, failed, ignored) = validator.validate_cwd('', schema, build_map)
self.assertEqual(status, 1)
self.assertSetEqual(successful, {Path('CAPS.VERSION')})
self.assertSetEqual(failed, {Path('camelCaseVersionMissing.Version')})
# Make sure 'not-detected.version.json' has not been detected.
self.assertSetEqual(ignored, set())
| 29.62963 | 93 | 0.70125 | 649 | 0.81125 | 0 | 0 | 160 | 0.2 | 0 | 0 | 145 | 0.18125 |
bc0820e97035bc22e07e063ecf6170b4f34d25c6 | 640 | py | Python | Chap 1/Class-List-(project2).py | dwhickox/NCHS-Programming-1-Python-Programs | 96eba3826585a81a015740f59329c7a06afc9db7 | ["MIT"] | null | null | null | Chap 1/Class-List-(project2).py | dwhickox/NCHS-Programming-1-Python-Programs | 96eba3826585a81a015740f59329c7a06afc9db7 | ["MIT"] | null | null | null | Chap 1/Class-List-(project2).py | dwhickox/NCHS-Programming-1-Python-Programs | 96eba3826585a81a015740f59329c7a06afc9db7 | ["MIT"] | null | null | null | # David Hickox
# Jan 12 17
# HickoxProject2
# Displays name and classes
# prints my name and classes in columns and waits for the user to hit enter to end the program
print("David Hickox")
print()
print("1st Band")
print("2nd Programming")
print("3rd Ap Pysics C")
print("4th Lunch")
print("5th Ap Lang")
print("6th TA for R&D")
print("7th Gym")
print("8th AP Calc BC")
print()
input("Press Enter To Continue")
#this works too
#input("David Hickox\n\n1st Band\n2nd Programming\n3rd Ap Pysics C\n4th Lunch\n5th Ap Lang\n6th TA for R&D\n7th Gym\n8th AP Calc BC\n\nPress Enter to continue")
| 27.826087 | 184 | 0.676563 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 534 | 0.834375 |
bc09c9f073047ca21417a9bc0a6bb6aae99588bd | 7,080 | py | Python | easy_tokenizer/tokenizer.py | tilaboy/easy-tokenizer | c7b99bafa0a22bf0956129cdfd4c9c1069a5df57 | ["MIT"] | 1 | 2019-10-23T00:40:38.000Z | 2019-10-23T00:40:38.000Z | easy_tokenizer/tokenizer.py | tilaboy/easy_tokenizer | c7b99bafa0a22bf0956129cdfd4c9c1069a5df57 | ["MIT"] | 188 | 2019-11-16T01:48:33.000Z | 2022-03-28T09:36:19.000Z | easy_tokenizer/tokenizer.py | tilaboy/easy_tokenizer | c7b99bafa0a22bf0956129cdfd4c9c1069a5df57 | ["MIT"] | null | null | null | '''Tokenizer Class'''
# -*- encoding: utf-8 -*-
import re
from .token_with_pos import TokenWithPos
from .patterns import Patterns
class Tokenizer():
'''
A basic Tokenizer class to tokenize strings and patterns
Parameters:
- regexp: regexp used to tokenize the string
'''
def __init__(self, regexp=None):
if regexp is not None:
self.regexp = regexp
else:
self.regexp = re.compile(r'[^\s]+|\s+')
self.space_regexp = re.compile(r'\s')
def _tokenize(self, text):
for match in self.regexp.finditer(text):
phrase = match.group()
if self.space_regexp.search(phrase):
continue
if self._phrase_full_match(phrase) is not None:
for adjusted_token in self._adjust_on_punc(
TokenWithPos(phrase, match.start(), match.end())):
yield adjusted_token
else:
for token in self._top_down_tokenize(phrase,
match.start()):
for adjusted_token in self._adjust_on_punc(token):
yield adjusted_token
def _adjust_on_punc(self, token):
if Patterns.PUNCT_SEQ_RE.fullmatch(token.text) and \
Patterns.PARA_SEP_RE.fullmatch(token.text) is None:
# a string of punc, very likely .. or ...
for shift, single_char in enumerate(token.text):
start_pos = token.start + shift
yield TokenWithPos(single_char,
start_pos,
start_pos + 1)
elif self._has_end_of_phrase_punc(token.text) and \
self._phrase_full_match(token.text) in [None, 'url/email']:
end_pos = token.end - 1
for splitted_token in [
TokenWithPos(token.text[:-1],
token.start,
end_pos),
TokenWithPos(token.text[-1],
end_pos,
token.end)
]:
yield splitted_token
else:
yield token
def _top_down_tokenize(self, phrase, offset=0):
# first get the web url and emails out
for token in self._top_down_level_1(phrase, offset):
yield token
def _top_down_level_1(self, phrase, offset=0):
'''
level 1: split on url, emails
'''
for sub_phrase in re.split(Patterns.ALL_WEB_CAPTURED_RE, phrase):
if sub_phrase == '':
continue
length_sub_phrase = len(sub_phrase)
if self._phrase_full_match(sub_phrase) is not None:
yield TokenWithPos(sub_phrase,
offset,
offset + length_sub_phrase)
else:
for token in self._top_down_level_2(sub_phrase, offset):
yield token
offset += length_sub_phrase
def _top_down_level_2(self, phrase, offset=0):
'''
level 2: split on number phrases
'''
for sub_phrase in re.split(Patterns.DIGITS_CAPTURED_RE, phrase):
if sub_phrase == '':
continue
length_sub_phrase = len(sub_phrase)
if self._phrase_full_match(sub_phrase) is not None:
yield TokenWithPos(sub_phrase,
offset,
offset + length_sub_phrase)
else:
for token in self._top_down_level_3(sub_phrase, offset):
yield token
offset += length_sub_phrase
def _top_down_level_3(self, phrase, offset=0):
'''
level 3: split on normal word boundaries
'''
for sub_phrase in re.split(Patterns.WORD_BF_CAPTURED_RE, phrase):
if sub_phrase == '':
continue
length_sub_phrase = len(sub_phrase)
if self._phrase_full_match(sub_phrase) is not None:
yield TokenWithPos(sub_phrase,
offset,
offset + length_sub_phrase)
else:
for token in self._top_down_level_4(sub_phrase, offset):
yield token
offset += length_sub_phrase
def _top_down_level_4(self, phrase, offset):
'''
level 4: here we handle special cases
'''
splitted = False
parts = []
# - split on hyphen #
if Patterns.HYPHEN_RE.search(phrase):
splitted = True
parts = [
part
for part in Patterns.HYPHEN_CAPTURED_RE.split(phrase)
if part != ''
]
if len(parts) == 3:
if parts[0].lower() in Patterns.COMMON_HYPHEN_START:
splitted = False
elif len(parts[0]) < 4 and len(parts[2]) < 4 \
and len(parts[0]) + len(parts[2]) < 6:
# mx-doc, tcp-ip, e-mail, hp-ux etc. #
splitted = False
if splitted:
for part in parts:
new_offset = offset + len(part)
yield TokenWithPos(part, offset, new_offset)
offset = new_offset
else:
# pick up what ever left as a token #
yield TokenWithPos(phrase, offset, offset + len(phrase))
def _has_end_of_phrase_punc(self, phrase):
end_char_is_punc = False
if phrase[-1] in Patterns.PUNCT_END_PHRASE:
end_char_is_punc = True
if Patterns.ABBREV_RE.fullmatch(phrase):
end_char_is_punc = False
return end_char_is_punc
def _phrase_full_match(self, phrase):
matched_type = None
if len(phrase) == 1:
matched_type = 'single_char'
elif phrase.isalpha():
matched_type = 'word'
elif phrase in Patterns.si_units:
matched_type = 'unit'
elif Patterns.DIGITS_RE.fullmatch(phrase):
matched_type = 'digit'
elif Patterns.PARA_SEP_RE.fullmatch(phrase):
matched_type = 'punctuation_seq'
elif Patterns.abbreviation(phrase):
matched_type = 'abbreviation'
elif Patterns.ALL_WEB_RE.fullmatch(phrase):
matched_type = 'url/email'
return matched_type
def tokenize(self, text):
        '''
        tokenize the input text

        params:
            - text: string
        output: a list of token strings (without position info)
        '''
return [token.text for token in self._tokenize(text)]
def tokenize_with_pos_info(self, text):
'''
tokenize
params:
- text: string
output:
            - a list of Token objects
'''
return list(self._tokenize(text))
| 35.223881 | 76 | 0.524859 | 6,947 | 0.981215 | 5,037 | 0.711441 | 0 | 0 | 0 | 0 | 1,034 | 0.146045 |