hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fc75cbaf0d7ddbeb46c63108a566a2e9b09f4dba | 3,781 | py | Python | src/models/predictor.py | pgniewko/solubility | cc3edbf97f474151aa8e1cf3994b2c13a00ae461 | [
"BSD-3-Clause"
] | 10 | 2019-10-06T02:24:29.000Z | 2022-03-25T15:32:34.000Z | src/models/predictor.py | shekfeh/solubility | cc3edbf97f474151aa8e1cf3994b2c13a00ae461 | [
"BSD-3-Clause"
] | 1 | 2019-12-11T23:39:28.000Z | 2019-12-11T23:39:28.000Z | src/models/predictor.py | shekfeh/solubility | cc3edbf97f474151aa8e1cf3994b2c13a00ae461 | [
"BSD-3-Clause"
] | 4 | 2021-02-24T05:04:55.000Z | 2022-03-25T15:32:35.000Z | import logging
from abc import ABC, abstractmethod
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import KFold
from scipy.stats import pearsonr
import matplotlib.pyplot as plt
class Predictor(ABC):
"""
TODO:
"""
@abstractmethod
@abstractmethod
| 34.372727 | 88 | 0.600106 | import logging
from abc import ABC, abstractmethod
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import KFold
from scipy.stats import pearsonr
import matplotlib.pyplot as plt
class Predictor(ABC):
"""
TODO:
"""
def __init__(self):
self._logS_pred_data = []
self._logS_exp_data = []
self._name = None
pass
@abstractmethod
def fit(self, **kwargs):
pass
@abstractmethod
def predict(self, **kwargs):
pass
def get_name(self):
return self._name
def train(self, train_smiles, logS_list, cv=5, fname=None, y_randomization=False):
if y_randomization:
logging.info('y-Randomization mode is chosen. Shuffling logS0 values.')
import random
random.shuffle(logS_list)
scores = []
kf = KFold(n_splits=cv, shuffle=True, random_state=None)
fold = 0
for train_index, validate_index in kf.split(train_smiles):
logging.info('*{}* model is training {} fold'.format(self._name, fold))
X_train = [train_smiles[idx] for idx in list(train_index)]
y_train = [logS_list[idx] for idx in list(train_index)]
X_validate = [train_smiles[idx] for idx in list(validate_index)]
y_validate = [logS_list[idx] for idx in list(validate_index)]
self.fit(X_train, y_train)
scores.append(self.score(X_validate, y_validate))
fold += 1
if fname is not None:
with open(fname, 'w') as fout:
fout.write(f'{self._name}\t')
means = np.mean(scores, axis=0)
stds = np.std(scores, axis=0)
for i, mean_ in enumerate(means):
fout.write('{}\t{}\t'.format(mean_, stds[i]))
fout.write('\n')
return np.mean(scores, axis=0), np.std(scores, axis=0)
def test(self, train_smiles, logS_list, test_smiles, cv=5):
predictions = []
kf = KFold(n_splits=cv, shuffle=True, random_state=None)
fold = 0
for train_index, validate_index in kf.split(train_smiles):
logging.info('*{}* model is training {} fold'.format(self._name, fold))
X_train = [train_smiles[idx] for idx in list(train_index)]
y_train = [logS_list[idx] for idx in list(train_index)]
self.fit(X_train, y_train)
y_pred = self.predict(test_smiles)
predictions.append(y_pred)
fold += 1
return predictions
def score(self, smiles_list, y_true, save_flag=True):
y_pred = self.predict(smiles_list)
mse = mean_squared_error(y_true, y_pred)
mae = mean_absolute_error(y_true, y_pred)
pearson_r = pearsonr(y_true, y_pred)[0]
if save_flag:
self._logS_pred_data += list(y_pred)
self._logS_exp_data += list(y_true)
# Return Pearson's R^2
return (mse, mae, pearson_r**2.0)
def plot(self, out_file=None):
if out_file is not None:
logging.info(f"Saving predictions and measurements in {out_file}")
with open(out_file, 'w') as fout:
for i, _ in enumerate(self._logS_exp_data):
fout.write(f'{self._logS_exp_data[i]}\t{self._logS_pred_data[i]}\n')
plt.figure(figsize=(7, 7))
plt.plot(self._logS_exp_data, self._logS_pred_data, 'o', alpha=0.05)
plt.plot([-11, 3], [-11, 3], '--', color='grey')
plt.xlabel('logS0 [mol/L] (measured)', fontsize=14)
plt.ylabel('logS0 [mol/L] (predicted)', fontsize=14)
plt.title('Model: {}'.format(self._name))
plt.show()
| 3,202 | 0 | 214 |
401baae6945a8dbb1239753759d3f6342dfaab25 | 5,894 | py | Python | addons/openacademy/tests/test_openacademy.py | radchapoom1009/openacademy | 7516da7697906a405d0d67a9bf8373860d1518d4 | [
"MIT"
] | null | null | null | addons/openacademy/tests/test_openacademy.py | radchapoom1009/openacademy | 7516da7697906a405d0d67a9bf8373860d1518d4 | [
"MIT"
] | null | null | null | addons/openacademy/tests/test_openacademy.py | radchapoom1009/openacademy | 7516da7697906a405d0d67a9bf8373860d1518d4 | [
"MIT"
] | null | null | null | import pytest
from odoo import exceptions
from pytest_tr_odoo.fixtures import env
from pytest_tr_odoo import utils
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
'''
openacademy.openacademy
'''
@pytest.mark.parametrize('test_input,expected', [
({'first_name': 'Kyle', 'last_name': 'Bogan', 'value': 10}, 'Kyle Bogan'),
({'first_name': 'Nickolas', 'last_name': 'Pacocha', 'value': 2},
'Nickolas Pacocha'),
({'first_name': 'Keon', 'last_name': 'Lemke', 'value': 4}, 'Keon Lemke')
])
@pytest.mark.parametrize('test_input,expected', [
({'first_name': 'Kyle', 'last_name': 'Bogan', 'value': 10, 'repeat': 0},
'Copy of Bogan'),
({'first_name': 'Kyle', 'last_name': 'Bogan', 'value': 4, 'repeat': 1},
'Copy of Bogan (1)'),
({'first_name': 'Kyle', 'last_name': 'Bogan', 'value': 1, 'repeat': 2},
'Copy of Bogan (2)'),
({'first_name': 'Dino', 'last_name': 'Green', 'value': 7, 'repeat': 0},
'Copy of Green'),
({'first_name': 'Aliza', 'last_name': 'Green', 'value': 4, 'repeat': 1},
'Copy of Green (1)'),
({'first_name': 'Hugh', 'last_name': 'Green', 'value': 32, 'repeat': 2},
'Copy of Green (2)')
])
@pytest.mark.parametrize('test_input,expected', [
({'first_name': 'Kyle', 'last_name': 'Bogan', 'value': 10},
{'first_name': 'Kyle', 'last_name': 'Copy of Bogan', 'value': 10}),
({'first_name': 'Cheyenne', 'last_name': 'Erdman', 'value': 2},
{'first_name': 'Cheyenne', 'last_name': 'Copy of Erdman', 'value': 2})
])
'''
openacademy.session
'''
@pytest.mark.parametrize('test_input,expected', [
({'name': 'bypass', 'start_date': '2020-01-01'},
'2020-01-01'),
({'name': 'Lead', 'start_date': '2020-01-01', 'duration': 1},
'2020-01-01'),
({'name': 'Loan', 'start_date': '2020-01-01', 'duration': 2},
'2020-01-02'),
({'name': 'Direct', 'start_date': '2020-01-01', 'duration': 10},
'2020-01-10'),
])
@pytest.mark.parametrize('test_input,expected', [
({'name': 'Plastic', 'start_date': False, 'end_date': '2020-01-01'},
False),
({'name': 'Bedfordshire', 'start_date': '2020-01-01',
'end_date': '2020-01-01'},
1),
({'name': 'Bedfordshire', 'start_date': '2020-01-01',
'end_date': '2020-01-02'},
2),
({'name': 'Bedfordshire', 'start_date': '2020-01-01',
'end_date': '2020-01-10'},
10),
])
@pytest.mark.parametrize('test_input,expected', [
({'name': 'Plastic', 'seats': -10,
'attendee_ids': [(0, 0,
{'name': 'Grant Ritchie',
'email': 'liana46@gmail.com'})]},
{'warning': {
'title': 'Incorrect \'seats\' value',
'message': 'The number of available seatsmay not be negative'
}}),
({'name': 'Plastic', 'seats': 0,
'attendee_ids': [(0, 0,
{'name': 'Grant Ritchie',
'email': 'liana46@gmail.com'})]},
{'warning': {
'title': 'Too many attendees',
'message': 'Increase seats or remove excess attendees'
}}),
({'name': 'Plastic', 'seats': 1,
'attendee_ids': [(0, 0,
{'name': 'Grant Ritchie',
'email': 'liana46@gmail.com'})]},
False),
({'name': 'Plastic', 'seats': 12,
'attendee_ids': [(0, 0,
{'name': 'Grant Ritchie',
'email': 'liana46@gmail.com'})]},
False),
])
@pytest.mark.parametrize('test_input,expected', [
({'name': 'Plastic', 'seats': 12,
'attendee_ids': []}, 'A session\'s instructor can\'t be an attendee')
])
| 33.68 | 78 | 0.60112 | import pytest
from odoo import exceptions
from pytest_tr_odoo.fixtures import env
from pytest_tr_odoo import utils
@pytest.fixture
def openacademy_model(env):
return env['openacademy.openacademy']
@pytest.fixture
def session_model(env):
return env['openacademy.session']
@pytest.fixture
def session(session_model):
return session_model.create({
'name': 'bypass'
})
@pytest.fixture
def partner(env):
return env['res.partner'].create({
'name': 'Joey Cronin III'
})
'''
openacademy.openacademy
'''
@pytest.mark.parametrize('test_input,expected', [
({'first_name': 'Kyle', 'last_name': 'Bogan', 'value': 10}, 'Kyle Bogan'),
({'first_name': 'Nickolas', 'last_name': 'Pacocha', 'value': 2},
'Nickolas Pacocha'),
({'first_name': 'Keon', 'last_name': 'Lemke', 'value': 4}, 'Keon Lemke')
])
def test_compute_full_name(openacademy_model, test_input, expected):
openacademy = openacademy_model.create(test_input)
assert openacademy.full_name == expected
@pytest.mark.parametrize('test_input,expected', [
({'first_name': 'Kyle', 'last_name': 'Bogan', 'value': 10, 'repeat': 0},
'Copy of Bogan'),
({'first_name': 'Kyle', 'last_name': 'Bogan', 'value': 4, 'repeat': 1},
'Copy of Bogan (1)'),
({'first_name': 'Kyle', 'last_name': 'Bogan', 'value': 1, 'repeat': 2},
'Copy of Bogan (2)'),
({'first_name': 'Dino', 'last_name': 'Green', 'value': 7, 'repeat': 0},
'Copy of Green'),
({'first_name': 'Aliza', 'last_name': 'Green', 'value': 4, 'repeat': 1},
'Copy of Green (1)'),
({'first_name': 'Hugh', 'last_name': 'Green', 'value': 32, 'repeat': 2},
'Copy of Green (2)')
])
def test_copy_last_name(openacademy_model, test_input, expected):
repeat = test_input['repeat']
del test_input['repeat']
openacademy = openacademy_model.create(test_input)
for i in range(repeat):
openacademy.copy()
copy_last_name = openacademy._copy_last_name()
assert copy_last_name == expected
@pytest.mark.parametrize('test_input,expected', [
({'first_name': 'Kyle', 'last_name': 'Bogan', 'value': 10},
{'first_name': 'Kyle', 'last_name': 'Copy of Bogan', 'value': 10}),
({'first_name': 'Cheyenne', 'last_name': 'Erdman', 'value': 2},
{'first_name': 'Cheyenne', 'last_name': 'Copy of Erdman', 'value': 2})
])
def test_copy(monkeypatch, openacademy_model, test_input, expected):
monkeypatch.setattr(type(openacademy_model), '_copy_last_name',
lambda x: 'Copy of %s' % test_input['last_name'])
openacademy = openacademy_model.create(test_input)
copy_openacademy = openacademy.copy()
def validate_value(key):
assert getattr(copy_openacademy, key, False) == expected[key]
map(validate_value, expected.keys())
'''
openacademy.session
'''
@pytest.mark.parametrize('test_input,expected', [
({'name': 'bypass', 'start_date': '2020-01-01'},
'2020-01-01'),
({'name': 'Lead', 'start_date': '2020-01-01', 'duration': 1},
'2020-01-01'),
({'name': 'Loan', 'start_date': '2020-01-01', 'duration': 2},
'2020-01-02'),
({'name': 'Direct', 'start_date': '2020-01-01', 'duration': 10},
'2020-01-10'),
])
def test_compute_end_date(session_model, test_input, expected):
session = session_model.create(test_input)
assert session.end_date.strftime('%Y-%m-%d') == expected
@pytest.mark.parametrize('test_input,expected', [
({'name': 'Plastic', 'start_date': False, 'end_date': '2020-01-01'},
False),
({'name': 'Bedfordshire', 'start_date': '2020-01-01',
'end_date': '2020-01-01'},
1),
({'name': 'Bedfordshire', 'start_date': '2020-01-01',
'end_date': '2020-01-02'},
2),
({'name': 'Bedfordshire', 'start_date': '2020-01-01',
'end_date': '2020-01-10'},
10),
])
def test_inverse_end_date(session_model, test_input, expected):
session = session_model.create(test_input)
assert session.duration == expected
@pytest.mark.parametrize('test_input,expected', [
({'name': 'Plastic', 'seats': -10,
'attendee_ids': [(0, 0,
{'name': 'Grant Ritchie',
'email': 'liana46@gmail.com'})]},
{'warning': {
'title': 'Incorrect \'seats\' value',
'message': 'The number of available seatsmay not be negative'
}}),
({'name': 'Plastic', 'seats': 0,
'attendee_ids': [(0, 0,
{'name': 'Grant Ritchie',
'email': 'liana46@gmail.com'})]},
{'warning': {
'title': 'Too many attendees',
'message': 'Increase seats or remove excess attendees'
}}),
({'name': 'Plastic', 'seats': 1,
'attendee_ids': [(0, 0,
{'name': 'Grant Ritchie',
'email': 'liana46@gmail.com'})]},
False),
({'name': 'Plastic', 'seats': 12,
'attendee_ids': [(0, 0,
{'name': 'Grant Ritchie',
'email': 'liana46@gmail.com'})]},
False),
])
def test_verify_valid_seats(session_model, test_input, expected):
session = session_model.create(test_input)
verify = session._verify_valid_seats()
if expected:
assert verify == expected
else:
assert not verify
@pytest.mark.parametrize('test_input,expected', [
({'name': 'Plastic', 'seats': 12,
'attendee_ids': []}, 'A session\'s instructor can\'t be an attendee')
])
def test_check_instructor_not_in_attendees(session_model,
partner,
test_input, expected):
test_input['instructor_id'] = partner.id
test_input['attendee_ids'].append((4, partner.id))
with pytest.raises(exceptions.ValidationError) as excinfo:
session_model.create(test_input)
assert excinfo.value.name == expected
| 2,017 | 0 | 242 |
6ad9628195a450b435a1a3d6b4dd609311782467 | 1,132 | py | Python | applications/migrations/0005_tshirtorder.py | hoohacks/HackX | 9d05268b4b075211048a8c727037e7807d476376 | [
"MIT"
] | 5 | 2019-09-05T05:18:39.000Z | 2021-12-29T08:47:27.000Z | applications/migrations/0005_tshirtorder.py | hoohacks/HackX | 9d05268b4b075211048a8c727037e7807d476376 | [
"MIT"
] | 18 | 2019-10-30T05:04:27.000Z | 2022-02-10T11:18:01.000Z | applications/migrations/0005_tshirtorder.py | hoohacks/HackX | 9d05268b4b075211048a8c727037e7807d476376 | [
"MIT"
] | 4 | 2019-11-09T02:52:53.000Z | 2020-05-04T08:04:32.000Z | # Generated by Django 2.2.8 on 2020-04-13 03:44
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 37.733333 | 156 | 0.607774 | # Generated by Django 2.2.8 on 2020-04-13 03:44
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0014_ticket_slack_ts'),
('applications', '0004_application_mlh_consent'),
]
operations = [
migrations.CreateModel(
name='TshirtOrder',
fields=[
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
('tshirt', models.CharField(default='', max_length=10)),
('street_address', models.CharField(default='', max_length=200)),
('state', models.CharField(default='', max_length=100)),
('country', models.CharField(default='', max_length=100)),
('zip_code', models.CharField(default='', max_length=10)),
('devpost_url', models.URLField()),
('devpost_email', models.EmailField(default='', max_length=100)),
],
),
]
| 0 | 952 | 23 |
c88568a63008aa9674f9b8dc544cb2e6da6606af | 122 | py | Python | old/fundamentus/__init__.py | mv/fundamentus-api | 3d12be9613b8310afb48771a22d5b494e1817e28 | [
"MIT"
] | 12 | 2021-01-20T12:02:05.000Z | 2022-03-24T14:22:48.000Z | old/fundamentus/__init__.py | mv/fundamentus | 43d345dc3c461101e6899fb0b6e28e7615286a9f | [
"MIT"
] | 62 | 2021-01-16T21:44:44.000Z | 2022-03-18T08:10:24.000Z | old/fundamentus/__init__.py | mv/fundamentus | 43d345dc3c461101e6899fb0b6e28e7615286a9f | [
"MIT"
] | 6 | 2021-04-29T19:45:31.000Z | 2022-03-16T14:27:52.000Z |
__all__ = ['get_fundamentus','print_csv']
from fundamentus import get_fundamentus
from fundamentus import print_csv
| 15.25 | 41 | 0.795082 |
__all__ = ['get_fundamentus','print_csv']
from fundamentus import get_fundamentus
from fundamentus import print_csv
| 0 | 0 | 0 |
9168cc8b8ae166eb1d9652010df1d2ff5b5e78ad | 4,308 | py | Python | cibyl/sources/zuul/api.py | amolkahat/cibyl | 586c3c0a6b21a8f1b71db0c5b29e7d60f9cf0def | [
"Apache-2.0"
] | null | null | null | cibyl/sources/zuul/api.py | amolkahat/cibyl | 586c3c0a6b21a8f1b71db0c5b29e7d60f9cf0def | [
"Apache-2.0"
] | null | null | null | cibyl/sources/zuul/api.py | amolkahat/cibyl | 586c3c0a6b21a8f1b71db0c5b29e7d60f9cf0def | [
"Apache-2.0"
] | null | null | null | """
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
from abc import ABC, abstractmethod
class ZuulAPIError(Exception):
"""Represents an error occurring while performing a call to Zuul's API
"""
class ZuulJobAPI(ABC):
"""Interface which defines the information that can be retrieved from
Zuul regarding a particular job.
"""
def __init__(self, tenant, job):
"""Constructor.
:param tenant: Tenant this job belongs to.
:type tenant: :class:`ZuulTenantAPI`
:param job: Description of the job being consulted by this
API. At least a field called 'name' providing the name
of the job is required here.
:type job: dict
"""
self._tenant = tenant
self._job = job
@property
def tenant(self):
"""
:return: The tenant this job belongs to.
:rtype: :class:`ZuulTenantAPI`
"""
return self._tenant
@property
def name(self):
"""
:return: Name of the job being consulted.
:rtype: str
"""
return self._job['name']
@abstractmethod
def builds(self):
"""
:return: The builds of this job.
:rtype: list[dict]
:raises ZuulAPIError: If the request failed.
"""
raise NotImplementedError
class ZuulTenantAPI(ABC):
"""Interface which defines the information that can be retrieved from
Zuul regarding a particular tenant.
"""
def __init__(self, tenant):
"""Constructor.
:param tenant: Description of the tenant being consulted by this
API. At least a field called 'name' providing the name
of the tenant is required here.
:type tenant: dict
"""
self._tenant = tenant
@property
def name(self):
"""
:return: Name of the tenant being consulted.
:rtype: str
"""
return self._tenant['name']
@abstractmethod
def builds(self):
"""A build is an instance of a job running independently.
:return: Information about all executed builds under this tenant.
:rtype: list[dict]
:raises ZuulAPIError: If the request failed.
"""
raise NotImplementedError
@abstractmethod
def buildsets(self):
"""A buildset is a collection of builds running under a common context.
:return: Information about all executed buildsets under this tenant.
:rtype: list[dict]
:raises ZuulAPIError: If the request failed.
"""
raise NotImplementedError
@abstractmethod
def jobs(self):
"""A job describes the steps that need to be taken in order to test
a project.
:return: Information about all jobs under this tenant.
:rtype: list[:class:`ZuulJobAPI`]
:raises ZuulAPIError: If the request failed.
"""
raise NotImplementedError
class ZuulAPI(ABC):
"""Interface describing the actions that can be taken over Zuul's API.
"""
@abstractmethod
def info(self):
"""Information which define the target host. Among this info there
are entries such as 'capabilities' or 'authentication' param.
:return: General information about the host.
:rtype: dict
:raises ZuulAPIError: If the request failed.
"""
raise NotImplementedError
@abstractmethod
def tenants(self):
"""Gets all tenants currently present on the host.
:return: A sub-api to retrieve information about all tenants on the
host.
:rtype: list[:class:`ZuulTenantAPI`]
:raises ZuulAPIError: If the request failed.
"""
raise NotImplementedError
| 29.108108 | 79 | 0.627205 | """
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
from abc import ABC, abstractmethod
class ZuulAPIError(Exception):
"""Represents an error occurring while performing a call to Zuul's API
"""
class ZuulJobAPI(ABC):
"""Interface which defines the information that can be retrieved from
Zuul regarding a particular job.
"""
def __init__(self, tenant, job):
"""Constructor.
:param tenant: Tenant this job belongs to.
:type tenant: :class:`ZuulTenantAPI`
:param job: Description of the job being consulted by this
API. At least a field called 'name' providing the name
of the job is required here.
:type job: dict
"""
self._tenant = tenant
self._job = job
@property
def tenant(self):
"""
:return: The tenant this job belongs to.
:rtype: :class:`ZuulTenantAPI`
"""
return self._tenant
@property
def name(self):
"""
:return: Name of the job being consulted.
:rtype: str
"""
return self._job['name']
@abstractmethod
def builds(self):
"""
:return: The builds of this job.
:rtype: list[dict]
:raises ZuulAPIError: If the request failed.
"""
raise NotImplementedError
class ZuulTenantAPI(ABC):
"""Interface which defines the information that can be retrieved from
Zuul regarding a particular tenant.
"""
def __init__(self, tenant):
"""Constructor.
:param tenant: Description of the tenant being consulted by this
API. At least a field called 'name' providing the name
of the tenant is required here.
:type tenant: dict
"""
self._tenant = tenant
@property
def name(self):
"""
:return: Name of the tenant being consulted.
:rtype: str
"""
return self._tenant['name']
@abstractmethod
def builds(self):
"""A build is an instance of a job running independently.
:return: Information about all executed builds under this tenant.
:rtype: list[dict]
:raises ZuulAPIError: If the request failed.
"""
raise NotImplementedError
@abstractmethod
def buildsets(self):
"""A buildset is a collection of builds running under a common context.
:return: Information about all executed buildsets under this tenant.
:rtype: list[dict]
:raises ZuulAPIError: If the request failed.
"""
raise NotImplementedError
@abstractmethod
def jobs(self):
"""A job describes the steps that need to be taken in order to test
a project.
:return: Information about all jobs under this tenant.
:rtype: list[:class:`ZuulJobAPI`]
:raises ZuulAPIError: If the request failed.
"""
raise NotImplementedError
class ZuulAPI(ABC):
"""Interface describing the actions that can be taken over Zuul's API.
"""
@abstractmethod
def info(self):
"""Information which define the target host. Among this info there
are entries such as 'capabilities' or 'authentication' param.
:return: General information about the host.
:rtype: dict
:raises ZuulAPIError: If the request failed.
"""
raise NotImplementedError
@abstractmethod
def tenants(self):
"""Gets all tenants currently present on the host.
:return: A sub-api to retrieve information about all tenants on the
host.
:rtype: list[:class:`ZuulTenantAPI`]
:raises ZuulAPIError: If the request failed.
"""
raise NotImplementedError
| 0 | 0 | 0 |
7c2fea003b45be32d55da3d4eb17d104b5ac3b18 | 3,264 | py | Python | main.py | alexander-g/Root-Detector | cc7af00d204a294ed967bbaab55c03e6a9a15bcc | [
"MIT"
] | 1 | 2022-02-17T16:18:00.000Z | 2022-02-17T16:18:00.000Z | main.py | ExPlEcoGreifswald/Root-Detector | cc7af00d204a294ed967bbaab55c03e6a9a15bcc | [
"MIT"
] | null | null | null | main.py | ExPlEcoGreifswald/Root-Detector | cc7af00d204a294ed967bbaab55c03e6a9a15bcc | [
"MIT"
] | null | null | null | import webbrowser, os, tempfile, io, sys, time
import glob, shutil
import warnings
warnings.simplefilter('ignore')
import flask
from flask import Flask, escape, request
import processing
#need to import all the packages here in the main file because of dill-ed ipython model
import tensorflow as tf
import tensorflow.keras as keras
import numpy as np
arange = np.arange
import skimage.io as skio
import skimage.morphology as skmorph
import skimage.util as skimgutil
import PIL
PIL.Image.MAX_IMAGE_PIXELS = None #Needed to open large images
app = Flask('DigIT! Root Detector', static_folder=os.path.abspath('./HTML'))
is_debug = sys.argv[0].endswith('.py')
if os.environ.get("WERKZEUG_RUN_MAIN") == "true" or not is_debug:
TEMPPREFIX = 'root_detector_'
TEMPFOLDER = tempfile.TemporaryDirectory(prefix=TEMPPREFIX)
print('Temporary Directory: %s'%TEMPFOLDER.name)
#delete all previous temporary folders if not cleaned up properly
for tmpdir in glob.glob( os.path.join(os.path.dirname(TEMPFOLDER.name), TEMPPREFIX+'*') ):
if tmpdir != TEMPFOLDER.name:
print('Removing ',tmpdir)
shutil.rmtree(tmpdir)
@app.route('/')
@app.route('/static/<path:path>')
@app.route('/file_upload', methods=['POST'])
@app.route('/images/<imgname>')
@app.route('/process_image/<imgname>')
@app.route('/processing_progress/<imgname>')
@app.route('/delete_image/<imgname>')
@app.route('/settings', methods=['GET', 'POST'])
if os.environ.get("WERKZEUG_RUN_MAIN") == "true" or not is_debug:
with app.app_context():
processing.init()
if not is_debug:
print('Flask started')
webbrowser.open('http://localhost:5000', new=2)
app.run(host='127.0.0.1',port=5000, debug=is_debug)
| 30.792453 | 97 | 0.672794 | import webbrowser, os, tempfile, io, sys, time
import glob, shutil
import warnings
warnings.simplefilter('ignore')
import flask
from flask import Flask, escape, request
import processing
#need to import all the packages here in the main file because of dill-ed ipython model
import tensorflow as tf
import tensorflow.keras as keras
import numpy as np
arange = np.arange
import skimage.io as skio
import skimage.morphology as skmorph
import skimage.util as skimgutil
import PIL
PIL.Image.MAX_IMAGE_PIXELS = None #Needed to open large images
app = Flask('DigIT! Root Detector', static_folder=os.path.abspath('./HTML'))
is_debug = sys.argv[0].endswith('.py')
if os.environ.get("WERKZEUG_RUN_MAIN") == "true" or not is_debug:
TEMPPREFIX = 'root_detector_'
TEMPFOLDER = tempfile.TemporaryDirectory(prefix=TEMPPREFIX)
print('Temporary Directory: %s'%TEMPFOLDER.name)
#delete all previous temporary folders if not cleaned up properly
for tmpdir in glob.glob( os.path.join(os.path.dirname(TEMPFOLDER.name), TEMPPREFIX+'*') ):
if tmpdir != TEMPFOLDER.name:
print('Removing ',tmpdir)
shutil.rmtree(tmpdir)
@app.route('/')
def root():
return app.send_static_file('index.html')
@app.route('/static/<path:path>')
def staticfiles(path):
return app.send_static_file(path)
@app.route('/file_upload', methods=['POST'])
def file_upload():
files = request.files.getlist("files")
for f in files:
filename = request.form.get('filename', f.filename)
print('Upload: %s'%filename)
fullpath = os.path.join(TEMPFOLDER.name, os.path.basename(filename) )
f.save(fullpath)
#save the file additionally as jpg to make sure format is compatible with browser (tiff)
processing.write_as_jpeg(fullpath+'.jpg', processing.load_image(fullpath) )
return 'OK'
@app.route('/images/<imgname>')
def images(imgname):
return flask.send_from_directory(TEMPFOLDER.name, imgname)
@app.route('/process_image/<imgname>')
def process_image(imgname):
fullpath = os.path.join(TEMPFOLDER.name, imgname)
stats = processing.process_image( fullpath )
return flask.jsonify({'statistics':stats})
@app.route('/processing_progress/<imgname>')
def processing_progress(imgname):
return str(processing.processing_progress(imgname))
@app.route('/delete_image/<imgname>')
def delete_image(imgname):
fullpath = os.path.join(TEMPFOLDER.name, imgname)
print('DELETE: %s'%fullpath)
if os.path.exists(fullpath):
os.remove(fullpath)
return 'OK'
@app.route('/settings', methods=['GET', 'POST'])
def settings():
if request.method=='POST':
processing.set_settings(request.get_json(force=True))
return 'OK'
elif request.method=='GET':
return flask.jsonify(processing.get_settings())
if os.environ.get("WERKZEUG_RUN_MAIN") == "true" or not is_debug:
with app.app_context():
processing.init()
if not is_debug:
print('Flask started')
webbrowser.open('http://localhost:5000', new=2)
app.run(host='127.0.0.1',port=5000, debug=is_debug)
| 1,213 | 0 | 184 |
c3c4e160f8c3532ab54b6f59cad341eff94860f9 | 33,385 | py | Python | vilya/models/project.py | ochukai/code | 4865a7e5ee6b42f26fd29c350a2e50d7636a6462 | [
"BSD-3-Clause"
] | null | null | null | vilya/models/project.py | ochukai/code | 4865a7e5ee6b42f26fd29c350a2e50d7636a6462 | [
"BSD-3-Clause"
] | null | null | null | vilya/models/project.py | ochukai/code | 4865a7e5ee6b42f26fd29c350a2e50d7636a6462 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import shutil
import logging
import re
from datetime import datetime
from vilya.config import DOMAIN, DEVELOP_MODE
from vilya.libs import gyt
from vilya.libs.permdir import get_repo_root
from vilya.libs.text import format_md_or_rst
from vilya.libs.store import store, mc, cache, ONE_DAY, IntegrityError
from vilya.libs.props import PropsMixin
from vilya.libs.validators import check_project_name
from vilya.libs.signals import (
repo_create_signal, repo_watch_signal, repo_fork_signal)
from vilya.models.hook import CodeDoubanHook
from vilya.models.git import GitRepo
from vilya.models.ngit.repo import ProjectRepo
from vilya.models.user import User
from vilya.models.inbox import Inbox
from vilya.models.consts import (
PROJECT_BC_KEY, MIRROR_HTTP_PROXY, NOTIFY_ON, PERM_PUSH, PERM_ADMIN)
from vilya.models.project_conf import make_project_conf
from vilya.models.utils import linear_normalized
from vilya.models.project_issue import ProjectIssue
from vilya.models.tag import TagMixin, TAG_TYPE_PROJECT_ISSUE
from vilya.models.release import get_unreleased_commit_num
from vilya.models.lru_counter import (
ProjectOwnLRUCounter, ProjectWatchLRUCounter)
from vilya.models.milestone import Milestone
from vilya.models.utils.switch import WhiteListSwitch
from ellen.utils import JagareError
from vilya.models.nproject import ProjectWatcher
MCKEY_PROJECT = 'code:project:%s:v2'
MCKEY_PROJECT_ID_BY_NAME = 'code:project_id:name:%s'
MCKEY_PROJECT_IDS_BY_OWNER_SORTBY_SUMUP = 'code:project_ids:sumup:%s'
PROPS_LANGUAGE_KEY = 'language'
PROPS_LANGUAGES_KEY = 'languages'
| 34.170931 | 79 | 0.587479 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import shutil
import logging
import re
from datetime import datetime
from vilya.config import DOMAIN, DEVELOP_MODE
from vilya.libs import gyt
from vilya.libs.permdir import get_repo_root
from vilya.libs.text import format_md_or_rst
from vilya.libs.store import store, mc, cache, ONE_DAY, IntegrityError
from vilya.libs.props import PropsMixin
from vilya.libs.validators import check_project_name
from vilya.libs.signals import (
repo_create_signal, repo_watch_signal, repo_fork_signal)
from vilya.models.hook import CodeDoubanHook
from vilya.models.git import GitRepo
from vilya.models.ngit.repo import ProjectRepo
from vilya.models.user import User
from vilya.models.inbox import Inbox
from vilya.models.consts import (
PROJECT_BC_KEY, MIRROR_HTTP_PROXY, NOTIFY_ON, PERM_PUSH, PERM_ADMIN)
from vilya.models.project_conf import make_project_conf
from vilya.models.utils import linear_normalized
from vilya.models.project_issue import ProjectIssue
from vilya.models.tag import TagMixin, TAG_TYPE_PROJECT_ISSUE
from vilya.models.release import get_unreleased_commit_num
from vilya.models.lru_counter import (
ProjectOwnLRUCounter, ProjectWatchLRUCounter)
from vilya.models.milestone import Milestone
from vilya.models.utils.switch import WhiteListSwitch
from ellen.utils import JagareError
from vilya.models.nproject import ProjectWatcher
MCKEY_PROJECT = 'code:project:%s:v2'
MCKEY_PROJECT_ID_BY_NAME = 'code:project_id:name:%s'
MCKEY_PROJECT_IDS_BY_OWNER_SORTBY_SUMUP = 'code:project_ids:sumup:%s'
PROPS_LANGUAGE_KEY = 'language'
PROPS_LANGUAGES_KEY = 'languages'
class CodeDoubanProject(PropsMixin, TagMixin):
def __init__(self, id, name, owner_id, summary, time, product,
git_path, trac_conf, fork_from=None, origin_project_id=None,
intern_banned=None, can_push=None, mirror_url=None,
mirror_proxy=None):
self.id = id
self.name = name
self.owner_id = owner_id
self.summary = summary
self.time = time
self.product = product
self.git_path = git_path
self.trac_conf = trac_conf
self.fork_from = fork_from
self.can_push = can_push
self.origin_project_id = origin_project_id or self.id
self._conf = None
self.intern_banned = intern_banned
self.index_name = name.replace('/', '_')
self.mirror_url = mirror_url
self.mirror_proxy = mirror_proxy
self._repo = None
def __repr__(self):
return '<CodeDoubanProject %s>' % self.name
def __eq__(self, other):
return isinstance(other, CodeDoubanProject) and self.id == other.id
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.id)
def __str__(self):
return self.name
@property
def dashboard_enabled(self):
dashboard_enabled_switch = WhiteListSwitch(
'DASHBOARD_ENABLED_PROJECTS')
enabled_projects = dashboard_enabled_switch.get()
return self.name in enabled_projects
@property
def deploy_link_enabled(self):
deploy_link_enabled_switch = WhiteListSwitch(
'DEPLOY_LINK_ENABLED_PROJECTS')
enabled_projects = deploy_link_enabled_switch.get()
return self.name in enabled_projects
@property
def url(self):
return '/%s/' % self.name
@property
def repo_url(self):
return "%s/%s" % (DOMAIN, self.name)
@property
def full_url(self):
hostname = re.sub(r'http[s]?://', '', DOMAIN)
return '%s/%s' % (hostname, self.name)
@property
def repository(self):
return '%s/%s.git' % (DOMAIN, self.name)
@property
def ssh_repository(self):
return 'git@code.dapps.douban.com:%s.git' % self.name
@property
def short_summary(self):
length = len(self.summary)
return self.summary if length <= 20 else self.summary + "..."
@property
def description(self):
return self.summary
def as_dict(self):
return self.get_info(without_commits=True)
def get_info(self, without_commits=False):
#authors = self.git.get_gitstats_data().authors
info = {
'url': self.repo_url,
'name': self.name,
'description': self.summary,
'product': self.product,
'watched_count': self.get_watched_count(self.id),
'committers_count': len(self.committers),
'forked_count': self.get_forked_count(self.id),
'open_issues_count': self.n_open_issues,
'open_tickets_count': self.n_open_tickets,
'owner': {
'name': self.owner_name,
'avatar': User(self.owner_id).avatar_url,
},
}
forked_from = self.get_forked_from()
if forked_from:
info.update({"forked_from": forked_from.get_info(without_commits)})
if not without_commits:
commit = self.repo.get_commit('HEAD')
if commit:
info['last_commit'] = commit.as_dict()
return info
def is_admin(self, username):
return self.has_push_perm(username)
@property
def hooks(self):
rs = store.execute(
'select hook_id, url, project_id from codedouban_hooks '
'where project_id=%s', self.id)
hooks = [CodeDoubanHook(hook_id, url, project_id)
for hook_id, url, project_id in rs]
return hooks
@property
def owner_name(self):
return self.owner_id
@classmethod
@cache(MCKEY_PROJECT % '{id}')
def get(cls, id):
rs = store.execute(
"select project_id, project_name, owner_id, summary, "
"time, product, git_path, trac_conf, fork_from, "
"origin_project, intern_banned, can_push from codedouban_projects "
"where project_id = %s", id)
return rs and cls(*rs[0]) or None
@classmethod
@cache(MCKEY_PROJECT_ID_BY_NAME % '{name}')
def get_id_by_name(cls, name):
rs = store.execute("select project_id from codedouban_projects "
"where project_name = _latin1%s", name)
return rs[0][0] if rs else None
@classmethod
def get_by_name(cls, name):
id = cls.get_id_by_name(name)
if not id:
return None
return cls.get(id)
@classmethod
def gets_by_owner_id(cls, owner_id):
rs = store.execute("select project_id"
" from codedouban_projects"
" where owner_id = %s", owner_id)
ids = [r for r, in rs]
return [cls.get(id) for id in ids]
@staticmethod
def count_by_owner_id(owner_id):
owner_id = owner_id.strip()
if not owner_id:
return 0
rs = store.execute("select count(1) from codedouban_projects "
"where owner_id = %s",
owner_id)
if rs and rs[0]:
return rs[0][0]
else:
return 0
@classmethod
def search_by_name(cls, name, limit=None):
if check_project_name(name.strip()):
return []
rs = store.execute("select project_id from codedouban_projects "
"where lower(project_name) like %s",
"%%%s%%" % name.lower())
project_ids = [pid for pid, in rs]
return [cls.get(project_id) for project_id in project_ids][:limit]
@classmethod
def search_for_owners(cls, name, limit=None):
rs = store.execute(
"select distinct(owner_id) from codedouban_projects "
"where owner_id like %s", '%' + name + '%')
return rs and [owner for owner, in rs][:limit]
@classmethod
def gets(cls, ids):
projs = (cls.get(id) for id in ids)
return [p for p in projs if p]
@classmethod
def get_ids(cls, owner=None, created=None):
if owner and created:
rs = store.execute(
"select project_id from codedouban_projects "
"where owner_id=%s and time>=%s", (owner, created))
elif owner:
rs = store.execute(
"select project_id from codedouban_projects "
"where owner_id=%s", owner)
elif created:
rs = store.execute(
"select project_id from codedouban_projects "
"where time>=%s", created)
else:
rs = store.execute("select project_id from codedouban_projects")
return [pid for pid, in rs]
PROJECTS_SORT_BYS = ['lru', 'sumup']
@classmethod
def get_projects(cls, owner=None, sortby=None):
if sortby and sortby not in CodeDoubanProject.PROJECTS_SORT_BYS:
return []
if sortby == 'lru':
ids = cls.get_ids(owner=owner)
ProjectOwnLRUCounter(owner, ids).sort()
elif sortby == 'sumup':
ids = cls.get_project_ids_sortby_sumup(owner)
else:
ids = cls.get_ids(owner=owner)
results = cls.gets(ids)
return results
@classmethod
@cache(MCKEY_PROJECT_IDS_BY_OWNER_SORTBY_SUMUP % '{owner}', expire=ONE_DAY)
def get_project_ids_sortby_sumup(cls, owner=None):
projects = cls.gets(cls.get_ids(owner=owner))
FORKED_WEIGHT = 2
UPDATED_WEIGHT = 2
ORIGIN_WEIGHT = 4
forked_keys = linear_normalized(
[CodeDoubanProject.get_forked_count(repo.id)
for repo in projects])
updated_keys = linear_normalized([_.repo.get_last_update_timestamp()
for _ in projects])
origin_keys = [0.9 if _.id == _.origin_project_id else 0.1
for _ in projects]
sort_keys = {}
for i, repo in enumerate(projects):
sort_keys[repo.id] = (forked_keys[i] * FORKED_WEIGHT +
updated_keys[i] * UPDATED_WEIGHT +
origin_keys[i] * ORIGIN_WEIGHT) / \
(FORKED_WEIGHT + UPDATED_WEIGHT + ORIGIN_WEIGHT)
projects.sort(key=lambda repo: sort_keys[repo.id], reverse=True)
return [repo.id for repo in projects]
@classmethod
def _flush_project_ids_by_owner(cls, owner=None):
mc.delete(MCKEY_PROJECT_IDS_BY_OWNER_SORTBY_SUMUP % owner)
@classmethod
def add_watch(cls, proj_id, user_id):
try:
ProjectWatcher.create(project_id=proj_id,
user_id=user_id)
except IntegrityError:
return None
repo_watch_signal.send(
cls.get(proj_id), project_id=proj_id, author=user_id)
@classmethod
def get_watched_count(cls, proj_id):
return ProjectWatcher.count(project_id=proj_id)
@classmethod
def del_watch(cls, proj_id, user_id):
w = ProjectWatcher.get(user_id=user_id, project_id=proj_id)
if w:
w.delete()
@classmethod
def has_watched(cls, proj_id, user):
w = ProjectWatcher.get(user_id=user.name, project_id=proj_id)
return True if w else False
@classmethod
def get_watched_projects_by_user(cls, user, sortby=None):
rs = ProjectWatcher.gets(user_id=user)
ids = [r.project_id for r in rs]
results = cls.gets(ids)
if sortby:
results.sort(key=lambda proj:
proj.repo.get_last_update_timestamp(), reverse=True)
return results
@classmethod
def get_watched_others_ids_by_user(cls, user):
# FIXME: "in"
rs = store.execute('select project_id from codedouban_watches '
'where user_id=%s and project_id not in '
'(select project_id from codedouban_projects '
'where owner_id=%s)', (user, user))
ids = [proj_id for proj_id, in rs]
return ids
@classmethod
def get_watched_others_projects_by_user(cls, user, sortby=None):
ids = cls.get_watched_others_ids_by_user(user)
if sortby == 'lru':
ProjectWatchLRUCounter(user, ids).sort()
results = cls.gets(ids)
return results
def get_watch_users(self):
return [User(user_id)
for user_id in CodeDoubanProject.get_watch_user_ids(self.id)]
def get_watch_users_by_channel(self, channel='notify'):
if channel == "notify":
return self.get_watch_users()
elif channel == "email":
return filter(lambda u: u.settings.watching_email == NOTIFY_ON,
self.get_watch_users())
elif channel == "irc":
return filter(lambda u: u.settings.watching_irc == NOTIFY_ON,
self.get_watch_users())
@classmethod
def get_watch_user_ids(cls, project_id):
rs = ProjectWatcher.gets(project_id=project_id)
return [r.user_id for r in rs]
@classmethod
def add_committer(cls, proj_id, user_id):
try:
store.execute("insert into codedouban_committers "
"(project_id, user_id) values (%s, %s)",
(proj_id, user_id))
except IntegrityError:
return None
store.commit()
@classmethod
def del_committer(cls, proj_id, user_id):
store.execute("delete from codedouban_committers "
"where user_id=%s and project_id=%s", (user_id, proj_id))
store.commit()
@classmethod
def get_committers_by_project(cls, proj_id):
rs = store.execute("select user_id from codedouban_committers "
"where project_id=%s", proj_id)
return [User(user_id) for user_id, in rs]
def has_push_perm(self, user_id):
perm = self.get_user_perm(user_id)
if perm is None:
perm = self.get_group_perm(user_id)
if perm and perm >= PERM_PUSH:
return True
return False
def get_user_perm(self, user_id):
if user_id == self.owner_id:
return PERM_ADMIN
committers = self.committers
if User(user_id) in committers:
return PERM_PUSH
def get_group_perm(self, user_id):
from models.team_group import ProjectGroup
pgs = ProjectGroup.gets(project_id=self.id)
perm = None
for pg in pgs:
g = pg.group
if not g:
continue
if g.is_member(user_id):
perm = perm if perm and perm > g.permission else g.permission
return perm
@property
def committers(self):
return CodeDoubanProject.get_committers_by_project(self.id)
def fork(self, fork_name, fork_owner):
return CodeDoubanProject.add(fork_name, fork_owner, self.summary,
self.product, fork_from=self.id)
def new_fork(self, owner):
name = "%s/%s" % (owner, self.realname)
project = CodeDoubanProject.add(name,
owner,
self.summary,
self.product,
fork_from=self.id)
if project:
# FIXME: why do this?
project.update(self.summary, self.product, project.name,
self.intern_banned)
return project
    @classmethod
    def get_forked_count(cls, proj_id):
        """Count forks of *proj_id*, excluding the project itself.

        Two counts are taken: by the ``origin_project`` column and by the
        legacy ``fork_from`` column. NOTE(review): the final ``or`` falls
        back to the fork_from count only when the origin-based count is
        zero — confirm this fallback (rather than summing or always using
        one column) is intended.
        """
        # TODO: update tests!!
        rs = store.execute('select count(1) from codedouban_projects where '
                           'origin_project=%s and project_id!=%s',
                           (proj_id, proj_id))
        orig_fork_count = rs[0][0]
        rs = store.execute('select count(1) from codedouban_projects where '
                           'fork_from=%s and project_id!=%s',
                           (proj_id, proj_id))
        fork_count = rs[0][0]
        return orig_fork_count or fork_count
def get_forked_users(self):
rs = store.execute('select owner_id from codedouban_projects '
'where fork_from=%s', self.id)
return [User(user_id) for user_id, in rs]
def get_forked_project(self, owner_id):
rs = store.execute('select project_id from codedouban_projects '
'where fork_from=%s and owner_id=%s',
(self.id, owner_id))
if rs:
id = rs[0][0]
return CodeDoubanProject.get(id)
def get_forked_projects(self):
rs = store.execute('select project_id from codedouban_projects '
'where fork_from=%s', self.id)
ids = [str(id) for (id,) in rs]
return CodeDoubanProject.gets(ids)
def get_forked_from(self):
if self.fork_from:
return CodeDoubanProject.get(self.fork_from)
return None
def get_fork_network(self):
rs = store.execute("select project_id from codedouban_projects "
"where origin_project=%s", self.origin_project_id)
ids = [pid for pid, in rs]
return CodeDoubanProject.gets(ids)
def validate(self):
validators = [check_project_name(self.name, 'Project Name')]
errors = [error for error in validators if error]
return errors
# why by name
@classmethod
def update(cls, summary, product, name, intern_banned=None):
project_id = cls.get_id_by_name(name)
if not project_id:
return False
store.execute(
"update codedouban_projects set summary=%s, product=%s, "
"intern_banned=%s where project_name=_latin1%s",
(summary, product, intern_banned, name))
store.commit()
cls.clear_mc(project_id)
def update_summary(self, summary):
store.execute("update codedouban_projects "
"set summary=%s "
"where project_id=%s",
(summary, self.id))
store.commit()
self.clear_mc(self.id)
def update_product(self, product):
store.execute("update codedouban_projects "
"set product=%s "
"where project_id=%s",
(product, self.id))
store.commit()
self.clear_mc(self.id)
def update_intern_banned(self, banned):
store.execute("update codedouban_projects "
"set intern_banned=%s "
"where project_id=%s",
(banned, self.id))
store.commit()
self.clear_mc(self.id)
def update_can_push(self, can_push):
push = 1 if can_push else 0
store.execute("update codedouban_projects "
"set can_push=%s "
"where project_id=%s",
(push, self.id))
store.commit()
self.clear_mc(self.id)
@classmethod
def add(cls, name, owner_id, summary='', product=None, fork_from=None,
create_trac=False, intern_banned=None, mirror=None):
# TODO: add project blacklist
assert ' ' not in name, "Cannot create a project with spacey name"
git_path = "%s.git" % name
try:
proj_id = store.execute(
"insert into codedouban_projects "
"(project_name, owner_id, summary, time, product, "
"git_path, trac_conf, fork_from, intern_banned) values "
"(%s, %s, %s, %s, %s, %s, %s, %s, %s)",
(name, owner_id, summary, datetime.now(),
product, git_path, name, fork_from, intern_banned))
except IntegrityError:
return None
if fork_from is not None:
origins = store.execute("select origin_project from "
"codedouban_projects "
"where project_id=%s", fork_from)
origin_id, = origins[0]
else:
origin_id = proj_id
store.execute("update codedouban_projects set origin_project=%s "
"where project_id=%s", (origin_id, proj_id))
store.commit()
cls.clear_mc(proj_id)
project = cls.get(proj_id)
# TODO: split create_repo, fork_repo, mirror_repo
# create git repo
if fork_from:
orig_proj = cls.get(fork_from)
orig_proj.fork_repo(project)
elif mirror:
from queues_handler import mirror_projects_add
mirror_projects_add(mirror, git_path, proj_id)
else:
project.create_repo()
if not mirror:
project.update_hooks()
cls._flush_project_ids_by_owner(owner_id)
if fork_from:
repo_fork_signal.send(
project, project_id=proj_id, author=owner_id)
else:
repo_create_signal.send(
project, project_id=proj_id, creator=owner_id)
return project
# FIXME: clean property about git
# all git things
@property
def git(self):
return GitRepo(self.git_real_path, project=self)
@property
def repo(self):
if not self._repo:
self._repo = ProjectRepo(self)
return self._repo
@property
def default_branch(self):
return self.repo.default_branch
@property
def default_sha(self):
return self.repo.sha(self.default_branch)
def update_hooks(self):
hook_dir = os.path.join(self.repo_path,
'hooks')
link = False if DEVELOP_MODE else True
gyt.update_hook(hook_dir, link)
# TODO: remove this
@classmethod
def create_git_repo(cls, git_path):
git_path = os.path.join(get_repo_root(), git_path)
#check_call(['git', 'init', '--bare', git_path])
gyt.repo(git_path, init=True)
# TODO: remove this
def clone_git(self, to_path):
git_path = os.path.join(self.repo_root_path, self.git_path)
to_path = os.path.join(self.repo_root_path, to_path)
rep = gyt.repo(git_path)
rep.clone(to_path)
# TODO: 统一git路径
@property
def git_dir(self):
git_path = os.path.join(self.repo_root_path, self.git_path)
return git_path
# TODO: 统一git路径
# FIXME: remove this, please use project.repo_path
@property
def git_real_path(self):
return os.path.join(self.repo_root_path, '%s.git' % self.name)
# end git things
@property
def conf(self):
if not self._conf:
self._conf = make_project_conf(self.name, self.repo)
return self._conf
def get_onimaru_url(self, id, project=None):
if self.conf is None:
return None
domain = 'http://onimaru.intra.douban.com'
if project is None:
# example:
# onimaru: /subject/movie
project = self._conf.get('onimaru', '')
if not project:
return None
project = project.strip()
if not project.startswith('/'):
project = '/' + project
return '{0}{1}/group/{2}/'.format(domain, project, id)
@property
def redirect_url(self):
return "%s/%s/" % (DOMAIN, "hub")
@property
def realname(self):
return self.name.rpartition("/")[-1]
def put_in_wsgi_environ(self, environ):
environ['hub.project'] = self
@classmethod
def get_from_wsgi_environ(cls, environ):
return environ.get('hub.project')
@property
def owner(self):
return User(self.owner_id)
def is_owner(self, user):
return user and user.name == self.owner.name
@property
def forked(self):
if self.fork_from:
return CodeDoubanProject.get(self.fork_from)
def delete(self):
from models.nteam import TeamProjectRelationship
shutil.rmtree(self.git_real_path, ignore_errors=True)
for hook in self.hooks:
hook.destroy()
ProjectWatcher.deletes(project_id=self.id)
store.execute("delete from codedouban_projects "
"where project_id=%s", (self.id,))
store.commit()
self.clear_mc(self.id)
CodeDoubanProject._flush_project_ids_by_owner(self.owner_id)
rs = TeamProjectRelationship.gets(project_id=self.id)
for r in rs:
r.delete()
def get_actions(self):
user = PROJECT_BC_KEY % (self.owner_id, self.name)
return Inbox.get(user=user).get_actions()
@classmethod
def exists(cls, name):
return bool(cls.get_id_by_name(name))
def transfer_to(self, user):
sql = ("update codedouban_projects "
"set owner_id=%s where owner_id=%s and project_id=%s")
store.execute(sql, (user, self.owner_id, self.id))
store.commit()
self.clear_mc(self.id)
def transfer_to_top(self, user=None):
new_user = user if user else self.owner_id
sql = ("update codedouban_projects set project_name=%s, "
"git_path=%s, trac_conf=%s, owner_id=%s "
"where owner_id=%s and project_id=%s")
store.execute(sql, (self.realname, self.realname, self.realname,
new_user, self.owner_id, self.id))
store.commit()
self.clear_mc(self.id)
    def rename(self, repo_name):
        """Rename the project and move its git directory on disk.

        Returns False when the new name is invalid or the on-disk move
        fails; returns None on success (callers must test for False, not
        for truthiness).
        """
        sql = ("update codedouban_projects set project_name=%s, "
               "git_path=%s where owner_id=%s and project_id=%s")
        if check_project_name(repo_name):
            return False
        if '/' in self.name:
            # Namespaced project: keep the owner prefix, swap only the tail.
            project_name = "%s/%s" % (self.owner_id, repo_name)
        else:
            project_name = repo_name
        git_path = "%s.git" % project_name
        store.execute(sql, (project_name, git_path, self.owner_id, self.id))
        if self._move(git_path):
            self.git_path = git_path
            self.name = project_name
            store.commit()
        else:
            # NOTE(review): the UPDATE above has been executed but is neither
            # committed nor rolled back on this path — confirm the store
            # discards uncommitted work. The memcached entry keyed by the old
            # name is also not cleared here; verify against clear_mc usage.
            return False
def _move(self, git_path):
base_path = self.repo_root_path
new_path = os.path.join(base_path, git_path)
if os.path.exists(git_path):
return False
else:
shutil.move(self.git_dir, new_path)
return True
@property
def n_open_issues(self):
return ProjectIssue.get_count_by_project_id(self.id, "open")
@property
def n_open_tickets(self):
from models.ticket import Ticket
return Ticket.get_count_by_proj(self.id)
@property
def n_closed_issues(self):
return ProjectIssue.get_count_by_project_id(self.id, "closed")
def get_uuid(self):
return "/project/%s" % self.id
@property
def language(self):
return self.get_props_item(PROPS_LANGUAGE_KEY, '')
@language.setter
def language(self, value):
self.set_props_item(PROPS_LANGUAGE_KEY, value)
@property
def languages(self):
return self.get_props_item(PROPS_LANGUAGES_KEY, {})
@languages.setter
def languages(self, value):
self.set_props_item(PROPS_LANGUAGES_KEY, value)
def doc_tabs(self):
try:
doc_tabs = []
if self.conf['docs']:
sorted_docs = []
for k, v in self.conf['docs'].items():
if not isinstance(v, dict):
sorted_docs.append((k, k, k, k))
else:
sorted_docs.append((v.get('sort', k), k,
v.get('name', k),
v.get('dir', k)))
sorted_docs.sort()
sorted_docs = [(_[1], _[2], _[3]) for _ in sorted_docs]
for doc, name, s_path in sorted_docs:
doc_tabs.append((doc, '/docs/%s' % doc, name, s_path))
except Exception, err:
logging.warning("Error in config: %r" % err)
return [('', '', '#ERROR_IN_CONFIG!')]
return doc_tabs
@property
def tag_type(self):
return TAG_TYPE_PROJECT_ISSUE
# FIXME: remove this, please use project.mirror
@property
def is_mirror_project(self):
from vilya.models.mirror import CodeDoubanMirror
return CodeDoubanMirror.get_by_project_id(self.id) is not None
@property
def unreleased_commit_num(self):
return get_unreleased_commit_num(self)
@property
def mirror(self):
from vilya.models.mirror import CodeDoubanMirror
mirror = CodeDoubanMirror.get_by_project_id(self.id)
return mirror if mirror else None
def fetch(self):
mirror = self.mirror
env = {}
if mirror and mirror.with_proxy:
env['HTTP_PROXY'] = MIRROR_HTTP_PROXY
env['HTTPS_PROXY'] = MIRROR_HTTP_PROXY
return self.repo.fetch_(q=True, env=env)
@property
def repo_path(self):
return os.path.join(get_repo_root(), '%s.git' % self.name)
def fork_repo(self, project):
self.repo.clone(project.repo_path, bare=True)
# TODO: use project instead of url
def mirror_repo(self, url, bare=None, proxy=None):
env = None
if proxy:
env = {
'HTTP_PROXY': MIRROR_HTTP_PROXY,
'HTTPS_PROXY': MIRROR_HTTP_PROXY
}
ProjectRepo.mirror(url, self.repo_path, env=env)
def create_repo(self):
ProjectRepo.init(self.repo_path)
@property
def readme(self):
# TODO: remove tree loop
ref = self.default_branch
repo = self.repo
try:
tree = repo.get_tree(ref)
except JagareError as e:
logging.warning("JagareError: %r" % e)
return ''
for item in tree:
if (item['type'] == 'blob'
and (item['name'] == 'README'
or item['name'].startswith('README.'))):
readme_content = repo.get_file_by_ref("%s:%s" % (
ref, item['path']))
return format_md_or_rst(item['path'], readme_content)
return ''
@property
def issue_milestones(self):
rs = Milestone.gets_by_project(self)
return rs
@property
def repo_root_path(self):
return get_repo_root()
@property
def open_parent_pulls(self):
from vilya.models.ticket import Ticket
from vilya.models.pull import PullRequest
pulls = []
parent = self.get_forked_from()
if parent:
pulls = [PullRequest.get_by_proj_and_ticket(parent.id,
t.ticket_id)
for t in Ticket.gets_by_proj(parent.id,
limit=9999)]
pulls = [p for p in pulls
if p and p.from_proj and p.from_proj.id == self.id]
return pulls
@property
def open_family_pulls(self):
return self.open_parent_pulls + self.open_pulls
@property
def open_network_pulls(self):
from vilya.models.ticket import Ticket
from vilya.models.pull import PullRequest
pulls = []
projects = self.get_fork_network()
for project in projects:
ps = [PullRequest.get_by_proj_and_ticket(project.id,
t.ticket_id)
for t in Ticket.gets_by_proj(project.id,
limit=9999)]
pulls.extend([p for p in ps
if p and p.from_proj and p.from_proj.id == self.id])
return pulls + self.open_pulls
@property
def open_pulls(self):
from vilya.models.ticket import Ticket
from vilya.models.pull import PullRequest
pulls = [PullRequest.get_by_proj_and_ticket(self.id,
t.ticket_id)
for t in Ticket.gets_by_proj(self.id,
limit=9999)]
return pulls
def get_pulls_by_commit_shas(self, sha):
prs = self.open_pulls
return [pr for pr in prs
if sha in pr.get_commits_shas()]
@property
def groups(self):
from vilya.models.team_group import ProjectGroup
rs = ProjectGroup.gets(project_id=self.id)
return [r.group for r in rs]
@property
def remote_name(self):
remote_name = 'hub/' + self.name
remote_name = remote_name.replace('~', '_')
return remote_name
@classmethod
def clear_mc(cls, id_):
project = cls.get(id_)
if project:
mc.delete(MCKEY_PROJECT_ID_BY_NAME % project.name)
mc.delete(MCKEY_PROJECT % id_)
@classmethod
def clear_pull(cls, name, pull_id):
project = cls.get_by_name(name)
if not project:
return None
from vilya.models.pull import PullRequest
pull = PullRequest.gets_by(to_project=project.id, ticket_id=pull_id,
force_flush=True)
if pull:
return True
return None
| 26,776 | 4,950 | 23 |
fc26a6143130ea089dd5fa566469d93b221e0392 | 1,410 | py | Python | release/scripts/addons_contrib/data_overrides/__init__.py | noorbeast/BlenderSource | 65ebecc5108388965678b04b43463b85f6c69c1d | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 3 | 2019-09-16T10:29:19.000Z | 2022-02-11T14:43:18.000Z | engine/2.80/scripts/addons_contrib/data_overrides/__init__.py | byteinc/Phasor | f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | [
"Unlicense"
] | null | null | null | engine/2.80/scripts/addons_contrib/data_overrides/__init__.py | byteinc/Phasor | f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | [
"Unlicense"
] | null | null | null | ### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
bl_info = {
"name": "Data Overrides",
"author": "Lukas Toenne",
"version": (0, 1),
"blender": (2, 73, 0),
"location": "Scene Properties",
"description": "Override settings and caching for linked objects",
"warning": "",
"wiki_url": "",
"tracker_url": "https://developer.blender.org/maniphest/task/edit/form/2/",
"category": "Object",
}
import bpy
from data_overrides import override, ui
if __name__ == "__main__":
register()
| 30 | 79 | 0.685106 | ### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
bl_info = {
"name": "Data Overrides",
"author": "Lukas Toenne",
"version": (0, 1),
"blender": (2, 73, 0),
"location": "Scene Properties",
"description": "Override settings and caching for linked objects",
"warning": "",
"wiki_url": "",
"tracker_url": "https://developer.blender.org/maniphest/task/edit/form/2/",
"category": "Object",
}
import bpy
from data_overrides import override, ui
def register():
    """Register every submodule's classes/handlers with Blender."""
    for module in (override, ui):
        module.register()
def unregister():
    """Undo register(), walking the submodules in the same order."""
    for module in (override, ui):
        module.unregister()
if __name__ == "__main__":
register()
| 78 | 0 | 46 |
c2a74c3d75b83ae19c04de00181ae230c6ab67e9 | 7,725 | py | Python | lab2/zad4.py | vedrankolka/DU | 531cf53b030bd9ef750bfcaea697535042249fe3 | [
"Apache-2.0"
] | null | null | null | lab2/zad4.py | vedrankolka/DU | 531cf53b030bd9ef750bfcaea697535042249fe3 | [
"Apache-2.0"
] | null | null | null | lab2/zad4.py | vedrankolka/DU | 531cf53b030bd9ef750bfcaea697535042249fe3 | [
"Apache-2.0"
] | null | null | null | import torch
from torch.utils import data
import numpy as np
import matplotlib.pyplot as plt
import os
import pickle
from pathlib import Path
import argparse
import convolutional_model as cm
import skimage as ski
import math
DATA_DIR = default_data_dir = Path(__file__).parent / 'data' / 'cifar-10-batches-py'
img_height = 32
img_width = 32
num_channels = 3
num_classes = 10
train_x = np.ndarray((0, img_height * img_width * num_channels), dtype=np.float32)
train_y = []
for i in range(1, 6):
subset = unpickle(os.path.join(DATA_DIR, 'data_batch_%d' % i))
train_x = np.vstack((train_x, subset['data']))
train_y += subset['labels']
train_x = train_x.reshape((-1, num_channels, img_height, img_width)).transpose(0, 2, 3, 1)
train_y = np.array(train_y, dtype=np.long)
subset = unpickle(os.path.join(DATA_DIR, 'test_batch'))
test_x = subset['data'].reshape((-1, num_channels, img_height, img_width)).transpose(0, 2, 3, 1).astype(np.float32)
test_y = np.array(subset['labels'], dtype=np.long)
valid_size = 5000
train_x, train_y = shuffle_data(train_x, train_y)
valid_x = train_x[:valid_size, ...]
valid_y = train_y[:valid_size, ...]
train_x = train_x[valid_size:, ...]
train_y = train_y[valid_size:, ...]
data_mean = train_x.mean((0, 1, 2))
data_std = train_x.std((0, 1, 2))
train_x = (train_x - data_mean) / data_std
valid_x = (valid_x - data_mean) / data_std
test_x = (test_x - data_mean) / data_std
train_x = torch.from_numpy(train_x.transpose(0, 3, 1, 2))
valid_x = torch.from_numpy(valid_x.transpose(0, 3, 1, 2))
test_x = torch.from_numpy(test_x.transpose(0, 3, 1, 2))
train_y = torch.from_numpy(train_y)
valid_y = torch.from_numpy(valid_y)
test_y = torch.from_numpy(test_y)
# =================== ARGS stuff =================
args = cm.parse_arguments()
config = vars(args)
# =================== MODEL stuff =================
model = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5, stride=1, padding=2, padding_mode='replicate'),
torch.nn.ReLU(),
torch.nn.MaxPool2d(kernel_size=3, stride=2),
torch.nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2, padding_mode='replicate'),
torch.nn.ReLU(),
torch.nn.MaxPool2d(kernel_size=3, stride=2),
torch.nn.Flatten(start_dim=1, end_dim=-1),
torch.nn.Linear(in_features=1568, out_features=256, bias=True),
torch.nn.ReLU(),
torch.nn.Linear(in_features=256, out_features=128, bias=True),
torch.nn.ReLU(),
torch.nn.Linear(in_features=128, out_features=10, bias=True)
)
optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=args.gamma)
loss = torch.nn.CrossEntropyLoss()
results = train(train_x, train_y, valid_x, valid_y, model, loss, optimizer, scheduler, config)
lrs, train_losses, avg_train_accuracies, valid_losses, avg_valid_accuracies = results
epochs = np.arange(0, len(lrs))
fig, (ax1, ax2, ax3) = plt.subplots(3)
ax1.plot(epochs, lrs, label='learning rate')
ax1.set_title('Learning rate')
ax1.legend()
ax2.plot(epochs, train_losses, label='train')
ax2.plot(epochs, valid_losses, label='validation')
ax2.set_title('Cross-entropy loss')
ax2.legend()
ax3.plot(epochs, avg_train_accuracies, label='train')
ax3.plot(epochs, avg_valid_accuracies, label='validation')
ax3.set_title('Average class accuracy')
ax3.legend()
plt.show()
| 34.486607 | 115 | 0.659029 | import torch
from torch.utils import data
import numpy as np
import matplotlib.pyplot as plt
import os
import pickle
from pathlib import Path
import argparse
import convolutional_model as cm
import skimage as ski
import math
def shuffle_data(data_x, data_y):
    """Shuffle two parallel arrays with one shared random permutation.

    Rows of ``data_x`` and entries of ``data_y`` stay aligned; both results
    are C-contiguous copies.
    """
    order = np.random.permutation(data_x.shape[0])
    return (np.ascontiguousarray(data_x[order]),
            np.ascontiguousarray(data_y[order]))
def unpickle(file):
    """Load one pickled CIFAR-10 batch file and return its dict.

    Uses a ``with`` block so the handle is closed even if unpickling fails
    (the original leaked the handle on error), and avoids shadowing the
    builtin ``dict``. ``encoding='latin1'`` decodes the Python-2 pickles
    shipped with CIFAR-10.
    """
    with open(file, 'rb') as fo:
        return pickle.load(fo, encoding='latin1')
def evaluate(Y, Y_):
    """Compute accuracy, per-class (recall, precision) pairs, and the
    confusion matrix for predictions ``Y`` against ground truth ``Y_``.

    Returns (accuracy, pr, M) where pr[i] = (recall_i, precision_i) and
    M[i, j] counts samples with true class i predicted as class j.
    NOTE(review): callers pass torch tensors here; np.bincount converts
    them — confirm this stays on CPU tensors.
    """
    pr = []
    # Number of classes inferred from the largest true label.
    n = max(Y_)+1
    # Confusion matrix via a single bincount over the flattened (true, pred) pairs.
    M = np.bincount(n * Y_ + Y, minlength=n*n).reshape(n, n)
    for i in range(n):
        tp_i = M[i, i]
        fn_i = np.sum(M[i, :]) - tp_i
        fp_i = np.sum(M[:, i]) - tp_i
        # tn_i is computed but unused below — kept for reference.
        tn_i = np.sum(M) - fp_i - fn_i - tp_i
        # NOTE(review): divides by zero when a class has no true/predicted
        # samples — confirm that cannot happen for the evaluated splits.
        recall_i = tp_i / (tp_i + fn_i)
        precision_i = tp_i / (tp_i + fp_i)
        pr.append( (recall_i, precision_i) )
    accuracy = np.trace(M) / np.sum(M)
    return accuracy, pr, M
def draw_conv_filters(epoch, step, weights, save_dir):
    """Save a tiled grayscale/RGB image of first-layer conv filters.

    ``weights`` has shape (num_filters, num_channels, k, k); the filters
    are min-max normalized jointly and laid out on an 8-column grid with a
    1-pixel border, then written as a PNG into ``save_dir``.
    """
    w = weights.copy()
    num_filters = w.shape[0]
    num_channels = w.shape[1]
    k = w.shape[2]
    # Filters must be square.
    assert w.shape[3] == w.shape[2]
    # Reorder to (k, k, channels, filter) for easy per-filter slicing.
    w = w.transpose(2, 3, 1, 0)
    # Joint min-max normalization to [0, 1] across all filters.
    w -= w.min()
    w /= w.max()
    border = 1
    cols = 8
    rows = math.ceil(num_filters / cols)
    width = cols * k + (cols-1) * border
    height = rows * k + (rows-1) * border
    img = np.zeros([height, width, num_channels])
    for i in range(num_filters):
        r = int(i / cols) * (k + border)
        c = int(i % cols) * (k + border)
        img[r:r+k, c:c+k, :] = w[:, :, :, i]
    img = (img * 255).astype(np.uint8)
    filename = 'epoch_%02d_step_%06d.png' % (epoch, step)
    ski.io.imsave(os.path.join(save_dir, filename), img)
def get_lr(optimizer):
    """Return the learning rate of the optimizer's first parameter group,
    or None if the optimizer has no parameter groups."""
    first_group = next(iter(optimizer.param_groups), None)
    return None if first_group is None else first_group['lr']
def train(train_x, train_y, valid_x, valid_y, model: torch.nn.Module,
          loss, optimizer, scheduler=None, config=dict()):
    """Run the training loop and record per-epoch statistics.

    :param train_x, train_y: training tensors (wrapped in cm.MyDataset)
    :param valid_x, valid_y: validation tensors, evaluated in one pass
    :param model: torch model; model[0] must be the first conv layer
                  (its weights are visualized via draw_conv_filters)
    :param loss: criterion, e.g. CrossEntropyLoss
    :param optimizer: torch optimizer
    :param scheduler: optional LR scheduler, stepped once per epoch
    :param config: dict with batch_size, max_epochs, print_frequency, save_dir
    :return: (lrs, train_losses, avg_train_accuracies,
              valid_losses, avg_valid_accuracies), one entry per epoch
    """
    batch_size = config.get('batch_size', 64)
    max_epochs = config.get('max_epochs', 5)
    print_frequency = config.get('print_frequency', 100)
    train_dataset = cm.MyDataset(train_x, train_y)
    train_dataloader = data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    # Quantities tracked per epoch (for plotting by the caller).
    lrs = []
    train_losses = []
    valid_losses = []
    avg_train_accuracies = []
    avg_valid_accuracies = []
    for epoch in range(max_epochs):
        print(f"Epoch {epoch}")
        for batch, batch_data in enumerate(train_dataloader):
            train_x_batch, train_y_batch = batch_data
            logits = model.forward(train_x_batch)
            batch_loss = loss(logits, train_y_batch)
            batch_loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            if batch % print_frequency == 0:
                print(f"epoch: {epoch} batch: {batch} loss: {batch_loss}")
                weights = model[0].weight.detach().numpy()
                draw_conv_filters(epoch, batch_size*batch, weights, config['save_dir'])
        if scheduler is not None:
            scheduler.step()
        with torch.no_grad():
            learning_rate = get_lr(optimizer)
            lrs.append(learning_rate)
            train_loss = 0.0
            batch_count = 0
            train_y_pred = []
            train_y_true = []
            for batch_x, batch_y in train_dataloader:
                batch_count += 1
                batch_logits = model.forward(batch_x)
                batch_loss = loss(batch_logits, batch_y)
                train_loss += batch_loss
                train_y_pred.append(torch.argmax(batch_logits, axis=1))
                train_y_true.append(batch_y)
            train_loss /= batch_count
            train_y_pred = torch.hstack(train_y_pred)
            train_y_true = torch.hstack(train_y_true)
            # BUG FIX: the dataloader shuffles (shuffle=True), so this second
            # pass yields predictions in a new random order. Compare them
            # against labels collected in the same order (train_y_true), not
            # against the original `train_y` ordering, which made the train
            # accuracy essentially random.
            train_accuracy, pr, train_conf_matrix = evaluate(train_y_true, train_y_pred)
            train_losses.append(train_loss)
            avg_train_accuracies.append(train_accuracy)
            valid_logits = model.forward(valid_x)
            valid_loss = loss(valid_logits, valid_y)
            valid_y_pred = torch.argmax(valid_logits, axis=1)
            valid_accuracy, pr, valid_conf_matrix = evaluate(valid_y, valid_y_pred)
            valid_losses.append(valid_loss)
            avg_valid_accuracies.append(valid_accuracy)
    return lrs, train_losses, avg_train_accuracies, valid_losses, avg_valid_accuracies
# --- Dataset location and geometry ------------------------------------------
DATA_DIR = default_data_dir = Path(__file__).parent / 'data' / 'cifar-10-batches-py'
img_height = 32
img_width = 32
num_channels = 3
num_classes = 10
# --- Load the five CIFAR-10 training batches --------------------------------
train_x = np.ndarray((0, img_height * img_width * num_channels), dtype=np.float32)
train_y = []
for i in range(1, 6):
    subset = unpickle(os.path.join(DATA_DIR, 'data_batch_%d' % i))
    train_x = np.vstack((train_x, subset['data']))
    train_y += subset['labels']
# Reshape flat rows to NHWC images.
train_x = train_x.reshape((-1, num_channels, img_height, img_width)).transpose(0, 2, 3, 1)
# np.int64 replaces np.long, which was removed in NumPy 1.24; int64 labels
# become LongTensors, as required by CrossEntropyLoss.
train_y = np.array(train_y, dtype=np.int64)
subset = unpickle(os.path.join(DATA_DIR, 'test_batch'))
test_x = subset['data'].reshape((-1, num_channels, img_height, img_width)).transpose(0, 2, 3, 1).astype(np.float32)
test_y = np.array(subset['labels'], dtype=np.int64)
# --- Split off a validation set ---------------------------------------------
valid_size = 5000
train_x, train_y = shuffle_data(train_x, train_y)
valid_x = train_x[:valid_size, ...]
valid_y = train_y[:valid_size, ...]
train_x = train_x[valid_size:, ...]
train_y = train_y[valid_size:, ...]
# Normalize all splits with statistics from the training split only.
data_mean = train_x.mean((0, 1, 2))
data_std = train_x.std((0, 1, 2))
train_x = (train_x - data_mean) / data_std
valid_x = (valid_x - data_mean) / data_std
test_x = (test_x - data_mean) / data_std
# Convert NHWC numpy arrays to NCHW torch tensors.
train_x = torch.from_numpy(train_x.transpose(0, 3, 1, 2))
valid_x = torch.from_numpy(valid_x.transpose(0, 3, 1, 2))
test_x = torch.from_numpy(test_x.transpose(0, 3, 1, 2))
train_y = torch.from_numpy(train_y)
valid_y = torch.from_numpy(valid_y)
test_y = torch.from_numpy(test_y)
# =================== ARGS stuff =================
args = cm.parse_arguments()
config = vars(args)
# =================== MODEL stuff =================
model = torch.nn.Sequential(
    torch.nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5, stride=1, padding=2, padding_mode='replicate'),
    torch.nn.ReLU(),
    torch.nn.MaxPool2d(kernel_size=3, stride=2),
    torch.nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2, padding_mode='replicate'),
    torch.nn.ReLU(),
    torch.nn.MaxPool2d(kernel_size=3, stride=2),
    torch.nn.Flatten(start_dim=1, end_dim=-1),
    torch.nn.Linear(in_features=1568, out_features=256, bias=True),
    torch.nn.ReLU(),
    torch.nn.Linear(in_features=256, out_features=128, bias=True),
    torch.nn.ReLU(),
    torch.nn.Linear(in_features=128, out_features=10, bias=True)
)
optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=args.gamma)
loss = torch.nn.CrossEntropyLoss()
results = train(train_x, train_y, valid_x, valid_y, model, loss, optimizer, scheduler, config)
lrs, train_losses, avg_train_accuracies, valid_losses, avg_valid_accuracies = results
# --- Plot learning rate, loss, and accuracy over epochs ---------------------
epochs = np.arange(0, len(lrs))
fig, (ax1, ax2, ax3) = plt.subplots(3)
ax1.plot(epochs, lrs, label='learning rate')
ax1.set_title('Learning rate')
ax1.legend()
ax2.plot(epochs, train_losses, label='train')
ax2.plot(epochs, valid_losses, label='validation')
ax2.set_title('Cross-entropy loss')
ax2.legend()
ax3.plot(epochs, avg_train_accuracies, label='train')
ax3.plot(epochs, avg_valid_accuracies, label='validation')
ax3.set_title('Average class accuracy')
ax3.legend()
plt.show()
| 4,154 | 0 | 138 |
d004abc9f0360b5abe7791995600ba3e14457e3f | 670 | py | Python | schoolport/app_core/migrations/0007_tb_param_fees.py | yotink522/schoolport | c6cfd0230ca05fb44f77c2f27c7e200828547bd5 | [
"MIT"
] | null | null | null | schoolport/app_core/migrations/0007_tb_param_fees.py | yotink522/schoolport | c6cfd0230ca05fb44f77c2f27c7e200828547bd5 | [
"MIT"
] | null | null | null | schoolport/app_core/migrations/0007_tb_param_fees.py | yotink522/schoolport | c6cfd0230ca05fb44f77c2f27c7e200828547bd5 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-05-07 11:20
from django.db import migrations, models
| 29.130435 | 114 | 0.589552 | # Generated by Django 3.1.7 on 2021-05-07 11:20
from django.db import migrations, models
class Migration(migrations.Migration):
    # Creates the TB_Param_Fees table: a named fee with an optional price
    # and currency. Applied after the items-price-unit migration.
    dependencies = [
        ('app_core', '0006_tb_param_items_price_unit'),
    ]
    operations = [
        migrations.CreateModel(
            name='TB_Param_Fees',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fee_name', models.CharField(max_length=255)),
                ('price', models.FloatField(null=True)),
                ('price_currency', models.CharField(max_length=20, null=True)),
            ],
        ),
    ]
| 0 | 556 | 23 |
75bb9561ab5c5fa328176c32ed78531d0f721459 | 535 | py | Python | stackEx1.py | JaeGyu/PythonEx_1 | e67053db6ca7431c3dd66351c190c53229e3f141 | [
"MIT"
] | null | null | null | stackEx1.py | JaeGyu/PythonEx_1 | e67053db6ca7431c3dd66351c190c53229e3f141 | [
"MIT"
] | null | null | null | stackEx1.py | JaeGyu/PythonEx_1 | e67053db6ca7431c3dd66351c190c53229e3f141 | [
"MIT"
] | null | null | null |
if __name__ == "__main__":
s = Stack()
s.push(1)
s.push(2)
print(s.top())
s.push(5)
print(s.size())
print(s.toString())
s.pop()
print(s.toString())
print(s.size())
| 16.71875 | 31 | 0.527103 |
class Stack():
    """Minimal LIFO stack backed by a Python list (tail = top)."""

    def __init__(self):
        self.store = []

    def push(self, num):
        """Place *num* on top of the stack."""
        self.store.append(num)

    def pop(self):
        """Remove and return the top element."""
        return self.store.pop()

    def size(self):
        """Return the number of stored elements."""
        return len(self.store)

    def toString(self):
        """Return the backing list, ordered bottom to top."""
        return self.store

    def top(self):
        """Return the top element without removing it."""
        return self.store[-1]
# Small manual demo of the Stack class; prints 2, 3, [1, 2, 5], [1, 2], 2.
if __name__ == "__main__":
    s = Stack()
    s.push(1)
    s.push(2)
    print(s.top())
    s.push(5)
    print(s.size())
    print(s.toString())
    s.pop()
    print(s.toString())
    print(s.size())
| 149 | -7 | 188 |
5c8fb708a31f92aa30ed34d57058f849faa1abff | 488 | py | Python | S4/S4 Decompiler/Old Libraries/xdis/opcodes/opcode_23.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | 1 | 2021-05-20T19:33:37.000Z | 2021-05-20T19:33:37.000Z | S4/S4 Decompiler/Old Libraries/xdis/opcodes/opcode_23.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | S4/S4 Decompiler/Old Libraries/xdis/opcodes/opcode_23.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | # (C) Copyright 2017, 2019 by Rocky Bernstein
"""
CPython 2.3 bytecode opcodes
This is a like Python 2.3's opcode.py with some classification
of stack usage.
"""
import xdis.opcodes.opcode_2x as opcode_2x
from xdis.opcodes.base import (
finalize_opcodes,
format_extended_arg,
init_opdata,
update_pj2,
)
version = 2.3
l = locals()
init_opdata(l, opcode_2x, version)
update_pj2(globals(), l)
opcode_arg_fmt = {"EXTENDED_ARG": format_extended_arg}
finalize_opcodes(l)
| 18.074074 | 62 | 0.741803 | # (C) Copyright 2017, 2019 by Rocky Bernstein
"""
CPython 2.3 bytecode opcodes
This is a like Python 2.3's opcode.py with some classification
of stack usage.
"""
import xdis.opcodes.opcode_2x as opcode_2x
from xdis.opcodes.base import (
finalize_opcodes,
format_extended_arg,
init_opdata,
update_pj2,
)
version = 2.3
l = locals()
init_opdata(l, opcode_2x, version)
update_pj2(globals(), l)
opcode_arg_fmt = {"EXTENDED_ARG": format_extended_arg}
finalize_opcodes(l)
| 0 | 0 | 0 |
7c5ff8d87f9e2145f480d8635186467fbfdbd46e | 1,614 | py | Python | main/dal.py | ifdog/ego | cee685c75e55fdc1e76ae9cb9fec86fa93c959fe | [
"BSD-2-Clause"
] | null | null | null | main/dal.py | ifdog/ego | cee685c75e55fdc1e76ae9cb9fec86fa93c959fe | [
"BSD-2-Clause"
] | null | null | null | main/dal.py | ifdog/ego | cee685c75e55fdc1e76ae9cb9fec86fa93c959fe | [
"BSD-2-Clause"
] | null | null | null | from datetime import datetime, timezone
from main import models,statics
| 25.21875 | 80 | 0.693928 | from datetime import datetime, timezone
from main import models,statics
def note_get(key: str):
    """Return the Note whose key exactly matches *key*, or None."""
    records = models.Note.objects.filter(key__exact=key)
    # exists() issues a cheap EXISTS query instead of a full COUNT(*).
    if records.exists():
        return records[0]
    return None
def note_add(key: str,markdown :str,draft:str):
    """Create and save a Note; attach a draft only when one is provided.

    NOTE(review): datetime.now() is naive even though timezone is imported
    at module level — confirm against the project's USE_TZ setting.
    """
    _note = models.Note(key=key, markdown=markdown,markdown_time=datetime.now())
    if draft:
        _note.draft = draft
        _note.draft_time = datetime.now()
    _note.save()
    return _note
def tag_matches(name: str, limit: int = 10):
    """Return at most *limit* tags whose name contains *name*, case-insensitively."""
    return models.Tag.objects.filter(name__icontains=name)[:limit]
def tag_get_or_create(name: str):
    """Return the Tag named exactly *name*, creating and saving it if absent.

    NOTE(review): this check-then-create is not atomic; Django's
    Tag.objects.get_or_create would be — confirm before switching, since
    it raises on duplicate names.
    """
    query = models.Tag.objects.filter(name__exact=name)
    # exists() is cheaper than count() when only a boolean is needed.
    if query.exists():
        return query[0]
    _tag = models.Tag(name=name)
    _tag.save()
    return _tag
def note_query_all(part_of_key: str):
    """Return every Note whose key contains *part_of_key* (case-insensitive)."""
    return models.Note.objects.filter(key__icontains=part_of_key)
def note_query_public(part_of_key:str):
    """Return notes matching *part_of_key* that carry the public tag.

    Restricts to notes tagged statics.SHOW_PUBLIC_TAG; distinct() removes
    the duplicate rows the tag join can produce.
    """
    _show_tag = models.Tag.objects.filter(name=statics.SHOW_PUBLIC_TAG)
    _notes = models.Note.objects.filter(key__icontains=part_of_key)
    _notes = _notes.filter(tags__in=_show_tag).distinct()
    return _notes
def contains_tag_attr(tags, name: str):
    """Return True if the tag queryset *tags* contains a tag named exactly *name*."""
    # exists() lets the database stop at the first match instead of counting.
    return tags.filter(name=name).exists()
def update_markdown(note:models.Note, content:str):
    """Overwrite the note's markdown body and stamp the edit time."""
    note.markdown = content
    note.markdown_time = datetime.now()
    note.save()
def update_draft(note:models.Note, content:str):
    """Overwrite the note's draft text and stamp the draft edit time."""
    note.draft = content
    note.draft_time = datetime.now()
    note.save()
| 1,326 | 0 | 207 |
022100c5e98ea3d365d83de2b43fbb825efddb21 | 309 | py | Python | write_a_function.py | athena15/hackerrank | 4ddb6f0c80369c125c5c2016fad6f790c37027e5 | [
"MIT"
] | null | null | null | write_a_function.py | athena15/hackerrank | 4ddb6f0c80369c125c5c2016fad6f790c37027e5 | [
"MIT"
] | null | null | null | write_a_function.py | athena15/hackerrank | 4ddb6f0c80369c125c5c2016fad6f790c37027e5 | [
"MIT"
] | null | null | null | # HackerRank "Write a function" Leap Year challenge
# https://www.hackerrank.com/challenges/write-a-function/problem
is_leap(1990)
| 16.263158 | 64 | 0.669903 | # HackerRank "Write a function" Leap Year challenge
# https://www.hackerrank.com/challenges/write-a-function/problem
def is_leap(year):
    """Return True if *year* is a Gregorian leap year.

    A year is a leap year when divisible by 4, except century years,
    which must be divisible by 400. The original had a dead local
    (`leap`) and a four-branch nest; this is the equivalent single
    boolean expression.
    """
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
is_leap(1990)
| 152 | 0 | 23 |
c12bdbbecf9f179a4230f51b1650c5787bf123f9 | 2,609 | py | Python | base/utils.py | muzzley/manager-sdk-python | 479a0108b9a3b691c96090b2594bf6e733fe221a | [
"MIT"
] | 1 | 2018-10-29T09:39:42.000Z | 2018-10-29T09:39:42.000Z | base/utils.py | habitio/manager-sdk-python | 479a0108b9a3b691c96090b2594bf6e733fe221a | [
"MIT"
] | null | null | null | base/utils.py | habitio/manager-sdk-python | 479a0108b9a3b691c96090b2594bf6e733fe221a | [
"MIT"
] | null | null | null | import json
import threading
import time
from functools import wraps
from uuid import UUID
from typing import AnyStr
def format_response(resp):
"""
Returns a str formatted response
:param resp: Requests response
:return: response text as a string, formatted as a json if valid
"""
try:
error_msg = format_str(resp.json(), is_json=True)
except ValueError: # requests returns a ValueError when resp.text is not a valid json
error_msg = format_str(resp.text, is_json=False)
return error_msg
def format_str(str_value, is_json):
"""
Returns a formatted string with break lines; if is_json True, pretty format the output
:param str_value: plain text or json value
:param is_json: Boolean
:return: str
"""
str_value = json.dumps(str_value, indent=4, sort_keys=True) if is_json else str_value
return '\n {} \n'.format(str_value)
def is_json(str_value):
"""A function to check if a string contains a valid json"""
try:
json.loads(str_value)
except ValueError:
return False
return True
def rate_limited(max_per_second: int):
"""
Rate-limits the decorated function locally, for one process.
source: https://gist.github.com/gregburek/1441055#gistcomment-945625
"""
lock = threading.Lock()
min_interval = 1.0 / max_per_second
return decorate
def synchronized(lock):
""" Synchronization decorator. """
return wrap
| 25.831683 | 90 | 0.620544 | import json
import threading
import time
from functools import wraps
from uuid import UUID
from typing import AnyStr
def format_response(resp):
    """
    Returns a str formatted response
    :param resp: Requests response
    :return: response text as a string, formatted as a json if valid
    """
    try:
        # Keep the try block narrow: only the JSON decode is expected to
        # fail. The original wrapped format_str too, so an unrelated
        # ValueError (e.g. a circular structure in json.dumps) was
        # silently treated as "not JSON".
        body = resp.json()
    except ValueError:  # requests raises ValueError for an invalid JSON body
        return format_str(resp.text, is_json=False)
    return format_str(body, is_json=True)
def format_str(str_value, is_json):
    """
    Returns a formatted string with break lines; if is_json True, pretty
    format the output.
    :param str_value: plain text or json value
    :param is_json: Boolean
    :return: str
    """
    if is_json:
        str_value = json.dumps(str_value, indent=4, sort_keys=True)
    return '\n {} \n'.format(str_value)
def is_json(str_value):
    """A function to check if a string contains a valid json"""
    try:
        json.loads(str_value)
        return True
    except ValueError:
        return False
def rate_limited(max_per_second: int):
    """
    Rate-limits the decorated function locally, for one process.
    source: https://gist.github.com/gregburek/1441055#gistcomment-945625

    All calls to the decorated function share one lock, so concurrent
    callers are serialized and spaced at least 1/max_per_second apart.
    """
    lock = threading.Lock()
    min_interval = 1.0 / max_per_second
    def decorate(func):
        # Initialized at decoration time, so the very first call may also
        # wait up to min_interval.
        last_time_called = time.perf_counter()
        @wraps(func)
        def rate_limited_function(*args, **kwargs):
            lock.acquire()
            nonlocal last_time_called
            try:
                # Sleep off whatever remains of the minimum interval.
                elapsed = time.perf_counter() - last_time_called
                left_to_wait = min_interval - elapsed
                if left_to_wait > 0:
                    time.sleep(left_to_wait)
                return func(*args, **kwargs)
            finally:
                # Stamp *after* func returns, so the interval is measured
                # from the end of the previous call.
                last_time_called = time.perf_counter()
                lock.release()
        return rate_limited_function
    return decorate
def mask_token(token):
    """Return *token* with its middle hidden: first 8 and last 5 chars kept."""
    head, tail = token[:8], token[-5:]
    return head + '...' + tail
def synchronized(lock):
    """ Synchronization decorator.

    Serializes all calls to the wrapped function through *lock*.
    Improvements over the original: uses ``with lock`` instead of a
    manual acquire/try/finally/release, and applies functools.wraps so
    the wrapper keeps the wrapped function's name and docstring.
    """
    def wrap(f):
        @wraps(f)
        def locked(*args, **kw):
            with lock:
                return f(*args, **kw)
        return locked
    return wrap
def get_real_logger_level(level) -> int:
    """Invert a logger level: 100 + 9 - level, i.e. 109 minus the input."""
    return 109 - level
def is_valid_uuid(uuid_: AnyStr):
    """Return True only if *uuid_* is a canonically-formatted UUID string."""
    try:
        parsed = UUID(uuid_)
    except ValueError:
        return False
    # Round-trip comparison rejects valid-but-non-canonical spellings.
    return str(parsed) == uuid_
| 1,015 | 0 | 123 |
efd02f13492a4df018a2eb15c0a965653b246934 | 548 | py | Python | Tree/104. Maximum depth of Binary tree.py | Rage-ops/Leetcode-Solutions | 48d4ecbb92a0bb7a7bb74a1445b593a67357ac02 | [
"MIT"
] | 1 | 2020-11-23T13:52:11.000Z | 2020-11-23T13:52:11.000Z | Tree/104. Maximum depth of Binary tree.py | harsha-sam/Leetcode-Solutions | 48d4ecbb92a0bb7a7bb74a1445b593a67357ac02 | [
"MIT"
] | null | null | null | Tree/104. Maximum depth of Binary tree.py | harsha-sam/Leetcode-Solutions | 48d4ecbb92a0bb7a7bb74a1445b593a67357ac02 | [
"MIT"
] | null | null | null | # Easy
# https://leetcode.com/problems/maximum-depth-of-binary-tree/
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
# Time Complexity : O(N)
# Space Complexity : O(N) | 27.4 | 73 | 0.609489 | # Easy
# https://leetcode.com/problems/maximum-depth-of-binary-tree/
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
# Time Complexity : O(N)
# Space Complexity : O(N)
class Solution:
    """Maximum depth of a binary tree (LeetCode 104)."""

    def maxDepth(self, root: TreeNode) -> int:
        """Return the number of nodes on the longest root-to-leaf path."""
        return self.depth(root)

    def depth(self, node):
        """Recursive helper: an empty subtree contributes depth 0."""
        if not node:
            return 0
        return 1 + max(self.depth(node.left), self.depth(node.right))
b7469384c9ed2a9c147a60f81162ab3cf47aa62e | 1,300 | py | Python | makeMKV/model/enum/item_attribute_id.py | coolman565/blu_two | 5c7626145b3644570be99ff0267f88bd61b9806c | [
"MIT"
] | null | null | null | makeMKV/model/enum/item_attribute_id.py | coolman565/blu_two | 5c7626145b3644570be99ff0267f88bd61b9806c | [
"MIT"
] | 1 | 2021-06-01T21:57:23.000Z | 2021-06-01T21:57:23.000Z | makeMKV/model/enum/item_attribute_id.py | coolman565/blu_two | 5c7626145b3644570be99ff0267f88bd61b9806c | [
"MIT"
] | null | null | null | from enum import Enum
class ItemAttributeId(Enum):
"""
Derived from makemkvgui/inc/lgpl/apdefs.h
"""
Unknown = 0
Type = 1
Name = 2
LangCode = 3
LangName = 4
CodecId = 5
CodecShort = 6
CodecLong = 7
ChapterCount = 8
Duration = 9
DiskSize = 10
DiskSizeBytes = 11
StreamTypeExtension = 12
BitRate = 13
AudioChannelsCount = 14
AngleInfo = 15
SourceFileName = 16
AudioSampleRate = 17
AudioSampleSize = 18
VideoSize = 19
VideoAspectRatio = 20
VideoFrameRate = 21
StreamFlags = 22
DateTime = 23
OriginalTitleId = 24
SegmentsCount = 25
SegmentsMap = 26
OutputFileName = 27
MetadataLanguageCode = 28
MetadataLanguageName = 29
TreeInfo = 30
PanelTitle = 31
VolumeName = 32
OrderWeight = 33
OutputFormat = 34
OutputFormatDescription = 35
SeamlessInfo = 36
PanelText = 37
MkvFlags = 38
MkvFlagsText = 39
AudioChannelLayoutName = 40
OutputCodecShort = 41
OutputConversionType = 42
OutputAudioSampleRate = 43
OutputAudioSampleSize = 44
OutputAudioChannelsCount = 45
OutputAudioChannelLayoutName = 46
OutputAudioChannelLayout = 47
OutputAudioMixDescription = 48
Comment = 49
OffsetSequenceId = 50
| 22.033898 | 45 | 0.656923 | from enum import Enum
class ItemAttributeId(Enum):
    """
    Derived from makemkvgui/inc/lgpl/apdefs.h

    NOTE(review): the numeric values presumably must mirror the attribute
    ordering in MakeMKV's apdefs.h — do not renumber without checking
    that header.
    """
    Unknown = 0
    Type = 1
    Name = 2
    LangCode = 3
    LangName = 4
    CodecId = 5
    CodecShort = 6
    CodecLong = 7
    ChapterCount = 8
    Duration = 9
    DiskSize = 10
    DiskSizeBytes = 11
    StreamTypeExtension = 12
    BitRate = 13
    AudioChannelsCount = 14
    AngleInfo = 15
    SourceFileName = 16
    AudioSampleRate = 17
    AudioSampleSize = 18
    VideoSize = 19
    VideoAspectRatio = 20
    VideoFrameRate = 21
    StreamFlags = 22
    DateTime = 23
    OriginalTitleId = 24
    SegmentsCount = 25
    SegmentsMap = 26
    OutputFileName = 27
    MetadataLanguageCode = 28
    MetadataLanguageName = 29
    TreeInfo = 30
    PanelTitle = 31
    VolumeName = 32
    OrderWeight = 33
    OutputFormat = 34
    OutputFormatDescription = 35
    SeamlessInfo = 36
    PanelText = 37
    MkvFlags = 38
    MkvFlagsText = 39
    AudioChannelLayoutName = 40
    OutputCodecShort = 41
    OutputConversionType = 42
    OutputAudioSampleRate = 43
    OutputAudioSampleSize = 44
    OutputAudioChannelsCount = 45
    OutputAudioChannelLayoutName = 46
    OutputAudioChannelLayout = 47
    OutputAudioMixDescription = 48
    Comment = 49
    OffsetSequenceId = 50
| 0 | 0 | 0 |
673222d869417d5b734e3bde5eaffe06ab13ba5d | 5,156 | py | Python | sportsipy/fb/fb_utils.py | roclark/sports-reference-scraper | fffb7c8170454720622089cf794ebcb106245e4d | [
"MIT"
] | null | null | null | sportsipy/fb/fb_utils.py | roclark/sports-reference-scraper | fffb7c8170454720622089cf794ebcb106245e4d | [
"MIT"
] | null | null | null | sportsipy/fb/fb_utils.py | roclark/sports-reference-scraper | fffb7c8170454720622089cf794ebcb106245e4d | [
"MIT"
] | null | null | null | from difflib import get_close_matches
from .squad_ids import SQUAD_IDS
def _parse_squad_name(team_id):
"""
Parse and clean the team's name.
To try and match requested team names with the master squad ID list, passed
names should be parsed to remove the common 'FC' and 'CF' tags, as well as
force all strings to be lowercase and excess whitespace removed.
Parameters
----------
team_id : string
The requested team's name to be parsed.
Returns
-------
string
Returns a ``string`` of the parsed team's name.
"""
irrelevant = [' FC', ' CF', 'FC ', 'CF ']
for val in irrelevant:
team_id = team_id.replace(val, '')
name = team_id.lower().strip()
return name
def lookup_squad_id(name, quiet=False):
"""
Attempt to match a team name with a squad ID.
A simple utility to make it easier to find squad IDs given a team name.
By supplying a team name, this function will return the squad ID if a
match can be found, or return a dictionary of the top 5 closest teams if a
match cannot be made. For example, specifying 'Tottenham Hotspur' will
return Tottenham's squad ID of '361ca564'. However, specifying 'Tottenham'
doesn't technically match an official team name, and the closest matches
will be returned instead, with Tottenham Hotspur being the first result.
Due to the massive number of teams listed on fbref.com, the incorrect team
could be accidently pulled by what appears to be the proper name. For
example, 'Barcelona' is the name of one of the largest clubs in the world,
located in Barcelona, Spain. However, 'Barcelona' could also refer to
Barcelona Sporting Club (commonly referred to as just 'Barcelona' locally)
who competes in the Ecuadorian Serie A. By using the squad ID, the intended
team is guaranteed to be used.
This helper function does not rely on case for the words, so 'Tottenham
Hotspur' will return the same result as 'tottenham hotspur'. Also, common
tags such as 'FC' and 'CF' are removed, so there is no need to specify
those components.
In the case a match can't be made, a dictionary of suggestions will be
returned instead of the squad ID. The dictionary is intended to be used
to find the best alternatives for later use. The keys are the suggested
names and values are the squad IDs. This allows direct usage of a squad ID
in subsequent calls to various classes in the Football module in
sportsipy instead of attempting to lookup a name. As there can be
multiple return types, it is recommended to check the type of the returned
value before further calculations. If the return is of type ``string``, it
is the 8-digit squad ID. If it is of type ``dictionary``, it is a key-value
object containing suggestions.
Parameters
----------
name : string
A ``string`` of the name of a squad to lookup, such as 'Tottenham
Hotspur'.
quiet : boolean
A ``boolean`` value which suppresses text output while True.
Returns
-------
string or dictionary
Returns a ``string`` of the squad's 8-digit ID if a match could be
found for the requested team. If a match could not be found, a
``dictionary`` is returned with the key-value pairs for the top 5
closest teams as keys and their respective IDs as values.
"""
filtered_name = _parse_squad_name(name)
if filtered_name in SQUAD_IDS:
return SQUAD_IDS[filtered_name]
closest_matches = get_close_matches(filtered_name, SQUAD_IDS.keys(), 5)
squad_match_ids = {}
output = 'Exact match not found - Printing closest matches:\n'
print(closest_matches)
for team in closest_matches:
output += team.title() + ' - ' + SQUAD_IDS[team] + '\n'
squad_match_ids[team.title()] = SQUAD_IDS[team]
if not quiet:
print(output)
return squad_match_ids
def _lookup_team(team_id):
"""
Find the squad ID for the requested team.
Every team on fbref.com has its own unique squad ID, which is a 8-digit
code containing alphanumeric numbers. The user can either supply the
8-digit code as-is, or provide the team's full name. If the squad ID is
provided and matches a master list of IDs, the squad ID will be returned
as-is for later use in the class. If the name is passed, it will first be
parsed to try and match the team with a team in the master squad ID list.
If no squad is found, an error will be raised indicating the requested team
cannot be found.
Parameters
----------
team_id : string
A ``string`` of either the team's ID or the name of the team.
Returns
-------
string
Returns a ``string`` of the squad's 8-digit ID.
"""
if team_id.lower() in SQUAD_IDS.values():
return team_id.lower()
name = lookup_squad_id(team_id)
if type(name) == str:
return name
error_message = ('Team ID of "%s" not found. Did you mean one of the '
'following?\n%s' % (team_id, name))
raise ValueError(error_message)
| 40.28125 | 79 | 0.683088 | from difflib import get_close_matches
from .squad_ids import SQUAD_IDS
def _parse_squad_name(team_id):
"""
Parse and clean the team's name.
To try and match requested team names with the master squad ID list, passed
names should be parsed to remove the common 'FC' and 'CF' tags, as well as
force all strings to be lowercase and excess whitespace removed.
Parameters
----------
team_id : string
The requested team's name to be parsed.
Returns
-------
string
Returns a ``string`` of the parsed team's name.
"""
irrelevant = [' FC', ' CF', 'FC ', 'CF ']
for val in irrelevant:
team_id = team_id.replace(val, '')
name = team_id.lower().strip()
return name
def lookup_squad_id(name, quiet=False):
    """
    Attempt to match a team name with a squad ID.

    By supplying a team name, this function will return the squad ID if a
    match can be found, or return a dictionary of the top 5 closest teams
    if a match cannot be made. For example, specifying 'Tottenham Hotspur'
    will return Tottenham's squad ID of '361ca564', while 'Tottenham'
    (not an official name) yields suggestions instead.

    Matching is case-insensitive and ignores common 'FC'/'CF' tags (see
    _parse_squad_name). When no exact match exists, the returned dict maps
    suggested display names to their 8-digit squad IDs, so a suggestion
    can be used directly in subsequent calls. Check the return type before
    further use: ``str`` is the squad ID, ``dict`` holds suggestions.

    Parameters
    ----------
    name : string
        A ``string`` of the name of a squad to lookup, such as 'Tottenham
        Hotspur'.
    quiet : boolean
        A ``boolean`` value which suppresses text output while True.

    Returns
    -------
    string or dictionary
        Returns a ``string`` of the squad's 8-digit ID if a match could be
        found for the requested team. If a match could not be found, a
        ``dictionary`` is returned with the key-value pairs for the top 5
        closest teams as keys and their respective IDs as values.
    """
    filtered_name = _parse_squad_name(name)
    if filtered_name in SQUAD_IDS:
        return SQUAD_IDS[filtered_name]
    # No exact match: collect the five closest names as suggestions.
    closest_matches = get_close_matches(filtered_name, SQUAD_IDS.keys(), 5)
    squad_match_ids = {}
    output = 'Exact match not found - Printing closest matches:\n'
    # Fix: a stray debug print(closest_matches) used to run here
    # unconditionally, ignoring the quiet flag; removed.
    for team in closest_matches:
        output += team.title() + ' - ' + SQUAD_IDS[team] + '\n'
        squad_match_ids[team.title()] = SQUAD_IDS[team]
    if not quiet:
        print(output)
    return squad_match_ids
"""
Find the squad ID for the requested team.
Every team on fbref.com has its own unique squad ID, which is a 8-digit
code containing alphanumeric numbers. The user can either supply the
8-digit code as-is, or provide the team's full name. If the squad ID is
provided and matches a master list of IDs, the squad ID will be returned
as-is for later use in the class. If the name is passed, it will first be
parsed to try and match the team with a team in the master squad ID list.
If no squad is found, an error will be raised indicating the requested team
cannot be found.
Parameters
----------
team_id : string
A ``string`` of either the team's ID or the name of the team.
Returns
-------
string
Returns a ``string`` of the squad's 8-digit ID.
"""
if team_id.lower() in SQUAD_IDS.values():
return team_id.lower()
name = lookup_squad_id(team_id)
if type(name) == str:
return name
error_message = ('Team ID of "%s" not found. Did you mean one of the '
'following?\n%s' % (team_id, name))
raise ValueError(error_message)
| 0 | 0 | 0 |
f36e476ffd668a1e530e1173c209b99e50531202 | 3,621 | py | Python | test/web_runner.py | hhucn/webvulnscan | efb812fd5483157528f37794acecafa35ed0d878 | [
"MIT"
] | 40 | 2015-01-15T14:52:51.000Z | 2022-03-25T08:52:48.000Z | test/web_runner.py | RaviRaaja/webvulnscan | efb812fd5483157528f37794acecafa35ed0d878 | [
"MIT"
] | 1 | 2016-07-21T09:51:15.000Z | 2016-10-02T17:45:37.000Z | test/web_runner.py | RaviRaaja/webvulnscan | efb812fd5483157528f37794acecafa35ed0d878 | [
"MIT"
] | 22 | 2015-01-23T04:21:21.000Z | 2021-08-22T03:36:21.000Z | #!/usr/bin/env python3
from __future__ import unicode_literals
import cgi
import io
import os
import socket
import unittest
import sys
try:
from http.server import BaseHTTPRequestHandler, HTTPServer
except ImportError:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
try:
from urllib.parse import urlparse, parse_qs
except ImportError:
from urlparse import urlparse, parse_qs
_WVS_ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(_WVS_ROOT_DIR)
import webvulnscan
sitemap = {}
if __name__ == "__main__":
main()
| 28.289063 | 78 | 0.567799 | #!/usr/bin/env python3
from __future__ import unicode_literals
import cgi
import io
import os
import socket
import unittest
import sys
try:
from http.server import BaseHTTPRequestHandler, HTTPServer
except ImportError:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
try:
from urllib.parse import urlparse, parse_qs
except ImportError:
from urlparse import urlparse, parse_qs
_WVS_ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(_WVS_ROOT_DIR)
import webvulnscan
sitemap = {}
class WebRunnerHandler(BaseHTTPRequestHandler):
    """HTTP handler that serves each registered test page through the
    webvulnscan client, so scanner behavior can be exercised in a browser."""

    def _write(self, s):
        # All page output is UTF-8 encoded before hitting the socket.
        return self.wfile.write(s.encode('utf-8'))

    def _default_page(self):
        """Render the index page: a link list of every entry in `sitemap`."""
        self.send_response(200)
        self.send_header("Content-Type", "text/html")
        self.end_headers()
        w = self._write
        w("""<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<title>webvulnscan tests</title>
</head>
<body>
<h1>webvulnscan tests</h1>
<ul>
""")
        for name in sorted(sitemap):
            # NOTE(review): cgi.escape was removed in Python 3.8; html.escape
            # is the modern equivalent -- this file still targets older runtimes.
            w('<li><a href="' + cgi.escape(name, quote=True) + '/">')
            w(cgi.escape(name))
            w('</a></li>')
        w("""
</ul>
</body>
</html>""")

    def _serve_request(self):
        """Route a request: "/" -> index, "/<test>/..." -> proxy the virtual
        site of that test through its webvulnscan client, else 404."""
        parsed_path = urlparse(self.path)
        current_path = parsed_path.path.split('/')[1]
        if parsed_path.path == "/":
            self._default_page()
        elif current_path in sitemap:
            # Path beyond the test name is forwarded to the virtual site.
            extended_path = "".join(parsed_path.path.split('/')[2:])
            site = sitemap[current_path]
            client = site.client
            if parsed_path.query == "":
                url = "http://test.webvulnscan/" + extended_path
            else:
                url = "http://test.webvulnscan/" + extended_path +\
                    "?" + parsed_path.query
            request = webvulnscan.request.Request(url)
            if 'content-length' in self.headers:
                # POST bodies: decode the first value of each form field.
                content_len = int(self.headers['content-length'])
                body = self.rfile.read(content_len)
                request.parameters = parse_qs(body)
                for value in request.parameters:
                    new_value = request.parameters[value][0].decode('utf-8')
                    request.parameters[value] = new_value
            # Relay status, headers and body produced by the virtual site.
            _, status_code, response_data, headers = client._download(request)
            self.send_response(status_code)
            self.send_header('Content-Type', 'text/html')
            for header in headers:
                self.send_header(header[0], header[1])
            self.end_headers()
            self.wfile.write(response_data)
        else:
            self.send_error(404, "File not Found!")

    def __getattr__(self, name):
        # BaseHTTPRequestHandler dispatches to do_GET/do_POST/etc.; answer
        # every HTTP verb with the same generic request handler.
        if name.startswith('do_'):
            return self._serve_request
        raise AttributeError()
def discover():
    """Yield every attribute of the discovered unittest cases that carries a
    `client` marker (the per-test virtual sites registered by the suite)."""
    loader = unittest.TestLoader()
    test_dir = os.path.join(_WVS_ROOT_DIR, 'test')
    for suite in loader.discover(test_dir):
        for case_group in suite:
            for case in case_group._tests:
                for attr_name in dir(case):
                    candidate = getattr(case, attr_name)
                    if hasattr(candidate, "client"):
                        yield candidate
def main():
    """Register all discovered test pages, then serve them forever on :8000."""
    sitemap.update((page.__name__, page) for page in discover())
    server = HTTPServer(("", 8000), WebRunnerHandler)
    server.serve_forever()
# Start the test web server when executed as a script.
if __name__ == "__main__":
    main()
| 2,826 | 26 | 176 |
888016f43395b30f5bb42509b2cf824b817a2f5c | 2,203 | py | Python | extract-airbnb-properties.py | townofchapelhill/hospitality-python | f6b0712a7ba0e9fd0164a55f271f163ebce41b6b | [
"WTFPL"
] | 1 | 2022-03-18T14:00:14.000Z | 2022-03-18T14:00:14.000Z | extract-airbnb-properties.py | townofchapelhill/hospitality-python | f6b0712a7ba0e9fd0164a55f271f163ebce41b6b | [
"WTFPL"
] | null | null | null | extract-airbnb-properties.py | townofchapelhill/hospitality-python | f6b0712a7ba0e9fd0164a55f271f163ebce41b6b | [
"WTFPL"
] | null | null | null | import airbnb
import airbnb_secrets
import csv
items=1
output_filename="airbnb_chapelhill.csv"
# Set CSV Header & line format
csv_header = ['City','Latitude','Longitude','Type','Bathrooms','Bedrooms','Public Address','Localized City','Source']
api = airbnb.Api()
# api = airbnb.Api(airbnb_secrets.login, airbnb_secrets.password)
api = airbnb.Api(access_token=airbnb_secrets.access_token)
try:
output_file = open(output_filename, 'w')
csvwriter = csv.writer(output_file, dialect='excel')
except IOError:
print("Output file creation failed")
exit(1)
csvwriter.writerow(csv_header)
while True:
try:
response = api.get_homes("Chapel Hill, NC, USA",items_per_grid=10, offset=items)
except Exception:
print("Terminating on error")
raise Exception
break
print("Starting item: "+ str(items) + " responses: " + str(len(response['explore_tabs'][0]['sections'][0]['listings'])))
# items += 50
items += len(response['explore_tabs'][0]['sections'][0]['listings'])
if len(response['explore_tabs'][0]['sections'][0]['listings']) == 0:
break
# ETL processing result set
for x in range(0, len(response['explore_tabs'][0]['sections'][0]['listings'])):
# build the output values in key order
csv_output=['null']*9
csv_output[0]=response['explore_tabs'][0]['sections'][0]['listings'][x]['listing']['city']
csv_output[1]=response['explore_tabs'][0]['sections'][0]['listings'][x]['listing']['lat']
csv_output[2]=response['explore_tabs'][0]['sections'][0]['listings'][x]['listing']['lng']
csv_output[3]=response['explore_tabs'][0]['sections'][0]['listings'][x]['listing']['room_and_property_type']
csv_output[4]=response['explore_tabs'][0]['sections'][0]['listings'][x]['listing']['bathrooms']
csv_output[5]=response['explore_tabs'][0]['sections'][0]['listings'][x]['listing']['bedrooms']
csv_output[6]=response['explore_tabs'][0]['sections'][0]['listings'][x]['listing']['public_address']
csv_output[7]=response['explore_tabs'][0]['sections'][0]['listings'][x]['listing']['localized_city']
csv_output[8]="AirBnB"
csvwriter.writerow(csv_output)
# cleanup and exit
output_file.close()
| 43.196078 | 123 | 0.67635 | import airbnb
import airbnb_secrets
import csv
items = 1
output_filename = "airbnb_chapelhill.csv"
# Set CSV Header & line format
csv_header = ['City', 'Latitude', 'Longitude', 'Type', 'Bathrooms', 'Bedrooms', 'Public Address', 'Localized City', 'Source']
# Authenticate with the stored access token (the redundant anonymous
# airbnb.Api() construction was removed -- it was immediately overwritten).
api = airbnb.Api(access_token=airbnb_secrets.access_token)
try:
    output_file = open(output_filename, 'w')
    csvwriter = csv.writer(output_file, dialect='excel')
except IOError:
    print("Output file creation failed")
    exit(1)
csvwriter.writerow(csv_header)
# Page through the API using `items` as the offset until an empty page.
while True:
    try:
        response = api.get_homes("Chapel Hill, NC, USA", items_per_grid=10, offset=items)
    except Exception:
        print("Terminating on error")
        # Re-raise the original exception. The old code raised a fresh bare
        # Exception (discarding the traceback) followed by an unreachable
        # `break`.
        raise
    listings = response['explore_tabs'][0]['sections'][0]['listings']
    print("Starting item: " + str(items) + " responses: " + str(len(listings)))
    items += len(listings)
    if len(listings) == 0:
        break
    # ETL processing result set: one CSV row per listing, in header order.
    for entry in listings:
        listing = entry['listing']
        csvwriter.writerow([
            listing['city'],
            listing['lat'],
            listing['lng'],
            listing['room_and_property_type'],
            listing['bathrooms'],
            listing['bedrooms'],
            listing['public_address'],
            listing['localized_city'],
            "AirBnB",
        ])
# cleanup and exit
output_file.close()
| 0 | 0 | 0 |
042888c821315f9ac9268da7b68e21e264d574b7 | 452 | py | Python | Narcissistic Number/is_narcissistic_number.py | rayvantsahni/Let-us-Math | 571ee70452feae0b15f37d46de658b0c0251bd3d | [
"MIT"
] | 2 | 2020-08-06T07:09:38.000Z | 2020-09-12T02:32:23.000Z | Narcissistic Number/is_narcissistic_number.py | rayvantsahni/Math-is-Fun | 571ee70452feae0b15f37d46de658b0c0251bd3d | [
"MIT"
] | null | null | null | Narcissistic Number/is_narcissistic_number.py | rayvantsahni/Math-is-Fun | 571ee70452feae0b15f37d46de658b0c0251bd3d | [
"MIT"
] | 1 | 2021-08-30T14:17:28.000Z | 2021-08-30T14:17:28.000Z |
if __name__ == "__main__":
n = int(input("Enter number: "))
print("Is a Narcissistic Number" if is_narcissistic_number(n) else "Is NOT a Narcissistic Number")
def is_narcissistic_number(n):
    """Return True if *n* equals the sum of its digits, each raised to the
    power of the digit count (e.g. 153 = 1**3 + 5**3 + 3**3).

    Narcissistic numbers are defined for non-negative integers only; negative
    input returns False. (The original looped forever for negative *n*,
    because in Python -1 // 10 == -1, so `n //= 10` never reaches 0.)
    """
    if n < 0:
        return False
    num_digits = len(str(n))  # same digit count as get_number_of_digits(n)
    digit_power_sum = 0
    remaining = n
    while remaining != 0:
        digit_power_sum += (remaining % 10) ** num_digits
        remaining //= 10
    return n == digit_power_sum
def get_number_of_digits(n):
    """Return how many characters the decimal representation of *n* has."""
    decimal_form = str(n)
    return len(decimal_form)
# Simple CLI: read an integer and report whether it is narcissistic.
if __name__ == "__main__":
    n = int(input("Enter number: "))
    print("Is a Narcissistic Number" if is_narcissistic_number(n) else "Is NOT a Narcissistic Number")
| 237 | 0 | 45 |
0758861400523877891511807ac1ca1e6d3c171c | 962 | py | Python | dataset.py | dddzg/MoCo | 94125b06235032cf74768709bac36e3ffbeb3f7c | [
"MIT"
] | 41 | 2019-12-04T09:56:22.000Z | 2022-02-10T13:07:37.000Z | dataset.py | dddzg/MoCo | 94125b06235032cf74768709bac36e3ffbeb3f7c | [
"MIT"
] | 5 | 2020-03-17T06:53:33.000Z | 2021-01-16T20:15:14.000Z | dataset.py | dddzg/MoCo | 94125b06235032cf74768709bac36e3ffbeb3f7c | [
"MIT"
] | 6 | 2019-12-21T06:50:58.000Z | 2021-12-04T20:48:16.000Z | from PIL import Image
# import torchvision.models as models
#
# print(type(models.__dict__['resnet18']))
| 29.151515 | 68 | 0.602911 | from PIL import Image
def custom_dataset(base_dataset):
    """Wrap a torchvision-style dataset class so that indexing returns two
    independently-augmented views of the same image plus the label — the
    query/key pair used for MoCo-style contrastive training."""
    class CustomDataSet(base_dataset):
        def __init__(self, *args, **kwargs):
            super(CustomDataSet, self).__init__(*args, **kwargs)

        def __getitem__(self, index):
            # NOTE(review): assumes self.data holds array-like images and
            # self.targets holds int-castable labels — confirm per dataset.
            raw_image = self.data[index]
            label = int(self.targets[index])
            pil_image = Image.fromarray(raw_image)
            # Applying the (random) transform twice yields two distinct views.
            query_view = self.transform(pil_image)
            key_view = self.transform(pil_image)
            if self.target_transform is not None:
                label = self.target_transform(label)
            return query_view, key_view, label

    return CustomDataSet
# import torchvision.models as models
#
# print(type(models.__dict__['resnet18']))
| 832 | 0 | 23 |
f6688c453ab5e94f6089e06c70117a6382a4358e | 436 | py | Python | colossus/apps/templates/migrations/0002_auto_20200423_1850.py | gcallah/colossus | ee5319091cd19c96987825258a57e6d6f9d8fc51 | [
"MIT"
] | 3 | 2020-03-30T14:21:44.000Z | 2020-11-23T06:51:55.000Z | colossus/apps/templates/migrations/0002_auto_20200423_1850.py | gcallah/colossus | ee5319091cd19c96987825258a57e6d6f9d8fc51 | [
"MIT"
] | null | null | null | colossus/apps/templates/migrations/0002_auto_20200423_1850.py | gcallah/colossus | ee5319091cd19c96987825258a57e6d6f9d8fc51 | [
"MIT"
] | 2 | 2019-10-25T20:50:20.000Z | 2019-11-05T02:40:23.000Z | # Generated by Django 2.1.5 on 2020-04-23 22:50
from django.db import migrations
import tinymce.models
| 21.8 | 87 | 0.623853 | # Generated by Django 2.1.5 on 2020-04-23 22:50
from django.db import migrations
import tinymce.models
class Migration(migrations.Migration):
    """Auto-generated migration: redeclare EmailTemplate.content as a
    TinyMCE HTMLField with a placeholder default."""
    dependencies = [
        ('templates', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='emailtemplate',
            name='content',
            field=tinymce.models.HTMLField(default='Please enter your contents here!'),
        ),
    ]
| 0 | 308 | 23 |
c092d69cc0b597d29e2bed17b7fa24641102e301 | 1,183 | py | Python | api/compliance/enums.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | [
"MIT"
] | 3 | 2019-05-15T09:30:39.000Z | 2020-04-22T16:14:23.000Z | api/compliance/enums.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | [
"MIT"
] | 85 | 2019-04-24T10:39:35.000Z | 2022-03-21T14:52:12.000Z | api/compliance/enums.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | [
"MIT"
] | 1 | 2021-01-17T11:12:19.000Z | 2021-01-17T11:12:19.000Z | # SIEL type compliance cases require a specific control code prefixes. currently: (0 to 9)D, (0 to 9)E, ML21, ML22.
COMPLIANCE_CASE_ACCEPTABLE_GOOD_CONTROL_CODES = "(^[0-9][DE].*$)|(^ML21.*$)|(^ML22.*$)"
| 26.886364 | 115 | 0.598478 | # SIEL type compliance cases require a specific control code prefixes. currently: (0 to 9)D, (0 to 9)E, ML21, ML22.
COMPLIANCE_CASE_ACCEPTABLE_GOOD_CONTROL_CODES = "(^[0-9][DE].*$)|(^ML21.*$)|(^ML22.*$)"
class ComplianceVisitTypes:
    """Visit types recorded on a compliance visit case.

    `choices` follows the Django (value, label) convention.
    """
    FIRST_CONTACT = "first_contact"
    FIRST_VISIT = "first_visit"
    ROUTINE_VISIT = "routine_visit"
    REVISIT = "revisit"
    choices = [
        (FIRST_CONTACT, "First contact"),
        (FIRST_VISIT, "First visit"),
        (ROUTINE_VISIT, "Routine visit"),
        (REVISIT, "Revisit"),
    ]
    @classmethod
    def to_str(cls, visit_type):
        """Return the display label for *visit_type*, or "" if unknown.

        The original `next()` call had no default and raised StopIteration
        for unknown values; returning "" matches ComplianceRiskValues.to_str.
        """
        return next(
            (label for value, label in cls.choices if value == visit_type), "")
class ComplianceRiskValues:
    """Risk ratings assignable to a compliance case, as (value, label) pairs."""
    VERY_LOW = "very_low"
    LOWER = "lower"
    MEDIUM = "medium"
    HIGHER = "higher"
    HIGHEST = "highest"
    choices = (
        (VERY_LOW, "Very low risk"),
        (LOWER, "Lower risk"),
        (MEDIUM, "Medium risk"),
        (HIGHER, "Higher risk"),
        (HIGHEST, "Highest risk"),
    )
    @classmethod
    def to_str(cls, risk_value):
        """Return the display label for *risk_value*, or "" if unknown."""
        return dict(cls.choices).get(risk_value, "")
643f6b318ee0f9a13486c41eea32974757ea7055 | 488 | py | Python | config/jupyter_notebook_config.py | brunocampos01/home-sweet-home | f22533569b9ab913c8d047f0f4989c3682dd2326 | [
"MIT"
] | 3 | 2020-06-05T15:49:39.000Z | 2020-10-26T19:46:12.000Z | config/jupyter_notebook_config.py | brunocampos01/personal_configuration | 115846bedaf82a6de9bb9f6c160a2b84781a4668 | [
"MIT"
] | null | null | null | config/jupyter_notebook_config.py | brunocampos01/personal_configuration | 115846bedaf82a6de9bb9f6c160a2b84781a4668 | [
"MIT"
] | 1 | 2019-12-25T19:46:48.000Z | 2019-12-25T19:46:48.000Z | from jupyter_core.paths import jupyter_data_dir
import subprocess
import os
import stat
c = get_config()
c.NotebookApp.ip = '0.0.0.0'
# The port the notebook server will listen on
c.NotebookApp.port = 8888
# Whether to open in a browser after starting
c.NotebookApp.open_browser = False
# Set the Access-Control-Allow-Credentials: true header
c.NotebookApp.allow_password_change = False
# https://github.com/jupyter/notebook/issues/3130
c.FileContentsManager.delete_to_trash = False | 24.4 | 55 | 0.797131 | from jupyter_core.paths import jupyter_data_dir
import subprocess
import os
import stat
c = get_config()  # noqa: F821 -- `get_config` is injected by Jupyter at startup
# Listen on all interfaces (needed when the server runs inside a container).
c.NotebookApp.ip = '0.0.0.0'
# The port the notebook server will listen on
c.NotebookApp.port = 8888
# Whether to open in a browser after starting
c.NotebookApp.open_browser = False
# Disallow changing the notebook password from the web UI.
c.NotebookApp.allow_password_change = False
# https://github.com/jupyter/notebook/issues/3130
c.FileContentsManager.delete_to_trash = False
76db0e40ddd0cd5196bd57fa00b4d28ed0336bc0 | 5,569 | py | Python | weblog/models.py | demigods5505/w3blog | 6e28c91c79d5ffd58d7ad244c1976508af9372c8 | [
"BSD-3-Clause"
] | 7 | 2018-07-25T21:41:37.000Z | 2019-03-04T15:47:02.000Z | weblog/models.py | demigods5505/w3blog | 6e28c91c79d5ffd58d7ad244c1976508af9372c8 | [
"BSD-3-Clause"
] | 6 | 2018-10-11T08:32:32.000Z | 2019-04-05T12:36:46.000Z | weblog/models.py | demigods5505/w3blog | 6e28c91c79d5ffd58d7ad244c1976508af9372c8 | [
"BSD-3-Clause"
] | 4 | 2019-01-04T19:18:36.000Z | 2020-09-30T19:46:19.000Z | from django.conf import settings
from django.db import models
from django.forms import ModelForm, Textarea
from django.shortcuts import reverse
from django.utils.translation import pgettext_lazy, ugettext_lazy as _
from django.utils import timezone
| 40.355072 | 95 | 0.663135 | from django.conf import settings
from django.db import models
from django.forms import ModelForm, Textarea
from django.shortcuts import reverse
from django.utils.translation import pgettext_lazy, ugettext_lazy as _
from django.utils import timezone
class Category(models.Model):
    """A blog post category; categories may nest via `parent_category`."""
    name = models.CharField(max_length=250, verbose_name=pgettext_lazy(
        'Noun, not personal name', 'Name'), blank=False, unique=True)
    slug = models.SlugField(max_length=60, verbose_name=_(
        'Slug (URL)'), db_index=True, unique=True)
    # SET_DEFAULT with default=None: deleting a parent re-roots its children.
    parent_category = models.ForeignKey(
        'self', verbose_name=_('Parent category'), null=True, blank=True,
        default=None, on_delete=models.SET_DEFAULT)
    def get_absolute_url(self):
        """Return the canonical index URL for this category."""
        return reverse('weblog:CategoryIndex',
                       kwargs={'category_slug': self.slug})
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = pgettext_lazy('Post category', 'Category')
        verbose_name_plural = pgettext_lazy('Post categories', 'Categories')
class CategoryTranslation(models.Model):
    """A translated display name for a Category in one language."""
    name = models.CharField(max_length=250, verbose_name=pgettext_lazy(
        'Noun, not personal name', 'Name'), blank=False)
    # ISO language code (up to 5 chars, e.g. "en", "pt-br").
    language = models.CharField(
        max_length=5, verbose_name=_('Language (ISO)'), blank=False)
    category = models.ForeignKey(Category, verbose_name=pgettext_lazy(
        'Post category', 'Category'), blank=False, on_delete=models.CASCADE)
    def __str__(self):
        return self.name
    def slug(self):
        # Translations share the slug of the category they translate.
        return self.category.slug
    class Meta:
        verbose_name = _('Category name translation')
        verbose_name_plural = _('Category name translations')
class BlogPost(models.Model):
    """A blog post, optionally pinned and assigned to multiple categories."""
    author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_(
        'Author'), on_delete=models.PROTECT)
    title = models.CharField(max_length=100, verbose_name=pgettext_lazy(
        'As in name', 'Title'), blank=False)
    content = models.TextField(verbose_name=pgettext_lazy(
        'Of post, comment, article, etc.', 'Content'), blank=False)
    preview_image = models.ImageField(
        upload_to='weblog/preview_images/%Y/%m/%d/', blank=True,
        verbose_name=_('Preview image'))
    preview_text = models.CharField(
        max_length=250, blank=True, verbose_name=_('Preview Text'))
    # ISO code of the language the post was originally written in.
    original_language = models.CharField(
        max_length=5, verbose_name=_('Original language (ISO)'), blank=True)
    slug = models.SlugField(max_length=100, verbose_name=_(
        'Slug (URL)'), db_index=True, unique=True)
    categories = models.ManyToManyField(Category, verbose_name=pgettext_lazy(
        'Post categories', 'Categories'), blank=True)
    pinned = models.BooleanField(
        verbose_name=_('Pin blog post'), default=False)
    pin_priority = models.IntegerField(verbose_name=_(
        'Pinned post priority (if pinned)'), default=0)
    published = models.BooleanField(
        verbose_name=pgettext_lazy('Make post viewable', 'Published'))
    publish_date = models.DateTimeField(verbose_name=_('Publish date'))
    def get_absolute_url(self):
        """Return the post URL under its first category, or "misc" if the
        post has no category."""
        if self.categories.all().count() > 0:
            category = self.categories.all()[0].slug
            return reverse('weblog:PostView',
                           kwargs={'category_slug': category,
                                   'post_slug': self.slug})
        else:
            return reverse('weblog:PostView',
                           kwargs={'category_slug': 'misc',
                                   'post_slug': self.slug})
    def __str__(self):
        return self.title
    class Meta:
        # Newest first; title breaks ties between same-date posts.
        ordering = ['-publish_date', 'title']
        verbose_name = _('Blog Post')
        verbose_name_plural = _('Blog Posts')
class Translation(models.Model):
    """A translated title/content/preview of a BlogPost in one language."""
    post = models.ForeignKey(BlogPost, verbose_name=pgettext_lazy(
        'Noun, as in blog post', 'Post'), on_delete=models.CASCADE)
    # ISO language code of this translation.
    language = models.CharField(
        max_length=5, verbose_name=_('Language (ISO)'), blank=False)
    title = models.CharField(max_length=100, verbose_name=pgettext_lazy(
        'As in name', 'Title'), blank=False)
    content = models.TextField(verbose_name=pgettext_lazy(
        'Of post, comment, article, etc.', 'Content'), blank=False)
    preview_image = models.ImageField(
        upload_to='weblog/preview_images/%Y/%m/%d/', blank=True,
        verbose_name=_('Preview image'))
    preview_text = models.CharField(
        max_length=250, blank=True, verbose_name=_('Preview Text'))
    class Meta:
        verbose_name = _('Translation')
        verbose_name_plural = _('Translations')
class PostComment(models.Model):
    """A (possibly anonymous) user comment attached to a BlogPost."""
    author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_(
        'Author'), null=True, blank=True, on_delete=models.PROTECT)
    post = models.ForeignKey(BlogPost, verbose_name=pgettext_lazy(
        'Noun, as in blog post', 'Post'), on_delete=models.CASCADE)
    content = models.TextField(verbose_name=pgettext_lazy(
        'Of post, comment, article, etc.', 'Content'), blank=False)
    # Pass the callable, not timezone.now(): calling it here would evaluate
    # once at import time, stamping every comment with the server start time.
    publish_date = models.DateTimeField(verbose_name=_('Publish date'), default=timezone.now)
    class Meta:
        verbose_name = pgettext_lazy('Noun', 'Comment')
        verbose_name_plural = pgettext_lazy('Noun', 'Comments')
class PostCommentForm(ModelForm):
    """ModelForm for submitting a PostComment; exposes only `content`."""
    class Meta:
        model = PostComment
        fields = ('content',)
        labels = {'content': ''}
        widgets = {
            'content': Textarea(attrs={'class': 'form-control', 'rows': '5'}),
        }
| 662 | 4,514 | 138 |
f8cebbbb7f8f94295106aa3f7279ac6bef1b5ece | 558 | py | Python | apps/admin/serializers/menu_serializer.py | thebesteric/bright | 4cd8173e7e53115395fcf25bd4db72990fdb6b3f | [
"MIT"
] | null | null | null | apps/admin/serializers/menu_serializer.py | thebesteric/bright | 4cd8173e7e53115395fcf25bd4db72990fdb6b3f | [
"MIT"
] | null | null | null | apps/admin/serializers/menu_serializer.py | thebesteric/bright | 4cd8173e7e53115395fcf25bd4db72990fdb6b3f | [
"MIT"
] | null | null | null | """
菜单序列化
@project: bright
@file: .py
@ide: PyCharm
@auth: Eric Joe
@email: whatisjava@hotmail.com
@build: 2019-09-16 10:12
@info:
"""
from apps.admin.api.common import serializers
from apps.admin.models import Menu
class MenuSerializer(serializers.ModelSerializer):
"""
菜单
"""
class MenuTreeSerializer(serializers.ModelSerializer):
"""
菜单树
"""
children = serializers.RecursiveField(many=True)
| 15.942857 | 54 | 0.646953 | """
菜单序列化
@project: bright
@file: .py
@ide: PyCharm
@auth: Eric Joe
@email: whatisjava@hotmail.com
@build: 2019-09-16 10:12
@info:
"""
from apps.admin.api.common import serializers
from apps.admin.models import Menu
class MenuSerializer(serializers.ModelSerializer):
    """
    Menu serializer: exposes all fields of the Menu model.
    """
    class Meta:
        model = Menu
        fields = "__all__"
class MenuTreeSerializer(serializers.ModelSerializer):
    """
    Menu tree serializer: all Menu fields plus recursively nested children.
    """
    children = serializers.RecursiveField(many=True)
    class Meta:
        model = Menu
        fields = "__all__"
| 0 | 76 | 54 |
395c442ab8a1ecd4b1923873a2d8b649baab6ee0 | 4,669 | py | Python | tests/test_summary.py | cnschema/cdata | 893e2e1e27b61c8551c8b5f5f9bf05ec61490e23 | [
"Apache-2.0"
] | 16 | 2017-06-20T07:59:27.000Z | 2020-12-26T10:47:31.000Z | tests/test_summary.py | cnschema/cdata | 893e2e1e27b61c8551c8b5f5f9bf05ec61490e23 | [
"Apache-2.0"
] | 3 | 2017-07-12T07:15:42.000Z | 2017-08-17T00:38:32.000Z | tests/test_summary.py | cnschema/cdata | 893e2e1e27b61c8551c8b5f5f9bf05ec61490e23 | [
"Apache-2.0"
] | 9 | 2017-06-23T11:04:36.000Z | 2019-09-17T09:30:44.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Path hack
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
from cdata.summary import * # noqa
try:
import unittest2 as unittest
except ImportError:
import unittest
if __name__ == '__main__':
unittest.main()
| 39.567797 | 548 | 0.611694 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Path hack
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
from cdata.summary import * # noqa
try:
import unittest2 as unittest
except ImportError:
import unittest
class SummaryTestCase(unittest.TestCase):
    """Tests for summarize_entity_person: building a one-line Chinese
    biography summary from a schema.org-style person dict."""
    def setUp(self):
        pass
    def test_misc(self):
        # Minimal person: name + accomplishment only.
        person = {
            "name": u"张三",
            "accomplishment": u"三好学生"
        }
        ret = summarize_entity_person(person)
        assert u"张三,主要成就:三好学生。" == ret
        # An art name ("号") is included when present.
        person = {
            "name": u"张三",
            "accomplishment": u"三好学生",
            "artName": [u"张老三"]
        }
        ret = summarize_entity_person(person)
        assert u"张三,号张老三,主要成就:三好学生。" == ret
        # An empty artName list must be treated like a missing field.
        person = {
            "name": u"张三",
            "accomplishment": u"三好学生",
            "artName": []
        }
        ret = summarize_entity_person(person)
        assert u"张三,主要成就:三好学生。" == ret
    def test_real_data(self):
        # Full real-world record (film director): birth place/date,
        # nationality, occupation and notable works all feed the summary.
        person = {
            "description": u"黄健中,1941年12月29日出生于印度尼西亚泗水,国家一级导演、编剧、艺术指导。1979年,黄健中与张铮联合执导爱情片《小花》,该片获得第三届电影百花奖上获最佳故事片奖 。1982年,黄健中独立执导首部电影作品——爱情片《如意》。1985年,凭借家庭剧《良家妇女》获得第二十五届卡罗维·发利国际电影节主要奖[2-3] 。1990年,拍摄警匪剧《龙年警官》,该片获得第十四届大众电影百花奖最佳故事片奖。1991年,拍摄家庭剧《过年》,该片获得第十五届大众电影百花奖最佳故事片奖。1995年,执导剧情片《大鸿米店》[4-6] 。1998年,拍摄爱情片《红娘》,该片获得第二十二届大众电影百花奖最佳故事片奖[7-8] 。2001年,执导古装武侠剧《笑傲江湖》 。2003年,与佐藤纯弥联合执导家庭犯罪剧《世纪末的晚钟》[10-12] 。2005年,国家广播电影电视总局授予黄健中“优秀电影艺术家”称号 。2006年,执导古装历史剧《越王勾践》 。2009年,拍摄历史战争剧《大秦帝国之裂变》,该片获得第25届中国电视金鹰奖[14-17] 。2011年,执导古装剧《大风歌》[18-19] 。2013年,执导古装神话剧《蓬莱八仙》[20-22] 。",
            "birthPlace": u"印度尼西亚泗水",
            "name": u"黄健中",
            "image": u"http://c.hiphotos.baidu.com/baike/w%3D268%3Bg%3D0/sign=9ac8a3ed33adcbef01347900949449e0/aec379310a55b319a1ae185c41a98226cffc1747.jpg",
            "accomplishment": u"第4届东京国际电影节评委奖第11届中国电影金鸡奖最佳导演奖第12届中国电影金鸡奖最佳编剧奖",
            "birthDate": u"1941年12月29日",
            "keywords": [u"导演", u"娱乐人物", u"人物"],
            "nationality": u"中国",
            "alternateName": ["HuangJianzhong", "Huang Jianzhong"],
            "authorOf": u"过年、龙年警官、越王勾践、大风歌",
            "@id": u"d67f8dc6-3775-3e4a-9d67-84bb4007d6d1",
            "@type": ["Person", "Thing"],
            "occupation": u"导演、编剧、艺术指导," # Extra comma for punctuation testing
        }
        ret = summarize_entity_person(person)
        logging.info(ret)
        assert u"黄健中,1941年12月29日出生于印度尼西亚泗水,中国导演、编剧、艺术指导,主要作品:过年、龙年警官、越王勾践、大风歌。" == ret
        # With nationality present the summary reads "中国教授".
        person = {
            "name": u"陈小群",
            "gender": u"女",
            "image": u"http://e.hiphotos.baidu.com/baike/w%3D268%3Bg%3D0/sign=3c89cd72acc379317d68812fd3ffd078/b90e7bec54e736d16b57837c98504fc2d5626979.jpg",
            "description": u"女,抒情女高音歌唱家,现任上海音乐学院声乐系教授、硕士生导师;先后担任文化部举办的国际声乐比赛全国选拔赛、中国音乐家协会举办的“金钟奖”全国声乐比赛、全国大学生艺术歌曲比赛等比赛评委。",
            "@type": ["Person", "Thing"],
            "ethnicGroup": u"汉族",
            "keywords": [u"音乐", u"行业人物", u"歌手", u"教育", u"娱乐人物", u"人物", u"书籍"],
            "nationality": u"中国",
            "@id": u"66548f8a-3f9e-37ca-afb1-e2e96fdb083b",
            "alumniOf": u"上海音乐学院",
            "occupation": u"教授"
        }
        ret = summarize_entity_person(person)
        assert u"陈小群,中国教授。" == ret
        # Test for bracket, unknown birth date, courtesy name
        person = {
            "@id": u"2d8d5ed9-108b-3621-86bd-6c67fbbf0896",
            "@type": u"Person,Thing",
            "accomplishment": u"袭龙城,收复河朔、河套地区,击败单于",
            "birthDate": u"不详",
            "birthPlace": u"河东平阳(今山西临汾市)",
            "courtesyName": u"仲卿",
            "deathDate": u"公元前106年(汉武帝元封五年)",
            "description": u"卫青,字仲卿,河东平阳人",
            "dynasty": u"西汉",
            "ethnicGroup": u"汉族",
            "image": "http://c.hiphotos.baidu.com/baike/w%3D268%3Bg%3D0/sign=dce9ce450f3387449cc5287a6934bec4/d53f8794a4c27d1ef8d6abd118d5ad6eddc43836.jpg",
            "name": u"卫青",
            "posthumousName": u"烈"
        }
        summary = u"卫青,字仲卿,西汉人,出生于河东平阳,主要成就:袭龙城,收复河朔、河套地区,击败单于。"
        assert summary == summarize_entity_person(person)
        # Same record as above but without nationality: plain "教授".
        person = {
            "name": u"陈小群",
            "gender": u"女",
            "image": u"http://e.hiphotos.baidu.com/baike/w%3D268%3Bg%3D0/sign=3c89cd72acc379317d68812fd3ffd078/b90e7bec54e736d16b57837c98504fc2d5626979.jpg",
            "description": u"女,抒情女高音歌唱家,现任上海音乐学院声乐系教授、硕士生导师;先后担任文化部举办的国际声乐比赛全国选拔赛、中国音乐家协会举办的“金钟奖”全国声乐比赛、全国大学生艺术歌曲比赛等比赛评委。",
            "@type": ["Person", "Thing"],
            "ethnicGroup": u"汉族",
            "keywords": [u"音乐", u"行业人物", u"歌手", u"教育", u"娱乐人物", u"人物", u"书籍"],
            "@id": u"66548f8a-3f9e-37ca-afb1-e2e96fdb083b",
            "alumniOf": u"上海音乐学院",
            "occupation": u"教授"
        }
        ret = summarize_entity_person(person)
        logging.info(ret)
        assert u"陈小群,教授。" == ret
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| 6,238 | 20 | 103 |
29aa7f44b2cf1fea1e5b88977bb02bbc80803903 | 3,495 | py | Python | main.py | jmonsalverodilla/heroku_deploy_iris | 133d609f6fc553763027ed533485a5dfd7f7791e | [
"MIT"
] | null | null | null | main.py | jmonsalverodilla/heroku_deploy_iris | 133d609f6fc553763027ed533485a5dfd7f7791e | [
"MIT"
] | null | null | null | main.py | jmonsalverodilla/heroku_deploy_iris | 133d609f6fc553763027ed533485a5dfd7f7791e | [
"MIT"
] | null | null | null | #imports
import pandas as pd
import joblib
from flask import Flask, render_template, session,Markup, redirect, url_for
from flask_bootstrap import Bootstrap
import os
#Production server
#from waitress import serve
#Form validator
from flask_wtf import FlaskForm
from wtforms import FloatField, SubmitField
from wtforms.validators import NumberRange,InputRequired
###############CODE#####################
loaded_model = joblib.load("./obj/knn_model.pkl")
#Let's open the file that contains the plotly div
plotly_file = open("static/div_html.txt", "r")
div = plotly_file.read()
plotly_file.close()
#Flask app
app = Flask(__name__)
app.config.from_mapping(SECRET_KEY = "DontTellAnyone")
want_to_validate = "NO"
if want_to_validate =="YES":
Bootstrap(app)
index_template = 'index_complex.html'
else:
index_template = 'index.html'
#le indicamos a flask la url que debe lanzar con la función index
@app.route('/index', methods=['GET','POST'])
@app.route('/', methods=['GET','POST'])
@app.route('/result')
@app.errorhandler(404)
if __name__=="__main__":
port = os.environ.get("PORT")
app.run(debug=False, host="0.0.0.0", port=3000)
#serve(app, host ="0.0.0.0", port=port) # If I use serve as my production web server, I need to change my Dockerfile to CMD ["python","main.py"] | 37.580645 | 146 | 0.640343 | #imports
import pandas as pd
import joblib
from flask import Flask, render_template, session,Markup, redirect, url_for
from flask_bootstrap import Bootstrap
import os
#Production server
#from waitress import serve
#Form validator
from flask_wtf import FlaskForm
from wtforms import FloatField, SubmitField
from wtforms.validators import NumberRange,InputRequired
###############CODE#####################
# Load the pre-trained KNN classifier used by the /result endpoint.
loaded_model = joblib.load("./obj/knn_model.pkl")
#Let's open the file that contains the plotly div
plotly_file = open("static/div_html.txt", "r")
div = plotly_file.read()
plotly_file.close()
#Flask app
# NOTE(review): hard-coded SECRET_KEY should come from the environment in
# production.
app = Flask(__name__)
app.config.from_mapping(SECRET_KEY = "DontTellAnyone")
# Toggle between the WTForms/Bootstrap-validated template and the plain one.
want_to_validate = "NO"
if want_to_validate =="YES":
    Bootstrap(app)
    index_template = 'index_complex.html'
else:
    index_template = 'index.html'
class LoginForm(FlaskForm):
    """Measurement-entry form for the four iris features, each in [0, 1000]."""
    a = FloatField('SepalLengthCm', default=1, validators=[InputRequired(),
                   NumberRange(min=0, max=1000, message='SepalLengthCm must be between 0 and 1000')])
    b = FloatField('SepalWidthCm', default=1, validators=[InputRequired(),
                   NumberRange(min=0, max=1000, message='SepalWidthCm must be between 0 and 1000')])
    c = FloatField('PetalLengthCm', default=1, validators=[InputRequired(),
                   NumberRange(min=0, max=1000, message='PetalLengthCm must be between 0 and 1000')])
    # Fixed copy-paste bug: this message previously said 'PetalLengthCm'.
    d = FloatField('PetalWidthCm', default=1, validators=[InputRequired(),
                   NumberRange(min=0, max=1000, message='PetalWidthCm must be between 0 and 1000')])
    submit = SubmitField(label=('Predict'))
# Tell Flask which URLs are handled by the index view.
@app.route('/index', methods=['GET','POST'])
@app.route('/', methods=['GET','POST'])
def index():
    """Render the landing page; on a valid POST stash the four measurements
    in the session and redirect to /result."""
    form = LoginForm()
    # Validate once and reuse the result: the original called
    # validate_on_submit() twice, running every validator a second time.
    is_submitted = form.validate_on_submit()
    print(is_submitted)
    if is_submitted:
        print('Form Successfully Submitted!')
        session['a'] = form.a.data
        session['b'] = form.b.data
        session['c'] = form.c.data
        session['d'] = form.d.data
        return redirect(url_for('result'))
    # GET or failed validation: show the form (with any errors) again.
    print(form.errors)
    return render_template(index_template, form = form,
                           plotly_figure = Markup(div))
@app.route('/result')
def result():
    """Predict the iris class from the measurements stored in the session
    and render the prediction with its probability."""
    features = [[session['a'], session['b'], session['c'], session['d']]]
    print(features)
    # Single-row dataframe matching the model's training columns.
    frame = pd.DataFrame(features, columns=["a", "b", "c", "d"]).astype('float')
    print(frame.dtypes)
    predicted_class = loaded_model.predict(frame)[0]
    predicted_prob = round(loaded_model.predict_proba(frame).max() * 100, 1)
    print(predicted_class); print(predicted_prob)
    return render_template("result.html",
                           output='{} with probability {} %'.format(predicted_class, predicted_prob))
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page with the proper status code."""
    # Return the status explicitly: a bare render_template would send 200.
    return render_template('404.html'), 404
# Development entry point.
# NOTE(review): PORT is read from the environment but app.run hard-codes
# port 3000 -- likely unintended; confirm before deploying.
if __name__=="__main__":
    port = os.environ.get("PORT")
    app.run(debug=False, host="0.0.0.0", port=3000)
    #serve(app, host ="0.0.0.0", port=port) # If I use serve as my production web server, I need to change my Dockerfile to CMD ["python","main.py"]
c08c0d3462978b99bc09178299bce17ee7f56f79 | 978 | py | Python | 7.functions/challenge1_rouillonh.py | rouillonh/ChallengePython | 7e7d9b69f60394fd1f00a6a4aa32f97de95b1b92 | [
"MIT"
] | null | null | null | 7.functions/challenge1_rouillonh.py | rouillonh/ChallengePython | 7e7d9b69f60394fd1f00a6a4aa32f97de95b1b92 | [
"MIT"
] | null | null | null | 7.functions/challenge1_rouillonh.py | rouillonh/ChallengePython | 7e7d9b69f60394fd1f00a6a4aa32f97de95b1b92 | [
"MIT"
] | null | null | null | import random
print("\tWelcome to the Python Dice App")
flag = True
while flag:
a = dice_sides()
b = dice_number()
c = roll_dice(b,a)
sum_dice(c)
flag = roll_again()
| 25.076923 | 70 | 0.580777 | import random
def dice_sides():
    """Prompt for the number of sides per die and return it as an int."""
    return int(input("How many sides would you like on your dice: "))
def dice_number():
    """Prompt for how many dice to roll and return it as an int."""
    return int(input("How many dice would you like to roll: "))
def roll_dice(a,b):
    """Roll `a` dice of `b` sides each; print each result and return the list."""
    print("You rolled ",a," ",b ," sided dice.")
    # Same RNG call order as a manual append loop, so seeded runs match
    dados = [random.randint(1,b) for _ in range(a)]
    print("\n-----Results are as followed-----")
    for valor in dados:
        print("\t",valor)
    return dados
def sum_dice(c):
    """Print and return the total of the rolled dice values in `c`.

    Improvements: uses the built-in sum() instead of a manual accumulator
    loop, and returns the total (the original returned None; callers that
    ignored the return value are unaffected).
    """
    suma = sum(c)
    print("The total value of your roll is ",suma)
    return suma
def roll_again():
    """Ask whether to play another round; True for 'y', False for 'n'.

    Bug fix: the original returned None (falsy) on any other input, which
    silently ended the game without the goodbye message. Now the prompt
    repeats until a valid answer is given.
    """
    while True:
        x = input("Would you like to roll again (y/n): ").lower()
        if x == "y":
            return True
        if x == "n":
            print("\nThank you for using the Python Dice App.")
            return False
        print("Please answer 'y' or 'n'.")
# Main game loop: ask for a dice configuration, roll, report the total,
# then offer another round until the player declines.
print("\tWelcome to the Python Dice App")
flag = True
while flag:
    a = dice_sides()
    b = dice_number()
    # NOTE: roll_dice(count, sides) — b dice, each with a sides
    c = roll_dice(b,a)
    sum_dice(c)
    flag = roll_again()
| 680 | 0 | 110 |
304fddc7a883fd0b9750d92b36e2eae733ea7c41 | 749 | py | Python | gas-dispersion/gas-dispersion-v3.py | j-rheinheimer/Problems-in-Computational-Physics | 3bf8d618a1837fd39524dce23f2d60ede1d1971f | [
"MIT"
] | null | null | null | gas-dispersion/gas-dispersion-v3.py | j-rheinheimer/Problems-in-Computational-Physics | 3bf8d618a1837fd39524dce23f2d60ede1d1971f | [
"MIT"
] | null | null | null | gas-dispersion/gas-dispersion-v3.py | j-rheinheimer/Problems-in-Computational-Physics | 3bf8d618a1837fd39524dce23f2d60ede1d1971f | [
"MIT"
] | null | null | null | from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
# Internal energy of an ideal monatomic gas: U = (3/2) * k_B * T * N
constant = (3/2)*1.38*10**(-23)

# Index 0 is a zero placeholder; indices 1..3 carry the actual values.
particles_number = list(range(4))
temperature = list(range(4))

# energy[x][y]: x = particle-count row, y = temperature column
energy = [[constant*temperature[y]*particles_number[x] if x and y else 0
           for y in range(4)]
          for x in range(4)]

print(temperature)
print(particles_number)
print(energy)

# (A 3D scatter of energy vs. N and T was scaffolded with matplotlib in the
# original script but left commented out.)
| 24.966667 | 69 | 0.672897 | from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
constant = (3/2)*1.38*10**(-23)
particles_number = [0 for x in range(4)]
temperature = [0 for x in range(4)]
# X = rows, Y = columns
energy = ([[0 for y in range(4)] for x in range(4)])
for x in range(1, 4): # row
particles_number[x] = x
for y in range(1, 4): # column
temperature[y] = y
internal_energy = constant*temperature[y]*particles_number[x]
energy[x][y] = internal_energy
print(temperature)
print(particles_number)
print(energy)
# print(energy)
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.set_xlabel('Particles number')
# ax.set_ylabel('Temperature')
# ax.set_zlabel('Internal energy')
# ax.scatter()
| 0 | 0 | 0 |
6fa2c0f1f5245c6c17d6203b1c06c94d8000d974 | 25,138 | py | Python | components/scream/cime_config/eamxx_buildnml_impl.py | ambrad/scream | 52da60f65e870b8a3994bdbf4a6022fdcac7cab5 | [
"BSD-3-Clause"
] | null | null | null | components/scream/cime_config/eamxx_buildnml_impl.py | ambrad/scream | 52da60f65e870b8a3994bdbf4a6022fdcac7cab5 | [
"BSD-3-Clause"
] | null | null | null | components/scream/cime_config/eamxx_buildnml_impl.py | ambrad/scream | 52da60f65e870b8a3994bdbf4a6022fdcac7cab5 | [
"BSD-3-Clause"
] | null | null | null | import os, sys, copy, re
import xml.etree.ElementTree as ET
_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","cime")
sys.path.append(_CIMEROOT)
from CIME.utils import expect
###############################################################################
class MockCase(object):
###############################################################################
    """
    Helper class, to generate a cime case to be fed to doctest tests.
    Serves values out of the key->value dict it was built from, mimicking
    the subset of the CIME Case API this module uses (get_value).
    """

    def __init__(self, kv_dict):
        # Copy the input dict so later caller-side mutations don't leak in
        self._kv_dict = dict(kv_dict)

    def get_value(self, key):
        """Return the value stored for `key`, or None if `key` is unknown."""
        if key in self._kv_dict:
            return self._kv_dict[key]
        else:
            return None
###############################################################################
def parse_string_as_list (string):
###############################################################################
    """
    Takes a string representation of nested list and creates
    a nested list of string. For instance, with
    s = "(a,b,(c,d),e)
    l = parse_string_as_list
    we would have l = ['a', 'b', '(c,d)', 'e']
    >>> s = '(a,(b,c))'
    >>> l = parse_string_as_list(s)
    >>> len(l)
    2
    >>> l[0] == 'a'
    True
    >>> l[1] == '(b,c)'
    True
    >>> ###### NOT STARTING/ENDING WITH PARENTHESES #######
    >>> s = '(a,b,'
    >>> l = parse_string_as_list(s)
    Traceback (most recent call last):
    ValueError: Input string must start with '(' and end with ')'.
    >>> ################ UNMATCHED PARENTHESES ##############
    >>> s = '(a,(b)'
    >>> l = parse_string_as_list(s)
    Traceback (most recent call last):
    ValueError: Unmatched parentheses in input string
    """
    if string[0]!='(' or string[-1]!=')':
        raise ValueError ("Input string must start with '(' and end with ')'.")
    # First nested '(' (search from index 1, skipping the outer one) and
    # last nested ')' (excluding the final outer one)
    sub_open = string.find('(',1)
    sub_close = string.rfind(')',0,-1)
    if not (sub_open>=0)==(sub_close>=0):
        raise ValueError ("Unmatched parentheses in input string")
    # Prevent empty string to pollute s.split()
    my_split = lambda str : [s for s in str.split(',') if s.strip() != '']
    if sub_open>=0:
        # NOTE(review): everything between the first '(' and the last ')' is
        # kept as ONE nested item, and string[1:sub_open-1] assumes a comma
        # immediately precedes the '(' — confirm inputs never have multiple
        # sibling sub-lists at the same level.
        l = []
        l.extend(my_split(string[1:sub_open-1]))
        l.append(string[sub_open:sub_close+1])
        l.extend(my_split(string[sub_close+2:-1]))
    else:
        l = my_split(string[1:-1])
    return l
###############################################################################
def is_array_type (name):
###############################################################################
    """
    Whether the type string `name` has the form 'array(T)'.
    >>> is_array_type('array(T)')
    True
    >>> is_array_type('array')
    False
    >>> is_array_type('')
    False
    """
    # Bug fix: the original `name[-1]==")"` raised IndexError on an empty
    # string; startswith/endswith simply return False for too-short inputs.
    return name.startswith("array(") and name.endswith(")")
###############################################################################
def array_elem_type (name):
###############################################################################
    """
    Given a type string 'array(T)', return the element type 'T'.
    Errors out (via expect) when `name` is not an array type string.
    >>> print(array_elem_type('array(T)'))
    T
    >>> print(array_elem_type('array()'))
    <BLANKLINE>
    """
    expect (is_array_type(name),
            "Error! Type '{}' does not represent an array.".format(name))
    # Strip the 'array(' prefix and the closing ')'
    prefix = "array("
    return name[len(prefix):-1]
###############################################################################
def find_node (root,name):
###############################################################################
    """
    Return the first element in `root`'s subtree (including `root` itself)
    whose tag equals `name`, searching depth-first in document order, or
    None when there is no match.
    WARNING: uniqueness is not checked; the first match wins.
    >>> import xml.etree.ElementTree as ET
    >>> root = ET.fromstring('<r><a>1</a><b><c>2</c></b></r>')
    >>> find_node(root,'d')==None
    True
    >>> find_node(root,'c').text
    '2'
    """
    # Element.iter(name) walks the subtree depth-first in document order,
    # yielding `root` first when its tag matches — exactly the traversal
    # the explicit recursive search performed.
    return next(root.iter(name), None)
###############################################################################
def get_child (root,name,remove=False,must_exist=True):
###############################################################################
    """
    Return the child of `root` named `name`.
    Exactly one such child must exist, unless must_exist=False, in which
    case None is returned when the child is missing. With remove=True the
    child is also detached from `root` before being returned.
    """
    matches = root.findall(name)
    expect (len(matches)==1 or must_exist==False,
            "There must be exactly one {} entry inside {}".format(name,root.tag))
    child = matches[0] if matches else None
    if remove and child is not None:
        root.remove(child)
    return child
###############################################################################
def has_child (root,name):
###############################################################################
    """
    Whether `root` has a *direct* child element named `name`.
    >>> import xml.etree.ElementTree as ET
    >>> root = ET.fromstring('<r><a>1</a><b><c>2</c></b></r>')
    >>> has_child(root,'c')
    False
    >>> has_child(root,'b')
    True
    """
    return root.find(name) is not None
###############################################################################
def refine_type(entry, force_type=None):
###############################################################################
    """
    Try to convert the text entry to the appropriate type based on its contents.
    >>> e = '(a,b)'
    >>> refine_type(e)==e
    True
    >>> e = '[a,b]'
    >>> refine_type(e)==e
    True
    >>> e = 'a,b'
    >>> refine_type(e)==['a','b']
    True
    >>> e = 'true,falsE'
    >>> refine_type(e)==[True,False]
    True
    >>> e = '1'
    >>> refine_type(e,force_type='real')==1.0
    True
    >>> e = '1,b'
    >>> refine_type(e)==[1,'b',True]
    Traceback (most recent call last):
    CIME.utils.CIMEError: ERROR: List '1,b' has inconsistent types inside
    >>> e = '1.0'
    >>> refine_type(e,force_type='my_type')
    Traceback (most recent call last):
    NameError: Bad force_type: my_type
    >>> e = 'true,falsE'
    >>> refine_type(e,'logical')
    Traceback (most recent call last):
    CIME.utils.CIMEError: ERROR: Error! Invalid type 'logical' for an array.
    >>> refine_type(e,'array(logical)')
    [True, False]
    """
    # We want to preserve strings representing lists
    if (entry[0]=="(" and entry[-1]==")") or \
       (entry[0]=="[" and entry[-1]=="]") :
        expect (force_type is None or force_type=="string",
                "Error! Invalid force type '{}' for a string representing a list"
                .format(force_type))
        return entry
    # Comma-separated text becomes a Python list; each element is refined
    # recursively, and all elements must end up with the same type
    if "," in entry:
        expect (force_type is None or is_array_type(force_type),
                "Error! Invalid type '{}' for an array.".format(force_type))
        elem_type = force_type if force_type is None else array_elem_type(force_type);
        result = [refine_type(item.strip(), force_type=elem_type) for item in entry.split(",") if item.strip() != ""]
        expected_type = type(result[0])
        for item in result[1:]:
            expect(isinstance(item, expected_type),
                   "List '{}' has inconsistent types inside".format(entry))
        return result
    # A scalar with an explicitly requested type: convert (and wrap in a
    # one-element list if an array type was requested)
    if force_type:
        try:
            elem_type = force_type if not is_array_type(force_type) else array_elem_type(force_type)
            if elem_type == "logical":
                # Accept TRUE/FALSE (any case) as well as 0/1 integers
                if entry.upper() == "TRUE":
                    elem = True
                elif entry.upper() == "FALSE":
                    elem = False
                else:
                    elem = bool(int(entry))
            elif elem_type == "integer":
                elem = int(entry)
            elif elem_type == "real":
                elem = float(entry)
            elif elem_type == "string":
                elem = str(entry)
            else:
                raise NameError ("Bad force_type: {}".format(force_type))
            if is_array_type(force_type):
                return [elem]
            else:
                return elem
        except ValueError:
            raise ValueError ("Could not use '{}' as type '{}'".format(entry, force_type))
    # No requested type: guess, trying logical, then integer, then real,
    # and falling back to string
    if entry.upper() == "TRUE":
        return True
    elif entry.upper() == "FALSE":
        return False
    try:
        v = int(entry)
        return v
    except ValueError:
        pass
    try:
        v = float(entry)
        return v
    except ValueError:
        return entry
###############################################################################
def derive_type(entry):
###############################################################################
    """
    Try to determine the type that the input string is representing
    >>> derive_type('1')
    'integer'
    >>> derive_type('1.0')
    'real'
    >>> derive_type('one')
    'string'
    >>> derive_type('one,two')
    'array(string)'
    >>> derive_type('true,FALSE')
    'array(logical)'
    """
    refined_value = refine_type(entry)
    # For a list, the scalar type is deduced from the first element
    if isinstance(refined_value, list):
        elem_value = refined_value[0]
    else:
        elem_value = refined_value
    # NOTE: bool must be tested before int, since bool is a subclass of int
    if isinstance(elem_value, bool):
        elem_type = "logical"
    elif isinstance(elem_value, int):
        elem_type = "integer"
    elif isinstance(elem_value, float):
        elem_type = "real"
    elif isinstance(elem_value, str):
        elem_type = "string"
    else:
        # Bug fix: the original `raise(UnrecognizedType, "...")` raised a
        # tuple (a TypeError in Python 3, and UnrecognizedType was never
        # defined) and was followed by an unreachable `return None`.
        raise ValueError("Couldn't derive type of '{}'".format(entry))
    if isinstance(refined_value,list):
        return "array(" + elem_type + ")"
    else:
        return elem_type
###############################################################################
def check_value(elem, value):
###############################################################################
    """
    Check that a parameter's value is in the valid list
    >>> import xml.etree.ElementTree as ET
    >>> xml = '''
    ... <a type="integer" valid_values="1,2">1</a>
    ... '''
    >>> root = ET.fromstring(xml)
    >>> check_value(root,'1.0')
    Traceback (most recent call last):
    ValueError: Could not use '1.0' as type 'integer'
    >>> check_value(root,'3')
    Traceback (most recent call last):
    CIME.utils.CIMEError: ERROR: Invalid value '3' for element 'a'. Value not in the valid list ('[1, 2]')
    >>> xml = '''
    ... <a type="real" constraints="ge 0">1</a>
    ... '''
    >>> root = ET.fromstring(xml)
    >>> check_value(root,'-1')
    Traceback (most recent call last):
    CIME.utils.CIMEError: ERROR: Value '-1.0' for entry 'a' violates constraint '-1.0 >= 0.0'
    >>> xml = '''
    ... <a type="real" constraints="mod 2 eq 0">1</a>
    ... '''
    >>> root = ET.fromstring(xml)
    >>> check_value(root,'2')
    Traceback (most recent call last):
    CIME.utils.CIMEError: ERROR: Cannot evaluate constraint '2.0 mod 2 eq 0' for entry 'a'
    Modulo constraint only makes sense for integer parameters.
    >>> xml = '''
    ... <a constraints="gt 0; le 5">1</a>
    ... '''
    >>> root = ET.fromstring(xml)
    >>> check_value(root,'2')
    >>> check_value(root,'6')
    Traceback (most recent call last):
    CIME.utils.CIMEError: ERROR: Value '6' for entry 'a' violates constraint '6 <= 5'
    """
    # Refine the value according to the declared type (or deduce one)
    v = value
    if "type" in elem.attrib.keys():
        vtype = elem.attrib["type"]
        v = refine_type(v,force_type=vtype)
        expect (v is not None,
                "Error! Value '{}' for element '{}' does not satisfy the constraint type={}"
                .format(value,elem.tag,vtype) +
                " NOTE: this error should have been caught earlier! Please, contact developers.")
    else:
        # If no 'type' attribute present, deduce the type and refine
        vtype = derive_type(v)
        v = refine_type(v,force_type=vtype)
    # Enumerated values: the refined value must be in the declared list
    if "valid_values" in elem.attrib.keys():
        valids_str = elem.attrib["valid_values"]
        valids = [refine_type(item.strip(), force_type=vtype) for item in valids_str.split(",")]
        expect(v in valids,
               "Invalid value '{}' for element '{}'. Value not in the valid list ('{}')".format(value, elem.tag, valids))
    # Relational constraints, ';'-separated, each of the form 'op rhs'
    if "constraints" in elem.attrib.keys():
        expect ("type" not in elem.attrib.keys() or not is_array_type(elem.attrib["type"]),
                "Attribute 'constraints' only available for non-array parameters.")
        constraints = elem.attrib["constraints"].split(";")
        for c in constraints:
            # The split should return a list [ '', s1, s2, ..., sN, rhs ],
            # where sK is 'None' if opK is not found, and s=opK if opK is found.
            # NOTE: we don't use math symbols, since XML doesn't like < or > inside
            # strings. For consistency, we use worded ops for all operators:
            # 'lt': < 'gt': > 'ne': != 'mod': %
            # 'le': <= 'ge': >= 'eq': ==
            # We use list comprehension to filter out 'None' and empty strings
            pattern = "(ge)|(gt)|(lt)|(le)|(eq)|(ne)|(mod)"
            tokens = [i.strip() for i in re.split(pattern,c,maxsplit=1) if i and i.strip()]
            expect(len(tokens)==2,
                   "Invalid constraint syntax for entry '{}'.\n".format(elem.tag) +
                   " Correct syntax: 'op val', to be interpreted as '$param $op val'.\n"
                   " Constraint found: '{}'".format(c))
            lhs = v
            op = tokens[0]
            # Dispatch on the worded operator; rhs is refined to the same type as v
            if op=="ne":
                rhs = refine_type(tokens[1],force_type=vtype)
                expect (v!=rhs,
                        "Value '{}' for entry '{}' violates constraint '{} != {}'"
                        .format(v,elem.tag,v,rhs))
            elif op=="le":
                rhs = refine_type(tokens[1],force_type=vtype)
                expect (v<=rhs,
                        "Value '{}' for entry '{}' violates constraint '{} <= {}'"
                        .format(v,elem.tag,v,rhs))
            elif op=="lt":
                rhs = refine_type(tokens[1],force_type=vtype)
                expect (v<rhs,
                        "Value '{}' for entry '{}' violates constraint '{} < {}'"
                        .format(v,elem.tag,v,rhs))
            elif op=="ge":
                rhs = refine_type(tokens[1],force_type=vtype)
                expect (v>=rhs,
                        "Value '{}' for entry '{}' violates constraint '{} >= {}'"
                        .format(v,elem.tag,v,rhs))
            elif op=="gt":
                rhs = refine_type(tokens[1],force_type=vtype)
                expect (v>rhs,
                        "Value '{}' for entry '{}' violates constraint '{} > {}'"
                        .format(v,elem.tag,v,rhs))
            elif op=="mod":
                # Modular-arithmetic constraint: 'mod M eq R' or 'mod M ne R'
                expect (vtype=="integer",
                        "Cannot evaluate constraint '{} mod {}' for entry '{}'\n"
                        .format(lhs,tokens[1],elem.tag) +
                        "Modulo constraint only makes sense for integer parameters.")
                # Use list comprehension to filter out None (for the cmp op not found)
                rhs_tokens = [i for i in re.split("(eq)|(ne)",tokens[1]) if i]
                expect (len(rhs_tokens)==3,
                        "Modular arithmetic constraint syntax is '% M op rhs', with op being 'eq' or 'ne'"
                        " String found: {}".format(tokens[1]))
                mod = int(rhs_tokens[0])
                cmp = rhs_tokens[1]
                expect (cmp=="eq" or cmp=="ne",
                        "Modular arithmetic constraint syntax is '% M op rhs', with op being 'eq' or 'ne'"
                        " String found: {}".format(tokens[1]))
                rhs = int(rhs_tokens[2])
                if cmp=="eq":
                    expect ( (v % mod)==rhs, "Value '{}' for entry '{}' violates constraint {}{}".format(v,elem.tag,v,c))
                else:
                    expect ( (v % mod)!=rhs, "Value '{}' for entry '{}' violates constraint {}{}".format(v,elem.tag,v,c))
###############################################################################
def check_all_values(root):
###############################################################################
    """
    Recursively validate every leaf value in the xml tree against its
    metadata. Leaves lacking a 'type' attribute get one deduced from
    their value and stored on the node as a side effect.
    >>> xml_str = '''
    ... <root>
    ...     <prop1>1</prop1>
    ...     <prop2>1.0</prop2>
    ... </root>
    ... '''
    >>> import xml.etree.ElementTree as ET
    >>> xml = ET.fromstring(xml_str)
    >>> check_all_values(xml)
    >>> print (get_child(xml,"prop1").attrib["type"])
    integer
    >>> print (get_child(xml,"prop2").attrib["type"])
    real
    """
    # Internal node: recurse; only leaves carry values
    if len(root) > 0:
        for child in root:
            check_all_values(child)
        return
    # Leaf node: deduce/store the type if missing, then validate the value
    if "type" not in root.attrib:
        root.attrib["type"] = derive_type(root.text)
    check_value(root, root.text)
###############################################################################
def resolve_inheritance (root,elem):
###############################################################################
    """
    If elem inherits from another node within $root, this function adds all
    children of its "parent" to elem. If parent also inherits, first
    resolve parent recursively. If parent is not found, throw an exception
    >>> xml = '''
    ... <my_root>
    ...     <base>
    ...         <a>2</a>
    ...     </base>
    ...     <derived inherit="base">
    ...     </derived>
    ... </my_root>
    ... '''
    >>> import xml.etree.ElementTree as ET
    >>> root = ET.fromstring(xml)
    >>> d = get_child(root,'derived')
    >>> len(d)
    0
    >>> resolve_inheritance(root,d)
    >>> len(d)
    1
    >>> get_child(d,'a').text
    '2'
    """
    if "inherit" in elem.attrib.keys():
        parent_name = elem.attrib["inherit"]
        parent = find_node(root,parent_name)
        # Bug fix: the original checked `elem is not None` here, which is
        # always true (we just read elem.attrib); the lookup result that
        # must be validated is `parent`.
        expect (parent is not None,
                "Error! Parent {} of {} not found within root {}"
                .format(parent_name,elem.tag,root.tag))
        # Make sure the parent is fully resolved
        resolve_inheritance(root,parent)
        del elem.attrib["inherit"]
        for entry in parent:
            # Add the parent's default only if this element does not
            # have a more specialized version
            if not has_child(elem,entry.tag):
                new_entry = copy.deepcopy(entry)
                elem.append(new_entry)
    for child in elem:
        resolve_inheritance(root,child)
###############################################################################
def resolve_all_inheritances (root):
###############################################################################
    """
    Resolve the 'inherit' attribute for every entry of the root tree, by
    delegating each top-level child to resolve_inheritance.
    """
    for child in root:
        resolve_inheritance(root, child)
###############################################################################
def get_valid_selectors(xml_root):
###############################################################################
    """
    Extract (and remove) the <selectors> node from the xml root, verifying
    its integrity, and return the selectors as a dict mapping
    name -> (case_env, regex). A selector with no 'regex' attribute
    defaults to '(.*)', i.e. the whole env value is captured.
    >>> xml = '''
    ... <namelist_defaults>
    ...     <selectors>
    ...         <selector name="S1" case_env="ENV1"/>
    ...         <selector name="S2" case_env="ENV2"/>
    ...     </selectors>
    ... </namelist_defaults>
    ... '''
    >>> import xml.etree.ElementTree as ET
    >>> root = ET.fromstring(xml)
    >>> selectors = get_valid_selectors(root)
    >>> len(selectors)
    2
    """
    # Detach the <selectors> element from the root, then walk its entries
    selectors_node = get_child(xml_root,"selectors",remove=True)

    result = {}
    for entry in selectors_node:
        expect(entry.tag == "selector",
               "Expected selector tag, not {}".format(entry.tag))

        env_name  = entry.attrib["case_env"]
        # Missing 'regex' attribute -> just grab the whole env value
        env_regex = entry.attrib.get("regex", "(.*)")

        result[entry.attrib["name"]] = (env_name, env_regex)

    return result
###############################################################################
def gen_group_processes (ap_names_str, atm_procs_defaults):
###############################################################################
    """
    Given a (possibly nested) string representation of an atm group,
    generates the corresponding atm processes as XML nodes.

    ap_names_str: string like "(ap1,(ap2,ap3))" naming the group members.
    atm_procs_defaults: XML element holding the per-process default nodes.
    Returns a throwaway element (tag '__APG__') whose children are the
    generated process nodes, in order.
    """
    group = ET.Element("__APG__")
    ap_names_list = parse_string_as_list(ap_names_str)
    for ap in ap_names_list:
        # The current ap can be itself a group if either:
        #  - ap = "(ap1,ap2,...,apXYZ)", with each ap possibly itself a group string.
        #    This group is built on the fly based on the building blocks specs.
        #  - ap is declared in the XML defaults as an atm proc group (which must store
        #    the 'atm_procs_list' child, with the string representation of the group.
        if ap[0]=='(':
            # Create the atm proc group
            proc = gen_atm_proc_group(ap,atm_procs_defaults)
        else:
            # Get defaults (deep copy, so the defaults tree is never mutated)
            proc = copy.deepcopy(get_child(atm_procs_defaults,ap))
            # Check if this pre-defined proc is itself a group, and, if so,
            # build all its sub-processes
            ptype = get_child(proc,"Type",must_exist=False)
            if ptype is not None and ptype.text=="Group":
                # This entry of the group is itself a group, with pre-defined
                # defaults. Let's add its entries to it
                sub_group_procs = get_child(proc,"atm_procs_list").text
                proc.extend(gen_group_processes(sub_group_procs,atm_procs_defaults))
        # Append subproc to group
        group.append(proc)
    return group
###############################################################################
def gen_atm_proc_group(atm_procs_list, atm_procs_defaults):
###############################################################################
    """
    Given a (possibly nested) list of atm procs names, and the defaults
    section for each atm proc, builds an XML node containing the tree
    representing the atm process group, with nodes including APG parameters
    as well as one sub-node for each atm proc in the group
    >>> xml = '''
    ... <ap>
    ...     <atm_proc_group>
    ...         <prop1>1</prop1>
    ...         <atm_procs_list>THE_LIST</atm_procs_list>
    ...     </atm_proc_group>
    ...     <ap1>
    ...     </ap1>
    ...     <ap2>
    ...         <prop1>2</prop1>
    ...         <prop2>3</prop2>
    ...     </ap2>
    ...     <my_group inherit="atm_proc_group">
    ...         <atm_procs_list>(p1,ap2)</atm_procs_list>
    ...     </my_group>
    ... </ap>
    ... '''
    >>> import xml.etree.ElementTree as ET
    >>> defaults = ET.fromstring(xml)
    >>> ap_list = '(ap1,(ap2,ap1))'
    >>> apg = gen_atm_proc_group(ap_list,defaults)
    >>> get_child(apg,'atm_procs_list').text==ap_list
    True
    >>>
    >>> has_child(apg,'group.ap2_ap1.')
    True
    >>> get_child(apg,'prop1').text=="1"
    True
    """
    # Set defaults from atm_proc_group
    group = ET.Element("__APG__")
    group.attrib["inherit"] = "atm_proc_group"
    resolve_inheritance(atm_procs_defaults,group)
    get_child(group,"atm_procs_list").text = atm_procs_list
    # Create processes
    group_procs = gen_group_processes (atm_procs_list, atm_procs_defaults)
    # Append procs and generate name for the group.
    # NOTE: the name of a 'generic' group is 'group.AP1_AP2_..._APN.'
    names = []
    for c in group_procs:
        names.append(c.tag)
        group.append(c)
    group.tag = "group." + '_'.join(names) + '.'
    return group
| 35.256662 | 122 | 0.499244 | import os, sys, copy, re
import xml.etree.ElementTree as ET
_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","cime")
sys.path.append(_CIMEROOT)
from CIME.utils import expect
###############################################################################
class MockCase(object):
###############################################################################
    """
    Helper class, to generate a cime case to be fed to doctest tests.
    Serves values out of the key->value dict it was built from,
    presumably standing in for the CIME Case API's get_value.
    """
    def __init__(self, kv_dict):
        # Copy the input dict so later caller-side mutations don't leak in
        self._kv_dict = dict(kv_dict)
    def get_value(self, key):
        """Return the value stored for `key`, or None if `key` is unknown."""
        if key in self._kv_dict:
            return self._kv_dict[key]
        else:
            return None
###############################################################################
def parse_string_as_list (string):
###############################################################################
"""
Takes a string representation of nested list and creates
a nested list of stirng. For instance, with
s = "(a,b,(c,d),e)
l = parse_string_as_list
we would have l = ['a', 'b', '(c,d)', 'e']
>>> s = '(a,(b,c))'
>>> l = parse_string_as_list(s)
>>> len(l)
2
>>> l[0] == 'a'
True
>>> l[1] == '(b,c)'
True
>>> ###### NOT STARTING/ENDING WITH PARENTHESES #######
>>> s = '(a,b,'
>>> l = parse_string_as_list(s)
Traceback (most recent call last):
ValueError: Input string must start with '(' and end with ')'.
>>> ################ UNMATCHED PARENTHESES ##############
>>> s = '(a,(b)'
>>> l = parse_string_as_list(s)
Traceback (most recent call last):
ValueError: Unmatched parentheses in input string
"""
if string[0]!='(' or string[-1]!=')':
raise ValueError ("Input string must start with '(' and end with ')'.")
sub_open = string.find('(',1)
sub_close = string.rfind(')',0,-1)
if not (sub_open>=0)==(sub_close>=0):
raise ValueError ("Unmatched parentheses in input string")
# Prevent empty string to pollute s.split()
my_split = lambda str : [s for s in str.split(',') if s.strip() != '']
if sub_open>=0:
l = []
l.extend(my_split(string[1:sub_open-1]))
l.append(string[sub_open:sub_close+1])
l.extend(my_split(string[sub_close+2:-1]))
else:
l = my_split(string[1:-1])
return l
###############################################################################
def is_array_type (name):
###############################################################################
    """
    Whether the type string `name` has the form 'array(T)'.
    >>> is_array_type('array(T)')
    True
    >>> is_array_type('array')
    False
    >>> is_array_type('')
    False
    """
    # Bug fix: the original `name[-1]==")"` raised IndexError on an empty
    # string; startswith/endswith simply return False for too-short inputs.
    return name.startswith("array(") and name.endswith(")")
###############################################################################
def array_elem_type (name):
###############################################################################
"""
>>> print(array_elem_type('array(T)'))
T
>>> print(array_elem_type('array()'))
<BLANKLINE>
"""
expect (is_array_type(name),
"Error! Type '{}' does not represent an array.".format(name))
return name[6:-1]
###############################################################################
def find_node (root,name):
###############################################################################
"""
Finds node with given name inside the root element, with a depth-search
strategy (i.e., follow children before siblings).
WARNING: this function does not check for uniqueness. If there are
multiple matches, the first match is returned.
>>> xml = '''
... <my_root>
... <a>1</a>
... <b>
... <c>2</c>
... </b>
... </my_root>
... '''
>>> import xml.etree.ElementTree as ET
>>> root = ET.fromstring(xml)
>>> find_node(root,'d')==None
True
>>> find_node(root,'c').text
'2'
"""
if root.tag==name:
return root
for elem in root:
found = find_node(elem,name)
if found is not None:
return found
return None
###############################################################################
def get_child (root,name,remove=False,must_exist=True):
###############################################################################
"""
Get children with given name. If not found, throws an exception.
Optionally, the child can be removed from the parent.
>>> xml = '''
... <my_root>
... <a>1</a>
... <b>
... <c>2</c>
... </b>
... </my_root>
... '''
>>> import xml.etree.ElementTree as ET
>>> root = ET.fromstring(xml)
>>> get_child(root,'c')
Traceback (most recent call last):
CIME.utils.CIMEError: ERROR: There must be exactly one c entry inside my_root
>>> get_child(root,'c',must_exist=False)
"""
expect (len(root.findall(name))==1 or must_exist==False,
"There must be exactly one {} entry inside {}".format(name,root.tag))
child = root.find(name)
if remove and child is not None:
root.remove(child)
return child
###############################################################################
def has_child (root,name):
###############################################################################
"""
Check if root element has a *direct* child with given name
>>> xml = '''
... <my_root>
... <a>1</a>
... <b>
... <c>2</c>
... </b>
... </my_root>
... '''
>>> import xml.etree.ElementTree as ET
>>> root = ET.fromstring(xml)
>>> has_child(root,'c')
False
>>> has_child(root,'b')
True
"""
return False if root.find(name) is None else True
###############################################################################
def refine_type(entry, force_type=None):
###############################################################################
"""
Try to convert the text entry to the appropriate type based on its contents.
>>> e = '(a,b)'
>>> refine_type(e)==e
True
>>> e = '[a,b]'
>>> refine_type(e)==e
True
>>> e = 'a,b'
>>> refine_type(e)==['a','b']
True
>>> e = 'true,falsE'
>>> refine_type(e)==[True,False]
True
>>> e = '1'
>>> refine_type(e,force_type='real')==1.0
True
>>> e = '1,b'
>>> refine_type(e)==[1,'b',True]
Traceback (most recent call last):
CIME.utils.CIMEError: ERROR: List '1,b' has inconsistent types inside
>>> e = '1.0'
>>> refine_type(e,force_type='my_type')
Traceback (most recent call last):
NameError: Bad force_type: my_type
>>> e = 'true,falsE'
>>> refine_type(e,'logical')
Traceback (most recent call last):
CIME.utils.CIMEError: ERROR: Error! Invalid type 'logical' for an array.
>>> refine_type(e,'array(logical)')
[True, False]
"""
# We want to preserve strings representing lists
if (entry[0]=="(" and entry[-1]==")") or \
(entry[0]=="[" and entry[-1]=="]") :
expect (force_type is None or force_type=="string",
"Error! Invalid force type '{}' for a string representing a list"
.format(force_type))
return entry
if "," in entry:
expect (force_type is None or is_array_type(force_type),
"Error! Invalid type '{}' for an array.".format(force_type))
elem_type = force_type if force_type is None else array_elem_type(force_type);
result = [refine_type(item.strip(), force_type=elem_type) for item in entry.split(",") if item.strip() != ""]
expected_type = type(result[0])
for item in result[1:]:
expect(isinstance(item, expected_type),
"List '{}' has inconsistent types inside".format(entry))
return result
if force_type:
try:
elem_type = force_type if not is_array_type(force_type) else array_elem_type(force_type)
if elem_type == "logical":
if entry.upper() == "TRUE":
elem = True
elif entry.upper() == "FALSE":
elem = False
else:
elem = bool(int(entry))
elif elem_type == "integer":
elem = int(entry)
elif elem_type == "real":
elem = float(entry)
elif elem_type == "string":
elem = str(entry)
else:
raise NameError ("Bad force_type: {}".format(force_type))
if is_array_type(force_type):
return [elem]
else:
return elem
except ValueError:
raise ValueError ("Could not use '{}' as type '{}'".format(entry, force_type))
if entry.upper() == "TRUE":
return True
elif entry.upper() == "FALSE":
return False
try:
v = int(entry)
return v
except ValueError:
pass
try:
v = float(entry)
return v
except ValueError:
return entry
###############################################################################
def derive_type(entry):
###############################################################################
    """
    Try to determine the type that the input string is representing
    >>> derive_type('1')
    'integer'
    >>> derive_type('1.0')
    'real'
    >>> derive_type('one')
    'string'
    >>> derive_type('one,two')
    'array(string)'
    >>> derive_type('true,FALSE')
    'array(logical)'
    """
    # Let refine_type do the string->value conversion (lists for comma-separated input)
    refined_value = refine_type(entry)
    if isinstance(refined_value, list):
        elem_value = refined_value[0]
    else:
        elem_value = refined_value

    # NOTE: bool must be tested before int, since bool is a subclass of int
    if isinstance(elem_value, bool):
        elem_type = "logical"
    elif isinstance(elem_value, int):
        elem_type = "integer"
    elif isinstance(elem_value, float):
        elem_type = "real"
    elif isinstance(elem_value, str):
        elem_type = "string"
    else:
        # FIX: the original used the invalid Python-2 style
        # 'raise(UnrecognizedType, msg)' with an undefined name, which itself
        # raised a TypeError; raise a proper exception with the message instead.
        # (The dead 'return None' after the raise was removed.)
        raise TypeError("Couldn't derive type of '{}'".format(entry))

    if isinstance(refined_value, list):
        return "array(" + elem_type + ")"
    return elem_type
###############################################################################
def check_value(elem, value):
###############################################################################
    """
    Check that a parameter's value is in the valid list
    >>> import xml.etree.ElementTree as ET
    >>> xml = '''
    ... <a type="integer" valid_values="1,2">1</a>
    ... '''
    >>> root = ET.fromstring(xml)
    >>> check_value(root,'1.0')
    Traceback (most recent call last):
    ValueError: Could not use '1.0' as type 'integer'
    >>> check_value(root,'3')
    Traceback (most recent call last):
    CIME.utils.CIMEError: ERROR: Invalid value '3' for element 'a'. Value not in the valid list ('[1, 2]')
    >>> xml = '''
    ... <a type="real" constraints="ge 0">1</a>
    ... '''
    >>> root = ET.fromstring(xml)
    >>> check_value(root,'-1')
    Traceback (most recent call last):
    CIME.utils.CIMEError: ERROR: Value '-1.0' for entry 'a' violates constraint '-1.0 >= 0.0'
    >>> xml = '''
    ... <a type="real" constraints="mod 2 eq 0">1</a>
    ... '''
    >>> root = ET.fromstring(xml)
    >>> check_value(root,'2')
    Traceback (most recent call last):
    CIME.utils.CIMEError: ERROR: Cannot evaluate constraint '2.0 mod 2 eq 0' for entry 'a'
    Modulo constraint only makes sense for integer parameters.
    >>> xml = '''
    ... <a constraints="gt 0; le 5">1</a>
    ... '''
    >>> root = ET.fromstring(xml)
    >>> check_value(root,'2')
    >>> check_value(root,'6')
    Traceback (most recent call last):
    CIME.utils.CIMEError: ERROR: Value '6' for entry 'a' violates constraint '6 <= 5'
    """
    v = value

    # Convert the string value to a python value of the declared (or deduced) type.
    if "type" in elem.attrib.keys():
        vtype = elem.attrib["type"]
        v = refine_type(v,force_type=vtype)
        expect (v is not None,
                "Error! Value '{}' for element '{}' does not satisfy the constraint type={}"
                .format(value,elem.tag,vtype) +
                " NOTE: this error should have been caught earlier! Please, contact developers.")
    else:
        # If no 'type' attribute present, deduce the type and refine
        vtype = derive_type(v)
        v = refine_type(v,force_type=vtype)

    # Check the refined value against the comma-separated 'valid_values' list, if any.
    if "valid_values" in elem.attrib.keys():
        valids_str = elem.attrib["valid_values"]
        valids = [refine_type(item.strip(), force_type=vtype) for item in valids_str.split(",")]
        expect(v in valids,
               "Invalid value '{}' for element '{}'. Value not in the valid list ('{}')".format(value, elem.tag, valids))

    # Check each ';'-separated 'op value' constraint (non-array parameters only).
    if "constraints" in elem.attrib.keys():
        expect ("type" not in elem.attrib.keys() or not is_array_type(elem.attrib["type"]),
                "Attribute 'constraints' only available for non-array parameters.")
        constraints = elem.attrib["constraints"].split(";")
        for c in constraints:
            # The split should return a list [ '', s1, s2, ..., sN, rhs ],
            # where sK is 'None' if opK is not found, and s=opK if opK is found.
            # NOTE: we don't use math symbols, since XML doesn't like < or > inside
            # strings. For consistency, we use worded ops for all operators:
            # 'lt': < 'gt': > 'ne': != 'mod': %
            # 'le': <= 'ge': >= 'eq': ==
            # We use list comprehension to filter out 'None' and empty strings
            pattern = "(ge)|(gt)|(lt)|(le)|(eq)|(ne)|(mod)"
            tokens = [i.strip() for i in re.split(pattern,c,maxsplit=1) if i and i.strip()]
            expect(len(tokens)==2,
                   "Invalid constraint syntax for entry '{}'.\n".format(elem.tag) +
                   " Correct syntax: 'op val', to be interpreted as '$param $op val'.\n"
                   " Constraint found: '{}'".format(c))

            # lhs is the refined parameter value; it only appears in error messages.
            lhs = v
            op = tokens[0]
            if op=="ne":
                rhs = refine_type(tokens[1],force_type=vtype)
                expect (v!=rhs,
                        "Value '{}' for entry '{}' violates constraint '{} != {}'"
                        .format(v,elem.tag,v,rhs))
            elif op=="le":
                rhs = refine_type(tokens[1],force_type=vtype)
                expect (v<=rhs,
                        "Value '{}' for entry '{}' violates constraint '{} <= {}'"
                        .format(v,elem.tag,v,rhs))
            elif op=="lt":
                rhs = refine_type(tokens[1],force_type=vtype)
                expect (v<rhs,
                        "Value '{}' for entry '{}' violates constraint '{} < {}'"
                        .format(v,elem.tag,v,rhs))
            elif op=="ge":
                rhs = refine_type(tokens[1],force_type=vtype)
                expect (v>=rhs,
                        "Value '{}' for entry '{}' violates constraint '{} >= {}'"
                        .format(v,elem.tag,v,rhs))
            elif op=="gt":
                rhs = refine_type(tokens[1],force_type=vtype)
                expect (v>rhs,
                        "Value '{}' for entry '{}' violates constraint '{} > {}'"
                        .format(v,elem.tag,v,rhs))
            elif op=="mod":
                # Modular-arithmetic constraints have the form 'mod M eq r' / 'mod M ne r';
                # they only make sense for integer-typed parameters.
                expect (vtype=="integer",
                        "Cannot evaluate constraint '{} mod {}' for entry '{}'\n"
                        .format(lhs,tokens[1],elem.tag) +
                        "Modulo constraint only makes sense for integer parameters.")
                # Use list comprehension to filter out None (for the cmp op not found)
                rhs_tokens = [i for i in re.split("(eq)|(ne)",tokens[1]) if i]
                expect (len(rhs_tokens)==3,
                        "Modular arithmetic constraint syntax is '% M op rhs', with op being 'eq' or 'ne'"
                        " String found: {}".format(tokens[1]))
                mod = int(rhs_tokens[0])
                cmp = rhs_tokens[1]
                expect (cmp=="eq" or cmp=="ne",
                        "Modular arithmetic constraint syntax is '% M op rhs', with op being 'eq' or 'ne'"
                        " String found: {}".format(tokens[1]))
                rhs = int(rhs_tokens[2])
                if cmp=="eq":
                    expect ( (v % mod)==rhs, "Value '{}' for entry '{}' violates constraint {}{}".format(v,elem.tag,v,c))
                else:
                    expect ( (v % mod)!=rhs, "Value '{}' for entry '{}' violates constraint {}{}".format(v,elem.tag,v,c))
###############################################################################
def check_all_values(root):
###############################################################################
    """
    Check that all values in the xml tree do not violate their metadata
    >>> ############### GENERATE TYPE ATTRIB ###############
    >>> xml_str = '''
    ... <root>
    ...   <prop1>1</prop1>
    ...   <prop2>1.0</prop2>
    ...   <prop3>one</prop3>
    ...   <prop4>true</prop4>
    ... </root>
    ... '''
    >>> import xml.etree.ElementTree as ET
    >>> xml = ET.fromstring(xml_str)
    >>> check_all_values(xml)
    >>> print (get_child(xml,"prop1").attrib["type"])
    integer
    >>> print (get_child(xml,"prop2").attrib["type"])
    real
    >>> print (get_child(xml,"prop3").attrib["type"])
    string
    >>> print (get_child(xml,"prop4").attrib["type"])
    logical
    """
    # Internal nodes are containers: recurse and stop.
    if len(root) > 0:
        for child in root:
            check_all_values(child)
        return

    # Leaf node: attach a deduced 'type' attribute if one is missing,
    # then validate the stored value against its metadata.
    if "type" not in root.attrib:
        root.attrib["type"] = derive_type(root.text)
    check_value(root, root.text)
###############################################################################
def resolve_inheritance (root,elem):
###############################################################################
    """
    If elem inherits from another node within $root, this function adds all
    children of its "parent" to elem. If parent also inherits, first
    resolve parent recursively. If parent is not found, throw an exception
    >>> xml = '''
    ... <my_root>
    ...   <base>
    ...     <a>2</a>
    ...   </base>
    ...   <derived inherit="base">
    ...   </derived>
    ... </my_root>
    ... '''
    >>> import xml.etree.ElementTree as ET
    >>> root = ET.fromstring(xml)
    >>> d = get_child(root,'derived')
    >>> len(d)
    0
    >>> resolve_inheritance(root,d)
    >>> len(d)
    1
    >>> get_child(d,'a').text
    '2'
    """
    if "inherit" in elem.attrib.keys():
        parent_name = elem.attrib["inherit"]
        parent = find_node(root,parent_name)
        # FIX: the original checked 'elem is not None', which is trivially true
        # here; the intent is to verify the *parent* node was actually found.
        expect (parent is not None,
                "Error! Parent {} of {} not found within root {}"
                .format(parent_name,elem.tag,root.tag))

        # Make sure the parent is fully resolved before copying its entries
        resolve_inheritance(root,parent)

        # Mark inheritance as resolved for this element
        del elem.attrib["inherit"]

        for entry in parent:
            # Add the parent's default only if this element does not
            # have a more specialized version
            if not has_child(elem,entry.tag):
                new_entry = copy.deepcopy(entry)
                elem.append(new_entry)

    # Children may carry their own 'inherit' attributes: resolve them too
    for child in elem:
        resolve_inheritance(root,child)
###############################################################################
def resolve_all_inheritances (root):
###############################################################################
    """
    Resolve the 'inherit' attribute of every element under the given root.
    """
    for child in root:
        resolve_inheritance(root, child)
###############################################################################
def get_valid_selectors(xml_root):
###############################################################################
    """
    Extract the <selector> node from the xml root, verifying
    its integrity, and returning selectors as a dict.
    >>> xml = '''
    ... <namelist_defaults>
    ...   <selectors>
    ...     <selector name="S1" case_env="ENV1"/>
    ...     <selector name="S2" case_env="ENV2"/>
    ...   </selectors>
    ... </namelist_defaults>
    ... '''
    >>> import xml.etree.ElementTree as ET
    >>> root = ET.fromstring(xml)
    >>> selectors = get_valid_selectors(root)
    >>> len(selectors)
    2
    >>> xml = '''
    ... <namelist_defaults>
    ...   <selectors>
    ...     <blah name="S1" case_env="ENV1"/>
    ...   </selectors>
    ... </namelist_defaults>
    ... '''
    >>> import xml.etree.ElementTree as ET
    >>> root = ET.fromstring(xml)
    >>> selectors = get_valid_selectors(root)
    Traceback (most recent call last):
    CIME.utils.CIMEError: ERROR: Expected selector tag, not blah
    """
    # NOTE: removed the unused local 'class BadSelectorTag(Exception)'; it was
    # never raised or referenced (errors are reported through expect()).

    # Get the right XML element (detaching it from the tree), and iterate over
    # its children; each child must be a <selector> with 'name' and 'case_env'.
    selectors_elem = get_child(xml_root,"selectors",remove=True)
    selectors = {}
    for selector in selectors_elem:
        expect(selector.tag == "selector",
               "Expected selector tag, not {}".format(selector.tag))

        selector_name = selector.attrib["name"]
        selector_env = selector.attrib["case_env"]
        if "regex" in selector.attrib:
            selector_regex = selector.attrib["regex"]
        else:
            selector_regex = "(.*)" # Just grab the whole thing

        selectors[selector_name] = (selector_env, selector_regex)

    return selectors
###############################################################################
def gen_group_processes (ap_names_str, atm_procs_defaults):
###############################################################################
    """
    Given a (possibly nested) string representation of an atm group,
    generates the corresponding atm processes as XML nodes.
    """
    group = ET.Element("__APG__")

    for ap_name in parse_string_as_list(ap_names_str):
        # The current entry can itself be a group if either:
        # - it looks like "(ap1,ap2,...)", possibly nested; such a group is
        #   built on the fly from the building-block specs, or
        # - it is declared in the XML defaults as an atm proc group, which
        #   stores its member list in the 'atm_procs_list' child.
        if ap_name[0] == '(':
            proc = gen_atm_proc_group(ap_name, atm_procs_defaults)
        else:
            # Start from the defaults declared for this named process
            proc = copy.deepcopy(get_child(atm_procs_defaults, ap_name))

            # A pre-defined process may itself be a group: generate its
            # sub-processes too before attaching it.
            ptype = get_child(proc, "Type", must_exist=False)
            if ptype is not None and ptype.text == "Group":
                nested_list = get_child(proc, "atm_procs_list").text
                proc.extend(gen_group_processes(nested_list, atm_procs_defaults))

        group.append(proc)

    return group
###############################################################################
def gen_atm_proc_group(atm_procs_list, atm_procs_defaults):
###############################################################################
    """
    Given a (possibly nested) list of atm procs names, and the defaults
    section for each atm proc, builds an XML node containing the tree
    representing the atm process group, with nodes including APG parameters
    as well as one sub-node for each atm proc in the group
    >>> xml = '''
    ... <ap>
    ...     <atm_proc_group>
    ...         <prop1>1</prop1>
    ...         <atm_procs_list>THE_LIST</atm_procs_list>
    ...     </atm_proc_group>
    ...     <ap1>
    ...     </ap1>
    ...     <ap2>
    ...         <prop1>2</prop1>
    ...         <prop2>3</prop2>
    ...     </ap2>
    ...     <my_group inherit="atm_proc_group">
    ...         <atm_procs_list>(p1,ap2)</atm_procs_list>
    ...     </my_group>
    ... </ap>
    ... '''
    >>> import xml.etree.ElementTree as ET
    >>> defaults = ET.fromstring(xml)
    >>> ap_list = '(ap1,(ap2,ap1))'
    >>> apg = gen_atm_proc_group(ap_list,defaults)
    >>> get_child(apg,'atm_procs_list').text==ap_list
    True
    >>>
    >>> has_child(apg,'group.ap2_ap1.')
    True
    >>> get_child(apg,'prop1').text=="1"
    True
    """
    # Inherit the generic atm_proc_group defaults, then record this group's list
    group = ET.Element("__APG__")
    group.attrib["inherit"] = "atm_proc_group"
    resolve_inheritance(atm_procs_defaults, group)
    get_child(group, "atm_procs_list").text = atm_procs_list

    # Build the member processes and attach them to this group
    group_procs = gen_group_processes(atm_procs_list, atm_procs_defaults)
    group.extend(group_procs)

    # The name of a 'generic' group is 'group.AP1_AP2_..._APN.'
    group.tag = "group." + "_".join(proc.tag for proc in group_procs) + "."

    return group
| 158 | 25 | 81 |
47eb24a0f9d8fd0e4cedeb0986656185548adaf7 | 1,894 | py | Python | src/TODO/dataset.py | michalnand/dynamical_system | df0ad82089672693de03e32a97018e0fd87cbd33 | [
"MIT"
] | null | null | null | src/TODO/dataset.py | michalnand/dynamical_system | df0ad82089672693de03e32a97018e0fd87cbd33 | [
"MIT"
] | null | null | null | src/TODO/dataset.py | michalnand/dynamical_system | df0ad82089672693de03e32a97018e0fd87cbd33 | [
"MIT"
] | null | null | null | import torch
import numpy
| 25.594595 | 95 | 0.63622 | import torch
import numpy
class Create:
    """Dataset of simulated trajectories from a random linear dynamical system.

    Builds ``items_count`` trajectories (inputs ``x`` of shape
    (seq_length, order)) from one randomly parameterized system, with the
    flattened transfer matrix returned by ``get_a()`` as the target ``y``.
    """

    def __init__(self, dynamical_system, items_count = 10000, batch_size = 32, device = "cpu"):
        # dynamical_system: a class exposing set_random_transfer_matrix(),
        # set_random_initial_state(), process(steps, noise) and get_a()
        # (constructed as dynamical_system(order, dt)).
        self.items_count = items_count
        self.seq_length = 256
        self.batch_size = batch_size
        self.order = 4

        self.dynamical_system = dynamical_system(self.order, 0.01)
        self.dynamical_system.set_random_transfer_matrix()

        self.dataset_x = torch.zeros(self.items_count, self.seq_length, self.order)
        self.dataset_y = torch.zeros(self.items_count, self.order*self.order)

        # Fill the dataset with simulated trajectories and their targets.
        for i in range(self.items_count):
            y, x = self.create_item()
            self.dataset_x[i] = torch.from_numpy(x)
            self.dataset_y[i] = torch.from_numpy(y)

        self.input_shape = (self.seq_length, self.order)
        self.output_shape = (1, self.order*self.order)

        # FIX: Tensor.to() is NOT in-place; the original discarded its result,
        # so the dataset silently stayed on the CPU regardless of 'device'.
        self.dataset_x = self.dataset_x.to(device)
        self.dataset_y = self.dataset_y.to(device)

        # Reusable batch buffers, allocated once on the target device.
        self.batch_x = torch.zeros(self.batch_size, self.seq_length, self.order).to(device)
        self.batch_y = torch.zeros(self.batch_size, self.order*self.order).to(device)

    def get_x(self):
        # Full input tensor (items_count, seq_length, order).
        return self.dataset_x

    def get_y(self):
        # Full target tensor (items_count, order*order).
        return self.dataset_y

    def get_input_shape(self):
        return self.input_shape

    def get_output_shape(self):
        return self.output_shape

    def get_random_batch(self):
        # Sample batch_size items uniformly (with replacement) into the
        # preallocated buffers; returns (targets, inputs).
        for i in range(self.batch_size):
            idx = numpy.random.randint(self.items_count)
            self.batch_x[i] = self.dataset_x[idx]
            self.batch_y[i] = self.dataset_y[idx]
        return self.batch_y, self.batch_x

    def create_item(self):
        # Simulate one trajectory from a fresh random initial state; the
        # target is the (copied, flattened) transfer matrix.
        self.dynamical_system.set_random_initial_state()
        x = self.dynamical_system.process(self.seq_length, 0.2)
        y = self.dynamical_system.get_a().copy()
        y = y.flatten()
        return y, x
| 1,653 | -8 | 215 |
aa54c0a25d7f459b2d365d906fe7dc4e48fd1630 | 4,054 | py | Python | bagofconcepts/boc.py | hank110/Bag-of-Concepts | 28e772296bfe4b0a71859cf2aba1d68252948fb9 | [
"MIT"
] | null | null | null | bagofconcepts/boc.py | hank110/Bag-of-Concepts | 28e772296bfe4b0a71859cf2aba1d68252948fb9 | [
"MIT"
] | null | null | null | bagofconcepts/boc.py | hank110/Bag-of-Concepts | 28e772296bfe4b0a71859cf2aba1d68252948fb9 | [
"MIT"
] | null | null | null | import logging
from collections import Counter, defaultdict
import math
import sys
import numpy as np
from scipy.sparse import csr_matrix
import scipy.sparse
from sklearn.utils.extmath import safe_sparse_dot
from gensim.models import Word2Vec, KeyedVectors
from spherecluster import SphericalKMeans
| 36.196429 | 98 | 0.688949 | import logging
from collections import Counter, defaultdict
import math
import sys
import numpy as np
from scipy.sparse import csr_matrix
import scipy.sparse
from sklearn.utils.extmath import safe_sparse_dot
from gensim.models import Word2Vec, KeyedVectors
from spherecluster import SphericalKMeans
class BOCModel():
    """Bag-of-Concepts document model.

    Clusters word2vec embeddings into 'concepts' (spherical k-means) and
    represents each document as a CF-IDF weighted bag of concepts.
    """

    def __init__(self, doc_path=None, model_path=None, embedding_dim=200,
        context=8, min_freq=100, num_concept=100, iterations=5):
        # Unified model path required for incorporating numpy ndarray
        # Different embedding methods --> numpy ndarray
        if doc_path is None and model_path is None:
            raise ValueError("Must specify either the document path or pre-trained word2vec path")

        self.doc_path=doc_path
        self.model_path=model_path
        self.embedding_dim=embedding_dim
        self.context=context
        self.min_freq=min_freq
        self.num_concept=num_concept
        self.iterations=iterations

    def fit(self, save_path=""):
        """Build the bag-of-concepts representation.

        Returns (boc sparse matrix, [(word, concept_id), ...], idx2word).
        If save_path is non-empty, intermediate artifacts are saved there.
        """
        if self.model_path is not None:
            # FIX: pre-trained vectors live at model_path; the original
            # mistakenly passed doc_path to load_w2v here.
            wv, idx2word=load_w2v(self.model_path)
        else:
            wv, idx2word=train_w2v(self.doc_path, self.embedding_dim,
                self.context, self.min_freq, self.iterations, save_path)

        wv_cluster_id=_cluster_wv(wv, self.num_concept)
        bow=_create_bow(idx2word, self.doc_path)
        w2c=_create_w2c(idx2word, wv_cluster_id, self.num_concept)
        boc=_apply_cfidf(safe_sparse_dot(bow, w2c))

        if save_path:
            _save_boc(save_path, boc, idx2word, wv_cluster_id)

        return boc, [wc_pair for wc_pair in zip(idx2word, wv_cluster_id)], idx2word
def _save_boc(filepath, boc, idx2word, wv_cluster_id):
scipy.sparse.save_npz(filepath+'/boc_matrix.npz', boc)
with open(filepath+'/word2context.txt', 'w') as f:
for wc_pair in zip(idx2word, wv_cluster_id):
f.write(str(wc_pair)+'\n')
def _cluster_wv(wv, num_concept):
    # Cluster the word vectors into num_concept groups with spherical k-means
    # (cosine-similarity clustering on the unit hypersphere) and return the
    # cluster id assigned to each word vector.
    skm=SphericalKMeans(n_clusters=num_concept)
    skm.fit(wv)
    return skm.labels_
def _create_bow(idx2word, doc_path):
rows=[]
cols=[]
vals=[]
word2idx={word:idx for idx, word in enumerate(idx2word)}
with open(doc_path, "r") as f:
for i, doc in enumerate(f):
tokens=doc.rstrip().split(" ")
tokens_count=Counter([word2idx[token] for token in tokens if token in word2idx])
for idx, count in tokens_count.items():
rows.append(i)
cols.append(idx)
vals.append(float(count))
return csr_matrix((vals, (rows, cols)), shape=(i+1, len(word2idx)))
def _create_w2c(idx2word, cluster_label, num_concept):
if len(idx2word)!=len(cluster_label):
raise IndexError("Dimensions between words and labels mismatched")
rows=[i for i, idx2word in enumerate(idx2word)]
cols=[j for j in cluster_label]
vals=[1.0 for i in idx2word]
return csr_matrix((vals, (rows, cols)), shape=(len(idx2word), num_concept))
def _apply_cfidf(csr_matrix):
num_docs, num_concepts=csr_matrix.shape
_, nz_concept_idx=csr_matrix.nonzero()
cf=np.bincount(nz_concept_idx, minlength=num_concepts)
icf=np.log(num_docs / cf)
icf[np.isinf(icf)]=0
return safe_sparse_dot(csr_matrix, scipy.sparse.diags(icf))
def tokenize(doc_path):
    """Yield each line of the corpus file as a list of space-separated tokens."""
    with open(doc_path, "r") as corpus:
        for line in corpus:
            yield line.rstrip().split(" ")
def train_w2v(doc_path, embedding_dim, context, min_freq, iterations, save_path=""):
    """Train a skip-gram word2vec model on the corpus at doc_path.

    Returns (word-vector ndarray, index-to-word list). If save_path is
    non-empty, the trained vectors are also written there in word2vec format.
    """
    model=Word2Vec(size=embedding_dim, window=context, min_count=min_freq, sg=1)
    # FIX: tokenize() returns a generator; the original passed the same
    # generator to both build_vocab() and train(), so build_vocab() exhausted
    # it and train() saw an empty corpus. Restart the stream for each pass.
    model.build_vocab(tokenize(doc_path))
    model.train(tokenize(doc_path), total_examples=model.corpus_count, epochs=iterations)

    if save_path:
        model_name="/w2v_model_d%d_w%d" %(embedding_dim, context)
        model.wv.save_word2vec_format(save_path+model_name)

    return model.wv.vectors, model.wv.index2word
def load_w2v(model_path):
    """Load pre-trained vectors stored in word2vec format.

    Returns (word-vector ndarray, index-to-word list), matching train_w2v's
    return value. FIX: the original returned the raw KeyedVectors object,
    which broke the 'wv, idx2word = load_w2v(...)' unpacking in BOCModel.fit.
    """
    kv = KeyedVectors.load_word2vec_format(model_path)
    return kv.vectors, kv.index2word
| 3,492 | -4 | 265 |
9e7330c45553e9e1428db4adff74c500854c66f0 | 827 | py | Python | SmartContactManager/smart_contact_manager.py | alexcamargos/Learning_Python_Programming | f1cce9f85a672468b6ed1eb98dea9f7c09443722 | [
"MIT"
] | 2 | 2021-06-04T23:39:14.000Z | 2021-09-15T05:36:35.000Z | SmartContactManager/smart_contact_manager.py | alexcamargos/Learning_Python_Programming | f1cce9f85a672468b6ed1eb98dea9f7c09443722 | [
"MIT"
] | null | null | null | SmartContactManager/smart_contact_manager.py | alexcamargos/Learning_Python_Programming | f1cce9f85a672468b6ed1eb98dea9f7c09443722 | [
"MIT"
] | null | null | null | # #!/usr/bin/env python
# encoding: utf-8
#
# --------------------------------------------------------------------------------------------------------------------
# Name: smart_contact_manager.py
# Version: 0.0.1
# Summary: Smart Contact Manager a contact book GUI application with Python, SQLite, and PyQt.
#
# Author: Alexsander Lopes Camargos
# Author-email: alcamargos@vivaldi.net
#
# License: MIT
# --------------------------------------------------------------------------------------------------------------------
"""Smart Contact Manager entry point script."""
# TODO: Provide search capability: Giving your users a way to search for a contact in the database.
# TODO: Add back-up capability: Providing a way of backing up contact information.
from main import main
# Run the application only when this file is executed directly (not imported).
if __name__ == '__main__':
    main()
| 33.08 | 119 | 0.516324 | # #!/usr/bin/env python
# encoding: utf-8
#
# --------------------------------------------------------------------------------------------------------------------
# Name: smart_contact_manager.py
# Version: 0.0.1
# Summary: Smart Contact Manager a contact book GUI application with Python, SQLite, and PyQt.
#
# Author: Alexsander Lopes Camargos
# Author-email: alcamargos@vivaldi.net
#
# License: MIT
# --------------------------------------------------------------------------------------------------------------------
"""Smart Contact Manager entry point script."""
# TODO: Provide search capability: Giving your users a way to search for a contact in the database.
# TODO: Add back-up capability: Providing a way of backing up contact information.
from main import main
# Run the application only when this file is executed directly (not imported).
if __name__ == '__main__':
    main()
| 0 | 0 | 0 |
dfb0d107bb3528b5f2998439c3afbd1da82d7fdb | 2,245 | py | Python | statsmodels/distributions/tests/test_copula.py | NolanMP/statsmodels | ca6b652188be2422061052f7e61dd7bf2da03d52 | [
"BSD-3-Clause"
] | 1 | 2022-02-24T16:54:07.000Z | 2022-02-24T16:54:07.000Z | statsmodels/distributions/tests/test_copula.py | NolanMP/statsmodels | ca6b652188be2422061052f7e61dd7bf2da03d52 | [
"BSD-3-Clause"
] | null | null | null | statsmodels/distributions/tests/test_copula.py | NolanMP/statsmodels | ca6b652188be2422061052f7e61dd7bf2da03d52 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 14 23:32:57 2021
Author: Josef Perktold
License: BSD-3
"""
# import numpy as np
from numpy.testing import assert_allclose
import pytest
import statsmodels.sandbox.distributions.copula as cop
ev_list = [
[cop.transform_bilogistic, 0.5, 0.9, (0.25, 0.05), 0.5],
[cop.transform_tawn, 0.5, 0.9, (0.5, 0.5, 0.5), 0.4724570876035117],
# note evd has asymmetry reversed, interchange variables
[cop.transform_tawn2, 0.9, 0.5, (0.25, 0.05), 0.464357480263932],
[cop.transform_tawn2, 0.9, 0.5, (0.5, 0.25), 0.4916117128670654],
[cop.transform_tawn2, 0.5, 0.9, (0.5, 0.25), 0.48340673415789],
# note evd has parameter for hr 1/lmbda (inverse of our parameter)
[cop.transform_hr, 0.5, 0.9, (2,), 0.4551235014298542],
[cop.transform_joe, 0.5, 0.9, (0.5, 0.75, 1/0.25), 0.4543698299835434],
[cop.transform_joe, 0.9, 0.5, (0.5, 0.75, 1/0.25), 0.4539773435983587],
# tev is against R `copula` package
# > cop = tevCopula(0.8, df = 4)
# > pCopula(c(0.5, 0.75), cop)
# [1] 0.456807960674953
# > pCopula(c(0.5, 0.9), cop)
# [1] 0.4911039761533587
[cop.transform_tev, 0.5, 0.75, (0.8, 4), 0.456807960674953],
[cop.transform_tev, 0.5, 0.9, (0.8, 4), 0.4911039761533587],
]
cop_list = [
[cop.TransfFrank, 0.5, 0.9, (2,), 0.4710805107852225, 0.9257812360337806],
[cop.TransfGumbel, 0.5, 0.9, (2,), 0.4960348880595387, 0.3973548776136501],
[cop.TransfClayton, 0.5, 0.9, (2,), 0.485954322440435, 0.8921974147432954],
[cop.TransfIndep, 0.5, 0.5, (), 0.25, 1],
]
@pytest.mark.parametrize("case", ev_list)
@pytest.mark.parametrize("case", cop_list)
| 35.078125 | 79 | 0.644989 | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 14 23:32:57 2021
Author: Josef Perktold
License: BSD-3
"""
# import numpy as np
from numpy.testing import assert_allclose
import pytest
import statsmodels.sandbox.distributions.copula as cop
# Each case: [ev transform, v1, v2, transform args, expected copula_bv_ev value].
# Reference values come from R packages (`evd`, and `copula` for tev),
# per the inline notes below.
ev_list = [
    [cop.transform_bilogistic, 0.5, 0.9, (0.25, 0.05), 0.5],
    [cop.transform_tawn, 0.5, 0.9, (0.5, 0.5, 0.5), 0.4724570876035117],
    # note evd has asymmetry reversed, interchange variables
    [cop.transform_tawn2, 0.9, 0.5, (0.25, 0.05), 0.464357480263932],
    [cop.transform_tawn2, 0.9, 0.5, (0.5, 0.25), 0.4916117128670654],
    [cop.transform_tawn2, 0.5, 0.9, (0.5, 0.25), 0.48340673415789],
    # note evd has parameter for hr 1/lmbda (inverse of our parameter)
    [cop.transform_hr, 0.5, 0.9, (2,), 0.4551235014298542],
    [cop.transform_joe, 0.5, 0.9, (0.5, 0.75, 1/0.25), 0.4543698299835434],
    [cop.transform_joe, 0.9, 0.5, (0.5, 0.75, 1/0.25), 0.4539773435983587],
    # tev is against R `copula` package
    # > cop = tevCopula(0.8, df = 4)
    # > pCopula(c(0.5, 0.75), cop)
    # [1] 0.456807960674953
    # > pCopula(c(0.5, 0.9), cop)
    # [1] 0.4911039761533587
    [cop.transform_tev, 0.5, 0.75, (0.8, 4), 0.456807960674953],
    [cop.transform_tev, 0.5, 0.9, (0.8, 4), 0.4911039761533587],
]

# Each case: [Archimedean transform class, v1, v2, args, expected cdf, expected pdf],
# checked against the R `copula` package.
cop_list = [
    [cop.TransfFrank, 0.5, 0.9, (2,), 0.4710805107852225, 0.9257812360337806],
    [cop.TransfGumbel, 0.5, 0.9, (2,), 0.4960348880595387, 0.3973548776136501],
    [cop.TransfClayton, 0.5, 0.9, (2,), 0.485954322440435, 0.8921974147432954],
    [cop.TransfIndep, 0.5, 0.5, (), 0.25, 1],
]
@pytest.mark.parametrize("case", ev_list)
def test_ev_copula(case):
    # Compare the EV copula cdf/transform to reference values from R (see ev_list).
    transform, u1, u2, params, expected = case
    computed = cop.copula_bv_ev(u1, u2, transform, args=params)
    assert_allclose(computed, expected, rtol=1e-13)
@pytest.mark.parametrize("case", cop_list)
def test_copulas(case):
    # Compare Archimedean copula cdf and pdf to R `copula` reference values.
    transform_cls, u1, u2, params, expected_cdf, expected_pdf = case
    copula = cop.CopulaArchimedean(transform_cls())
    computed_cdf = copula.cdf([u1, u2], args=params)
    computed_pdf = copula.pdf([u1, u2], args=params)
    assert_allclose(computed_cdf, expected_cdf, rtol=1e-13)
    assert_allclose(computed_pdf, expected_pdf, rtol=1e-13)
| 525 | 0 | 44 |
ecc4eaaf4acc188cc72769b415a9ee39b2baf9a7 | 1,643 | py | Python | pyrfm/linear_model/loss.py | neonnnnn/pyrfm | e88fe8cb7bf3062616d33826e955e828fc6d8ba6 | [
"BSD-2-Clause"
] | 7 | 2020-05-31T01:47:27.000Z | 2021-12-26T03:45:14.000Z | pyrfm/linear_model/loss.py | neonnnnn/pyrfm | e88fe8cb7bf3062616d33826e955e828fc6d8ba6 | [
"BSD-2-Clause"
] | 2 | 2019-12-01T01:18:38.000Z | 2020-08-27T12:07:26.000Z | pyrfm/linear_model/loss.py | neonnnnn/pyrfm | e88fe8cb7bf3062616d33826e955e828fc6d8ba6 | [
"BSD-2-Clause"
] | 3 | 2021-03-17T13:46:56.000Z | 2022-03-18T21:43:45.000Z | import numpy as np
class Squared(LossFunction):
"""Squared loss: L(p, y) = 0.5 * (y - p)²"""
class Logistic(LossFunction):
"""Logistic loss: L(p, y) = log(1 + exp(-yp))"""
class SquaredHinge(LossFunction):
"""Squared hinge loss: L(p, y) = max(1 - yp, 0)²"""
class Hinge(LossFunction):
"""hinge loss: L(p, y) = max(1 - y*p, 0)""" | 19.795181 | 73 | 0.456482 | import numpy as np
def sigmoid(pred):
    """Numerically stable logistic function 1 / (1 + exp(-pred)).

    Written so that exp() is only ever evaluated at non-positive arguments,
    avoiding overflow for large |pred|.
    """
    numerator = np.exp(np.minimum(0, pred))
    denominator = 1. + np.exp(-np.abs(pred))
    return numerator / denominator
class LossFunction:
    """Abstract interface for pointwise losses L(p, y) and their derivatives."""
    def loss(self, p, y):
        # Value of the loss at prediction p and target y.
        raise NotImplementedError()
    def dloss(self, p, y):
        # Derivative of the loss with respect to the prediction p.
        raise NotImplementedError()
class Squared(LossFunction):
    """Squared loss: L(p, y) = 0.5 * (y - p)²"""
    def __init__(self):
        # Smoothness constant; presumably a bound on the second derivative
        # (it is exactly 1 for the squared loss) — confirm against callers.
        self.mu = 1

    def loss(self, p, y):
        residual = p - y
        return 0.5 * residual ** 2

    def dloss(self, p, y):
        # d/dp [0.5 * (p - y)^2] = p - y
        return p - y
class Logistic(LossFunction):
    """Logistic loss: L(p, y) = log(1 + exp(-yp))"""
    def __init__(self):
        # Smoothness constant; the second derivative of the logistic loss
        # is bounded by 1/4.
        self.mu = 0.25

    def loss(self, p, y):
        z = p * y
        # Stable evaluation: log(1 + exp(-z)) = log1p(exp(-|z|)) - min(0, z).
        # FIX: the original used exp(-max(0, z)) instead of exp(-|z|), which
        # returned log(2) - z instead of the true loss for every z < 0.
        return np.log1p(np.exp(-np.abs(z))) - np.minimum(0, z)

    def dloss(self, p, y):
        z = p * y
        # d/dp log(1 + exp(-yp)) = y * (sigma(z) - 1)
        tau = sigmoid(z)
        return y * (tau - 1)
class SquaredHinge(LossFunction):
    """Squared hinge loss: L(p, y) = max(1 - yp, 0)²"""
    def __init__(self):
        # Smoothness constant; the second derivative is 2 on the active branch.
        self.mu = 2

    def loss(self, p, y):
        # FIX/generalization: the original used a scalar 'if z > 0' test that
        # raised on array inputs, even though dloss only worked on arrays.
        # np.maximum handles scalars and arrays uniformly.
        z = np.maximum(0.0, 1 - p * y)
        return z * z

    def dloss(self, p, y):
        # -2*y*(1 - yp) where the margin is violated, 0 elsewhere.
        # Vectorized with np.where so scalars work too (the original's
        # boolean-mask assignments required ndarray inputs).
        z = 1 - p * y
        return np.where(z > 0, -2.0 * y * z, 0.0)
class Hinge(LossFunction):
    """hinge loss: L(p, y) = max(1 - y*p, 0)"""
    def __init__(self):
        # mu = 0: the hinge loss is not smooth.
        self.mu = 0

    def loss(self, p, y):
        # max(1 - yp, 0); np.maximum supports scalars as well as arrays
        # (the original's mask assignment required an ndarray).
        return np.maximum(0.0, 1 - p * y)

    def dloss(self, p, y):
        # Subgradient: -1 where the margin is violated, 0 elsewhere
        # (including at the kink z == 0, matching the original).
        z = 1 - p * y
        return np.where(z > 0, -1.0, 0.0)
985e9dc0da5724f8e441e6fb601bdda77d7cb788 | 1,661 | py | Python | test/test_decorators_fromtype.py | dan-win/fairways_py | 771623c6f9ec40e8016b5cebb7951613d01e31f7 | [
"Apache-2.0"
] | null | null | null | test/test_decorators_fromtype.py | dan-win/fairways_py | 771623c6f9ec40e8016b5cebb7951613d01e31f7 | [
"Apache-2.0"
] | null | null | null | test/test_decorators_fromtype.py | dan-win/fairways_py | 771623c6f9ec40e8016b5cebb7951613d01e31f7 | [
"Apache-2.0"
] | null | null | null | import unittest
import json
import os
| 27.683333 | 119 | 0.582781 | import unittest
import json
import os
def setUpModule():
    # unittest module-level setup hook; no shared fixtures are required.
    pass
def tearDownModule():
    # unittest module-level teardown hook; nothing to clean up.
    pass
class FromTypeTestCase(unittest.TestCase):
    """Tests for fairways.decorators.fromtype dispatch-by-argument-type."""

    @classmethod
    def setUpClass(cls):
        # from fairways.api import core, triggers
        from fairways.decorators import fromtype
        cls.fromtype = fromtype

    def test_fromtype(self):
        fromtype = self.fromtype

        # Plain user-defined class, used to check dispatch on custom types.
        class CustomClass:
            def __init__(self, name):
                self.name = name
            def __str__(self):
                return f"{self.__class__.__name__}:{self.name}"

        # Class whose fromtype() factory routes to the handler registered
        # for the runtime type of its argument.
        class MyClass(fromtype.FromTypeMixin):
            def __init__(self, value=None):
                self.value = value
            @fromtype.implement(int)
            def _from_int(self, value):
                self.value = f"From int: {value}"
            @fromtype.implement(str)
            def _from_str(self, value):
                self.value = f"From str: {value}"
            @fromtype.implement(dict)
            def _from_dict(self, value):
                self.value = f"From dict: {value}"
            @fromtype.implement(CustomClass)
            def _from_obj(self, value):
                self.value = f"From CustomClass: {value}"
            def __str__(self):
                return self.value

        # Each call should reach the handler matching the argument's type.
        self.assertEqual('From int: 1', MyClass.fromtype(1).value)
        self.assertEqual('From str: Text', MyClass.fromtype("Text").value)
        self.assertEqual("From dict: {'a': 1}", MyClass.fromtype(dict(a=1)).value)
        self.assertEqual('From CustomClass: CustomClass:objectName', MyClass.fromtype(CustomClass("objectName")).value)
| 1,459 | 91 | 69 |
dfc0b83160ee292356e7e19855d85e59ddfa121b | 11,235 | py | Python | P2/sde_solvers.py | Ocete/mcd-pe-continuo | ec8c6e39453e3089786396252ea4dc83ad4825c1 | [
"MIT"
] | null | null | null | P2/sde_solvers.py | Ocete/mcd-pe-continuo | ec8c6e39453e3089786396252ea4dc83ad4825c1 | [
"MIT"
] | null | null | null | P2/sde_solvers.py | Ocete/mcd-pe-continuo | ec8c6e39453e3089786396252ea4dc83ad4825c1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 27 17:29:26 2020
@author: Alberto Suárez
"""
# Load packages
import numpy as np
import sys
import matplotlib.pyplot as plt
from numpy.core.shape_base import _accumulate
def generate_regular_grid(t0, delta_t, N):
    """Build the regular time grid t_k = t0 + k*delta_t for k = 0..N.

    Parameters
    ----------
    t0 : float
        Initial time for the simulation
    delta_t : float
        Time step
    N : int
        Number of steps for the simulation

    Returns
    -------
    t : numpy.ndarray of shape (N+1,)
        Regular grid of discretization times in [t0, t0 + N*delta_t]

    Example
    -------
    >>> t = generate_regular_grid(0, 0.1, 100)
    """
    return t0 + delta_t * np.arange(N + 1)
def euler_maruyana(t0, x0, T, a, b, M, N):
    """ Numerical integration of an SDE using the stochastic Euler scheme

    x(t0) = x0
    dx(t) = a(t, x(t))*dt + b(t, x(t))*dW(t)   [Itô SDE]

    Parameters
    ----------
    t0 : float
        Initial time for the simulation
    x0 : float
        Initial level of the process
    T : float
        Length of the simulation interval [t0, t0+T]
    a :
        Function a(t,x(t)) that characterizes the drift term
    b :
        Function b(t,x(t)) that characterizes the diffusion term
    M: int
        Number of trajectories in simulation
    N: int
        Number of steps for the simulation

    Returns
    -------
    t: numpy.ndarray of shape (N+1,)
        Regular grid of discretization times in [t0, t0+T]
    X: numpy.ndarray of shape (M,N+1)
        Simulation consisting of M trajectories.
        Each trajectory is a row vector composed of the values
        of the process at t.

    Example
    -------
    >>> import matplotlib.pyplot as plt
    >>> import sde_solvers as sde
    >>> t0, S0, T, mu, sigma = 0, 100.0, 2.0, 0.3, 0.4
    >>> M, N = 20, 1000
    >>> def a(t, St): return mu*St
    >>> def b(t, St): return sigma*St
    >>> t, S = sde.euler_maruyana(t0, S0, T, a, b, M, N)
    >>> _ = plt.plot(t,S.T)
    >>> _= plt.xlabel('t')
    >>> _= plt.ylabel('S(t)')
    >>> _= plt.title('Geometric BM (Euler scheme)')
    """
    # Initialize trajectories at the initial level x0.
    # FIX: force a float dtype; np.tile of an integer x0 produced an integer
    # array, silently truncating every subsequent update.
    trajectories = np.full((M, N + 1), x0, dtype=float)

    delta_t = T / N
    sqrt_delta_t = np.sqrt(delta_t)
    # Regular time grid t0 + k*delta_t (same as generate_regular_grid).
    times = t0 + delta_t * np.arange(N + 1)
    # Pre-sample all M*N standard-normal increments of the Wiener process.
    noise = np.random.randn(M, N)

    # March the Euler-Maruyama recursion one time step at a time; column k of
    # 'trajectories' holds the M simulated values at times[k].
    for idx, (t, x, z) in enumerate(zip(times[:-1], trajectories.T[:-1], noise.T)):
        trajectories.T[idx + 1] = x + a(t, x) * delta_t + z * b(t, x) * sqrt_delta_t

    return times, trajectories
def milstein(t0, x0, T, a, b, db_dx, M, N):
    """Numerical integration of an SDE using the stochastic Milstein scheme.

        x(t0) = x0
        dx(t) = a(t, x(t))*dt + b(t, x(t))*dW(t)   [Itô SDE]

    Parameters
    ----------
    t0 : float
        Initial time for the simulation.
    x0 : float
        Initial level of the process.
    T : float
        Length of the simulation interval [t0, t0+T].
    a : callable
        Function a(t, x(t)) that characterizes the drift term.
    b : callable
        Function b(t, x(t)) that characterizes the diffusion term.
    db_dx : callable
        Function db_dx(t, x(t)), derivative of b wrt its second argument.
    M : int
        Number of trajectories in the simulation.
    N : int
        Number of steps for the simulation.

    Returns
    -------
    t : numpy.ndarray of shape (N+1,)
        Regular grid of discretization times in [t0, t0+T].
    X : numpy.ndarray of shape (M, N+1)
        Simulation consisting of M trajectories; each row holds the values
        of one trajectory at the times in t.

    Example
    -------
    >>> t0, S0, T, mu, sigma = 0, 100.0, 2.0, 0.3, 0.4
    >>> def a(t, St): return mu*St
    >>> def b(t, St): return sigma*St
    >>> def db_dSt(t, St): return sigma
    >>> t, S = milstein(t0, S0, T, a, b, db_dSt, 20, 1000)
    """
    # Use an explicitly float array: np.tile(x0, ...) would inherit an
    # integer dtype from an integer x0 and silently truncate every update.
    trajectories = np.full((M, N + 1), x0, dtype=float)
    delta_t = 1.0 * T / N
    sqrt_delta_t = np.sqrt(delta_t)
    # Vectorized regular time grid (same values as generate_regular_grid).
    times = t0 + delta_t * np.arange(N + 1)
    noise = np.random.randn(M, N)
    for idx, (t, x, z) in enumerate(zip(times[:-1], trajectories.T[:-1], noise.T)):
        # Euler-Maruyama step plus the Milstein correction
        # 0.5*b*db_dx*((dW)^2 - dt), with dW = z*sqrt(dt).
        trajectories.T[idx + 1] = (x + a(t, x) * delta_t
                                   + z * b(t, x) * sqrt_delta_t
                                   + 0.5 * b(t, x) * db_dx(t, x) * (z**2 - 1) * delta_t)
    return times, trajectories
def simulate_jump_process(t0, T, simulator_arrival_times, simulator_jumps, M):
    """Simulate M realizations of a jump process.

    Parameters
    ----------
    t0 : float
        Initial time for the simulation.
    T : float
        Length of the simulation interval [t0, t0+T].
    simulator_arrival_times : callable with arguments (t0, T)
        Returns a sequence of arrival times in [t0, t0+T].
    simulator_jumps : callable with argument N
        Returns a sequence with the sizes of N jumps.
    M : int
        Number of trajectories in the simulation.

    Returns
    -------
    times_of_jumps : list of length M
        times_of_jumps[m] holds the arrival times of the m-th realization.
    sizes_of_jumps : list of length M
        sizes_of_jumps[m] holds the corresponding jump sizes (one per
        arrival time).
    """
    # (The previous docstring described a regular time grid and an (M, N+1)
    # array, which this function never produced; the Returns section above
    # documents the actual per-trajectory lists.)
    times_of_jumps = []
    sizes_of_jumps = []
    for _ in range(M):
        # Keep the two simulator calls interleaved per trajectory so that
        # simulators sharing a random-number stream behave as before.
        arrivals = simulator_arrival_times(t0, T)
        times_of_jumps.append(arrivals)
        sizes_of_jumps.append(simulator_jumps(len(arrivals)))
    return times_of_jumps, sizes_of_jumps
# Stochastic Euler scheme for the numerical solution of a jump-diffusion SDE
def euler_jump_diffusion(t0, x0, T, a, b, c,
                         simulator_jump_process,
                         M, N):
    """Simulate a jump-diffusion process with a stochastic Euler scheme.

        x(t0) = x0
        dx(t) = a(t, x(t))*dt + b(t, x(t))*dW(t) + c(t, x(t)) dJ(t)
        [Itô SDE with a jump term]

    Parameters
    ----------
    t0 : float
        Initial time for the simulation.
    x0 : float
        Initial level of the process.
    T : float
        Length of the simulation interval [t0, t0+T].
    a : callable
        Function a(t, x(t)) that characterizes the drift term.
    b : callable
        Function b(t, x(t)) that characterizes the diffusion term.
    c : callable
        Function c(t, x(t)) that characterizes the jump term.
    simulator_jump_process : callable with arguments (t0, T, M)
        Returns per-trajectory jump times and jump sizes.
    M : int
        Number of trajectories in the simulation.
    N : int
        Number of steps of the regular grid.

    Returns
    -------
    t : list of M arrays
        Non-regular discretization times per trajectory (regular grid
        merged with that trajectory's jump times); lengths differ between
        trajectories, depending on how many jumps were generated.
    X : list of M arrays
        Values of each trajectory at the corresponding times.
    """
    # Regular grid paired with a marker row of None: a None "jump size"
    # means no jump occurs at that grid time.
    delta_t = 1.0 * T / N
    sqrt_delta_t = np.sqrt(delta_t)
    original_times = np.array([generate_regular_grid(t0, delta_t, N), np.repeat(None, N+1)])
    # Per-trajectory outputs (ragged: each entry can have a different length).
    final_times = []
    final_trajectories = []
    # Simulate the jump processes for all M trajectories up front.
    times_of_jumps, sizes_of_jumps = simulator_jump_process(t0, T, M)
    for single_times_oj_jumps, single_sizes_oj_jumps in zip(times_of_jumps, sizes_of_jumps):
        # 2-d array pairing this trajectory's jump times with jump sizes.
        jump_times = np.array([single_times_oj_jumps, single_sizes_oj_jumps])
        # Merge the regular grid with the jump times and sort by time,
        # keeping each jump size attached to its time (stable sort; a jump
        # that coincides with a grid time sorts after the grid entry).
        times_and_jumps = np.concatenate((original_times, jump_times), axis=1)
        times_and_jumps = np.array(sorted(times_and_jumps.T, key=lambda x: x[0])).T
        times, jumps = times_and_jumps
        N_t = len(times)
        # One standard-normal draw per transition between consecutive times.
        noise = np.random.randn(N_t-1)
        # Trajectory initialized at x0 everywhere; filled in below.
        # NOTE(review): np.repeat inherits x0's dtype, so an integer x0
        # yields an integer trajectory that truncates updates -- confirm
        # callers always pass a float x0.
        trajectory = np.repeat(x0, N_t)
        # Euler step between consecutive merged times; when the current
        # entry carries a jump size, apply the jump on top of the step.
        # NOTE(review): the diffusion step uses the fixed grid delta_t even
        # though the merged grid is non-uniform around jump times -- this
        # mirrors the original scheme; confirm it is intentional.
        for idx, (t, j, x, z) in enumerate(zip(times[:-1], jumps[:-1], trajectory[:-1], noise)):
            trajectory[idx+1] = x + a(t, x) * delta_t + z * b(t, x) * sqrt_delta_t
            if j is not None:
                trajectory[idx+1] = trajectory[idx+1] + c(t, trajectory[idx+1]) * j
        # Collect this trajectory's (ragged) times and values.
        final_times.append(times)
        final_trajectories.append(trajectory)
    return final_times, final_trajectories
def subplot_mean_and_std(x, mean, std, fig_num=1, color='b',
                         fill_color='#1f77b4',
                         xlims=None, ylims=None, xlabel=None,
                         ylabel=None, title=None, alpha_std=.3):
    """Plot a mean curve with a shaded band of +/- std around it.

    Parameters
    ----------
    x : numpy.ndarray
        Values for the horizontal axis.
    mean : numpy.ndarray
        Mean of the y-component to plot.
    std : numpy.ndarray
        Half-width of the shaded band around the mean.
    fig_num : int, optional
        Matplotlib figure number to draw on.
    color : string, optional
        Line color for the mean curve.
    fill_color : string, optional
        Fill color for the band.
    xlims, ylims : numpy.ndarray, optional
        Axis limits; applied only when given.
    xlabel, ylabel, title : string, optional
        Axis labels and figure title; applied only when given.
    alpha_std : float, optional
        Alpha of the band fill.

    Returns
    -------
    None; draws on the selected figure.
    """
    plt.figure(fig_num)
    plt.plot(x, mean, color=color)
    plt.fill_between(x, mean - std, mean + std, color=fill_color, alpha=alpha_std)
    # Apply the optional decorations only when the caller supplied them.
    optional_settings = (
        (xlims, plt.xlim),
        (ylims, plt.ylim),
        (xlabel, plt.xlabel),
        (ylabel, plt.ylabel),
        (title, plt.title),
    )
    for argument, apply in optional_settings:
        if argument is not None:
            apply(argument)
| 33.239645 | 96 | 0.608367 | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 27 17:29:26 2020
@author: Alberto Suárez
"""
# Load packages
import numpy as np
import sys
import matplotlib.pyplot as plt
from numpy.core.shape_base import _accumulate
def generate_regular_grid(t0, delta_t, N):
    """Generate a regular grid of N+1 discretization times.

    Parameters
    ----------
    t0 : float
        Initial time for the simulation.
    delta_t : float
        Step between consecutive grid times.
    N : int
        Number of steps for the simulation.

    Returns
    -------
    t : numpy.ndarray of shape (N+1,)
        Regular grid of discretization times
        [t0, t0+delta_t, ..., t0+N*delta_t].

    Example
    -------
    >>> generate_regular_grid(0, 0.1, 100)
    """
    # Vectorized form of np.array([t0 + delta_t*i for i in range(N+1)]):
    # one C-level operation instead of a Python-level loop, same values.
    return t0 + delta_t * np.arange(N + 1)
def euler_maruyana(t0, x0, T, a, b, M, N):
    """Integrate an Itô SDE with the stochastic Euler (Euler-Maruyama) scheme.

        x(t0) = x0
        dx(t) = a(t, x(t))*dt + b(t, x(t))*dW(t)

    Parameters
    ----------
    t0 : float
        Initial time for the simulation.
    x0 : float
        Initial level of the process.
    T : float
        Length of the simulation interval [t0, t0+T].
    a : callable
        Drift coefficient a(t, x(t)).
    b : callable
        Diffusion coefficient b(t, x(t)).
    M : int
        Number of trajectories in the simulation.
    N : int
        Number of steps for the simulation.

    Returns
    -------
    t : numpy.ndarray of shape (N+1,)
        Regular grid of discretization times in [t0, t0+T].
    X : numpy.ndarray of shape (M, N+1)
        M simulated trajectories, one per row, evaluated at the times in t.
    """
    step = 1.0 * T / N
    sqrt_step = np.sqrt(step)
    grid = generate_regular_grid(t0, step, N)
    # All M paths start at x0; paths are filled column by column below.
    paths = np.tile(x0, (M, N + 1))
    gaussians = np.random.randn(M, N)
    columns = paths.T
    for k in range(N):
        t_k = grid[k]
        x_k = columns[k]          # all trajectories at time t_k
        z_k = gaussians[:, k]     # one standard-normal draw per trajectory
        columns[k + 1] = x_k + a(t_k, x_k) * step + b(t_k, x_k) * z_k * sqrt_step
    return grid, paths
def milstein(t0, x0, T, a, b, db_dx, M, N):
    """Numerical integration of an SDE using the stochastic Milstein scheme.

        x(t0) = x0
        dx(t) = a(t, x(t))*dt + b(t, x(t))*dW(t)   [Itô SDE]

    Parameters
    ----------
    t0 : float
        Initial time for the simulation.
    x0 : float
        Initial level of the process.
    T : float
        Length of the simulation interval [t0, t0+T].
    a : callable
        Function a(t, x(t)) that characterizes the drift term.
    b : callable
        Function b(t, x(t)) that characterizes the diffusion term.
    db_dx : callable
        Function db_dx(t, x(t)), derivative of b wrt its second argument.
    M : int
        Number of trajectories in the simulation.
    N : int
        Number of steps for the simulation.

    Returns
    -------
    t : numpy.ndarray of shape (N+1,)
        Regular grid of discretization times in [t0, t0+T].
    X : numpy.ndarray of shape (M, N+1)
        M simulated trajectories, one per row, evaluated at the times in t.
    """
    # NOTE(review): np.tile inherits x0's dtype; an integer x0 yields an
    # integer array that truncates every update -- confirm callers always
    # pass a float x0.
    trajectories = np.tile(x0, (M, N+1))
    delta_t = 1.0 * T / N
    sqrt_delta_t = np.sqrt(delta_t)
    times =generate_regular_grid(t0, delta_t, N)
    noise = np.random.randn(M, N)
    # March column by column: x holds all M trajectories at time t, z the
    # corresponding standard-normal draws. The last term is the Milstein
    # correction 0.5*b*db_dx*((dW)^2 - dt) with dW = z*sqrt(dt).
    for idx, (t, x, z) in enumerate(zip(times[:-1], trajectories.T[:-1], noise.T)):
        trajectories.T[idx+1] = x + a(t, x) * delta_t + z * b(t, x) * sqrt_delta_t \
            + 0.5 * b(t, x) * db_dx(t, x) * (z**2 - 1) * delta_t
    return times, trajectories
def simulate_jump_process(t0, T, simulator_arrival_times, simulator_jumps, M):
    """Simulate M realizations of a jump process.

    Parameters
    ----------
    t0 : float
        Initial time for the simulation.
    T : float
        Length of the simulation interval [t0, t0+T].
    simulator_arrival_times : callable with arguments (t0, T)
        Returns a sequence of arrival times in [t0, t0+T].
    simulator_jumps : callable with argument N
        Returns a sequence with the sizes of N jumps.
    M : int
        Number of trajectories in the simulation.

    Returns
    -------
    times_of_jumps : list of length M
        Arrival times of each realization.
    sizes_of_jumps : list of length M
        Jump sizes matching each realization's arrival times.
    """
    arrival_lists = []
    size_lists = []
    # The two simulator calls stay interleaved per trajectory, preserving
    # the original call order for simulators that share a random stream.
    for _ in range(M):
        arrivals = simulator_arrival_times(t0, T)
        arrival_lists.append(arrivals)
        size_lists.append(simulator_jumps(len(arrivals)))
    return arrival_lists, size_lists
# Stochastic Euler scheme for the numerical solution of a jump-diffusion SDE
def euler_jump_diffusion(t0, x0, T, a, b, c,
                         simulator_jump_process,
                         M, N):
    """Simulate a jump-diffusion process with a stochastic Euler scheme.

        x(t0) = x0
        dx(t) = a(t, x(t))*dt + b(t, x(t))*dW(t) + c(t, x(t)) dJ(t)

    Parameters
    ----------
    t0 : float
        Initial time for the simulation.
    x0 : float
        Initial level of the process.
    T : float
        Length of the simulation interval [t0, t0+T].
    a, b, c : callable
        Drift a(t, x), diffusion b(t, x) and jump c(t, x) coefficients.
    simulator_jump_process : callable with arguments (t0, T, M)
        Returns per-trajectory jump times and jump sizes.
    M : int
        Number of trajectories in the simulation.
    N : int
        Number of steps of the regular grid.

    Returns
    -------
    t : list of M arrays
        Merged (non-regular) discretization times per trajectory; lengths
        differ with the number of jumps generated.
    X : list of M arrays
        Values of each trajectory at the corresponding times.
    """
    step = 1.0 * T / N
    sqrt_step = np.sqrt(step)
    # Regular grid paired with a None marker row: None means "no jump here".
    base_grid = np.array([generate_regular_grid(t0, step, N), np.repeat(None, N + 1)])
    all_times = []
    all_paths = []
    jump_instants, jump_sizes = simulator_jump_process(t0, T, M)
    for instants_m, sizes_m in zip(jump_instants, jump_sizes):
        marked_jumps = np.array([instants_m, sizes_m])
        # Merge grid and jump times, stably sorted by time with each jump
        # size staying attached to its time.
        merged = np.concatenate((base_grid, marked_jumps), axis=1)
        merged = np.array(sorted(merged.T, key=lambda col: col[0])).T
        times_m, jumps_m = merged
        n_times = len(times_m)
        shocks = np.random.randn(n_times - 1)
        path = np.repeat(x0, n_times)
        for k in range(n_times - 1):
            t_k, x_k, z_k = times_m[k], path[k], shocks[k]
            # Euler step with the fixed grid step, as in the base scheme.
            path[k + 1] = x_k + a(t_k, x_k) * step + z_k * b(t_k, x_k) * sqrt_step
            size_k = jumps_m[k]
            if size_k is not None:
                # Apply the jump on top of the freshly stored value.
                path[k + 1] = path[k + 1] + c(t_k, path[k + 1]) * size_k
        all_times.append(times_m)
        all_paths.append(path)
    return all_times, all_paths
def subplot_mean_and_std(x, mean, std, fig_num=1, color='b',
                         fill_color='#1f77b4',
                         xlims=None, ylims=None, xlabel=None,
                         ylabel=None, title=None, alpha_std=.3):
    """Plot a mean curve with a shaded band of +/- std around it.

    Parameters
    ----------
    x : numpy.ndarray
        Values for the horizontal axis.
    mean : numpy.ndarray
        Mean of the y-component to plot.
    std : numpy.ndarray
        Half-width of the shaded band around the mean.
    fig_num : int, optional
        Matplotlib figure number to draw on.
    color : string, optional
        Line color for the mean curve.
    fill_color : string, optional
        Fill color for the band.
    xlims : numpy.ndarray, optional
        x-axis limits; applied only when given.
    ylims : numpy.ndarray, optional
        y-axis limits; applied only when given.
    xlabel : string, optional
        x-axis label; applied only when given.
    ylabel : string, optional
        y-axis label; applied only when given.
    title : string, optional
        Figure title; applied only when given.
    alpha_std : float, optional
        Alpha of the band fill.

    Returns
    -------
    None; draws on the selected figure.
    """
    plt.figure(fig_num)
    plt.plot(x, mean, color=color)
    plt.fill_between(x, mean-std, mean+std, color=fill_color, alpha=alpha_std)
    # Optional decorations are applied only when explicitly supplied.
    if xlims is not None: plt.xlim(xlims)
    if ylims is not None: plt.ylim(ylims)
    if xlabel is not None: plt.xlabel(xlabel)
    if ylabel is not None: plt.ylabel(ylabel)
    if title is not None: plt.title(title)
| 0 | 0 | 0 |
11acd7128accc76ea793f88fdc32b2cdbf2df5af | 239 | py | Python | tudo/ex026.py | Ramon-Erik/Exercicios-Python | 158a7f1846dd3d486aa0517fa337d46d73aab649 | [
"MIT"
] | 1 | 2021-07-08T00:35:57.000Z | 2021-07-08T00:35:57.000Z | tudo/ex026.py | Ramon-Erik/Exercicios-Python | 158a7f1846dd3d486aa0517fa337d46d73aab649 | [
"MIT"
] | null | null | null | tudo/ex026.py | Ramon-Erik/Exercicios-Python | 158a7f1846dd3d486aa0517fa337d46d73aab649 | [
"MIT"
] | null | null | null | nome = str(input('>>Seu nome completo:\n')).strip()
print('>>No seu nome há ', nome.upper().count('A'), 'letra(s) A.')
print('>>Aparece na posição ', nome.upper().find('A')+1)
print('>>A última aparição é na ', nome.upper().rfind('A') + 1) | 59.75 | 66 | 0.610879 | nome = str(input('>>Seu nome completo:\n')).strip()
print('>>No seu nome há ', nome.upper().count('A'), 'letra(s) A.')
print('>>Aparece na posição ', nome.upper().find('A')+1)
print('>>A última aparição é na ', nome.upper().rfind('A') + 1) | 0 | 0 | 0 |
d89154d9d48506bf0641a0e03e468bd570763d0c | 639 | py | Python | mapper/mid_level/encoder.py | umerhasan17/deep-robust-robotnav | 765e586dc897b26286af8f13d6ecdf212e78bccf | [
"Apache-2.0"
] | null | null | null | mapper/mid_level/encoder.py | umerhasan17/deep-robust-robotnav | 765e586dc897b26286af8f13d6ecdf212e78bccf | [
"Apache-2.0"
] | null | null | null | mapper/mid_level/encoder.py | umerhasan17/deep-robust-robotnav | 765e586dc897b26286af8f13d6ecdf212e78bccf | [
"Apache-2.0"
] | null | null | null | import visualpriors
import torch
from config.config import device
def mid_level_representations(input_image_tensor, representation_names):
    """Encode an image batch into concatenated mid-level visual priors.

    :param input_image_tensor: (batch_size, 3, 256, 256)
    :param representation_names: list of mid-level representation names
    :return: concatted image tensor to pass into FCN (batch_size, 8*len(representation_names), 16, 16)
    """
    # Each transform maps (batch_size, 3, 256, 256) -> (batch_size, 8, 16, 16);
    # the per-representation outputs are stacked along the channel dimension.
    encoded = [
        visualpriors.representation_transform(input_image_tensor, name, device=device)
        for name in representation_names
    ]
    return torch.cat(encoded, dim=1)
| 37.588235 | 110 | 0.733959 | import visualpriors
import torch
from config.config import device
def mid_level_representations(input_image_tensor, representation_names):
    """Encode a batch of images into concatenated mid-level representations.

    :param input_image_tensor: input batch; per the author's annotation the
        expected shape is (batch_size, 3, 256, 256) -- confirm against callers.
    :param representation_names: list of visualpriors representation names.
    :return: concatenated feature tensor to pass into the FCN; per the
        author's annotation each transform contributes 8 channels, giving
        (batch_size, 8*len(representation_names), 16, 16).
    """
    representations = []
    for name in representation_names:
        # visualpriors transform: (batch_size, 3, 256, 256) --> (batch_size, 8, 16, 16),
        # computed on the globally configured `device`.
        representations.append(visualpriors.representation_transform(input_image_tensor, name, device=device))
    return torch.cat(representations, dim=1)
| 0 | 0 | 0 |
09fb8e8aa27e2b40173fc4065e41b574b8f437f2 | 1,334 | py | Python | pmutt/tests/statmech/test_pmutt_statmech_nucl.py | wittregr/pMuTT | 1678fd3d3a10d8ef5389c02970a7ebaa92fc7344 | [
"MIT"
] | 28 | 2018-10-29T17:44:30.000Z | 2022-03-23T14:20:16.000Z | pmutt/tests/statmech/test_pmutt_statmech_nucl.py | wittregr/pMuTT | 1678fd3d3a10d8ef5389c02970a7ebaa92fc7344 | [
"MIT"
] | 101 | 2018-10-18T19:49:30.000Z | 2022-01-19T10:59:57.000Z | pmutt/tests/statmech/test_pmutt_statmech_nucl.py | wittregr/pMuTT | 1678fd3d3a10d8ef5389c02970a7ebaa92fc7344 | [
"MIT"
] | 16 | 2018-12-15T17:01:21.000Z | 2022-01-03T17:42:23.000Z | # -*- coding: utf-8 -*-
"""
pmutt.test_pmutt_model_statmech_nucl
Tests for pmutt module
"""
import unittest
from pmutt.statmech import nucl
if __name__ == '__main__':
unittest.main()
| 25.653846 | 69 | 0.649925 | # -*- coding: utf-8 -*-
"""
pmutt.test_pmutt_model_statmech_nucl
Tests for pmutt module
"""
import unittest
from pmutt.statmech import nucl
class TestEmptyNucl(unittest.TestCase):
    """Unit tests for nucl.EmptyNucl.

    The assertions below pin q == 1 and every derived dimensionless
    thermodynamic quantity (Cv/R, Cp/R, U/RT, H/RT, S/R, F/RT, G/RT) == 0,
    and check the to_dict/from_dict round trip.
    """

    def setUp(self):
        unittest.TestCase.setUp(self)
        # Fresh instance per test plus the expected to_dict() payload.
        self.nuclear = nucl.EmptyNucl()
        self.nuclear_dict = {
            'class': "<class 'pmutt.statmech.nucl.EmptyNucl'>"
        }

    def test_get_q(self):
        self.assertEqual(self.nuclear.get_q(), 1.)

    def test_get_CvoR(self):
        self.assertEqual(self.nuclear.get_CvoR(), 0.)

    def test_get_CpoR(self):
        self.assertEqual(self.nuclear.get_CpoR(), 0.)

    def test_get_UoRT(self):
        self.assertEqual(self.nuclear.get_UoRT(), 0.)

    def test_get_HoRT(self):
        self.assertEqual(self.nuclear.get_HoRT(), 0.)

    def test_get_SoR(self):
        self.assertEqual(self.nuclear.get_SoR(), 0.)

    def test_get_FoRT(self):
        self.assertEqual(self.nuclear.get_FoRT(), 0.)

    def test_get_GoRT(self):
        self.assertEqual(self.nuclear.get_GoRT(), 0.)

    def test_to_dict(self):
        self.assertEqual(self.nuclear.to_dict(), self.nuclear_dict)

    def test_from_dict(self):
        self.assertEqual(nucl.EmptyNucl.from_dict(self.nuclear_dict),
                         self.nuclear)
# Allow running this test module directly (python test_pmutt_statmech_nucl.py).
if __name__ == '__main__':
    unittest.main()
| 807 | 18 | 319 |
dffaaaba64307dc711352793913b40a2616734eb | 5,049 | py | Python | python_examples/control_mapping_framework/utils.py | kirank0220/api-examples | 9d6c51eeb2d4e38d95b0b7d88fd30fe96ef28d20 | [
"MIT"
] | 1 | 2021-12-20T16:49:00.000Z | 2021-12-20T16:49:00.000Z | python_examples/control_mapping_framework/utils.py | kirank0220/api-examples | 9d6c51eeb2d4e38d95b0b7d88fd30fe96ef28d20 | [
"MIT"
] | 2 | 2020-11-20T04:51:16.000Z | 2021-06-16T17:02:35.000Z | python_examples/control_mapping_framework/utils.py | kirank0220/api-examples | 9d6c51eeb2d4e38d95b0b7d88fd30fe96ef28d20 | [
"MIT"
] | 1 | 2020-11-20T04:46:17.000Z | 2020-11-20T04:46:17.000Z | #########################################################################
# _________ ___. ______________________ ___
# \_ ___ \___.__.\_ |__ ___________ / _____/\______ \ \/ /
# / \ \< | | | __ \_/ __ \_ __ \/ \ ___ | _/\ /
# \ \___\___ | | \_\ \ ___/| | \/\ \_\ \| | \/ \
# \______ / ____| |___ /\___ >__| \______ /|____|_ /___/\ \
# \/\/ \/ \/ \/ \/ \_/
#
#
import re
from glom import glom
from openpyxl.cell import Cell, MergedCell
from openpyxl.styles import PatternFill
from openpyxl.styles import colors
from openpyxl.styles.fills import FILL_SOLID
from openpyxl.utils.exceptions import IllegalCharacterError
_VLOOKUP_REGEX = re.compile(r'.*?VLOOKUP\("(?P<control>\d+\.\d+\.\d+\.\d+|[A-Z]{1,3}\.\d+\.\d+\.\d+).*?".*')
| 35.556338 | 114 | 0.520895 | #########################################################################
# _________ ___. ______________________ ___
# \_ ___ \___.__.\_ |__ ___________ / _____/\______ \ \/ /
# / \ \< | | | __ \_/ __ \_ __ \/ \ ___ | _/\ /
# \ \___\___ | | \_\ \ ___/| | \/\ \_\ \| | \/ \
# \______ / ____| |___ /\___ >__| \______ /|____|_ /___/\ \
# \/\/ \/ \/ \/ \/ \_/
#
#
import re
from glom import glom
from openpyxl.cell import Cell, MergedCell
from openpyxl.styles import PatternFill
from openpyxl.styles import colors
from openpyxl.styles.fills import FILL_SOLID
from openpyxl.utils.exceptions import IllegalCharacterError
_VLOOKUP_REGEX = re.compile(r'.*?VLOOKUP\("(?P<control>\d+\.\d+\.\d+\.\d+|[A-Z]{1,3}\.\d+\.\d+\.\d+).*?".*')
def create_sheet(wb, sheet_name):
    """Ensure *wb* has a worksheet named *sheet_name* that is empty below row 1.

    If the sheet already exists, every row from row 2 downwards is removed,
    preserving the header row; otherwise a new, empty sheet is created.
    """
    try:
        sheet = wb[sheet_name]
        # Delete all data rows; row 1 (the header) is kept.
        sheet.delete_rows(2, amount=len([r for r in sheet]))
    except KeyError:
        # openpyxl raises KeyError when no sheet has this title.
        wb.create_sheet(sheet_name)
def cell_value(cell):
    """Return the cell's value as a stripped string, or "" for empty/missing cells."""
    if cell and cell.value:
        return str(cell.value).strip()
    return ""
def control_search(row):
    """Collect every control identifier referenced by VLOOKUP formulas in *row*."""
    matches = set()
    for entry in row.values():
        # Only real worksheet cells can carry formulas; skip anything else.
        if not isinstance(entry, (Cell, MergedCell)):
            continue
        text = cell_value(entry)
        matches.update(_VLOOKUP_REGEX.findall(text))
    return matches
def sheet_writer(wb, name, columns, mapping=None, insert_controls=None):
    """Build a row-writer for worksheet *name* of workbook *wb*.

    *columns* is a sequence of tuples: (header label, glom spec key[, color]),
    where color "blue" / "orange" selects the header fill. When *mapping* is
    not supplied, an identity glom mapping is built from the column spec keys.
    Returns a callable ``writer(blob)`` that glom-transforms *blob* and writes
    one (or, for list values, several) worksheet rows; the callable carries a
    ``writer.finalizer`` attribute that inserts any *insert_controls* not seen
    during writing as "Not Applicable" rows and auto-sizes the columns.
    """
    if not mapping:
        # NOTE(review): indentation was lost in this copy; this reconstruction
        # builds the identity mapping only when no mapping was supplied --
        # confirm a partially supplied mapping should not be augmented.
        mapping = {}
        for c in columns:
            if not mapping.get(c[1], None):
                mapping[c[1]] = c[1]

    def builder(sheet):
        # Write and style the header row from the column spec.
        for idx, injector in enumerate(columns):
            cell = sheet.cell(row=1, column=1 + idx)
            cell.value = injector[0]
            cell.font = cell.font.copy(bold=True)
            if len(injector) <= 2:
                # No color tag: default grey header.
                cell.fill = PatternFill(FILL_SOLID, start_color="C9C9C9", end_color="C9C9C9")
            elif injector[2] == "blue":
                cell.fill = PatternFill(FILL_SOLID, start_color="0065B8", end_color="0065B8")
                cell.font = cell.font.copy(color=colors.WHITE)
            elif injector[2] == "orange":
                cell.fill = PatternFill(FILL_SOLID, start_color="FFB802", end_color="FFB802")
            else:
                cell.fill = PatternFill(FILL_SOLID, start_color="C9C9C9", end_color="C9C9C9")

        def write_value(_row, _col, _val):
            # Store a value, degrading to "" when openpyxl rejects the text.
            cell = sheet.cell(row=_row, column=_col)
            try:
                cell.value = _val
            except IllegalCharacterError:
                print(f"Unable to store {_val} it contained an illegal character.")
                cell.value = ""

        # Shared cursor state for the writer/finalizer closures below.
        row = 2
        encountered = set()

        def writer(blob):
            nonlocal row
            nonlocal encountered
            transformed = glom(blob, mapping)
            if insert_controls:
                # Track which tracked controls actually appeared in the data.
                for v in transformed.values():
                    if v in insert_controls:
                        encountered.add(v)
            # List/tuple values span several rows; scalars occupy one.
            multi_row = 0
            for idx, injector in enumerate(columns):
                value = transformed[injector[1]]
                if value is None:
                    continue
                if not isinstance(value, (list, tuple)):
                    write_value(row, 1 + idx, value)
                else:
                    multi_row = max(multi_row, len(value))
                    for i, v in enumerate(value):
                        write_value(row + i, 1 + idx, v)
            row = row + (multi_row if multi_row else 1)

        def finalizer():
            nonlocal encountered
            nonlocal row
            if insert_controls:
                # Restore controls as NA that were not present in the assessment so that the VLOOKUPS do not break
                missing_controls = [c for c in insert_controls.difference(encountered)]
                missing_controls.sort()
                for control in missing_controls:
                    write_value(row, 1, control)
                    write_value(
                        row, 2, "Inserted as Not Applicable, the vendor's assessment did not ask this question"
                    )
                    write_value(row, 3, "AnsweredNotApplicable")
                    write_value(row, 4, "")
                    write_value(row, 5, "")
                    write_value(row, 6, "")
                    write_value(row, 7, "")
                    write_value(row, 8, "Not Reviewed")
                    write_value(row, 9, "SubControl")
                    row += 1
            # Auto-size each column (clamped to [9, 125]) and enable wrapping.
            for column_cells in sheet.columns:
                length = min(125, max(9, max(len(cell_value(cell)) + 1 for cell in column_cells)),)
                for cell in column_cells:
                    cell.alignment = cell.alignment.copy(wrapText=True)
                sheet.column_dimensions[column_cells[0].column_letter].width = length

        # Expose the finalizer on the writer so callers can flush at the end.
        writer.finalizer = finalizer
        return writer

    return builder(wb[name])
| 4,072 | 0 | 92 |
12b82684a12db7b841b1d175e77609b1454d82f8 | 8,378 | py | Python | delphi/translators/for2py/measure-coverage.py | mikiec84/delphi | 2e517f21e76e334c7dfb14325d25879ddf26d10d | [
"Apache-2.0"
] | 25 | 2018-03-03T11:57:57.000Z | 2022-01-16T21:19:54.000Z | delphi/translators/for2py/measure-coverage.py | mikiec84/delphi | 2e517f21e76e334c7dfb14325d25879ddf26d10d | [
"Apache-2.0"
] | 385 | 2018-02-21T16:52:06.000Z | 2022-02-17T07:44:56.000Z | delphi/translators/for2py/measure-coverage.py | mikiec84/delphi | 2e517f21e76e334c7dfb14325d25879ddf26d10d | [
"Apache-2.0"
] | 19 | 2018-03-20T01:08:11.000Z | 2021-09-29T01:04:49.000Z | #!/usr/bin/env python3.7
""" This file contains code to carry out a simple estimate of the amount of
Fortran code 'handled' by for2py.
COMMAND-LINE INVOCATION:
python3.7 measure-coverage.py <directory with Fortran code files>
ADDING HANDLED LANGUAGE CONSTRUCTS:
As the set of language features handled by for2py grows, they should be
incorporated into this script. This can be done as follows:
1) Write a regular expression to recognize that feature (see examples
under "SYNTAX MATCHING" below).
2) Add the regular expression to the list for the variable HANDLED.
"""
import os
import sys
import delphi.translators.for2py.preprocessor
from .syntax import *
FORTRAN_EXTENSIONS = ['.f', '.f90', '.for']
################################################################################
# #
# SYNTAX MATCHING #
# #
################################################################################
# Regular expressions that specify patterns for various Fortran constructs.
# These are very similar to the constructs in the file syntax.py, but only
# include constructs that are currently handled in for2py.
FN_START = r"\s*(\w*\s*){0,2}function\s+(\w+)\s*\("
RE_FN_START = re.compile(FN_START, re.I)
PGM_UNIT = r"\s*\w*\s*(program|module|subroutine|(\w*\s*){0,2}function)\s+(\w+)"
RE_PGM_UNIT_START = re.compile(PGM_UNIT, re.I)
PGM_UNIT_SEP = r"\s+contains(\W+)"
RE_PGM_UNIT_SEP = re.compile(PGM_UNIT_SEP, re.I)
PGM_UNIT_END = r"\s*[a-z]*\s*end\s+(program|module|subroutine|function)\s+"
RE_PGM_UNIT_END = re.compile(PGM_UNIT_END, re.I)
SUBPGM_END = r"\s*end\s+"
RE_SUBPGM_END = re.compile(SUBPGM_END, re.I)
ASSG_STMT = r"\s*(\d+|&)?\s*.*=\s*"
RE_ASSG_STMT = re.compile(ASSG_STMT, re.I)
IMPLICIT_STMT = r"\s*implicit\s+"
RE_IMPLICIT_STMT = re.compile(IMPLICIT_STMT, re.I)
CALL_STMT = r"\s*(\d+|&)?\s*call\s*"
RE_CALL_STMT = re.compile(CALL_STMT, re.I)
IO_STMT = r"\s*(\d+|&)?\s*(open|close|read|write|print|format|rewind)\W*"
RE_IO_STMT = re.compile(IO_STMT, re.I)
DO_STMT = r"\s*(\d+|&)?\s*do\s*"
RE_DO_STMT = re.compile(DO_STMT, re.I)
ENDDO_STMT = r"\s*(\d+|&)?\s*end\s*do\s*"
RE_ENDDO_STMT = re.compile(ENDDO_STMT, re.I)
ENDIF_STMT = r"\s*(\d+|&)?\s*end\s*if\s*"
RE_ENDIF_STMT = re.compile(ENDIF_STMT, re.I)
GOTO_STMT = r"\s*(\d+|&)?\s*go\s*to\s*"
RE_GOTO_STMT = re.compile(GOTO_STMT, re.I)
IF_STMT = r"\s*(\d+|&)?\s*(if|elseif|else)\s*"
RE_IF_STMT = re.compile(IF_STMT, re.I)
PAUSE_STMT = r"\s*(\d+|&)?\s*pause\s*"
RE_PAUSE_STMT = re.compile(PAUSE_STMT, re.I)
USE_STMT = r"\s*(\d+|&)?\s*use\s*"
RE_USE_STMT = re.compile(USE_STMT, re.I)
RETURN_STMT = r"\s*(\d+|&)?\s*return\s*"
RE_RETURN_STMT = re.compile(RETURN_STMT, re.I)
CYCLE_STMT = r"\s*(\d+|&)?\s*cycle\s*"
RE_CYCLE_STMT = re.compile(CYCLE_STMT, re.I)
EXIT_STMT = r"\s*(\d+|&)?\s*exit\s*"
RE_EXIT_STMT = re.compile(EXIT_STMT, re.I)
SAVE_STMT = r"\s*(\d+|&)?\s*save\s*"
RE_SAVE_STMT = re.compile(SAVE_STMT, re.I)
SELECT_STMT = r"\s*(\d+|&)?\s*select\s*case\s*"
RE_SELECT_STMT = re.compile(SELECT_STMT, re.I)
ENDSELECT_STMT = r"\s*(\d+|&)?\s*end\s*select\s*"
RE_ENDSELECT_STMT = re.compile(ENDSELECT_STMT, re.I)
CASE_STMT = r"\s*(\d+|&)?\s*case\s*"
RE_CASE_STMT = re.compile(CASE_STMT, re.I)
STOP_STMT = r"\s*(\d+|&)?\s*stop\s*"
RE_STOP_STMT = re.compile(STOP_STMT, re.I)
TYPE_NAMES = r"^\s*(integer|real|double\s+precision|logical|dimension|type)\W*"
RE_TYPE_NAMES = re.compile(TYPE_NAMES, re.I)
HANDLED = [
RE_FN_START,
RE_PGM_UNIT_START,
RE_PGM_UNIT_SEP,
RE_PGM_UNIT_END,
RE_SUBPGM_END,
RE_ASSG_STMT,
RE_CALL_STMT,
RE_CYCLE_STMT,
RE_EXIT_STMT,
RE_IMPLICIT_STMT,
RE_IO_STMT,
RE_DO_STMT,
RE_ENDDO_STMT,
RE_ENDIF_STMT,
RE_GOTO_STMT,
RE_IF_STMT,
RE_PAUSE_STMT,
RE_RETURN_STMT,
RE_SAVE_STMT,
RE_STOP_STMT,
RE_TYPE_NAMES,
RE_USE_STMT,
]
KEYWD = r"\s*(\d+|&)?\s*([a-z]+).*"
RE_KEYWD = re.compile(KEYWD)
################################################################################
# #
# FILE PROCESSING #
# #
################################################################################
if __name__ == "__main__":
main()
| 29.814947 | 94 | 0.579852 | #!/usr/bin/env python3.7
""" This file contains code to carry out a simple estimate of the amount of
Fortran code 'handled' by for2py.
COMMAND-LINE INVOCATION:
python3.7 measure-coverage.py <directory with Fortran code files>
ADDING HANDLED LANGUAGE CONSTRUCTS:
As the set of language features handled by for2py grows, they should be
incorporated into this script. This can be done as follows:
1) Write a regular expression to recognize that feature (see examples
under "SYNTAX MATCHING" below).
2) Add the regular expression to the list for the variable HANDLED.
"""
import os
import sys
import delphi.translators.for2py.preprocessor
from .syntax import *
FORTRAN_EXTENSIONS = ['.f', '.f90', '.for']
################################################################################
# #
# SYNTAX MATCHING #
# #
################################################################################
# Regular expressions that specify patterns for various Fortran constructs.
# These are very similar to the constructs in the file syntax.py, but only
# include constructs that are currently handled in for2py.
FN_START = r"\s*(\w*\s*){0,2}function\s+(\w+)\s*\("
RE_FN_START = re.compile(FN_START, re.I)
PGM_UNIT = r"\s*\w*\s*(program|module|subroutine|(\w*\s*){0,2}function)\s+(\w+)"
RE_PGM_UNIT_START = re.compile(PGM_UNIT, re.I)
PGM_UNIT_SEP = r"\s+contains(\W+)"
RE_PGM_UNIT_SEP = re.compile(PGM_UNIT_SEP, re.I)
PGM_UNIT_END = r"\s*[a-z]*\s*end\s+(program|module|subroutine|function)\s+"
RE_PGM_UNIT_END = re.compile(PGM_UNIT_END, re.I)
SUBPGM_END = r"\s*end\s+"
RE_SUBPGM_END = re.compile(SUBPGM_END, re.I)
ASSG_STMT = r"\s*(\d+|&)?\s*.*=\s*"
RE_ASSG_STMT = re.compile(ASSG_STMT, re.I)
IMPLICIT_STMT = r"\s*implicit\s+"
RE_IMPLICIT_STMT = re.compile(IMPLICIT_STMT, re.I)
CALL_STMT = r"\s*(\d+|&)?\s*call\s*"
RE_CALL_STMT = re.compile(CALL_STMT, re.I)
IO_STMT = r"\s*(\d+|&)?\s*(open|close|read|write|print|format|rewind)\W*"
RE_IO_STMT = re.compile(IO_STMT, re.I)
DO_STMT = r"\s*(\d+|&)?\s*do\s*"
RE_DO_STMT = re.compile(DO_STMT, re.I)
ENDDO_STMT = r"\s*(\d+|&)?\s*end\s*do\s*"
RE_ENDDO_STMT = re.compile(ENDDO_STMT, re.I)
ENDIF_STMT = r"\s*(\d+|&)?\s*end\s*if\s*"
RE_ENDIF_STMT = re.compile(ENDIF_STMT, re.I)
GOTO_STMT = r"\s*(\d+|&)?\s*go\s*to\s*"
RE_GOTO_STMT = re.compile(GOTO_STMT, re.I)
IF_STMT = r"\s*(\d+|&)?\s*(if|elseif|else)\s*"
RE_IF_STMT = re.compile(IF_STMT, re.I)
PAUSE_STMT = r"\s*(\d+|&)?\s*pause\s*"
RE_PAUSE_STMT = re.compile(PAUSE_STMT, re.I)
USE_STMT = r"\s*(\d+|&)?\s*use\s*"
RE_USE_STMT = re.compile(USE_STMT, re.I)
RETURN_STMT = r"\s*(\d+|&)?\s*return\s*"
RE_RETURN_STMT = re.compile(RETURN_STMT, re.I)
CYCLE_STMT = r"\s*(\d+|&)?\s*cycle\s*"
RE_CYCLE_STMT = re.compile(CYCLE_STMT, re.I)
EXIT_STMT = r"\s*(\d+|&)?\s*exit\s*"
RE_EXIT_STMT = re.compile(EXIT_STMT, re.I)
SAVE_STMT = r"\s*(\d+|&)?\s*save\s*"
RE_SAVE_STMT = re.compile(SAVE_STMT, re.I)
SELECT_STMT = r"\s*(\d+|&)?\s*select\s*case\s*"
RE_SELECT_STMT = re.compile(SELECT_STMT, re.I)
ENDSELECT_STMT = r"\s*(\d+|&)?\s*end\s*select\s*"
RE_ENDSELECT_STMT = re.compile(ENDSELECT_STMT, re.I)
CASE_STMT = r"\s*(\d+|&)?\s*case\s*"
RE_CASE_STMT = re.compile(CASE_STMT, re.I)
STOP_STMT = r"\s*(\d+|&)?\s*stop\s*"
RE_STOP_STMT = re.compile(STOP_STMT, re.I)
TYPE_NAMES = r"^\s*(integer|real|double\s+precision|logical|dimension|type)\W*"
RE_TYPE_NAMES = re.compile(TYPE_NAMES, re.I)
HANDLED = [
RE_FN_START,
RE_PGM_UNIT_START,
RE_PGM_UNIT_SEP,
RE_PGM_UNIT_END,
RE_SUBPGM_END,
RE_ASSG_STMT,
RE_CALL_STMT,
RE_CYCLE_STMT,
RE_EXIT_STMT,
RE_IMPLICIT_STMT,
RE_IO_STMT,
RE_DO_STMT,
RE_ENDDO_STMT,
RE_ENDIF_STMT,
RE_GOTO_STMT,
RE_IF_STMT,
RE_PAUSE_STMT,
RE_RETURN_STMT,
RE_SAVE_STMT,
RE_STOP_STMT,
RE_TYPE_NAMES,
RE_USE_STMT,
]
KEYWD = r"\s*(\d+|&)?\s*([a-z]+).*"
RE_KEYWD = re.compile(KEYWD)
def line_is_handled(line):
unhandled_keywds, unhandled_lines = set(), set()
for handled_construct in HANDLED:
if handled_construct.match(line) != None:
return (True, unhandled_keywds, unhandled_lines)
match = RE_KEYWD.match(line.strip().lower())
if match != None:
first_wd = match.group(2)
else:
first_wd = None
if first_wd in F_KEYWDS:
unhandled_keywds.add(first_wd)
else:
unhandled_lines.add(line)
return (False, unhandled_keywds, unhandled_lines)
################################################################################
# #
# FILE PROCESSING #
# #
################################################################################
def get_code_lines(fname):
try:
print("@@@ FILE: " + fname)
f = open(fname, mode="r", encoding="latin-1")
lines = f.readlines()
f.close()
except IOError:
errmsg(f"ERROR: Could not open file {fname}")
else:
enum_lines = list(enumerate(lines, 1))
# Discard empty lines. While these are technically comments, they provide
# no semantic content.
enum_lines = [line for line in enum_lines if line[1].rstrip() != ""]
enum_lines = preprocessor.separate_trailing_comments(enum_lines)
enum_lines = preprocessor.merge_continued_lines(enum_lines)
code_lines = [line[1] for line in enum_lines if not line_is_comment(line[1])]
return code_lines
def process_lines(lines):
unhandled_keywds, unhandled_lines = set(), set()
nlines = len(lines)
nhandled = 0
for line in lines:
handled, u_keywds, u_lines = line_is_handled(line)
if handled:
nhandled += 1
else:
unhandled_keywds|= u_keywds
unhandled_lines |= u_lines
return (nlines, nhandled, unhandled_keywds, unhandled_lines)
def process_file(fname):
code_lines = get_code_lines(fname)
results = process_lines(code_lines)
return results
def process_dir(dirname):
unhandled_keywds, unhandled_lines = set(), set()
nfiles, ntot, nhandled = 0, 0, 0
abs_path = os.path.abspath(dirname)
print(f"processing: {dirname}")
list_of_files = os.listdir(dirname)
for fname in list_of_files:
full_path_to_file = abs_path + "/" + fname
if os.path.isdir(full_path_to_file):
nf1, nt1, nh1, uk1, ul1 = process_dir(full_path_to_file)
nfiles += nf1
ntot += nt1
nhandled += nh1
unhandled_keywds |= uk1
unhandled_lines |= ul1
else:
_, fext = os.path.splitext(fname)
if fext in FORTRAN_EXTENSIONS:
ftot, fhandled, u_keywds, u_lines = process_file(full_path_to_file)
ntot += ftot
nhandled += fhandled
unhandled_keywds |= u_keywds
unhandled_lines |= u_lines
nfiles += 1
else:
sys.stderr.write(f" *** Ignoring {fname} [unrecognized extension]\n")
return (nfiles, ntot, nhandled, unhandled_keywds, unhandled_lines)
def usage():
sys.stderr.write("Usage: measure-coverage.py <src-directory>\n")
def errmsg(msg):
sys.stderr.write(msg + "\n")
sys.exit(1)
def print_results(results):
nfiles, ntot, nhandled, u_keywds, u_lines = results
pct_handled = nhandled/ntot * 100
print(f"Files: {nfiles}; total lines: {ntot}; handled: {nhandled} [{pct_handled:.1f}%]\n")
if u_keywds != set():
print("UNHANDLED KEYWORDS:")
print("------------------")
for item in u_keywds:
print(f" {item}")
if u_lines != set():
print("UNHANDLED LINES:")
print("---------------")
for item in u_lines:
print(f" {item}")
def main():
if len(sys.argv) < 2:
usage()
sys.exit(1)
results = process_dir(sys.argv[1])
print_results(results)
if __name__ == "__main__":
main()
| 3,580 | 0 | 207 |
d8cb68e98db0238e855e2e1a16f15193c330cfd2 | 821 | py | Python | repos/system_upgrade/el7toel8/libraries/config/tests/test_getenvvars.py | panovotn/leapp-repository | e80bdbf65393e68bc2e91b43b46fdd9b9b787878 | [
"Apache-2.0"
] | null | null | null | repos/system_upgrade/el7toel8/libraries/config/tests/test_getenvvars.py | panovotn/leapp-repository | e80bdbf65393e68bc2e91b43b46fdd9b9b787878 | [
"Apache-2.0"
] | null | null | null | repos/system_upgrade/el7toel8/libraries/config/tests/test_getenvvars.py | panovotn/leapp-repository | e80bdbf65393e68bc2e91b43b46fdd9b9b787878 | [
"Apache-2.0"
] | null | null | null | from collections import namedtuple
from leapp.libraries.common.config import get_env, get_all_envs
from leapp.libraries.stdlib import api
from leapp.models import EnvVar
| 31.576923 | 77 | 0.74056 | from collections import namedtuple
from leapp.libraries.common.config import get_env, get_all_envs
from leapp.libraries.stdlib import api
from leapp.models import EnvVar
class CurrentActorMocked(object):
env_vars = [EnvVar(name='LEAPP_DEVEL_SKIP_WIP', value='0'),
EnvVar(name='LEAPP_DEVEL_SKIP_DIP', value='1'),
EnvVar(name='LEAPP_DEVEL_SKIP_RIP', value='2')]
configuration = namedtuple('configuration', ['leapp_env_vars'])(env_vars)
def test_env_var_match(monkeypatch):
monkeypatch.setattr(api, 'current_actor', CurrentActorMocked)
assert get_env('LEAPP_DEVEL_SKIP_WIP') == '0'
assert not get_env('LEAPP_DEVEL_SKIP_PIP')
def test_get_all_vars(monkeypatch):
monkeypatch.setattr(api, 'current_actor', CurrentActorMocked)
assert len(get_all_envs()) == 3
| 296 | 282 | 69 |
9c124f5233a7bd69a4c61ad530b4d9b53345808c | 1,222 | py | Python | examples/save_data_to_file_example.py | eugtsa/led_wabbit | 04c56b438518d65a385788fa9a118adbe74a1771 | [
"Apache-2.0"
] | null | null | null | examples/save_data_to_file_example.py | eugtsa/led_wabbit | 04c56b438518d65a385788fa9a118adbe74a1771 | [
"Apache-2.0"
] | null | null | null | examples/save_data_to_file_example.py | eugtsa/led_wabbit | 04c56b438518d65a385788fa9a118adbe74a1771 | [
"Apache-2.0"
] | null | null | null | from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from itertools import chain
import numpy as np
from led_wabbit.models import LogisticRegressionBinary
if __name__ == '__main__':
X1 = [[0, 1, 1] for i in range(40)]
X2 = [[0, 2, 0] for i in range(40)]
X3 = [[1, 0, 1] for i in range(40)]
X4 = [[0, 2, 2] for i in range(3)]
X = np.array([x for x in chain(X1, X2, X3, X4)])
Y1 = [0 for i in range(40)]
Y2 = [1 for i in range(40)]
Y3 = [0 for i in range(40)]
Y4 = [1 for i in range(3)]
Y = np.array([y for y in chain(Y1, Y2, Y3, Y4)])
X, Y = shuffle(X, Y)
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.1)
header_dict = {0: ('n', 'X', 'x0'), 1: ('n', 'Y', 'y0'), 2: ('n', 'Z', 'z0')}
clf = LogisticRegressionBinary(learning_rate=5, header_dict=header_dict) # loss='logistic',
params = {'passes': [50, 100], 'header_dict': [header_dict], \
'learning_rate': [0.5, 0.2, 0.8], 'log_stderr_to_file': [True]} # 'loss':['logistic'],
with open('wv_learning.vw','w') as g:
for s in clf.iterate_over_vw_strings(X_train,y_train):
g.write(s)
g.write('\n') | 33.027027 | 101 | 0.587561 | from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from itertools import chain
import numpy as np
from led_wabbit.models import LogisticRegressionBinary
if __name__ == '__main__':
X1 = [[0, 1, 1] for i in range(40)]
X2 = [[0, 2, 0] for i in range(40)]
X3 = [[1, 0, 1] for i in range(40)]
X4 = [[0, 2, 2] for i in range(3)]
X = np.array([x for x in chain(X1, X2, X3, X4)])
Y1 = [0 for i in range(40)]
Y2 = [1 for i in range(40)]
Y3 = [0 for i in range(40)]
Y4 = [1 for i in range(3)]
Y = np.array([y for y in chain(Y1, Y2, Y3, Y4)])
X, Y = shuffle(X, Y)
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.1)
header_dict = {0: ('n', 'X', 'x0'), 1: ('n', 'Y', 'y0'), 2: ('n', 'Z', 'z0')}
clf = LogisticRegressionBinary(learning_rate=5, header_dict=header_dict) # loss='logistic',
params = {'passes': [50, 100], 'header_dict': [header_dict], \
'learning_rate': [0.5, 0.2, 0.8], 'log_stderr_to_file': [True]} # 'loss':['logistic'],
with open('wv_learning.vw','w') as g:
for s in clf.iterate_over_vw_strings(X_train,y_train):
g.write(s)
g.write('\n') | 0 | 0 | 0 |
22fbc1339414b2f40f360e4ea79f9e4a128ac3f5 | 1,382 | py | Python | tests/bugs/issue_58/issue_58_1_categorical_exogenous.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/bugs/issue_58/issue_58_1_categorical_exogenous.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/bugs/issue_58/issue_58_1_categorical_exogenous.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
b1 = tsds.load_ozone_exogenous_categorical()
df = b1.mPastData
print(b1.mExogenousDataFrame.Exog2.cat.categories)
print(b1.mExogenousDataFrame.Exog3.cat.categories)
print(b1.mExogenousDataFrame.Exog4.cat.categories)
lEngine = autof.cForecastEngine()
lEngine.mOptions.mDebug = True;
lEngine.mOptions.mDebugProfile = True;
lEngine.mOptions.disable_all_periodics()
lEngine.mOptions.set_active_autoregressions(['ARX'])
lExogenousData = (b1.mExogenousDataFrame , b1.mExogenousVariables)
H = 12
lEngine.train(df , 'Time' , b1.mSignalVar, H, lExogenousData)
lEngine
lEngine.getModelInfo();
lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
lEngine.standardPlots(name = "outputs/my_categorical_arx_ozone")
dfapp_in = df.copy();
dfapp_in.tail()
dfapp_out = lEngine.forecast(dfapp_in, H);
#dfapp_out.to_csv("outputs/arx_ozone_apply_out.csv")
dfapp_out.tail(2 * H)
print("Forecast Columns " , dfapp_out.columns);
Forecast_DF = dfapp_out[[b1.mTimeVar , b1.mSignalVar, b1.mSignalVar + '_Forecast']]
print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(H).values);
print("\n\n<ModelInfo>")
print(lEngine.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(Forecast_DF.tail(2*H).to_json(date_format='iso'))
print("</Forecast>\n\n")
| 28.791667 | 83 | 0.7822 | import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
b1 = tsds.load_ozone_exogenous_categorical()
df = b1.mPastData
print(b1.mExogenousDataFrame.Exog2.cat.categories)
print(b1.mExogenousDataFrame.Exog3.cat.categories)
print(b1.mExogenousDataFrame.Exog4.cat.categories)
lEngine = autof.cForecastEngine()
lEngine.mOptions.mDebug = True;
lEngine.mOptions.mDebugProfile = True;
lEngine.mOptions.disable_all_periodics()
lEngine.mOptions.set_active_autoregressions(['ARX'])
lExogenousData = (b1.mExogenousDataFrame , b1.mExogenousVariables)
H = 12
lEngine.train(df , 'Time' , b1.mSignalVar, H, lExogenousData)
lEngine
lEngine.getModelInfo();
lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
lEngine.standardPlots(name = "outputs/my_categorical_arx_ozone")
dfapp_in = df.copy();
dfapp_in.tail()
dfapp_out = lEngine.forecast(dfapp_in, H);
#dfapp_out.to_csv("outputs/arx_ozone_apply_out.csv")
dfapp_out.tail(2 * H)
print("Forecast Columns " , dfapp_out.columns);
Forecast_DF = dfapp_out[[b1.mTimeVar , b1.mSignalVar, b1.mSignalVar + '_Forecast']]
print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(H).values);
print("\n\n<ModelInfo>")
print(lEngine.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(Forecast_DF.tail(2*H).to_json(date_format='iso'))
print("</Forecast>\n\n")
| 0 | 0 | 0 |
2004b70e249cdbb83d33599179518dd3cc5c4574 | 7,164 | py | Python | urbanterror.py | zephrax/sopel-module-urbanterror | 4bc38fffc3f2128f35fe5864cabfc83302e1e498 | [
"MIT"
] | null | null | null | urbanterror.py | zephrax/sopel-module-urbanterror | 4bc38fffc3f2128f35fe5864cabfc83302e1e498 | [
"MIT"
] | null | null | null | urbanterror.py | zephrax/sopel-module-urbanterror | 4bc38fffc3f2128f35fe5864cabfc83302e1e498 | [
"MIT"
] | null | null | null | # coding=utf-8
"""UrbanTerror server info"""
# Author: zephrax http://kernelpanic.com.ar
from __future__ import unicode_literals, absolute_import, print_function, division
from sopel import web
from sopel.module import commands
from sopel.logger import get_logger
from sopel.config.types import StaticSection, ValidatedAttribute, ListAttribute
import socket
import re
LOGGER = get_logger(__name__)
class Player(object):
"""
Player class
"""
def __init__(self, num, name, frags, ping, address=None, bot=-1):
"""
create a new instance of Player
"""
self.num = num
self.name = name
self.frags = frags
self.ping = ping
self.address = address
self.bot = bot
class PyQuake3(object):
"""
PyQuake3 class
"""
packet_prefix = b'\xff' * 4
player_reo = re.compile(r'^(\d+) (\d+) "(.*)"')
rcon_password = None
port = None
address = None
players = None
values = None
def __init__(self, server, rcon_password=''):
"""
create a new instance of PyQuake3
"""
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.set_server(server)
self.set_rcon_password(rcon_password)
def set_server(self, server):
"""
set IP address and port and connect to socket
"""
try:
self.address, self.port = server.split(':')
except Exception:
raise ValueError('Server address format must be: "address:port"')
self.port = int(self.port)
self.sock.connect((self.address, self.port))
def get_address(self):
"""
get IP address and port
"""
return '%s:%s' % (self.address, self.port)
def set_rcon_password(self, rcon_password):
"""
set RCON password
"""
self.rcon_password = rcon_password
def send_packet(self, data):
"""
send packet
"""
base = b''
self.sock.send(base.join([self.packet_prefix, data.encode(), b'\n']))
# self.sock.send('{}{}\n'.format(self.packet_prefix, data).encode())
def recv(self, timeout=1):
"""
receive packets
"""
self.sock.settimeout(timeout)
try:
return self.sock.recv(8192)
except Exception as err:
raise Exception('Error receiving the packet: %s' % err[1])
def command(self, cmd, timeout=1, retries=5):
"""
send command and receive response
"""
while retries:
self.send_packet(cmd)
try:
data = self.recv(timeout)
except Exception:
data = None
if data:
return self.parse_packet(data)
retries -= 1
raise Exception('Server response timed out')
def rcon(self, cmd):
"""
send RCON command
"""
r_cmd = self.command('rcon "{}" {}'.format(self.rcon_password, cmd))
if r_cmd[1] == 'No rconpassword set on the server.\n' or r_cmd[1] == 'Bad rconpassword.\n':
raise Exception(r_cmd[1][:-1])
return r_cmd
def parse_packet(self, data):
"""
parse the received packet
"""
if data.find(self.packet_prefix) != 0:
raise Exception('Malformed packet')
first_line_length = data.find(b'\n')
if first_line_length == -1:
raise Exception('Malformed packet')
response_type = data[len(self.packet_prefix):first_line_length].decode()
response_data = data[first_line_length + 1:].decode()
return response_type, response_data
def parse_status(self, data):
"""
parse the response message and return a list
"""
split = data[1:].split('\\')
values = dict(zip(split[::2], split[1::2]))
# if there are \n's in one of the values, it's the list of players
for var, val in values.items():
pos = val.find('\n')
if pos == -1:
continue
split = val.split('\n', 1)
values[var] = split[0]
self.parse_players(split[1])
return values
def parse_players(self, data):
"""
parse player information - name, frags and ping
"""
self.players = []
for player in data.split('\n'):
if not player:
continue
match = self.player_reo.match(player)
if not match:
print('couldnt match {}'.format(player))
continue
frags, ping, name = match.groups()
self.players.append(Player(1, name, frags, ping))
def update(self):
"""
get status
"""
data = self.command('getstatus')[1]
self.values = self.parse_status(data)
def rcon_update(self):
"""
perform RCON status update
"""
data = self.rcon('status')[1]
lines = data.split(b'\n')
players = lines[3:]
self.players = []
for ply in players:
while ply.find(' ') != -1:
ply = ply.replace(' ', ' ')
while ply.find(' ') == 0:
ply = ply[1:]
if ply == '':
continue
ply = ply.split(' ')
try:
self.players.append(Player(int(ply[0]), ply[3], int(ply[1]), int(ply[2]), ply[5]))
except (IndexError, ValueError):
continue
class UrbanTerrorSection(StaticSection):
"""UrbanTerror server host. Default to localhost."""
server_host = ValidatedAttribute('server_host', str, default='localhost')
"""UrbanTerror server port. Default to 27960."""
server_port = ValidatedAttribute('server_port', int, default=27960)
"""UrbanTerror server rcon password."""
rcon_password = ValidatedAttribute('rcon_password', str)
@commands('ut')
def ut(bot, trigger):
"""UrbanTerror server stats"""
try:
ut_cfg = bot.config.urbanterror
UT = PyQuake3(server='{}:{}'.format(ut_cfg.server_host, ut_cfg.server_port), rcon_password=ut_cfg.rcon_password)
UT.update()
bot.say('Server: {} ({}) | Map: {} | Players ({}) {}'.format(
UT.values['sv_hostname'],
UT.get_address(),
UT.values['mapname'],
len(UT.players),
[gamer.name for gamer in UT.players]))
except Exception as err:
LOGGER.debug('Internal Error. {}'.format(err))
| 29.004049 | 120 | 0.561697 | # coding=utf-8
"""UrbanTerror server info"""
# Author: zephrax http://kernelpanic.com.ar
from __future__ import unicode_literals, absolute_import, print_function, division
from sopel import web
from sopel.module import commands
from sopel.logger import get_logger
from sopel.config.types import StaticSection, ValidatedAttribute, ListAttribute
import socket
import re
LOGGER = get_logger(__name__)
class Player(object):
"""
Player class
"""
def __init__(self, num, name, frags, ping, address=None, bot=-1):
"""
create a new instance of Player
"""
self.num = num
self.name = name
self.frags = frags
self.ping = ping
self.address = address
self.bot = bot
def __str__(self):
return self.name
def __repr__(self):
return str(self)
class PyQuake3(object):
"""
PyQuake3 class
"""
packet_prefix = b'\xff' * 4
player_reo = re.compile(r'^(\d+) (\d+) "(.*)"')
rcon_password = None
port = None
address = None
players = None
values = None
def __init__(self, server, rcon_password=''):
"""
create a new instance of PyQuake3
"""
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.set_server(server)
self.set_rcon_password(rcon_password)
def set_server(self, server):
"""
set IP address and port and connect to socket
"""
try:
self.address, self.port = server.split(':')
except Exception:
raise ValueError('Server address format must be: "address:port"')
self.port = int(self.port)
self.sock.connect((self.address, self.port))
def get_address(self):
"""
get IP address and port
"""
return '%s:%s' % (self.address, self.port)
def set_rcon_password(self, rcon_password):
"""
set RCON password
"""
self.rcon_password = rcon_password
def send_packet(self, data):
"""
send packet
"""
base = b''
self.sock.send(base.join([self.packet_prefix, data.encode(), b'\n']))
# self.sock.send('{}{}\n'.format(self.packet_prefix, data).encode())
def recv(self, timeout=1):
"""
receive packets
"""
self.sock.settimeout(timeout)
try:
return self.sock.recv(8192)
except Exception as err:
raise Exception('Error receiving the packet: %s' % err[1])
def command(self, cmd, timeout=1, retries=5):
"""
send command and receive response
"""
while retries:
self.send_packet(cmd)
try:
data = self.recv(timeout)
except Exception:
data = None
if data:
return self.parse_packet(data)
retries -= 1
raise Exception('Server response timed out')
def rcon(self, cmd):
"""
send RCON command
"""
r_cmd = self.command('rcon "{}" {}'.format(self.rcon_password, cmd))
if r_cmd[1] == 'No rconpassword set on the server.\n' or r_cmd[1] == 'Bad rconpassword.\n':
raise Exception(r_cmd[1][:-1])
return r_cmd
def parse_packet(self, data):
"""
parse the received packet
"""
if data.find(self.packet_prefix) != 0:
raise Exception('Malformed packet')
first_line_length = data.find(b'\n')
if first_line_length == -1:
raise Exception('Malformed packet')
response_type = data[len(self.packet_prefix):first_line_length].decode()
response_data = data[first_line_length + 1:].decode()
return response_type, response_data
def parse_status(self, data):
"""
parse the response message and return a list
"""
split = data[1:].split('\\')
values = dict(zip(split[::2], split[1::2]))
# if there are \n's in one of the values, it's the list of players
for var, val in values.items():
pos = val.find('\n')
if pos == -1:
continue
split = val.split('\n', 1)
values[var] = split[0]
self.parse_players(split[1])
return values
def parse_players(self, data):
"""
parse player information - name, frags and ping
"""
self.players = []
for player in data.split('\n'):
if not player:
continue
match = self.player_reo.match(player)
if not match:
print('couldnt match {}'.format(player))
continue
frags, ping, name = match.groups()
self.players.append(Player(1, name, frags, ping))
def update(self):
"""
get status
"""
data = self.command('getstatus')[1]
self.values = self.parse_status(data)
def rcon_update(self):
"""
perform RCON status update
"""
data = self.rcon('status')[1]
lines = data.split(b'\n')
players = lines[3:]
self.players = []
for ply in players:
while ply.find(' ') != -1:
ply = ply.replace(' ', ' ')
while ply.find(' ') == 0:
ply = ply[1:]
if ply == '':
continue
ply = ply.split(' ')
try:
self.players.append(Player(int(ply[0]), ply[3], int(ply[1]), int(ply[2]), ply[5]))
except (IndexError, ValueError):
continue
class UrbanTerrorSection(StaticSection):
"""UrbanTerror server host. Default to localhost."""
server_host = ValidatedAttribute('server_host', str, default='localhost')
"""UrbanTerror server port. Default to 27960."""
server_port = ValidatedAttribute('server_port', int, default=27960)
"""UrbanTerror server rcon password."""
rcon_password = ValidatedAttribute('rcon_password', str)
def configure(config):
config.define_section('urbanterror', UrbanTerrorSection)
config.urbanterror.configure_setting(
'server_host',
"UrbanTerror server hostname or ip.",
)
config.urbanterror.configure_setting(
'server_port',
'UrbanTerror server port.',
)
config.urbanterror.configure_setting(
'rcon_password',
'UrbanTerror rcon password.',
)
def setup(bot):
bot.config.define_section('urbanterror', UrbanTerrorSection)
@commands('ut')
def ut(bot, trigger):
"""UrbanTerror server stats"""
try:
ut_cfg = bot.config.urbanterror
UT = PyQuake3(server='{}:{}'.format(ut_cfg.server_host, ut_cfg.server_port), rcon_password=ut_cfg.rcon_password)
UT.update()
bot.say('Server: {} ({}) | Map: {} | Players ({}) {}'.format(
UT.values['sv_hostname'],
UT.get_address(),
UT.values['mapname'],
len(UT.players),
[gamer.name for gamer in UT.players]))
except Exception as err:
LOGGER.debug('Internal Error. {}'.format(err))
| 501 | 0 | 100 |
3f642ae81cbb6c2d0f605ddea6efce124c74a74f | 3,477 | py | Python | fpga_template_low_level.py | KilometersG/Python-API-for-VeriStand-FPGA | 2607bae5582aa2872d31c05bc0135ed1d7dd9e34 | [
"BSD-2-Clause"
] | 2 | 2018-07-24T18:14:14.000Z | 2019-08-26T06:29:36.000Z | fpga_template_low_level.py | KilometersG/Python-API-for-VeriStand-FPGA | 2607bae5582aa2872d31c05bc0135ed1d7dd9e34 | [
"BSD-2-Clause"
] | 8 | 2018-07-24T18:11:32.000Z | 2018-11-13T16:24:45.000Z | fpga_template_low_level.py | KilometersG/Python-API-for-VeriStand-FPGA | 2607bae5582aa2872d31c05bc0135ed1d7dd9e34 | [
"BSD-2-Clause"
] | null | null | null | import fpga_config
from nifpga import Session
import ntpath
configpath = input('Please enter the full filepath of your .fpgaconfig file: ')
vsfpga = fpga_config.VeriStandFPGA(configpath)
folder = ntpath.split(configpath)
read_count = vsfpga.read_packets + 1
write_count = vsfpga.write_packets + 1
read_packets = {}
write_packets = {}
for i in range(1, read_count):
read_packets['packet{}'.format(i)] = vsfpga._create_packet('read', i)
for i in range(1, write_count):
write_packets['packet{}'.format(i)] = vsfpga._create_packet('write', i)
print('Please input five values separated by commas for the following channels')
print('Please enter PWMs as 0-100 Duty Cycles, Digital Lines as 1\'s or 0\'s and Analog Lines as floating points')
write_values = {}
for i in range(1, write_count):
write_packet = write_packets['packet{}'.format(i)]
iteration_writes = {}
for j in range(write_packet.definition['channel_count']):
valuestr = input('{}: '.format(write_packet.definition['name{}'.format(j)]))
channel_values = valuestr.split(',')
for k, value in enumerate(channel_values):
iteration_writes['{},{}'.format(write_packet.definition['name{}'.format(j)], k)] = value
write_values['packet{}'.format(i)] = iteration_writes
device = input('Please input the name of your FPGA board as it appears in NI-MAX: ')
with Session(vsfpga.full_bitpath, device) as sesh:
read_fifo = sesh.fifos['DMA_READ']
write_fifo = sesh.fifos['DMA_WRITE']
loop_timer = sesh.registers['Loop Rate (usec)']
start = sesh.registers['Start']
rtsi = sesh.registers['Write to RTSI']
ex_timing = sesh.registers['Use External Timing']
irq = sesh.registers['Generate IRQ']
loop_timer.write(1000)
rtsi.write(False)
ex_timing.write(False)
irq.write(False)
start.write(True)
packed_reads = {}
for i in range(5):
packed_reads["iteration{}".format(i)] = read_fifo.read(number_of_elements=vsfpga.read_packets, timeout_ms=2000)
write_list = []
for j in range(1, write_count):
packet_of_interest = write_packets['packet{}'.format(j)]
p_values = []
this_iteration = write_values['packet{}'.format(j)]
for k in range(packet_of_interest.definition['channel_count']):
channel_name = packet_of_interest.definition['name{}'.format(k)]
p_values.append(this_iteration['{},{}'.format(channel_name, i)])
packed_data = packet_of_interest._pack(p_values)
write_list.append(packed_data)
write_fifo.write(data=write_list, timeout_ms=2000)
for i in range(5):
print("Iteration {} Reads:".format(i+1))
read_tup = packed_reads['iteration{}'.format(i)]
current_it = read_tup[0]
for j, u64 in enumerate(current_it):
packet_of_interest = read_packets['packet{}'.format(j+1)]
print(packet_of_interest._unpack(u64))
sesh.close()
# Assumptions:
# Bitfile in the same folder as the .fpgaconfig file
# .fpgaconfig file follows the VeriStand standard
# Bitfile is written with the VeriStand FPGA project template in LabVIEW
# Control names and FIFO names have not been edited from the template names
# The FPGA bitfile was generated using the VeriStand FPGA Suppport VIs for all IO.
# Basic IO palette
# Digital Lines not Ports
# Pulse Measurement VI
# Pulse Generation VI
# Analog IO
| 41.891566 | 119 | 0.681047 | import fpga_config
from nifpga import Session
import ntpath
configpath = input('Please enter the full filepath of your .fpgaconfig file: ')
vsfpga = fpga_config.VeriStandFPGA(configpath)
folder = ntpath.split(configpath)
read_count = vsfpga.read_packets + 1
write_count = vsfpga.write_packets + 1
read_packets = {}
write_packets = {}
for i in range(1, read_count):
read_packets['packet{}'.format(i)] = vsfpga._create_packet('read', i)
for i in range(1, write_count):
write_packets['packet{}'.format(i)] = vsfpga._create_packet('write', i)
print('Please input five values separated by commas for the following channels')
print('Please enter PWMs as 0-100 Duty Cycles, Digital Lines as 1\'s or 0\'s and Analog Lines as floating points')
write_values = {}
for i in range(1, write_count):
write_packet = write_packets['packet{}'.format(i)]
iteration_writes = {}
for j in range(write_packet.definition['channel_count']):
valuestr = input('{}: '.format(write_packet.definition['name{}'.format(j)]))
channel_values = valuestr.split(',')
for k, value in enumerate(channel_values):
iteration_writes['{},{}'.format(write_packet.definition['name{}'.format(j)], k)] = value
write_values['packet{}'.format(i)] = iteration_writes
device = input('Please input the name of your FPGA board as it appears in NI-MAX: ')
with Session(vsfpga.full_bitpath, device) as sesh:
read_fifo = sesh.fifos['DMA_READ']
write_fifo = sesh.fifos['DMA_WRITE']
loop_timer = sesh.registers['Loop Rate (usec)']
start = sesh.registers['Start']
rtsi = sesh.registers['Write to RTSI']
ex_timing = sesh.registers['Use External Timing']
irq = sesh.registers['Generate IRQ']
loop_timer.write(1000)
rtsi.write(False)
ex_timing.write(False)
irq.write(False)
start.write(True)
packed_reads = {}
for i in range(5):
packed_reads["iteration{}".format(i)] = read_fifo.read(number_of_elements=vsfpga.read_packets, timeout_ms=2000)
write_list = []
for j in range(1, write_count):
packet_of_interest = write_packets['packet{}'.format(j)]
p_values = []
this_iteration = write_values['packet{}'.format(j)]
for k in range(packet_of_interest.definition['channel_count']):
channel_name = packet_of_interest.definition['name{}'.format(k)]
p_values.append(this_iteration['{},{}'.format(channel_name, i)])
packed_data = packet_of_interest._pack(p_values)
write_list.append(packed_data)
write_fifo.write(data=write_list, timeout_ms=2000)
for i in range(5):
print("Iteration {} Reads:".format(i+1))
read_tup = packed_reads['iteration{}'.format(i)]
current_it = read_tup[0]
for j, u64 in enumerate(current_it):
packet_of_interest = read_packets['packet{}'.format(j+1)]
print(packet_of_interest._unpack(u64))
sesh.close()
# Assumptions:
# Bitfile in the same folder as the .fpgaconfig file
# .fpgaconfig file follows the VeriStand standard
# Bitfile is written with the VeriStand FPGA project template in LabVIEW
# Control names and FIFO names have not been edited from the template names
# The FPGA bitfile was generated using the VeriStand FPGA Suppport VIs for all IO.
# Basic IO palette
# Digital Lines not Ports
# Pulse Measurement VI
# Pulse Generation VI
# Analog IO
| 0 | 0 | 0 |
a7241d0c0ee6c2db2de1ddde13add40050d749a8 | 29,611 | py | Python | intel_pytorch_extension_py/launch.py | CaoE/intel-extension-for-pytorch | 2a31cef7592207a7d08e346542218b5a79df8df9 | [
"Apache-2.0"
] | null | null | null | intel_pytorch_extension_py/launch.py | CaoE/intel-extension-for-pytorch | 2a31cef7592207a7d08e346542218b5a79df8df9 | [
"Apache-2.0"
] | 1 | 2021-03-30T04:54:24.000Z | 2021-03-30T04:54:24.000Z | intel_pytorch_extension_py/launch.py | CaoE/intel-extension-for-pytorch | 2a31cef7592207a7d08e346542218b5a79df8df9 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import platform
import subprocess
import os
from os.path import expanduser
import re
import glob
import numpy as np
from argparse import ArgumentParser, REMAINDER
from argparse import RawTextHelpFormatter
import logging
import psutil
logging.basicConfig(level = logging.INFO,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
r"""
This is a script for launching PyTorch training and inference on Intel Xeon CPU with optimal configurations.
Now, single instance inference/training, multi-instance inference/training and distributed training
with oneCCL backend is enabled.
To get the peak performance on Intel Xeon CPU, the script optimizes the configuration of thread and memory
management. For thread management, the script configures thread affinity and the preload of Intel OMP library.
For memory management, it configures NUMA binding and preload optimized memory allocation library (e.g. tcmalloc, jemalloc).
**How to use this module:**
*** Single instance inference/training ***
1. Run single-instance inference or training on a single node with all CPU sockets.
::
>>> python -m intel_pytorch_extension.launch script.py args
2. Run single-instance inference or training on a single CPU socket.
::
>>> python -m intel_pytorch_extension.launch --socket_id 1 script.py args
*** Multi-instance inference ***
1. Multi-instance
By default, one instance per socket. if you want to set the instance numbers and core per instance,
--ninstances and --ncore_per_instance should be set.
>>> python -m intel_pytorch_extension.launch --multi_instance python_script args
eg: on CLX8280 with 14 instance, 4 cores per instance
::
>>> python -m intel_pytorch_extension.launch --multi_instance --ninstances 14 --ncore_per_instance 4 python_script args
*** Distributed Training ***
spawns up multiple distributed training processes on each of the training nodes. For intel_pytorch_extension, oneCCL
is used as the communication backend and MPI used to launch multi-proc. To get the better
performance, you should specify the different cores for oneCCL communication and computation
process seperately. This tool can automatically set these ENVs(such as I_MPI_PIN_DOMIN) and launch
multi-proc for you.
The utility can be used for single-node distributed training, in which one or
more processes per node will be spawned. It can also be used in
multi-node distributed training, by spawning up multiple processes on each node
for well-improved multi-node distributed training performance as well.
1. Single-Node multi-process distributed training
::
>>> python -m intel_pytorch_extension.launch --distributed python_script --arg1 --arg2 --arg3 and all other
arguments of your training script
2. Multi-Node multi-process distributed training: (e.g. two nodes)
rank 0: *(IP: 192.168.10.10, and has a free port: 295000)*
::
>>> python -m intel_pytorch_extension.launch --distributed --nproc_per_node=xxx
--nnodes=2 --hostfile hostfile python_sript --arg1 --arg2 --arg3
and all other arguments of your training script)
3. To look up what optional arguments this module offers:
::
>>> python -m intel_pytorch_extension.launch --help
*** Memory allocator ***
"--enable_tcmalloc" and "--enable_jemalloc" can be used to enable different memory allocators.
"""
def set_mpi_pin_domain(args):
    '''
    Build the I_MPI_PIN_DOMAIN value that pins each MPI rank to its cores.

    The first ``args.ccl_worker_count`` cores of every rank are reserved for
    oneCCL communication; the remaining cores of the rank form its compute
    pin domain, expressed as one hex bitmask per rank.

    For example: on CascadeLake 8280 CPU, 2 ranks on one node, ccl_worker_count=4:
        CCL_WORKER_COUNT=4
        CCL_WORKER_AFFINITY="0,1,2,3,28,29,30,31"
        I_MPI_PIN_DOMAIN=[0xffffff0,0xffffff0000000]

    :param args: parsed launcher namespace (nproc_per_node, use_logical_core,
                 ccl_worker_count are read).
    :return: the "[mask,mask,...]" string to export as I_MPI_PIN_DOMAIN.
    '''
    cpuinfo = CPUinfo()
    ppn = args.nproc_per_node
    total_cores = cpuinfo.physical_core_nums()
    if args.use_logical_core:
        # BUGFIX: was cpuinfo.logcal_core_nums() (typo), which raised
        # AttributeError whenever --use_logical_core was passed.
        total_cores = cpuinfo.logical_core_nums()
    cores_per_rank = total_cores // ppn
    pin_domain = "["
    for proc in range(ppn):
        domain_binary = 0
        # skip the ccl worker cores at the start of this rank's core range
        begin = proc * cores_per_rank + args.ccl_worker_count
        end = proc * cores_per_rank + cores_per_rank - 1
        for i in range(begin, end + 1):
            domain_binary |= (1 << i)
        pin_domain += hex(domain_binary) + ","
    return pin_domain + "]"
def set_ccl_worker_affinity(args):
    '''
    Export CCL_WORKER_AFFINITY for oneCCL-backed distributed training.

    Computation and communication use different cores: the first
    ``args.ccl_worker_count`` cores of every rank's core range are assigned
    to the oneCCL workers.

    Side effect: sets os.environ["CCL_WORKER_AFFINITY"] to a trailing-comma
    separated core-id list (same format as the original implementation).
    '''
    cpuinfo = CPUinfo()
    ppn = args.nproc_per_node
    total_cores = cpuinfo.physical_core_nums()
    if args.use_logical_core:
        # BUGFIX: was cpuinfo.logcal_core_nums() (typo), which raised
        # AttributeError whenever --use_logical_core was passed.
        total_cores = cpuinfo.logical_core_nums()
    cores_per_rank = total_cores // ppn
    affinity = ''
    for proc in range(ppn):
        for ccl_worker in range(args.ccl_worker_count):
            affinity += str(proc * cores_per_rank + ccl_worker) + ","
    os.environ["CCL_WORKER_AFFINITY"] = affinity
def add_lib_preload(lib_type=None):
    '''
    Locate lib<lib_type>.so in the usual install prefixes and, if found,
    prepend it to the LD_PRELOAD environment variable.

    Search order: $CONDA_PREFIX/lib, ~/.local/lib, /usr/local/lib,
    /usr/local/lib64, /usr/lib, /usr/lib64.

    :return: True when the library was found (and LD_PRELOAD updated),
             False otherwise.
    '''
    candidate_dirs = []
    if "CONDA_PREFIX" in os.environ:
        candidate_dirs.append(os.environ["CONDA_PREFIX"] + "/lib/")
    candidate_dirs += ["{}/.local/lib/".format(expanduser("~")), "/usr/local/lib/",
                       "/usr/local/lib64/", "/usr/lib/", "/usr/lib64/"]
    found = False
    for directory in candidate_dirs:
        hits = glob.glob(directory + "lib" + lib_type + ".so")
        if hits:
            # keep the newly found library first so it wins at load time
            existing = os.environ.get("LD_PRELOAD")
            os.environ["LD_PRELOAD"] = hits[0] if existing is None else hits[0] + ":" + existing
            found = True
            break
    return found
def launch(args):
    '''
    Single-instance / multi-instance launcher.

    Resolves which CPU cores each instance runs on, configures the
    threading/allocator environment, then spawns one subprocess per
    instance (pinned with numactl unless disabled) and waits for all of
    them, raising CalledProcessError on a nonzero exit.
    '''
    processes = []
    cores = []
    cpuinfo = CPUinfo()
    if args.core_list:  # user specify what cores will be used by params
        cores = args.core_list.strip().split(",")
        if args.ncore_per_instance == -1:
            logger.error("please specify the '--ncore_per_instance' if you have pass the --core_list params")
            exit(-1)
        elif args.ninstances > 1 and args.ncore_per_instance * args.ninstances < len(cores):
            logger.warning("only first {} cores will be used, but you specify {} cores in core_list".format
                           (args.ncore_per_instance * args.ninstances, len(cores)))
        else:
            # derive the instance count from the explicit core list
            args.ninstances = len(cores) // args.ncore_per_instance
    else:
        # No explicit core list: take cores from one socket (--socket_id)
        # or the whole machine, physical cores unless --use_logical_core.
        if args.use_logical_core:
            if args.socket_id != -1:
                cores = cpuinfo.get_socket_logical_cores(args.socket_id)
            else:
                cores = cpuinfo.get_all_logical_cores()
        else:
            if args.socket_id != -1:
                cores = cpuinfo.get_socket_physical_cores(args.socket_id)
            else:
                cores = cpuinfo.get_all_physical_cores()
        # Fill in whichever of ninstances / ncore_per_instance the user left
        # unset (-1 means "not specified on the command line").
        if not args.multi_instance and args.ninstances == -1 and args.ncore_per_instance == -1:
            args.ninstances = 1;
            args.ncore_per_instance = len(cores)
        elif args.multi_instance and args.ninstances == -1 and args.ncore_per_instance == -1:
            args.throughput_performance = True
        elif args.ncore_per_instance == -1 and args.ninstances != -1:
            args.ncore_per_instance = len(cores) // args.ninstances
        elif args.ncore_per_instance != -1 and args.ninstances == -1:
            args.ninstances = len(cores) // args.ncore_per_instance
        else:
            if args.ninstances * args.ncore_per_instance > len(cores):
                logger.error("Please make sure ninstances * ncore_per_instance <= total_cores")
                exit(-1)
        if args.latency_performance:
            # latency mode is hard-wired to 4 physical cores per instance
            if args.ncore_per_instance !=4:
                logger.warning("latency_performance is a specail mode, args.ncore_per_instance can only be set to be 4")
            args.ncore_per_instance = 4
            cores = cpuinfo.get_all_physical_cores()
            args.ninstances = len(cores) // args.ncore_per_instance
        if args.throughput_performance:
            # throughput mode: one instance per socket over all physical cores
            args.ninstances = cpuinfo.socket_nums()
            cores = cpuinfo.get_all_physical_cores()
            args.ncore_per_instance = len(cores) // args.ninstances
    # LAUNCH_CMD records every spawned command line (","-"#" separated)
    os.environ["LAUNCH_CMD"] = "#"
    set_multi_thread_and_allcator(args)
    for i in range(args.ninstances):
        cmd = []
        cur_process_cores = ""
        if not args.disable_numactl:
            cmd = ["numactl"]
            # pin this instance to its contiguous slice of the core list
            for core in cores[i * args.ncore_per_instance:(i + 1) * args.ncore_per_instance]:
                cur_process_cores = cur_process_cores + str(core) + ","
            numa_params = "-C {} ".format(cur_process_cores[:-1])
            cmd.extend(numa_params.split())
        with_python = not args.no_python
        if with_python:
            cmd.append(sys.executable)
        if args.module:
            cmd.append("-m")
        cmd.append(args.program)
        cmd.extend(args.program_args)
        os.environ["LAUNCH_CMD"] += " ".join(cmd) + ",#"
        process = subprocess.Popen(cmd, env=os.environ)
        processes.append(process)
    os.environ["LAUNCH_CMD"] = os.environ["LAUNCH_CMD"][:-2]
    for process in processes:
        process.wait()
        if process.returncode != 0:
            raise subprocess.CalledProcessError(returncode=process.returncode,
                                                cmd=cmd)
def mpi_dist_launch(args):
    '''
    Set ENVs and launch MPI process for distributed training.

    For multi-node runs the hostfile is validated first (IPv4 entries,
    enough entries for nnodes, rank-0 address belongs to this machine,
    passwordless SSH to every other node). Then the allocator, master
    address/port, I_MPI_PIN_DOMAIN, OMP_NUM_THREADS and the oneCCL worker
    settings are exported and a single mpiexec.hydra command is spawned.
    '''
    if args.nnodes > 1 and not os.path.exists(args.hostfile):
        raise ValueError("hostfile is necessary when you use multi-node distributed training,"
                         "Please create hostfile which include the ip list you used for distributed running")
    elif args.nnodes > 1:
        ipv4_addr_pattern = r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"
        ip_list = []
        with open(args.hostfile) as f:
            for line in f:
                line = line.strip().strip("\n")
                # every hostfile entry must be a literal IPv4 address
                is_valid = re.match(ipv4_addr_pattern, line)
                if not is_valid:
                    logger.error("{} is not valid IPV4 address".format(line))
                    exit(-1)
                else:
                    ip_list.append(line)
        if len(ip_list) < args.nnodes:
            logger.error("The number of IP {} should greater than nnodes parameters {}".format(len(ip_list), args.nnodes))
            exit(-1)
        # the first hostfile entry must be one of this machine's addresses
        master_check = False
        dic = psutil.net_if_addrs()
        for adapter in dic:
            snicList = dic[adapter]
            for snic in snicList:
                if snic.address == ip_list[0]:
                    master_check = True
        if not master_check:
            logger.error("MASTER_ADDR is not right. Please make sure the first ip {} in your hostfile is the current node".format(ip_list[0]))
            exit(-1)
        logger.info("Begin to validate the ip connect")
        args.master_addr = ip_list[0]
        for ip in ip_list[1:]:
            # mpiexec needs passwordless SSH to every worker node
            completed_process = subprocess.run("ssh -o PasswordAuthentication=no {} ':'".format(ip), shell=True)
            if completed_process.returncode != 0:
                logger.error("Passwordless SSH login to {} failed, please make sure you have setup SSH public key right")
                exit(-1)
            else:
                logger.info("connection from master node {} to slave node {} is OK".format(args.master_addr, ip))
    set_memory_allocator(args)
    # set distributed related environmental variables
    os.environ["MASTER_ADDR"] = args.master_addr
    os.environ["MASTER_PORT"] = str(args.master_port)
    if "I_MPI_PIN_DOMAIN" not in os.environ:
        mpi_pin_domain = set_mpi_pin_domain(args)
    else:
        mpi_pin_domain = os.environ["I_MPI_PIN_DOMAIN"]
    cpuinfo = CPUinfo()
    ppn = args.nproc_per_node
    total_cores = len(cpuinfo.get_all_physical_cores())
    cores_per_rank = total_cores // ppn
    if "OMP_NUM_THREADS" not in os.environ:
        # computation threads per rank = rank cores minus ccl worker cores
        opm_num_threads = cores_per_rank - args.ccl_worker_count
    else:
        opm_num_threads = os.environ["OMP_NUM_THREADS"]
    os.environ["CCL_WORKER_COUNT"] = str(args.ccl_worker_count)
    if "CCL_WORKER_AFFINITY" not in os.environ:
        set_ccl_worker_affinity(args)
    if "CCL_ATL_TRANSPORT" not in os.environ:
        os.environ["CCL_ATL_TRANSPORT"] = "ofi"
    if args.enable_iomp:
        find_iomp = add_lib_preload(lib_type="iomp")
        if not find_iomp:
            logger.warning("Unable to find the {} library file lib{}.so in $CONDA_PREFIX/lib or /.local/lib/"
                           " or /usr/local/lib/ or /usr/local/lib64/ or /usr/lib or /usr/lib64 or "
                           "~/.local/lib/ so the LD_PRELOAD environment variable will not be set."
                           .format("iomp", "iomp", expanduser("~")))
        else:
            logger.info("Enale iomp by set LD_PRELOAD")
    logger.info("MASTER_ADDR={}".format(args.master_addr))
    logger.info("MASTER_PORT={}".format(args.master_port))
    logger.info("I_MPI_PIN_DOMAIN={}".format(mpi_pin_domain))
    logger.info("OMP_NUM_THREADS={} ".format(opm_num_threads))
    logger.info("CCL_WORKER_COUNT={}".format(args.ccl_worker_count))
    logger.info("CCL_WORKER_AFFINITY={}".format(os.environ["CCL_WORKER_AFFINITY"]))
    os.environ["LAUNCH_CMD"] = "#"
    cmd = ['mpiexec.hydra']
    mpi_config = "-l -np {} -ppn {} -genv I_MPI_PIN_DOMAIN={} -genv OMP_NUM_THREADS={} ".format(args.nnodes*args.nproc_per_node,
                 args.nproc_per_node, mpi_pin_domain, opm_num_threads)
    mpi_config += args.more_mpi_parms
    if args.nnodes > 1:
        mpi_config += " -hostfile {}".format(args.hostfile)
    cmd.extend(mpi_config.split())
    with_python = not args.no_python
    if with_python:
        cmd.append(sys.executable)
        cmd.append("-u")
    if args.module:
        cmd.append("-m")
    cmd.append(args.program)
    cmd.extend(args.program_args)
    process = subprocess.Popen(cmd, env=os.environ)
    process.wait()
    os.environ["LAUNCH_CMD"] += " ".join(cmd) + ",#"
    os.environ["LAUNCH_CMD"] = os.environ["LAUNCH_CMD"][:-2]
def parse_args():
    """
    Helper function parsing the command line options.

    @retval argparse.Namespace holding every launcher option (multi_instance,
            distributed, allocator/KMP/distributed/instance flag groups, the
            target program and its arguments)
    """
    parser = ArgumentParser(description="This is a script for launching PyTorch training and inference on Intel Xeon CPU "
                                        "with optimal configurations. Now, single instance inference/training, multi-instance "
                                        "inference/training and distributed training with oneCCL backend is enabled. "
                                        "To get the peak performance on Intel Xeon CPU, the script optimizes the configuration "
                                        "of thread and memory management. For thread management, the script configures thread "
                                        "affinity and the preload of Intel OMP library. For memory management, it configures "
                                        "NUMA binding and preload optimized memory allocation library (e.g. tcmalloc, jemalloc) "
                                        "\n################################# Basic usage ############################# \n"
                                        "\n 1. single instance\n"
                                        "\n   >>> python -m intel_pytorch_extension.launch python_script args \n"
                                        "\n2. multi-instance \n"
                                        "\n    >>> python -m intel_pytorch_extension.launch --multi_instance python_script args\n"
                                        "\n3. Single-Node multi-process distributed training\n"
                                        "\n    >>> python -m intel_pytorch_extension.launch --distributed  python_script args\n"
                                        "\n4. Multi-Node multi-process distributed training: (e.g. two nodes)\n"
                                        "\n   rank 0: *(IP: 192.168.10.10, and has a free port: 295000)*\n"
                                        "\n   >>> python -m intel_pytorch_extension.launch --distributed --nproc_per_node=2\n"
                                        "\n       --nnodes=2 --hostfile hostfile python_script args\n",
                            formatter_class=RawTextHelpFormatter)
    parser.add_argument("--multi_instance", action='store_true', default=False,
                        help="Enable multi-instance, by default one instance per socket")
    parser.add_argument('--distributed', action='store_true', default=False,
                        help='Enable distributed training.')
    parser.add_argument("-m", "--module", default=False, action="store_true",
                        help="Changes each process to interpret the launch script "
                             "as a python module, executing with the same behavior as"
                             "'python -m'.")
    parser.add_argument("--no_python", default=False, action="store_true",
                        help="Do not prepend the --program script with \"python\" - just exec "
                             "it directly. Useful when the script is not a Python script.")
    # flag groups contributed by the add_*_params helpers defined elsewhere
    # in this module
    add_memory_allocator_params(parser)
    add_kmp_iomp_params(parser)
    add_distributed_training_params(parser)
    add_multi_instance_params(parser)
    # positional
    parser.add_argument("program", type=str,
                        help="The full path to the proram/script to be launched. "
                             "followed by all the arguments for the script")
    # rest from the training program
    parser.add_argument('program_args', nargs=REMAINDER)
    return parser.parse_args()
# Script entry point. NOTE(review): main() is not defined anywhere in this
# copy of the file — presumably it parses args and dispatches to launch() /
# mpi_dist_launch(); confirm it exists in the full module.
if __name__ == "__main__":
    main()
| 45.485407 | 141 | 0.624667 | from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import platform
import subprocess
import os
from os.path import expanduser
import re
import glob
import numpy as np
from argparse import ArgumentParser, REMAINDER
from argparse import RawTextHelpFormatter
import logging
import psutil
logging.basicConfig(level = logging.INFO,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
r"""
This is a script for launching PyTorch training and inference on Intel Xeon CPU with optimal configurations.
Now, single instance inference/training, multi-instance inference/training and distributed training
with oneCCL backend is enabled.
To get the peak performance on Intel Xeon CPU, the script optimizes the configuration of thread and memory
management. For thread management, the script configures thread affinity and the preload of Intel OMP library.
For memory management, it configures NUMA binding and preload optimized memory allocation library (e.g. tcmalloc, jemalloc).
**How to use this module:**
*** Single instance inference/training ***
1. Run single-instance inference or training on a single node with all CPU sockets.
::
>>> python -m intel_pytorch_extension.launch script.py args
2. Run single-instance inference or training on a single CPU socket.
::
>>> python -m intel_pytorch_extension.launch --socket_id 1 script.py args
*** Multi-instance inference ***
1. Multi-instance
By default, one instance per socket. if you want to set the instance numbers and core per instance,
--ninstances and --ncore_per_instance should be set.
>>> python -m intel_pytorch_extension.launch --multi_instance python_script args
eg: on CLX8280 with 14 instance, 4 cores per instance
::
>>> python -m intel_pytorch_extension.launch --multi_instance --ninstances 14 --ncore_per_instance 4 python_script args
*** Distributed Training ***
spawns up multiple distributed training processes on each of the training nodes. For intel_pytorch_extension, oneCCL
is used as the communication backend and MPI used to launch multi-proc. To get the better
performance, you should specify the different cores for oneCCL communication and computation
process seperately. This tool can automatically set these ENVs(such as I_MPI_PIN_DOMIN) and launch
multi-proc for you.
The utility can be used for single-node distributed training, in which one or
more processes per node will be spawned. It can also be used in
multi-node distributed training, by spawning up multiple processes on each node
for well-improved multi-node distributed training performance as well.
1. Single-Node multi-process distributed training
::
>>> python -m intel_pytorch_extension.launch --distributed python_script --arg1 --arg2 --arg3 and all other
arguments of your training script
2. Multi-Node multi-process distributed training: (e.g. two nodes)
rank 0: *(IP: 192.168.10.10, and has a free port: 295000)*
::
>>> python -m intel_pytorch_extension.launch --distributed --nproc_per_node=xxx
--nnodes=2 --hostfile hostfile python_sript --arg1 --arg2 --arg3
and all other arguments of your training script)
3. To look up what optional arguments this module offers:
::
>>> python -m intel_pytorch_extension.launch --help
*** Memory allocator ***
"--enable_tcmalloc" and "--enable_jemalloc" can be used to enable different memory allocators.
"""
class CPUinfo():
    '''
    CPU topology discovered from ``lscpu`` (Linux only; Windows raises).

    Per socket it records the physical core ids and the logical cpu ids,
    and offers flat accessors over the whole machine. Ids are kept as the
    strings emitted by lscpu.
    '''
    def __init__(self):
        self.cpuinfo = []
        if platform.system() == "Windows":
            raise RuntimeError("Windows platform is not supported!!!")
        elif platform.system() == "Linux":
            args = ["lscpu", "--parse=CPU,Core,Socket,Node"]
            lscpu_info = subprocess.check_output(args, universal_newlines=True).split("\n")
            # Keep only the data rows "cpu,core,socket,node"; lscpu prefixes
            # comment lines with '#'.
            for line in lscpu_info:
                pattern = r"^([\d]+,[\d]+,[\d]+,[\d]+)"
                regex_out = re.search(pattern, line)
                if regex_out:
                    self.cpuinfo.append(regex_out.group(1).strip().split(","))
            self._get_socket_info()

    def _get_socket_info(self):
        # Group the lscpu rows by socket id (list index == socket id).
        self.socket_physical_cores = []  # socket_id is index
        self.socket_logical_cores = []   # socket_id is index
        # BUGFIX: the original took max() over the *string* socket ids, which
        # is lexicographic and wrong for machines with 10+ sockets.
        self.sockets = max(int(line[2]) for line in self.cpuinfo) + 1
        for socket_id in range(self.sockets):
            cur_socket_physical_core = []
            cur_socket_logical_core = []
            for line in self.cpuinfo:
                if socket_id == int(line[2]):
                    if line[1] not in cur_socket_physical_core:
                        cur_socket_physical_core.append(line[1])
                    cur_socket_logical_core.append(line[0])
            self.socket_physical_cores.append(cur_socket_physical_core)
            self.socket_logical_cores.append(cur_socket_logical_core)

    def socket_nums(self):
        '''Number of CPU sockets on this machine.'''
        return self.sockets

    def physical_core_nums(self):
        '''Total physical core count (assumes symmetric sockets).'''
        return len(self.socket_physical_cores) * len(self.socket_physical_cores[0])

    def logical_core_nums(self):
        '''Total logical cpu count (assumes symmetric sockets).'''
        return len(self.socket_logical_cores) * len(self.socket_logical_cores[0])

    def get_socket_physical_cores(self, socket_id):
        '''Physical core ids of one socket.'''
        if socket_id < 0 or socket_id > self.sockets - 1:
            # BUGFIX: the original only logged and then indexed anyway,
            # producing an IndexError or (for negative ids) wrong data.
            logger.error("Invalid socket id")
            exit(-1)
        return self.socket_physical_cores[socket_id]

    def get_socket_logical_cores(self, socket_id):
        '''Logical cpu ids of one socket.'''
        if socket_id < 0 or socket_id > self.sockets - 1:
            logger.error("Invalid socket id")
            exit(-1)
        return self.socket_logical_cores[socket_id]

    def get_all_physical_cores(self):
        '''Flat list of physical core ids over all sockets.'''
        return np.array(self.socket_physical_cores).flatten().tolist()

    def get_all_logical_cores(self):
        '''Flat list of logical cpu ids over all sockets.'''
        return np.array(self.socket_logical_cores).flatten().tolist()
def set_mpi_pin_domain(args):
    '''
    Build the I_MPI_PIN_DOMAIN value that pins each MPI rank to its cores.

    The first ``args.ccl_worker_count`` cores of every rank are reserved for
    oneCCL communication; the remaining cores of the rank form its compute
    pin domain, expressed as one hex bitmask per rank.

    For example: on CascadeLake 8280 CPU, 2 ranks on one node, ccl_worker_count=4:
        CCL_WORKER_COUNT=4
        CCL_WORKER_AFFINITY="0,1,2,3,28,29,30,31"
        I_MPI_PIN_DOMAIN=[0xffffff0,0xffffff0000000]

    :param args: parsed launcher namespace (nproc_per_node, use_logical_core,
                 ccl_worker_count are read).
    :return: the "[mask,mask,...]" string to export as I_MPI_PIN_DOMAIN.
    '''
    cpuinfo = CPUinfo()
    ppn = args.nproc_per_node
    total_cores = cpuinfo.physical_core_nums()
    if args.use_logical_core:
        # BUGFIX: was cpuinfo.logcal_core_nums() (typo), which raised
        # AttributeError whenever --use_logical_core was passed.
        total_cores = cpuinfo.logical_core_nums()
    cores_per_rank = total_cores // ppn
    pin_domain = "["
    for proc in range(ppn):
        domain_binary = 0
        # skip the ccl worker cores at the start of this rank's core range
        begin = proc * cores_per_rank + args.ccl_worker_count
        end = proc * cores_per_rank + cores_per_rank - 1
        for i in range(begin, end + 1):
            domain_binary |= (1 << i)
        pin_domain += hex(domain_binary) + ","
    return pin_domain + "]"
def set_ccl_worker_affinity(args):
    '''
    Export CCL_WORKER_AFFINITY for oneCCL-backed distributed training.

    Computation and communication use different cores: the first
    ``args.ccl_worker_count`` cores of every rank's core range are assigned
    to the oneCCL workers.

    Side effect: sets os.environ["CCL_WORKER_AFFINITY"] to a trailing-comma
    separated core-id list (same format as the original implementation).
    '''
    cpuinfo = CPUinfo()
    ppn = args.nproc_per_node
    total_cores = cpuinfo.physical_core_nums()
    if args.use_logical_core:
        # BUGFIX: was cpuinfo.logcal_core_nums() (typo), which raised
        # AttributeError whenever --use_logical_core was passed.
        total_cores = cpuinfo.logical_core_nums()
    cores_per_rank = total_cores // ppn
    affinity = ''
    for proc in range(ppn):
        for ccl_worker in range(args.ccl_worker_count):
            affinity += str(proc * cores_per_rank + ccl_worker) + ","
    os.environ["CCL_WORKER_AFFINITY"] = affinity
def add_lib_preload(lib_type=None):
    '''
    Locate lib<lib_type>.so in the usual install prefixes and, if found,
    prepend it to the LD_PRELOAD environment variable.

    Search order: $CONDA_PREFIX/lib, ~/.local/lib, /usr/local/lib,
    /usr/local/lib64, /usr/lib, /usr/lib64.

    :return: True when the library was found (and LD_PRELOAD updated),
             False otherwise.
    '''
    candidate_dirs = []
    if "CONDA_PREFIX" in os.environ:
        candidate_dirs.append(os.environ["CONDA_PREFIX"] + "/lib/")
    candidate_dirs += ["{}/.local/lib/".format(expanduser("~")), "/usr/local/lib/",
                       "/usr/local/lib64/", "/usr/lib/", "/usr/lib64/"]
    found = False
    for directory in candidate_dirs:
        hits = glob.glob(directory + "lib" + lib_type + ".so")
        if hits:
            # keep the newly found library first so it wins at load time
            existing = os.environ.get("LD_PRELOAD")
            os.environ["LD_PRELOAD"] = hits[0] if existing is None else hits[0] + ":" + existing
            found = True
            break
    return found
# Directories searched by add_lib_preload(), for the warning message below.
_LIB_SEARCH_PATHS = ("$CONDA_PREFIX/lib or ~/.local/lib/ or /usr/local/lib/"
                     " or /usr/local/lib64/ or /usr/lib or /usr/lib64")

def _warn_allocator_not_found(pretty_name, lib_type):
    # Single home for the "library not installed" warning. The original
    # repeated this string with mismatched .format() arguments (two
    # placeholders, three arguments) and a garbled path list.
    logger.warning("Unable to find the {} library file lib{}.so in {}"
                   " so the LD_PRELOAD environment variable will not be set."
                   .format(pretty_name, lib_type, _LIB_SEARCH_PATHS))

def set_memory_allocator(args):
    '''
    Preload the requested memory allocator (TCMalloc or JeMalloc).

    Priority: an explicitly requested allocator is mandatory (warn if the
    library cannot be found); with --use_default_allocator nothing is
    preloaded; otherwise try TCMalloc first, then JeMalloc, and warn that
    performance may drop if neither is installed.

    Requesting both allocators at once is an error (exit -1).
    '''
    if args.enable_tcmalloc and args.enable_jemalloc:
        logger.error("Unable to enable TCMalloc and JEMalloc at the same time")
        exit(-1)
    if args.enable_tcmalloc:
        find_tc = add_lib_preload(lib_type="tcmalloc")
        if not find_tc:
            _warn_allocator_not_found("TCMalloc", "tcmalloc")
        else:
            logger.info("Use TCMalloc memory allocator")
    elif args.enable_jemalloc:
        find_je = add_lib_preload(lib_type="jemalloc")
        if not find_je:
            _warn_allocator_not_found("JeMalloc", "jemalloc")
        else:
            # BUGFIX: message typo "JeMallocl" corrected
            logger.info("Use JeMalloc memory allocator")
    elif args.use_default_allocator:
        pass
    else:
        # no explicit choice: best effort, TCMalloc preferred
        if add_lib_preload(lib_type="tcmalloc"):
            logger.info("Use TCMalloc memory allocator")
            return
        if add_lib_preload(lib_type="jemalloc"):
            logger.info("Use JeMalloc memory allocator")
            return
        # BUGFIX: original string had "fount", a .format() call with no
        # placeholder, and a duplicated/garbled path list
        logger.warning("Both TCMalloc and JeMalloc are not found in {}"
                       " so the LD_PRELOAD environment variable will not be set."
                       " This may drop the performance".format(_LIB_SEARCH_PATHS))
def set_multi_thread_and_allcator(args):
    '''
    Configure threading and allocator environment for each instance.

    (The misspelled name is kept: callers elsewhere use it.)

    Preloads the chosen memory allocator, then sets OMP_NUM_THREADS,
    KMP_AFFINITY, KMP_BLOCKTIME and DNNL_PRIMITIVE_CACHE_CAPACITY unless
    the user already exported them, and optionally preloads the Intel OMP
    runtime (--enable_iomp).
    '''
    set_memory_allocator(args)
    if "OMP_NUM_THREADS" not in os.environ:
        os.environ["OMP_NUM_THREADS"] = str(args.ncore_per_instance)
    elif "OMP_NUM_THREADS" in os.environ:
        # a user-provided OMP_NUM_THREADS overrides the computed core count
        args.ncore_per_instance = int(os.environ["OMP_NUM_THREADS"])
    if "KMP_AFFINITY" not in os.environ:
        os.environ["KMP_AFFINITY"] = args.kmp_affinity
    if "KMP_BLOCKTIME" not in os.environ:
        os.environ["KMP_BLOCKTIME"] = "1"
    if "DNNL_PRIMITIVE_CACHE_CAPACITY" not in os.environ:
        os.environ["DNNL_PRIMITIVE_CACHE_CAPACITY"] = '1024'
    logger.info("OMP_NUM_THREADS={} ".format(os.environ["OMP_NUM_THREADS"]))
    logger.info("KMP_AFFINITY={}".format(os.environ["KMP_AFFINITY"]))
    logger.info("KMP_BLOCKTIME={}".format(os.environ["KMP_BLOCKTIME"]))
    logger.info("DNNL_PRIMITIVE_CACHE_CAPACITY={}".format(os.environ["DNNL_PRIMITIVE_CACHE_CAPACITY"]))
    if args.enable_iomp:
        find_iomp = add_lib_preload(lib_type="iomp")
        if not find_iomp:
            # BUGFIX: the original passed three .format() args for two
            # placeholders and listed the search paths twice/garbled
            logger.warning("Unable to find the iomp library file libiomp.so in $CONDA_PREFIX/lib or ~/.local/lib/"
                           " or /usr/local/lib/ or /usr/local/lib64/ or /usr/lib or /usr/lib64"
                           " so the LD_PRELOAD environment variable will not be set.")
        else:
            # BUGFIX: message typo "User iomp" corrected
            logger.info("Use iomp")
def launch(args):
    '''
    Single-instance / multi-instance launcher.

    Resolves which CPU cores each instance runs on, configures the
    threading/allocator environment, then spawns one subprocess per
    instance (pinned with numactl unless disabled) and waits for all of
    them, raising CalledProcessError on a nonzero exit.
    '''
    processes = []
    cores = []
    cpuinfo = CPUinfo()
    if args.core_list:  # user specify what cores will be used by params
        cores = args.core_list.strip().split(",")
        if args.ncore_per_instance == -1:
            logger.error("please specify the '--ncore_per_instance' if you have pass the --core_list params")
            exit(-1)
        elif args.ninstances > 1 and args.ncore_per_instance * args.ninstances < len(cores):
            logger.warning("only first {} cores will be used, but you specify {} cores in core_list".format
                           (args.ncore_per_instance * args.ninstances, len(cores)))
        else:
            # derive the instance count from the explicit core list
            args.ninstances = len(cores) // args.ncore_per_instance
    else:
        # No explicit core list: take cores from one socket (--socket_id)
        # or the whole machine, physical cores unless --use_logical_core.
        if args.use_logical_core:
            if args.socket_id != -1:
                cores = cpuinfo.get_socket_logical_cores(args.socket_id)
            else:
                cores = cpuinfo.get_all_logical_cores()
        else:
            if args.socket_id != -1:
                cores = cpuinfo.get_socket_physical_cores(args.socket_id)
            else:
                cores = cpuinfo.get_all_physical_cores()
        # Fill in whichever of ninstances / ncore_per_instance the user left
        # unset (-1 means "not specified on the command line").
        if not args.multi_instance and args.ninstances == -1 and args.ncore_per_instance == -1:
            args.ninstances = 1;
            args.ncore_per_instance = len(cores)
        elif args.multi_instance and args.ninstances == -1 and args.ncore_per_instance == -1:
            args.throughput_performance = True
        elif args.ncore_per_instance == -1 and args.ninstances != -1:
            args.ncore_per_instance = len(cores) // args.ninstances
        elif args.ncore_per_instance != -1 and args.ninstances == -1:
            args.ninstances = len(cores) // args.ncore_per_instance
        else:
            if args.ninstances * args.ncore_per_instance > len(cores):
                logger.error("Please make sure ninstances * ncore_per_instance <= total_cores")
                exit(-1)
        if args.latency_performance:
            # latency mode is hard-wired to 4 physical cores per instance
            if args.ncore_per_instance !=4:
                logger.warning("latency_performance is a specail mode, args.ncore_per_instance can only be set to be 4")
            args.ncore_per_instance = 4
            cores = cpuinfo.get_all_physical_cores()
            args.ninstances = len(cores) // args.ncore_per_instance
        if args.throughput_performance:
            # throughput mode: one instance per socket over all physical cores
            args.ninstances = cpuinfo.socket_nums()
            cores = cpuinfo.get_all_physical_cores()
            args.ncore_per_instance = len(cores) // args.ninstances
    # LAUNCH_CMD records every spawned command line (","-"#" separated)
    os.environ["LAUNCH_CMD"] = "#"
    set_multi_thread_and_allcator(args)
    for i in range(args.ninstances):
        cmd = []
        cur_process_cores = ""
        if not args.disable_numactl:
            cmd = ["numactl"]
            # pin this instance to its contiguous slice of the core list
            for core in cores[i * args.ncore_per_instance:(i + 1) * args.ncore_per_instance]:
                cur_process_cores = cur_process_cores + str(core) + ","
            numa_params = "-C {} ".format(cur_process_cores[:-1])
            cmd.extend(numa_params.split())
        with_python = not args.no_python
        if with_python:
            cmd.append(sys.executable)
        if args.module:
            cmd.append("-m")
        cmd.append(args.program)
        cmd.extend(args.program_args)
        os.environ["LAUNCH_CMD"] += " ".join(cmd) + ",#"
        process = subprocess.Popen(cmd, env=os.environ)
        processes.append(process)
    os.environ["LAUNCH_CMD"] = os.environ["LAUNCH_CMD"][:-2]
    for process in processes:
        process.wait()
        if process.returncode != 0:
            raise subprocess.CalledProcessError(returncode=process.returncode,
                                                cmd=cmd)
def mpi_dist_launch(args):
    '''
    Set ENVs and launch MPI process for distributed training.

    For multi-node runs the hostfile is validated first (IPv4 entries,
    enough entries for nnodes, rank-0 address belongs to this machine,
    passwordless SSH to every other node). Then the allocator, master
    address/port, I_MPI_PIN_DOMAIN, OMP_NUM_THREADS and the oneCCL worker
    settings are exported and a single mpiexec.hydra command is spawned.
    '''
    if args.nnodes > 1 and not os.path.exists(args.hostfile):
        raise ValueError("hostfile is necessary when you use multi-node distributed training,"
                         "Please create hostfile which include the ip list you used for distributed running")
    elif args.nnodes > 1:
        ipv4_addr_pattern = r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"
        ip_list = []
        with open(args.hostfile) as f:
            for line in f:
                line = line.strip().strip("\n")
                # every hostfile entry must be a literal IPv4 address
                is_valid = re.match(ipv4_addr_pattern, line)
                if not is_valid:
                    logger.error("{} is not valid IPV4 address".format(line))
                    exit(-1)
                else:
                    ip_list.append(line)
        if len(ip_list) < args.nnodes:
            logger.error("The number of IP {} should greater than nnodes parameters {}".format(len(ip_list), args.nnodes))
            exit(-1)
        # the first hostfile entry must be one of this machine's addresses
        master_check = False
        dic = psutil.net_if_addrs()
        for adapter in dic:
            snicList = dic[adapter]
            for snic in snicList:
                if snic.address == ip_list[0]:
                    master_check = True
        if not master_check:
            logger.error("MASTER_ADDR is not right. Please make sure the first ip {} in your hostfile is the current node".format(ip_list[0]))
            exit(-1)
        logger.info("Begin to validate the ip connect")
        args.master_addr = ip_list[0]
        for ip in ip_list[1:]:
            # mpiexec needs passwordless SSH to every worker node
            completed_process = subprocess.run("ssh -o PasswordAuthentication=no {} ':'".format(ip), shell=True)
            if completed_process.returncode != 0:
                logger.error("Passwordless SSH login to {} failed, please make sure you have setup SSH public key right")
                exit(-1)
            else:
                logger.info("connection from master node {} to slave node {} is OK".format(args.master_addr, ip))
    set_memory_allocator(args)
    # set distributed related environmental variables
    os.environ["MASTER_ADDR"] = args.master_addr
    os.environ["MASTER_PORT"] = str(args.master_port)
    if "I_MPI_PIN_DOMAIN" not in os.environ:
        mpi_pin_domain = set_mpi_pin_domain(args)
    else:
        mpi_pin_domain = os.environ["I_MPI_PIN_DOMAIN"]
    cpuinfo = CPUinfo()
    ppn = args.nproc_per_node
    total_cores = len(cpuinfo.get_all_physical_cores())
    cores_per_rank = total_cores // ppn
    if "OMP_NUM_THREADS" not in os.environ:
        # computation threads per rank = rank cores minus ccl worker cores
        opm_num_threads = cores_per_rank - args.ccl_worker_count
    else:
        opm_num_threads = os.environ["OMP_NUM_THREADS"]
    os.environ["CCL_WORKER_COUNT"] = str(args.ccl_worker_count)
    if "CCL_WORKER_AFFINITY" not in os.environ:
        set_ccl_worker_affinity(args)
    if "CCL_ATL_TRANSPORT" not in os.environ:
        os.environ["CCL_ATL_TRANSPORT"] = "ofi"
    if args.enable_iomp:
        find_iomp = add_lib_preload(lib_type="iomp")
        if not find_iomp:
            logger.warning("Unable to find the {} library file lib{}.so in $CONDA_PREFIX/lib or /.local/lib/"
                           " or /usr/local/lib/ or /usr/local/lib64/ or /usr/lib or /usr/lib64 or "
                           "~/.local/lib/ so the LD_PRELOAD environment variable will not be set."
                           .format("iomp", "iomp", expanduser("~")))
        else:
            logger.info("Enale iomp by set LD_PRELOAD")
    logger.info("MASTER_ADDR={}".format(args.master_addr))
    logger.info("MASTER_PORT={}".format(args.master_port))
    logger.info("I_MPI_PIN_DOMAIN={}".format(mpi_pin_domain))
    logger.info("OMP_NUM_THREADS={} ".format(opm_num_threads))
    logger.info("CCL_WORKER_COUNT={}".format(args.ccl_worker_count))
    logger.info("CCL_WORKER_AFFINITY={}".format(os.environ["CCL_WORKER_AFFINITY"]))
    os.environ["LAUNCH_CMD"] = "#"
    cmd = ['mpiexec.hydra']
    mpi_config = "-l -np {} -ppn {} -genv I_MPI_PIN_DOMAIN={} -genv OMP_NUM_THREADS={} ".format(args.nnodes*args.nproc_per_node,
                 args.nproc_per_node, mpi_pin_domain, opm_num_threads)
    mpi_config += args.more_mpi_parms
    if args.nnodes > 1:
        mpi_config += " -hostfile {}".format(args.hostfile)
    cmd.extend(mpi_config.split())
    with_python = not args.no_python
    if with_python:
        cmd.append(sys.executable)
        cmd.append("-u")
    if args.module:
        cmd.append("-m")
    cmd.append(args.program)
    cmd.extend(args.program_args)
    process = subprocess.Popen(cmd, env=os.environ)
    process.wait()
    os.environ["LAUNCH_CMD"] += " ".join(cmd) + ",#"
    os.environ["LAUNCH_CMD"] = os.environ["LAUNCH_CMD"][:-2]
def add_distributed_training_params(parser):
    """Register distributed-training (oneCCL/MPI) options on *parser*.

    The default number of processes per node is the CPU socket count
    reported by CPUinfo.
    """
    cpuinfo = CPUinfo()
    socket_nums = cpuinfo.socket_nums()

    group = parser.add_argument_group("Distributed Training Parameters With oneCCL backend")
    group.add_argument("--nnodes", metavar='\b', type=int, default=1,
                       help="The number of nodes to use for distributed "
                            "training")
    group.add_argument("--nproc_per_node", metavar='\b', type=int, default=socket_nums,
                       help="The number of processes to launch on each node")
    # ccl control
    group.add_argument("--ccl_worker_count", metavar='\b', default=4, type=int,
                       help="Core numbers per rank used for ccl communication")
    # mpi control
    group.add_argument("--master_addr", metavar='\b', default="127.0.0.1", type=str,
                       help="Master node (rank 0)'s address, should be either "
                            "the IP address or the hostname of node 0, for "
                            "single node multi-proc training, the "
                            "--master_addr can simply be 127.0.0.1")
    group.add_argument("--master_port", metavar='\b', default=29500, type=int,
                       help="Master node (rank 0)'s free port that needs to "
                            "be used for communication during distributed "
                            "training")
    group.add_argument("--hostfile", metavar='\b', default="hostfile", type=str,
                       # BUG FIX: the original string pieces concatenated to
                       # "...the IP addressor the hostname." (missing space).
                       help="Hostfile is necessary for multi-node multi-proc "
                            "training. hostfile includes the node address list "
                            "node address which should be either the IP address "
                            "or the hostname.")
    # NOTE(review): flag name "more_mpi_parms" is a historical typo of
    # "params" but is part of the public CLI, so it must stay unchanged.
    group.add_argument("--more_mpi_parms", metavar='\b', default="", type=str,
                       help="User can pass more parameters for mpiexec.hydra "
                            "except for -np -ppn -hostfile and -genv I_MPI_PIN_DOMAIN")
def add_memory_allocator_params(parser):
    """Register memory-allocator selection flags on *parser*.

    The three flags are alternatives in intent (tcmalloc / jemalloc /
    default allocator); validation of the combination happens later in
    the launcher, not here.
    """
    alloc_group = parser.add_argument_group("Memory Allocator Parameters")
    # Allocator control: each option is an off-by-default boolean switch.
    for flag, help_text in (
            ("--enable_tcmalloc", "Enable tcmalloc allocator"),
            ("--enable_jemalloc", "Enable jemalloc allocator"),
            ("--use_default_allocator", "Use default memory allocator")):
        alloc_group.add_argument(flag, action='store_true', default=False,
                                 help=help_text)
def add_multi_instance_params(parser):
    """Register multi-instance pinning/tuning options on *parser*.

    These control how many instances are launched, how many cores each
    one gets, and which sockets/cores they are bound to.
    """
    group = parser.add_argument_group("Multi-instance Parameters")
    # multi-instance control
    group.add_argument("--ncore_per_instance", metavar='\b', default=-1, type=int,
                       help="Cores per instance")
    # BUG FIX: help texts below had typos ("insantance", "detault") and
    # --use_logical_core described the opposite of what the flag does.
    group.add_argument("--ninstances", metavar='\b', default=-1, type=int,
                       help="For multi-instance, you should give the cores number you used for per instance.")
    group.add_argument("--latency_performance", action='store_true', default=False,
                       help="By default 4 core per instance and use all physical cores")
    group.add_argument("--throughput_performance", action='store_true', default=False,
                       help="By default one instance per socket and use all physical cores")
    group.add_argument("--socket_id", metavar='\b', default=-1, type=int,
                       help="Socket id for multi-instance, by default all sockets will be used")
    group.add_argument("--use_logical_core", action='store_true', default=False,
                       help="Whether to use logical cores in addition to the physical ones")
    group.add_argument("--disable_numactl", action='store_true', default=False,
                       help="Disable numactl")
    group.add_argument("--core_list", metavar='\b', default=None, type=str,
                       help="Specify the core list as 'core_id, core_id, ....', otherwise, all the cores will be used.")
def add_kmp_iomp_params(parser):
    """Register KMP / Intel OpenMP affinity options on *parser*."""
    group = parser.add_argument_group("KMP/IOMP Affinity Parameters")
    # BUG FIX: help texts had typos ("defualt", "will be add").
    group.add_argument("--kmp_affinity", metavar='\b', default="granularity=fine,compact,1,0", type=str,
                       help="KMP_AFFINITY setup, environment variable has higher priority than this args."
                            "default value is : granularity=fine,compact,1,0")
    group.add_argument("--enable_iomp", action='store_true', default=False,
                       help="Enable iomp and libiomp.so will be added to LD_PRELOAD")
def parse_args():
    """
    Helper function parsing the command line options
    @retval ArgumentParser
    """
    parser = ArgumentParser(description="This is a script for launching PyTorch training and inference on Intel Xeon CPU "
                                        "with optimal configurations. Now, single instance inference/training, multi-instance "
                                        "inference/training and distributed training with oneCCL backend is enabled. "
                                        "To get the peak performance on Intel Xeon CPU, the script optimizes the configuration "
                                        "of thread and memory management. For thread management, the script configures thread "
                                        "affinity and the preload of Intel OMP library. For memory management, it configures "
                                        "NUMA binding and preload optimized memory allocation library (e.g. tcmalloc, jemalloc) "
                                        "\n################################# Basic usage ############################# \n"
                                        "\n 1. single instance\n"
                                        "\n >>> python -m intel_pytorch_extension.launch python_script args \n"
                                        "\n2. multi-instance \n"
                                        "\n >>> python -m intel_pytorch_extension.launch --multi_instance python_script args\n"
                                        "\n3. Single-Node multi-process distributed training\n"
                                        "\n >>> python -m intel_pytorch_extension.launch --distributed python_script args\n"
                                        "\n4. Multi-Node multi-process distributed training: (e.g. two nodes)\n"
                                        "\n rank 0: *(IP: 192.168.10.10, and has a free port: 295000)*\n"
                                        "\n >>> python -m intel_pytorch_extension.launch --distributed --nproc_per_node=2\n"
                                        "\n --nnodes=2 --hostfile hostfile python_script args\n",
                            formatter_class=RawTextHelpFormatter)
    # Launch-mode selection.  --multi_instance and --distributed are mutually
    # exclusive; the combination is rejected later in main(), not here.
    parser.add_argument("--multi_instance", action='store_true', default=False,
                        help="Enable multi-instance, by default one instance per socket")
    parser.add_argument('--distributed', action='store_true', default=False,
                        help='Enable distributed training.')
    # Mirrors the interpreter's own "-m" / direct-exec behaviour for the
    # launched program.
    parser.add_argument("-m", "--module", default=False, action="store_true",
                        help="Changes each process to interpret the launch script "
                             "as a python module, executing with the same behavior as"
                             "'python -m'.")
    parser.add_argument("--no_python", default=False, action="store_true",
                        help="Do not prepend the --program script with \"python\" - just exec "
                             "it directly. Useful when the script is not a Python script.")
    # Option groups defined by the sibling add_*_params helpers in this file.
    add_memory_allocator_params(parser)
    add_kmp_iomp_params(parser)
    add_distributed_training_params(parser)
    add_multi_instance_params(parser)
    # positional
    parser.add_argument("program", type=str,
                        help="The full path to the proram/script to be launched. "
                             "followed by all the arguments for the script")
    # rest from the training program
    parser.add_argument('program_args', nargs=REMAINDER)
    return parser.parse_args()
def main():
    """Launcher entry point.

    Validates the platform and mutually exclusive flags, then dispatches
    to the distributed (MPI) launcher or the local one.  Environment
    variables added during the launch are logged at DEBUG level.

    Raises:
        RuntimeError: on Windows, or when conflicting flags are both set.
    """
    env_before = set(os.environ.keys())
    if platform.system() == "Windows":
        raise RuntimeError("Windows platform is not supported!!!")

    args = parse_args()

    # BUG FIX: these errors fire when BOTH flags are set; the original
    # messages said "Either ... should be set", describing the opposite.
    if args.distributed and args.multi_instance:
        raise RuntimeError("args.distributed and args.multi_instance cannot both be set")
    if args.latency_performance and args.throughput_performance:
        raise RuntimeError("args.latency_performance and args.throughput_performance cannot both be set")

    # Multi-node training implies distributed mode.
    if args.nnodes > 1:
        args.distributed = True

    if args.distributed:
        mpi_dist_launch(args)
    else:
        launch(args)

    # Report every environment variable the launch added.
    for x in sorted(set(os.environ.keys()) - env_before):
        logger.debug(f'{x}={os.environ[x]}')
# Script entry point.
if __name__ == "__main__":
    main()
| 10,841 | -5 | 454 |
e134eca658f6b3c6715f2461726f071c23915ead | 773 | py | Python | dashboard/admin.py | avisionx/fms-portal-iiitd | 1b78112dc0fb92ab8c53512fd76d14f5ea06b587 | [
"MIT"
] | null | null | null | dashboard/admin.py | avisionx/fms-portal-iiitd | 1b78112dc0fb92ab8c53512fd76d14f5ea06b587 | [
"MIT"
] | 1 | 2021-05-29T06:13:21.000Z | 2021-05-29T06:13:21.000Z | dashboard/admin.py | avisionx/fms-portal-iiitd | 1b78112dc0fb92ab8c53512fd76d14f5ea06b587 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Complaint, Notification
admin.site.register(Notification, NotifAdmin)
admin.site.register(Complaint, ComplaintAdmin)
| 20.342105 | 72 | 0.61837 | from django.contrib import admin
from .models import Complaint, Notification
class ComplaintAdmin(admin.ModelAdmin):
    """Django admin configuration for Complaint records."""
    model = Complaint
    verbose_name_plural = 'Complaints'
    # Columns shown in the admin change-list.
    list_display = (
        'complaint_id',
        'customer',
        'category',
        'location',
        'created_at',
        'active'
    )
    # Sidebar filter on open/closed complaints.
    list_filter = (
        'active',
    )
    # Timestamps and user feedback are generated outside the admin and
    # must not be editable by staff.
    readonly_fields = ('created_at', 'updated_at', 'rating', 'feedback')
class NotifAdmin(admin.ModelAdmin):
    """Django admin configuration for Notification records."""
    model = Notification
    verbose_name_plural = 'Notifications'
    # Columns shown in the admin change-list.
    list_display = (
        'msg',
        'created_at',
        'active'
    )
    # Sidebar filter on active/inactive notifications.
    list_filter = (
        'active',
    )
# Register both models with their customized admin classes.
admin.site.register(Notification, NotifAdmin)
admin.site.register(Complaint, ComplaintAdmin)
| 0 | 552 | 46 |
bc79a36f95b9dc54c12111e334dd778a4db2e572 | 3,820 | py | Python | tests/test_operations.py | Jeremiah-England/Shapely | 769b203f2b7cbeeb0a694c21440b4025a563f807 | [
"BSD-3-Clause"
] | 2,382 | 2015-01-04T03:16:59.000Z | 2021-12-10T15:48:56.000Z | tests/test_operations.py | Jeremiah-England/Shapely | 769b203f2b7cbeeb0a694c21440b4025a563f807 | [
"BSD-3-Clause"
] | 1,009 | 2015-01-03T23:44:02.000Z | 2021-12-10T16:02:42.000Z | tests/test_operations.py | Jeremiah-England/Shapely | 769b203f2b7cbeeb0a694c21440b4025a563f807 | [
"BSD-3-Clause"
] | 467 | 2015-01-19T23:18:33.000Z | 2021-12-09T18:31:28.000Z | from . import unittest
import pytest
from shapely.geometry import Point, LineString, Polygon, MultiPoint, \
GeometryCollection
from shapely.wkt import loads
from shapely.geos import TopologicalError
| 36.380952 | 120 | 0.607853 | from . import unittest
import pytest
from shapely.geometry import Point, LineString, Polygon, MultiPoint, \
GeometryCollection
from shapely.wkt import loads
from shapely.geos import TopologicalError
class OperationsTestCase(unittest.TestCase):
    """Unit tests for basic Shapely geometry operations and two
    regression cases (issues #294 and #653)."""
    def test_operations(self):
        """Check measures, topology operations, simplification and set
        operations on simple points and polygons."""
        point = Point(0.0, 0.0)
        # General geometry
        self.assertEqual(point.area, 0.0)
        self.assertEqual(point.length, 0.0)
        self.assertAlmostEqual(point.distance(Point(-1.0, -1.0)),
                               1.4142135623730951)
        # Topology operations
        # Envelope
        self.assertIsInstance(point.envelope, Point)
        # Intersection
        self.assertTrue(point.intersection(Point(-1, -1)).is_empty)
        # Buffer
        self.assertIsInstance(point.buffer(10.0), Polygon)
        self.assertIsInstance(point.buffer(10.0, 32), Polygon)
        # Simplify: without topology preservation the small kink near
        # (120, 120) collapses; with preservation the hole is kept intact.
        p = loads('POLYGON ((120 120, 140 199, 160 200, 180 199, 220 120, '
                  '122 122, 121 121, 120 120))')
        expected = loads('POLYGON ((120 120, 140 199, 160 200, 180 199, '
                         '220 120, 120 120))')
        s = p.simplify(10.0, preserve_topology=False)
        self.assertTrue(s.equals_exact(expected, 0.001))
        p = loads('POLYGON ((80 200, 240 200, 240 60, 80 60, 80 200),'
                  '(120 120, 220 120, 180 199, 160 200, 140 199, 120 120))')
        expected = loads(
            'POLYGON ((80 200, 240 200, 240 60, 80 60, 80 200),'
            '(120 120, 220 120, 180 199, 160 200, 140 199, 120 120))')
        s = p.simplify(10.0, preserve_topology=True)
        self.assertTrue(s.equals_exact(expected, 0.001))
        # Convex Hull
        self.assertIsInstance(point.convex_hull, Point)
        # Differences
        self.assertIsInstance(point.difference(Point(-1, 1)), Point)
        self.assertIsInstance(point.symmetric_difference(Point(-1, 1)),
                              MultiPoint)
        # Boundary
        self.assertIsInstance(point.boundary, GeometryCollection)
        # Union
        self.assertIsInstance(point.union(Point(-1, 1)), MultiPoint)
        self.assertIsInstance(point.representative_point(), Point)
        self.assertIsInstance(point.centroid, Point)
    def test_relate(self):
        """DE-9IM relate matrix; an invalid polygon must raise
        TopologicalError instead of crashing (regression for #294)."""
        # Relate
        self.assertEqual(Point(0, 0).relate(Point(-1, -1)), 'FF0FFF0F2')
        # issue #294: should raise TopologicalError on exception
        invalid_polygon = loads('POLYGON ((40 100, 80 100, 80 60, 40 60, 40 100), (60 60, 80 60, 80 40, 60 40, 60 60))')
        assert(not invalid_polygon.is_valid)
        with pytest.raises(TopologicalError):
            invalid_polygon.relate(invalid_polygon)
    def test_hausdorff_distance(self):
        """Hausdorff distance from the point to this line equals the
        distance to the line's farthest vertex (3, 4)."""
        point = Point(1, 1)
        line = LineString([(2, 0), (2, 4), (3, 4)])
        distance = point.hausdorff_distance(line)
        self.assertEqual(distance, point.distance(Point(3, 4)))
    def test_interpolate(self):
        """Normalized interpolation at 0.5 hits the segment midpoint; an
        empty line must raise ValueError (regression for #653)."""
        # successful interpolation
        test_line = LineString(((1,1),(1,2)))
        known_point = Point(1,1.5)
        interpolated_point = test_line.interpolate(.5, normalized=True)
        self.assertEqual(interpolated_point, known_point)
        # Issue #653; should raise ValueError on exception
        empty_line = loads('LINESTRING EMPTY')
        assert(empty_line.is_empty)
        with pytest.raises(ValueError):
            empty_line.interpolate(.5, normalized=True)
    def test_normalize(self):
        """normalize() leaves a point unchanged and reorders the parts of
        a multi-linestring into the expected canonical form."""
        point = Point(1, 1)
        result = point.normalize()
        assert result == point
        line = loads("MULTILINESTRING ((1 1, 0 0), (1 1, 1 2))")
        result = line.normalize()
        expected = loads("MULTILINESTRING ((1 1, 1 2), (0 0, 1 1))")
        assert result == expected
| 3,411 | 23 | 158 |
4477e3642a343862afd32bfdb254fdd48f6d7b21 | 1,641 | py | Python | binaries/setup.py | meryacine/shaka-streamer | 7b7e90143f531c52d96c162cc7393862db0830b7 | [
"Apache-2.0"
] | 154 | 2019-08-29T16:53:24.000Z | 2022-02-25T00:29:56.000Z | binaries/setup.py | meryacine/shaka-streamer | 7b7e90143f531c52d96c162cc7393862db0830b7 | [
"Apache-2.0"
] | 101 | 2019-08-30T17:34:51.000Z | 2022-03-02T18:46:22.000Z | binaries/setup.py | meryacine/shaka-streamer | 7b7e90143f531c52d96c162cc7393862db0830b7 | [
"Apache-2.0"
] | 56 | 2019-09-08T17:47:22.000Z | 2022-02-23T17:35:11.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import setuptools # type: ignore
import streamer_binaries
separator_index = sys.argv.index('--')
platform_binaries = sys.argv[separator_index + 1:]
sys.argv = sys.argv[:separator_index]
setuptools.setup(
name='shaka-streamer-binaries',
version=streamer_binaries.__version__,
author='Google',
description='A package containing FFmpeg, FFprobe, and Shaka Packager static builds.',
long_description=('An auxiliary package that provides platform-specific'
' binaries used by Shaka Streamer.'),
url='https://github.com/google/shaka-streamer/tree/master/binaries',
packages=[streamer_binaries.__name__,],
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
],
package_data={
# Only add the corresponding platform specific binaries to the wheel.
streamer_binaries.__name__: platform_binaries,
}
) | 36.466667 | 88 | 0.728215 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import setuptools # type: ignore
import streamer_binaries
separator_index = sys.argv.index('--')
platform_binaries = sys.argv[separator_index + 1:]
sys.argv = sys.argv[:separator_index]
setuptools.setup(
name='shaka-streamer-binaries',
version=streamer_binaries.__version__,
author='Google',
description='A package containing FFmpeg, FFprobe, and Shaka Packager static builds.',
long_description=('An auxiliary package that provides platform-specific'
' binaries used by Shaka Streamer.'),
url='https://github.com/google/shaka-streamer/tree/master/binaries',
packages=[streamer_binaries.__name__,],
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
],
package_data={
# Only add the corresponding platform specific binaries to the wheel.
streamer_binaries.__name__: platform_binaries,
}
) | 0 | 0 | 0 |
bab48fc4bbab67505fa893b469125721ab5426d4 | 9,047 | py | Python | tests/preprocess/task.py | jarokaz/nvidia-merlin-on-vertex | f40fe997108395a310b9a9ec4d97110967d725ac | [
"Apache-2.0"
] | 1 | 2021-11-16T13:11:38.000Z | 2021-11-16T13:11:38.000Z | tests/preprocess/task.py | jarokaz/nvidia-merlin-on-vertex | f40fe997108395a310b9a9ec4d97110967d725ac | [
"Apache-2.0"
] | null | null | null | tests/preprocess/task.py | jarokaz/nvidia-merlin-on-vertex | f40fe997108395a310b9a9ec4d97110967d725ac | [
"Apache-2.0"
] | 1 | 2021-11-16T13:11:41.000Z | 2021-11-16T13:11:41.000Z | import argparse
import logging
import os
import sys
import time
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
import fsspec
import nvtabular as nvt
from nvtabular.io.shuffle import Shuffle
from nvtabular.ops import Categorify
from nvtabular.ops import Clip
from nvtabular.ops import FillMissing
from nvtabular.ops import Normalize
from nvtabular.utils import device_mem_size
import numpy as np
from typing import Dict, List, Union
def create_csv_dataset(
data_paths,
sep,
recursive,
col_dtypes,
frac_size,
client
):
"""Create nvt.Dataset definition for CSV files."""
fs_spec = fsspec.filesystem('file')
rec_symbol = '**' if recursive else '*'
valid_paths = []
for path in data_paths:
try:
if fs_spec.isfile(path):
valid_paths.append(path)
else:
path = os.path.join(path, rec_symbol)
for i in fs_spec.glob(path):
if fs_spec.isfile(i):
valid_paths.append(f'{i}')
except FileNotFoundError as fnf_expt:
print(fnf_expt)
print('Incorrect path: {path}.')
except OSError as os_err:
print(os_err)
print('Verify access to the bucket.')
return nvt.Dataset(
path_or_source=valid_paths,
engine='csv',
names=list(col_dtypes.keys()),
sep=sep,
dtypes=col_dtypes,
part_mem_fraction=frac_size,
client=client,
assume_missing=True
)
def convert_csv_to_parquet(
output_path,
dataset,
output_files,
shuffle=None
):
"""Convert CSV file to parquet and write to GCS."""
if shuffle == 'None':
shuffle = None
else:
try:
shuffle = getattr(Shuffle, shuffle)
except:
print('Shuffle method not available. Using default.')
shuffle = None
dataset.to_parquet(
output_path,
shuffle=shuffle,
output_files=output_files
)
def create_criteo_nvt_workflow(client):
"""Create a nvt.Workflow definition with transformation all the steps."""
# Columns definition
cont_names = ['I' + str(x) for x in range(1, 14)]
cat_names = ['C' + str(x) for x in range(1, 27)]
# Transformation pipeline
num_buckets = 10000000
categorify_op = Categorify(max_size=num_buckets)
cat_features = cat_names >> categorify_op
cont_features = cont_names >> FillMissing() >> Clip(
min_value=0) >> Normalize()
features = cat_features + cont_features + ['label']
# Create and save workflow
return nvt.Workflow(features, client)
def create_cluster(
n_workers,
device_limit_frac,
device_pool_frac,
memory_limit
):
"""Create a Dask cluster to apply the transformations steps to the Dataset."""
device_size = device_mem_size()
device_limit = int(device_limit_frac * device_size)
device_pool_size = int(device_pool_frac * device_size)
rmm_pool_size = (device_pool_size // 256) * 256
cluster = LocalCUDACluster(
n_workers=n_workers,
device_memory_limit=device_limit,
rmm_pool_size=rmm_pool_size,
memory_limit=memory_limit
)
return Client(cluster)
def create_parquet_dataset(
client,
data_path,
frac_size
):
"""Create a nvt.Dataset definition for the parquet files."""
fs = fsspec.filesystem('file')
file_list = fs.glob(
os.path.join(data_path, '*.parquet')
)
if not file_list:
raise FileNotFoundError('Parquet file(s) not found')
file_list = [os.path.join(i) for i in file_list]
return nvt.Dataset(
file_list,
engine='parquet',
part_mem_fraction=frac_size,
client=client
)
def save_dataset(
dataset,
output_path,
output_files,
shuffle=None
):
"""Save dataset to parquet files to path."""
if shuffle == 'None':
shuffle = None
else:
try:
shuffle = getattr(Shuffle, shuffle)
except:
print('Shuffle method not available. Using default.')
shuffle = None
dataset.to_parquet(
output_path=output_path,
shuffle=shuffle,
output_files=output_files,
write_hugectr_keyset=True
)
def get_criteo_col_dtypes() -> Dict[str, Union[str, np.int32]]:
"""Returns a dict mapping column names to numpy dtype."""
# Specify column dtypes. Note that "hex" means that
# the values will be hexadecimal strings that should
# be converted to int32
col_dtypes = {}
col_dtypes["label"] = np.int32
for x in ["I" + str(i) for i in range(1, 14)]:
col_dtypes[x] = np.int32
for x in ["C" + str(i) for i in range(1, 27)]:
col_dtypes[x] = "hex"
return col_dtypes
# --------------------------------------------
# ---------- Convert CSV to Parquet ----------
# --------------------------------------------
# --------------------------------------------
# -------------- Analyse Dataset -------------
# --------------------------------------------
# --------------------------------------------
# -------- Transform Parquet Dataset ---------
# --------------------------------------------
def parse_args():
"""Parses command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('--task',
type=str,
required=False)
parser.add_argument('--csv_data_path',
required=False,
nargs='+')
parser.add_argument('--parquet_data_path',
type=str,
required=False)
parser.add_argument('--output_path',
type=str,
required=False)
parser.add_argument('--output_files',
type=int,
required=False)
parser.add_argument('--workflow_path',
type=str,
required=False)
parser.add_argument('--n_workers',
type=int,
required=False)
parser.add_argument('--sep',
type=str,
required=False)
parser.add_argument('--frac_size',
type=float,
required=False,
default=0.10)
parser.add_argument('--memory_limit',
type=int,
required=False,
default=100_000_000_000)
parser.add_argument('--device_limit_frac',
type=float,
required=False,
default=0.60)
parser.add_argument('--device_pool_frac',
type=float,
required=False,
default=0.90)
return parser.parse_args()
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s - %(message)s',
level=logging.INFO,
datefmt='%d-%m-%y %H:%M:%S',
stream=sys.stdout)
parsed_args = parse_args()
start_time = time.time()
logging.info('Timing task')
if parsed_args.task == 'convert':
main_convert(parsed_args)
elif parsed_args.task == 'analyse':
main_analyse(parsed_args)
elif parsed_args.task == 'transform':
main_transform(parsed_args)
end_time = time.time()
elapsed_time = end_time - start_time
logging.info('Task completed. Elapsed time: %s', elapsed_time)
| 25.922636 | 80 | 0.623522 | import argparse
import logging
import os
import sys
import time
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
import fsspec
import nvtabular as nvt
from nvtabular.io.shuffle import Shuffle
from nvtabular.ops import Categorify
from nvtabular.ops import Clip
from nvtabular.ops import FillMissing
from nvtabular.ops import Normalize
from nvtabular.utils import device_mem_size
import numpy as np
from typing import Dict, List, Union
def create_csv_dataset(
        data_paths,
        sep,
        recursive,
        col_dtypes,
        frac_size,
        client
):
    """Create an nvt.Dataset definition for CSV files.

    Args:
        data_paths: list of files and/or directories to scan for CSV input.
        sep: field separator used by the CSV files.
        recursive: when True, directories are globbed recursively ('**').
        col_dtypes: mapping of column name -> dtype.
        frac_size: fraction of device memory per dataset partition.
        client: Dask distributed client attached to the dataset.

    Returns:
        nvt.Dataset built over every regular file found; unreadable paths
        are reported on stdout and skipped (best-effort behaviour kept
        from the original).
    """
    fs_spec = fsspec.filesystem('file')
    rec_symbol = '**' if recursive else '*'
    valid_paths = []
    for path in data_paths:
        try:
            if fs_spec.isfile(path):
                valid_paths.append(path)
            else:
                # Treat the entry as a directory and expand it with glob.
                path = os.path.join(path, rec_symbol)
                for i in fs_spec.glob(path):
                    if fs_spec.isfile(i):
                        valid_paths.append(f'{i}')
        except FileNotFoundError as fnf_expt:
            print(fnf_expt)
            # BUG FIX: the original lacked the f-prefix and printed the
            # literal text "{path}" instead of the offending path.
            print(f'Incorrect path: {path}.')
        except OSError as os_err:
            print(os_err)
            print('Verify access to the bucket.')

    return nvt.Dataset(
        path_or_source=valid_paths,
        engine='csv',
        names=list(col_dtypes.keys()),
        sep=sep,
        dtypes=col_dtypes,
        part_mem_fraction=frac_size,
        client=client,
        assume_missing=True
    )
def convert_csv_to_parquet(
        output_path,
        dataset,
        output_files,
        shuffle=None
):
    """Convert a CSV-backed dataset to parquet and write it to *output_path*.

    Args:
        output_path: destination directory for the parquet files.
        dataset: nvt.Dataset to convert.
        output_files: number of parquet files to produce.
        shuffle: name of an nvtabular Shuffle member, the string 'None',
            or None; unresolvable values fall back to no shuffling.
    """
    if shuffle == 'None':
        shuffle = None
    else:
        try:
            shuffle = getattr(Shuffle, shuffle)
        except (TypeError, AttributeError):
            # BUG FIX: was a bare ``except`` that also swallowed
            # KeyboardInterrupt/SystemExit.  TypeError covers a non-string
            # shuffle (e.g. the default None); AttributeError covers an
            # unknown shuffle name.
            print('Shuffle method not available. Using default.')
            shuffle = None
    dataset.to_parquet(
        output_path,
        shuffle=shuffle,
        output_files=output_files
    )
def create_criteo_nvt_workflow(client):
    """Build the Criteo preprocessing nvt.Workflow.

    Categorical columns are categorified into bounded-size vocabularies;
    continuous columns are imputed, clipped at zero and normalized.
    """
    # Continuous columns I1..I13 and categorical columns C1..C26.
    continuous_cols = [f'I{idx}' for idx in range(1, 14)]
    categorical_cols = [f'C{idx}' for idx in range(1, 27)]

    # Each categorical vocabulary is capped at this many entries.
    bucket_count = 10000000
    categorical_pipeline = categorical_cols >> Categorify(max_size=bucket_count)
    continuous_pipeline = (continuous_cols
                           >> FillMissing()
                           >> Clip(min_value=0)
                           >> Normalize())

    # Label column passes through untouched.
    selected = categorical_pipeline + continuous_pipeline + ['label']
    return nvt.Workflow(selected, client)
def create_cluster(
        n_workers,
        device_limit_frac,
        device_pool_frac,
        memory_limit
):
    """Spin up a LocalCUDACluster and return a Dask client attached to it.

    ``device_limit_frac`` and ``device_pool_frac`` are fractions of the
    physical device memory; the RMM pool size is rounded down to a
    multiple of 256 bytes.
    """
    total_device_mem = device_mem_size()
    worker_device_limit = int(device_limit_frac * total_device_mem)
    pool_bytes = (int(device_pool_frac * total_device_mem) // 256) * 256
    cuda_cluster = LocalCUDACluster(
        n_workers=n_workers,
        device_memory_limit=worker_device_limit,
        rmm_pool_size=pool_bytes,
        memory_limit=memory_limit
    )
    return Client(cuda_cluster)
def create_parquet_dataset(
        client,
        data_path,
        frac_size
):
    """Create an nvt.Dataset definition for the parquet files in *data_path*.

    Args:
        client: Dask distributed client attached to the dataset.
        data_path: directory containing ``*.parquet`` files (not recursive).
        frac_size: fraction of device memory per dataset partition.

    Raises:
        FileNotFoundError: when no parquet file matches.
    """
    fs = fsspec.filesystem('file')
    file_list = fs.glob(
        os.path.join(data_path, '*.parquet')
    )
    if not file_list:
        raise FileNotFoundError('Parquet file(s) not found')
    # NOTE: the original re-mapped each entry through os.path.join(i),
    # which is a no-op with a single argument; glob results are used as-is.
    return nvt.Dataset(
        file_list,
        engine='parquet',
        part_mem_fraction=frac_size,
        client=client
    )
def save_dataset(
        dataset,
        output_path,
        output_files,
        shuffle=None
):
    """Write *dataset* to *output_path* as parquet, with a HugeCTR keyset.

    Args:
        dataset: transformed nvt.Dataset to persist.
        output_path: destination directory.
        output_files: number of parquet files to produce.
        shuffle: name of an nvtabular Shuffle member, the string 'None',
            or None; unresolvable values fall back to no shuffling.
    """
    if shuffle == 'None':
        shuffle = None
    else:
        try:
            shuffle = getattr(Shuffle, shuffle)
        except (TypeError, AttributeError):
            # BUG FIX: was a bare ``except``; see convert_csv_to_parquet.
            print('Shuffle method not available. Using default.')
            shuffle = None
    dataset.to_parquet(
        output_path=output_path,
        shuffle=shuffle,
        output_files=output_files,
        write_hugectr_keyset=True
    )
def get_criteo_col_dtypes() -> Dict[str, Union[str, np.int32]]:
    """Return the Criteo column-name -> dtype mapping.

    The label and the 13 integer features (I1..I13) are int32; the 26
    categorical features (C1..C26) are "hex", i.e. hexadecimal strings
    that should later be converted to int32.
    """
    dtypes: Dict[str, Union[str, np.int32]] = {'label': np.int32}
    dtypes.update({f'I{i}': np.int32 for i in range(1, 14)})
    dtypes.update({f'C{i}': 'hex' for i in range(1, 27)})
    return dtypes
# --------------------------------------------
# ---------- Convert CSV to Parquet ----------
def main_convert(args):
    """Pipeline step: read the raw CSV shards and rewrite them as parquet."""
    logging.info('Creating cluster')
    dask_client = create_cluster(args.n_workers, args.device_limit_frac,
                                 args.device_pool_frac, args.memory_limit)
    logging.info('Creating CSV dataset')
    # recursive=False: only the given paths / top-level directories are read.
    csv_dataset = create_csv_dataset(args.csv_data_path, args.sep, False,
                                     get_criteo_col_dtypes(), args.frac_size,
                                     dask_client)
    logging.info('Converting CSV to Parquet')
    convert_csv_to_parquet(args.output_path, csv_dataset, args.output_files)
# --------------------------------------------
# --------------------------------------------
# -------------- Analyse Dataset -------------
def main_analyse(args):
    """Pipeline step: fit the NVTabular workflow statistics and save them."""
    logging.info('Creating cluster')
    dask_client = create_cluster(args.n_workers, args.device_limit_frac,
                                 args.device_pool_frac, args.memory_limit)
    logging.info('Creating Parquet dataset')
    parquet_dataset = create_parquet_dataset(client=dask_client,
                                             data_path=args.parquet_data_path,
                                             frac_size=args.frac_size)
    logging.info('Creating Workflow')
    workflow = create_criteo_nvt_workflow(dask_client)
    logging.info('Analyzing dataset')
    workflow = workflow.fit(parquet_dataset)
    logging.info('Saving Workflow')
    workflow.save(args.output_path)
# --------------------------------------------
# --------------------------------------------
# -------- Transform Parquet Dataset ---------
def main_transform(args):
    """Pipeline step: apply a previously fitted workflow and persist the result."""
    logging.info('Creating cluster')
    dask_client = create_cluster(args.n_workers, args.device_limit_frac,
                                 args.device_pool_frac, args.memory_limit)
    logging.info('Creating Parquet dataset')
    parquet_dataset = create_parquet_dataset(client=dask_client,
                                             data_path=args.parquet_data_path,
                                             frac_size=args.frac_size)
    logging.info('Loading Workflow')
    workflow = nvt.Workflow.load(args.workflow_path, dask_client)
    logging.info('Transforming Dataset')
    transformed = workflow.transform(parquet_dataset)
    logging.info('Saving transformed dataset')
    save_dataset(transformed, output_path=args.output_path,
                 output_files=args.output_files)
# --------------------------------------------
def parse_args():
    """Parse the command-line arguments for the preprocessing tasks."""
    parser = argparse.ArgumentParser()
    # Task selection and I/O locations (all optional).
    parser.add_argument('--task', type=str, required=False)
    parser.add_argument('--csv_data_path', required=False, nargs='+')
    parser.add_argument('--parquet_data_path', type=str, required=False)
    parser.add_argument('--output_path', type=str, required=False)
    parser.add_argument('--output_files', type=int, required=False)
    parser.add_argument('--workflow_path', type=str, required=False)
    parser.add_argument('--n_workers', type=int, required=False)
    parser.add_argument('--sep', type=str, required=False)
    # Cluster / memory tuning knobs with defaults.
    parser.add_argument('--frac_size', type=float, required=False, default=0.10)
    parser.add_argument('--memory_limit', type=int, required=False,
                        default=100_000_000_000)
    parser.add_argument('--device_limit_frac', type=float, required=False,
                        default=0.60)
    parser.add_argument('--device_pool_frac', type=float, required=False,
                        default=0.90)
    return parser.parse_args()
if __name__ == '__main__':
    # Log to stdout with a timestamp on every record.
    logging.basicConfig(format='%(asctime)s - %(message)s',
                        level=logging.INFO,
                        datefmt='%d-%m-%y %H:%M:%S',
                        stream=sys.stdout)
    parsed_args = parse_args()
    start_time = time.time()
    logging.info('Timing task')
    # Dispatch on the requested pipeline stage; any other --task value
    # (or none) falls through and does nothing.
    if parsed_args.task == 'convert':
        main_convert(parsed_args)
    elif parsed_args.task == 'analyse':
        main_analyse(parsed_args)
    elif parsed_args.task == 'transform':
        main_transform(parsed_args)
    end_time = time.time()
    elapsed_time = end_time - start_time
    logging.info('Task completed. Elapsed time: %s', elapsed_time)
| 1,796 | 0 | 66 |
737f31f4d982127703765a37410e7fd674578fdd | 20,144 | py | Python | pipopipette.py | itskatt/pipopipette | a39ecc1997f1a64a5539c4642b1197e9f719ba1c | [
"MIT"
] | null | null | null | pipopipette.py | itskatt/pipopipette | a39ecc1997f1a64a5539c4642b1197e9f719ba1c | [
"MIT"
] | null | null | null | pipopipette.py | itskatt/pipopipette | a39ecc1997f1a64a5539c4642b1197e9f719ba1c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 19 10:23:29 2021
@author: caldwell ©
"""
import pygame as pg
from random import choice
# from pprint import pprint
## --- Game settings ------------------------------
# whether we play against the AI (True) or player vs player (False)
MODE_IA = True
# the size of the game grid (width, height)
TAILLE_GRILLE = (3, 2)
# the screen size at startup
TAILLE_ECRAN = (1080, 720)
# the recursion depth limit for the AI search
MAX_PROFONDEUR = 5
## ----------------------------------------------------
## the colours
# https://coolors.co/00d9ff-ff1f1f-67697c-253d5b-dbd053 (link not up to date)
COUL_FOND = (37, 61, 91)
COUL_CERCLE_JONCTION = (219, 208, 83)
COUL_SEGMENTS = (103, 105, 124)
COUL_SEGMENT_HOVER = (128, 131, 158)
COUL_BLEU = (0, 217, 255)
COUL_ROUGE = (255, 56, 56)
class Grille:
    """
    The dots-and-boxes (pipopipette) game grid.

    State: the owner of every segment (None = unplayed, 0 = blue,
    1 = red), the list of boxes (each box is 4 segment indices) and
    who won each box.

    ----
    taille (tuple): grid size (width, height), counted in boxes.
    copie (Grille): optional grid to duplicate instead of building a
        fresh one (used by the minimax search).
    """

    def __init__(self, taille, *, copie=None):
        # Base dimensions, in boxes.
        self.largeur, self.hauteur = taille
        # Copy-constructor path: duplicate the mutable state of `copie`.
        # (Without this constructor, copie() and Grille(TAILLE_GRILLE)
        # would fail with a TypeError.)
        if copie is not None:
            self.joueur_actuel = copie.joueur_actuel
            self.nb_tour = copie.nb_tour
            # Deep copy of the segment owners (per-orientation rows).
            self.segments = []
            for rang in copie.segments:
                nrang = []
                for rang_indiv in rang:
                    nrang.append(rang_indiv.copy())
                self.segments.append(nrang)
            # These never change after setup, so they can be shared.
            self.carres = copie.carres
            self.table_acces = copie.table_acces
            self.carres_gagnes = copie.carres_gagnes.copy()
            return  # nothing else to build when copying
        self.setup()

    def setup(self):
        """Initialise a fresh, empty grid."""
        self.joueur_actuel = 0  # 0 = blue, 1 = red
        self.nb_tour = 0  # number of full turns since the game started
        ## Segment creation
        # Each row holds one more segment than the grid width (same for
        # columns/height).  table_acces maps a flat segment index to its
        # location in self.segments: (orientation, row, position in row).
        self.table_acces = {}
        ind_segment = 0
        # Step 1: the "lignes" orientation.
        lignes = []
        for i in range(self.hauteur):
            rang_indiv = []
            for j in range(self.largeur + 1):
                rang_indiv.append(None)
                self.table_acces[ind_segment] = (0, i, j)
                ind_segment += 1
            lignes.append(rang_indiv)
        # Step 2: the "colones" orientation.
        colones = []
        for i in range(self.largeur):
            rang_indiv = []
            for j in range(self.hauteur + 1):
                rang_indiv.append(None)
                self.table_acces[ind_segment] = (1, i, j)
                ind_segment += 1
            colones.append(rang_indiv)
        # Step 3: bundle both orientations together.
        self.segments = [lignes, colones]
        ## Box creation: a box is just 4 related segment indices.
        # Index of the last segment of the first orientation, computed
        # up-front to address segments of the second orientation.
        dernier_vertical = self.largeur + (self.largeur + 1) * (self.hauteur - 1)
        self.carres = []
        for j in range(self.hauteur):
            for i in range(self.largeur):
                carre = []
                # Two segments from the first orientation; moving down a
                # row shifts the indices by the row stride (largeur + 1).
                carre.append(j * (self.largeur + 1) + i)
                carre.append(j * (self.largeur + 1) + i + 1)
                # Two segments from the second orientation, offset past
                # dernier_vertical; moving right a column shifts the
                # indices by the column stride (hauteur + 1).
                carre.append(dernier_vertical + j + i * (self.hauteur + 1) + 1)
                carre.append(dernier_vertical + j + i * (self.hauteur + 1) + 2)
                self.carres.append(carre)
        # Winner of each box (None while unclaimed).
        self.carres_gagnes = [None] * len(self.carres)

    def reset(self):
        """Reset the grid to an empty state."""
        self.setup()

    def copie(self):
        """Return an independent copy of this grid."""
        grille = Grille((self.largeur, self.hauteur), copie=self)
        return grille

    def changer_joueur(self):
        """Give the hand to the other player (blue -> red bumps nb_tour)."""
        if self.joueur_actuel == 0:
            self.joueur_actuel = 1
            self.nb_tour += 1
        else:
            self.joueur_actuel = 0

    def get_segment(self, indice):
        """
        Return the owner of the segment at the given flat index.

        ----
        indice (int): the segment index; an invalid index raises KeyError.
        """
        orientation, rang, pos_rang = self.table_acces[indice]
        return self.segments[orientation][rang][pos_rang]

    def set_segment(self, indice, couleur):
        """
        Set the owner/colour of the segment at the given flat index.

        ----
        indice (int): the segment index; an invalid index raises KeyError.
        couleur: player id (0 or 1) or None.
        """
        orientation, rang, pos_rang = self.table_acces[indice]
        self.segments[orientation][rang][pos_rang] = couleur

    def get_carres(self, ind_segment):
        """Return the indices of every box that contains this segment."""
        carres = []
        for ind, carre in enumerate(self.carres):
            if ind_segment in carre:
                carres.append(ind)
        if len(carres) == 0:
            raise IndexError(f"Indice invalide ({ind_segment})")
        return carres

    def carre_rempli(self, ind_carre):
        """Return True when all 4 segments of the box are played (not None)."""
        carre = self.carres[ind_carre]
        for seg in carre:
            if self.get_segment(seg) is None:
                # at least one free segment: the box is not complete
                return False
        return True

    def score_detaille(self):
        """Return the box counts as a (blue, red) tuple."""
        bleu = 0
        rouge = 0
        for carre in self.carres_gagnes:
            if carre == 0:
                bleu += 1
            elif carre == 1:
                rouge += 1
        return bleu, rouge

    def calculer_score(self):
        """
        Return the game score as a single number (red minus blue boxes):
        a score < 0 means blue leads, > 0 means red leads.
        """
        bleu, rouge = self.score_detaille()
        return rouge - bleu

    def partie_finie(self):
        """Return True when the game is over (no free segment remains)."""
        for ind, chemin in self.table_acces.items():
            orientation, rang, pos_rang = chemin
            seg = self.segments[orientation][rang][pos_rang]
            if seg is None:
                return False
        return True  # no None found anywhere

    def coups_possibles_ia(self):
        """Return the indices of every free segment."""
        segments_libres = []
        for ind, chemin in self.table_acces.items():
            orientation, rang, pos_rang = chemin
            seg = self.segments[orientation][rang][pos_rang]
            if seg is None:
                segments_libres.append(ind)
        return segments_libres

    def calcul_coup_ia(self, profondeur=0):
        """
        Minimax search: return (score, segment) for the best move at this
        position, exploring at most MAX_PROFONDEUR plies.
        """
        # Leaf: game over or search depth exhausted.
        if self.partie_finie() or profondeur == MAX_PROFONDEUR:
            return self.calculer_score(), None
        ## The minimax proper.
        choix_coups = []
        for segment in self.coups_possibles_ia():
            # Play the move on a copy and score the resulting subtree.
            copie_grille = self.copie()
            copie_grille.jouer_coup(segment)
            score, _ = copie_grille.calcul_coup_ia(profondeur + 1)
            choix_coups.append((score, segment))
        # Blue (0) minimises the score, red (1) maximises it.
        if self.joueur_actuel == 0:  # blue
            meilleur_choix = min(choix_coups)
        else:  # red
            meilleur_choix = max(choix_coups)
        return meilleur_choix

    def jouer_coup_ia(self):
        """Play the AI's (red's) best move(s) for this turn."""
        # On the AI's first turn, on a big-enough grid, a random move is
        # good enough and avoids the full search.
        if self.nb_tour == 1 and (self.largeur, self.hauteur) >= (2, 2):
            choix = choice(self.coups_possibles_ia())
            self.jouer_coup(choix)
            return
        # Loop so the AI plays again after completing a box
        # (hard-coded for red, player 1).
        while self.joueur_actuel == 1 and not self.partie_finie():
            _, meilleur_coup = self.calcul_coup_ia()
            if meilleur_coup is not None:  # happens at the end of the game
                self.jouer_coup(meilleur_coup)

    def coup_valide(self, ind_segment):
        """Return True when the segment has not been played yet."""
        return self.get_segment(ind_segment) is None

    def jouer_coup(self, ind_segment):
        """
        Play a move for the current player.

        The move must already be valid (checked with self.coup_valide).
        Colours the segment, credits any box it completes, and switches
        players unless a box was just won (winner plays again).
        """
        # Claim the segment.
        self.set_segment(ind_segment, self.joueur_actuel)
        ## Check whether a box was completed by this move.
        changement = True
        carres = self.get_carres(ind_segment)
        for carre in carres:
            if self.carre_rempli(carre):
                self.carres_gagnes[carre] = self.joueur_actuel
                changement = False  # box won: same player keeps the hand
        # Finally, hand over to the other player if nothing was won.
        if changement:
            self.changer_joueur()
class JeuPipopipette:
    """
    The pygame front-end of the game: window, rendering and input.
    """

    def __init__(self):
        # Start pygame before anything else (fonts, display...).
        pg.init()
        # The game model.  (Without this constructor the class was
        # unusable: every method reads state initialised here.)
        self.grille = Grille(TAILLE_GRILLE)
        # Size-dependent values for the initial window size.
        self.maj_tailles(TAILLE_ECRAN)
        # Rect objects of the drawn segments, used to map a mouse
        # position back to a segment index.
        self.rects_segment = []
        # Index of the segment currently under the mouse (highlighted).
        self.segment_hover = None
        # The display surface.
        self.surf = pg.display.set_mode(TAILLE_ECRAN, pg.RESIZABLE)

    def maj_tailles(self, taille_ecran):
        """Create or refresh every size-dependent value for the window size."""
        # window size
        self.largeur, self.hauteur = taille_ecran
        # smallest window side (numerator) over largest grid side
        # (denominator), so the grid always fits
        cote_min = min(self.largeur, self.hauteur)
        cote_max = max(self.grille.largeur, self.grille.hauteur)
        # segment dimensions
        self.long_segment = cote_min / (1.2 * cote_max)
        self.larg_segment = round(self.long_segment / 8)
        # circle radii (won-box marker, junction dot)
        self.rayon_cercle = round(self.long_segment / 3.5)
        self.rayon_cercle_jonction = round(self.long_segment / 5.5)
        # top-left corner of the grid, chosen so the grid is centred;
        # all drawing starts from this point
        x = self.largeur / 2 - self.grille.largeur * self.long_segment / 2
        y = self.hauteur / 2 - self.grille.hauteur * self.long_segment / 2
        self.depart_grille = [x, y]
        # HUD font
        self.police = pg.font.SysFont("Impact", round(cote_min / 12))

    def get_couleur(self, objet, ind=None):
        """Return the colour to use for a given segment/box value."""
        if objet == 0:  # blue player
            couleur = COUL_BLEU
        elif objet == 1:  # red player
            couleur = COUL_ROUGE
        else:
            # unplayed segment: highlighted if hovered
            if self.segment_hover == ind:
                couleur = COUL_SEGMENT_HOVER
            else:
                couleur = COUL_SEGMENTS
        return couleur

    def dessiner_grille(self):
        """Draw the grid: segments, won boxes and junction dots."""
        # Rebuild the hit-test rects from scratch each frame.
        self.rects_segment.clear()
        lignes, colones = self.grille.segments
        ## Segments -------------------------------------------------------
        depart_l = self.depart_grille.copy()
        depart_c = self.depart_grille.copy()
        indice_segment = 0  # running flat index, used for hover highlight
        # first orientation
        for rangee in lignes:
            for ind_seg, segment in enumerate(rangee):
                couleur = self.get_couleur(segment, indice_segment)
                indice_segment += 1
                rect = pg.draw.line(
                    self.surf, couleur,
                    (depart_l[0] + (self.long_segment * ind_seg), depart_l[1]),
                    (depart_l[0] + (self.long_segment * ind_seg), depart_l[1] + self.long_segment),
                    self.larg_segment
                )
                self.rects_segment.append(rect)
            depart_l[1] += self.long_segment
        # second orientation
        for rangee in colones:
            for ind_seg, segment in enumerate(rangee):
                couleur = self.get_couleur(segment, indice_segment)
                indice_segment += 1
                rect = pg.draw.line(
                    self.surf, couleur,
                    (depart_c[0], depart_c[1] + (self.long_segment * ind_seg)),
                    (depart_c[0] + self.long_segment, depart_c[1] + (self.long_segment * ind_seg)),
                    self.larg_segment
                )
                self.rects_segment.append(rect)
            depart_c[0] += self.long_segment
        ## Won boxes ------------------------------------------------------
        ind = 0
        for j in range(self.grille.hauteur):
            for i in range(self.grille.largeur):
                carre = self.grille.carres_gagnes[ind]
                if carre is not None:
                    couleur = self.get_couleur(carre)
                    # centre of box (i, j)
                    topleft = (
                        self.depart_grille[0] + self.long_segment * (i + 0.5),
                        self.depart_grille[1] + self.long_segment * (j + 0.5)
                    )
                    pg.draw.circle(
                        self.surf, couleur,
                        (round(topleft[0]), round(topleft[1])),
                        self.rayon_cercle
                    )
                ind += 1
        ## Junction dots --------------------------------------------------
        origine = self.depart_grille.copy()
        for i in range(self.grille.largeur + 1):
            origine[1] = self.depart_grille[1]  # back to the top of the column
            for j in range(self.grille.hauteur + 1):
                pg.draw.circle(
                    self.surf, COUL_CERCLE_JONCTION,
                    (round(origine[0]), round(origine[1])),
                    self.rayon_cercle_jonction
                )
                origine[1] += self.long_segment
            origine[0] += self.long_segment

    def dessiner_hud(self):
        """Draw the scores; the player to move has their score bracketed."""
        bleu, rouge = self.grille.score_detaille()
        # mark whose turn it is
        if self.grille.joueur_actuel:  # red
            txt_rouge = f"[{rouge}]"
            txt_bleu = f" {bleu} "
        else:  # blue
            txt_rouge = f" {rouge} "
            txt_bleu = f"[{bleu}]"
        # render both score texts
        score_b = self.police.render(txt_bleu, True, COUL_BLEU)
        score_r = self.police.render(txt_rouge, True, COUL_ROUGE)
        # assemble them on a single panel surface
        largeur = score_b.get_width() + score_r.get_width()
        panneau = pg.Surface((largeur, score_b.get_height()))
        panneau.fill(COUL_FOND)
        panneau.blit(score_b, (0, 0))
        panneau.blit(score_r, (largeur - score_r.get_width(), 0))
        # centre the panel at the top of the window
        rect = panneau.get_rect(midtop=(self.largeur / 2, 0))
        self.surf.blit(panneau, rect)

    def deter_segment(self, pos):
        """
        Return the index of the drawn segment at the given position,
        or None when the position hits no segment.

        ----
        pos (tuple): the (x, y) position to test
        """
        i = 0
        for rect in self.rects_segment:
            if rect.collidepoint(pos):
                return i
            i += 1
        return None

    def boucle_jeu(self):
        """Run the main event/draw loop until the window is closed."""
        clock = pg.time.Clock()
        run = True
        while run:
            ## event handling
            for event in pg.event.get():
                if event.type == pg.QUIT:
                    run = False
                # window resized
                elif event.type == pg.VIDEORESIZE:
                    self.maj_tailles(event.dict["size"])
                elif event.type == 32778:  # same resize event, buggy type id
                    self.maj_tailles((event.x, event.y))
                # key pressed: R restarts the game
                elif event.type == pg.KEYDOWN:
                    if event.key == pg.K_r:
                        self.grille.reset()
                # mouse click
                elif event.type == pg.MOUSEBUTTONDOWN:
                    if pg.mouse.get_pressed() == (1,0,0):
                        pos = pg.mouse.get_pos()
                        ind_seg = self.deter_segment(pos)
                        # only react to a click that lands on a segment
                        # and is a legal move
                        if ind_seg is not None and self.grille.coup_valide(ind_seg):
                            self.grille.jouer_coup(ind_seg)
                            if MODE_IA:
                                # busy cursor while the AI thinks
                                pg.mouse.set_cursor(pg.SYSTEM_CURSOR_WAIT)
                                self.grille.jouer_coup_ia()
                                pg.mouse.set_cursor(pg.SYSTEM_CURSOR_ARROW)
            # highlight the segment under the mouse
            pos = pg.mouse.get_pos()
            self.segment_hover = self.deter_segment(pos)
            ## redraw
            self.surf.fill(COUL_FOND)
            self.dessiner_grille()
            self.dessiner_hud()
            pg.display.flip()
            pg.display.set_caption(f"PIPOPIPETTE: IA vs HUMAIN - {round(clock.get_fps())} FPS")
            clock.tick(60)
        pg.quit()
def main():
    """Entry point: build the game UI and hand control to its event loop."""
    JeuPipopipette().boucle_jeu()
main()
| 33.022951 | 99 | 0.548203 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 19 10:23:29 2021
@author: caldwell ©
"""
import pygame as pg
from random import choice
# from pprint import pprint
## --- Paramètres de jeu ------------------------------
# si on joue contre l'IA (True) ou JvJ (False)
MODE_IA = True
# la taille de la grille de jeu (largeur, hauteur)
TAILLE_GRILLE = (3, 2)
# la taille de l'écran au lancement
TAILLE_ECRAN = (1080, 720)
# la profondeur de recursivité pour l'IA
MAX_PROFONDEUR = 5
## ----------------------------------------------------
## les couleures
# https://coolors.co/00d9ff-ff1f1f-67697c-253d5b-dbd053 (liens pas à jours)
COUL_FOND = (37, 61, 91)
COUL_CERCLE_JONCTION = (219, 208, 83)
COUL_SEGMENTS = (103, 105, 124)
COUL_SEGMENT_HOVER = (128, 131, 158)
COUL_BLEU = (0, 217, 255)
COUL_ROUGE = (255, 56, 56)
class Grille:
"""
Represente la grille du jeu.
----
taille (tuple): taille de la grille (largeur, hauteur)
© Raphaël
"""
def __init__(self, taille, *, copie=None):
# les variables de bases
self.largeur, self.hauteur = taille
# si on est en train de copier la grille
if copie is not None:
# copie du joueur actuel
self.joueur_actuel = copie.joueur_actuel
# copie du nb de tours
self.nb_tour = copie.nb_tour
# copie des segments
self.segments = []
for rang in copie.segments:
nrang = []
for rang_indiv in rang:
nrang.append(rang_indiv.copy())
self.segments.append(nrang)
self.carres = copie.carres # pas besoin de copier comme cette liste ne change pas
self.table_acces = copie.table_acces # pareil pour ce dictionaire
self.carres_gagnes = copie.carres_gagnes.copy()
return # fin de la fonction, on a déjà tout ce qu'on a besoin
self.setup()
def setup(self):
"""
Initialise la grille.
"""
self.joueur_actuel = 0 # 0 bleu, 1 rouge
self.nb_tour = 0 # nb de tours depuis le début de la partie
## Création des segments
# par lignes, il y aura toujours un segment de plus que la largeur,
# d'ou le +1 (et bien sur pareil pour les colones et la hauteur)
# pour accelerer l'acces aux segments:
# pour chaques indices de segments on peut obtenir sa localisation
# dans self.segments: (ligne-colone, rang, pos dans le rang)
self.table_acces = {}
ind_segment = 0
# 1ère étape, ajout des lignes (gauche-droite)
lignes = []
for i in range(self.hauteur):
rang_indiv = []
for j in range(self.largeur + 1):
rang_indiv.append(None)
self.table_acces[ind_segment] = (0, i, j)
ind_segment += 1
lignes.append(rang_indiv)
# 2nde étape, ajout des colones (haut-bas)
colones = []
for i in range(self.largeur):
rang_indiv = []
for j in range(self.hauteur + 1):
rang_indiv.append(None)
self.table_acces[ind_segment] = (1, i, j)
ind_segment += 1
colones.append(rang_indiv)
# 3ème étape, on rassemble tout ensemble
self.segments = [lignes, colones]
## Création des carrés
# un carré n'est qu'autre que 4 segments mis en relations
# on determine a l'avance le dernier segment vertical
dernier_vertical = self.largeur + (self.largeur + 1) * (self.hauteur - 1)
self.carres = []
for j in range(self.hauteur):
for i in range(self.largeur):
carre = []
# ajout des deux segments verticaux
# voir en bas pour comprendre le fonctionement (sauf que
# cette fois ci c'est aux changements de lignes qu'il y
# a le décalage d'indices)
carre.append(j * (self.largeur + 1) + i)
carre.append(j * (self.largeur + 1) + i + 1)
# pour les deux autres horizontaux
# on a tout d'abord besoin de connaitre l'indice du dernier
# segment horizontal. puis de la c'est la même chose qu'au
# dessus: quand on change de colone il faut "ajouter"
# le nombre d'indice par colones (= hauteur + 1)
carre.append(dernier_vertical + j + i * (self.hauteur + 1) + 1)
carre.append(dernier_vertical + j + i * (self.hauteur + 1) + 2)
self.carres.append(carre)
# pour savoir qui a gagné chaques carres
self.carres_gagnes = [None] * len(self.carres)
def reset(self):
"""
Reinitialise la grille.
"""
self.setup()
def copie(self):
"""
Produit une copie de la grille et la renvoie.
"""
grille = Grille((self.largeur, self.hauteur), copie=self)
return grille
def changer_joueur(self):
"""
Change de joueur.
"""
if self.joueur_actuel == 0:
self.joueur_actuel = 1
self.nb_tour += 1
else:
self.joueur_actuel = 0
def get_segment(self, indice):
"""
Renvoie le contenu du segment à l'indice indiqué.
----
indice (int): L'indice du segment
"""
# si il y a une erreur, l'indice est invalide
orientation, rang, pos_rang = self.table_acces[indice]
return self.segments[orientation][rang][pos_rang]
def set_segment(self, indice, couleur):
"""
Change la couleur d'un segment a l'indice indiqué.
----
...
"""
# si il y a une erreur, l'indice est invalide
orientation, rang, pos_rang = self.table_acces[indice]
self.segments[orientation][rang][pos_rang] = couleur
def get_carres(self, ind_segment):
"""
Renvoie les indices de tout les carrés qui possedent ce segment.
"""
carres = []
for ind, carre in enumerate(self.carres):
if ind_segment in carre:
carres.append(ind)
if len(carres) == 0:
raise IndexError(f"Indice invalide ({ind_segment})")
return carres
def carre_rempli(self, ind_carre):
"""
Verifie si un carre est rempli (les 4 segments sont coloriés,
differents de None).
"""
carre = self.carres[ind_carre]
for seg in carre:
if self.get_segment(seg) is None:
# le carre n'est pas rempli
return False
return True
def score_detaille(self):
"""
Calcule le score de chaques joueurs.
"""
bleu = 0
rouge = 0
for carre in self.carres_gagnes:
if carre == 0:
bleu += 1
elif carre == 1:
rouge += 1
return bleu, rouge
def calculer_score(self):
"""
Calcule le score de la partie.
Un score < 0 signifie que les bleus ont l'avantage, et le cas contraire les rouges.
"""
bleu, rouge = self.score_detaille()
return rouge - bleu
def partie_finie(self):
"""
Indique si la partie est finie (plus aucunes cases libres).
"""
for ind, chemin in self.table_acces.items():
orientation, rang, pos_rang = chemin
seg = self.segments[orientation][rang][pos_rang]
if seg is None:
return False
return True # on a trouvé aucun "None"
def coups_possibles_ia(self):
"""
Retourne les indices de tout les segments libres.
"""
segments_libres = []
for ind, chemin in self.table_acces.items():
orientation, rang, pos_rang = chemin
seg = self.segments[orientation][rang][pos_rang]
if seg is None:
segments_libres.append(ind)
return segments_libres
def calcul_coup_ia(self, profondeur=0):
"""
Calcule le meilleur coup a jouer pour l'IA.
"""
# condition de sortie
if self.partie_finie() or profondeur == MAX_PROFONDEUR:
return self.calculer_score(), None
## le minimax
choix_coups = []
for segment in self.coups_possibles_ia():
# on copie la grille actuelle
copie_grille = self.copie()
# on joue un coup et on analyse le meilleur score qu'il peut nous
# offrir en fonction des suivants
copie_grille.jouer_coup(segment)
score, _ = copie_grille.calcul_coup_ia(profondeur + 1)
choix_coups.append((score, segment))
# on choisi le meilleur coup en fonction du joueur actuel:
# le joueur bleu veut le score le plus bas possible
# et le rouge veut l'inverse
if self.joueur_actuel == 0: # bleu
meilleur_choix = min(choix_coups)
else: # rouge
meilleur_choix = max(choix_coups)
return meilleur_choix
def jouer_coup_ia(self):
"""
Joue le meilleur coup possible pour que les humains se preparent
psycologiquement à se faire renverser et dominer par les formes de
vie superieures que sont les IA.
"""
# si c'est le premier tour et que la grille est assez grande on peut
# se permettre de jouer un coup aléatoire
if self.nb_tour == 1 and (self.largeur, self.hauteur) >= (2, 2):
choix = choice(self.coups_possibles_ia())
self.jouer_coup(choix)
return
# la boucle sert a s'assurer que le bot joue une 2nde fois si il viens
# de remporter un carré
while self.joueur_actuel == 1 and not self.partie_finie(): # hard codé pour le rouge...
_, meilleur_coup = self.calcul_coup_ia()
if meilleur_coup is not None: # surviens en fin de partie
self.jouer_coup(meilleur_coup)
def coup_valide(self, ind_segment):
"""
Vérifie si un coup est valide (personne n'y a joué avant).
"""
return self.get_segment(ind_segment) is None
def jouer_coup(self, ind_segment):
"""
Joue un coup pendant une partie.
Le coups doit obligatoirement être valide (testé avec self.coup_valide).
"""
# on change le segment
self.set_segment(ind_segment, self.joueur_actuel)
## verification si un carré a été gagné
changement = True
carres = self.get_carres(ind_segment)
for carre in carres:
# on verifie si le coup du joueur rempli un carre, et dans ce cas
# il le remporte et peut jouer à nouveau
if self.carre_rempli(carre):
self.carres_gagnes[carre] = self.joueur_actuel
changement = False
# et finalement on peut changer de joueur
if changement:
self.changer_joueur()
class JeuPipopipette:
"""
L'interface graphique du jeu.
"""
def __init__(self):
# initialisation de pygame
pg.init()
# grille du jeu
self.grille = Grille(TAILLE_GRILLE)
# variables des tailles
self.maj_tailles(TAILLE_ECRAN)
# contiens les objets rectangles des segments, utilisé pour determiner
# si on clique sur un segment
self.rects_segment = []
# pour que les segments soient en surbrillance quand on passe la
# souris dessus
self.segment_hover = None
# creation de la surface d'affichage
self.surf = pg.display.set_mode(TAILLE_ECRAN, pg.RESIZABLE)
def maj_tailles(self, taille_ecran):
"""
Crée ou met à jour les variables des tailles.
"""
# taille ecran
self.largeur, self.hauteur = taille_ecran
# le coté de l'ecran le plus petit (comme il est en numerateur)
cote_min = min(self.largeur, self.hauteur)
# le coté de la grille le plus grand (comme il est en denominateur)
cote_max = max(self.grille.largeur, self.grille.hauteur)
# les segments
self.long_segment = cote_min / (1.2 * cote_max)
# self.long_segment = self.hauteur / 6
self.larg_segment = round(self.long_segment / 8)
# les cercles
self.rayon_cercle = round(self.long_segment / 3.5)
self.rayon_cercle_jonction = round(self.long_segment / 5.5)
# calcul de la position du coin en haut a gauche de la grille, pour la centrer.
# pour dessiner la grille, tout part de la
x = self.largeur / 2 - self.grille.largeur * self.long_segment / 2
y = self.hauteur / 2 - self.grille.hauteur * self.long_segment / 2
self.depart_grille = [x, y]
# texte
self.police = pg.font.SysFont("Impact", round(cote_min / 12))
def get_couleur(self, objet, ind=None):
"""
Renvoie la couleur a utiliser pour un objet (segment/carré) donné.
"""
if objet == 0: # bleu
couleur = COUL_BLEU
elif objet == 1:
couleur = COUL_ROUGE
else:
if self.segment_hover == ind:
couleur = COUL_SEGMENT_HOVER
else:
couleur = COUL_SEGMENTS
return couleur
def dessiner_grille(self):
"""
Affiche la grille du jeu.
"""
self.rects_segment.clear() # enlever ?
lignes, colones = self.grille.segments
## Tracage des segments -------------------------------------------
depart_l = self.depart_grille.copy()
depart_c = self.depart_grille.copy()
indice_segment = 0 # pour la surbrillance
# on trace les traits verticaux
for rangee in lignes:
for ind_seg, segment in enumerate(rangee):
couleur = self.get_couleur(segment, indice_segment)
indice_segment += 1
rect = pg.draw.line(
self.surf, couleur,
(depart_l[0] + (self.long_segment * ind_seg), depart_l[1]),
(depart_l[0] + (self.long_segment * ind_seg), depart_l[1] + self.long_segment),
self.larg_segment
)
# si c'est vide, pas besoin d'ajouter les rects
# if not self.rects_segment:
self.rects_segment.append(rect)
depart_l[1] += self.long_segment
# on trace les traits horizontaux
for rangee in colones:
for ind_seg, segment in enumerate(rangee):
couleur = self.get_couleur(segment, indice_segment)
indice_segment += 1
rect = pg.draw.line(
self.surf, couleur,
(depart_c[0], depart_c[1] + (self.long_segment * ind_seg)),
(depart_c[0] + self.long_segment, depart_c[1] + (self.long_segment * ind_seg)),
self.larg_segment
)
# if not self.rects_segment:
self.rects_segment.append(rect)
depart_c[0] += self.long_segment
## Remplissage des carres gagnés ----------------------------------
ind = 0
for j in range(self.grille.hauteur):
for i in range(self.grille.largeur):
carre = self.grille.carres_gagnes[ind]
if carre is not None:
couleur = self.get_couleur(carre)
topleft = (
self.depart_grille[0] + self.long_segment * (i + 0.5),
self.depart_grille[1] + self.long_segment * (j + 0.5)
)
pg.draw.circle(
self.surf, couleur,
(round(topleft[0]), round(topleft[1])),
self.rayon_cercle
)
ind += 1
## Tracage des cercles aux jonctions de segments ------------------
origine = self.depart_grille.copy()
for i in range(self.grille.largeur + 1):
origine[1] = self.depart_grille[1] # reset
for j in range(self.grille.hauteur + 1):
pg.draw.circle(
self.surf, COUL_CERCLE_JONCTION,
(round(origine[0]), round(origine[1])),
self.rayon_cercle_jonction
)
origine[1] += self.long_segment
origine[0] += self.long_segment
def dessiner_hud(self):
"""
Affiche les scores et le joueur actuel.
"""
bleu, rouge = self.grille.score_detaille()
# pour connaitre à qui le tour
if self.grille.joueur_actuel: # rouge
txt_rouge = f"[{rouge}]"
txt_bleu = f" {bleu} "
else: # bleu
txt_rouge = f" {rouge} "
txt_bleu = f"[{bleu}]"
# creation des textes de score
score_b = self.police.render(txt_bleu, True, COUL_BLEU)
score_r = self.police.render(txt_rouge, True, COUL_ROUGE)
# le "panneau" pour tout mettre ensemble
largeur = score_b.get_width() + score_r.get_width()
panneau = pg.Surface((largeur, score_b.get_height()))
panneau.fill(COUL_FOND)
panneau.blit(score_b, (0, 0))
panneau.blit(score_r, (largeur - score_r.get_width(), 0))
# on le centre
rect = panneau.get_rect(midtop=(self.largeur / 2, 0))
self.surf.blit(panneau, rect)
def deter_segment(self, pos):
"""
Determine si il y a un segment a la position donnée.
Renvoie None si il y a aucun segment.
----
pos (tuple): la position a verifier
"""
i = 0
for rect in self.rects_segment:
if rect.collidepoint(pos):
return i
i += 1
return None
def boucle_jeu(self):
"""
La boucle du jeu.
"""
clock = pg.time.Clock()
run = True
while run:
## gestion des evenements
for event in pg.event.get():
if event.type == pg.QUIT:
run = False
# on change la taille de l'écran
elif event.type == pg.VIDEORESIZE:
self.maj_tailles(event.dict["size"])
elif event.type == 32778: # la meme chose mais bug
self.maj_tailles((event.x, event.y))
# on enfonce une touche
elif event.type == pg.KEYDOWN:
if event.key == pg.K_r:
self.grille.reset()
# clic
elif event.type == pg.MOUSEBUTTONDOWN:
if pg.mouse.get_pressed() == (1,0,0):
pos = pg.mouse.get_pos()
ind_seg = self.deter_segment(pos)
# si on a cliqué sur un segment valide et pas dans le vide
# et que le coup sois valide
if ind_seg is not None and self.grille.coup_valide(ind_seg):
self.grille.jouer_coup(ind_seg)
if MODE_IA:
pg.mouse.set_cursor(pg.SYSTEM_CURSOR_WAIT)
self.grille.jouer_coup_ia()
pg.mouse.set_cursor(pg.SYSTEM_CURSOR_ARROW)
# pour mettre les segments en surbrillance
pos = pg.mouse.get_pos()
self.segment_hover = self.deter_segment(pos)
## Maj de l'ecran...
self.surf.fill(COUL_FOND)
self.dessiner_grille()
self.dessiner_hud()
pg.display.flip()
pg.display.set_caption(f"PIPOPIPETTE: IA vs HUMAIN - {round(clock.get_fps())} FPS")
clock.tick(60)
pg.quit()
def main():
    """Entry point: build the game UI and hand control to its event loop."""
    JeuPipopipette().boucle_jeu()
main()
| 1,532 | 0 | 52 |
18bdb26f9ceaedbf555e6c644de7130ee2b447ae | 1,800 | py | Python | examples/tilechain_coals.py | pauljones0/lifxlan | 292cdbbe3515da4b40cf348d91b7a1480832fff9 | [
"MIT"
] | 464 | 2015-07-08T11:44:12.000Z | 2022-03-22T04:18:19.000Z | examples/tilechain_coals.py | pauljones0/lifxlan | 292cdbbe3515da4b40cf348d91b7a1480832fff9 | [
"MIT"
] | 147 | 2015-07-08T15:33:32.000Z | 2022-02-14T17:46:38.000Z | examples/tilechain_coals.py | pauljones0/lifxlan | 292cdbbe3515da4b40cf348d91b7a1480832fff9 | [
"MIT"
] | 134 | 2015-09-25T12:20:26.000Z | 2022-03-23T08:33:11.000Z | from lifxlan import *
from random import randint, betavariate
from time import sleep
if __name__=="__main__":
main()
| 36.734694 | 133 | 0.582222 | from lifxlan import *
from random import randint, betavariate
from time import sleep
def main():
    """Animate a glowing-coals effect on the first LIFX TileChain light on the LAN."""
    lan = LifxLAN()
    tilechain_lights = lan.get_tilechain_lights()
    if len(tilechain_lights) != 0:
        t = lan.get_tilechain_lights()[0]  # grab the first tilechain
        print("Selected TileChain light: {}".format(t.get_label()))
        # Remember the current colours so they can be restored on exit.
        original_colors = t.get_tilechain_colors()
        (cols, rows) = t.get_canvas_dimensions()
        hue = 0
        # Fill the whole canvas with random ember colours.
        coal_colors = []
        for row in range(rows):
            color_row = []
            for col in range(cols):
                color_row.append(get_fire_color())
                # NOTE(review): hue is accumulated but never used below
                hue += int(65535.0/(cols*rows))
            coal_colors.append(color_row)
        t.project_matrix(coal_colors)
        duration_ms = 400
        try:
            while(True):
                # Re-randomise ~20% of the tiles each frame (rounded down
                # to an even sample size).
                proportion_change = 0.2
                sample_size = int((rows * cols) * proportion_change)
                if sample_size % 2 == 1:
                    sample_size = int(sample_size - 1)
                col_samples = [randint(0, cols-1) for i in range(sample_size)]
                row_samples = [randint(0, rows-1) for i in range(sample_size)]
                for i in range(0, sample_size):
                    coal_colors[row_samples[i]][col_samples[i]] = get_fire_color()
                t.project_matrix(coal_colors, duration_ms, rapid=True)
                sleep(max(duration_ms/2000.0, 0.05))
        except KeyboardInterrupt:
            # Ctrl-C: put the light back the way it was.
            t.set_tilechain_colors(original_colors)
            print("Done.")
    else:
        print("No TileChain lights found.")
def get_fire_color():
    """Return a random ember-like HSBK tuple: (hue, saturation, brightness, kelvin)."""
    # Beta-distributed hue/brightness keep most tiles dim and reddish,
    # with occasional bright flare-ups.
    hue = int(800 + (5000 * betavariate(0.2, 0.9)))
    saturation = randint(60000, 65535)
    brightness = int(65535 * betavariate(0.05, 1))
    kelvin = randint(2500, 3500)
    return (hue, saturation, brightness, kelvin)
if __name__=="__main__":
main()
| 1,632 | 0 | 46 |
5177877a677eab63ecd6710fa86ccb89f8ec69d7 | 23,158 | py | Python | webroot/cgi-bin/gameslist.py | elocemearg/atropine | 894010bcc89d4e6962cf3fc15ef526068c38898d | [
"CC-BY-4.0"
] | null | null | null | webroot/cgi-bin/gameslist.py | elocemearg/atropine | 894010bcc89d4e6962cf3fc15ef526068c38898d | [
"CC-BY-4.0"
] | null | null | null | webroot/cgi-bin/gameslist.py | elocemearg/atropine | 894010bcc89d4e6962cf3fc15ef526068c38898d | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/python3
import sys;
import cgicommon;
import urllib.request, urllib.parse, urllib.error;
import cgi;
import cgitb;
import os;
import re;
import random;
# Strategies for resolving a submitted score that conflicts with what is
# already recorded (names suggest: overwrite unconditionally, never blank
# an existing score, only fill currently-blank scores, or discard the
# submission) — confirm against the score-saving code below.
CONFLICT_STRATEGY_FORCE = 0
CONFLICT_STRATEGY_DO_NOT_EMBLANKIFY = 1
CONFLICT_STRATEGY_ONLY_FILL_BLANKS = 2
CONFLICT_STRATEGY_DISCARD = 3
# Show CGI tracebacks in the browser, and emit the HTTP header before any
# HTML output.
cgitb.enable();
cgicommon.writeln("Content-Type: text/html; charset=utf-8");
cgicommon.writeln("");
baseurl = "/cgi-bin/gameslist.py";
# Parse the query string / POST body.
form = cgi.FieldStorage();
tourney_name = form.getfirst("tourney");
tourney = None;
request_method = os.environ.get("REQUEST_METHOD", "");
# countdowntourney lives outside the default module path, so the path has
# to be set before it can be imported.
cgicommon.set_module_path();
import countdowntourney;
cgicommon.print_html_head("Games: " + str(tourney_name));
cgicommon.writeln("<body>");
# This page must only be served to the local machine.
cgicommon.assert_client_from_localhost()
# Without a tourney name there is nothing to show: emit an error page and
# stop the script.
if tourney_name is None:
    cgicommon.writeln("<h1>No tourney specified</h1>");
    cgicommon.writeln("<p><a href=\"/cgi-bin/home.py\">Home</a></p>");
    cgicommon.writeln("</body></html>");
    sys.exit(0);
try:
tourney = countdowntourney.tourney_open(tourney_name, cgicommon.dbdir);
cgicommon.show_sidebar(tourney);
cgicommon.writeln("<div class=\"mainpane\">");
# If a round is selected, show the scores for that round, in editable
# boxes so they can be changed.
round_no = None;
if "round" in form:
try:
round_no = int(form.getfirst("round"));
except ValueError:
cgicommon.writeln("<h1>Invalid round number</h1>");
cgicommon.writeln("<p>\"%s\" is not a valid round number.</p>");
if round_no is not None:
games = tourney.get_games(round_no=round_no);
rounds = tourney.get_rounds();
round_name = None;
last_modified_element = None;
for r in rounds:
if r["num"] == round_no:
round_name = r.get("name", None);
break;
if not round_name:
round_name = "Round " + str(round_no);
remarks = dict();
cgicommon.writeln("<h1>Score editor: %s</h1>" % cgicommon.escape(round_name));
cgicommon.writeln("<p>");
cgicommon.writeln("<a href=\"/cgi-bin/fixtureedit.py?tourney=%s&round=%d\">Edit fixtures</a>" % (urllib.parse.quote_plus(tourney_name), round_no));
cgicommon.writeln("</p>");
cgicommon.writeln("<script>")
cgicommon.writeln("""function set_unsaved_data_warning() {
if (window.onbeforeunload == null) {
window.onbeforeunload = function() {
return 'You have modified scores on this page and not saved them.';
};
}
}
function unset_unsaved_data_warning() {
window.onbeforeunload = null;
}
function score_modified(control_name) {
document.getElementById('lastmodified').value = control_name;
document.getElementById(control_name).style.backgroundColor = '#ffffcc';
set_unsaved_data_warning();
}
""");
cgicommon.writeln("</script>")
conflict_resolution = False
conflict_strategy = int_or_none(form.getfirst("conflictstrategy"))
stored_revision_no = tourney.get_game_table_revision_no(round_no)
stored_revision_timestamp = tourney.get_game_table_revision_time(round_no, stored_revision_no)
if "save" in form or "randomresults" in form:
# If the user clicked Save, then save the new scores to the
# database.
last_modified_element = form.getfirst("lastmodified");
if last_modified_element:
if not re.match("^gamescore_[0-9]+_[0-9]+$", last_modified_element):
last_modified_element = None;
submitted_revision_no = int_or_none(form.getfirst("revision"))
if "randomresults" not in form and submitted_revision_no < stored_revision_no:
# One or more games in this round have changed since the user
# last refreshed the page. Ask the user how we should cope with
# this.
cgicommon.show_warning_box("<p>The results for this round have been modified in another window since you last refreshed this page.</p>" +
"<p>The current state of the games is shown below, with your changes on the right-hand side.</p>" +
"<p>What do you want to do with your changes? Select one of the options below, then Resolve Conflicts.</p>");
show_conflict_resolution_box(tourney, games, round_no, stored_revision_no, stored_revision_timestamp, form)
conflict_resolution = True
else:
for g in games:
if "randomresults" in form and not g.is_complete():
set_random_score(g, 15 if int_or_none(form.getfirst("scrabbleresults")) else 9, int_or_none(form.getfirst("scrabbleresults")));
else:
score = form.getfirst("gamescore_%d_%d" % (g.round_no, g.seq));
parsed_score = parse_score(score)
if parsed_score is None:
remarks[(g.round_no, g.seq)] = "Invalid score: %s" % (score)
else:
apply_change = True
if conflict_strategy == CONFLICT_STRATEGY_DISCARD:
# Don't overwrite any changes
apply_change = False
elif conflict_strategy == CONFLICT_STRATEGY_ONLY_FILL_BLANKS:
# Prefer our changes only when that would fill
# in an unplayed game with a result
if g.is_complete():
apply_change = False
elif conflict_strategy == CONFLICT_STRATEGY_DO_NOT_EMBLANKIFY:
# Prefer our changes except where that would
# replace a filled-in result with a blank one
if parsed_score[0] is None or parsed_score[1] is None:
apply_change = False
# Otherwise, always prefer our changes
if apply_change:
g.set_score(parsed_score[0], parsed_score[1], parsed_score[2])
tourney.merge_games(games);
stored_revision_no = tourney.get_game_table_revision_no(round_no)
num_divisions = tourney.get_num_divisions()
cgicommon.writeln("<div class=\"scorestable\">");
# If we've put up the conflict resolution form, then what we print here
# isn't a form but an ordinary table showing the current results and
# the user's submission.
# The usual case is not conflict_resolution, where we put the game list
# form here.
if not conflict_resolution:
cgicommon.writeln("<form method=\"POST\" action=\"%s?tourney=%s&round=%d\">" % (baseurl, urllib.parse.quote_plus(tourney_name), round_no));
cgicommon.writeln("<input type=\"hidden\" name=\"tourney\" value=\"%s\" />" % cgicommon.escape(tourney_name, True));
cgicommon.writeln("<input type=\"hidden\" name=\"round\" value=\"%d\" />" % round_no);
cgicommon.writeln("<input type=\"hidden\" id=\"lastmodified\" name=\"lastmodified\" value=\"\" />");
cgicommon.writeln("<input type=\"hidden\" name=\"revision\" value=\"%d\" />" % (stored_revision_no))
for div_index in range(num_divisions):
if num_divisions > 1:
cgicommon.writeln("<h2>%s</h2>" % (cgicommon.escape(tourney.get_division_name(div_index))))
if tourney.are_players_assigned_teams():
team_scores = tourney.get_team_scores()
cgicommon.show_team_score_table(team_scores)
cgicommon.writeln('<br />')
div_games = tourney.get_games(round_no=round_no, only_players_known=False, division=div_index);
if conflict_resolution:
for g in games:
score = form.getfirst("gamescore_%d_%d" % (g.round_no, g.seq));
parsed_score = parse_score(score)
if parsed_score is None:
remarks[(g.round_no, g.seq)] = "Invalid score: %s" % (score)
else:
# If the score the user has entered is different
# from the score in the table, display the
# user's submitted score in the Remarks column.
if not ((g.s1 is None and g.s2 is None and parsed_score[0] is None and parsed_score[1] is None) or (g.s1 == parsed_score[0] and g.s2 == parsed_score[1] and g.tb == parsed_score[2]) ):
player_names = g.get_player_names()
if parsed_score[0] is None or parsed_score[1] is None:
remarks[(g.round_no, g.seq)] = "%s - %s" % (player_names[0], player_names[1])
else:
remarks[(g.round_no, g.seq)] = "%s %d%s - %d%s %s" % (
player_names[0],
parsed_score[0],
"*" if (parsed_score[0] > parsed_score[1] and parsed_score[2]) else "",
parsed_score[1],
"*" if (parsed_score[1] >= parsed_score[0] and parsed_score[2]) else "",
player_names[1])
cgicommon.show_games_as_html_table(div_games, editable=False,
remarks=remarks, include_round_column=False,
round_namer=None,
player_to_link=lambda x : cgicommon.player_to_link(x, tourney.get_name(), False, True),
remarks_heading="Your submission")
else:
cgicommon.show_games_as_html_table(div_games, editable=True,
remarks=remarks, include_round_column=False,
round_namer=None,
player_to_link=lambda x : cgicommon.player_to_link(x, tourney.get_name(), False, True))
if not conflict_resolution:
cgicommon.writeln("<p><input type=\"submit\" name=\"save\" value=\"Save\" onclick=\"unset_unsaved_data_warning();\" /></p>");
if form.getfirst("showrandomresultsbutton"):
cgicommon.writeln("<p><input type=\"submit\" name=\"randomresults\" value=\"Random Results\" /></p>");
elif form.getfirst("showscrabbleresultsbutton"):
cgicommon.writeln("<p><input type=\"submit\" name=\"randomresults\" value=\"Random Scrabble-ish Results\" /></p>");
cgicommon.writeln("<p><input type=\"hidden\" name=\"scrabbleresults\" value=\"1\" /></p>");
cgicommon.writeln("</form>")
focus = None;
if last_modified_element:
m = re.match("^gamescore_([0-9]+)_([0-9]+)$", last_modified_element)
if m:
lastmod_index = (int(m.group(1)), int(m.group(2)));
# The box with focus should be the next unfilled box equal
# to or after the one that was last modified. If they're all
# filled, put the focus on the first box.
for i in range(0, len(games)):
if games[i].round_no == lastmod_index[0] and games[i].seq == lastmod_index[1]:
# We've found the control we last modified;
for j in range(0, len(games)):
g = games[(i + j) % len(games)]
if not g.is_complete():
focus = (g.round_no, g.seq)
break
break
if games:
if focus is None:
focus = (games[0].round_no, games[0].seq);
control_with_focus = "gamescore_%d_%d" % (focus[0], focus[1]);
cgicommon.writeln("<script>")
cgicommon.writeln("document.getElementById('" + control_with_focus + "').focus();")
cgicommon.writeln("</script>")
cgicommon.writeln("</div>"); #scorestable
cgicommon.writeln("</div>"); #mainpane
except countdowntourney.TourneyException as e:
cgicommon.show_tourney_exception(e);
cgicommon.writeln("</body>");
cgicommon.writeln("</html>");
sys.exit(0);
| 47.748454 | 310 | 0.60912 | #!/usr/bin/python3
import sys;
import cgicommon;
import urllib.request, urllib.parse, urllib.error;
import cgi;
import cgitb;
import os;
import re;
import random;
CONFLICT_STRATEGY_FORCE = 0
CONFLICT_STRATEGY_DO_NOT_EMBLANKIFY = 1
CONFLICT_STRATEGY_ONLY_FILL_BLANKS = 2
CONFLICT_STRATEGY_DISCARD = 3
def int_or_none(s):
    """Parse *s* as an integer, returning None if it cannot be parsed.

    Accepts anything int() accepts (strings, floats, ...); returns None for
    None, non-numeric strings, and other unconvertible values.
    """
    try:
        return int(s)
    except (TypeError, ValueError):
        # Only conversion failures map to None; a bare `except` here would
        # also swallow KeyboardInterrupt/SystemExit.
        return None
def parse_score(score):
    """Parse a score string such as "70-68" or "70* - 68" into (s1, s2, tb).

    Returns:
        (None, None, False) if *score* is empty or whitespace-only (the game
        has not been played yet);
        (s1, s2, tb) on success, where tb is True if either side carries a
        "*" marking a tiebreak;
        None if the string is not a valid score.

    We used to be pretty liberal about what a score looked like, until we
    started allowing negative scores. Now we have to be a bit more strict so
    that a regexp can always tell the difference between a minus sign that's
    intended to separate one score from another, and a minus sign that's
    intended to mean a score is negative.

    A score is a number (which may be negative), followed by an optional *,
    followed by a minus sign, followed by a number (which again may be
    negative), followed by an optional *. Any number of spaces are allowed
    between any of these tokens, but you can't put a space in the middle of
    a number, or between a negative sign and a number.
    """
    # Raw strings for the patterns: "\s" / "\d" in ordinary string literals
    # are invalid escape sequences and raise warnings (errors in future
    # Python versions).
    if not score or re.match(r"^\s*$", score):
        return (None, None, False)
    m = re.match(r"^\s*(-?\d+)\s*(\*?)\s*-\s*(-?\d+)\s*(\*?)\s*$", score)
    if not m:
        return None
    s1 = int(m.group(1))
    s2 = int(m.group(3))
    tb = m.group(2) == "*" or m.group(4) == "*"
    return (s1, s2, tb)
def set_random_score(game, rounds, scrabble):
    """Fill in a random but rating-weighted result for *game*.

    game: game object whose players' ratings (game.p1/p2.rating) weight the
        outcome; the result is stored via game.set_score().
    rounds: number of rounds to simulate.
    scrabble: if truthy, inflate round scores to Scrabble-ish magnitudes and
        allow the game to end level (no tiebreak is generated).
    """
    r1 = game.p1.rating;
    r2 = game.p2.rating;
    # Both players rated zero (presumably byes/prunes — TODO confirm):
    # record a 0-0 non-tiebreak result.
    if r1 + r2 == 0:
        game.set_score(0, 0, False);
        return;
    # A uniform draw x decides each round: p1 alone scores if x is below
    # p1_threshold, p2 alone scores if x is above p2_threshold, and both
    # score in between. The 0.8 factors shrink each exclusive-win region so
    # shared rounds are reasonably common.
    p1_threshold = float(r1) / float(r1 + r2);
    p2_threshold = p1_threshold;
    p1_threshold *= 0.8
    p2_threshold = 1 - ((1 - p2_threshold) * 0.8)
    #print "%g %g %.3f %.3f" % (game.p1.rating, game.p2.rating, p1_threshold, p2_threshold);
    p1_score = 0;
    p2_score = 0;
    for i in range(rounds):
        x = random.random();
        # Base round score 5..10; a rolled 9 becomes 18 one time in four,
        # otherwise 7, so 9 itself never appears as a round score.
        round_score = random.randint(5, 10);
        if round_score == 9:
            if random.randint(1, 4) == 1:
                round_score = 18;
            else:
                round_score = 7;
        if scrabble:
            # Scrabble-style scores run a few times higher per round.
            round_score *= random.randint(2, 4)
        # Zero-rated players never score, even in the "both score" band.
        if r1 > 0 and x < p1_threshold:
            p1_score += round_score;
        elif r2 > 0 and x > p2_threshold:
            p2_score += round_score;
        else:
            if r1 > 0:
                p1_score += round_score;
            if r2 > 0:
                p2_score += round_score;
    # Non-Scrabble games cannot end level: award a 10-point tiebreak to a
    # random player and flag the result as a tiebreak.
    if p1_score == p2_score and not(scrabble):
        if random.randint(0, 1) == 0:
            p1_score += 10;
        else:
            p2_score += 10;
        tb = True;
    else:
        tb = False;
    game.set_score(p1_score, p2_score, tb);
def show_conflict_resolution_box(tourney, games, round_no, stored_revision_no, stored_revision_timestamp, form):
    """Emit the HTML form asking the user how to resolve a revision conflict.

    Shown when the user's submitted scores were based on an older game-table
    revision than the one now stored. The form re-posts the user's original
    score submissions as hidden fields, together with a chosen conflict
    strategy (one of the CONFLICT_STRATEGY_* radio buttons), so the handler
    can merge them on the next request.

    tourney: tourney object (used for its name in the form action/fields).
    games: games of the affected round, used to re-emit the user's submitted
        "gamescore_<round>_<seq>" values as hidden inputs.
    round_no: number of the affected round.
    stored_revision_no / stored_revision_timestamp: current revision of the
        stored game table, displayed and re-submitted with the form.
    form: the cgi.FieldStorage carrying the user's original submission.
    """
    tourney_name = tourney.get_name()
    # Preserve a previously-chosen strategy across refreshes; default to the
    # "don't blank out existing results" strategy.
    existing_strategy = int_or_none(form.getfirst("conflictstrategy"))
    if existing_strategy is None:
        existing_strategy = CONFLICT_STRATEGY_DO_NOT_EMBLANKIFY
    # Client-side preview: updates the example table's "New result" cells as
    # the user switches strategy. The numeric cases mirror the
    # CONFLICT_STRATEGY_* constants.
    cgicommon.writeln("""
<script>
function update_conflict_resolution_example(value) {
    var blank_to_non_blank = document.getElementById("cr_blanktononblank");
    var non_blank_to_non_blank = document.getElementById("cr_nonblanktononblank");
    var non_blank_to_blank = document.getElementById("cr_nonblanktoblank");
    if (value == 0) { /* CONFLICT_STRATEGY_FORCE */
        blank_to_non_blank.innerHTML = "88-88";
        non_blank_to_non_blank.innerHTML = "88-88";
        non_blank_to_blank.innerHTML = "-";
    }
    else if (value == 1) { /* CONFLICT_STRATEGY_DO_NOT_EMBLANKIFY */
        blank_to_non_blank.innerHTML = "88-88";
        non_blank_to_non_blank.innerHTML = "88-88";
        non_blank_to_blank.innerHTML = "77-77";
    }
    else if (value == 2) { /* CONFLICT_STRATEGY_ONLY_FILL_BLANKS */
        blank_to_non_blank.innerHTML = "88-88";
        non_blank_to_non_blank.innerHTML = "77-77";
        non_blank_to_blank.innerHTML = "77-77";
    }
    else if (value == 3) { /* CONFLICT_STRATEGY_DISCARD */
        blank_to_non_blank.innerHTML = "-";
        non_blank_to_non_blank.innerHTML = "77-77";
        non_blank_to_blank.innerHTML = "77-77";
    }
}
</script>
""")
    cgicommon.writeln("<div class=\"conflictresolution\">")
    cgicommon.writeln("<form method=\"POST\" action=\"%s?tourney=%s&round=%d\">" % (baseurl, urllib.parse.quote_plus(tourney_name), round_no));
    cgicommon.writeln("<input type=\"hidden\" name=\"tourney\" value=\"%s\" />" % (cgicommon.escape(tourney_name, True)));
    cgicommon.writeln("<input type=\"hidden\" name=\"round\" value=\"%d\" />" % (round_no));
    cgicommon.writeln("<input type=\"hidden\" name=\"revision\" value=\"%d\" />" % (stored_revision_no))
    # Include the submitted scores in this conflict resolution form, so that
    # when the user presses "Resolve Conflicts" we remember what the original
    # submissions were.
    for g in games:
        input_name = "gamescore_%d_%d" % (g.round_no, g.seq)
        submitted_score = form.getfirst(input_name)
        if submitted_score is not None:
            cgicommon.writeln("<input type=\"hidden\" name=\"%s\" value=\"%s\" />" % (cgicommon.escape(input_name), cgicommon.escape(submitted_score, True)))
        # NOTE(review): this assignment is dead — `score` is never read, and
        # it duplicates the form lookup above. Candidate for removal.
        score = form.getfirst("gamescore_%d_%d" % (g.round_no, g.seq));
    cgicommon.writeln("<div class=\"conflictresolutiontoprow\">")
    cgicommon.writeln("Last conflicting modification occurred at: %s" % (cgicommon.escape(stored_revision_timestamp)))
    cgicommon.writeln("</div>")
    # Four radio buttons, one per CONFLICT_STRATEGY_* constant; the one
    # matching existing_strategy is pre-checked.
    cgicommon.writeln("<div class=\"conflictresolutionchoicerow\">")
    cgicommon.writeln("<div class=\"conflictresolutionradiobutton\">")
    cgicommon.writeln("<input type=\"radio\" name=\"conflictstrategy\" id=\"conflictstrategydiscard\" value=\"%d\" onchange=\"update_conflict_resolution_example(this.value)\" %s />" % (CONFLICT_STRATEGY_DISCARD, "checked" if existing_strategy == CONFLICT_STRATEGY_DISCARD else ""))
    cgicommon.writeln("</div>")
    cgicommon.writeln("<div class=\"conflictresolutionlabel\">")
    cgicommon.writeln("<label for=\"conflictstrategydiscard\">Discard my submission - go with what's currently in the database.</label>")
    cgicommon.writeln("</div>")
    cgicommon.writeln("</div>")
    cgicommon.writeln("<div class=\"conflictresolutionchoicerow\">")
    cgicommon.writeln("<div class=\"conflictresolutionradiobutton\">")
    cgicommon.writeln("<input type=\"radio\" name=\"conflictstrategy\" id=\"conflictstrategyfillblanks\" value=\"%d\" onchange=\"update_conflict_resolution_example(this.value)\" %s />" % (CONFLICT_STRATEGY_ONLY_FILL_BLANKS, "checked" if existing_strategy == CONFLICT_STRATEGY_ONLY_FILL_BLANKS else ""))
    cgicommon.writeln("</div>")
    cgicommon.writeln("<div class=\"conflictresolutionlabel\">")
    cgicommon.writeln("<label for=\"conflictstrategyfillblanks\">If a game currently has no result but my submission provides one, fill in that game's result with my submission. Discard any other changes.</label>")
    cgicommon.writeln("</div>")
    cgicommon.writeln("</div>")
    cgicommon.writeln("<div class=\"conflictresolutionchoicerow\">")
    cgicommon.writeln("<div class=\"conflictresolutionradiobutton\">")
    cgicommon.writeln("<input type=\"radio\" name=\"conflictstrategy\" id=\"conflictstrategydonotemblankify\" value=\"%d\" onchange=\"update_conflict_resolution_example(this.value);\" %s />" % (CONFLICT_STRATEGY_DO_NOT_EMBLANKIFY, "checked" if existing_strategy == CONFLICT_STRATEGY_DO_NOT_EMBLANKIFY else ""))
    cgicommon.writeln("</div>")
    cgicommon.writeln("<div class=\"conflictresolutionlabel\">")
    cgicommon.writeln("<label for=\"conflictstrategydonotemblankify\">If my submission has a result for a game, overwrite the existing result with my submission, but do not overwrite an existing result with a blank one.</label>")
    cgicommon.writeln("</div>")
    cgicommon.writeln("</div>")
    cgicommon.writeln("<div class=\"conflictresolutionchoicerow\">")
    cgicommon.writeln("<div class=\"conflictresolutionradiobutton\">")
    cgicommon.writeln("<input type=\"radio\" name=\"conflictstrategy\" id=\"conflictstrategyforce\" value=\"%d\" onchange=\"update_conflict_resolution_example(this.value);\" %s />" % (CONFLICT_STRATEGY_FORCE, "checked" if existing_strategy == CONFLICT_STRATEGY_FORCE else ""))
    cgicommon.writeln("</div>")
    cgicommon.writeln("<div class=\"conflictresolutionlabel\">")
    cgicommon.writeln("<label for=\"conflictstrategyforce\">Overwrite everything with my submission, even if that means overwriting existing results with blank results.</label>")
    cgicommon.writeln("</div>")
    cgicommon.writeln("</div>")
    # Worked example table showing the effect of the selected strategy.
    show_conflict_resolution_example(existing_strategy)
    cgicommon.writeln("<div class=\"conflictresolutionbottomrow\">")
    cgicommon.writeln("<div class=\"conflictresolutionsubmit\">")
    cgicommon.writeln("<input type=\"submit\" name=\"save\" value=\"Resolve Conflicts\" />")
    cgicommon.writeln("</div>")
    cgicommon.writeln("</div>")
    cgicommon.writeln("</form>")
    cgicommon.writeln("</div>")
def show_conflict_resolution_example(existing_strategy):
    """Emit an HTML table illustrating how *existing_strategy* (one of the
    CONFLICT_STRATEGY_* constants) combines a stored result with the user's
    submission, for three representative cases."""
    # Pre-compute the "New result" cell for each example row under the
    # currently-selected strategy. The element IDs are updated client-side
    # by update_conflict_resolution_example() when the selection changes.
    blank_to_non_blank = "88-88" if existing_strategy != CONFLICT_STRATEGY_DISCARD else "-"
    non_blank_to_non_blank = "88-88" if existing_strategy in (CONFLICT_STRATEGY_FORCE, CONFLICT_STRATEGY_DO_NOT_EMBLANKIFY) else "77-77"
    non_blank_to_blank = "-" if existing_strategy == CONFLICT_STRATEGY_FORCE else "77-77"
    out = cgicommon.writeln
    out("<div class=\"conflictresolutionexample\">")
    out("<div class=\"conflictresolutionexampletitle\">Example:</div>")
    out("<table class=\"conflictresolutionexampletable\">")
    out("<tr>")
    out("<th>Current result</th><th>Your submission</th><th>New result</th>")
    out("</tr>")
    out("<tr>")
    out("<td>-</td><td>88-88</td><td class=\"cr_newresultcol\" id=\"cr_blanktononblank\">%s</td>" % (blank_to_non_blank))
    out("</tr>")
    out("<tr>")
    out("<td>77-77</td><td>88-88</td><td class=\"cr_newresultcol\" id=\"cr_nonblanktononblank\">%s</td>" % (non_blank_to_non_blank))
    out("</tr>")
    out("<tr>")
    out("<td>77-77</td><td>-</td><td class=\"cr_newresultcol\" id=\"cr_nonblanktoblank\">%s</td>" % (non_blank_to_blank))
    out("</tr>")
    out("</table>")
    out("</div>")
cgitb.enable();
cgicommon.writeln("Content-Type: text/html; charset=utf-8");
cgicommon.writeln("");
baseurl = "/cgi-bin/gameslist.py";
form = cgi.FieldStorage();
tourney_name = form.getfirst("tourney");
tourney = None;
request_method = os.environ.get("REQUEST_METHOD", "");
cgicommon.set_module_path();
import countdowntourney;
cgicommon.print_html_head("Games: " + str(tourney_name));
cgicommon.writeln("<body>");
cgicommon.assert_client_from_localhost()
if tourney_name is None:
cgicommon.writeln("<h1>No tourney specified</h1>");
cgicommon.writeln("<p><a href=\"/cgi-bin/home.py\">Home</a></p>");
cgicommon.writeln("</body></html>");
sys.exit(0);
try:
tourney = countdowntourney.tourney_open(tourney_name, cgicommon.dbdir);
cgicommon.show_sidebar(tourney);
cgicommon.writeln("<div class=\"mainpane\">");
# If a round is selected, show the scores for that round, in editable
# boxes so they can be changed.
round_no = None;
if "round" in form:
try:
round_no = int(form.getfirst("round"));
except ValueError:
cgicommon.writeln("<h1>Invalid round number</h1>");
cgicommon.writeln("<p>\"%s\" is not a valid round number.</p>");
if round_no is not None:
games = tourney.get_games(round_no=round_no);
rounds = tourney.get_rounds();
round_name = None;
last_modified_element = None;
for r in rounds:
if r["num"] == round_no:
round_name = r.get("name", None);
break;
if not round_name:
round_name = "Round " + str(round_no);
remarks = dict();
cgicommon.writeln("<h1>Score editor: %s</h1>" % cgicommon.escape(round_name));
cgicommon.writeln("<p>");
cgicommon.writeln("<a href=\"/cgi-bin/fixtureedit.py?tourney=%s&round=%d\">Edit fixtures</a>" % (urllib.parse.quote_plus(tourney_name), round_no));
cgicommon.writeln("</p>");
cgicommon.writeln("<script>")
cgicommon.writeln("""function set_unsaved_data_warning() {
if (window.onbeforeunload == null) {
window.onbeforeunload = function() {
return 'You have modified scores on this page and not saved them.';
};
}
}
function unset_unsaved_data_warning() {
window.onbeforeunload = null;
}
function score_modified(control_name) {
document.getElementById('lastmodified').value = control_name;
document.getElementById(control_name).style.backgroundColor = '#ffffcc';
set_unsaved_data_warning();
}
""");
cgicommon.writeln("</script>")
conflict_resolution = False
conflict_strategy = int_or_none(form.getfirst("conflictstrategy"))
stored_revision_no = tourney.get_game_table_revision_no(round_no)
stored_revision_timestamp = tourney.get_game_table_revision_time(round_no, stored_revision_no)
if "save" in form or "randomresults" in form:
# If the user clicked Save, then save the new scores to the
# database.
last_modified_element = form.getfirst("lastmodified");
if last_modified_element:
if not re.match("^gamescore_[0-9]+_[0-9]+$", last_modified_element):
last_modified_element = None;
submitted_revision_no = int_or_none(form.getfirst("revision"))
if "randomresults" not in form and submitted_revision_no < stored_revision_no:
# One or more games in this round have changed since the user
# last refreshed the page. Ask the user how we should cope with
# this.
cgicommon.show_warning_box("<p>The results for this round have been modified in another window since you last refreshed this page.</p>" +
"<p>The current state of the games is shown below, with your changes on the right-hand side.</p>" +
"<p>What do you want to do with your changes? Select one of the options below, then Resolve Conflicts.</p>");
show_conflict_resolution_box(tourney, games, round_no, stored_revision_no, stored_revision_timestamp, form)
conflict_resolution = True
else:
for g in games:
if "randomresults" in form and not g.is_complete():
set_random_score(g, 15 if int_or_none(form.getfirst("scrabbleresults")) else 9, int_or_none(form.getfirst("scrabbleresults")));
else:
score = form.getfirst("gamescore_%d_%d" % (g.round_no, g.seq));
parsed_score = parse_score(score)
if parsed_score is None:
remarks[(g.round_no, g.seq)] = "Invalid score: %s" % (score)
else:
apply_change = True
if conflict_strategy == CONFLICT_STRATEGY_DISCARD:
# Don't overwrite any changes
apply_change = False
elif conflict_strategy == CONFLICT_STRATEGY_ONLY_FILL_BLANKS:
# Prefer our changes only when that would fill
# in an unplayed game with a result
if g.is_complete():
apply_change = False
elif conflict_strategy == CONFLICT_STRATEGY_DO_NOT_EMBLANKIFY:
# Prefer our changes except where that would
# replace a filled-in result with a blank one
if parsed_score[0] is None or parsed_score[1] is None:
apply_change = False
# Otherwise, always prefer our changes
if apply_change:
g.set_score(parsed_score[0], parsed_score[1], parsed_score[2])
tourney.merge_games(games);
stored_revision_no = tourney.get_game_table_revision_no(round_no)
num_divisions = tourney.get_num_divisions()
cgicommon.writeln("<div class=\"scorestable\">");
# If we've put up the conflict resolution form, then what we print here
# isn't a form but an ordinary table showing the current results and
# the user's submission.
# The usual case is not conflict_resolution, where we put the game list
# form here.
if not conflict_resolution:
cgicommon.writeln("<form method=\"POST\" action=\"%s?tourney=%s&round=%d\">" % (baseurl, urllib.parse.quote_plus(tourney_name), round_no));
cgicommon.writeln("<input type=\"hidden\" name=\"tourney\" value=\"%s\" />" % cgicommon.escape(tourney_name, True));
cgicommon.writeln("<input type=\"hidden\" name=\"round\" value=\"%d\" />" % round_no);
cgicommon.writeln("<input type=\"hidden\" id=\"lastmodified\" name=\"lastmodified\" value=\"\" />");
cgicommon.writeln("<input type=\"hidden\" name=\"revision\" value=\"%d\" />" % (stored_revision_no))
for div_index in range(num_divisions):
if num_divisions > 1:
cgicommon.writeln("<h2>%s</h2>" % (cgicommon.escape(tourney.get_division_name(div_index))))
if tourney.are_players_assigned_teams():
team_scores = tourney.get_team_scores()
cgicommon.show_team_score_table(team_scores)
cgicommon.writeln('<br />')
div_games = tourney.get_games(round_no=round_no, only_players_known=False, division=div_index);
if conflict_resolution:
for g in games:
score = form.getfirst("gamescore_%d_%d" % (g.round_no, g.seq));
parsed_score = parse_score(score)
if parsed_score is None:
remarks[(g.round_no, g.seq)] = "Invalid score: %s" % (score)
else:
# If the score the user has entered is different
# from the score in the table, display the
# user's submitted score in the Remarks column.
if not ((g.s1 is None and g.s2 is None and parsed_score[0] is None and parsed_score[1] is None) or (g.s1 == parsed_score[0] and g.s2 == parsed_score[1] and g.tb == parsed_score[2]) ):
player_names = g.get_player_names()
if parsed_score[0] is None or parsed_score[1] is None:
remarks[(g.round_no, g.seq)] = "%s - %s" % (player_names[0], player_names[1])
else:
remarks[(g.round_no, g.seq)] = "%s %d%s - %d%s %s" % (
player_names[0],
parsed_score[0],
"*" if (parsed_score[0] > parsed_score[1] and parsed_score[2]) else "",
parsed_score[1],
"*" if (parsed_score[1] >= parsed_score[0] and parsed_score[2]) else "",
player_names[1])
cgicommon.show_games_as_html_table(div_games, editable=False,
remarks=remarks, include_round_column=False,
round_namer=None,
player_to_link=lambda x : cgicommon.player_to_link(x, tourney.get_name(), False, True),
remarks_heading="Your submission")
else:
cgicommon.show_games_as_html_table(div_games, editable=True,
remarks=remarks, include_round_column=False,
round_namer=None,
player_to_link=lambda x : cgicommon.player_to_link(x, tourney.get_name(), False, True))
if not conflict_resolution:
cgicommon.writeln("<p><input type=\"submit\" name=\"save\" value=\"Save\" onclick=\"unset_unsaved_data_warning();\" /></p>");
if form.getfirst("showrandomresultsbutton"):
cgicommon.writeln("<p><input type=\"submit\" name=\"randomresults\" value=\"Random Results\" /></p>");
elif form.getfirst("showscrabbleresultsbutton"):
cgicommon.writeln("<p><input type=\"submit\" name=\"randomresults\" value=\"Random Scrabble-ish Results\" /></p>");
cgicommon.writeln("<p><input type=\"hidden\" name=\"scrabbleresults\" value=\"1\" /></p>");
cgicommon.writeln("</form>")
focus = None;
if last_modified_element:
m = re.match("^gamescore_([0-9]+)_([0-9]+)$", last_modified_element)
if m:
lastmod_index = (int(m.group(1)), int(m.group(2)));
# The box with focus should be the next unfilled box equal
# to or after the one that was last modified. If they're all
# filled, put the focus on the first box.
for i in range(0, len(games)):
if games[i].round_no == lastmod_index[0] and games[i].seq == lastmod_index[1]:
# We've found the control we last modified;
for j in range(0, len(games)):
g = games[(i + j) % len(games)]
if not g.is_complete():
focus = (g.round_no, g.seq)
break
break
if games:
if focus is None:
focus = (games[0].round_no, games[0].seq);
control_with_focus = "gamescore_%d_%d" % (focus[0], focus[1]);
cgicommon.writeln("<script>")
cgicommon.writeln("document.getElementById('" + control_with_focus + "').focus();")
cgicommon.writeln("</script>")
cgicommon.writeln("</div>"); #scorestable
cgicommon.writeln("</div>"); #mainpane
except countdowntourney.TourneyException as e:
cgicommon.show_tourney_exception(e);
cgicommon.writeln("</body>");
cgicommon.writeln("</html>");
sys.exit(0);
| 10,371 | 0 | 115 |
c68efaf3e88073a6ef86b995eddf8209d73c8cc2 | 5,329 | py | Python | syzoj_tools/__init__.py | syzoj/syzoj-tools | ade6a4e05422894881b242e517933843fab71756 | [
"MIT"
] | 34 | 2018-11-04T16:11:11.000Z | 2022-01-26T15:09:39.000Z | syzoj_tools/__init__.py | syzoj/syzoj-tools | ade6a4e05422894881b242e517933843fab71756 | [
"MIT"
] | 6 | 2018-11-04T15:58:29.000Z | 2019-07-03T06:58:48.000Z | syzoj_tools/__init__.py | syzoj/syzoj-tools | ade6a4e05422894881b242e517933843fab71756 | [
"MIT"
] | 6 | 2018-12-01T05:45:31.000Z | 2020-01-08T08:06:44.000Z | #!/usr/bin/python3
import argparse
import logging
from .problem import Problem
from .contest import Contest
| 41.310078 | 160 | 0.682867 | #!/usr/bin/python3
import argparse
import logging
from .problem import Problem
from .contest import Contest
def main():
    """Entry point for the `syzoj` command-line tool.

    Builds the argument parser (subcommands: config, build, test, judge,
    contest judge/export, daemon), validates that a subcommand was given,
    configures logging from -v/-vv, and dispatches to the handler installed
    via set_defaults(func=...).
    """
    parser = argparse.ArgumentParser(prog="syzoj")
    subparser = parser.add_subparsers(dest="subcommands")
    parser.add_argument("--path", dest="path", default=".")
    parser.add_argument('-v', '--verbose', action='count', default=0)
    parser_config = subparser.add_parser("config", help="creates/edits the config")
    parser_config.set_defaults(func=cmd_config)
    parser_build = subparser.add_parser("build", help="builds the problem resources", description="Builds the problem resources and prepares it for deployment")
    parser_build.set_defaults(func=cmd_build)
    # Fixed user-facing typo: "verifys" -> "verifies".
    parser_test = subparser.add_parser("test", help="verifies the problem")
    parser_test.set_defaults(func=cmd_test)
    parser_judge = subparser.add_parser("judge", help="judge submissions")
    parser_judge.set_defaults(func=cmd_judge)
    parser_judge.add_argument("--nolazy", default=True, dest="lazy", action="store_const", const=False, help="Judge every testcase and don't be lazy")
    parser_judge.add_argument("--show-detail", default=False, dest="show_detail", action="store_const", const=True, help="Show detailed judge result (messy)")
    parser_judge.add_argument("--show-testcases", default=False, dest="show_testcases", action="store_const", const=True, help="Show result for every testcase")
    parser_judge.add_argument("prog", nargs="+")
    parser_contest = subparser.add_parser("contest", help="contest related commands")
    parser_contest.set_defaults(func=cmd_contest)
    subparser_contest = parser_contest.add_subparsers(dest="contest_subcommands")
    parser_contest_judge = subparser_contest.add_parser("judge", help="judges contest players")
    parser_contest_judge.set_defaults(func_contest=cmd_contest_judge)
    parser_contest_judge.add_argument("-f", "--force", default=False, dest="contest_judge_force", action="store_const", const=True, help="Judge even if judged")
    parser_contest_judge.add_argument("contest_judge_players", metavar="players", nargs="*", help="List of players to judge (empty to judge all)")
    parser_contest_export = subparser_contest.add_parser("export", help="exports contest result")
    parser_contest_export.set_defaults(func_contest=cmd_contest_export)
    parser_contest_export.add_argument("export_file", default="result.csv", nargs="?", help="The file to export to, must be CSV")
    parser_daemon = subparser.add_parser("daemon", help="starts the daemon")
    parser_daemon.set_defaults(func=cmd_daemon)
    args = parser.parse_args()

    # argparse does not force a subcommand to be chosen, so check by hand
    # and print the relevant help on omission.
    if args.subcommands is None:
        print("No subcommand supplied")
        parser.print_help()
        exit(1)
    elif args.subcommands == "contest" and args.contest_subcommands is None:
        print("No subcommand supplied")
        parser_contest.print_help()
        exit(1)

    # Register a custom VERBOSE level (15, between DEBUG and INFO) and a
    # matching Logger.verbose() convenience method.
    logging.addLevelName(15, "VERBOSE")
    logging.VERBOSE = 15
    def verbose(self, message, *args, **kwargs):
        self.log(logging.VERBOSE, message, *args, **kwargs)
    logging.Logger.verbose = verbose
    if args.verbose == 0:
        logging.basicConfig(level=logging.INFO)
    elif args.verbose == 1:
        logging.basicConfig(level=logging.VERBOSE)
    elif args.verbose >= 2:
        logging.basicConfig(level=logging.DEBUG)

    args.func(args)
def cmd_config(args):
    """Handler for the `config` subcommand.

    Constructing the Problem is the whole action here — presumably loading
    (and creating if absent) the problem configuration happens in
    Problem.__init__; the instance itself is never used afterwards.
    TODO(review): confirm against Problem.__init__.
    """
    problem = Problem(args.path)
def cmd_build(args):
    """Handler for the `build` subcommand: build the problem found at
    args.path, forcing a rebuild even if artifacts already exist."""
    Problem(args.path).build(force=True)
def cmd_test(args):
    """Handler for the `test` subcommand: run the problem's self-tests,
    report the outcome, and exit with status 1 on failure."""
    problem = Problem(args.path)
    passed = problem.test()
    if passed:
        print("All tests passed")
    else:
        # Merged the original redundant `if not test:` re-check into this
        # branch: report and exit in one place.
        print("Some tests failed, check for \"Assertion %d failed\" above")
        exit(1)
def cmd_judge(args):
    """Judge each program in args.prog against the problem and print the results."""
    problem = Problem(args.path)
    for prog in args.prog:
        result = problem.judge(prog, lazy=args.lazy)
        if result.success:
            print("Score: %d" % result.score)
            if args.show_testcases:
                for name, case in result.case_result.items():
                    print(" Test case %s: %f, %s" % (name, case.score, case.message))
            for i, subtask in enumerate(result.subtask_result):
                # last_case is None when every case in the subtask passed.
                passed = subtask.last_case is None
                print(" Subtask %d: %f, passed=%s" % (i, subtask.score, passed))
                if not passed:
                    print(" Last case: %s, message: %s" % (subtask.last_case, result.case_result[subtask.last_case].message))
        else:
            print("Failed: %s" % result.pre_judge_result.message)
        if args.show_detail:
            print("Detailed result: ", result)
def cmd_contest(args):
    """Dispatch to the selected contest sub-subcommand (judge/export)."""
    # func_contest is installed by the sub-subparser's set_defaults().
    args.func_contest(args)
def cmd_contest_judge(args):
    """Judge the named contest players (all players when none are named).

    Contest state is saved even when judging raises, so partial progress
    is never lost.
    """
    contest = Contest(args.path)
    players = args.contest_judge_players
    try:
        contest.scan()
        if not players:
            contest.judge_all(force=args.contest_judge_force)
        else:
            for player in players:
                contest.judge_player(player, force=args.contest_judge_force)
    finally:
        # Persist whatever was judged, even on error/interrupt.
        contest.save()
def cmd_contest_export(args):
    """Export the contest results to the CSV file named on the command line."""
    Contest(args.path).export(args.export_file)
def cmd_daemon(args):
    """Start the long-running daemon for the given path."""
    # Imported lazily so daemon-only dependencies are not needed by the
    # other subcommands.
    from .daemon import Daemon
    daemon = Daemon(args.path)
    daemon.run()
| 5,002 | 0 | 219 |
beb60eaaf0df87866ceb9d7435941b848c78cc78 | 512 | py | Python | .archive/projectlib/common/number/functional.py | abhmul/projects-repo | 6d3a8110ce69219e769e0e56d14fbe394f6389c6 | [
"MIT"
] | null | null | null | .archive/projectlib/common/number/functional.py | abhmul/projects-repo | 6d3a8110ce69219e769e0e56d14fbe394f6389c6 | [
"MIT"
] | null | null | null | .archive/projectlib/common/number/functional.py | abhmul/projects-repo | 6d3a8110ce69219e769e0e56d14fbe394f6389c6 | [
"MIT"
] | null | null | null | import math
import itertools
from collections import defaultdict
flatten_iter = itertools.chain.from_iterable
# https://stackoverflow.com/a/6909532/5538273
| 20.48 | 92 | 0.673828 | import math
import itertools
from collections import defaultdict
flatten_iter = itertools.chain.from_iterable
# https://stackoverflow.com/a/6909532/5538273
def factors(n):
    """Return the set of all positive divisors of n."""
    # Each small divisor i pairs with its cofactor n//i.
    pairs = ((i, n // i) for i in range(1, int(math.sqrt(n) + 1)) if n % i == 0)
    return set(itertools.chain.from_iterable(pairs))
def binary(x, padding=0):
    """Return x in binary, zero-padded on the left to at least `padding` digits."""
    spec = "0" + str(padding) + "b"
    return format(x, spec)
def digit_map(x: int, func=list):
    """Apply `func` to the decimal string of x (default: list of digit chars)."""
    text = str(x)
    return func(text)
def digit_counts(x: int):
    """Count occurrences of each character in str(x).

    Returns a defaultdict(int), so looking up an absent digit yields 0.
    """
    counts = defaultdict(int)
    for ch in str(x):
        counts[ch] = counts[ch] + 1
    return counts
| 260 | 0 | 91 |
d82f7ebd4c8e0f662bc9dace46a30fecf0fddce5 | 1,616 | py | Python | ex_03_session_object.py | albertgreinoecker/python-flask-examples | 86cd4dd214241910c20fcd21550aaea2fd9eb0b5 | [
"Apache-2.0"
] | null | null | null | ex_03_session_object.py | albertgreinoecker/python-flask-examples | 86cd4dd214241910c20fcd21550aaea2fd9eb0b5 | [
"Apache-2.0"
] | null | null | null | ex_03_session_object.py | albertgreinoecker/python-flask-examples | 86cd4dd214241910c20fcd21550aaea2fd9eb0b5 | [
"Apache-2.0"
] | null | null | null | from flask import Flask, render_template, session, jsonify
from flask_session import Session # new style
'''
Möchte man z.B.: Objekte in der Session speichern, gibt es bei der normalen Verwendung ein Problem,
weil Flask die Session Info als JSON-String (verschlüsselt!) in einem Cookie speichert.
Es gäbe die Möglichkeit, das Schreiben als JSON selbst zu implementieren bzw. das Objekt o mit o.__dict__ in ein Dictionary
zu übertragen, aber die hier verwendete Erweiterung flask-session macht es einfacher. Hier werden die Session-Daten im Dateisystem des Servers gespeichert.
pip3 install flask_session
'''
app = Flask(__name__)
SESSION_TYPE = 'filesystem'
app.config.from_object(__name__)
Session(app)
#Session darf nur innerhalb von den Methoden verwendet werden, sonst fehlt der Kontext!
@app.route('/question')
@app.route('/question/<int:correct>')
if __name__ == '__main__':
app.run(debug=True) | 36.727273 | 155 | 0.689356 | from flask import Flask, render_template, session, jsonify
from flask_session import Session # new style
'''
Möchte man z.B.: Objekte in der Session speichern, gibt es bei der normalen Verwendung ein Problem,
weil Flask die Session Info als JSON-String (verschlüsselt!) in einem Cookie speichert.
Es gäbe die Möglichkeit, das Schreiben als JSON selbst zu implementieren bzw. das Objekt o mit o.__dict__ in ein Dictionary
zu übertragen, aber die hier verwendete Erweiterung flask-session macht es einfacher. Hier werden die Session-Daten im Dateisystem des Servers gespeichert.
pip3 install flask_session
'''
app = Flask(__name__)
SESSION_TYPE = 'filesystem'
app.config.from_object(__name__)
Session(app)
#Session darf nur innerhalb von den Methoden verwendet werden, sonst fehlt der Kontext!
class Question:
    """A quiz question stored server-side in the Flask session.

    `correct` is the index (into `answers`) of the right answer —
    presumably 0-based; confirm against the template.
    """
    def __init__(self, text, level, answers, correct):
        self.text = text        # question text shown to the user
        self.level = level      # difficulty level
        self.answers = answers  # list of answer options
        self.correct = correct  # index of the correct option
@app.route('/question')
@app.route('/question/<int:correct>')
def home(correct=0):
    """Grade the answer from the previous request (if any) and serve a question.

    The current Question object lives in the server-side session (flask_session
    filesystem backend), so whole objects can be stored directly.
    """
    feedback = ''
    if 'question' in session:
        # Grade the submitted answer against the stored question.
        if session['question'].correct == correct:
            feedback = 'correct!'
        else:
            feedback = 'wrong'
        session['question'] = Question('Q2', 1, ['E','F','G','H'], 1)
    else:  # new session: start with the first question
        session['question'] = Question('Q1', 0, ['A','B','C','D'], 3)
    return render_template('03_session_object.html', feedback=feedback, question=session['question'])
if __name__ == '__main__':
app.run(debug=True) | 638 | -6 | 71 |
281a66a4818f868ede0abdbd047f61165264375d | 8,172 | py | Python | cvxpy_tinoco/constraints.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | cvxpy_tinoco/constraints.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | cvxpy_tinoco/constraints.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | #***********************************************************************#
# Copyright (C) 2010-2012 Tomas Tinoco De Rubira #
# #
# This file is part of CVXPY #
# #
# CVXPY is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# CVXPY is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
#***********************************************************************#
import numpy as np
from .defs import *
from .scalars import cvxpy_obj
from .arrays import cvxpy_array
from .arrays import cvxpy_matrix
from functools import reduce
#***********************************************************************#
# Class definition: cvxpy_constr #
#***********************************************************************#
#***********************************************************************#
# Class definition: cvxpy_list #
#***********************************************************************#
#***********************************************************************#
# Function definition: compare #
#***********************************************************************#
def compare(obj1,constraint_type,obj2):
    """
    Compares obj1 with obj2 and builds the resulting constraint(s).

    Scalars are upgraded to constant objects; when one side is a scalar
    and the other an array, the scalar is broadcast to the array's shape
    and the comparison is applied element-wise.

    :param obj1: Left hand side object.
    :param constraint_type: Keyword (See cvxpy.defs.).
    :param obj2: Right hand side object.
    """
    # Both scalars
    if ((np.isscalar(obj1) or type(obj1).__name__ in SCALAR_OBJS) and
        (np.isscalar(obj2) or type(obj2).__name__ in SCALAR_OBJS)):
        # Upgrade plain numbers to cvxpy_obj
        if np.isscalar(obj1):
            obj1 = cvxpy_obj(CONSTANT,obj1,str(obj1))
        if np.isscalar(obj2):
            obj2 = cvxpy_obj(CONSTANT,obj2,str(obj2))
        # Construct and return constraint
        return cvxpy_constr(obj1,constraint_type,obj2)
    # Upgrade scalars to arrays (broadcast to the other side's shape)
    if ((type(obj1) is cvxpy_matrix or type(obj1).__name__ in ARRAY_OBJS) and
        (np.isscalar(obj2) or type(obj2).__name__ in SCALAR_OBJS)):
        (m,n) = obj1.shape
        new_ar = cvxpy_array(m,n)
        for i in range(m):
            for j in range(n):
                new_ar[i,j] = obj2
        obj2 = new_ar
    if ((type(obj2) is cvxpy_matrix or type(obj2).__name__ in ARRAY_OBJS) and
        (np.isscalar(obj1) or type(obj1).__name__ in SCALAR_OBJS)):
        (m,n) = obj2.shape
        new_ar = cvxpy_array(m,n)
        for i in range(m):
            for j in range(n):
                new_ar[i,j] = obj1
        obj1 = new_ar
    # Both arrays: compare element-wise
    if ((type(obj1) is cvxpy_matrix or type(obj1).__name__ in ARRAY_OBJS) and
        (type(obj2) is cvxpy_matrix or type(obj2).__name__ in ARRAY_OBJS)):
        if obj1.shape != obj2.shape:
            raise ValueError('Invalid dimensions')
        constr = []
        (m,n) = obj1.shape
        for i in range(m):
            for j in range(n):
                constr += [compare(obj1[i,j],constraint_type,obj2[i,j])]
        return cvxpy_list(constr)
    # Invalid arguments
    raise TypeError('Objects not comparable')
| 33.768595 | 78 | 0.488253 | #***********************************************************************#
# Copyright (C) 2010-2012 Tomas Tinoco De Rubira #
# #
# This file is part of CVXPY #
# #
# CVXPY is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# CVXPY is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
#***********************************************************************#
import numpy as np
from .defs import *
from .scalars import cvxpy_obj
from .arrays import cvxpy_array
from .arrays import cvxpy_matrix
from functools import reduce
#***********************************************************************#
# Class definition: cvxpy_constr #
#***********************************************************************#
class cvxpy_constr(object):
    """A single constraint relating a left and right expression.

    ``type`` is one of the comparison keywords from cvxpy.defs
    (EQUALS, LESS_EQUALS, GREATER_EQUALS or BELONGS).
    """
    # Method: __init__
    def __init__(self,left,constraint_type,right):
        """
        Class constructor.
        :param left: Left hand side.
        :param constraint type: Keyword (See cvxpy.defs).
        :param right: Right hand side.
        """
        self.left = left
        self.type = constraint_type
        self.right = right
    # Method: __getattribute__
    def __getattribute__(self,name):
        """Synthesize the 'variables'/'parameters' attributes on access."""
        # Note: reading self.type below re-enters __getattribute__ with
        # name == 'type', which safely falls through to the default branch.
        # Variables
        if name == 'variables':
            # BELONGS constraints only draw variables from the left side.
            if self.type == BELONGS:
                return self.left.variables
            else:
                return cvxpy_list(set(self.left.variables +
                                      self.right.variables))
        # Parameters
        elif name == 'parameters':
            if self.type == BELONGS:
                return self.left.parameters
            else:
                return cvxpy_list(set(self.left.parameters +
                                      self.right.parameters))
        # Other
        else:
            return object.__getattribute__(self,name)
    # Method: __str__
    def __str__(self):
        """Render as '<left> <type-keyword> <right>'."""
        l_text = str(self.left)
        type_text = self.type
        r_text = str(self.right)
        return l_text+' '+type_text+' '+r_text
    # Method: is_dcp
    def is_dcp(self):
        """
        Determines if the constraint is DCP-compliant.
        """
        if (self.type == EQUALS and
            self.left.is_affine() and
            self.right.is_affine()):
            return True
        elif (self.type == LESS_EQUALS and
              self.left.is_convex() and
              self.right.is_concave()):
            return True
        elif (self.type == GREATER_EQUALS and
              self.left.is_concave() and
              self.right.is_convex()):
            return True
        elif (self.type == BELONGS and
              self.left.is_affine()):
            return True
        else:
            return False
    # Method: is_affine
    def is_affine(self):
        """Determines if both sides of a (non-membership) constraint are affine."""
        if self.type == BELONGS:
            return False
        else:
            return self.left.is_affine() and self.right.is_affine()
#***********************************************************************#
# Class definition: cvxpy_list #
#***********************************************************************#
class cvxpy_list(list):
    """A list of constraints with aggregate 'variables'/'parameters' access."""
    # Method: __getattributes__
    def __getattribute__(self,name):
        """Synthesize 'variables'/'parameters' as the union over all items."""
        # Variables
        if name == 'variables':
            all_vars = reduce(lambda x,y: x+y,
                              list(map(lambda x: x.variables,self)),[])
            return cvxpy_list(set(all_vars))
        # Parameters
        elif name == 'parameters':
            all_params = reduce(lambda x,y: x+y,
                                list(map(lambda x: x.parameters,self)),[])
            return cvxpy_list(set(all_params))
        # Other
        else:
            return object.__getattribute__(self,name)
    # Method: _get_eq
    def _get_eq(self):
        """
        Gives equality constraints.
        """
        return cvxpy_list([c for c in self if c.type == EQUALS])
    # Method: _get_ineq_in
    def _get_ineq_in(self):
        """
        Gives inequality and membership constraints.
        """
        return cvxpy_list([c for c in self if c.type != EQUALS])
    # Method: is_dcp
    def is_dcp(self):
        """
        Determines if all constraints are DCP-compliant.
        """
        return all(list(map(lambda x:x.is_dcp(),self)))
    # Method: is_affine
    def is_affine(self):
        """
        Determines if all constraints are affine.
        """
        return all(list(map(lambda x:x.is_affine(),self)))
    # Method: __add__
    def __add__(self,other):
        """Concatenation returns a cvxpy_list (not a plain list)."""
        return cvxpy_list(list(self) + other)
    # Method: __radd__
    def __radd__(self,other):
        return cvxpy_list(other + list(self))
    # Method: __str__
    def __str__(self):
        """One constraint per line, no trailing newline."""
        output = ''
        for i in range(0,len(self),1):
            output += str(self[i])
            if i != len(self)-1:
                output += '\n'
        return output
#***********************************************************************#
# Function definition: compare #
#***********************************************************************#
def compare(obj1,constraint_type,obj2):
    """
    Compares obj1 with obj2 and builds the resulting constraint(s).

    Scalars are upgraded to constant objects; when one side is a scalar
    and the other an array, the scalar is broadcast to the array's shape
    and the comparison is applied element-wise.

    :param obj1: Left hand side object.
    :param constraint_type: Keyword (See cvxpy.defs.).
    :param obj2: Right hand side object.
    """
    # Both scalars
    if ((np.isscalar(obj1) or type(obj1).__name__ in SCALAR_OBJS) and
        (np.isscalar(obj2) or type(obj2).__name__ in SCALAR_OBJS)):
        # Upgrade plain numbers to cvxpy_obj
        if np.isscalar(obj1):
            obj1 = cvxpy_obj(CONSTANT,obj1,str(obj1))
        if np.isscalar(obj2):
            obj2 = cvxpy_obj(CONSTANT,obj2,str(obj2))
        # Construct and return constraint
        return cvxpy_constr(obj1,constraint_type,obj2)
    # Upgrade scalars to arrays (broadcast to the other side's shape)
    if ((type(obj1) is cvxpy_matrix or type(obj1).__name__ in ARRAY_OBJS) and
        (np.isscalar(obj2) or type(obj2).__name__ in SCALAR_OBJS)):
        (m,n) = obj1.shape
        new_ar = cvxpy_array(m,n)
        for i in range(m):
            for j in range(n):
                new_ar[i,j] = obj2
        obj2 = new_ar
    if ((type(obj2) is cvxpy_matrix or type(obj2).__name__ in ARRAY_OBJS) and
        (np.isscalar(obj1) or type(obj1).__name__ in SCALAR_OBJS)):
        (m,n) = obj2.shape
        new_ar = cvxpy_array(m,n)
        for i in range(m):
            for j in range(n):
                new_ar[i,j] = obj1
        obj1 = new_ar
    # Both arrays: compare element-wise
    if ((type(obj1) is cvxpy_matrix or type(obj1).__name__ in ARRAY_OBJS) and
        (type(obj2) is cvxpy_matrix or type(obj2).__name__ in ARRAY_OBJS)):
        if obj1.shape != obj2.shape:
            raise ValueError('Invalid dimensions')
        constr = []
        (m,n) = obj1.shape
        for i in range(m):
            for j in range(n):
                constr += [compare(obj1[i,j],constraint_type,obj2[i,j])]
        return cvxpy_list(constr)
    # Invalid arguments
    raise TypeError('Objects not comparable')
| 1,781 | 2,212 | 44 |
ae705d599832a8e29581cd28be7198a946fe8002 | 5,141 | py | Python | TestCaseCodes/Saadat/OLDVersion/RecomendWeb.py | Dieuzu/SDGP-Nullpoint-G22 | 1080e1c77dddb59057de1ea9bccd97ec987ae202 | [
"MIT"
] | null | null | null | TestCaseCodes/Saadat/OLDVersion/RecomendWeb.py | Dieuzu/SDGP-Nullpoint-G22 | 1080e1c77dddb59057de1ea9bccd97ec987ae202 | [
"MIT"
] | null | null | null | TestCaseCodes/Saadat/OLDVersion/RecomendWeb.py | Dieuzu/SDGP-Nullpoint-G22 | 1080e1c77dddb59057de1ea9bccd97ec987ae202 | [
"MIT"
] | null | null | null | # write-html.py
import webbrowser
if __name__ == "__main__":
main() | 51.41 | 229 | 0.541723 | # write-html.py
import webbrowser
def main():
    """Render TaskResults.html from Refined_Links.txt and open it in a browser."""
    # NOTE(review): neither handle is closed on error and `r` is never closed
    # at all — prefer `with open(...)`. Also mode 'a' appends, so re-running
    # duplicates the whole page.
    f = open('4_RefferenceResultGen\TestFolder\TaskResults.html','a')
    r = open('4_RefferenceResultGen\TestFolder\Refined_Links.txt','r')
    # P1: static page header/hero; P2_1 + <url> + P2_2 + P2_3 form one link
    # card; P3 closes the page.
    P1 = """<!DOCTYPE html>
<html style="font-size: 16px;">
<head>
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <meta charset="utf-8">
    <meta name="keywords" content="Learn marketing strategy, Our Courses, Drive Your Career Forward">
    <meta name="description" content="">
    <meta name="page_type" content="np-template-header-footer-from-plugin">
    <title>NullPoint</title>
    <link rel="stylesheet" href="nicepage.css" media="screen">
<link rel="stylesheet" href="Page-2.css" media="screen">
    <script class="u-script" type="text/javascript" src="jquery.js" defer=""></script>
    <script class="u-script" type="text/javascript" src="nicepage.js" defer=""></script>
    <meta name="generator" content="Nicepage 4.4.3, nicepage.com">
    <link id="u-theme-google-font" rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:100,100i,300,300i,400,400i,500,500i,700,700i,900,900i|Open+Sans:300,300i,400,400i,600,600i,700,700i,800,800i">
    <link id="u-page-google-font" rel="stylesheet" href="https://fonts.googleapis.com/css?family=Montserrat:100,100i,200,200i,300,300i,400,400i,500,500i,600,600i,700,700i,800,800i,900,900i">
    <script type="application/ld+json">{
		"@context": "http://schema.org",
		"@type": "Organization",
		"name": "",
		"logo": "images/default-logo.png"
}</script>
    <meta name="theme-color" content="#478ac9">
    <meta property="og:title" content="Page 2">
    <meta property="og:type" content="website">
</head>
<body class="u-body u-xl-mode"><header class="u-clearfix u-header u-header" id="sec-00c3"><div class="u-clearfix u-sheet u-sheet-1">
    <a href="https://nicepage.com" class="u-image u-logo u-image-1">
        <img src="images/default-logo.png" class="u-logo-image u-logo-image-1">
    </a>
  </div></header>
    <section class="u-align-center u-clearfix u-image u-shading u-section-1" src="" id="carousel_0791" data-image-width="150" data-image-height="97">
      <div class="u-clearfix u-sheet u-sheet-1">
        <h2 class="u-text u-text-1">NULLPOINT<br>Assignment Manager
        </h2>
      </div>
    </section>
    <section class="u-align-left u-clearfix u-grey-10 u-section-2" id="carousel_77f5">
      <div class="u-clearfix u-sheet u-valign-middle-md u-valign-middle-sm u-valign-middle-xs u-sheet-1">
        <h2 class="u-align-center u-custom-font u-font-montserrat u-text u-text-default u-text-1"> Maybe the Following Links Might help you.....</h2>
        <div class="u-border-16 u-border-palette-3-base u-line u-line-horizontal u-line-1"></div>
        <div class="u-expanded-width u-list u-list-1">
          <div class="u-repeater u-repeater-1"> """
    P2_1 = """ <div class="u-align-left u-container-style u-list-item u-repeater-item u-white u-list-item-1">
        <div class="u-container-layout u-similar-container u-valign-middle u-container-layout-1">
          <h5 class="u-text u-text-default u-text-2"> <a href=\"""
    P2_3 = """</a></h5>
          <div class="u-expanded-height-lg u-expanded-height-sm u-expanded-height-xl u-expanded-height-xs u-palette-3-base u-shape u-shape-rectangle u-shape-1"></div>
        </div>
      </div> """
    P3 = """
      </div>
    </div>
  </div>
</section>
<footer class="u-align-center u-clearfix u-footer u-grey-80 u-footer" id="sec-67f3"><div class="u-clearfix u-sheet u-sheet-1">
    <p class="u-small-text u-text u-text-variant u-text-1">Nullpoint Assignment manager</p>
  </div></footer>
</body>
</html>"""
    print ("[SYSTEM] Creating the TaskResults.html")
    f.write(P1)
    Linkindex = 1
    # Each Refined_Links.txt line appears to be "<url>,<relevancy-percent>";
    # spliter[1] presumably keeps its trailing newline — confirm with producer.
    for x in r:
        spliter = x.split(",")
        P2_2 = "\" target=\"_blank\">Check out Link Number: " + str(Linkindex) + " (" + spliter[1] +"% Relevancy)"
        FullHtml = P2_1 + spliter[0] + P2_2 + P2_3
        Linkindex += 1
        f.write(FullHtml)
    f.write(P3)
    f.close()
    print("[SYSTEM] Deploying the TaskResults.html")
    url = '4_RefferenceResultGen\TestFolder\TaskResults.html'
    webbrowser.open(url, new=2) # open in new tab
if __name__ == "__main__":
main() | 5,036 | 0 | 23 |
2ee5c04d969e04b06006dc6d3b010e073f165efb | 1,366 | py | Python | test/test_classes/test_graph.py | MHenderson1988/PyLineofsight | 169a693320fd39ded4c76fd96b4613de2d0de85c | [
"MIT"
] | 4 | 2021-07-05T09:19:42.000Z | 2022-01-22T01:51:02.000Z | test/test_classes/test_graph.py | MHenderson1988/PyLineofsight | 169a693320fd39ded4c76fd96b4613de2d0de85c | [
"MIT"
] | 3 | 2020-06-20T10:47:44.000Z | 2022-01-11T19:50:27.000Z | test/test_classes/test_graph.py | MHenderson1988/PyLineofsight | 169a693320fd39ded4c76fd96b4613de2d0de85c | [
"MIT"
] | 2 | 2020-06-20T10:23:36.000Z | 2022-01-11T16:14:30.000Z | from unittest import TestCase
from main.classes.arc_solver import ArcSolver
from main.classes.decimal_location import DecimalLocation
from main.classes.graph import Graph
| 35.025641 | 87 | 0.662518 | from unittest import TestCase
from main.classes.arc_solver import ArcSolver
from main.classes.decimal_location import DecimalLocation
from main.classes.graph import Graph
class TestGraph(TestCase):
    """Tests for Graph: LOS line computation, intersection check and colour."""
    def setUp(self) -> None:
        # Low terrain leaves graph's sight line clear; graph2's 900-2000
        # terrain blocks it.
        self.test_data = [10, 20, 30]
        self.test_data2 = [900, 2000, 1000]
        self.test_loc = DecimalLocation(55.111, -4.111, 2, "Test1")
        self.test_loc2 = DecimalLocation(44.111, 5.111, 2, "Test2")
        self.test_loc3 = DecimalLocation(55.111, -4.111, 50, "Test1")
        self.test_loc4 = DecimalLocation(44.111, 5.111, 50, "Test2")
        self.test_arc = ArcSolver(3440.065, self.test_loc.great_circle(self.test_loc2))
        self.graph = Graph(self.test_data, self.test_loc, self.test_loc2, 3)
        self.graph2 = Graph(self.test_data2, self.test_loc3, self.test_loc4, 3)
    def test_get_los_line(self):
        computed = self.graph.get_los_line()
        expected = [12, 22, 32]
        for idx, value in enumerate(computed):
            self.assertEqual(expected[idx], value)
    def test_does_intersect(self):
        self.assertFalse(self.graph.check_intersect())
        self.assertTrue(self.graph2.check_intersect())
    def test_select_colour(self):
        self.assertEqual('green', self.graph.select_colour())
        self.assertEqual('red', self.graph2.select_colour())
| 1,058 | 5 | 130 |
48846ea3e6c8c54699820da1a89594fc5a229ad0 | 6,852 | py | Python | third_party/tlslite/tlslite/integration/clienthelper.py | Acidburn0zzz/chromium-1 | 4c08f442d2588a2c7cfaa117a55bd87d2ac32f9a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | third_party/tlslite/tlslite/integration/clienthelper.py | Acidburn0zzz/chromium-1 | 4c08f442d2588a2c7cfaa117a55bd87d2ac32f9a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | third_party/tlslite/tlslite/integration/clienthelper.py | Acidburn0zzz/chromium-1 | 4c08f442d2588a2c7cfaa117a55bd87d2ac32f9a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | """
A helper class for using TLS Lite with stdlib clients
(httplib, xmlrpclib, imaplib, poplib).
"""
from tlslite.checker import Checker
class ClientHelper:
    """This is a helper class used to integrate TLS Lite with various
    TLS clients (e.g. poplib, smtplib, httplib, etc.)"""
    def __init__(self,
                 username=None, password=None, sharedKey=None,
                 certChain=None, privateKey=None,
                 cryptoID=None, protocol=None,
                 x509Fingerprint=None,
                 x509TrustList=None, x509CommonName=None,
                 settings = None):
        """
        For client authentication, use one of these argument
        combinations:
        - username, password (SRP)
        - username, sharedKey (shared-key)
        - certChain, privateKey (certificate)
        For server authentication, you can either rely on the
        implicit mutual authentication performed by SRP or
        shared-keys, or you can do certificate-based server
        authentication with one of these argument combinations:
        - cryptoID[, protocol] (requires cryptoIDlib)
        - x509Fingerprint
        - x509TrustList[, x509CommonName] (requires cryptlib_py)
        Certificate-based server authentication is compatible with
        SRP or certificate-based client authentication. It is
        not compatible with shared-keys.
        The constructor does not perform the TLS handshake itself, but
        simply stores these arguments for later. The handshake is
        performed only when this class needs to connect with the
        server. Then you should be prepared to handle TLS-specific
        exceptions. See the client handshake functions in
        L{tlslite.TLSConnection.TLSConnection} for details on which
        exceptions might be raised.
        @type username: str
        @param username: SRP or shared-key username. Requires the
        'password' or 'sharedKey' argument.
        @type password: str
        @param password: SRP password for mutual authentication.
        Requires the 'username' argument.
        @type sharedKey: str
        @param sharedKey: Shared key for mutual authentication.
        Requires the 'username' argument.
        @type certChain: L{tlslite.X509CertChain.X509CertChain} or
        L{cryptoIDlib.CertChain.CertChain}
        @param certChain: Certificate chain for client authentication.
        Requires the 'privateKey' argument. Excludes the SRP or
        shared-key related arguments.
        @type privateKey: L{tlslite.utils.RSAKey.RSAKey}
        @param privateKey: Private key for client authentication.
        Requires the 'certChain' argument. Excludes the SRP or
        shared-key related arguments.
        @type cryptoID: str
        @param cryptoID: cryptoID for server authentication. Mutually
        exclusive with the 'x509...' arguments.
        @type protocol: str
        @param protocol: cryptoID protocol URI for server
        authentication. Requires the 'cryptoID' argument.
        @type x509Fingerprint: str
        @param x509Fingerprint: Hex-encoded X.509 fingerprint for
        server authentication. Mutually exclusive with the 'cryptoID'
        and 'x509TrustList' arguments.
        @type x509TrustList: list of L{tlslite.X509.X509}
        @param x509TrustList: A list of trusted root certificates. The
        other party must present a certificate chain which extends to
        one of these root certificates. The cryptlib_py module must be
        installed to use this parameter. Mutually exclusive with the
        'cryptoID' and 'x509Fingerprint' arguments.
        @type x509CommonName: str
        @param x509CommonName: The end-entity certificate's 'CN' field
        must match this value. For a web server, this is typically a
        server name such as 'www.amazon.com'. Mutually exclusive with
        the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
        'x509TrustList' argument.
        @type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
        @param settings: Various settings which can be used to control
        the ciphersuites, certificate types, and SSL/TLS versions
        offered by the client.
        """
        # Exactly one client-auth combination may be supplied; the elif
        # chain below rejects any mixture.
        self.username = None
        self.password = None
        self.sharedKey = None
        self.certChain = None
        self.privateKey = None
        self.checker = None
        #SRP Authentication
        if username and password and not \
                (sharedKey or certChain or privateKey):
            self.username = username
            self.password = password
        #Shared Key Authentication
        elif username and sharedKey and not \
                (password or certChain or privateKey):
            self.username = username
            self.sharedKey = sharedKey
        #Certificate Chain Authentication
        elif certChain and privateKey and not \
                (username or password or sharedKey):
            self.certChain = certChain
            self.privateKey = privateKey
        #No Authentication
        elif not password and not username and not \
                sharedKey and not certChain and not privateKey:
            pass
        else:
            raise ValueError("Bad parameters")
        #Authenticate the server based on its cryptoID or fingerprint
        if sharedKey and (cryptoID or protocol or x509Fingerprint):
            # NOTE(review): these two literals concatenate to
            # "...forms ofauthentication" — a space is missing.
            raise ValueError("Can't use shared keys with other forms of"\
                             "authentication")
        # Server-side identity checks are delegated to a single Checker
        # built from whichever identity arguments were given.
        self.checker = Checker(cryptoID, protocol, x509Fingerprint,
                               x509TrustList, x509CommonName)
        self.settings = settings
        # Set after a handshake elsewhere in the class, so later
        # connections can reuse the TLS session.
        self.tlsSession = None
| 41.780488 | 76 | 0.619527 | """
A helper class for using TLS Lite with stdlib clients
(httplib, xmlrpclib, imaplib, poplib).
"""
from tlslite.checker import Checker
class ClientHelper:
    """This is a helper class used to integrate TLS Lite with various
    TLS clients (e.g. poplib, smtplib, httplib, etc.)"""
    def __init__(self,
                 username=None, password=None, sharedKey=None,
                 certChain=None, privateKey=None,
                 cryptoID=None, protocol=None,
                 x509Fingerprint=None,
                 x509TrustList=None, x509CommonName=None,
                 settings = None):
        """
        For client authentication, use one of these argument
        combinations:
        - username, password (SRP)
        - username, sharedKey (shared-key)
        - certChain, privateKey (certificate)
        For server authentication, you can either rely on the
        implicit mutual authentication performed by SRP or
        shared-keys, or you can do certificate-based server
        authentication with one of these argument combinations:
        - cryptoID[, protocol] (requires cryptoIDlib)
        - x509Fingerprint
        - x509TrustList[, x509CommonName] (requires cryptlib_py)
        Certificate-based server authentication is compatible with
        SRP or certificate-based client authentication. It is
        not compatible with shared-keys.
        The constructor does not perform the TLS handshake itself, but
        simply stores these arguments for later. The handshake is
        performed only when this class needs to connect with the
        server. Then you should be prepared to handle TLS-specific
        exceptions. See the client handshake functions in
        L{tlslite.TLSConnection.TLSConnection} for details on which
        exceptions might be raised.
        @type username: str
        @param username: SRP or shared-key username. Requires the
        'password' or 'sharedKey' argument.
        @type password: str
        @param password: SRP password for mutual authentication.
        Requires the 'username' argument.
        @type sharedKey: str
        @param sharedKey: Shared key for mutual authentication.
        Requires the 'username' argument.
        @type certChain: L{tlslite.X509CertChain.X509CertChain} or
        L{cryptoIDlib.CertChain.CertChain}
        @param certChain: Certificate chain for client authentication.
        Requires the 'privateKey' argument. Excludes the SRP or
        shared-key related arguments.
        @type privateKey: L{tlslite.utils.RSAKey.RSAKey}
        @param privateKey: Private key for client authentication.
        Requires the 'certChain' argument. Excludes the SRP or
        shared-key related arguments.
        @type cryptoID: str
        @param cryptoID: cryptoID for server authentication. Mutually
        exclusive with the 'x509...' arguments.
        @type protocol: str
        @param protocol: cryptoID protocol URI for server
        authentication. Requires the 'cryptoID' argument.
        @type x509Fingerprint: str
        @param x509Fingerprint: Hex-encoded X.509 fingerprint for
        server authentication. Mutually exclusive with the 'cryptoID'
        and 'x509TrustList' arguments.
        @type x509TrustList: list of L{tlslite.X509.X509}
        @param x509TrustList: A list of trusted root certificates. The
        other party must present a certificate chain which extends to
        one of these root certificates. The cryptlib_py module must be
        installed to use this parameter. Mutually exclusive with the
        'cryptoID' and 'x509Fingerprint' arguments.
        @type x509CommonName: str
        @param x509CommonName: The end-entity certificate's 'CN' field
        must match this value. For a web server, this is typically a
        server name such as 'www.amazon.com'. Mutually exclusive with
        the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
        'x509TrustList' argument.
        @type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
        @param settings: Various settings which can be used to control
        the ciphersuites, certificate types, and SSL/TLS versions
        offered by the client.
        """
        # Exactly one client-auth combination may be supplied; the elif
        # chain below rejects any mixture.
        self.username = None
        self.password = None
        self.sharedKey = None
        self.certChain = None
        self.privateKey = None
        self.checker = None
        #SRP Authentication
        if username and password and not \
                (sharedKey or certChain or privateKey):
            self.username = username
            self.password = password
        #Shared Key Authentication
        elif username and sharedKey and not \
                (password or certChain or privateKey):
            self.username = username
            self.sharedKey = sharedKey
        #Certificate Chain Authentication
        elif certChain and privateKey and not \
                (username or password or sharedKey):
            self.certChain = certChain
            self.privateKey = privateKey
        #No Authentication
        elif not password and not username and not \
                sharedKey and not certChain and not privateKey:
            pass
        else:
            raise ValueError("Bad parameters")
        #Authenticate the server based on its cryptoID or fingerprint
        if sharedKey and (cryptoID or protocol or x509Fingerprint):
            # NOTE(review): these two literals concatenate to
            # "...forms ofauthentication" — a space is missing.
            raise ValueError("Can't use shared keys with other forms of"\
                             "authentication")
        # Server-side identity checks are delegated to a single Checker
        # built from whichever identity arguments were given.
        self.checker = Checker(cryptoID, protocol, x509Fingerprint,
                               x509TrustList, x509CommonName)
        self.settings = settings
        # Set by _handshake(); passed back into later handshakes so the
        # TLS session can be reused.
        self.tlsSession = None
    def _handshake(self, tlsConnection):
        """Run the client handshake using whichever credentials were stored.

        Falls through to certificate-based (or anonymous) handshake when
        neither SRP nor shared-key credentials are present.
        """
        if self.username and self.password:
            tlsConnection.handshakeClientSRP(username=self.username,
                                             password=self.password,
                                             checker=self.checker,
                                             settings=self.settings,
                                             session=self.tlsSession)
        elif self.username and self.sharedKey:
            tlsConnection.handshakeClientSharedKey(username=self.username,
                                                   sharedKey=self.sharedKey,
                                                   settings=self.settings)
        else:
            tlsConnection.handshakeClientCert(certChain=self.certChain,
                                              privateKey=self.privateKey,
                                              checker=self.checker,
                                              settings=self.settings,
                                              session=self.tlsSession)
        # Remember the negotiated session for reuse on the next connection.
        self.tlsSession = tlsConnection.session
| 1,094 | 0 | 27 |
619206d534a8f617a96a868f06a8e7f359b490b1 | 7,838 | py | Python | src/gym-snake/gym_snake/envs/snake_adversarial_env.py | jdubkim/Self-play-on-Multi-Sankes-Environment | 8e72c66110a007d6bf0ca2ff68fc0a845f3b3a42 | [
"MIT"
] | 5 | 2018-07-02T12:42:00.000Z | 2018-11-22T12:56:21.000Z | src/gym-snake/gym_snake/envs/snake_adversarial_env.py | jdubkim/dlcampjeju2018 | 8e72c66110a007d6bf0ca2ff68fc0a845f3b3a42 | [
"MIT"
] | null | null | null | src/gym-snake/gym_snake/envs/snake_adversarial_env.py | jdubkim/dlcampjeju2018 | 8e72c66110a007d6bf0ca2ff68fc0a845f3b3a42 | [
"MIT"
] | null | null | null | import math
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
from gym.envs.classic_control import rendering
from config import Config
| 28.194245 | 118 | 0.495917 | import math
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
from gym.envs.classic_control import rendering
from config import Config
class SnakeAdversarial(gym.Env):
    """Multi-snake adversarial snake environment on a 10 x 10 grid.

    ``self.state`` is ``[snakes, fruits, vels, grow_to_lengths, t]``: each
    snake is a list of (x, y) cells with the head first, ``fruits`` is a list
    of fruit cells, ``vels`` the per-snake velocity tuples, ``grow_to_lengths``
    the target body length of each snake and ``t`` the step counter.
    Observations are (dim + 2) x (dim + 2) x 3 uint8 RGB arrays (the +2 is a
    white border wall), one per snake, concatenated along the channel axis.

    Fixes vs. the original: reset() no longer hard-codes three snakes,
    get_multi_snake_ob() works for any number of snakes, and get_color()
    wraps instead of raising KeyError past four snakes.
    """

    def __init__(self):
        self.dim = 10  # 10 X 10 environment
        self.action_space = spaces.Discrete(5)  # 0 = keep direction, 1-4 = turn
        self.viewer = None
        # Fruits created from dead snake bodies; eaten fruits are not
        # respawned while this counter is positive.
        self.spare_fruits = 0
        self.seed()

    def seed(self, seed=None):
        """Seed the private RNG, gym-style; return the seed list."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def draw_snake(self, ob, snake, body_color, head_color):
        """Paint one snake onto observation ``ob`` (offset by the border)."""
        if len(snake) == 0:
            return
        head = snake[0]
        for piece in snake:
            ob[piece[0] + 1][piece[1] + 1] = body_color
        # Head is painted last so it stays visible over the body color.
        ob[head[0] + 1][head[1] + 1] = head_color

    def get_ob_for_snake(self, idx):
        """RGB observation from snake ``idx``'s perspective.

        The observing snake is drawn green, every opponent blue, fruits red,
        and the border wall white.
        """
        dim = self.dim
        ob = np.full((dim + 2, dim + 2, 3), 0, dtype='uint8')
        snakes = self.state[0]
        fruits = self.state[1]
        for fruit in fruits:
            ob[fruit[0] + 1][fruit[1] + 1] = [255, 0, 0]
        for i in range(len(snakes)):
            if i == idx:
                self.draw_snake(ob, snakes[i], [0, 204, 0], [191, 242, 191])
            else:
                self.draw_snake(ob, snakes[i], [0, 51, 204], [128, 154, 230])
        for i in range(dim + 2):
            ob[i][0] = [255, 255, 255]
            ob[i][dim + 1] = [255, 255, 255]
            ob[0][i] = [255, 255, 255]
            ob[dim + 1][i] = [255, 255, 255]
        return ob  # (dim + 2) x (dim + 2) x 3 array

    def get_ob_world(self):
        """Global RGB observation with one distinct color pair per snake."""
        dim = self.dim
        ob = np.full((dim + 2, dim + 2, 3), 0, dtype='uint8')
        snakes = self.state[0]
        fruits = self.state[1]
        for fruit in fruits:
            ob[fruit[0] + 1][fruit[1] + 1] = [255, 0, 0]
        for i in range(len(snakes)):
            color = self.get_color(i)
            self.draw_snake(ob, snakes[i], color[0], color[1])
        for i in range(dim + 2):
            ob[i][0] = [255, 255, 255]
            ob[i][dim + 1] = [255, 255, 255]
            ob[0][i] = [255, 255, 255]
            ob[dim + 1][i] = [255, 255, 255]
        return ob  # (dim + 2) x (dim + 2) x 3 array

    def get_color(self, idx):
        """[body, head] RGB pair for snake ``idx``; palette repeats past 4."""
        p_colors = {0: [[0, 204, 0], [191, 242, 191]],  # Green
                    1: [[0, 51, 204], [128, 154, 230]],  # Blue
                    2: [[204, 0, 119], [230, 128, 188]],  # Magenta
                    3: [[119, 0, 204], [188, 128, 230]],  # Violet
                    }
        # Wrap around instead of raising KeyError when there are more than
        # four snakes.
        return p_colors[idx % len(p_colors)]

    def get_multi_snake_ob(self):
        """Concatenate every snake's observation along the channel axis.

        Was hard-coded to snakes 0/1/2; now handles any snake count while
        still producing the identical (12, 12, 9) array for three snakes.
        """
        obs = [self.get_ob_for_snake(i) for i in range(len(self.state[0]))]
        return np.concatenate(obs, axis=2)

    def update_snake(self, idx, action):
        """Advance snake ``idx`` one cell; return the fruit reward earned."""
        [snakes, fruits, vels, grow_to_lengths, t] = self.state
        snake = snakes[idx]
        if len(snake) == 0:
            return 0
        head = snake[0]
        vel = vels[idx]
        # Actions 1-4 set a new direction unless it would reverse straight
        # into the snake's own neck; action 0 keeps the current direction.
        if action == 1 and vel != (-1, 0):
            vel = (1, 0)
        elif action == 2 and vel != (0, -1):
            vel = (0, 1)
        elif action == 3 and vel != (1, 0):
            vel = (-1, 0)
        elif action == 4 and vel != (0, 1):
            vel = (0, -1)
        reward = 0.0
        if vel != (0, 0):
            head = (head[0] + vel[0], head[1] + vel[1])
            snake_length = grow_to_lengths[idx]
            pending_fruit = []
            for i in range(len(fruits)):
                if head == fruits[i]:
                    pending_fruit.append(i)
                    reward += 1.0
                    snake_length += 2
            # Pop the tail unless the snake is still growing toward its
            # target length, then push the new head.
            if len(snake) >= snake_length:
                snake.pop()
            snake.insert(0, head)
            for i in pending_fruit:
                if self.spare_fruits > 0:
                    # NOTE(review): a "spare" fruit is consumed without being
                    # moved, so fruits[i] stays on the board -- confirm this
                    # matches the intended body-becomes-food mechanic.
                    self.spare_fruits -= 1
                elif self.spare_fruits == 0:
                    fruits[i] = self.safe_choose_cell()
            vels[idx] = vel
            grow_to_lengths[idx] = snake_length
        return reward

    def is_snake_alive(self, idx):
        """A snake dies off-board or when its head touches any body cell."""
        snakes = self.state[0]
        snake = snakes[idx]
        if len(snake) == 0:
            return False
        head = snake[0]
        if (max(head) > self.dim - 1) or (min(head) < 0):
            return False
        for (s_idx, snake) in enumerate(snakes):
            for i in range(0, len(snake)):
                # A head trivially overlaps its own first cell; skip that.
                if head == snake[i] and not (i == 0 and s_idx == idx):
                    return False
        return True

    def step(self, action):
        """Advance every snake one tick.

        ``action`` may be a scalar (snake 0 only) or a sequence with one
        action per snake.  Returns (observation, reward, done, info); reward
        and episode termination track snake 0 only.  Dead snakes turn into
        spare fruit.
        """
        if not hasattr(action, '__len__'):
            action = [action]
        [snakes, fruits, vels, grow_to_lengths, t] = self.state
        reward = self.update_snake(0, action[0])
        for idx in range(1, len(snakes)):
            self.update_snake(idx, action[idx])
        dead_idxs = []
        for idx in range(len(snakes)):
            if not self.is_snake_alive(idx):
                dead_idxs.append(idx)
                # body becomes fruits
                for body in snakes[idx]:
                    fruits.append(body)
                self.spare_fruits += len(snakes[idx])
        for idx in dead_idxs:
            snakes[idx] = []
        isMainDead = len(snakes[0]) == 0
        if isMainDead:
            reward = -1
        t += 1
        self.state[4] = t
        done = t >= 2000 or isMainDead
        return self.get_multi_snake_ob(), reward, done, {"ale.lives": 1, "num_snakes": (len(snakes) - len(dead_idxs))}

    def choose_cell(self):
        """Uniformly random cell; may collide with existing objects."""
        return (self.np_random.randint(self.dim), self.np_random.randint(self.dim))

    def safe_choose_cell(self):
        """Random cell that is not occupied by any snake body."""
        snakes = self.state[0]
        available = list(range(self.dim * self.dim))
        for snake in snakes:
            if len(snake) > 0:
                used_idxs = list(map(lambda x: x[1] * self.dim + x[0], snake))
                available = np.setdiff1d(available, used_idxs)
        x = 0
        if len(available) > 0:
            x = available[self.np_random.randint(len(available))]
        return (x % self.dim, x // self.dim)

    def reset(self):
        """Spawn Config.NUM_SNAKES single-cell snakes plus one fruit each."""
        snakes = []
        fruits = []
        for i in range(Config.NUM_SNAKES):
            snakes.append([self.choose_cell()])
            fruits.append(self.choose_cell())
        # Was hard-coded to exactly three snakes ([3, 3, 3] / three vels);
        # keep per-snake bookkeeping in sync with Config.NUM_SNAKES.
        grow_to_lengths = [3] * Config.NUM_SNAKES
        vels = [(0, 0)] * Config.NUM_SNAKES
        self.state = [snakes, fruits, vels, grow_to_lengths, 0]
        self.steps_beyond_done = None
        return self.get_multi_snake_ob()

    def render(self, mode='human'):
        """Render the global observation as a (dim + 2) x (dim + 2) grid."""
        dim = self.dim
        screen_dim = 300
        view_dim = dim + 2
        cell_dim = screen_dim / view_dim
        if self.viewer is None:
            self.viewer = rendering.Viewer(screen_dim, screen_dim)
            cells = []
            for i in range(view_dim):
                for j in range(view_dim):
                    l, r, t, b = cell_dim * i, cell_dim * (i + 1), cell_dim * (j + 1), cell_dim * j,
                    cell = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
                    self.viewer.add_geom(cell)
                    cells.append(cell)
            self.cells = cells
        ob = self.get_ob_world()
        for i in range(view_dim):
            for j in range(view_dim):
                idx = i * view_dim + j
                rgb255 = ob[i][j]
                self.cells[idx].set_color(rgb255[0] / 255, rgb255[1] / 255, rgb255[2] / 255)
        if self.state is None:
            return None
        return self.viewer.render(return_rgb_array=mode == 'rgb_array')

    def close(self):
        """Dispose of the viewer window if one was opened."""
        if self.viewer: self.viewer.close()
| 7,232 | 11 | 427 |
8e9db4c6e20bbfa8232bbeefe8a9e0178b83da57 | 161 | py | Python | 050_Soma_dos_pares.py | fabioeomedeiros/Python-Base | ef9c1c66b3221f71d1c8dcaf4c2f86503712e9f1 | [
"MIT"
] | null | null | null | 050_Soma_dos_pares.py | fabioeomedeiros/Python-Base | ef9c1c66b3221f71d1c8dcaf4c2f86503712e9f1 | [
"MIT"
] | null | null | null | 050_Soma_dos_pares.py | fabioeomedeiros/Python-Base | ef9c1c66b3221f71d1c8dcaf4c2f86503712e9f1 | [
"MIT"
] | null | null | null | #050_Soma_dos_pares.py
soma = 0
for c in range(1, 6):
num = int(input(f"{c}º Número: "))
if (num % 2 == 0):
soma += num
print (f"Soma = {soma}") | 20.125 | 38 | 0.534161 | #050_Soma_dos_pares.py
soma = 0
for c in range(1, 6):
num = int(input(f"{c}º Número: "))
if (num % 2 == 0):
soma += num
print (f"Soma = {soma}") | 0 | 0 | 0 |
01fa47b5572e9be6d9fb493eb70734e1c33edeab | 22,313 | py | Python | libs/csvfile.py | hokaze123/ChartViewer | f9ee0ca5c23ad36dfe780fa573eed1d43c93f11b | [
"MIT"
] | 3 | 2020-09-13T10:51:58.000Z | 2021-06-09T10:07:01.000Z | libs/csvfile.py | Lihao2017-11-15/ChartViewer | 7295ee462ab4a25d585210bad4568f8685b0ee61 | [
"MIT"
] | null | null | null | libs/csvfile.py | Lihao2017-11-15/ChartViewer | 7295ee462ab4a25d585210bad4568f8685b0ee61 | [
"MIT"
] | 1 | 2020-09-13T10:48:09.000Z | 2020-09-13T10:48:09.000Z | import re
import math
from .datetime_string import to_seconds, to_timedelta
import logging
import os
logger = logging.getLogger()
| 36.942053 | 120 | 0.558105 | import re
import math
from .datetime_string import to_seconds, to_timedelta
import logging
import os
logger = logging.getLogger()
class CSVFile:
    """In-memory CSV table stored as a list of Column objects.

    Headers may come from the first file row, from the caller, or fall back
    to generated ``column_N`` names.  ``headers_dict`` optionally remaps a
    column name to a display header.
    """

    # NOTE(review): these are class-level (shared) attributes, rebound per
    # instance only where __init__/create_table assigns them.  `indexes` in
    # particular is never populated anywhere in this file -- confirm callers
    # set it before relying on get_names_list()/update_names().
    indexes = {}
    columns = []
    headers_dict = {}
    file_name = ''

    @staticmethod
    def read_rows_from_file(source_file_name):
        """Read all raw lines of *source_file_name*; on failure log, wait
        for a keypress (interactive tool behavior) and return []."""
        rows = []
        try:
            with open(source_file_name, mode='r', encoding='UTF-8-sig') as source_file:
                counter = 0
                for line in source_file:
                    rows.append(line)
                    counter += 1
        except:
            logger.error('Cannot read a file {}'.format(source_file_name))
            # Pauses so the user sees the error before the tool continues.
            input()
        return rows

    def __init__(self, file_name=None, headers=None, rows_list=None):
        """Build the table from a file or a pre-read list of raw lines.

        headers=None tries to use the first row as headers; any non-list
        headers value (e.g. the 'default' sentinel used by CSV_gateway)
        forces generated column_N headers.
        """
        if file_name is not None or rows_list is not None:
            # Get a rows list if not given as argument
            if rows_list == None:
                rows_list = self.read_rows_from_file(file_name)
            # Detect number of columns in a rows list
            number_of_columns = self.detect_number_of_columns_in_rows_list(
                rows_list)
            # Case 1: headers are the first row of the file
            if headers == None:
                first_row_list = rows_list[0].replace('\n', '').split(',')
                first_row_correct_lenght = len(
                    first_row_list) == number_of_columns
                if first_row_correct_lenght:
                    first_row_correct_type = all(isinstance(
                        item, str) for item in first_row_list)
                    if first_row_correct_type:
                        headers = first_row_list
                        rows_list.pop(0)
            # Case 2: default values
            else:
                if headers != None:
                    headers_correct_type = (isinstance(headers, list) and all(
                        isinstance(item, str) for item in headers))
                    headers_correct_lenght = len(headers) != number_of_columns
                    if not headers_correct_type or headers_correct_lenght:
                        if headers_correct_type and len(headers) != number_of_columns:
                            logger.warning(
                                '{}: Incorrect headers so they have been replaced by default values.'.format(file_name))
                        headers = []
                        headers = self.get_default_headers(number_of_columns)
            self.create_table(rows_list, headers)

    def detect_number_of_columns_in_rows_list(self, rows_list):
        """Widest comma-split row length across *rows_list*."""
        max_lenght = 0
        for row in rows_list:
            lenght = len(row.split(','))
            if lenght > max_lenght:
                max_lenght = lenght
        return max_lenght

    def detect_max_number_of_instances(self):
        """Length of the longest column (i.e. the row count)."""
        maximum = 0
        for column in self.columns:
            if maximum < len(column.instances):
                maximum = len(column.instances)
        return maximum

    def idx(self, name):
        """Index of the column called *name*; logs and returns None if absent."""
        for column in self.columns:
            if column.name == name:
                return self.columns.index(column)
        logger.warning('There is no column with that name.')

    def sort_table(self, key_column_name, names, descending=False):
        """Sort rows in place by *key_column_name* ('time' columns sort by
        elapsed seconds, everything else as int) keeping columns *names*."""
        list_dict_table = self.get_list_dict(column_names=names)
        for column in self.columns:
            if column.name == key_column_name:
                if column.content_type == 'time':
                    list_dict_table.sort(key=lambda row: to_timedelta(
                        row[key_column_name]).total_seconds(), reverse=descending)
                else:
                    list_dict_table.sort(key=lambda row: int(
                        row[key_column_name]), reverse=descending)
        self.create_table(list_dict_table=list_dict_table, column_names=names)

    def get_list_dict(self, rows_list=None, column_names=None):
        """Rows as a list of {column_name: cell} dicts; short rows are
        padded with '' for missing trailing cells."""
        if rows_list == None:
            rows_list = self.get_rows_list()
        if column_names == None:
            column_names = self.get_names_list()
        table = []
        row_counter = 0
        for row in rows_list:
            splitted_row = row.split(',')
            table.append(dict())
            for i in range(len(column_names)):
                if len(splitted_row) >= (i + 1):
                    content = splitted_row[i].replace('\n', '')
                    content = content.strip()
                    table[row_counter][column_names[i]] = content
                else:
                    table[row_counter][column_names[i]] = ''
            row_counter += 1
        return table

    def get_list_list(self, start=0):
        """Rows as a list of string lists, using columns from *start* on;
        missing cells become the literal string 'None'."""
        number_of_rows = self.detect_max_number_of_instances()
        # Create rows scaffolding
        rows = []
        for _ in range(number_of_rows):
            rows.append([])
        # Collect column values into a row (list), convert to strings
        # and add empty instead of empty cells
        for column in self.columns[start:]:
            for counter in range(number_of_rows):
                if len(column.instances) <= counter:
                    rows[counter].append('None')
                else:
                    rows[counter].append(str(column.instances[counter]))
        return rows

    def create_table(self, rows_list=None, column_names=None, list_dict_table=None):
        """Rebuild self.columns from raw rows or an existing list-of-dicts.

        Exactly one of *rows_list* / *list_dict_table* should be supplied.
        """
        self.columns = []
        if list_dict_table == None and rows_list != None:
            list_dict_table = self.get_list_dict(rows_list, column_names)
        elif list_dict_table == None and rows_list == None:
            logger.error(
                'Create table error: needed rows list or list_dict_table to create table.')
        if column_names == None:
            column_names = self.get_names_list()
        for name in column_names:
            instances = []
            for row in list_dict_table:
                if row[name] != None:
                    cell = row[name].replace('\n', '')
                    cell = cell.strip()
                    instances.append(cell)
                elif row[name] == None:
                    instances.append('')
            self.columns.append(Column(name, instances))

    def get_rows_list(self):
        """Rows re-serialized as comma-joined, newline-terminated strings."""
        rows = self.get_list_list()
        # Join columns in rows
        counter = 0
        for _ in rows:
            if rows[counter] is not None:
                row_without_none = []
                for field in rows[counter]:
                    if field is not None:
                        row_without_none.append(field)
                    else:
                        row_without_none.append('None')
                rows[counter] = row_without_none
            rows[counter] = ','.join(rows[counter])
            rows[counter] = rows[counter].replace('\n', '') + '\n'
            counter += 1
        return rows

    def add_column(self, name, instances, header=None):
        """Append a new Column; header comes from headers_dict when mapped.

        NOTE(review): the *header* parameter is accepted but never used --
        confirm whether it should be forwarded to Column().
        """
        new_column = Column(name, instances)
        self.columns.append(new_column)
        if new_column.name in self.headers_dict:
            self.columns[-1].header = self.headers_dict[new_column.name]

    def insert_column(self, name, instances, col_idx):
        """Insert a new Column at *col_idx*; failures are logged only."""
        try:
            new_column = Column(name, instances)
            self.columns.insert(col_idx, new_column)
            if new_column.name in self.headers_dict:
                self.columns[col_idx].header = self.headers_dict[new_column.name]
        except:
            logger.error('Cannot insert a column.')

    def get_default_headers(self, number_of_columns):
        """Generated fallback headers: column_1 .. column_N."""
        headers_list = []
        for index in range(1, number_of_columns + 1):
            headers_list.append('column_{}'.format(index))
        return headers_list

    def get_headers_list(self):
        """Display headers per column (headers_dict mapping wins over name)."""
        headers_list = []
        for column in self.columns:
            if self.headers_dict != None and column.name in self.headers_dict:
                headers_list.append(self.headers_dict[column.name])
            else:
                headers_list.append(column.name)
        return headers_list

    def get_names_list(self):
        """Column names ordered by their position in self.indexes."""
        names_list = []
        idx = 0
        for name in self.indexes:
            if self.indexes[name] == idx:
                names_list.append(name)
            idx += 1
        return names_list

    def create_table_from_table(self, table):
        """Adopt another table's Column objects (shared, not copied)."""
        self.columns = []
        for column in table.columns:
            self.columns.append(column)
            if column.name in self.headers_dict:
                self.columns[-1].header = self.headers_dict[column.name]

    def update_names(self):
        """Re-apply names from self.indexes onto the existing columns."""
        names = self.get_names_list()
        indexing_range = range(min(len(names), len(self.columns)))
        for i in indexing_range:
            self.columns[i].name = names[i]

    def print_table(self, how_many_rows=None, print_separator=' '):
        """Pretty-print the table to stdout, space-padding columns when the
        widest row fits an assumed 110-character console."""
        rows = self.get_rows_list()
        for_print_headers = self.get_headers_list()
        # Align spaces betweenen columns if it is possible
        max_row_lenght = 0
        counter = 0
        for row in rows:
            if max_row_lenght < len(row):
                max_row_lenght = len(row)
        assumpted_console_width = 110
        if max_row_lenght < assumpted_console_width:
            # Prepare headers
            for counter in range(len(for_print_headers)):
                if len(for_print_headers[counter]) < self.columns[counter].detect_max_instance_lenght():
                    diff = self.columns[counter].detect_max_instance_lenght(
                    ) - len(for_print_headers[counter])
                    for_print_headers[counter] += ' ' * diff
            # Prepare rows
            for counter in range(len(rows)):
                rows[counter] = rows[counter].replace('\n', '')
                one_row_list = rows[counter].split(',')
                for col_idx in range(len(one_row_list)):
                    if self.columns[col_idx].detect_max_instance_lenght() < len(for_print_headers[col_idx]):
                        diff = len(
                            for_print_headers[col_idx]) - len(one_row_list[col_idx])
                        one_row_list[col_idx] += ' ' * diff
                    elif len(one_row_list[col_idx]) < self.columns[col_idx].detect_max_instance_lenght():
                        diff = self.columns[col_idx].detect_max_instance_lenght(
                        ) - len(one_row_list[col_idx])
                        one_row_list[col_idx] += ' ' * diff
                rows[counter] = ','.join(one_row_list) + '\n'
        # Print headers
        print(print_separator.join(for_print_headers))
        # Print rows
        counter = 0
        for row in rows:
            row = row.replace(',', print_separator)
            print(row, end='')
            if how_many_rows != None and counter + 1 >= how_many_rows:
                break
            counter += 1

    def save_file(self, new_file_name):
        """Write headers plus all rows to *new_file_name* as UTF-8 CSV."""
        # Save rows list without last new line transition
        rows_list = self.get_rows_list()
        with open(new_file_name, mode='w', encoding='UTF-8') as new_file:
            # Write headers
            new_file.write(','.join(self.get_headers_list()) + '\n')
            # Write rows
            for row in rows_list:
                new_file.write(row)
class CSV_gateway(CSVFile):
    """CSVFile subclass that sniffs the field separator of raw text input
    and (optionally) rewrites it to commas before building the table."""

    def __init__(self, source, correct=False):
        """Build a table from a file path or a pre-read list of raw lines.

        :param source: path to a .csv/.txt file, or a list of raw lines
        :param correct: when True, replace the detected separator with ','
        """
        if isinstance(source, str):
            rows_list = CSV_gateway.get_rows(source)
        else:
            rows_list = source
        separator_parameters = CSV_gateway.detect_separator(rows_list)
        if correct:
            rows_list = CSV_gateway.replace_separator(
                rows_list, separator_parameters['original_character'])
        # 'default' is intentionally not a valid headers list, so CSVFile
        # falls back to generated column_N headers.
        CSVFile.__init__(self, rows_list=rows_list, headers='default')

    @staticmethod
    def get_rows(source_file_name):
        """Read all raw lines of a file; log and return [] on failure."""
        rows_list = []
        try:
            with open(source_file_name, mode='r', encoding='UTF-8-sig') as raw_file:
                for row in raw_file:
                    rows_list.append(row)
        except Exception:  # was a bare except; keep KeyboardInterrupt working
            logger.error('Cannot read a file {0}'.format(source_file_name))
        return rows_list

    @staticmethod
    def detect_separator(rows_list):
        """Guess the field separator used in *rows_list*.

        The candidate whose per-line count is most stable (lowest sample
        standard error) wins; a candidate absent from some line is
        disqualified with the sentinel 9999.  Returns a dict with keys
        'original_character' and 'number_in_line'.
        """
        def standard_error(number_list):
            # Sample standard deviation of the per-line separator counts.
            if len(number_list) > 1:
                avg = sum(number_list) / len(number_list)
                big_sum = 0
                for num in number_list:
                    big_sum += (avg - num) * (avg - num)
                return math.sqrt(big_sum / (len(number_list) - 1))
            elif len(number_list) == 1:
                logger.error(
                    'List have only one element. Cannot obtain standard error.')
                return None
            else:
                logger.error(
                    'List have no elements. Cannot obtain standard error.')
                return None

        # Guard: empty input used to crash with ZeroDivisionError in the
        # averages computation below.
        if not rows_list:
            logger.warning(
                'Cannot detect separator: default separator set (",").')
            return {'original_character': ',', 'number_in_line': 0}
        separators = [',', '|', ' ', '.']
        separators_standard_errors = []
        separators_avarages = []
        # Calculate separators characteristics
        for sep in separators:
            separators_in_line = []
            for row in rows_list:
                separators_in_line.append(row.count(sep))
            if sum(separators_in_line) < len(rows_list):
                separators_standard_errors.append(9999)
            else:
                se = standard_error(separators_in_line)
                if se is None:
                    se = 9999
                separators_standard_errors.append(se)
            separators_avarages.append(
                round(sum(separators_in_line) / len(separators_in_line)))
        # Based on characteristics select correct separator
        counter = 0
        for se in separators_standard_errors:
            if se == 9999:
                counter += 1
        if counter == len(separators_standard_errors):
            logger.warning(
                'Cannot detect separator: default separator set (",").')
            correct_separator_index = 0
        else:
            correct_separator_index = separators_standard_errors.index(
                min(separators_standard_errors))
        separator = {}
        separator['original_character'] = separators[correct_separator_index]
        separator['number_in_line'] = separators_avarages[correct_separator_index]
        return separator

    @staticmethod
    def replace_separator(rows_list, current_separator, target_separator=','):
        """Replace *current_separator* with *target_separator* in every line
        containing it (in place); lines without it are left untouched."""
        rows_without_separator_list = []
        row_counter = 0
        for row in rows_list:
            if row.count(current_separator) > 0:
                rows_list[row_counter] = row.replace(
                    current_separator, target_separator)
                row_counter += 1
            else:
                rows_without_separator_list.append(row)
                row_counter += 1
        return rows_list

    @staticmethod
    def save_to_csv(new_file_name, rows_list):
        """Write raw lines verbatim to *new_file_name* (UTF-8)."""
        # Write cleaned content into a new csv file
        try:
            with open(new_file_name, mode='w', encoding='UTF-8') as cleaned_file:
                for element in rows_list:
                    cleaned_file.write(element)
        except Exception:  # was a bare except; log instead of swallowing all
            logger.error('Save to {} failed.'.format(new_file_name))

    @staticmethod
    def check_file_format(file_name, f_type=('.csv', '.txt')):
        """True when *file_name* ends with one of the accepted extensions.

        The default was a mutable list; a tuple behaves identically for the
        membership test and cannot be mutated across calls.
        """
        if file_name[-4:] in f_type:
            return True
        else:
            return False

    @staticmethod
    def get_data_for_chart(file_path='source_file.csv', stacked=False, type_expected='float',
                           labels_column_index=0, values_columns_indexes=None, first_row_index=1):
        """Extract (labels, values, legend, values_type) for plotting.

        :param file_path: .csv/.txt file to read
        :param stacked: False -> one value column; True -> every column from
            min(values_columns_indexes) onward
        :param type_expected: unused -- kept for interface compatibility
        :param labels_column_index: column used for x-axis labels
        :param values_columns_indexes: indexes of the value column(s)
        :param first_row_index: first data row (rows above are headers)
        On any problem a placeholder (labels, values) pair describing the
        error is returned instead of raising.

        Fixes vs. the original: 'is'/'is not' comparisons with str literals
        replaced by '==', mutable default argument removed, and the two
        identical legend branches collapsed into one loop.
        """
        if values_columns_indexes is None:
            values_columns_indexes = [1]
        labels = ['empty']
        values = [0]
        legend = []
        values_type = 'empty'
        if CSV_gateway.check_file_format(file_path) and os.path.isfile(file_path):
            table = CSV_gateway(file_path)
            # Prevent indexing beyond array
            if len(table.columns) < max(values_columns_indexes) + 1:
                values_columns_indexes = [0]
            if table.detect_max_number_of_instances() < first_row_index + 1:
                first_row_index = 0
            min_val_idx = min(values_columns_indexes)
            if stacked:
                values_columns_indexes = []
                max_val_idx = len(table.columns)
                for i in range(min_val_idx, max_val_idx + 1):
                    values_columns_indexes.append(i)
            else:
                max_val_idx = max(values_columns_indexes)
            labels = table.columns[labels_column_index].instances[first_row_index:]
            # Legend: the header (first) instance of every value column.
            for column in table.columns[min_val_idx:max_val_idx + 1]:
                legend.append(column.instances[0])
            if stacked is False:
                # Single series
                table.columns[min_val_idx].replace_empty()
                values = table.columns[min_val_idx].instances[first_row_index:]
                values_type = Column.detect_type(values)
                if values_type == 'time':
                    for i in range(len(values)):
                        values[i] = to_seconds(values[i])
            else:
                # Stacked series
                for i, column in enumerate(table.columns):
                    table.columns[i].replace_empty()
                values = table.get_list_list(min_val_idx)[first_row_index:]
                # Detect type of every row
                types_list = []
                for i in range(len(values)):
                    types_list.append(Column.detect_type(values[i]))
                if all(typo == 'float' or typo == 'int' for typo in types_list):
                    values_type = 'float'
                    for i in range(len(values)):
                        for j in range(len(values[i])):
                            values[i][j] = float(values[i][j])
                else:
                    values_type = 'string'
        elif os.path.isfile(file_path):
            # Existing file with a non-.csv/.txt extension.
            values_type = 'binary'
        if values_type == 'string':
            labels = ['Provided values are inappriopriate.']
            values = [0]
        elif values_type == 'binary':
            labels = ['Only .csv or .txt formats are allowed.',
                      'Inappropriate file type.']
            values = [0, 0]
        elif len(values) != len(labels):
            labels = ['Lenght of values list is not equal lenght of names list']
            values = [0]
        elif not os.path.isfile(file_path):
            labels = ['File not found.']
            values = [0]
        return labels, values, legend, values_type
class Column:
    """One named CSV column holding its raw string instances plus detected
    content type ('int', 'float', 'time', 'datetime' or 'string').

    Fixes vs. the original: identity comparisons with str literals
    (``is ''``, ``is 'int'``, ...) replaced by equality, and ``convert_type``
    is initialized so convert=True can never raise UnboundLocalError.
    """

    # NOTE(review): class attributes act as shared defaults; __init__ rebinds
    # them per instance.  `treshold_type` is never used in this file.
    name = None
    header = None
    content_type = None
    type_without_headers = None
    instances = None
    treshold_type = None
    header_first_row = False

    def __init__(self, name, instances, header=None, convert=False, replace_empty=True):
        """Wrap *instances* (list of cell strings) as a column called *name*.

        :param header: display header; defaults to *name*
        :param convert: convert instances to int/float when the type allows
        :param replace_empty: replace '' cells with '0'
        """
        self.name = name
        if header is None:
            self.header = name
        else:
            self.header = header
        self.instances = instances
        if replace_empty:
            self.replace_empty(without_first_row=True)
        # A first row that breaks the type of the rest is treated as a header.
        self.type_without_headers = Column.detect_type(self.instances[1:])
        self.content_type = Column.detect_type(self.instances)
        if self.type_without_headers != self.content_type:
            self.header_first_row = True
        if replace_empty:
            self.replace_empty()
        if convert:
            min_range = 0
            convert_type = None  # defensive: never UnboundLocalError below
            if self.content_type == self.type_without_headers:
                convert_type = self.content_type
            elif self.type_without_headers != 'string':
                convert_type = self.type_without_headers
                min_range = 1  # skip the header row
            for i in range(min_range, len(self.instances)):
                if convert_type == 'int':
                    self.instances[i] = int(self.instances[i])
                elif convert_type == 'float':
                    self.instances[i] = float(self.instances[i])

    def replace_empty(self, without_first_row=False):
        '''Replace empty fields with zeros (skipping a detected header row).'''
        if self.header_first_row or without_first_row:
            start = 1
        else:
            start = 0
        for i, instance in enumerate(self.instances[start:]):
            if instance == '':
                self.instances[i + start] = '0'

    @staticmethod
    def detect_type(instances):
        """Classify *instances* as 'int', 'float', 'datetime', 'time' or
        'string'; None entries are ignored for the decision."""
        int_pattern = r'[0-9]+$'
        float_pattern = r'[0-9]+\.?[0-9]+$'
        datetime_pattern = r'[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}$'
        time_pattern = r'[0-9]{1,}:[0-9]{2}:[0-9]{2}$'
        column_type = ''
        none_counter = 0
        int_counter = 0
        float_counter = 0
        datetime_counter = 0
        time_counter = 0
        for instance in instances:
            if type(instance) is not str:
                instance = str(instance)
            if instance == None:
                none_counter += 1
            elif re.match(int_pattern, instance):
                int_counter += 1
            elif re.match(datetime_pattern, instance):
                datetime_counter += 1
            elif re.match(time_pattern, instance):
                time_counter += 1
            elif re.match(float_pattern, instance):
                float_counter += 1
            else:
                pass
        lenght = len(instances)
        if int_counter + none_counter == lenght:
            column_type = 'int'
        elif datetime_counter + none_counter == lenght:
            column_type = 'datetime'
        elif time_counter + none_counter == lenght:
            column_type = 'time'
        elif float_counter + none_counter + int_counter == lenght:
            # Integers are acceptable inside a float column.
            column_type = 'float'
        else:
            column_type = 'string'
        return column_type

    def detect_max_instance_lenght(self):
        """Longest cell length (header length substitutes for None cells)."""
        maximum = 0
        for instance in self.instances:
            if instance != None and maximum < len(instance):
                maximum = len(instance)
            elif instance == None and maximum < len(self.header):
                maximum = len(self.header)
        return maximum
| 20,597 | 1,513 | 69 |
e73b49f0245f75c58acf2aaf368e26b875525419 | 527 | py | Python | tests/test_receipe.py | MrLeeh/beerpy | 1bbec29a39b01a9d8e54c475de29c768dfd27597 | [
"MIT"
] | null | null | null | tests/test_receipe.py | MrLeeh/beerpy | 1bbec29a39b01a9d8e54c475de29c768dfd27597 | [
"MIT"
] | null | null | null | tests/test_receipe.py | MrLeeh/beerpy | 1bbec29a39b01a9d8e54c475de29c768dfd27597 | [
"MIT"
] | null | null | null | import pytest
from beerpy.receipe import hop_quantity, malt_composition, PILSENER_MALT, \
MUNICH_MALT
from beerpy.units.gravity import Gravity
| 26.35 | 75 | 0.650854 | import pytest
from beerpy.receipe import hop_quantity, malt_composition, PILSENER_MALT, \
MUNICH_MALT
from beerpy.units.gravity import Gravity
def test_malt_composition():
    # 22 l of 14 °P wort, grist split 80 % Pilsener / 20 % Munich malt:
    # expect 5.44 kg total and the per-malt breakdown below.
    result = malt_composition(
        22, Gravity(14), [(PILSENER_MALT, 0.8), (MUNICH_MALT, 0.2)]
    )
    assert result[0] == 5.44
    assert result[1][0] == ('Pilsener Malz', 4.35)
    assert result[1][1] == ('Münchener Malz', 1.09)
def test_hop_quantity():
    # 40 IBU target, 5.5 % alpha acid, 22 l batch, 60 min boil, 20 °P wort.
    result = hop_quantity(40, 5.5, 22, 60, Gravity(20))
    assert "{:.2f}".format(result) == "82.84"
| 332 | 0 | 46 |
fa62c70a7a9c4ffda7faccbae1d471d4594d9dce | 22,791 | py | Python | tools/visual_utils/visualize_utils.py | ElectronicElephant/OpenPCDet-PX-fork | 7f5f792c38d1052c550e34bd7acc085bc4bebc4f | [
"Apache-2.0"
] | 1 | 2021-07-12T07:07:40.000Z | 2021-07-12T07:07:40.000Z | tools/visual_utils/visualize_utils.py | ElectronicElephant/OpenPCDet-PX-fork | 7f5f792c38d1052c550e34bd7acc085bc4bebc4f | [
"Apache-2.0"
] | null | null | null | tools/visual_utils/visualize_utils.py | ElectronicElephant/OpenPCDet-PX-fork | 7f5f792c38d1052c550e34bd7acc085bc4bebc4f | [
"Apache-2.0"
] | 1 | 2021-06-30T08:54:25.000Z | 2021-06-30T08:54:25.000Z | import mayavi.mlab as mlab
import numpy as np
import torch
import cv2
import matplotlib.pyplot as plt
# RGB colors (0..1 floats) indexed by predicted class id, used when drawing
# 3D boxes with mayavi.
box_colormap = [
    [1, 1, 1],
    [0, 1, 0],
    [0, 1, 1],
    [1, 1, 0],
]
# Class id -> human-readable label -- presumably mirrors the dataset class
# config used at training time; TODO confirm.
label_dict = {0:"background", 1:"Car", 2:"Ped", 3:"Cycler", 4:"Truck"}
# Class id (as a string key) -> 0..255 color triple for cv2 image drawing.
# NOTE(review): channel order (RGB vs BGR) depends on the cv2 drawing code
# that consumes this -- verify against draw_one_frame_on_img.
color_dict = {"0":[255, 255, 255], "1":[255, 255, 0], "2":[51, 153, 255], "3":[255, 0, 255], "4":[0, 0, 0]}
def rotate_points_along_z(points, angle):
    """
    Rotate batched points around the z-axis.
    Args:
        points: (B, N, 3 + C), xyz in the first three channels, extra
            feature channels (if any) are passed through unchanged
        angle: (B), angle along z-axis, angle increases x ==> y
    Returns:
        points_rot: (B, N, 3 + C); returned as numpy if the input was numpy
    """
    # check_numpy_to_torch is defined elsewhere in this module -- presumably
    # converts numpy input to a torch tensor and reports whether a
    # conversion happened (so the output type can match the input); confirm.
    points, is_numpy = check_numpy_to_torch(points)
    angle, _ = check_numpy_to_torch(angle)
    cosa = torch.cos(angle)
    sina = torch.sin(angle)
    zeros = angle.new_zeros(points.shape[0])
    ones = angle.new_ones(points.shape[0])
    # Per-batch 3x3 rotation matrix about z, assembled row-major.
    rot_matrix = torch.stack((
        cosa, sina, zeros,
        -sina, cosa, zeros,
        zeros, zeros, ones
    ), dim=1).view(-1, 3, 3).float()
    # Rotate only the xyz part, then re-attach the untouched feature channels.
    points_rot = torch.matmul(points[:, :, 0:3], rot_matrix)
    points_rot = torch.cat((points_rot, points[:, :, 3:]), dim=-1)
    return points_rot.numpy() if is_numpy else points_rot
def boxes_to_corners_3d(boxes3d):
    """
    Convert center-form 3D boxes to their 8 corner points.
        7 -------- 4
       /|         /|
      6 -------- 5 .
      | |        | |
      . 3 -------- 0
      |/         |/
      2 -------- 1
    Args:
        boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
    Returns:
        corners3d: (N, 8, 3), corners ordered as in the sketch above;
        returned as numpy if the input was numpy
    """
    boxes3d, is_numpy = check_numpy_to_torch(boxes3d)
    # Unit-cube corner offsets in box-local coordinates (halved so that
    # scaling by dx/dy/dz spans the full box size).
    template = boxes3d.new_tensor((
        [1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1],
        [1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, 1],
    )) / 2
    corners3d = boxes3d[:, None, 3:6].repeat(1, 8, 1) * template[None, :, :]
    # Rotate each box's corners by its heading, then translate to its center.
    corners3d = rotate_points_along_z(corners3d.view(-1, 8, 3), boxes3d[:, 6]).view(-1, 8, 3)
    corners3d += boxes3d[:, None, 0:3]
    return corners3d.numpy() if is_numpy else corners3d
def draw_corners3d(corners3d, fig, color=(1, 1, 1), line_width=2, cls=None, tag='', max_num=500, tube_radius=None):
    """
    Draw up to *max_num* wireframe boxes from their 8 corner points.
    :param corners3d: (N, 8, 3), e.g. the output of boxes_to_corners_3d
    :param fig: mayavi figure to draw into
    :param color: RGB tuple in [0, 1] used for all edges and labels
    :param line_width: edge line width
    :param cls: optional per-box labels; formatted '%.2f' when an ndarray
        (scores), '%s' otherwise
    :param tag: unused -- kept for interface compatibility
    :param max_num: draw at most this many boxes
    :param tube_radius: radius for mlab.plot3d tubes (None = flat lines)
    :return: fig
    """
    import mayavi.mlab as mlab
    num = min(max_num, len(corners3d))
    for n in range(num):
        b = corners3d[n]  # (8, 3)

        if cls is not None:
            if isinstance(cls, np.ndarray):
                mlab.text3d(b[6, 0], b[6, 1], b[6, 2], '%.2f' % cls[n], scale=(0.3, 0.3, 0.3), color=color, figure=fig)
            else:
                mlab.text3d(b[6, 0], b[6, 1], b[6, 2], '%s' % cls[n], scale=(0.3, 0.3, 0.3), color=color, figure=fig)

        # For each k: one bottom edge (k, k+1), one top edge (k+4, k+5) and
        # one vertical pillar (k, k+4).
        for k in range(0, 4):
            i, j = k, (k + 1) % 4
            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                        line_width=line_width, figure=fig)
            i, j = k + 4, (k + 1) % 4 + 4
            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                        line_width=line_width, figure=fig)

            i, j = k, k + 4
            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                        line_width=line_width, figure=fig)

        # Diagonal cross (0-5 and 1-4) marks one face -- presumably the
        # heading direction of the box; confirm against the corner layout.
        i, j = 0, 5
        mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                    line_width=line_width, figure=fig)
        i, j = 1, 4
        mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                    line_width=line_width, figure=fig)

    return fig
# def draw_on_image(image, calib, points=None, gt_boxes=None, ref_boxes=None, ref_scores=None, ref_labels=None):
# if points is not None:
# if not isinstance(points, np.ndarray):
# points = points.cpu().numpy()
# if ref_boxes is not None and not isinstance(ref_boxes, np.ndarray):
# ref_boxes = ref_boxes.cpu().numpy()
# if gt_boxes is not None and not isinstance(gt_boxes, np.ndarray):
# gt_boxes = gt_boxes.cpu().numpy()
# if ref_scores is not None and not isinstance(ref_scores, np.ndarray):
# ref_scores = ref_scores.cpu().numpy()
# if ref_labels is not None and not isinstance(ref_labels, np.ndarray):
# ref_labels = ref_labels.cpu().numpy()
# if gt_boxes is not None:
# corners3d = boxes_to_corners_3d(gt_boxes)
# for i in range(len(corners3d)):
# image = draw_one_frame_on_img(image, corners3d[i], color_dict, label_dict, 0, calib, pretty=True)
# if ref_boxes is not None and len(ref_boxes) > 0:
# ref_corners3d = boxes_to_corners_3d(ref_boxes)
# for i in range(len(ref_corners3d)):
# if ref_labels is None:
# label = 0
# else:
# label = ref_labels[i]
# image = draw_one_frame_on_img(image, ref_corners3d[i], color_dict, label_dict, label, calib, pretty=True)
# return image
# def draw_one_frame_on_img(img_draw, corners3d, color_dict, label_dict, label, calib, pretty = True):
# box_list = [(0, 1), (0, 2), (0, 4), (1, 5), (1, 3), (2, 6), (2, 3), (3, 7), (4, 6), (4, 5), (5, 7), (6, 7)]
# rect_min_w_h = [50, 20]
# if not str(label) in color_dict.keys():
# obj_color = (255, 255, 255)
# else:
# obj_color = color_dict[str(label)]
# plane_cor, plane_obj_cen = project_3D_to_image(corners3d, calib)
# print(plane_cor)
# if pretty:
# img_draw = draw_color_on_contour_roi(img_draw, plane_cor, obj_color)
# for point_pair in box_list:
# cv2.line(img_draw, (int(plane_cor[0][point_pair[0]]), int(plane_cor[1][point_pair[0]])),
# (int(plane_cor[0][point_pair[1]]), int(plane_cor[1][point_pair[1]])), obj_color, 1)
# for index in range(8):
# cv2.circle(img_draw, (int(plane_cor[0][index]), int(plane_cor[1][index])), 2, (0, 255, 255), -1)
# cv2.circle(img_draw, (int(plane_obj_cen[0]), int(plane_obj_cen[1])), 3, (255, 0, 255), -1)
# if not label_dict == None:
# left_corner_cor = []
# image_label = img_draw.copy()
# for index in range(8):
# left_corner_cor.append([plane_cor[0][index], plane_cor[1][index]])
# left_corner_cor.sort(key=lambda x: x[0])
# left_corner_cor = left_corner_cor[0:2]
# left_corner_cor.sort(key=lambda x: x[1])
# left_corner_cor = left_corner_cor[0]
# rect_left_top = (int(left_corner_cor[0]), int(left_corner_cor[1] - rect_min_w_h[1]))
# rect_right_down = (int(left_corner_cor[0] + rect_min_w_h[0]), int(left_corner_cor[1]))
# cv2.rectangle(image_label, rect_left_top, rect_right_down, (102, 178, 255), -1)
# if label in label_dict:
# text = label_dict[label]
# else:
# text = "None"
# cv2.putText(image_label, text, (int(left_corner_cor[0]), int(left_corner_cor[1]) - 5), cv2.FONT_HERSHEY_DUPLEX,
# 0.5, (0, 0, 0), 1, cv2.LINE_AA)
# img_draw = cv2.addWeighted(img_draw, 0.4, image_label, 0.6, 0)
# return img_draw
# def draw_one_bv_frame(corners3d, color_dict):
# if plt.gcf().number > 1:
# plt.close('all')
# if plt.gcf().number < 1:
# plt.figure()
# plt.ion()
# fig = plt.gcf()
# ax = plt.gca()
# fig.set_size_inches(5, 12.5)
# # point_list = [(1, 3), (3, 7), (7, 5), (5, 1)]
# point_list = [(3, 7), (7, 6), (6, 2), (2, 3)]
# plt.cla()
#
# for cat in dets:
# for i in range(len(dets[cat])):
# if dets[cat][i, -1] > center_thresh:
# dim_ = dets[cat][i, 5:8]
# loc_ = dets[cat][i, 8:11]
# rot_y = dets[cat][i, 11]
# loc = np.array([loc_[0], loc_[1] - dim_[0] / 2, loc_[2]])
# dim = np.array([dim_[2], dim_[0], dim_[1]])
# if not str(cat) in obj_color_dict.keys():
# obj_color = (1, 1, 1)
# else:
# obj_color = (obj_color_dict[str(cat)][2] / 255, obj_color_dict[str(cat)][1] / 255,
# obj_color_dict[str(cat)][0] / 255)
# corner_point = self.generate_obj_cam_cor(np.array(loc), np.array(dim), np.array([rot_y, 0, 0]))
# for point_ in point_list:
# ax.plot((corner_point[0][point_[0]], corner_point[0][point_[1]]),
# (corner_point[2][point_[0]], corner_point[2][point_[1]]), color=obj_color)
# ax.axis(xmin=-25, xmax=25)
# ax.axis(ymin=0, ymax=100)
# ax.grid()
# plt.title("bird view")
# plt.xlabel("horizontal distance/m")
# plt.ylabel("vertical distance/m")
# fig.canvas.draw()
# img_from_mat = cv2.cvtColor(np.asarray(fig.canvas.buffer_rgba()), cv2.COLOR_RGBA2BGR)
# img_from_mat = cv2.resize(img_from_mat, (400, 1000))
#
# return img_from_mat
# def project_3D_to_image(corner_point, calib):
# calib_ = np.array(calib)[:, 0:3]
# center_3d = corner_point[0]
# for i in range(1, 4):
# center_3d += corner_point[i]
# center_3d = center_3d/4
# center_3d = center_3d[np.newaxis,...]
# corner_point = np.append(center_3d, corner_point, axis=0)
# corner_point = corner_point.T
# tmp = corner_point[0,...]
# corner_point[0, ...] = corner_point[2,...]
# corner_point[2, ...] = tmp
# tmp = corner_point[0,...]
# corner_point[0, ...] = corner_point[1,...]
# corner_point[1, ...] = tmp
# for i in range(9):
# if corner_point[2, i] < 0:
# corner_point[0, i] = -corner_point[0, i]
# corner_point[1, i] = -corner_point[1, i]
# corner_point = np.matmul(calib_, corner_point)
# plane_cor = corner_point[0:2, :]
# for i in range(9):
# plane_cor[0][i] = corner_point[0][i] / corner_point[2][i]
# plane_cor[1][i] = corner_point[1][i] / corner_point[2][i]
# plane_cen = plane_cor[:, 0]
# plane_cor = plane_cor[:, 1:]
# #
# return plane_cor, plane_cen
# def draw_color_on_contour_roi(self, image, plane_cor, color):
# plane_list = [[2, 3, 7, 6], [0, 1, 3, 2], [4, 5, 7, 6], [0, 1, 5, 4], [0, 2, 6, 4], [1, 3, 7 ,5]]
# contour = []
# image_draw_contour = image.copy()
# for plane in plane_list:
# contour_ = []
# for index in plane:
# contour_.append([plane_cor[0][index], plane_cor[1][index]])
# contour.append(np.array(contour_).reshape((-1,1,2)).astype(np.int32))
# for contour_ in contour:
# cv2.drawContours(image_draw_contour, [contour_], -1, color, thickness = -1)
# image = cv2.addWeighted(image, 0.75, image_draw_contour, 0.25, 0)
# return image
# def generate_obj_cam_cor(self, position, size, ZYX):
# pitch = ZYX[0]
# roll = ZYX[1]
# yaw = ZYX[2]
# R_roll = np.array([[1 , 0 , 0 ],
# [0 , np.cos(roll), -np.sin(roll)],
# [0 , np.sin(roll), np.cos(roll)]])
# R_pitch = np.array([[np.cos(pitch) , 0 , np.sin(pitch)],
# [0 , 1 , 0 ],
# [-np.sin(pitch), 0 , np.cos(pitch)]])
# R_yaw = np.array([[np.cos(yaw) , -np.sin(yaw), 0 ],
# [np.sin(yaw) , np.cos(yaw) , 0 ],
# [0 , 0 , 1 ]])
# R = np.matmul(R_pitch, R_roll)
# R = np.matmul(R_yaw, R)
# size = 0.5 * size
# arithm_list = []
# for i in ['+','-']:
# for j in ['+', '-']:
# for k in ['+', '-']:
# arithm_list.append(i+j+k)
# corner_point = np.array([0, 0, 0])
# for arithm in arithm_list:
# point = np.array([eval(str(0) + arithm[0] + str(size[0])),eval(str(0) + arithm[1] + str(size[1])),eval(str(0) + arithm[2] + str(size[2]))])
# corner_point = np.vstack((corner_point, point))
# corner_point = corner_point.T
# corner_point = np.matmul(R, corner_point)
# for i in range(9):
# corner_point[:,i] = corner_point[:,i] + position
# return corner_point
| 43.66092 | 184 | 0.555263 | import mayavi.mlab as mlab
import numpy as np
import torch
import cv2
import matplotlib.pyplot as plt
# Unit-range color triplets, indexed per class when drawing boxes in mayavi.
box_colormap = [
    [1, 1, 1],
    [0, 1, 0],
    [0, 1, 1],
    [1, 1, 0],
]
# Class id -> human-readable label.
label_dict = {0:"background", 1:"Car", 2:"Ped", 3:"Cycler", 4:"Truck"}
# Class id (as string) -> 0-255 color triplet for image drawing.
# NOTE(review): channel order is presumably BGR since these feed cv2 calls — confirm.
color_dict = {"0":[255, 255, 255], "1":[255, 255, 0], "2":[51, 153, 255], "3":[255, 0, 255], "4":[0, 0, 0]}
def check_numpy_to_torch(x):
    """Ensure *x* is usable as a torch tensor.

    Returns a ``(value, was_numpy)`` pair: numpy arrays are converted to
    float32 tensors (flag True); anything else passes through (flag False).
    """
    if not isinstance(x, np.ndarray):
        return x, False
    return torch.from_numpy(x).float(), True
def rotate_points_along_z(points, angle):
    """
    Rotate point batches around the z-axis.

    Args:
        points: (B, N, 3 + C)
        angle: (B), angle along z-axis, angle increases x ==> y
    Returns:
        Rotated points, same shape and container type (numpy/torch) as input.
    """
    points, is_numpy = check_numpy_to_torch(points)
    angle, _ = check_numpy_to_torch(angle)

    cosa, sina = torch.cos(angle), torch.sin(angle)
    zeros = angle.new_zeros(points.shape[0])
    ones = angle.new_ones(points.shape[0])
    # Per-batch 3x3 rotation matrices, applied as points @ R.
    rot_matrix = torch.stack(
        (cosa, sina, zeros,
         -sina, cosa, zeros,
         zeros, zeros, ones), dim=1
    ).view(-1, 3, 3).float()
    # Rotate only xyz; carry any extra feature channels through unchanged.
    xyz_rot = torch.matmul(points[:, :, 0:3], rot_matrix)
    result = torch.cat((xyz_rot, points[:, :, 3:]), dim=-1)
    return result.numpy() if is_numpy else result
def boxes_to_corners_3d(boxes3d):
    """
        7 -------- 4
       /|         /|
      6 -------- 5 .
      | |        | |
      . 3 -------- 0
      |/         |/
      2 -------- 1
    Args:
        boxes3d:  (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center

    Returns:
        corners3d: (N, 8, 3), same container type (numpy/torch) as the input.
    """
    boxes3d, is_numpy = check_numpy_to_torch(boxes3d)

    # Half-extent sign template, one row per corner (order matches diagram).
    template = boxes3d.new_tensor((
        [1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1],
        [1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, 1],
    )) / 2

    extents = boxes3d[:, None, 3:6].repeat(1, 8, 1)
    corners = extents * template[None, :, :]
    # Rotate by each box heading, then translate to the box center.
    corners = rotate_points_along_z(corners.view(-1, 8, 3), boxes3d[:, 6]).view(-1, 8, 3)
    corners += boxes3d[:, None, 0:3]
    return corners.numpy() if is_numpy else corners
def visualize_pts(pts, fig=None, bgcolor=(0, 0, 0), fgcolor=(1.0, 1.0, 1.0),
                  show_intensity=False, size=(600, 600), draw_origin=True):
    """Render a point cloud into a mayavi figure, creating the figure on demand.

    ``pts`` may be None (only the figure is created/returned), a numpy array,
    or a tensor with a ``.cpu()`` method. With show_intensity, column 3 of
    ``pts`` colors the points.
    """
    if fig is None:
        fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=fgcolor, engine=None, size=size)
    if pts is None:
        return fig

    if not isinstance(pts, np.ndarray):
        pts = pts.cpu().numpy()

    if show_intensity:
        mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], pts[:, 3], mode='point',
                      colormap='gnuplot', scale_factor=1, figure=fig)
    else:
        mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], mode='point',
                      colormap='gnuplot', scale_factor=1, figure=fig)

    if draw_origin:
        # Origin marker cube plus 3-unit axis lines (x, y, z).
        mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='cube', scale_factor=0.2)
        for end, axis_color in (((3, 0, 0), (0, 0, 1)),
                                ((0, 3, 0), (0, 1, 0)),
                                ((0, 0, 3), (1, 0, 0))):
            mlab.plot3d([0, end[0]], [0, end[1]], [0, end[2]], color=axis_color, tube_radius=0.1)
    return fig
def draw_sphere_pts(pts, color=(0, 1, 0), fig=None, bgcolor=(0, 0, 0), scale_factor=0.2):
    """Draw points as spheres, with either a single color tuple or a per-point
    color array, plus an origin marker and 3-unit axis lines."""
    if not isinstance(pts, np.ndarray):
        pts = pts.cpu().numpy()
    if fig is None:
        fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=None, engine=None, size=(600, 600))
    # A (1, 3) color array is collapsed to a unit-range tuple; since it is no
    # longer an ndarray afterwards, the per-point branch below is skipped.
    if isinstance(color, np.ndarray) and color.shape[0] == 1:
        color = color[0]
        color = (color[0] / 255.0, color[1] / 255.0, color[2] / 255.0)
    if isinstance(color, np.ndarray):
        # Per-point coloring: build a uint8 RGBA lookup table and use each
        # point's index (the scalar passed to points3d) to pick its row.
        pts_color = np.zeros((pts.__len__(), 4), dtype=np.uint8)
        pts_color[:, 0:3] = color
        pts_color[:, 3] = 255
        G = mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], np.arange(0, pts_color.__len__()), mode='sphere',
                          scale_factor=scale_factor, figure=fig)
        G.glyph.color_mode = 'color_by_scalar'
        G.glyph.scale_mode = 'scale_by_vector'
        G.module_manager.scalar_lut_manager.lut.table = pts_color
    else:
        mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], mode='sphere', color=color,
                      colormap='gnuplot', scale_factor=scale_factor, figure=fig)
    # Origin marker cube and 3-unit axis lines.
    mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='cube', scale_factor=0.2)
    mlab.plot3d([0, 3], [0, 0], [0, 0], color=(0, 0, 1), line_width=3, tube_radius=None, figure=fig)
    mlab.plot3d([0, 0], [0, 3], [0, 0], color=(0, 1, 0), line_width=3, tube_radius=None, figure=fig)
    mlab.plot3d([0, 0], [0, 0], [0, 3], color=(1, 0, 0), line_width=3, tube_radius=None, figure=fig)
    return fig
def draw_grid(x1, y1, x2, y2, fig, tube_radius=None, color=(0.5, 0.5, 0.5)):
    """Draw the four edges of the axis-aligned rectangle (x1, y1)-(x2, y2) at z=0."""
    edges = (((x1, x1), (y1, y2)),
             ((x2, x2), (y1, y2)),
             ((x1, x2), (y1, y1)),
             ((x1, x2), (y2, y2)))
    for (xa, xb), (ya, yb) in edges:
        mlab.plot3d([xa, xb], [ya, yb], [0, 0], color=color,
                    tube_radius=tube_radius, line_width=1, figure=fig)
    return fig
def draw_multi_grid_range(fig, grid_size=20, bv_range=(-60, -60, 60, 60)):
    """Tile grid cells of side grid_size over the bird's-eye-view range
    (x_min, y_min, x_max, y_max)."""
    x_min, y_min, x_max, y_max = bv_range
    for gx in range(x_min, x_max, grid_size):
        for gy in range(y_min, y_max, grid_size):
            fig = draw_grid(gx, gy, gx + grid_size, gy + grid_size, fig)
    return fig
def draw_scenes(points=None, gt_boxes=None, ref_boxes=None, ref_scores=None, ref_labels=None):
    """Visualize a full scene: point cloud, BEV grid, ground-truth boxes
    (color=(0, 0, 1)) and reference/predicted boxes (color=(0, 1, 0) when no
    labels are given, otherwise per-class colors from box_colormap).

    All inputs may be numpy arrays or tensors; tensors are moved to CPU numpy.
    Boxes are (N, 7) [x, y, z, dx, dy, dz, heading].
    """
    # Normalize every input to a CPU numpy array.
    if points is not None:
        if not isinstance(points, np.ndarray):
            points = points.cpu().numpy()
    if ref_boxes is not None and not isinstance(ref_boxes, np.ndarray):
        ref_boxes = ref_boxes.cpu().numpy()
    if gt_boxes is not None and not isinstance(gt_boxes, np.ndarray):
        gt_boxes = gt_boxes.cpu().numpy()
    if ref_scores is not None and not isinstance(ref_scores, np.ndarray):
        ref_scores = ref_scores.cpu().numpy()
    if ref_labels is not None and not isinstance(ref_labels, np.ndarray):
        ref_labels = ref_labels.cpu().numpy()
    fig = visualize_pts(points)
    fig = draw_multi_grid_range(fig, bv_range=(0, -40, 80, 40))
    if gt_boxes is not None:
        corners3d = boxes_to_corners_3d(gt_boxes)
        fig = draw_corners3d(corners3d, fig=fig, color=(0, 0, 1), max_num=100)
    if ref_boxes is not None and len(ref_boxes) > 0:
        ref_corners3d = boxes_to_corners_3d(ref_boxes)
        if ref_labels is None:
            fig = draw_corners3d(ref_corners3d, fig=fig, color=(0, 1, 0), cls=ref_scores, max_num=100)
        else:
            # One draw call per class so each class gets its own color.
            for k in range(ref_labels.min(), ref_labels.max() + 1):
                cur_color = tuple(box_colormap[k % len(box_colormap)])
                mask = (ref_labels == k)
                fig = draw_corners3d(ref_corners3d[mask], fig=fig, color=cur_color, cls=ref_scores[mask], max_num=5)
    # Fixed camera pose for reproducible screenshots.
    mlab.view(azimuth=-179, elevation=54.0, distance=104.0, roll=90.0)
    return fig
def draw_corners3d(corners3d, fig, color=(1, 1, 1), line_width=2, cls=None, tag='', max_num=500, tube_radius=None):
    """
    Draw up to max_num 3D boxes, each given by its 8 corners, into a mayavi figure.

    :param corners3d: (N, 8, 3)
    :param fig: mayavi figure to draw into
    :param color: RGB tuple (0-1 range) for box edges and labels
    :param line_width: edge line width
    :param cls: optional per-box labels/scores drawn as 3D text at corner 6
    :param tag: unused
    :param max_num: cap on the number of boxes drawn
    :param tube_radius: tube radius for plot3d (None = flat lines)
    :return: the figure
    """
    import mayavi.mlab as mlab

    def _edge(b, i, j):
        # Draw one box edge between corner i and corner j.
        mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]],
                    color=color, tube_radius=tube_radius, line_width=line_width, figure=fig)

    for n in range(min(max_num, len(corners3d))):
        b = corners3d[n]  # (8, 3)
        if cls is not None:
            text = '%.2f' % cls[n] if isinstance(cls, np.ndarray) else '%s' % cls[n]
            mlab.text3d(b[6, 0], b[6, 1], b[6, 2], text, scale=(0.3, 0.3, 0.3), color=color, figure=fig)
        for k in range(0, 4):
            _edge(b, k, (k + 1) % 4)          # bottom face edge
            _edge(b, k + 4, (k + 1) % 4 + 4)  # top face edge
            _edge(b, k, k + 4)                # vertical edge
        # Diagonals across the 0-1-5-4 face.
        _edge(b, 0, 5)
        _edge(b, 1, 4)
    return fig
# def draw_on_image(image, calib, points=None, gt_boxes=None, ref_boxes=None, ref_scores=None, ref_labels=None):
# if points is not None:
# if not isinstance(points, np.ndarray):
# points = points.cpu().numpy()
# if ref_boxes is not None and not isinstance(ref_boxes, np.ndarray):
# ref_boxes = ref_boxes.cpu().numpy()
# if gt_boxes is not None and not isinstance(gt_boxes, np.ndarray):
# gt_boxes = gt_boxes.cpu().numpy()
# if ref_scores is not None and not isinstance(ref_scores, np.ndarray):
# ref_scores = ref_scores.cpu().numpy()
# if ref_labels is not None and not isinstance(ref_labels, np.ndarray):
# ref_labels = ref_labels.cpu().numpy()
# if gt_boxes is not None:
# corners3d = boxes_to_corners_3d(gt_boxes)
# for i in range(len(corners3d)):
# image = draw_one_frame_on_img(image, corners3d[i], color_dict, label_dict, 0, calib, pretty=True)
# if ref_boxes is not None and len(ref_boxes) > 0:
# ref_corners3d = boxes_to_corners_3d(ref_boxes)
# for i in range(len(ref_corners3d)):
# if ref_labels is None:
# label = 0
# else:
# label = ref_labels[i]
# image = draw_one_frame_on_img(image, ref_corners3d[i], color_dict, label_dict, label, calib, pretty=True)
# return image
# def draw_one_frame_on_img(img_draw, corners3d, color_dict, label_dict, label, calib, pretty = True):
# box_list = [(0, 1), (0, 2), (0, 4), (1, 5), (1, 3), (2, 6), (2, 3), (3, 7), (4, 6), (4, 5), (5, 7), (6, 7)]
# rect_min_w_h = [50, 20]
# if not str(label) in color_dict.keys():
# obj_color = (255, 255, 255)
# else:
# obj_color = color_dict[str(label)]
# plane_cor, plane_obj_cen = project_3D_to_image(corners3d, calib)
# print(plane_cor)
# if pretty:
# img_draw = draw_color_on_contour_roi(img_draw, plane_cor, obj_color)
# for point_pair in box_list:
# cv2.line(img_draw, (int(plane_cor[0][point_pair[0]]), int(plane_cor[1][point_pair[0]])),
# (int(plane_cor[0][point_pair[1]]), int(plane_cor[1][point_pair[1]])), obj_color, 1)
# for index in range(8):
# cv2.circle(img_draw, (int(plane_cor[0][index]), int(plane_cor[1][index])), 2, (0, 255, 255), -1)
# cv2.circle(img_draw, (int(plane_obj_cen[0]), int(plane_obj_cen[1])), 3, (255, 0, 255), -1)
# if not label_dict == None:
# left_corner_cor = []
# image_label = img_draw.copy()
# for index in range(8):
# left_corner_cor.append([plane_cor[0][index], plane_cor[1][index]])
# left_corner_cor.sort(key=lambda x: x[0])
# left_corner_cor = left_corner_cor[0:2]
# left_corner_cor.sort(key=lambda x: x[1])
# left_corner_cor = left_corner_cor[0]
# rect_left_top = (int(left_corner_cor[0]), int(left_corner_cor[1] - rect_min_w_h[1]))
# rect_right_down = (int(left_corner_cor[0] + rect_min_w_h[0]), int(left_corner_cor[1]))
# cv2.rectangle(image_label, rect_left_top, rect_right_down, (102, 178, 255), -1)
# if label in label_dict:
# text = label_dict[label]
# else:
# text = "None"
# cv2.putText(image_label, text, (int(left_corner_cor[0]), int(left_corner_cor[1]) - 5), cv2.FONT_HERSHEY_DUPLEX,
# 0.5, (0, 0, 0), 1, cv2.LINE_AA)
# img_draw = cv2.addWeighted(img_draw, 0.4, image_label, 0.6, 0)
# return img_draw
# def draw_one_bv_frame(corners3d, color_dict):
# if plt.gcf().number > 1:
# plt.close('all')
# if plt.gcf().number < 1:
# plt.figure()
# plt.ion()
# fig = plt.gcf()
# ax = plt.gca()
# fig.set_size_inches(5, 12.5)
# # point_list = [(1, 3), (3, 7), (7, 5), (5, 1)]
# point_list = [(3, 7), (7, 6), (6, 2), (2, 3)]
# plt.cla()
#
# for cat in dets:
# for i in range(len(dets[cat])):
# if dets[cat][i, -1] > center_thresh:
# dim_ = dets[cat][i, 5:8]
# loc_ = dets[cat][i, 8:11]
# rot_y = dets[cat][i, 11]
# loc = np.array([loc_[0], loc_[1] - dim_[0] / 2, loc_[2]])
# dim = np.array([dim_[2], dim_[0], dim_[1]])
# if not str(cat) in obj_color_dict.keys():
# obj_color = (1, 1, 1)
# else:
# obj_color = (obj_color_dict[str(cat)][2] / 255, obj_color_dict[str(cat)][1] / 255,
# obj_color_dict[str(cat)][0] / 255)
# corner_point = self.generate_obj_cam_cor(np.array(loc), np.array(dim), np.array([rot_y, 0, 0]))
# for point_ in point_list:
# ax.plot((corner_point[0][point_[0]], corner_point[0][point_[1]]),
# (corner_point[2][point_[0]], corner_point[2][point_[1]]), color=obj_color)
# ax.axis(xmin=-25, xmax=25)
# ax.axis(ymin=0, ymax=100)
# ax.grid()
# plt.title("bird view")
# plt.xlabel("horizontal distance/m")
# plt.ylabel("vertical distance/m")
# fig.canvas.draw()
# img_from_mat = cv2.cvtColor(np.asarray(fig.canvas.buffer_rgba()), cv2.COLOR_RGBA2BGR)
# img_from_mat = cv2.resize(img_from_mat, (400, 1000))
#
# return img_from_mat
# def project_3D_to_image(corner_point, calib):
# calib_ = np.array(calib)[:, 0:3]
# center_3d = corner_point[0]
# for i in range(1, 4):
# center_3d += corner_point[i]
# center_3d = center_3d/4
# center_3d = center_3d[np.newaxis,...]
# corner_point = np.append(center_3d, corner_point, axis=0)
# corner_point = corner_point.T
# tmp = corner_point[0,...]
# corner_point[0, ...] = corner_point[2,...]
# corner_point[2, ...] = tmp
# tmp = corner_point[0,...]
# corner_point[0, ...] = corner_point[1,...]
# corner_point[1, ...] = tmp
# for i in range(9):
# if corner_point[2, i] < 0:
# corner_point[0, i] = -corner_point[0, i]
# corner_point[1, i] = -corner_point[1, i]
# corner_point = np.matmul(calib_, corner_point)
# plane_cor = corner_point[0:2, :]
# for i in range(9):
# plane_cor[0][i] = corner_point[0][i] / corner_point[2][i]
# plane_cor[1][i] = corner_point[1][i] / corner_point[2][i]
# plane_cen = plane_cor[:, 0]
# plane_cor = plane_cor[:, 1:]
# #
# return plane_cor, plane_cen
def draw_color_on_contour_roi(image, plane_cor, color):
    """Tint the six projected faces of a 3D box onto the image, blended back
    at 25% opacity. ``plane_cor`` is (2, 8): pixel x/y of the 8 corners."""
    # Corner indices of each of the six box faces.
    face_indices = [[2, 3, 7, 6], [0, 1, 3, 2], [4, 5, 7, 6],
                    [0, 1, 5, 4], [0, 2, 6, 4], [1, 3, 7, 5]]
    overlay = image.copy()
    contours = [
        np.array([[plane_cor[0][i], plane_cor[1][i]] for i in face]).reshape((-1, 1, 2)).astype(np.int32)
        for face in face_indices
    ]
    for cnt in contours:
        cv2.drawContours(overlay, [cnt], -1, color, thickness=-1)
    # Blend the filled overlay onto the original image.
    return cv2.addWeighted(image, 0.75, overlay, 0.25, 0)
def draw_one_frame_on_img(img_draw, boxes, labels, calib, point_color, center_color, obj_color_dict, pretty = False, label_dict = None):
    """Project each 3D box onto the image and draw its wireframe, corner dots,
    center dot and (optionally) a text label tag.

    boxes: (N, 7) tensor [x, y, z, dx, dy, dz, heading]; labels: per-box class
    keys into obj_color_dict / label_dict; calib: camera projection matrix rows.
    Returns the annotated image (a new contiguous uint8 array).
    """
    img_draw = np.ascontiguousarray(img_draw, dtype=np.uint8)
    # Corner-index pairs forming the 12 wireframe edges of a box.
    box_list = [(0, 1),(0, 2),(0, 4), (1, 5), (1, 3), (2, 6), (2, 3), (3, 7), (4, 6), (4, 5), (5, 7), (6, 7)]
    # Minimum width/height (px) of the label background rectangle.
    rect_min_w_h = [50, 20]
    objs = list(zip(torch.Tensor.cpu(boxes), labels))
    for obj in objs:
        if not obj[1] in obj_color_dict.keys():
            obj_color = (255, 255, 255)
        else:
            obj_color = obj_color_dict[obj[1]]
        # Axis remap before projection: position (-y, -z, x), size (dy, dz, dx),
        # rotation (-heading, 0, 0). NOTE(review): presumably lidar->camera frame
        # conversion — confirm against the calibration convention.
        plane_cor, plane_obj_cen = project_3D_to_image(np.array([-obj[0][1], -obj[0][2],obj[0][0]]), np.array([obj[0][4], obj[0][5], obj[0][3]]), np.array([-obj[0][6], 0, 0]), calib)
        if pretty:
            img_draw = draw_color_on_contour_roi(img_draw, plane_cor, obj_color)
        # Wireframe edges.
        for point_pair in box_list:
            cv2.line(img_draw, (int(plane_cor[0][point_pair[0]]), int(plane_cor[1][point_pair[0]])), (int(plane_cor[0][point_pair[1]]), int(plane_cor[1][point_pair[1]])), obj_color, 1)
        # Corner and center markers.
        for index in range(8):
            cv2.circle(img_draw, (int(plane_cor[0][index]), int(plane_cor[1][index])), 2, point_color, -1)
        cv2.circle(img_draw, (int(plane_obj_cen[0]), int(plane_obj_cen[1])), 3, center_color, -1)
        if not label_dict == None:
            left_corner_cor = []
            image_label = img_draw.copy()
            for index in range(8):
                left_corner_cor.append([plane_cor[0][index], plane_cor[1][index]])
            # Pick the top-most of the two left-most projected corners as the
            # anchor for the label tag.
            left_corner_cor.sort(key = lambda x: x[0])
            left_corner_cor = left_corner_cor[0:2]
            left_corner_cor.sort(key = lambda x:x[1])
            left_corner_cor = left_corner_cor[0]
            rect_left_top = (int(left_corner_cor[0]), int(left_corner_cor[1] - rect_min_w_h[1]))
            rect_right_down = (int(left_corner_cor[0] + rect_min_w_h[0]), int(left_corner_cor[1]))
            cv2.rectangle(image_label, rect_left_top, rect_right_down, (102, 178, 255), -1)
            if obj[1] in label_dict:
                text = label_dict[obj[1]]
            else:
                text = "None"
            cv2.putText(image_label, text, (int(left_corner_cor[0]), int(left_corner_cor[1]) - 5), cv2.FONT_HERSHEY_DUPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
            # Blend the label layer back for a semi-transparent tag.
            img_draw = cv2.addWeighted(img_draw, 0.4, image_label, 0.6, 0)
    return img_draw
# def draw_color_on_contour_roi(self, image, plane_cor, color):
# plane_list = [[2, 3, 7, 6], [0, 1, 3, 2], [4, 5, 7, 6], [0, 1, 5, 4], [0, 2, 6, 4], [1, 3, 7 ,5]]
# contour = []
# image_draw_contour = image.copy()
# for plane in plane_list:
# contour_ = []
# for index in plane:
# contour_.append([plane_cor[0][index], plane_cor[1][index]])
# contour.append(np.array(contour_).reshape((-1,1,2)).astype(np.int32))
# for contour_ in contour:
# cv2.drawContours(image_draw_contour, [contour_], -1, color, thickness = -1)
# image = cv2.addWeighted(image, 0.75, image_draw_contour, 0.25, 0)
# return image
# def generate_obj_cam_cor(self, position, size, ZYX):
# pitch = ZYX[0]
# roll = ZYX[1]
# yaw = ZYX[2]
# R_roll = np.array([[1 , 0 , 0 ],
# [0 , np.cos(roll), -np.sin(roll)],
# [0 , np.sin(roll), np.cos(roll)]])
# R_pitch = np.array([[np.cos(pitch) , 0 , np.sin(pitch)],
# [0 , 1 , 0 ],
# [-np.sin(pitch), 0 , np.cos(pitch)]])
# R_yaw = np.array([[np.cos(yaw) , -np.sin(yaw), 0 ],
# [np.sin(yaw) , np.cos(yaw) , 0 ],
# [0 , 0 , 1 ]])
# R = np.matmul(R_pitch, R_roll)
# R = np.matmul(R_yaw, R)
# size = 0.5 * size
# arithm_list = []
# for i in ['+','-']:
# for j in ['+', '-']:
# for k in ['+', '-']:
# arithm_list.append(i+j+k)
# corner_point = np.array([0, 0, 0])
# for arithm in arithm_list:
# point = np.array([eval(str(0) + arithm[0] + str(size[0])),eval(str(0) + arithm[1] + str(size[1])),eval(str(0) + arithm[2] + str(size[2]))])
# corner_point = np.vstack((corner_point, point))
# corner_point = corner_point.T
# corner_point = np.matmul(R, corner_point)
# for i in range(9):
# corner_point[:,i] = corner_point[:,i] + position
# return corner_point
def generate_obj_cam_cor(position, size, ZYX):
    """Build the camera-frame coordinates of a 3D box.

    Args:
        position: (3,) box center.
        size: (3,) full box extents (halved internally).
        ZYX: (3,) [pitch, roll, yaw] rotation angles in radians.

    Returns:
        (3, 9) array: column 0 is the (rotated, translated) center point,
        columns 1-8 are the eight box corners.
    """
    pitch = ZYX[0]
    roll = ZYX[1]
    yaw = ZYX[2]
    R_roll = np.array([[1, 0, 0],
                       [0, np.cos(roll), -np.sin(roll)],
                       [0, np.sin(roll), np.cos(roll)]])
    R_pitch = np.array([[np.cos(pitch), 0, np.sin(pitch)],
                        [0, 1, 0],
                        [-np.sin(pitch), 0, np.cos(pitch)]])
    R_yaw = np.array([[np.cos(yaw), -np.sin(yaw), 0],
                      [np.sin(yaw), np.cos(yaw), 0],
                      [0, 0, 1]])
    # Combined rotation: yaw * pitch * roll (same order as before).
    R = np.matmul(R_yaw, np.matmul(R_pitch, R_roll))
    half = 0.5 * size
    # BUG FIX / cleanup: the original built strings like "0+-1.5" and ran
    # eval() on them to enumerate the +/- sign combinations. Plain sign
    # loops produce the exact same corner order (+++, ++-, +-+, ...)
    # without eval. Column 0 stays the zero center point.
    corners = [np.zeros(3)]
    for sx in (1.0, -1.0):
        for sy in (1.0, -1.0):
            for sz in (1.0, -1.0):
                corners.append(np.array([sx * half[0], sy * half[1], sz * half[2]]))
    corner_point = np.vstack(corners).T  # (3, 9)
    corner_point = np.matmul(R, corner_point)
    for i in range(9):
        corner_point[:, i] = corner_point[:, i] + position
    return corner_point
def project_3D_to_image(position, size, ZYX, calib):
    """Project a 3D box (center + 8 corners from generate_obj_cam_cor) into
    pixel coordinates using the first three columns of ``calib``.

    Returns (plane_cor, plane_cen): (2, 8) corner pixels and (2,) center pixel.
    """
    calib_ = np.array(calib)
    calib_ = calib_[:, 0:3]
    corner_point = generate_obj_cam_cor(position, size, ZYX)
    # Mirror x/y of points with negative depth before projecting.
    # NOTE(review): this keeps behind-camera points from flipping across the
    # principal point, but they are still projected — confirm intent.
    for i in range(9):
        if corner_point[2, i] < 0:
            corner_point[0, i] = -corner_point[0, i]
            corner_point[1, i] = -corner_point[1, i]
    corner_point = np.matmul(calib_, corner_point)
    # NOTE: plane_cor is a VIEW of corner_point rows 0-1, so the in-place
    # divisions below also mutate corner_point; row 2 (the depth divisor)
    # is never written, so the perspective divide stays correct.
    plane_cor = corner_point[0:2,:]
    for i in range(9):
        plane_cor[0][i] = corner_point[0][i] / corner_point[2][i]
        plane_cor[1][i] = corner_point[1][i] / corner_point[2][i]
    # Column 0 is the center point; columns 1-8 are the corners.
    plane_cen = plane_cor[:,0]
    plane_cor = plane_cor[:,1:]
    #
    return plane_cor, plane_cen
| 10,167 | 0 | 230 |
4e5009b784d23c0d5ce31e9da785788590332b6f | 422 | py | Python | resourceUtils.py | hanhet/hanhet-co2 | bdeba2388a2179962d14b17121969aea3e4b65f0 | [
"Apache-2.0"
] | null | null | null | resourceUtils.py | hanhet/hanhet-co2 | bdeba2388a2179962d14b17121969aea3e4b65f0 | [
"Apache-2.0"
] | null | null | null | resourceUtils.py | hanhet/hanhet-co2 | bdeba2388a2179962d14b17121969aea3e4b65f0 | [
"Apache-2.0"
] | null | null | null | import os
import time
import psutil
import sys
import atexit
| 16.88 | 33 | 0.718009 | import os
import time
import psutil
import sys
import atexit
def get_cpu_time():
    # NOTE(review): returns the system-wide *idle* CPU time from
    # psutil.cpu_times(), despite the generic name — confirm callers
    # expect idle time rather than total/used CPU time.
    return psutil.cpu_times().idle
def get_total_memory():
    """Return total physical memory in bytes (psutil.virtual_memory().total)."""
    return psutil.virtual_memory().total
def get_free_memory():
    """Return free physical memory in bytes (psutil.virtual_memory().free)."""
    return psutil.virtual_memory().free
def get_total_disk():
    """Return the total size in bytes of the filesystem containing '/'."""
    return psutil.disk_usage('/').total
def get_free_disk():
    """Return the free space in bytes of the filesystem containing '/'."""
    return psutil.disk_usage('/').free
| 242 | 0 | 119 |
2bc44aa34b4bc1b094fb35d6ffe1882200ba961b | 68 | py | Python | lombscargle/__init__.py | jakevdp/nfftls | 01aebd51189a6ed96e44c58cd55b74b8691cbe77 | [
"BSD-3-Clause"
] | 2 | 2017-07-29T13:11:34.000Z | 2019-11-12T01:41:42.000Z | lombscargle/__init__.py | jakevdp/nfftls | 01aebd51189a6ed96e44c58cd55b74b8691cbe77 | [
"BSD-3-Clause"
] | null | null | null | lombscargle/__init__.py | jakevdp/nfftls | 01aebd51189a6ed96e44c58cd55b74b8691cbe77 | [
"BSD-3-Clause"
] | null | null | null | """Fast Lomb-Scargle Periodograms"""
from .core import LombScargle
| 17 | 36 | 0.764706 | """Fast Lomb-Scargle Periodograms"""
from .core import LombScargle
| 0 | 0 | 0 |
22b110f5e6e266f87ccb45082c2eba1d4494b357 | 724 | py | Python | scormxblock/views.py | eol-uchile/edx_xblock_scorm | af72761ad08d5570e85d35d3ed7908618e707dac | [
"Apache-2.0"
] | null | null | null | scormxblock/views.py | eol-uchile/edx_xblock_scorm | af72761ad08d5570e85d35d3ed7908618e707dac | [
"Apache-2.0"
] | null | null | null | scormxblock/views.py | eol-uchile/edx_xblock_scorm | af72761ad08d5570e85d35d3ed7908618e707dac | [
"Apache-2.0"
] | 1 | 2022-01-13T19:22:17.000Z | 2022-01-13T19:22:17.000Z | import os.path
import mimetypes
from django.http import HttpResponse
from .utils import get_scorm_storage
import logging
logger = logging.getLogger(__name__)
def proxy_scorm_media(request, block_id, file, sha1=None):
    """
    Render the media objects by proxy, as the files
    must be in the same domain as the LMS

    :param request: incoming HTTP request (not inspected here)
    :param block_id: block identifier used as a storage path segment
    :param file: media file path inside the SCORM package
    :param sha1: optional extra storage path segment
    """
    # Guess the MIME type from the file name; fall back to text/html
    # when the extension is unknown.
    guess = mimetypes.guess_type(file)
    if guess[0] is None:
        content_type = "text/html"
    else:
        content_type = guess[0]
    # Storage layout: scorm/<block_id>[/<sha1>]/<file>
    if sha1:
        location = "scorm/{}/{}/{}".format(block_id, sha1, file)
    else:
        location = "scorm/{}/{}".format(block_id, file)
    # NOTE(review): the whole file is read into memory before responding;
    # consider a streaming response for large media assets.
    return HttpResponse(
        get_scorm_storage().open(location).read(),
        content_type=content_type,
) | 23.354839 | 62 | 0.667127 | import os.path
import mimetypes
from django.http import HttpResponse
from .utils import get_scorm_storage
import logging
logger = logging.getLogger(__name__)
def proxy_scorm_media(request, block_id, file, sha1=None):
    """
    Render the media objects by proxy, as the files
    must be in the same domain as the LMS

    :param request: incoming HTTP request (not inspected here)
    :param block_id: block identifier used as a storage path segment
    :param file: media file path inside the SCORM package
    :param sha1: optional extra storage path segment
    """
    # Guess the MIME type from the file name; fall back to text/html
    # when the extension is unknown.
    guess = mimetypes.guess_type(file)
    if guess[0] is None:
        content_type = "text/html"
    else:
        content_type = guess[0]
    # Storage layout: scorm/<block_id>[/<sha1>]/<file>
    if sha1:
        location = "scorm/{}/{}/{}".format(block_id, sha1, file)
    else:
        location = "scorm/{}/{}".format(block_id, file)
    # NOTE(review): the whole file is read into memory before responding;
    # consider a streaming response for large media assets.
    return HttpResponse(
        get_scorm_storage().open(location).read(),
        content_type=content_type,
) | 0 | 0 | 0 |
46a92d70b54fe3bf5b3629c2905e334570a0b744 | 2,367 | py | Python | data_quality_problem_set2_area.py | mugofjoe/dand-data-wrangling | c415219cc8178a59494599f2f84a3692bdc7d87f | [
"MIT"
] | null | null | null | data_quality_problem_set2_area.py | mugofjoe/dand-data-wrangling | c415219cc8178a59494599f2f84a3692bdc7d87f | [
"MIT"
] | null | null | null | data_quality_problem_set2_area.py | mugofjoe/dand-data-wrangling | c415219cc8178a59494599f2f84a3692bdc7d87f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
TOPIC: Decide what to do with a field that has multiple items in it.
In this problem set you work with cities infobox data, audit it, come up with a
cleaning idea and then clean it up.
Since in the previous quiz you made a decision on which value to keep for the
"areaLand" field, you now know what has to be done.
Finish the function fix_area(). It will receive a string as an input, and it
has to return a float representing the value of the area or None.
You have to change the function fix_area. You can use extra functions if you
like, but changes to process_file will not be taken into account.
The rest of the code is just an example on how this function can be used.
"""
import codecs
import csv
import json
import pprint
CITIES = 'cities.csv'
if __name__ == "__main__":
test() | 28.178571 | 80 | 0.613012 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
TOPIC: Decide what to do with a field that has multiple items in it.
In this problem set you work with cities infobox data, audit it, come up with a
cleaning idea and then clean it up.
Since in the previous quiz you made a decision on which value to keep for the
"areaLand" field, you now know what has to be done.
Finish the function fix_area(). It will receive a string as an input, and it
has to return a float representing the value of the area or None.
You have to change the function fix_area. You can use extra functions if you
like, but changes to process_file will not be taken into account.
The rest of the code is just an example on how this function can be used.
"""
import codecs
import csv
import json
import pprint
CITIES = 'cities.csv'
def fix_area(area):
# YOUR CODE HERE
if area.startswith("{"):
area_items = area.replace("{", "").replace("}", "").split("|")
significant_area_item = ""
for area_item in area_items:
if len(area_item) > len(significant_area_item):
significant_area_item = area_item
if significant_area_item == "":
area = None
else:
area = float(significant_area_item)
elif area == "NULL" or "":
area = None
else:
area = float(area)
return area
def process_file(filename):
# CHANGES TO THIS FUNCTION WILL BE IGNORED WHEN YOU SUBMIT THE EXERCISE
data = []
with open(filename, "r") as f:
reader = csv.DictReader(f)
#skipping the extra metadata
for i in range(3):
l = reader.next()
# processing file
for line in reader:
# calling your function to fix the area value
if "areaLand" in line:
line["areaLand"] = fix_area(line["areaLand"])
data.append(line)
return data
def test():
data = process_file(CITIES)
print "Printing three example results:"
for n in range(5, 8):
pprint.pprint(data[n]["areaLand"])
assert data[3]["areaLand"] == None
assert data[8]["areaLand"] == 55166700.0
assert data[20]["areaLand"] == 14581600.0
assert data[33]["areaLand"] == 20564500.0
if __name__ == "__main__":
test() | 1,415 | 0 | 75 |
6028501e1702c0232cceb1c713386a86d37a048d | 754 | py | Python | constrainmol/examples/parmed_box/example_parmed_box.py | rsdefever/constrainmol | 0baa4c25abf2d01ad4c3078a541b4fd943514121 | [
"BSD-3-Clause"
] | 7 | 2020-12-15T05:08:36.000Z | 2022-01-03T02:20:25.000Z | constrainmol/examples/parmed_box/example_parmed_box.py | rsdefever/constrainmol | 0baa4c25abf2d01ad4c3078a541b4fd943514121 | [
"BSD-3-Clause"
] | 5 | 2020-11-06T21:17:51.000Z | 2020-11-18T22:44:18.000Z | constrainmol/examples/parmed_box/example_parmed_box.py | rsdefever/constrainmol | 0baa4c25abf2d01ad4c3078a541b4fd943514121 | [
"BSD-3-Clause"
] | 1 | 2020-12-15T05:08:44.000Z | 2020-12-15T05:08:44.000Z | import parmed
import numpy as np
from constrainmol import ConstrainedMolecule
# Load from gro/topology
system = parmed.load_file("system.top", xyz="system.gro")
system.save("unconstrained.pdb", overwrite=True)
constrained_coordinates = np.zeros(system.coordinates.shape)
unique_res = system.split()
for (res, resids) in unique_res:
constrain_mol = ConstrainedMolecule(res)
for resid in resids:
constrain_mol.update_xyz(system[resid, :].coordinates)
constrain_mol.solve()
atom_ids = [atom.idx for atom in system.residues[resid].atoms]
constrained_coordinates[atom_ids] = constrain_mol.structure.coordinates
system.coordinates = constrained_coordinates
system.save("constrained.pdb", overwrite=True)
| 30.16 | 79 | 0.753316 | import parmed
import numpy as np
from constrainmol import ConstrainedMolecule
# Load from gro/topology
system = parmed.load_file("system.top", xyz="system.gro")
system.save("unconstrained.pdb", overwrite=True)
constrained_coordinates = np.zeros(system.coordinates.shape)
unique_res = system.split()
for (res, resids) in unique_res:
constrain_mol = ConstrainedMolecule(res)
for resid in resids:
constrain_mol.update_xyz(system[resid, :].coordinates)
constrain_mol.solve()
atom_ids = [atom.idx for atom in system.residues[resid].atoms]
constrained_coordinates[atom_ids] = constrain_mol.structure.coordinates
system.coordinates = constrained_coordinates
system.save("constrained.pdb", overwrite=True)
| 0 | 0 | 0 |
8e6067ab50fa68db3a45a2d3b5be05697b3dac59 | 700 | py | Python | edzapp/items.py | jamesadney/edzapp-scraper | 03c55a6db342dc75a1299d9ea450dfb4b460392f | [
"BSD-3-Clause"
] | 1 | 2015-11-26T13:54:59.000Z | 2015-11-26T13:54:59.000Z | edzapp/items.py | jamesadney/edzapp-scraper | 03c55a6db342dc75a1299d9ea450dfb4b460392f | [
"BSD-3-Clause"
] | null | null | null | edzapp/items.py | jamesadney/edzapp-scraper | 03c55a6db342dc75a1299d9ea450dfb4b460392f | [
"BSD-3-Clause"
] | null | null | null | from datetime import datetime
from scrapy.contrib.loader import ItemLoader
from scrapy.contrib.loader.processor import Join, MapCompose, Identity
from scrapy.contrib_exp.djangoitem import DjangoItem
from django_edzapp.jobs.models import Job
| 24.137931 | 70 | 0.747143 | from datetime import datetime
from scrapy.contrib.loader import ItemLoader
from scrapy.contrib.loader.processor import Join, MapCompose, Identity
from scrapy.contrib_exp.djangoitem import DjangoItem
from django_edzapp.jobs.models import Job
def to_datetime(string):
try:
datetime_object = datetime.strptime(string, '%m/%d/%Y')
out = unicode(datetime_object.date())
except ValueError:
out = None
return out
class JobItemLoader(ItemLoader):
default_input_processor = Identity()
default_output_processor = Join()
deadline_in = MapCompose(to_datetime)
date_posted_in = MapCompose(to_datetime)
class JobItem(DjangoItem):
django_model = Job
| 180 | 206 | 69 |
f04216eba5a676ae04e6688a7532804acc165343 | 88 | py | Python | comments/tests/__init__.py | RichardHirtle/c4all | a09c4b098cf1a58ed5e3ab6116a749a17ec035e0 | [
"MIT"
] | 4 | 2016-09-03T12:43:13.000Z | 2020-04-22T14:49:28.000Z | comments/tests/__init__.py | RichardHirtle/c4all | a09c4b098cf1a58ed5e3ab6116a749a17ec035e0 | [
"MIT"
] | 1 | 2019-09-25T12:49:01.000Z | 2020-08-04T11:33:09.000Z | comments/tests/__init__.py | RichardHirtle/c4all | a09c4b098cf1a58ed5e3ab6116a749a17ec035e0 | [
"MIT"
] | 3 | 2015-03-17T13:38:42.000Z | 2016-05-06T15:06:31.000Z | from comment import *
from thread import *
from site import *
from custom_user import *
| 17.6 | 25 | 0.772727 | from comment import *
from thread import *
from site import *
from custom_user import *
| 0 | 0 | 0 |
1dbcfb56385522b7d409b3f30b741f3688d38d67 | 3,392 | py | Python | regexlib/python_re_test_file/regexlib_1091.py | yetingli/ReDoS-Benchmarks | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | [
"MIT"
] | 1 | 2022-01-24T14:43:23.000Z | 2022-01-24T14:43:23.000Z | regexlib/python_re_test_file/regexlib_1091.py | yetingli/ReDoS-Benchmarks | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | [
"MIT"
] | null | null | null | regexlib/python_re_test_file/regexlib_1091.py | yetingli/ReDoS-Benchmarks | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | [
"MIT"
] | null | null | null | # 1091
# (?:(?:w{3}\.)(?:[a-zA-Z0-9/;\?&=:\-_\$\+!\*'\(\|\\~\[\]#%\.])+[\.com|\.edu|\.gov|\.int|\.mil|\.net|\.org|\.biz|\.info|\.name|\.pro|\.aero|\.coop|\.museum|\.cat|\.jobs|\.travel|\.arpa|\.mobi|\.ac|\.ad|\.ae|\.af|\.ag|\.ai|\.al|\.am|\.an|\.ao|\.aq|\.ar|\.as|\.at|\.au|\.aw|\.az|\.ax|\.ba|\.bb|\.bd|\.be|\.bf|\.bg|\.bh|\.bi|\.bj|\.bm|\.bn|\.bo|\.br|\.bs|\.bt|\.bv|\.bw|\.by|\.bz|\.ca|\.cc|\.cd|\.cf|\.cg|\.ch|\.ci|\.ck|\.cl|\.cm|\.cn|\.co|\.cr|\.cs|\.cu|\.cv|\.cx|\.cy|\.cz|\.de|\.dj|\.dk|\.dm|\.do|\.dz|\.ec|\.ee|\.eg|\.eh|\.er|\.es|\.et|\.eu|\.fi|\.fj|\.fk|\.fm|\.fo|\.fr|\.ga|\.gb|\.gd|\.ge|\.gf|\.gg|\.gh|\.gi|\.gl|\.gm|\.gn|\.gp|\.gq|\.gr|\.gs|\.gt|\.gu|\.gw|\.gy|\.hk|\.hm|\.hn|\.hr|\.ht|\.hu|\.id|\.ie|\.il|\.im|\.in|\.io|\.iq|\.ir|\.is|\.it|\.je|\.jm|\.jo|\.jp|\.ke|\.kg|\.kh|\.ki|\.km|\.kn|\.kp|\.kr|\.kw|\.ky|\.kz|\.la|\.lb|\.lc|\.li|\.lk|\.lr|\.ls|\.lt|\.lu|\.lv|\.ly|\.ma|\.mc|\.md|\.mg|\.mh|\.mk|\.ml|\.mm|\.mn|\.mo|\.mp|\.mq|\.mr|\.ms|\.mt|\.mu|\.mv|\.mw|\.mx|\.my|\.mz|\.na|\.nc|\.ne|\.nf|\.ng|\.ni|\.nl|\.no|\.np|\.nr|\.nu|\.nz|\.om|\.pa|\.pe|\.pf|\.pg|\.ph|\.pk|\.pl|\.pm|\.pn|\.pr|\.ps|\.pt|\.pw|\.py|\.qa|\.re|\.ro|\.ru|\.rw|\.sa|\.sb|\.sc|\.sd|\.se|\.sg|\.sh|\..si|\.sj|\.sk|\.sl|\.sm|\.sn|\.so|\.sr|\.st|\.sv|\.sy|\.sz|\.tc|\.td|\.tf|\.tg|\.th|\.tj|\.tk|\.tl|\.tm|\.tn|\.to|\.tp|\.tr|\.tt|\.tv|\.tw|\.tz|\.ua|\.ug|\.uk|\.um|\.us|\.uy|\.uz|\.va|\.vc|\.ve|\.vg|\.vi|\.vn|\.vu|\.wf|\.ws|\.ye|\.yt|\.yu|\.za|\.zm|\.zw](?:[a-zA-Z0-9/;\?&=:\-_\$\+!\*'\(\|\\~\[\]#%\.])*)
# EXPONENT
# nums:4
# EXPONENT AttackString:""+"www."*1024+"@1 _SLQ_2"
import re
from time import perf_counter
regex = """(?:(?:w{3}\.)(?:[a-zA-Z0-9/;\?&=:\-_\$\+!\*'\(\|\\~\[\]#%\.])+[\.com|\.edu|\.gov|\.int|\.mil|\.net|\.org|\.biz|\.info|\.name|\.pro|\.aero|\.coop|\.museum|\.cat|\.jobs|\.travel|\.arpa|\.mobi|\.ac|\.ad|\.ae|\.af|\.ag|\.ai|\.al|\.am|\.an|\.ao|\.aq|\.ar|\.as|\.at|\.au|\.aw|\.az|\.ax|\.ba|\.bb|\.bd|\.be|\.bf|\.bg|\.bh|\.bi|\.bj|\.bm|\.bn|\.bo|\.br|\.bs|\.bt|\.bv|\.bw|\.by|\.bz|\.ca|\.cc|\.cd|\.cf|\.cg|\.ch|\.ci|\.ck|\.cl|\.cm|\.cn|\.co|\.cr|\.cs|\.cu|\.cv|\.cx|\.cy|\.cz|\.de|\.dj|\.dk|\.dm|\.do|\.dz|\.ec|\.ee|\.eg|\.eh|\.er|\.es|\.et|\.eu|\.fi|\.fj|\.fk|\.fm|\.fo|\.fr|\.ga|\.gb|\.gd|\.ge|\.gf|\.gg|\.gh|\.gi|\.gl|\.gm|\.gn|\.gp|\.gq|\.gr|\.gs|\.gt|\.gu|\.gw|\.gy|\.hk|\.hm|\.hn|\.hr|\.ht|\.hu|\.id|\.ie|\.il|\.im|\.in|\.io|\.iq|\.ir|\.is|\.it|\.je|\.jm|\.jo|\.jp|\.ke|\.kg|\.kh|\.ki|\.km|\.kn|\.kp|\.kr|\.kw|\.ky|\.kz|\.la|\.lb|\.lc|\.li|\.lk|\.lr|\.ls|\.lt|\.lu|\.lv|\.ly|\.ma|\.mc|\.md|\.mg|\.mh|\.mk|\.ml|\.mm|\.mn|\.mo|\.mp|\.mq|\.mr|\.ms|\.mt|\.mu|\.mv|\.mw|\.mx|\.my|\.mz|\.na|\.nc|\.ne|\.nf|\.ng|\.ni|\.nl|\.no|\.np|\.nr|\.nu|\.nz|\.om|\.pa|\.pe|\.pf|\.pg|\.ph|\.pk|\.pl|\.pm|\.pn|\.pr|\.ps|\.pt|\.pw|\.py|\.qa|\.re|\.ro|\.ru|\.rw|\.sa|\.sb|\.sc|\.sd|\.se|\.sg|\.sh|\..si|\.sj|\.sk|\.sl|\.sm|\.sn|\.so|\.sr|\.st|\.sv|\.sy|\.sz|\.tc|\.td|\.tf|\.tg|\.th|\.tj|\.tk|\.tl|\.tm|\.tn|\.to|\.tp|\.tr|\.tt|\.tv|\.tw|\.tz|\.ua|\.ug|\.uk|\.um|\.us|\.uy|\.uz|\.va|\.vc|\.ve|\.vg|\.vi|\.vn|\.vu|\.wf|\.ws|\.ye|\.yt|\.yu|\.za|\.zm|\.zw](?:[a-zA-Z0-9/;\?&=:\-_\$\+!\*'\(\|\\~\[\]#%\.])*)"""
REGEX = re.compile(regex)
for i in range(0, 150000):
ATTACK = "" + "www." * i * 1 + "@1 _SLQ_2"
LEN = len(ATTACK)
BEGIN = perf_counter()
m = REGEX.search(ATTACK)
# m = REGEX.match(ATTACK)
DURATION = perf_counter() - BEGIN
print(f"{i *1}: took {DURATION} seconds!") | 178.526316 | 1,495 | 0.420106 | # 1091
# (?:(?:w{3}\.)(?:[a-zA-Z0-9/;\?&=:\-_\$\+!\*'\(\|\\~\[\]#%\.])+[\.com|\.edu|\.gov|\.int|\.mil|\.net|\.org|\.biz|\.info|\.name|\.pro|\.aero|\.coop|\.museum|\.cat|\.jobs|\.travel|\.arpa|\.mobi|\.ac|\.ad|\.ae|\.af|\.ag|\.ai|\.al|\.am|\.an|\.ao|\.aq|\.ar|\.as|\.at|\.au|\.aw|\.az|\.ax|\.ba|\.bb|\.bd|\.be|\.bf|\.bg|\.bh|\.bi|\.bj|\.bm|\.bn|\.bo|\.br|\.bs|\.bt|\.bv|\.bw|\.by|\.bz|\.ca|\.cc|\.cd|\.cf|\.cg|\.ch|\.ci|\.ck|\.cl|\.cm|\.cn|\.co|\.cr|\.cs|\.cu|\.cv|\.cx|\.cy|\.cz|\.de|\.dj|\.dk|\.dm|\.do|\.dz|\.ec|\.ee|\.eg|\.eh|\.er|\.es|\.et|\.eu|\.fi|\.fj|\.fk|\.fm|\.fo|\.fr|\.ga|\.gb|\.gd|\.ge|\.gf|\.gg|\.gh|\.gi|\.gl|\.gm|\.gn|\.gp|\.gq|\.gr|\.gs|\.gt|\.gu|\.gw|\.gy|\.hk|\.hm|\.hn|\.hr|\.ht|\.hu|\.id|\.ie|\.il|\.im|\.in|\.io|\.iq|\.ir|\.is|\.it|\.je|\.jm|\.jo|\.jp|\.ke|\.kg|\.kh|\.ki|\.km|\.kn|\.kp|\.kr|\.kw|\.ky|\.kz|\.la|\.lb|\.lc|\.li|\.lk|\.lr|\.ls|\.lt|\.lu|\.lv|\.ly|\.ma|\.mc|\.md|\.mg|\.mh|\.mk|\.ml|\.mm|\.mn|\.mo|\.mp|\.mq|\.mr|\.ms|\.mt|\.mu|\.mv|\.mw|\.mx|\.my|\.mz|\.na|\.nc|\.ne|\.nf|\.ng|\.ni|\.nl|\.no|\.np|\.nr|\.nu|\.nz|\.om|\.pa|\.pe|\.pf|\.pg|\.ph|\.pk|\.pl|\.pm|\.pn|\.pr|\.ps|\.pt|\.pw|\.py|\.qa|\.re|\.ro|\.ru|\.rw|\.sa|\.sb|\.sc|\.sd|\.se|\.sg|\.sh|\..si|\.sj|\.sk|\.sl|\.sm|\.sn|\.so|\.sr|\.st|\.sv|\.sy|\.sz|\.tc|\.td|\.tf|\.tg|\.th|\.tj|\.tk|\.tl|\.tm|\.tn|\.to|\.tp|\.tr|\.tt|\.tv|\.tw|\.tz|\.ua|\.ug|\.uk|\.um|\.us|\.uy|\.uz|\.va|\.vc|\.ve|\.vg|\.vi|\.vn|\.vu|\.wf|\.ws|\.ye|\.yt|\.yu|\.za|\.zm|\.zw](?:[a-zA-Z0-9/;\?&=:\-_\$\+!\*'\(\|\\~\[\]#%\.])*)
# EXPONENT
# nums:4
# EXPONENT AttackString:""+"www."*1024+"@1 _SLQ_2"
import re
from time import perf_counter
regex = """(?:(?:w{3}\.)(?:[a-zA-Z0-9/;\?&=:\-_\$\+!\*'\(\|\\~\[\]#%\.])+[\.com|\.edu|\.gov|\.int|\.mil|\.net|\.org|\.biz|\.info|\.name|\.pro|\.aero|\.coop|\.museum|\.cat|\.jobs|\.travel|\.arpa|\.mobi|\.ac|\.ad|\.ae|\.af|\.ag|\.ai|\.al|\.am|\.an|\.ao|\.aq|\.ar|\.as|\.at|\.au|\.aw|\.az|\.ax|\.ba|\.bb|\.bd|\.be|\.bf|\.bg|\.bh|\.bi|\.bj|\.bm|\.bn|\.bo|\.br|\.bs|\.bt|\.bv|\.bw|\.by|\.bz|\.ca|\.cc|\.cd|\.cf|\.cg|\.ch|\.ci|\.ck|\.cl|\.cm|\.cn|\.co|\.cr|\.cs|\.cu|\.cv|\.cx|\.cy|\.cz|\.de|\.dj|\.dk|\.dm|\.do|\.dz|\.ec|\.ee|\.eg|\.eh|\.er|\.es|\.et|\.eu|\.fi|\.fj|\.fk|\.fm|\.fo|\.fr|\.ga|\.gb|\.gd|\.ge|\.gf|\.gg|\.gh|\.gi|\.gl|\.gm|\.gn|\.gp|\.gq|\.gr|\.gs|\.gt|\.gu|\.gw|\.gy|\.hk|\.hm|\.hn|\.hr|\.ht|\.hu|\.id|\.ie|\.il|\.im|\.in|\.io|\.iq|\.ir|\.is|\.it|\.je|\.jm|\.jo|\.jp|\.ke|\.kg|\.kh|\.ki|\.km|\.kn|\.kp|\.kr|\.kw|\.ky|\.kz|\.la|\.lb|\.lc|\.li|\.lk|\.lr|\.ls|\.lt|\.lu|\.lv|\.ly|\.ma|\.mc|\.md|\.mg|\.mh|\.mk|\.ml|\.mm|\.mn|\.mo|\.mp|\.mq|\.mr|\.ms|\.mt|\.mu|\.mv|\.mw|\.mx|\.my|\.mz|\.na|\.nc|\.ne|\.nf|\.ng|\.ni|\.nl|\.no|\.np|\.nr|\.nu|\.nz|\.om|\.pa|\.pe|\.pf|\.pg|\.ph|\.pk|\.pl|\.pm|\.pn|\.pr|\.ps|\.pt|\.pw|\.py|\.qa|\.re|\.ro|\.ru|\.rw|\.sa|\.sb|\.sc|\.sd|\.se|\.sg|\.sh|\..si|\.sj|\.sk|\.sl|\.sm|\.sn|\.so|\.sr|\.st|\.sv|\.sy|\.sz|\.tc|\.td|\.tf|\.tg|\.th|\.tj|\.tk|\.tl|\.tm|\.tn|\.to|\.tp|\.tr|\.tt|\.tv|\.tw|\.tz|\.ua|\.ug|\.uk|\.um|\.us|\.uy|\.uz|\.va|\.vc|\.ve|\.vg|\.vi|\.vn|\.vu|\.wf|\.ws|\.ye|\.yt|\.yu|\.za|\.zm|\.zw](?:[a-zA-Z0-9/;\?&=:\-_\$\+!\*'\(\|\\~\[\]#%\.])*)"""
REGEX = re.compile(regex)
for i in range(0, 150000):
ATTACK = "" + "www." * i * 1 + "@1 _SLQ_2"
LEN = len(ATTACK)
BEGIN = perf_counter()
m = REGEX.search(ATTACK)
# m = REGEX.match(ATTACK)
DURATION = perf_counter() - BEGIN
print(f"{i *1}: took {DURATION} seconds!") | 0 | 0 | 0 |
4475be2c905f13e27a18554229e89144691feca4 | 1,294 | py | Python | met/scripts/met-merger.py | DoggySmooth/MET | 91e6e5a9ecd66d60059abbcaf9389c6780ba7ce7 | [
"MIT"
] | null | null | null | met/scripts/met-merger.py | DoggySmooth/MET | 91e6e5a9ecd66d60059abbcaf9389c6780ba7ce7 | [
"MIT"
] | 3 | 2021-03-31T18:53:02.000Z | 2021-12-13T19:49:33.000Z | met/scripts/met-merger.py | DoggySmooth/MET | 91e6e5a9ecd66d60059abbcaf9389c6780ba7ce7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
This module merge to xml files into one. First it checks if its sha's are the same to avoid merging of different projects an finally merge it into one called merged.xml.
"""
import sys
import argparse
from lxml import etree
import met.xmlMerger as merger
USAGE = './met-merger firstXml.xml secondXml.xml'
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
usage=USAGE)
parser.add_argument('firstXml', help=argparse.SUPPRESS)
parser.add_argument('secondXml', help=argparse.SUPPRESS)
if __name__ == '__main__':
main()
| 26.408163 | 169 | 0.692427 | #!/usr/bin/env python3
"""
This module merge to xml files into one. First it checks if its sha's are the same to avoid merging of different projects an finally merge it into one called merged.xml.
"""
import sys
import argparse
from lxml import etree
import met.xmlMerger as merger
USAGE = './met-merger firstXml.xml secondXml.xml'
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
usage=USAGE)
parser.add_argument('firstXml', help=argparse.SUPPRESS)
parser.add_argument('secondXml', help=argparse.SUPPRESS)
def main(args=None):
opts = parser.parse_args(args)
if not (opts.firstXml.endswith('.xml') and opts.secondXml.endswith(".xml")):
print("Wrong extension!\nUsage: " + USAGE)
else:
xmlOne = sys.argv[1]
xmlTwo = sys.argv[2]
xml_doc_one = etree.parse(xmlOne)
xml_doc_two = etree.parse(xmlTwo)
rootOne = xml_doc_one.getroot()
rootTwo = xml_doc_two.getroot()
firstSha = rootOne.find('res').text
secondSha = rootTwo.find('res').text
secondArtefacts = rootTwo.find('data')
merger.checkSha(firstSha, secondSha)
merger.mergeFiles(rootOne, rootTwo, secondArtefacts)
if __name__ == '__main__':
main()
| 647 | 0 | 23 |
d9ffa1e8e2159ccdee3a14fac64ce819ff327cc8 | 2,291 | py | Python | app/db_config.py | queenfiona/SendITc3 | 7bb2ed5165e07ec307bbd4f5abe2e53073f05f9c | [
"MIT"
] | null | null | null | app/db_config.py | queenfiona/SendITc3 | 7bb2ed5165e07ec307bbd4f5abe2e53073f05f9c | [
"MIT"
] | 13 | 2020-02-12T01:22:14.000Z | 2022-03-11T23:34:38.000Z | app/db_config.py | queenfiona/SendITc3 | 7bb2ed5165e07ec307bbd4f5abe2e53073f05f9c | [
"MIT"
] | 1 | 2018-11-23T02:35:32.000Z | 2018-11-23T02:35:32.000Z | """Docstring for db_config file."""
import psycopg2
import os
from instance.config import app_config
# from boto.s3.connection import S3Connection
# s3 = S3Connection(os.environ['S3_KEY'], os.environ['S3_SECRET'])
env = os.getenv("FLASK_ENV")
if not env:
url = app_config["production"].DATABASE_URL
else:
url = "host='localhost' port='5433' dbname='sendit' user='app' password='app'"
def connection(url):
"""Docstring for connection method."""
connection_to_db = psycopg2.connect(url, sslmode='require')
return connection_to_db
def init_db():
"""Docstring for init_db method."""
connection_to_db = psycopg2.connect(url)
return connection_to_db
def tables():
"""Docstring for tables method."""
users = """CREATE TABLE IF NOT EXISTS users(
user_id SERIAL PRIMARY KEY,
first_name CHARACTER VARYING(200) NOT NULL,
last_name CHARACTER VARYING(200) NOT NULL,
username CHARACTER VARYING(200) NOT NULL,
role CHARACTER VARYING(200) DEFAULT 'user',
email CHARACTER VARYING(320) NOT NULL,
password CHARACTER VARYING(200) NOT NULL);"""
orders = """CREATE TABLE IF NOT EXISTS orders(
parcel_id SERIAL PRIMARY KEY,
user_id SERIAL REFERENCES users(user_id),
item_shipped CHARACTER VARYING(200) NOT NULL,
origin CHARACTER VARYING(200) NOT NULL,
destination CHARACTER VARYING(200) NOT NULL,
weight INTEGER NOT NULL,
current_location CHARACTER VARYING(200) NOT NULL,
pickup_location CHARACTER VARYING(200) NOT NULL,
status CHARACTER VARYING(200) NOT NULL);"""
query = [users, orders]
return query
def create_tables():
"""Docstring for create_tables method."""
tables_to_create = tables()
connection_to_db = connection(url)
cursor = connection_to_db.cursor()
for table in tables_to_create:
cursor.execute(table)
connection_to_db.commit()
def destroy_tables():
"""Docstring for destroy tables method."""
connection_to_db = connection(url)
cursor = connection_to_db.cursor()
drop_orders = """DROP TABLE IF EXISTS orders CASCADE"""
drop_users = """DROP TABLE IF EXISTS users CASCADE"""
queries = [drop_users, drop_orders]
for table_to_drop in queries:
cursor.execute(table_to_drop)
connection_to_db.commit()
| 30.546667 | 82 | 0.709297 | """Docstring for db_config file."""
import psycopg2
import os
from instance.config import app_config
# from boto.s3.connection import S3Connection
# s3 = S3Connection(os.environ['S3_KEY'], os.environ['S3_SECRET'])
env = os.getenv("FLASK_ENV")
if not env:
url = app_config["production"].DATABASE_URL
else:
url = "host='localhost' port='5433' dbname='sendit' user='app' password='app'"
def connection(url):
"""Docstring for connection method."""
connection_to_db = psycopg2.connect(url, sslmode='require')
return connection_to_db
def init_db():
"""Docstring for init_db method."""
connection_to_db = psycopg2.connect(url)
return connection_to_db
def tables():
"""Docstring for tables method."""
users = """CREATE TABLE IF NOT EXISTS users(
user_id SERIAL PRIMARY KEY,
first_name CHARACTER VARYING(200) NOT NULL,
last_name CHARACTER VARYING(200) NOT NULL,
username CHARACTER VARYING(200) NOT NULL,
role CHARACTER VARYING(200) DEFAULT 'user',
email CHARACTER VARYING(320) NOT NULL,
password CHARACTER VARYING(200) NOT NULL);"""
orders = """CREATE TABLE IF NOT EXISTS orders(
parcel_id SERIAL PRIMARY KEY,
user_id SERIAL REFERENCES users(user_id),
item_shipped CHARACTER VARYING(200) NOT NULL,
origin CHARACTER VARYING(200) NOT NULL,
destination CHARACTER VARYING(200) NOT NULL,
weight INTEGER NOT NULL,
current_location CHARACTER VARYING(200) NOT NULL,
pickup_location CHARACTER VARYING(200) NOT NULL,
status CHARACTER VARYING(200) NOT NULL);"""
query = [users, orders]
return query
def create_tables():
"""Docstring for create_tables method."""
tables_to_create = tables()
connection_to_db = connection(url)
cursor = connection_to_db.cursor()
for table in tables_to_create:
cursor.execute(table)
connection_to_db.commit()
def destroy_tables():
"""Docstring for destroy tables method."""
connection_to_db = connection(url)
cursor = connection_to_db.cursor()
drop_orders = """DROP TABLE IF EXISTS orders CASCADE"""
drop_users = """DROP TABLE IF EXISTS users CASCADE"""
queries = [drop_users, drop_orders]
for table_to_drop in queries:
cursor.execute(table_to_drop)
connection_to_db.commit()
| 0 | 0 | 0 |
880d5fbb50cdf9cfc0405f5b88ac61839cc441ce | 3,222 | py | Python | code/gaze_ocr.py | wolfmanstout/knausj_talon | 4763a45f6f17b925a5e37061c0ccaab2c1c487ec | [
"MIT"
] | null | null | null | code/gaze_ocr.py | wolfmanstout/knausj_talon | 4763a45f6f17b925a5e37061c0ccaab2c1c487ec | [
"MIT"
] | null | null | null | code/gaze_ocr.py | wolfmanstout/knausj_talon | 4763a45f6f17b925a5e37061c0ccaab2c1c487ec | [
"MIT"
] | null | null | null | from typing import Union
from talon import Context, Module, actions, app
from talon.grammar import Phrase
import gaze_ocr
import screen_ocr # dependency of gaze-ocr
from gaze_ocr import _talon_wrappers as talon_wrappers
mod = Module()
setting_ocr_logging_dir = mod.setting(
"ocr_logging_dir",
type=str,
default=None,
desc="If specified, log OCR'ed images to this directory.",
)
app.register("ready", on_ready)
@mod.capture(rule="<user.prose> | <user.single_digit_string>")
def onscreen_text(m) -> str:
"""Either words or a number."""
return str(m)
@mod.capture(rule="<user.word> | {user.punctuation} | <user.single_digit_string>")
def onscreen_word(m) -> str:
"""Either a word or a number."""
return str(m)
@mod.action_class | 39.777778 | 86 | 0.678771 | from typing import Union
from talon import Context, Module, actions, app
from talon.grammar import Phrase
import gaze_ocr
import screen_ocr # dependency of gaze-ocr
from gaze_ocr import _talon_wrappers as talon_wrappers
mod = Module()
setting_ocr_logging_dir = mod.setting(
"ocr_logging_dir",
type=str,
default=None,
desc="If specified, log OCR'ed images to this directory.",
)
def on_ready():
# Initialize eye tracking and OCR. See installation instructions:
# https://github.com/wolfmanstout/gaze-ocr
global tracker, ocr_reader, gaze_ocr_controller
tracker = gaze_ocr.eye_tracking.TalonEyeTracker()
ocr_reader = screen_ocr.Reader.create_fast_reader(radius=200)
gaze_ocr_controller = gaze_ocr.Controller(
ocr_reader,
tracker,
save_data_directory=setting_ocr_logging_dir.get(),
mouse=talon_wrappers.Mouse(),
keyboard=talon_wrappers.Keyboard())
app.register("ready", on_ready)
@mod.capture(rule="<user.prose> | <user.single_digit_string>")
def onscreen_text(m) -> str:
"""Either words or a number."""
return str(m)
@mod.capture(rule="<user.word> | {user.punctuation} | <user.single_digit_string>")
def onscreen_word(m) -> str:
"""Either a word or a number."""
return str(m)
@mod.action_class
class GazeOcrActions:
def move_cursor_to_word(text: str):
"""Moves cursor to onscreen word."""
gaze_ocr_controller.read_nearby()
if not gaze_ocr_controller.move_cursor_to_word(text):
raise RuntimeError("Unable to find: \"{}\"".format(text))
def move_text_cursor_to_word(text: str, position: str):
"""Moves text cursor near onscreen word."""
gaze_ocr_controller.read_nearby()
if not gaze_ocr_controller.move_text_cursor_to_word(text, position):
raise RuntimeError("Unable to find: \"{}\"".format(text))
def move_text_cursor_to_word_ignore_errors(text: str, position: str):
"""Moves text cursor near onscreen word, ignoring errors (log only)."""
gaze_ocr_controller.read_nearby()
if not gaze_ocr_controller.move_text_cursor_to_word(text, position):
print("Unable to find: \"{}\"".format(text))
def select_text(start: str, end: str="", for_deletion: bool=False):
"""Selects text near onscreen word."""
gaze_ocr_controller.read_nearby()
if not gaze_ocr_controller.select_text(start, end, for_deletion):
raise RuntimeError("Unable to select \"{}\" to \"{}\"".format(start, end))
def select_text_with_timestamps(start: Phrase, end: Union[Phrase, str]=None,
for_deletion: bool=False):
"""Selects text near onscreen word at phrase timestamps."""
if not gaze_ocr_controller.select_text(
start, end, for_deletion,
start.words[0].start,
end.words[0].start if end else start.words[-1].end):
raise RuntimeError("Unable to select \"{}\" to \"{}\"".format(start, end))
def move_cursor_to_gaze_point(offset_right: int=0, offset_down: int=0):
"""Moves mouse cursor to gaze location."""
tracker.move_to_gaze_point((offset_right, offset_down)) | 508 | 1,904 | 45 |
f6b623f822f4a01b98088658428c858da8bb5f1d | 3,159 | py | Python | booksite/views.py | mark-graciov/bookit | dfe471efe0a1fb1e3688fdebd79a14f8113acd42 | [
"Apache-2.0"
] | null | null | null | booksite/views.py | mark-graciov/bookit | dfe471efe0a1fb1e3688fdebd79a14f8113acd42 | [
"Apache-2.0"
] | null | null | null | booksite/views.py | mark-graciov/bookit | dfe471efe0a1fb1e3688fdebd79a14f8113acd42 | [
"Apache-2.0"
] | null | null | null | import ast
import os
import string
from wsgiref.util import FileWrapper
from django.conf import settings
from django.contrib.staticfiles import finders
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.utils.crypto import random
from .docx import read_docx_tags, replace_docx_tags
from .forms import TaleForm
from .models import Tale, TaleTag
| 31.277228 | 108 | 0.675847 | import ast
import os
import string
from wsgiref.util import FileWrapper
from django.conf import settings
from django.contrib.staticfiles import finders
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.utils.crypto import random
from .docx import read_docx_tags, replace_docx_tags
from .forms import TaleForm
from .models import Tale, TaleTag
def _random_id():
return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(10))
def tale_list(request):
tales = Tale.objects.all()
img_path = os.path.join(settings.STATIC_URL, 'booksite/tales/tale1/preview/p1.jpg')
return render(request, 'booksite/index.html', {'tale_list': tales, 'img_path': img_path})
def tale_details(request, tale_id):
tale = Tale.objects.get(pk=tale_id)
preview_dir = os.path.join(os.path.dirname(tale.doc_path), 'preview')
preview_path = os.path.dirname(finders.find(tale.doc_path))
preview_path = os.path.join(preview_path, 'preview')
img_paths = []
for (dirpath, dirnames, filenames) in os.walk(preview_path):
for filename in filenames:
img_paths.append(os.path.join(preview_dir, filename))
break
return render(request, 'booksite/tale_details.html', {'tale': tale, 'img_paths': img_paths})
def create_tale(request, tale_id):
tale = Tale.objects.get(pk=tale_id)
if request.method == 'POST':
fields = ast.literal_eval(request.POST['tale__fields'])
form = TaleForm(request.POST, fields=fields)
if form.is_valid():
r_id = _random_id()
while r_id in request.session:
r_id = _random_id()
values = form.collect()
request.session[r_id] = (tale_id, values)
tale.downloads += 1
tale.save()
return redirect('download_tale', r_id)
else:
tags = read_docx_tags(finders.find(tale.doc_path))
defined = TaleTag.objects.filter(name__in=tags).order_by('id')
fields = []
for tag in defined:
fields.append((tag.name, tag.label, tag.help))
tags.remove(tag.name)
for tag in tags:
fields.append((tag, tag, ''))
form = TaleForm(fields=fields)
return render(request, 'booksite/create_tale.html', {'tale': tale, 'form': form, 'fields': str(fields)})
def download_tale(request, s_id):
s_tuple = request.session[s_id]
tale = Tale.objects.get(pk=s_tuple[0])
return render(request, 'booksite/download_tale.html', {'tale': tale, 's_id': s_id})
def download_tale_doc(request, s_id):
s_tuple = request.session[s_id]
tale = Tale.objects.get(pk=s_tuple[0])
values = s_tuple[1]
file = replace_docx_tags(finders.find(tale.doc_path), values)
wrapper = FileWrapper(file)
response = HttpResponse(wrapper, content_type='application/docx')
response['Content-Disposition'] = 'attachment; filename=' + os.path.basename(tale.doc_path)
response['Content-Length'] = file.tell()
file.seek(0)
return response
def despre_noi(request):
return render(request, 'booksite/despre_noi.html', )
| 2,601 | 0 | 161 |
6ed97324166b2fdbbae89b96cecf9a98cd36aeb0 | 471 | py | Python | Students/chase/extra3.py | MikelShifrin/Python1 | 0096a327023a28e0c639042ae01268b07e61943e | [
"MIT"
] | 3 | 2019-07-02T13:46:23.000Z | 2019-08-19T14:41:25.000Z | Students/chase/extra3.py | MikelShifrin/Python1 | 0096a327023a28e0c639042ae01268b07e61943e | [
"MIT"
] | null | null | null | Students/chase/extra3.py | MikelShifrin/Python1 | 0096a327023a28e0c639042ae01268b07e61943e | [
"MIT"
] | null | null | null | main()
| 33.642857 | 74 | 0.675159 | def main():
number_of_item=float(input("how many number of item do you want buy"))
if(number_of_item<10):
print(number_of_item*99)
elif(number_of_item>=10 and number_of_item<=19):
print(number_of_item*99*0.9)
elif(number_of_item>=20 and number_of_item<=49):
print(number_of_item*99*0.8)
elif(number_of_item>=50 and number_of_item<=99):
print(number_of_item*99*0.7)
else:
print(number_of_item*99*0.6)
main()
| 442 | 0 | 22 |
31cb143e2377343eee8f33cd17f412399d49b638 | 10,226 | py | Python | ocs_ci/deployment/disconnected.py | kesavanvt/ocs-ci | f120044486631f49133c9f3a137842673d765a1c | [
"MIT"
] | null | null | null | ocs_ci/deployment/disconnected.py | kesavanvt/ocs-ci | f120044486631f49133c9f3a137842673d765a1c | [
"MIT"
] | null | null | null | ocs_ci/deployment/disconnected.py | kesavanvt/ocs-ci | f120044486631f49133c9f3a137842673d765a1c | [
"MIT"
] | null | null | null | """
This module contains functionality required for disconnected installation.
"""
import logging
import os
import tempfile
import yaml
from ocs_ci.framework import config
from ocs_ci.helpers.disconnected import get_opm_tool
from ocs_ci.ocs import constants
from ocs_ci.ocs.exceptions import NotFoundError
from ocs_ci.ocs.resources.catalog_source import CatalogSource
from ocs_ci.utility import templating
from ocs_ci.utility.utils import (
create_directory_path,
exec_cmd,
get_image_with_digest,
get_latest_ds_olm_tag,
get_ocp_version,
login_to_mirror_registry,
prepare_customized_pull_secret,
wait_for_machineconfigpool_status,
)
logger = logging.getLogger(__name__)
def get_csv_from_image(bundle_image):
"""
Extract clusterserviceversion.yaml file from operator bundle image.
Args:
bundle_image (str): OCS operator bundle image
Returns:
dict: loaded yaml from CSV file
"""
manifests_dir = os.path.join(
config.ENV_DATA["cluster_path"], constants.MANIFESTS_DIR
)
ocs_operator_csv_yaml = os.path.join(manifests_dir, constants.OCS_OPERATOR_CSV_YAML)
create_directory_path(manifests_dir)
with prepare_customized_pull_secret(bundle_image) as authfile_fo:
exec_cmd(
f"oc image extract --registry-config {authfile_fo.name} "
f"{bundle_image} --confirm "
f"--path /manifests/ocs-operator.clusterserviceversion.yaml:{manifests_dir}"
)
try:
with open(ocs_operator_csv_yaml) as f:
return yaml.safe_load(f)
except FileNotFoundError as err:
logger.error(f"File {ocs_operator_csv_yaml} does not exists ({err})")
raise
def prepare_disconnected_ocs_deployment():
    """
    Prepare disconnected ocs deployment:
    - get related images from OCS operator bundle csv
    - mirror related images to mirror registry
    - create imageContentSourcePolicy for the mirrored images
    - disable the default OperatorSources

    Side effects: sets the ``REGISTRY_AUTH_FILE`` environment variable,
    writes mapping/ICSP files into the cluster directory and applies
    resources (ICSP, OperatorHub patch, CatalogSource) to the cluster.

    Raises:
        NotFoundError: when ``oc adm catalog mirror`` does not report the
            mirroring manifests directory on stdout
        NotImplementedError: for disconnected installation from stage

    Returns:
        str: OCS registry image prepared for disconnected installation (with
            sha256 digest) or None (for live deployment)

    """
    logger.info("Prepare for disconnected OCS installation")
    if config.DEPLOYMENT.get("live_deployment"):
        # Live deployment path: mirror a pruned redhat-operators index image
        # and point a CatalogSource at the mirror; returns None at the end.
        get_opm_tool()
        pull_secret_path = os.path.join(constants.TOP_DIR, "data", "pull-secret")
        ocp_version = get_ocp_version()
        index_image = f"{config.DEPLOYMENT['cs_redhat_operators_image']}:v{ocp_version}"
        mirrored_index_image = (
            f"{config.DEPLOYMENT['mirror_registry']}/{constants.MIRRORED_INDEX_IMAGE_NAMESPACE}/"
            f"{constants.MIRRORED_INDEX_IMAGE_NAME}:v{ocp_version}"
        )
        # prune an index image down to just the packages we need
        logger.info(
            f"Prune index image {index_image} -> {mirrored_index_image} "
            f"(packages: {', '.join(constants.DISCON_CL_REQUIRED_PACKAGES)})"
        )
        cmd = (
            f"opm index prune -f {index_image} "
            f"-p {','.join(constants.DISCON_CL_REQUIRED_PACKAGES)} "
            f"-t {mirrored_index_image}"
        )
        # opm tool doesn't have --authfile parameter, we have to supply auth
        # file through env variable
        os.environ["REGISTRY_AUTH_FILE"] = pull_secret_path
        exec_cmd(cmd)
        # login to mirror registry
        login_to_mirror_registry(pull_secret_path)
        # push pruned index image to mirror registry
        logger.info(
            f"Push pruned index image to mirror registry: {mirrored_index_image}"
        )
        cmd = f"podman push --authfile {pull_secret_path} --tls-verify=false {mirrored_index_image}"
        exec_cmd(cmd)
        # mirror related images (this might take very long time)
        logger.info(f"Mirror images related to index image: {mirrored_index_image}")
        cmd = (
            f"oc adm catalog mirror {mirrored_index_image} -a {pull_secret_path} --insecure "
            f"{config.DEPLOYMENT['mirror_registry']} --index-filter-by-os='.*'"
        )
        oc_acm_result = exec_cmd(cmd, timeout=7200)
        # for/else: `line` keeps the matching stdout line after `break`;
        # the else-branch fires only when no line matched.
        for line in oc_acm_result.stdout.decode("utf-8").splitlines():
            if "wrote mirroring manifests to" in line:
                break
        else:
            raise NotFoundError(
                "Manifests directory not printed to stdout of 'oc adm catalog mirror ...' command."
            )
        mirroring_manifests_dir = line.replace("wrote mirroring manifests to ", "")
        logger.debug(f"Mirrored manifests directory: {mirroring_manifests_dir}")
        # create ImageContentSourcePolicy from the generated manifests
        icsp_file = os.path.join(
            f"{mirroring_manifests_dir}",
            "imageContentSourcePolicy.yaml",
        )
        exec_cmd(f"oc apply -f {icsp_file}")
        # Disable the default OperatorSources
        exec_cmd(
            """oc patch OperatorHub cluster --type json """
            """-p '[{"op": "add", "path": "/spec/disableAllDefaultSources", "value": true}]'"""
        )
        # create redhat-operators CatalogSource pointing at the mirrored index
        catalog_source_data = templating.load_yaml(constants.CATALOG_SOURCE_YAML)
        # NOTE(review): delete=False leaves the rendered manifest on disk —
        # presumably intentional for debugging; confirm before changing.
        catalog_source_manifest = tempfile.NamedTemporaryFile(
            mode="w+", prefix="catalog_source_manifest", delete=False
        )
        catalog_source_data["spec"]["image"] = f"{mirrored_index_image}"
        catalog_source_data["metadata"]["name"] = "redhat-operators"
        catalog_source_data["spec"]["displayName"] = "Red Hat Operators - Mirrored"
        templating.dump_data_to_temp_yaml(
            catalog_source_data, catalog_source_manifest.name
        )
        exec_cmd(f"oc apply -f {catalog_source_manifest.name}")
        catalog_source = CatalogSource(
            resource_name="redhat-operators",
            namespace=constants.MARKETPLACE_NAMESPACE,
        )
        # Wait for catalog source is ready
        catalog_source.wait_for_state("READY")
        return
    if config.DEPLOYMENT.get("stage_rh_osbs"):
        raise NotImplementedError(
            "Disconnected installation from stage is not implemented!"
        )
    # Non-live path: resolve the OCS registry image (and tag) to mirror.
    ocs_registry_image = config.DEPLOYMENT.get("ocs_registry_image", "")
    logger.debug(f"ocs-registry-image: {ocs_registry_image}")
    ocs_registry_image_and_tag = ocs_registry_image.split(":")
    ocs_registry_image = ocs_registry_image_and_tag[0]
    image_tag = (
        ocs_registry_image_and_tag[1] if len(ocs_registry_image_and_tag) == 2 else None
    )
    if not image_tag and config.REPORTING.get("us_ds") == "DS":
        # No explicit tag: fall back to the latest downstream OLM tag.
        image_tag = get_latest_ds_olm_tag(
            upgrade=False,
            latest_tag=config.DEPLOYMENT.get("default_latest_tag", "latest"),
        )
        ocs_registry_image = f"{config.DEPLOYMENT['default_ocs_registry_image'].split(':')[0]}:{image_tag}"
    bundle_image = f"{constants.OCS_OPERATOR_BUNDLE_IMAGE}:{image_tag}"
    logger.debug(f"ocs-operator-bundle image: {bundle_image}")
    csv_yaml = get_csv_from_image(bundle_image)
    # Dig the operator image out of the CSV; every .get() has a default so a
    # missing key yields None instead of raising.
    ocs_operator_image = (
        csv_yaml.get("spec", {})
        .get("install", {})
        .get("spec", {})
        .get("deployments", [{}])[0]
        .get("spec", {})
        .get("template", {})
        .get("spec", {})
        .get("containers", [{}])[0]
        .get("image")
    )
    logger.debug(f"ocs-operator-image: {ocs_operator_image}")
    # prepare list related images (bundle, registry and operator images and all
    # images from relatedImages section from csv)
    ocs_related_images = []
    ocs_related_images.append(get_image_with_digest(bundle_image))
    ocs_registry_image_with_digest = get_image_with_digest(ocs_registry_image)
    ocs_related_images.append(ocs_registry_image_with_digest)
    ocs_related_images.append(get_image_with_digest(ocs_operator_image))
    ocs_related_images += [
        image["image"] for image in csv_yaml.get("spec").get("relatedImages")
    ]
    logger.debug(f"OCS Related Images: {ocs_related_images}")
    mirror_registry = config.DEPLOYMENT["mirror_registry"]
    # prepare images mapping file for mirroring; image[...] slices out the
    # repository path between the registry host ('/') and the digest ('@')
    mapping_file_content = [
        f"{image}={mirror_registry}{image[image.index('/'):image.index('@')]}\n"
        for image in ocs_related_images
    ]
    logger.debug(f"Mapping file content: {mapping_file_content}")
    name = "ocs-images"
    mapping_file = os.path.join(config.ENV_DATA["cluster_path"], f"{name}-mapping.txt")
    # write mapping file to disk
    with open(mapping_file, "w") as f:
        f.writelines(mapping_file_content)
    # prepare ImageContentSourcePolicy for OCS images
    with open(constants.TEMPLATE_IMAGE_CONTENT_SOURCE_POLICY_YAML) as f:
        ocs_icsp = yaml.safe_load(f)
    ocs_icsp["metadata"]["name"] = name
    ocs_icsp["spec"]["repositoryDigestMirrors"] = []
    for image in ocs_related_images:
        ocs_icsp["spec"]["repositoryDigestMirrors"].append(
            {
                "mirrors": [
                    f"{mirror_registry}{image[image.index('/'):image.index('@')]}"
                ],
                "source": image[: image.index("@")],
            }
        )
    logger.debug(f"OCS imageContentSourcePolicy: {yaml.safe_dump(ocs_icsp)}")
    ocs_icsp_file = os.path.join(
        config.ENV_DATA["cluster_path"], f"{name}-imageContentSourcePolicy.yaml"
    )
    with open(ocs_icsp_file, "w+") as fs:
        yaml.safe_dump(ocs_icsp, fs)
    # create ImageContentSourcePolicy
    exec_cmd(f"oc apply -f {ocs_icsp_file}")
    # mirror images based on mapping file
    with prepare_customized_pull_secret(ocs_related_images) as authfile_fo:
        login_to_mirror_registry(authfile_fo.name)
        exec_cmd(
            f"oc image mirror --filter-by-os='.*' -f {mapping_file} --insecure "
            f"--registry-config={authfile_fo.name} --max-per-registry=2",
            timeout=3600,
        )
    # Disable the default OperatorSources
    exec_cmd(
        """oc patch OperatorHub cluster --type json """
        """-p '[{"op": "add", "path": "/spec/disableAllDefaultSources", "value": true}]'"""
    )
    # wait for newly created imageContentSourcePolicy is applied on all nodes
    wait_for_machineconfigpool_status("all")
    return ocs_registry_image_with_digest
| 37.457875 | 107 | 0.663896 | """
This module contains functionality required for disconnected installation.
"""
import logging
import os
import tempfile
import yaml
from ocs_ci.framework import config
from ocs_ci.helpers.disconnected import get_opm_tool
from ocs_ci.ocs import constants
from ocs_ci.ocs.exceptions import NotFoundError
from ocs_ci.ocs.resources.catalog_source import CatalogSource
from ocs_ci.utility import templating
from ocs_ci.utility.utils import (
create_directory_path,
exec_cmd,
get_image_with_digest,
get_latest_ds_olm_tag,
get_ocp_version,
login_to_mirror_registry,
prepare_customized_pull_secret,
wait_for_machineconfigpool_status,
)
logger = logging.getLogger(__name__)
def get_csv_from_image(bundle_image):
"""
Extract clusterserviceversion.yaml file from operator bundle image.
Args:
bundle_image (str): OCS operator bundle image
Returns:
dict: loaded yaml from CSV file
"""
manifests_dir = os.path.join(
config.ENV_DATA["cluster_path"], constants.MANIFESTS_DIR
)
ocs_operator_csv_yaml = os.path.join(manifests_dir, constants.OCS_OPERATOR_CSV_YAML)
create_directory_path(manifests_dir)
with prepare_customized_pull_secret(bundle_image) as authfile_fo:
exec_cmd(
f"oc image extract --registry-config {authfile_fo.name} "
f"{bundle_image} --confirm "
f"--path /manifests/ocs-operator.clusterserviceversion.yaml:{manifests_dir}"
)
try:
with open(ocs_operator_csv_yaml) as f:
return yaml.safe_load(f)
except FileNotFoundError as err:
logger.error(f"File {ocs_operator_csv_yaml} does not exists ({err})")
raise
def prepare_disconnected_ocs_deployment():
"""
Prepare disconnected ocs deployment:
- get related images from OCS operator bundle csv
- mirror related images to mirror registry
- create imageContentSourcePolicy for the mirrored images
- disable the default OperatorSources
Returns:
str: OCS registry image prepared for disconnected installation (with
sha256 digest) or None (for live deployment)
"""
logger.info("Prepare for disconnected OCS installation")
if config.DEPLOYMENT.get("live_deployment"):
get_opm_tool()
pull_secret_path = os.path.join(constants.TOP_DIR, "data", "pull-secret")
ocp_version = get_ocp_version()
index_image = f"{config.DEPLOYMENT['cs_redhat_operators_image']}:v{ocp_version}"
mirrored_index_image = (
f"{config.DEPLOYMENT['mirror_registry']}/{constants.MIRRORED_INDEX_IMAGE_NAMESPACE}/"
f"{constants.MIRRORED_INDEX_IMAGE_NAME}:v{ocp_version}"
)
# prune an index image
logger.info(
f"Prune index image {index_image} -> {mirrored_index_image} "
f"(packages: {', '.join(constants.DISCON_CL_REQUIRED_PACKAGES)})"
)
cmd = (
f"opm index prune -f {index_image} "
f"-p {','.join(constants.DISCON_CL_REQUIRED_PACKAGES)} "
f"-t {mirrored_index_image}"
)
# opm tool doesn't have --atuhfile parameter, we have to suply auth
# file through env variable
os.environ["REGISTRY_AUTH_FILE"] = pull_secret_path
exec_cmd(cmd)
# login to mirror registry
login_to_mirror_registry(pull_secret_path)
# push pruned index image to mirror registry
logger.info(
f"Push pruned index image to mirror registry: {mirrored_index_image}"
)
cmd = f"podman push --authfile {pull_secret_path} --tls-verify=false {mirrored_index_image}"
exec_cmd(cmd)
# mirror related images (this might take very long time)
logger.info(f"Mirror images related to index image: {mirrored_index_image}")
cmd = (
f"oc adm catalog mirror {mirrored_index_image} -a {pull_secret_path} --insecure "
f"{config.DEPLOYMENT['mirror_registry']} --index-filter-by-os='.*'"
)
oc_acm_result = exec_cmd(cmd, timeout=7200)
for line in oc_acm_result.stdout.decode("utf-8").splitlines():
if "wrote mirroring manifests to" in line:
break
else:
raise NotFoundError(
"Manifests directory not printed to stdout of 'oc adm catalog mirror ...' command."
)
mirroring_manifests_dir = line.replace("wrote mirroring manifests to ", "")
logger.debug(f"Mirrored manifests directory: {mirroring_manifests_dir}")
# create ImageContentSourcePolicy
icsp_file = os.path.join(
f"{mirroring_manifests_dir}",
"imageContentSourcePolicy.yaml",
)
exec_cmd(f"oc apply -f {icsp_file}")
# Disable the default OperatorSources
exec_cmd(
"""oc patch OperatorHub cluster --type json """
"""-p '[{"op": "add", "path": "/spec/disableAllDefaultSources", "value": true}]'"""
)
# create redhat-operators CatalogSource
catalog_source_data = templating.load_yaml(constants.CATALOG_SOURCE_YAML)
catalog_source_manifest = tempfile.NamedTemporaryFile(
mode="w+", prefix="catalog_source_manifest", delete=False
)
catalog_source_data["spec"]["image"] = f"{mirrored_index_image}"
catalog_source_data["metadata"]["name"] = "redhat-operators"
catalog_source_data["spec"]["displayName"] = "Red Hat Operators - Mirrored"
templating.dump_data_to_temp_yaml(
catalog_source_data, catalog_source_manifest.name
)
exec_cmd(f"oc apply -f {catalog_source_manifest.name}")
catalog_source = CatalogSource(
resource_name="redhat-operators",
namespace=constants.MARKETPLACE_NAMESPACE,
)
# Wait for catalog source is ready
catalog_source.wait_for_state("READY")
return
if config.DEPLOYMENT.get("stage_rh_osbs"):
raise NotImplementedError(
"Disconnected installation from stage is not implemented!"
)
ocs_registry_image = config.DEPLOYMENT.get("ocs_registry_image", "")
logger.debug(f"ocs-registry-image: {ocs_registry_image}")
ocs_registry_image_and_tag = ocs_registry_image.split(":")
ocs_registry_image = ocs_registry_image_and_tag[0]
image_tag = (
ocs_registry_image_and_tag[1] if len(ocs_registry_image_and_tag) == 2 else None
)
if not image_tag and config.REPORTING.get("us_ds") == "DS":
image_tag = get_latest_ds_olm_tag(
upgrade=False,
latest_tag=config.DEPLOYMENT.get("default_latest_tag", "latest"),
)
ocs_registry_image = f"{config.DEPLOYMENT['default_ocs_registry_image'].split(':')[0]}:{image_tag}"
bundle_image = f"{constants.OCS_OPERATOR_BUNDLE_IMAGE}:{image_tag}"
logger.debug(f"ocs-operator-bundle image: {bundle_image}")
csv_yaml = get_csv_from_image(bundle_image)
ocs_operator_image = (
csv_yaml.get("spec", {})
.get("install", {})
.get("spec", {})
.get("deployments", [{}])[0]
.get("spec", {})
.get("template", {})
.get("spec", {})
.get("containers", [{}])[0]
.get("image")
)
logger.debug(f"ocs-operator-image: {ocs_operator_image}")
# prepare list related images (bundle, registry and operator images and all
# images from relatedImages section from csv)
ocs_related_images = []
ocs_related_images.append(get_image_with_digest(bundle_image))
ocs_registry_image_with_digest = get_image_with_digest(ocs_registry_image)
ocs_related_images.append(ocs_registry_image_with_digest)
ocs_related_images.append(get_image_with_digest(ocs_operator_image))
ocs_related_images += [
image["image"] for image in csv_yaml.get("spec").get("relatedImages")
]
logger.debug(f"OCS Related Images: {ocs_related_images}")
mirror_registry = config.DEPLOYMENT["mirror_registry"]
# prepare images mapping file for mirroring
mapping_file_content = [
f"{image}={mirror_registry}{image[image.index('/'):image.index('@')]}\n"
for image in ocs_related_images
]
logger.debug(f"Mapping file content: {mapping_file_content}")
name = "ocs-images"
mapping_file = os.path.join(config.ENV_DATA["cluster_path"], f"{name}-mapping.txt")
# write mapping file to disk
with open(mapping_file, "w") as f:
f.writelines(mapping_file_content)
# prepare ImageContentSourcePolicy for OCS images
with open(constants.TEMPLATE_IMAGE_CONTENT_SOURCE_POLICY_YAML) as f:
ocs_icsp = yaml.safe_load(f)
ocs_icsp["metadata"]["name"] = name
ocs_icsp["spec"]["repositoryDigestMirrors"] = []
for image in ocs_related_images:
ocs_icsp["spec"]["repositoryDigestMirrors"].append(
{
"mirrors": [
f"{mirror_registry}{image[image.index('/'):image.index('@')]}"
],
"source": image[: image.index("@")],
}
)
logger.debug(f"OCS imageContentSourcePolicy: {yaml.safe_dump(ocs_icsp)}")
ocs_icsp_file = os.path.join(
config.ENV_DATA["cluster_path"], f"{name}-imageContentSourcePolicy.yaml"
)
with open(ocs_icsp_file, "w+") as fs:
yaml.safe_dump(ocs_icsp, fs)
# create ImageContentSourcePolicy
exec_cmd(f"oc apply -f {ocs_icsp_file}")
# mirror images based on mapping file
with prepare_customized_pull_secret(ocs_related_images) as authfile_fo:
login_to_mirror_registry(authfile_fo.name)
exec_cmd(
f"oc image mirror --filter-by-os='.*' -f {mapping_file} --insecure "
f"--registry-config={authfile_fo.name} --max-per-registry=2",
timeout=3600,
)
# Disable the default OperatorSources
exec_cmd(
"""oc patch OperatorHub cluster --type json """
"""-p '[{"op": "add", "path": "/spec/disableAllDefaultSources", "value": true}]'"""
)
# wait for newly created imageContentSourcePolicy is applied on all nodes
wait_for_machineconfigpool_status("all")
return ocs_registry_image_with_digest
| 0 | 0 | 0 |
c14ef67b64dac63462b415ccd6897ee14da0a882 | 622 | py | Python | test/test_helpers.py | virrim/rivalcfg | 1215ca2061feddd7b85599e6b986705982138edd | [
"WTFPL"
] | 2 | 2019-06-28T15:13:59.000Z | 2020-06-05T19:00:39.000Z | test/test_helpers.py | virrim/rivalcfg | 1215ca2061feddd7b85599e6b986705982138edd | [
"WTFPL"
] | 1 | 2020-05-09T06:12:34.000Z | 2020-07-31T23:58:55.000Z | test/test_helpers.py | virrim/rivalcfg | 1215ca2061feddd7b85599e6b986705982138edd | [
"WTFPL"
] | null | null | null | import pytest
import rivalcfg.helpers
| 34.555556 | 98 | 0.742765 | import pytest
import rivalcfg.helpers
class TestIntToLittleEndianBytearray(object):
    """Tests for rivalcfg.helpers.uint_to_little_endian_bytearray."""

    def test_small_number_16bit(self):
        result = rivalcfg.helpers.uint_to_little_endian_bytearray(0x0001, 2)
        assert result == [0x01, 0x00]

    def test_bigger_number_16bit(self):
        result = rivalcfg.helpers.uint_to_little_endian_bytearray(0x0A01, 2)
        assert result == [0x01, 0x0A]

    def test_overflow_number_16bit(self):
        # The maximum 16-bit value still fits...
        result = rivalcfg.helpers.uint_to_little_endian_bytearray(0xFFFF, 2)
        assert result == [0xFF, 0xFF]
        # ...but anything wider than two bytes must be rejected.
        with pytest.raises(ValueError):
            rivalcfg.helpers.uint_to_little_endian_bytearray(0xABCDEF, 2)
0455f230ec9eb1646bbd8b355eef81e8a026f547 | 5,969 | py | Python | tests/test_adapters.py | bernt-matthias/cutadapt | 9ba5b705ba0e6cc5e32d4ce3810788b05b16a306 | [
"MIT"
] | null | null | null | tests/test_adapters.py | bernt-matthias/cutadapt | 9ba5b705ba0e6cc5e32d4ce3810788b05b16a306 | [
"MIT"
] | null | null | null | tests/test_adapters.py | bernt-matthias/cutadapt | 9ba5b705ba0e6cc5e32d4ce3810788b05b16a306 | [
"MIT"
] | null | null | null | import pytest
from dnaio import Sequence
from cutadapt.adapters import Adapter, Match, Where, LinkedAdapter
def test_issue_265():
    """Crash when accessing the matches property of non-anchored linked adapters"""
    read = Sequence('name', 'AAAATTTT')
    linked = LinkedAdapter(
        Adapter('GGG', where=Where.FRONT),
        Adapter('TTT', where=Where.BACK),
        front_required=False,
        back_required=False,
        name='name',
    )
    # Accessing .matches used to raise; it must return the match length.
    assert linked.match_to(read).matches == 3
@pytest.mark.parametrize("where", [Where.PREFIX, Where.SUFFIX])
| 33.346369 | 107 | 0.640978 | import pytest
from dnaio import Sequence
from cutadapt.adapters import Adapter, Match, Where, LinkedAdapter
def test_issue_52():
    # Regression test for issue #52: wildcards() should report the read bases
    # that line up with the N (wildcard) positions of the adapter.
    adapter = Adapter(
        sequence='GAACTCCAGTCACNNNNN',
        where=Where.BACK,
        remove='suffix',
        max_error_rate=0.12,
        min_overlap=5,
        read_wildcards=False,
        adapter_wildcards=True)
    read = Sequence(name="abc", sequence='CCCCAGAACTACAGTCCCGGC')
    # Match is constructed by hand (not produced by match_to), so the
    # coordinates below are fixed test fixtures, not computed values.
    am = Match(astart=0, astop=17, rstart=5, rstop=21, matches=15, errors=2,
               remove_before=False, adapter=adapter, read=read)
    assert am.wildcards() == 'GGC'
    """
    The result above should actually be 'CGGC' since the correct
    alignment is this one:
    adapter GAACTCCAGTCACNNNNN
    mismatches X X
    read CCCCAGAACTACAGTC-CCGGC
    Since we do not keep the alignment, guessing 'GGC' is the best we
    can currently do.
    """
def test_issue_80():
    """Alignment with indels may beat the 'obvious' indel-free one (issue #80)."""
    # This issue turned out to not be an actual issue with the alignment
    # algorithm. The following alignment is found because it has more matches
    # than the 'obvious' one:
    #
    # TCGTATGCCGTCTTC
    # =========X==XX=
    # TCGTATGCCCTC--C
    #
    # This is correct, albeit a little surprising, since an alignment without
    # indels would have only two errors.
    adapter = Adapter(
        sequence="TCGTATGCCGTCTTC",
        where=Where.BACK,
        remove='suffix',
        max_error_rate=0.2,
        min_overlap=3,
        read_wildcards=False,
        adapter_wildcards=False)
    match = adapter.match_to(Sequence(name="seq2", sequence="TCGTATGCCCTCC"))
    assert match.errors == 3, match
    assert match.astart == 0, match
    assert match.astop == 15, match
def test_str():
    """Smoke test: Adapter and Match must have working str() representations."""
    adapter = Adapter('ACGT', where=Where.BACK, remove='suffix', max_error_rate=0.1)
    str(adapter)
    str(adapter.match_to(Sequence(name='seq', sequence='TTACGT')))
def test_linked_adapter():
    """A linked adapter trims both the 5' and 3' parts from a read."""
    linked = LinkedAdapter(
        Adapter('AAAA', where=Where.PREFIX, min_overlap=4),
        Adapter('TTTT', where=Where.BACK, min_overlap=3),
        front_required=True,
        back_required=False,
        name='name',
    )
    # min_overlap of each half must be preserved by the linked wrapper.
    assert linked.front_adapter.min_overlap == 4
    assert linked.back_adapter.min_overlap == 3
    read = Sequence(name='seq', sequence='AAAACCCCCTTTT')
    trimmed = linked.match_to(read).trimmed()
    assert trimmed.name == 'seq'
    assert trimmed.sequence == 'CCCCC'
def test_info_record():
    """get_info_record() returns the tuple of fields for the info file."""
    adapter = Adapter(
        sequence='GAACTCCAGTCACNNNNN',
        where=Where.BACK,
        max_error_rate=0.12,
        min_overlap=5,
        read_wildcards=False,
        adapter_wildcards=True,
        name="Foo")
    read = Sequence(name="abc", sequence='CCCCAGAACTACAGTCCCGGC')
    match = Match(astart=0, astop=17, rstart=5, rstop=21, matches=15, errors=2,
                  remove_before=False, adapter=adapter, read=read)
    # (read name, errors, rstart, rstop, seq before, matched seq, seq after,
    #  adapter name, plus four empty quality/name2 fields)
    expected = (
        "abc",
        2,
        5,
        21,
        'CCCCA',
        'GAACTACAGTCCCGGC',
        '',
        'Foo',
        '',
        '',
        '',
    )
    assert match.get_info_record() == expected
def test_random_match_probabilities():
    """Per-length probabilities of a random match, for a given GC content."""
    stats = Adapter('A', where=Where.BACK, max_error_rate=0.1).create_statistics()
    assert stats.back.random_match_probabilities(0.5) == [1, 0.25]
    assert stats.back.random_match_probabilities(0.2) == [1, 0.4]
    # 'X', 'M', 'W', 'H' are treated like their A/C/T/G counterparts here.
    for seq in ('ACTG', 'XMWH'):
        stats = Adapter(seq, where=Where.BACK, max_error_rate=0.1).create_statistics()
        assert stats.back.random_match_probabilities(0.5) == [1, 0.25, 0.25 ** 2, 0.25 ** 3, 0.25 ** 4]
        assert stats.back.random_match_probabilities(0.2) == [1, 0.4, 0.4 * 0.1, 0.4 * 0.1 * 0.4, 0.4 * 0.1 * 0.4 * 0.1]
    # Front adapters accumulate in the same way, just on the other end.
    stats = Adapter('GTCA', where=Where.FRONT, max_error_rate=0.1).create_statistics()
    assert stats.front.random_match_probabilities(0.5) == [1, 0.25, 0.25 ** 2, 0.25 ** 3, 0.25 ** 4]
    assert stats.front.random_match_probabilities(0.2) == [1, 0.4, 0.4 * 0.1, 0.4 * 0.1 * 0.4, 0.4 * 0.1 * 0.4 * 0.1]
def test_add_adapter_statistics():
    """Adding two AdapterStatistics merges adjacent-base and error counts."""
    stats = Adapter('A', name='name', where=Where.BACK, max_error_rate=0.1).create_statistics()
    first = stats.back
    for base, count in (('A', 7), ('C', 19), ('G', 23), ('T', 42), ('', 45)):
        first.adjacent_bases[base] = count
    first.errors[10][0] = 100
    first.errors[10][1] = 11
    first.errors[10][2] = 3
    first.errors[20][0] = 600
    first.errors[20][1] = 66
    first.errors[20][2] = 6
    other_stats = Adapter('A', name='name', where=Where.BACK, max_error_rate=0.1).create_statistics()
    second = other_stats.back
    for base, count in (('A', 43), ('C', 31), ('G', 27), ('T', 8), ('', 5)):
        second.adjacent_bases[base] = count
    second.errors[10][0] = 234
    second.errors[10][1] = 14
    second.errors[10][3] = 5
    second.errors[15][0] = 90
    second.errors[15][1] = 17
    second.errors[15][2] = 2
    stats += other_stats
    merged = stats.back
    # Counts per base were chosen so every merged value is 50.
    assert merged.adjacent_bases == {'A': 50, 'C': 50, 'G': 50, 'T': 50, '': 50}
    # Error counts are merged per (length, error-count) cell.
    assert merged.errors == {
        10: {0: 334, 1: 25, 2: 3, 3: 5},
        15: {0: 90, 1: 17, 2: 2},
        20: {0: 600, 1: 66, 2: 6},
    }
def test_issue_265():
"""Crash when accessing the matches property of non-anchored linked adapters"""
s = Sequence('name', 'AAAATTTT')
front_adapter = Adapter('GGG', where=Where.FRONT)
back_adapter = Adapter('TTT', where=Where.BACK)
la = LinkedAdapter(front_adapter, back_adapter, front_required=False, back_required=False, name='name')
assert la.match_to(s).matches == 3
@pytest.mark.parametrize("where", [Where.PREFIX, Where.SUFFIX])
def test_no_indels_empty_read(where):
    """Anchored, indel-free matching against an empty read must not crash (issue #376)."""
    adapter = Adapter('ACGT', where=where, indels=False)
    adapter.match_to(Sequence('name', ''))
| 5,206 | 0 | 183 |
77524bcd98a60ce23ba96fe03b7a6453cc256460 | 6,815 | py | Python | pooch/hashes.py | rabernat/pooch | bc32d4eecec115e1fdf9bd4e306df5a6c22661fd | [
"BSD-3-Clause"
] | 214 | 2018-07-21T19:21:45.000Z | 2022-03-25T01:46:41.000Z | pooch/hashes.py | rabernat/pooch | bc32d4eecec115e1fdf9bd4e306df5a6c22661fd | [
"BSD-3-Clause"
] | 194 | 2018-07-20T23:09:05.000Z | 2022-03-14T14:50:29.000Z | pooch/hashes.py | rabernat/pooch | bc32d4eecec115e1fdf9bd4e306df5a6c22661fd | [
"BSD-3-Clause"
] | 48 | 2018-07-23T21:20:55.000Z | 2022-03-25T01:46:46.000Z | # Copyright (c) 2018 The Pooch Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
"""
Calculating and checking file hashes.
"""
import hashlib
import functools
from pathlib import Path
# From the docs: https://docs.python.org/3/library/hashlib.html#hashlib.new
# The named constructors are much faster than new() and should be
# preferred.
# Need to fallback on new() for some algorithms.
ALGORITHMS_AVAILABLE = {
alg: getattr(hashlib, alg, functools.partial(hashlib.new, alg))
for alg in hashlib.algorithms_available
}
try:
import xxhash
# xxhash doesn't have a list of available algorithms yet.
# https://github.com/ifduyue/python-xxhash/issues/48
ALGORITHMS_AVAILABLE.update(
{
alg: getattr(xxhash, alg, None)
for alg in ["xxh128", "xxh64", "xxh32", "xxh3_128", "xxh3_64"]
}
)
# The xxh3 algorithms are only available for version>=2.0. Set to None and
# remove to ensure backwards compatibility.
ALGORITHMS_AVAILABLE = {
alg: func for alg, func in ALGORITHMS_AVAILABLE.items() if func is not None
}
except ImportError:
pass
def file_hash(fname, alg="sha256"):
    """
    Calculate the hash of a given file.

    Useful for checking if a file has changed or been corrupted.

    Parameters
    ----------
    fname : str
        The name of the file.
    alg : str
        The type of the hashing algorithm

    Returns
    -------
    hash : str
        The hash of the file.

    Examples
    --------

    >>> fname = "test-file-for-hash.txt"
    >>> with open(fname, "w") as f:
    ...     __ = f.write("content of the file")
    >>> print(file_hash(fname))
    0fc74468e6a9a829f103d069aeb2bb4f8646bad58bf146bb0e3379b759ec4a00
    >>> import os
    >>> os.remove(fname)

    """
    if alg not in ALGORITHMS_AVAILABLE:
        raise ValueError(
            f"Algorithm '{alg}' not available to the pooch library. "
            "Only the following algorithms are available "
            f"{list(ALGORITHMS_AVAILABLE.keys())}."
        )
    hasher = ALGORITHMS_AVAILABLE[alg]()
    # Feed the file to the hasher in chunks so large files do not have to be
    # loaded into memory all at once.
    chunk_size = 65536
    with open(fname, "rb") as source:
        while True:
            chunk = source.read(chunk_size)
            if not chunk:
                break
            hasher.update(chunk)
    return hasher.hexdigest()
def hash_algorithm(hash_string):
    """
    Parse the name of the hash method from the hash string.

    The hash string should have the following form ``algorithm:hash``, where
    algorithm can be the name of any algorithm known to :mod:`hashlib`.

    If the algorithm is omitted or the hash string is None, will default to
    ``"sha256"``.

    Parameters
    ----------
    hash_string : str
        The hash string with optional algorithm prepended.

    Returns
    -------
    hash_algorithm : str
        The name of the algorithm.

    Examples
    --------

    >>> print(hash_algorithm("qouuwhwd2j192y1lb1iwgowdj2898wd2d9"))
    sha256
    >>> print(hash_algorithm("md5:qouuwhwd2j192y1lb1iwgowdj2898wd2d9"))
    md5
    >>> print(hash_algorithm("sha256:qouuwhwd2j192y1lb1iwgowdj2898wd2d9"))
    sha256
    >>> print(hash_algorithm("SHA256:qouuwhwd2j192y1lb1iwgowdj2898wd2d9"))
    sha256
    >>> print(hash_algorithm("xxh3_64:qouuwhwd2j192y1lb1iwgowdj2898wd2d9"))
    xxh3_64
    >>> print(hash_algorithm(None))
    sha256

    """
    # No string or no "algorithm:" prefix means the default algorithm.
    if hash_string is None or ":" not in hash_string:
        return "sha256"
    # Normalize to lowercase so "SHA256:..." and "sha256:..." are equivalent.
    return hash_string.split(":")[0].lower()
def hash_matches(fname, known_hash, strict=False, source=None):
    """
    Check if the hash of a file matches a known hash.

    If the *known_hash* is None, will always return True.

    Coverts hashes to lowercase before comparison to avoid system specific
    mismatches between hashes in the registry and computed hashes.

    Parameters
    ----------
    fname : str or PathLike
        The path to the file.
    known_hash : str
        The known hash. Optionally, prepend ``alg:`` to the hash to specify the
        hashing algorithm. Default is SHA256.
    strict : bool
        If True, will raise a :class:`ValueError` if the hash does not match
        informing the user that the file may be corrupted.
    source : str
        The source of the downloaded file (name or URL, for example). Will be
        used in the error message if *strict* is True. Has no other use other
        than reporting to the user where the file came from in case of hash
        mismatch. If None, will default to *fname*.

    Returns
    -------
    is_same : bool
        True if the hash matches, False otherwise.

    """
    if known_hash is None:
        # Nothing to compare against, so accept the file as-is.
        return True
    algorithm = hash_algorithm(known_hash)
    new_hash = file_hash(fname, alg=algorithm)
    # Lowercase both sides so letter case never causes a spurious mismatch.
    if new_hash.lower() == known_hash.split(":")[-1].lower():
        return True
    if strict:
        if source is None:
            source = str(fname)
        raise ValueError(
            f"{algorithm.upper()} hash of downloaded file ({source}) does not match"
            f" the known hash: expected {known_hash} but got {new_hash}. Deleted"
            " download for safety. The downloaded file may have been corrupted or"
            " the known hash may be outdated."
        )
    return False
def make_registry(directory, output, recursive=True):
    """
    Make a registry of files and hashes for the given directory.

    This is helpful if you have many files in your test dataset as it keeps you
    from needing to manually update the registry.

    Parameters
    ----------
    directory : str
        Directory of the test data to put in the registry. All file names in
        the registry will be relative to this directory.
    output : str
        Name of the output registry file.
    recursive : bool
        If True, will recursively look for files in subdirectories of
        *directory*.

    """
    directory = Path(directory)
    pattern = "**/*" if recursive else "*"
    # Sort the relative paths so the registry output is deterministic.
    files = sorted(
        str(path.relative_to(directory))
        for path in directory.glob(pattern)
        if path.is_file()
    )
    hashes = [file_hash(str(directory / fname)) for fname in files]
    with open(output, "w") as outfile:
        for fname, fhash in zip(files, hashes):
            # Only use Unix separators for the registry so that we don't go
            # insane dealing with file paths.
            outfile.write("{} {}\n".format(fname.replace("\\", "/"), fhash))
| 30.288889 | 84 | 0.639765 | # Copyright (c) 2018 The Pooch Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
"""
Calculating and checking file hashes.
"""
import hashlib
import functools
from pathlib import Path
# From the docs: https://docs.python.org/3/library/hashlib.html#hashlib.new
# The named constructors are much faster than new() and should be
# preferred.
# Need to fallback on new() for some algorithms.
ALGORITHMS_AVAILABLE = {
alg: getattr(hashlib, alg, functools.partial(hashlib.new, alg))
for alg in hashlib.algorithms_available
}
try:
import xxhash
# xxhash doesn't have a list of available algorithms yet.
# https://github.com/ifduyue/python-xxhash/issues/48
ALGORITHMS_AVAILABLE.update(
{
alg: getattr(xxhash, alg, None)
for alg in ["xxh128", "xxh64", "xxh32", "xxh3_128", "xxh3_64"]
}
)
# The xxh3 algorithms are only available for version>=2.0. Set to None and
# remove to ensure backwards compatibility.
ALGORITHMS_AVAILABLE = {
alg: func for alg, func in ALGORITHMS_AVAILABLE.items() if func is not None
}
except ImportError:
pass
def file_hash(fname, alg="sha256"):
"""
Calculate the hash of a given file.
Useful for checking if a file has changed or been corrupted.
Parameters
----------
fname : str
The name of the file.
alg : str
The type of the hashing algorithm
Returns
-------
hash : str
The hash of the file.
Examples
--------
>>> fname = "test-file-for-hash.txt"
>>> with open(fname, "w") as f:
... __ = f.write("content of the file")
>>> print(file_hash(fname))
0fc74468e6a9a829f103d069aeb2bb4f8646bad58bf146bb0e3379b759ec4a00
>>> import os
>>> os.remove(fname)
"""
if alg not in ALGORITHMS_AVAILABLE:
raise ValueError(
f"Algorithm '{alg}' not available to the pooch library. "
"Only the following algorithms are available "
f"{list(ALGORITHMS_AVAILABLE.keys())}."
)
# Calculate the hash in chunks to avoid overloading the memory
chunksize = 65536
hasher = ALGORITHMS_AVAILABLE[alg]()
with open(fname, "rb") as fin:
buff = fin.read(chunksize)
while buff:
hasher.update(buff)
buff = fin.read(chunksize)
return hasher.hexdigest()
def hash_algorithm(hash_string):
    """
    Extract the hashing-algorithm name from an ``algorithm:hash`` string.

    The prefix may be the name of any algorithm known to :mod:`hashlib`.
    When the prefix is omitted, or *hash_string* is None, ``"sha256"`` is
    assumed.

    Parameters
    ----------
    hash_string : str
        The hash string, optionally prefixed with the algorithm name.

    Returns
    -------
    hash_algorithm : str
        Lower-cased name of the algorithm.

    Examples
    --------
    >>> print(hash_algorithm("qouuwhwd2j192y1lb1iwgowdj2898wd2d9"))
    sha256
    >>> print(hash_algorithm("md5:qouuwhwd2j192y1lb1iwgowdj2898wd2d9"))
    md5
    >>> print(hash_algorithm("sha256:qouuwhwd2j192y1lb1iwgowdj2898wd2d9"))
    sha256
    >>> print(hash_algorithm("SHA256:qouuwhwd2j192y1lb1iwgowdj2898wd2d9"))
    sha256
    >>> print(hash_algorithm("xxh3_64:qouuwhwd2j192y1lb1iwgowdj2898wd2d9"))
    xxh3_64
    >>> print(hash_algorithm(None))
    sha256
    """
    # partition() yields an empty separator when no ":" is present, which
    # also covers the hash_string=None case via the "or" fallback.
    prefix, separator, _ = (hash_string or "").partition(":")
    return prefix.lower() if separator else "sha256"
def hash_matches(fname, known_hash, strict=False, source=None):
    """
    Check whether a file's hash agrees with a known reference hash.

    A *known_hash* of None always counts as a match. Both hashes are
    lower-cased before comparing, so case differences between registry
    entries and computed digests are ignored.

    Parameters
    ----------
    fname : str or PathLike
        Path of the file to verify.
    known_hash : str
        The known hash, optionally prefixed with ``alg:`` to select the
        hashing algorithm (SHA256 when omitted).
    strict : bool
        If True, raise :class:`ValueError` on a mismatch instead of
        returning False.
    source : str
        Label for the downloaded file (name or URL) used only in the
        mismatch error message. Defaults to *fname*.

    Returns
    -------
    is_same : bool
        True when the computed hash equals the known one.
    """
    if known_hash is None:
        return True
    algorithm = hash_algorithm(known_hash)
    expected = known_hash.split(":")[-1].lower()
    new_hash = file_hash(fname, alg=algorithm)
    matches = new_hash.lower() == expected
    if strict and not matches:
        if source is None:
            source = str(fname)
        raise ValueError(
            f"{algorithm.upper()} hash of downloaded file ({source}) does not match"
            f" the known hash: expected {known_hash} but got {new_hash}. Deleted"
            " download for safety. The downloaded file may have been corrupted or"
            " the known hash may be outdated."
        )
    return matches
def make_registry(directory, output, recursive=True):
    """
    Write a registry file listing each file in *directory* with its hash.

    Saves you from maintaining the registry by hand when a test dataset
    contains many files.

    Parameters
    ----------
    directory : str
        Directory with the data files to register. Registry entries are
        recorded relative to this directory.
    output : str
        Name of the output registry file.
    recursive : bool
        If True, also descend into subdirectories of *directory*.
    """
    base = Path(directory)
    pattern = "**/*" if recursive else "*"
    files = sorted(
        str(path.relative_to(base))
        for path in base.glob(pattern)
        if path.is_file()
    )
    # Hash every file before opening the output, so that creating/truncating
    # the registry cannot interact with the files being hashed.
    entries = [(fname, file_hash(str(base / fname))) for fname in files]
    with open(output, "w") as outfile:
        for fname, fhash in entries:
            # Only use Unix separators for the registry so that we don't go
            # insane dealing with file paths.
            outfile.write("{} {}\n".format(fname.replace("\\", "/"), fhash))
| 0 | 0 | 0 |
cf14ac456dd19a8963fff0edfb3d66062abdab74 | 3,703 | py | Python | classphoto/tests/test_model.py | p2pu/mechanical-mooc | b57ce2e3a61f4fc5fe4b1c485b2a69429933ebcc | [
"MIT"
] | 12 | 2015-01-12T17:26:32.000Z | 2020-02-19T19:13:18.000Z | classphoto/tests/test_model.py | p2pu/mechanical-mooc | b57ce2e3a61f4fc5fe4b1c485b2a69429933ebcc | [
"MIT"
] | 7 | 2015-01-22T13:09:47.000Z | 2021-08-22T02:43:06.000Z | classphoto/tests/test_model.py | p2pu/mechanical-mooc | b57ce2e3a61f4fc5fe4b1c485b2a69429933ebcc | [
"MIT"
] | 8 | 2015-10-27T22:34:01.000Z | 2020-04-10T23:15:49.000Z | from django.test import TestCase
from mock import patch
from classphoto import models as classphoto_api
from classphoto import db
from classphoto import emails
from signup import models as signup_api
@patch('signup.models.sequence_model.get_current_sequence_number', lambda: 1)
| 33.663636 | 77 | 0.623819 | from django.test import TestCase
from mock import patch
from classphoto import models as classphoto_api
from classphoto import db
from classphoto import emails
from signup import models as signup_api
@patch('signup.models.sequence_model.get_current_sequence_number', lambda: 1)
class SimpleTest(TestCase):
    """Tests for the classphoto bio API and sequence notification emails."""

    def setUp(self):
        """Create a baseline bio payload that each test copies and tweaks."""
        self.BIO_DATA = {
            'email': 'test@email.com',
            'sequence': 1,
            'name': 'Test User',
            'bio': 'This is some test data about a user',
            'avatar': 'http://some.url/image.png'
        }

    def test_save_bio(self):
        """Saving a bio returns the stored data unchanged."""
        user_bio = classphoto_api.save_bio(**self.BIO_DATA)
        self.assertEqual(self.BIO_DATA, user_bio)

    def test_save_bio_with_twitter(self):
        """A twitter handle supplied with the bio is persisted."""
        bio_data = self.BIO_DATA.copy()
        bio_data['twitter'] = 'testhandle'
        user_bio = classphoto_api.save_bio(**bio_data)
        self.assertEqual(bio_data, user_bio)

    def test_save_bio_with_gplus(self):
        """A Google+ profile URL supplied with the bio is persisted.

        BUG FIX: this test was previously also named
        ``test_save_bio_with_twitter``, which shadowed the twitter test
        above so only one of the two ever ran.
        """
        bio_data = self.BIO_DATA.copy()
        bio_data['gplus'] = 'http://plus.google.com/user/1231231231/'
        user_bio = classphoto_api.save_bio(**bio_data)
        self.assertEqual(bio_data, user_bio)

    def test_get_sequence_bios(self):
        """All bios saved for a sequence are returned."""
        for i in range(10):
            data = self.BIO_DATA.copy()
            data['email'] = 'test-{0}@mail.com'.format(i)
            classphoto_api.save_bio(**data)
        bios = classphoto_api.get_bios(1)
        self.assertEqual(len(bios), 10)

    def test_get_sequence_bios_by_email(self):
        """Filtering bios by email returns only the requested subset."""
        # Named "addresses" (not "emails") so the local list does not shadow
        # the ``classphoto.emails`` module imported at the top of the file.
        addresses = ['test-{0}@mail.com'.format(i) for i in range(10)]
        for email in addresses:
            data = self.BIO_DATA.copy()
            data['email'] = email
            classphoto_api.save_bio(**data)
        bios = classphoto_api.get_bios_by_email(1, addresses[:5])
        self.assertEqual(len(bios), 5)

    def test_update_bio(self):
        """Re-saving a bio for the same email replaces the previous one."""
        user_bio = classphoto_api.save_bio(**self.BIO_DATA)

        # The freshly saved bio should be the only entry for this email.
        # (List comprehension instead of filter() so len() also works on
        # Python 3, where filter() returns an iterator.)
        bios = [b for b in classphoto_api.get_bios(1)
                if b['email'] == user_bio['email']]
        self.assertEqual(len(bios), 1)
        self.assertEqual(bios[0], user_bio)

        # Update the bio text and save again.
        update_data = self.BIO_DATA.copy()
        update_data['bio'] = 'This is the updated BIO'
        updated_bio = classphoto_api.save_bio(**update_data)

        # Only the updated bio should now be returned.
        bios = [b for b in classphoto_api.get_bios(1)
                if b['email'] == user_bio['email']]
        self.assertEqual(len(bios), 1)
        self.assertEqual(bios[0], updated_bio)

    def test_send_user_link_to_whole_sequence(self):
        """Sending the sequence's user links triggers one mass email."""
        addresses = ['mail1@mail.com', 'mail2@mail.com', 'mail3@mail.com',
                     'mail4@mail.com', 'mail5@mail.com']
        for address in addresses:
            signup_api.create_signup(address, {})
        for address in addresses:
            bio = self.BIO_DATA.copy()
            bio['email'] = address
            classphoto_api.save_bio(**bio)
        with patch('classphoto.emails.mailgun.api.send_mass_email') as sme:
            emails.send_user_link_to_whole_sequence(1)
            self.assertTrue(sme.called)
| 3,169 | 6 | 238 |
79b5eb78dad043dccbf2b484e3afb8079f17cc47 | 15,153 | py | Python | crowdsourcing/util/multibox/model.py | sbranson/online_crowdsourcing | d1f7c814bb60aae9cf5e76e0b299713246f98ce3 | [
"MIT"
] | 4 | 2019-08-14T21:14:18.000Z | 2021-11-04T09:32:37.000Z | crowdsourcing/util/multibox/model.py | sbranson/online_crowdsourcing | d1f7c814bb60aae9cf5e76e0b299713246f98ce3 | [
"MIT"
] | null | null | null | crowdsourcing/util/multibox/model.py | sbranson/online_crowdsourcing | d1f7c814bb60aae9cf5e76e0b299713246f98ce3 | [
"MIT"
] | 1 | 2019-11-09T08:20:27.000Z | 2019-11-09T08:20:27.000Z | import tensorflow as tf
slim = tf.contrib.slim
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
  """Builds the 35x35 resnet block.

  Args:
    net: 4-D input tensor on the 35x35 grid; spatial size is preserved.
    scale: multiplier applied to the residual before it is added to `net`.
    activation_fn: activation applied to the residual sum (falsy to skip).
    scope: optional variable_scope name (defaults to 'Block35').
    reuse: whether to reuse the scope's variables.

  Returns:
    The input tensor plus the scaled residual.
  """
  with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
    with tf.variable_scope('Branch_0'):
      tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
    with tf.variable_scope('Branch_1'):
      tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
      tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
    with tf.variable_scope('Branch_2'):
      tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
      tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3, scope='Conv2d_0b_3x3')
      tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3, scope='Conv2d_0c_3x3')
    mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_1, tower_conv2_2])
    # 1x1 projection back to the input depth; linear so the residual add is
    # unactivated until after the sum.
    up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                     activation_fn=None, scope='Conv2d_1x1')
    net += scale * up
    if activation_fn:
      net = activation_fn(net)
    return net
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
  """Builds the 17x17 resnet block.

  Args:
    net: 4-D input tensor on the 17x17 grid; spatial size is preserved.
    scale: multiplier applied to the residual before it is added to `net`.
    activation_fn: activation applied to the residual sum (falsy to skip).
    scope: optional variable_scope name (defaults to 'Block17').
    reuse: whether to reuse the scope's variables.

  Returns:
    The input tensor plus the scaled residual.
  """
  with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
    with tf.variable_scope('Branch_0'):
      tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
    with tf.variable_scope('Branch_1'):
      # Factorized 7x7 convolution: 1x7 followed by 7x1.
      tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
      tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7],
                                  scope='Conv2d_0b_1x7')
      tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7, 1],
                                  scope='Conv2d_0c_7x1')
    mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])
    # Linear 1x1 projection back to the input depth.
    up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                     activation_fn=None, scope='Conv2d_1x1')
    net += scale * up
    if activation_fn:
      net = activation_fn(net)
    return net
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
  """Builds the 8x8 resnet block.

  Args:
    net: 4-D input tensor on the 8x8 grid; spatial size is preserved.
    scale: multiplier applied to the residual before it is added to `net`.
    activation_fn: activation applied to the residual sum (falsy to skip).
    scope: optional variable_scope name (defaults to 'Block8').
    reuse: whether to reuse the scope's variables.

  Returns:
    The input tensor plus the scaled residual.
  """
  with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
    with tf.variable_scope('Branch_0'):
      tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
    with tf.variable_scope('Branch_1'):
      # Factorized 3x3 convolution: 1x3 followed by 3x1.
      tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
      tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3],
                                  scope='Conv2d_0b_1x3')
      tower_conv1_2 = slim.conv2d(tower_conv1_1, 256, [3, 1],
                                  scope='Conv2d_0c_3x1')
    mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])
    # Linear 1x1 projection back to the input depth.
    up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                     activation_fn=None, scope='Conv2d_1x1')
    net += scale * up
    if activation_fn:
      net = activation_fn(net)
    return net
def inception_resnet_v2(inputs,
reuse=None,
scope='InceptionResnetV2'):
"""Creates the Inception Resnet V2 model.
Args:
inputs: a 4-D tensor of size [batch_size, height, width, 3].
num_classes: number of predicted classes.
is_training: whether is training or not.
dropout_keep_prob: float, the fraction to keep before final layer.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
logits: the logits outputs of the model.
end_points: the set of end_points from the inception model.
"""
end_points = {}
with tf.variable_scope(scope, 'InceptionResnetV2', [inputs], reuse=reuse):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
# 149 x 149 x 32
net = slim.conv2d(inputs, 32, 3, stride=2, padding='VALID',
scope='Conv2d_1a_3x3')
end_points['Conv2d_1a_3x3'] = net
# 147 x 147 x 32
net = slim.conv2d(net, 32, 3, padding='VALID',
scope='Conv2d_2a_3x3')
end_points['Conv2d_2a_3x3'] = net
# 147 x 147 x 64
net = slim.conv2d(net, 64, 3, scope='Conv2d_2b_3x3')
end_points['Conv2d_2b_3x3'] = net
# 73 x 73 x 64
net = slim.max_pool2d(net, 3, stride=2, padding='VALID',
scope='MaxPool_3a_3x3')
end_points['MaxPool_3a_3x3'] = net
# 73 x 73 x 80
net = slim.conv2d(net, 80, 1, padding='VALID',
scope='Conv2d_3b_1x1')
end_points['Conv2d_3b_1x1'] = net
# 71 x 71 x 192
net = slim.conv2d(net, 192, 3, padding='VALID',
scope='Conv2d_4a_3x3')
end_points['Conv2d_4a_3x3'] = net
# 35 x 35 x 192
net = slim.max_pool2d(net, 3, stride=2, padding='VALID',
scope='MaxPool_5a_3x3')
end_points['MaxPool_5a_3x3'] = net
# 35 x 35 x 320
with tf.variable_scope('Mixed_5b'):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 96, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 48, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 64, 5,
scope='Conv2d_0b_5x5')
with tf.variable_scope('Branch_2'):
tower_conv2_0 = slim.conv2d(net, 64, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2_0, 96, 3,
scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 96, 3,
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
tower_pool = slim.avg_pool2d(net, 3, stride=1, padding='SAME',
scope='AvgPool_0a_3x3')
tower_pool_1 = slim.conv2d(tower_pool, 64, 1,
scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[tower_conv, tower_conv1_1,
tower_conv2_2, tower_pool_1])
end_points['Mixed_5b'] = net
net = slim.repeat(net, 10, block35, scale=0.17)
# 17 x 17 x 1024
with tf.variable_scope('Mixed_6a'):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 384, 3, stride=2, padding='VALID',
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 256, 3,
scope='Conv2d_0b_3x3')
tower_conv1_2 = slim.conv2d(tower_conv1_1, 384, 3,
stride=2, padding='VALID',
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat(axis=3, values=[tower_conv, tower_conv1_2, tower_pool])
end_points['Mixed_6a'] = net
net = slim.repeat(net, 20, block17, scale=0.10)
with tf.variable_scope('Mixed_7a'):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv_1 = slim.conv2d(tower_conv, 384, 3, stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1, 288, 3, stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3,
scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 320, 3, stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_3'):
tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat(axis=3, values=[tower_conv_1, tower_conv1_1,
tower_conv2_2, tower_pool])
end_points['Mixed_7a'] = net
net = slim.repeat(net, 9, block8, scale=0.20)
net = block8(net, activation_fn=None)
# GVH: Not sure if we want or need this convolution
# 8 x 8 x 2080
net = slim.conv2d(net, 1536, 1, scope='Conv2d_7b_1x1')
end_points['Conv2d_7b_1x1'] = net
# 8 x 8 x 1536
return net, end_points | 44.831361 | 178 | 0.613806 | import tensorflow as tf
slim = tf.contrib.slim
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
  """Residual module for the 35x35 feature grid.

  Three parallel branches (1x1; 1x1->3x3; 1x1->3x3->3x3) are concatenated,
  projected back to the input depth with a linear 1x1 conv, scaled by
  `scale`, and added onto `net`. `activation_fn` (if truthy) is applied to
  the sum. Spatial size is unchanged.
  """
  with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
    with tf.variable_scope('Branch_0'):
      branch_0 = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
    with tf.variable_scope('Branch_1'):
      branch_1 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
      branch_1 = slim.conv2d(branch_1, 32, 3, scope='Conv2d_0b_3x3')
    with tf.variable_scope('Branch_2'):
      branch_2 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
      branch_2 = slim.conv2d(branch_2, 48, 3, scope='Conv2d_0b_3x3')
      branch_2 = slim.conv2d(branch_2, 64, 3, scope='Conv2d_0c_3x3')
    joined = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
    residual = slim.conv2d(joined, net.get_shape()[3], 1, normalizer_fn=None,
                           activation_fn=None, scope='Conv2d_1x1')
    net += scale * residual
    if activation_fn:
      net = activation_fn(net)
    return net
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
  """Residual module for the 17x17 feature grid.

  Two parallel branches (1x1, and 1x1 -> 1x7 -> 7x1 factorized 7x7) are
  concatenated, projected back to the input depth with a linear 1x1 conv,
  scaled by `scale`, and added onto `net`. `activation_fn` (if truthy) is
  applied to the sum.
  """
  with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
    with tf.variable_scope('Branch_0'):
      branch_0 = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
    with tf.variable_scope('Branch_1'):
      branch_1 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
      branch_1 = slim.conv2d(branch_1, 160, [1, 7], scope='Conv2d_0b_1x7')
      branch_1 = slim.conv2d(branch_1, 192, [7, 1], scope='Conv2d_0c_7x1')
    joined = tf.concat(axis=3, values=[branch_0, branch_1])
    residual = slim.conv2d(joined, net.get_shape()[3], 1, normalizer_fn=None,
                           activation_fn=None, scope='Conv2d_1x1')
    net += scale * residual
    if activation_fn:
      net = activation_fn(net)
    return net
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
  """Residual module for the 8x8 feature grid.

  Two parallel branches (1x1, and 1x1 -> 1x3 -> 3x1 factorized 3x3) are
  concatenated, projected back to the input depth with a linear 1x1 conv,
  scaled by `scale`, and added onto `net`. `activation_fn` (if truthy) is
  applied to the sum.
  """
  with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
    with tf.variable_scope('Branch_0'):
      branch_0 = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
    with tf.variable_scope('Branch_1'):
      branch_1 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
      branch_1 = slim.conv2d(branch_1, 224, [1, 3], scope='Conv2d_0b_1x3')
      branch_1 = slim.conv2d(branch_1, 256, [3, 1], scope='Conv2d_0c_3x1')
    joined = tf.concat(axis=3, values=[branch_0, branch_1])
    residual = slim.conv2d(joined, net.get_shape()[3], 1, normalizer_fn=None,
                           activation_fn=None, scope='Conv2d_1x1')
    net += scale * residual
    if activation_fn:
      net = activation_fn(net)
    return net
def inception_resnet_v2(inputs,
                        reuse=None,
                        scope='InceptionResnetV2'):
  """Inception-ResNet-v2 feature extractor (stem through Conv2d_7b_1x1).

  Unlike the full classification network there is no logits/dropout head:
  the final feature map is returned directly.

  Args:
    inputs: a 4-D tensor of size [batch_size, height, width, 3].
    reuse: whether or not the network and its variables should be reused.
      To be able to reuse, 'scope' must be given.
    scope: optional variable_scope name.

  Returns:
    net: the final feature map (output of 'Conv2d_7b_1x1').
    end_points: dict mapping named layers to their output tensors.
  """
  end_points = {}
  with tf.variable_scope(scope, 'InceptionResnetV2', [inputs], reuse=reuse):
    with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                        stride=1, padding='SAME'):
      # --- Stem: alternating convs and pools down to a 35x35 grid. ---
      net = slim.conv2d(inputs, 32, 3, stride=2, padding='VALID',
                        scope='Conv2d_1a_3x3')
      end_points['Conv2d_1a_3x3'] = net
      net = slim.conv2d(net, 32, 3, padding='VALID', scope='Conv2d_2a_3x3')
      end_points['Conv2d_2a_3x3'] = net
      net = slim.conv2d(net, 64, 3, scope='Conv2d_2b_3x3')
      end_points['Conv2d_2b_3x3'] = net
      net = slim.max_pool2d(net, 3, stride=2, padding='VALID',
                            scope='MaxPool_3a_3x3')
      end_points['MaxPool_3a_3x3'] = net
      net = slim.conv2d(net, 80, 1, padding='VALID', scope='Conv2d_3b_1x1')
      end_points['Conv2d_3b_1x1'] = net
      net = slim.conv2d(net, 192, 3, padding='VALID', scope='Conv2d_4a_3x3')
      end_points['Conv2d_4a_3x3'] = net
      net = slim.max_pool2d(net, 3, stride=2, padding='VALID',
                            scope='MaxPool_5a_3x3')
      end_points['MaxPool_5a_3x3'] = net

      # --- Mixed_5b: four-branch inception module on the 35x35 grid. ---
      with tf.variable_scope('Mixed_5b'):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, 96, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, 48, 1, scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, 64, 5, scope='Conv2d_0b_5x5')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, 64, 1, scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, 96, 3, scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, 96, 3, scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.avg_pool2d(net, 3, stride=1, padding='SAME',
                                     scope='AvgPool_0a_3x3')
          branch_3 = slim.conv2d(branch_3, 64, 1, scope='Conv2d_0b_1x1')
        net = tf.concat(axis=3,
                        values=[branch_0, branch_1, branch_2, branch_3])
      end_points['Mixed_5b'] = net

      # Ten residual block35 units on the 35x35 grid.
      net = slim.repeat(net, 10, block35, scale=0.17)

      # --- Mixed_6a: reduction module down to the 17x17 grid. ---
      with tf.variable_scope('Mixed_6a'):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, 384, 3, stride=2, padding='VALID',
                                 scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, 256, 3, scope='Conv2d_0b_3x3')
          branch_1 = slim.conv2d(branch_1, 384, 3, stride=2, padding='VALID',
                                 scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.max_pool2d(net, 3, stride=2, padding='VALID',
                                     scope='MaxPool_1a_3x3')
        net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
      end_points['Mixed_6a'] = net

      # Twenty residual block17 units on the 17x17 grid.
      net = slim.repeat(net, 20, block17, scale=0.10)

      # --- Mixed_7a: reduction module down to the 8x8 grid. ---
      with tf.variable_scope('Mixed_7a'):
        with tf.variable_scope('Branch_0'):
          branch_0 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
          branch_0 = slim.conv2d(branch_0, 384, 3, stride=2, padding='VALID',
                                 scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_1'):
          branch_1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
          branch_1 = slim.conv2d(branch_1, 288, 3, stride=2, padding='VALID',
                                 scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_2'):
          branch_2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
          branch_2 = slim.conv2d(branch_2, 288, 3, scope='Conv2d_0b_3x3')
          branch_2 = slim.conv2d(branch_2, 320, 3, stride=2, padding='VALID',
                                 scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_3'):
          branch_3 = slim.max_pool2d(net, 3, stride=2, padding='VALID',
                                     scope='MaxPool_1a_3x3')
        net = tf.concat(axis=3,
                        values=[branch_0, branch_1, branch_2, branch_3])
      end_points['Mixed_7a'] = net

      # Nine residual block8 units, then a final non-activated block8.
      net = slim.repeat(net, 9, block8, scale=0.20)
      net = block8(net, activation_fn=None)

      # GVH: Not sure if we want or need this convolution.
      net = slim.conv2d(net, 1536, 1, scope='Conv2d_7b_1x1')
      end_points['Conv2d_7b_1x1'] = net
  return net, end_points
def build_detection_heads(inputs, num_bboxes_per_cell, scope='Multibox', reuse=None):
    """Attach multibox location/confidence prediction heads to a feature map.

    Each named sub-scope predicts, for every cell of its grid, 4 box
    coordinates per prior box plus one confidence per prior box. The
    per-grid outputs are flattened and concatenated.

    Args:
        inputs: backbone feature map (an 8x8 spatial grid is assumed by the
            1x1 head's [8, 8] average pool — confirm against the backbone).
        num_bboxes_per_cell: number of prior boxes predicted per grid cell
            (the 1x1 head always predicts exactly one box).
        scope: optional variable_scope name (defaults to 'Multibox').
        reuse: whether to reuse the scope's variables.

    Returns:
        locations: [batch, total_priors, 4] tensor of raw box coordinates.
        confidences: [batch, total_priors, 1] sigmoid-activated scores.
        endpoints: dict of the raw '<grid>_locations'/'<grid>_confidences'
            head outputs.
    """
    endpoints = {}
    # kwargs shared by every final prediction layer: linear output, no batch
    # norm, no bias.
    pred_kwargs = dict(activation_fn=None, normalizer_fn=None,
                       biases_initializer=None)
    with tf.variable_scope(scope, 'Multibox', [inputs], reuse=reuse):
        with slim.arg_scope([slim.conv2d, slim.avg_pool2d], stride=1, padding='SAME'):

            # Head on the full 8x8 grid.
            with tf.variable_scope("8x8"):
                feat = slim.conv2d(inputs, 96, [1, 1])
                feat = slim.conv2d(feat, 96, [3, 3])
                endpoints['8x8_locations'] = slim.conv2d(
                    feat, num_bboxes_per_cell * 4, [1, 1], **pred_kwargs)
                endpoints['8x8_confidences'] = slim.conv2d(
                    feat, num_bboxes_per_cell, [1, 1], **pred_kwargs)

            # 6x6 grid: the VALID 3x3 conv shrinks 8x8 to 6x6.
            with tf.variable_scope("6x6"):
                feat = slim.conv2d(inputs, 96, [3, 3])
                feat = slim.conv2d(feat, 96, [3, 3], padding="VALID")
                endpoints['6x6_locations'] = slim.conv2d(
                    feat, num_bboxes_per_cell * 4, [1, 1], **pred_kwargs)
                endpoints['6x6_confidences'] = slim.conv2d(
                    feat, num_bboxes_per_cell, [1, 1], **pred_kwargs)

            # A strided conv halves the grid for the coarser heads.
            net = slim.conv2d(inputs, 256, [3, 3], stride=2)

            # Head on the 4x4 grid.
            with tf.variable_scope("4x4"):
                feat = slim.conv2d(net, 128, [3, 3])
                endpoints['4x4_locations'] = slim.conv2d(
                    feat, num_bboxes_per_cell * 4, [1, 1], **pred_kwargs)
                endpoints['4x4_confidences'] = slim.conv2d(
                    feat, num_bboxes_per_cell, [1, 1], **pred_kwargs)

            # 3x3 grid: the VALID 2x2 conv shrinks 4x4 to 3x3.
            with tf.variable_scope("3x3"):
                feat = slim.conv2d(net, 128, [1, 1])
                feat = slim.conv2d(feat, 96, [2, 2], padding="VALID")
                endpoints['3x3_locations'] = slim.conv2d(
                    feat, num_bboxes_per_cell * 4, [1, 1], **pred_kwargs)
                endpoints['3x3_confidences'] = slim.conv2d(
                    feat, num_bboxes_per_cell, [1, 1], **pred_kwargs)

            # 2x2 grid: the VALID 3x3 conv shrinks 4x4 to 2x2.
            with tf.variable_scope("2x2"):
                feat = slim.conv2d(net, 128, [1, 1])
                feat = slim.conv2d(feat, 96, [3, 3], padding="VALID")
                endpoints['2x2_locations'] = slim.conv2d(
                    feat, num_bboxes_per_cell * 4, [1, 1], **pred_kwargs)
                endpoints['2x2_confidences'] = slim.conv2d(
                    feat, num_bboxes_per_cell, [1, 1], **pred_kwargs)

            # Single global cell: average-pool the whole 8x8 map. This head
            # predicts exactly one box per image.
            with tf.variable_scope("1x1"):
                feat = slim.avg_pool2d(inputs, [8, 8], padding="VALID")
                endpoints['1x1_locations'] = slim.conv2d(
                    feat, 4, [1, 1], **pred_kwargs)
                endpoints['1x1_confidences'] = slim.conv2d(
                    feat, 1, [1, 1], **pred_kwargs)

    # Dynamic batch size (the static batch dimension may be unknown).
    batch_size = tf.shape(inputs)[0]

    # Flatten each head's output, keeping the original per-grid
    # (locations, confidences) ordering so downstream indexing is unchanged.
    flat_locations = []
    flat_confidences = []
    for grid in ('8x8', '6x6', '4x4', '3x3', '2x2', '1x1'):
        flat_locations.append(
            tf.reshape(endpoints[grid + '_locations'], [batch_size, -1]))
        flat_confidences.append(
            tf.reshape(endpoints[grid + '_confidences'], [batch_size, -1]))

    locations = tf.concat(axis=1, values=flat_locations)
    locations = tf.reshape(locations, [batch_size, -1, 4])

    confidences = tf.concat(axis=1, values=flat_confidences)
    confidences = tf.reshape(confidences, [batch_size, -1, 1])
    confidences = tf.sigmoid(confidences)

    return locations, confidences, endpoints
def build(inputs, num_bboxes_per_cell, reuse=False, scope=''):
    """Assemble the full detector: backbone features plus multibox heads.

    Returns (locations, confidences, original_inception_vars), where the
    last item maps backbone variable op names to the variables so a
    pretrained checkpoint can be restored into the backbone alone.

    NOTE(review): the `scope` parameter is accepted but never used —
    confirm whether callers rely on it.
    """
    # Backbone feature extractor (Inception-ResNet-v2; an older comment
    # here said "Inception-v3", which was inaccurate).
    features, _ = inception_resnet_v2(inputs, reuse=reuse, scope='InceptionResnetV2')

    # Snapshot the backbone variables before the heads create their own.
    original_inception_vars = {
        var.op.name: var for var in slim.get_model_variables()
    }

    # Add on the detection heads.
    locs, confs, _ = build_detection_heads(features, num_bboxes_per_cell)
    return locs, confs, original_inception_vars
2caf5ed64f41a475eb0f6f365fe1d8e6d8d66bac | 747 | py | Python | train/minimalistic_train.py | imagelint/focusfinder | 834b4e2dd873739ba7dbe8389fdfeeaa12c03c87 | [
"MIT"
] | null | null | null | train/minimalistic_train.py | imagelint/focusfinder | 834b4e2dd873739ba7dbe8389fdfeeaa12c03c87 | [
"MIT"
] | null | null | null | train/minimalistic_train.py | imagelint/focusfinder | 834b4e2dd873739ba7dbe8389fdfeeaa12c03c87 | [
"MIT"
] | null | null | null | from fastai.vision.all import *
import re
df = pd.read_csv(Path('download/labels/train_labels.csv'), names=['name','x_p','y_p'], header=0)
imgs = DataBlock(blocks=(ImageBlock, PointBlock), get_items=get_image_files, get_y=get_focus_point, splitter=RandomSplitter(valid_pct=0.2, seed=42), batch_tfms=[*aug_transforms(size=(244, 244)), Normalize.from_stats(*imagenet_stats)], item_tfms=Resize(244),)
dls = imgs.dataloaders(Path('download/images/norm_images'), bs=16)
cnn_learner(dls, resnet18, y_range=(-1,1)).fine_tune(3, 4e-5).export(('./models/m.pkl')) | 62.25 | 258 | 0.726908 | from fastai.vision.all import *
import re
def get_focus_point(path_name):
    """Return the labelled focus point for an image as a tensor([x_p, y_p]).

    Looks up ``path_name.name`` in the module-level label frame ``df``.

    Raises:
        KeyError: if the image has no row in ``df``. (Previously a sentinel
        string 'no match for <name>' was used as the frame index on a miss,
        which produced a confusing KeyError from pandas downstream; now the
        error is raised explicitly with the same message.)
    """
    matches = df[df['name'] == path_name.name].index
    if len(matches) == 0:
        raise KeyError('no match for ' + path_name.name)
    row = matches[0]
    return tensor([float(df['x_p'][row]), float(df['y_p'][row])])
# Labels CSV: one row per image name with its focus point as (x_p, y_p).
df = pd.read_csv(Path('download/labels/train_labels.csv'), names=['name','x_p','y_p'], header=0)
# DataBlock pairing each image with its focus point: 80/20 random
# train/valid split (fixed seed), items resized to 244x244, batches
# augmented and normalised with ImageNet statistics.
imgs = DataBlock(blocks=(ImageBlock, PointBlock), get_items=get_image_files, get_y=get_focus_point, splitter=RandomSplitter(valid_pct=0.2, seed=42), batch_tfms=[*aug_transforms(size=(244, 244)), Normalize.from_stats(*imagenet_stats)], item_tfms=Resize(244),)
dls = imgs.dataloaders(Path('download/images/norm_images'), bs=16)
# Fine-tune a resnet18 point regressor (outputs clamped to [-1, 1]) for
# 3 epochs at lr 4e-5, then export the trained learner.
cnn_learner(dls, resnet18, y_range=(-1,1)).fine_tune(3, 4e-5).export(('./models/m.pkl'))
5ad5b0dce0aebfcc0decf69ea1433b9412ccc9bb | 5,230 | py | Python | scripts/create_fluseverity_figs/Supp_zOR_epiduration_state.py | eclee25/flu-SDI-exploratory-age | 2f5a4d97b84d2116e179e85fe334edf4556aa946 | [
"MIT"
] | 3 | 2018-03-29T23:02:43.000Z | 2020-08-10T12:01:50.000Z | scripts/create_fluseverity_figs/Supp_zOR_epiduration_state.py | eclee25/flu-SDI-exploratory-age | 2f5a4d97b84d2116e179e85fe334edf4556aa946 | [
"MIT"
] | null | null | null | scripts/create_fluseverity_figs/Supp_zOR_epiduration_state.py | eclee25/flu-SDI-exploratory-age | 2f5a4d97b84d2116e179e85fe334edf4556aa946 | [
"MIT"
] | null | null | null | #!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 8/1/14
###Function: scatter plot zOR metrics vs. epidemic duration at state level, using the state peak retrospective classification
## one plot per season per classification period vs. epidemic duration
## a single plot for all seasons for retrospective period vs. epidemic duration
## define epidemic duration as the number of weeks (inclusive) between the points in the epidemic where cumulative incidence reaches epi_min_perc and epi_max_perc (set below to 10% and 90%; an earlier version of this note said 20% and 80%, which does not match the code)
###Import data: Py_export/SDI_state_classifications_7st.csv, R_export/OR_zip3_week_outpatient_cl.csv, R_export/allpopstat_zip3_season_cl.csv
###Command Line: python Supp_zOR_epiduration_state.py
##############################################
### notes ###
### packages/modules ###
import csv
import matplotlib.pyplot as plt
## local modules ##
import functions as fxn
### data structures ###
### called/local plotting parameters ###
ps = fxn.pseasons
sl = fxn.gp_seasonlabels
fs = 24
fssml = 16
fw = fxn.gp_fluweeks
wklab = fxn.gp_weeklabels
scol = fxn.gp_colors
epi_min_perc = 10 # cumulative incidence percentage that defines beginning of epidemic
epi_max_perc = 90 # cumulative incidence percentage that defines end of epidemic
### functions ###
### data files ###
# state zOR data
st_zORin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_state_classifications_7st.csv', 'r')
st_zORin.readline()
st_zOR = csv.reader(st_zORin, delimiter=',')
# state incidence files
st_incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/OR_zip3_week_outpatient_cl.csv', 'r')
st_incidin.readline()
stincid = csv.reader(st_incidin, delimiter=',')
st_popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/allpopstat_zip3_season_cl.csv','r')
st_popin.readline()
stpop = csv.reader(st_popin, delimiter=',')
### program ###
# import state classification data
# d_st_classif[(season, state abbr)] = (mean retro zOR, mean early zOR)
d_st_classif = fxn.readStateClassifFile(st_zOR)
# grab list of unique states in dataset
states = list(set([key[1] for key in d_st_classif]))
## state-level data ##
d_wk, d_zip3_st, d_incid_st, d_OR_st = fxn.week_OR_processing_state(stincid, stpop)
# dict_zOR_st[(week, state)] = zOR
d_zOR_st = fxn.week_zOR_processing_state(d_wk, d_OR_st)
# dict_incid53ls_st[(seasonnum, state)] = [ILI wk 40, ILI wk 41,...], dict_OR53ls_st[(seasonnum, state)] = [OR wk 40, OR wk 41, ...], dict_zOR53ls_st[(seasonnum, state)] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls_st, d_OR53ls_st, d_zOR53ls_st = fxn.week_plotting_dicts_state(d_wk, d_incid_st, d_OR_st, d_zOR_st)
# plot values per season
for s in ps:
retrozOR = [d_st_classif[(s, st)][0] for st in states]
earlyzOR = [d_st_classif[(s, st)][1] for st in states]
epidur = [fxn.epidemic_duration(d_incid53ls_st[(s, st)], epi_min_perc, epi_max_perc) for st in states]
# mean retro zOR vs peak timing
plt.plot(epidur, retrozOR, marker = 'o', color = 'black', linestyle = 'None')
for st, x, y in zip(states, epidur, retrozOR):
plt.annotate(st, xy=(x,y), xytext=(-10,5), textcoords='offset points', fontsize=fssml)
plt.ylabel('Mean Retrospective zOR', fontsize=fs)
plt.xlabel('Epidemic Duration (number of weeks), Season %s' %(s), fontsize=fs)
plt.xticks(range(fw)[::5], fontsize=fssml)
plt.yticks(fontsize=fssml)
plt.xlim([0,fw])
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs/Supp/zOR_epidur_state/zOR_retro_epidur_state_Season%s.png' %(s), transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show()
# mean retro zOR vs peak timing
plt.plot(epidur, earlyzOR, marker = 'o', color = 'black', linestyle = 'None')
for st, x, y in zip(states, epidur, earlyzOR):
plt.annotate(st, xy=(x,y), xytext=(-10,5), textcoords='offset points', fontsize=fssml)
plt.ylabel('Mean Early Warning zOR', fontsize=fs)
plt.xlabel('Epidemic Duration (number of weeks), Season %s' %(s), fontsize=fs)
plt.xticks(range(fw)[::5], fontsize=fssml)
plt.yticks(fontsize=fssml)
plt.xlim([0,fw])
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs/Supp/zOR_epidur_state/zOR_early_epidur_state_Season%s.png' %(s), transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show()
# mean retro zOR vs timing -- all seasons on a single plot
for s, col, lab in zip(ps, scol, sl):
retrozOR = [d_st_classif[(s, st)][0] for st in states]
epidur = [fxn.epidemic_duration(d_incid53ls_st[(s, st)], epi_min_perc, epi_max_perc) for st in states]
plt.plot(epidur, retrozOR, marker = 'o', color = col, label = lab, linestyle = 'None')
plt.ylabel('Mean Retrospective zOR', fontsize=fs)
plt.xlabel('Epidemic Duration (number of weeks)', fontsize=fs)
plt.xticks(range(fw)[::5], fontsize=fssml)
plt.yticks(fontsize=fssml)
plt.legend(loc='upper left')
plt.xlim([0,fw])
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs/Supp/zOR_epidur_state/zOR_retro_epidur_state_allseas.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show() | 43.583333 | 214 | 0.736329 | #!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 8/1/14
###Function: scatter plot zOR metrics vs. epidemic duration at state level, using the state peak retrospective classification
## one plot per season per classification period vs. epidemic duration
## a single plot for all seasons for retrospective period vs. epidemic duration
## define epidemic duration as the number of weeks that falls between the point in the epidemic where cumulative incidence is 20% and 80% cumulative incidence (inclusive)
###Import data: Py_export/SDI_state_classifications_7st.csv, R_export/OR_zip3_week_outpatient_cl.csv, R_export/allpopstat_zip3_season_cl.csv
###Command Line: python Supp_zOR_epiduration_state.py
##############################################
### notes ###
### packages/modules ###
import csv
import matplotlib.pyplot as plt
## local modules ##
import functions as fxn
### data structures ###
### called/local plotting parameters ###
ps = fxn.pseasons
sl = fxn.gp_seasonlabels
fs = 24
fssml = 16
fw = fxn.gp_fluweeks
wklab = fxn.gp_weeklabels
scol = fxn.gp_colors
epi_min_perc = 10 # cumulative incidence percentage that defines beginning of epidemic
epi_max_perc = 90 # cumulative incidence percentage that defines end of epidemic
### functions ###
### data files ###
# state zOR data
st_zORin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_state_classifications_7st.csv', 'r')
st_zORin.readline()
st_zOR = csv.reader(st_zORin, delimiter=',')
# state incidence files
st_incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/OR_zip3_week_outpatient_cl.csv', 'r')
st_incidin.readline()
stincid = csv.reader(st_incidin, delimiter=',')
st_popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/allpopstat_zip3_season_cl.csv','r')
st_popin.readline()
stpop = csv.reader(st_popin, delimiter=',')
### program ###
# import state classification data
# d_st_classif[(season, state abbr)] = (mean retro zOR, mean early zOR)
d_st_classif = fxn.readStateClassifFile(st_zOR)
# grab list of unique states in dataset
states = list(set([key[1] for key in d_st_classif]))
## state-level data ##
d_wk, d_zip3_st, d_incid_st, d_OR_st = fxn.week_OR_processing_state(stincid, stpop)
# dict_zOR_st[(week, state)] = zOR
d_zOR_st = fxn.week_zOR_processing_state(d_wk, d_OR_st)
# dict_incid53ls_st[(seasonnum, state)] = [ILI wk 40, ILI wk 41,...], dict_OR53ls_st[(seasonnum, state)] = [OR wk 40, OR wk 41, ...], dict_zOR53ls_st[(seasonnum, state)] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls_st, d_OR53ls_st, d_zOR53ls_st = fxn.week_plotting_dicts_state(d_wk, d_incid_st, d_OR_st, d_zOR_st)
# plot values per season
for s in ps:
retrozOR = [d_st_classif[(s, st)][0] for st in states]
earlyzOR = [d_st_classif[(s, st)][1] for st in states]
epidur = [fxn.epidemic_duration(d_incid53ls_st[(s, st)], epi_min_perc, epi_max_perc) for st in states]
# mean retro zOR vs peak timing
plt.plot(epidur, retrozOR, marker = 'o', color = 'black', linestyle = 'None')
for st, x, y in zip(states, epidur, retrozOR):
plt.annotate(st, xy=(x,y), xytext=(-10,5), textcoords='offset points', fontsize=fssml)
plt.ylabel('Mean Retrospective zOR', fontsize=fs)
plt.xlabel('Epidemic Duration (number of weeks), Season %s' %(s), fontsize=fs)
plt.xticks(range(fw)[::5], fontsize=fssml)
plt.yticks(fontsize=fssml)
plt.xlim([0,fw])
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs/Supp/zOR_epidur_state/zOR_retro_epidur_state_Season%s.png' %(s), transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show()
# mean retro zOR vs peak timing
plt.plot(epidur, earlyzOR, marker = 'o', color = 'black', linestyle = 'None')
for st, x, y in zip(states, epidur, earlyzOR):
plt.annotate(st, xy=(x,y), xytext=(-10,5), textcoords='offset points', fontsize=fssml)
plt.ylabel('Mean Early Warning zOR', fontsize=fs)
plt.xlabel('Epidemic Duration (number of weeks), Season %s' %(s), fontsize=fs)
plt.xticks(range(fw)[::5], fontsize=fssml)
plt.yticks(fontsize=fssml)
plt.xlim([0,fw])
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs/Supp/zOR_epidur_state/zOR_early_epidur_state_Season%s.png' %(s), transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show()
# mean retro zOR vs timing -- all seasons on a single plot
for s, col, lab in zip(ps, scol, sl):
retrozOR = [d_st_classif[(s, st)][0] for st in states]
epidur = [fxn.epidemic_duration(d_incid53ls_st[(s, st)], epi_min_perc, epi_max_perc) for st in states]
plt.plot(epidur, retrozOR, marker = 'o', color = col, label = lab, linestyle = 'None')
plt.ylabel('Mean Retrospective zOR', fontsize=fs)
plt.xlabel('Epidemic Duration (number of weeks)', fontsize=fs)
plt.xticks(range(fw)[::5], fontsize=fssml)
plt.yticks(fontsize=fssml)
plt.legend(loc='upper left')
plt.xlim([0,fw])
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs/Supp/zOR_epidur_state/zOR_retro_epidur_state_allseas.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show() | 0 | 0 | 0 |
686738f8c5cb3f4321168859c030d4ecc830fda5 | 988 | py | Python | scripts/filter_actions.py | heyjoakim/munaiah-analyser | d76056cecd3b7f4a6cd72d7fd526cea18aa671d6 | [
"MIT"
] | 1 | 2022-01-03T17:47:20.000Z | 2022-01-03T17:47:20.000Z | scripts/filter_actions.py | heyjoakim/munaiah-analyser | d76056cecd3b7f4a6cd72d7fd526cea18aa671d6 | [
"MIT"
] | null | null | null | scripts/filter_actions.py | heyjoakim/munaiah-analyser | d76056cecd3b7f4a6cd72d7fd526cea18aa671d6 | [
"MIT"
] | 1 | 2021-12-22T13:59:34.000Z | 2021-12-22T13:59:34.000Z | import pandas as pd
from subprocess import call
# Generate raw data file
# call("./get_actions.sh") // Add this if you dont have the out raw data TODO: Replace this with the go script
# Do we add a count for each category a action has or ???
all_actions = open("data/actions_out/actions_raw.txt", "r")
no_ver_actions = open("data/actions_out/no_version_actions.txt", "w")
for i in all_actions:
tmp = i.split("@")
no_ver_actions.write(tmp[0] + "\n")
all_actions.close()
no_ver_actions.close()
# Data analysis
data = pd.read_csv(r'data/actions_out/no_version_actions.txt',
names=['action_name', 'usages'])
# print(data)
# Write value counts to csv
table = data['action_name'].value_counts()
table.to_csv('table.csv', index=True, header=False)
# Get unique actions names
unique_actions = set(data['action_name'].to_list())
out = open("data/actions_out/unique_actions_list.txt", "w")
for action in unique_actions:
out.write(action + "\n")
out.close()
| 27.444444 | 110 | 0.711538 | import pandas as pd
from subprocess import call
# Generate raw data file
# call("./get_actions.sh") // Add this if you dont have the out raw data TODO: Replace this with the go script
# Do we add a count for each category a action has or ???
all_actions = open("data/actions_out/actions_raw.txt", "r")
no_ver_actions = open("data/actions_out/no_version_actions.txt", "w")
for i in all_actions:
tmp = i.split("@")
no_ver_actions.write(tmp[0] + "\n")
all_actions.close()
no_ver_actions.close()
# Data analysis
data = pd.read_csv(r'data/actions_out/no_version_actions.txt',
names=['action_name', 'usages'])
# print(data)
# Write value counts to csv
table = data['action_name'].value_counts()
table.to_csv('table.csv', index=True, header=False)
# Get unique actions names
unique_actions = set(data['action_name'].to_list())
out = open("data/actions_out/unique_actions_list.txt", "w")
for action in unique_actions:
out.write(action + "\n")
out.close()
| 0 | 0 | 0 |
7609dac71fe6ef641efd49318b3e76b0364b2b33 | 4,936 | py | Python | utils/sigproc/dsp.py | cyx669521/Conv-TasNet-PyTorch | 64188ffa48971218fdd68b66906970f215d7eca2 | [
"MIT"
] | 39 | 2019-04-03T03:25:40.000Z | 2021-11-12T01:05:07.000Z | utils/sigproc/dsp.py | cyx669521/Conv-TasNet-PyTorch | 64188ffa48971218fdd68b66906970f215d7eca2 | [
"MIT"
] | 5 | 2019-11-23T14:16:39.000Z | 2021-08-30T05:33:59.000Z | utils/sigproc/dsp.py | wangkenpu/Conv-TasNet-PyTorch | 64188ffa48971218fdd68b66906970f215d7eca2 | [
"MIT"
] | 10 | 2019-04-03T08:17:42.000Z | 2021-12-14T01:23:51.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 ASLP@NPU Ke Wang
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import sys
import librosa
import numpy as np
import soundfile as sf
from scipy.io import wavfile
MAX_INT16 = np.iinfo(np.int16).max
EPSILON = np.finfo(np.float32).eps
MAX_EXP = np.log(np.finfo(np.float32).max - 10.0)
def get_window(window_size, window_type, square_root_window=True):
"""Return the window"""
window = {
'hamming': np.hamming(window_size),
'hanning': np.hanning(window_size),
}[window_type]
if square_root_window:
window = np.sqrt(window)
return window
def pre_emphasis(signal, coefficient=0.97):
"""Pre-emphasis original signal
y(n) = x(n) - a*x(n-1)
"""
return np.append(signal[0], signal[1:] - coefficient * signal[:-1])
def de_emphasis(signal, coefficient=0.97):
"""De-emphasis original signal
y(n) = x(n) + a*x(n-1)
"""
length = signal.shape[0]
for i in range(1, length):
signal[i] = signal[i] + coefficient * signal[i - 1]
return signal
def stft(signal,
sample_rate,
frame_length=32,
frame_shift=8,
window_type="hanning",
preemphasis=0.0,
square_root_window=True):
"""Compute the Short Time Fourier Transform.
Args:
signal: input speech signal
sample_rate: waveform data sample frequency (Hz)
frame_length: frame length in milliseconds
frame_shift: frame shift in milliseconds
window_type: type of window
square_root_window: square root window
Return:
fft: (n/2)+1 dim complex STFT restults
"""
if preemphasis != 0.0:
signal = pre_emphasis(signal, preemphasis)
hop_length = int(sample_rate * frame_shift / 1000)
win_length = int(sample_rate * frame_length / 1000)
num_point = fft_point(win_length)
window = get_window(num_point, window_type, square_root_window)
feat = librosa.stft(signal, n_fft=num_point, hop_length=hop_length,
win_length=win_length, window=window)
return np.transpose(feat)
def get_phase(signal,
sample_rate,
frame_length=32,
frame_shift=8,
window_type="hanning",
preemphasis=0.0,
square_root_window=True):
"""Compute phase imformation.
Args:
signal: input speech signal
sample_rate: waveform data sample frequency (Hz)
frame_length: frame length in milliseconds
frame_shift: frame shift in milliseconds
window_type: type of window
square_root_window: square root window
"""
feat = stft(signal, sample_rate, frame_length, frame_shift,
window_type, preemphasis, square_root_window)
phase = np.angle(feat)
return phase
def overlap_and_add(spectrum,
signal,
sample_rate,
frame_length=32,
frame_shift=8,
window_type="hanning",
preemphasis=0.0,
use_log=False,
use_power=False,
square_root_window=True):
"""Convert frames to signal using overlap-and-add systhesis.
Args:
spectrum: magnitude spectrum
signal: wave signal to supply phase information
Return:
wav: synthesied output waveform
"""
if use_log:
spectrum = np.clip(spectrum, a_min=None, a_max=MAX_EXP)
spectrum = np.exp(spectrum)
if use_power:
spectrum = np.sqrt(spectrum)
phase = get_phase(signal, sample_rate, frame_length, frame_shift,
window_type, preemphasis, square_root_window)
spectrum = spectrum * np.exp(1.0j * phase)
if spectrum.shape != phase.shape:
print(('Wave and Spectrum are not the same length, '
'phase.shape = {}, spectrum.shape = {}').format(
spectrum.shape, phase.shape), 'error')
spectrum = np.transpose(spectrum)
hop_length = int(sample_rate * frame_shift / 1000)
win_length = int(sample_rate * frame_length / 1000)
num_point = fft_point(win_length)
window = get_window(num_point, window_type, square_root_window)
wav = librosa.istft(spectrum, hop_length=hop_length,
win_length=win_length, window=window)
if preemphasis != 0.0:
wav = de_emphasis(wav, preemphasis)
return wav
| 30.469136 | 71 | 0.632496 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 ASLP@NPU Ke Wang
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import sys
import librosa
import numpy as np
import soundfile as sf
from scipy.io import wavfile
MAX_INT16 = np.iinfo(np.int16).max
EPSILON = np.finfo(np.float32).eps
MAX_EXP = np.log(np.finfo(np.float32).max - 10.0)
def wavread(path):
wav, sample_rate = sf.read(path, dtype='float32')
return wav, sample_rate
def wavwrite(signal, sample_rate, path):
signal = (signal * MAX_INT16).astype(np.int16)
wavfile.write(path, sample_rate, signal)
def get_window(window_size, window_type, square_root_window=True):
"""Return the window"""
window = {
'hamming': np.hamming(window_size),
'hanning': np.hanning(window_size),
}[window_type]
if square_root_window:
window = np.sqrt(window)
return window
def fft_point(dim):
assert dim > 0
num = math.log(dim, 2)
num_point = 2**(math.ceil(num))
return num_point
def pre_emphasis(signal, coefficient=0.97):
"""Pre-emphasis original signal
y(n) = x(n) - a*x(n-1)
"""
return np.append(signal[0], signal[1:] - coefficient * signal[:-1])
def de_emphasis(signal, coefficient=0.97):
"""De-emphasis original signal
y(n) = x(n) + a*x(n-1)
"""
length = signal.shape[0]
for i in range(1, length):
signal[i] = signal[i] + coefficient * signal[i - 1]
return signal
def stft(signal,
sample_rate,
frame_length=32,
frame_shift=8,
window_type="hanning",
preemphasis=0.0,
square_root_window=True):
"""Compute the Short Time Fourier Transform.
Args:
signal: input speech signal
sample_rate: waveform data sample frequency (Hz)
frame_length: frame length in milliseconds
frame_shift: frame shift in milliseconds
window_type: type of window
square_root_window: square root window
Return:
fft: (n/2)+1 dim complex STFT restults
"""
if preemphasis != 0.0:
signal = pre_emphasis(signal, preemphasis)
hop_length = int(sample_rate * frame_shift / 1000)
win_length = int(sample_rate * frame_length / 1000)
num_point = fft_point(win_length)
window = get_window(num_point, window_type, square_root_window)
feat = librosa.stft(signal, n_fft=num_point, hop_length=hop_length,
win_length=win_length, window=window)
return np.transpose(feat)
def get_phase(signal,
sample_rate,
frame_length=32,
frame_shift=8,
window_type="hanning",
preemphasis=0.0,
square_root_window=True):
"""Compute phase imformation.
Args:
signal: input speech signal
sample_rate: waveform data sample frequency (Hz)
frame_length: frame length in milliseconds
frame_shift: frame shift in milliseconds
window_type: type of window
square_root_window: square root window
"""
feat = stft(signal, sample_rate, frame_length, frame_shift,
window_type, preemphasis, square_root_window)
phase = np.angle(feat)
return phase
def overlap_and_add(spectrum,
signal,
sample_rate,
frame_length=32,
frame_shift=8,
window_type="hanning",
preemphasis=0.0,
use_log=False,
use_power=False,
square_root_window=True):
"""Convert frames to signal using overlap-and-add systhesis.
Args:
spectrum: magnitude spectrum
signal: wave signal to supply phase information
Return:
wav: synthesied output waveform
"""
if use_log:
spectrum = np.clip(spectrum, a_min=None, a_max=MAX_EXP)
spectrum = np.exp(spectrum)
if use_power:
spectrum = np.sqrt(spectrum)
phase = get_phase(signal, sample_rate, frame_length, frame_shift,
window_type, preemphasis, square_root_window)
spectrum = spectrum * np.exp(1.0j * phase)
if spectrum.shape != phase.shape:
print(('Wave and Spectrum are not the same length, '
'phase.shape = {}, spectrum.shape = {}').format(
spectrum.shape, phase.shape), 'error')
spectrum = np.transpose(spectrum)
hop_length = int(sample_rate * frame_shift / 1000)
win_length = int(sample_rate * frame_length / 1000)
num_point = fft_point(win_length)
window = get_window(num_point, window_type, square_root_window)
wav = librosa.istft(spectrum, hop_length=hop_length,
win_length=win_length, window=window)
if preemphasis != 0.0:
wav = de_emphasis(wav, preemphasis)
return wav
| 295 | 0 | 69 |
cd064f19699be74497beed14a6f016f0c582cc7d | 874 | py | Python | recipes/Python/576431_export_variable_win32_like/recipe-576431.py | tdiprima/code | 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | [
"MIT"
] | 2,023 | 2017-07-29T09:34:46.000Z | 2022-03-24T08:00:45.000Z | recipes/Python/576431_export_variable_win32_like/recipe-576431.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 32 | 2017-09-02T17:20:08.000Z | 2022-02-11T17:49:37.000Z | recipes/Python/576431_export_variable_win32_like/recipe-576431.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 780 | 2017-07-28T19:23:28.000Z | 2022-03-25T20:39:41.000Z | """
vim: set enc=utf-8
Author : winterTTr
Mail : winterTTr@gmail.com
Desc : Tools for Operation on Win32 Environment variables
Module : win32export.py
"""
import win32gui
import win32con
import win32api
| 26.484848 | 158 | 0.713959 | """
vim: set enc=utf-8
Author : winterTTr
Mail : winterTTr@gmail.com
Desc : Tools for Operation on Win32 Environment variables
Module : win32export.py
"""
import win32gui
import win32con
import win32api
def export ( name , value , update_system = True ):
try :
modifyVariableInRegister( name , value )
except:
return False
if update_system :
updateSystem()
return True
def modifyVariableInRegister( name , value ):
key = win32api.RegOpenKey( win32con.HKEY_CURRENT_USER,"Environment",0,win32con.KEY_ALL_ACCESS)
if not key : raise
win32api.RegSetValueEx( key , name , 0 , win32con.REG_SZ , value )
win32api.RegCloseKey( key )
def updateSystem():
rc,dwReturnValue = win32gui.SendMessageTimeout( win32con.HWND_BROADCAST , win32con.WM_SETTINGCHANGE , 0 , "Environment" , win32con.SMTO_ABORTIFHUNG, 5000)
| 592 | 0 | 69 |
3debe193f06ad7a3963adeec17ffcb14130e01d6 | 201 | py | Python | execpeewee/__init__.py | supplayer/exec_peewee | 095b94d0a82e2f9d13e983d5cb6d22a09710b7c7 | [
"MIT"
] | null | null | null | execpeewee/__init__.py | supplayer/exec_peewee | 095b94d0a82e2f9d13e983d5cb6d22a09710b7c7 | [
"MIT"
] | null | null | null | execpeewee/__init__.py | supplayer/exec_peewee | 095b94d0a82e2f9d13e983d5cb6d22a09710b7c7 | [
"MIT"
] | null | null | null | from execpeewee.builder import PeeweeModel
from execpeewee.handler import ExecPeewee
from execpeewee.mapping import PeeweeFields
__all__ = [
'PeeweeModel',
'ExecPeewee',
'PeeweeFields'
]
| 18.272727 | 43 | 0.766169 | from execpeewee.builder import PeeweeModel
from execpeewee.handler import ExecPeewee
from execpeewee.mapping import PeeweeFields
__all__ = [
'PeeweeModel',
'ExecPeewee',
'PeeweeFields'
]
| 0 | 0 | 0 |
b8d2ac159ea46c08c77cc17cbe99d12772509c45 | 181 | py | Python | tests/bytecode/mp-tests/fun4.py | LabAixBidouille/micropython | 11aa6ba456287d6c80598a7ebbebd2887ce8f5a2 | [
"MIT"
] | 303 | 2015-07-11T17:12:55.000Z | 2018-01-08T03:02:37.000Z | tests/bytecode/mp-tests/fun4.py | LabAixBidouille/micropython | 11aa6ba456287d6c80598a7ebbebd2887ce8f5a2 | [
"MIT"
] | 13 | 2016-05-12T16:51:22.000Z | 2018-01-10T22:33:25.000Z | tests/bytecode/mp-tests/fun4.py | LabAixBidouille/micropython | 11aa6ba456287d6c80598a7ebbebd2887ce8f5a2 | [
"MIT"
] | 26 | 2018-01-18T09:15:33.000Z | 2022-02-07T13:09:14.000Z | f = lambda a, b, *c, d: None # default arg
#f = lambda a, b=1, *c, d: None # default arg for lambda not implemented
| 30.166667 | 72 | 0.574586 | def f(a, b=1, *c, d):
pass
#print(a,b,c,d) # bug in uPy!
f = lambda a, b, *c, d: None # default arg
#f = lambda a, b=1, *c, d: None # default arg for lambda not implemented
| 43 | 0 | 22 |
7651b47a04e65ce5e0188647bc79ee8630ac895a | 633 | py | Python | fsdviz/stocking/migrations/0017_stocking_colorfields.py | AdamCottrill/fsdivz | 98dd1f35a08dba26424e2951a40715e01399478c | [
"MIT"
] | null | null | null | fsdviz/stocking/migrations/0017_stocking_colorfields.py | AdamCottrill/fsdivz | 98dd1f35a08dba26424e2951a40715e01399478c | [
"MIT"
] | 6 | 2020-02-12T00:03:40.000Z | 2020-11-30T01:20:56.000Z | fsdviz/stocking/migrations/0017_stocking_colorfields.py | AdamCottrill/fsdviz | 98dd1f35a08dba26424e2951a40715e01399478c | [
"MIT"
] | null | null | null | # Generated by Django 2.2.24 on 2021-09-13 15:58
import colorfield.fields
from django.db import migrations
| 25.32 | 81 | 0.612954 | # Generated by Django 2.2.24 on 2021-09-13 15:58
import colorfield.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("stocking", "0016_stocking_event_fk_defaults"),
]
operations = [
migrations.AddField(
model_name="lifestage",
name="color",
field=colorfield.fields.ColorField(default="#FF0000", max_length=18),
),
migrations.AddField(
model_name="stockingmethod",
name="color",
field=colorfield.fields.ColorField(default="#FF0000", max_length=18),
),
]
| 0 | 501 | 23 |
be5f2b2b8a5a39a22461e8da453417d2919fbfd7 | 4,675 | py | Python | app/forms.py | hazzillrodriguez/travel-pass | 2508bf3bdbaed409022eb4748e3efcc78dee0ebd | [
"MIT"
] | null | null | null | app/forms.py | hazzillrodriguez/travel-pass | 2508bf3bdbaed409022eb4748e3efcc78dee0ebd | [
"MIT"
] | null | null | null | app/forms.py | hazzillrodriguez/travel-pass | 2508bf3bdbaed409022eb4748e3efcc78dee0ebd | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SelectField, BooleanField, SubmitField
from wtforms.fields.html5 import DateField
from flask_wtf.file import FileField, FileAllowed, FileRequired
from wtforms.validators import DataRequired, ValidationError, EqualTo, Email, Length
from app.models import User | 36.24031 | 108 | 0.695829 | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SelectField, BooleanField, SubmitField
from wtforms.fields.html5 import DateField
from flask_wtf.file import FileField, FileAllowed, FileRequired
from wtforms.validators import DataRequired, ValidationError, EqualTo, Email, Length
from app.models import User
class SignupForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Email(), Length(min=1, max=64)])
password = PasswordField('Password', validators=[DataRequired(), Length(min=6, max=32)])
confirm_password = PasswordField('Confirm Password', validators=[DataRequired(), EqualTo('password')])
agreement = BooleanField('I agree to the Terms of Service and Privacy Policy', validators=[DataRequired()])
submit = SubmitField('Create account')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('The email address is already taken, please try again!')
class LoginForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
remember = BooleanField('Remember me')
submit = SubmitField('Login')
class ApplicationForm(FlaskForm):
firstname = StringField('First name',
validators=[DataRequired(),
Length(min=1, max=64)])
middlename = StringField('Middle name',
validators=[Length(max=64)])
lastname = StringField('Last name',
validators=[DataRequired(),
Length(min=1, max=64)])
sex = SelectField('Sex',
choices=[
('', '--- Please select ---'),
('Male', 'Male'),
('Female', 'Female')
],
validators=[DataRequired()])
dob = DateField('Date of Birth',
validators=[DataRequired()])
address = StringField('Address',
validators=[DataRequired(),
Length(min=1, max=128)])
purpose_of_travel = StringField('Purpose of Travel',
validators=[DataRequired(),
Length(min=8, max=128)])
details_of_travel = SelectField('Details of Travel',
choices=[
('', '--- Please select ---'),
('Going to Pampanga', 'Going to Pampanga'),
('Leaving Pampanga', 'Leaving Pampanga')
],
validators=[DataRequired()])
origin_address_1 = StringField('PRK / Office / Establishment',
validators=[DataRequired(),
Length(min=1, max=64)])
origin_barangay = StringField('Barangay',
validators=[DataRequired(),
Length(min=1, max=64)])
origin_city = StringField('City / Municipality',
validators=[DataRequired(),
Length(min=1, max=64)])
origin_province = StringField('Province',
validators=[DataRequired(),
Length(min=1, max=64)])
destination_address_1 = StringField('PRK / Office / Establishment',
validators=[DataRequired(),
Length(min=1, max=64)])
destination_barangay = StringField('Barangay',
validators=[DataRequired(),
Length(min=1, max=64)])
destination_city = StringField('City / Municipality',
validators=[DataRequired(),
Length(min=1, max=64)])
destination_province = StringField('Province',
validators=[DataRequired(),
Length(min=1, max=64)])
vehicle_type = SelectField('Vehicle Type',
choices=[
('', '--- Please select ---'),
('Motorcycle', 'Motorcycle'),
('Tricycle', 'Tricycle'),
('Public Utility Van', 'Public Utility Van'),
('Public Utility Bus', 'Public Utility Bus'),
('Private Car', 'Private Car'),
('10 Wheeler Truck', '10 Wheeler Truck'),
('6 Wheeler Truck', '6 Wheeler Truck'),
('4 Wheeler Truck', '4 Wheeler Truck'),
('Government Vehicle', 'Government Vehicle'),
('Others', 'Others')
],
validators=[DataRequired()])
plate_number = StringField('Plate number',
validators=[DataRequired(),
Length(min=1, max=16)])
classification = SelectField('Classification',
choices=[
('', '--- Please select ---'),
('Authorized Person Outside of Residence (APOR)', 'Authorized Person Outside of Residence (APOR)'),
('Business', 'Business'),
('Locally Stranded Individual (LSI)', 'Locally Stranded Individual (LSI)'),
('Medical', 'Medical'),
('Personal', 'Personal'),
('Trucking', 'Trucking'),
('Tourist (Family)', 'Tourist (Family)'),
('Tourist (Individual)', 'Tourist (Individual)'),
('Others', 'Others')
],
validators=[DataRequired()])
date_of_arrival = DateField('Date of Arrival',
validators=[DataRequired()])
attachment_1 = FileField('Valid ID or Certificate of Employment',
validators=[FileRequired(),
FileAllowed(['pdf', 'png', 'jpg', 'jpeg', 'gif'],
'File extension is not allowed.')])
attachment_2 = FileField('Rapid Test Result',
validators=[FileRequired(),
FileAllowed(['pdf', 'png', 'jpg', 'jpeg', 'gif'],
'File extension is not allowed.')])
submit = SubmitField('Submit') | 160 | 4,108 | 69 |
6268a654cc967f8b117fefbd98f4b6bf0ecac528 | 2,277 | py | Python | source/ccit2021.py | simon242/CampusCyberInspectionTool2021 | 5f585afa2ad9473082e690df8e7ca44dbac943ca | [
"MIT"
] | null | null | null | source/ccit2021.py | simon242/CampusCyberInspectionTool2021 | 5f585afa2ad9473082e690df8e7ca44dbac943ca | [
"MIT"
] | null | null | null | source/ccit2021.py | simon242/CampusCyberInspectionTool2021 | 5f585afa2ad9473082e690df8e7ca44dbac943ca | [
"MIT"
] | null | null | null | import argparse
import os
import webbrowser
from Option import Option
from functions.Clock import Clock
from functions.shut import shut
from functions.Ipconfig import Showip
from functions.CryptoSystem import *
from functions.Nslookup import Nslookup
from functions.PortScanner import Scanport
if __name__ == "__main__":
main()
| 30.36 | 127 | 0.59552 | import argparse
import os
import webbrowser
from Option import Option
from functions.Clock import Clock
from functions.shut import shut
from functions.Ipconfig import Showip
from functions.CryptoSystem import *
from functions.Nslookup import Nslookup
from functions.PortScanner import Scanport
def main():
    """Interactive entry point for the campus security tool collection.

    Renders a numbered menu in a loop and dispatches to the selected
    helper (clock, ipconfig, nslookup, port scan, crypto) until the
    user enters 99 to quit.
    """
    app_description = "校園資安測試常用工具集合"
    epilog_text = "歡迎至https://github.com/TwMoonBear-Arsenal/BetterCalculator/issues提供建議"
    # argparse is used only so that -h/--help prints the tool description.
    parser = argparse.ArgumentParser(
        description=app_description, epilog=epilog_text)
    args = parser.parse_args()
    # Menu entries rendered on every loop iteration.
    menu = [
        Option(1, "顯示今天日期"),
        Option(2, "顯示本地端IP地址"),
        Option(3, "ip或hostname相互反查"),
        Option(4, "詢找目標主機有開啟的port"),
        Option(5, "Encrypt & Decrypt System"),
    ]
    print()
    while True:
        # Clear the console (Windows 'cls') and draw the menu header.
        os.system('cls')
        print(app_description)
        print(epilog_text)
        print("--------")
        for entry in menu:
            print("[", entry.number, "] ", entry.descritpion)
        print("[", 99, "]", " 結束程式")
        # Ask the user which tool to run.
        choice = input("請輸入需要的功能:").strip()
        if choice == "1":
            Clock.ShowTime()
        elif choice == "2":
            Showip.ipconfig()
        elif choice == "3":
            lookup_mode = input("\033[33mchoose type you want to use:\033[0m\n[1]hostname2ip\n[2]ip2hostname\n").strip()
            if lookup_mode == "1":
                Nslookup.domainip()
            else:
                Nslookup.ipdomain()
        elif choice == "4":
            scan_mode = input("\033[33mchoose type you want to use:\033[0m\n[1]TCP\n[2]UDP\n").strip()
            if scan_mode == "1":
                Scanport.portscannerTCP()
            else:
                Scanport.portscannerUDP()
        elif choice == "5":
            CryptoSystem.Crypto_system_options()
        elif choice == "99":
            print("See you next time...")
            print()
            return
        else:
            print("輸入錯誤")
        # Pause so output is readable before the screen is cleared again.
        input("按任意鍵繼續...")
# Run the interactive menu only when this file is executed as a script.
if __name__ == "__main__":
    main()
| 2,110 | 0 | 23 |
7aab91c67e5a0b014d8044e8f2712e2c85d03816 | 1,598 | py | Python | soft_delete_history/templatetags/soft_delete_extras.py | ThiloSavary/django-soft-delete-history | d511d3b3a13ddb56691d807c919fe312b5e2f3cf | [
"MIT"
] | null | null | null | soft_delete_history/templatetags/soft_delete_extras.py | ThiloSavary/django-soft-delete-history | d511d3b3a13ddb56691d807c919fe312b5e2f3cf | [
"MIT"
] | null | null | null | soft_delete_history/templatetags/soft_delete_extras.py | ThiloSavary/django-soft-delete-history | d511d3b3a13ddb56691d807c919fe312b5e2f3cf | [
"MIT"
] | null | null | null | from django import template
from django.contrib.auth import get_permission_codename
from django.utils.translation import gettext_lazy as _
from django.utils.safestring import mark_safe
register = template.Library()
@register.simple_tag(takes_context=True)
| 51.548387 | 133 | 0.702753 | from django import template
from django.contrib.auth import get_permission_codename
from django.utils.translation import gettext_lazy as _
from django.utils.safestring import mark_safe
register = template.Library()
@register.simple_tag(takes_context=True)
def admin_soft_delete_bar(context, *args, **kwargs):
    """Render a soft-delete or restore submit button for an admin change form.

    Returns a ``mark_safe`` HTML ``<input type="submit">``:

    * "Soft delete" — when the object is not yet deleted and the user holds
      the model's ``can_soft_delete`` or ``delete`` permission.
    * "Restore" — when the object is already deleted and the user holds the
      model's ``can_restore`` or ``change`` permission.

    Returns ``''`` when neither case applies.  Raises ``KeyError`` if the
    template context lacks ``original`` or ``request`` (i.e. when rendered
    outside an admin change view).
    """
    opts = context['original']._meta
    user = context['request'].user
    is_deleted = context['original'].is_deleted

    def _has_perm(action):
        # Permissions are namespaced by the model's app label.
        codename = get_permission_codename(action, opts)
        return user.has_perm('%s.%s' % (opts.app_label, codename))

    # Fixed: removed leftover debug print() calls that wrote the button
    # markup / label to stdout on every render.
    if not is_deleted and (_has_perm('can_soft_delete') or _has_perm('delete')):
        return mark_safe('<input type="submit" value="' + _('Soft delete') +
                         '" name="_soft-delete" style="background-color: #fc4545;">')
    if is_deleted and (_has_perm('can_restore') or _has_perm('change')):
        return mark_safe('<input type="submit" value="' + _('Restore') +
                         '" name="_restore" style="background-color: #49859c;">')
    return ''
| 1,318 | 0 | 22 |
c963e25a39c48d1cf87711a245bad7fb912e676d | 49 | py | Python | discoin/config.py | Discoin/discoin.py | 4a3459dfaab6695fe88d05290465a1b7842b3606 | [
"MIT"
] | 2 | 2020-07-26T11:29:47.000Z | 2021-09-08T22:38:35.000Z | discoin/config.py | Discoin/discoin.py | 4a3459dfaab6695fe88d05290465a1b7842b3606 | [
"MIT"
] | 8 | 2020-02-11T14:23:38.000Z | 2021-04-16T21:38:15.000Z | discoin/config.py | Discoin/discoin.py | 4a3459dfaab6695fe88d05290465a1b7842b3606 | [
"MIT"
] | null | null | null | DOMAIN = "https://discoin.zws.im"
VERSION = "4.0" | 24.5 | 33 | 0.653061 | DOMAIN = "https://discoin.zws.im"
VERSION = "4.0" | 0 | 0 | 0 |