| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
python/sprint_13/Subsequence.py
|
Talgatovich/algorithms-templates
| 0
|
12780251
|
<filename>python/sprint_13/Subsequence.py
def sequense(line_1, line_2):
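    # Returns True if every character of line_1 appears in line_2 in the same relative
    # order (i.e. line_1 is a subsequence of line_2), otherwise False.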
start = -1
for word in line_1:
start = line_2.find(word, start + 1)
if start == -1:
return False
return True
def read_input():
s = input()
t = input()
return s, t
def main():
s, t = read_input()
print(sequense(s, t))
#if __name__ == "__main__":
# main()
list1 = 'abcp'
list2 = 'ahpc'
print(sequense(list1, list2))
| 3.78125
| 4
|
data/transcoder_evaluation_gfg/python/MINIMUM_PERIMETER_N_BLOCKS.py
|
mxl1n/CodeGen
| 241
|
12780252
|
<filename>data/transcoder_evaluation_gfg/python/MINIMUM_PERIMETER_N_BLOCKS.py
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
def f_gold ( n ) :
l = math.sqrt ( n )
sq = l * l
if ( sq == n ) :
return l * 4
else :
row = n / l
perimeter = 2 * ( l + row )
if ( n % l != 0 ) :
perimeter += 2
return perimeter
#TOFILL
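# NOTE: the harness below compares f_filled against f_gold; f_filled is the candidate
# translation that is expected to be inserted at the #TOFILL marker above.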
if __name__ == '__main__':
param = [
(45,),
(80,),
(54,),
(48,),
(83,),
(68,),
(32,),
(20,),
(68,),
(66,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
| 3.34375
| 3
|
src/charma/countries/__init__.py
|
mononobi/charma-server
| 1
|
12780253
|
# -*- coding: utf-8 -*-
"""
countries package.
"""
from pyrin.packaging.base import Package
class CountriesPackage(Package):
"""
countries package class.
"""
NAME = __name__
COMPONENT_NAME = 'countries.component'
| 1.367188
| 1
|
setup.py
|
geonda/RIXS.phonons
| 2
|
12780254
|
import setuptools
requirements = []
with open('requirements.txt', 'r') as fh:
for line in fh:
requirements.append(line.strip())
with open("README.md", "r") as fh:
long_description = fh.read()
print(setuptools.find_packages(),)
setuptools.setup(
name="phlab",
version="0.0.0.dev6",
    author="<NAME>, <NAME>",  # setuptools expects `author`, not `authors`
    # author_email="<EMAIL>",
    description="Phonon contribution in RIXS",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/geonda/RIXS.phonons/",
packages=setuptools.find_packages(),
install_requires = requirements,
python_requires='>=3.6',
)
| 1.710938
| 2
|
tests/unit/tspapi/metric_test.py
|
jdgwartney/pulse-api-python
| 0
|
12780255
|
<gh_stars>0
#!/usr/bin/env python
#
# Copyright 2016 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
from unittest import TestCase
from tspapi import API
from tspapi import Metric
from tspapi import HTTPResponseError
from tspapi import aggregates
from tspapi import units
import tspapi.metric
import json
import random
import requests
import logging
_path = os.path.dirname(__file__)
sys.path.append(_path)
from api_test_utils import TestUtils
class MetricTest(TestCase):
def setUp(self):
self.api = API()
logging.basicConfig(level=logging.DEBUG)
self.name = 'TEST_' + TestUtils.random_string(6)
self.display_name = 'green'
self.display_name_short = 'blue'
self.description = 'magenta'
self.default_aggregate = aggregates.SUM
self.default_resolution = 60000
self.unit = units.DURATION
self.type = 'FOOBAR'
self.is_disabled = False
self.metric = Metric(name=self.name,
display_name=self.display_name,
display_name_short=self.display_name_short,
description=self.description,
default_aggregate=self.default_aggregate,
default_resolution=self.default_resolution,
unit=self.unit,
_type=self.type,
is_disabled=self.is_disabled)
self.api.metric_create_batch([self.metric])
logging.basicConfig(level=logging.INFO)
def tearDown(self):
self.api.metric_delete(self.metric.name)
def test_minimal_constructor(self):
name = 'FOO'
m = Metric(name=name)
self.assertEqual(name, m.name)
self.assertEqual(name, m.display_name)
self.assertEqual(name, m.display_name_short)
self.assertEqual('', m.description)
self.assertEqual(m.default_aggregate, aggregates.AVG)
self.assertEqual(m.default_resolution, 1000)
self.assertEqual(m.unit, units.NUMBER)
self.assertIsNone(m.type)
def test_constructor_arguments(self):
self.assertEqual(self.name, self.metric.name)
self.assertEqual(self.display_name, self.metric.display_name)
self.assertEqual(self.display_name_short, self.metric.display_name_short)
self.assertEqual(self.description, self.metric.description)
self.assertEqual(self.default_aggregate, self.metric.default_aggregate)
self.assertEqual(self.default_resolution, self.metric.default_resolution)
self.assertEqual(self.unit, self.metric.unit)
self.assertEqual(self.type, self.metric.type)
self.assertEqual(self.is_disabled, self.metric.is_disabled)
def test_representation_string(self):
"""
Test the output of the __repr__ method
:param self:
:return:
"""
expected = ["Metric(name='{0}', display_name='green', display_name_short='blue',".format(self.metric.name),
" description='magenta', default_aggregate='sum', default_resolution=60000,",
" unit='duration', _type='FOOBAR', is_disabled='False')"]
self.assertEqual("".join(expected), self.metric.__repr__())
def test_metric_to_json(self):
m = Metric(name="TEST")
data = json.dumps(m, sort_keys=True, default=tspapi.metric.serialize_instance)
s = ['{"defaultAggregate": "avg", "defaultResolutionMS": 1000, "description": "",',
' "displayName": "TEST", "displayNameShort": "TEST", "isDisabled": false, "name": "TEST",',
' "unit": "number"}']
expected = "".join(s)
self.assertEqual(expected, data)
def test_metric_list_to_json(self):
l = [Metric(name="ONE"), Metric(name="TWO")]
self.maxDiff = None
s = ['[{"defaultAggregate": "avg", "defaultResolutionMS": 1000, "description": "", "displayName": "ONE",',
' "displayNameShort": "ONE", "isDisabled": false, "name": "ONE",',
' "unit": "number"},',
' {"defaultAggregate": "avg", "defaultResolutionMS": 1000, "description": "", "displayName": "TWO",',
' "displayNameShort": "TWO", "isDisabled": false, "name": "TWO",',
' "unit": "number"}]']
expected = "".join(s)
data = json.dumps(l, sort_keys=True, default=tspapi.metric.serialize_instance)
self.assertEqual(expected, data)
def test_metric_instance_empty_name(self):
"""
Ensure that creating a metric with an empty name throws a
ValueError exception
:return:
"""
try:
m = Metric()
print(m)
self.assertTrue(False)
except ValueError:
pass
def test_metric_empty_name(self):
"""
Ensure that trying to call the create metric API with an empty name
throws a ValueError exception
:return:
"""
try:
self.api.metric_create()
self.assertTrue(False)
except ValueError:
pass
def test_metric_create(self):
name = "TEST_CREATE_FOOBAR" + TestUtils.random_string(6)
display_name = "TEST_METRIC_CREATE" + TestUtils.random_string(6)
display_name_short = "TEST_METRIC" + TestUtils.random_string(6)
description = TestUtils.random_string(32)
default_aggregate = aggregates.AVG
default_resolution = 60000
unit = units.DURATION
_type = 'FOO'
is_disabled = True
metric = self.api.metric_create(name=name,
display_name=display_name,
display_name_short=display_name_short,
description=description,
default_aggregate=default_aggregate,
default_resolution=default_resolution,
unit=unit,
_type=_type,
is_disabled=is_disabled)
self.assertEqual(name, metric.name)
self.assertEqual(display_name, metric.display_name)
self.assertEqual(display_name_short, metric.display_name_short)
self.assertEqual(description, metric.description)
self.assertEqual(default_aggregate.upper(), metric.default_aggregate)
self.assertEqual(default_resolution, metric.default_resolution)
self.assertEqual(unit, metric.unit)
self.assertEqual(_type, metric.type)
self.assertEqual(is_disabled, metric.is_disabled)
def test_metric_create_one_batch(self):
name = 'TEST_CREATE_BATCH_ONE_FOOBAR' + TestUtils.random_string(6)
display_name = "BATCH" + TestUtils.random_string(6)
display_name_short = "BATCH" + TestUtils.random_string(3)
description = TestUtils.random_string(32)
default_aggregate = aggregates.SUM
default_resolution = random.randrange(1000, 60000)
unit = units.PERCENT
_type = 'FOO'
is_disabled = True
metric1 = Metric(name=name,
display_name=display_name,
display_name_short=display_name_short,
description=description,
default_aggregate=default_aggregate,
default_resolution=default_resolution,
unit=unit,
_type=_type,
is_disabled=is_disabled)
metrics = self.api.metric_create_batch([metric1])
self.assertEqual(len(metrics), 1)
m = metrics[0]
self.assertEqual(name, m.name)
self.assertEqual(display_name, m.display_name)
self.assertEqual(display_name_short, m.display_name_short)
self.assertEqual(description, m.description)
self.assertEqual(default_aggregate.upper(), m.default_aggregate)
self.assertEqual(default_resolution, m.default_resolution)
self.assertEqual(unit, m.unit)
self.assertEqual(_type, m.type)
self.assertEqual(is_disabled, m.is_disabled)
self.api.metric_delete(name)
def test_metric_large_display_name(self):
"""
Test to see that we can handle a display name up to 1K characters
:return:
"""
try:
name = 'TEST_CREATE' + TestUtils.random_string(6)
display_name = TestUtils.random_string(1024*1024)
metric = self.api.metric_create(name=name, display_name=display_name)
self.assertTrue(True)
except HTTPResponseError as e:
self.assertEqual(requests.codes.request_entity_too_large, e.status_code)
def test_metric_large_short_display_name(self):
"""
Test on the limit of the short display name
:return:
"""
try:
name = 'TEST_CREATE' + TestUtils.random_string(6)
display_name_short = TestUtils.random_string(1024*1024)
metric = self.api.metric_create(name=name, display_name_short=display_name_short)
self.assertTrue(True)
except HTTPResponseError as e:
self.assertEqual(requests.codes.request_entity_too_large, e.status_code)
def test_metric_bad_aggregate(self):
try:
name = 'TEST_CREATE' + TestUtils.random_string(6)
display_name = TestUtils.random_string(32)
metric = self.api.metric_create(name=name,
display_name=display_name,
default_aggregate='foo')
self.assertTrue(False)
except HTTPResponseError as e:
self.assertEqual(requests.codes.unprocessable_entity, e.status_code)
def test_metric_bad_unit(self):
try:
name = 'TEST_CREATE' + TestUtils.random_string(6)
display_name = TestUtils.random_string(32)
metric = self.api.metric_create(name=name,
display_name=display_name,
unit='foo')
self.assertTrue(False)
except HTTPResponseError as e:
self.assertEqual(requests.codes.unprocessable_entity, e.status_code)
def test_metric_create_multiple_batch(self):
name1 = 'TEST_CREATE_BATCH_ONE_FOOBAR' + TestUtils.random_string(6)
name2 = 'TEST_CREATE_BATCH_TWO_FOOBAR' + TestUtils.random_string(6)
name3 = 'TEST_CREATE_BATCH_THREE_FOOBAR' + TestUtils.random_string(6)
name4 = 'TEST_CREATE_BATCH_FOUR_FOOBAR' + TestUtils.random_string(6)
display_name1 = 'TEST_DISPLAY_NAME' + TestUtils.random_string(6)
display_name2 = 'TEST_DISPLAY_NAME' + TestUtils.random_string(6)
display_name3 = 'TEST_DISPLAY_NAME' + TestUtils.random_string(6)
display_name4 = 'TEST_DISPLAY_NAME' + TestUtils.random_string(6)
display_name_short1 = 'TEST_SHORT' + TestUtils.random_string(10)
display_name_short2 = 'TEST_SHORT' + TestUtils.random_string(10)
display_name_short3 = 'TEST_SHORT' + TestUtils.random_string(10)
display_name_short4 = 'TEST_SHORT' + TestUtils.random_string(10)
description1 = TestUtils.random_string(32)
description2 = TestUtils.random_string(32)
description3 = TestUtils.random_string(32)
description4 = TestUtils.random_string(32)
default_aggregate1 = aggregates.AVG
default_aggregate2 = aggregates.MIN
default_aggregate3 = aggregates.MAX
default_aggregate4 = aggregates.SUM
default_resolution1 = random.randrange(1000, 60000)
default_resolution2 = random.randrange(1000, 60000)
default_resolution3 = random.randrange(1000, 60000)
default_resolution4 = random.randrange(1000, 60000)
unit1 = units.BYTECOUNT
unit2 = units.DURATION
unit3 = units.NUMBER
unit4 = units.PERCENT
is_disabled1 = True
is_disabled2 = False
is_disabled3 = True
is_disabled4 = False
_type1 = TestUtils.random_string(6)
_type2 = TestUtils.random_string(6)
_type3 = TestUtils.random_string(6)
_type4 = TestUtils.random_string(6)
new_metrics = [Metric(name=name1,
display_name=display_name1,
display_name_short=display_name_short1,
description=description1,
default_aggregate=default_aggregate1,
default_resolution=default_resolution1,
unit=unit1,
_type=_type1,
is_disabled=is_disabled1),
Metric(name=name2,
display_name=display_name2,
display_name_short=display_name_short2,
description=description2,
default_aggregate=default_aggregate2,
default_resolution=default_resolution2,
unit=unit2,
_type=_type2,
is_disabled=is_disabled2),
Metric(name=name3,
display_name=display_name3,
display_name_short=display_name_short3,
description=description3,
default_aggregate=default_aggregate3,
default_resolution=default_resolution3,
unit=unit3,
_type=_type3,
is_disabled=is_disabled3),
Metric(name=name4,
display_name=display_name4,
display_name_short=display_name_short4,
description=description4,
default_aggregate=default_aggregate4,
default_resolution=default_resolution4,
unit=unit4,
_type=_type4,
is_disabled=is_disabled4)]
metrics = self.api.metric_create_batch(new_metrics)
self.assertEqual(4, len(metrics))
def test_metric_create_batch_from_file(self):
self.api.metric_create_batch(path="tests/unit/tspapi/metric_batch.json")
self.api.metric_delete('TEST_MY_COOL_METRIC_31')
self.api.metric_delete('TEST_MY_COOL_METRIC_32')
def test_metric_get(self):
metrics = self.api.metric_get()
self.assertIsNotNone(metrics)
def test_metric_delete(self):
name = 'TEST_DELETE_FOOBAR' + TestUtils.random_string(6)
self.api.metric_create(name=name)
self.api.metric_delete(name)
def test_metric_delete_no_name(self):
try:
self.api.metric_delete()
self.assertTrue(False)
except ValueError:
pass
def test_metric_delete_name_does_not_exist(self):
try:
self.api.metric_delete(TestUtils.random_string(10))
except HTTPResponseError as e:
self.assertEqual(requests.codes.unprocessable_entity, e.status_code)
def test_metric_update(self):
name = 'TEST_UPDATE_' + TestUtils.random_string(6)
display_name = TestUtils.random_string(8)
display_name_short = TestUtils.random_string(16)
description = TestUtils.random_string(16)
default_aggregate = aggregates.SUM
default_resolution = 60000
unit = units.PERCENT
is_disabled = False
_type = 'DEVICE'
self.api.metric_create(name=name,
display_name=display_name,
display_name_short=display_name_short,
description=description,
default_aggregate=default_aggregate,
default_resolution=default_resolution,
unit=unit,
is_disabled=is_disabled,
_type=_type
)
display_name = TestUtils.random_string(8)
display_name_short = TestUtils.random_string(16)
description = TestUtils.random_string(16)
default_aggregate = aggregates.MAX
default_resolution = 30000
unit = units.DURATION
is_disabled = True
_type = 'HOST'
metric = self.api.metric_update(name=name,
display_name=display_name,
display_name_short=display_name_short,
description=description,
default_aggregate=default_aggregate,
default_resolution=default_resolution,
unit=unit,
is_disabled=is_disabled,
_type=_type
)
self.assertEqual(name, metric.name)
self.assertEqual(display_name, metric.display_name)
self.assertEqual(display_name_short, metric.display_name_short)
self.assertEqual(description, metric.description)
self.assertEqual(default_aggregate.upper(), metric.default_aggregate)
self.assertEqual(default_resolution, metric.default_resolution)
self.assertEqual(unit, metric.unit)
self.assertEqual(_type, metric.type)
self.assertEqual(is_disabled, metric.is_disabled)
self.api.metric_delete(name)
def test_metric_batch_update(self):
pass
| 1.976563
| 2
|
find_beautifulsoup.py
|
jasperan/imdb-celebrities
| 0
|
12780256
|
<filename>find_beautifulsoup.py
from bs4 import BeautifulSoup
from sys import exit, argv
import requests
def parse_people(url):
response = requests.get(url)
soup = BeautifulSoup(response.content, 'html.parser')
obj = soup.find(id='meterRank')
if obj.string == 'SEE RANK':
obj = 'Below Rank 5000'
else:
obj = obj.string
print('MeterRank: {}'.format(
obj))
obj_2 = soup.find(id='knownfor')
'''
print('Known for: {}'.format(
obj_2))
'''
obj_3 = obj_2.find_all('span', class_='knownfor-ellipsis')
obj_4 = obj_2.find_all('a', class_='knownfor-ellipsis')
obj_5 = obj_2.find_all(class_='knownfor-year')
list_titles = list()
list_years = list()
list_roles = list()
for i in range(0, len(obj_3), 2):
list_titles.append(obj_3[i].string)
for i in range(1, len(obj_3), 2):
list_years.append(obj_3[i].string)
for i in range(len(obj_4)):
list_roles.append(obj_4[i].string)
print('{} // {} // {}'.format(
list_titles,
list_roles,
list_years))
assert len(list_titles) == len(list_roles) == len(list_years)
list_returner = list()
for i in range(len(list_titles)):
dictionary = {
'title':list_titles[i],
'role':list_roles[i],
'year':list_years[i]
}
list_returner.append(dictionary)
'''
print('Found {} known for movies'.format(
len(obj_3)))
'''
'''
for i in obj_3:
print('Title: {}'.format(
obj_3.string))
print('Title Role: {}'.format(
obj_4.string))
print('Year: {}'.format(
obj_5.string))
'''
return list_returner
def main(mode='default', day='11', month='12', year='1996'):
url = 'https://www.imdb.com/search/name/?birth_monthday={}-{}&birth_year={}'.format(
month,
day,
year
)
list_obj = list()
response = requests.get(url)
soup = BeautifulSoup(response.content, 'html.parser')
# print(soup.prettify())
obj = soup.find_all('h3')
print(obj)
print('Found {} elements'.format(
len(obj)))
for i in range(len(obj)):
try:
href = obj[i].find('a').get('href')
name = obj[i].find('a').string.rstrip()
parsed_info = parse_people('{}{}'.format(
'https://imdb.com',
href))
iteration_dict = {
'name':name,
'url':href,
'information':parsed_info
}
print(href)
print(name)
list_obj.append(iteration_dict)
except AttributeError:
print('Element {} was empty: {}'.format(
i+1,
obj[i]))
print('Objects: {}'.format(
list_obj))
return list_obj
if __name__ == '__main__':
main()
| 3.3125
| 3
|
bluesky/tests/test_transformer.py
|
AbbyGi/bluesky
| 43
|
12780257
|
<gh_stars>10-100
import pytest
from bluesky.utils import register_transform
@pytest.fixture
def transform_cell():
IPython = pytest.importorskip('IPython')
ip = IPython.core.interactiveshell.InteractiveShell()
register_transform('RE', prefix='<', ip=ip)
if IPython.__version__ >= '7':
return ip.transform_cell
else:
return ip.input_splitter.transform_cell
def test_register_transform_smoke(transform_cell):
assert True
@pytest.mark.parametrize('cell',
['a < b\n',
'RE(plan(a < b))\n',
'for j in range(5):\n < a\n <b\n'])
def test_no_transform(transform_cell, cell):
new_cell = transform_cell(cell)
assert cell == new_cell
@pytest.mark.parametrize('cell',
['< b\n',
'<a\n',
'<fly(a, b, c), md={"a": "aardvark"}\n',
' <b\n',
'<a, c=d\n'])
def test_transform(transform_cell, cell):
new_cell = transform_cell(cell)
assert f'RE({cell.lstrip("< ").strip()})\n' == new_cell
| 2.21875
| 2
|
projects/migrations/0005_auto_20191104_1335.py
|
EddyAnalytics/eddy-backend
| 1
|
12780258
|
<filename>projects/migrations/0005_auto_20191104_1335.py
# Generated by Django 2.2.6 on 2019-11-04 13:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('projects', '0004_auto_20191016_1732'),
]
operations = [
migrations.AlterField(
model_name='dataconnectortype',
name='integration',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='data_connector_types', to='integrations.Integration'),
),
]
| 1.234375
| 1
|
trains/migrations/0010_auto_20201125_1131.py
|
Seshathri-saravanan/quest
| 0
|
12780259
|
# Generated by Django 3.1.3 on 2020-11-25 06:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('trains', '0009_auto_20201125_0840'),
]
operations = [
migrations.DeleteModel(
name='Station',
),
migrations.DeleteModel(
name='TrainRoutes',
),
]
| 1.523438
| 2
|
env/vagrant/vagrant.py
|
iterativ/django-project-template
| 0
|
12780260
|
# -*- coding: utf-8 -*-
#
# ITerativ GmbH
# http://www.iterativ.ch/
#
# Copyright (c) 2012 ITerativ GmbH. All rights reserved.
#
# Created on Jul 20, 2012
# @author: <NAME> <<EMAIL>>
from fabric.api import env
from deployit.fabrichelper.servicebase import UwsgiService, NginxService, CeleryService
from deployit.fabrichelper.environments import EnvTask
class VagrantEnv(EnvTask):
"""
Use vagrant environment
"""
name = "vagrant"
def run(self):
env.hosts = ['192.168.33.10']
env.server_names = ['127.0.0.1', '10.*', '192.168.*']
env.user = 'vagrant'
env.key_filename = "~/.vagrant.d/insecure_private_key"
env.use_dev_pip = True
env.env_name = 'vagrant'
env.services = [UwsgiService, NginxService, CeleryService]
env.project_name = '{{ project_name }}'
env.puppet_branch_name = 'ubuntu1204'
env.settings_module = '{{ project_name }}.settings.vagrant'
env.debug = True
env.puppet_temp_dir = '/home/vagrant/puppettmp'
env.puppet_dir = '/home/vagrant/puppet'
env.requirements_file = 'requirements/base.txt'
test_local_env = VagrantEnv()
| 1.796875
| 2
|
flexget/components/managed_lists/lists/pending_list/db.py
|
mfonville/Flexget
| 2
|
12780261
|
import logging
from datetime import datetime
from sqlalchemy import Boolean, Column, DateTime, Integer, Unicode, func
from sqlalchemy.orm import relationship
from sqlalchemy.sql.elements import and_
from sqlalchemy.sql.schema import ForeignKey
from flexget import db_schema
from flexget.db_schema import versioned_base
from flexget.utils.database import entry_synonym, with_session
plugin_name = 'pending_list'
log = logging.getLogger(plugin_name)
Base = versioned_base(plugin_name, 0)
@db_schema.upgrade(plugin_name)
def upgrade(ver, session):
ver = 0
return ver
class PendingListList(Base):
__tablename__ = 'pending_list_lists'
id = Column(Integer, primary_key=True)
name = Column(Unicode, unique=True)
added = Column(DateTime, default=datetime.now)
entries = relationship(
'PendingListEntry', backref='list', cascade='all, delete, delete-orphan', lazy='dynamic'
)
def to_dict(self):
return {'id': self.id, 'name': self.name, 'added_on': self.added}
class PendingListEntry(Base):
__tablename__ = 'wait_list_entries'
id = Column(Integer, primary_key=True)
list_id = Column(Integer, ForeignKey(PendingListList.id), nullable=False)
added = Column(DateTime, default=datetime.now)
title = Column(Unicode)
original_url = Column(Unicode)
_json = Column('json', Unicode)
entry = entry_synonym('_json')
approved = Column(Boolean)
def __init__(self, entry, pending_list_id):
self.title = entry['title']
self.original_url = entry.get('original_url') or entry['url']
self.entry = entry
self.list_id = pending_list_id
self.approved = False
def __repr__(self):
return '<PendingListEntry,title=%s,original_url=%s,approved=%s>' % (
self.title,
self.original_url,
self.approved,
)
def to_dict(self):
return {
'id': self.id,
'list_id': self.list_id,
'added_on': self.added,
'title': self.title,
'original_url': self.original_url,
'entry': dict(self.entry),
'approved': self.approved,
}
@with_session
def get_pending_lists(name=None, session=None):
log.debug('retrieving pending lists')
query = session.query(PendingListList)
if name:
log.debug('searching for pending lists with name %s', name)
query = query.filter(PendingListList.name.contains(name))
return query.all()
@with_session
def get_list_by_exact_name(name, session=None):
log.debug('returning pending list with name %s', name)
return (
session.query(PendingListList)
.filter(func.lower(PendingListList.name) == name.lower())
.one()
)
@with_session
def get_list_by_id(list_id, session=None):
log.debug('returning pending list with id %d', list_id)
return session.query(PendingListList).filter(PendingListList.id == list_id).one()
@with_session
def delete_list_by_id(list_id, session=None):
entry_list = get_list_by_id(list_id=list_id, session=session)
if entry_list:
log.debug('deleting pending list with id %d', list_id)
session.delete(entry_list)
@with_session
def get_entries_by_list_id(
list_id,
start=None,
stop=None,
order_by='title',
descending=False,
approved=False,
filter=None,
entry_ids=None,
session=None,
):
log.debug('querying entries from pending list with id %d', list_id)
query = session.query(PendingListEntry).filter(PendingListEntry.list_id == list_id)
if filter:
query = query.filter(func.lower(PendingListEntry.title).contains(filter.lower()))
if approved:
        query = query.filter(PendingListEntry.approved == approved)  # `is` would compare Python identity instead of building a SQL expression
if entry_ids:
query = query.filter(PendingListEntry.id.in_(entry_ids))
if descending:
query = query.order_by(getattr(PendingListEntry, order_by).desc())
else:
query = query.order_by(getattr(PendingListEntry, order_by))
return query.slice(start, stop).all()
@with_session
def get_entry_by_title(list_id, title, session=None):
entry_list = get_list_by_id(list_id=list_id, session=session)
if entry_list:
log.debug('fetching entry with title `%s` from list id %d', title, list_id)
return (
session.query(PendingListEntry)
.filter(and_(PendingListEntry.title == title, PendingListEntry.list_id == list_id))
.first()
)
@with_session
def get_entry_by_id(list_id, entry_id, session=None):
log.debug('fetching entry with id %d from list id %d', entry_id, list_id)
return (
session.query(PendingListEntry)
.filter(and_(PendingListEntry.id == entry_id, PendingListEntry.list_id == list_id))
.one()
)
| 1.992188
| 2
|
functional/high_order_func/map_reduce.py
|
zhaoyu69/python3-learning
| 1
|
12780262
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# map
# def f(x):
# return x * x
#
# r = map(f, [1, 2, 3, 4, 5, 6, 7, 8, 9])
# print(list(r))
#
# L = []
# for n in [1, 2, 3, 4, 5, 6, 7, 8, 9]:
# L.append(f(n))
# print(L)
#
# print(list(map(str,[1,2,3,4,5,6,7,8,9])))
# reduce
# from functools import reduce
# def add(x,y):
# return x + y
#
# print(reduce(add, [1,3,5,7,9]))
#
# def fn(x,y):
# return x * 10 + y
#
# print(reduce(fn, [1,3,5,7,9]))
#
# DIGITS = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}
# def char2num(s):
# return DIGITS[s]
#
# def str2int(s):
# return reduce(lambda x, y : x * 10 + y, map(char2num, s))
#
# print(str2int('12345'))
# test1
def normalize(name):
return name[:1].upper() + name[1:].lower()
L1 = ['adam', 'LISA', 'barT']
L2 = list(map(normalize, L1))
print(L2)
# test2
from functools import reduce  # needed here because the import above is commented out
def prod(L):
def cj(x, y):
return x * y
return reduce(cj, L)
print('3 * 5 * 7 * 9 =', prod([3, 5, 7, 9]))
if prod([3, 5, 7, 9]) == 945:
    print('Test passed!')
else:
    print('Test failed!')
# test3
def str2float(s):
digits = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}
s1, s2 = s.split('.')
d1 = reduce(lambda x, y: x * 10 + y, map(lambda x: digits[x], s1))
d2 = reduce(lambda x, y: x * 0.1 + y, map(lambda x: digits[x], reversed(s2))) * 0.1
return d1 + d2
print('str2float(\'123.456\') =', str2float('123.456'))
if abs(str2float('123.456') - 123.456) < 0.00001:
    print('Test passed!')
else:
    print('Test failed!')
| 3.75
| 4
|
tools/crawler/get-icon.py
|
JoyPang123/Textmage
| 13
|
12780263
|
import sys
import os
import logging
import argparse
from bs4 import BeautifulSoup
import requests
# Output data to stdout instead of stderr
log = logging.getLogger()
log.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
log.addHandler(handler)
# Parse the argument for user specification files
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--txt",
type=str,
required=True,
help="Text file for downloading a pack of icons")
parser.add_argument("-o", "--output",
type=str,
required=True,
help="Output directory for the download")
args = parser.parse_args()
# Create the output directory if it does not already exist
os.makedirs(f"{args.output}",
exist_ok=True)
# Use browser-like headers so the website does not block the request
headers = {
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/56.0.2924.87 Safari/537.36 "
}
# Read the (url, category) lines from the input text file
with open(args.txt, "r") as f:
    buffer_information = f.readlines()
for cur_information in buffer_information:
# Remove "\n"
cur_website, categories = cur_information.strip().split(",")
# Know the current category
logging.info(categories)
# Stop at an empty line (end of the list)
if cur_website == "":
sys.exit()
count = 0
# Get the html and parse the tags
response = requests.get(f"{cur_website}",
headers=headers)
soup = BeautifulSoup(response.text, "html.parser")
find_ul = soup.find("section", {"class": "search-result"}).find("ul", {"class": "icons"})
find_li = find_ul.findAll("li", {"class": "icon--item"})
# Run through all the image section
for li in find_li:
try:
img = li.find("img", {"class": "lzy"})
img_url = img.get("data-src")
# Check the url is valid for saving image file
if not img_url.endswith(".png"):
continue
# Save the data
img_data = requests.get(img_url).content
# Save the image and text file
with open(f"{args.output}/{categories}_{count+1}.png", "wb") as img_file, \
open(f"{args.output}/{categories}_{count+1}.txt", "w") as text_file:
img_file.write(img_data)
text_file.write(f"{categories}\n")
count += 1
except Exception:
break
| 2.875
| 3
|
build_windows/nvim_bridge.py
|
grigorievich/Viy
| 14
|
12780264
|
"""Bridge for connecting a UI instance to nvim."""
import sys
from threading import Semaphore, Thread
from traceback import format_exc
class UIBridge(object):
"""UIBridge class. Connects a Nvim instance to a UI class."""
def connect(self, nvim, ui):
"""Connect nvim and the ui.
This will start loops for handling the UI and nvim events while
also synchronizing both.
"""
self._error = None
self._nvim = nvim
self._ui = ui
self._nvim_event_loop()
if self._error:
print(self._error)
def exit(self):
"""Disconnect by exiting nvim."""
self.detach()
self._call(self._nvim.quit)
def input(self, input_str):
"""Send input to nvim."""
self._call(self._nvim.input, input_str)
def resize(self, columns, rows):
"""Send a resize request to nvim."""
self._call(self._nvim.ui_try_resize, columns, rows)
def attach(self, columns, rows, rgb):
"""Attach the UI to nvim."""
self._call(self._nvim.ui_attach, columns, rows, rgb)
def detach(self):
"""Detach the UI from nvim."""
self._call(self._nvim.ui_detach)
def _call(self, fn, *args):
self._nvim.async_call(fn, *args)
def _nvim_event_loop(self):
def on_setup():
self.input("<ESC>:let g:NeoSFMLGUIChannelID=" + str(self._nvim.channel_id) + "<CR>")
import messages_from_ui
file_to_edit = messages_from_ui.get_command_line_argument()
if file_to_edit != None and file_to_edit != "":
'''
In case there is a swap file, command_input will error out
and the program won't work. Use input instead.
'''
self._nvim.input("<esc>:edit " + file_to_edit + "<cr>")
self._ui.start(self)
self._ui.switch_to_navigator()
def on_request(method, args):
if method == "switchToNavigator":
self._ui.switch_to_navigator()
else:
raise Exception('Not implemented')
def on_notification(method, updates):
def apply_updates():
try:
for update in updates:
try:
handler = getattr(self._ui, '_nvim_' + update[0])
#print('_nvim_' + update[0])
except AttributeError:
pass
else:
#for args in update[1:]:
#print(*args, end = " ")
#print("END")
text = ''
if update[0] == 'put':
for args in update[1:]:
text += str(args)[2]
handler(text)
else:
for args in update[1:]:
handler(*args)
except:
self._error = format_exc()
self._call(self._nvim.quit)
if method == 'redraw':
if len(updates) > 0:
self._ui._nvim_lock_update_mutex();
apply_updates();
self._ui._nvim_redraw();
self._ui._nvim_unlock_update_mutex();
self._nvim.run_loop(on_request, on_notification, on_setup)
self._ui.quit() #end definition of nvim event loop
| 2.953125
| 3
|
worlds_worst_serverless/worlds_worst_mapper/mapper.py
|
nigelmathes/worlds-worst-serverless
| 0
|
12780265
|
<reponame>nigelmathes/worlds-worst-serverless
try:
import unzip_requirements
except ImportError:
pass
import json
from typing import Dict, Any
from fuzzywuzzy import process
try:
from guidelines import ACTIONS_MAP
except ImportError:
from .guidelines import ACTIONS_MAP
LambdaDict = Dict[str, Any]
def get_matching_action(event: LambdaDict, context: LambdaDict) -> LambdaDict:
"""
Function to receive an action and find the closest matching action in
    the ACTIONS_MAP dictionary.
:param event: Input AWS Lambda event dict
:param context: Input AWS Lambda context dict
:return: Function name corresponding to the best matching action
"""
# Decode the request
request_body = event.get("body")
if type(request_body) == str:
request_body = json.loads(request_body)
command_to_match = request_body["action"]
possible_actions = ACTIONS_MAP.keys()
matched_action = process.extractOne(command_to_match, possible_actions)
function_to_execute = ACTIONS_MAP[matched_action[0]]
result = {
"statusCode": 200,
"body": function_to_execute,
"headers": {"Access-Control-Allow-Origin": "*"},
}
print(f"Sending response: {result}")
return result
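# Illustrative local invocation (a minimal sketch, not part of the original handler;
# the sample action string is hypothetical). process.extractOne simply returns whichever
# key in ACTIONS_MAP is closest to the supplied action.
if __name__ == "__main__":
    sample_event = {"body": json.dumps({"action": "atack"})}
    print(get_matching_action(sample_event, context={}))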
| 2.390625
| 2
|
src/command_line.py
|
jpope8/container-escape-dataset
| 0
|
12780266
|
"""
Utility to execute command line processes.
"""
import subprocess
import os
import sys
import re
def execute( command ):
"""
Convenience function for executing commands as though
from the command line. The command is executed and the
results are returned as str list. For example,
command = "/usr/bin/git commit -m 'Fixes a bug.'".
Parameters
----------
command : str
command with single space between terms
Return
------
list of the output : str
"""
args = command.split()
# using the Popen function to execute the
# command and store the result in temp.
# it returns a tuple that contains the
# data and the error if any.
outputStreams = subprocess.Popen(args, stdout = subprocess.PIPE)
# we use the communicate function
# to fetch the output
stdout_data, stderr_data = outputStreams.communicate()
# https://docs.python.org/3/library/subprocess.html#subprocess.Popen.communicate
# communicate() returns a tuple (stdout_data, stderr_data)
#print('stdout_data: ' + str( stdout_data ) )
#print('stderr_data: ' + str( stderr_data ) )
# splitting the output so that
# we can parse them line by line
#print( 'COMMAND_LINE: ' + str( type(stdout_data) ) )
#output = stdout_data.split("\n")
# Issue: python2 stdout_data is a str (or str-like)
    # python3 stdout_data is bytes and cannot be directly split
# Solution: convert to str using decode, check type instead of version check
# NB: If not done correctly the system logging fails and pollutes the audit log!!!
# type=SYSCALL msg=audit(1630261099.364:78511): arch=40000003 syscall=295 success=no exit=-13
# ... comm="python3" exe="/usr/bin/python3.7" subj==unconfined key="access"
# type=CWD msg=audit(1630261099.364:78511): cwd="/home/pi/container-escape-dataset/src"
#
#output = re.split('\n', str(stdout_data) )
output = None
#print( 'COMMAND_LINE: ' + str(type(stdout_data)) )
if( isinstance(stdout_data, str) ):
# this is python2 behaviour, stdout_data is str
output = stdout_data.split("\n")
else:
# this is python3 behaviour, stdout_data is bytes
output = re.split('\n', stdout_data.decode('utf-8') )
# a variable to store the output
result = []
# iterate through the output
# line by line
for line in output:
#print('LINE: ' + line)
result.append(line)
return result
#
# Test main function
#
def main():
command = sys.argv[1]
result = execute(command)
for line in result:
print(line)
if __name__ == '__main__':
main()
| 3.921875
| 4
|
Dynamic_Programming/514.Paint Fence/Solution.py
|
Zhenye-Na/LxxxCode
| 12
|
12780267
|
class Solution:
"""
@param n: non-negative integer, n posts
@param k: non-negative integer, k colors
@return: an integer, the total number of ways
"""
def numWays(self, n, k):
# write your code here
if n == 1:
return k
if n == 2:
return k * k
if k == 1:
return 0
# initialization
# f[i] represents the number of ways to paint first i posts
f = [0 for _ in range(n + 1)]
f[0] = 0
f[1] = k
f[2] = k * k
# function: (k - 1) * f[i - 1] + (k - 1) * f[i - 2]
for i in range(3, n + 1):
f[i] = (k - 1) * f[i - 1] + (k - 1) * f[i - 2]
return f[-1]
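# Quick sanity check (illustrative, not part of the original template): with n = 3 posts
# and k = 2 colors the recurrence gives f[3] = (k-1)*f[2] + (k-1)*f[1] = 1*4 + 1*2 = 6.
# print(Solution().numWays(3, 2))  # expected output: 6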
| 3.625
| 4
|
infoblox_netmri/api/remote/models/auth_user_remote.py
|
NastyaArslanova/infoblox-netmri
| 0
|
12780268
|
<reponame>NastyaArslanova/infoblox-netmri
from ..remote import RemoteModel
class AuthUserRemote(RemoteModel):
"""
The defined NetMRI users.
| ``id:`` The internal NetMRI identifier for this user.
| ``attribute type:`` number
| ``user_name:`` The user's login name.
| ``attribute type:`` string
| ``password:`` The user's password (required for local authentication).
| ``attribute type:`` string
| ``email:`` The user's email address.
| ``attribute type:`` string
| ``notes:`` Notes on the user, as entered by the administrator.
| ``attribute type:`` string
| ``created_at:`` The date and time the record was initially created in NetMRI.
| ``attribute type:`` datetime
| ``updated_at:`` The date and time the record was last modified in NetMRI.
| ``attribute type:`` datetime
| ``first_name:`` The user's first name.
| ``attribute type:`` string
| ``last_name:`` The user's last name.
| ``attribute type:`` string
| ``is_system:`` A flag indicating whether this is a built-in user. Built-in users cannot be removed.
| ``attribute type:`` number
    | ``last_login:`` The date and time this user last logged into the NetMRI.
| ``attribute type:`` datetime
| ``expiration:`` The expiration date for this user's password.
| ``attribute type:`` datetime
| ``consecutive_failed_logins:`` The number of failed logins since the last successful login.
| ``attribute type:`` number
| ``account_locked:`` A flag indicating whether or not this account is locked due to failed login attempts.
| ``attribute type:`` number
| ``account_locked_date:`` The date and time that the user's account was locked.
| ``attribute type:`` datetime
| ``account_disabled:`` A flag indicating whether this user's account has been administratively disabled.
| ``attribute type:`` number
| ``account_disabled_date:`` The date and time that the user's account was disabled.
| ``attribute type:`` datetime
| ``cli_creds_enabled_ind:`` A flag indicating whether or not to use this user's individual CLI credentials for device interaction.
| ``attribute type:`` bool
| ``password_secure:`` Internal representation of password
| ``attribute type:`` string
| ``password_version:`` version of encryption used to encrypt password
| ``attribute type:`` number
| ``cli_user_name_secure:`` The user's network device user name.
| ``attribute type:`` string
| ``cli_password_secure:`` The user's network device password.
| ``attribute type:`` string
| ``cli_enable_password_secure:`` The user's network device privileged mode password.
| ``attribute type:`` string
| ``secure_version:`` The encryption version of the username and passwords.
| ``attribute type:`` number
| ``auth_service_id:`` The id of the last authentication service where this user was authenticated.
| ``attribute type:`` number
| ``force_local_ind:`` A flag indicating whether user is forced to use local authorisation or not.
| ``attribute type:`` bool
| ``last_local_authz_ind:`` The source where the last authorization came from. May be 0 - Remote, 1 - Local, 2 - Forced Local
| ``attribute type:`` number
    | ``cert:`` Client certificate stored on successful client authorization when CAC is enabled.
| ``attribute type:`` string
| ``db_username:`` Username for MySQL Database.
| ``attribute type:`` string
| ``db_password_secure:`` Password for MySQL Database.
| ``attribute type:`` string
| ``db_creds_enabled_ind:`` A flag which indicates that the user has database credentials enabled.
| ``attribute type:`` bool
"""
properties = ("id",
"user_name",
"password",
"email",
"notes",
"created_at",
"updated_at",
"first_name",
"last_name",
"is_system",
"last_login",
"expiration",
"consecutive_failed_logins",
"account_locked",
"account_locked_date",
"account_disabled",
"account_disabled_date",
"cli_creds_enabled_ind",
"password_secure",
"password_version",
"cli_user_name_secure",
"cli_password_secure",
"cli_enable_password_secure",
"secure_version",
"auth_service_id",
"force_local_ind",
"last_local_authz_ind",
"cert",
"db_username",
"db_password_secure",
"db_creds_enabled_ind",
)
| 2.25
| 2
|
src/admin_panel/__init__.py
|
sahilsehgal1995/lenme-api
| 0
|
12780269
|
<reponame>sahilsehgal1995/lenme-api
from src.user.schemas import *
from src.products.schemas import *
| 1.03125
| 1
|
Products/PloneGetPaid/Extensions/plugin.py
|
collective/Products.PloneGetPaid
| 2
|
12780270
|
<filename>Products/PloneGetPaid/Extensions/plugin.py
def install_ups( self ):
from getpaid.ups import plugin
plugin.UPSPlugin( self ).install()
return "installed ups"
def install_warehouse( self ):
from getpaid.warehouse import plugin
plugin.WarehousePlugin( self ).install()
return "warehouse installed"
| 1.789063
| 2
|
test/py/test4.py
|
mischareitsma/json2dataclass
| 0
|
12780271
|
from dataclasses import dataclass
from typing import Union
@dataclass
class root:
"""root dataclass"""
layerOne: object
@dataclass
class root_layerOne:
"""root_layerOne dataclass"""
layerTwo: object
@dataclass
class root_layerOne_layerTwo:
"""root_layerOne_layerTwo dataclass"""
layerThree: list[object]
@dataclass
class root_layerOne_layerTwo_layerThree:
"""root_layerOne_layerTwo_layerThree dataclass"""
layerFour: object
@dataclass
class root_layerOne_layerTwo_layerThree_layerFour:
"""root_layerOne_layerTwo_layerThree_layerFour dataclass"""
_finally: str
| 2.671875
| 3
|
update_contest.py
|
pratikgk45/Auto-Test-Case-Checker
| 1
|
12780272
|
import sqlite3
import sys
import requests
from lxml import html
from lxml import etree
from bs4 import BeautifulSoup
def get_contest_info(contest_id):
contest_info = {}
url = "https://codeforces.com/contest/"+contest_id
response = requests.get(url)
if response.status_code != 200:
sys.exit(0)
html_content = html.document_fromstring(response.content)
node=etree.tostring(html_content).decode('utf-8')
soup = BeautifulSoup(node,'lxml')
contest_info['contest_name'] = soup.findAll("th", {"class" : "left"})[0].text
node = html_content.find_class("problems")[0]
node=etree.tostring(html_content).decode('utf-8')
soup = BeautifulSoup(node,'lxml')
for i in soup.findAll("tr"):
try:
problem_id = i.a.text.replace(" ","").replace("\r","").replace("\n","")
problem_name = i.div.div.a.text.replace("\r","").replace("\n","")
contest_info[problem_id] = problem_name
except:
pass
return contest_info
conn = sqlite3.connect('sqlite.db')
cur = conn.cursor()
contest_id = (sys.argv)[1]
problem_list_1 = (sys.argv)[2:]
problem_list_2 = list(cur.execute('SELECT problem_id FROM problems WHERE contest_id = '+str(contest_id)).fetchall())
problem_list_2 = [i[0] for i in problem_list_2]
problem_list = list(set(problem_list_1) - set(problem_list_2))
problem_list.sort()
contest_info = get_contest_info(contest_id)
cur.execute('SELECT * FROM contests WHERE contest_id = '+ str(contest_id))
rows = cur.fetchall()
if len(rows) :
cur.execute('UPDATE contests SET update_time = DATETIME() WHERE contest_id = '+str(contest_id))
else:
cur.execute('INSERT INTO contests(contest_id, contest_name, update_time) VALUES ('+str(contest_id)+', "'+contest_info['contest_name']+'", DATETIME("now"))')
conn.commit()
for problem in problem_list:
cur.execute('INSERT INTO problems(contest_id, contest_name, problem_id, problem_name) VALUES('+str(contest_id)+', "'+contest_info['contest_name']+'", "'+problem+'", "'+contest_info[problem]+'")')
conn.commit()
conn.close()
| 3.109375
| 3
|
dao/permissao_dao.py
|
lucianoanjos02/Choco-Stock-System-CSS
| 0
|
12780273
|
<gh_stars>0
from database import db_session
from models.permissao import Permissao
class PermissaoDAO:
    '''
    PermissaoDAO CLASS - IMPLEMENTS THE DATABASE ACCESS RELATED TO THE Permissao CLASS
    OF THE models.py MODULE, WHICH MAPS THE TPermissao TABLE
    @author: <NAME> -
    @date: 07/08/2020 -
    @version: 1.0.0
    '''
def __init__(self, db):
self.__db = db_session
def get_permissoes(self):
        '''
        METHOD THAT RETURNS THE USER PERMISSIONS REGISTERED IN THE DATABASE
        @author: <NAME> -
        @date: 09/08/2020 -
        @version: 1.0.0
        '''
dados_permissoes = self.__db.query(Permissao).all()
permissoes = []
for permissao in dados_permissoes:
permissoes.append(permissao.permissao)
self.__db.expunge_all()
self.__db.close()
return permissoes
def get_permissao(self, id_permissao):
permissao = self.__db.query(Permissao.permissao).filter(Permissao.id_permissao == id_permissao).first()
self.__db.expunge_all()
self.__db.close()
return permissao
def get_id_permissao(self, permissao):
id_permissao = self.__db.query(Permissao.id_permissao).filter(Permissao.permissao == permissao).first()
self.__db.expunge_all()
self.__db.close()
return id_permissao
| 3.015625
| 3
|
src/utils_tensorflow.py
|
takumiw/nishika-cable-classification-1st-place
| 0
|
12780274
|
import os
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras.models import Model
def plot_model(model: Model, path: str) -> None:
if not os.path.isfile(path):
keras.utils.plot_model(model, to_file=path, show_shapes=True)
def plot_learning_history(fit, metric: str = "accuracy", path: str = "history.png") -> None:
"""Plot learning curve
Args:
fit (Any): History object
path (str, default="history.png")
"""
fig, (axL, axR) = plt.subplots(ncols=2, figsize=(10, 4))
axL.plot(fit.history["loss"], label="train")
axL.plot(fit.history["val_loss"], label="validation")
axL.set_title("Loss")
axL.set_xlabel("epoch")
axL.set_ylabel("loss")
axL.legend(loc="upper right")
axR.plot(fit.history[metric], label="train")
axR.plot(fit.history[f"val_{metric}"], label="validation")
axR.set_title(metric.capitalize())
axR.set_xlabel("epoch")
axR.set_ylabel(metric)
axR.legend(loc="best")
fig.savefig(path)
plt.close()
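# Example usage (a minimal sketch; `model`, the training data and the file names are
# assumptions, not part of this module):
#
#     history = model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=10)
#     plot_model(model, "model.png")
#     plot_learning_history(history, metric="accuracy", path="history.png")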
| 3.046875
| 3
|
tests/features/test_bootloader_subcmd_creator.py
|
keaparrot/secbootctl
| 0
|
12780275
|
<reponame>keaparrot/secbootctl
import unittest
from tests import unittest_helper
class TestBootloaderSubcmdCreatorController(unittest_helper.SubCmdCreatorTestCase):
FEATURE_NAME: str = 'bootloader'
SUBCOMMAND_DATA: list = [
{'name': 'bootloader:install', 'help_message': 'install bootloader (systemd-boot)'},
{'name': 'bootloader:update', 'help_message': 'update bootloader (systemd-boot)'},
{'name': 'bootloader:remove', 'help_message': 'remove bootloader (systemd-boot)'},
{'name': 'bootloader:status', 'help_message': 'show bootloader status (systemd-boot)'},
{'name': 'bootloader:update-menu', 'help_message': 'update bootloader menu'}
]
if __name__ == '__main__':
unittest.main()
| 2.25
| 2
|
tests/inventory/pipelines/test_data/fake_groups.py
|
pombredanne/forseti-security
| 1
|
12780276
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test group data."""
FAKE_GROUPS = [
{
'nonEditableAliases': ['<EMAIL>'],
'kind': 'admin#directory#group',
'name': 'aaaaa',
'adminCreated': True,
'directMembersCount': '1',
'email': '<EMAIL>',
'etag': '"pCd5iosDe_tWdPv4ke8sAYzlGK8/oWZC62Ysx9kAKLlW23uoKQlYu3k"',
'id': '11111',
'description': ''
},
{
'nonEditableAliases': ['<EMAIL>'],
'kind': 'admin#directory#group',
'name': 'bbbbb',
'adminCreated': False,
'directMembersCount': '1',
'email': '<EMAIL>',
'etag': '"pCd5iosDe_tWdPv4ke8sAYzlGK8/cglP2U9YgiKA9zjJ-DvxjotnaLU"',
'id': '22222',
'description': ''
},
{
'nonEditableAliases': ['<EMAIL>'],
'kind': 'admin#directory#group',
'name': '<NAME>',
'adminCreated': True,
'directMembersCount': '4',
'email': '<EMAIL>',
'etag': '"pCd5iosDe_tWdPv4ke8sAYzlGK8/kQ2NdfLnWQTiAs-FCSEKJRaipxw"',
'id': '33333',
'description': 'Members of this group will be allowed to perform bar.'
}
]
EXPECTED_LOADABLE_GROUPS = [
{
'group_id': '11111',
'group_email': '<EMAIL>',
'group_kind': 'admin#directory#group',
'direct_member_count': '1',
'raw_group': '{"nonEditableAliases": ["<EMAIL>"], "kind": "admin#directory#group", "etag": "\\"pCd5iosDe_tWdPv4ke8sAYzlGK8/oWZC62Ysx9kAKLlW23uoKQlYu3k\\"", "name": "aaaaa", "adminCreated": true, "description": "", "directMembersCount": "1", "email": "<EMAIL>", "id": "11111"}',
},
{
'group_id': '22222',
'group_email': '<EMAIL>',
'group_kind': 'admin#directory#group',
'direct_member_count': '1',
'raw_group': '{"nonEditableAliases": ["<EMAIL>"], "kind": "admin#directory#group", "etag": "\\"pCd5iosDe_tWdPv4ke8sAYzlGK8/cglP2U9YgiKA9zjJ-DvxjotnaLU\\"", "name": "bbbbb", "adminCreated": false, "description": "", "directMembersCount": "1", "email": "<EMAIL>", "id": "22222"}',
},
{
'group_id': '33333',
'group_email': '<EMAIL>',
'group_kind': 'admin#directory#group',
'direct_member_count': '4',
'raw_group': '{"nonEditableAliases": ["<EMAIL>"], "kind": "admin#directory#group", "etag": "\\"pCd5iosDe_tWdPv4ke8sAYzlGK8/kQ2NdfLnWQTiAs-FCSEKJRaipxw\\"", "name": "CCCCC Users", "adminCreated": true, "description": "Members of this group will be allowed to perform bar.", "directMembersCount": "4", "email": "<EMAIL>", "id": "33333"}',
}
]
| 1.234375
| 1
|
rs/Database/mackinac/test/test_workspace.py
|
stevenirby/RetSynth
| 3
|
12780277
|
import pytest
import mackinac
@pytest.fixture(scope='module')
def test_model(b_theta_genome_id, b_theta_id):
# Reconstruct a model so there is a folder in the workspace.
stats = mackinac.create_patric_model(b_theta_genome_id, model_id=b_theta_id)
yield stats
mackinac.delete_patric_model(b_theta_id)
@pytest.fixture(scope='module')
def test_file():
return '/{0}/modelseed/emergency'.format(mackinac.workspace.ws_client.username)
@pytest.fixture(scope='module')
def test_file_data():
return 'This is a test of the emergency broadcasting system.'
@pytest.fixture(scope='module')
def bad_reference():
return '/{0}/modelseed/badref'.format(mackinac.workspace.ws_client.username)
@pytest.mark.usefixtures('authenticate')
class TestWorkspace:
# Remember these tests are calling a server and can take a while depending on the network
# and how busy the server is servicing other requests.
def test_list_objects(self, test_model):
output = mackinac.list_workspace_objects(test_model['ref'])
assert len(output) == 13
assert len(output[0]) == 12
def test_list_objects_by_name(self, test_model):
output = mackinac.list_workspace_objects(test_model['ref'], sort_key='name')
assert len(output) == 13
assert output[0][0] == '{0}.cpdtbl'.format(test_model['id'])
def test_list_objects_by_type(self, test_model):
output = mackinac.list_workspace_objects(test_model['ref'], sort_key='type')
assert len(output) == 13
assert output[4][1] == 'genome'
assert output[5][1] == 'model'
def test_list_objects_bad_folder(self):
# This fails because there is no leading forward slash.
bad_reference = '{0}/modelseed/badref'.format(mackinac.workspace.ws_client.username)
with pytest.raises(mackinac.SeedClient.ObjectNotFoundError):
mackinac.list_workspace_objects(bad_reference)
def test_list_objects_no_exist_folder(self):
no_exist_reference = '/{0}/modelseed/badref'.format(mackinac.workspace.ws_client.username)
output = mackinac.list_workspace_objects(no_exist_reference)
assert output is None
def test_list_objects_bad_sort_key(self, test_model):
with pytest.raises(KeyError):
mackinac.list_workspace_objects(test_model['ref'], sort_key='foobar')
def test_get_object_meta(self, test_model):
output = mackinac.get_workspace_object_meta(test_model['ref'])
assert len(output) == 12
assert output[0] == '.' + test_model['id']
assert output[1] == 'folder'
assert output[8]['is_folder'] == 1
def test_get_object_meta_bad_ref(self):
bad_reference = '{0}/modelseed/badref'.format(mackinac.workspace.ws_client.username)
with pytest.raises(mackinac.SeedClient.ObjectNotFoundError):
mackinac.get_workspace_object_meta(bad_reference)
def test_get_object_data_json(self, test_model):
reference = '{0}/model'.format(test_model['ref'])
output = mackinac.get_workspace_object_data(reference)
assert output['id'] == '.' + test_model['id']
assert len(output['modelcompartments']) == 2
assert 'modelcompounds' in output
assert 'modelreactions' in output
def test_get_object_data_text(self, test_model):
reference = '{0}/{1}.rxntbl'.format(test_model['ref'], test_model['id'])
output = mackinac.get_workspace_object_data(reference, json_data=False)
assert len(output) > 100000 # Just a really long string
def test_get_object_data_bad_ref(self, bad_reference):
with pytest.raises(mackinac.SeedClient.ObjectNotFoundError):
mackinac.get_workspace_object_data(bad_reference)
def test_put_object_no_data(self, test_file):
output = mackinac.put_workspace_object(test_file, 'string')
assert output[0] == 'emergency'
assert output[1] == 'string'
assert output[6] == 0
assert len(output[7]) == 0
def test_put_object_meta(self, test_file, b_theta_id):
output = mackinac.put_workspace_object(test_file, 'string', metadata={'model': b_theta_id}, overwrite=True)
assert output[0] == 'emergency'
assert output[1] == 'string'
assert output[6] == 0
assert len(output[7]) == 1
def test_put_object_data(self, test_file, test_file_data, b_theta_id):
output = mackinac.put_workspace_object(test_file, 'string', data=test_file_data,
metadata={'model': b_theta_id}, overwrite=True)
assert output[0] == 'emergency'
assert output[1] == 'string'
assert output[6] == len(test_file_data)
assert len(output[7]) == 1
def test_delete_object(self, test_file):
output = mackinac.delete_workspace_object(test_file)
assert output[0] == 'emergency'
def test_delete_object_bad_ref(self, bad_reference):
with pytest.raises(mackinac.SeedClient.ObjectNotFoundError):
mackinac.delete_workspace_object(bad_reference)
| 2.03125
| 2
|
pragfastapi/web_server_security.py
|
skillplot/pragfastapi
| 0
|
12780278
|
<filename>pragfastapi/web_server_security.py
## Copyright (c) 2020 mangalbhaskar.
"""FastAPI Security
https://fastapi.tiangolo.com/tutorial/security/first-steps/
https://fastapi.tiangolo.com/advanced/behind-a-proxy
https://fastapi.tiangolo.com/tutorial/security/get-current-user/
https://en.wikipedia.org/wiki/Security_through_obscurity
"""
__author__ = 'mangalbhaskar'
__credit__ = 'fastapi.tiangolo.com'
from fastapi import Depends, FastAPI
from fastapi.security import OAuth2PasswordBearer
app = FastAPI()
## When we create an instance of the OAuth2PasswordBearer class we pass in the tokenUrl
## parameter. This parameter contains the URL that the client (the frontend running in
## the user's browser) will use to send the username and password in order to get a token.
## Here tokenUrl="token" refers to a relative URL token that we haven't created yet. As it is
## a relative URL, it is equivalent to ./token: if your API was located at
## https://example.com/, it would refer to https://example.com/token, and if your API was
## located at https://example.com/api/v1/, it would refer to https://example.com/api/v1/token.
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
## It will go and look in the request for that Authorization header, check if the value is Bearer plus some token, and will return the token as a str.
## If it doesn't see an Authorization header, or the value doesn't have a Bearer token, it will respond with a 401 status code error (UNAUTHORIZED) directly.
@app.get("/items/")
async def read_items(token: str = Depends(oauth2_scheme)):
return {"token": token}
| 3.15625
| 3
|
scripts/frustumPP_results.py
|
anshulpaigwar/Frustum-Pointpillars
| 12
|
12780279
|
<reponame>anshulpaigwar/Frustum-Pointpillars
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pathlib
import sys
path_model = "/home/anshul/es3cap/codes/pointpillars/second.pytorch/"
sys.path.append(path_model)
#print sys.path
from pathlib import Path
import glob
import os
#print os.getcwd()
import time
import numpy as np
import math
import json
from second.pytorch.inference import TorchInferenceContext
import second.core.box_np_ops as box_np_ops
from second.utils.progress_bar import ProgressBar
import ipdb as pdb
# map axes strings to/from tuples of inner axis, parity, repetition, frame
_AXES2TUPLE = {
'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
# axis sequences for Euler angles
_NEXT_AXIS = [1, 2, 0, 1]
def remove_low_score(image_anno, thresh):
img_filtered_annotations = {}
relevant_annotation_indices = [
i for i, s in enumerate(image_anno['score']) if s >= thresh
]
for key in image_anno.keys():
img_filtered_annotations[key] = (
image_anno[key][relevant_annotation_indices])
return img_filtered_annotations
def remove_dontcare(image_anno):
img_filtered_annotations = {}
relevant_annotation_indices = [
i for i, x in enumerate(image_anno['name']) if x != "DontCare"
]
for key in image_anno.keys():
img_filtered_annotations[key] = (
image_anno[key][relevant_annotation_indices])
return img_filtered_annotations
class Settings:
def __init__(self, cfg_path):
self._cfg_path = cfg_path
self._settings = {}
self._setting_defaultvalue = {}
if not Path(self._cfg_path).exists():
with open(self._cfg_path, 'w') as f:
f.write(json.dumps(self._settings, indent=2, sort_keys=True))
else:
with open(self._cfg_path, 'r') as f:
self._settings = json.loads(f.read())
def set(self, name, value):
self._settings[name] = value
with open(self._cfg_path, 'w') as f:
f.write(json.dumps(self._settings, indent=2, sort_keys=True))
def get(self, name, default_value=None):
if name in self._settings:
return self._settings[name]
if default_value is None:
raise ValueError("name not exist")
return default_value
def save(self, path):
with open(path, 'w') as f:
f.write(json.dumps(self._settings, indent=2, sort_keys=True))
def load(self, path):
with open(self._cfg_path, 'r') as f:
self._settings = json.loads(f.read())
class Processor_ROS:
def __init__(self, config_path, ckpt_path, result_path, class_names):
self.points = None
self.json_setting = Settings(str('/home/anshul/es3cap/codes/pointpillars/' + ".kittiviewerrc"))
# self.config_path = self.json_setting.get("latest_vxnet_cfg_path", "")
self.config_path = config_path
self.ckpt_path = ckpt_path
self.result_path = result_path
self.image_info = None
self.inputs = None
self.inference_ctx = None
self.class_names = class_names
def initialize(self):
# self.read_calib()
self.build_vxnet()
self.load_vxnet()
def _extend_matrix(self, mat):
mat = np.concatenate([mat, np.array([[0., 0., 0., 1.]])], axis=0)
return mat
def get_label_anno(self, label_path):
annotations = {}
annotations.update({
'name': [],
'bbox': []
})
with open(label_path, 'r') as f:
lines = f.readlines()
# if len(lines) == 0 or len(lines[0]) < 15:
# content = []
# else:
content = [line.strip().split(' ') for line in lines]
num_objects = len([x[0] for x in content if x[0] != 'DontCare'])
annotations['name'] = np.array([x[0] for x in content])
num_gt = len(annotations['name'])
annotations['bbox'] = np.array(
[[float(info) for info in x[4:8]] for x in content]).reshape(-1, 4)
if len(content) != 0 and len(content[0]) == 16: # have score
annotations['score'] = np.array([float(x[15]) for x in content])
else:
annotations['score'] = np.zeros((annotations['bbox'].shape[0], ))
index = list(range(num_objects)) + [-1] * (num_gt - num_objects)
annotations['index'] = np.array(index, dtype=np.int32)
annotations['group_ids'] = np.arange(num_gt, dtype=np.int32)
return annotations
def get_info(self, idx, data_path, label_info =True, calib = True, extend_matrix=True):
image_info = {'image_idx': idx, 'pointcloud_num_features': 4}
annotations = None
if label_info:
label_path = data_path / "label_2" / ('%06d.txt' % idx)
annotations = self.get_label_anno(str(label_path))
# annotations = remove_low_score(annotations, 0.5)
annotations = remove_dontcare(annotations)
if calib:
calib_path = data_path / "calib"/ ('%06d.txt' % idx)
with open(str(calib_path), 'r') as f:
lines = f.readlines()
# P0 = np.array(
# [float(info) for info in lines[0].split(' ')[1:13]]).reshape(
# [3, 4])
# P1 = np.array(
# [float(info) for info in lines[1].split(' ')[1:13]]).reshape(
# [3, 4])
P2 = np.array(
[float(info) for info in lines[2].split(' ')[1:13]]).reshape(
[3, 4])
# P3 = np.array(
# [float(info) for info in lines[3].split(' ')[1:13]]).reshape(
# [3, 4])
if extend_matrix:
# P0 = self._extend_matrix(P0)
# P1 = self._extend_matrix(P1)
P2 = self._extend_matrix(P2)
# P3 = self._extend_matrix(P3)
# image_info['calib/P0'] = P0
# image_info['calib/P1'] = P1
image_info['calib/P2'] = P2
# image_info['calib/P3'] = P3
R0_rect = np.array([
float(info) for info in lines[4].split(' ')[1:10]
]).reshape([3, 3])
if extend_matrix:
rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype)
rect_4x4[3, 3] = 1.
rect_4x4[:3, :3] = R0_rect
else:
rect_4x4 = R0_rect
image_info['calib/R0_rect'] = rect_4x4
Tr_velo_to_cam = np.array([
float(info) for info in lines[5].split(' ')[1:13]
]).reshape([3, 4])
Tr_imu_to_velo = np.array([
float(info) for info in lines[6].split(' ')[1:13]
]).reshape([3, 4])
if extend_matrix:
Tr_velo_to_cam = self._extend_matrix(Tr_velo_to_cam)
Tr_imu_to_velo = self._extend_matrix(Tr_imu_to_velo)
image_info['calib/Tr_velo_to_cam'] = Tr_velo_to_cam
# add image shape info for lidar point cloud preprocessing
image_info["img_shape"] = np.array([375, 1242]) # kitti image size: height, width
if annotations is not None:
image_info['annos'] = annotations
# self.image_info = image_info
return image_info
def build_vxnet(self):
print("Start build_vxnet...")
self.inference_ctx = TorchInferenceContext()
self.inference_ctx.build(self.config_path)
self.json_setting.set("latest_vxnet_cfg_path", self.config_path)
print("Build VoxelNet ckpt succeeded.")
self.inference_ctx.result_path = self.result_path
def load_vxnet(self):
print("Start load_vxnet...")
self.json_setting.set("latest_vxnet_ckpt_path", self.ckpt_path)
self.inference_ctx.restore(self.ckpt_path)
print("Load VoxelNet ckpt succeeded.")
def run(self, idx, data_path, points):
image_info = self.get_info(idx, data_path)
rect = image_info['calib/R0_rect']
P2 = image_info['calib/P2']
Trv2c = image_info['calib/Tr_velo_to_cam']
image_shape = image_info['img_shape']
annos = image_info['annos']
ref_names = annos["name"]
ref_boxes_mask = np.array([n in self.class_names for n in ref_names], dtype=np.bool_)
        if ref_boxes_mask.any():
points = box_np_ops.remove_outside_points(points, rect, Trv2c, P2, image_shape)
self.inputs = self.inference_ctx.get_inference_input_dict_ros_2(image_info, points, frustum_pp = True, add_points_to_example = False)
with self.inference_ctx.ctx():
self.inference_ctx.inference(self.inputs)
else:
print('creating empty file %06d.txt'% idx)
file_name = self.result_path + '/' + '%06d.txt' % idx
f = open(file_name, 'a+') # open file in append mode
f.close()
def KittiDataset(root, set):
global proc
data_path = pathlib.Path(root) / set
lidar_path = data_path / "velodyne"
# image_path = data_path / "image_2"
# calib_path = data_path / "calib"
# label_path = data_path / "label_2"
list = os.listdir(lidar_path) # dir is your directory path
prog_bar = ProgressBar()
prog_bar.start(len(list))
for idx in range(len(list)):
lidar_file = lidar_path / ('%06d.bin' % idx)
# image_file = image_path / ('%06d.png' % ids)
# calib_file = calib_path / ('%06d.txt' % ids)
# label_file = label_path / ('%06d.txt' % ids)
cloud = np.fromfile(str(lidar_file), dtype=np.float32).reshape(-1, 4)
# image = cv2.imread(str(image_file))
# start processing
proc.run(idx, data_path, cloud)
prog_bar.print_bar()
if __name__ == '__main__':
global proc
# initializing Pointpillars
config_path = '/home/anshul/es3cap/my_codes/frustum_pp/second.pytorch/second/configs/pointpillars/ped_cycle/xyres_16.proto'
ckpt_path = '/home/anshul/es3cap/my_codes/frustum_pp/second.pytorch/second/ckpt/frustum_pp_ped/voxelnet-261559.tckpt'
result_path = "/home/anshul/results"
class_names = ['Pedestrian']
# config_path = '/home/anshul/es3cap/my_codes/frustum_pp/second.pytorch/second/configs/pointpillars/car/xyres_16.proto'
# ckpt_path = '/home/anshul/es3cap/my_codes/frustum_pp/second.pytorch/second/ckpt/frustum_pp_car/voxelnet-271305.tckpt'
proc = Processor_ROS(config_path, ckpt_path, result_path, class_names)
proc.initialize()
KittiDataset(root = "/home/anshul/es3cap/kitti_data/", set = "training")
| 1.96875
| 2
|
mobile/urls.py
|
felixyin/qdqtrj_website
| 0
|
12780280
|
#!/usr/bin/env python
# encoding: utf-8
"""
@version: ??
@author: liangliangyy
@license: MIT Licence
@contact: <EMAIL>
@site: https://www.lylinux.net/
@software: PyCharm
@file: urls.py
@time: 2016/11/2 7:15 PM
"""
from django.urls import path
from django.views.decorators.cache import cache_page
from website.utils import my_cache
from . import views as v
app_name = "mobile"
urlpatterns = [
path('service', my_cache(v.ServiceListView.as_view), name='service'),
path('service/<int:pk>', my_cache(v.ServiceDetailView.as_view), name='service-detail'),
path('product', my_cache(v.ProductListView.as_view), name='product'),
path('product/<int:pk>', my_cache(v.ProductDetailView.as_view), name='product-detail'),
path('category/<int:category_pk>', my_cache(v.CategoryDetailView.as_view), name='category'),
path('case', my_cache(v.CaseListView.as_view), name='case'),
path('case/<int:pk>', my_cache(v.CaseDetailView.as_view), name='case-detail'),
path('about/<int:pk>', my_cache(v.AboutDetailView.as_view), name='about'),
# path('', v.index, name='index'),
]
| 1.992188
| 2
|
worlds/migrations/0010_alter_job_job_definition.py
|
cognitive-space/warpzone
| 1
|
12780281
|
# Generated by Django 3.2.6 on 2021-08-04 18:09
import django.core.serializers.json
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('worlds', '0009_job_job_definition'),
]
operations = [
migrations.AlterField(
model_name='job',
name='job_definition',
field=models.JSONField(blank=True, encoder=django.core.serializers.json.DjangoJSONEncoder, null=True),
),
]
| 1.65625
| 2
|
solutions/delete_node.py
|
zmatteson/leetcode
| 0
|
12780282
|
"""
Write a function to delete a node (except the tail) in a singly linked list, given only access to that node.
Suppose the linked list is 1 -> 2 -> 3 -> 4 and you are given the third node with value 3; the linked list should become 1 -> 2 -> 4 after calling your function.
Supposing that we were given only the position, we would have to traverse the list
Given 2 as the 2nd node, we would delete 2 in 1 -> 2 -> 3
We do not consider the case where we delete the last node
Assumptions: the value x is in the linked list
We have access to this node!
"""
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
"""
Method 1:
copy the next node over
"""
def print_list(node):
traverse = True
while traverse == True:
print(node.val, " -> ", end="")
if node.next == None:
traverse = False
else:
node = node.next
def deleteNode(node):
"""
:type node: ListNode
:rtype: void Do not return anything, modify node in-place instead.
"""
node.val = node.next.val
node.next = node.next.next
if __name__ == "__main__":
node_a = ListNode(1)
node_b = ListNode(2)
node_c = ListNode(3)
node_d = ListNode(4)
node_a.next = node_b
node_b.next = node_c
node_c.next = node_d
print("The linked list is")
print_list(node_a)
deleteNode(node_a)
print("\nThe list with node_a removed is")
print_list(node_a)
node_a = ListNode(1)
node_b = ListNode(2)
node_a.next = node_b
print("The linked list is")
print_list(node_a)
deleteNode(node_a)
print("\nThe list with node_a removed is")
print_list(node_a)
"""
Given 1 -> 2 we expect 2 ->
"""
| 3.953125
| 4
|
ecosante/recommandations/forms/search.py
|
betagouv/ecosante
| 0
|
12780283
|
<filename>ecosante/recommandations/forms/search.py
from wtforms.fields.core import SelectMultipleField
from ecosante.utils.form import BaseForm, MultiCheckboxField
from wtforms.widgets.html5 import SearchInput
from wtforms.fields import StringField, SelectField
from markupsafe import Markup
from ..models import RECOMMANDATION_FILTERS
class FormSearch(BaseForm):
search = StringField("Recherche", widget=SearchInput())
categories = MultiCheckboxField(
'Catégories',
choices=[
(filter[0], Markup(f'<abbr title="{filter[2]}">{filter[1]}</abbr>'))
for filter in RECOMMANDATION_FILTERS
]
)
status = SelectField(
"Statut",
choices=[
('published', 'Publiée'),
('draft', 'Brouillon'),
('', 'Toutes les recommandations')
],
default='published'
)
objectif = SelectField(
"Objectif",
choices = [
(None, 'Tous les objectifs'),
("", "(sans)"),
("Améliorer l’air intérieur de votre logement", "Améliorer l’air intérieur de votre logement"),
("Contribuer à réduire la pollution de l’air", "Contribuer à réduire la pollution de l’air"),
("Profiter du plein air", "Profiter du plein air")
]
)
type = SelectField(
"Type",
choices = [
(None, 'Tous les types'),
("generale", "Générale"),
("episode_pollution", "Épisode de pollution"),
("pollens", "Pollens"),
("radon", "Radon")
]
)
order = SelectField(
"Ordre",
choices=[
('random', 'Aléatoire'),
('id', 'Chronologique')
]
)
medias = SelectMultipleField(
"Medias",
choices=[
('newsletter_quotidienne', 'Newsletter quotidienne'),
('newsletter_hebdomadaire', 'Newsletter hebdomadaire'),
('widget', 'Widget'),
('dashboard', 'Dashboard')
]
)
| 2.375
| 2
|
src/run/Train_from_scratch.py
|
BruceBinBoxing/Deep_Learning_Weather_Forecasting
| 53
|
12780284
|
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
import netCDF4 as nc
import pickle as pk
import pandas as pd
import datetime
import os
import numpy as np
import sys
src_dir = os.path.join(os.getcwd(), 'src/data')
sys.path.append(src_dir)
from helper import save_pkl, load_pkl, min_max_norm
src_dir = os.path.join(os.getcwd(), 'src/models')
sys.path.append(src_dir)
# from competition_model_class import Seq2Seq_Class # during Debug and Developing
from seq2seq_class import Seq2Seq_Class # during the game and competition
def train(processed_path, train_data, val_data, model_save_path, model_name):
train_dict = load_pkl(processed_path, train_data)
val_dict = load_pkl(processed_path, val_data)
print(train_dict.keys())
print('Original input_obs data shape:')
print(train_dict['input_obs'].shape)
print(val_dict['input_obs'].shape)
print('After clipping the 9 days, input_obs data shape:')
train_dict['input_obs'] = train_dict['input_obs'][:,:-9,:,:]
val_dict['input_obs'] = val_dict['input_obs'][:,:-9,:,:]
print(train_dict['input_obs'].shape)
print(val_dict['input_obs'].shape)
enc_dec = Seq2Seq_Class(model_save_path=model_save_path,
model_structure_name=model_name,
model_weights_name=model_name,
model_name=model_name)
enc_dec.build_graph()
val_size=val_dict['input_ruitu'].shape[0] # 87 val samples
val_ids=[]
val_times=[]
for i in range(10):
val_ids.append(np.ones(shape=(val_size,37))*i)
val_ids = np.stack(val_ids, axis=-1)
print('val_ids.shape is:', val_ids.shape)
val_times = np.array(range(37))
val_times = np.tile(val_times,(val_size,1))
print('val_times.shape is:',val_times.shape)
enc_dec.fit(train_dict['input_obs'], train_dict['input_ruitu'], train_dict['ground_truth'],
val_dict['input_obs'], val_dict['input_ruitu'], val_dict['ground_truth'], val_ids = val_ids, val_times=val_times,
iterations=10000, batch_size=512, validation=True)
print('Training finished!')
@click.command()
@click.argument('processed_path', type=click.Path(exists=True))
@click.option('--train_data', type=str)
@click.option('--val_data', type=str)
@click.argument('model_save_path', type=click.Path(exists=True))
@click.option('--model_name', type=str)
def main(processed_path, train_data, val_data, model_save_path, model_name):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
train(processed_path, train_data, val_data, model_save_path, model_name)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# not used in this stub but often useful for finding various files
project_dir = Path(__file__).resolve().parents[2]
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
main()
| 2.203125
| 2
|
Triage model/running_the_models/prediction_script.py
|
shirangol/Covid19-Survey
| 0
|
12780285
|
import pickle
import numpy as np
import pandas as pd
import shap
import matplotlib.pyplot as pl
shap.initjs()
json_path = "response.json"
model_path = "xgboost_primary_model.pkl"
AGE_GROUP_CUTOFFS = [0, 17, 30, 40, 50, 60, 70, 120]
AGE_GROUPS_TRANSFORMER = {1: 10, 2: 25, 3: 35, 4: 45, 5: 55, 6: 65, 7: 75}
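# pd.cut below buckets ages into [0,17], (17,30], ..., (70,120] and labels each bin
# with the corresponding AGE_GROUPS_TRANSFORMER value, e.g. an age of 20 falls in
# (17, 30] and gets age_group 25.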
AGE_COL = 'age_group'
X_COLS = ["gender", AGE_COL, "condition_any", "symptom_well", "symptom_sore_throat", "symptom_cough",
"symptom_shortness_of_breath", "symptom_smell_or_taste_loss", "symptom_fever"]
def add_age_group(df):
df[AGE_COL] = pd.cut(df['age'], bins=AGE_GROUP_CUTOFFS, labels=AGE_GROUPS_TRANSFORMER.values(), include_lowest=True, right=True)
df[AGE_COL] = df[AGE_COL].astype(int)
return df
def get_prediction(json_path, model_path):
response_df = pd.read_json(json_path, lines=True)
response_df = add_age_group(response_df)
response_df = response_df[X_COLS].sort_index(axis=1)
model = pickle.load(open(model_path, "rb"))
predictions = model.predict_proba(response_df)
predicted_probability = np.round(predictions[:, 1][0], 3)
return predicted_probability
if __name__ == '__main__':
print("The response probability to test positive according to our model is:", get_prediction(json_path, model_path))
model = pickle.load(open('xgboost_primary_model.pkl', "rb"))
explainer = shap.TreeExplainer(model)
data = pd.read_csv('../creating_the_models/primary model.csv')
BASE_MODEL_X_COLS = ['gender', 'age_group']
X_COLS = BASE_MODEL_X_COLS + \
['symptom_well',
'symptom_sore_throat',
'symptom_cough',
'symptom_shortness_of_breath',
'symptom_smell_or_taste_loss',
'symptom_fever',
'condition_any']
X = data[X_COLS].sort_index(axis=1)
y = data['label'].values.ravel()
shap_values = explainer.shap_values(X)
shap.force_plot(explainer.expected_value, shap_values[0, :], X.iloc[0, :])
shap.summary_plot(shap_values, X)
| 2.90625
| 3
|
carstate.py
|
ReFil/Ocelot
| 21
|
12780286
|
from cereal import car
from common.numpy_fast import mean, int_rnd
from opendbc.can.can_define import CANDefine
from selfdrive.car.interfaces import CarStateBase
from opendbc.can.parser import CANParser
from selfdrive.config import Conversions as CV
from selfdrive.car.ocelot.values import CAR, DBC, STEER_THRESHOLD, BUTTON_STATES
class CarState(CarStateBase):
def __init__(self, CP):
super().__init__(CP)
can_define = CANDefine(DBC[CP.carFingerprint]['chassis'])
self.shifter_values = can_define.dv["GEAR_PACKET"]['GEAR']
self.brakeUnavailable = True
self.enabled = False
self.oldEnabled = False
self.oldSpeedUp = False
self.oldSpeedDn = False
self.engineRPM = 0
self.setSpeed = 10
self.buttonStates = BUTTON_STATES.copy()
self.oldButtonStates = BUTTON_STATES.copy()
def update(self, cp, cp_body, enabled):
ret = car.CarState.new_message()
#Car specific information
if self.CP.carFingerprint == CAR.SMART_ROADSTER_COUPE:
ret.doorOpen = False #any([cp_body.vl["BODYCONTROL"]['RIGHT_DOOR'], cp_body.vl["BODYCONTROL"]['LEFT_DOOR']]) != 0
ret.seatbeltUnlatched = False
ret.leftBlinker = bool(cp_body.vl["BODYCONTROL"]['LEFT_SIGNAL'])
ret.rightBlinker = bool(cp_body.vl["BODYCONTROL"]['RIGHT_SIGNAL'])
ret.espDisabled = bool(cp_body.vl["ABS"]['ESP_STATUS'])
ret.wheelSpeeds.fl = cp_body.vl["SMARTROADSTERWHEELSPEEDS"]['WHEELSPEED_FL'] * CV.MPH_TO_MS
ret.wheelSpeeds.fr = cp_body.vl["SMARTROADSTERWHEELSPEEDS"]['WHEELSPEED_FR'] * CV.MPH_TO_MS
ret.wheelSpeeds.rl = cp_body.vl["SMARTROADSTERWHEELSPEEDS"]['WHEELSPEED_RL'] * CV.MPH_TO_MS
ret.wheelSpeeds.rr = cp_body.vl["SMARTROADSTERWHEELSPEEDS"]['WHEELSPEED_RR'] * CV.MPH_TO_MS
can_gear = int(cp_body.vl["GEAR_PACKET"]['GEAR'])
ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(can_gear, None))
self.engineRPM = cp_body.vl["GEAR_PACKET"]["RPM"]
#iBooster data
ret.brakePressed = bool(cp.vl["BRAKE_STATUS"]['DRIVER_BRAKE_APPLIED'])
ret.brakeLights = bool(cp.vl["BRAKE_STATUS"]['BRAKE_APPLIED'])
self.brakeUnavailable = not bool(cp.vl["BRAKE_STATUS"]['BRAKE_OK'])
if self.CP.enableGasInterceptor:
ret.gas = (cp.vl["GAS_SENSOR"]['PED_GAS'] + cp.vl["GAS_SENSOR"]['PED_GAS2']) / 2.
ret.gasPressed = ret.gas > 15
#calculate speed from wheel speeds
ret.vEgoRaw = mean([ret.wheelSpeeds.fl, ret.wheelSpeeds.fr, ret.wheelSpeeds.rl, ret.wheelSpeeds.rr])
ret.vEgo, ret.aEgo = self.update_speed_kf(ret.vEgoRaw)
ret.standstill = ret.vEgoRaw < 0.001
#Toyota SAS (installed flipped)
ret.steeringAngleDeg = -(cp.vl["TOYOTA_STEERING_ANGLE_SENSOR1"]['TOYOTA_STEER_ANGLE'] + cp.vl["TOYOTA_STEERING_ANGLE_SENSOR1"]['TOYOTA_STEER_FRACTION'])
ret.steeringRateDeg = -cp.vl["TOYOTA_STEERING_ANGLE_SENSOR1"]['TOYOTA_STEER_RATE']
#Steering information from smart standin ECU
ret.steeringTorque = cp.vl["STEERING_STATUS"]['STEERING_TORQUE_DRIVER']
ret.steeringTorqueEps = cp.vl["STEERING_STATUS"]['STEERING_TORQUE_EPS']
ret.steeringPressed = abs(ret.steeringTorque) > STEER_THRESHOLD
ret.steerError = bool(cp.vl["STEERING_STATUS"]['STEERING_OK'] == 0)
ret.cruiseState.available = True
ret.cruiseState.standstill = False
ret.cruiseState.nonAdaptive = False
self.buttonStates["accelCruise"] = bool(cp.vl["HIM_CTRLS"]['SPEEDUP_BTN'])
self.buttonStates["decelCruise"] = bool(cp.vl["HIM_CTRLS"]['SPEEDDN_BTN'])
self.buttonStates["cancel"] = bool(cp.vl["HIM_CTRLS"]['CANCEL_BTN'])
self.buttonStates["setCruise"] = bool(cp.vl["HIM_CTRLS"]['SET_BTN'])
#if enabled:
#print(" OPENPILOT ENABLED")
if not enabled:
self.enabled = False
#print(" OPENPILOT OFF")
if bool(self.buttonStates["setCruise"]) and not self.oldEnabled:
print("attempt enable")
self.enabled = not self.enabled
if self.enabled:
self.setSpeed = (int_rnd((ret.vEgo * CV.MS_TO_MPH)/5) * 5)
if ret.standstill:
self.setSpeed = 10
if bool(self.buttonStates["accelCruise"]) and not self.oldSpeedUp:
print("speedup")
self.setSpeed = self.setSpeed + 5
if bool(self.buttonStates["decelCruise"]) and not self.oldSpeedDn:
print("speeddn")
self.setSpeed = self.setSpeed - 5
ret.cruiseState.speed = self.setSpeed * CV.MPH_TO_MS
ret.cruiseState.enabled = self.enabled
ret.stockAeb = False
ret.leftBlindspot = False
ret.rightBlindspot = False
self.oldEnabled = bool(self.buttonStates["setCruise"])
self.oldSpeedDn = bool(self.buttonStates["decelCruise"])
self.oldSpeedUp = bool(self.buttonStates["accelCruise"])
return ret
@staticmethod
def get_can_parser(CP):
signals = [
("TOYOTA_STEER_ANGLE", "TOYOTA_STEERING_ANGLE_SENSOR1", 0),
("BRAKE_APPLIED", "BRAKE_STATUS", 0),
("DRIVER_BRAKE_APPLIED", "BRAKE_STATUS", 0),
("BRAKE_OK", "BRAKE_STATUS", 0),
("BRAKE_PEDAL_POSITION", "BRAKE_STATUS", 0),
("TOYOTA_STEER_FRACTION", "TOYOTA_STEERING_ANGLE_SENSOR1", 0),
("TOYOTA_STEER_RATE", "TOYOTA_STEERING_ANGLE_SENSOR1", 0),
("SET_BTN", "HIM_CTRLS", 0),
("CANCEL_BTN", "HIM_CTRLS", 0),
("SPEEDUP_BTN", "HIM_CTRLS", 0),
("SPEEDDN_BTN", "HIM_CTRLS", 0),
("STEERING_TORQUE_DRIVER", "STEERING_STATUS", 0),
("STEERING_TORQUE_EPS", "STEERING_STATUS", 0),
("STEERING_OK", "STEERING_STATUS", 0),
("PED_GAS", "GAS_SENSOR", 0),
("PED_GAS2", "GAS_SENSOR", 0)
]
checks = [
("TOYOTA_STEERING_ANGLE_SENSOR1", 80),
("STEERING_STATUS", 80),
("HIM_CTRLS", 0),
("BRAKE_STATUS", 80),
("GAS_SENSOR", 40),
]
return CANParser(DBC[CP.carFingerprint]['pt'], signals, checks, 1)
@staticmethod
def get_body_can_parser(CP):
signals = [
]
checks = [
("BODYCONTROL", 10),
("ABS", 10),
("SMARTROADSTERWHEELSPEEDS", 10),
("GEAR_PACKET", 10),
]
if CP.carFingerprint == CAR.SMART_ROADSTER_COUPE:
signals.append(("RIGHT_DOOR", "BODYCONTROL",0))
signals.append(("LEFT_DOOR", "BODYCONTROL",0))
signals.append(("LEFT_SIGNAL", "BODYCONTROL",0))
signals.append(("RIGHT_SIGNAL", "BODYCONTROL",0))
signals.append(("ESP_STATUS", "ABS",0))
signals.append(("WHEELSPEED_FL", "SMARTROADSTERWHEELSPEEDS",0))
signals.append(("WHEELSPEED_FR", "SMARTROADSTERWHEELSPEEDS",0))
signals.append(("WHEELSPEED_RL", "SMARTROADSTERWHEELSPEEDS",0))
signals.append(("WHEELSPEED_RR", "SMARTROADSTERWHEELSPEEDS",0))
signals.append(("BRAKEPEDAL", "ABS",0))
signals.append(("GEAR","GEAR_PACKET", 0))
signals.append(("RPM","GEAR_PACKET",0))
return CANParser(DBC[CP.carFingerprint]['chassis'], signals, checks, 0)
| 2.265625
| 2
|
src/arrays/max-chunks-to-make-sorted.py
|
vighnesh153/ds-algo
| 0
|
12780287
|
<reponame>vighnesh153/ds-algo
def solve(arr):
prefix_max = []
max_till_now = -float('inf')
for elem in arr:
max_till_now = max(max_till_now, elem)
prefix_max.append(max_till_now)
chunks = 0
minimum = arr[-1]
for i in range(len(arr) - 1, -1, -1):
if i > 0:
minimum = min(minimum, arr[i])
if prefix_max[i - 1] <= minimum:
chunks += 1
minimum = arr[i - 1]
else:
chunks += 1
return chunks
A = [2, 0, 1, 2]
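# Expected output: 2, since [2, 0, 1] and [2] can be sorted independently and
# concatenated to give the fully sorted [0, 1, 2, 2].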
print(solve(A))
| 3.140625
| 3
|
ocsmesh/ops/combine_geom.py
|
yosoyjay/OCSMesh
| 0
|
12780288
|
<reponame>yosoyjay/OCSMesh<gh_stars>0
import gc
import logging
from multiprocessing import Pool, Lock, cpu_count
import os
import pathlib
import tempfile
import warnings
from typing import Union, Sequence, Tuple, List
import geopandas as gpd
import numpy as np
from pyproj import CRS, Transformer
from shapely import ops
from shapely.geometry import box, Polygon, MultiPolygon, LinearRing
from shapely.validation import explain_validity
from jigsawpy import jigsaw_msh_t, savemsh, savevtk
from ocsmesh.raster import Raster
from ocsmesh.mesh.mesh import Mesh
_logger = logging.getLogger(__name__)
class GeomCombine:
_base_mesh_lock = Lock()
def __init__(
self,
dem_files: Union[None, Sequence[Union[str, os.PathLike]]],
out_file: Union[str, os.PathLike],
out_format: str = "shapefile",
mesh_file: Union[str, os.PathLike, None] = None,
mesh_multipolygon: Union[MultiPolygon, Polygon] = None,
ignore_mesh_final_boundary : bool = False,
zmin: Union[float, None] = None,
zmax: Union[float, None] = None,
chunk_size: Union[int, None] = None,
overlap: Union[int, None] = None,
nprocs: int = -1,
out_crs: Union[str, CRS] = "EPSG:4326",
base_crs: Union[str, CRS] = None):
self._calc_crs = None
self._base_exterior = None
nprocs = cpu_count() if nprocs == -1 else nprocs
dem_files = [] if dem_files is None else dem_files
self._operation_info = dict(
dem_files=dem_files,
out_file=out_file,
out_format=out_format,
mesh_file=mesh_file,
mesh_mp_in=mesh_multipolygon,
ignore_mesh=ignore_mesh_final_boundary,
zmin=zmin,
zmax=zmax,
chunk_size=chunk_size,
overlap=overlap,
nprocs=nprocs,
out_crs=out_crs,
base_crs=base_crs)
def run(self):
dem_files = self._operation_info['dem_files']
out_file = self._operation_info['out_file']
out_format = self._operation_info['out_format']
mesh_file = self._operation_info['mesh_file']
mesh_mp_in = self._operation_info['mesh_mp_in']
ignore_mesh = self._operation_info['ignore_mesh']
zmin = self._operation_info['zmin']
zmax = self._operation_info['zmax']
chunk_size = self._operation_info['chunk_size']
overlap = self._operation_info['overlap']
nprocs = self._operation_info['nprocs']
out_crs = self._operation_info['out_crs']
base_crs = self._operation_info['base_crs']
out_dir = pathlib.Path(out_file).parent
out_dir.mkdir(exist_ok=True, parents=True)
# Warping takes time; to optimize, only warp rasters
# during calculation of polygons if needed. Otherwise
# only warp polygon before writing to file
if isinstance(out_crs, str):
out_crs = CRS.from_user_input(out_crs)
if isinstance(base_crs, str):
base_crs = CRS.from_user_input(base_crs)
all_crs = set(Raster(dem).crs for dem in dem_files)
self._calc_crs = out_crs
if len(all_crs) == 1:
self._calc_crs = list(all_crs)[0]
_logger.info(
f"All DEMs have the same CRS:"
f" {self._calc_crs.to_string()}")
base_mult_poly = None
if mesh_mp_in:
# Assumption: If base_mult_poly is provided, it's in
# base_crs if not None, else in out_crs
base_mult_poly = self._get_valid_multipolygon(mesh_mp_in)
if base_crs is None:
base_crs = out_crs
if not base_crs.equals(self._calc_crs):
_logger.info("Reprojecting base polygon...")
transformer = Transformer.from_crs(
base_crs, self._calc_crs, always_xy=True)
base_mult_poly = ops.transform(
transformer.transform, base_mult_poly)
elif mesh_file and pathlib.Path(mesh_file).is_file():
_logger.info("Creating mesh object from file...")
base_mesh = Mesh.open(mesh_file, crs=base_crs)
mesh_crs = base_mesh.crs
# Assumption: If mesh_crs is not defined, mesh is in
            # base_crs if not None, else in out_crs
if base_crs is None:
if mesh_crs:
base_crs = mesh_crs
else:
base_crs = out_crs
if not self._calc_crs.equals(base_crs):
_logger.info("Reprojecting base mesh...")
transformer = Transformer.from_crs(
base_crs, self._calc_crs, always_xy=True)
xy = base_mesh.coord
xy = np.vstack(
transformer.transform(xy[:, 0], xy[:, 1])).T
base_mesh.coord[:] = xy
_logger.info("Done")
_logger.info("Getting mesh hull polygons...")
base_mult_poly = base_mesh.hull.multipolygon()
_logger.info("Done")
base_mult_poly = self._get_valid_multipolygon(base_mult_poly)
if base_mult_poly:
# NOTE: This needs to happen once and before any
# modification to basemesh happens (due to overlap
# w/ DEM, etc.). Exterior of base mesh is used for
# raster clipping
#
# TODO: Add buffer for base mesh exterior in case there
            # was erosion and we want to make sure new DEMs further
# inland are considered (?)
self._base_exterior = MultiPolygon(
list(ops.polygonize(
[poly.exterior for poly in base_mult_poly])))
z_info = {}
if zmin is not None:
z_info['zmin'] = zmin
if zmax is not None:
z_info['zmax'] = zmax
poly_files_coll = []
_logger.info(f"Number of processes: {nprocs}")
with tempfile.TemporaryDirectory(dir=out_dir) as temp_dir, \
tempfile.NamedTemporaryFile() as base_file:
if base_mult_poly:
base_mesh_path = base_file.name
self._multipolygon_to_disk(
base_mesh_path, base_mult_poly, fix=False)
else:
base_mesh_path = None
base_mult_poly = None
_logger.info("Processing DEM priorities ...")
# Process priority: priority is based on the order,
# the last input has the highest priority
# (i.e. lowest priority number)
priorities = list((range(len(dem_files))))[::-1]
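            # e.g. with three DEM files the priorities are [2, 1, 0]; zip() below pairs
            # the last file listed with priority 0, i.e. the highest priority.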
# TODO: Needs some code refinement for bbox issue
# priority_args = []
# for priority, dem_file in zip(priorities, dem_files):
# priority_args.append(
# (priority, temp_dir, dem_file, chunk_size, overlap))
#
# with Pool(processes=nprocs) as p:
# p.starmap(self._process_priority, priority_args)
# p.join()
_logger.info("Processing DEM contours ...")
# Process contours
if nprocs > 1:
parallel_args = []
for priority, dem_file in zip(priorities, dem_files):
parallel_args.append(
(base_mesh_path, temp_dir,
priority, dem_file,
z_info, chunk_size, overlap))
with Pool(processes=nprocs) as p:
poly_files_coll.extend(
p.starmap(
self._parallel_get_polygon_worker,
parallel_args))
p.join()
else:
poly_files_coll.extend(
self._serial_get_polygon(
base_mesh_path, temp_dir,
priorities, dem_files,
z_info, chunk_size, overlap))
_logger.info("Generating final boundary polygon...")
# If a DEM doesn't intersect domain None will
# be returned by worker
poly_files_coll = [i for i in poly_files_coll if i]
if base_mesh_path is not None and not ignore_mesh:
poly_files_coll.append(base_mesh_path)
rasters_gdf = gpd.GeoDataFrame(
columns=['geometry'],
crs=self._calc_crs
)
for feather_f in poly_files_coll:
rasters_gdf = rasters_gdf.append(
gpd.GeoDataFrame(
{'geometry': self._read_multipolygon(
feather_f)
},
crs=self._calc_crs
),
ignore_index=True)
# The assumption is this returns polygon or multipolygon
fin_mult_poly = rasters_gdf.unary_union
_logger.info("Done")
            # If DEM is not inside input base polygon, the end result
# is None
if fin_mult_poly:
# Get a clean multipolygon to write to output
# Is this necessary? It can be expensive if geom is not valid
fin_mult_poly = self._get_valid_multipolygon(fin_mult_poly)
self._write_to_file(
out_format, out_file, fin_mult_poly, out_crs)
self._base_exterior = None
def _get_valid_multipolygon(
self,
polygon: Union[Polygon, MultiPolygon]
) -> MultiPolygon:
if not polygon.is_valid:
polygon = ops.unary_union(polygon)
if not polygon.is_valid:
polygon = polygon.buffer(0)
if not polygon.is_valid:
raise ValueError(explain_validity(polygon))
if isinstance(polygon, Polygon):
polygon = MultiPolygon([polygon])
return polygon
def _multipolygon_to_disk(
self,
path: Union[str, os.PathLike],
multipolygon: MultiPolygon,
fix: bool = True):
if fix:
multipolygon = self._get_valid_multipolygon(
multipolygon)
if isinstance(multipolygon, Polygon):
# In case fix is not True, we need to make sure it's
# a multipolygon instead of polygon for dataframe creation
multipolygon = MultiPolygon([multipolygon])
gpd.GeoDataFrame({'geometry': multipolygon}).to_feather(path)
def _read_multipolygon(
self,
path: Union[str, os.PathLike],
fix: bool = True
) -> MultiPolygon:
multipolygon = MultiPolygon(
list(gpd.read_feather(path).geometry))
if fix:
multipolygon = self._get_valid_multipolygon(
multipolygon)
return multipolygon
def _read_to_geodf(
self,
path: Union[str, os.PathLike],
) -> gpd.GeoDataFrame:
gdf = gpd.read_feather(path)
return gdf
def _process_priority(
self,
priority: int,
temp_dir: Union[str, os.PathLike],
dem_path: Union[str, os.PathLike],
chunk_size: Union[int, None] = None,
overlap: Union[int, None] = None):
rast = Raster(
dem_path,
chunk_size=chunk_size,
overlap=overlap)
# Can cause issue with bbox(?)
if not self._calc_crs.equals(rast.crs):
rast.warp(dst_crs=self._calc_crs)
pri_dt_path = (
pathlib.Path(temp_dir) / f'dem_priority_{priority}.feather')
pri_mult_poly = MultiPolygon([box(*rast.src.bounds)])
self._multipolygon_to_disk(
pri_dt_path, pri_mult_poly)
def _serial_get_polygon(
self,
base_mesh_path: Union[str, os.PathLike, None],
temp_dir: Union[str, os.PathLike],
priorities: Sequence[int],
dem_files: Sequence[Union[str, os.PathLike]],
z_info: dict = {},
chunk_size: Union[int, None] = None,
overlap: Union[int, None] = None):
_logger.info("Getting DEM info")
poly_coll = []
for priority, dem_path in zip(priorities, dem_files):
_logger.info(f"Processing {dem_path} ...")
if not pathlib.Path(dem_path).is_file():
warnings.warn(f"File {dem_path} not found!")
_logger.debug(f"File {dem_path} not found!")
continue
# Calculate Polygon
_logger.info("Loading raster from file...")
rast = Raster(
dem_path,
chunk_size=chunk_size,
overlap=overlap)
# Can cause issue with bbox(?)
if not self._calc_crs.equals(rast.crs):
rast.warp(dst_crs=self._calc_crs)
_logger.info("Clipping to basemesh size if needed...")
rast_box = box(*rast.src.bounds)
if base_mesh_path is not None:
# NOTE: We use the exterior from the earlier calc
if self._base_exterior and not rast_box.within(self._base_exterior):
if not rast_box.intersects(self._base_exterior):
_logger.info(
f"{dem_path} is ignored due to base mesh...")
continue
_logger.info(
f"{dem_path} needs clipping by base mesh...")
rast.clip(self._base_exterior)
rast_box = box(*rast.src.bounds)
# Processing raster
_logger.info("Creating geom from raster...")
_logger.info("Getting polygons from geom...")
geom_mult_poly = rast.get_multipolygon(**z_info)
geom_mult_poly = self._get_valid_multipolygon(
geom_mult_poly)
if base_mesh_path is not None:
_logger.info("Subtract DEM bounds from base mesh polygons...")
self._base_mesh_lock.acquire()
try:
# Get a valid multipolygon from disk
base_mult_poly = self._read_multipolygon(
base_mesh_path)
# Get valid multipolygon after operation and write
base_mult_poly = base_mult_poly.difference(
rast_box)
self._multipolygon_to_disk(
base_mesh_path, base_mult_poly)
finally:
self._base_mesh_lock.release()
# TODO: Needs some code refinement due to bbox
# Processing DEM priority
# priority_geodf = gpd.GeoDataFrame(
# columns=['geometry'],
# crs=self._calc_crs)
# for p in range(priority):
# higher_pri_path = (
# pathlib.Path(temp_dir) / f'dem_priority_{p}.feather')
#
# if higher_pri_path.is_file():
# priority_geodf = priority_geodf.append(
# self._read_to_geodf(higher_pri_path))
#
# if len(priority_geodf):
# op_res = priority_geodf.unary_union
# pri_mult_poly = MultiPolygon()
# if isinstance(op_res, MultiPolygon):
# pri_mult_poly = op_res
# else:
# pri_mult_poly = MultiPolygon([op_res])
#
#
# if rast_box.within(pri_mult_poly):
# _logger.info(
# f"{dem_path} is ignored due to priority...")
# continue
#
# if rast_box.intersects(pri_mult_poly):
# _logger.info(
# f"{dem_path} needs clipping by priority...")
#
# # Clipping raster can cause problem at
# # boundaries due to difference in pixel size
# # between high and low resolution rasters
# # so instead we operate on extracted polygons
# geom_mult_poly = geom_mult_poly.difference(
# pri_mult_poly)
# Write geometry multipolygon to disk
temp_path = (
pathlib.Path(temp_dir)
/ f'{pathlib.Path(dem_path).name}.feather')
try:
self._multipolygon_to_disk(temp_path, geom_mult_poly)
poly_coll.append(temp_path)
            except Exception:
warnings.warn(f"Error writing {temp_path} to disk")
# Multipolygon takes a lot of memory
del geom_mult_poly
gc.collect(2)
return poly_coll
def _parallel_get_polygon_worker(
self,
base_mesh_path: Union[str, os.PathLike, None],
temp_dir: Union[str, os.PathLike],
priority: int,
dem_file: Union[str, os.PathLike],
z_info: dict = {},
chunk_size: Union[int, None] = None,
overlap: Union[int, None] = None):
poly_coll_files = self._serial_get_polygon(
base_mesh_path, temp_dir, [priority], [dem_file],
z_info, chunk_size, overlap)
# Only one item passed to serial code at most
return poly_coll_files[0] if poly_coll_files else None
def _linearring_to_vert_edge(
self,
coords: List[Tuple[float, float]],
edges: List[Tuple[int, int]],
lin_ring: LinearRing):
'''From shapely LinearRing get coords and edges'''
# NOTE: This function mutates coords and edges
# TODO: Move to utils?
idx_b = len(coords)
coords.extend(coord for coord in lin_ring.coords)
# Last coord is the same as first in a ring
coords.pop()
idx_e = len(coords) - 1
n_idx = len(coords)
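        # Connect consecutive vertices idx_b..idx_e; the modular term wraps the
        # final edge back to idx_b so the ring is closed (e.g. 4 vertices starting
        # at index 0 give edges (0,1), (1,2), (2,3), (3,0)).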
edges.extend([
(i, (i + 1) % n_idx + idx_b * ((i + 1) // n_idx))
for i in range(idx_b, idx_e + 1)])
def _write_to_file(
self, out_format, out_file, multi_polygon, crs):
_logger.info(f"Writing for file ({out_format}) ...")
# TODO: Check for correct extension on out_file
if out_format == "shapefile":
gdf = gpd.GeoDataFrame(
{'geometry': multi_polygon},
crs=self._calc_crs
)
if not crs.equals(self._calc_crs):
_logger.info(
f"Project from {self._calc_crs.to_string()} to"
f" {crs.to_string()} ...")
gdf = gdf.to_crs(crs)
gdf.to_file(out_file)
elif out_format == "feather":
gdf = gpd.GeoDataFrame(
{'geometry': multi_polygon},
crs=self._calc_crs
)
if not crs.equals(self._calc_crs):
_logger.info(
f"Project from {self._calc_crs.to_string()} to"
f" {crs.to_string()} ...")
gdf = gdf.to_crs(crs)
gdf.to_feather(out_file)
elif out_format in ("jigsaw", "vtk"):
if not crs.equals(self._calc_crs):
_logger.info(
f"Project from {self._calc_crs.to_string()} to"
f" {crs.to_string()} ...")
transformer = Transformer.from_crs(
self._calc_crs, crs, always_xy=True)
multi_polygon = ops.transform(
transformer.transform, multi_polygon)
msh = jigsaw_msh_t()
msh.ndims = +2
msh.mshID = 'euclidean-mesh'
coords = []
edges = []
for polygon in multi_polygon:
self._linearring_to_vert_edge(
coords, edges, polygon.exterior)
for interior in polygon.interiors:
self._linearring_to_vert_edge(
coords, edges, interior)
msh.vert2 = np.array(
[(i, 0) for i in coords],
dtype=jigsaw_msh_t.VERT2_t)
msh.edge2 = np.array(
[(i, 0) for i in edges],
dtype=jigsaw_msh_t.EDGE2_t)
if out_format == "jigsaw":
savemsh(out_file, msh)
elif out_format == "vtk":
savevtk(out_file, msh)
else:
raise NotImplementedError(f"Output type {out_format} is not supported")
_logger.info("Done")
| 1.757813
| 2
|
app/main.py
|
aoirint/RoomSystemBotClient
| 0
|
12780289
|
<reponame>aoirint/RoomSystemBotClient<filename>app/main.py<gh_stars>0
import os
import time
import subprocess
import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
FIREBASE_SECRET_PATH = os.environ['FIREBASE_SECRET_PATH']
FIREBASE_DATABASE_URL = os.environ['FIREBASE_DATABASE_URL']
OPENJTALK_HTSVOICE_PATH = os.environ.get('OPENJTALK_HTSVOICE_PATH')
SPEECH_ENABLED = os.environ.get('SPEECH_ENABLED', '1') == '1'
PERSON_SOUND_ENABLED = os.environ.get('PERSON_SOUND_ENABLED', '0') == '1'
STATIC_SPEECH_ENABLED = os.environ.get('STATIC_SPEECH_ENABLED', '0') == '1'
STATIC_SPEECH_TEXT = os.environ.get('STATIC_SPEECH_TEXT')
if (SPEECH_ENABLED or STATIC_SPEECH_ENABLED) and not os.path.exists(OPENJTALK_HTSVOICE_PATH):
raise Exception(f'HTS Voice not found at {OPENJTALK_HTSVOICE_PATH}')
cred = credentials.Certificate(FIREBASE_SECRET_PATH)
firebase_admin.initialize_app(cred, {
'databaseURL': FIREBASE_DATABASE_URL,
})
messagesRef = db.reference('messages')
def play_speech(text):
print(f'playing speech: {text}')
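    # Synthesize the text with open_jtalk (WAV written to stdout), then pipe the
    # resulting audio into the `play` command via stdin.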
p = subprocess.Popen([ 'open_jtalk', '-x', '/var/lib/mecab/dic/open-jtalk/naist-jdic', '-m', OPENJTALK_HTSVOICE_PATH, '-r', '1.0', '-ow', '/dev/stdout' ], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
sound, _ = p.communicate(text.encode('utf-8'))
p = subprocess.Popen([ 'play', '-' ], stdin=subprocess.PIPE)
p.communicate(sound)
def play_sound(path):
print(f'playing sound at {path}')
subprocess.run([ 'play', path ])
def play_person_sound(user_id):
sound_path = os.path.join('/sounds/person/', os.path.basename(user_id) + '.mp3')
if not os.path.exists(sound_path):
print(f'Person Sound not found at {sound_path}')
return
play_sound(sound_path)
def on_message_updated(event: db.Event):
if event.data is None:
return
print('Message updated.')
print(f'Path: {event.path}')
if event.path == '/': # / == /messages
messages = event.data
elif len(event.path.split('/')) == 2: # /MESSAGE_KEY == /messages/MESSAGE_KEY
key = os.path.basename(event.path)
messages = { key: event.data }
else:
        print('Ignored (minor change).')
return
message_count = len(messages.items())
print(f'Message Count: {message_count}')
print(messages)
for key, message in messages.items():
print(message)
post_user = message.get('from', {})
user_id = post_user.get('aadObjectId')
username = post_user.get('name')
text = message.get('text')
play_sound('/sounds/opening.mp3')
if PERSON_SOUND_ENABLED:
play_person_sound(user_id)
if SPEECH_ENABLED:
play_speech(text)
if STATIC_SPEECH_ENABLED:
play_speech(STATIC_SPEECH_TEXT)
messagesRef.child(key).delete()
print(f'Message {key} deleting')
play_sound('/sounds/closing.mp3')
print('All Messages proceeded.')
if __name__ == '__main__':
print('start listening')
messagesRef.listen(on_message_updated)
while True:
time.sleep(0.1)
| 2.078125
| 2
|
Mastermind/Mastermind.py
|
jamtot/PyProjects
| 0
|
12780290
|
import random
class Board(object):
def __init__(self, rows, columns, pegs):
self.rows = rows
self.columns = columns
self.pegs = pegs
self.code = self.makeCode()
def makeCode(self):
# randomly generate a peg for each row
code = []
for i in xrange(self.rows):
code+= [random.randint(1, self.pegs)]
return code
def getCode(self):
#return the code
return self.code
def getColumns(self):
return self.columns
def getRows(self):
return self.rows
def getPegs(self):
return self.pegs
class Mastermind(object):
def __init__(self, board):
self.board = board
def play(self):
# will only loop as many times as there are columns
#print "Code: %s" % ("").join(map(str,self.board.getCode()))
tries = self.board.getColumns()
pegs = self.board.getPegs()
rows = self.board.getRows()
code = self.board.getCode()
print "Can you guess the %d digit number between %s and %s in %d tries?" % ( rows, str(1)*rows, str(pegs)*rows, tries)
for i in xrange(tries):
print "You have %d tries left." % (tries-i)
choice = raw_input("> ")
if len(choice) == self.board.rows: # valid length
choice = [int(n) for n in list(choice)]
lastattempt = self.tryCode(choice)
                if lastattempt == "Congratulations!":
print lastattempt
break
else:
print lastattempt
print "Game is over. Code was %s" % ("").join(map(str,code))
exit(1)
def tryCode(self, attempt):
# copy the code as I will be changing it
code = list(self.board.getCode())
#print code
print attempt
out, pop = [], []
if attempt == code:
return "Congratulations!"
else:
# find equal values, they will be removed
for i in xrange(len(code)):
if attempt[i] == code[i]:
pop+=[i]
out+= ["Correct."]
# removing equal values
for i in reversed(pop):
attempt.pop(i)
code.pop(i)
# searching through the remaining values for matches
for i in xrange(len(attempt)):
for j in xrange(len(code)):
if attempt[i] == code[j]:
out+=["Right peg."]
code.pop(j)
break
# join up the output
if (len(out) < 1):
return "None correct."
else:
return (" ").join(out)
if __name__ == "__main__":
gBoard = Board(4, 10, 6)
game = Mastermind(gBoard)
game.play()
| 3.875
| 4
|
Guanabara/ex11 - modulos_pacotes/uteis/numeros/__init__.py
|
Edu1769/python
| 0
|
12780291
|
<gh_stars>0
def fatorial(n):
f = 1
for cont in range(1,n+1):
f= f*cont
return f
| 3.125
| 3
|
meerkat/parser.py
|
crdietrich/meerkat
| 2
|
12780292
|
"""Data parsing tools"""
import json
import pandas as pd
## Data specific headers ##
# Global Positioning System Fix Data
# http://aprs.gids.nl/nmea/#gga
GGA_columns = ["nmea_type", "UTC_time", "latitude", "NS", "longitude", "EW", "quality", "n_satellites",
"horizontal_dilution", "altitude", "M", "geoidal_separation", "M",
"age_sec_diff", "diff_id", "checksum"]
# GPS DOP and active satellites
# http://aprs.gids.nl/nmea/#gsa
GSA_columns = (["nmea_type", "mode", "mode_fix"] +
["sv" + str(n) for n in list(range(12))] +
["pdop", "hdop", "vdop", "checksum"])
# GPS Satellites in view
# http://aprs.gids.nl/nmea/#gsv
GSV_columns = (["nmea_type", "total_messages", "message_number"] +
["total_sv_in_view", "sv_prn_number", "elev_degree", "azimuth", "snr"] * 4)
# Recommended minimum specific GPS/Transit data
# http://aprs.gids.nl/nmea/#rmc
RMC_columns = ["nmea_type", "UTC_time", "valid", "latitude", "NS", "longitude", "EW", "speed_knots",
"true_course", "date", "variation", "variation_EW", "checksum"]
# Track made good and ground speed
# http://aprs.gids.nl/nmea/#vtg
VTG_columns = ["nmea_type", "track_made_good", "T", "NA", "NA", "speed_knots", "N",
"speed_km_hr", "K"]
def pad_header(column_list, target_len):
unnamed_columns = list(range(len(column_list), target_len))
unnamed_columns = ["c" + str(c) for c in unnamed_columns]
return column_list[:target_len] + unnamed_columns
def csv_resource(fp):
"""Parse a .csv file generated with Meerkat
Parameters
----------
fp : filepath to saved data
Returns
-------
meta : dict, metadata describing data
df : Pandas DataFrame, data recorded from device(s) described in meta
"""
with open(fp, 'r') as f:
sbang = f.readline()
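    # The first line is a two-character-prefixed (e.g. "#!") JSON metadata header;
    # stripping those two characters leaves the JSON payload describing the data.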
meta = json.loads(sbang[2:])
df = pd.read_csv(fp,
delimiter=meta['delimiter'],
comment=meta['comment'],
quotechar='"')
df['datetime64_ns'] = pd.to_datetime(df[meta['time_format']])
return meta, df
| 2.75
| 3
|
Interface/__init__.py
|
thuurzz/ping-python-google
| 0
|
12780293
|
<gh_stars>0
# Think about the interface
# Design the interface
| 1.179688
| 1
|
clustercode/tests/cluster/main.py
|
MatKie/clustercode
| 0
|
12780294
|
import sys
import os
print(os.getcwd())
sys.path.append("../")
from clustercode.ClusterEnsemble import ClusterEnsemble
from clustercode.clustering import cluster_analysis
# tpr = "/home/trl11/Virtual_Share/gromacs_test/npt.tpr"
# traj = "/home/trl11/Virtual_Share/gromacs_test/npt.xtc"
traj = "clustercode/tests/cluster/files/traj_small.xtc"
tpr = "clustercode/tests/cluster/files/topol_small.tpr"
# traj = "/home/mk8118/OneDrive/2019/simulations/gromacs/SDS/\
# check_ensembles/NVT/PME_revised/nh_10/base/nvt.trr"
ClstrEns = ClusterEnsemble(tpr, traj, ["CE", "CM"])
ClstrEns.cluster_analysis(algorithm="static")
clstr_ens_static = ClstrEns.cluster_list
ClstrEns.cluster_analysis(algorithm="dynamic", work_in="Atom")
clstr_ens_dynamic = ClstrEns.cluster_list
# exit()
for idx_time, (static_clus_list, dynamic_clus_list) in enumerate(
zip(clstr_ens_static, clstr_ens_dynamic)
):
diff_clust_count = len(static_clus_list) - len(dynamic_clus_list)
print("_________________________________________________________")
print("Frame: {:d}".format(idx_time))
print(
"Difference in cluster counts (static - dynamic): {:d}".format(diff_clust_count)
)
static_molec_count = 0
for cluster in static_clus_list:
static_molec_count += len(cluster)
print("Statis molec count: {:d}".format(static_molec_count))
dynamic_molec_count = 0
for cluster in dynamic_clus_list:
dynamic_molec_count += cluster.n_residues
print("Dynamic molec count: {:d}".format(dynamic_molec_count))
print(
"Difference in molecule counts (static - dynamic): {:d}".format(
static_molec_count - dynamic_molec_count
)
)
new_s_set = static_clus_list[0]
for cluster in static_clus_list:
new_s_set = new_s_set.union(cluster)
new_d_set = dynamic_clus_list[0]
for cluster in dynamic_clus_list:
new_d_set = new_d_set.union(cluster)
print(
"Static molec double counted: {:d}".format(static_molec_count - len(new_d_set))
)
print(
"Dynamic molec double counted: {:d}".format(
dynamic_molec_count - new_d_set.n_residues
)
)
for idxi, clusteri in enumerate(dynamic_clus_list):
for idxj, clusterj in enumerate(dynamic_clus_list):
if clusteri.issuperset(clusterj) and idxi != idxj:
print(idxi, idxj)
print(clusteri)
print(clusteri.n_residues, clusterj.n_residues)
| 2.203125
| 2
|
darknet/tools/submit_tools/read_results.py
|
vicwer/rebar_detect
| 15
|
12780295
|
import os
import re
import cv2
import numpy as np
def gen_img_label_list(csv_list):
    with open(csv_list, 'r') as csv_f:
        lines = csv_f.readlines()
cnt_img = ''
cnt_label = ''
for i in lines:
img = i.strip().split(' ')[0]
score = float(i.strip().split(' ')[1])
b = [int(float(j)) for j in i.strip().split(' ')[2:]]
if score > 0.5:
continue
# if img != 'EBA961BC' or img != 'AD393334':
# continue
print(img)
img_ = cv2.imread('../../DataFountain/GLODON_objDet/test_dataset/' + img + '.jpg')
print(score)
h, w, _ = img_.shape
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.rectangle(img_, (int(b[0]), int(b[1])), (int(b[2]), int(b[3])), (0,255,255), 3)
img_ = cv2.resize(img_, (int(w/2), int(h/2)))
cv2.imshow('img', img_)
cv2.waitKey(0)
if __name__ == '__main__':
csv_list = '../../results/comp4_det_test_rebar.txt'
gen_img_label_list(csv_list)
| 2.609375
| 3
|
exercises/fit_gaussian_estimators.py
|
shlomi-perles/IML.HUJI
| 0
|
12780296
|
from IMLearn.learners import UnivariateGaussian, MultivariateGaussian
import numpy as np
import plotly.graph_objects as go
import plotly.io as pio
from matplotlib import pyplot as plt
pio.templates.default = "simple_white"
SAMPLES = 1000
QUESTION_ONE_MEAN = 10
QUESTION_ONE_VAR = 1
QUESTION_ONE_SAMPLES_SKIP = 10
QUESTION_TWO_RESOLUTION = 200
QUESTION_TWO_GRID_SIZE = 10
def test_univariate_gaussian():
# Question 1 - Draw samples and print fitted model
samples = np.random.normal(QUESTION_ONE_MEAN, QUESTION_ONE_VAR, size=SAMPLES)
univariate_gaussian = UnivariateGaussian()
univariate_gaussian.fit(samples)
print(f"({univariate_gaussian.mu_}, {univariate_gaussian.var_})")
# Question 2 - Empirically showing sample mean is consistent
x = np.arange(QUESTION_ONE_MEAN, SAMPLES + 1, QUESTION_ONE_SAMPLES_SKIP)
estimate_mean_dis = np.vectorize(lambda last_index: np.abs(np.mean(samples[:last_index]) - QUESTION_ONE_MEAN))
fig = go.Figure(
[go.Scatter(x=x, y=estimate_mean_dis(x), mode='markers', name=r'$\left|\hat{\mu}(m)-10\right|$',
showlegend=True)], layout=go.Layout(
title={
"text": r"$\text{Distance Between The Estimated-And True Value Of The Expectations}\\"
r"\text{As Function Of Number Of Samples}$",
'y': 0.84, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'},
xaxis_title=r"$\text{Number of samples} [m]$", yaxis_title=r"$\left|\hat{\mu}(m)-10\right|$", height=400))
fig.show()
# fig.write_image("estimate_distance.svg")
# Question 3 - Plotting Empirical PDF of fitted model
fig = go.Figure(
[go.Scatter(x=samples, y=univariate_gaussian.pdf(samples), mode='markers',
showlegend=False, marker=dict(size=2))], layout=go.Layout(
title={
"text": r"$\text{Probability Density As Function Of Samples Values}$",
'y': 0.84, 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'},
xaxis_title=r"$\text{Sample value}$", yaxis_title=r"$\text{Probability density}$", height=400))
fig.show()
# fig.write_image("pdf_q1.svg")
def test_multivariate_gaussian():
# Question 4 - Draw samples and print fitted model
mu = np.array([0, 0, 4, 0])
sigma = np.array([[1, 0.2, 0, 0.5],
[0.2, 2, 0, 0],
[0, 0, 1, 0],
[0.5, 0, 0, 1]])
samples = np.random.multivariate_normal(mu, sigma, SAMPLES)
multivariate_gaussian = MultivariateGaussian()
multivariate_gaussian.fit(samples)
print(multivariate_gaussian.mu_)
print(multivariate_gaussian.cov_)
# Question 5 - Likelihood evaluation
f1 = np.linspace(-QUESTION_TWO_GRID_SIZE, QUESTION_TWO_GRID_SIZE, QUESTION_TWO_RESOLUTION)
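    # Build every (f1, f3) pair on the grid: repeat() varies f1 slowly and tile()
    # cycles f3 quickly, giving the full Cartesian product of the two axes.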
grid_tuples = np.transpose(np.array([np.repeat(f1, len(f1)), np.tile(f1, len(f1))]))
calc_log_likelihood = lambda x1, x3: multivariate_gaussian.log_likelihood(np.array([x1, 0, x3, 0]), sigma, samples)
Z = np.vectorize(calc_log_likelihood)(grid_tuples[:, 0], grid_tuples[:, 1]).reshape(QUESTION_TWO_RESOLUTION,
QUESTION_TWO_RESOLUTION)
fig, ax = plt.subplots()
heat_map = ax.pcolormesh(f1, f1, Z)
fig.colorbar(heat_map, format='%.e')
ax.set_title("log-likelihood for " + r"$\mu=\left[f_{1},0,f_{3},0\right]{}^{T}$")
ax.set_xlabel("$f_{3}$")
ax.set_ylabel("$f_{1}$")
plt.show()
# Question 6 - Maximum likelihood
max_coordinates = np.where(Z == np.amax(Z))
print(f"({round(f1[max_coordinates[0]][0], 3)}, {round(f1[max_coordinates[1]][0], 3)})")
if __name__ == '__main__':
np.random.seed(0)
test_univariate_gaussian()
test_multivariate_gaussian()
| 3.390625
| 3
|
python/venv/lib/python2.7/site-packages/keystoneauth1/tests/unit/keystoneauth_fixtures.py
|
sjsucohort6/openstack
| 0
|
12780297
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
class HackingCode(fixtures.Fixture):
"""A fixture to house the various code examples for the keystoneclient
hacking style checks.
"""
oslo_namespace_imports = {
'code': """
import oslo.utils
import oslo_utils
import oslo.utils.encodeutils
import oslo_utils.encodeutils
from oslo import utils
from oslo.utils import encodeutils
from oslo_utils import encodeutils
import oslo.serialization
import oslo_serialization
import oslo.serialization.jsonutils
import oslo_serialization.jsonutils
from oslo import serialization
from oslo.serialization import jsonutils
from oslo_serialization import jsonutils
import oslo.config
import oslo_config
import oslo.config.cfg
import oslo_config.cfg
from oslo import config
from oslo.config import cfg
from oslo_config import cfg
import oslo.i18n
import oslo_i18n
import oslo.i18n.log
import oslo_i18n.log
from oslo import i18n
from oslo.i18n import log
from oslo_i18n import log
""",
'expected_errors': [
(1, 0, 'K333'),
(3, 0, 'K333'),
(5, 0, 'K333'),
(6, 0, 'K333'),
(9, 0, 'K333'),
(11, 0, 'K333'),
(13, 0, 'K333'),
(14, 0, 'K333'),
(17, 0, 'K333'),
(19, 0, 'K333'),
(21, 0, 'K333'),
(22, 0, 'K333'),
(25, 0, 'K333'),
(27, 0, 'K333'),
(29, 0, 'K333'),
(30, 0, 'K333'),
],
}
| 1.6875
| 2
|
API/main.py
|
Ankuraxz/Tagonizer-2
| 3
|
12780298
|
<gh_stars>1-10
from fastapi import FastAPI, Query, status, Request
from typing import List
import re
import itertools
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from msrest.authentication import CognitiveServicesCredentials
from azure.ai.textanalytics import TextAnalyticsClient
from azure.core.credentials import AzureKeyCredential
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from difflib import SequenceMatcher
import os
key1 = os.environ["KEY"]
ep = os.environ["ENDPOINT"]
loc = os.environ["LOCATION"]
KEY = os.environ["VKEY"]
ENDPOINT = os.environ["VENDPOINT"]
LOCATION = os.environ["LOCATION"]
app = FastAPI(
title="Tagonizer",
description="API for Tagonizer",
version="0.1.1",
openapi_url="/api/v0.1.1/openapi.json",
docs_url="/",
redoc_url=None,
)
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
class UnicornException(Exception):
def __init__(self, name: str):
self.name = name
class data(BaseModel):
comments: List[str] = Query(...)
class Vision(BaseModel):
seller_img: List[str] = Query(...)
customer_img: List[str] = Query(...)
@app.exception_handler(UnicornException)
async def unicorn_exception_handler(request: Request, exc: UnicornException):
return JSONResponse(
status_code=418,
content={
"message": f"Oops! {exc.name} can't be processed. There goes a rainbow..."
},
)
def cleaner(comments):
document = []
for ix in comments:
        ix = re.sub(r'[^A-Za-z ,.]+', '', ix)  # keep only letters, spaces, commas and periods
ix = ix.lower()
document.append(ix)
return document
def authenticate_client():
ta_credential = AzureKeyCredential(key1)
text_analytics_client = TextAnalyticsClient(
endpoint=ep,
credential=ta_credential)
return text_analytics_client
def sentiment_analysis_with_opinion_mining_example(documents, client, reviews):
j = None
result = client.analyze_sentiment(documents, show_opinion_mining=True)
doc_result = [doc for doc in result if not doc.is_error]
# positive_reviews = [doc for doc in doc_result if doc.sentiment == "positive"]
# negative_reviews = [doc for doc in doc_result if doc.sentiment == "negative"]
# positive_mined_opinions = []
# mixed_mined_opinions = []
# negative_mined_opinions = []
for document in doc_result:
if document.sentiment == "negative":
j = 0
elif document.sentiment == "positive":
j = 1
else:
j = 2
for sentence in document.sentences:
for mined_opinion in sentence.mined_opinions:
                aspect = mined_opinion.aspect
                opinions = []
                # print("......'{}' aspect '{}'".format(aspect.sentiment, aspect.text))
                for opinion in mined_opinion.opinions:
                    opinions.append(opinion.text)
                key = str(aspect.text).lower()
                # accumulate how many times this aspect has already been mentioned
                count = reviews[key][0] + 1 if key in reviews else 0
                reviews.update({key: (count, str(aspect.sentiment), opinions)})
return j, reviews
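# Note (added for clarity): the first return value encodes the document-level sentiment
# (0 = negative, 1 = positive, 2 = moderate); `reviews` maps each mined aspect to a
# (count, sentiment, opinions) tuple that the /predict endpoint later sorts and dedupes.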
@app.post('/predict', status_code=status.HTTP_201_CREATED, responses={
201: {
"description": "Review Analyzer",
"content": {
"application/json": {
"example": {
"Product_Sentiment": [
"Overall Sentiment 0 --> Negative 1 --> Positive 2 --> Moderate"
],
"reviews": {
"Review Number 0": "Sentiment",
"Review Number 1": "Sentiment",
"Review Number 2": "Sentiment",
},
"Tags": {
"Tag 0": "Sentiment",
"Tag 1": "Sentiment",
"Tag 2": "Sentiment"
}
}
}
},
},
})
async def predict(data: data):
reviews = {}
document = cleaner(data.comments)
client = authenticate_client()
docSentiment = {}
Senti = []
k = 0
for ix in document:
documents = []
if len(ix) >= 5000:
documents.append(ix[:5000]) # Limiting 5000 chars
docSentiment[k], reviews = sentiment_analysis_with_opinion_mining_example(documents, client, reviews)
else:
documents.append(ix)
docSentiment[k], reviews = sentiment_analysis_with_opinion_mining_example(documents, client, reviews)
k += 1
tags = {k: v for k, v in sorted(reviews.items(), key=lambda item: item[1], reverse=True)}
keys = tags.keys()
reviews.clear()
s = SequenceMatcher(None)
# print(keys)
    redundant = set()
    limit = 0.60  # similarity threshold above which two tags are treated as duplicates
    for key in keys:
        s.set_seq2(key)
        for iy in keys:
            # wordx = key
            # wordy = iy
            s.set_seq1(iy)
            if key != iy:  # not the same word
                if (s.ratio() >= limit and len(s.get_matching_blocks()) == 2):  # matched word
                    redundant.add(iy)
    for ix in redundant:
        # print(ix)
        tags.pop(ix)
revResult = {}
prediction = list(tags.items())
docResult = {
"Status": "Something Went Wrong"
}
for t, u in zip(tags.keys(), tags.values()):
if u[1] == "negative":
revResult[t] = 0
elif u[1] == "positive":
revResult[t] = 1
else:
revResult[t] = 2
# print(docSentiment)
Senti = list(docSentiment.values())
if Senti.count(0) > Senti.count(1):
overall = 0 # "Negative"
elif Senti.count(1) > Senti.count(0):
overall = 1 # "Positive"
else:
overall = 2 # "Moderate"
if len(revResult.items()) <= 10:
docResult = {
"Product_Sentiment": overall,
"reviews": docSentiment,
"tags": revResult
}
else:
docResult = {
"Product_Sentiment": overall,
"reviews": docSentiment,
"tags": dict(itertools.islice(revResult.items(), 10))
}
return docResult
def tagger(url, client):
tags = []
# print("===== Tag an image - remote =====")
tags_result_remote = client.tag_image(url)
if len(tags_result_remote.tags) != 0:
for tag in tags_result_remote.tags:
            if tag.confidence >= 0.45:  # at least 45% confidence score
tags.append(tag.name)
return tags
def url_cleaner(url):
id = url[49:].split(".")[0]
return (url[:49] + id + ".jpg")
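# Note (added for clarity): url_cleaner assumes the image id starts at a fixed offset
# (character 49) of the URL and rewrites the extension to .jpg; URLs with a different
# prefix length will not be cleaned correctly.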
@app.post('/image', status_code=status.HTTP_201_CREATED,
responses={
201: {
"description": "Image Analyzer",
"content": {
"application/json": {
"example": {
"Images": [
"Link for 1st Image",
"Link for 2nd Image",
"Link for nth Image"
]
}
}
},
},
},
)
async def predict_image(data: Vision):
s_tags_set = set()
good_images = []
c_tags = []
computervision_client = ComputerVisionClient(ENDPOINT, CognitiveServicesCredentials(KEY))
seller_img_resized = []
for ix in data.seller_img:
ix = url_cleaner(ix)
seller_img_resized.append(ix)
s_tags = tagger(ix, computervision_client)
for iw in s_tags:
iw = iw.lower()
s_tags_set.add(iw)
for iy in data.customer_img:
if "jpg" in iy:
iy = url_cleaner(iy)
c_tags = tagger(iy, computervision_client)
for iz in c_tags:
iz = iz.lower()
if iz in s_tags_set:
good_images.append(iy) # iy is image link
break
if len(good_images) == 0:
good_images = seller_img_resized
docResult = {
"Images": good_images
}
return docResult
| 2.25
| 2
|
src/stackoverflow/58862981/main.py
|
mrdulin/python-codelab
| 0
|
12780299
|
<filename>src/stackoverflow/58862981/main.py
class AADatabase:
@classmethod
def is_primary(cls):
return False
@classmethod
def run(cls):
return cls.is_primary()
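# Illustrative usage sketch (the subclass below is an assumption added to show the
# classmethod dispatch this snippet demonstrates; it is not part of the original answer):
class PrimaryDatabase(AADatabase):
    @classmethod
    def is_primary(cls):
        return True

print(AADatabase.run())       # False
print(PrimaryDatabase.run())  # True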
| 1.726563
| 2
|
Breast Cancer Prediction by Logistic Regression/cancer_prediction.py
|
parakh-gupta/Machine-Learning-
| 0
|
12780300
|
# Importing the libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Importing our cancer dataset
dataset = pd.read_csv('breast_cancer_dataset.csv')
X = dataset.iloc[:, 1:9].values
Y = dataset.iloc[:, 9].values
# Encoding categorical data values
from sklearn.preprocessing import LabelEncoder
labelencoder_Y = LabelEncoder()
Y = labelencoder_Y.fit_transform(Y)
# Feature Scaling (fit the scaler and actually apply the transform to X)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X = sc.fit_transform(X)
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.25, random_state = 0)
# Fitting Logistic Regression to the Training set
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression( solver='lbfgs', max_iter=500)
classifier.fit(X_train, Y_train)
Y_pred = classifier.predict(X_test)
from sklearn.metrics import confusion_matrix, accuracy_score
cm = confusion_matrix(Y_test, Y_pred)
print(cm)
print("Accuracy score on the training set")
print(accuracy_score(Y_train, classifier.predict(X_train))*100)
print("Accuracy score on the test set")
print(accuracy_score(Y_test, Y_pred)*100)
| 3.484375
| 3
|
sichu/cabinet/backends.py
|
ax003d/sichu_web
| 55
|
12780301
|
<filename>sichu/cabinet/backends.py
import models
class WeiboBackend(object):
supports_object_permissions = False
supports_anonymous_user = True
supports_inactive_user = True
def authenticate(self, wid=None):
try:
wu = models.WeiboUser.objects.get(uid=wid)
return wu.user
except models.WeiboUser.DoesNotExist:
return None
def get_user(self, user_id):
try:
return models.User.objects.get(pk=user_id)
except models.User.DoesNotExist:
return None
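# Note (assumption about project wiring, added for clarity): Django only consults this
# backend if it is listed in settings.AUTHENTICATION_BACKENDS, e.g.
# 'cabinet.backends.WeiboBackend'; authenticate(wid=...) is then called with a Weibo uid.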
| 2.234375
| 2
|
examples/replicated_setups/prune/prep_data_prune.py
|
Jacobe2169/EvalNE
| 92
|
12780302
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: <NAME>
# Contact: <EMAIL>
# Date: 18/12/2018
# This code preprocessed the Facebook wall post and the Webspam datasets in order to produce edgelists
# which can be then used to replicate the paper experiments using EvalNE.
from __future__ import division
import os
from sys import argv
import networkx as nx
from evalne.utils import preprocess as pp
def main():
# Check cmd args
if len(argv) != 3:
print("ERROR: wrong number of parameters")
print("Usage: prep_data_prune.py <facebook_path> <webspam_path>")
exit(-1)
# Extract the dataset names and paths
fb_path, fb_name = os.path.split(argv[1])
ws_path, ws_name = os.path.split(argv[2])
# Preprocess FB graph
G1 = prep_fb(argv[1])
# Store FB graph to a file
pp.save_graph(G1, output_path=fb_path + "/prep_graph_slfloops.edgelist", delimiter=',', write_stats=True)
# Preprocess WS graph
G2 = prep_ws(argv[2])
# Store preprocessed graph to a file
pp.save_graph(G2, output_path=ws_path + "/prep_graph_slfloops.edgelist", delimiter=',', write_stats=True)
print("Preprocessing finished.")
def prep_fb(inpath):
"""
Preprocess facebook wall post graph.
"""
# Load a graph
G = pp.load_graph(inpath, delimiter='\t', comments='#', directed=True)
    # The FB graph is stored as destination, origin so it needs to be reversed
G = G.reverse()
# Preprocess the graph
G, ids = pp.prep_graph(G, relabel=True, del_self_loops=False)
# Return the preprocessed graph
return G
def prep_ws(inpath):
"""
Preprocess web spam graph.
"""
# Create an empty digraph
G = nx.DiGraph()
# Read the file and create the graph
src = 0
f = open(inpath, 'r')
for line in f:
if src != 0:
arr = line.split()
for dst in arr:
dst_id = int(dst.split(':')[0])
# We consider the graph unweighted
G.add_edge(src, dst_id)
src += 1
# G.add_node(src-2)
# Preprocess the graph
G, ids = pp.prep_graph(G, relabel=True, del_self_loops=False)
# Return the preprocessed graph
return G
if __name__ == "__main__":
main()
| 2.6875
| 3
|
celery/utils/coroutine.py
|
amplify-education/celery
| 0
|
12780303
|
from __future__ import absolute_import
from functools import wraps
from Queue import Queue
from celery.utils import cached_property
def coroutine(fun):
"""Decorator that turns a generator into a coroutine that is
started automatically, and that can send values back to the caller.
**Example coroutine that returns values to caller**::
@coroutine
def adder(self):
while 1:
x, y = (yield)
self.give(x + y)
>>> c = adder()
# call sends value and returns the result.
>>> c.call(4, 4)
8
# or you can send the value and get the result later.
>>> c.send(4, 4)
>>> c.get()
8
**Example sink (input-only coroutine)**::
@coroutine
def uniq():
seen = set()
while 1:
line = (yield)
if line not in seen:
seen.add(line)
print(line)
>>> u = uniq()
>>> [u.send(l) for l in [1, 2, 2, 3]]
[1, 2, 3]
**Example chaining coroutines**::
@coroutine
def uniq(callback):
seen = set()
while 1:
line = (yield)
if line not in seen:
callback.send(line)
seen.add(line)
@coroutine
def uppercaser(callback):
while 1:
line = (yield)
callback.send(str(line).upper())
@coroutine
def printer():
while 1:
line = (yield)
print(line)
>>> pipe = uniq(uppercaser(printer()))
>>> for line in file("AUTHORS").readlines():
pipe.send(line)
"""
@wraps(fun)
def start(*args, **kwargs):
return Coroutine.start_from(fun, *args, **kwargs)
return start
class Coroutine(object):
_gen = None
started = False
def bind(self, generator):
self._gen = generator
def _next(self):
return self._gen.next()
next = __next__ = _next
def start(self):
if self.started:
raise ValueError("coroutine already started")
self.next()
self.started = True
return self
def send1(self, value):
return self._gen.send(value)
def call1(self, value, timeout=None):
self.send1(value)
return self.get(timeout=timeout)
def send(self, *args):
return self._gen.send(args)
def call(self, *args, **opts):
self.send(*args)
return self.get(**opts)
@classmethod
def start_from(cls, fun, *args, **kwargs):
coro = cls()
coro.bind(fun(coro, *args, **kwargs))
return coro.start()
@cached_property
def __output__(self):
return Queue()
@property
def give(self):
return self.__output__.put_nowait
@property
def get(self):
return self.__output__.get
if __name__ == "__main__":
@coroutine
def adder(self):
while 1:
x, y = (yield)
self.give(x + y)
x = adder()
for i in xrange(10):
print(x.call(i, i))
| 3.515625
| 4
|
src/nexpy/api/frills/models/pdfdecay.py
|
nexpy/nexpy
| 36
|
12780304
|
import numpy as np
from lmfit.model import Model
class PDFdecayModel(Model):
r"""A model to describe the product of a decaying exponential and a Gaussian
with three parameters: ``amplitude``, ``xi``, and ``sigma``
.. math::
f(x; A, \xi, \sigma) = A e^{[-{|x|}/\xi]} e^{[{-{x^2}/{{2\sigma}^2}}]}
where the parameter ``amplitude`` corresponds to :math:`A`, ``xi`` to
:math:`\xi`, and ``sigma`` to :math:`\sigma`.
"""
def __init__(self, **kwargs):
def pdfdecay(x, amplitude=1.0, xi=1.0, sigma=1.0):
return amplitude * np.exp(-abs(x)/xi) * np.exp(-x**2/(2*sigma**2))
super().__init__(pdfdecay, **kwargs)
def guess(self, data, x=None, negative=False, **kwargs):
"""Estimate initial model parameter values from data."""
sigma = np.sqrt(np.fabs((x**2*data).sum() / data.sum()))
return self.make_params(amplitude=data.max(), xi=sigma, sigma=sigma)
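# Illustrative usage sketch (assumes `x` and `data` are 1-D numpy arrays; the names are
# placeholders, not part of the original module):
#   model = PDFdecayModel()
#   params = model.guess(data, x=x)
#   result = model.fit(data, params, x=x)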
| 3.078125
| 3
|
ddsp/spectral_ops_test.py
|
vvolhejn/ddsp
| 0
|
12780305
|
<reponame>vvolhejn/ddsp
# Copyright 2022 The DDSP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ddsp.losses."""
from absl.testing import parameterized
from ddsp import spectral_ops
from ddsp.test_util import gen_np_sinusoid
import numpy as np
import tensorflow.compat.v2 as tf
class STFTTest(tf.test.TestCase):
def test_tf_and_np_are_consistent(self):
amp = 1e-2
audio = amp * (np.random.rand(64000).astype(np.float32) * 2.0 - 1.0)
frame_size = 2048
hop_size = 128
overlap = 1.0 - float(hop_size) / frame_size
pad_end = True
s_np = spectral_ops.stft_np(
audio, frame_size=frame_size, overlap=overlap, pad_end=pad_end)
s_tf = spectral_ops.stft(
audio, frame_size=frame_size, overlap=overlap, pad_end=pad_end)
# TODO(jesseengel): The phase comes out a little different, figure out why.
self.assertAllClose(np.abs(s_np), np.abs(s_tf), rtol=1e-3, atol=1e-3)
class LoudnessTest(tf.test.TestCase):
def test_tf_and_np_are_consistent(self):
amp = 1e-2
audio = amp * (np.random.rand(64000).astype(np.float32) * 2.0 - 1.0)
frame_size = 2048
frame_rate = 250
ld_tf = spectral_ops.compute_loudness(
audio, n_fft=frame_size, frame_rate=frame_rate, use_tf=True)
ld_np = spectral_ops.compute_loudness(
audio, n_fft=frame_size, frame_rate=frame_rate, use_tf=False)
self.assertAllClose(np.abs(ld_np), np.abs(ld_tf), rtol=1e-3, atol=1e-3)
class PadOrTrimVectorToExpectedLengthTest(parameterized.TestCase,
tf.test.TestCase):
@parameterized.named_parameters(
('np_1d', False, 1),
('np_2d', False, 2),
('tf_1d', True, 1),
('tf_2d', True, 2),
)
def test_pad_or_trim_vector_to_expected_length(self, use_tf, num_dims):
vector_len = 10
padded_vector_expected_len = 15
trimmed_vector_expected_len = 4
# Generate target vectors for testing
vector = np.ones(vector_len) + np.random.uniform()
num_pad = padded_vector_expected_len - vector_len
target_padded = np.concatenate([vector, np.zeros(num_pad)])
target_trimmed = vector[:trimmed_vector_expected_len]
# Make a batch of target vectors
if num_dims > 1:
batch_size = 16
vector = np.tile(vector, (batch_size, 1))
target_padded = np.tile(target_padded, (batch_size, 1))
target_trimmed = np.tile(target_trimmed, (batch_size, 1))
vector_padded = spectral_ops.pad_or_trim_to_expected_length(
vector, padded_vector_expected_len, use_tf=use_tf)
    vector_trimmed = spectral_ops.pad_or_trim_to_expected_length(
        vector, trimmed_vector_expected_len, use_tf=use_tf)
    self.assertAllClose(target_padded, vector_padded)
    self.assertAllClose(target_trimmed, vector_trimmed)
class ComputeFeaturesTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
"""Creates some common default values for the test sinusoid."""
super().setUp()
self.amp = 0.75
self.frequency = 440.0
self.frame_rate = 250
self.frame_size = 512
def expected_f0_length(self, audio, padding):
n_t = audio.shape[-1]
frame_size = spectral_ops.CREPE_FRAME_SIZE
hop_size = int(16000 // self.frame_rate)
expected_len, _ = spectral_ops.get_framed_lengths(
n_t, frame_size, hop_size, padding)
return expected_len
def expected_db_length(self, audio, sr, padding):
n_t = audio.shape[-1]
hop_size = int(sr // self.frame_rate)
expected_len, _ = spectral_ops.get_framed_lengths(
n_t, self.frame_size, hop_size, padding)
return expected_len
@parameterized.named_parameters(
('same_.21secs', 'same', .21),
('same_.4secs', 'same', .4),
('center_.21secs', 'center', .21),
('center_.4secs', 'center', .4),
('valid_.21secs', 'valid', .21),
('valid_.4secs', 'valid', .4),
)
def test_compute_f0(self, padding, audio_len_sec):
"""Ensure that compute_f0 (crepe) has expected output shape."""
sr = 16000
audio_sin = gen_np_sinusoid(self.frequency, self.amp, sr, audio_len_sec)
expected_len = self.expected_f0_length(audio_sin, padding)
f0_hz, f0_confidence = spectral_ops.compute_f0(
audio_sin, self.frame_rate, viterbi=True, padding=padding)
self.assertLen(f0_hz, expected_len)
self.assertLen(f0_confidence, expected_len)
self.assertTrue(np.all(np.isfinite(f0_hz)))
self.assertTrue(np.all(np.isfinite(f0_confidence)))
def test_batch_compute_db(self):
"""Ensure that compute_(loudness/power) can work on a batch."""
batch_size = 2
sample_rate = 16000
audio_len_sec = 0.21
padding = 'same'
audio_sin = gen_np_sinusoid(self.frequency, self.amp, sample_rate,
audio_len_sec)
expected_len = self.expected_db_length(audio_sin, sample_rate, padding)
audio_batch = tf.tile(audio_sin[None, :], [batch_size, 1])
loudness = spectral_ops.compute_loudness(
audio_batch, sample_rate, self.frame_rate, self.frame_size,
padding=padding)
power = spectral_ops.compute_power(
audio_batch, sample_rate, self.frame_rate, self.frame_size,
padding=padding)
self.assertLen(loudness.shape, 2)
self.assertLen(power.shape, 2)
self.assertEqual(batch_size, loudness.shape[0])
self.assertEqual(batch_size, power.shape[0])
self.assertEqual(expected_len, loudness.shape[1])
self.assertEqual(expected_len, power.shape[1])
def test_compute_loudness_tf_np(self):
"""Ensure that compute_loudness is the same output for np and tf."""
sample_rate = 16000
audio_len_sec = 0.21
audio_sin = gen_np_sinusoid(self.frequency, self.amp, sample_rate,
audio_len_sec)
loudness_tf = spectral_ops.compute_loudness(
audio_sin, sample_rate, self.frame_rate, self.frame_size, use_tf=True)
loudness_np = spectral_ops.compute_loudness(
audio_sin, sample_rate, self.frame_rate, self.frame_size, use_tf=False)
# Allow tolerance within 1dB
self.assertAllClose(loudness_tf.numpy(), loudness_np, atol=1, rtol=1)
@parameterized.named_parameters(
('16k_.21secs', 16000, .21),
('24k_.21secs', 24000, .21),
('44.1k_.21secs', 44100, .21),
('16k_.4secs', 16000, .4),
('24k_.4secs', 24000, .4),
('44.1k_.4secs', 44100, .4),
)
def test_compute_loudness(self, sample_rate, audio_len_sec):
"""Ensure that compute_loudness has expected output shape."""
padding = 'center'
audio_sin = gen_np_sinusoid(self.frequency, self.amp, sample_rate,
audio_len_sec)
expected_len = self.expected_db_length(audio_sin, sample_rate, padding)
loudness = spectral_ops.compute_loudness(
audio_sin, sample_rate, self.frame_rate, self.frame_size,
padding=padding)
self.assertLen(loudness, expected_len)
self.assertTrue(np.all(np.isfinite(loudness)))
@parameterized.named_parameters(
('same', 'same'),
('valid', 'valid'),
('center', 'center'),
)
def test_compute_loudness_padding(self, padding):
"""Ensure that compute_loudness works with different paddings."""
sample_rate = 16000
audio_len_sec = 0.21
audio_sin = gen_np_sinusoid(self.frequency, self.amp, sample_rate,
audio_len_sec)
expected_len = self.expected_db_length(audio_sin, sample_rate, padding)
loudness = spectral_ops.compute_loudness(
audio_sin, sample_rate, self.frame_rate, self.frame_size,
padding=padding)
self.assertLen(loudness, expected_len)
self.assertTrue(np.all(np.isfinite(loudness)))
@parameterized.named_parameters(
('16k_.21secs', 16000, .21),
('24k_.21secs', 24000, .21),
('44.1k_.21secs', 44100, .21),
('16k_.4secs', 16000, .4),
('24k_.4secs', 24000, .4),
('44.1k_.4secs', 44100, .4),
)
def test_compute_rms_energy(self, sample_rate, audio_len_sec):
"""Ensure that compute_rms_energy has expected output shape."""
padding = 'center'
audio_sin = gen_np_sinusoid(self.frequency, self.amp, sample_rate,
audio_len_sec)
expected_len = self.expected_db_length(audio_sin, sample_rate, padding)
rms_energy = spectral_ops.compute_rms_energy(
audio_sin, sample_rate, self.frame_rate, self.frame_size,
padding=padding)
self.assertLen(rms_energy, expected_len)
self.assertTrue(np.all(np.isfinite(rms_energy)))
@parameterized.named_parameters(
('same', 'same'),
('valid', 'valid'),
('center', 'center'),
)
def test_compute_power_padding(self, padding):
"""Ensure that compute_power (-> +rms) work with different paddings."""
sample_rate = 16000
audio_len_sec = 0.21
audio_sin = gen_np_sinusoid(self.frequency, self.amp, sample_rate,
audio_len_sec)
expected_len = self.expected_db_length(audio_sin, sample_rate, padding)
power = spectral_ops.compute_power(
audio_sin, sample_rate, self.frame_rate, self.frame_size,
padding=padding)
self.assertLen(power, expected_len)
self.assertTrue(np.all(np.isfinite(power)))
class PadTest(parameterized.TestCase, tf.test.TestCase):
def test_pad_end_stft_is_consistent(self):
"""Ensure that spectral_ops.pad('same') is same as stft(pad_end=True)."""
frame_size = 200
hop_size = 180
audio = tf.random.normal([1, 1000])
padded_audio = spectral_ops.pad(audio, frame_size, hop_size, 'same')
s_pad_end = tf.signal.stft(audio, frame_size, hop_size, pad_end=True)
s_same = tf.signal.stft(padded_audio, frame_size, hop_size, pad_end=False)
self.assertAllClose(np.abs(s_pad_end), np.abs(s_same), rtol=1e-3, atol=1e-3)
@parameterized.named_parameters(
('valid_odd', 'valid', 180),
('same_odd', 'same', 180),
('center_odd', 'center', 180),
('valid_even', 'valid', 200),
('same_even', 'same', 200),
('center_even', 'center', 200),
)
def test_padding_shapes_are_correct(self, padding, hop_size):
"""Ensure that pad() and get_framed_lengths() have correct shapes."""
frame_size = 200
n_t = 1000
audio = tf.random.normal([1, n_t])
padded_audio = spectral_ops.pad(audio, frame_size, hop_size, padding)
n_t_pad = padded_audio.shape[1]
frames = tf.signal.frame(padded_audio, frame_size, hop_size)
n_frames = frames.shape[1]
exp_n_frames, exp_n_t_pad = spectral_ops.get_framed_lengths(
n_t, frame_size, hop_size, padding)
self.assertEqual(n_frames, exp_n_frames)
self.assertEqual(n_t_pad, exp_n_t_pad)
if __name__ == '__main__':
tf.test.main()
| 1.90625
| 2
|
day 9/multiple_linear_regression.py
|
JackRab/10-days-of-statistics
| 0
|
12780306
|
<reponame>JackRab/10-days-of-statistics<filename>day 9/multiple_linear_regression.py
"""
Link:
https://www.hackerrank.com/challenges/s10-multiple-linear-regression/problem
Objective
In this challenge, we practice using multiple linear regression.
"""
import numpy as np
def find_beta(X, Y):
"""
    Find the coefficient vector beta given matrix X (independent variables) and Y (dependent variable)
"""
return np.dot( np.linalg.inv(np.dot(X.T, X)), np.dot(X.T, Y) )
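# Note (added for clarity): this is the closed-form ordinary-least-squares solution of the
# normal equations, beta = (X^T X)^{-1} X^T Y; it assumes X^T X is invertible.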
n, m = [int(s) for s in input().strip().split()]
X_list =[]
Y_list = []
for i in range(m):
xy = [float(s) for s in input().strip().split()]
x = [1.0] + xy[:n]
y = xy[n]
X_list.append(x)
Y_list.append(y)
X = np.array(X_list).reshape(m, n+1)
Y = np.array(Y_list).reshape(m, 1)
beta = find_beta(X, Y)
q = int(input())
X_p_list = []
for i in range(q):
x = [1.0] + [float(s) for s in input().strip().split()]
X_p_list.append(x)
X_p = np.array(X_p_list).reshape(q, n+1)
Y_p = np.dot(X_p, beta)
for y in Y_p:
print(round(y[0], 2))
| 4.03125
| 4
|
app/sendmail/__init__.py
|
csud-reservation/flask-backend
| 1
|
12780307
|
from flask import Blueprint
sendmail = Blueprint('sendmail', __name__, template_folder='templates/sendmail')
from . import views
| 1.351563
| 1
|
aa1_data_util/1_process_zhihu.py
|
sunshinenum/text_classification
| 7,723
|
12780308
|
# -*- coding: utf-8 -*-
import sys
#reload(sys)
#sys.setdefaultencoding('utf8')
#1. Save the question_id -> topic-list mapping into a dictionary: process question_topic_train_set.txt
#from:question_id,topics(topic_id1,topic_id2,topic_id3,topic_id4,topic_id5)
# to:(question_id,topic_id1)
# (question_id,topic_id2)
#read question_topic_train_set.txt
import codecs
#1.################################################################################################################
print("process question_topic_train_set.txt,started...")
q_t='question_topic_train_set.txt'
q_t_file = codecs.open(q_t, 'r', 'utf8')
lines=q_t_file.readlines()
question_topic_dict={}
for i,line in enumerate(lines):
if i%300000==0:
print(i)
#print(line)
question_id,topic_list_string=line.split('\t')
#print(question_id)
#print(topic_list_string)
topic_list=topic_list_string.replace("\n","").split(",")
question_topic_dict[question_id]=topic_list
#for ii,topic in enumerate(topic_list):
# print(ii,topic)
#print("=====================================")
#if i>10:
# print(question_topic_dict)
# break
print("process question_topic_train_set.txt,ended...")
###################################################################################################################
###################################################################################################################
#2. Process questions: build a {question_id: question representation} dictionary; for every question, form a list of strings to represent it.
import codecs
print("process question started11...")
q='question_train_set.txt'
q_file = codecs.open(q, 'r', 'utf8')
q_lines=q_file.readlines()
questionid_words_representation={}
question_representation=[]
length_desc=30
for i,line in enumerate(q_lines):
#print("line:")
#print(line)
element_lists=line.split('\t') #['c324,c39','w305...','c']
question_id=element_lists[0]
#print("question_id:",element_lists[0])
#for i,q_e in enumerate(element_lists):
# print("e:",q_e)
#question_representation=[x for x in element_lists[2].split(",")] #+ #TODO this is only for title's word. no more.
title_words=[x for x in element_lists[2].strip().split(",")][-length_desc:]
#print("title_words:",title_words)
title_c=[x for x in element_lists[1].strip().split(",")][-length_desc:]
#print("title_c:", title_c)
desc_words=[x for x in element_lists[4].strip().split(",")][-length_desc:]
#print("desc_words:", desc_words)
desc_c=[x for x in element_lists[3].strip().split(",")][-length_desc:]
#print("desc_c:", desc_c)
question_representation =title_words+ title_c+desc_words+ desc_c
question_representation=" ".join(question_representation)
#print("question_representation:",question_representation)
#print("question_representation:",question_representation)
questionid_words_representation[question_id]=question_representation
q_file.close()
print("proces question ended2...")
#####################################################################################################################
###################################################################################################################
# 3. Build the training data the model needs, as a list in the form (question representation, topic_id)
# save training data,testing data: question __label__topic_id
import codecs
import random
print("saving traininig data.started1...")
count = 0
train_zhihu = 'train-zhihu6-title-desc.txt'
test_zhihu = 'test-zhihu6-title-desc.txt'
valid_zhihu = 'valid-zhihu6-title-desc.txt'
data_list = []
multi_label_flag=True
def split_list(listt):
random.shuffle(listt)
list_len = len(listt)
train_len = 0.95
valid_len = 0.025
train = listt[0:int(list_len * train_len)]
valid = listt[int(list_len * train_len):int(list_len * (train_len + valid_len))]
test = listt[int(list_len * (train_len + valid_len)):]
return train, valid, test
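# Note (added for clarity): split_list shuffles the data and returns roughly a
# 95% / 2.5% / 2.5% train / validation / test split.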
for question_id, question_representation in questionid_words_representation.items():
# print("===================>")
# print('question_id',question_id)
# print("question_representation:",question_representation)
# get label_id for this question_id by using:question_topic_dict
topic_list = question_topic_dict[question_id]
# print("topic_list:",topic_list)
# if count>5:
# ii=0
# ii/0
if not multi_label_flag:
for topic_id in topic_list:
data_list.append((question_representation, topic_id)) #single-label
else:
data_list.append((question_representation, topic_list)) #multi-label
count = count + 1
# random shuffle list
random.shuffle(data_list)
def write_data_to_file_system(file_name, data):
file = codecs.open(file_name, 'a', 'utf8')
for d in data:
# print(d)
question_representation, topic_id = d
question_representation_ = " ".join(question_representation)
file.write(question_representation_ + " __label__" + str(topic_id) + "\n")
file.close()
def write_data_to_file_system_multilabel(file_name, data):
file = codecs.open(file_name, 'a', 'utf8')
for d in data:
question_representation, topic_id_list = d
topic_id_list_=" ".join(topic_id_list)
file.write(question_representation + " __label__" + str(topic_id_list_) + "\n")
file.close()
train_data, valid_data, test_data = split_list(data_list)
if not multi_label_flag:#single label
write_data_to_file_system(train_zhihu, train_data)
write_data_to_file_system(valid_zhihu, valid_data)
write_data_to_file_system(test_zhihu, test_data)
else:#multi-label
write_data_to_file_system_multilabel(train_zhihu, train_data)
write_data_to_file_system_multilabel(valid_zhihu, valid_data)
write_data_to_file_system_multilabel(test_zhihu, test_data)
print("saving traininig data.ended...")
######################################################################################################################
| 2.703125
| 3
|
model/model.py
|
multimodallearning/hand-gesture-posture-position
| 8
|
12780309
|
<gh_stars>1-10
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.backbones.bps_densenet import BPSDensenet
from model.backbones.cnn3d import VoxNet
from model.backbones.dgcnn import DGCNN_cls
from model.backbones.pointnet import PointNetCls
from model.backbones.pointnet2 import PointNet2
class TwoStreamLSTM(nn.Module):
def __init__(self, cfg):
super(TwoStreamLSTM, self).__init__()
self.fusion_type = cfg.MODEL.FUSION_TYPE
self.input_type = cfg.MODEL.INPUT_COMBINATION
self.num_ts_per_pred = cfg.MODEL.NUM_TIMESTEPS_PER_PRED
self.local_scaler = cfg.MODEL.LOCAL_SCALER
self.global_scaler = cfg.MODEL.GLOBAL_SCALER
num_classes = cfg.MODEL.NUM_CLASSES
local_embed = 512
if cfg.MODEL.LOCAL_BACKBONE == 'pointnet':
self.local_backbone = PointNetCls(embed=local_embed)
elif cfg.MODEL.LOCAL_BACKBONE == 'dgcnn':
self.local_backbone = DGCNN_cls(cfg, final_embed=local_embed)
elif cfg.MODEL.LOCAL_BACKBONE == 'pointnet2':
self.local_backbone = PointNet2(embed=local_embed)
elif cfg.MODEL.LOCAL_BACKBONE == 'bps':
self.local_backbone = BPSDensenet(cfg, local_embed)
else:
raise ValueError
global_embed = 1024
if cfg.MODEL.GLOBAL_BACKBONE == 'pointnet':
self.global_backbone = PointNetCls(embed=global_embed)
elif cfg.MODEL.GLOBAL_BACKBONE == 'dgcnn':
self.global_backbone = DGCNN_cls(cfg, final_embed=global_embed)
elif cfg.MODEL.GLOBAL_BACKBONE == 'bps':
self.global_backbone = BPSDensenet(cfg, global_embed)
elif cfg.MODEL.GLOBAL_BACKBONE == 'voxnet':
self.global_backbone = VoxNet(embed=global_embed)
else:
raise ValueError()
if self.fusion_type in ['no_fusion', 'late_fusion', 'intermediate_fusion']:
self.local_lstm = nn.LSTM(local_embed, 256, 1, batch_first=True)
self.global_lstm = nn.LSTM(global_embed, 256, 1, batch_first=True)
if self.fusion_type == 'no_fusion':
self.local_heads = nn.ModuleList([nn.Dropout(0.5),
nn.Linear(256, num_classes)])
self.global_heads = nn.ModuleList([nn.Dropout(0.5),
nn.Linear(256, num_classes)])
elif self.fusion_type == 'late_fusion':
self.fusion_heads = nn.ModuleList([nn.Dropout(0.5),
nn.Linear(2 * 256, 128),
nn.ReLU(),
nn.BatchNorm1d(128),
nn.Dropout(0.5),
nn.Linear(128, num_classes)])
elif self.fusion_type == 'intermediate_fusion':
self.fusion_lstm = nn.LSTM(2 * 256, 256, 1, batch_first=True)
self.heads = nn.ModuleList([nn.Dropout(0.5),
nn.Linear(256, num_classes)])
else:
raise ValueError()
elif self.fusion_type == 'early_fusion':
if self.input_type == 'local_global':
input_size = local_embed + global_embed
elif self.input_type == 'local':
input_size = local_embed
elif self.input_type == 'global':
input_size = global_embed
else:
raise ValueError()
self.lstm = nn.LSTM(input_size, 256, 1, batch_first=True)
self.heads = nn.ModuleList([nn.Dropout(0.5),
nn.Linear(256, num_classes)])
else:
raise ValueError()
def forward(self, x):
x_loc, x_glob = x
if self.global_scaler > 0:
x_glob /= self.global_scaler
else:
dists = torch.sqrt(torch.sum(torch.square(x_glob), dim=3))
dists_per_T = torch.max(dists, dim=2, keepdim=True)[0]
dists_per_B = torch.max(dists_per_T, dim=1, keepdim=True)[0]
x_glob /= dists_per_B.unsqueeze(-1)
if self.local_scaler > 0:
x_loc /= self.local_scaler
else:
dists = torch.sqrt(torch.sum(torch.square(x_loc), dim=3))
dists_per_T = torch.max(dists, dim=2, keepdim=True)[0]
x_loc /= dists_per_T.unsqueeze(-1)
if self.training:
x_loc = x_loc[:, :, torch.randperm(x_loc.shape[2])[:128], :]
x_glob = x_glob[:, :, torch.randperm(x_glob.shape[2])[:128], :]
else:
x_loc = x_loc[:, :, ::x_loc.shape[2] // 128, :]
x_glob = x_glob[:, :, ::x_glob.shape[2] // 128, :]
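        # Note (added for clarity): training randomly subsamples 128 points per cloud,
        # while evaluation uses deterministic strided subsampling to roughly the same size.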
# GLOBAL ENCODING
if 'global' in self.input_type:
B, T, N, D = x_glob.size()
x_glob = x_glob.view(-1, N, D)
x_glob = x_glob.transpose(1, 2)
x_glob = self.global_backbone(x_glob)
x_glob = x_glob.view(B, T, -1)
# LOCAL ENCODING
if 'local' in self.input_type:
B, T, N, D = x_loc.size()
x_loc = x_loc.view(-1, N, D)
x_loc = x_loc.transpose(1, 2)
x_loc = self.local_backbone(x_loc)
x_loc = x_loc.view(B, T, -1)
if self.fusion_type == 'early_fusion':
if self.input_type == 'local_global':
x = torch.cat((x_glob, x_loc), dim=2)
elif self.input_type == 'local':
x = x_loc
elif self.input_type == 'global':
x = x_glob
out, (ht, ct) = self.lstm(x)
if self.num_ts_per_pred == 1:
x = ht[-1]
elif self.num_ts_per_pred > 1:
x = out[:, -self.num_ts_per_pred:, :]
x = x.reshape(-1, x.size(2))
for layer in self.heads:
x = layer(x)
return x
else:
out_glob, (ht_glob, ct_glob) = self.global_lstm(x_glob)
out_loc, (ht_loc, ct_loc) = self.local_lstm(x_loc)
if self.fusion_type == 'intermediate_fusion':
x = torch.cat((out_glob, out_loc), dim=2)
out, (ht, ct) = self.fusion_lstm(x)
if self.num_ts_per_pred == 1:
x = ht[-1]
elif self.num_ts_per_pred > 1:
x = out[:, -self.num_ts_per_pred:, :]
x = x.reshape(-1, x.size(2))
for layer in self.heads:
x = layer(x)
return x
else:
if self.num_ts_per_pred == 1:
x_glob = ht_glob[-1]
x_loc = ht_loc[-1]
elif self.num_ts_per_pred > 1:
x_glob = out_glob[:, -self.num_ts_per_pred:, :]
x_glob = x_glob.reshape(-1, x_glob.size(2))
x_loc = out_loc[:, -self.num_ts_per_pred:, :]
x_loc = x_loc.reshape(-1, x_loc.size(2))
if self.fusion_type == 'late_fusion':
x = torch.cat((x_loc, x_glob), dim=1)
for layer in self.fusion_heads:
x = layer(x)
return x
elif self.fusion_type == 'no_fusion':
for layer in self.global_heads:
x_glob = layer(x_glob)
for layer in self.local_heads:
x_loc = layer(x_loc)
x = (F.log_softmax(x_glob, dim=1) + F.log_softmax(x_loc, dim=1)) / 2
return x
| 1.914063
| 2
|
Level/level_one.py
|
indiVar0508/Banania
| 0
|
12780310
|
import pygame
import numpy as np
from collections import OrderedDict
from Utility.shape import Rectangle
from Utility import ui
from Level.generic_level import GenericLevel
class Level(GenericLevel):
def __init__(self, player, **kwargs):
super().__init__(**kwargs)
self.player = player
self.hurdle_cords = [
(130, 30, 30, self.gameDimension[1] - 60),
(200, 0, 30, self.gameDimension[1] // 2 - 20),
(200, self.gameDimension[1] // 2 + 20, 30, self.gameDimension[1] // 2 - 15)
]
self.hurdle_cords.append((160, self.gameDimension[1] // 2 + 20, 40, 40))
for i in range(1, 3):
self.hurdle_cords.append((130 + i * 180, 30, 30, self.gameDimension[1] - 60))
self.hurdle_cords.append((130 + i * 180 + 70, 0, 30, self.gameDimension[1] // 2 - 20))
self.hurdle_cords.append((130 + i * 180 + 70, self.gameDimension[1] // 2 + 20,
30, self.gameDimension[1] // 2 - 15))
        # TODO: avoid hard-coding these coordinates.
self.hurdle_cords.append((340, 140, 40, 40))
self.hurdle_cords.append((520, 220, 40, 40))
self.hurdle = [Rectangle(x, y, l, w, (190, 220, 220)) for x, y, w, l in self.hurdle_cords]
self.food_exists = True
self.food_cords = [Rectangle(x=640, y=190, length=30, width=30, color=None)]
self.food = pygame.transform.scale(pygame.image.load(r"Resources/Food/banana.png"), (self.food_cords[0].width,
self.food_cords[0].length))
def draw_hurdle(self):
for hurdle in self.hurdle:
pygame.draw.rect(self.gameDisplay, hurdle.color, (hurdle.x, hurdle.y, hurdle.width, hurdle.length))
size = hurdle.length // hurdle.width
for y in range(size):
pygame.draw.line(self.gameDisplay, self.grid_lines, (hurdle.x, hurdle.y + y*hurdle.width),
(hurdle.x + hurdle.width, hurdle.y + y*hurdle.width))
pygame.draw.circle(self.gameDisplay, (220, 50, 50), (hurdle.x + hurdle.width // 2, hurdle.y + y*hurdle.width + hurdle.width // 2), 3)
                pygame.draw.circle(self.gameDisplay, (220, 239, 0), (hurdle.x + hurdle.width // 2, hurdle.y + y * hurdle.width + hurdle.width // 2),
                                   1)
def show_player(self, draw=True):
if draw:
pygame.draw.rect(self.gameDisplay, self.player.color,
(self.player.x, self.player.y, self.player.length, self.player.length))
return
blit_img = self.player.characterDefault
if not (self.player.left or self.player.right or self.player.up or self.player.down):
blit_img = self.player.characterDefault
self.player.r_img = self.player.u_img = self.player.d_img = self.player.l_img = 0
elif self.player.left:
blit_img = self.player.movements['Left'][self.player.l_img]
self.player.l_img = (self.player.l_img + 1) % 4
self.player.r_img = self.player.u_img = self.player.d_img = 0
elif self.player.right:
blit_img = self.player.movements['Right'][self.player.r_img]
self.player.r_img = (self.player.r_img + 1) % 4
self.player.l_img = self.player.u_img = self.player.d_img = 0
elif self.player.up:
blit_img = self.player.movements['Up'][self.player.u_img]
self.player.u_img = (self.player.u_img + 1) % 4
self.player.r_img = self.player.l_img = self.player.d_img = 0
elif self.player.down:
blit_img = self.player.movements['Down'][self.player.d_img]
self.player.d_img = (self.player.d_img + 1) % 4
self.player.r_img = self.player.u_img = self.player.l_img = 0
self.gameDisplay.blit(blit_img, (self.player.x, self.player.y))
def draw_food(self):
if self.food_exists:
self.gameDisplay.blit(self.food, (self.food_cords[0].x, self.food_cords[0].y))
def show(self, *args):
self.gameDisplay.fill(self.background)
self.draw_grids(*args)
self.draw_hurdle()
self.draw_food()
self.show_player(draw=self.player.draw)
def pause_game(self, *args):
resume = False
while not resume:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_s:
resume = True
self.gameDisplay.fill((255, 255, 255))
ui.message(gameDisplay=self.gameDisplay,msg="Press, S to Start", x=self.gameDimension[0] // 2 - 50, y=self.gameDimension[1] // 2)
pygame.display.update()
self.clock.tick(30)
def dynamics(self, *args):
self.player.move()
def wall_logic(self):
if self.player.x < 0:
self.player.x = 0
self.player.left = False
elif self.player.x + self.player.width > self.gameDimension[0]:
self.player.x = self.gameDimension[0] - self.player.width
self.player.right = False
if self.player.y < 0:
self.player.y = 0
self.player.up = False
elif self.player.y + self.player.length > self.gameDimension[1]:
self.player.y = self.gameDimension[1] - self.player.length
self.player.down = False
def hurdle_contact(self, blocks):
for hurdle in blocks:
if hurdle.x > self.player.x + self.player.width:
continue
if ((hurdle.x < self.player.x + self.player.width < hurdle.x + hurdle.width)
or (hurdle.x < self.player.x < hurdle.x + hurdle.width)
or (hurdle.x < self.player.x + self.player.width // 2 < hurdle.x + hurdle.width)) \
and \
((hurdle.y < self.player.y + self.player.length < hurdle.y + hurdle.length)
or (hurdle.y < self.player.y < hurdle.y + hurdle.length)
or (hurdle.y < self.player.y + self.player.length // 2 < hurdle.y + hurdle.length)):
return hurdle
return None
def hurdle_logic(self):
cord = self.hurdle_contact(self.hurdle)
if cord is None:
return
if self.player.right:
self.player.x = cord.x - self.player.width
self.player.right = False
elif self.player.left:
self.player.x = cord.x + cord.width
self.player.left = False
if self.player.down:
self.player.y = cord.y - self.player.length
self.player.down = False
elif self.player.up:
self.player.y = cord.y + cord.length
self.player.up = False
def food_logic(self):
if self.hurdle_contact(self.food_cords):
self.player.gotFood = True
self.food_exists = False
self.player.characterDefault = self.player.winDefault
self.player.left = self.player.right = self.player.up = self.player.down = False
def collision(self, *args):
self.wall_logic()
self.hurdle_logic()
self.food_logic()
def have_won(self, *args):
self.show(*args)
ui.message(gameDisplay=self.gameDisplay,msg="Yeah.!", x=self.gameDimension[0] // 2 - 50, y=self.gameDimension[1] // 2 - 50,
color=(100, 200, 100), font_size=50)
pygame.display.flip()
def have_died(self, *args):
pass
def start_game(self, *args):
# self.pause_game()
while self.player.alive and not self.player.gotFood:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
self.player.left = True
self.player.right = self.player.up = self.player.down = False
if event.key == pygame.K_RIGHT:
self.player.right = True
self.player.left = self.player.up = self.player.down = False
if event.key == pygame.K_UP:
self.player.up = True
self.player.right = self.player.left = self.player.down = False
if event.key == pygame.K_DOWN:
self.player.down = True
self.player.right = self.player.up = self.player.left = False
if event.key == pygame.K_p:
self.pause_game()
self.show()
self.dynamics()
self.collision()
pygame.display.flip()
if self.screen_capt:
self.read_screen(stream=self.stream, gray=self.gray, maxi=self.maxi, store=self.store,
player=self.player, vision_limit=50)
self.clock.tick(30)
if self.player.alive and self.player.gotFood:
self.have_won()
pygame.time.wait(2000)
class Level_PathFinding(Level):
def __init__(self, player, **kwargs):
super().__init__(player, **kwargs)
self.wall = np.zeros((self.gameDimension[1] // 10, self.gameDimension[0] // 10))
for hurdle in self.hurdle:
for x in range(hurdle.x // 10, hurdle.x // 10 + hurdle.width // 10):
for y in range(hurdle.y // 10, hurdle.y // 10 + hurdle.length // 10):
self.wall[y, x] = 1
self.wall[y - 1, x] = 1
self.wall[y - 2, x] = 1
# self.wall[y - 3, x] = 1
self.wall[y, x - 1] = 1
self.wall[y, x - 2] = 1
self.wall[y, x - 3] = 1
self.wall[y - 1, x - 1] = 1
self.wall[y - 2, x - 2] = 1
# self.wall[y - 3, x - 3] = 1
self.f_score = np.full(self.wall.shape, np.inf)
self.g_score = np.zeros(self.wall.shape)
self.not_visited = list()
self.visited = list()
self.neighbour = OrderedDict()
self.came_from = OrderedDict()
        self.cur_idx = None
for i in range(self.wall.shape[0]):
for j in range(self.wall.shape[1]):
self.neighbour[(i, j)] = self.get_neighbours(i, j)
self.start_pos = (self.player.y // 10, self.player.x // 10)
self.end_pos = (self.food_cords[0].y // 10, self.food_cords[0].x // 10)
def get_neighbours(self, i, j):
possible_neighbours = []
if i > 0:
possible_neighbours.append((i-1, j))
if i < self.wall.shape[0] - 1:
possible_neighbours.append((i + 1, j))
if j > 0:
possible_neighbours.append((i, j - 1))
if j < self.wall.shape[1] - 1:
possible_neighbours.append((i, j + 1))
# if i > 0 and j > 0:
# possible_neighbours.append((i-1, j-1))
# if i < self.wall.shape[0] - 1 and j < self.wall.shape[1] - 1:
# possible_neighbours.append((i + 1, j+1))
# if j > 0 and i < self.wall.shape[0] - 1:
# possible_neighbours.append((i+1, j - 1))
# if j < self.wall.shape[1] - 1 and i > 0:
# possible_neighbours.append((i-1, j + 1))
return possible_neighbours
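    # Note (added for clarity): get_neighbours returns the 4-connected grid neighbours of
    # cell (i, j); the commented-out branches would add the diagonal (8-connected) moves.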
def find_path_a_star(self):
self.not_visited += [self.start_pos]
while len(self.not_visited) > 0:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
cur_idx = self.not_visited[0]
for i, j in self.not_visited:
if self.f_score[(i, j)] < self.f_score[cur_idx]:
cur_idx = (i, j)
if cur_idx == self.end_pos:
# pygame.time.wait(3000)
self.cur_idx = cur_idx
return
self.not_visited.remove(cur_idx)
self.visited.append(cur_idx)
for neighbour in self.neighbour[cur_idx]:
if neighbour not in self.visited and self.wall[neighbour] == 0:
estimated_g_score = self.g_score[neighbour] + 10
if neighbour not in self.not_visited:
self.not_visited.append(neighbour)
elif self.g_score[neighbour] < estimated_g_score:
continue
self.g_score[neighbour] = estimated_g_score
# self.f_score[neighbour] = estimated_g_score + (abs(self.end_pos[0] - neighbour[0])*10 +
# abs(self.end_pos[1] - neighbour[1])*10)
self.f_score[neighbour] = estimated_g_score + np.sqrt((self.end_pos[0]*10 - neighbour[0]*10)**2 +
(self.end_pos[1]*10 - neighbour[1]*10)**2)
self.came_from[neighbour] = cur_idx
self.show(cur_idx)
pygame.display.update()
self.clock.tick(30)
print("No Path")
def draw_grids(self, current):
for point in self.not_visited:
pygame.draw.rect(self.gameDisplay, (200, 200, 200), (point[1] * 10, point[0] * 10, 10, 10))
for point in self.visited:
pygame.draw.rect(self.gameDisplay, (120, 120, 120), (point[1] * 10, point[0] * 10, 10, 10))
to_draw = list()
to_draw.append(current)
while current in self.came_from.keys():
current = self.came_from[current]
to_draw.append(current)
for point in to_draw:
pygame.draw.rect(self.gameDisplay, (0, 0, 250), (point[1] * 10, point[0] * 10, 10, 10))
for x in range(0, self.gameDimension[0], 10):
pygame.draw.line(self.gameDisplay, self.grid_lines, (x, 0), (x, self.gameDimension[1]))
for y in range(0, self.gameDimension[1], 10):
pygame.draw.line(self.gameDisplay, self.grid_lines, (0, y), (self.gameDimension[0], y))
# def draw_grids_path(self, current):
# for point in self.not_visited:
# pygame.draw.rect(self.gameDisplay, (0, 200, 0), (point[0] * 10, point[1] * 10, 10, 10))
# for point in self.visited:
# pygame.draw.rect(self.gameDisplay, (255, 0, 0), (point[0] * 10, point[1] * 10, 10, 10))
#
# to_draw = list()
# to_draw.append(current)
# while current in self.came_from.keys():
# current = self.came_from[current]
# to_draw.append(current)
#
# for point in to_draw:
# pygame.draw.rect(self.gameDisplay, (0, 0, 250), (point[0] * 10, point[1] * 10, 10, 10))
def start_game(self):
# self.pause_game()
self.find_path_a_star()
current = self.cur_idx
prev = current
# 0 - l, 1 - r, 2 - u, 3 - d
moves = []
c = 0
while current in self.came_from.keys():
c += 1
current = self.came_from[current]
if current[0] > prev[0] and current[1] == prev[1]:
moves.insert(0, 2)
if current[0] < prev[0] and current[1] == prev[1]:
moves.insert(0, 3)
if current[1] < prev[1] and current[0] == prev[0]:
moves.insert(0, 1)
if current[1] > prev[1] and current[0] == prev[0]:
moves.insert(0, 0)
prev = current
move_idx = 0
while self.player.alive and not self.player.gotFood and move_idx < len(moves):
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_p:
self.pause_game()
if moves[move_idx] == 0:
self.player.left = True
self.player.right = self.player.up = self.player.down = False
if moves[move_idx] == 1:
self.player.right = True
self.player.left = self.player.up = self.player.down = False
if moves[move_idx] == 2:
self.player.up = True
self.player.right = self.player.left = self.player.down = False
if moves[move_idx] == 3:
self.player.down = True
self.player.right = self.player.up = self.player.left = False
self.show(self.cur_idx)
pygame.draw.rect(self.gameDisplay, (200, 250,190), (self.player.x,self.player.y, 10, 10))
self.dynamics()
self.collision()
pygame.display.flip()
if self.screen_capt:
self.read_screen(stream=self.stream, gray=self.gray, maxi=self.maxi, store=self.store,
player=self.player, vision_limit=50)
self.clock.tick(30)
move_idx += 1
if self.player.alive and self.player.gotFood:
self.have_won(self.cur_idx)
pygame.time.wait(5_000)
if __name__ == "__main__":
lvl = Level(None, (600, 350))
lvl.start_game()
| 2.859375
| 3
|
tests/test_packaging.py
|
jayvdb/ubiquerg
| 0
|
12780311
|
<reponame>jayvdb/ubiquerg<filename>tests/test_packaging.py<gh_stars>0
""" Validate what's available directly on the top-level import. """
import pytest
from inspect import isclass, isfunction
__author__ = "<NAME>"
__email__ = "<EMAIL>"
@pytest.mark.parametrize(
["obj_name", "typecheck"],
[("build_cli_extra", isfunction), ("checksum", isfunction), ("size", isfunction),
("expandpath", isfunction), ("is_collection_like", isfunction),
("is_command_callable", isfunction), ("is_url", isfunction),
("powerset", isfunction), ("query_yes_no", isfunction),
("TmpEnv", isclass)])
def test_top_level_exports(obj_name, typecheck):
""" At package level, validate object availability and type. """
import ubiquerg
try:
obj = getattr(ubiquerg, obj_name)
except AttributeError:
pytest.fail("Unavailable on {}: {}".format(ubiquerg.__name__, obj_name))
else:
assert typecheck(obj)
| 2.21875
| 2
|
src/petronia/core/platform/api/font/__init__.py
|
groboclown/petronia
| 19
|
12780312
|
"""
State definitions for supported fonts, font families, and other descriptions.
"""
from .defs import (
FontDefinition,
)
| 0.972656
| 1
|
831. Masking Personal Information.py
|
ttang235/leetcode
| 0
|
12780313
|
<filename>831. Masking Personal Information.py
# https://leetcode.com/contest/weekly-contest-83/problems/masking-personal-information/
class Solution(object):
def maskPII(self, S):
"""
:type S: str
:rtype: str
"""
if '@' in S:
arr = S.split('@')
return arr[0][0].lower() + '*****' + arr[0][-1].lower() + '@' + arr[1].lower()
d = [x for x in S if x.isdigit()]
local = '***-***-' + ''.join(d[-4:])
if len(d) > 10:
return '+' + '*' * (len(d) - 10) + '-' + local
else:
return local
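# Illustrative checks (hypothetical inputs added for clarity, not part of the submission):
#   Solution().maskPII("LeetCode@LeetCode.com")  -> "l*****e@leetcode.com"
#   Solution().maskPII("1(234)567-890")          -> "***-***-7890"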
| 3.296875
| 3
|
wxcloudrun/common/pdfutils.py
|
vandyzhou/wxcloudrun-django
| 0
|
12780314
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2022/1/29 10:35 AM
# @Author: zhoumengjie
# @File : pdfutils.py
import base64
import logging
import math
import os
import time
import pdfplumber
from pyecharts.components import Table
from pyecharts.options import ComponentTitleOpts
from selenium import webdriver
from wxcloudrun.bond.BondUtils import Crawler
from wxcloudrun.bond.PageTemplate import PROJECT_DIR
from wxcloudrun.common import fingerprinter as fp
from wxcloudrun.common import tabledrawer
log = logging.getLogger('log')
crawler = Crawler()
def extract_draw_table(path):
tables = []
with pdfplumber.open(path) as pdf:
pages = pdf.pages
for page in pages:
for table in page.extract_tables():
tables.append(table)
return tables
def get_draw_pdf_table(url_path, bond_name, choose_table_idx:int=None, add_finger_print=False):
file_name = bond_name + '_anno' + '.pdf'
crawler.query_anno_pdf(file_name, url_path)
img_file = bond_name + '_draw' + '.png'
return draw_table(file_name, img_file, bond_name, choose_table_idx, add_finger_print)
def draw_table(pdf_path, img_file, bond_name, choose_table_idx:int=None, add_finger_print=False):
table_data = extract_draw_table(pdf_path)
if table_data is None or len(table_data) == 0:
        log.info('no allotment-result table was found in the pdf')
return False, None
rows = []
if choose_table_idx is not None:
headers = table_data[choose_table_idx][0]
rows = table_data[choose_table_idx][1:]
if len(table_data) == 1:
headers = table_data[0][0]
rows = table_data[0][1:]
if len(table_data) == 2:
headers = table_data[0][0]
        # the two tables share the same header
if headers == table_data[1][0]:
rows = table_data[0][1:] + table_data[1][1:]
else:
rows = table_data[0][1:] + table_data[1][0:]
if len(rows) == 0:
return False, None
    # filter out empty rows
rows = filter(lambda row : is_valid_row(row), rows)
# tabledrawer.draw_table(headers, rows)
pic_base64 = draw_table_with_rows('配售结果', img_file, headers, rows, add_finger_print)
    # delete the downloaded pdf
os.remove(pdf_path)
return True, pic_base64
def draw_table_with_rows(title:str, img_file:str, headers:[], rows:[], add_finger_print=False):
table = Table()
table.add(headers, rows)
table.set_global_opts(title_opts=ComponentTitleOpts(title=title))
render_file_name = title + "_table-screenshot.html"
table.render(render_file_name)
options = webdriver.ChromeOptions()
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument('--headless')
driver = webdriver.Chrome(chrome_options=options)
driver.get("file://" + PROJECT_DIR + '/' + render_file_name)
time.sleep(1)
ele = driver.find_element_by_xpath("//tbody")
ele.screenshot(img_file)
    # add a watermark / fingerprint
if add_finger_print:
fp.add_finger_print(img_file)
with open(img_file, 'rb') as f:
pic_base64 = base64.b64encode(f.read())
    # delete the temporary files
os.remove(img_file)
os.remove(render_file_name)
return pic_base64
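# Note (added for clarity): the table is rendered to a temporary HTML file with pyecharts,
# screenshotted via headless Chrome, optionally watermarked, and returned as base64 bytes.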
def is_valid_row(row:[]):
if len(row) == 0:
return False
count = len(row)
for cell in row:
if cell is None or cell=='':
count -= 1
return count != 0
if __name__ == '__main__':
# get_draw_pdf_table('/finalpage/2022-01-26/1212274930.PDF', '豪美转债')
# draw_table(PROJECT_DIR + '/中特转债_anno.pdf', '2.png', '中特转债')
# table = extract_draw_table(PROJECT_DIR + '/中特转债_anno.pdf')
# print(table)
print(math.ceil(1 / float(0.14)))
| 2.34375
| 2
|
benchmarks.py
|
jonahbaron/SELinuxBenchmarks
| 0
|
12780315
|
<reponame>jonahbaron/SELinuxBenchmarks<gh_stars>0
#!/usr/bin/python
from __future__ import division
import datetime
import os
def copytime(filesrc,filedst):
t1 = datetime.datetime.now()
os.system("cp " + filesrc + " copyfile.txt")
t2 = datetime.datetime.now()
t3 = t2 - t1
# print "Evaluating " + filesrc
# print "Pre-copy time: ", t1
# print "Post-copy time: ", t2
# print "Difference: ", t3
# print ""
os.system("rm copyfile.txt")
return t3
def copyfilesBM():
print "Copy file benchmark"
filepath = os.getcwd()
os.system("dd if=/dev/urandom of=4kbFile.txt bs=4KB count=1 > /dev/null 2>&1")
f1copy = copytime("4kbFile.txt",filepath)
os.system("dd if=/dev/urandom of=1kbFile.txt bs=1KB count=1 > /dev/null 2>&1")
f2copy = copytime("1kbFile.txt",filepath)
os.system("dd if=/dev/urandom of=256bFile.txt bs=256 count=1 > /dev/null 2>&1")
f3copy = copytime("256bFile.txt",filepath)
os.system("rm 4kbFile.txt")
os.system("rm 1kbFile.txt")
os.system("rm 256bFile.txt")
copybenchmarks = [f1copy,f2copy,f3copy]
return copybenchmarks
def pipesBM():
print "Pipes benchmark"
os.system("dd if=/dev/urandom of=512bFile.txt bs=512 count=1 > /dev/null 2>&1")
pipecmd1 = "cat 512bFile.txt > test.txt"
pipecmd2 = "cat test.txt > test2.txt"
t1 = datetime.datetime.now()
pipe1 = os.system(pipecmd1)
t2 = datetime.datetime.now()
pipe2 = os.system(pipecmd2)
t3 = datetime.datetime.now()
# print t1
# print pipecmd1
# print t2
# print pipecmd2
# print t3
# print ""
t4 = t2 - t1
t5 = t3 - t1
# print "Difference between t2 and t1: ", t4
# print "Difference between t3 and t1: ", t5
# print ""
os.system("rm 512bFile.txt")
os.system("rm test.txt")
os.system("rm test2.txt")
return t5
def readfile(count):
f = open("pipeswitching.txt", "r")
value = int(f.read())
f.close()
intvalue = int(value) + 1
if count < 10:
writefile(intvalue,count)
def writefile(value,count):
os.system("echo " + str(value) + " > pipeswitching.txt")
count += 1
readfile(count)
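# Note (added for clarity): readfile/writefile recurse into each other until count reaches
# 10, so pipeswitchingBM times roughly ten read-increment-write round trips through the file.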
def pipeswitchingBM():
print "Context switching benchmark"
os.system("echo 0 > pipeswitching.txt")
t1 = datetime.datetime.now()
readfile(0)
t2 = datetime.datetime.now()
t3 = t2 - t1
# print "Pre-processes: ", t1
# print "Post-processes: ", t2
# print "Difference: ", t3
# print ""
os.system("rm pipeswitching.txt")
return t3
def processBM():
print "Process creation benchmark"
t1 = datetime.datetime.now()
simpleprocess = os.fork()
if simpleprocess == 0:
os._exit(0)
os.waitpid(simpleprocess, 0)
t2 = datetime.datetime.now()
t3 = t2 - t1
# print "Pre-child time: ", t1
# print "Post-child time: ", t2
# print "Difference: ", t3
# print ""
return t3
def execlBM():
print "Execl commands benchmark"
t1 = datetime.datetime.now()
execlprocess = os.fork()
if execlprocess == 0:
os.execl("/usr/bin/python", "python", "-V")
os.waitpid(execlprocess, 0)
t2 = datetime.datetime.now()
t3 = t2 - t1
# print "Pre-execl time: ", t1
# print "Post-execl time: ", t2
# print "Difference: ", t3
# print ""
return t3
def perlscript(count):
if count % 2 == 0:
os.system("perl -pe '$_= lc($_)' file.txt > file.txt")
elif count % 2 == 1:
os.system("perl -pe '$_= uc($_)' file.txt > file.txt")
else:
print "Error"
def scriptsBM():
print "Concurrent processes benchmark"
os.system("dd if=/dev/urandom of=file.txt bs=10KB count=1 > /dev/null 2>&1")
children = []
t1 = datetime.datetime.now()
for process in range(8):
pid = os.fork()
if pid:
children.append(pid)
else:
perlscript(process)
os._exit(0)
for i, child in enumerate(children):
os.waitpid(child, 0)
t2 = datetime.datetime.now()
t3 = t2 - t1
# print "Pre-processes: ", t1
# print "Post-processes: ", t2
# print "Difference: ", t3
# print ""
os.system("rm file.txt")
return t3
def main():
print "Unix benchmarks"
print ""
benchmarks1 = []
benchmarks2 = []
basemarks = []
semarks = []
for count in range(100):
print count
for count in range(2):
if count == 0:
print "SELinux disabled"
os.system("setenforce 0")
elif count == 1:
print "SELinux enabled"
os.system("setenforce 1")
copyfiles = copyfilesBM()
pipes = pipesBM()
pipeswitching = pipeswitchingBM()
process = processBM()
execl = execlBM()
scripts = scriptsBM()
f1copy = copyfiles[0]
f2copy = copyfiles[1]
f3copy = copyfiles[2]
if count == 0:
benchmarks1 = [f1copy.microseconds, f2copy.microseconds, f3copy.microseconds, pipes.microseconds, pipeswitching.microseconds, process.microseconds, execl.microseconds, scripts.microseconds]
elif count == 1:
benchmarks2 = [f1copy.microseconds, f2copy.microseconds, f3copy.microseconds, pipes.microseconds, pipeswitching.microseconds, process.microseconds, execl.microseconds, scripts.microseconds]
basemarks.append(benchmarks1)
semarks.append(benchmarks2)
print ""
benchmarks1 = [sum(time)/len(time) for time in zip(*basemarks)]
benchmarks2 = [sum(time)/len(time) for time in zip(*semarks)]
print "Raw BaseOS Benchmarks (microseconds)"
for value in basemarks:
print value
print ""
print "Raw SELinux Benchmarks (microseconds)"
for value in semarks:
print value
print ""
names = ["File copy 4KB", "File copy 1KB", "File copy 256B", "Pipe", "Pipe switching", "Process creation", "Execl", "Shell scripts(8)"]
print "BaseOS Benchmark Averages (microseconds)"
for count in range(8):
print names[count], " - ", benchmarks1[count]
print ""
print "SELinux Benchmark Averages (microseconds)"
for count in range(8):
print names[count], " - ", benchmarks2[count]
print ""
calcs = []
for count in range(8):
value = ((benchmarks2[count] - benchmarks1[count]) / benchmarks1[count]) * 100
calcs.append(value)
print "Overhead (% change)"
for count in range(8):
print names[count], " - ", calcs[count]
print ""
os.system("setenforce 1")
print "Unix benchmarks complete"
if __name__ == "__main__":
main()
| 2.296875
| 2
|
enaml/qt/qt_time_selector.py
|
mmckerns/enaml
| 11
|
12780316
|
#------------------------------------------------------------------------------
# Copyright (c) 2012, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
from .qt.QtGui import QTimeEdit
from .qt_bounded_time import QtBoundedTime
class QtTimeSelector(QtBoundedTime):
""" A Qt implementation of an Enaml TimeSelector.
"""
#--------------------------------------------------------------------------
# Setup Methods
#--------------------------------------------------------------------------
def create_widget(self, parent, tree):
""" Create the underlying QTimeEdit widget.
"""
return QTimeEdit(parent)
def create(self, tree):
""" Create and initialize the underlying widget.
"""
super(QtTimeSelector, self).create(tree)
self.set_time_format(tree['time_format'])
self.widget().timeChanged.connect(self.on_time_changed)
#--------------------------------------------------------------------------
# Message Handling
#--------------------------------------------------------------------------
def on_action_set_time_format(self, content):
""" Handle the 'set_time_format' action from the Enaml widget.
"""
self.set_time_format(content['time_format'])
#--------------------------------------------------------------------------
# Widget Update Methods
#--------------------------------------------------------------------------
def get_time(self):
""" Return the current time in the control.
Returns
-------
result : QTime
The current control time as a QTime object.
"""
return self.widget().time()
def set_time(self, time):
""" Set the widget's current time.
Parameters
----------
time : QTime
The QTime object to use for setting the time.
"""
with self.loopback_guard('time'):
self.widget().setTime(time)
def set_max_time(self, time):
""" Set the widget's maximum time.
Parameters
----------
time : QTime
The QTime object to use for setting the maximum time.
"""
self.widget().setMaximumTime(time)
def set_min_time(self, time):
""" Set the widget's minimum time.
Parameters
----------
time : QTime
The QTime object to use for setting the minimum time.
"""
self.widget().setMinimumTime(time)
def set_time_format(self, time_format):
""" Set the widget's time format.
Parameters
----------
time_format : str
A Python time formatting string.
"""
# XXX make sure Python's and Qt's format strings are the
# same, or convert between the two.
self.widget().setDisplayFormat(time_format)
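# One possible approach for the XXX note above (a sketch, not the original
# implementation): Qt display formats use patterns such as "HH:mm:ss", while
# Python's strftime uses "%H:%M:%S", so a small translation table, e.g.
# {"%H": "HH", "%M": "mm", "%S": "ss"} (hypothetical), could be applied to
# time_format before calling setDisplayFormat.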
| 2.125
| 2
|
maddpg.py
|
170928/-Review-Multi-Agent-Actor-Critic-for-Mixed-Cooperative-Competitive-Environment
| 7
|
12780317
|
<reponame>170928/-Review-Multi-Agent-Actor-Critic-for-Mixed-Cooperative-Competitive-Environment<gh_stars>1-10
import numpy as np
import tensorflow as tf
import random
import tensorflow.layers as layer
from collections import deque
import random
import datetime
import time
from multiagent.environment import MultiAgentEnv
from multiagent.policy import InteractivePolicy
import multiagent.scenarios as scenarios
########################################
action_size = 5
load_model = False
train_mode = True
batch_size = 256
mem_maxlen = 50000
discount_factor = 0.99
learning_rate = 0.00025
run_episode = 10000
start_train_episode = 500
target_update_step = 5000
print_interval = 100
save_interval = 1000
epsilon_min = 0.1
softlambda = 0.9
date_time = str(datetime.date.today()) + '_' + \
str(datetime.datetime.now().hour) + '_' + \
str(datetime.datetime.now().minute) + '_' + \
str(datetime.datetime.now().second)
env_name = "simple_adverary.py"
save_path = "./saved_models/"+date_time+"_maddpg"
load_path = ""
numGoals = 3
###########################################
class Critic(object):
def __init__(self, state_size, action_size, input, action_input, other_action, model_name="Qmodel", agent_num=3, reuse=False):
self.state_size = state_size
self.action_size = action_size
self.agent_num = agent_num
# =================================
self.input = input
self.action_input = action_input
self.other_actions = other_action
# =================================
with tf.variable_scope(name_or_scope=model_name, reuse=reuse):
self.mlp1 = layer.dense(inputs=self.input, units=256, activation = tf.nn.leaky_relu)
self.concat_action = tf.concat([self.action_input, self.other_actions], axis=1)
self.concat = tf.concat([self.mlp1, self.concat_action], axis=1)
self.mlp2 = layer.dense(inputs=self.concat, units=256, activation = tf.nn.leaky_relu)
self.mlp3 = layer.dense(inputs=self.mlp2, units=512, activation = tf.nn.leaky_relu)
self.mlp4 = layer.dense(inputs=self.mlp3, units=512, activation = tf.nn.leaky_relu)
self.Q_Out = layer.dense(self.mlp4, units=1, activation=None)
self.q_predict = self.Q_Out
self.critic_optimizer = tf.train.AdamOptimizer(learning_rate)
class Actor(object):
def __init__(self, state_size, action_size, input, model_name="Pimodel"):
self.agent_num = 3
self.state_size = state_size
self.action_size = action_size
# =================================
self.input = input
# =================================
with tf.variable_scope(name_or_scope=model_name):
self.mlp1 = layer.dense(inputs=self.input, units=512, activation = tf.nn.leaky_relu)
self.mlp2 = layer.dense(inputs=self.mlp1, units=512, activation = tf.nn.leaky_relu)
self.mlp3 = layer.dense(inputs=self.mlp2, units=512, activation = tf.nn.leaky_relu)
self.mlp4 = layer.dense(inputs=self.mlp3, units=512, activation = tf.nn.leaky_relu)
self.Pi_Out = layer.dense(self.mlp4, units=self.action_size, activation=tf.nn.tanh)
self.pi_predict = self.Pi_Out
self.actor_optimizer = tf.train.AdamOptimizer(learning_rate)
class MADDPGAgent(object):
def __init__(self, agent_num, state_size, action_size, idx):
# (1) "actor" : agent in reinforcement learning
# (2) "critic" : helps the actor decide what actions to reinforce during training.
# Traditionally, the critic tries to predict the value (i.e. the reward we expect to get in the future) of an action in a particular state s(t)
# predicted value from critic is used to update the actor policy
# Using the critic value as a baseline for updates is more stable than directly using the reward, which can vary considerably
# since high variance in the reward makes the updates erratic
# In MADDPG, we enhance our critics so they can access the observations and actions of all the agents
# Default Environment Information =====
self.state_size = state_size
self.action_size = action_size
self.agent_num = agent_num
# =====================================
# Experience Buffer ===================
self.memory = deque(maxlen=mem_maxlen)
self.batch_size = batch_size
# =====================================
# Placeholer =============================================================================
self.input = tf.placeholder(shape=[None, self.state_size], dtype=tf.float32)
self.action_input = tf.placeholder(shape=[None, self.action_size], dtype=tf.float32)
self.other_actions = tf.placeholder(shape=[None, self.action_size * (self.agent_num-1)], dtype=tf.float32)
self.target_Q = tf.placeholder(shape=[None,1],dtype=tf.float32)
self.reward = tf.placeholder(shape=[None,1], dtype=tf.float32)
# ========================================================================================
self.actor = Actor(self.state_size, self.action_size, self.input, "Pimodel_" + idx)
self.critic = Critic(self.state_size, self.action_size, self.input, self.action_input, self.other_actions, "Qmodel_" + idx, self.agent_num, reuse=False)
'''
critic_value = Critic(self.state_size, self.action_size, self.input, self.actor.pi_predict, self.other_actions, "Qmodel_" + idx, self.agent_num, reuse=True).q_predict
self.action_gradients = tf.gradients(critic_predict.q_predict, self.actor.pi_predict)[0]
self.actor_gradients = tf.gradients(self.actor.pi_predict, actor_var, -self.action_gradients)
self.grads_and_vars = list(zip(self.actor_gradients, actor_var))
self.actor_train = self.actor.actor_optimizer.apply_gradients(self.grads_and_vars)
'''
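# Deterministic policy gradient, chained manually below: dQ/da is taken with
# respect to the action input and back-propagated through the actor via
# grad_ys, giving dJ/dtheta ~= dQ/da * da/dtheta; the sign flip and division
# by batch_size turn Adam's minimization into an averaged gradient ascent on Q.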
actor_var = [i for i in tf.trainable_variables() if ("Pimodel_" + idx) in i.name]
action_Grad = tf.gradients(self.critic.q_predict, self.action_input)
self.policy_Grads = tf.gradients(ys=self.actor.pi_predict, xs=actor_var, grad_ys=action_Grad)
for idx, grads in enumerate(self.policy_Grads):
self.policy_Grads[idx] = -grads / batch_size
self.actor_train = self.actor.actor_optimizer.apply_gradients(zip(self.policy_Grads, actor_var))
self.critic_loss = tf.reduce_mean(tf.square(self.target_Q - self.critic.q_predict))
self.critic_train = self.critic.critic_optimizer.minimize(self.critic_loss)
def train_actor(self, state, action, other_action, sess):
sess.run(self.actor_train,
{self.input: state, self.action_input : action, self.other_actions: other_action})
def train_critic(self, state, action, other_action, target, sess):
sess.run(self.critic_train,
{self.input: state, self.action_input: action, self.other_actions: other_action, self.target_Q: target})
def action(self, state, sess):
return sess.run(self.actor.pi_predict, {self.input: state})
def Q(self, state, action, other_action, sess):
return sess.run(self.critic.q_predict,
{self.input: state, self.action_input: action, self.other_actions: other_action})
| 2.203125
| 2
|
cerebra/check_coverage.py
|
rvanheusden/cerebra
| 1
|
12780318
|
<gh_stars>1-10
""" this tool compares VCF records to gVCF for a given cell, to determine coverage
to a given loci. keep in mind the loci should be SMALL, ie. individual SNPs /
small indels. it is not intended for whole exon or whole transcript queries """
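# A typical query (values mirroring the CLI defaults below) spans only a few
# bases, e.g. chrom=7, start_pos=55191820, end_pos=55191822 for the EGFR L858R
# locus.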
import pandas as pd
import numpy as np
from . import VCF
import sys
import multiprocessing as mp
import os
import click
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)  # silence this noisy warning
def get_filenames():
""" get file names given path """
files = []
for file in os.listdir(cwd + "scVCF_filtered_all/"):
if file.endswith(".vcf"):
fullPath = cwd + 'scVCF_filtered_all/' + file
files.append(fullPath)
return files
def build_outfile_line(outCode, depth, cell_name):
""" generic func for creating a single line pd df with cellName,
coverage (bool) and depth """
colNames = ['cellName', 'coverage_bool', 'depth']
if outCode == 1: # no records found
toAddRow = pd.DataFrame([[cell_name, 0, 0]], columns=colNames)
elif outCode == 2: # single record found
toAddRow = pd.DataFrame([[cell_name, 1, depth]], columns=colNames)
else: # multiple records found
toAddRow = pd.DataFrame([[cell_name, 1, depth]], columns=colNames)
return(toAddRow)
def get_depth(df, cellName_):
""" given a dataframe containing multiple records, reports
depth for every record within that df """
outCode_ = 0 # passed to build_outfile_line()
if len(df.index) == 0: # no records found
outCode_ = 1
toAddRow_ = build_outfile_line(outCode_, 0, cellName_)
elif len(df.index) == 1: # single record found
outCode_ = 2
infoStr = df['INFO']
infoStr = str(infoStr)
DP = infoStr.split('DP')[1].split(';')[0].strip('=')
toAddRow_ = build_outfile_line(outCode_, DP, cellName_)
else: # multiple records found
outCode_ = 3
infoDF = df['INFO']
DP_vec = []
for i in range(0, len(infoDF.index)-1):
line = infoDF.iloc[i]
line = str(line)
try:
DP = line.split('DP')[1].split(';')[0].strip('=')
DP_vec.append(DP)
except IndexError:
continue
toAddRow_ = build_outfile_line(outCode_, DP_vec, cellName_)
return(toAddRow_)
def get_GOI_record(record, *args):
""" defines a list of records corresponding to the GOI """
chrom = 'chr' + str(args[0])
start = int(args[1])
end = int(args[2])
if record['CHROM'] == chrom:
if end >= record['POS'] >= start:
return 1
else:
return 0
else:
return 0
def run_batch(file):
""" implements BATCH MODE. for every cell, call subroutines to search
for ROI, get depth, and output """
try:
cellName = file.strip(cwd + 'scVCF_filtered_all/')
cellName = cellName.strip('.vcf')
vcf_path = file
gvcf_path = cwd + 'gVCF/' + cellName
gvcf_path_strip = gvcf_path.rstrip() + '.g.vcf'
vcf = VCF.dataframe(vcf_path)
gvcf = VCF.dataframe(gvcf_path_strip)
# get a list of the records we actually care about
toKeepList_v = vcf.apply(get_GOI_record, axis=1, args=(chrom_, start_ ,end_))
toKeepList_g = gvcf.apply(get_GOI_record, axis=1, args=(chrom_, start_, end_))
# subset by relevant records
vcf_GOI = vcf[np.array(toKeepList_v, dtype=bool)]
gvcf_GOI = gvcf[np.array(toKeepList_g, dtype=bool)]
# get depth of coverage, for relevant records
outputRow_v = get_depth(vcf_GOI, cellName)
outputRow_g = get_depth(gvcf_GOI, cellName)
# make the combined row, with both vcf and gvcf fields filled in
outputRow_comb = pd.DataFrame(columns=colNames) # colNames is a global
outputRow_comb['cellName'] = outputRow_v['cellName']
outputRow_comb['coverage_bool_vcf'] = outputRow_v['coverage_bool']
outputRow_comb['depth_vcf'] = outputRow_v['depth']
outputRow_comb['coverage_bool_gvcf'] = outputRow_g['coverage_bool']
outputRow_comb['depth_gvcf'] = outputRow_g['depth']
except:
outputRow_comb = pd.DataFrame(columns=colNames) # just an empty row
# fill in this row with something
return(outputRow_comb)
def init_pool(filenames, outfile_name):
""" function to set up the thread pool """
# init outFile
outputDF_init = pd.DataFrame(columns=colNames)
print('creating pool')
p = mp.Pool(processes=nThreads)
print('running...')
outputRows = p.map(run_batch, filenames)
p.close()
p.join()
print('done!')
print(' ')
# join all of the rows into single df
cmd = 'sudo mkdir -p ' + cwd + 'coverage/'
cmd1 = 'sudo chmod -R 777 ' + cwd + 'coverage/'
os.system(cmd)
os.system(cmd1)
outputDF = outputDF_init.append(outputRows)
outputDF.to_csv(cwd + 'coverage/' + outfile_name, index=False)
""" get cmdline input """
@click.command()
@click.option('--chrom', default = 7, prompt='chromosome', required=True, type=str)
@click.option('--start_pos', default = 55191820, prompt='start position', required=True, type=str)
@click.option('--end_pos', default = 55191822, prompt='end position', required=True, type=str)
@click.option('--nthreads', default = 4, prompt='number of threads', required=True, type=int)
@click.option('--wrkdir', default = '/Users/lincoln.harris/code/cerebra/cerebra/wrkdir/',
prompt='s3 import directory', required=True)
@click.option('--batch_mode', default = 0, prompt='batch mode', required=True, type=int)
@click.option('--outfile', default = 'egfr_L858R_coverageByCell.csv',
prompt='name of output file (leave blank for batch mode)', required=True)
@click.option('--test', default = False)
def check_coverage(chrom, start_pos, end_pos, nthreads, wrkdir, batch_mode, outfile, test):
""" check coverage to a given ROI """
global cellName
global vcf_s3_path
global gvcf_s3_path
global chrom_
global start_
global end_
global colNames
global cwd
global nThreads
cwd = wrkdir
nThreads = nthreads
print(' ')
print('this tool should be used for locus-specific coverage queries.')
print('it is NOT intended for calculating coverage at the exon/transcript level.')
fNames = get_filenames()
colNames = ['cellName', 'coverage_bool_vcf', 'depth_vcf', 'coverage_bool_gvcf', 'depth_gvcf']
if batch_mode:
cov_df = pd.read_csv(cwd + '../coverageBatch.csv')
for i in range(0, len(cov_df.index)):
currRow = cov_df.iloc[i]
chrom_ = currRow['chrom']
start_ = currRow['start_pos']
end_ = currRow['end_pos']
outfile_ = currRow['outfile']
init_pool(fNames, outfile_)
else:
chrom_ = chrom
start_ = start_pos
end_ = end_pos
outfile_ = outfile
init_pool(fNames, outfile_)
| 2.546875
| 3
|
seaice/tools/xlsify/regional_daily.py
|
andypbarrett/nsidc-seaice
| 2
|
12780319
|
"""Reformats daily seaice data into regional xls file
This is for internal use by scientists.
"""
import calendar as cal
import os
import click
import pandas as pd
from . import util
import seaice.nasateam as nt
import seaice.logging as seaicelogging
import seaice.timeseries as sit
log = seaicelogging.init('seaice.tools')
def output_filepath(output_directory, *, hemi):
fn = '{}_Sea_Ice_Index_Regional_Daily_Data_G02135_{}.xlsx'.format(
hemi,
nt.VERSION_STRING
)
return os.path.join(output_directory, fn)
@click.command()
@click.argument('input_directory', type=click.Path(exists=True, file_okay=False))
@click.argument('output_directory', type=click.Path(exists=True, file_okay=False))
@seaicelogging.log_command(log)
def regional_daily(input_directory, output_directory):
data_store = os.path.join(input_directory, 'daily.p')
for hemisphere in (nt.NORTH, nt.SOUTH):
hemi = hemisphere['short_name']
output_file = open(output_filepath(output_directory, hemi=hemi), 'wb')
# Generate the daily dataframe with regional columns
daily = sit.daily(hemi, data_store=data_store, columns=[])
# Keep only regional columns
regional = daily.drop(nt.DAILY_DEFAULT_COLUMNS, axis=1)
writer = pd.ExcelWriter(output_file, engine='xlsxwriter')
extent_and_area_columns = [c for c in regional.columns if 'missing' not in c]
extent_and_area_columns.sort()
for col in extent_and_area_columns:
regional_mask_cfg, region_prefix = \
util.regional_mask_cfg_from_column_name(col)
# Don't add column to the sheet if wrong hemisphere
if regional_mask_cfg['hemisphere'] != hemisphere['long_name']:
continue
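# 5-day trailing mean of the regional series, then pivot into a
# (month, day) x year table matching the spreadsheet layout.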
df = regional[col].rolling(window=5, min_periods=2).mean()
df = pd.DataFrame(df).set_index(
[df.index.year, df.index.month, df.index.day]
).unstack(0)
df.index.names = ['month', 'day']
df.index = df.index.set_levels(cal.month_name[1:], level=0)
# Strip the regional mask prefix from the column name
col = col[len(region_prefix):]
sheet_name = util.regional_sheet_name(col)
write_sheet(writer, df, sheet_name)
writer = util.add_documentation_sheet(
writer,
util.documentation_file(output_filepath('', hemi=hemi))
)
writer.save()
log.info('regional_daily created: {}'.format(output_file.name))
def write_sheet(writer, df, sheet_name):
df.columns = df.columns.droplevel(0)
df.to_excel(writer, sheet_name, float_format='%.3f')
if __name__ == '__main__':
regional_daily()
| 2.734375
| 3
|
9. Testing with pytest-mock and pytest-flask/source_code/tests/test_example.py
|
Edmartt/articles
| 31
|
12780320
|
import pytest
import example.app
@pytest.fixture
def app(mocker):
mocker.patch("flask_sqlalchemy.SQLAlchemy.init_app", return_value=True)
mocker.patch("flask_sqlalchemy.SQLAlchemy.create_all", return_value=True)
mocker.patch("example.database.get_all", return_value={})
return example.app.app
def test_example(client):
response = client.get("/")
assert response.status_code == 200
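# A further hedged sketch (not from the original article): pytest-mock patches
# can also be inspected after the request. This assumes the "/" route calls
# example.database.get_all at request time; adjust the patch target to the
# real module layout.
def test_example_calls_get_all(client, mocker):
    spy = mocker.patch("example.database.get_all", return_value={})
    response = client.get("/")
    assert response.status_code == 200
    spy.assert_called_once()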
| 2.390625
| 2
|
models/mesh_classifier.py
|
fishfishson/MeshCNN
| 0
|
12780321
|
<filename>models/mesh_classifier.py
import torch
from . import networks
import torch.nn as nn
from os.path import join
from util.util import seg_accuracy, print_network
from models.resunet import DAResNet3d
class ClassifierModel:
""" Class for training Model weights
:args opt: structure containing configuration params
e.g.,
--dataset_mode -> classification / segmentation
--arch -> network type
"""
def __init__(self, opt):
self.opt = opt
self.gpu_ids = opt.gpu_ids
self.is_train = opt.is_train
self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
self.save_dir = join(opt.checkpoints_dir, opt.name)
self.optimizer = None
self.edge_features = None
self.labels = None
self.mesh = None
self.soft_label = None
self.loss = None
#
self.nclasses = opt.nclasses
# load/define networks
self.net = networks.define_classifier(opt.input_nc, opt.ncf, opt.ninput_edges, opt.nclasses, opt,
self.gpu_ids, opt.arch, opt.init_type, opt.init_gain)
self.net.train(self.is_train)
self.criterion = networks.define_loss(opt).to(self.device)
if self.is_train:
self.optimizer = torch.optim.Adam(self.net.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.scheduler = networks.get_scheduler(self.optimizer, opt)
print_network(self.net)
if not self.is_train or opt.continue_train:
self.load_network(opt.which_epoch)
def set_input(self, data):
input_edge_features = torch.from_numpy(data['edge_features']).float()
labels = torch.from_numpy(data['label']).long()
# set inputs
self.edge_features = input_edge_features.to(self.device).requires_grad_(self.is_train)
self.labels = labels.to(self.device)
self.mesh = data['mesh']
if self.opt.dataset_mode == 'segmentation' and not self.is_train:
self.soft_label = torch.from_numpy(data['soft_label'])
def forward(self):
out = self.net(self.edge_features, self.mesh)
return out
def backward(self, out):
self.loss = self.criterion(out, self.labels)
self.loss.backward()
def optimize_parameters(self):
self.optimizer.zero_grad()
out = self.forward()
self.backward(out)
self.optimizer.step()
##################
def load_network(self, which_epoch):
"""load model from disk"""
save_filename = '%s_net.pth' % which_epoch
load_path = join(self.save_dir, save_filename)
net = self.net
if isinstance(net, torch.nn.DataParallel):
net = net.module
print('loading the model from %s' % load_path)
# For PyTorch newer than 0.4 (e.g., built from
# GitHub source), the str() wrapper around self.device can be removed
state_dict = torch.load(load_path, map_location=str(self.device))
if hasattr(state_dict, '_metadata'):
del state_dict._metadata
net.load_state_dict(state_dict)
def save_network(self, which_epoch):
"""save model to disk"""
save_filename = '%s_net.pth' % (which_epoch)
save_path = join(self.save_dir, save_filename)
if len(self.gpu_ids) > 0 and torch.cuda.is_available():
torch.save(self.net.module.cpu().state_dict(), save_path)
self.net.cuda(self.gpu_ids[0])
else:
torch.save(self.net.cpu().state_dict(), save_path)
def update_learning_rate(self):
"""update learning rate (called once every epoch)"""
self.scheduler.step()
lr = self.optimizer.param_groups[0]['lr']
print('learning rate = %.7f' % lr)
def test(self):
"""tests model
returns: number correct and total number
"""
with torch.no_grad():
out = self.forward()
# compute number of correct
pred_class = out.data.max(1)[1]
label_class = self.labels
self.export_segmentation(pred_class.cpu())
correct = self.get_accuracy(pred_class, label_class)
return correct, len(label_class)
def get_accuracy(self, pred, labels):
"""computes accuracy for classification / segmentation """
if self.opt.dataset_mode == 'classification':
correct = pred.eq(labels).sum()
elif self.opt.dataset_mode == 'segmentation':
correct = seg_accuracy(pred, self.soft_label, self.mesh)
return correct
def export_segmentation(self, pred_seg):
if self.opt.dataset_mode == 'segmentation':
for meshi, mesh in enumerate(self.mesh):
mesh.export_segments(pred_seg[meshi, :])
class RegresserModel(nn.Module):
def __init__(self, opt):
super(RegresserModel, self).__init__()
self.opt = opt
self.seg_net = DAResNet3d(opt.nclasses, opt.seg_inplanes)
self.down_convs = [opt.input_nc] + opt.ncf
self.up_convs = opt.ncf[::-1] + [3]
self.pool_res = [opt.ninput_edges] + opt.pool_res
self.mesh_net = networks.MeshEncoderDecoder(self.pool_res,
self.down_convs,
self.up_convs,
blocks=opt.resblocks,
transfer_data=True)
def forward(self, img_patch, edge_fs, edges, vs, mesh):
out_mask, out_fmap = self.seg_net(img_patch)
fmap = unpatch(out_fmap)
edge_fmaps = add_feature(edges, fmap, vs)
edge_inputs = torch.cat([edge_fs, edge_fmaps], dim=1)
edge_offsets = self.mesh_net(edge_inputs, mesh)
return out_mask, edge_offsets
def patch(img, ps):
assert len(img.shape) == 4
patches = img.unfold(1, ps[0], ps[0]).unfold(2, ps[1], ps[1]).unfold(3, ps[2], ps[2])
patches = patches.contiguous().view(-1, ps[0], ps[1], ps[2])
return patches
def unpatch(patches):
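# Reassembles the flat batch of patches back into full volumes: the batch is
# assumed to be laid out as a 2x2x2 grid of sub-volumes per sample, which the
# reshape/permute sequence below undoes (inverse of patch() above).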
size = patches.shape
channel = size[1]
patches = patches.permute(1, 0, 2, 3, 4)
patches = patches.reshape(channel, -1, 2, 2, 2, size[2], size[3], size[4])
patches = patches.permute(0, 1, 2, 5, 3, 6, 4, 7)
patches = patches.reshape(channel, -1, 2 * size[2], 2 * size[3], 2 * size[4])
patches = patches.permute(1, 0, 2, 3, 4)
return patches
def add_feature(edges, fmap, vs):
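# For each sample, looks up the feature map at the voxel coordinates of an
# edge's two endpoint vertices and averages the two feature vectors, giving a
# per-edge feature tensor of shape (batch, channels, num_edges).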
size = fmap.size()
b = size[0]
k = size[1]
n_e = edges.shape[1]
edges_map = torch.zeros((b, k, n_e)).float().cuda()
for i in range(b):
edge = edges[i]
v = vs[i]
v1 = v[edge[:, 0]]
v2 = v[edge[:, 1]]
fmap_1 = fmap[i, :, v1[:, 0], v1[:, 1], v1[:, 2]]
fmap_2 = fmap[i, :, v2[:, 0], v2[:, 1], v2[:, 2]]
edges_map[i] = (fmap_1 + fmap_2) / 2
return edges_map
| 2.484375
| 2
|
tordatahub/core.py
|
jasonz93/python-tordatahub
| 0
|
12780322
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import os
import time
import json
import re
import traceback
from collections import Iterable
import tornado.gen
from .thirdparty import six
from .auth import AliyunAccount
from .utils import Logger, Path
from .errors import *
from .models import RestClient
from .models import Project, Projects
from .models import Topic, Topics
from .models import ShardAction, ShardState, Shards
from .models import Records
from .models import MeteringInfo
from .models import CursorType, Cursor
class DataHub(object):
"""
Main entrance to DataHub.
Convenient operations on DataHub objects are provided.
Please refer to `DataHub docs <https://tordatahub.console.aliyun.com/intro/index.html>`_
to see the details.
Generally, basic operations such as ``create``, ``list``, ``delete``, ``update`` are provided for each DataHub object.
Take the ``project`` as an example.
To create a DataHub instance, access_id and access_key are required and must be correct,
or a ``SignatureNotMatch`` error will be raised.
:param access_id: Aliyun Access ID
:param secret_access_key: Aliyun Access Key
:param endpoint: Rest service URL
:Example:
>>> tordatahub = DataHub('**your access id**', '**your access key**', '**endpoint**')
>>>
>>> project = tordatahub.get_project('datahub_test')
>>>
>>> print project is None
>>>
"""
def __init__(self, access_id, access_key, endpoint=None, **kwds):
"""
"""
self.account = kwds.pop('account', None)
if self.account is None:
self.account = AliyunAccount(access_id=access_id, access_key=access_key)
self.endpoint = endpoint
self.restclient = RestClient(self.account, self.endpoint, **kwds)
@tornado.gen.coroutine
def list_projects(self):
"""
List all projects
:return: projects in tordatahub server
:rtype: generator
.. seealso:: :class:`tordatahub.models.Projects`
"""
projects = Projects()
yield self.restclient.get(restmodel=projects)
return projects
@tornado.gen.coroutine
def get_project(self, name):
"""
Get a project by given name
:param name: project name
:return: the right project
:rtype: :class:`datahub.models.Project`
:raise: :class:`tordatahub.errors.NoSuchObjectException` if it does not exist
.. seealso:: :class:`tordatahub.models.Project`
"""
if not name:
raise InvalidArgument('project name is empty')
proj = Project(name=name)
yield self.restclient.get(restmodel=proj)
return proj
@tornado.gen.coroutine
def list_topics(self, project_name):
"""
Get all topics of a project
:param project_name: project name
:return: all topics of the project
:rtype: generator
:raise: :class:`tordatahub.errors.NoSuchObjectException` if the project does not exist
.. seealso:: :class:`tordatahub.models.Topics`
"""
if not project_name:
raise InvalidArgument('project name is empty')
topics = Topics(project_name=project_name)
yield self.restclient.get(restmodel=topics)
return topics
@tornado.gen.coroutine
def create_topic(self, topic):
"""
Create topic
:param topic: a object instance of :class:`tordatahub.models.Topic`
:return: none
"""
if not isinstance(topic, Topic):
raise InvalidArgument('argument topic type must be tordatahub.models.Topic')
yield self.restclient.post(restmodel=topic)
@tornado.gen.coroutine
def get_topic(self, name, project_name):
"""
Get a topic
:param name: topic name
:param project_name: project name
:return: topic object
:rtype: :class:`datahub.models.Topic`
:raise: :class:`tordatahub.errors.NoSuchObjectException` if the project or topic does not exist
.. seealso:: :class:`tordatahub.models.Topic`
"""
if not name or not project_name:
raise InvalidArgument('topic or project name is empty')
topic = Topic(name=name, project_name=project_name)
yield self.restclient.get(restmodel=topic)
return topic
@tornado.gen.coroutine
def update_topic(self, name, project_name, life_cycle=0, comment=''):
"""
Update topic info, only life cycle and comment can be modified.
:param name: topic name
:param project_name: project name
:param life_cycle: life cycle of topic
:param comment: topic comment
:return: none
:raise: :class:`tordatahub.errors.NoSuchObjectException` if the project or topic does not exist
"""
if 0 == life_cycle and '' == comment:
return
if not name or not project_name:
raise InvalidArgument('topic or project name is empty')
topic = Topic(name=name, project_name=project_name, life_cycle=life_cycle, comment=comment)
yield self.restclient.put(restmodel=topic)
@tornado.gen.coroutine
def delete_topic(self, name, project_name):
"""
Delete a topic
:param name: topic name
:param project_name: project name
:return: none
:raise: :class:`tordatahub.errors.NoSuchObjectException` if the project or topic does not exist
"""
if not name or not project_name:
raise InvalidArgument('topic or project name is empty')
topic = Topic(name=name, project_name=project_name)
yield self.restclient.delete(restmodel=topic)
@tornado.gen.coroutine
def wait_shards_ready(self, project_name, topic_name, timeout=-1):
"""
Wait until all shard states are ``active`` or ``closed``.
It should always be invoked after creating a topic, and blocks until all
shard states are ``active`` or ``closed``, or until the timeout expires.
:param project_name: project name
:param topic_name: topic name
:param timeout: -1 means block until all shard states are ``active`` or ``closed``; otherwise wait at most ``timeout`` seconds
:return: if all shards ready
:rtype: boolean
:raise: :class:`tordatahub.errors.NoSuchObjectException` if the project or topic does not exist
"""
if not topic_name or not project_name:
raise InvalidArgument('topic or project name is empty')
iCostTime = 0
bNotReady = True
bTimeout = False
while bNotReady and not bTimeout:
bNotReady = False
shards = Shards(action=ShardAction.LIST, project_name=project_name, topic_name=topic_name)
yield self.restclient.get(restmodel=shards)
for shard in shards:
if shard.state not in (ShardState.ACTIVE, ShardState.CLOSED):
Logger.logger.debug("project: %s, topic: %s, shard: %s state is %s, sleep 1s" %(project_name, topic_name, shard.shard_id, shard.state))
bNotReady = True
yield tornado.gen.sleep(1)
iCostTime += 1
if timeout > 0 and iCostTime >= timeout:
bTimeout = True
break
return not bNotReady
@tornado.gen.coroutine
def list_shards(self, project_name, topic_name):
"""
List all shards of a topic
:param project_name: project name
:param topic_name: topic name
:return: all shards
:rtype: :class:`datahub.models.Shards`
:raise: :class:`tordatahub.errors.NoSuchObjectException` if the project or topic does not exist
.. seealso:: :class:`tordatahub.models.Shards`
"""
if not topic_name or not project_name:
raise InvalidArgument('topic or project name is empty')
shards = Shards(action=ShardAction.LIST, project_name=project_name, topic_name=topic_name)
yield self.restclient.get(restmodel=shards)
return shards
@tornado.gen.coroutine
def merge_shard(self, project_name, topic_name, shard_id, adj_shard_id):
"""
Merge shards
:param project_name: project name
:param topic_name: topic name
:param shard_id: shard id
:param adj_shard_id: adjacent shard id
:return: after merged shards
:rtype: :class:`datahub.models.Shards`
:raise: :class:`tordatahub.errors.NoSuchObjectException` if the shard does not exist
.. seealso:: :class:`tordatahub.models.Shards`
"""
if not topic_name or not project_name:
raise InvalidArgument('topic or project name is empty')
if not shard_id or not adj_shard_id:
raise InvalidArgument('shard id or adjacent shard id is empty')
shards = Shards(action=ShardAction.MERGE, project_name=project_name, topic_name=topic_name)
shards.set_mergeinfo(shard_id, adj_shard_id)
yield self.restclient.post(restmodel=shards)
return shards
@tornado.gen.coroutine
def split_shard(self, project_name, topic_name, shard_id, split_key):
"""
Split shard
:param project_name: project name
:param topic_name: topic name
:param shard_id: split shard id
:param split_key: split key
:return: after split shards
:rtype: :class:`datahub.models.Shards`
:raise: :class:`tordatahub.errors.NoSuchObjectException` if the shard does not exist
.. seealso:: :class:`tordatahub.models.Shards`
"""
if not topic_name or not project_name:
raise InvalidArgument('topic or project name is empty')
if not shard_id or not split_key:
raise InvalidArgument('shard id or split key is empty')
shards = Shards(action=ShardAction.SPLIT, project_name=project_name, topic_name=topic_name)
shards.set_splitinfo(shard_id, split_key)
yield self.restclient.post(restmodel=shards)
return shards
@tornado.gen.coroutine
def get_cursor(self, project_name, topic_name, type, shard_id, system_time=0):
"""
Get cursor.
Before invoking get_records for the first time, you must invoke this to obtain a cursor
:param project_name: project name
:param topic_name: topic name
:param type: cursor type
:param shard_id: shard id
:param system_time: if type=CursorType.SYSTEM_TIME, it must be set
:return: a cursor
:rtype: :class:`datahub.models.Cursor`
:raise: :class:`tordatahub.errors.NoSuchObjectException` if the shard does not exist
.. seealso:: :class:`tordatahub.models.CursorType`, :class:`tordatahub.models.Cursor`
"""
if not topic_name or not project_name:
raise InvalidArgument('topic or project name is empty')
if not shard_id or not type:
raise InvalidArgument('shard id or type is empty')
cursor = Cursor(project_name=project_name, topic_name=topic_name, type=type, shard_id=shard_id)
if CursorType.SYSTEM_TIME == type and system_time == 0:
raise InvalidArgument('get SYSTEM_TIME cursor must provide system_time argument')
cursor.system_time = system_time
yield self.restclient.post(restmodel=cursor)
return cursor
@tornado.gen.coroutine
def put_records(self, project_name, topic_name, record_list):
"""
Put records to a topic
:param project_name: project name
:param topic_name: topic name
:param record_list: record list
:return: indices of failed records
:rtype: list
:raise: :class:`tordatahub.errors.NoSuchObjectException` if the topic does not exist
.. seealso:: :class:`tordatahub.models.Record`
"""
if not topic_name or not project_name:
raise InvalidArgument('topic or project name is empty')
if not isinstance(record_list, list):
raise InvalidArgument('record list must be a List')
records = Records(project_name=project_name, topic_name=topic_name)
records.record_list = record_list
yield self.restclient.post(restmodel=records)
return records.failed_indexs
@tornado.gen.coroutine
def get_records(self, topic, shard_id, cursor, limit_num=1):
"""
Get records from a topic
:param topic: a object instance of :class:`tordatahub.models.Topic`
:param shard_id: shard id
:param cursor: the cursor
:return: record list, record num and next cursor
:rtype: tuple
:raise: :class:`tordatahub.errors.NoSuchObjectException` if the topic does not exist
.. seealso:: :class:`tordatahub.models.Topic`, :class:`tordatahub.models.Cursor`
"""
if not shard_id:
raise InvalidArgument('shard id is empty')
if not isinstance(topic, Topic):
raise InvalidArgument('argument topic type must be tordatahub.models.Topic')
records = Records(project_name=topic.project_name, topic_name=topic.name, schema=topic.record_schema)
records.shard_id = shard_id
records.next_cursor = str(cursor)
records.limit_num = limit_num
yield self.restclient.post(restmodel=records)
return (records.record_list, records.record_num, records.next_cursor)
@tornado.gen.coroutine
def get_meteringinfo(self, project_name, topic_name, shard_id):
"""
Get a shard metering info
:param project_name: project name
:param topic_name: topic name
:param shard_id: shard id
:return: the shard metering info
:rtype: :class:`datahub.models.MeteringInfo`
:raise: :class:`tordatahub.errors.NoSuchObjectException` if the topic does not exist
.. seealso:: :class:`tordatahub.models.MeteringInfo`
"""
if not project_name or not topic_name:
raise InvalidArgument('project or topic name is empty')
if not shard_id:
raise InvalidArgument('shard id is empty')
meteringInfo = MeteringInfo(project_name=project_name, topic_name=topic_name, shard_id=shard_id)
yield self.restclient.post(restmodel=meteringInfo)
return meteringInfo
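# Minimal usage sketch, assuming a reachable DataHub endpoint and valid
# credentials (all values below are placeholders). Every public method here is
# a tornado coroutine and must be yielded from another coroutine:
#
#   @tornado.gen.coroutine
#   def example():
#       dh = DataHub('<access_id>', '<access_key>', '<endpoint>')
#       topics = yield dh.list_topics('my_project')  # hypothetical project
#       print(topics)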
| 1.835938
| 2
|
figS3_eCLe.py
|
j-friedrich/neuralOFC
| 4
|
12780323
|
<gh_stars>1-10
""" Script to produce Fig. S3 """
from ofc import System, parmap
import matplotlib.pyplot as plt
import numpy as np
from scipy.ndimage import median_filter
# colors from Okabe & Ito's colorblind friendly palette
colors = ["#0072B2", "#009E73", "#D55E00", "#E69F00"]
plt.rc('axes', prop_cycle=plt.cycler('color', colors))
plt.rc('font', size=18)
plt.rc('legend', **{'fontsize': 12})
s0 = System(A=np.array([[1, 1], [0, 1]]),
B=np.array([[0], [1]]),
C=np.eye(2),
V=.01 * np.eye(2),
W=np.diag([.04, .25]),
Q=np.array([[1, 0], [0, 0]]),
R=np.ones((1, 1)),
T=11)
s1 = System(A=np.array([[1, 1], [0, 1]]),
B=np.array([[0], [1]]),
C=np.array([[1, 0], [0, -1], [.5, .5]]),
V=.01 * np.eye(2),
W=np.array([[.04, .09, 0], [.09, .25, 0], [0, 0, .04]]),
Q=np.array([[1, 0], [0, 0]]),
R=np.ones((1, 1)),
T=11)
s2 = System(A=np.array([[1, 1, 0], [0, 1, 0], [0, 0, 0]]),
B=np.array([[0], [1], [0]]),
C=np.array([[1, 0, 0], [0, -1, 0], [.5, .5, 0]]),
V=.01 * np.eye(3),
W=np.array([[.04, .09, 0], [.09, .25, 0], [0, 0, .04]]),
Q=np.array([[1, 0, 0], [0, 0, 0], [0, 0, 0]]),
R=np.ones((1, 1)),
T=11)
for ss in (0, 1, 2):
print('\nSystem', ss)
s = (s0, s1, s2)[ss]
eta = np.load('results/LQG_%s_delay%g_sigma%g_momentum%g%s' %
('open', 1, .2, .99,
('_C=I.npy', '.npy', '_m=3.npy')[ss]),
allow_pickle=True).item().best_params['eta']
def foo(seed=0):
np.random.seed(seed)
if s.m == s.n:
Ahat, Bhat = [.1*np.random.randn(*a_.shape)
for a_ in (s.A, s.B)]
Chat, Lhat = [.5*np.eye(s.m) + .5*np.random.rand(*a.shape)
for a in (s.C, s.C.T)]
else:
Ahat, Bhat, Chat, Lhat = [.1*np.random.randn(*a_.shape) for a_ in
(s.A, s.B, s.C, s.C.T)]
while np.any(np.linalg.eigvals(Lhat.dot(Chat)) <= .01):
Lhat = .1*np.random.randn(*Chat.T.shape)
while np.any(np.linalg.eigvals(Lhat.dot(Chat)) <= .01):
Lhat = .1*np.random.randn(*Chat.T.shape)
return s.SysID(Ahat, Bhat, Chat, Lhat, eta, episodes=5000,
init_seed=seed, x0=[-1, 0, 0][:s.m], verbose=True)
sysIdresult = parmap(foo, range(20))
eCLe = np.array([j[-1] for j in sysIdresult])
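# With T=11 each episode contributes 10 control steps (an assumption based on
# s.T above), so averaging over windows of 10 and 100 entries gives per-episode
# and per-10-episode means of the e^T C L e quantity plotted below.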
g1traj = eCLe.reshape(20, -1, 10).mean(-1)
g10traj = eCLe.reshape(20, -1, 100).mean(-1)
print('Percentage of cases where eCLe < 0')
for l, g in ((' 1 step ', eCLe), (' 1 epoch ', g1traj), ('10 epochs', g10traj)):
print('%s %.3f%%' % (l, 100*(g < 0).sum()/g.size))
plt.figure(figsize=(6, 4))
plt.plot(g1traj.T)
plt.xlabel('Episodes')
plt.ylabel(r'$\sum_t^T e_t^\top C L e_t$')
plt.ylim(-.2, 2.8)
plt.tight_layout(pad=.05)
plt.savefig('fig/eCLe' + ('_C=I.pdf', '.pdf', '_m=3.pdf')[ss])
| 2.03125
| 2
|
sentry_scrapy/utils.py
|
m-vdb/sentry-scrapy
| 7
|
12780324
|
<gh_stars>1-10
"""Utils module."""
def response_to_dict(response):
"""
Convert a `scrapy.http.Response` to a dictionnary.
"""
return {
'status': response.status,
'url': response.url,
'headers': response.headers.to_unicode_dict(),
'body': response.text,
}
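# Usage sketch (an assumed integration point, not part of this module): the
# returned dict is small and JSON-serialisable, so it can be attached to a
# Sentry event, e.g. scope.set_extra('response', response_to_dict(response)).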
| 2.671875
| 3
|
tests/TestParetoFront.py
|
ianran/rdml_graph
| 4
|
12780325
|
<gh_stars>1-10
# TestParetoFront.py
# Written <NAME> February 2021
#
# The test Pareto Front.
import rdml_graph as gr
import numpy as np
import matplotlib.pyplot as plt
import time
if __name__ == '__main__':
front = gr.ParetoFront(3, alloc_size=10)
rewards = np.array([[3,4,5], [2, 3,4], [5,2,1], [3,4,6], [3, 2, 1], [2, 7,2]])
for i in range(rewards.shape[0]):
front.check_and_add(rewards[i], i)
pareto_idx = gr.get_pareto(rewards)
print('pareto_idx: ' + str(pareto_idx))
plt.scatter(rewards[:,0], rewards[:, 1], color='red')
#plt.scatter(front.front[:front.size,0], front.front[:front.size,1], color='blue')
plt.scatter(rewards[pareto_idx][:,0], rewards[pareto_idx][:,1], color='green')
print(front.get())
plt.show()
rewards = np.random.random((100, 2))
for i in range(100):
if np.sum(rewards[i]) > 1.1:
rewards[i] = np.array([0,0])
rewards[0] = np.array([0,0.1])
front = gr.ParetoFront(2, alloc_size=100)
start = time.time()
for i in range(rewards.shape[0]):
front.check_and_add(rewards[i], i)
end = time.time()
print(front.front)
print(front.front_val)
print(end - start)
plt.scatter(rewards[:,0], rewards[:, 1], color='red')
plt.scatter(front.front[:front.size,0], front.front[:front.size,1], color='blue')
plt.show()
| 2.65625
| 3
|
main.py
|
btanner/differential_value_iteration
| 0
|
12780326
|
"""Sample program that runs a sweep and records results."""
from pathlib import Path
from typing import Sequence
import numpy as np
from absl import app
from absl import flags
from differential_value_iteration import utils
from differential_value_iteration.algorithms import algorithms
from differential_value_iteration.environments import garet
from differential_value_iteration.environments import micro
FLAGS = flags.FLAGS
flags.DEFINE_string(name='plot_dir', default='plots', help='path to plot dir')
flags.DEFINE_integer('max_iters', 100000, 'Maximum iterations per algorithm.')
flags.DEFINE_float('epsilon', 1e-7, 'Tolerance for convergence.')
flags.DEFINE_bool('mrp', True, 'Run mrp experiments.')
flags.DEFINE_bool('mdp', True, 'Run mdp experiments.')
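# Example invocation (flag names as defined above; --nomrp / --nomdp are
# absl-py's standard boolean negations):
#   python main.py --max_iters=50000 --epsilon=1e-6 --plot_dir=plots --nomdp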
def main(argv):
del argv
alphas = [1.0, 0.999, 0.99, 0.9, 0.7, 0.5, 0.3, 0.1, 0.01, 0.001]
betas = [1.0, 0.999, 0.99, 0.9, 0.7, 0.5, 0.3, 0.1, 0.01, 0.001]
max_iters = FLAGS.max_iters
epsilon = FLAGS.epsilon
plot_dir = FLAGS.plot_dir
if plot_dir[-1] != '/':
plot_dir += '/'
Path(plot_dir).mkdir(parents=True, exist_ok=True)
if FLAGS.mrp:
run_mrps(alphas=alphas,
betas=betas,
max_iters=max_iters,
epsilon=epsilon,
plot_dir=plot_dir)
if FLAGS.mdp:
run_mdps(alphas=alphas,
betas=betas,
max_iters=max_iters,
epsilon=epsilon,
plot_dir=plot_dir)
def run_mrps(
alphas: Sequence[float],
betas: Sequence[float],
max_iters: int,
epsilon: float,
plot_dir: str):
envs = [
micro.create_mrp1(dtype=np.float32),
micro.create_mrp2(dtype=np.float32),
micro.create_mrp3(dtype=np.float32),
]
for env in envs:
init_v = np.zeros(env.num_states)
init_r_bar_scalar = 0
init_r_bar_vec = np.zeros(env.num_states)
results = exp_RVI_Evaluation(env, 'exec_sync', alphas, init_v, max_iters,
epsilon, ref_idx=0)
utils.draw(results, plot_dir + env.name + '_RVI_Evaluation_sync', alphas)
results = exp_RVI_Evaluation(env, 'exec_async', alphas, init_v, max_iters,
epsilon, ref_idx=0)
utils.draw(results, plot_dir + env.name + '_RVI_Evaluation_async', alphas)
results = exp_DVI_Evaluation(env, 'exec_sync', alphas, betas, init_v,
init_r_bar_scalar, max_iters, epsilon)
utils.draw(results, plot_dir + env.name + '_DVI_Evaluation_sync', alphas,
betas)
results = exp_DVI_Evaluation(env, 'exec_async', alphas, betas, init_v,
init_r_bar_scalar, max_iters, epsilon)
utils.draw(results, plot_dir + env.name + '_DVI_Evaluation_async', alphas,
betas)
results = exp_MDVI_Evaluation(env, 'exec_sync', alphas, betas, init_v,
init_r_bar_vec, max_iters, epsilon)
utils.draw(results, plot_dir + env.name + '_MDVI_Evaluation_sync', alphas,
betas)
results = exp_MDVI_Evaluation(env, 'exec_async', alphas, betas, init_v,
init_r_bar_vec, max_iters, epsilon)
utils.draw(results, plot_dir + env.name + '_MDVI_Evaluation_async',
alphas,
betas)
def run_mdps(alphas: Sequence[float], betas: Sequence[float], max_iters: int,
epsilon: float, plot_dir: str):
garet_env = garet.create(seed=42,
num_states=10,
num_actions=2,
branching_factor=3)
envs = [garet_env, micro.mdp2]
for env in envs:
init_v = np.zeros(env.num_states)
init_r_bar_scalar = 0
init_r_bar_vec = np.zeros(env.num_states)
results = exp_RVI_Control(env, 'exec_sync', alphas, init_v, max_iters,
epsilon, ref_idx=0)
utils.draw(results, plot_dir + env.name + '_RVI_Control_sync', alphas)
results = exp_RVI_Control(env, 'exec_async', alphas, init_v, max_iters,
epsilon, ref_idx=0)
utils.draw(results, plot_dir + env.name + '_RVI_Control_async', alphas)
results = exp_DVI_Control(env, 'exec_sync', alphas, betas, init_v,
init_r_bar_scalar, max_iters, epsilon)
utils.draw(results, plot_dir + env.name + '_DVI_Control_sync', alphas,
betas)
results = exp_DVI_Control(env, 'exec_async', alphas, betas, init_v,
init_r_bar_scalar, max_iters, epsilon)
utils.draw(results, plot_dir + env.name + '_DVI_Control_async', alphas,
betas)
results = exp_MDVI_Control1(env, 'exec_sync', alphas, betas, init_v,
init_r_bar_vec, max_iters, epsilon)
utils.draw(results, plot_dir + env.name + '_MDVI_Control1_sync', alphas,
betas)
results = exp_MDVI_Control1(env, 'exec_async', alphas, betas, init_v,
init_r_bar_vec, max_iters, epsilon)
utils.draw(results, plot_dir + env.name + '_MDVI_Control1_async', alphas,
betas)
results = exp_MDVI_Control2(env, 'exec_sync', alphas, betas, init_v,
init_r_bar_vec, max_iters, epsilon)
utils.draw(results, plot_dir + env.name + '_MDVI_Control2_sync', alphas,
betas)
results = exp_MDVI_Control2(env, 'exec_async', alphas, betas, init_v,
init_r_bar_vec, max_iters, epsilon)
utils.draw(results, plot_dir + env.name + '_MDVI_Control2_async', alphas,
betas)
def exp_RVI_Evaluation(env, update_rule, alphas, init_v, max_iters, epsilon,
ref_idx=0):
convergence_flags = np.zeros(len(alphas))
for alpha_idx, alpha in enumerate(alphas):
alg = algorithms.RVI_Evaluation(env, init_v, alpha, ref_idx)
print(f'{env.name} RVI Evaluation {update_rule} alpha:{alpha}', end=' ')
convergence = utils.run_alg(alg, update_rule, max_iters, epsilon)
print(f'Converged? {convergence}')
convergence_flags[alpha_idx] = convergence
return convergence_flags
def exp_RVI_Control(env, update_rule, alphas, init_v, max_iters, epsilon,
ref_idx=0):
convergence_flags = np.zeros(len(alphas))
for alpha_idx, alpha in enumerate(alphas):
alg = algorithms.RVI_Control(env, init_v, alpha, ref_idx)
print(f'{env.name} RVI Control {update_rule} alpha:{alpha}', end=' ')
convergence = utils.run_alg(alg, update_rule, max_iters, epsilon)
print(f'Converged? {convergence}')
convergence_flags[alpha_idx] = convergence
return convergence_flags
def exp_DVI_Evaluation(env, update_rule, alphas, betas, init_v, init_r_bar,
max_iters, epsilon):
convergence_flags = np.zeros((len(alphas), len(betas)))
for alpha_idx, alpha in enumerate(alphas):
for beta_idx, beta in enumerate(betas):
alg = algorithms.DVI_Evaluation(env, init_v, init_r_bar, alpha, beta)
print(
f'{env.name} DVI Evaluation {update_rule} alpha:{alpha} beta:{beta}',
end=' ')
convergence = utils.run_alg(alg, update_rule, max_iters, epsilon)
print(f'Converged? {convergence}')
convergence_flags[alpha_idx, beta_idx] = convergence
return convergence_flags
def exp_DVI_Control(env, update_rule, alphas, betas, init_v, init_r_bar,
max_iters, epsilon):
convergence_flags = np.zeros((len(alphas), len(betas)))
for alpha_idx, alpha in enumerate(alphas):
for beta_idx, beta in enumerate(betas):
alg = algorithms.DVI_Control(env, init_v, init_r_bar, alpha, beta)
print(f'{env.name} DVI Control {update_rule} alpha:{alpha} beta:{beta}',
end=' ')
convergence = utils.run_alg(alg, update_rule, max_iters, epsilon)
print(f'Converged? {convergence}')
convergence_flags[alpha_idx, beta_idx] = convergence
return convergence_flags
def exp_MDVI_Evaluation(env, update_rule, alphas, betas, init_v, init_r_bar,
max_iters, epsilon):
convergence_flags = np.zeros((len(alphas), len(betas)))
for alpha_idx, alpha in enumerate(alphas):
for beta_idx, beta in enumerate(betas):
alg = algorithms.MDVI_Evaluation(env, init_v, init_r_bar, alpha, beta)
print(
f'{env.name} MDVI Evaluation {update_rule} alpha:{alpha} beta:{beta}',
end=' ')
convergence = utils.run_alg(alg, update_rule, max_iters, epsilon)
print(f'Converged? {convergence}')
convergence_flags[alpha_idx, beta_idx] = convergence
return convergence_flags
def exp_MDVI_Control1(env, update_rule, alphas, betas, init_v, init_r_bar,
max_iters, epsilon):
convergence_flags = np.zeros((len(alphas), len(betas)))
for alpha_idx, alpha in enumerate(alphas):
for beta_idx, beta in enumerate(betas):
alg = algorithms.MDVI_Control1(env, init_v, init_r_bar, alpha, beta)
print(f'{env.name} MDVI Control1 {update_rule} alpha:{alpha} beta:{beta}',
end=' ')
convergence = utils.run_alg(alg, update_rule, max_iters, epsilon)
print(f'Converged? {convergence}')
convergence_flags[alpha_idx, beta_idx] = convergence
return convergence_flags
def exp_MDVI_Control2(env, update_rule, alphas, betas, init_v, init_r_bar,
max_iters, epsilon):
convergence_flags = np.zeros((len(alphas), len(betas)))
for alpha_idx, alpha in enumerate(alphas):
for beta_idx, beta in enumerate(betas):
alg = algorithms.MDVI_Control2(env, init_v, init_r_bar, alpha, beta)
print(f'{env.name} MDVI Control2 {update_rule} alpha:{alpha} beta:{beta}',
end=' ')
convergence = utils.run_alg(alg, update_rule, max_iters, epsilon)
print(f'Converged? {convergence}')
convergence_flags[alpha_idx, beta_idx] = convergence
return convergence_flags
if __name__ == '__main__':
app.run(main)
| 2.84375
| 3
|
dz.py
|
miliskok/bot1
| 0
|
12780327
|
from telebot import types
def my_input(bot, chat_id, txt, ResponseHandler):
message = bot.send_message(chat_id, text=txt)
bot.register_next_step_handler(message, ResponseHandler)
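# register_next_step_handler makes Telebot deliver the user's next message in
# this chat to ResponseHandler, which is how these helpers emulate a blocking
# input() on top of the callback-driven bot API.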
# -----------------------------------------------------------------------
def my_inputInt(bot, chat_id, txt, ResponseHandler):
message = bot.send_message(chat_id, text=txt)
bot.register_next_step_handler(message, my_inputInt_SecondPart, botQuestion=bot, txtQuestion=txt,
ResponseHandler=ResponseHandler)
def my_inputInt_SecondPart(message, botQuestion, txtQuestion, ResponseHandler):
chat_id = message.chat.id
try:
if message.content_type != "text":
raise ValueError
var_int = int(message.text)
# the input was successfully converted to int, so we can call the response handler and pass it our number
ResponseHandler(botQuestion, chat_id, var_int)
except ValueError:
botQuestion.send_message(chat_id,
text="Можно вводить ТОЛЬКО целое число в десятичной системе исчисления (символами от 0 до 9)!\nПопробуйте еще раз...")
my_inputInt(botQuestion, chat_id, txtQuestion, ResponseHandler) # это не рекурсия, но очень похоже
def dz1(bot, chat_id):
markup = types.InlineKeyboardMarkup()
name = my_input('Введите свое имя')
bot.send_message(chat_id, text="обычно тебя зовут-" + name)
def dz2(bot, chat_id):
age = my_inputInt('Введите свой возраст')
bot.send_message(chat_id, text="твой возраст " + (str(age)))
def dz3(bot, chat_id):
age2 = my_inputInt('Введите свой возраст')
bot.send_message(chat_id, (str(age2)) * 5)
def dz4(bot, chat_id):
name2 = my_input('как <NAME>?')
age3 = my_inputInt('сколько тебе лет?')
bot.send_message(chat_id, text="ку," + name2)
def dz5(bot, chat_id):
user_age = my_inputInt("сколько тебе лет?")
if user_age > 30:
bot.send_message(chat_id,
text="Судья говорит свидетельнице: -Ваш возраст? -Все дают мне 18 лет! -Будете выдумывать, я вам сейчас пожизненное дам")
elif user_age < 18:
bot.send_message(chat_id,
text="ты сейчас в таком возрасте, что покупая новые ботинки, должен задуматься: а не в них ли меня будут хоронить?")
else:
bot.send_message(chat_id, text="вы где то между 18 и 30 - shit")
def dz6(bot, chat_id):
name3 = my_input('Введите свое имя')
lenght = len(name3)
bot.send_message(chat_id, str(name3[1:lenght - 1:]))
bot.send_message(chat_id, str(name3[:: -1]))
bot.send_message(chat_id, str(name3[-3::]))
bot.send_message(chat_id, str(name3[0:5:]))
def dz7(bot, chat_id):
name4 = my_input('Введите свое имя')
bot.send_message(chat_id, text='букв в твоеи имени: ' + str(len(name4)))
user_age2 = my_inputInt("сколько тебе лет?")
suma = 0
nesuma = 1
while user_age2 > 0:
digit = user_age2 % 10
suma = suma + digit
nesuma = nesuma * digit
user_age2 = user_age2 // 10
bot.send_message(chat_id, text='сумма чисел твоего возраста: ' + str(suma))
bot.send_message(chat_id, text='произведение чисел твоего возраста: ' + str(nesuma))
def dz8(bot, chat_id):
name4 = my_input('Введите свое имя')
bot.send_message(chat_id, name4.title())
bot.send_message(chat_id, name4.lower())
bot.send_message(chat_id, name4.upper())
def dz9(bot, chat_id):
while True:
user_age2 = my_input('сколько тебе лет?')
if not user_age2.isnumeric():
bot.send_message(chat_id, text='вы ввели не число, ошибка')
elif not 0 <= int(user_age2) <= 150:
bot.send_message(chat_id, text='ваше число не входит в диапазон существующих')
else:
bot.send_message(chat_id, text='ok')
break
def dz10(bot, chat_id):
key = types.InlineKeyboardMarkup()
name44 = my_input('введите свое имя')
if name44.isalpha() or name44.isspace():
bot.send_message(chat_id, text='ok')
else:
bot.send_message(chat_id, text='bad')
| 2.328125
| 2
|
torchbenchmark/models/fastNLP/reproduction/Star_transformer/util.py
|
Chillee/benchmark
| 2,693
|
12780328
|
import fastNLP as FN
import argparse
import os
import random
import numpy
import torch
from tensorboardX import SummaryWriter  # needed by TensorboardCallback below (tensorboardX assumed)
def get_argparser():
parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=float, required=True)
parser.add_argument('--w_decay', type=float, required=True)
parser.add_argument('--lr_decay', type=float, required=True)
parser.add_argument('--bsz', type=int, required=True)
parser.add_argument('--ep', type=int, required=True)
parser.add_argument('--drop', type=float, required=True)
parser.add_argument('--gpu', type=str, required=True)
parser.add_argument('--log', type=str, default=None)
return parser
def add_model_args(parser):
parser.add_argument('--nhead', type=int, default=6)
parser.add_argument('--hdim', type=int, default=50)
parser.add_argument('--hidden', type=int, default=300)
return parser
def set_gpu(gpu_str):
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_str
def set_rng_seeds(seed=None):
if seed is None:
seed = numpy.random.randint(0, 65536)
random.seed(seed)
numpy.random.seed(seed)
torch.random.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# print('RNG_SEED {}'.format(seed))
return seed
class TensorboardCallback(FN.Callback):
"""
Accepts one or more of the following strings as arguments:
- "model"
- "loss"
- "metric"
"""
def __init__(self, *options):
super(TensorboardCallback, self).__init__()
args = {"model", "loss", "metric"}
for opt in options:
if opt not in args:
raise ValueError(
"Unrecognized argument {}. Expect one of {}".format(opt, args))
self.options = options
self._summary_writer = None
self.graph_added = False
def on_train_begin(self):
save_dir = self.trainer.save_path
if save_dir is None:
path = os.path.join(
"./", 'tensorboard_logs_{}'.format(self.trainer.start_time))
else:
path = os.path.join(
save_dir, 'tensorboard_logs_{}'.format(self.trainer.start_time))
self._summary_writer = SummaryWriter(path)
def on_batch_begin(self, batch_x, batch_y, indices):
if "model" in self.options and self.graph_added is False:
# tensorboardX has a major bug here, so the model graph cannot be drawn for now
# from fastNLP.core.utils import _build_args
# inputs = _build_args(self.trainer.model, **batch_x)
# args = tuple([value for value in inputs.values()])
# args = args[0] if len(args) == 1 else args
# self._summary_writer.add_graph(self.trainer.model, torch.zeros(32, 2))
self.graph_added = True
def on_backward_begin(self, loss):
if "loss" in self.options:
self._summary_writer.add_scalar(
"loss", loss.item(), global_step=self.trainer.step)
if "model" in self.options:
for name, param in self.trainer.model.named_parameters():
if param.requires_grad:
self._summary_writer.add_scalar(
name + "_mean", param.mean(), global_step=self.trainer.step)
# self._summary_writer.add_scalar(name + "_std", param.std(), global_step=self.trainer.step)
self._summary_writer.add_scalar(name + "_grad_mean", param.grad.mean(),
global_step=self.trainer.step)
def on_valid_end(self, eval_result, metric_key):
if "metric" in self.options:
for name, metric in eval_result.items():
for metric_key, metric_val in metric.items():
self._summary_writer.add_scalar("valid_{}_{}".format(name, metric_key), metric_val,
global_step=self.trainer.step)
def on_train_end(self):
self._summary_writer.close()
del self._summary_writer
def on_exception(self, exception):
if hasattr(self, "_summary_writer"):
self._summary_writer.close()
del self._summary_writer
| 2.421875
| 2
|
core/models/behaviours/nameable.py
|
bergran/pokemon_project_example
| 0
|
12780329
|
# -*- coding: utf-8 -*-
from django.db import models
class Nameable(models.Model):
name = models.CharField(max_length=40)
class Meta:
abstract = True
| 2.046875
| 2
|
cortex2/__init__.py
|
lowenhere/emotiv-cortex2-python-client
| 10
|
12780330
|
from .emotiv_cortex2_client import EmotivCortex2Client
__all__ = [
'EmotivCortex2Client'
]
# Version 1.0.0
| 1.117188
| 1
|
Bot.py
|
danHoberman1999/nim_ai
| 0
|
12780331
|
from Board import Board
class Bot(Board):
def __init__(self, state):
self.name = "Dr. Nimbot"
self.pile_states = None
self.state = state
self.even = []
self.odd = []
'''
Keeps track of the states of the current nim board.
'''
def nim_develop_state(self):
states = []
first_pile = []
second_pile = []
third_pile = []
fourth_pile = []
first_pile_state = self.state[0][0]
if first_pile_state == 1:
first_pile.append(1)
second_pile_state = self.state[1][0]
if second_pile_state == 3:
second_pile.append(1)
second_pile.append(2)
elif second_pile_state == 2:
second_pile.append(2)
elif second_pile_state == 1:
second_pile.append(1)
third_pile_state = self.state[2][0]
if third_pile_state == 5:
third_pile.append(1)
third_pile.append(4)
elif third_pile_state == 4:
third_pile.append(4)
elif third_pile_state == 3:
third_pile.append(1)
third_pile.append(2)
elif third_pile_state == 2:
third_pile.append(2)
elif third_pile_state == 1:
third_pile.append(1)
fourth_pile_state = self.state[3][0]
if fourth_pile_state == 7:
fourth_pile.append(1)
fourth_pile.append(2)
fourth_pile.append(4)
elif fourth_pile_state == 6:
fourth_pile.append(2)
fourth_pile.append(4)
elif fourth_pile_state == 5:
fourth_pile.append(1)
fourth_pile.append(4)
elif fourth_pile_state == 4:
fourth_pile.append(4)
elif fourth_pile_state == 3:
fourth_pile.append(1)
fourth_pile.append(2)
elif fourth_pile_state == 2:
fourth_pile.append(2)
elif fourth_pile_state == 1:
fourth_pile.append(1)
states.append(first_pile)
states.append(second_pile)
states.append(third_pile)
states.append(fourth_pile)
self.pile_states = states
'''
Uses a dictionary to figure out which state values occur an odd number of times across the piles.
'''
def nim_dictionary(self):
even_values = []
odd_values = []
nim_dictionary = {}
for cols in self.pile_states:
for value in cols:
if value in nim_dictionary:
nim_dictionary[value] += 1
elif value not in nim_dictionary:
nim_dictionary[value] = 1
for value in nim_dictionary:
if nim_dictionary[value] %2 == 0:
even_values.append([value, nim_dictionary[value]])
else:
odd_values.append([value, nim_dictionary[value]])
self.even = even_values
self.odd = odd_values
'''
Depending on the number of odd state values, uses a different rule to decide which pile to take from and how many sticks to subtract.
The strategy changes as the game goes on, since the board passes through many different states over the course of a 1v1 game.
'''
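# Illustrative example (added comment, not part of the original code): with piles [1, 3, 5, 7]
# nim_develop_state decomposes them into [1], [1, 2], [1, 4] and [1, 2, 4]; every value then
# appears an even number of times, so self.odd stays empty and the position is already balanced.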
def nim_algorithm(self):
smallest = 10
largest = 0
pile_largest = []
largest_pile = 0
largest_column = None
smallest_pile = 10
empty_pile_tracker = 0
single_pile_tracker = 0
large_pile_tracker = 0
col_num = 0
col_total = 0
for piles in self.state:
if piles[0] > 1:
large_pile_tracker +=1
if piles[0] == 0:
empty_pile_tracker += 1
if piles[0] == 1:
single_pile_tracker +=1
if empty_pile_tracker ==2 and large_pile_tracker ==2:
for col, piles in enumerate(self.state):
if piles[0] > 1:
pile_largest.append([piles[0],col])
for val in pile_largest:
if val[0] > largest_pile:
largest_pile = val[0]
largest_column = val[1]
if val[0]< smallest_pile:
smallest_pile = val[0]
subtraction_amount = largest_pile - smallest_pile
return largest_column, subtraction_amount
if empty_pile_tracker ==2 and single_pile_tracker ==2:
for col, piles in enumerate(self.state):
if piles[0] == 1:
col_total = piles[0]
return col, col_total
if single_pile_tracker ==3:
for col, piles in enumerate(self.state):
if piles[0] > 1:
col_total = piles[0]
return col, col_total
if single_pile_tracker ==2 and large_pile_tracker ==1:
for col, piles in enumerate(self.state):
if piles[0] > 1:
col_total = piles[0]-1
return col, col_total
if empty_pile_tracker == 3:
for col, piles in enumerate(self.state):
if piles[0] > 1:
col_total = piles[0] -1
return col, col_total
if empty_pile_tracker == 2 and single_pile_tracker == 1:
for col, piles in enumerate(self.state):
if piles[0] >1:
col_num = col
col_total = piles[0]
return col_num, col_total
else:
if len(self.odd) ==1:
for col, val in enumerate(self.pile_states):
if self.odd[0][0] in val:
return col, self.odd[0][0]
elif len(self.odd) ==2: # find largest item figure out how much to subtract to equal other number
for val in self.odd:
if val[0] > largest:
largest = val[0]
for val in self.odd:
if val[0] < smallest:
smallest = val[0]
total = largest + smallest
state_total = 0
for val in self.pile_states:
if largest in val:
for num in val:
state_total += num
if (total > state_total):
subtraction_amount = largest-smallest
else:
subtraction_amount = largest+smallest
for col, val in enumerate(self.pile_states):
if largest in val:
return col, subtraction_amount
elif len(self.odd) ==3: # add smallest item with largest item, figure out how much to subtract to equal middle item
col_tracker = [0,1,2]
col_largest = None
col_smallest = None
for col, val in enumerate(self.odd):
if val[0] > largest:
largest = val[0]
col_largest = col
for col,val in enumerate(self.odd):
if val[0] < smallest:
smallest = val[0]
col_smallest = col
col_tracker.remove(col_largest)
col_tracker.remove(col_smallest)
middle_col_val = self.odd[col_tracker[0]][0]
holding_number = smallest + largest
subtraction_amount = holding_number - middle_col_val
for col, val in enumerate(self.pile_states):
if largest in val and smallest in val and middle_col_val in val:
subtraction_amount = val[0] + val[1] + val[2]
return col, subtraction_amount
elif largest in val and middle_col_val in val:
subtraction_amount = largest + middle_col_val - smallest
return col, subtraction_amount
elif len(val) ==1 and largest in val:
subtraction_amount = largest-middle_col_val - smallest
return col, subtraction_amount
elif largest in val:
return col, subtraction_amount
'''
Updates the state of the board after Dr. Nimbot makes his move and prints the result.
'''
def dr_state(self, col, amount):
print("Dr. Nimbot is making his move now")
print("AI turn : \n")
self.state[int(col)][0] -= int(amount)
print("The Dr took ", amount, "sticks from pile: ", col +1)
self.print_board(self.state)
return self.state
| 3.171875
| 3
|
randomPassword.py
|
Jarvis-Yu/RandomPasswordGenerator
| 0
|
12780332
|
import sys
from random import randint
class SymbolSet:
def __init__(self, string: str, weight: int) -> None:
self.__str = string
self.__weight = weight
def getWeight(self) -> int:
return self.__weight
def getChar(self) -> str:
index = randint(0, len(self.__str) - 1)
return self.__str[index]
def argOfCorrectFormat(arg: str) -> bool: # of syntax "-*"
if len(arg) == 0:
return False
if arg[0] == "-":
return True
return False
def main() -> int:
if len(sys.argv) <= 1:
return 0
passwordLen = int(sys.argv[1])
if passwordLen <= 0:
return 0
lowerCases = SymbolSet(
"abcdefghijklmnopqrstuvwxyz",
260)
upperCases = SymbolSet(
"ABCDEFGHIJKLMNOPQRSTUVWXYZ",
260)
numbers = SymbolSet(
"1234567890",
400)
symbols = SymbolSet(
"~`! @#$%^&*()_-+={[}]|\:;\"'<,>.?/",
200)
password = ""
listOfSymbols = []
if len(sys.argv) <= 2:
listOfSymbols = [lowerCases, upperCases, numbers, symbols]
else:
for arg in sys.argv[2:]:
if argOfCorrectFormat(arg):
arg_sub = arg[1:]
if arg_sub == "lc":
listOfSymbols.append(lowerCases)
elif arg_sub == "uc":
listOfSymbols.append(upperCases)
elif arg_sub == "n":
listOfSymbols.append(numbers)
elif arg_sub == "s":
listOfSymbols.append(symbols)
else: # invalid arg
print("Invalid Argument Error: only -lc -uc -n -s are allowed")
totalWeight = sum([i.getWeight() for i in listOfSymbols])
for i in range(passwordLen):
randomNumber = randint(0, totalWeight - 1)
for symbolSet in listOfSymbols:
if symbolSet.getWeight() <= randomNumber:
randomNumber -= symbolSet.getWeight()
else:
password += symbolSet.getChar()
break
print(password)
return 0
if __name__ == "__main__":
main()
| 3.3125
| 3
|
Vault7/Lost-in-Translation/windows/Resources/Ops/PyScripts/windows/sqliteWorkaround.py
|
dendisuhubdy/grokmachine
| 46
|
12780333
|
<gh_stars>10-100
import dsz
import sqlite3
import sys
if (__name__ == '__main__'):
save_flags = dsz.control.Method()
dsz.control.echo.Off()
if (dsz.script.Env['script_parent_echo_disabled'].lower() != 'false'):
dsz.control.quiet.On()
if (len(sys.argv) != 3):
dsz.ui.Echo(('Invalid number of arguments supplied. Expected 3 (including program name), received %d.' % len(sys.argv)))
print ('For debugging purposes:\n%s' % sys.argv)
sys.exit((-1))
database_file = sys.argv[1]
sql_statement = sys.argv[2]
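# Usage sketch (illustrative, not from the original script): the script expects two arguments, e.g. sqliteWorkaround.py /path/to/database.sqlite "SELECT * FROM some_table"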
dsz.script.data.Start('sqlstatementinfo')
dsz.script.data.Add('database_file', database_file, dsz.TYPE_STRING)
dsz.script.data.Add('sql_statement', sql_statement, dsz.TYPE_STRING)
dsz.script.data.Store()
db = sqlite3.connect(database_file)
c = db.cursor()
rows = c.execute(sql_statement).fetchall()
if (len(rows) > 0):
for r in rows:
dsz.script.data.Start('row')
d = 0
for c in r:
dsz.script.data.Add(('column%d' % d), str(c), dsz.TYPE_STRING)
d += 1
dsz.script.data.Store()
| 2.546875
| 3
|
wu_current_json.py
|
johnny22/Weather_app
| 0
|
12780334
|
import requests
import pickle
import json
def make_call(location):
apikey = '<KEY>'
URL = 'https://api.weather.com/v2/pws/observations/current?apiKey={0}&stationId={1}&numericPrecision=decimal&format=json&units=e'.format(apikey, location)
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 11_1) AppleWebKit/605.1.fifteen (KHTML, like Gecko) Version/14.0.2 Safari/605.1.fifteen",
"Accept": "application/json, text/plain, */*",
"Accept-Language": "en-US,en;q=0.5",
#"referrer": "https://www.wunderground.com/calendar/us/wa/carnation",
"method": "GET",
"mode": "cors"
}
r = requests.get(URL, headers=headers)
if r.status_code == 200:
response_data = json.loads(r.content)
return response_data
else:
print ("Ignored : " + str(location))
return None
class WuData():
def __init__(self, response_in, location):
"""this takes json in """
self.out_dict = {}
self.out_dict['location'] = '"' + location + '"'
data = response_in['observations'][0]['imperial']
self.current_temp = data['temp']
self.out_dict['current_temp'] = data['temp']
self.out_dict['current_pressure'] = data['pressure']
self.out_dict['today_precip'] = data['precipTotal']
self.out_dict['current_humidity'] = response_in['observations'][0]['humidity']
self.out_dict['wind_speed'] = data['windSpeed']
self.out_dict['wind_direction'] = response_in['observations'][0]['winddir']
self.out_dict['wind_gust'] = data['windGust']
self.out_dict['wind_chill'] = data['windChill']
self.out_dict['dew_point'] = data['dewpt']
self.current_pressure = data['pressure']
self.today_precip = data['precipTotal']
self.humidity = response_in['observations'][0]['humidity']
self.wind_speed = data['windSpeed']
self.wind_direction = response_in['observations'][0]['winddir']
self.wind_gust = data['windGust']
self.wind_chill = data['windChill']
self.dew_point = data['dewpt']
if __name__ == "__main__":
location_list = ['KWACARNA1', 'KWAFALLC80']
page_list = []
for location in location_list:
page = make_call(location)
page_list.append((page, location))
for tup in page_list:
conditions = WuData(tup[0], tup[1])
print (conditions.out_dict)
| 3.328125
| 3
|
photos/views.py
|
octaviodive/social_network_project
| 0
|
12780335
|
<filename>photos/views.py
from django.shortcuts import render, redirect
from .models import Category, Photo
from django.contrib.auth.decorators import login_required
@login_required(login_url='login')
def gallery(request):
user = request.user
category = request.GET.get('category')
if category == None:
photos = Photo.objects.filter(category__user=user)
else:
photos = Photo.objects.filter(
category__name=category, category__user=user)
categories = Category.objects.filter(user=user)
context = {'categories': categories, 'photos': photos}
return render(request, 'photos/home.html', context)
@login_required(login_url='login')
def view_photo(request, pk):
photo = Photo.objects.get(id=pk)
return render(request, 'photos/photo.html', {'photo': photo})
@login_required(login_url='login')
def add_photo(request):
user = request.user
categories = user.category_set.all()
if request.method == 'POST':
data = request.POST
images = request.FILES.getlist('images')
if data['category'] != 'none':
category = Category.objects.get(id=data['category'])
elif data['category_new'] != '':
category, created = Category.objects.get_or_create(
user=user,
name=data['category_new'])
else:
category = None
for image in images:
photo = Photo.objects.create(
category=category,
description=data['description'],
image=image,
)
return redirect('home')
context = {'categories': categories}
return render(request, 'photos/add.html', context)
| 2.203125
| 2
|
tools/check_extract_queue.py
|
itpir/geo-hpc
| 3
|
12780336
|
"""
check if any items that are ready for processing exist in extract queue
ready for processing = status set to 0
extract queue = mongodb db/collection: asdf->extracts
"""
# ----------------------------------------------------------------------------
import sys
import os
branch = sys.argv[1]
utils_dir = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'utils')
sys.path.insert(0, utils_dir)
from config_utility import BranchConfig
config = BranchConfig(branch=branch)
config.test_connection()
# ----------------------------------------------------------------------------
# check mongodb connection
if config.connection_status != 0:
print("error")
# sys.exit("connection status error: " + str(config.connection_error))
# ----------------------------------------------------------------------------
job_type = sys.argv[2]
import pymongo
client = pymongo.MongoClient(config.database)
c_extracts = client.asdf.extracts
if job_type == "det":
request_count = c_extracts.find({'status': {'$in': [0, -1, 2]}, '$or': [{'attempts': {'$exists': False}}, {'attempts': {'$lt': 5}}], 'priority': {'$gt': -1}}).count()
elif job_type == "default":
request_count = c_extracts.find({'status': {'$in': [0, -1, 2]}, '$or': [{'attempts': {'$exists': False}}, {'attempts': {'$lt': 5}}]}).count()
elif job_type == "raster":
request_count = c_extracts.find({'status': {'$in': [0, -1, 2]}, '$or': [{'attempts': {'$exists': False}}, {'attempts': {'$lt': 5}}], 'classification': 'raster'}).count()
elif job_type == "msr":
request_count = c_extracts.find({'status': {'$in': [0, -1, 2]}, '$or': [{'attempts': {'$exists': False}}, {'attempts': {'$lt': 5}}], 'classification': 'msr'}).count()
elif "errors" in job_type:
request_count = c_extracts.find({'status': {'$in': [0, -1, 2]}, '$and': [{'attempts': {'$gte': 5}}, {'attempts': {'$lt': 20}}]}).count()
else:
request_count = "invalid"
if request_count == "invalid":
print("invalid")
elif request_count > 0:
print("ready")
else:
print("empty")
| 2.578125
| 3
|
lib/jsonModule.py
|
mrotke/pyStock
| 0
|
12780337
|
<filename>lib/jsonModule.py<gh_stars>0
#!/usr/bin/python3
'''
Created on 3 sty 2020
@author: spasz
'''
import json
import os
def jsonRead(filename):
data = []
if os.path.isfile(filename):
with open(filename, 'r') as f:
data = json.load(f)
else:
print('(JsonModule) File does not exist!')
return data
def jsonWrite(filename, data):
with open(filename, 'w') as f:
json.dump(data, f, indent=4, sort_keys=True)
f.close()
print('Written %s.\n' % (filename))
def jsonShow(data):
for entry in data:
print(entry)
| 3.078125
| 3
|
code/utils/vad_util.py
|
zacharyclam/speaker_recognition
| 37
|
12780338
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : vad_util.py
# @Time : 2018/8/29 13:37
# @Software : PyCharm
import numpy as np
from math import log
import librosa
def mse(data):
return ((data ** 2).mean()) ** 0.5
def dBFS(data):
mse_data = mse(data)
if mse_data == 0.0:
return 0
max_possible_val = 2 ** 16 / 2
return 20 * log(mse_data / max_possible_val, 10)
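# Note (added comment): the value above is 20 * log10(rms / 32768), i.e. decibels relative to 16-bit full scale; silent input short-circuits to 0.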
def cut_wav(data, per_f=150):
num_f = int(len(data) / per_f)
data = data[:num_f * per_f]
data = data.reshape((num_f, per_f))
return data
def remove_silence(source_sound, common_sound, silence_threshold=140, chunk_size=148):
source_sounds = cut_wav(source_sound, chunk_size)
common_sounds = cut_wav(common_sound, chunk_size)
y = []
for i in range(common_sounds.shape[0]):
db = -dBFS(common_sounds[i, ...])
if db < silence_threshold:
y.append(source_sounds[i])
# print("db", i, db)
y = np.array(y)
y = y.flatten()
return y
def comman(sound):
abs_sound = np.abs(sound)
return sound / np.max(abs_sound)
if __name__ == '__main__':
wav_data, rate = librosa.load("BAC009S0908W0161.wav", sr=16000)
y = remove_silence(wav_data, wav_data, 139, 300)
librosa.output.write_wav("c.wav", y, sr=16000)
| 2.5625
| 3
|
src/translate.py
|
SeungoneKim/Transformer_implementation
| 0
|
12780339
|
import os
import sys
import argparse
import logging
import tqdm
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from data.tokenizer import Tokenizer
from util.utils import load_bestmodel
def translate(args, src_sentence, generated_max_length=100):
# prepare best model
best_model_path = os.path.join(args.final_model_path,"bestmodel.pth")
try:
best_model, _= load_bestmodel(best_model_path)
best_model.eval()
except OSError:
logging.info("Check if there is bestmodel.pth file in final_results folder")
# prepare device
device = args.device
# prepare tokenizer for both enc, dec
enc_tokenizer = Tokenizer(args.enc_language,args.enc_max_len)
dec_tokenizer = Tokenizer(args.dec_language,args.dec_max_len)
# prepare vocabulary for both enc, dec
enc_vocabulary = enc_tokenizer.get_vocab()
dec_vocabulary = dec_tokenizer.get_vocab()
# convert src_sentence, and measure the length of the src_sentence
src_sentence = src_sentence.lower() # delete if you do not need cased
src_sentence_length = len(src_sentence)
logging.info(f"The original {args.enc_language} sentence you provided was : ")
logging.info(src_sentence)
# encode the given src_sentence with enc_tokenizer
src_tensor = enc_tokenizer.encode(src_sentence).input_ids # [bs, sl]
enc_mask = best_model.generate_padding_mask(src_tensor, src_tensor, "src", "src")
logging.info(f"The {args.enc_language} Tokenizer converted sentence such as : ")
logging.info(src_tensor)
# prepare the pred_sentence
pred_tensor=[dec_tokenizer.bos_token] # now : [1] -> goal : [generated_max_length]
# translate the given sentence into target language
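# Greedy decoding sketch: starting from the BOS token, each iteration below feeds everything generated so far through the decoder and appends the most probable next token, stopping at EOS or after generated_max_length steps.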
with torch.no_grad():
# pass through encoder
encoder_output = best_model.Encoder(src_tensor, enc_mask) # [bs, sl, hs]
for idx in range(generated_max_length):
tgt_tensor = torch.LongTensor(pred_tensor).to(device)
enc_dec_mask = best_model.generate_padding_mask(tgt_tensor, src_tensor, "src", "tgt")
dec_mask = best_model.generate_padding_mask(tgt_tensor, tgt_tensor, "tgt", "tgt")
# pass through decoder
decoder_output = best_model.Decoder(tgt_tensor, encoder_output, enc_dec_mask, dec_mask) # [bs, sl, hs]
# append predicted_token into pred_tensor
predicted_token = decoder_output.argmax(dim=2)[:,-1].item()
pred_tensor.append(predicted_token)
# ENDING CONDITION : facing eos token
if predicted_token == dec_vocabulary.eos_token :
break
# decode with dec_tokenizer
translated_result = dec_tokenizer.decode(pred_tensor)
translated_result = translated_result[0]
# convert tensor into string
translated_sentence = ""
for tokens in translated_result:
translated_sentence += tokens
if tokens !='.':
translated_sentence += " "
return translated_sentence
| 2.296875
| 2
|
demo/visdrone_demo.py
|
kding1225/TDTS-visdrone
| 10
|
12780340
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import argparse
import cv2, os
from fcos_core.config import cfg
from predictor import VisDroneDemo
import time
def main():
parser = argparse.ArgumentParser(description="PyTorch Object Detection Webcam Demo")
parser.add_argument(
"--config-file",
default="configs/visdrone_tdts/tdts_R_50_FPN_1x_640x1024_visdrone_cn_mw1.5-nms0.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument(
"--weights",
default="models/tdts_R_50_FPN_1x_640x1024_visdrone_cn_mw1.5-nms0.pth",
metavar="FILE",
help="path to the trained model",
)
parser.add_argument(
"--images-dir",
default="demo/images",
metavar="DIR",
help="path to demo images directory",
)
parser.add_argument(
"--results-dir",
default="demo/results",
metavar="DIR",
help="path to demo images directory",
)
parser.add_argument(
"--min-image-size",
type=int,
default=640, # 800
help="Smallest size of the image to feed to the model. "
"Model was trained with 800, which gives best results",
)
parser.add_argument(
"opts",
help="Modify model config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
# load config from file and command-line arguments
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.MODEL.WEIGHT = args.weights
cfg.freeze()
# The following per-class thresholds are computed by maximizing
# per-class f-measure in their precision-recall curve.
# Please see compute_thresholds_for_classes() in coco_eval.py for details.
thresholds_for_classes = [1.0, 0.4543384611606598, 0.4528161883354187, 0.4456373155117035,
0.4930519461631775, 0.49669983983039856, 0.4916415810585022,
0.43324407935142517, 0.4070464074611664, 0.49178892374038696,
0.43258824944496155, 1.0]
demo_im_names = os.listdir(args.images_dir)
demo_im_names.sort()
print('{} images to test'.format(len(demo_im_names)))
# prepare object that handles inference plus adds predictions on top of image
demo = VisDroneDemo(
cfg,
confidence_thresholds_for_classes=thresholds_for_classes,
min_image_size=args.min_image_size
)
if args.results_dir:
if not os.path.exists(args.results_dir):
os.mkdir(args.results_dir)
# plt
for i, im_name in enumerate(demo_im_names):
img = cv2.imread(os.path.join(args.images_dir, im_name))
if img is None:
continue
start_time = time.time()
demo.run_det_on_opencv_image_plt(img, os.path.join(args.results_dir, im_name))
print("{}, {}\tinference time: {:.2f}s".format(i, im_name, time.time() - start_time))
print("Done!")
else:
for im_name in demo_im_names:
img = cv2.imread(os.path.join(args.images_dir, im_name))
if img is None:
continue
start_time = time.time()
composite = demo.run_on_opencv_image(img)
print("{}\tinference time: {:.2f}s".format(im_name, time.time() - start_time))
cv2.imwrite(os.path.join('result', im_name), composite)
# cv2.imshow(im_name, composite)
print("Press any keys to exit ...")
cv2.waitKey()
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
| 2.296875
| 2
|
rpython/translator/goal/targetgcbench.py
|
kantai/passe-pypy-taint-tracking
| 2
|
12780341
|
import os, sys
from rpython.translator.goal import gcbench
# _____ Define and setup target ___
def target(*args):
gcbench.ENABLE_THREADS = False # not RPython
return gcbench.entry_point, None
"""
Why is this a stand-alone target?
The above target specifies None as the argument types list.
This is a case treated specially in driver.py. If the list
of input types is empty, it is meant to be a list of strings,
actually implementing argv of the executable.
"""
| 2.25
| 2
|
migrations/versions/1c581a07c81f_.py
|
AbhishekPednekar84/personal-portfolio
| 2
|
12780342
|
<gh_stars>1-10
"""empty message
Revision ID: 1c581a07c81f
Revises:
Create Date: 2019-11-25 14:58:22.016437
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "1<PASSWORD>"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(
"blog",
sa.Column("description_token", postgresql.TSVECTOR(), nullable=True),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("blog", "description_token")
# ### end Alembic commands ###
| 1.257813
| 1
|
motorcycles.py
|
yiyidhuang/PythonCrashCrouse2nd
| 0
|
12780343
|
<filename>motorcycles.py
motorcycles = ['honda', 'yamaha', 'suzuki']
print(motorcycles)
motorcycles[0] = 'ducati'
print(motorcycles)
motorcycles.append('ducati')
print(motorcycles)
motorcycles = []
motorcycles.append('honda')
motorcycles.append('yamaha')
motorcycles.append('suzuki')
print(motorcycles)
# Insert
print('==========Insert elements')
motorcycles = ['honda', 'yamaha', 'suzuki']
motorcycles.insert(0, 'ducati')
print(motorcycles)
# Delete
print("==========Delete elements")
motorcycles = ['honda', 'yamaha', 'suzuki']
print(motorcycles)
del motorcycles[0]
print(motorcycles)
# Pop
print('==========Pop elements')
motorcycles = ['honda', 'yamaha', 'suzuki']
print(motorcycles)
popped_motorcycle = motorcycles.pop()
print(motorcycles)
print(popped_motorcycle)
motorcycles = ['honda', 'yamaha', 'suzuki']
last_owned = motorcycles.pop()
print(f"The last motorcycles I owned was a {last_owned.title()}.")
motorcycles = ['honda', 'yamaha', 'suzuki']
first_owned = motorcycles.pop(0)
print(f"The first motorcycle I owned was a {first_owned.title()}.")
# Remove
motorcycles = ['honda', 'yamaha', 'ducati']
print(motorcycles)
motorcycles.remove('ducati')
print(motorcycles)
motorcycles = ['honda', 'yamaha', 'suzuki', 'ducati']
print(motorcycles)
too_expensive = 'ducati'
motorcycles.remove(too_expensive)
print(motorcycles)
print(f"\nA {too_expensive.title()} is too expensive for me.")
# Error
#motorcycles = ['honda', 'yamaha', 'suzuki']
#print(motorcycles[3])
| 3.453125
| 3
|
sequeval/profiler.py
|
D2KLab/sequeval
| 10
|
12780344
|
<filename>sequeval/profiler.py
class Profiler:
def __init__(self, sequences):
users = []
items = []
ratings = []
for sequence in sequences:
for rating in sequence:
users.append(rating[1])
items.append(rating[0])
ratings.append(rating)
self._sequences = sequences
self._users = list(set(users))
self._items = list(set(items))
self._ratings = ratings
def users(self):
"""
:return: The number of unique users.
"""
return len(self._users)
def items(self):
"""
:return: The number of unique items.
"""
return len(self._items)
def ratings(self):
"""
:return: The number of ratings.
"""
return len(self._ratings)
def sequences(self):
"""
:return: The number of sequences.
"""
return len(self._sequences)
def sparsity(self):
"""
Compute the sequence-item sparsity, that is the number of ratings
divided by the number of sequences times the number of items.
:return: The sequence-item sparsity.
"""
return self.ratings() / (self.sequences() * self.items())
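# Illustrative example (added comment): 12 ratings spread over 4 sequences and 10 distinct items gives 12 / (4 * 10) = 0.3.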
def sequence_length(self):
"""
:return: The average length of a sequence.
"""
return self.ratings() / self.sequences()
| 3.328125
| 3
|
gym/envs/box2d/find_unconstructed_roads.py
|
Riya5915/gym
| 46
|
12780345
|
import os
import shutil
from pdb import set_trace
from gym.envs.box2d.car_racing import CarRacing
import numpy as np
import pandas as pd
def find_roads():
path = './touching_tracks_tests'
# Check if dir exists TODO
if os.path.isdir(path):
# Remove files TODO
shutil.rmtree(path)
# Create dir TODO
os.mkdir(path)
env = CarRacing(
allow_reverse=False,
show_info_panel=False,
num_tracks=2,
num_lanes=2,
num_lanes_changes=0,
num_obstacles=100,
random_obstacle_x_position=False,
random_obstacle_shape=False,)
env.change_zoom()
for j in range(100):
env.reset()
for i in range(len(env.tracks[0])):
prev_tile = env.tracks[0][i-2]
curr_tile = env.tracks[0][i-1]
next_tile = env.tracks[0][i]
if any(curr_tile[0] != prev_tile[1]):
set_trace()
elif any(curr_tile[1] != next_tile[0]):
set_trace()
env.screenshot(path,name=str(j),quality='high')
np.save(path + "/info_" + str(j) + ".csv", env.info)
np.save(path + "/track0_" + str(j) + ".csv", env.tracks[0])
np.save(path + "/track1_" + str(j) + ".csv", env.tracks[1])
if __name__ == '__main__':
find_roads()
| 2.6875
| 3
|
python/isbn-verifier/isbn_verifier.py
|
ajayat/exercism.io
| 1
|
12780346
|
<reponame>ajayat/exercism.io
import re
def is_valid(isbn: str) -> bool:
if match := re.match(r"^(\d{9}[0-9X])$", isbn.replace('-', '')):
isbn = list(match.group(1))
if isbn[-1] == 'X':
isbn[-1] = 10
if sum(int(n)*(10-i) for i, n in enumerate(isbn)) % 11 == 0:
return True
return False
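# Worked example (added comment, standard ISBN-10 rules): for "3-598-21508-8" the weighted sum is
# 3*10 + 5*9 + 9*8 + 8*7 + 2*6 + 1*5 + 5*4 + 0*3 + 8*2 + 8*1 = 264 = 24 * 11, so is_valid returns True.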
| 3.453125
| 3
|
order/migrations/0008_order_stripe_payment_id.py
|
abdellatifLabr/MyStore
| 0
|
12780347
|
<reponame>abdellatifLabr/MyStore<filename>order/migrations/0008_order_stripe_payment_id.py
# Generated by Django 3.1 on 2020-08-21 13:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('order', '0007_auto_20200818_1555'),
]
operations = [
migrations.AddField(
model_name='order',
name='stripe_payment_id',
field=models.CharField(default='', max_length=32),
preserve_default=False,
),
]
| 1.320313
| 1
|
tui/textwin.py
|
Chiel92/tfate
| 3
|
12780348
|
"Module containing TextWin class."""
from .window import Window, EndOfWin
from logging import info
class TextWin(Window):
"""Window containing the text"""
def __init__(self, ui):
Window.__init__(self, ui)
self.offset = 0
def draw_empty_interval(self):
try:
self.draw_string('ε', self.create_attribute(reverse=True), silent=False)
except UnicodeEncodeError:
self.draw_string('|', self.create_attribute(reverse=True), silent=False)
def draw(self):
"""Draw the visible text in the text window."""
textview = self.doc.view.text
length = len(textview)
highlightingview = self.doc.view.highlighting
selectionview = self.doc.view.selection
# Find the places of all empty selected intervals
empty_interval_positions = [beg for beg, end in selectionview if end - beg == 0]
try:
for pos, char in enumerate(textview):
# Draw possible empty selected interval at position
if empty_interval_positions and empty_interval_positions[0] == pos:
self.draw_empty_interval()
del empty_interval_positions[0]
# Apply color attribute if char is labeled
alt_background = False
if highlightingview[pos] == 'error':
alt_background = True
elif highlightingview[pos] == 'warning':
alt_background = True
color = 0
for i, label in enumerate(['string', 'number', 'keyword', 'comment']):
if highlightingview[pos] == label:
color = 11 + i
# Apply reverse attribute when char is selected
reverse = False
if selectionview.contains(pos):
reverse = True
# display newline character explicitly when selected
if char == '\n':
# char = '↵\n'
char = ' \n'
#drawchar = ' \n'
attribute = self.create_attribute(reverse=reverse, color=color,
highlight=False, alt_background=alt_background)
self.draw_string(char, attribute, silent=False)
# If we come here, the entire textview fits on the screen
# Draw possible remaining empty interval
if empty_interval_positions:
self.draw_empty_interval()
# Draw EOF character
self.draw_line('EOF', self.create_attribute(bold=True), silent=False)
except EndOfWin:
pass
| 2.890625
| 3
|
envs/act_game.py
|
rradules/opponent_modelling_monfg
| 1
|
12780349
|
<reponame>rradules/opponent_modelling_monfg
"""
(Im)balancing Act Game environment.
"""
import gym
import numpy as np
class ActGame(gym.Env):
"""
A two-agent vectorized multi-objective environment.
Possible actions for each agent are (L)eft, (M)iddle and (R)ight.
"""
NUM_AGENTS = 2
NUM_OBJECTIVES = 2
def __init__(self, max_steps, batch_size, payout_mat):
self.max_steps = max_steps
self.batch_size = batch_size
self.NUM_ACTIONS = len(payout_mat[0])
# s_0 + all action combinations
self.NUM_STATES = self.NUM_ACTIONS ** 2 + 1
self.payout_mat = payout_mat
self.states = np.reshape(np.array(range(self.NUM_ACTIONS ** 2)) + 1,
(self.NUM_ACTIONS, self.NUM_ACTIONS))
self.available_actions = [
np.ones((batch_size, self.NUM_ACTIONS), dtype=int)
for _ in range(self.NUM_AGENTS)
]
self.step_count = None
def reset(self):
self.step_count = 0
init_state = np.zeros(self.batch_size)
observation = [init_state, init_state]
info = [{'available_actions': aa} for aa in self.available_actions]
return observation, info
def step(self, action):
ac0, ac1 = action
self.step_count += 1
r = np.array([el[ac0, ac1] for el in self.payout_mat])
s0 = self.states[ac0, ac1]
s1 = self.states[ac1, ac0]
observation = [s0, s1]
reward = [r, r]
done = (self.step_count == self.max_steps)
info = [{'available_actions': aa} for aa in self.available_actions]
return observation, reward, done, info
def render(self, mode='human'):
pass
| 2.71875
| 3
|
src/lib/test_environment/abstract_spawn_test_environment.py
|
tkilias/integration-test-docker-environment
| 0
|
12780350
|
import luigi
from ...abstract_method_exception import AbstractMethodException
from ...lib.test_environment.populate_data import PopulateEngineSmallTestDataToDatabase
from ...lib.test_environment.upload_exa_jdbc import UploadExaJDBC
from ...lib.test_environment.upload_virtual_schema_jdbc_adapter import UploadVirtualSchemaJDBCAdapter
from ...lib.base.dependency_logger_base_task import DependencyLoggerBaseTask
from ...lib.data.container_info import ContainerInfo
from ...lib.data.database_credentials import DatabaseCredentialsParameter
from ...lib.data.database_info import DatabaseInfo
from ...lib.data.docker_network_info import DockerNetworkInfo
from ...lib.data.environment_info import EnvironmentInfo
from ...lib.test_environment.general_spawn_test_environment_parameter import \
GeneralSpawnTestEnvironmentParameter
from ...lib.test_environment.spawn_test_container import SpawnTestContainer
DATABASE = "database"
TEST_CONTAINER = "test_container"
class AbstractSpawnTestEnvironment(DependencyLoggerBaseTask,
GeneralSpawnTestEnvironmentParameter,
DatabaseCredentialsParameter):
environment_name = luigi.Parameter()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.test_container_name = f"""test_container_{self.environment_name}"""
self.network_name = f"""db_network_{self.environment_name}"""
def get_environment_type(self):
raise AbstractMethodException()
def run_task(self):
test_environment_info = yield from self._attempt_database_start()
yield from self._setup_test_database(test_environment_info)
self.return_object(test_environment_info)
def _attempt_database_start(self):
is_database_ready = False
attempt = 0
database_info = None
test_container_info = None
while not is_database_ready and attempt < self.max_start_attempts:
network_info, database_info, is_database_ready, test_container_info = \
yield from self._start_database(attempt)
attempt += 1
if not is_database_ready and not attempt < self.max_start_attempts:
raise Exception(f"Maximum attempts {attempt} to start the database reached.")
test_environment_info = \
EnvironmentInfo(name=self.environment_name,
env_type=self.get_environment_type(),
database_info=database_info,
test_container_info=test_container_info,
network_info=network_info)
return test_environment_info
def _start_database(self, attempt):
network_info = yield from self._create_network(attempt)
database_info, test_container_info = \
yield from self._spawn_database_and_test_container(network_info, attempt)
is_database_ready = yield from self._wait_for_database(
database_info, test_container_info, attempt)
return network_info, database_info, is_database_ready, test_container_info
def _create_network(self, attempt):
network_info_future = yield from self.run_dependencies(self.create_network_task(attempt))
network_info = self.get_values_from_future(network_info_future)
return network_info
def create_network_task(self, attempt: int):
raise AbstractMethodException()
def _spawn_database_and_test_container(self,
network_info: DockerNetworkInfo,
attempt: int):
database_and_test_container_info_future = \
yield from self.run_dependencies({
TEST_CONTAINER: SpawnTestContainer(
environment_name=self.environment_name,
test_container_name=self.test_container_name,
network_info=network_info,
ip_address_index_in_subnet=1,
reuse_test_container=self.reuse_test_container,
no_test_container_cleanup_after_end=self.no_test_container_cleanup_after_end,
attempt=attempt),
DATABASE: self.create_spawn_database_task(network_info, attempt)
})
database_and_test_container_info = \
self.get_values_from_futures(database_and_test_container_info_future)
test_container_info = database_and_test_container_info[TEST_CONTAINER]
database_info = database_and_test_container_info[DATABASE]
return database_info, test_container_info
def create_spawn_database_task(self,
network_info: DockerNetworkInfo,
attempt: int):
raise AbstractMethodException()
def _wait_for_database(self,
database_info: DatabaseInfo,
test_container_info: ContainerInfo,
attempt: int):
database_ready_target_future = \
yield from self.run_dependencies(
self.create_wait_for_database_task(
attempt, database_info, test_container_info))
is_database_ready = self.get_values_from_futures(database_ready_target_future)
return is_database_ready
def create_wait_for_database_task(self,
attempt: int,
database_info: DatabaseInfo,
test_container_info: ContainerInfo):
raise AbstractMethodException()
def _setup_test_database(self, test_environment_info: EnvironmentInfo):
# TODO check if database is setup
self.logger.info("Setup database")
upload_tasks = [
self.create_child_task_with_common_params(
UploadExaJDBC,
test_environment_info=test_environment_info,
reuse_uploaded=self.reuse_database_setup),
self.create_child_task_with_common_params(
UploadVirtualSchemaJDBCAdapter,
test_environment_info=test_environment_info,
reuse_uploaded=self.reuse_database_setup),
self.create_child_task_with_common_params(
PopulateEngineSmallTestDataToDatabase,
test_environment_info=test_environment_info,
reuse_data=self.reuse_database_setup
)]
yield from self.run_dependencies(upload_tasks)
| 2
| 2
|