| hexsha (string, len 40) | size (int64, 4 to 1.02M) | ext (string, 8 classes) | lang (string, 1 class) | max_stars_repo_path (string, len 4 to 209) | max_stars_repo_name (string, len 5 to 121) | max_stars_repo_head_hexsha (string, len 40) | max_stars_repo_licenses (list, len 1 to 10) | max_stars_count (int64, 1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime (string, len 24, nullable) | max_stars_repo_stars_event_max_datetime (string, len 24, nullable) | max_issues_repo_path (string, len 4 to 209) | max_issues_repo_name (string, len 5 to 121) | max_issues_repo_head_hexsha (string, len 40) | max_issues_repo_licenses (list, len 1 to 10) | max_issues_count (int64, 1 to 67k, nullable) | max_issues_repo_issues_event_min_datetime (string, len 24, nullable) | max_issues_repo_issues_event_max_datetime (string, len 24, nullable) | max_forks_repo_path (string, len 4 to 209) | max_forks_repo_name (string, len 5 to 121) | max_forks_repo_head_hexsha (string, len 40) | max_forks_repo_licenses (list, len 1 to 10) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, len 24, nullable) | max_forks_repo_forks_event_max_datetime (string, len 24, nullable) | content (string, len 4 to 1.02M) | avg_line_length (float64, 1.07 to 66.1k) | max_line_length (int64, 4 to 266k) | alphanum_fraction (float64, 0.01 to 1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e2c2694c299d4cbc5539c1ed1698c657e667dc8d
| 1,504
|
py
|
Python
|
tests/pages/inputs/date_selector_page.py
|
rescbr/aws-device-farm-appium-python-tests-for-ios-sample-app
|
e006bfc830fa2dc27fe5ba630b662cd022a837c4
|
[
"Apache-2.0"
] | 9
|
2018-02-18T23:24:43.000Z
|
2022-03-24T09:39:45.000Z
|
tests/pages/inputs/date_selector_page.py
|
rescbr/aws-device-farm-appium-python-tests-for-ios-sample-app
|
e006bfc830fa2dc27fe5ba630b662cd022a837c4
|
[
"Apache-2.0"
] | null | null | null |
tests/pages/inputs/date_selector_page.py
|
rescbr/aws-device-farm-appium-python-tests-for-ios-sample-app
|
e006bfc830fa2dc27fe5ba630b662cd022a837c4
|
[
"Apache-2.0"
] | 6
|
2017-06-12T17:59:46.000Z
|
2017-11-07T14:25:11.000Z
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from tests.pages.base_pages.base_page import BasePage
class DateSelectorPage(BasePage):
"""Date selector page representation."""
MONTH_INDEX = 0
DAY_INDEX = 1
YEAR_INDEX = 2
def set_date(self, month, day, year):
"""Sets month, day, year, picker wheels.
month -- the month as a string
day -- the day as a string
year -- the year as a string
"""
wheels = self.driver.find_elements_by_class_name(self.PICKER_WHEEL_CLASS)
month_wheel = wheels[self.MONTH_INDEX]
day_wheel = wheels[self.DAY_INDEX]
year_wheel = wheels[self.YEAR_INDEX]
month_wheel.send_keys(month)
day_wheel.send_keys(day)
year_wheel.send_keys(year)
def get_selected_date(self):
"""Retrieves selected date as a string (Month is truncated to three letters)."""
date = self.driver.find_element_by_class_name(self.STATIC_TEXT_CLASS)
return date.text
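# A minimal usage sketch (hypothetical, for illustration only): it assumes the
# BasePage constructor accepts the Appium webdriver, as in the AWS sample suite,
# and that "June"/"15"/"2016" are valid wheel values on the device under test.
def _example_set_and_read_date(driver):
    page = DateSelectorPage(driver)
    page.set_date("June", "15", "2016")
    # get_selected_date() returns the label text, with the month truncated
    # to three letters (e.g. "Jun 15, 2016").
    return page.get_selected_date()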
| 34.181818
| 88
| 0.698138
|
8bd448c5b4b436def859016e7201e8a689b0abd2
| 2,415
|
py
|
Python
|
ci/deploy/deploy_util.py
|
sungchun12/materialize
|
7282293960368019d583163b2f06545b1379c0a1
|
[
"MIT"
] | 1
|
2021-09-05T03:07:46.000Z
|
2021-09-05T03:07:46.000Z
|
ci/deploy/deploy_util.py
|
sungchun12/materialize
|
7282293960368019d583163b2f06545b1379c0a1
|
[
"MIT"
] | 101
|
2020-04-21T05:44:38.000Z
|
2022-03-27T22:22:56.000Z
|
ci/deploy/deploy_util.py
|
mjibson/materialize
|
09309bcdc3719642b45b3d1e214e5b5427d3a4c6
|
[
"MIT"
] | null | null | null |
# Copyright Materialize, Inc. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
from materialize import git
from materialize import spawn
from pathlib import Path
import humanize
import os
import tarfile
import tempfile
import time
def _tardir(name: str) -> tarfile.TarInfo:
d = tarfile.TarInfo(name)
d.mtime = int(time.time())
d.mode = 0o40755
d.type = tarfile.DIRTYPE
return d
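# Normalize ownership metadata so files in the published tarball are recorded as
# root-owned instead of leaking the CI user's uid/gid.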
def _sanitize_tarinfo(tarinfo: tarfile.TarInfo) -> tarfile.TarInfo:
tarinfo.uid = tarinfo.gid = 0
tarinfo.uname = tarinfo.gname = "root"
return tarinfo
def upload_tarball(tarball: Path, platform: str, version: str) -> None:
s3_url = f"s3://downloads.mtrlz.dev/materialized-{version}-{platform}.tar.gz"
spawn.runv(["aws", "s3", "cp", "--acl=public-read", tarball, s3_url])
def set_latest_redirect(platform: str, version: str) -> None:
with tempfile.NamedTemporaryFile() as empty:
target = f"/materialized-{version}-{platform}.tar.gz"
s3_url = f"s3://downloads.mtrlz.dev/materialized-latest-{platform}.tar.gz"
spawn.runv(
[
"aws",
"s3",
"cp",
"--acl=public-read",
f"--website-redirect={target}",
empty.name,
s3_url,
]
)
def deploy_tarball(platform: str, materialized: Path) -> None:
tar_path = Path("materialized.tar.gz")
with tarfile.open(str(tar_path), "x:gz") as f:
f.addfile(_tardir("materialized"))
f.addfile(_tardir("materialized/bin"))
f.add(
str(materialized),
arcname="materialized/bin/materialized",
filter=_sanitize_tarinfo,
)
f.addfile(_tardir("materialized/etc/materialized"))
size = humanize.naturalsize(os.lstat(tar_path).st_size)
print(f"Tarball size: {size}")
if os.environ["BUILDKITE_TAG"]:
upload_tarball(tar_path, platform, os.environ["BUILDKITE_TAG"])
else:
commit_sha = git.rev_parse("HEAD")
upload_tarball(tar_path, platform, commit_sha)
set_latest_redirect(platform, commit_sha)
| 31.363636
| 82
| 0.645963
|
b1a109f382367e4c6f0f29a1bfffe9988f685dce
| 87
|
py
|
Python
|
website/home/models/__init__.py
|
SebastiaanZ/minigigscyclingteam
|
6c8c4f7ae41a5b01a551c592dc81fd37fd4f686e
|
[
"MIT"
] | null | null | null |
website/home/models/__init__.py
|
SebastiaanZ/minigigscyclingteam
|
6c8c4f7ae41a5b01a551c592dc81fd37fd4f686e
|
[
"MIT"
] | 9
|
2020-01-25T12:24:43.000Z
|
2022-03-12T00:18:38.000Z
|
website/home/models/__init__.py
|
SebastiaanZ/minigigscyclingteam
|
6c8c4f7ae41a5b01a551c592dc81fd37fd4f686e
|
[
"MIT"
] | null | null | null |
"""Home app models module."""
from .article import Article
__all__ = [
"Article",
]
| 12.428571
| 29
| 0.655172
|
4f4a5de51c1fd94365daefb1f9e45dc9ba54a2b2
| 1,465
|
py
|
Python
|
padog/__init__.py
|
explore2019-hub/py-apple-quadruped-robot
|
52061e0772e53ae4c3f9c59bd65f85c703278f14
|
[
"Apache-2.0"
] | null | null | null |
padog/__init__.py
|
explore2019-hub/py-apple-quadruped-robot
|
52061e0772e53ae4c3f9c59bd65f85c703278f14
|
[
"Apache-2.0"
] | null | null | null |
padog/__init__.py
|
explore2019-hub/py-apple-quadruped-robot
|
52061e0772e53ae4c3f9c59bd65f85c703278f14
|
[
"Apache-2.0"
] | null | null | null |
'''
Copyright Deng (ream_d@yeah.net) Py-apple dog project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from math import *
#from machine import I2C
#from pyb import UART
#from pyb import Pin
import time
#import _thread
#============== Version info ==================#
version="V1.1 BETA 1 2020506"
# Print version info
print("=============================")
print("PY-APPLE DOG TEST VER "+version)
print("=============================")
print("Author: Deng ream_d@yeah.net  License: Apache License")
print("========= Features =========")
print("1. Stepping in place  2. Height adjustment  3. Basic trot (forward/backward)  4. Individual leg height adjustment")
print("========= Features =========")
print("Loading program...")
Init_File_List=[".//padog//config.py",".//padog//user//foot_init.py",".//padog//execute//servo.py",".//padog//execute//position_control.py",".//padog//gait//trot.py",".//padog//gait//trans.py",".//padog//gait//jump.py",".//padog//gait//gesture_control.py"]
# For debugging
for i in Init_File_List:
exec(open(i).read())
print(i)
print("程序加载完成...")
#预先执行函数
caculate()
servo_output()
| 30.520833
| 256
| 0.659386
|
c16404f6961b888c0766d9ddeee9bafa846c6975
| 5,714
|
py
|
Python
|
francoralite/apps/francoralite_api/tests/test_collection_location.py
|
Francoralite/francoralite
|
f8c5eeffe6d395c7e4222a9f5a4a7a01841b503c
|
[
"BSD-3-Clause"
] | 2
|
2021-07-26T08:29:26.000Z
|
2021-07-26T08:29:27.000Z
|
francoralite/apps/francoralite_api/tests/test_collection_location.py
|
lluc/telemeta-integration
|
c2fb116471235674eae597abac84a7113e0f7c82
|
[
"BSD-3-Clause"
] | 167
|
2018-10-20T14:34:46.000Z
|
2021-06-01T10:40:55.000Z
|
francoralite/apps/francoralite_api/tests/test_collection_location.py
|
Francoralite/francoralite
|
f8c5eeffe6d395c7e4222a9f5a4a7a01841b503c
|
[
"BSD-3-Clause"
] | 1
|
2021-06-06T12:16:49.000Z
|
2021-06-06T12:16:49.000Z
|
# -*- coding: utf-8 -*-
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors: Luc LEGER / Coopérative ARTEFACTS <artefacts.lle@gmail.com>
"""
CollectionLocation tests
"""
import factory
import pytest
import sys
import random
from django.core.management import call_command
from django.urls import reverse
from parameterized import parameterized
from rest_framework import status
from rest_framework.test import APITestCase
from .factories.collection_location import CollectionLocationFactory
from ..models.collection_location import CollectionLocation
from ..models.location import Location
from ..models.collection import Collection
from .keycloak import get_token
# Expected structure for Collection_location objects
COLLECTIONLOCATION_STRUCTURE = [
('id', int),
('collection', dict),
('location', dict),
]
# Expected keys for MODEL objects
COLLECTIONLOCATION_FIELDS = sorted(
[item[0] for item in COLLECTIONLOCATION_STRUCTURE])
@pytest.mark.django_db
class TestCollectionLocationList(APITestCase):
"""
This class manages all CollectionLocation tests
"""
def setUp(self):
"""
Run needed commands to have a fully working project
"""
get_token(self)
# Create a set of sample data
CollectionLocationFactory.create_batch(6)
def test_can_get_collection_location_list(self):
"""
Ensure CollectionLocation objects exist
"""
url = reverse('collectionlocation-list', kwargs={
'collection_pk': 1})
# ORM side
collection_locations = CollectionLocation.objects.all()
self.assertEqual(len(collection_locations), 6)
# API side
response = self.client.get(url)
self.assertIsInstance(response.data, list)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 1)
@parameterized.expand(COLLECTIONLOCATION_STRUCTURE)
def test_has_valid_collection_location_values(self,
attribute, attribute_type):
"""
Ensure CollectionLocation objects have valid values
"""
url = reverse('collectionlocation-list', kwargs={
'collection_pk': 1})
response = self.client.get(url)
self.assertIsInstance(response.data, list)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for collection_location in response.data:
# Check only expected attributes returned
self.assertEqual(
sorted(collection_location.keys()), COLLECTIONLOCATION_FIELDS)
# Ensure type of each attribute
if attribute_type == str:
self.assertIsInstance(collection_location[attribute], str)
else:
self.assertIsInstance(
collection_location[attribute], attribute_type)
self.assertIsNot(collection_location[attribute], '')
def test_get_a_collection_location(self):
"""
Ensure we can get a CollectionLocation object
using an existing id
"""
item = CollectionLocation.objects.first()
url = reverse('collectionlocation-detail', kwargs={
'collection_pk': item.collection.id,
'pk': item.location.id})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIsInstance(response.data, dict)
def test_create_a_collection_location(self):
"""
Ensure we can create a CollectionLocation object
"""
data = factory.build(
dict,
FACTORY_CLASS=CollectionLocationFactory)
# Convert the related entity into a dictionary.
# Then it will be easily converted to JSON format.
data['location'] = 2
data['collection'] = 1
url = reverse('collectionlocation-list', kwargs={
'collection_pk': data['collection']})
response = self.client.post(url, data, format='json')
# Check only expected attributes returned
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertIsInstance(response.data, dict)
self.assertEqual(
sorted(response.data.keys()),
COLLECTIONLOCATION_FIELDS)
url = reverse(
'collectionlocation-detail',
kwargs={'collection_pk': response.data['collection']['id'],
'pk': response.data['id']}
)
response_get = self.client.get(url)
self.assertEqual(response_get.status_code, status.HTTP_200_OK)
self.assertIsInstance(response_get.data, dict)
def test_delete_a_collection_location(self):
"""
Ensure we can delete a CollectionLocation object
"""
item = CollectionLocation.objects.first()
# Delete this object
url = reverse(
'collectionlocation-detail', kwargs={
'collection_pk': item.collection.id,
'pk': item.location.id}
)
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
# Ensure CollectionLocation removed
url_get = reverse(
'collectionlocation-detail', kwargs={
'collection_pk': item.collection.id,
'pk': item.location.id}
)
response_get = self.client.get(url_get)
self.assertEqual(response_get.status_code, status.HTTP_404_NOT_FOUND)
| 32.282486
| 78
| 0.648407
|
77f14437f1de7c59cd16b4703afc24d0d677464a
| 8,887
|
py
|
Python
|
tests/test_types/test_array.py
|
dtcaciuc/nitrous
|
9ed63c13dbdc1b8b1880e49bbcc6fa442612fe71
|
[
"MIT"
] | 1
|
2020-11-27T04:08:49.000Z
|
2020-11-27T04:08:49.000Z
|
tests/test_types/test_array.py
|
dtcaciuc/nitrous
|
9ed63c13dbdc1b8b1880e49bbcc6fa442612fe71
|
[
"MIT"
] | null | null | null |
tests/test_types/test_array.py
|
dtcaciuc/nitrous
|
9ed63c13dbdc1b8b1880e49bbcc6fa442612fe71
|
[
"MIT"
] | null | null | null |
import unittest
import ctypes
from nitrous.module import module
from nitrous.function import function
from nitrous.types import Long
from nitrous.types.array import Array, FastSlice, Slice, Any
try:
import numpy as np
except ImportError:
np = None
class ArrayTestsBase(object):
def setUp(self):
X, Y, Z = range(3)
@function(Long, a=self.A, b=self.B)
def f(a, b):
m = 0
for i in range(a.shape[X]):
for j in range(a.shape[Y]):
for k in range(a.shape[Z]):
b[m] = a[i, j, k]
m += 1
return m
self.m = module([f])
self.addCleanup(delattr, self, "m")
def test_array(self):
A = (((ctypes.c_long * 2) * 3) * 2)
a = A(((1, 2), (3, 4), (5, 6)),
((7, 8), (9, 10), (11, 12)))
B = ctypes.c_long * 12
b = B()
m = self.m.f(a, b)
self.assertEqual(m, 12)
self.assertEqual(list(b), range(1, 13))
@unittest.skipIf(not np, "NumPy integration feature")
def test_ndarray(self):
dtype = np.dtype("i{0}".format(ctypes.sizeof(ctypes.c_long)))
a = np.array([
((1, 2), (3, 4), (5, 6)),
((7, 8), (9, 10), (11, 12))
], dtype=dtype)
b = np.empty(12, dtype=dtype)
m = self.m.f(a, b)
self.assertEqual(m, 12)
self.assertEqual(list(b), range(1, 13))
class SliceTests(ArrayTestsBase, unittest.TestCase):
A = Slice(Long, (Any,) * 3)
B = Slice(Long)
def test_repr(self):
self.assertEqual(repr(self.A), "Slice(Long, shape=(Any, Any, Any))")
self.assertEqual(repr(self.B), "Slice(Long, shape=(Any,))")
def test_str(self):
self.assertEqual(str(self.A), "<Slice [? x [? x [? x Long]]]>")
self.assertEqual(str(self.B), "<Slice [? x Long]>")
class FastSliceTests(ArrayTestsBase, unittest.TestCase):
A = FastSlice(Long, (2, 3, 2))
B = FastSlice(Long, (12,))
def test_repr(self):
self.assertEqual(repr(self.A), "FastSlice(Long, shape=(2, 3, 2))")
self.assertEqual(repr(self.B), "FastSlice(Long, shape=(12,))")
def test_str(self):
self.assertEqual(str(self.A), "<FastSlice [2 x [3 x [2 x Long]]]>")
self.assertEqual(str(self.B), "<FastSlice [12 x Long]>")
class ArrayTests(ArrayTestsBase, unittest.TestCase):
A = Array(Long, (2, 3, 2))
B = Array(Long, (12,))
def test_repr(self):
self.assertEqual(repr(self.A), "Array(Long, shape=(2, 3, 2))")
self.assertEqual(repr(self.B), "Array(Long, shape=(12,))")
def test_str(self):
self.assertEqual(str(self.A), "<Array [2 x [3 x [2 x Long]]]>")
self.assertEqual(str(self.B), "<Array [12 x Long]>")
class ArrayAllocTests(unittest.TestCase):
def test_alloc_return(self):
"""Allocate array and pass back through return value."""
from nitrous.types import Double
Coord = Array(Double, (3,))
@function(Coord, x=Double, y=Double, z=Double)
def make_coord(x, y, z):
return Coord((x, y, z))
@function(Coord, x=Double, y=Double, z=Double)
def make_coord_2(x, y, z):
return make_coord(x, y, z)
m = module([make_coord, make_coord_2])
c = m.make_coord_2(1.0, 2.0, 3.0)
self.assertEqual(tuple(c), (1.0, 2.0, 3.0))
def test_init_2d(self):
"""Multi-dimensional array initialization."""
from nitrous.types import Double
Double2x2 = Array(Double, (2, 2))
@function(Double2x2, x=Double, y=Double, z=Double, w=Double)
def make_2x2(x, y, z, w):
return Double2x2(((x, y), (z, w)))
m = module([make_2x2])
c = m.make_2x2(1.0, 2.0, 3.0, 4.0)
self.assertEqual(c[0][0], 1.0)
self.assertEqual(c[0][1], 2.0)
self.assertEqual(c[1][0], 3.0)
self.assertEqual(c[1][1], 4.0)
class SliceReferenceTests(unittest.TestCase):
def test_reference_arg(self):
"""Slice is treated as reference type."""
from nitrous.types import is_aggregate
self.assertTrue(is_aggregate(Slice(Long)))
class IndexTests(unittest.TestCase):
def setUp(self):
self.data = (((Long.c_type * 3) * 3) * 3)(
((0, 1, 2), (3, 4, 5), (6, 7, 8)),
((18, 19, 20), (21, 22, 23), (24, 25, 26)),
((9, 10, 11), (12, 13, 14), (15, 16, 17)),
)
self.addCleanup(delattr, self, "data")
def test_static_dimension(self):
"""Replace access to known dimensions with direct constants"""
from nitrous.module import dump
D = Slice(Long, shape=(Any, 3, 3))
X, Y, Z = range(3)
@function(Long, a=D)
def f(a):
return a[2, 1, 2]
m = module([f])
# All indices should be resolved at compile time, so there should be no multiplications.
self.assertNotRegexpMatches(dump(m), "mul")
self.assertEqual(m.f(self.data), 14)
def test_all_dynamic_dimension(self):
"""All dimensions are dynamic, no indices can be resolved at runtime"""
from nitrous.module import dump
D = Slice(Long, shape=(Any, Any, Any))
X, Y, Z = range(3)
@function(Long, a=D)
def f(a):
return a[2, 1, 2]
m = module([f])
# Should have run-time multiplications during index flattening.
self.assertRegexpMatches(dump(m), "mul")
self.assertEqual(m.f(self.data), 14)
def test_mixed_dynamic_dimension(self):
"""Some dimensions are dynamic, other than major one"""
from nitrous.module import dump
D = Slice(Long, shape=(Any, 3, Any))
X, Y, Z = range(3)
@function(Long, a=D)
def f(a):
return a[2, 1, 2]
m = module([f])
# Should have run-time multiplications during index flattening.
self.assertRegexpMatches(dump(m), "mul")
self.assertEqual(m.f(self.data), 14)
class SubsliceTests(unittest.TestCase):
def setUp(self):
self.DataSlice = Slice(Long, (5, 2, 3))
self.data = (((Long.c_type * 3) * 2) * 5)(
((0, 1, 2), (3, 4, 5)),
((6, 7, 8), (18, 19, 20)),
((21, 22, 23), (24, 25, 26)),
((9, 10, 11), (12, 13, 14)),
((15, 16, 17), (33, 34, 35)),
)
self.addCleanup(delattr, self, "DataSlice")
self.addCleanup(delattr, self, "data")
def test_subslice_shape_i(self):
"""Subslice shape reduced by one dimension (two remain)"""
ND, S0, S1 = range(3)
@function(x=self.DataSlice, i=Long, v=Slice(Long))
def get_i(x, i, v):
s = x[i]
v[ND] = s.ndim
v[S0] = s.shape[0]
v[S1] = s.shape[1]
m = module([get_i])
v = (Long.c_type * 3)()
# Shape and dimensions should not depend on indices.
for i in range(5):
m.get_i(self.data, i, v)
self.assertEqual(v[ND], 2)
self.assertEqual(v[S0], 2)
self.assertEqual(v[S1], 3)
def test_subslice_shape_ij(self):
"""Subslice shape reduced by two dimensions (one remains)"""
ND, S0 = range(2)
@function(x=self.DataSlice, i=Long, j=Long, v=Slice(Long))
def get_ij(x, i, j, v):
s = x[i, j]
v[ND] = s.ndim
v[S0] = s.shape[0]
m = module([get_ij])
v = (Long.c_type * 2)()
# Shape and dimensions should not depend on indices.
for i in range(5):
for j in range(2):
m.get_ij(self.data, i, j, v)
self.assertEqual(v[ND], 1)
self.assertEqual(v[S0], 3)
def test_subslice_data_i(self):
"""Subslice data reduced by one dimension (two remain)"""
@function(x=self.DataSlice, i=Long, v=Slice(Long, (2, 3)))
def get_i(x, i, v):
s = x[i]
for j in range(2):
for k in range(3):
v[j, k] = s[j, k]
m = module([get_i])
v = ((Long.c_type * 3) * 2)()
for i in range(5):
m.get_i(self.data, i, v)
ref_v = list(list(row) for row in self.data[i])
self.assertEqual(list(list(row) for row in v), ref_v)
def test_subslice_data_ij(self):
"""Subslice data reduced by one dimension (two remain)"""
@function(x=self.DataSlice, i=Long, j=Long, v=Slice(Long, (3,)))
def get_ij(x, i, j, v):
s = x[i, j]
for k in range(3):
v[k] = s[k]
m = module([get_ij])
v = (Long.c_type * 3)()
for i in range(5):
for j in range(2):
m.get_ij(self.data, i, j, v)
self.assertEqual(list(v), list(self.data[i][j]))
| 29.137705
| 92
| 0.528637
|
05410767bddd3b97f797542989fa9e2e3f51cf82
| 11,122
|
py
|
Python
|
h2o-bindings/bin/custom/python/gen_isolationforest.py
|
ahmedengu/h2o-3
|
ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11
|
[
"Apache-2.0"
] | 1
|
2020-10-21T05:09:23.000Z
|
2020-10-21T05:09:23.000Z
|
h2o-bindings/bin/custom/python/gen_isolationforest.py
|
ahmedengu/h2o-3
|
ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11
|
[
"Apache-2.0"
] | 1
|
2020-05-10T15:33:07.000Z
|
2020-05-10T15:33:07.000Z
|
h2o-bindings/bin/custom/python/gen_isolationforest.py
|
ahmedengu/h2o-3
|
ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11
|
[
"Apache-2.0"
] | 1
|
2020-04-17T13:06:26.000Z
|
2020-04-17T13:06:26.000Z
|
def update_param(name, param):
if name == 'stopping_metric':
param['values'] = ['AUTO', 'anomaly_score']
return param
return None # param untouched
doc = dict(
__class__="""
Builds an Isolation Forest model. Isolation Forest algorithm samples the training frame
and in each iteration builds a tree that partitions the space of the sample observations until
it isolates each observation. Length of the path from root to a leaf node of the resulting tree
is used to calculate the anomaly score. Anomalies are easier to isolate and their average
tree path is expected to be shorter than paths of regular observations.
"""
)
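# Toy sketch of the scoring idea described in the docstring above (hypothetical
# helper, not part of the generated bindings): shorter average isolation depth
# maps to a score closer to 1. The original Isolation Forest paper defines the
# score as 2 ** (-E[h(x)] / c(n)), where E[h(x)] is the mean path length over
# the trees and c(n) is a normalization term depending on the sample size n.
def _toy_anomaly_score(mean_path_length, c_n):
    return 2.0 ** (-mean_path_length / c_n)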
examples = dict(
build_tree_one_node="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> cars_if = H2OIsolationForestEstimator(build_tree_one_node=True,
... seed=1234)
>>> cars_if.train(x=predictors,
... training_frame=cars)
>>> cars_if.model_performance()
""",
categorical_encoding="""
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> predictors = ["Origin", "Dest", "Year", "UniqueCarrier",
... "DayOfWeek", "Month", "Distance", "FlightNum"]
>>> encoding = "one_hot_explicit"
>>> airlines_if = H2OIsolationForestEstimator(categorical_encoding=encoding,
... seed=1234)
>>> airlines_if.train(x=predictors,
... training_frame=airlines)
>>> airlines_if.model_performance()
""",
col_sample_rate_change_per_level="""
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> predictors = ["Origin", "Dest", "Year", "UniqueCarrier",
... "DayOfWeek", "Month", "Distance", "FlightNum"]
>>> airlines_if = H2OIsolationForestEstimator(col_sample_rate_change_per_level=.9,
... seed=1234)
>>> airlines_if.train(x=predictors,
... training_frame=airlines)
>>> airlines_if.model_performance()
""",
col_sample_rate_per_tree="""
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> predictors = ["Origin", "Dest", "Year", "UniqueCarrier",
... "DayOfWeek", "Month", "Distance", "FlightNum"]
>>> airlines_if = H2OIsolationForestEstimator(col_sample_rate_per_tree=.7,
... seed=1234)
>>> airlines_if.train(x=predictors,
... training_frame=airlines)
>>> airlines_if.model_performance()
""",
export_checkpoints_dir="""
>>> import tempfile
>>> from os import listdir
>>> airlines = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip", destination_frame="air.hex")
>>> predictors = ["DayofMonth", "DayOfWeek"]
>>> checkpoints_dir = tempfile.mkdtemp()
>>> air_if = H2OIsolationForestEstimator(max_depth=3,
... seed=1234,
... export_checkpoints_dir=checkpoints_dir)
>>> air_if.train(x=predictors,
... training_frame=airlines)
>>> len(listdir(checkpoints_dir))
""",
ignore_const_cols="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> cars["const_1"] = 6
>>> cars["const_2"] = 7
>>> train, valid = cars.split_frame(ratios=[.8], seed=1234)
>>> cars_if = H2OIsolationForestEstimator(seed=1234,
... ignore_const_cols=True)
>>> cars_if.train(x=predictors,
... training_frame=cars)
>>> cars_if.model_performance()
""",
max_depth="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> cars_if = H2OIsolationForestEstimator(max_depth=2,
... seed=1234)
>>> cars_if.train(x=predictors,
... training_frame=cars)
>>> cars_if.model_performance()
""",
max_runtime_secs="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> cars_if = H2OIsolationForestEstimator(max_runtime_secs=10,
... ntrees=10000,
... max_depth=10,
... seed=1234)
>>> cars_if.train(x=predictors,
... training_frame=cars)
>>> cars_if.model_performance()
""",
min_rows="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> cars_if = H2OIsolationForestEstimator(min_rows=16,
... seed=1234)
>>> cars_if.train(x=predictors,
... training_frame=cars)
>>> cars_if.model_performance()
""",
mtries="""
>>> covtype = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/covtype/covtype.20k.data")
>>> predictors = covtype.columns[0:54]
>>> cov_if = H2OIsolationForestEstimator(mtries=30, seed=1234)
>>> cov_if.train(x=predictors,
... training_frame=covtype)
>>> cov_if.model_performance()
""",
ntrees="""
>>> titanic = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/gbm_test/titanic.csv")
>>> predictors = titanic.columns
>>> tree_num = [20, 50, 80, 110, 140, 170, 200]
>>> label = ["20", "50", "80", "110", "140", "170", "200"]
>>> for key, num in enumerate(tree_num):
... titanic_if = H2OIsolationForestEstimator(ntrees=num,
... seed=1234)
... titanic_if.train(x=predictors,
... training_frame=titanic)
... print(label[key], 'training score', titanic_if.mse(train=True))
""",
sample_rate="""
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> predictors = ["Origin", "Dest", "Year", "UniqueCarrier",
... "DayOfWeek", "Month", "Distance", "FlightNum"]
>>> airlines_if = H2OIsolationForestEstimator(sample_rate=.7,
... seed=1234)
>>> airlines_if.train(x=predictors,
... training_frame=airlines)
>>> airlines_if.model_performance()
""",
sample_size="""
>>> train = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/anomaly/ecg_discord_train.csv")
>>> test = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/anomaly/ecg_discord_test.csv")
>>> isofor_model = H2OIsolationForestEstimator(sample_size=5,
... ntrees=7)
>>> isofor_model.train(training_frame=train)
>>> isofor_model.model_performance()
>>> isofor_model.model_performance(test)
""",
score_each_iteration="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> cars_if = H2OIsolationForestEstimator(score_each_iteration=True,
... ntrees=55,
... seed=1234)
>>> cars_if.train(x=predictors,
... training_frame=cars)
>>> cars_if.model_performance()
""",
score_tree_interval="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> cars_if = H2OIsolationForestEstimator(score_tree_interval=5,
... seed=1234)
>>> cars_if.train(x=predictors,
... training_frame=cars)
>>> cars_if.model_performance()
""",
seed="""
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> predictors = ["Origin", "Dest", "Year", "UniqueCarrier",
... "DayOfWeek", "Month", "Distance", "FlightNum"]
>>> isofor_w_seed = H2OIsolationForestEstimator(seed=1234)
>>> isofor_w_seed.train(x=predictors,
... training_frame=airlines)
>>> isofor_wo_seed = H2OIsolationForestEstimator()
>>> isofor_wo_seed.train(x=predictors,
... training_frame=airlines)
>>> isofor_w_seed.model_performance()
>>> isofor_wo_seed.model_performance()
""",
stopping_metric="""
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> predictors = ["Origin", "Dest", "Year", "UniqueCarrier",
... "DayOfWeek", "Month", "Distance", "FlightNum"]
>>> airlines_if = H2OIsolationForestEstimator(stopping_metric="auto",
... stopping_rounds=3,
... stopping_tolerance=1e-2,
... seed=1234)
>>> airlines_if.train(x=predictors,
... training_frame=airlines)
>>> airlines_if.model_performance()
""",
stopping_rounds="""
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> predictors = ["Origin", "Dest", "Year", "UniqueCarrier",
... "DayOfWeek", "Month", "Distance", "FlightNum"]
>>> airlines_if = H2OIsolationForestEstimator(stopping_metric="auto",
... stopping_rounds=3,
... stopping_tolerance=1e-2,
... seed=1234)
>>> airlines_if.train(x=predictors,
... training_frame=airlines)
>>> airlines_if.model_performance()
""",
stopping_tolerance="""
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> predictors = ["Origin", "Dest", "Year", "UniqueCarrier",
... "DayOfWeek", "Month", "Distance", "FlightNum"]
>>> airlines_if = H2OIsolationForestEstimator(stopping_metric="auto",
... stopping_rounds=3,
... stopping_tolerance=1e-2,
... seed=1234)
>>> airlines_if.train(x=predictors,
... training_frame=airlines)
>>> airlines_if.model_performance()
""",
training_frame="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> cars_if = H2OIsolationForestEstimator(seed=1234)
>>> cars_if.train(x=predictors,
... training_frame=cars)
>>> cars_if.model_performance()
"""
)
| 48.356522
| 149
| 0.620572
|
75309e3ecb73eedca957a0c7b984afd6d35e707b
| 6,744
|
py
|
Python
|
examples/pwr_run/checkpointing/socket_short/max_pwr/job24.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
examples/pwr_run/checkpointing/socket_short/max_pwr/job24.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
examples/pwr_run/checkpointing/socket_short/max_pwr/job24.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
"""
Trains a NASNetMobile model on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.nasnet import NASNetMobile
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.001
args_model = 'mnasnet'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_max_pwr/' + job_name + '*'
total_epochs = 20
starting_epoch = 0
# first step is to update the PID
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
base_model = NASNetMobile(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
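# On SIGTERM, the handler below records the partially completed (wasted) epoch time
# in epoch_waste.json, removes any stale checkpoint, saves the model for the current
# epoch, marks the job as checkpointed in checkpoint.json, and exits.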
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
epoch_waste_dict = {}
with open('epoch_waste.json', 'r') as fp:
epoch_waste_dict = json.load(fp)
epoch_waste_dict[job_name] += epoch_waste_time
json_file3 = json.dumps(epoch_waste_dict)
with open('epoch_waste.json', 'w') as fp:
fp.write(json_file3)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_max_pwr/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
checkpoint_dict = {}
with open('checkpoint.json', 'r') as fp:
checkpoint_dict = json.load(fp)
checkpoint_dict[job_name] = 1
json_file3 = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp:
fp.write(json_file3)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| 30.515837
| 118
| 0.702995
|
a232aa97a2e0fd1037bf7a88f6341a5155e66c5c
| 4,148
|
py
|
Python
|
CONUS/Cloud_Cover/analysis_cloud_coverage_at_altitude.py
|
peterdsharpe/Wind_Analysis
|
7df9c34b0a355a3405ea6cecede0641e86b5b3ac
|
[
"MIT"
] | 1
|
2021-09-18T21:59:40.000Z
|
2021-09-18T21:59:40.000Z
|
CONUS/Cloud_Cover/analysis_cloud_coverage_at_altitude.py
|
peterdsharpe/Wind_Analysis
|
7df9c34b0a355a3405ea6cecede0641e86b5b3ac
|
[
"MIT"
] | null | null | null |
CONUS/Cloud_Cover/analysis_cloud_coverage_at_altitude.py
|
peterdsharpe/Wind_Analysis
|
7df9c34b0a355a3405ea6cecede0641e86b5b3ac
|
[
"MIT"
] | 1
|
2020-09-11T15:21:23.000Z
|
2020-09-11T15:21:23.000Z
|
import numpy as np
from scipy import interpolate
try:
from .process_data import data
except ImportError:
from process_data import data
import matplotlib.pyplot as plt
from matplotlib import style, ticker
import seaborn as sns
from labellines import labelLines
from scipy import stats
sns.set()
# style.use("seaborn")
fig, ax_m = plt.subplots(1, 1, figsize=(8, 6), dpi=200)
ax_ft = ax_m.twinx()
def convert_ax_m_to_ft(ax_m):
y1, y2 = ax_m.get_ylim()
meters2feet = lambda x: x / 0.3048
ax_ft.set_ylim(meters2feet(y1), meters2feet(y2))
ax_ft.figure.canvas.draw()
ax_m.callbacks.connect("ylim_changed", convert_ax_m_to_ft)
# colors = plt.cm.rainbow(np.linspace(0, 1, len(percentiles)))
altitude = data.altitudes.mean(
dim=["latitude", "longitude", "time"],
).data
coverage_thresholds = [1e-3, 1e-2, 1e-1]
# colors = plt.cm.rainbow(np.linspace(0, 1, len(coverage_thresholds))[::-1])
colors = sns.husl_palette(len(coverage_thresholds))[::-1]
for i, coverage_threshold in enumerate(coverage_thresholds):
fraction_covered = []
for j in range(len(data.level)):
cc_overhead_level = data.cc_overhead.isel(level=j).data.flatten()
# Build up a statistical model
# fit = stats.
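# The (count + 1) / (n + 2) form below is Laplace-style smoothing (a Beta(1, 1) prior),
# which keeps the estimated fraction strictly between 0 and 1 and produces the nonzero
# asymptote annotated on the plot.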
fraction_covered.append(
(
np.sum(cc_overhead_level >= coverage_threshold) + 1
) / (len(cc_overhead_level) + 2)
)
fraction_covered = np.array(fraction_covered)
altitude_plt = np.linspace(
0,
30000,
500
)
# fraction_covered_plt = 10**interpolate.interp1d(
# altitude,
# np.log10(fraction_covered),
# kind="slinear",
# fill_value="extrapolate",
# )(altitude_plt)
fraction_covered_plt = 10**interpolate.PchipInterpolator(
altitude[::-1],
np.log10(fraction_covered[::-1]),
extrapolate=True
)(altitude_plt)
ax_m.plot(
fraction_covered_plt,
altitude_plt,
label=f"Cloud Coverage > {coverage_threshold:.0e}",
color=colors[i]
)
ax_m.annotate(
s="1/1000 chance of >10%\ncloud coverage at 55.3 kft",
xy=(1e-3, 16850),
xytext=(2e-4, 10000),
xycoords="data",
ha="right",
arrowprops={
"color" : "k",
"width" : 0.25,
"headwidth" : 4,
"headlength": 6,
}
)
ax_m.annotate(
s="Nonzero asymptote due\nto Bayesian prior\n(beta dist.)",
xy=(3.53e-6, 29000),
xytext=(2e-5, 27000),
xycoords="data",
ha="left",
arrowprops={
"color" : "k",
"width" : 0.25,
"headwidth" : 4,
"headlength": 6,
}
)
plt.annotate(
s="30% chance of clouds\non a given CONUS day",
xy=(0.29, 1000),
xytext=(7e-2, 3000),
xycoords="data",
ha="right",
arrowprops={
"color" : "k",
"width" : 0.25,
"headwidth" : 4,
"headlength": 6,
}
)
ax_m.axhline(y=65000 * 0.3048, ls='--', color="gray")
ax_m.text(
x=0.8*ax_m.get_xlim()[1]+(1-0.8)*ax_m.get_xlim()[0],
y=65000 * 0.3048 ,
s="65,000 ft",
color="gray",
horizontalalignment='center',
verticalalignment='bottom'
)
ax_m.axhline(y=55000 * 0.3048, ls='--', color="gray")
ax_m.text(
x=0.8*ax_m.get_xlim()[1]+(1-0.8)*ax_m.get_xlim()[0],
y=55000 * 0.3048 ,
s="55,000 ft",
color="gray",
horizontalalignment='center',
verticalalignment='bottom'
)
plt.annotate(
s="Source: ECMWF ERA5 Reanalysis",
xy=(0.02, 0.02),
xycoords="axes fraction",
ha="left",
fontsize=9
)
ax_m.set_xlabel(r"Fraction of Time with Cloud Coverage above Threshold")
ax_m.set_ylabel(r"Altitude [m]")
ax_ft.set_ylabel(r"Altitude [ft]")
plt.title("Cloud Coverage by Altitude over CONUS")
# ax.xaxis.set_major_locator(ticker.MultipleLocator(base=2))
# ax.yaxis.set_major_locator(ticker.MultipleLocator(base=2000))
plt.xscale('log')
ax_m.set_xlim((1e-6, 1))
plt.tight_layout()
ax_m.grid(True)
ax_ft.grid(False)
ax_m.legend()
# labelLines(plt.gca().get_lines(), zorder=2.5)
plt.savefig("analysis_cloud_coverage_at_altitude/analysis_cloud_coverage_at_altitude.png")
plt.show()
| 25.292683
| 90
| 0.627049
|
54a29eb33112ed55dc59387dcce2c245f166091d
| 2,619
|
py
|
Python
|
zerver/management/commands/realm_emoji.py
|
Supermanu/zulip
|
26f6d708c2e30cfe50d9d61031edb759e8117596
|
[
"Apache-2.0"
] | null | null | null |
zerver/management/commands/realm_emoji.py
|
Supermanu/zulip
|
26f6d708c2e30cfe50d9d61031edb759e8117596
|
[
"Apache-2.0"
] | 15
|
2020-06-05T18:44:15.000Z
|
2022-03-11T23:26:03.000Z
|
zerver/management/commands/realm_emoji.py
|
Supermanu/zulip
|
26f6d708c2e30cfe50d9d61031edb759e8117596
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import print_function
from argparse import RawTextHelpFormatter
from typing import Any
from argparse import ArgumentParser
from django.core.management.base import BaseCommand, CommandParser
from zerver.lib.actions import check_add_realm_emoji, do_remove_realm_emoji
from zerver.lib.management import ZulipBaseCommand
import sys
import six
class Command(ZulipBaseCommand):
help = """Manage emoji for the specified realm
Example: ./manage.py realm_emoji --realm=zulip.com --op=add robotheart \\
https://humbug-user-avatars.s3.amazonaws.com/95ffa70fe0e7aea3c052ba91b38a28d8779f5705
Example: ./manage.py realm_emoji --realm=zulip.com --op=remove robotheart
Example: ./manage.py realm_emoji --realm=zulip.com --op=show
"""
# Fix support for multi-line usage
def create_parser(self, *args, **kwargs):
# type: (*Any, **Any) -> CommandParser
parser = super(Command, self).create_parser(*args, **kwargs)
parser.formatter_class = RawTextHelpFormatter
return parser
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
parser.add_argument('--op',
dest='op',
type=str,
default="show",
help='What operation to do (add, show, remove).')
parser.add_argument('name', metavar='<name>', type=str, nargs='?', default=None,
help="name of the emoji")
parser.add_argument('img_url', metavar='<image url>', type=str, nargs='?',
help="URL of image to display for the emoji")
self.add_realm_args(parser, True)
def handle(self, *args, **options):
# type: (*Any, **str) -> None
realm = self.get_realm(options)
if options["op"] == "show":
for name, url in six.iteritems(realm.get_emoji()):
print(name, url)
sys.exit(0)
name = options['name']
if name is None:
self.print_help("./manage.py", "realm_emoji")
sys.exit(1)
if options["op"] == "add":
img_url = options['img_url']
if img_url is None:
self.print_help("./manage.py", "realm_emoji")
sys.exit(1)
check_add_realm_emoji(realm, name, img_url)
sys.exit(0)
elif options["op"] == "remove":
do_remove_realm_emoji(realm, name)
sys.exit(0)
else:
self.print_help("./manage.py", "realm_emoji")
sys.exit(1)
| 37.956522
| 89
| 0.601756
|
21b822d4a5b2c98457465adc77513245e9c9b241
| 1,722
|
py
|
Python
|
SimpleCV/examples/manipulation/RotationExample.py
|
tpltnt/SimpleCV
|
9fd75457cce5fd111c4d251b1076b9447fa4f1a2
|
[
"BSD-3-Clause"
] | 8
|
2016-12-11T20:28:03.000Z
|
2022-02-11T21:15:26.000Z
|
SimpleCV/examples/manipulation/RotationExample.py
|
tpltnt/SimpleCV
|
9fd75457cce5fd111c4d251b1076b9447fa4f1a2
|
[
"BSD-3-Clause"
] | 19
|
2016-11-24T20:28:18.000Z
|
2017-03-18T16:40:40.000Z
|
SimpleCV/examples/manipulation/RotationExample.py
|
tpltnt/SimpleCV
|
9fd75457cce5fd111c4d251b1076b9447fa4f1a2
|
[
"BSD-3-Clause"
] | 3
|
2016-11-27T22:29:08.000Z
|
2019-12-04T00:35:32.000Z
|
#!/usr/bin/python
'''
This example shows how to perform various rotations and warps on images
and put back into a display.
'''
from __future__ import print_function
print(__doc__)
from SimpleCV import *
font_size = 30
sleep_for = 3 #seconds to sleep for
draw_color = Color.RED
while True:
image = Image("orson_welles.jpg", sample=True)
image.drawText("Original Size", 10,10, color=draw_color, fontsize=font_size)
image.show()
time.sleep(sleep_for)
rot = image.rotate(45)
rot.drawText("Rotated 45 degrees", 10,10, color=draw_color, fontsize=font_size)
rot.show()
time.sleep(sleep_for)
rot = image.rotate(45, scale=0.5)
rot.drawText("Rotated 45 degrees and scaled", 10,10, color=draw_color, fontsize=font_size)
rot.show()
time.sleep(sleep_for)
rot = image.rotate(45,scale=0.5, point = (0,0) )
rot.drawText("Rotated 45 degrees and scaled around a point", 10,10, color=draw_color, fontsize=font_size)
rot.show()
time.sleep(sleep_for)
rot = image.rotate(45,"full")
rot.drawText("Rotated 45 degrees and full", 10,10, color=draw_color, fontsize=font_size)
rot.show()
time.sleep(sleep_for)
atrans = image.shear([(image.width/2,0),(image.width-1,image.height/2),(image.width/2,image.height-1)])
atrans.drawText("Affine Transformation", 10,10, color=draw_color, fontsize=font_size)
atrans.show()
time.sleep(sleep_for)
ptrans = image.warp([(image.width*0.05,image.height*0.03),(image.width*0.9,image.height*0.1),(image.width*0.8,image.height*0.7),(image.width*0.2,image.height*0.9)])
ptrans.drawText("Perspective Transformation", 10,10, color=draw_color, fontsize=font_size)
ptrans.show()
time.sleep(sleep_for)
| 33.764706
| 168
| 0.706156
|
5b09b78a2e90288352d2716e81db8fb76d190794
| 227
|
py
|
Python
|
ConvModel/encoder/mobilenet.py
|
YuHe0108/cvmodule
|
ea00a90fc9bbca5b2c7809791cbd1f7b0da526cd
|
[
"Apache-2.0"
] | null | null | null |
ConvModel/encoder/mobilenet.py
|
YuHe0108/cvmodule
|
ea00a90fc9bbca5b2c7809791cbd1f7b0da526cd
|
[
"Apache-2.0"
] | null | null | null |
ConvModel/encoder/mobilenet.py
|
YuHe0108/cvmodule
|
ea00a90fc9bbca5b2c7809791cbd1f7b0da526cd
|
[
"Apache-2.0"
] | null | null | null |
import os
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import layers
from tensorflow.keras import backend as K
from tensorflow.keras.applications import mobilenet, mobilenet_v2
| 18.916667
| 66
| 0.792952
|
33623606afbebfe3218ef05708fe92919f260645
| 1,896
|
py
|
Python
|
tests/cli/test_stack_purge.py
|
cdk-comp/WordOps
|
d1f0ccc7202d43c90ee7640f7acd4b7c3c158ee1
|
[
"MIT"
] | 1
|
2019-07-13T10:25:15.000Z
|
2019-07-13T10:25:15.000Z
|
tests/cli/test_stack_purge.py
|
umahmadx/WordOps
|
f7360687f379bdd711c51b746dfa358317a880bd
|
[
"MIT"
] | null | null | null |
tests/cli/test_stack_purge.py
|
umahmadx/WordOps
|
f7360687f379bdd711c51b746dfa358317a880bd
|
[
"MIT"
] | 2
|
2021-01-02T07:49:51.000Z
|
2022-03-26T15:58:50.000Z
|
from wo.utils import test
from wo.cli.main import get_test_app
class CliTestCaseStack(test.WOTestCase):
def test_wo_cli(self):
self.app.setup()
self.app.run()
self.app.close()
def test_wo_cli_stack_purge_web(self):
self.app = get_test_app(argv=['stack', 'purge', '--web'])
self.app.setup()
self.app.run()
self.app.close()
def test_wo_cli_stack_purge_admin(self):
self.app = get_test_app(argv=['stack', 'purge', '--admin'])
self.app.setup()
self.app.run()
self.app.close()
def test_wo_cli_stack_purge_nginx(self):
self.app = get_test_app(argv=['stack', 'purge', '--nginx'])
self.app.setup()
self.app.run()
self.app.close()
def test_wo_cli_stack_purge_php(self):
self.app = get_test_app(argv=['stack', 'purge', '--php'])
self.app.setup()
self.app.run()
self.app.close()
def test_wo_cli_stack_purge_mysql(self):
self.app = get_test_app(argv=['stack', 'purge', '--mysql'])
self.app.setup()
self.app.run()
self.app.close()
def test_wo_cli_stack_purge_wpcli(self):
self.app = get_test_app(argv=['stack', 'purge', '--wpcli'])
self.app.setup()
self.app.run()
self.app.close()
def test_wo_cli_stack_purge_phpmyadmin(self):
self.app = get_test_app(argv=['stack', 'purge', '--phpmyadmin'])
self.app.setup()
self.app.run()
self.app.close()
def test_wo_cli_stack_purge_adminer(self):
self.app = get_test_app(argv=['stack', 'purge', '--adminer'])
self.app.setup()
self.app.run()
self.app.close()
def test_wo_cli_stack_purge_utils(self):
self.app = get_test_app(argv=['stack', 'purge', '--utils'])
self.app.setup()
self.app.run()
self.app.close()
| 29.169231
| 72
| 0.589135
|
ed9ae4ab4cc9cdc9b9ffea0e6cbb02e1e45fb600
| 2,823
|
py
|
Python
|
run_this_image_mnist1.py
|
PopGalacticHistory/imagewalker
|
dd151d0698472aebdde0487a364c56fed048bb61
|
[
"MIT"
] | 2
|
2021-04-28T13:33:45.000Z
|
2021-11-09T14:31:09.000Z
|
run_this_image_mnist1.py
|
PopGalacticHistory/imagewalker
|
dd151d0698472aebdde0487a364c56fed048bb61
|
[
"MIT"
] | null | null | null |
run_this_image_mnist1.py
|
PopGalacticHistory/imagewalker
|
dd151d0698472aebdde0487a364c56fed048bb61
|
[
"MIT"
] | 1
|
2021-03-07T13:25:59.000Z
|
2021-03-07T13:25:59.000Z
|
from image_env_mnist1 import Image_env1
from RL_brain_b import DeepQNetwork
import numpy as np
import time
BMP_MODE = True
def my_print_array(a):
uu=''
for ii in range(a.shape[0]):
for jj in range(a.shape[1]):
uu +=a[ii,jj]
uu += '\n'
print(uu)
def run_img():
step = 0
for episode in range(30000):
# initial observation
observation = env.reset()
while True:
# RL choose action based on observation
action = RL.choose_action(observation)
# RL take action and get next observation and reward
observation_, reward, done = env.step(action)
RL.store_transition(observation, action, reward, observation_)
#print('sample q entry',observation, action, reward, observation_)
if (step > 100) and (step % 10000 == 0):
RL.learn()
if not BMP_MODE:
act_map=RL.map_actions(all_observations_for_mapping) if RL.dqn_mode else RL.q_eval
#print(np.argmax(act_map,axis=1).reshape((env.image_x,env.image_y)))
my_print_array(np.flip(np.array(env.num2srt_actions(np.argmax(act_map, axis=1))).reshape((env.image_x,env.image_y)).transpose(), axis=0))
print('epsilon', RL.epsilon)
if not BMP_MODE:
env.q_snapshots.append([step,all_observations_for_mapping,act_map])
env.plot_reward()
# swap observation
observation = observation_
# break while loop when end of this episode
if done:
print(reward)
print(observation_)
break
step += 1
env.save_train_history()
# time.sleep(2.0)
# end of game
print('game over')
#env.destroy()
if __name__ == "__main__":
# maze game
env = Image_env1(bmp_features=BMP_MODE)
all_observations_for_mapping = env.observation_space() if not BMP_MODE else None
RL = DeepQNetwork(env.n_actions, env.n_features,
#learning_rate=0.00005,
reward_decay=0.5,
e_greedy=0.8,
e_greedy0=0.5,
replace_target_iter=20,
memory_size=300000,
# memory_size=1000,
e_greedy_increment=0.001,
# output_graph=True
state_table=all_observations_for_mapping
)
run_img()
# env.after(100, run_img)
# print('-----------------------------------------')
# env.mainloop()
env.plot_reward()
env.save_train_history()
#RL.plot_cost()
| 34.851852
| 158
| 0.533475
|
53546e716e272cc655ca3d990044816ff06ab903
| 12,085
|
py
|
Python
|
trees/redblacktree.py
|
shivam3009/fun-with-algorithms
|
e297701fd37d51abc51d4ed5e5e2a14689c2af15
|
[
"MIT"
] | null | null | null |
trees/redblacktree.py
|
shivam3009/fun-with-algorithms
|
e297701fd37d51abc51d4ed5e5e2a14689c2af15
|
[
"MIT"
] | null | null | null |
trees/redblacktree.py
|
shivam3009/fun-with-algorithms
|
e297701fd37d51abc51d4ed5e5e2a14689c2af15
|
[
"MIT"
] | null | null | null |
"""
# Structure of a red black tree
1. A node is either red or black.
2. The root is black.
3. All leaves are black.
4. If a node is red, then both its children are black.
5. Every path from a given node to a leaf node has the same
number of black nodes.
"""
class Node:
""" Implementaion of red black tree node
a node has value, color (RED or BLACK),
parent node (node or None) and left and right child (node or None)
"""
RED = True
BLACK = False
def __init__(self, value, color=RED):
self.color = color
self.value = value
self.left = None
self.right = None
self.parent = None
def __str__(self):
return str(self.value) + ':' + str('R' if self.color else 'B')
def verbose(self):
return '{} (parent:{} left:{} right:{})'.format(
self, self.parent, self.left, self.right)
class RedBlackTree:
""" Implementation of Red Black Tree """
def __init__(self):
self.root = None
def max_depth(self, root=None):
""" return max depth of tree """
if root is None:
return 0
else:
return max(self.max_depth(root.left),
self.max_depth(root.right)) + 1
def depth(self, node):
""" returns the value of the node depth
relative to the root of the tree
"""
if node is None:
return 0
node_ = node
depth = 0
while node_ != self.root:
node_ = node_.parent
depth += 1
return depth
def min(self, current=None):
""" return minimum value in tree """
if not current:
current = self.root
while current.left is not None:
current = current.left
return current
def max(self, current=None):
""" return maximum value in tree """
if not current:
current = self.root
while current.right is not None:
current = current.right
return current
def search(self, value):
""" return a Node with given value otherwise None"""
return self.__search(self.root, value)
def __search(self, node, value):
while node is not None and value != node.value:
if value < node.value:
node = node.left
else:
node = node.right
return node
def successor(self, value):
""" return a node with nearest number that is more than given """
current = self.search(value)
if current is None:
raise Exception(('a Node with value ({})'
' does not exist').format(value))
return self.__successor(current)
def __successor(self, current):
if current.right is not None:
return self.min(current.right)
while (current.parent is not None
and current.parent.right is current):
current = current.parent
return current.parent
def insert(self, key):
""" insert a Node with given key to Red Black Tree """
# define a new Node
node = Node(key)
# start from root of tree
x = self.root
y = None
while x is not None:
# find a parent for Node
y = x
if key < x.value:
x = x.left
else:
x = x.right
# set parent for current Node
node.parent = y
if y is None:
# set Node as new tree root
self.root = node
elif key < y.value:
# set Node as left branch
y.left = node
else:
# set Node as right branch
y.right = node
# set default value for current Node
node.left = None
node.right = None
node.color = Node.RED
# run fixup function for
# restore red black properties of the tree
self.__insert_fixup(node)
def __insert_fixup(self, x):
""" restore red-black tree properties after insert new node """
while x != self.root and x.parent.color == Node.RED:
# we have a violation
if x.parent == x.parent.parent.left:
# we are on left branch
y = x.parent.parent.right
if y is not None and y.color == Node.RED:
                    # uncle is red
x.parent.color = Node.BLACK
y.color = Node.BLACK
x.parent.parent.color = Node.RED
x = x.parent.parent
else:
# uncle is black
if x == x.parent.right:
# make x a left child
x = x.parent
self.__left_rotate(x)
# recolor and rotate
x.parent.color = Node.BLACK
x.parent.parent.color = Node.RED
self.__right_rotate(x.parent.parent)
else:
# mirror image of above code
y = x.parent.parent.left
if y is not None and y.color == Node.RED:
                    # uncle is red
x.parent.color = Node.BLACK
y.color = Node.BLACK
x.parent.parent.color = Node.RED
x = x.parent.parent
else:
                    # uncle is black
if x == x.parent.left:
x = x.parent
self.__right_rotate(x)
x.parent.color = Node.BLACK
x.parent.parent.color = Node.RED
self.__left_rotate(x.parent.parent)
self.root.color = Node.BLACK
def __left_rotate(self, x):
""" transformation of the left subtree to the right subtree """
if not x.right:
raise Exception("a right branch of Node is None")
# get right subtree
y = x.right
# transformation of the left subtree to the right
x.right = y.left
if y.left:
y.left.parent = x
# set new parent
y.parent = x.parent
if not x.parent:
# set new root
self.root = y
else:
if x == x.parent.left:
# we are on left branch
x.parent.left = y
else:
x.parent.right = y
# set x as left parent node
y.left = x
x.parent = y
def __right_rotate(self, x):
""" transformation of the right subtree to the left subtree """
if not x.left:
raise Exception("a right branch of Node is None")
# get left subtree
y = x.left
# transformation of the right subtree to the left
x.left = y.right
if y.right:
y.right.parent = x
# set new parent
y.parent = x.parent
if not x.parent:
# set new root
self.root = y
else:
if x == x.parent.left:
# we are on left branch
x.parent.left = y
else:
x.parent.right = y
# set x as right parent node
y.right = x
x.parent = y
def transplant(self, node, newnode):
""" transplant new node to current node """
if node.parent is None:
self.root = newnode
elif node == node.parent.left:
node.parent.left = newnode
else:
node.parent.right = newnode
if newnode is not None:
newnode.parent = node.parent
def delete(self, value):
""" delete value from tree """
node = self.search(value)
return self.__delete(node)
def __delete(self, node):
y = node
color = y.color
if node.left is None:
x = node.right
self.transplant(node, node.right)
elif node.right is None:
x = node.left
self.transplant(node, node.left)
else:
y = self.min(node.right)
color = y.color
x = y.right
            if y.parent == node:
                # x may be None when y has no right child
                if x is not None:
                    x.parent = y
            else:
                self.transplant(y, y.right)
                y.right = node.right
                y.right.parent = y
self.transplant(node, y)
y.left = node.left
y.left.parent = y
y.color = node.color
if color == Node.BLACK:
self.__delete_fixup(x)
def __delete_fixup(self, x):
""" restore red-black tree properties after insert new node """
while x != self.root and x.color == Node.BLACK:
# we have a violation
if x == x.parent.left:
# we are on left branch
y = x.parent.right
if y is not None and y.color == Node.RED:
                    # sibling is red
                    y.color = Node.BLACK
                    x.parent.color = Node.RED
                    self.__left_rotate(x.parent)
y = x.parent.right
if y.left.color == Node.BLACK and y.right.color == Node.BLACK:
y.color = Node.RED
x = x.parent
else:
if y.right.color == Node.BLACK:
y.left.color = Node.BLACK
y.color = Node.RED
self.__right_rotate(y)
y = x.parent.right
y.color = x.parent.color
x.parent.color = Node.BLACK
y.right.color = Node.BLACK
self.__left_rotate(x.parent)
x = self.root
else:
y = x.parent.left
if y is not None and y.color == Node.RED:
                    # sibling is red
                    y.color = Node.BLACK
                    x.parent.color = Node.RED
                    self.__right_rotate(x.parent)
y = x.parent.left
if y.right.color == Node.BLACK and y.left.color == Node.BLACK:
y.color = Node.RED
x = x.parent
else:
if y.left.color == Node.BLACK:
y.right.color = Node.BLACK
y.color = Node.RED
self.__left_rotate(y)
y = x.parent.left
y.color = x.parent.color
x.parent.color = Node.BLACK
y.left.color = Node.BLACK
self.__right_rotate(x.parent)
x = self.root
x.color = Node.BLACK
def __str__(self):
""" return a string representation of Tree """
# a variable to hold the node in ascending order
sortnodes = []
# last node in tree
maxnode = self.max(self.root)
# first node in tree
node = self.min(self.root)
while True:
sortnodes.append((node, self.depth(node)))
if node == maxnode:
break
# next node
node = self.__successor(node)
# max depth of tree
maxdepth = self.max_depth(self.root)
# list of tree strings
strings = ['' for _ in range(maxdepth + 1)]
for node, rank in sortnodes:
for level in range(maxdepth + 1):
if rank == level:
strings[level] += str(node)
else:
strings[level] += ' ' * len(str(node))
return "\n".join(strings)
if __name__ in "__main__":
tree = RedBlackTree()
for i in [0, -12, -8, 10, -100]:
print('insert {} to tree'.format(i))
tree.insert(i)
print(tree)
for i in [-100, -8]:
tree.delete(i)
print(tree)
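    # Illustrative addition (not part of the original script): a quick check of
    # the lookup helpers defined above on the remaining values 0, -12 and 10.
    print('search(10) ->', tree.search(10))
    print('min ->', tree.min(), 'max ->', tree.max())
    print('successor(-12) ->', tree.successor(-12))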
| 31.30829
| 78
| 0.484154
|
d0d6d173ffa3714962402acbaf084d32bd6b26b8
| 8
|
py
|
Python
|
asd asd as d/asd asd asd/asd.py
|
admin-greyatom/test-test
|
dba262d0e546f297d7f29de771fde47f74c369a4
|
[
"MIT"
] | null | null | null |
asd asd as d/asd asd asd/asd.py
|
admin-greyatom/test-test
|
dba262d0e546f297d7f29de771fde47f74c369a4
|
[
"MIT"
] | null | null | null |
asd asd as d/asd asd asd/asd.py
|
admin-greyatom/test-test
|
dba262d0e546f297d7f29de771fde47f74c369a4
|
[
"MIT"
] | null | null | null |
as dasd
| 4
| 7
| 0.75
|
25c7e020603146ea6d1e068e0ee914845c60b5b6
| 991
|
py
|
Python
|
code/utils.py
|
MindMimicLabs/model-template
|
a1000a51ec9e9d9483677713b1a08af9748a76f9
|
[
"MIT"
] | null | null | null |
code/utils.py
|
MindMimicLabs/model-template
|
a1000a51ec9e9d9483677713b1a08af9748a76f9
|
[
"MIT"
] | null | null | null |
code/utils.py
|
MindMimicLabs/model-template
|
a1000a51ec9e9d9483677713b1a08af9748a76f9
|
[
"MIT"
] | null | null | null |
import pathlib
from typeguard import typechecked
# makes sure our parameters are good
@typechecked
def assert_folder_is_readable(folder: pathlib.Path) -> None:
if not folder.exists():
raise FileNotFoundError(str(folder))
elif not folder.is_dir():
raise NotADirectoryError(str(folder))
@typechecked
def assert_file_is_readable(file_path: pathlib.Path) -> None:
if not file_path.exists():
raise FileNotFoundError(str(file_path))
elif not file_path.is_file():
raise FileNotFoundError(str(file_path))
@typechecked
def ensure_folder_is_writable(folder: pathlib.Path) -> None:
if not folder.exists():
folder.mkdir(parents = True)
elif not folder.is_dir():
raise NotADirectoryError(str(folder))
@typechecked
def is_corpus_document(file_path: pathlib.Path) -> bool:
result = \
file_path.is_file() and \
file_path.suffix.lower() == '.txt' and \
not file_path.stem.startswith('_')
return result
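# Illustrative usage sketch (not part of the original module); the "corpus"
# folder name below is only an assumption for demonstration purposes.
if __name__ == '__main__':
    corpus_dir = pathlib.Path('corpus')
    ensure_folder_is_writable(corpus_dir)
    assert_folder_is_readable(corpus_dir)
    print([p.name for p in corpus_dir.glob('*.txt') if is_corpus_document(p)])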
| 31.967742
| 61
| 0.705348
|
bd796a20d90edb458cd9fcc8f4283ea985445d24
| 4,676
|
py
|
Python
|
sdk/yapily/models/other_residency_type.py
|
yapily/yapily-sdk-python
|
c09930c44e8795e270e2846a2c0fb783200df76a
|
[
"MIT"
] | 11
|
2018-05-18T14:38:49.000Z
|
2021-09-08T13:24:37.000Z
|
sdk/yapily/models/other_residency_type.py
|
yapily/yapily-sdk-python
|
c09930c44e8795e270e2846a2c0fb783200df76a
|
[
"MIT"
] | 5
|
2019-10-23T15:06:33.000Z
|
2021-08-03T21:18:50.000Z
|
sdk/yapily/models/other_residency_type.py
|
yapily/yapily-sdk-python
|
c09930c44e8795e270e2846a2c0fb783200df76a
|
[
"MIT"
] | 8
|
2019-04-27T00:02:18.000Z
|
2021-11-21T02:54:12.000Z
|
# coding: utf-8
"""
Yapily API
To access endpoints that require authentication, use your application key and secret created in the Dashboard (https://dashboard.yapily.com) # noqa: E501
The version of the OpenAPI document: 1.154.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from yapily.configuration import Configuration
class OtherResidencyType(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'code': 'str',
'description': 'str',
'name': 'str'
}
attribute_map = {
'code': 'Code',
'description': 'Description',
'name': 'Name'
}
def __init__(self, code=None, description=None, name=None, local_vars_configuration=None): # noqa: E501
"""OtherResidencyType - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._code = None
self._description = None
self._name = None
self.discriminator = None
if code is not None:
self.code = code
if description is not None:
self.description = description
if name is not None:
self.name = name
@property
def code(self):
"""Gets the code of this OtherResidencyType. # noqa: E501
:return: The code of this OtherResidencyType. # noqa: E501
:rtype: str
"""
return self._code
@code.setter
def code(self, code):
"""Sets the code of this OtherResidencyType.
:param code: The code of this OtherResidencyType. # noqa: E501
:type: str
"""
self._code = code
@property
def description(self):
"""Gets the description of this OtherResidencyType. # noqa: E501
:return: The description of this OtherResidencyType. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this OtherResidencyType.
:param description: The description of this OtherResidencyType. # noqa: E501
:type: str
"""
self._description = description
@property
def name(self):
"""Gets the name of this OtherResidencyType. # noqa: E501
:return: The name of this OtherResidencyType. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this OtherResidencyType.
:param name: The name of this OtherResidencyType. # noqa: E501
:type: str
"""
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OtherResidencyType):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, OtherResidencyType):
return True
return self.to_dict() != other.to_dict()
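# Illustrative usage sketch (not part of the generated file); the field values
# below are made up for demonstration only.
if __name__ == "__main__":
    example = OtherResidencyType(code="01", description="Tax resident", name="Resident")
    print(example.to_dict())
    print(example == OtherResidencyType(code="01", description="Tax resident", name="Resident"))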
| 27.028902
| 158
| 0.576775
|
b6e5db511e6dce419ab99aa952b672cf6d3075b8
| 3,870
|
py
|
Python
|
app/recipe/tests/test_tags_api.py
|
jflan91/Recipe-API
|
2967814628134e74dfed9812c7c0ab98fd2e5860
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_tags_api.py
|
jflan91/Recipe-API
|
2967814628134e74dfed9812c7c0ab98fd2e5860
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_tags_api.py
|
jflan91/Recipe-API
|
2967814628134e74dfed9812c7c0ab98fd2e5860
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag, Recipe
from recipe.serializers import TagSerializer
TAGS_URL = reverse('recipe:tag-list')
class PublicTagsApiTests(TestCase):
"""Test public api tags"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""Test that login required to retrieve tags"""
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
"""Test user tags"""
def setUp(self):
self.user = get_user_model().objects.create_user(
'test@email.com',
'testpass123'
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_tags(self):
"""Test tag retrieval"""
Tag.objects.create(user=self.user, name='Italian')
Tag.objects.create(user=self.user, name='German')
res = self.client.get(TAGS_URL)
tags = Tag.objects.all().order_by('-name')
serializer = TagSerializer(tags, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_user_limited_tags(self):
"""Test only retrieve authenticated enduser tags"""
user2 = get_user_model().objects.create_user(
'test2',
'testpass123'
)
Tag.objects.create(user=user2, name='Seafood')
tag = Tag.objects.create(user=self.user, name='Quick Meals')
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], tag.name)
def test_create_tag(self):
"""Test creating new tag"""
payload = {'name': 'Test'}
self.client.post(TAGS_URL, payload)
exists = Tag.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
def test_tag_creation_invalid(self):
"""Test tag creation with invalid input"""
payload = {'name': ''}
res = self.client.post(TAGS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_tags_by_recipe(self):
"""Test recipe tag retrieval by recipe"""
tag1 = Tag.objects.create(user=self.user, name='Entree')
tag2 = Tag.objects.create(user=self.user, name='Dessert')
recipe = Recipe.objects.create(
title='Italian Chicken',
time_minutes=30,
price=10.00,
user=self.user
)
recipe.tags.add(tag1)
res = self.client.get(TAGS_URL, {'assigned_only': 1})
serializer1 = TagSerializer(tag1)
serializer2 = TagSerializer(tag2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
def test_retrieve_tags_unique(self):
"""Test filtering tags returns unique items"""
tag = Tag.objects.create(user=self.user, name='Breakfast')
Tag.objects.create(user=self.user, name='Dinner')
recipe1 = Recipe.objects.create(
title='Bacon Omelette',
time_minutes=8,
price=5.00,
user=self.user
)
recipe1.tags.add(tag)
recipe2 = Recipe.objects.create(
title='Biscuits and Sausage Gravy',
time_minutes=20,
price=8.00,
user=self.user
)
recipe2.tags.add(tag)
res = self.client.get(TAGS_URL, {'assigned_only': 1})
self.assertEqual(len(res.data), 1)
| 30.472441
| 71
| 0.624806
|
8dbf641c5494bd53a25f00da7ac5c7e49b85b6cd
| 2,083
|
py
|
Python
|
tests/test_document_smart_insert.py
|
p32929/mongoengine_mate-project
|
960e947c4cfbbf4c0ccc34d296a2517d6abd28d4
|
[
"MIT"
] | 6
|
2020-01-06T07:39:30.000Z
|
2021-09-16T04:10:57.000Z
|
tests/test_document_smart_insert.py
|
p32929/mongoengine_mate-project
|
960e947c4cfbbf4c0ccc34d296a2517d6abd28d4
|
[
"MIT"
] | null | null | null |
tests/test_document_smart_insert.py
|
p32929/mongoengine_mate-project
|
960e947c4cfbbf4c0ccc34d296a2517d6abd28d4
|
[
"MIT"
] | 1
|
2022-03-08T03:41:11.000Z
|
2022-03-08T03:41:11.000Z
|
# -*- coding: utf-8 -*-
import pytest
import sys
from pymongo.database import Database
import mongoengine
from mongoengine_mate import ExtendedDocument
py_ver = "%s.%s" % (sys.version_info.major, sys.version_info.minor)
user_col_name = "user_%s" % py_ver
class User(ExtendedDocument):
user_id = mongoengine.IntField(primary_key=True)
name = mongoengine.StringField()
meta = {
"collection": user_col_name
}
def test_smart_insert(connect):
import time
import random
n_breaker = 5
n_total = 120
# Smart Insert
User.objects.delete()
total_user_ids = list(range(1, 1+n_total))
random.shuffle(total_user_ids)
breaker_user_ids = total_user_ids[:n_breaker]
total_users = [User(user_id=_id) for _id in total_user_ids]
breaker_users = [User(user_id=_id) for _id in breaker_user_ids]
User.objects.insert(breaker_users)
assert User.objects.count() == n_breaker
    st = time.perf_counter()
    n_insert, n_skipped = User.smart_insert(total_users)
    elapse1 = time.perf_counter() - st
assert n_insert == n_total - n_breaker
assert n_skipped == n_breaker
    assert User.objects.count() == n_total  # after smart insert, we have n_total docs
assert [
user.to_dict()
for user in User.objects
] == [
user.to_dict()
for user in total_users
]
# Regular Insert
User.objects.delete()
total_users = [User(user_id=_id) for _id in total_user_ids]
breaker_users = [User(user_id=_id) for _id in breaker_user_ids]
User.objects.insert(breaker_users)
assert User.objects.count() == n_breaker
    st = time.perf_counter()
    for user in total_users:
        try:
            user.save()
        except Exception:
            pass
    elapse2 = time.perf_counter() - st
    assert User.objects.count() == n_total  # after regular insert, we have n_total docs
assert elapse1 <= elapse2
# Single Document Insert
    User.smart_insert(User(user_id=1))
if __name__ == "__main__":
import os
basename = os.path.basename(__file__)
pytest.main([basename, "-s", "--tb=native"])
| 23.942529
| 82
| 0.663946
|
c3063a20bd3c373b438ffd5a9a032deda7c12b88
| 62
|
py
|
Python
|
Snippets/code-gym-1/fifty_to_sixty.py
|
ursaMaj0r/python-csc-125
|
1d0968ad144112e24ae331c75aad58b74041593a
|
[
"MIT"
] | null | null | null |
Snippets/code-gym-1/fifty_to_sixty.py
|
ursaMaj0r/python-csc-125
|
1d0968ad144112e24ae331c75aad58b74041593a
|
[
"MIT"
] | null | null | null |
Snippets/code-gym-1/fifty_to_sixty.py
|
ursaMaj0r/python-csc-125
|
1d0968ad144112e24ae331c75aad58b74041593a
|
[
"MIT"
] | null | null | null |
count = 50
while count <= 60:
print(count)
count += 1
| 12.4
| 18
| 0.564516
|
b618c87565f8e6fc4f443bc57c9aaf17fe53786c
| 5,402
|
py
|
Python
|
moodle/mod/lesson/lesson.py
|
Hardikris/moodlepy
|
8f5cb0cb4c2297e10f48396de681f6bb250f7751
|
[
"MIT"
] | null | null | null |
moodle/mod/lesson/lesson.py
|
Hardikris/moodlepy
|
8f5cb0cb4c2297e10f48396de681f6bb250f7751
|
[
"MIT"
] | null | null | null |
moodle/mod/lesson/lesson.py
|
Hardikris/moodlepy
|
8f5cb0cb4c2297e10f48396de681f6bb250f7751
|
[
"MIT"
] | null | null | null |
from typing import List, Optional
from moodle import MoodleWarning, ResponsesFactory
from moodle.attr import dataclass, field
@dataclass
class File:
"""File
Args:
filename (Optional[str]): File name.
filepath (Optional[str]): File path.
filesize (Optional[int]): File size.
fileurl (Optional[str]): Downloadable file url.
timemodified (Optional[int]): Time modified.
mimetype (Optional[str]): File mime type.
        isexternalfile (Optional[int]): Whether it is an external file.
repositorytype (Optional[str]): The repository type for the external files.
"""
filename: Optional[str]
filepath: Optional[str]
filesize: Optional[int]
fileurl: Optional[str]
timemodified: Optional[int]
mimetype: Optional[str]
isexternalfile: Optional[int]
repositorytype: Optional[str]
@dataclass
class Lesson:
"""Lesson
Args:
id (int): Standard Moodle primary key.
course (int): Foreign key reference to the course this lesson is part of.
coursemodule (int): Course module id.
name (str): Lesson name.
intro (Optional[str]): Lesson introduction text.
        introformat (int): Defaults to "1"; intro format (1 = HTML, 0 = MOODLE, 2 = PLAIN or 4 = MARKDOWN)
practice (Optional[int]): Practice lesson?
modattempts (Optional[int]): Allow student review?
usepassword (Optional[int]): Password protected lesson?
password (Optional[str]): Password
dependency (Optional[int]): Dependent on (another lesson id)
conditions (Optional[str]): Conditions to enable the lesson
grade (Optional[int]): The total that the grade is scaled to be out of
custom (Optional[int]): Custom scoring?
ongoing (Optional[int]): Display ongoing score?
usemaxgrade (Optional[int]): How to calculate the final grade
maxanswers (Optional[int]): Maximum answers per page
maxattempts (Optional[int]): Maximum attempts
review (Optional[int]): Provide option to try a question again
nextpagedefault (Optional[int]): Action for a correct answer
feedback (Optional[int]): Display default feedback
minquestions (Optional[int]): Minimum number of questions
maxpages (Optional[int]): Number of pages to show
timelimit (Optional[int]): Time limit
retake (Optional[int]): Re-takes allowed
activitylink (Optional[int]): Id of the next activity to be linked once the lesson is completed
mediafile (Optional[str]): Local file path or full external URL
mediaheight (Optional[int]): Popup for media file height
        mediawidth (Optional[int]): Popup for media file width
mediaclose (Optional[int]): Display a close button in the popup?
slideshow (Optional[int]): Display lesson as slideshow
width (Optional[int]): Slideshow width
height (Optional[int]): Slideshow height
bgcolor (Optional[str]): Slideshow bgcolor
displayleft (Optional[int]): Display left pages menu?
displayleftif (Optional[int]): Minimum grade to display menu
progressbar (Optional[int]): Display progress bar?
available (Optional[int]): Available from
deadline (Optional[int]): Available until
timemodified (Optional[int]): Last time settings were updated
completionendreached (Optional[int]): Require end reached for completion?
completiontimespent (Optional[int]): Student must do this activity at least for
allowofflineattempts (int): Whether to allow the lesson to be attempted offline in the mobile app
introfiles (List[File]): introfiles
mediafiles (List[File]): mediafiles
"""
id: int
course: int
coursemodule: int
name: str
intro: Optional[str]
introformat: int
practice: Optional[int]
modattempts: Optional[int]
usepassword: Optional[int]
password: Optional[str]
dependency: Optional[int]
conditions: Optional[str]
grade: Optional[int]
custom: Optional[int]
ongoing: Optional[int]
usemaxgrade: Optional[int]
maxanswers: Optional[int]
maxattempts: Optional[int]
review: Optional[int]
nextpagedefault: Optional[int]
feedback: Optional[int]
minquestions: Optional[int]
maxpages: Optional[int]
timelimit: Optional[int]
retake: Optional[int]
activitylink: Optional[int]
mediafile: Optional[str]
mediaheight: Optional[int]
mediawidth: Optional[int]
mediaclose: Optional[int]
slideshow: Optional[int]
width: Optional[int]
height: Optional[int]
bgcolor: Optional[str]
displayleft: Optional[int]
displayleftif: Optional[int]
progressbar: Optional[int]
available: Optional[int]
deadline: Optional[int]
timemodified: Optional[int]
completionendreached: Optional[int]
completiontimespent: Optional[int]
allowofflineattempts: int
introfiles: List[File] = field(factory=list)
mediafiles: List[File] = field(factory=list)
@dataclass
class OneLesson:
lesson: Lesson
warnings: List[MoodleWarning] = field(factory=list)
@dataclass
class Lessons(ResponsesFactory[Lesson]):
lessons: List[Lesson] = field(factory=list)
warnings: List[MoodleWarning] = field(factory=list)
@property
def items(self) -> List[Lesson]:
return self.lessons
| 37.776224
| 107
| 0.682525
|
999cb728eef9f11df6936359f2166c9102c8f9f2
| 3,511
|
py
|
Python
|
oauth2client/contrib/xsrfutil.py
|
GFXpulse/oauth2client
|
411b4a97e1a652bc3ef20cab87ea9cc6c9d292e7
|
[
"Apache-2.0"
] | null | null | null |
oauth2client/contrib/xsrfutil.py
|
GFXpulse/oauth2client
|
411b4a97e1a652bc3ef20cab87ea9cc6c9d292e7
|
[
"Apache-2.0"
] | null | null | null |
oauth2client/contrib/xsrfutil.py
|
GFXpulse/oauth2client
|
411b4a97e1a652bc3ef20cab87ea9cc6c9d292e7
|
[
"Apache-2.0"
] | 1
|
2019-11-13T12:39:17.000Z
|
2019-11-13T12:39:17.000Z
|
# Copyright 2014 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper methods for creating & verifying XSRF tokens."""
import base64
import binascii
import hmac
import time
import hashlib
from oauth2client import _helpers
# Delimiter character
DELIMITER = b':'
# 1 hour in seconds
DEFAULT_TIMEOUT_SECS = 60 * 60
@_helpers.positional(2)
def generate_token(key, user_id, action_id='', when=None):
"""Generates a URL-safe token for the given user, action, time tuple.
Args:
key: secret key to use.
user_id: the user ID of the authenticated user.
action_id: a string identifier of the action they requested
authorization for.
when: the time in seconds since the epoch at which the user was
authorized for this action. If not set the current time is used.
Returns:
A string XSRF protection token.
"""
digester = hmac.new(_helpers._to_bytes(key, encoding='utf-8'), digestmod=hashlib.sha256)
digester.update(_helpers._to_bytes(str(user_id), encoding='utf-8'))
digester.update(DELIMITER)
digester.update(_helpers._to_bytes(action_id, encoding='utf-8'))
digester.update(DELIMITER)
when = _helpers._to_bytes(str(when or int(time.time())), encoding='utf-8')
digester.update(when)
digest = digester.digest()
token = base64.urlsafe_b64encode(digest + DELIMITER + when)
return token
@_helpers.positional(3)
def validate_token(key, token, user_id, action_id="", current_time=None):
"""Validates that the given token authorizes the user for the action.
Tokens are invalid if the time of issue is too old or if the token
does not match what generateToken outputs (i.e. the token was forged).
Args:
key: secret key to use.
token: a string of the token generated by generateToken.
user_id: the user ID of the authenticated user.
action_id: a string identifier of the action they requested
authorization for.
Returns:
A boolean - True if the user is authorized for the action, False
otherwise.
"""
if not token:
return False
try:
decoded = base64.urlsafe_b64decode(token)
token_time = int(decoded.split(DELIMITER)[-1])
except (TypeError, ValueError, binascii.Error):
return False
if current_time is None:
current_time = time.time()
# If the token is too old it's not valid.
if current_time - token_time > DEFAULT_TIMEOUT_SECS:
return False
# The given token should match the generated one with the same time.
expected_token = generate_token(key, user_id, action_id=action_id,
when=token_time)
if len(token) != len(expected_token):
return False
# Perform constant time comparison to avoid timing attacks
different = 0
for x, y in zip(bytearray(token), bytearray(expected_token)):
different |= x ^ y
return not different
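# Illustrative round-trip sketch (not part of the original module); the key,
# user id and action id below are arbitrary demonstration values.
if __name__ == '__main__':
    demo_token = generate_token(b'demo-secret', 42, action_id='delete')
    print(validate_token(b'demo-secret', demo_token, 42, action_id='delete'))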
| 34.087379
| 92
| 0.690971
|
b950168cccb4a2ded47e872a5d48f262581e99d9
| 3,457
|
py
|
Python
|
utils.py
|
paolo-05/discord-bot-dashboard-python
|
108735bd0a838295800c14360176bbd5924606cc
|
[
"Apache-2.0"
] | null | null | null |
utils.py
|
paolo-05/discord-bot-dashboard-python
|
108735bd0a838295800c14360176bbd5924606cc
|
[
"Apache-2.0"
] | null | null | null |
utils.py
|
paolo-05/discord-bot-dashboard-python
|
108735bd0a838295800c14360176bbd5924606cc
|
[
"Apache-2.0"
] | null | null | null |
import requests
import config
def get_token(code: str):
data = {
'client_id': config.CLIENT_ID,
'client_secret': config.CLIENT_SECRET,
'grant_type': 'authorization_code',
'code': code,
'redirect_uri': config.REDIRECT_URI,
'scope': 'identify guilds'
}
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
resp = requests.post(
"https://discord.com/api/oauth2/token", data=data, headers=headers)
resp.raise_for_status()
return resp.json()['access_token']
def get_user_info(token):
resp = requests.get("https://discord.com/api/v6/users/@me",
headers={"Authorization": f"Bearer {token}"})
resp.raise_for_status()
return resp.json()
def get_user_info_by_id(user_id):
token = config.BOT_TOKEN
resp = requests.get(f"https://discord.com/api/v6/users/{user_id}",
headers={"Authorization": f"Bot {token}"})
resp.raise_for_status()
json = {
'user_name': f'{resp.json()["username"]}#{resp.json()["discriminator"]}',
        'avatar_url': f'https://cdn.discordapp.com/avatars/{resp.json()["id"]}/{resp.json()["avatar"]}.png' if resp.json()["avatar"] is not None else 'https://www.shitpostbot.com/resize/585/400?img=%2Fimg%2Fsourceimages%2Fdefault-discord-icon-5b254285e1034.png'
}
return json
def get_user_guilds(token: str):
resp = requests.get("https://discord.com/api/v6/users/@me/guilds",
headers={"Authorization": f"Bearer {token}"})
resp.raise_for_status()
return resp.json()
def get_bot_guilds():
token = config.BOT_TOKEN
resp = requests.get("https://discord.com/api/v6/users/@me/guilds",
headers={"Authorization": f"Bot {token}"})
resp.raise_for_status()
return resp.json()
def get_mutual_guilds(user_guilds: list, bot_guilds: list):
    bot_guild_ids = {guild['id'] for guild in bot_guilds}
    return [guild for guild in user_guilds
            if guild['id'] in bot_guild_ids and (guild['permissions'] & 0x20) == 0x20]
def get_guild_data(guild_id: int):
token = config.BOT_TOKEN
resp = requests.get(
f"https://discord.com/api/v6/guilds/{guild_id}", headers={"Authorization": f"Bot {token}"})
try:
resp.raise_for_status()
return resp.json()
except requests.exceptions.HTTPError:
return None
def get_guild_channels(guild_id: int):
token = config.BOT_TOKEN
resp = requests.get(
f"https://discord.com/api/v6/guilds/{guild_id}/channels", headers={"Authorization": f"Bot {token}"}
)
channels = []
for channel in resp.json():
if channel['type'] == 0:
channels.append(channel)
return channels
def get_channel_by_id(guild_id: int, channel_id: str):
token = config.BOT_TOKEN
resp = requests.get(
f"https://discord.com/api/v6/guilds/{guild_id}/channels", headers={"Authorization": f"Bot {token}"}
)
for channel in resp.json():
if channel["type"] == 0 and channel["id"] == channel_id:
channel_name = channel["name"]
return channel_name
def get_channel_by_name(guild_id: int, channel_name: str):
token = config.BOT_TOKEN
resp = requests.get(
f"https://discord.com/api/v6/guilds/{guild_id}/channels", headers={"Authorization": f"Bot {token}"}
)
for channel in resp.json():
if channel['type'] == 0 and channel['name'] == channel_name:
channel_id = channel['id']
return channel_id
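# Illustrative flow sketch (not part of the original module); it only works with
# real credentials in config and a real OAuth2 "code" from the redirect URL.
if __name__ == '__main__':
    access_token = get_token('oauth-code-from-redirect')
    user = get_user_info(access_token)
    mutual = get_mutual_guilds(get_user_guilds(access_token), get_bot_guilds())
    print(user['username'], 'manages', len(mutual), 'mutual guilds')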
| 31.144144
| 262
| 0.633497
|
faa82d01c3c35fc32f00ba7526999c67015a65f7
| 4,459
|
py
|
Python
|
trainer.py
|
rogerhcheng/tiny-faces-pytorch
|
959ca952c30ea78b04d6b84088b027faea600cac
|
[
"MIT"
] | 154
|
2018-08-30T01:27:52.000Z
|
2022-03-21T12:09:55.000Z
|
trainer.py
|
sa-y-an/tiny-faces-pytorch
|
a3fc6223d01c0589c45e04d62f613af2876c3695
|
[
"MIT"
] | 24
|
2018-09-11T12:09:03.000Z
|
2022-01-13T00:49:47.000Z
|
trainer.py
|
sa-y-an/tiny-faces-pytorch
|
a3fc6223d01c0589c45e04d62f613af2876c3695
|
[
"MIT"
] | 44
|
2018-08-09T05:49:14.000Z
|
2022-01-24T18:11:20.000Z
|
from pathlib import Path
import numpy as np
import torch
from torch.nn import functional as nnfunc
from torchvision import transforms
from models.utils import get_bboxes
from utils.nms import nms
def print_state(idx, epoch, size, loss_cls, loss_reg):
if epoch >= 0:
message = "Epoch: [{0}][{1}/{2}]\t".format(epoch, idx, size)
else:
message = "Val: [{0}/{1}]\t".format(idx, size)
print(message +
'\tloss_cls: {loss_cls:.6f}' \
'\tloss_reg: {loss_reg:.6f}'.format(loss_cls=loss_cls, loss_reg=loss_reg))
def save_checkpoint(state, filename="checkpoint.pth", save_path="weights"):
# check if the save directory exists
if not Path(save_path).exists():
Path(save_path).mkdir()
save_path = Path(save_path, filename)
torch.save(state, str(save_path))
def visualize_output(img, output, templates, proc, prob_thresh=0.55, nms_thresh=0.1):
tensor_to_image = transforms.ToPILImage()
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
for t, m, s in zip(img[0], mean, std):
t.mul_(s).add_(m)
image = tensor_to_image(img[0]) # Index into the batch
cls_map = nnfunc.sigmoid(output[:, 0:templates.shape[0], :, :]).data.cpu(
).numpy().transpose((0, 2, 3, 1))[0, :, :, :]
reg_map = output[:, templates.shape[0]:, :, :].data.cpu(
).numpy().transpose((0, 2, 3, 1))[0, :, :, :]
print(np.sort(np.unique(cls_map))[::-1])
proc.visualize_heatmaps(image, cls_map, reg_map, templates,
prob_thresh=prob_thresh, nms_thresh=nms_thresh)
p = input("Continue? [Yn]")
if p.lower().strip() == 'n':
exit(0)
def draw_bboxes(image, img_id, bboxes, scores, scales, processor):
processor.render_and_save_bboxes(image, img_id, bboxes, scores, scales)
def train(model, loss_fn, optimizer, dataloader, epoch, device):
model = model.to(device)
model.train()
for idx, (img, class_map, regression_map) in enumerate(dataloader):
x = img.float().to(device)
class_map_var = class_map.float().to(device)
regression_map_var = regression_map.float().to(device)
output = model(x)
loss = loss_fn(output,
class_map_var, regression_map_var)
# visualize_output(img, output, dataloader.dataset.templates)
optimizer.zero_grad()
# Get the gradients
# torch will automatically mask the gradients to 0 where applicable!
loss.backward()
optimizer.step()
print_state(idx, epoch, len(dataloader),
loss_fn.class_average.average,
loss_fn.reg_average.average)
def get_detections(model, img, templates, rf, img_transforms,
prob_thresh=0.65, nms_thresh=0.3, scales=(-2, -1, 0, 1), device=None):
model = model.to(device)
model.eval()
dets = np.empty((0, 5)) # store bbox (x1, y1, x2, y2), score
num_templates = templates.shape[0]
    # Evaluate over multiple scales
scales_list = [2 ** x for x in scales]
# convert tensor to PIL image so we can perform resizing
image = transforms.functional.to_pil_image(img[0])
min_side = np.min(image.size)
for scale in scales_list:
# scale the images
        scaled_image = transforms.functional.resize(image,
                                                    int(min_side * scale))
# normalize the images
img = img_transforms(scaled_image)
# add batch dimension
img.unsqueeze_(0)
# now run the model
x = img.float().to(device)
output = model(x)
# first `num_templates` channels are class maps
score_cls = output[:, :num_templates, :, :]
prob_cls = torch.sigmoid(score_cls)
score_cls = score_cls.data.cpu().numpy().transpose((0, 2, 3, 1))
prob_cls = prob_cls.data.cpu().numpy().transpose((0, 2, 3, 1))
score_reg = output[:, num_templates:, :, :]
score_reg = score_reg.data.cpu().numpy().transpose((0, 2, 3, 1))
t_bboxes, scores = get_bboxes(score_cls, score_reg, prob_cls,
templates, prob_thresh, rf, scale)
scales = np.ones((t_bboxes.shape[0], 1)) / scale
# append scores at the end for NMS
d = np.hstack((t_bboxes, scores))
dets = np.vstack((dets, d))
# Apply NMS
keep = nms(dets, nms_thresh)
dets = dets[keep]
return dets
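# Illustrative sketch (not part of the original module): the (x1, y1, x2, y2,
# score) row layout produced by get_detections, shown with two dummy boxes.
if __name__ == '__main__':
    dummy_boxes = np.array([[10., 10., 50., 50.], [12., 11., 52., 49.]])
    dummy_scores = np.array([[0.9], [0.7]])
    dummy_dets = np.vstack((np.empty((0, 5)), np.hstack((dummy_boxes, dummy_scores))))
    print(dummy_dets.shape)  # (2, 5): one (x1, y1, x2, y2, score) row per box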
| 30.965278
| 89
| 0.610675
|
26cf066ce618fab0bf5d22d891146df28c25192c
| 2,130
|
py
|
Python
|
basic/18-web3py/scripts/5_use_openzeppelin_dynamic_mintable_contract.py
|
defiAnalysis/Dapp-Learning
|
7401815cb6c8528c39b24a703ff74702607e8f7f
|
[
"MIT"
] | 4
|
2021-07-21T12:19:38.000Z
|
2022-01-01T13:52:35.000Z
|
basic/18-web3py/scripts/5_use_openzeppelin_dynamic_mintable_contract.py
|
defiAnalysis/Dapp-Learning
|
7401815cb6c8528c39b24a703ff74702607e8f7f
|
[
"MIT"
] | null | null | null |
basic/18-web3py/scripts/5_use_openzeppelin_dynamic_mintable_contract.py
|
defiAnalysis/Dapp-Learning
|
7401815cb6c8528c39b24a703ff74702607e8f7f
|
[
"MIT"
] | null | null | null |
import json
from web3 import Web3
def main():
w3 = Web3(Web3.HTTPProvider('http://127.0.0.1:8545'))
with open('./build/contracts/MyTokenMintable2.json', 'r') as fr:
erc20_json_dict = json.load(fr)
my_contract = w3.eth.contract(abi=erc20_json_dict['abi'], bytecode=erc20_json_dict['bytecode'])
tx_hash = my_contract.constructor().transact({'from': w3.eth.accounts[0]})
    # 0. Use the my_contract class to build a contract instance directly
tx_receipt = w3.eth.wait_for_transaction_receipt(tx_hash)
contract = my_contract(address=tx_receipt['contractAddress'])
    # 1. Test the mint permission of account[1]
    # From the constructor of MyTokenMintable1 we can see that account[1] was granted the minting permission
print('1. ---- mint role')
#print(w3.toHex(contract.functions.MINTER_ROLE().call()))
#print(w3.toHex(w3.keccak(text="MINTER_ROLE")))
print("Account " + w3.eth.accounts[5] + ", Mint Role : ", contract.functions.hasRole(contract.functions.MINTER_ROLE().call(), w3.eth.accounts[5]).call())
#print(w3.toHex(contract.functions.getRoleAdmin(contract.functions.MINTER_ROLE().call()).call()))
    # 2. Mint tokens,
    # before starting, check the Token balance of all accounts
print('\n2. ---- check and mint')
print("Before Mint")
minter_role = contract.functions.MINTER_ROLE().call()
for acc in w3.eth.accounts:
print("Account " + acc + " Tokens Balance :" + str(contract.functions.balanceOf(acc).call()), ", Mint Role: " + str(contract.functions.hasRole(minter_role, acc).call()))
    # 3. Set permissions; the default admin is currently the contract creator accounts[0]
    # accounts[0] can grant the minter role to any account, including itself
    # Use the grantRole interface provided by AccessControl to authorize; the transaction sender is the admin
grant_role = contract.functions.MINTER_ROLE().call()
tx_hash = contract.functions.grantRole(role=grant_role, account=w3.eth.accounts[5]).transact({"from":w3.eth.accounts[0]})
# give everyone 10 Tokens
print("\nAfter Mint")
for acc in w3.eth.accounts:
contract.functions.mint(to=acc, amount=10).transact({'from': w3.eth.accounts[5]})
print("Account " + acc + " Tokens Balance :" + str(contract.functions.balanceOf(acc).call()), ", Mint Role: " + str(contract.functions.hasRole(minter_role, acc).call()))
if __name__ =='__main__':
main()
| 45.319149
| 177
| 0.69108
|
d4e7ebcbd9a355bbc4cb210343321c164b3ff445
| 1,987
|
py
|
Python
|
netflix_notify/management/commands/sync_titles.py
|
mikeengland/netflix-notify
|
7c49f171e62680a6882822a4f57346cd3cc3f828
|
[
"Apache-2.0"
] | 1
|
2017-06-07T11:46:46.000Z
|
2017-06-07T11:46:46.000Z
|
netflix_notify/management/commands/sync_titles.py
|
mikeengland/netflix-notify
|
7c49f171e62680a6882822a4f57346cd3cc3f828
|
[
"Apache-2.0"
] | null | null | null |
netflix_notify/management/commands/sync_titles.py
|
mikeengland/netflix-notify
|
7c49f171e62680a6882822a4f57346cd3cc3f828
|
[
"Apache-2.0"
] | null | null | null |
import logging
from django.core.management.base import BaseCommand
from netflix_notify.enums import Regions
from netflix_notify.models import (Title,
SyncLog)
from netflix_notify.scraper import Scraper
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Sync the titles with the application database'
def add_arguments(self, parser):
# TODO Add option to sync a specific Netflix region
pass
def handle(self, *args, **options):
self.get_and_store_titles()
def get_and_store_titles(self):
"""
Retrieve the titles from the API, post-process them and store them in the database, ensuring
any existing but now missing titles are set as inactive.
"""
logger.info('Retrieving titles from the API')
scraper = Scraper()
titles = scraper.get_titles()
created_or_updated = []
logger.info('Syncing titles in the database')
for title in titles:
title, _ = Title.objects.update_or_create(title_type=title.get('object_type'),
name=title.get('title'),
description=title.get('short_description'),
language=title.get('original_language'),
release_year=title.get('original_release_year'),
runtime=title.get('runtime'),
netflix_region=Regions.UK,
active=True)
created_or_updated.append(title)
currently_active = [title.pk for title in created_or_updated]
Title.objects.exclude(pk__in=currently_active).update(active=False)
SyncLog.objects.create()
logger.info('Title sync complete!')
| 38.211538
| 102
| 0.558631
|
50f6c7a40e8b73361d9f6c01f20f63aebc1c9b56
| 1,826
|
py
|
Python
|
profile.py
|
AvinashSingh786/SRedS
|
1ff1cebc78502359f41bd48d40b17a9aaa80d0e0
|
[
"MIT"
] | 2
|
2021-07-08T14:12:36.000Z
|
2022-02-18T00:42:55.000Z
|
profile.py
|
AvinashSingh786/SRedS
|
1ff1cebc78502359f41bd48d40b17a9aaa80d0e0
|
[
"MIT"
] | 1
|
2021-03-01T05:15:04.000Z
|
2021-03-01T05:15:04.000Z
|
profile.py
|
AvinashSingh786/SRedS
|
1ff1cebc78502359f41bd48d40b17a9aaa80d0e0
|
[
"MIT"
] | null | null | null |
# profile.py
import time
import os
import psutil
import inspect
def elapsed_since(start):
#return time.strftime("%H:%M:%S", time.gmtime(time.time() - start))
elapsed = time.time() - start
if elapsed < 1:
return str(round(elapsed*1000,2)) + "ms"
if elapsed < 60:
return str(round(elapsed, 2)) + "s"
if elapsed < 3600:
return str(round(elapsed/60, 2)) + "min"
else:
return str(round(elapsed / 3600, 2)) + "hrs"
def get_process_memory():
process = psutil.Process(os.getpid())
mi = process.memory_info()
return mi.rss, mi.vms, mi.shared
def format_bytes(bytes):
if abs(bytes) < 1000:
return str(bytes)+"B"
elif abs(bytes) < 1e6:
return str(round(bytes/1e3,2)) + "kB"
elif abs(bytes) < 1e9:
return str(round(bytes / 1e6, 2)) + "MB"
else:
return str(round(bytes / 1e9, 2)) + "GB"
def profile(func, *args, **kwargs):
def wrapper(*args, **kwargs):
rss_before, vms_before, shared_before = get_process_memory()
cpu1 = psutil.cpu_percent()
start = time.time()
result = func(*args, **kwargs)
elapsed_time = elapsed_since(start)
cpu2= psutil.cpu_percent()
rss_after, vms_after, shared_after = get_process_memory()
print("Profiling: {:>20} RSS: {:>8} | VMS: {:>8} | SHR {"
":>8} | time: {:>8} | CPU% {:>8}"
.format("<" + func.__name__ + ">",
format_bytes(rss_after - rss_before),
format_bytes(vms_after - vms_before),
format_bytes(shared_after - shared_before),
elapsed_time, (cpu1+cpu2)/2))
return result
if inspect.isfunction(func):
return wrapper
elif inspect.ismethod(func):
return wrapper(*args,**kwargs)
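# Illustrative usage sketch (not part of the original module); note that
# psutil's shared-memory field read above assumes a Linux host.
if __name__ == '__main__':
    @profile
    def build_squares(n):
        return [i * i for i in range(n)]
    build_squares(1_000_000)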
| 31.482759
| 71
| 0.575027
|
daec12a95e26d66876b0502566898361d1a1751e
| 338
|
py
|
Python
|
OtavioMiranda/POO/agregacao/main.py
|
juarezhenriquelisboa/Python
|
5c5498b33e7cba4e3bfa322a6a76bed74b68e6bf
|
[
"MIT"
] | 1
|
2021-01-01T14:46:28.000Z
|
2021-01-01T14:46:28.000Z
|
OtavioMiranda/POO/agregacao/main.py
|
juarezhenriquelisboa/Python
|
5c5498b33e7cba4e3bfa322a6a76bed74b68e6bf
|
[
"MIT"
] | null | null | null |
OtavioMiranda/POO/agregacao/main.py
|
juarezhenriquelisboa/Python
|
5c5498b33e7cba4e3bfa322a6a76bed74b68e6bf
|
[
"MIT"
] | null | null | null |
from classe_s import CarrinhoDeCompras, Produto
carrinho = CarrinhoDeCompras()
p1 = Produto('Camiseta', 50)
p2 = Produto('Iphone', 5000)
p3 = Produto('Caneca', 15)
carrinho.lista_produtos()
carrinho.inserir_produto(p1)
carrinho.inserir_produto(p2)
carrinho.inserir_produto(p3)
carrinho.lista_produtos()
print(carrinho.soma_total())
| 18.777778
| 47
| 0.781065
|
27405470e4bb5df1d9400c8d739dbc5583877222
| 90,637
|
py
|
Python
|
t2t_bert/utils/tensor2tensor/utils/t2t_model.py
|
yyht/bert
|
480c909e0835a455606e829310ff949c9dd23549
|
[
"Apache-2.0"
] | 34
|
2018-12-19T01:00:57.000Z
|
2021-03-26T09:36:37.000Z
|
t2t_bert/utils/tensor2tensor/utils/t2t_model.py
|
yyht/bert
|
480c909e0835a455606e829310ff949c9dd23549
|
[
"Apache-2.0"
] | 11
|
2018-12-25T03:37:59.000Z
|
2021-08-25T14:43:58.000Z
|
t2t_bert/utils/tensor2tensor/utils/t2t_model.py
|
yyht/bert
|
480c909e0835a455606e829310ff949c9dd23549
|
[
"Apache-2.0"
] | 9
|
2018-12-27T08:00:44.000Z
|
2020-06-08T03:05:14.000Z
|
# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""T2TModel Base Class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import functools
import math
import os
import time
import six
from tensor2tensor.data_generators import multi_problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators.problem import problem_hparams_to_features
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import modalities
from tensor2tensor.layers.common_attention import mixed_precision_is_enabled
from tensor2tensor.utils import beam_search
from tensor2tensor.utils import decoding
from tensor2tensor.utils import expert_utils as eu
from tensor2tensor.utils import hparams_lib
from tensor2tensor.utils import learning_rate
from tensor2tensor.utils import metrics
from tensor2tensor.utils import mlperf_log
from tensor2tensor.utils import optimize
from tensor2tensor.utils import quantization
from tensor2tensor.utils import registry
from tensor2tensor.utils import scheduled_sampling
import tensorflow as tf
from tensorflow.python.layers import base
from tensorflow.python.ops import inplace_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import tf_inspect as inspect
_no_problem_err_str = (
"The default implementation of %s requires that the "
"model be used with a Problem. If using a Problem, augment the "
"hparams object with trainer_lib.add_problem_hparams. If not, "
"override %s.")
_no_problem_err = (
lambda method_name: _no_problem_err_str % (method_name, method_name))
def _flatten_dict(original_dict):
"""Flatten dict of dicts into a single dict with appropriate prefixes.
Handles only 2 levels of nesting in the original dict.
Args:
original_dict: Dict which may contain one or more dicts.
Returns:
flat_dict: Dict without any nesting. Any dicts in the original dict have
their keys as prefixes in the new dict.
Raises:
ValueError if the original dict has more than two levels of nesting.
"""
flat_dict = {}
for key, value in original_dict.items():
if isinstance(value, dict):
for name, tensor in value.items():
if isinstance(tensor, dict):
raise ValueError("flatten_dict only handles 2 levels of nesting.")
flat_key = "__" + key + "_" + name
flat_dict[flat_key] = tensor
else:
flat_dict[key] = value
return flat_dict
def _unflatten_dict(flat_dict, prefixes):
"""Returns a dict of dicts if any prefixes match keys in the flat dict.
The function handles the case where the prefix may not be a dict.
Args:
flat_dict: A dict without any nesting.
prefixes: A list of strings which may have been dicts in the
original structure.
"""
original_dict = {}
for key, value in flat_dict.items():
prefix_found = False
for prefix in prefixes:
full_prefix = "__" + prefix + "_"
if key.startswith(full_prefix):
# Add a dict to the original dict with key=prefix
if prefix not in original_dict:
original_dict[prefix] = {}
original_dict[prefix][key[len(full_prefix):]] = value
prefix_found = True
break
if not prefix_found:
# No key matched a prefix in the for loop.
original_dict[key] = value
return original_dict
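# Illustrative example (not part of the original file) of the two helpers above:
#   _flatten_dict({"a": 1, "b": {"c": 2}})        -> {"a": 1, "__b_c": 2}
#   _unflatten_dict({"a": 1, "__b_c": 2}, ["b"])  -> {"a": 1, "b": {"c": 2}}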
class T2TModel(base.Layer):
"""Abstract base class for models.
`T2TModel` has three typical usages:
1. Estimator: The method `make_estimator_model_fn` builds a `model_fn` for
the tf.Estimator workflow of training, evaluation, and prediction.
It performs the method `call`, which performs the core computation,
followed by `estimator_spec_train`, `estimator_spec_eval`, or
`estimator_spec_predict` depending on the tf.Estimator mode.
2. Layer: The method `call` enables `T2TModel` to be used a callable by
itself. It calls the following methods:
* `bottom`, which transforms features according to `problem_hparams`' input
and target `Modality`s;
* `body`, which takes features and performs the core model computation to
return output and any auxiliary loss terms;
* `top`, which takes features and the body output, and transforms them
according to `problem_hparams`' input and target `Modality`s to return
the final logits;
* `loss`, which takes the logits, forms any missing training loss, and sums
all loss terms.
3. Inference: The method `infer` enables `T2TModel` to make sequence
predictions by itself.
Subclasses generally only need to override `body`.
"""
REGISTERED_NAME = None # Updated on registration.
def __init__(self,
hparams,
mode=tf.estimator.ModeKeys.TRAIN,
problem_hparams=None,
data_parallelism=None,
decode_hparams=None,
**kwargs):
"""Creates a T2TModel.
Args:
hparams: HParams, model hyperparameters.
mode: tf.estimator.ModeKeys, the execution mode.
problem_hparams: HParams, hyperparameters for the
Problem. If provided here or in hparams.problem_hparams, the model will
automatically determine bottom, top, and loss methods. If not provided,
calling the model will only invoke body.
data_parallelism: a expert_utils.Parallelism object,
specifies devices for data parallelism.
decode_hparams: a hyperparameter object with decoding parameters.
See decoding.decode_hparams.
**kwargs: arguments to pass to base.Layer constructor.
"""
# Determine name first: use registered name if possible, class name else.
default_name = registry.default_name(type(self))
name = self.REGISTERED_NAME or default_name
super(T2TModel, self).__init__(
trainable=mode == tf.estimator.ModeKeys.TRAIN, name=name, **kwargs)
if not problem_hparams and hasattr(hparams, "problem_hparams"):
problem_hparams = hparams.problem_hparams
self._problem_hparams = problem_hparams
# Setup hparams
hparams = hparams_lib.copy_hparams(hparams)
if self._problem_hparams and hparams.shared_embedding_and_softmax_weights:
# If vocabularies differ, unset shared_embedding_and_softmax_weights.
input_vocab_size = self._problem_hparams.vocab_size.get("inputs")
target_vocab_size = self._problem_hparams.vocab_size.get("targets")
if input_vocab_size is not None and hasattr(hparams, "vocab_divisor"):
input_vocab_size += (-input_vocab_size) % hparams.vocab_divisor
if target_vocab_size is not None and hasattr(hparams, "vocab_divisor"):
target_vocab_size += (-target_vocab_size) % hparams.vocab_divisor
if (input_vocab_size is not None and target_vocab_size is not None and
input_vocab_size != target_vocab_size):
log_info("Unsetting shared_embedding_and_softmax_weights.")
hparams.shared_embedding_and_softmax_weights = 0
if hparams.hidden_size:
hidden_size = hparams.hidden_size
else:
hidden_size = 1024
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_EMBEDDING_SHARED_WEIGHTS,
value={
"vocab_size": target_vocab_size,
"hidden_size": hidden_size
},
hparams=hparams)
if self._problem_hparams:
for feature_name, modality in six.iteritems(
self._problem_hparams.modality):
# If prepend mode, set weights_fn to appropriately handle it.
if (modality in (modalities.ModalityType.CTC_SYMBOL,
modalities.ModalityType.IDENTITY_SYMBOL,
modalities.ModalityType.SYMBOL,
modalities.ModalityType.SYMBOL_ONE_HOT)):
if (hparams.prepend_mode == "prepend_inputs_full_attention" or
(hparams.prepend_mode == "prepend_inputs_masked_attention" and
mode != tf.estimator.ModeKeys.TRAIN)):
weights_fn = common_layers.weights_prepend_inputs_to_targets
hparams.weights_fn[feature_name] = weights_fn
self._original_hparams = hparams
self.set_mode(mode)
self._decode_hparams = hparams_lib.copy_hparams(
decode_hparams or decoding.decode_hparams())
self._data_parallelism = data_parallelism or eu.Parallelism([""])
self._num_datashards = self._data_parallelism.n
self._ps_devices = self._data_parallelism.ps_devices
self._eager_var_store = create_eager_var_store()
if not common_layers.is_xla_compiled():
self.summarize_hparams()
self._variable_scopes = {}
def _add_variable_scope(self, key, vs):
if key not in self._variable_scopes:
self._variable_scopes[key] = vs
def summarize_hparams(self):
def create_hparams_summary(hparams, name):
hparams_strs = [tf.convert_to_tensor([k, str(v)])
for k, v in hparams.values().items()]
tf.summary.text(name, tf.cast(tf.stack(hparams_strs), tf.string))
create_hparams_summary(self._hparams, "%s_hparams" % self.name)
if self._problem_hparams:
create_hparams_summary(self._problem_hparams,
"%s_problem_hparams" % self.name)
# Replace the two methods below in order to add custom SessionRunHooks to
# the training procedure.
@staticmethod
def train_hooks(hook_context):
return []
@staticmethod
def eval_hooks(hook_context):
return []
@property
def hparams(self):
return self._hparams
@property
def problem_hparams(self):
return self._problem_hparams
@property
def is_training(self):
return self._hparams.mode == tf.estimator.ModeKeys.TRAIN
@property
def is_predicting(self):
return self._hparams.mode == tf.estimator.ModeKeys.PREDICT
@property
def has_input(self):
if self._problem_hparams:
return "inputs" in self._problem_hparams.modality
else:
return True
@property
def _custom_getter(self):
if self.hparams.weight_dtype == "bfloat16":
if self.hparams.optimizer != "Adafactor":
raise NotImplementedError(
"weight_dtype=bfloat16 only implemented with Adafactor optimizer")
activation_dtype = tf.float32
if self.hparams.activation_dtype == "bfloat16":
activation_dtype = tf.bfloat16
return quantization.EighthPowerEncoding().custom_getter(
activation_dtype=activation_dtype)
elif self.hparams.activation_dtype == "bfloat16":
return quantization.bfloat16_activations_var_getter
elif mixed_precision_is_enabled(hparams=self.hparams):
return quantization.float16_activations_var_getter
else:
return None
@property
def _target_modality_is_real(self):
"""Whether the target modality is real-valued."""
vocab_size = self._problem_hparams.vocab_size["targets"]
if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"):
vocab_size += (-vocab_size) % self._hparams.vocab_divisor
modality = self._problem_hparams.modality["targets"]
modality_name = self._hparams.name.get(
"targets",
modalities.get_name(modality))(self._hparams, vocab_size)
return modality_name.startswith("real")
def call(self, inputs, **kwargs):
del kwargs
features = inputs
set_custom_getter_compose(self._custom_getter)
tf.get_variable_scope().set_initializer(
optimize.get_variable_initializer(self.hparams))
with self._eager_var_store.as_default():
self._fill_problem_hparams_features(features)
summarize_features(features, num_shards=self._num_datashards)
sharded_features = self._shard_features(features)
sharded_logits, losses = self.model_fn_sharded(sharded_features)
if isinstance(sharded_logits, dict):
concat_logits = {}
for k, v in six.iteritems(sharded_logits):
concat_logits[k] = tf.concat(v, 0)
return concat_logits, losses
else:
return tf.concat(sharded_logits, 0), losses
@staticmethod
def has_symmetric_shards(model_name):
# model_fn is sharded symmetrically unless the model overrides body_sharded
# method to manually control the sharding.
model_cls = registry.model(model_name)
return not model_cls.use_body_sharded()
@staticmethod
def use_body_sharded():
return False
def body_sharded(self, sharded_features):
raise NotImplementedError("Models that wish to manually control sharding, "
"e.g. MoE models, should override body_sharded "
"and set use_body_sharded to True.")
def model_fn_sharded(self, sharded_features):
"""Estimator model_fn sharded along batch dimension.
Args:
sharded_features: {str: [Tensor]}. Features sharded along batch dimension.
Each list is the same length (== number of shards).
Returns:
sharded_logits: [Tensor]. Logits for each shard of examples.
losses: {str: 0-D Tensor}. Loss averaged across shards.
"""
dp = self._data_parallelism
# [{str: Tensor}]. Transpose of 'sharded_features'.
datashard_to_features = self._to_features_per_datashard(sharded_features)
if self.use_body_sharded():
if self.hparams.scheduled_sampling_prob > 0.0:
raise NotImplementedError(
"Scheduled sampling for non-sharded body only.")
# MoE models override body_sharded
transformed_features = dp(self.bottom, datashard_to_features)
body_out = self.body_sharded(
self._to_single_features_dict(transformed_features))
body_out, losses = self._normalize_body_output(body_out)
if "training" in losses:
log_info("Skipping T2TModel top and loss because training loss "
"returned from body")
sharded_logits = body_out
else:
if isinstance(body_out, dict):
sharded_logits = collections.OrderedDict()
sharded_losses = collections.OrderedDict()
for k, v in sorted(six.iteritems(body_out)):
sharded_logits[k] = dp(self.top, v, datashard_to_features)
sharded_losses[k] = dp(self.loss, sharded_logits[k],
datashard_to_features)
training_loss_dict = average_sharded_losses([({
"training": l
} for l in loss) for loss in sharded_losses.values()])
losses.update(training_loss_dict)
else:
sharded_logits = dp(self.top, body_out, datashard_to_features)
sharded_losses = dp(self.loss, sharded_logits, datashard_to_features)
if isinstance(sharded_losses, tuple):
nums, dens = sharded_losses
sharded_losses = zip(nums, dens)
training_loss_dict = average_sharded_losses([{
"training": loss
} for loss in sharded_losses])
losses.update(training_loss_dict)
else:
sharded_logits, sharded_losses = dp(self.model_fn, datashard_to_features)
sharded_logits, sharded_losses = dp(
self.maybe_scheduled_sampling,
datashard_to_features, sharded_logits, sharded_losses)
if isinstance(sharded_logits[0], dict):
temp_dict = {k: [] for k, _ in six.iteritems(sharded_logits[0])}
for k, _ in six.iteritems(sharded_logits[0]):
for l in sharded_logits:
temp_dict[k].append(l[k])
sharded_logits = temp_dict
losses = average_sharded_losses(sharded_losses)
return sharded_logits, losses
def model_fn(self, features):
with tf.variable_scope(tf.get_variable_scope(), use_resource=True) as vs:
self._add_variable_scope("model_fn", vs)
transformed_features = self.bottom(features)
if self.hparams.activation_dtype == "bfloat16":
for k, v in sorted(six.iteritems(transformed_features)):
if v.dtype == tf.float32:
transformed_features[k] = tf.cast(v, tf.bfloat16)
with tf.variable_scope("body") as body_vs:
self._add_variable_scope("body", body_vs)
log_info("Building model body")
body_out = self.body(transformed_features)
output, losses = self._normalize_body_output(body_out)
if "training" in losses:
log_info("Skipping T2TModel top and loss because training loss "
"returned from body")
logits = output
else:
logits = self.top(output, features)
losses["training"] = 0.0
if (self._hparams.mode != tf.estimator.ModeKeys.PREDICT and
self._hparams.mode != "attack"):
losses["training"] = self.loss(logits, features)
return logits, losses
def bottom(self, features):
"""Transforms features to feed into body.
Args:
features: dict of str to Tensor. Typically it is the preprocessed data
batch after Problem's preprocess_example().
Returns:
transformed_features: dict of same key-value pairs as features. The value
Tensors are newly transformed.
"""
if not self._problem_hparams:
log_warn("Without a Problem, T2TModel.bottom is a passthrough.")
return features
transformed_features = collections.OrderedDict()
all_previous_modalities = []
target_modality = _create_target_modality(self._problem_hparams.modality)
# Transform features via its corresponding modality.
for feature_name, modality in sorted(
six.iteritems(self._problem_hparams.modality)):
if feature_name not in features:
tf.logging.warning("Missing feature %s - ignoring." % feature_name)
continue
vocab_size = self._problem_hparams.vocab_size[feature_name]
if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"):
vocab_size += (-vocab_size) % self._hparams.vocab_divisor
modality_name = self._hparams.name.get(
feature_name,
modalities.get_name(modality))(self._hparams, vocab_size)
# Use if-else clauses to preserve behavior of previous changes: namely,
# the variable scope name for the targets feature if there is only one
# target modality; and to reuse variable scopes for only input modalities.
if feature_name in target_modality:
if len(target_modality) > 1:
variable_scope_name = "%s/%s" % (modality_name, feature_name)
else:
variable_scope_name = modality_name
bottom = self._hparams.bottom.get(
feature_name,
modalities.get_targets_bottom(modality))
# TODO(aidangomez): share variables?
with tf.variable_scope(variable_scope_name) as vs:
self._add_variable_scope(variable_scope_name, vs)
log_info("Transforming feature '%s' with %s.targets_bottom",
feature_name,
modality_name)
transformed_features[feature_name] = bottom(features[feature_name],
self._hparams,
vocab_size)
else:
bottom = self._hparams.bottom.get(feature_name,
modalities.get_bottom(modality))
do_reuse = modality_name in all_previous_modalities
with tf.variable_scope(modality_name, reuse=do_reuse) as vs:
self._add_variable_scope(modality_name, vs)
log_info("Transforming feature '%s' with %s.bottom",
feature_name,
modality_name)
transformed_features[feature_name] = bottom(features[feature_name],
self._hparams,
vocab_size)
all_previous_modalities.append(modality_name)
for key in features:
if key not in transformed_features:
# For features without a modality, we pass them along as is
transformed_features[key] = features[key]
else:
# Other features get passed along with the "raw" suffix
transformed_features[key + "_raw"] = features[key]
return transformed_features
def body(self, features):
"""Computes the targets' pre-logit activations given transformed inputs.
Most `T2TModel` subclasses will override this method.
Args:
features: dict of str to Tensor, where each Tensor has shape [batch_size,
..., hidden_size]. It typically contains keys `inputs` and `targets`.
Returns:
output: Tensor of pre-logit activations with shape [batch_size, ...,
hidden_size].
losses: Either single loss as a scalar, a list, a Tensor (to be averaged),
or a dictionary of losses. If losses is a dictionary with the key
"training", losses["training"] is considered the final training
loss and output is considered logits; self.top and self.loss will
be skipped.
"""
raise NotImplementedError("Abstract Method")
def _top_single(self, body_output, feature_name, features):
if not self._problem_hparams:
log_warn("Without a Problem, T2TModel.top is a passthrough.")
return body_output
modality = self._problem_hparams.modality[feature_name]
vocab_size = self._problem_hparams.vocab_size[feature_name]
if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"):
vocab_size += (-vocab_size) % self._hparams.vocab_divisor
name = self._hparams.name.get(
feature_name,
modalities.get_name(modality))(self._hparams, vocab_size)
with tf.variable_scope(name) as tm_vs:
self._add_variable_scope(tm_vs.name, tm_vs)
log_info("Transforming body output with %s.top", name)
top = self._hparams.top.get(feature_name, modalities.get_top(modality))
top_is_pointwise = getattr(top, "pointwise", False)
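      # A "pointwise" top maps each position independently, so at predict time
      # only the logits for the last position need to be computed.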
last_only = (top_is_pointwise and
self.hparams.mode == tf.estimator.ModeKeys.PREDICT and
not self.hparams.force_full_predict)
if not last_only:
logits = top(body_output, features.get("targets"),
self._hparams, vocab_size)
else:
# Take body outputs for the last position only, and targets too.
if "decode_loop_step" not in features:
last_position_body_output = tf.expand_dims(
body_output[:, -1, :, :], axis=[1])
last_position_targets = tf.expand_dims(
features["targets"][:, -1, :, :], axis=[1])
else:
body_output_shape = body_output.shape.as_list()
last_position_body_output = tf.slice(
body_output, [0, features["decode_loop_step"][0], 0, 0], [
body_output_shape[0], 1, body_output_shape[2],
body_output_shape[3]
])
target_shape = features["targets"].shape.as_list()
last_position_targets = tf.slice(
features["targets"], [0, features["decode_loop_step"][0], 0, 0],
[target_shape[0], 1, target_shape[2], target_shape[3]])
logits = top(last_position_body_output, last_position_targets,
self._hparams, vocab_size)
return logits
def top(self, body_output, features):
"""Computes logits given body output and features.
Args:
body_output: dict of str to Tensor, comprising one key-value pair for each
target. Each value denotes the target's pre-logit activations.
Alternatively, it may be a single Tensor denoting the pre-logits for
that target.
features: dict of str to Tensor. Typically it is the preprocessed data
batch after Problem's preprocess_example().
Returns:
logits: dict of str to Tensor, denoting each logits for each target; or
a single Tensor denoting the logits for that target.
When targets are generated at training time:
logits == {
"self_generated_targets": <generated targets tensor>
"logits": <original logits Tensor or dict>
}
"""
if isinstance(body_output, dict):
logits = {}
for k, v in six.iteritems(body_output):
# TODO(aidangomez): share variables here?
with tf.variable_scope(k) as top_vs:
self._add_variable_scope("top_%s" % k, top_vs)
logits[k] = self._top_single(v, k, features)
return logits
else:
return self._top_single(body_output, "targets", features)
def _loss_single(self, logits, feature_name, feature, weights=None):
# The current bfloat16 version still uses float32 for most parts of backward
# propagation to keep model quality, so cast back before computing the loss
# value.
if not self._problem_hparams:
log_warn(_no_problem_err("loss"))
return (tf.constant(0., dtype=tf.float32),
tf.constant(1., dtype=tf.float32))
# Calculate loss contribution.
modality = self._problem_hparams.modality[feature_name]
vocab_size = self._problem_hparams.vocab_size[feature_name]
if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"):
vocab_size += (-vocab_size) % self._hparams.vocab_divisor
loss = self._hparams.loss.get(feature_name, modalities.get_loss(modality))
targets_weights_fn = self._hparams.weights_fn.get(
"targets", modalities.get_weights_fn(modality))
if weights is None:
loss_num, loss_den = loss(logits, feature, self._hparams, vocab_size,
weights_fn=targets_weights_fn)
else:
def weights_fn(labels):
"""Per-token weights for loss."""
# Use target_weights_fn() given by modality as well as explicitly given
# weights.
modality_weights = targets_weights_fn(labels)
# Broadcast 'weights' along minor dimensions (TF's default is major).
explicit_weights = weights
if len(explicit_weights.shape) < len(modality_weights.shape):
explicit_weights = common_layers.expand_squeeze_to_nd(
weights, modality_weights.shape.ndims)
return explicit_weights * modality_weights
# Ensure that target.modality_loss() supports "weights_fn" keyword
# argument. If it doesn't and "weights" is specified, raise an exception.
argument_names = inspect.getargspec(loss).args
if "weights_fn" not in argument_names:
raise ValueError(
"Explicit 'weights' given but default loss for modality doesn't "
"support 'weights_fn' keyword argument: %s.loss(%s)." %
(modality, ", ".join(argument_names)))
loss_num, loss_den = loss(
logits, feature, self._hparams, vocab_size, weights_fn=weights_fn)
loss_num *= self._problem_hparams.loss_multiplier
if hasattr(self.hparams, "problem") and hasattr(
self.hparams.problem, "task_list"):
if weights is not None:
raise NotImplementedError("weights not yet implemented in "
"multitask setting.")
loss_num, loss_den, summaries = multi_problem.aggregate_task_losses(
self.hparams,
self._problem_hparams,
logits,
feature_name,
feature
)
for key, val in summaries:
tf.summary.scalar(key, val)
return loss_num, loss_den
def loss(self, logits, features):
if isinstance(logits, dict):
losses = {}
for k, v in six.iteritems(logits):
losses[k] = self._loss_single(
v,
k,
features[k],
weights=features.get(k + "_mask"))
n, d = losses[k]
if common_layers.should_generate_summaries():
tf.summary.scalar(k + "_loss", n / d)
tf.summary.scalar(k + "_loss_num", n)
tf.summary.scalar(k + "_loss_den", d)
if getattr(self.hparams, "visualize_logits_histogram", False):
hist = tf.summary.histogram
hist(k + "_predict", tf.argmax(tf.squeeze(v), axis=-1))
hist(k + "_targets", features[k])
return tf.add_n([n / d for n, d in losses.values()])
else:
return self._loss_single(
logits,
"targets",
features["targets"],
weights=features.get("targets_mask"))
def optimize(self, loss, num_async_replicas=1, use_tpu=False, variables=None):
"""Return a training op minimizing loss."""
lr = learning_rate.learning_rate_schedule(self.hparams)
if num_async_replicas > 1:
log_info("Dividing learning rate by num_async_replicas: %d",
num_async_replicas)
lr /= math.sqrt(float(num_async_replicas))
train_op = optimize.optimize(
loss, lr, self.hparams, use_tpu=use_tpu, variables=variables)
return train_op
def set_mode(self, mode):
"""Set hparams with the given mode."""
log_info("Setting T2TModel mode to '%s'", mode)
hparams = hparams_lib.copy_hparams(self._original_hparams)
hparams.add_hparam("mode", mode)
# When not in training mode, set all forms of dropout to zero.
if mode != tf.estimator.ModeKeys.TRAIN:
for key in hparams.values():
if key.endswith("dropout") or key == "label_smoothing":
log_info("Setting hparams.%s to 0.0", key)
setattr(hparams, key, 0.0)
self._hparams = hparams
def prepare_features_for_infer(self, features):
"""Called before inference to allow adding infer-specific features."""
pass
def eval_autoregressive(self, features=None, decode_length=50):
"""Autoregressive eval.
Quadratic time in decode_length.
Args:
      features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
Returns:
logits: `Tensor`
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
Contains a single key "training".
"""
results = self._slow_greedy_infer(features, decode_length=decode_length)
return results["logits"], results["losses"]
def _fill_problem_hparams_features(self, features):
if features is not None:
for k, v in sorted(
six.iteritems(problem_hparams_to_features(self._problem_hparams))):
if k not in features:
features[k] = tf.constant(v, name=k)
def infer(self,
features=None,
decode_length=50,
beam_size=1,
top_beams=1,
alpha=0.0,
use_tpu=False):
"""A inference method.
Quadratic time in decode_length.
Args:
      features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
      alpha: Float that controls the length penalty. The larger the alpha, the
        stronger the preference for longer translations.
use_tpu: bool, whether to build the inference graph for TPU.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": decoding log probs from the beam search,
None if using greedy decoding (beam_size=1)
}
if slow greedy decoding is used then the dict will also contain {
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`
}
"""
set_custom_getter_compose(self._custom_getter)
with self._eager_var_store.as_default():
# TODO(rsepassi): Make decoding work with real-valued model outputs
# (i.e. if the target modality is RealModality).
self.prepare_features_for_infer(features)
if not self.has_input and beam_size > 1:
log_warn("Beam searching for a model with no inputs.")
if not self.has_input and self.hparams.sampling_method != "random":
log_warn("Non-random sampling for a model with no inputs.")
self._fill_problem_hparams_features(features)
if self._problem_hparams:
target_modality = self._problem_hparams.modality["targets"]
if target_modality == modalities.ModalityType.CLASS_LABEL:
beam_size = 1 # No use to run beam-search for a single class.
if beam_size == 1:
log_info("Greedy Decoding")
results = self._greedy_infer(features, decode_length, use_tpu)
else:
log_info("Beam Decoding with beam size %d" % beam_size)
results = self._beam_decode(features, decode_length, beam_size,
top_beams, alpha, use_tpu)
return results
def _beam_decode(self,
features,
decode_length,
beam_size,
top_beams,
alpha,
use_tpu=False):
"""Beam search decoding.
Models should ideally implement a more efficient version of this function.
Args:
      features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
      alpha: Float that controls the length penalty. The larger the alpha, the
        stronger the preference for longer translations.
use_tpu: A bool, whether to do beam decode on TPU.
Returns:
samples: an integer `Tensor`. Top samples from the beam search
"""
return self._beam_decode_slow(features, decode_length, beam_size, top_beams,
alpha, use_tpu)
def _beam_decode_slow(self, features, decode_length, beam_size, top_beams,
alpha, use_tpu=False):
"""Slow version of Beam search decoding.
Quadratic time in decode_length.
Args:
      features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
      alpha: Float that controls the length penalty. The larger the alpha, the
        stronger the preference for longer translations.
use_tpu: A bool, whether to do slow beam decode on TPU.
Returns:
samples: an integer `Tensor`. Top samples from the beam search.
Raises:
NotImplementedError: If use_tpu is set to true.
"""
batch_size = common_layers.shape_list(features["inputs"])[0]
def symbols_to_logits_fn(ids, i=None):
"""Go from ids to logits."""
ids = tf.expand_dims(tf.expand_dims(ids, axis=2), axis=3)
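      # Drop the initial dummy id prepended by beam search and pad a zero on
      # the right so the time dimension keeps its original length.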
ids = tf.pad(ids[:, 1:], [[0, 0], [0, 1], [0, 0], [0, 0]])
if "partial_targets" in features:
pt = features["partial_targets"]
pt_length = common_layers.shape_list(pt)[1]
pt = tf.tile(pt, [1, beam_size])
pt = tf.reshape(pt, [batch_size * beam_size, pt_length, 1, 1])
ids = tf.concat([pt, ids], axis=1)
features["targets"] = ids
if i is not None:
features["decode_loop_step"] = i
self._coverage = None
logits, _ = self(features) # pylint: disable=not-callable
# now self._coverage is a coverage tensor for the first datashard.
# it has shape [batch_size] and contains floats between 0 and
# source_length.
if self._problem_hparams:
modality = self._problem_hparams.modality["targets"]
top = self._hparams.top.get("targets", modalities.get_top(modality))
if getattr(top, "pointwise", False):
return tf.squeeze(logits, axis=[1, 2, 3])
# -1 due to the pad above.
current_output_position = common_layers.shape_list(ids)[1] - 1
logits = logits[:, current_output_position, :, :]
return tf.squeeze(logits, axis=[1, 2])
def _clone_examples_for_beam(old_feature, n):
"""Clone each example n times."""
old_shape = common_layers.shape_list(old_feature)
assert len(old_shape) >= 1
# Expand the inputs in to the beam size.
feature = tf.expand_dims(old_feature, 1)
feature = tf.tile(feature, [1, n] + [1] * (len(old_shape) - 1))
new_shape = common_layers.shape_list(feature)
feature = tf.reshape(feature,
[new_shape[0] * new_shape[1]] + new_shape[2:])
return feature
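    # Beam search starts every beam from id 0 (padding).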
initial_ids = tf.zeros([batch_size], dtype=tf.int32)
# Clone select features multiple times to account for beam size.
old_features = {}
for feature_name in ["inputs", "knowledge"]:
if feature_name not in features:
continue
old_features[feature_name] = features[feature_name]
features[feature_name] = _clone_examples_for_beam(
features[feature_name], beam_size)
vocab_size = self._problem_hparams.vocab_size["targets"]
if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"):
vocab_size += (-vocab_size) % self._hparams.vocab_divisor
# Setting decode length to input length + decode_length
if "partial_targets" not in features:
inputs = features["inputs"]
decode_length = (common_layers.shape_list(inputs)[1] +
features.get("decode_length", decode_length))
ids, scores, _ = beam_search.beam_search(
symbols_to_logits_fn,
initial_ids,
beam_size,
decode_length,
vocab_size,
alpha,
stop_early=(top_beams == 1),
use_tpu=use_tpu)
    # Set features back to the unexpanded form so as not to confuse the
    # Estimator.
features.update(old_features)
# Return `top_beams` decodings (also remove initial id from the beam search)
# TODO(lukaszkaiser): make it work multi-problem.
if top_beams == 1:
samples = ids[:, 0, 1:]
else:
samples = ids[:, :top_beams, 1:]
return {"outputs": samples, "scores": scores}
def _greedy_infer(self, features, decode_length, use_tpu=False):
"""A greedy inference method.
Models should ideally implement a more efficient version of this function.
Args:
      features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
use_tpu: A bool, whether to build the inference graph for TPU.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": None
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`}
}
"""
if use_tpu:
return self._slow_greedy_infer_tpu(features, decode_length)
return self._slow_greedy_infer(features, decode_length)
def _slow_greedy_infer_tpu(self, features, decode_length):
"""A slow greedy inference method on TPU.
Quadratic time in decode_length.
Args:
      features: A map of string to `Tensor`.
decode_length: An integer, how many additional timesteps to decode.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": None
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`}
}
"""
if not features:
features = {}
inputs_old = None
if "inputs" in features and len(features["inputs"].shape) < 4:
inputs_old = features["inputs"]
features["inputs"] = tf.expand_dims(features["inputs"], 2)
if not self.has_input:
# Prepare partial targets.
# In either features["inputs"] or features["targets"].
# We force the outputs to begin with these sequences.
partial_targets = features.get("inputs")
if partial_targets is None:
partial_targets = features["targets"]
features["partial_targets"] = tf.to_int64(partial_targets)
    # Save the targets in a var and reassign it after the tf.while loop to
    # avoid having targets live in a 'while' frame. This ensures that targets,
    # when used in metric functions, stay in the same frame as other vars.
targets_old = features.get("targets", None)
target_modality = self._problem_hparams.modality["targets"]
def infer_step(i, recent_output, recent_logits, unused_loss):
"""Inference step."""
if not tf.executing_eagerly():
recent_output.set_shape([None, None, None, 1])
padded = tf.pad(recent_output, [[0, 0], [0, 1], [0, 0], [0, 0]])
features["targets"] = padded
# This is inefficient in that it generates samples at all timesteps,
# not just the last one, except if target_modality is pointwise.
features["decode_loop_step"] = i
samples, logits, losses = self.sample(features)
      # Concatenate the already-generated recent_output with the last timestep
      # of the newly-generated samples.
top = self._hparams.top.get("targets",
modalities.get_top(target_modality))
if getattr(top, "pointwise", False):
cur_sample = samples[:, -1, :, :]
else:
cur_sample = samples[:, i, :, :]
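      # alias_inplace_update writes along the leading dimension, so transpose
      # time to the front, write position i, then transpose back.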
samples = tf.transpose(recent_output, perm=[1, 0, 2, 3])
samples = inplace_ops.alias_inplace_update(samples, i,
tf.to_int64(cur_sample))
samples = tf.transpose(samples, perm=[1, 0, 2, 3])
if not tf.executing_eagerly():
samples.set_shape([None, None, None, 1])
# Assuming we have one shard for logits.
recent_logits = tf.transpose(recent_logits, perm=[1, 0, 2, 3, 4])
recent_logits = inplace_ops.alias_inplace_update(
recent_logits, i, tf.squeeze(logits[:, -1:], axis=1))
logits = tf.transpose(recent_logits, perm=[1, 0, 2, 3, 4])
loss = sum([l for l in losses.values() if l is not None])
return i + 1, samples, logits, loss
# Create an initial output tensor. This will be passed
# to the infer_step, which adds one timestep at every iteration.
if "partial_targets" in features:
initial_output = tf.to_int64(features["partial_targets"])
while len(initial_output.get_shape().as_list()) < 4:
initial_output = tf.expand_dims(initial_output, 2)
batch_size = common_layers.shape_list(initial_output)[0]
else:
batch_size = common_layers.shape_list(features["inputs"])[0]
initial_output = tf.zeros((batch_size, 0, 1, 1), dtype=tf.int64)
# Hack: foldl complains when the output shape is less specified than the
# input shape, so we confuse it about the input shape.
initial_output = tf.slice(initial_output, [0, 0, 0, 0],
common_layers.shape_list(initial_output))
target_modality = self._problem_hparams.modality["targets"]
if target_modality == modalities.ModalityType.CLASS_LABEL:
decode_length = 1
else:
if "partial_targets" in features:
prefix_length = common_layers.shape_list(features["partial_targets"])[1]
else:
prefix_length = common_layers.shape_list(features["inputs"])[1]
decode_length = prefix_length + decode_length
# Initial values of result, logits and loss.
result = tf.concat(
[initial_output,
tf.zeros([batch_size, decode_length, 1, 1], tf.int64)],
axis=1)
# tensor padded to [batch_size, decode_length, 1, 1, vocab_size]
vocab_size = self._problem_hparams.vocab_size["targets"]
if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"):
vocab_size += (-vocab_size) % self._hparams.vocab_divisor
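    # Pre-allocate the full logits buffer; infer_step fills in one timestep
    # per iteration via alias_inplace_update.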
logits = tf.zeros((batch_size, decode_length, 1, 1, vocab_size))
if not tf.executing_eagerly():
logits.set_shape([None, None, None, None, None])
loss = 0.0
def while_exit_cond(i, result, logits, loss): # pylint: disable=unused-argument
"""Exit the loop either if reach decode_length or EOS."""
not_overflow = i < decode_length
if self._problem_hparams.stop_at_eos:
def fn_not_eos():
        # Check if the last predicted element is an EOS
return tf.reduce_any(
tf.not_equal(
tf.squeeze(result[:, -1, :, :]), text_encoder.EOS_ID))
not_eos = tf.cond(
# We only check for early stopping if there is at least 1 element (
# otherwise not_eos will crash).
tf.not_equal(i, 0),
fn_not_eos,
lambda: True,
)
return tf.cond(
tf.equal(batch_size, 1),
# If batch_size == 1, we check EOS for early stopping.
lambda: tf.logical_and(not_overflow, not_eos),
# Else, just wait for max length
lambda: not_overflow)
return not_overflow
_, result, logits, loss = tf.while_loop(
while_exit_cond,
infer_step, [tf.constant(0), result, logits, loss],
shape_invariants=[
tf.TensorShape([]),
tf.TensorShape([batch_size, decode_length, 1, 1]),
tf.TensorShape([batch_size, decode_length, 1, 1, vocab_size]),
tf.TensorShape([]),
],
back_prop=False,
parallel_iterations=1)
if inputs_old is not None: # Restore to not confuse Estimator.
features["inputs"] = inputs_old
# Reassign targets back to the previous value.
if targets_old is not None:
features["targets"] = targets_old
losses = {"training": loss}
if "partial_targets" in features:
partial_target_length = common_layers.shape_list(
features["partial_targets"])[1]
result = tf.slice(result, [0, partial_target_length, 0, 0],
[-1, -1, -1, -1])
return {
"outputs": result,
"scores": None,
"logits": logits,
"losses": losses,
}
def _slow_greedy_infer(self, features, decode_length):
"""A slow greedy inference method.
Quadratic time in decode_length.
Args:
      features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": None
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`}
}
"""
if not features:
features = {}
inputs_old = None
if "inputs" in features and len(features["inputs"].shape) < 4:
inputs_old = features["inputs"]
features["inputs"] = tf.expand_dims(features["inputs"], 2)
if not self.has_input:
# Prepare partial targets.
# In either features["inputs"] or features["targets"].
# We force the outputs to begin with these sequences.
partial_targets = features.get("inputs")
if partial_targets is None:
partial_targets = features["targets"]
features["partial_targets"] = tf.to_int64(partial_targets)
    # Save the targets in a var and reassign it after the tf.while loop to
    # avoid having targets live in a 'while' frame. This ensures that targets,
    # when used in metric functions, stay in the same frame as other vars.
targets_old = features.get("targets", None)
target_modality = self._problem_hparams.modality["targets"]
def infer_step(recent_output, recent_logits, unused_loss):
"""Inference step."""
if not tf.executing_eagerly():
if self._target_modality_is_real:
dim = self._problem_hparams.vocab_size["targets"]
if dim is not None and hasattr(self._hparams, "vocab_divisor"):
dim += (-dim) % self._hparams.vocab_divisor
recent_output.set_shape([None, None, None, dim])
else:
recent_output.set_shape([None, None, None, 1])
padded = tf.pad(recent_output, [[0, 0], [0, 1], [0, 0], [0, 0]])
features["targets"] = padded
# This is inefficient in that it generates samples at all timesteps,
# not just the last one, except if target_modality is pointwise.
samples, logits, losses = self.sample(features)
# Concatenate the already-generated recent_output with last timestep
# of the newly-generated samples.
top = self._hparams.top.get("targets",
modalities.get_top(target_modality))
if getattr(top, "pointwise", False):
cur_sample = samples[:, -1, :, :]
else:
cur_sample = samples[:,
common_layers.shape_list(recent_output)[1], :, :]
if self._target_modality_is_real:
cur_sample = tf.expand_dims(cur_sample, axis=1)
samples = tf.concat([recent_output, cur_sample], axis=1)
else:
cur_sample = tf.to_int64(tf.expand_dims(cur_sample, axis=1))
samples = tf.concat([recent_output, cur_sample], axis=1)
if not tf.executing_eagerly():
samples.set_shape([None, None, None, 1])
# Assuming we have one shard for logits.
logits = tf.concat([recent_logits, logits[:, -1:]], 1)
loss = sum([l for l in losses.values() if l is not None])
return samples, logits, loss
# Create an initial output tensor. This will be passed
# to the infer_step, which adds one timestep at every iteration.
if "partial_targets" in features:
initial_output = tf.to_int64(features["partial_targets"])
while len(initial_output.get_shape().as_list()) < 4:
initial_output = tf.expand_dims(initial_output, 2)
batch_size = common_layers.shape_list(initial_output)[0]
else:
batch_size = common_layers.shape_list(features["inputs"])[0]
if self._target_modality_is_real:
dim = self._problem_hparams.vocab_size["targets"]
if dim is not None and hasattr(self._hparams, "vocab_divisor"):
dim += (-dim) % self._hparams.vocab_divisor
initial_output = tf.zeros((batch_size, 0, 1, dim), dtype=tf.float32)
else:
initial_output = tf.zeros((batch_size, 0, 1, 1), dtype=tf.int64)
# Hack: foldl complains when the output shape is less specified than the
# input shape, so we confuse it about the input shape.
initial_output = tf.slice(initial_output, [0, 0, 0, 0],
common_layers.shape_list(initial_output))
target_modality = self._problem_hparams.modality["targets"]
if target_modality == modalities.ModalityType.CLASS_LABEL:
decode_length = 1
else:
if "partial_targets" in features:
prefix_length = common_layers.shape_list(features["partial_targets"])[1]
else:
prefix_length = common_layers.shape_list(features["inputs"])[1]
decode_length = prefix_length + decode_length
# Initial values of result, logits and loss.
result = initial_output
vocab_size = self._problem_hparams.vocab_size["targets"]
if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"):
vocab_size += (-vocab_size) % self._hparams.vocab_divisor
if self._target_modality_is_real:
logits = tf.zeros((batch_size, 0, 1, vocab_size))
logits_shape_inv = [None, None, None, None]
else:
# tensor of shape [batch_size, time, 1, 1, vocab_size]
logits = tf.zeros((batch_size, 0, 1, 1, vocab_size))
logits_shape_inv = [None, None, None, None, None]
if not tf.executing_eagerly():
logits.set_shape(logits_shape_inv)
loss = 0.0
def while_exit_cond(result, logits, loss): # pylint: disable=unused-argument
"""Exit the loop either if reach decode_length or EOS."""
length = common_layers.shape_list(result)[1]
not_overflow = length < decode_length
if self._problem_hparams.stop_at_eos:
def fn_not_eos():
        return tf.not_equal(  # Check if the last predicted element is an EOS
tf.squeeze(result[:, -1, :, :]), text_encoder.EOS_ID)
not_eos = tf.cond(
# We only check for early stopping if there is at least 1 element (
# otherwise not_eos will crash).
tf.not_equal(length, 0),
fn_not_eos,
lambda: True,
)
return tf.cond(
tf.equal(batch_size, 1),
# If batch_size == 1, we check EOS for early stopping.
lambda: tf.logical_and(not_overflow, not_eos),
# Else, just wait for max length
lambda: not_overflow)
return not_overflow
result, logits, loss = tf.while_loop(
while_exit_cond,
infer_step, [result, logits, loss],
shape_invariants=[
tf.TensorShape([None, None, None, None]),
tf.TensorShape(logits_shape_inv),
tf.TensorShape([]),
],
back_prop=False,
parallel_iterations=1)
if inputs_old is not None: # Restore to not confuse Estimator.
features["inputs"] = inputs_old
# Reassign targets back to the previous value.
if targets_old is not None:
features["targets"] = targets_old
losses = {"training": loss}
if "partial_targets" in features:
partial_target_length = common_layers.shape_list(
features["partial_targets"])[1]
result = tf.slice(result, [0, partial_target_length, 0, 0],
[-1, -1, -1, -1])
return {
"outputs": result,
"scores": None,
"logits": logits,
"losses": losses,
}
def sample(self, features):
"""Run the model and extract samples.
Args:
      features: a map of string to `Tensor`.
Returns:
samples: an integer `Tensor`.
logits: a list of `Tensor`s, one per datashard.
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
"""
logits, losses = self(features) # pylint: disable=not-callable
if self._target_modality_is_real:
return logits, logits, losses # Raw numbers returned from real modality.
if self.hparams.sampling_method == "argmax":
samples = tf.argmax(logits, axis=-1)
else:
assert self.hparams.sampling_method == "random"
def multinomial_squeeze(logits, temperature=1.0):
logits_shape = common_layers.shape_list(logits)
reshaped_logits = (
tf.reshape(logits, [-1, logits_shape[-1]]) / temperature)
choices = tf.multinomial(reshaped_logits, 1)
choices = tf.reshape(choices, logits_shape[:-1])
return choices
samples = multinomial_squeeze(logits, self.hparams.sampling_temp)
return samples, logits, losses
def _shard_features(self, features): # pylint: disable=missing-docstring
sharded_features = {}
for k, v in sorted(six.iteritems(features)):
v = tf.convert_to_tensor(v)
v_shape = common_layers.shape_list(v)
if not v_shape:
v = tf.expand_dims(v, axis=-1)
v_shape = [1]
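      # Scalar features are tiled so the split below hands one copy to each
      # datashard.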
if v_shape == [1]:
v = tf.tile(v, tf.to_int32([self._num_datashards]))
sharded_features[k] = self._data_parallelism(
tf.identity, tf.split(v, self._num_datashards, 0))
return sharded_features
def _to_features_per_datashard(self, features):
datashard_features = []
assert len(features[list(features.keys())[0]]) == self._num_datashards
for d in range(self._num_datashards):
f = {k: v[d] for k, v in six.iteritems(features)}
datashard_features.append(f)
return datashard_features
def _to_single_features_dict(self, datashard_features):
assert len(datashard_features) == self._num_datashards
features = collections.defaultdict(list)
for feats in datashard_features:
for k, v in six.iteritems(feats):
features[k].append(v)
return features
@staticmethod
def get_train_hooks(model_name, hook_context):
model_cls = registry.model(model_name)
return model_cls.train_hooks(hook_context)
@staticmethod
def get_eval_hooks(model_name, hook_context):
model_cls = registry.model(model_name)
return model_cls.eval_hooks(hook_context)
@staticmethod
def make_estimator_model_fn(model_name,
hparams,
decode_hparams=None,
use_tpu=False):
model_cls = registry.model(model_name)
def wrapping_model_fn(features, labels, mode, params=None, config=None):
return model_cls.estimator_model_fn(
hparams,
features,
labels,
mode,
config=config,
params=params,
decode_hparams=decode_hparams,
use_tpu=use_tpu)
return wrapping_model_fn
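  # Illustrative use (assuming a registered model name such as "transformer"):
  #   model_fn = T2TModel.make_estimator_model_fn("transformer", hparams)
  #   estimator = tf.estimator.Estimator(model_fn=model_fn, model_dir=output_dir)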
@classmethod
def estimator_model_fn(cls,
hparams,
features,
labels,
mode,
config=None,
params=None,
decode_hparams=None,
use_tpu=False):
"""Model fn for Estimator.
Args:
hparams: HParams, model hyperparameters
features: dict<str name, Tensor feature>
labels: Tensor
mode: tf.estimator.ModeKeys
config: RunConfig, possibly with data_parallelism attribute
params: dict, may include batch_size, use_tpu
decode_hparams: HParams, used when mode == PREDICT.
use_tpu: A bool, whether to build the inference graph for TPU.
Returns:
      TPUEstimatorSpec if use_tpu is True, else EstimatorSpec
"""
if mode == tf.estimator.ModeKeys.TRAIN:
create_dummy_vars()
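    # Work on a copy so that mode-dependent adjustments (e.g. zeroed dropout)
    # do not leak back into the caller's hparams.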
hparams = hparams_lib.copy_hparams(hparams)
# Instantiate model
data_parallelism = None
if not use_tpu and config:
data_parallelism = config.data_parallelism
reuse = tf.get_variable_scope().reuse
model = cls(
hparams,
mode,
data_parallelism=data_parallelism,
decode_hparams=decode_hparams,
_reuse=reuse)
# PREDICT mode
if mode == tf.estimator.ModeKeys.PREDICT:
if use_tpu:
inputs = features.get("inputs")
if inputs is None:
inputs = features["targets"]
shape = inputs.get_shape().as_list()
if shape[0] is None:
shape[0] = decode_hparams.batch_size or hparams.batch_size
if shape[1] is None:
shape[1] = hparams.max_input_seq_length or hparams.max_length
inputs.set_shape(shape)
return model.estimator_spec_predict(features, use_tpu=use_tpu)
# TRAIN and EVAL modes
if hparams.eval_run_autoregressive and mode == tf.estimator.ModeKeys.EVAL:
logits, losses_dict = model.eval_autoregressive(features)
else:
logits, losses_dict = model(features) # pylint: disable=not-callable
# Support model-generated labels by overriding features["targets"] with
# logits["self_generated_targets"].
if isinstance(logits, dict) and "self_generated_targets" in logits:
# Overwrite 'features["targets"]' and 'labels'
# by logits["self_generated_targets"].
tf.logging.info("Replacing targets with model-provided targets.")
features["targets"] = labels = logits.pop("self_generated_targets")
assert list(logits.keys()) == ["logits"], (
# See "Returns" in the "top" method docstring for the expected
# "logits" format when targets are generated at training time.
"Expect only key 'logits' when there is 'self_generated_targets'. "
"Found {}".format(logits.keys())
)
# Recover the original logits tensor from the logits dict.
logits = logits["logits"] # Can be a tf.Tensor or a dict.
# Set known shapes
if common_layers.is_xla_compiled():
if isinstance(logits, dict):
for k, v in sorted(six.iteritems(logits)):
if "scalar/" in k:
continue
shape = v.get_shape().as_list()
if shape[0] is None:
shape[0] = params["batch_size"]
if shape[1] is None:
shape[1] = hparams.max_length
v.set_shape(shape)
else:
shape = logits.get_shape().as_list()
if shape[0] is None:
shape[0] = params["batch_size"]
if shape[1] is None:
shape[1] = hparams.max_length
logits.set_shape(shape)
assert "training" in losses_dict
# Attack mode
if mode == "attack":
return logits
# Summarize losses
model._summarize_losses(losses_dict) # pylint: disable=protected-access
# Accumulate losses
loss = sum(losses_dict[key] for key in sorted(losses_dict.keys()))
# EVAL mode
if mode == tf.estimator.ModeKeys.EVAL:
return model.estimator_spec_eval(features, logits, labels, loss,
losses_dict)
# TRAIN mode
assert mode == tf.estimator.ModeKeys.TRAIN
num_async_replicas = 1
if config and not use_tpu:
num_async_replicas = config.t2t_device_info["num_async_replicas"]
return model.estimator_spec_train(
loss, num_async_replicas=num_async_replicas, use_tpu=use_tpu)
def initialize_from_ckpt(self, ckpt_dir):
return initialize_from_ckpt(ckpt_dir=ckpt_dir, hparams=self._hparams)
def create_train_host_call(self):
return create_host_call(self.hparams.model_dir)
def create_eval_host_call(self):
return self.create_train_host_call()
def estimator_spec_train(self, loss, num_async_replicas=1, use_tpu=False):
"""Constructs `tf.estimator.EstimatorSpec` for TRAIN (training) mode."""
train_op = self.optimize(loss, num_async_replicas=num_async_replicas,
use_tpu=use_tpu)
if use_tpu:
if self._hparams.warm_start_from:
def scaffold_fn():
self.initialize_from_ckpt(self._hparams.warm_start_from)
return tf.train.Scaffold()
else:
scaffold_fn = None
# Note: important to call this before remove_summaries()
if self.hparams.tpu_enable_host_call:
host_call = self.create_train_host_call()
else:
host_call = None
remove_summaries()
return tf.contrib.tpu.TPUEstimatorSpec(
tf.estimator.ModeKeys.TRAIN,
loss=loss,
train_op=train_op,
host_call=host_call,
scaffold_fn=scaffold_fn)
else:
if self._hparams.warm_start_from:
self.initialize_from_ckpt(self._hparams.warm_start_from)
# When loading weights from a pre-trained model, you want to be able to
# load separate weights into the encoder and decoder.
if self._hparams.warm_start_from_second:
self.initialize_from_ckpt(self._hparams.warm_start_from_second)
return tf.estimator.EstimatorSpec(
tf.estimator.ModeKeys.TRAIN, loss=loss, train_op=train_op)
def estimator_spec_eval(self, features, logits, labels, loss, losses_dict):
"""Constructs `tf.estimator.EstimatorSpec` for EVAL (evaluation) mode."""
del losses_dict
hparams = self.hparams
if not hasattr(hparams, "problem"):
raise NotImplementedError(_no_problem_err("estimator_spec_eval"))
problem = hparams.problem
if common_layers.is_xla_compiled():
# Note: important to call this before remove_summaries()
if self.hparams.tpu_enable_host_call:
host_call = self.create_eval_host_call()
else:
host_call = None
remove_summaries()
eval_metrics_fn = create_tpu_eval_metrics_fn(problem, hparams)
batch_size = [feature.shape.as_list()[0] for _, feature
in features.items() if feature.shape.ndims][0]
      # Add a batch dimension to all features since TPU requires the batch
      # dimension on all tensors.
for name, feature in features.items():
if not feature.shape.as_list():
# All features must have a batch dimension
feature = tf.tile(tf.expand_dims(feature, 0), [batch_size])
features[name] = feature
eval_metrics_fn_args = dict(
logits=logits, # possibly a dict
labels=labels,
features=features, # dict
)
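      # TPUEstimatorSpec requires eval_metrics args as a flat dict of Tensors;
      # nested dicts are flattened here and rebuilt inside the metrics fn.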
eval_metrics_fn_flat_args = _flatten_dict(eval_metrics_fn_args)
return tf.contrib.tpu.TPUEstimatorSpec(
tf.estimator.ModeKeys.EVAL,
eval_metrics=(eval_metrics_fn, eval_metrics_fn_flat_args),
host_call=host_call,
loss=loss)
else:
task_list = [problem]
if hasattr(problem, "task_list"):
task_list = problem.task_list
eval_metrics_fns = metrics.create_evaluation_metrics(task_list, hparams)
eval_metrics = {}
for metric_name, metric_fn in six.iteritems(eval_metrics_fns):
if isinstance(logits, dict):
# the key is located in the center of metric_name: "metrics-%s/%s/%s"
k = metric_name.split("/")[1]
if k in logits:
eval_metrics[metric_name] = metric_fn(logits[k], features,
features[k])
else:
# We do not make it an error because we sometimes run models that
# predict only parts of the targets defined by the Problem class.
# For example, an autoencoder or pure-video model can run on a gym
# problem even if another model is also predicting other things,
# like actions or rewards.
tf.logging.warning("No key %s in logits for evaluation." % k)
else:
eval_metrics[metric_name] = metric_fn(logits, features,
features["targets"])
if isinstance(logits, dict):
predictions = logits
else:
predictions = {"predictions": logits}
evaluation_hooks = []
# Create a SummarySaverHook
eval_dir = os.path.join(
self.hparams.model_dir,
self.hparams.get("eval_dir_name", "eval"))
eval_summary_hook = tf.train.SummarySaverHook(
save_steps=1,
output_dir=eval_dir,
summary_op=tf.summary.merge_all())
evaluation_hooks.append(eval_summary_hook)
evaluation_hooks += problem.eval_hooks(features, logits, hparams)
return tf.estimator.EstimatorSpec(
tf.estimator.ModeKeys.EVAL,
predictions=predictions,
eval_metric_ops=eval_metrics,
evaluation_hooks=evaluation_hooks,
loss=loss)
def estimator_spec_predict(self, features, use_tpu=False):
"""Constructs `tf.estimator.EstimatorSpec` for PREDICT (inference) mode."""
decode_hparams = self._decode_hparams
top_beams = decode_hparams.beam_size if decode_hparams.return_beams else 1
infer_out = self.infer(
features,
beam_size=decode_hparams.beam_size,
top_beams=top_beams,
alpha=decode_hparams.alpha,
decode_length=decode_hparams.extra_length,
use_tpu=use_tpu)
if isinstance(infer_out, dict):
outputs = infer_out["outputs"]
scores = infer_out["scores"]
else:
outputs = infer_out
scores = None
# Workaround for "ValueError: prediction values must be from the default
# graph" during TPU model exporting.
# TODO(b/130501786): remove tf.identity once default graph mismatch is fixed
for name, feature in features.items():
features[name] = tf.identity(feature)
inputs = features.get("inputs")
if inputs is None:
inputs = features["targets"]
predictions = {
"outputs": outputs,
"scores": scores,
"inputs": inputs,
"targets": features.get("infer_targets"),
}
# Pass through remaining features
for name, feature in features.items():
if name not in list(predictions.keys()) + ["infer_targets"]:
if name == "decode_loop_step":
continue
if not feature.shape.as_list():
# All features must have a batch dimension
batch_size = common_layers.shape_list(outputs)[0]
feature = tf.tile(tf.expand_dims(feature, 0), [batch_size])
predictions[name] = feature
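      # Estimator predictions must be Tensors, so drop any non-Tensor entries
      # (e.g. a None "scores" from greedy decoding).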
_del_dict_non_tensors(predictions)
export_out = {"outputs": predictions["outputs"]}
if "scores" in predictions:
export_out["scores"] = predictions["scores"]
# Necessary to rejoin examples in the correct order with the Cloud ML Engine
# batch prediction API.
if "batch_prediction_key" in predictions:
export_out["batch_prediction_key"] = predictions["batch_prediction_key"]
export_outputs = {
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
tf.estimator.export.PredictOutput(export_out)
}
if use_tpu:
# Note: important to call this before remove_summaries()
if self.hparams.tpu_enable_host_call:
host_call = self.create_eval_host_call()
else:
host_call = None
remove_summaries()
return tf.contrib.tpu.TPUEstimatorSpec(
tf.estimator.ModeKeys.PREDICT,
predictions=predictions,
host_call=host_call,
export_outputs=export_outputs)
else:
return tf.estimator.EstimatorSpec(
tf.estimator.ModeKeys.PREDICT,
predictions=predictions,
export_outputs=export_outputs)
def _normalize_body_output(self, body_out):
if isinstance(body_out, tuple):
output, losses = body_out
if isinstance(losses, (list, tuple)):
losses = {"extra": tf.add_n([tf.reduce_mean(l) for l in losses])}
elif isinstance(losses, dict):
pass
else:
losses = {"extra": tf.reduce_mean(losses)}
else:
output = body_out
losses = {"extra": 0.0}
return output, losses
def _summarize_losses(self, losses_dict):
"""Adds `tf.summary`s to all terms in the losses dictionary."""
if common_layers.should_generate_summaries():
with tf.name_scope("losses"):
for loss_name, loss_val in sorted(losses_dict.items()):
tf.summary.scalar(loss_name, loss_val)
def maybe_scheduled_sampling(self, features, logits, losses):
"""Scheduled sampling.
Performs forward inference again with "targets" feature replaced with values
sampled from the model.
    This is the identity unless self.hparams.scheduled_sampling_prob > 0
    (the default is 0).
    **WARNING**: If hparams.scheduled_sampling_method == "parallel", this is
    not a faithful implementation of scheduled sampling. This implementation
    samples tokens for timestep t conditioned on gold tokens 1...t-1. A proper
    implementation must condition on a mix of gold and sampled tokens. Doing
    so is not efficient for models such as the Transformer.
Args:
features: {str: Tensor}. Features sharded along batch dimension.
logits: Tensor. Logits for each shard of data.
losses: 0-D Tensor or (num: 0-D Tensor, denom: 0-D Tensor). Loss Tensor
Returns:
new_logits: Tensor.
new_losses: {str: loss} where loss is one of (i) a 0-D Tensor or
(ii) a (num: 0-D Tensor, denom: 0-D Tensor) pair to be used in a
weighted average.
"""
hparams = self.hparams
problem_hparams = self._problem_hparams
# Only do scheduled sampling if requested.
if hparams.scheduled_sampling_prob == 0.0:
return (logits, losses)
# Only do scheduled sampling on language tasks.
modality = problem_hparams.modality["targets"]
if modality != modalities.ModalityType.SYMBOL:
assert hparams.scheduled_sampling_prob == 0, (
"Scheduled sampling only applies to ModalityType.SYMBOL. Set "
"hparams.scheduled_sampling_prob == 0.0.")
return (logits, losses)
# Only do scheduled sampling when training.
is_training = (hparams.mode == tf.estimator.ModeKeys.TRAIN)
if not is_training:
tf.logging.info("Running in %s mode. Not using scheduled sampling.",
hparams.mode)
return (logits, losses)
# Pad vocabulary if vocab size must be evenly divisible by vocab_divisor.
vocab_size = problem_hparams.vocab_size["targets"]
assert vocab_size is not None
assert hparams.vocab_divisor == 1
# TODO(duckworthd): Move to scheduled_sampling.py.
def sample(x):
"""Multinomial sampling from a n-dimensional tensor."""
samples = tf.multinomial(tf.reshape(x, [-1, vocab_size]), 1)
reshaped_samples = tf.reshape(samples, common_layers.shape_list(x)[:-1])
return tf.to_int32(reshaped_samples)
# TODO(duckworthd): Move to scheduled_sampling.py.
def mix_gold_sampled(gold_targets,
sampled_targets,
mixin_prob,
i,
prev_new_targets):
"""Interleave sampled and gold tokens randomly."""
# Resample each location iid.
should_use_sampled_targets = tf.less(
tf.random_uniform(common_layers.shape_list(sampled_targets)),
mixin_prob)
mixed_targets = tf.where(
should_use_sampled_targets,
sampled_targets,
gold_targets)
      # For timesteps before the current pass index, keep the tokens already
      # chosen in previous passes.
new_targets = tf.where(
is_later_timestep(gold_targets, i),
mixed_targets,
prev_new_targets)
return new_targets
# TODO(duckworthd): Move to scheduled_sampling.py.
def is_later_timestep(x, pass_idx):
"""Constructs mask based on timestep."""
assert x.shape.ndims == 4, x.shape
x_shape = tf.shape(x)
batch_size = x_shape[0]
num_timesteps = x_shape[1]
timesteps = tf.range(num_timesteps)
timesteps = tf.reshape(timesteps, [1, num_timesteps, 1, 1])
timesteps = tf.tile(timesteps, [batch_size, 1, 1, 1])
return tf.greater_equal(timesteps, pass_idx)
# TODO(duckworthd): Move to scheduled_sampling.py.
def parallel_scheduled_sampling_pass(
i, prev_new_targets, features, logits, mixin_prob):
"""Generate scheduled sampling results."""
sampled_targets = sample(logits)
new_targets = mix_gold_sampled(features["targets"],
sampled_targets,
mixin_prob,
i,
prev_new_targets)
new_targets = tf.stop_gradient(new_targets) # Treat new_targets as given.
new_features = copy.copy(features)
new_features["targets"] = new_targets
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
# Compute bottom() for new_targets.
#
# TODO(duckworthd): Only apply bottom to 'new_targets'.
new_transformed_features = self.bottom(new_features)
# Compute body.
with tf.variable_scope("body"):
new_body_outputs, new_losses = self._normalize_body_output(
self.body(new_transformed_features))
assert "training" not in new_losses
# Compute top.
new_logits = self.top(new_body_outputs, new_features)
# Compute loss. Use original features (== labels).
if (hparams.mode != tf.estimator.ModeKeys.PREDICT and
hparams.mode != "attack"):
new_losses["training"] = self.loss(new_logits, features)
else:
new_losses["training"] = 0.0
return new_targets, new_logits, new_losses
tf.logging.info("Using scheduled sampling.")
tf.logging.info("Warming scheduled sampling up with schedule: %s",
hparams.scheduled_sampling_warmup_schedule)
assert hparams.scheduled_sampling_prob == 1.0, (
"hparams.scheduled_sampling_prob must be 0 or 1.")
if hparams.scheduled_sampling_method == "sequential":
tf.logging.info("Using SEQUENTIAL scheduled sampling.")
assert hparams.scheduled_sampling_num_passes == 1, (
"hparams.scheduled_sampling_num_passes must equal 1 if "
"doing sequential scheduled sampling.")
return scheduled_sampling.sequential_scheduled_sampling_for_t2tmodel(
self, features)
elif hparams.scheduled_sampling_method == "parallel":
tf.logging.info("Using PARALLEL scheduled sampling.")
# TODO(duckworthd): Move this block to scheduled_sampling.py.
# Gradually increase over a warmup period. Lower numbers mean more gold
# tokens.
mixin_prob = scheduled_sampling.inverse_decay_mix_prob(
hparams.scheduled_sampling_warmup_schedule,
hparams.scheduled_sampling_gold_mixin_prob,
hparams.scheduled_sampling_warmup_steps)
# Apply scheduled sampling over N passes. The logits from the (n-1)-th
# pass will be mixed with gold tokens for conditioning in the n-th pass.
assert hparams.scheduled_sampling_num_passes > 0, (
"hparams.scheduled_sampling_num_passes must be > 0 if "
"hparams.scheduled_sampling_prob > 0.0")
new_logits = logits
new_losses = losses
prev_new_targets = features["targets"]
for i in range(hparams.scheduled_sampling_num_passes):
prev_new_targets, new_logits, new_losses = parallel_scheduled_sampling_pass(
i, prev_new_targets, features, new_logits, mixin_prob)
return new_logits, new_losses
else:
raise ValueError(
"Unknown scheduled_sampling_method = %s" % (
hparams.scheduled_sampling_method,))
def _with_timing(fn, msg, silent=False):
def fn_with_timing(*args, **kwargs):
start_time = time.time()
res = fn(*args, **kwargs)
if not silent:
log_info("Doing %s took %.3f sec." % (msg, time.time() - start_time))
return res
return fn_with_timing
def create_dummy_vars():
"""Dummy vars for restore to work when not using TPU codepath."""
var_names = set([v.name for v in tf.global_variables()])
if "losses_avg/problem_0/total_loss:0" in var_names:
return
with tf.variable_scope("losses_avg"):
with tf.variable_scope("problem_0"):
for var_name in ["total", "extra", "training"]:
tf.get_variable(
"%s_loss" % var_name, initializer=100.0, trainable=False)
with tf.variable_scope("train_stats"):
tf.get_variable("problem_0_steps", initializer=0, trainable=False)
# These metrics are implemented with py_funcs and therefore do not work with TPU
TPU_METRIC_BLACKLIST = set([
metrics.Metrics.APPROX_BLEU,
metrics.Metrics.ROUGE_2_F,
metrics.Metrics.ROUGE_L_F,
metrics.Metrics.IMAGE_SUMMARY,
])
def create_tpu_eval_metrics_fn(problem, model_hparams):
"""Create the metrics_fn that TPUEstimatorSpec expects."""
def reduce_dimensions(predictions, labels):
"""Reduce dimensions for high-dimensional predictions and labels."""
if len(predictions.get_shape()) > 5:
predictions_shape = common_layers.shape_list(predictions)
predictions = tf.reshape(
predictions, [predictions_shape[0], predictions_shape[1], -1,
predictions_shape[-1]])
labels_shape = common_layers.shape_list(labels)
labels = tf.reshape(
labels, [labels_shape[0], labels_shape[1], -1])
return predictions, labels
metric_fns = []
eval_metrics = problem.eval_metric_fns(model_hparams)
tm = _create_target_modality(problem.get_hparams(model_hparams).modality)
if isinstance(tm, dict):
for k, v in six.iteritems(tm):
weights_fn = modalities.get_weights_fn(v)
def make_metric_fn(metric_fn):
"""returns a metric_fn."""
def wrapped_metric_fn(logits, labels, features, weights_fn=weights_fn):
kwargs = {}
args, _, keywords, _ = inspect.getargspec(metric_fn)
if ("features" in args) or keywords:
kwargs["features"] = features
logits, labels = reduce_dimensions(logits, labels)
num, den = metric_fn(logits, labels, weights_fn=weights_fn, **kwargs)
return tf.metrics.mean(num, den)
return wrapped_metric_fn
for metric, metric_fn in six.iteritems(eval_metrics):
if metric in TPU_METRIC_BLACKLIST:
log_warn("Skipping eval metric %s in TPU_METRIC_BLACKLIST", metric)
continue
name = "%s/metrics-%s/%s" % (k, problem.name, metric)
metric_fns.append((name, make_metric_fn(metric_fn)))
else:
weights_fn = modalities.get_weights_fn(tm)
def make_metric_fn(metric_fn):
"""returns a metric fn."""
def wrapped_metric_fn(logits, labels, features):
kwargs = {}
args, _, keywords, _ = inspect.getargspec(metric_fn)
if ("features" in args) or keywords:
kwargs["features"] = features
logits, labels = reduce_dimensions(logits, labels)
num, den = metric_fn(logits, labels, weights_fn=weights_fn, **kwargs)
return tf.metrics.mean(num, den)
return wrapped_metric_fn
for metric, metric_fn in six.iteritems(eval_metrics):
if metric in TPU_METRIC_BLACKLIST:
log_warn("Skipping eval metric %s in TPU_METRIC_BLACKLIST", metric)
continue
name = "metrics-%s/%s" % (problem.name, metric)
metric_fns.append((name, make_metric_fn(metric_fn)))
def all_metrics_fn(**kwargs):
"""Construct metrics dictionary."""
original_kwargs = _unflatten_dict(kwargs, prefixes=["logits", "features"])
del kwargs
logits = original_kwargs["logits"]
labels = original_kwargs["labels"]
features = original_kwargs["features"]
del original_kwargs
metrics_dict = {}
for name, fn in metric_fns:
if isinstance(logits, dict) and isinstance(labels, dict):
for k, v in six.iteritems(logits):
metrics_dict["%s/%s" % (k, name)] = fn(v, labels[k], features)
elif isinstance(logits, dict):
tf.logging.warning("Logits is a dict, but labels is not; only "
"evaluating logits['targets'] against labels.")
metrics_dict["%s/%s" % ("targets", name)] = fn(logits["targets"],
labels, features)
else:
metrics_dict[name] = fn(logits, labels, features)
return metrics_dict
return all_metrics_fn
def remove_summaries():
"""Remove summaries from the default graph."""
g = tf.get_default_graph()
key = tf.GraphKeys.SUMMARIES
log_debug("Remove summaries %s" % str(g.get_collection(key)))
del g.get_collection_ref(key)[:]
assert not g.get_collection(key)
def create_host_call(model_dir):
"""Construct a host_call writing scalar summaries.
Args:
    model_dir: String path to the directory where training summaries are written.
Returns:
(fn, args) Pair to be called by TPUEstimator as the host_call.
"""
graph = tf.get_default_graph()
summaries = graph.get_collection(tf.GraphKeys.SUMMARIES)
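  # host_call arguments must carry an outer batch dimension, hence the reshape
  # of global_step to shape [1].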
gs_t = tf.reshape(tf.to_int32(tf.train.get_global_step()), [1])
summary_kwargs = collections.OrderedDict()
for t in summaries:
# TODO(aidangomez): enable ImageSummary support when we have a faster method
# see @shibow's comment in cl/202344570
if t.op.type not in ["ScalarSummary"]:
tf.logging.warn("Ignoring unsupported tf.Summary type %s" % t.op.type)
continue
name = t.op.name
tensor = t.op.inputs[1]
if t.op.type == "ScalarSummary":
assert tensor.shape.is_compatible_with([])
if tensor.dtype == tf.int64:
tensor = tf.to_int32(tensor)
summary_kwargs["ScalarSummary" + name] = tf.reshape(tensor, [1])
elif t.op.type == "ImageSummary":
# TODO(aidangomez): as we move to support more types, update
# common_layers.tpu_safe_image_summary
if tensor.dtype != tf.float32:
tf.logging.warn(
"Currently T2T on TPU only supports ImageSummary of "
"tf.float32-type Tensors. Skipping Tensor "
"%s with dtype %s..." % (tensor.name, tensor.dtype))
continue
# tensor = tf.to_float(tensor)
summary_kwargs["ImageSummary" + name] = tensor
# When no supported summaries are found, don't create host_call. Otherwise,
# TPU outfeed queue would enqueue global_step while host_call doesn't dequeue
  # it, eventually causing a hang.
if not summary_kwargs:
return None
summary_kwargs["global_step"] = gs_t
log_info("summary_kwargs %s" % str(summary_kwargs))
def host_call_fn(**kwargs):
"""Training host call. Creates summaries for training metrics.
Args:
      **kwargs: Dict of {str: Tensor}, with `Tensor` of shape `[batch]`. Must
contain key "global_step" with value of current global_step Tensor.
Returns:
List of summary ops to run on the CPU host.
"""
gs = tf.to_int64(kwargs.pop("global_step")[0])
with tf.contrib.summary.create_file_writer(model_dir).as_default():
with tf.contrib.summary.always_record_summaries():
# We need to use tf.contrib.summary in order to feed the `step`.
for name, value in sorted(six.iteritems(kwargs)):
if name.startswith("ScalarSummary"):
name = name[len("ScalarSummary"):]
tf.contrib.summary.scalar(
name, tf.reduce_mean(tf.to_float(value)), step=gs)
elif name.startswith("ImageSummary"):
name = name[len("ImageSummary"):]
tf.contrib.summary.image(name, value, step=gs)
return tf.contrib.summary.all_summary_ops()
return (host_call_fn, summary_kwargs)
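def _example_tpu_spec_with_host_call(model_dir, mode, loss, train_op):
  """Usage sketch added for exposition; not part of the original module.
  Shows roughly how the (fn, args) pair from create_host_call is wired into a
  TF 1.x TPU estimator spec. The argument names here are illustrative
  assumptions, not an API defined by this file.
  """
  # Capture the in-graph summaries first, then drop them so they are not left
  # in the TPU graph (see remove_summaries above).
  host_call = create_host_call(model_dir)
  remove_summaries()
  return tf.contrib.tpu.TPUEstimatorSpec(
      mode=mode, loss=loss, train_op=train_op, host_call=host_call)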
def _del_dict_non_tensors(d):
for k in list(d.keys()):
if not isinstance(d[k], tf.Tensor):
del d[k]
class DummyVariableStore(object):
@contextlib.contextmanager
def as_default(self):
yield
def create_eager_var_store():
if tf.executing_eagerly():
return variable_scope.EagerVariableStore()
else:
return DummyVariableStore()
def average_sharded_losses(sharded_losses):
"""Average losses across datashards.
Args:
sharded_losses: list<dict<str loss_name, Tensor loss>>. The loss
can be a single Tensor or a 2-tuple (numerator and denominator).
Returns:
losses: dict<str loss_name, Tensor avg_loss>
"""
losses = {}
for loss_name in sorted(sharded_losses[0]):
all_shards = [shard_losses[loss_name] for shard_losses in sharded_losses]
if isinstance(all_shards[0], tuple):
sharded_num, sharded_den = zip(*all_shards)
mean_loss = (
tf.add_n(sharded_num) / tf.maximum(
tf.cast(1.0, sharded_den[0].dtype), tf.add_n(sharded_den)))
else:
mean_loss = tf.reduce_mean(all_shards)
losses[loss_name] = mean_loss
return losses
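def _example_average_sharded_losses():
  """Tiny worked example added for exposition; not part of the original API."""
  sharded = [
      {"training": (tf.constant(6.0), tf.constant(2.0))},
      {"training": (tf.constant(2.0), tf.constant(2.0))},
  ]
  # Numerators and denominators are summed separately:
  # (6.0 + 2.0) / max(1.0, 2.0 + 2.0) == 2.0.
  return average_sharded_losses(sharded)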
def summarize_features(features, num_shards=1):
"""Generate summaries for features."""
if not common_layers.should_generate_summaries():
return
with tf.name_scope("input_stats"):
for (k, v) in sorted(six.iteritems(features)):
if (isinstance(v, tf.Tensor) and (v.get_shape().ndims > 1) and
(v.dtype != tf.string)):
tf.summary.scalar("%s_batch" % k, tf.shape(v)[0] // num_shards)
tf.summary.scalar("%s_length" % k, tf.shape(v)[1])
nonpadding = tf.to_float(tf.not_equal(v, 0))
nonpadding_tokens = tf.reduce_sum(nonpadding)
tf.summary.scalar("%s_nonpadding_tokens" % k, nonpadding_tokens)
tf.summary.scalar("%s_nonpadding_fraction" % k,
tf.reduce_mean(nonpadding))
_already_logged = set()
def _eager_log(level, *args):
if tf.executing_eagerly() and args in _already_logged:
return
_already_logged.add(args)
getattr(tf.logging, level)(*args)
def log_debug(*args):
_eager_log("debug", *args)
def log_info(*args):
_eager_log("info", *args)
def log_warn(*args):
_eager_log("warn", *args)
def _compose_custom_getters(getter_a, getter_b):
"""Compose two custom getters.
Example use:
tf.get_variable_scope().set_custom_getter(
compose_custom_getters(tf.get_variable_scope().custom_getter, new_getter))
This composes getters in the same way as creating a new variable scope with
the new_getter, but it does not actually create a new variable scope.
Args:
getter_a: a custom getter - generally from the existing variable scope.
getter_b: a custom getter
Returns:
a custom getter
"""
if not getter_a:
return getter_b
if not getter_b:
return getter_a
def getter_fn(getter, *args, **kwargs):
return getter_b(functools.partial(getter_a, getter), *args, **kwargs)
return getter_fn
def set_custom_getter_compose(custom_getter):
"""Set a custom getter in the current variable scope.
Do not overwrite the existing custom getter - rather compose with it.
Args:
custom_getter: a custom getter.
"""
tf.get_variable_scope().set_custom_getter(
_compose_custom_getters(tf.get_variable_scope().custom_getter,
custom_getter))
def _create_target_modality(modality_dict):
# TODO(trandustin): We require this in order to apply methods utilized
# differently for modalities which are "targets"
# (e.g., modality.target_bottom). In the future, remove need for this
# behavior.
return {k: v for k, v in six.iteritems(modality_dict) if "target" in k
and k != "targets_segmentation" and k != "targets_position"}
def initialize_from_ckpt(ckpt_dir, hparams):
"""Initialize variables from given directory."""
model_dir = hparams.get("model_dir", None)
already_has_ckpt = (
model_dir and tf.train.latest_checkpoint(model_dir) is not None)
if already_has_ckpt:
return
tf.logging.info("Checkpoint dir: %s", ckpt_dir)
reader = tf.contrib.framework.load_checkpoint(ckpt_dir)
variable_map = {}
for var in tf.contrib.framework.get_trainable_variables():
var_name = var.name.split(":")[0]
if reader.has_tensor(var_name):
tf.logging.info("Loading variable from checkpoint: %s", var_name)
variable_map[var_name] = var
else:
tf.logging.info("Cannot find variable in checkpoint, skipping: %s",
var_name)
tf.train.init_from_checkpoint(ckpt_dir, variable_map)
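# Usage note added for exposition: initialize_from_ckpt is typically invoked
# once, after the model graph has been built but before training starts, so
# that trainable variables with matching names are warm-started from the
# pretrained checkpoint; if model_dir already holds a checkpoint it is a no-op.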
| 38.9
| 84
| 0.661397
|
567e60a09bd6eb96948b4529e2d3879907145186
| 2,860
|
py
|
Python
|
freezer_api/api/v1/clients.py
|
ctpegasus/freezer-api
|
b784327252ac6132a4d3b87c50e9a99c70d6c938
|
[
"Apache-2.0"
] | null | null | null |
freezer_api/api/v1/clients.py
|
ctpegasus/freezer-api
|
b784327252ac6132a4d3b87c50e9a99c70d6c938
|
[
"Apache-2.0"
] | null | null | null |
freezer_api/api/v1/clients.py
|
ctpegasus/freezer-api
|
b784327252ac6132a4d3b87c50e9a99c70d6c938
|
[
"Apache-2.0"
] | null | null | null |
"""
(c) Copyright 2014,2015 Hewlett-Packard Development Company, L.P.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import falcon
from freezer_api.api.common import resource
from freezer_api.common import exceptions as freezer_api_exc
from freezer_api import policy
class ClientsCollectionResource(resource.BaseResource):
"""
Handler for endpoint: /v1/clients
"""
def __init__(self, storage_driver):
self.db = storage_driver
@policy.enforce('clients:get_all')
def on_get(self, req, resp):
# GET /v1/clients(?limit,offset) Lists clients
user_id = req.get_header('X-User-ID')
offset = req.get_param_as_int('offset', min=0) or 0
limit = req.get_param_as_int('limit', min=1) or 10
search = self.json_body(req)
obj_list = self.db.get_client(user_id=user_id, offset=offset,
limit=limit, search=search)
resp.body = {'clients': obj_list}
@policy.enforce('clients:create')
def on_post(self, req, resp):
# POST /v1/clients Creates client entry
doc = self.json_body(req)
if not doc:
raise freezer_api_exc.BadDataFormat(
message='Missing request body')
user_id = req.get_header('X-User-ID')
client_id = self.db.add_client(
user_id=user_id, doc=doc)
resp.status = falcon.HTTP_201
resp.body = {'client_id': client_id}
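# Request sketch added for exposition (the JSON payload below is an assumed
# example, not the canonical freezer client schema):
#
#   GET  /v1/clients?limit=10&offset=0            -> 200 {"clients": [...]}
#   POST /v1/clients {"client_id": "x", "hostname": "node-1"}
#                                                 -> 201 {"client_id": "..."}
#
# Both handlers take the caller identity from the X-User-ID header.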
class ClientsResource(resource.BaseResource):
"""
Handler for endpoint: /v1/clients/{client_id}
"""
def __init__(self, storage_driver):
self.db = storage_driver
@policy.enforce('clients:get')
def on_get(self, req, resp, client_id):
        # GET /v1/clients/{client_id}
        # Returns the specified client for the calling user
user_id = req.get_header('X-User-ID') or ''
obj = self.db.get_client(user_id=user_id, client_id=client_id)
if obj:
resp.body = obj[0]
else:
resp.status = falcon.HTTP_404
@policy.enforce('clients:delete')
def on_delete(self, req, resp, client_id):
        # DELETE /v1/clients/{client_id}     Deletes the specified client
user_id = req.get_header('X-User-ID')
self.db.delete_client(
user_id=user_id, client_id=client_id)
resp.body = {'client_id': client_id}
resp.status = falcon.HTTP_204
| 34.457831
| 73
| 0.658392
|
ad2127d4dc5efaff717d33c78733f3088590c4b8
| 12,902
|
py
|
Python
|
advancedmovieselection/src/MoviePreview.py
|
wedebe/enigma2-plugins
|
58e1897866ad65294283970e96e5f2841c3cb6e2
|
[
"OLDAP-2.3"
] | null | null | null |
advancedmovieselection/src/MoviePreview.py
|
wedebe/enigma2-plugins
|
58e1897866ad65294283970e96e5f2841c3cb6e2
|
[
"OLDAP-2.3"
] | null | null | null |
advancedmovieselection/src/MoviePreview.py
|
wedebe/enigma2-plugins
|
58e1897866ad65294283970e96e5f2841c3cb6e2
|
[
"OLDAP-2.3"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Advanced Movie Selection for Dreambox-Enigma2
#
# The plugin is built on the basis of a number of individual plugins (thanks to all of their authors for the code)
# Coded by JackDaniel and cmikula (c)2011
# Support: www.i-have-a-dreambox.com
#
# This plugin is licensed under the Creative Commons
# Attribution-NonCommercial-ShareAlike 3.0 Unported
# License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc-sa/3.0/ or send a letter to Creative
# Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA.
#
# Alternatively, this plugin may be distributed and executed on hardware which
# is licensed by Dream Multimedia GmbH.
#
# This plugin is NOT free software. It is open source, you are allowed to
# modify it (if you keep the license), but it may not be commercially
# distributed other than under the conditions noted above.
#
from Components.AVSwitch import AVSwitch
from Components.Pixmap import Pixmap
from enigma import ePicLoad, gPixmapPtr, eTimer
from Tools.Directories import fileExists
import os
from Components.config import config
from Source.ServiceProvider import eServiceReferenceDvd, getServiceInfoValue, ServiceCenter, eServiceReferenceBludisc
from Source.ISOInfo import ISOInfo
from enigma import iServiceInformation, eServiceReference
from os import environ
from Tools.Directories import resolveFilename, SCOPE_CURRENT_PLUGIN
nocover = None
class MoviePreview():
def __init__(self, session):
self.onHide.append(self.hideDialog)
self["CoverPreview"] = Pixmap()
self.old_service = None
self.working = False
self.picParam = None
self.picload = ePicLoad()
self.picload.PictureData.get().append(self.showPreviewCallback)
self.onLayoutFinish.append(self.layoutFinish)
self.onClose.append(self.__onClose)
global nocover
if environ["LANGUAGE"] == "de" or environ["LANGUAGE"] == "de_DE":
nocover = resolveFilename(SCOPE_CURRENT_PLUGIN, "Extensions/AdvancedMovieSelection/images/nocover_de.png")
else:
nocover = resolveFilename(SCOPE_CURRENT_PLUGIN, "Extensions/AdvancedMovieSelection/images/nocover_en.png")
def __onClose(self):
del self.picload
def layoutFinish(self):
sc = AVSwitch().getFramebufferScale()
self.picload.setPara((self["CoverPreview"].instance.size().width(), self["CoverPreview"].instance.size().height(), sc[0], sc[1], False, 1, "#ff000000"))
self.cpX = self["CoverPreview"].instance.position().x()
self.cpY = self["CoverPreview"].instance.position().y()
self.cpW = self["CoverPreview"].instance.size().width()
self.cpH = self["CoverPreview"].instance.size().height()
self.piconX = self.cpX + int(self.cpW / 2) - int(100 / 2)
self.piconY = self.cpY + int(self.cpH / 2) - int(60 / 2)
def loadPreview(self, serviceref):
self.hideDialog()
if serviceref is None:
empty = gPixmapPtr()
self["CoverPreview"].instance.setPixmap(empty)
return
path = serviceref.getPath()
if path.endswith("/"):
if fileExists(path + ".jpg"):
path += ".jpg"
elif config.AdvancedMovieSelection.usefoldername.value:
path = path[:-1] + ".jpg"
else:
path = path + "folder.jpg"
elif os.path.isfile(path):
path = os.path.splitext(path)[0] + ".jpg"
else:
path = path + ".jpg"
self.working = True
self["CoverPreview"].setPosition(self.cpX, self.cpY)
if fileExists(path):
self.picload.startDecode(path)
return
series_path = os.path.join(os.path.dirname(path), "series.jpg")
if os.path.exists(series_path):
self.picload.startDecode(series_path)
return
if serviceref.getPath().endswith(".ts") and config.AdvancedMovieSelection.show_picon.value:
picon = getServiceInfoValue(serviceref, iServiceInformation.sServiceref).rstrip(':').replace(':', '_') + ".png"
piconpath = os.path.join(config.AdvancedMovieSelection.piconpath.value, picon)
if os.path.exists(piconpath):
if config.AdvancedMovieSelection.piconsize.value:
self["CoverPreview"].instance.setPixmapFromFile(piconpath)
self["CoverPreview"].setPosition(self.piconX, self.piconY)
else:
self.picload.startDecode(piconpath)
return
self.picload.startDecode(nocover)
def showPreviewCallback(self, picInfo=None):
if picInfo:
ptr = self.picload.getData()
if ptr != None and self.working:
self["CoverPreview"].instance.setPixmap(ptr)
self.working = False
def hideDialog(self):
self.working = False
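# Summary of the cover lookup order in loadPreview (comment added for clarity):
#   1. a ".jpg" matching the recording (or the folder cover for directories),
#   2. a "series.jpg" in the same directory,
#   3. the channel picon for ".ts" recordings, if enabled in the settings,
#   4. the bundled nocover placeholder image.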
from Screens.Screen import Screen
from enigma import getDesktop
class DVDOverlay(Screen):
def __init__(self, session, args=None):
desktop_size = getDesktop(0).size()
DVDOverlay.skin = """<screen name="DVDOverlay" position="0,0" size="%d,%d" flags="wfNoBorder" zPosition="-1" backgroundColor="transparent" />""" % (desktop_size.width(), desktop_size.height())
Screen.__init__(self, session)
from ServiceReference import ServiceReference
from Screens.InfoBarGenerics import InfoBarCueSheetSupport
from Source.ServiceProvider import CueSheet
class VideoPreview():
def __init__(self):
self.fwd_timer = eTimer()
self.fwd_timer.timeout.get().append(self.fwd)
self.dvd_preview_timer = eTimer()
self.dvd_preview_timer.timeout.get().append(self.playLastDVD)
self.video_preview_timer = eTimer()
self.video_preview_timer.timeout.get().append(self.playMovie)
self.service = None
self.currentlyPlayingService = None
self.cut_list = None
self.lastService = self.session.nav.getCurrentlyPlayingServiceReference()
self.updateVideoPreviewSettings()
self.onClose.append(self.__playLastService)
self.dvdScreen = self.session.instantiateDialog(DVDOverlay)
def updateVideoPreviewSettings(self):
self.enabled = config.AdvancedMovieSelection.video_preview.value
if not self.enabled:
self.__playLastService()
def stopCurrentlyPlayingService(self):
if self.currentlyPlayingService:
if isinstance(self.currentlyPlayingService, eServiceReferenceDvd):
subs = self.getServiceInterface("subtitle")
if subs:
subs.disableSubtitles(self.session.current_dialog.instance)
self.session.nav.stopService()
cue = CueSheet(self.currentlyPlayingService)
cue.setCutList(self.cut_list)
self.currentlyPlayingService = None
def setNewCutList(self, cut_list):
self.cut_list = cut_list
def jumpForward(self):
self.seekRelativ(config.AdvancedMovieSelection.video_preview_jump_time.value)
def jumpBackward(self):
jumptime = config.AdvancedMovieSelection.video_preview_jump_time.value
self.seekRelativ(-jumptime)
def togglePreviewStatus(self, service=None):
self.enabled = not self.enabled
if not self.currentlyPlayingService:
self.enabled = True
self.__playLastService()
if self.enabled and service:
self.service = service
self.playMovie()
def seekRelativ(self, minutes):
if self.currentlyPlayingService:
self.doSeekRelative(minutes * 60 * 90000)
def getSeek(self):
service = self.session.nav.getCurrentService()
if service is None:
return None
seek = service.seek()
if seek is None or not seek.isCurrentlySeekable():
return None
return seek
def doSeekRelative(self, pts):
seekable = self.getSeek()
if seekable is None:
return
seekable.seekRelative(pts < 0 and -1 or 1, abs(pts))
def playMovie(self):
if self.service and self.enabled:
if self.service.flags & eServiceReference.mustDescent or isinstance(self.service, eServiceReferenceBludisc):
print "Skipping video preview"
self.__playLastService()
return
from MoviePlayer import playerChoice
if playerChoice and playerChoice.isPlaying():
print "Skipping video preview"
return
cpsr = self.session.nav.getCurrentlyPlayingServiceReference()
if cpsr and cpsr == self.service:
return
#if not self.lastService:
# self.lastService = self.session.nav.getCurrentlyPlayingServiceReference()
self.stopCurrentlyPlayingService()
if isinstance(self.service, eServiceReferenceDvd):
if self.service.isIsoImage():
if ISOInfo().getFormatISO9660(self.service) != ISOInfo.DVD:
print "Skipping video preview"
self.__playLastService()
return
newref = eServiceReference(4369, 0, self.service.getPath())
self.session.nav.playService(newref)
subs = self.getServiceInterface("subtitle")
if subs:
subs.enableSubtitles(self.dvdScreen.instance, None)
else:
self.session.nav.playService(self.service)
print "play", self.service.getPath()
self.currentlyPlayingService = self.service
seekable = self.getSeek()
if seekable:
try:
cue = CueSheet(self.service)
self.cut_list = cue.getCutList()
length, last = self.getCuePositions()
stop_before_end_time = int(config.AdvancedMovieSelection.stop_before_end_time.value)
if stop_before_end_time > 0:
if (((length) - (last / 90000)) / 60) < stop_before_end_time:
return
if last > 0 and config.AdvancedMovieSelection.video_preview_marker.value:
if self.service.getPath().endswith('ts'):
seekable.seekTo(last)
else:
self.minutes = long(last / 90000 / 60)
if isinstance(self.service, eServiceReferenceDvd):
self.resume_point = last
self.dvd_preview_timer.start(1000, True)
return
self.fwd_timer.start(1000, True)
except Exception, e:
print e
def fwd(self):
self.seekRelativ(self.minutes)
def getServiceInterface(self, iface):
service = self.session.nav.getCurrentService()
if service:
attr = getattr(service, iface, None)
if callable(attr):
return attr()
return None
def playLastDVD(self, answer=True):
print "playLastDVD", self.resume_point
service = self.session.nav.getCurrentService()
if service:
if answer == True:
seekable = self.getSeek()
if seekable:
seekable.seekTo(self.resume_point)
pause = service.pause()
pause.unpause()
def preparePlayMovie(self, service, event):
if not self.execing or not self.enabled:
return
self.service = service
if service:
serviceHandler = ServiceCenter.getInstance()
info = serviceHandler.info(self.service)
service = ServiceReference(info.getInfoString(self.service, iServiceInformation.sServiceref))
self.video_preview_timer.start(config.AdvancedMovieSelection.video_preview_delay.value * 1000, True)
def getCuePositions(self):
length = 0
last_pos = 0
for (pts, what) in self.cut_list:
if what == 1 == InfoBarCueSheetSupport.CUT_TYPE_OUT:
length = pts / 90000
elif what == InfoBarCueSheetSupport.CUT_TYPE_LAST:
last_pos = pts
if length == 0:
info = ServiceCenter.getInstance().info(self.currentlyPlayingService)
if info:
length = info.getLength(self.currentlyPlayingService)
return [length, last_pos]
def getCurrentlyPlayingService(self):
return self.currentlyPlayingService
def __playLastService(self):
self.stopCurrentlyPlayingService()
if self.lastService:
self.session.nav.playService(self.lastService)
| 41.352564
| 200
| 0.626027
|
e5d6e9021b9ec926bdf0e1662011facda64c92b0
| 1,032
|
py
|
Python
|
scripts/bed_intersect_basewise.py
|
tweirick/bx-python
|
f16a57e9f0a133ab4d62aed6fec087b8ce4ec848
|
[
"MIT"
] | null | null | null |
scripts/bed_intersect_basewise.py
|
tweirick/bx-python
|
f16a57e9f0a133ab4d62aed6fec087b8ce4ec848
|
[
"MIT"
] | null | null | null |
scripts/bed_intersect_basewise.py
|
tweirick/bx-python
|
f16a57e9f0a133ab4d62aed6fec087b8ce4ec848
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Find regions of first bed file that overlap regions in a second bed file. This
program performs a base-by-base intersection, so only runs of bases that are
covered in both of the inputs will be output.
usage: %prog bed_file_1 bed_file_2
"""
from __future__ import print_function
import sys
from warnings import warn
from bx.bitset import *
from bx.bitset_builders import *
from bx.cookbook import doc_optparse
options, args = doc_optparse.parse( __doc__ )
try:
in_fname, in2_fname = args
except:
doc_optparse.exit()
bits1 = binned_bitsets_from_file( open( in_fname ) )
bits2 = binned_bitsets_from_file( open( in2_fname ) )
bitsets = dict()
for key in bits1:
if key in bits2:
bits1[key].iand( bits2[key] )
bitsets[key] = bits1[key]
for chrom in bitsets:
bits = bitsets[chrom]
end = 0
while 1:
start = bits.next_set( end )
if start == bits.size: break
end = bits.next_clear( start )
print("%s\t%d\t%d" % ( chrom, start, end ))
| 24
| 78
| 0.690891
|
590d2e3dccc5897abbd119b24cbc0af54031ce34
| 4,899
|
py
|
Python
|
ProjectApplication/comments/migrations/0020_proposalevaluationattachment_proposalevaluationattachmentcategory_proposalevaluationcomment_proposal.py
|
code-review-doctor/project-application
|
d85b40b69572efbcda24ce9c40803f76d8ffd192
|
[
"MIT"
] | 5
|
2020-07-29T10:00:11.000Z
|
2022-02-19T11:00:34.000Z
|
ProjectApplication/comments/migrations/0020_proposalevaluationattachment_proposalevaluationattachmentcategory_proposalevaluationcomment_proposal.py
|
code-review-doctor/project-application
|
d85b40b69572efbcda24ce9c40803f76d8ffd192
|
[
"MIT"
] | 471
|
2019-09-20T14:37:28.000Z
|
2022-03-25T14:16:34.000Z
|
ProjectApplication/comments/migrations/0020_proposalevaluationattachment_proposalevaluationattachmentcategory_proposalevaluationcomment_proposal.py
|
code-review-doctor/project-application
|
d85b40b69572efbcda24ce9c40803f76d8ffd192
|
[
"MIT"
] | 5
|
2020-03-15T12:42:47.000Z
|
2022-02-15T18:06:52.000Z
|
# Generated by Django 3.0.3 on 2020-03-04 12:46
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import storages.backends.s3boto3
class Migration(migrations.Migration):
dependencies = [
('evaluation', '0010_file_name_processed_on_save'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('comments', '0019_removes_created_by'),
]
operations = [
migrations.CreateModel(
name='ProposalEvaluationCommentCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(auto_now_add=True, help_text='Date and time at which the entry was created')),
('modified_on', models.DateTimeField(auto_now=True, help_text='Date and time at which the entry was modified', null=True)),
('category', models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, to='comments.Category')),
],
options={
'verbose_name_plural': 'Proposal Evaluation Comment Categories',
},
),
migrations.CreateModel(
name='ProposalEvaluationAttachmentCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(auto_now_add=True, help_text='Date and time at which the entry was created')),
('modified_on', models.DateTimeField(auto_now=True, help_text='Date and time at which the entry was modified', null=True)),
('category', models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, to='comments.Category')),
],
options={
'verbose_name_plural': 'Proposal Evaluation Attachment Categories',
},
),
migrations.CreateModel(
name='ProposalEvaluationComment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(auto_now_add=True, help_text='Date and time at which the entry was created')),
('modified_on', models.DateTimeField(auto_now=True, help_text='Date and time at which the entry was modified', null=True)),
('text', models.TextField(help_text='Comment text')),
('category', models.ForeignKey(help_text='Type of comment', on_delete=django.db.models.deletion.PROTECT, to='comments.ProposalEvaluationCommentCategory')),
('created_by', models.ForeignKey(blank=True, help_text='User by which the entry was created', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='comments_proposalevaluationcomment_created_by_related', to=settings.AUTH_USER_MODEL)),
('proposal_evaluation', models.ForeignKey(help_text='Proposal Evaluation about which the comment was made', on_delete=django.db.models.deletion.PROTECT, to='evaluation.ProposalEvaluation')),
],
options={
'unique_together': {('proposal_evaluation', 'created_on', 'created_by')},
},
),
migrations.CreateModel(
name='ProposalEvaluationAttachment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(auto_now_add=True, help_text='Date and time at which the entry was created')),
('modified_on', models.DateTimeField(auto_now=True, help_text='Date and time at which the entry was modified', null=True)),
('text', models.TextField(blank=True, help_text='Comment of the attachment', null=True)),
('file', models.FileField(storage=storages.backends.s3boto3.S3Boto3Storage(), upload_to='attachments/proposal_evaluation/')),
('category', models.ForeignKey(help_text='Category of the attachment', on_delete=django.db.models.deletion.PROTECT, to='comments.ProposalEvaluationAttachmentCategory')),
('created_by', models.ForeignKey(blank=True, help_text='User by which the entry was created', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='comments_proposalevaluationattachment_created_by_related', to=settings.AUTH_USER_MODEL)),
('proposal_evaluation', models.ForeignKey(help_text='Proposal Evaluation that this attachment belongs to', on_delete=django.db.models.deletion.PROTECT, to='evaluation.ProposalEvaluation')),
],
options={
'abstract': False,
'unique_together': {('created_on', 'created_by')},
},
),
]
| 65.32
| 269
| 0.66238
|
bf256a29fc79849bc718b139efccf803235a4380
| 203
|
py
|
Python
|
Chapter-1/ex2-flask.py
|
gulkotapriyanka955/SoftwareEngineeringLab
|
afbb97b11164d045930e8b3fc6bb1e2f2310256f
|
[
"MIT"
] | null | null | null |
Chapter-1/ex2-flask.py
|
gulkotapriyanka955/SoftwareEngineeringLab
|
afbb97b11164d045930e8b3fc6bb1e2f2310256f
|
[
"MIT"
] | null | null | null |
Chapter-1/ex2-flask.py
|
gulkotapriyanka955/SoftwareEngineeringLab
|
afbb97b11164d045930e8b3fc6bb1e2f2310256f
|
[
"MIT"
] | null | null | null |
#Basic Flask Code
from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello_world():
return 'Hello World'
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, debug = True)
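# Quick check (added for exposition): with the server running,
#   curl http://localhost:8080/
# should print "Hello World".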
| 22.555556
| 52
| 0.674877
|
5d68002b5eb83d771193a85c73f2d8d138b2b350
| 3,776
|
py
|
Python
|
contrib/macdeploy/custom_dsstore.py
|
cryptopool-builders/Streamies
|
330a802c3000bbbb91c81bc45ad7415ba2b8825f
|
[
"MIT"
] | 1
|
2020-04-02T08:26:57.000Z
|
2020-04-02T08:26:57.000Z
|
contrib/macdeploy/custom_dsstore.py
|
cryptopool-builders/Streamies
|
330a802c3000bbbb91c81bc45ad7415ba2b8825f
|
[
"MIT"
] | null | null | null |
contrib/macdeploy/custom_dsstore.py
|
cryptopool-builders/Streamies
|
330a802c3000bbbb91c81bc45ad7415ba2b8825f
|
[
"MIT"
] | 1
|
2021-02-19T02:01:17.000Z
|
2021-02-19T02:01:17.000Z
|
#!/usr/bin/env python
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from __future__ import division,print_function,unicode_literals
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': b'{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00bitcoinuser:\x00Documents:\x00bitcoin:\x00bitcoin:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/bitcoinuser/Documents/bitcoin/bitcoin/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['Streamies-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
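# Invocation sketch (added for exposition; argument order follows the
# sys.argv handling above):
#   python custom_dsstore.py <output .DS_Store path> <volume/package name>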
| 61.901639
| 1,817
| 0.727489
|
81257e7b0c19de38204646702445f3ab93bb2341
| 4,143
|
py
|
Python
|
xlsform_prj/settings.py
|
yanokwa/odk-xlsform-online
|
597a24a2e97eda7b1336da3195f96f924adec321
|
[
"Apache-2.0"
] | 7
|
2018-04-25T16:46:59.000Z
|
2020-02-07T05:06:26.000Z
|
xlsform_prj/settings.py
|
yanokwa/odk-xlsform-online
|
597a24a2e97eda7b1336da3195f96f924adec321
|
[
"Apache-2.0"
] | 8
|
2018-01-16T19:22:47.000Z
|
2020-03-28T22:20:27.000Z
|
xlsform_prj/settings.py
|
yanokwa/odk-xlsform-online
|
597a24a2e97eda7b1336da3195f96f924adec321
|
[
"Apache-2.0"
] | 7
|
2018-05-14T04:00:43.000Z
|
2020-02-29T18:46:22.000Z
|
"""
Django settings for xlsform_prj project.
Generated by 'django-admin startproject' using Django 1.11.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['DJANGO_SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# SECURITY WARNING: don't run with ALLOWED_HOSTS = * in production!
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
# 'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'xlsform_app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'xlsform_prj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'xlsform_prj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Logging
# https://docs.djangoproject.com/en/1.11/topics/logging/#examples
# Don't log missing context variables in django.template
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
},
},
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.handlers.TimedRotatingFileHandler',
'filename': 'debug.log',
'when': 'midnight',
'backupCount': 31,
'formatter':'standard',
},
},
'loggers': {
'django': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True,
},
'django.template': {
'handlers': ['file'],
'level': 'INFO',
'propagate': False,
},
},
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| 25.732919
| 91
| 0.647357
|
332f16510b16c794b2df31b03f6dc2ba9502a834
| 447
|
py
|
Python
|
pictogram/__main__.py
|
Alquimista/pictogram
|
57943e9679db0e26edfb60cfe2a88c56bbfc469a
|
[
"MIT"
] | null | null | null |
pictogram/__main__.py
|
Alquimista/pictogram
|
57943e9679db0e26edfb60cfe2a88c56bbfc469a
|
[
"MIT"
] | null | null | null |
pictogram/__main__.py
|
Alquimista/pictogram
|
57943e9679db0e26edfb60cfe2a88c56bbfc469a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import pictogram
def main(args=None):
"""The main routine."""
if args is None:
args = sys.argv[1:]
print("This is the main routine.")
print("It should do something interesting.")
# Do argument parsing here (eg. with argparse) and anything else
# you want your project to do.
if __name__ == "__main__":
main()
| 19.434783
| 68
| 0.64877
|
ea01c4cdaa7306f5b34e64f19b58e4ee7f56597d
| 1,217
|
py
|
Python
|
01_basic/revolving/revolving.py
|
kaityo256/yamagata2022
|
435deb3210b1eb0191364570b378656c31cc23e5
|
[
"CC-BY-4.0"
] | 1
|
2022-01-19T15:33:23.000Z
|
2022-01-19T15:33:23.000Z
|
01_basic/revolving/revolving.py
|
kaityo256/yamagata2022
|
435deb3210b1eb0191364570b378656c31cc23e5
|
[
"CC-BY-4.0"
] | null | null | null |
01_basic/revolving/revolving.py
|
kaityo256/yamagata2022
|
435deb3210b1eb0191364570b378656c31cc23e5
|
[
"CC-BY-4.0"
] | null | null | null |
def show_schedule(balance, y_rate, payment):
m_rate = y_rate / 12.0
i = 0
total = 0
while balance > 0:
i = i + 1
interest = int(balance * m_rate)
pay = payment - interest
if balance < pay:
pay = balance
total += pay
total += interest
balance -= pay
print(f"{i} {interest} {pay} {interest + pay} {balance}")
print(f"Total = {total}, {i} times")
def num_payment(balance, y_rate, payment):
m_rate = y_rate / 12.0
i = 0
total = 0
while balance > 0:
i = i + 1
interest = int(balance * m_rate)
pay = payment - interest
if balance < pay:
pay = balance
total += pay
total += interest
balance -= pay
#print(f"{i} {interest} {pay} {interest + pay} {balance}")
return i, total
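# Worked example (added for exposition): with balance=100000 and y_rate=0.15,
# m_rate is 0.0125, so the first month charges int(100000 * 0.0125) = 1250 in
# interest, 10000 - 1250 = 8750 goes to the principal, and the balance drops
# to 91250.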
def main():
y_rate = 0.15 # Yearly rate
payment = 10000 # Monthly payment
b = [100000, 200000, 300000, 400000, 500000, 700000, 780000, 790000, 799999]
for balance in b:
n, total = num_payment(balance, y_rate, payment)
print(balance, n, total)
if __name__ == '__main__':
#main()
    show_schedule(100000, 0.15, 10000)
| 26.456522
| 80
| 0.548891
|
60fff9ab162c424f8146b3404013470ae5bc2304
| 26,991
|
py
|
Python
|
leafmap/toolbar.py
|
Kikehulk/leafmap
|
998654ad364a4e2b7048ef517306301756c2b9f9
|
[
"MIT"
] | null | null | null |
leafmap/toolbar.py
|
Kikehulk/leafmap
|
998654ad364a4e2b7048ef517306301756c2b9f9
|
[
"MIT"
] | null | null | null |
leafmap/toolbar.py
|
Kikehulk/leafmap
|
998654ad364a4e2b7048ef517306301756c2b9f9
|
[
"MIT"
] | null | null | null |
"""Module for dealing with the toolbar.
"""
import math
import os
import ipyevents
import ipyleaflet
import ipywidgets as widgets
from ipyleaflet import TileLayer, WidgetControl
from IPython.core.display import display
from ipyfilechooser import FileChooser
from .common import *
def tool_template(m=None):
"""Generates a tool GUI template using ipywidgets.
Args:
m (leafmap.Map, optional): The leaflet Map object. Defaults to None.
Returns:
ipywidgets: The tool GUI widget.
"""
widget_width = "250px"
padding = "0px 0px 0px 5px" # upper, right, bottom, left
toolbar_button = widgets.ToggleButton(
value=False,
tooltip="Toolbar",
icon="gear",
layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
)
close_button = widgets.ToggleButton(
value=False,
tooltip="Close the tool",
icon="times",
button_style="primary",
layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
)
checkbox = widgets.Checkbox(
description="Checkbox",
indent=False,
layout=widgets.Layout(padding=padding, width=widget_width),
)
dropdown = widgets.Dropdown(
options=["Option 1", "Option 2", "Option 3"],
value=None,
description="Dropdown:",
layout=widgets.Layout(width=widget_width, padding=padding),
style={"description_width": "initial"},
)
int_slider = widgets.IntSlider(
min=1,
max=100,
description="Int Slider: ",
readout=False,
continuous_update=True,
layout=widgets.Layout(width="220px", padding=padding),
style={"description_width": "initial"},
)
int_slider_label = widgets.Label()
widgets.jslink((int_slider, "value"), (int_slider_label, "value"))
float_slider = widgets.FloatSlider(
min=1,
max=100,
description="Float Slider: ",
readout=False,
continuous_update=True,
layout=widgets.Layout(width="220px", padding=padding),
style={"description_width": "initial"},
)
float_slider_label = widgets.Label()
widgets.jslink((float_slider, "value"), (float_slider_label, "value"))
color = widgets.ColorPicker(
concise=False,
description="Color:",
value="white",
style={"description_width": "initial"},
layout=widgets.Layout(width=widget_width, padding=padding),
)
text = widgets.Text(
value="",
description="Textbox:",
placeholder="Placeholder",
style={"description_width": "initial"},
layout=widgets.Layout(width=widget_width, padding=padding),
)
textarea = widgets.Textarea(
placeholder="Placeholder",
layout=widgets.Layout(width=widget_width),
)
buttons = widgets.ToggleButtons(
value=None,
options=["Apply", "Reset", "Close"],
tooltips=["Apply", "Reset", "Close"],
button_style="primary",
)
buttons.style.button_width = "80px"
output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding))
toolbar_widget = widgets.VBox()
toolbar_widget.children = [toolbar_button]
toolbar_header = widgets.HBox()
toolbar_header.children = [close_button, toolbar_button]
toolbar_footer = widgets.VBox()
toolbar_footer.children = [
checkbox,
widgets.HBox([int_slider, int_slider_label]),
widgets.HBox([float_slider, float_slider_label]),
dropdown,
text,
color,
textarea,
buttons,
output,
]
toolbar_event = ipyevents.Event(
source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
)
def handle_toolbar_event(event):
if event["type"] == "mouseenter":
toolbar_widget.children = [toolbar_header, toolbar_footer]
elif event["type"] == "mouseleave":
if not toolbar_button.value:
toolbar_widget.children = [toolbar_button]
toolbar_button.value = False
close_button.value = False
toolbar_event.on_dom_event(handle_toolbar_event)
def toolbar_btn_click(change):
if change["new"]:
close_button.value = False
toolbar_widget.children = [toolbar_header, toolbar_footer]
else:
if not close_button.value:
toolbar_widget.children = [toolbar_button]
toolbar_button.observe(toolbar_btn_click, "value")
def close_btn_click(change):
if change["new"]:
toolbar_button.value = False
if m is not None:
m.toolbar_reset()
if m.tool_control is not None and m.tool_control in m.controls:
m.remove_control(m.tool_control)
m.tool_control = None
toolbar_widget.close()
close_button.observe(close_btn_click, "value")
def button_clicked(change):
if change["new"] == "Apply":
with output:
output.clear_output()
print("Running ...")
elif change["new"] == "Reset":
textarea.value = ""
output.clear_output()
elif change["new"] == "Close":
if m is not None:
m.toolbar_reset()
if m.tool_control is not None and m.tool_control in m.controls:
m.remove_control(m.tool_control)
m.tool_control = None
toolbar_widget.close()
buttons.value = None
buttons.observe(button_clicked, "value")
toolbar_button.value = True
if m is not None:
toolbar_control = WidgetControl(widget=toolbar_widget, position="topright")
if toolbar_control not in m.controls:
m.add_control(toolbar_control)
m.tool_control = toolbar_control
else:
return toolbar_widget
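# Usage sketch (added for exposition; assumes an interactive Jupyter session):
#
#   import leafmap
#   m = leafmap.Map()
#   tool_template(m)   # attaches the template GUI to the map as a control
#   m                  # display the map; with m=None the VBox widget is returned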
def main_toolbar(m):
"""Creates the main toolbar and adds it to the map.
Args:
m (leafmap.Map): The leafmap Map object.
"""
tools = {
"map": {
"name": "basemap",
"tooltip": "Change basemap",
},
"folder-open": {
"name": "open_data",
"tooltip": "Open local vector/raster data",
},
"eraser": {
"name": "eraser",
"tooltip": "Remove all drawn features",
},
"gears": {
"name": "whitebox",
"tooltip": "WhiteboxTools for local geoprocessing",
},
"camera": {
"name": "save_map",
"tooltip": "Save map as HTML or image",
},
"question": {
"name": "help",
"tooltip": "Get help",
},
}
icons = list(tools.keys())
tooltips = [item["tooltip"] for item in list(tools.values())]
icon_width = "32px"
icon_height = "32px"
n_cols = 3
n_rows = math.ceil(len(icons) / n_cols)
toolbar_grid = widgets.GridBox(
children=[
widgets.ToggleButton(
layout=widgets.Layout(
width="auto", height="auto", padding="0px 0px 0px 4px"
),
button_style="primary",
icon=icons[i],
tooltip=tooltips[i],
)
for i in range(len(icons))
],
layout=widgets.Layout(
width="107px",
grid_template_columns=(icon_width + " ") * n_cols,
grid_template_rows=(icon_height + " ") * n_rows,
grid_gap="1px 1px",
padding="5px",
),
)
m.toolbar = toolbar_grid
def tool_callback(change):
if change["new"]:
current_tool = change["owner"]
for tool in toolbar_grid.children:
if tool is not current_tool:
tool.value = False
tool = change["owner"]
tool_name = tools[tool.icon]["name"]
if tool_name == "basemap":
change_basemap(m)
elif tool_name == "open_data":
open_data_widget(m)
elif tool_name == "eraser":
if m.draw_control is not None:
m.draw_control.clear()
m.user_roi = None
m.user_rois = None
m.draw_features = []
elif tool_name == "whitebox":
import whiteboxgui.whiteboxgui as wbt
tools_dict = wbt.get_wbt_dict()
wbt_toolbox = wbt.build_toolbox(
tools_dict, max_width="800px", max_height="500px"
)
wbt_control = WidgetControl(widget=wbt_toolbox, position="bottomright")
m.whitebox = wbt_control
m.add_control(wbt_control)
elif tool_name == "save_map":
                save_map(m)
elif tool_name == "help":
import webbrowser
webbrowser.open_new_tab("https://leafmap.gishub.org")
current_tool.value = False
else:
tool = change["owner"]
tool_name = tools[tool.icon]["name"]
m.toolbar_reset()
for tool in toolbar_grid.children:
tool.observe(tool_callback, "value")
toolbar_button = widgets.ToggleButton(
value=False,
tooltip="Toolbar",
icon="wrench",
layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
)
m.toolbar_button = toolbar_button
layers_button = widgets.ToggleButton(
value=False,
tooltip="Layers",
icon="server",
layout=widgets.Layout(height="28px", width="72px"),
)
toolbar_widget = widgets.VBox()
toolbar_widget.children = [toolbar_button]
toolbar_header = widgets.HBox()
toolbar_header.children = [layers_button, toolbar_button]
toolbar_footer = widgets.VBox()
toolbar_footer.children = [toolbar_grid]
toolbar_event = ipyevents.Event(
source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
)
def handle_toolbar_event(event):
if event["type"] == "mouseenter":
toolbar_widget.children = [toolbar_header, toolbar_footer]
elif event["type"] == "mouseleave":
if not toolbar_button.value:
toolbar_widget.children = [toolbar_button]
toolbar_button.value = False
layers_button.value = False
toolbar_event.on_dom_event(handle_toolbar_event)
def toolbar_btn_click(change):
if change["new"]:
layers_button.value = False
toolbar_widget.children = [toolbar_header, toolbar_footer]
else:
if not layers_button.value:
toolbar_widget.children = [toolbar_button]
toolbar_button.observe(toolbar_btn_click, "value")
def layers_btn_click(change):
if change["new"]:
layers_hbox = []
all_layers_chk = widgets.Checkbox(
value=False,
description="All layers on/off",
indent=False,
layout=widgets.Layout(height="18px", padding="0px 8px 25px 8px"),
)
all_layers_chk.layout.width = "30ex"
layers_hbox.append(all_layers_chk)
def all_layers_chk_changed(change):
if change["new"]:
for layer in m.layers:
layer.visible = True
else:
for layer in m.layers:
layer.visible = False
all_layers_chk.observe(all_layers_chk_changed, "value")
layers = [
lyr
for lyr in m.layers[1:]
if (isinstance(lyr, TileLayer) or isinstance(lyr, ipyleaflet.WMSLayer))
]
# if the layers contain unsupported layers (e.g., GeoJSON, GeoData), adds the ipyleaflet built-in LayerControl
if len(layers) < (len(m.layers) - 1):
if m.layer_control is None:
layer_control = ipyleaflet.LayersControl(position="topright")
m.layer_control = layer_control
if m.layer_control not in m.controls:
m.add_control(m.layer_control)
# for non-TileLayer, use layer.style={'opacity':0, 'fillOpacity': 0} to turn layer off.
for layer in layers:
layer_chk = widgets.Checkbox(
value=layer.visible,
description=layer.name,
indent=False,
layout=widgets.Layout(height="18px"),
)
layer_chk.layout.width = "25ex"
layer_opacity = widgets.FloatSlider(
value=layer.opacity,
min=0,
max=1,
step=0.01,
readout=False,
layout=widgets.Layout(width="80px"),
)
layer_settings = widgets.ToggleButton(
icon="gear",
tooltip=layer.name,
layout=widgets.Layout(
width="25px", height="25px", padding="0px 0px 0px 5px"
),
)
def layer_vis_on_click(change):
if change["new"]:
layer_name = change["owner"].tooltip
change["owner"].value = False
layer_settings.observe(layer_vis_on_click, "value")
def layer_chk_changed(change):
layer_name = change["owner"].description
layer_chk.observe(layer_chk_changed, "value")
widgets.jslink((layer_chk, "value"), (layer, "visible"))
widgets.jsdlink((layer_opacity, "value"), (layer, "opacity"))
hbox = widgets.HBox(
[layer_chk, layer_settings, layer_opacity],
layout=widgets.Layout(padding="0px 8px 0px 8px"),
)
layers_hbox.append(hbox)
toolbar_footer.children = layers_hbox
toolbar_button.value = False
else:
toolbar_footer.children = [toolbar_grid]
layers_button.observe(layers_btn_click, "value")
toolbar_control = WidgetControl(widget=toolbar_widget, position="topright")
m.add_control(toolbar_control)
def open_data_widget(m):
"""A widget for opening local vector/raster data.
Args:
m (object): leafmap.Map
"""
tool_output = widgets.Output()
tool_output_ctrl = WidgetControl(widget=tool_output, position="topright")
if m.tool_output_ctrl is not None and m.tool_output_ctrl in m.controls:
m.remove_control(m.tool_output_ctrl)
file_type = widgets.ToggleButtons(
options=["Shapefile", "GeoJSON", "Vector", "CSV", "GeoTIFF"],
tooltips=[
"Open a shapefile",
"Open a GeoJSON file",
"Open a vector dataset",
"Create points from CSV",
"Open a vector dataset" "Open a GeoTIFF",
],
)
file_type.style.button_width = "88px"
file_chooser = FileChooser(os.getcwd())
file_chooser.filter_pattern = "*.shp"
file_chooser.use_dir_icons = True
style = {"description_width": "initial"}
layer_name = widgets.Text(
value="Shapefile",
description="Enter a layer name:",
tooltip="Enter a layer name for the selected file",
style=style,
layout=widgets.Layout(width="454px", padding="0px 0px 0px 5px"),
)
longitude = widgets.Dropdown(
options=[],
value=None,
description="Longitude:",
layout=widgets.Layout(width="149px", padding="0px 0px 0px 5px"),
style={"description_width": "initial"},
)
latitude = widgets.Dropdown(
options=[],
value=None,
description="Latitude:",
layout=widgets.Layout(width="149px", padding="0px 0px 0px 5px"),
style={"description_width": "initial"},
)
label = widgets.Dropdown(
options=[],
value=None,
description="Label:",
layout=widgets.Layout(width="149px", padding="0px 0px 0px 5px"),
style={"description_width": "initial"},
)
csv_widget = widgets.HBox()
ok_cancel = widgets.ToggleButtons(
value=None,
options=["Apply", "Reset", "Close"],
tooltips=["Apply", "Reset", "Close"],
button_style="primary",
)
# ok_cancel.style.button_width = "133px"
bands = widgets.Text(
value="1",
description="Bands:",
tooltip="Enter a list of band indices",
style=style,
layout=widgets.Layout(width="110px"),
)
colormap = widgets.Dropdown(
options=[],
value=None,
description="colormap:",
layout=widgets.Layout(width="172px"),
style=style,
)
x_dim = widgets.Text(
value="x",
description="x_dim:",
tooltip="The x dimension",
style=style,
layout=widgets.Layout(width="80px"),
)
y_dim = widgets.Text(
value="y",
description="y_dim:",
tooltip="The xydimension",
style=style,
layout=widgets.Layout(width="80px"),
)
raster_options = widgets.HBox()
main_widget = widgets.VBox(
[
file_type,
file_chooser,
csv_widget,
layer_name,
raster_options,
ok_cancel,
]
)
tool_output.clear_output()
with tool_output:
display(main_widget)
# def chooser_callback(chooser):
# if len(layer_name.value) == 0 and file_chooser.selected is not None:
# layer_name.value = os.path.splitext(file_chooser.selected_filename)[0]
def bands_changed(change):
if change["new"] and "," in change["owner"].value:
colormap.value = None
colormap.disabled = True
else:
colormap.disabled = False
bands.observe(bands_changed, "value")
def chooser_callback(chooser):
if file_type.value == "CSV":
import pandas as pd
df = pd.read_csv(file_chooser.selected)
col_names = df.columns.values.tolist()
longitude.options = col_names
latitude.options = col_names
label.options = col_names
if "longitude" in col_names:
longitude.value = "longitude"
if "latitude" in col_names:
latitude.value = "latitude"
if "name" in col_names:
label.value = "name"
file_chooser.register_callback(chooser_callback)
def file_type_changed(change):
ok_cancel.value = None
file_chooser.default_path = os.getcwd()
file_chooser.reset()
layer_name.value = file_type.value
csv_widget.children = []
if change["new"] == "Shapefile":
file_chooser.filter_pattern = "*.shp"
raster_options.children = []
elif change["new"] == "GeoJSON":
file_chooser.filter_pattern = "*.geojson"
raster_options.children = []
elif change["new"] == "Vector":
file_chooser.filter_pattern = "*.*"
raster_options.children = []
elif change["new"] == "CSV":
file_chooser.filter_pattern = ["*.csv", "*.CSV"]
csv_widget.children = [longitude, latitude, label]
raster_options.children = []
elif change["new"] == "GeoTIFF":
import matplotlib.pyplot as plt
file_chooser.filter_pattern = "*.tif"
colormap.options = plt.colormaps()
colormap.value = "terrain"
raster_options.children = [bands, colormap, x_dim, y_dim]
def ok_cancel_clicked(change):
if change["new"] == "Apply":
m.default_style = {"cursor": "wait"}
file_path = file_chooser.selected
if file_path is not None:
ext = os.path.splitext(file_path)[1]
with tool_output:
if ext.lower() == ".shp":
m.add_shapefile(
file_path, style=None, layer_name=layer_name.value
)
elif ext.lower() == ".geojson":
m.add_geojson(
file_path, style=None, layer_name=layer_name.value
)
elif ext.lower() == ".csv":
m.add_xy_data(
file_path,
x=longitude.value,
y=latitude.value,
label=label.value,
layer_name=layer_name.value,
)
elif ext.lower() == ".tif":
sel_bands = [int(b.strip()) for b in bands.value.split(",")]
m.add_raster(
image=file_path,
bands=sel_bands,
layer_name=layer_name.value,
colormap=colormap.value,
x_dim=x_dim.value,
y_dim=y_dim.value,
)
else:
m.add_vector(file_path, style=None, layer_name=layer_name.value)
else:
print("Please select a file to open.")
m.toolbar_reset()
m.default_style = {"cursor": "default"}
elif change["new"] == "Reset":
file_chooser.reset()
tool_output.clear_output()
with tool_output:
display(main_widget)
m.toolbar_reset()
elif change["new"] == "Close":
if m.tool_output_ctrl is not None and m.tool_output_ctrl in m.controls:
m.remove_control(m.tool_output_ctrl)
m.tool_output_ctrl = None
m.toolbar_reset()
ok_cancel.value = None
file_type.observe(file_type_changed, names="value")
ok_cancel.observe(ok_cancel_clicked, names="value")
# file_chooser.register_callback(chooser_callback)
m.add_control(tool_output_ctrl)
m.tool_output_ctrl = tool_output_ctrl
def change_basemap(m):
"""Widget for changing basemaps.
Args:
m (object): leafmap.Map.
"""
from .basemaps import leaf_basemaps
dropdown = widgets.Dropdown(
options=list(leaf_basemaps.keys()),
value="ROADMAP",
layout=widgets.Layout(width="200px")
# description="Basemaps",
)
close_btn = widgets.Button(
icon="times",
tooltip="Close the basemap widget",
button_style="primary",
layout=widgets.Layout(width="32px"),
)
basemap_widget = widgets.HBox([dropdown, close_btn])
def on_click(change):
basemap_name = change["new"]
if len(m.layers) == 1:
old_basemap = m.layers[0]
else:
old_basemap = m.layers[1]
m.substitute_layer(old_basemap, leaf_basemaps[basemap_name])
dropdown.observe(on_click, "value")
def close_click(change):
m.toolbar_reset()
if m.basemap_ctrl is not None and m.basemap_ctrl in m.controls:
m.remove_control(m.basemap_ctrl)
basemap_widget.close()
close_btn.on_click(close_click)
basemap_control = WidgetControl(widget=basemap_widget, position="topright")
m.add_control(basemap_control)
m.basemap_ctrl = basemap_control
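# Note added for exposition: change_basemap swaps only the basemap layer via
# m.substitute_layer, so overlays that were added on top of it remain on the
# map when the dropdown selection changes.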
def save_map(m):
"""Saves the map as HTML, JPG, or PNG.
Args:
m (leafmap.Map): The leafmap Map object.
"""
import time
tool_output = widgets.Output()
m.tool_output = tool_output
tool_output.clear_output(wait=True)
save_map_widget = widgets.VBox()
save_type = widgets.ToggleButtons(
options=["HTML", "PNG", "JPG"],
tooltips=[
"Save the map as an HTML file",
"Take a screenshot and save as a PNG file",
"Take a screenshot and save as a JPG file",
],
)
file_chooser = FileChooser(os.getcwd())
file_chooser.default_filename = "my_map.html"
file_chooser.use_dir_icons = True
ok_cancel = widgets.ToggleButtons(
value=None,
options=["OK", "Cancel", "Close"],
tooltips=["OK", "Cancel", "Close"],
button_style="primary",
)
def save_type_changed(change):
ok_cancel.value = None
# file_chooser.reset()
file_chooser.default_path = os.getcwd()
if change["new"] == "HTML":
file_chooser.default_filename = "my_map.html"
elif change["new"] == "PNG":
file_chooser.default_filename = "my_map.png"
elif change["new"] == "JPG":
file_chooser.default_filename = "my_map.jpg"
save_map_widget.children = [save_type, file_chooser]
def chooser_callback(chooser):
save_map_widget.children = [save_type, file_chooser, ok_cancel]
def ok_cancel_clicked(change):
if change["new"] == "OK":
file_path = file_chooser.selected
ext = os.path.splitext(file_path)[1]
if save_type.value == "HTML" and ext.upper() == ".HTML":
tool_output.clear_output()
m.to_html(file_path)
elif save_type.value == "PNG" and ext.upper() == ".PNG":
tool_output.clear_output()
m.toolbar_button.value = False
time.sleep(1)
screen_capture(outfile=file_path)
elif save_type.value == "JPG" and ext.upper() == ".JPG":
tool_output.clear_output()
m.toolbar_button.value = False
time.sleep(1)
screen_capture(outfile=file_path)
else:
label = widgets.Label(
value="The selected file extension does not match the selected exporting type."
)
save_map_widget.children = [save_type, file_chooser, label]
elif change["new"] == "Cancel":
tool_output.clear_output()
file_chooser.reset()
elif change["new"] == "Close":
if m.save_map_control is not None:
m.remove_control(m.save_map_control)
m.save_map_control = None
ok_cancel.value = None
m.toolbar_reset()
save_type.observe(save_type_changed, names="value")
ok_cancel.observe(ok_cancel_clicked, names="value")
file_chooser.register_callback(chooser_callback)
save_map_widget.children = [save_type, file_chooser]
save_map_control = WidgetControl(widget=save_map_widget, position="topright")
m.add_control(save_map_control)
m.save_map_control = save_map_control
| 32.363309
| 122
| 0.561928
|
68f9dff706b7e95c4d31b87de4a90d59f1d58d14
| 583
|
py
|
Python
|
2020_11_Cooper_Type/pattern_example_recording.py
|
benkiel/python_workshops
|
9483c1fd5f7dd87e595289efb7376e1b81ff5ede
|
[
"MIT"
] | 6
|
2018-03-24T17:31:51.000Z
|
2021-11-18T06:02:09.000Z
|
2020_11_Cooper_Type/pattern_example_recording.py
|
benkiel/python_workshops
|
9483c1fd5f7dd87e595289efb7376e1b81ff5ede
|
[
"MIT"
] | null | null | null |
2020_11_Cooper_Type/pattern_example_recording.py
|
benkiel/python_workshops
|
9483c1fd5f7dd87e595289efb7376e1b81ff5ede
|
[
"MIT"
] | null | null | null |
from random import random
size(1000, 1000)
step = 20
margin = 4
xRange = range(0, width(), step)
yRange = range(0, height(), step)
xRange2 = range(0, width(), step*4)
yRange2 = range(0, height(), step*4)
# Drawing
def module(x, y, w, h):
fill(0)
rect(x, y, w, h)
def module2(x, y, w, h):
fill(1,0,1)
rect(x, y, w, h)
notDrawn = []
for x in xRange:
for y in yRange:
if random() > 0.5:
module(x, y, step, step)
else:
notDrawn.append([x,y])
print(notDrawn)
for x, y in notDrawn:
module2(x,y,step,step)
| 15.342105
| 36
| 0.552316
|
e80fd8c45e9ad9c442f19619434ab1794decf946
| 223
|
py
|
Python
|
test.py
|
centurio1987/testexercise
|
bca5c4db42331576f10370c7e042671674d4ad39
|
[
"MIT"
] | null | null | null |
test.py
|
centurio1987/testexercise
|
bca5c4db42331576f10370c7e042671674d4ad39
|
[
"MIT"
] | null | null | null |
test.py
|
centurio1987/testexercise
|
bca5c4db42331576f10370c7e042671674d4ad39
|
[
"MIT"
] | null | null | null |
# Insertion sort (adjacent-swap variant) on a small sample list.
# Renamed from `list` to avoid shadowing the built-in type.
values = [8, 4, 5, 6, 7]
for i in range(len(values)):
    a = values[i]
    # Walk the current value down through the already-sorted prefix.
    for j in range(i - 1, -1, -1):
        if a < values[j]:
            values[j + 1] = values[j]
            values[j] = a
        else:
            break
print(values)
| 18.583333
| 33
| 0.399103
|
a60fd4d1cdc1cb42b20a7ad47c2bc364ffb1f687
| 4,116
|
py
|
Python
|
examples/nlp/lasertagger/official_lasertagger/score_lib.py
|
yidong72/NeMo
|
8e703fba0aa7368be1a44e641e59346ca786a6dc
|
[
"Apache-2.0"
] | null | null | null |
examples/nlp/lasertagger/official_lasertagger/score_lib.py
|
yidong72/NeMo
|
8e703fba0aa7368be1a44e641e59346ca786a6dc
|
[
"Apache-2.0"
] | null | null | null |
examples/nlp/lasertagger/official_lasertagger/score_lib.py
|
yidong72/NeMo
|
8e703fba0aa7368be1a44e641e59346ca786a6dc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# =============================================================================
# Copyright (c) 2020 NVIDIA. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# -*- coding: utf-8 -*-
#
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""
This file contains code artifacts adapted from the original implementation:
https://github.com/google-research/lasertagger/blob/master/score_lib.py
"""
"""Utility functions for computing evaluation metrics."""
import re
from examples.nlp.lasertagger.official_lasertagger import sari_hook
def compute_exact_score(predictions, target_lists):
"""Computes the Exact score (accuracy) of the predictions.
Exact score is defined as the percentage of predictions that match at least
one of the targets.
Args:
predictions: List of predictions.
target_lists: List of targets (1 or more per prediction).
Returns:
Exact score between [0, 1].
"""
num_matches = sum(any(pred == target for target in targets) for pred, targets in zip(predictions, target_lists))
return num_matches / max(len(predictions), 0.1) # Avoids 0/0.
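# Illustrative sketch (not part of the original module): a minimal, hypothetical
# example of compute_exact_score; the sample predictions and targets below are
# made up purely for illustration and the function is never called here.
def _example_compute_exact_score():
    predictions = ["she sells seashells", "hello world"]
    target_lists = [["she sells seashells", "seashells she sells"], ["goodbye world"]]
    # The first prediction matches one of its targets, the second matches none,
    # so the returned Exact score is 1 / 2 = 0.5.
    return compute_exact_score(predictions, target_lists)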
def compute_sari_scores(sources, predictions, target_lists, ignore_wikisplit_separators=True):
"""Computes SARI scores.
Wraps the t2t implementation of SARI computation.
Args:
sources: List of sources.
predictions: List of predictions.
target_lists: List of targets (1 or more per prediction).
ignore_wikisplit_separators: Whether to ignore "<::::>" tokens, used as
sentence separators in Wikisplit, when evaluating. For the numbers
reported in the paper, we accidentally ignored those tokens. Ignoring them
does not affect the Exact score (since there's usually always a period
before the separator to indicate sentence break), but it decreases the
SARI score (since the Addition score goes down as the model doesn't get
points for correctly adding <::::> anymore).
Returns:
Tuple (SARI score, keep score, addition score, deletion score).
"""
sari_sum = 0
keep_sum = 0
add_sum = 0
del_sum = 0
for source, pred, targets in zip(sources, predictions, target_lists):
if ignore_wikisplit_separators:
source = re.sub(' <::::> ', ' ', source)
pred = re.sub(' <::::> ', ' ', pred)
targets = [re.sub(' <::::> ', ' ', t) for t in targets]
source_ids = source.split()
pred_ids = pred.split()
list_of_targets = [t.split() for t in targets]
sari, keep, addition, deletion = sari_hook.get_sari_score(
source_ids, pred_ids, list_of_targets, beta_for_deletion=1
)
sari_sum += sari
keep_sum += keep
add_sum += addition
del_sum += deletion
n = max(len(sources), 0.1) # Avoids 0/0.
return (sari_sum / n, keep_sum / n, add_sum / n, del_sum / n)
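# Usage sketch (hypothetical, not part of the original module): callers typically
# unpack the 4-tuple returned above, e.g.
#
#   sari, keep, addition, deletion = compute_sari_scores(
#       sources, predictions, target_lists)
#
# where sources, predictions and target_lists are parallel lists, as in
# compute_exact_score.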
| 39.576923
| 116
| 0.659135
|
d123f18c3e5aec2e6eb81f01ab3299bd146cf447
| 1,369
|
py
|
Python
|
tests/test_cli.py
|
EndlessTrax/pgn-to-sqlite
|
e5fa0a64b7595a10ce91bec27a3561d6904f0de4
|
[
"MIT"
] | 8
|
2021-04-20T20:53:32.000Z
|
2022-02-04T11:14:31.000Z
|
tests/test_cli.py
|
EndlessTrax/pgn-to-sqlite
|
e5fa0a64b7595a10ce91bec27a3561d6904f0de4
|
[
"MIT"
] | 1
|
2021-04-14T22:36:45.000Z
|
2021-04-15T01:06:21.000Z
|
tests/test_cli.py
|
EndlessTrax/pgn-to-sqlite
|
e5fa0a64b7595a10ce91bec27a3561d6904f0de4
|
[
"MIT"
] | null | null | null |
import requests
from click.testing import CliRunner
from pgn_to_sqlite.cli import build_pgn_dict, convert_to_snake_case, cli
def test_snake_case_conversion():
result = convert_to_snake_case("SpamSpam")
assert result == "spam_spam"
def test_build_pgn_dict_from_chess_dotcom():
with open("tests/game_files/test_pgn_file_chess_dotcom.pgn", "r") as f:
pgn_str = f.read()
result = build_pgn_dict(pgn_str)
assert result["white"] == "EndlessTrax"
assert result["termination"] == "EndlessTrax won by checkmate"
def test_build_pgn_dict_from_lichess():
with open("tests/game_files/test_pgn_file_lichess.pgn", "r") as f:
pgn_str = f.read()
result = build_pgn_dict(pgn_str)
assert result["black"] == "endlesstrax"
assert result["opening"] == "Sicilian Defense: Old Sicilian"
def test_chess_dotcom_api_endpoint():
r = requests.get("https://api.chess.com/pub/player/endlesstrax/games/archives")
assert r.status_code == 200
def test_ValueError_on_invalid_args():
runner = CliRunner()
result = runner.invoke(
cli, ["-u", "endlesstrax", "-o", "games.db", "fetch", "invaild"]
)
assert result.exit_code == 1
def test_folder_input_file():
runner = CliRunner()
result = runner.invoke(cli, ["-o", "games.db", "save", "tests/game_files/"])
assert result.exit_code == 0
| 29.76087
| 84
| 0.696129
|
39ff3334e1ce8c6107bdb0529937d408370698a4
| 546
|
py
|
Python
|
app/api/DAO/user_addressDAO.py
|
EljakimHerrewijnen/Project_5-6
|
219893588220eff4004efb09e755d0b864f56392
|
[
"MIT"
] | null | null | null |
app/api/DAO/user_addressDAO.py
|
EljakimHerrewijnen/Project_5-6
|
219893588220eff4004efb09e755d0b864f56392
|
[
"MIT"
] | null | null | null |
app/api/DAO/user_addressDAO.py
|
EljakimHerrewijnen/Project_5-6
|
219893588220eff4004efb09e755d0b864f56392
|
[
"MIT"
] | null | null | null |
from app.api.database import Database
# Add address to user
def Create(postal_code, house_number, username):
db = Database()
sql = {
"postal_code" : postal_code,
"house_number" : house_number,
"username" : username
}
return db.insert("user_address", sql)
# Remove address from user
def Delete(postal_code, house_number, username):
db = Database()
db.where("postal_code", postal_code)
db.where("house_number", house_number)
db.where("username", username)
return db.delete("user_address")
| 28.736842
| 48
| 0.677656
|
6cf95d756c269d5c8c7806ca9bb38b8927ebb72e
| 2,858
|
py
|
Python
|
paddlespeech/server/engine/engine_factory.py
|
jerryuhoo/PaddleSpeech
|
1eec7b5e042da294c7524af92f0fae4c32a71aa3
|
[
"Apache-2.0"
] | 1,379
|
2021-11-10T02:42:21.000Z
|
2022-03-31T13:34:25.000Z
|
paddlespeech/server/engine/engine_factory.py
|
jerryuhoo/PaddleSpeech
|
1eec7b5e042da294c7524af92f0fae4c32a71aa3
|
[
"Apache-2.0"
] | 268
|
2021-11-10T14:07:34.000Z
|
2022-03-31T02:25:20.000Z
|
paddlespeech/server/engine/engine_factory.py
|
jerryuhoo/PaddleSpeech
|
1eec7b5e042da294c7524af92f0fae4c32a71aa3
|
[
"Apache-2.0"
] | 296
|
2021-11-15T02:37:11.000Z
|
2022-03-31T12:14:46.000Z
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Text
__all__ = ['EngineFactory']
class EngineFactory(object):
@staticmethod
def get_engine(engine_name: Text, engine_type: Text):
if engine_name == 'asr' and engine_type == 'inference':
from paddlespeech.server.engine.asr.paddleinference.asr_engine import ASREngine
return ASREngine()
elif engine_name == 'asr' and engine_type == 'python':
from paddlespeech.server.engine.asr.python.asr_engine import ASREngine
return ASREngine()
elif engine_name == 'asr' and engine_type == 'online':
from paddlespeech.server.engine.asr.online.asr_engine import ASREngine
return ASREngine()
elif engine_name == 'tts' and engine_type == 'inference':
from paddlespeech.server.engine.tts.paddleinference.tts_engine import TTSEngine
return TTSEngine()
elif engine_name == 'tts' and engine_type == 'python':
from paddlespeech.server.engine.tts.python.tts_engine import TTSEngine
return TTSEngine()
elif engine_name == 'tts' and engine_type == 'online':
from paddlespeech.server.engine.tts.online.python.tts_engine import TTSEngine
return TTSEngine()
elif engine_name == 'tts' and engine_type == 'online-onnx':
from paddlespeech.server.engine.tts.online.onnx.tts_engine import TTSEngine
return TTSEngine()
elif engine_name == 'cls' and engine_type == 'inference':
from paddlespeech.server.engine.cls.paddleinference.cls_engine import CLSEngine
return CLSEngine()
elif engine_name == 'cls' and engine_type == 'python':
from paddlespeech.server.engine.cls.python.cls_engine import CLSEngine
return CLSEngine()
elif engine_name.lower() == 'text' and engine_type.lower() == 'python':
from paddlespeech.server.engine.text.python.text_engine import TextEngine
return TextEngine()
elif engine_name.lower() == 'vector' and engine_type.lower() == 'python':
from paddlespeech.server.engine.vector.python.vector_engine import VectorEngine
return VectorEngine()
else:
return None
| 50.140351
| 91
| 0.682995
|
f520e0521852384219979cd37a86a78dbeeec799
| 7,676
|
py
|
Python
|
commands/maf_processing/python/createUniProt_ISOForm1_Sequence_File.py
|
cancerregulome/gidget
|
6c9e9a37f9992267c7505c7a396ff7e2638599ab
|
[
"MIT"
] | 3
|
2016-02-22T21:29:23.000Z
|
2020-09-19T07:38:21.000Z
|
commands/maf_processing/python/createUniProt_ISOForm1_Sequence_File.py
|
cancerregulome/gidget
|
6c9e9a37f9992267c7505c7a396ff7e2638599ab
|
[
"MIT"
] | 1
|
2015-01-16T02:33:59.000Z
|
2015-01-16T02:33:59.000Z
|
commands/maf_processing/python/createUniProt_ISOForm1_Sequence_File.py
|
cancerregulome/gidget
|
6c9e9a37f9992267c7505c7a396ff7e2638599ab
|
[
"MIT"
] | 2
|
2015-12-27T08:40:12.000Z
|
2021-03-01T06:30:23.000Z
|
import sys
import traceback
import re
import linecache
__author__ = 'xshu'
# global variables
unprocessedIDs = []
iso12PosInFileMapping = {}
def showHelp():
print\
'''
This program creates UniProt isoform-1 protein sequence files from the
uniprot_sprot and uniprot_trembl dat files and the isoform fasta file (downloaded from UniProt)
Usage: %s
Parameter Description
-uniprot_sprot_human gene info 2 uniprot sprot mapping extracted from uniprot_sprot dat file
-uniprot_trembl_human gene info 2 uniprot trembl mapping extracted from uniprot_trembl dat file
-uniprot_isoform the isoform file from UniProt
-output_sprot output file for the isoform-1 sequences extracted from uniprot_sprot
-output_trembl output file for the isoform-1 sequences extracted from uniprot_trembl
(eg. %s -uniprot_sprot_human <<absolute uniprot sprot human file path>>
-uniprot_trembl_human <<absolute uniprot trembl human file path>>
-uniprot_isoform <<absolute uniprot isoform file>>
-output_sprot <<absolute sprot output file path>>
-output_trembl <<absolute trembl output file path>> )
''' % (sys.argv[0], sys.argv[0])
class DataType:
SPROT = 1
TREMBL = 2
def processUniprotDAT(uniprot_human, output):
# get isoform1 sequences from uniprot_human dat files
if output != "" and uniprot_human != "":
# suppose a sequence is always needed UNLESS the field "IsoId=XXXX-1" is explicitly shown in the uniprot dat files AND
# its following field "Sequence" has a value other than "Displayed". For example in the section for "ID ARHG7_HUMAN"
# these lines appear:
# CC Name=4;
# CC IsoId=Q14155-4; Sequence=Displayed;
# CC Name=1;
# CC IsoId=Q14155-1; Sequence=VSP_011032, VSP_011035;
# In the above case. The sequence presented for this section belongs to isoform4 NOT isoform1. So this program
# will grab isoform1 from "uniprot_sprot_varsplic.fasta" instead (Check for more details in the method
# processUniprotIsoformFasta(uniprot_isoform, output) that follows
needSequence = True
gettingSequence = False
primAccession = ""
sequence = ""
outputHandle = open(output, "w")
uniprot_humanHandle = open(uniprot_human, "r")
for line in uniprot_humanHandle:
line = line.rstrip("\n")
idMatcher = re.match("^ID\s+([^\s]*)\s+(.*)", line)
if idMatcher:
needSequence = True
gettingSequence = False
primAccession = ""
sequence = ""
acMatcher = re.match("^AC\s+(.*)", line)
if acMatcher:
accessions = acMatcher.group(1)
if primAccession == "":
primAccession = accessions.split(";")[0]
# removed the condition of -1 primAccession. Now all displayed
# sequences are shown
iso1Matcher = re.match(
"^CC\s+IsoId=(" + primAccession + ");\s*Sequence=([^;]*);", line)
if iso1Matcher:
iso1Id = iso1Matcher.group(1)
if iso1Matcher.group(2) != "Displayed":
unprocessedIDs.append(iso1Id)
needSequence = False
elif needSequence:
if not gettingSequence:
sequenceMatcher = re.match("^SQ\s+.*SEQUENCE\s+.*", line)
if sequenceMatcher:
gettingSequence = True
else:
sequenceEndMatcher = re.match("^//.*", line)
if not sequenceEndMatcher:
sequence += line.replace(" ", "") + "\n"
else:
outputHandle.write(">sp|%s\n" % (primAccession))
outputHandle.write(sequence)
uniprot_humanHandle.close()
outputHandle.close()
def processUniprotIsoformFasta(uniprot_isoform, output):
# get isoform1 sequences from uniprot_isoform file
if output != "" and uniprot_isoform != "":
# first map isoform1 accession number to the position range in the file
isoId = ""
getPreRecord = False
preFileLineNo = 0
recordStartLineNo = preFileLineNo
uniprot_isoformHandle = open(uniprot_isoform, "r")
for line in uniprot_isoformHandle:
line = line.rstrip("\n")
seqHeaderMatcher = re.match("^>sp\|([^\|]+)\|.*", line)
if seqHeaderMatcher:
if getPreRecord:
# save the start and end line # for the previous record
iso12PosInFileMapping[isoId] = (
recordStartLineNo, preFileLineNo)
isoId = seqHeaderMatcher.group(1)
iso1Matcher = re.match("^.*-1$", isoId)
if iso1Matcher:
getPreRecord = True
else:
getPreRecord = False
recordStartLineNo = preFileLineNo + 1
preFileLineNo += 1
uniprot_isoformHandle.close()
# output the sequences for the proteins not displayed in uniprot human
# dat
outputHandle = open(output, "a")
for item in unprocessedIDs:
if item in iso12PosInFileMapping:
linesRange = iso12PosInFileMapping[item]
startLine = linesRange[0]
endLine = linesRange[1]
for step in range(0, endLine - startLine):
outputHandle.write(
linecache.getline(uniprot_isoform, startLine + step))
outputHandle.close()
def _mainFunc():
try:
uniprot_sprot_human = ""
uniprot_trembl_human = ""
uniprot_isoform = ""
output_sprot = ""
output_trembl = ""
for index in range(len(sys.argv)):
if sys.argv[index] == "-uniprot_sprot_human":
uniprot_sprot_human = sys.argv[index + 1].strip()
elif sys.argv[index] == "-uniprot_trembl_human":
uniprot_trembl_human = sys.argv[index + 1].strip()
elif sys.argv[index] == "-uniprot_isoform":
uniprot_isoform = sys.argv[index + 1].strip()
elif sys.argv[index] == "-output_sprot":
output_sprot = sys.argv[index + 1].strip()
elif sys.argv[index] == "-output_trembl":
output_trembl = sys.argv[index + 1].strip()
if uniprot_sprot_human == "" or uniprot_trembl_human == "" or uniprot_isoform == "" or output_sprot == "" or output_trembl == "":
raise Exception("All parameters are required!")
# todo: Memory is in an intensive use when the mappings are pre-loaded.
# Check if PyTables can offer an alternative whenever possible
processUniprotDAT(uniprot_sprot_human, output_sprot)
processUniprotIsoformFasta(uniprot_isoform, output_sprot)
global iso12PosInFileMapping
iso12PosInFileMapping = {}
global unprocessedIDs
unprocessedIDs = []
# todo: Memory is in an intensive use when the mappings are pre-loaded.
# Check if PyTables can offer an alternative whenever possible
processUniprotDAT(uniprot_trembl_human, output_trembl)
processUniprotIsoformFasta(uniprot_isoform, output_trembl)
except Exception:
traceback.print_exc()
showHelp()
if __name__ == "__main__":
_mainFunc()
| 39.772021
| 137
| 0.582074
|
6342ebce761008a22354c2d191759f826cd92020
| 38,800
|
py
|
Python
|
src/commands.py
|
rzr/Limnoria
|
42f9e7667bc513c61d68bb4733b5632e959e8968
|
[
"BSD-3-Clause"
] | null | null | null |
src/commands.py
|
rzr/Limnoria
|
42f9e7667bc513c61d68bb4733b5632e959e8968
|
[
"BSD-3-Clause"
] | null | null | null |
src/commands.py
|
rzr/Limnoria
|
42f9e7667bc513c61d68bb4733b5632e959e8968
|
[
"BSD-3-Clause"
] | null | null | null |
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2009-2010,2015, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Includes wrappers for commands.
"""
import time
import Queue
import getopt
import inspect
import threading
import multiprocessing #python2.6 or later!
try:
import resource
except ImportError: # Windows!
resource = None
from . import callbacks, conf, ircdb, ircmsgs, ircutils, log, utils, world
from .i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization()
###
# Non-arg wrappers -- these just change the behavior of a command without
# changing the arguments given to it.
###
# Thread has to be a non-arg wrapper because by the time we're parsing and
# validating arguments, we're inside the function we'd want to thread.
def thread(f):
"""Makes sure a command spawns a thread when called."""
def newf(self, irc, msg, args, *L, **kwargs):
if world.isMainThread():
targetArgs = (self.callingCommand, irc, msg, args) + tuple(L)
t = callbacks.CommandThread(target=self._callCommand,
args=targetArgs, kwargs=kwargs)
t.start()
else:
f(self, irc, msg, args, *L, **kwargs)
return utils.python.changeFunctionName(newf, f.__name__, f.__doc__)
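# Usage sketch (hypothetical, not from the original source): a plugin command is
# typically made threaded by composing this with wrap() after its definition, e.g.
#
#   def slowcommand(self, irc, msg, args, url):
#       """<url>
#
#       Fetches <url>; may block for a while, so it runs in its own thread."""
#       ...
#   slowcommand = thread(wrap(slowcommand, ['url']))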
class ProcessTimeoutError(Exception):
"""Gets raised when a process is killed due to timeout."""
pass
def process(f, *args, **kwargs):
"""Runs a function <f> in a subprocess.
Several extra keyword arguments can be supplied.
<pn>, the plugin name, and <cn>, the command name, are strings used to
create the process name, for identification purposes.
<timeout>, if supplied, limits the length of execution of target
function to <timeout> seconds."""
timeout = kwargs.pop('timeout', None)
heap_size = kwargs.pop('heap_size', None)
if resource and heap_size is None:
heap_size = resource.RLIM_INFINITY
if world.disableMultiprocessing:
pn = kwargs.pop('pn', 'Unknown')
cn = kwargs.pop('cn', 'unknown')
try:
return f(*args, **kwargs)
except Exception as e:
raise e
try:
q = multiprocessing.Queue()
except OSError:
log.error('Using multiprocessing.Queue raised an OSError.\n'
'This is probably caused by your system denying semaphore\n'
'usage. You should run these two commands:\n'
'\tsudo rmdir /dev/shm\n'
'\tsudo ln -Tsf /{run,dev}/shm\n'
'(See https://github.com/travis-ci/travis-core/issues/187\n'
'for more information about this bug.)\n')
raise
def newf(f, q, *args, **kwargs):
if resource:
rsrc = resource.RLIMIT_DATA
resource.setrlimit(rsrc, (heap_size, heap_size))
try:
r = f(*args, **kwargs)
q.put(r)
except Exception as e:
q.put(e)
targetArgs = (f, q,) + args
p = callbacks.CommandProcess(target=newf,
args=targetArgs, kwargs=kwargs)
p.start()
p.join(timeout)
if p.is_alive():
p.terminate()
q.close()
raise ProcessTimeoutError("%s aborted due to timeout." % (p.name,))
try:
v = q.get(block=False)
except Queue.Empty:
return None
finally:
q.close()
if isinstance(v, Exception):
raise v
else:
return v
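# Usage sketch (hypothetical, not from the original source): run an expensive,
# picklable function in a subprocess, aborting after 10 seconds, e.g.
#
#   try:
#       result = process(expensive_function, arg1, arg2,
#                        timeout=10, pn='MyPlugin', cn='mycommand')
#   except ProcessTimeoutError:
#       result = None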
def regexp_wrapper(s, reobj, timeout, plugin_name, fcn_name):
'''A convenient wrapper to stuff regexp search queries through a subprocess.
This is used because specially-crafted regexps can use exponential time
and hang the bot.'''
def re_bool(s, reobj):
"""Since we can't enqueue match objects into the multiprocessing queue,
we'll just wrap the function to return bools."""
if reobj.search(s) is not None:
return True
else:
return False
try:
v = process(re_bool, s, reobj, timeout=timeout, pn=plugin_name, cn=fcn_name)
return v
except ProcessTimeoutError:
return False
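# Usage sketch (hypothetical, not from the original source): guard a
# user-supplied pattern against catastrophic backtracking, e.g.
#
#   reobj = re.compile(user_pattern)  # the caller imports re
#   if regexp_wrapper(text, reobj, timeout=4,
#                     plugin_name='MyPlugin', fcn_name='search'):
#       ...  # the pattern matched within the time limit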
class UrlSnarfThread(world.SupyThread):
def __init__(self, *args, **kwargs):
assert 'url' in kwargs
kwargs['name'] = 'Thread #%s (for snarfing %s)' % \
(world.threadsSpawned, kwargs.pop('url'))
super(UrlSnarfThread, self).__init__(*args, **kwargs)
self.setDaemon(True)
def run(self):
try:
super(UrlSnarfThread, self).run()
except utils.web.Error as e:
log.debug('Exception in urlSnarfer: %s', utils.exnToString(e))
class SnarfQueue(ircutils.FloodQueue):
timeout = conf.supybot.snarfThrottle
def key(self, channel):
return channel
_snarfed = SnarfQueue()
class SnarfIrc(object):
def __init__(self, irc, channel, url):
self.irc = irc
self.url = url
self.channel = channel
def __getattr__(self, attr):
return getattr(self.irc, attr)
def reply(self, *args, **kwargs):
_snarfed.enqueue(self.channel, self.url)
return self.irc.reply(*args, **kwargs)
# This lock is used to serialize the calls to snarfers, so
# earlier snarfers are guaranteed to beat out later snarfers.
_snarfLock = threading.Lock()
def urlSnarfer(f):
"""Protects the snarfer from loops (with other bots) and whatnot."""
def newf(self, irc, msg, match, *L, **kwargs):
url = match.group(0)
channel = msg.args[0]
if not irc.isChannel(channel) or (ircmsgs.isCtcp(msg) and not
ircmsgs.isAction(msg)):
return
if ircdb.channels.getChannel(channel).lobotomized:
self.log.debug('Not snarfing in %s: lobotomized.', channel)
return
if _snarfed.has(channel, url):
self.log.info('Throttling snarf of %s in %s.', url, channel)
return
irc = SnarfIrc(irc, channel, url)
def doSnarf():
_snarfLock.acquire()
try:
# This has to be *after* we've acquired the lock so we can be
# sure that all previous urlSnarfers have already run to
# completion.
if msg.repliedTo:
self.log.debug('Not snarfing, msg is already repliedTo.')
return
f(self, irc, msg, match, *L, **kwargs)
finally:
_snarfLock.release()
if threading.currentThread() is not world.mainThread:
doSnarf()
else:
L = list(L)
t = UrlSnarfThread(target=doSnarf, url=url)
t.start()
newf = utils.python.changeFunctionName(newf, f.__name__, f.__doc__)
return newf
###
# Converters, which take irc, msg, args, and a state object, and build up the
# validated and converted args for the method in state.args.
###
# This is just so we can centralize this, since it may change.
def _int(s):
base = 10
if s.startswith('0x'):
base = 16
s = s[2:]
elif s.startswith('0b'):
base = 2
s = s[2:]
elif s.startswith('0') and len(s) > 1:
base = 8
s = s[1:]
try:
return int(s, base)
except ValueError:
if base == 10 and '.' not in s:
try:
return int(float(s))
except OverflowError:
raise ValueError('I don\'t understand numbers that large.')
else:
raise
def getInt(irc, msg, args, state, type=_('integer'), p=None):
try:
i = _int(args[0])
if p is not None:
if not p(i):
state.errorInvalid(type, args[0])
state.args.append(i)
del args[0]
except ValueError:
state.errorInvalid(type, args[0])
def getNonInt(irc, msg, args, state, type=_('non-integer value')):
try:
i = _int(args[0])
state.errorInvalid(type, args[0])
except ValueError:
state.args.append(args.pop(0))
def getLong(irc, msg, args, state, type='long'):
getInt(irc, msg, args, state, type)
state.args[-1] = long(state.args[-1])
def getFloat(irc, msg, args, state, type=_('floating point number')):
try:
state.args.append(float(args[0]))
del args[0]
except ValueError:
state.errorInvalid(type, args[0])
def getPositiveInt(irc, msg, args, state, *L):
getInt(irc, msg, args, state,
p=lambda i: i>0, type=_('positive integer'), *L)
def getNonNegativeInt(irc, msg, args, state, *L):
getInt(irc, msg, args, state,
p=lambda i: i>=0, type=_('non-negative integer'), *L)
def getIndex(irc, msg, args, state):
getInt(irc, msg, args, state, type=_('index'))
if state.args[-1] > 0:
state.args[-1] -= 1
def getId(irc, msg, args, state, kind=None):
type = 'id'
if kind is not None and not kind.endswith('id'):
type = kind + ' id'
original = args[0]
try:
args[0] = args[0].lstrip('#')
getInt(irc, msg, args, state, type=type)
except Exception as e:
args[0] = original
raise
def getExpiry(irc, msg, args, state):
now = int(time.time())
try:
expires = _int(args[0])
if expires:
expires += now
state.args.append(expires)
del args[0]
except ValueError:
state.errorInvalid(_('number of seconds'), args[0])
def getBoolean(irc, msg, args, state):
try:
state.args.append(utils.str.toBool(args[0]))
del args[0]
except ValueError:
state.errorInvalid(_('boolean'), args[0])
def getNetworkIrc(irc, msg, args, state, errorIfNoMatch=False):
if args:
for otherIrc in world.ircs:
if otherIrc.network.lower() == args[0].lower():
state.args.append(otherIrc)
del args[0]
return
if errorIfNoMatch:
raise callbacks.ArgumentError
else:
state.args.append(irc)
def getHaveVoice(irc, msg, args, state, action=_('do that')):
getChannel(irc, msg, args, state)
if state.channel not in irc.state.channels:
state.error(_('I\'m not even in %s.') % state.channel, Raise=True)
if not irc.state.channels[state.channel].isVoice(irc.nick):
state.error(_('I need to be voiced to %s.') % action, Raise=True)
def getHaveVoicePlus(irc, msg, args, state, action=_('do that')):
getChannel(irc, msg, args, state)
if state.channel not in irc.state.channels:
state.error(_('I\'m not even in %s.') % state.channel, Raise=True)
if not irc.state.channels[state.channel].isVoicePlus(irc.nick):
# isOp includes owners and protected users
state.error(_('I need to be at least voiced to %s.') % action,
Raise=True)
def getHaveHalfop(irc, msg, args, state, action=_('do that')):
getChannel(irc, msg, args, state)
if state.channel not in irc.state.channels:
state.error(_('I\'m not even in %s.') % state.channel, Raise=True)
if not irc.state.channels[state.channel].isHalfop(irc.nick):
state.error(_('I need to be halfopped to %s.') % action, Raise=True)
def getHaveHalfopPlus(irc, msg, args, state, action=_('do that')):
getChannel(irc, msg, args, state)
if state.channel not in irc.state.channels:
state.error(_('I\'m not even in %s.') % state.channel, Raise=True)
if not irc.state.channels[state.channel].isHalfopPlus(irc.nick):
# isOp includes owners and protected users
state.error(_('I need to be at least halfopped to %s.') % action,
Raise=True)
def getHaveOp(irc, msg, args, state, action=_('do that')):
getChannel(irc, msg, args, state)
if state.channel not in irc.state.channels:
state.error(_('I\'m not even in %s.') % state.channel, Raise=True)
if not irc.state.channels[state.channel].isOp(irc.nick):
state.error(_('I need to be opped to %s.') % action, Raise=True)
def validChannel(irc, msg, args, state):
if irc.isChannel(args[0]):
state.args.append(args.pop(0))
else:
state.errorInvalid(_('channel'), args[0])
def getHostmask(irc, msg, args, state):
if ircutils.isUserHostmask(args[0]) or \
(not conf.supybot.protocols.irc.strictRfc() and
args[0].startswith('$')):
state.args.append(args.pop(0))
else:
try:
hostmask = irc.state.nickToHostmask(args[0])
state.args.append(hostmask)
del args[0]
except KeyError:
state.errorInvalid(_('nick or hostmask'), args[0])
def getBanmask(irc, msg, args, state):
getHostmask(irc, msg, args, state)
getChannel(irc, msg, args, state)
channel = state.channel
banmaskstyle = conf.supybot.protocols.irc.banmask
state.args[-1] = banmaskstyle.makeBanmask(state.args[-1])
def getUser(irc, msg, args, state):
try:
state.args.append(ircdb.users.getUser(msg.prefix))
except KeyError:
state.errorNotRegistered(Raise=True)
def getOtherUser(irc, msg, args, state):
# Although ircdb.users.getUser could accept a hostmask, we're explicitly
# excluding that from our interface with this check
if ircutils.isUserHostmask(args[0]):
state.errorNoUser(args[0])
try:
state.args.append(ircdb.users.getUser(args[0]))
del args[0]
except KeyError:
try:
getHostmask(irc, msg, [args[0]], state)
hostmask = state.args.pop()
state.args.append(ircdb.users.getUser(hostmask))
del args[0]
except (KeyError, callbacks.Error):
state.errorNoUser(name=args[0])
def _getRe(f):
def get(irc, msg, args, state, convert=True):
original = args[:]
s = args.pop(0)
def isRe(s):
try:
foo = f(s)
return True
except ValueError:
return False
try:
while len(s) < 512 and not isRe(s):
s += ' ' + args.pop(0)
if len(s) < 512:
if convert:
state.args.append(f(s))
else:
state.args.append(s)
else:
raise ValueError
except (ValueError, IndexError):
args[:] = original
state.errorInvalid(_('regular expression'), s)
return get
getMatcher = _getRe(utils.str.perlReToPythonRe)
getMatcherMany = _getRe(utils.str.perlReToFindall)
getReplacer = _getRe(utils.str.perlReToReplacer)
def getNick(irc, msg, args, state):
if ircutils.isNick(args[0], conf.supybot.protocols.irc.strictRfc()):
if 'nicklen' in irc.state.supported:
if len(args[0]) > irc.state.supported['nicklen']:
state.errorInvalid(_('nick'), args[0],
_('That nick is too long for this server.'))
state.args.append(args.pop(0))
else:
state.errorInvalid(_('nick'), args[0])
def getSeenNick(irc, msg, args, state, errmsg=None):
try:
foo = irc.state.nickToHostmask(args[0])
state.args.append(args.pop(0))
except KeyError:
if errmsg is None:
errmsg = _('I haven\'t seen %s.') % args[0]
state.error(errmsg, Raise=True)
def getChannel(irc, msg, args, state):
if state.channel:
return
if args and irc.isChannel(args[0]):
channel = args.pop(0)
elif irc.isChannel(msg.args[0]):
channel = msg.args[0]
else:
state.log.debug('Raising ArgumentError because there is no channel.')
raise callbacks.ArgumentError
state.channel = channel
state.args.append(channel)
def getChannelDb(irc, msg, args, state, **kwargs):
channelSpecific = conf.supybot.databases.plugins.channelSpecific
try:
getChannel(irc, msg, args, state, **kwargs)
channel = channelSpecific.getChannelLink(state.channel)
state.channel = channel
state.args[-1] = channel
except (callbacks.ArgumentError, IndexError):
if channelSpecific():
raise
channel = channelSpecific.link()
if not conf.get(channelSpecific.link.allow, channel):
log.warning('channelSpecific.link is globally set to %s, but '
'%s disallowed linking to its db.', channel, channel)
raise
else:
channel = channelSpecific.getChannelLink(channel)
state.channel = channel
state.args.append(channel)
def inChannel(irc, msg, args, state):
getChannel(irc, msg, args, state)
if state.channel not in irc.state.channels:
state.error(_('I\'m not in %s.') % state.channel, Raise=True)
def onlyInChannel(irc, msg, args, state):
if not (irc.isChannel(msg.args[0]) and msg.args[0] in irc.state.channels):
state.error(_('This command may only be given in a channel that I am '
'in.'), Raise=True)
else:
state.channel = msg.args[0]
state.args.append(state.channel)
def callerInGivenChannel(irc, msg, args, state):
channel = args[0]
if irc.isChannel(channel):
if channel in irc.state.channels:
if msg.nick in irc.state.channels[channel].users:
state.args.append(args.pop(0))
else:
state.error(_('You must be in %s.') % channel, Raise=True)
else:
state.error(_('I\'m not in %s.') % channel, Raise=True)
else:
state.errorInvalid(_('channel'), args[0])
def nickInChannel(irc, msg, args, state):
originalArgs = state.args[:]
inChannel(irc, msg, args, state)
state.args = originalArgs
if args[0] not in irc.state.channels[state.channel].users:
state.error(_('%s is not in %s.') % (args[0], state.channel), Raise=True)
state.args.append(args.pop(0))
def getChannelOrNone(irc, msg, args, state):
try:
getChannel(irc, msg, args, state)
except callbacks.ArgumentError:
state.args.append(None)
def getChannelOrGlobal(irc, msg, args, state):
if args and args[0] == 'global':
channel = args.pop(0)
channel = 'global'
elif args and irc.isChannel(args[0]):
channel = args.pop(0)
state.channel = channel
elif irc.isChannel(msg.args[0]):
channel = msg.args[0]
state.channel = channel
else:
state.log.debug('Raising ArgumentError because there is no channel.')
raise callbacks.ArgumentError
state.args.append(channel)
def checkChannelCapability(irc, msg, args, state, cap):
getChannel(irc, msg, args, state)
cap = ircdb.canonicalCapability(cap)
cap = ircdb.makeChannelCapability(state.channel, cap)
if not ircdb.checkCapability(msg.prefix, cap):
state.errorNoCapability(cap, Raise=True)
def getOp(irc, msg, args, state):
checkChannelCapability(irc, msg, args, state, 'op')
def getHalfop(irc, msg, args, state):
checkChannelCapability(irc, msg, args, state, 'halfop')
def getVoice(irc, msg, args, state):
checkChannelCapability(irc, msg, args, state, 'voice')
def getLowered(irc, msg, args, state):
state.args.append(ircutils.toLower(args.pop(0)))
def getSomething(irc, msg, args, state, errorMsg=None, p=None):
if p is None:
p = lambda _: True
if not args[0] or not p(args[0]):
if errorMsg is None:
errorMsg = _('You must not give the empty string as an argument.')
state.error(errorMsg, Raise=True)
else:
state.args.append(args.pop(0))
def getSomethingNoSpaces(irc, msg, args, state, *L):
def p(s):
return len(s.split(None, 1)) == 1
L = L or [_('You must not give a string containing spaces as an argument.')]
getSomething(irc, msg, args, state, p=p, *L)
def private(irc, msg, args, state):
if irc.isChannel(msg.args[0]):
state.errorRequiresPrivacy(Raise=True)
def public(irc, msg, args, state, errmsg=None):
if not irc.isChannel(msg.args[0]):
if errmsg is None:
errmsg = _('This message must be sent in a channel.')
state.error(errmsg, Raise=True)
def checkCapability(irc, msg, args, state, cap):
cap = ircdb.canonicalCapability(cap)
if not ircdb.checkCapability(msg.prefix, cap):
state.errorNoCapability(cap, Raise=True)
def checkCapabilityButIgnoreOwner(irc, msg, args, state, cap):
cap = ircdb.canonicalCapability(cap)
if not ircdb.checkCapability(msg.prefix, cap, ignoreOwner=True):
state.errorNoCapability(cap, Raise=True)
def owner(irc, msg, args, state):
checkCapability(irc, msg, args, state, 'owner')
def admin(irc, msg, args, state):
checkCapability(irc, msg, args, state, 'admin')
def anything(irc, msg, args, state):
state.args.append(args.pop(0))
def getGlob(irc, msg, args, state):
glob = args.pop(0)
if '*' not in glob and '?' not in glob:
glob = '*%s*' % glob
state.args.append(glob)
def getUrl(irc, msg, args, state):
if utils.web.urlRe.match(args[0]):
state.args.append(args.pop(0))
else:
state.errorInvalid(_('url'), args[0])
def getEmail(irc, msg, args, state):
if utils.net.emailRe.match(args[0]):
state.args.append(args.pop(0))
else:
state.errorInvalid(_('email'), args[0])
def getHttpUrl(irc, msg, args, state):
if utils.web.httpUrlRe.match(args[0]):
state.args.append(args.pop(0))
elif utils.web.httpUrlRe.match('http://' + args[0]):
state.args.append('http://' + args.pop(0))
else:
state.errorInvalid(_('http url'), args[0])
def getNow(irc, msg, args, state):
state.args.append(int(time.time()))
def getCommandName(irc, msg, args, state):
if ' ' in args[0]:
state.errorInvalid(_('command name'), args[0])
else:
state.args.append(callbacks.canonicalName(args.pop(0)))
def getIp(irc, msg, args, state):
if utils.net.isIP(args[0]):
state.args.append(args.pop(0))
else:
state.errorInvalid(_('ip'), args[0])
def getLetter(irc, msg, args, state):
if len(args[0]) == 1:
state.args.append(args.pop(0))
else:
state.errorInvalid(_('letter'), args[0])
def getMatch(irc, msg, args, state, regexp, errmsg):
m = regexp.search(args[0])
if m is not None:
state.args.append(m)
del args[0]
else:
state.error(errmsg, Raise=True)
def getLiteral(irc, msg, args, state, literals, errmsg=None):
# ??? Should we allow abbreviations?
if isinstance(literals, basestring):
literals = (literals,)
abbrevs = utils.abbrev(literals)
if args[0] in abbrevs:
state.args.append(abbrevs[args.pop(0)])
elif errmsg is not None:
state.error(errmsg, Raise=True)
else:
raise callbacks.ArgumentError
def getTo(irc, msg, args, state):
if args[0].lower() == 'to':
args.pop(0)
def getPlugin(irc, msg, args, state, require=True):
cb = irc.getCallback(args[0])
if cb is not None:
state.args.append(cb)
del args[0]
elif require:
state.errorInvalid(_('plugin'), args[0])
else:
state.args.append(None)
def getIrcColor(irc, msg, args, state):
if args[0] in ircutils.mircColors:
state.args.append(ircutils.mircColors[args.pop(0)])
else:
state.errorInvalid(_('irc color'))
def getText(irc, msg, args, state):
if args:
state.args.append(' '.join(args))
args[:] = []
else:
raise IndexError
wrappers = ircutils.IrcDict({
'admin': admin,
'anything': anything,
'banmask': getBanmask,
'boolean': getBoolean,
'callerInGivenChannel': callerInGivenChannel,
'isGranted': getHaveHalfopPlus, # Backward compatibility
'capability': getSomethingNoSpaces,
'channel': getChannel,
'channelOrGlobal': getChannelOrGlobal,
'channelDb': getChannelDb,
'checkCapability': checkCapability,
'checkCapabilityButIgnoreOwner': checkCapabilityButIgnoreOwner,
'checkChannelCapability': checkChannelCapability,
'color': getIrcColor,
'commandName': getCommandName,
'email': getEmail,
'expiry': getExpiry,
'filename': getSomething, # XXX Check for validity.
'float': getFloat,
'glob': getGlob,
'halfop': getHalfop,
'haveHalfop': getHaveHalfop,
'haveHalfop+': getHaveHalfopPlus,
'haveOp': getHaveOp,
'haveOp+': getHaveOp, # We don't handle modes greater than op.
'haveVoice': getHaveVoice,
'haveVoice+': getHaveVoicePlus,
'hostmask': getHostmask,
'httpUrl': getHttpUrl,
'id': getId,
'inChannel': inChannel,
'index': getIndex,
'int': getInt,
'ip': getIp,
'letter': getLetter,
'literal': getLiteral,
'long': getLong,
'lowered': getLowered,
'matches': getMatch,
'networkIrc': getNetworkIrc,
'nick': getNick,
'nickInChannel': nickInChannel,
'nonInt': getNonInt,
'nonNegativeInt': getNonNegativeInt,
'now': getNow,
'onlyInChannel': onlyInChannel,
'op': getOp,
'otherUser': getOtherUser,
'owner': owner,
'plugin': getPlugin,
'positiveInt': getPositiveInt,
'private': private,
'public': public,
'regexpMatcher': getMatcher,
'regexpMatcherMany': getMatcherMany,
'regexpReplacer': getReplacer,
'seenNick': getSeenNick,
'something': getSomething,
'somethingWithoutSpaces': getSomethingNoSpaces,
'text': getText,
'to': getTo,
'url': getUrl,
'user': getUser,
'validChannel': validChannel,
'voice': getVoice,
})
def addConverter(name, wrapper):
wrappers[name] = wrapper
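# Sketch of a custom converter (hypothetical, not from the original source):
# a converter takes (irc, msg, args, state) plus any extra spec arguments and
# either appends a converted value to state.args or signals an error, e.g.
#
#   def getShout(irc, msg, args, state):
#       state.args.append(args.pop(0).upper())
#   addConverter('shout', getShout)
#
# after which 'shout' can be used in wrap() specs like any built-in converter.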
class UnknownConverter(KeyError):
pass
def getConverter(name):
try:
return wrappers[name]
except KeyError as e:
raise UnknownConverter(str(e))
def callConverter(name, irc, msg, args, state, *L):
getConverter(name)(irc, msg, args, state, *L)
###
# Contexts. These determine what the nature of conversions is; whether they're
# defaulted, or many of them are allowed, etc. Contexts should be reusable;
# i.e., they should not maintain state between calls.
###
def contextify(spec):
if not isinstance(spec, context):
spec = context(spec)
return spec
def setDefault(state, default):
if callable(default):
state.args.append(default())
else:
state.args.append(default)
class context(object):
def __init__(self, spec):
self.args = ()
self.spec = spec # for repr
if isinstance(spec, tuple):
assert spec, 'tuple spec must not be empty.'
self.args = spec[1:]
self.converter = getConverter(spec[0])
elif spec is None:
self.converter = getConverter('anything')
elif isinstance(spec, basestring):
self.args = ()
self.converter = getConverter(spec)
else:
assert isinstance(spec, context)
self.converter = spec
def __call__(self, irc, msg, args, state):
log.debug('args before %r: %r', self, args)
self.converter(irc, msg, args, state, *self.args)
log.debug('args after %r: %r', self, args)
def __repr__(self):
return '<%s for %s>' % (self.__class__.__name__, self.spec)
class rest(context):
def __call__(self, irc, msg, args, state):
if args:
original = args[:]
args[:] = [' '.join(args)]
try:
super(rest, self).__call__(irc, msg, args, state)
except Exception as e:
args[:] = original
else:
raise IndexError
# additional means: Look for this (and make sure it's of this type). If
# there are no arguments for us to check, then use our default.
class additional(context):
def __init__(self, spec, default=None):
self.__parent = super(additional, self)
self.__parent.__init__(spec)
self.default = default
def __call__(self, irc, msg, args, state):
try:
self.__parent.__call__(irc, msg, args, state)
except IndexError:
log.debug('Got IndexError, returning default.')
setDefault(state, self.default)
# optional means: Look for this, but if it's not the type I'm expecting or
# there are no arguments for us to check, then use the default value.
class optional(additional):
def __call__(self, irc, msg, args, state):
try:
super(optional, self).__call__(irc, msg, args, state)
except (callbacks.ArgumentError, callbacks.Error) as e:
log.debug('Got %s, returning default.', utils.exnToString(e))
state.errored = False
setDefault(state, self.default)
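# Spec sketch (hypothetical, not from the original source): 'additional' consumes
# an argument of the given type if one is present, while 'optional' also falls
# back to its default when the next argument is of the wrong type, e.g.
#
#   wrap(mycommand, [optional('int', 10), additional('text')])
#
# accepts "mycommand", "mycommand 5", or "mycommand 5 some text".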
class any(context):
def __init__(self, spec, continueOnError=False):
self.__parent = super(any, self)
self.__parent.__init__(spec)
self.continueOnError = continueOnError
def __call__(self, irc, msg, args, state):
st = state.essence()
try:
while args:
self.__parent.__call__(irc, msg, args, st)
except IndexError:
pass
except (callbacks.ArgumentError, callbacks.Error) as e:
if not self.continueOnError:
raise
else:
log.debug('Got %s, returning default.', utils.exnToString(e))
pass
state.args.append(st.args)
class many(any):
def __call__(self, irc, msg, args, state):
super(many, self).__call__(irc, msg, args, state)
if not state.args[-1]:
state.args.pop()
raise callbacks.ArgumentError
class first(context):
def __init__(self, *specs, **kw):
if 'default' in kw:
self.default = kw.pop('default')
assert not kw, 'Bad kwargs for first.__init__'
self.spec = specs # for __repr__
self.specs = list(map(contextify, specs))
def __call__(self, irc, msg, args, state):
errored = False
for spec in self.specs:
try:
spec(irc, msg, args, state)
return
except Exception as e:
e2 = e # 'e' is local.
errored = state.errored
state.errored = False
continue
if hasattr(self, 'default'):
state.args.append(self.default)
else:
state.errored = errored
raise e2
class reverse(context):
def __call__(self, irc, msg, args, state):
args[:] = args[::-1]
super(reverse, self).__call__(irc, msg, args, state)
args[:] = args[::-1]
class commalist(context):
def __call__(self, irc, msg, args, state):
original = args[:]
st = state.essence()
trailingComma = True
try:
while trailingComma:
arg = args.pop(0)
if not arg.endswith(','):
trailingComma = False
for part in arg.split(','):
if part: # trailing commas
super(commalist, self).__call__(irc, msg, [part], st)
state.args.append(st.args)
except Exception as e:
args[:] = original
raise
class getopts(context):
"""The empty string indicates that no argument is taken; None indicates
that there is no converter for the argument."""
def __init__(self, getopts):
self.spec = getopts # for repr
self.getopts = {}
self.getoptL = []
self.getoptLs = ''
for (name, spec) in getopts.iteritems():
if spec == '':
if len(name) == 1:
self.getoptLs += name
self.getopts[name] = None
self.getoptL.append(name)
self.getopts[name] = None
else:
if len(name) == 1:
self.getoptLs += name + ':'
self.getopts[name] = contextify(spec)
self.getoptL.append(name + '=')
self.getopts[name] = contextify(spec)
log.debug('getopts: %r', self.getopts)
log.debug('getoptL: %r', self.getoptL)
def __call__(self, irc, msg, args, state):
log.debug('args before %r: %r', self, args)
(optlist, rest) = getopt.getopt(args, self.getoptLs, self.getoptL)
getopts = []
for (opt, arg) in optlist:
if opt.startswith('--'):
opt = opt[2:] # Strip --
else:
opt = opt[1:]
log.debug('opt: %r, arg: %r', opt, arg)
context = self.getopts[opt]
if context is not None:
st = state.essence()
context(irc, msg, [arg], st)
assert len(st.args) == 1
getopts.append((opt, st.args[0]))
else:
getopts.append((opt, True))
state.args.append(getopts)
args[:] = rest
log.debug('args after %r: %r', self, args)
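# Spec sketch (hypothetical, not from the original source): as the class
# docstring says, an empty string marks a flag that takes no argument, while
# other values name the converter for the option's argument, e.g.
#
#   wrap(mycommand, [getopts({'count': 'int', 'all': ''}), 'text'])
#
# accepts "mycommand --count 3 --all the rest" and passes the command a list
# like [('count', 3), ('all', True)] followed by the remaining text.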
###
# This is our state object, passed to converters along with irc, msg, and args.
###
class State(object):
log = log
def __init__(self, types):
self.args = []
self.kwargs = {}
self.types = types
self.channel = None
self.errored = False
def __getattr__(self, attr):
if attr.startswith('error'):
self.errored = True
return getattr(dynamic.irc, attr)
else:
raise AttributeError(attr)
def essence(self):
st = State(self.types)
for (attr, value) in self.__dict__.iteritems():
if attr not in ('args', 'kwargs'):
setattr(st, attr, value)
return st
def __repr__(self):
return '%s(args=%r, kwargs=%r, channel=%r)' % (self.__class__.__name__,
self.args, self.kwargs,
self.channel)
###
# This is a compiled Spec object.
###
class Spec(object):
def _state(self, types, attrs={}):
st = State(types)
st.__dict__.update(attrs)
st.allowExtra = self.allowExtra
return st
def __init__(self, types, allowExtra=False):
self.types = types
self.allowExtra = allowExtra
utils.seq.mapinto(contextify, self.types)
def __call__(self, irc, msg, args, stateAttrs={}):
state = self._state(self.types[:], stateAttrs)
while state.types:
context = state.types.pop(0)
try:
context(irc, msg, args, state)
except IndexError:
raise callbacks.ArgumentError
if args and not state.allowExtra:
log.debug('args and not self.allowExtra: %r', args)
raise callbacks.ArgumentError
return state
def _wrap(f, specList=[], name=None, checkDoc=True, **kw):
name = name or f.__name__
assert (not checkDoc) or (hasattr(f, '__doc__') and f.__doc__), \
'Command %r has no docstring.' % name
spec = Spec(specList, **kw)
def newf(self, irc, msg, args, **kwargs):
state = spec(irc, msg, args, stateAttrs={'cb': self, 'log': self.log})
self.log.debug('State before call: %s', state)
if state.errored:
self.log.debug('Refusing to call %s due to state.errored.', f)
else:
try:
f(self, irc, msg, args, *state.args, **state.kwargs)
except TypeError:
self.log.error('Spec: %s', specList)
self.log.error('Received args: %s', args)
code = f.__code__
funcArgs = inspect.getargs(code)[0][len(self.commandArgs):]
self.log.error('Extra args: %s', funcArgs)
self.log.debug('Make sure you did not wrap a wrapped '
'function ;)')
raise
newf2 = utils.python.changeFunctionName(newf, name, f.__doc__)
newf2.__module__ = f.__module__
return internationalizeDocstring(newf2)
def wrap(f, *args, **kwargs):
if callable(f):
# Old-style call OR decorator syntax with no converter.
# f is the command.
return _wrap(f, *args, **kwargs)
else:
# Call with the Python decorator syntax
assert isinstance(f, list) or isinstance(f, tuple)
specList = f
def decorator(f):
return _wrap(f, specList, *args, **kwargs)
return decorator
wrap.__doc__ = """Useful wrapper for plugin commands.
Valid converters are: %s.
:param f: A command, taking (self, irc, msg, args, ...) as arguments
:param specList: A list of converters and contexts""" % \
', '.join(sorted(wrappers.keys()))
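# Usage sketch (hypothetical, not from the original source): a typical plugin
# command declared with wrap, e.g.
#
#   def repeat(self, irc, msg, args, count, text):
#       """<count> <text>
#
#       Replies with <text> repeated <count> times."""
#       irc.reply(' '.join([text] * count))
#   repeat = wrap(repeat, ['positiveInt', 'text'])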
__all__ = [
# Contexts.
'any', 'many',
'optional', 'additional',
'rest', 'getopts',
'first', 'reverse',
'commalist',
# Converter helpers.
'getConverter', 'addConverter', 'callConverter',
# Decorators.
'urlSnarfer', 'thread',
# Functions.
'wrap', 'process', 'regexp_wrapper',
# Stuff for testing.
'Spec',
]
# This doesn't work. Suck.
## if world.testing:
## __all__.append('Spec')
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| 34.185022
| 84
| 0.601701
|
3cfa4c056d405d5018dca69a752b649cfd779995
| 668
|
py
|
Python
|
test/test_nn/test_nonlinear/test_log_softmax.py
|
alexandru-dinu/PRML
|
acd823e098df67abe0306a70225e7539f8edda40
|
[
"MIT"
] | null | null | null |
test/test_nn/test_nonlinear/test_log_softmax.py
|
alexandru-dinu/PRML
|
acd823e098df67abe0306a70225e7539f8edda40
|
[
"MIT"
] | null | null | null |
test/test_nn/test_nonlinear/test_log_softmax.py
|
alexandru-dinu/PRML
|
acd823e098df67abe0306a70225e7539f8edda40
|
[
"MIT"
] | 1
|
2019-06-22T20:56:02.000Z
|
2019-06-22T20:56:02.000Z
|
import unittest
import numpy as np
import prml.nn as nn
class TestLogSoftmax(unittest.TestCase):
def test_forward(self):
npx = np.random.randn(5, 3)
npy = np.log(np.exp(npx) / np.exp(npx).sum(axis=-1, keepdims=True))
self.assertTrue(np.allclose(npy, nn.log_softmax(npx).value))
def test_backward(self):
npx = np.random.randn(1, 5)
x = nn.asarray(npx)
nn.softmax(x).backward()
grad1 = np.copy(x.grad)
x.cleargrad()
nn.exp(nn.log_softmax(x)).backward()
grad2 = np.copy(x.grad)
self.assertTrue(np.allclose(grad1, grad2))
if __name__ == "__main__":
unittest.main()
| 24.740741
| 75
| 0.615269
|
363235539fcc6adef52af3a0174ced5b274d27f5
| 2,170
|
py
|
Python
|
setup.py
|
desbma/pyfastcopy
|
a572042afd8f7f3b761db45aa9ab56e236983211
|
[
"PSF-2.0"
] | 13
|
2016-02-09T09:26:59.000Z
|
2021-12-28T19:51:08.000Z
|
setup.py
|
desbma/pyfastcopy
|
a572042afd8f7f3b761db45aa9ab56e236983211
|
[
"PSF-2.0"
] | 1
|
2016-07-31T14:50:54.000Z
|
2016-07-31T17:21:49.000Z
|
setup.py
|
desbma/pyfastcopy
|
a572042afd8f7f3b761db45aa9ab56e236983211
|
[
"PSF-2.0"
] | 1
|
2016-07-31T14:50:12.000Z
|
2016-07-31T14:50:12.000Z
|
#!/usr/bin/env python3
import os
import re
import sys
import time
from setuptools import find_packages, setup
with open(os.path.join("pyfastcopy", "__init__.py"), "rt") as f:
version = re.search("__version__ = \"([^\"]+)\"", f.read()).group(1)
requirements = []
if not hasattr(os, "sendfile") and not sys.platform.startswith("win32"):
requirements.append("pysendfile")
try:
import unittest.mock
except ImportError:
requirements.append("mock")
if not hasattr(time, "monotonic"):
requirements.append("monotonic")
try:
import pypandoc
readme = pypandoc.convert("README.md", "rst")
except ImportError:
with open("README.md", "rt") as f:
readme = f.read()
setup(name="pyfastcopy",
version=version,
author="desbma",
packages=find_packages(exclude=("tests",)),
test_suite="tests",
install_requires=requirements,
description="Speed up shutil.copyfile by using sendfile system call",
long_description=readme,
url="https://github.com/desbma/pyfastcopy",
download_url="https://github.com/desbma/pyfastcopy/archive/%s.tar.gz" % (version),
keywords=["shutil", "copy", "copyfile", "file", "performance", "speed", "sendfile"],
classifiers=["Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Python Software Foundation License",
"Operating System :: Unix",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.0",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Software Development :: Libraries :: Python Modules"])
| 37.413793
| 90
| 0.593548
|
4f25fd12e8dc3c286cce0080dc796283bf024b14
| 496
|
py
|
Python
|
hostelmanager/wsgi.py
|
rc4594/Dbms
|
57a160fd4339a884b1ce4ef75fe8489f6ff30fa2
|
[
"MIT"
] | null | null | null |
hostelmanager/wsgi.py
|
rc4594/Dbms
|
57a160fd4339a884b1ce4ef75fe8489f6ff30fa2
|
[
"MIT"
] | null | null | null |
hostelmanager/wsgi.py
|
rc4594/Dbms
|
57a160fd4339a884b1ce4ef75fe8489f6ff30fa2
|
[
"MIT"
] | null | null | null |
"""
WSGI config for hostelmanager project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hostelmanager.settings")
application = get_wsgi_application()
from whitenoise.django import DjangoWhiteNoise
application = DjangoWhiteNoise(application)
| 24.8
| 78
| 0.806452
|
7b75fb41fc86b988ccc3a36793feeb098037aa0c
| 60
|
py
|
Python
|
oebb/__init__.py
|
HerrHofrat/oebb
|
6cc0bb4d369d2715ecf4d9dc28fc00f2d493a8e4
|
[
"MIT"
] | null | null | null |
oebb/__init__.py
|
HerrHofrat/oebb
|
6cc0bb4d369d2715ecf4d9dc28fc00f2d493a8e4
|
[
"MIT"
] | null | null | null |
oebb/__init__.py
|
HerrHofrat/oebb
|
6cc0bb4d369d2715ecf4d9dc28fc00f2d493a8e4
|
[
"MIT"
] | null | null | null |
"""Module to parse gearbest site"""
from .oebb import OeBB
| 15
| 35
| 0.716667
|
217318a2cf00bb8bbf672a4d1c022b8a468be232
| 6,469
|
py
|
Python
|
generators/cppcookie.py
|
jeremyong/xpp
|
f70dd8bb50944ab64ab902d9d8ab555599249dc1
|
[
"MIT"
] | 8
|
2015-03-03T08:49:33.000Z
|
2018-05-05T21:01:12.000Z
|
generators/cppcookie.py
|
jeremyong/xpp
|
f70dd8bb50944ab64ab902d9d8ab555599249dc1
|
[
"MIT"
] | 11
|
2016-08-31T14:54:11.000Z
|
2018-10-14T21:19:41.000Z
|
generators/cppcookie.py
|
jeremyong/xpp
|
f70dd8bb50944ab64ab902d9d8ab555599249dc1
|
[
"MIT"
] | 10
|
2016-08-31T08:30:17.000Z
|
2019-03-24T22:47:38.000Z
|
from utils import _n, _ext, _n_item, get_namespace
_templates = {}
_templates['void_cookie_function'] = \
'''\
%s\
void
%s_checked(Connection && c%s)
{%s\
xpp::generic::check<Connection, xpp::%s::error::dispatcher>(
std::forward<Connection>(c),
%s_checked(std::forward<Connection>(c)%s));
}
%s\
void
%s(Connection && c%s)
{%s\
%s(std::forward<Connection>(c)%s);
}
'''
def _void_cookie_function(ns, name, c_name, template, return_value, protos, calls, initializer):
if len(template) == 0: template = "template<typename Connection>\n"
return _templates['void_cookie_function'] % \
( template
, name
, protos
, initializer
, ns
, c_name
, calls
, template
, name
, protos
, initializer
, c_name
, calls
)
_templates['cookie_static_getter'] = \
'''\
%s\
static
%s
cookie(xcb_connection_t * const c%s)
{%s\
return base::cookie(c%s);
}
'''
def _cookie_static_getter(template, return_value, protos, calls, initializer):
return _templates['cookie_static_getter'] % \
( template
, return_value
, protos
, initializer
, calls
)
class CppCookie(object):
def __init__(self, namespace, is_void, name, reply, parameter_list):
self.namespace = namespace
self.is_void = is_void
self.name = name
self.reply = reply
self.parameter_list = parameter_list
self.request_name = _ext(_n_item(self.name[-1]))
self.c_name = "xcb" \
+ (("_" + get_namespace(namespace)) if namespace.is_ext else "") \
+ "_" + self.request_name
def comma(self):
return self.parameter_list.comma()
def calls(self, sort):
return self.parameter_list.calls(sort)
def protos(self, sort, defaults):
return self.parameter_list.protos(sort, defaults)
def iterator_template(self, indent=" ", tail="\n"):
prefix = "template<typename " + ("Connection, typename " if self.is_void else "")
return indent + prefix \
+ ", typename ".join(self.parameter_list.iterator_templates \
+ self.parameter_list.templates) \
+ ">" + tail \
if len(self.parameter_list.iterator_templates) > 0 \
else ""
def iterator_calls(self, sort):
return self.parameter_list.iterator_calls(sort)
def iterator_protos(self, sort, defaults):
return self.parameter_list.iterator_protos(sort, defaults)
def iterator_initializers(self):
return self.parameter_list.iterator_initializers()
def void_functions(self, protos, calls, template="", initializer=[]):
inits = "" if len(initializer) > 0 else "\n"
for i in initializer:
inits += "\n"
for line in i.split('\n'):
inits += " " + line + "\n"
return_value = "xcb_void_cookie_t"
return _void_cookie_function(get_namespace(self.namespace),
self.request_name,
self.c_name,
template,
return_value,
self.comma() + protos,
self.comma() + calls,
inits)
def static_reply_methods(self, protos, calls, template="", initializer=[]):
inits = "" if len(initializer) > 0 else "\n"
for i in initializer:
inits += "\n"
for line in i.split('\n'):
inits += " " + line + "\n"
if self.is_void: return_value = "xcb_void_cookie_t"
else: return_value = self.c_name + "_cookie_t"
return _cookie_static_getter(template,
return_value,
self.comma() + protos,
self.comma() + calls,
inits)
def make_static_getter(self):
default = self.static_reply_methods(self.protos(False, False), self.calls(False))
if self.parameter_list.has_defaults:
default = self.static_reply_methods(self.protos(True, True), self.calls(False))
wrapped = ""
if self.parameter_list.want_wrap:
wrapped = \
self.static_reply_methods(self.iterator_protos(True, True),
self.iterator_calls(False), self.iterator_template(),
self.iterator_initializers())
default_args = ""
if self.parameter_list.is_reordered():
default_args = \
self.static_reply_methods(self.protos(True, True), self.calls(False))
result = ""
if (self.parameter_list.has_defaults
or self.parameter_list.is_reordered()
or self.parameter_list.want_wrap):
result += default
if self.parameter_list.is_reordered():
result += "\n" + default_args
if self.parameter_list.want_wrap:
result += "\n" + wrapped
return result
def make_void_functions(self):
default = self.void_functions(self.protos(False, False), self.calls(False))
if self.parameter_list.has_defaults:
default = self.void_functions(self.protos(True, True), self.calls(False))
wrapped = ""
if self.parameter_list.want_wrap:
wrapped = \
self.void_functions(self.iterator_protos(True, True),
self.iterator_calls(False),
self.iterator_template(indent=""),
self.iterator_initializers())
default_args = ""
if self.parameter_list.is_reordered():
default_args = \
self.void_functions(self.protos(True, True), self.calls(False))
result = ""
if (self.parameter_list.has_defaults
or self.parameter_list.is_reordered()
or self.parameter_list.want_wrap):
result += default
if self.parameter_list.is_reordered():
result += "\n" + default_args
if self.parameter_list.want_wrap:
result += "\n" + wrapped
return result
| 31.866995
| 96
| 0.547071
|
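The helpers in the generator above are plain %-formatting over the C++ template strings; a hypothetical invocation (argument values invented purely to show the substitution order, not taken from the xpp sources) renders both wrapper functions at once:

# Assumes _void_cookie_function from generators/cppcookie.py above is in scope.
# Passing template="" triggers the default "template<typename Connection>\n".
snippet = _void_cookie_function(
    ns="proto", name="no_operation", c_name="xcb_proto_no_operation",
    template="", return_value="xcb_void_cookie_t",
    protos="", calls="", initializer="\n")
print(snippet)  # prints the checked and unchecked C++ wrappers for the request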
ba1f31c6751fe0d8407d1ed4035ed8cebe0b81e4
| 990
|
py
|
Python
|
simplemooc/simplemooc/accounts/urls.py
|
eduardormonteiro/DjangoSimpleMOOC
|
c59d4ca2432d36c5ff9cc76028e84237ac840a28
|
[
"MIT"
] | null | null | null |
simplemooc/simplemooc/accounts/urls.py
|
eduardormonteiro/DjangoSimpleMOOC
|
c59d4ca2432d36c5ff9cc76028e84237ac840a28
|
[
"MIT"
] | null | null | null |
simplemooc/simplemooc/accounts/urls.py
|
eduardormonteiro/DjangoSimpleMOOC
|
c59d4ca2432d36c5ff9cc76028e84237ac840a28
|
[
"MIT"
] | null | null | null |
from django.urls import path, include
from datetime import datetime
# from django.contrib.auth import views #login, logout
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.auth.views import LoginView, LogoutView, PasswordChangeView
from simplemooc.accounts import forms, views
from simplemooc.accounts.views import register, dashboard, edit, edit_password
urlpatterns = [
    path('login/',
LoginView.as_view
(
template_name='accounts/login.html',
authentication_form=forms.BootstrapAuthenticationForm,
extra_context=
{
'title': 'Log in',
'year' : datetime.now().year,
}
),
name='login'),
    path('register/', register, name='register'),
    path('edit/', edit, name='edit'),
    path('edit_password/', edit_password, name='edit_password'),
    path('logout/', LogoutView.as_view(), name='logout')
]
| 35.357143
| 79
| 0.648485
|
f74eca7e77859726f4fb38addd1950435509cd67
| 1,428
|
py
|
Python
|
froide/accesstoken/migrations/0001_initial.py
|
OpendataCH/froide
|
8136bac0d8caa56f9cfc7ba15480be987280e55d
|
[
"MIT"
] | null | null | null |
froide/accesstoken/migrations/0001_initial.py
|
OpendataCH/froide
|
8136bac0d8caa56f9cfc7ba15480be987280e55d
|
[
"MIT"
] | null | null | null |
froide/accesstoken/migrations/0001_initial.py
|
OpendataCH/froide
|
8136bac0d8caa56f9cfc7ba15480be987280e55d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-09 15:29
from __future__ import unicode_literals
import uuid
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="AccessToken",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"token",
models.UUIDField(db_index=True, default=uuid.uuid4, editable=False),
),
("purpose", models.CharField(max_length=30)),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"verbose_name": "access token",
"verbose_name_plural": "access tokens",
},
),
]
| 27.461538
| 88
| 0.460784
|
11ce73c248a3559230b2ef9340ca09e093a1100e
| 868
|
py
|
Python
|
ceph_medic/__init__.py
|
alfredodeza/ceph-doctor
|
cf48c26636091eb11c4c00e09089680ce72b26bc
|
[
"MIT"
] | 21
|
2017-06-27T15:20:33.000Z
|
2021-09-30T21:50:31.000Z
|
ceph_medic/__init__.py
|
alfredodeza/ceph-doctor
|
cf48c26636091eb11c4c00e09089680ce72b26bc
|
[
"MIT"
] | 86
|
2017-06-21T20:10:13.000Z
|
2021-10-30T00:04:05.000Z
|
ceph_medic/__init__.py
|
alfredodeza/ceph-doctor
|
cf48c26636091eb11c4c00e09089680ce72b26bc
|
[
"MIT"
] | 22
|
2017-08-07T14:34:39.000Z
|
2021-04-07T05:15:26.000Z
|
from collections import namedtuple
class UnloadedConfig(object):
"""
This class is used as the default value for config.ceph so that if
a configuration file is not successfully loaded then it will give
a nice error message when values from the config are used.
"""
def __init__(self, error=None):
self.error = error
def __getattr__(self, *a):
raise RuntimeError(self.error)
config = namedtuple('config', ['verbosity', 'nodes', 'hosts_file', 'file', 'cluster_name'])
config.file = UnloadedConfig("No valid ceph-medic configuration file was loaded")
config.nodes = {}
metadata = {'failed_nodes': {}, 'rgws': {}, 'mgrs': {}, 'mdss': {}, 'clients': {}, 'osds': {}, 'mons': {}, 'nodes': {}, 'cluster': {}}
daemon_types = [i for i in metadata.keys() if i not in ('nodes', 'failed_nodes', 'cluster')]
__version__ = '1.0.8'
| 33.384615
| 134
| 0.65553
|
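A minimal sketch of how the UnloadedConfig placeholder above behaves; the .path attribute accessed here is arbitrary, since any attribute access raises the stored error:

from ceph_medic import config

try:
    config.file.path  # any attribute triggers __getattr__ on UnloadedConfig
except RuntimeError as error:
    print(error)  # -> No valid ceph-medic configuration file was loaded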
04645a915493ec73aaa1b3237c0dda29c27e4b69
| 3,352
|
py
|
Python
|
applications/popart/resnext_inference/get_model.py
|
xihuaiwen/chinese_bert
|
631afbc76c40b0ac033be2186e717885246f446c
|
[
"MIT"
] | null | null | null |
applications/popart/resnext_inference/get_model.py
|
xihuaiwen/chinese_bert
|
631afbc76c40b0ac033be2186e717885246f446c
|
[
"MIT"
] | null | null | null |
applications/popart/resnext_inference/get_model.py
|
xihuaiwen/chinese_bert
|
631afbc76c40b0ac033be2186e717885246f446c
|
[
"MIT"
] | null | null | null |
# Copyright 2019 Graphcore Ltd.
import torch
import torch.onnx
import urllib.request
import pretrainedmodels
import os
import onnx
import argparse
"""
Downloads the model in Pytorch format and converts to ONNX.
Creates copies with different (micro) batch size dimensions.
"""
def get_model(opts):
path = "{}/{}/".format(opts.model_path, opts.model_name)
log_path = "{}/".format(opts.log_path)
filename = "model.onnx"
if not os.path.exists(path):
print("Creating models directory")
os.makedirs(path)
if not os.path.exists(log_path):
print("Creating logs directory")
os.makedirs(log_path)
dataset = "imagenet"
if opts.url:
# Monkey patch an alternate URL into pretrained models package
pretrainedmodels.models.resnext.pretrained_settings[
opts.model_name
][dataset]["url"] = opts.url
print(f"Download URL set to {opts.url}")
pretrained_model_base_path = opts.pretrained_model_path
if not pretrained_model_base_path:
# Get the model. If it doesn't exist it will be downloaded
if not os.path.isfile(path + filename):
print(f"Downloading model to {path + filename}")
pretrained_model_path = path + filename
# Create the right input shape
dummy_input = torch.randn(1, 3, 224, 224)
model = pretrainedmodels.__dict__[opts.model_name](
num_classes=1000, pretrained=dataset)
torch.onnx.export(model, dummy_input, pretrained_model_path)
else:
pretrained_model_path = os.path.join(
pretrained_model_base_path,
opts.model_name,
filename
)
onnx_model = onnx.load(pretrained_model_path)
onnx_model.graph.input[0].type.tensor_type.shape.dim[0].dim_value = opts.micro_batch_size
print(
f"Converting model to batch size {opts.micro_batch_size} and saving to {path + 'model_' + str(opts.micro_batch_size) + '.onnx'}")
onnx.save(onnx_model, path + f"model_{opts.micro_batch_size}.onnx")
parser = argparse.ArgumentParser()
parser.add_argument("--micro-batch-size", type=int, default=1, help="""Batch size per device.
Larger batches can be run with this model by launching the app with resnext_inference_launch.py and passing in a value > 1 for num_ipus """)
parser.add_argument("--model-name", type=str, default='resnext101_32x4d',
help="pretrained model name, according to `pretrainedmodels` Python package")
parser.add_argument(
"--model-path", type=str, default="models",
    help=(
        "If set, the model will be saved to this"
        " specific path, instead of models/"
)
)
parser.add_argument(
"--log-path", type=str, default="logs",
    help=(
        "If set, the logs will be saved to this"
        " specific path, instead of logs/"
)
)
parser.add_argument(
"--url", type=str, default=None,
help="If set, uses an alternate url to get the pretrained model from."
)
parser.add_argument(
"--pretrained-model-path", type=str, default=None,
help=(
"If set, the pretrained model will attempt to be loaded from here,"
" instead of downloaded."
)
)
# set up directory
model_name = 'resnext101_32x4d'
filename = "model.onnx"
if __name__ == "__main__":
opts = parser.parse_args()
get_model(opts)
| 31.92381
| 144
| 0.672733
|
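After running the script above (for example with --micro-batch-size 4 and the default model name), the batch dimension baked into the saved graph can be checked directly; the path below simply follows the script's defaults:

import onnx

model = onnx.load("models/resnext101_32x4d/model_4.onnx")
# Same field the script rewrites before saving.
batch_dim = model.graph.input[0].type.tensor_type.shape.dim[0]
print(batch_dim.dim_value)  # -> 4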
0e75a8b6a65d8b9bf96d85751a9638c781e1b91e
| 7,093
|
py
|
Python
|
kubernetes/client/models/v1beta2_deployment.py
|
iamneha/python
|
5b208a1a49a8d6f8bbab28bcc226b9ef793bcbd0
|
[
"Apache-2.0"
] | 1
|
2019-02-17T15:28:39.000Z
|
2019-02-17T15:28:39.000Z
|
kubernetes/client/models/v1beta2_deployment.py
|
iamneha/python
|
5b208a1a49a8d6f8bbab28bcc226b9ef793bcbd0
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/v1beta2_deployment.py
|
iamneha/python
|
5b208a1a49a8d6f8bbab28bcc226b9ef793bcbd0
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta2Deployment(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1beta2DeploymentSpec',
'status': 'V1beta2DeploymentStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None):
"""
V1beta2Deployment - a model defined in Swagger
"""
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""
Gets the api_version of this V1beta2Deployment.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1beta2Deployment.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1beta2Deployment.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1beta2Deployment.
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""
Gets the kind of this V1beta2Deployment.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1beta2Deployment.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1beta2Deployment.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1beta2Deployment.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1beta2Deployment.
Standard object metadata.
:return: The metadata of this V1beta2Deployment.
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1beta2Deployment.
Standard object metadata.
:param metadata: The metadata of this V1beta2Deployment.
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""
Gets the spec of this V1beta2Deployment.
Specification of the desired behavior of the Deployment.
:return: The spec of this V1beta2Deployment.
:rtype: V1beta2DeploymentSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""
Sets the spec of this V1beta2Deployment.
Specification of the desired behavior of the Deployment.
:param spec: The spec of this V1beta2Deployment.
:type: V1beta2DeploymentSpec
"""
self._spec = spec
@property
def status(self):
"""
Gets the status of this V1beta2Deployment.
Most recently observed status of the Deployment.
:return: The status of this V1beta2Deployment.
:rtype: V1beta2DeploymentStatus
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this V1beta2Deployment.
Most recently observed status of the Deployment.
:param status: The status of this V1beta2Deployment.
:type: V1beta2DeploymentStatus
"""
self._status = status
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta2Deployment):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 29.677824
| 281
| 0.602848
|
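A small usage sketch for the generated model above, assuming the kubernetes client package it ships in; only a few fields are set here and the rest default to None:

from kubernetes import client

deployment = client.V1beta2Deployment(
    api_version="apps/v1beta2",
    kind="Deployment",
    metadata=client.V1ObjectMeta(name="example"),
)
print(deployment.to_dict())  # nested dict, with spec and status left as None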
bf94810dca21d4a24581c9e0148e430822dbcbd4
| 6,724
|
py
|
Python
|
decision_trees/own_parfit/plot.py
|
sudorudu/decision_tree
|
b50ffcb87a60471b0e31b6c80b0964eeb53ad365
|
[
"MIT"
] | 6
|
2018-02-26T11:47:51.000Z
|
2021-11-30T13:36:08.000Z
|
decision_trees/own_parfit/plot.py
|
sudorudu/decision_tree
|
b50ffcb87a60471b0e31b6c80b0964eeb53ad365
|
[
"MIT"
] | 1
|
2018-08-09T12:59:42.000Z
|
2018-08-09T12:59:42.000Z
|
decision_trees/own_parfit/plot.py
|
sudorudu/decision_tree
|
b50ffcb87a60471b0e31b6c80b0964eeb53ad365
|
[
"MIT"
] | 5
|
2017-10-31T02:32:23.000Z
|
2018-12-12T09:09:08.000Z
|
import matplotlib.pyplot as plt
import matplotlib.colorbar as cb
import numpy as np
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning)
__all__ = ["plot1DGrid", "plot2DGrid", "plot3DGrid", "plotScores"]
def plot1DGrid(scores, paramsToPlot, scoreLabel, vrange):
"""
Makes a line plot of scores, over the parameter to plot
:param scores: A list of scores, estimated using scoreModels
:param paramsToPlot: The parameter to plot, chosen automatically by plotScores
:param scoreLabel: The specified score label (dependent on scoring metric used)
:param vrange: The yrange of the plot
"""
key = list(paramsToPlot.keys())
plt.figure(figsize=(int(round(len(paramsToPlot[key[0]]) / 1.33)), 6))
plt.plot(np.linspace(0, max(paramsToPlot[key[0]]), len(
paramsToPlot[key[0]])), scores, '-or')
plt.xlabel(key[0])
plt.xticks(np.linspace(0, max(paramsToPlot[key[0]]), len(
paramsToPlot[key[0]])), paramsToPlot[key[0]])
if scoreLabel is not None:
plt.ylabel(scoreLabel)
else:
plt.ylabel('Score')
if vrange is not None:
plt.ylim(vrange[0], vrange[1])
plt.box(on=False)
plt.show()
def plot2DGrid(scores, paramsToPlot, keysToPlot, scoreLabel, vrange):
"""
Plots a heatmap of scores, over the paramsToPlot
:param scores: A list of scores, estimated using parallelizeScore
:param paramsToPlot: The parameters to plot, chosen automatically by plotScores
:param scoreLabel: The specified score label (dependent on scoring metric used)
:param vrange: The visible range of the heatmap (range you wish the heatmap to be specified over)
"""
scoreGrid = np.reshape(
scores, (len(paramsToPlot[keysToPlot[0]]), len(paramsToPlot[keysToPlot[1]])))
plt.figure(figsize=(int(round(len(paramsToPlot[keysToPlot[1]]) / 1.33)), int(
round(len(paramsToPlot[keysToPlot[0]]) / 1.33))))
if vrange is not None:
plt.imshow(scoreGrid, cmap='jet', vmin=vrange[0], vmax=vrange[1])
else:
plt.imshow(scoreGrid, cmap='jet')
plt.xlabel(keysToPlot[1])
plt.xticks(
np.arange(len(paramsToPlot[keysToPlot[1]])), paramsToPlot[keysToPlot[1]])
plt.ylabel(keysToPlot[0])
plt.yticks(
np.arange(len(paramsToPlot[keysToPlot[0]])), paramsToPlot[keysToPlot[0]])
if scoreLabel is not None:
plt.title(scoreLabel)
else:
plt.title('Score')
plt.colorbar()
plt.box(on=False)
plt.show()
def plot3DGrid(scores, paramsToPlot, keysToPlot, scoreLabel, vrange):
"""
Plots a grid of heatmaps of scores, over the paramsToPlot
:param scores: A list of scores, estimated using parallelizeScore
:param paramsToPlot: The parameters to plot, chosen automatically by plotScores
:param scoreLabel: The specified score label (dependent on scoring metric used)
:param vrange: The visible range of the heatmap (range you wish the heatmap to be specified over)
"""
vmin = np.min(scores)
vmax = np.max(scores)
scoreGrid = np.reshape(scores, (len(paramsToPlot[keysToPlot[0]]), len(
paramsToPlot[keysToPlot[1]]), len(paramsToPlot[keysToPlot[2]])))
smallest_dim = np.argmin(scoreGrid.shape)
if smallest_dim != 2:
scoreGrid = np.swapaxes(scoreGrid, smallest_dim, 2)
keysToPlot[smallest_dim], keysToPlot[2] = keysToPlot[2], keysToPlot[smallest_dim]
nelements = scoreGrid.shape[2]
nrows = np.floor(nelements ** 0.5).astype(int)
ncols = np.ceil(1. * nelements / nrows).astype(int)
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex='all', sharey='all', figsize=(int(round(len(
paramsToPlot[keysToPlot[1]]) * ncols * 1.33)), int(round(len(paramsToPlot[keysToPlot[0]]) * nrows * 1.33))))
i = 0
for ax in axes.flat:
if vrange is not None:
im = ax.imshow(scoreGrid[:, :, i], cmap='jet',
vmin=vrange[0], vmax=vrange[1])
else:
im = ax.imshow(scoreGrid[:, :, i],
cmap='jet', vmin=vmin, vmax=vmax)
ax.set_xlabel(keysToPlot[1])
ax.set_xticks(np.arange(len(paramsToPlot[keysToPlot[1]])))
ax.set_xticklabels(paramsToPlot[keysToPlot[1]])
ax.set_ylabel(keysToPlot[0])
ax.set_yticks(np.arange(len(paramsToPlot[keysToPlot[0]])))
ax.set_yticklabels(paramsToPlot[keysToPlot[0]])
ax.set_title(keysToPlot[2] + ' = ' +
str(paramsToPlot[keysToPlot[2]][i]))
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
i += 1
if i == nelements:
break
if scoreLabel is not None:
fig.suptitle(scoreLabel, fontsize=18)
else:
fig.suptitle('Score', fontsize=18)
fig.subplots_adjust(right=0.8)
cbar = cb.make_axes(ax, location='right', fraction=0.03)
fig.colorbar(im, cax=cbar[0])
plt.show()
def plotScores(scores, paramGrid, scoreLabel=None, vrange=None):
"""
Makes a plot representing how the scores vary over the parameter grid
Automatically decides whether to use a simple line plot (varying over one parameter)
or a heatmap (varying over two parameters)
:param scores: A list of scores, estimated using scoreModels
:param paramGrid: The parameter grid specified when fitting the models using fitModels
:param scoreLabel: The specified label (dependent on scoring metric used), e.g. 'AUC'
:param vrange: The visible range over which to display the scores
:return:
"""
keys = sorted(list(paramGrid)[0].keys())
uniqParams = dict()
order = dict()
for k in keys:
order[k] = np.unique([params[k] if params[k] is not None else 'None'
for params in list(paramGrid)], return_index=True)[1]
uniqParams[k] = [params[k]
for params in np.asarray(list(paramGrid))[sorted(order[k])]]
keysToPlot = list()
for k in keys:
if len(uniqParams[k]) > 1:
keysToPlot.append(k)
for k in keys:
if k not in keysToPlot:
uniqParams.pop(k, None)
numDim = len(keysToPlot)
if numDim > 3:
print('Too many dimensions to plot.')
elif numDim == 3:
plot3DGrid(scores, uniqParams, keysToPlot, scoreLabel, vrange)
elif numDim == 2:
plot2DGrid(scores, uniqParams, keysToPlot, scoreLabel, vrange)
elif numDim == 1:
plot1DGrid(scores, uniqParams, scoreLabel, vrange)
else:
print('No parameters that vary in the grid')
| 41.251534
| 116
| 0.653629
|
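A minimal way to exercise plotScores from the module above; the grid comes from scikit-learn's ParameterGrid and the scores are made-up numbers (in parfit they would normally come from its fitting and scoring helpers), so the resulting heatmap is purely illustrative:

from sklearn.model_selection import ParameterGrid

# Two varying parameters -> plotScores dispatches to plot2DGrid.
grid = ParameterGrid({'max_depth': [2, 4, 8], 'min_samples_leaf': [1, 5]})
scores = [0.71, 0.74, 0.78, 0.80, 0.77, 0.79]  # one score per parameter combination
plotScores(scores, grid, scoreLabel='Accuracy')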
d715b57722882c6d8c2dfffed427a69d92ab287d
| 327
|
py
|
Python
|
atividades/ex103.py
|
Fleen66/Python_exercises
|
fd05fdf1181da833a1a1bc9f4a476afc8f467977
|
[
"MIT"
] | null | null | null |
atividades/ex103.py
|
Fleen66/Python_exercises
|
fd05fdf1181da833a1a1bc9f4a476afc8f467977
|
[
"MIT"
] | null | null | null |
atividades/ex103.py
|
Fleen66/Python_exercises
|
fd05fdf1181da833a1a1bc9f4a476afc8f467977
|
[
"MIT"
] | null | null | null |
def ficha(n = '<desconhecido>', g = 0):
print(f'O jogador {n} fez {g} gols')
nome = str(input('Digite o nome do jogador: '))
gols = str(input('Digite o numero de gols: '))
if gols.isnumeric():
gols = int(gols)
else:
gols = 0
if nome.strip() == '':
ficha('<desconhecido>', gols)
else:
ficha(nome, gols)
| 18.166667
| 47
| 0.590214
|
cb9cd0d52d10d5f9111d6013cfc32649afb0128b
| 858
|
py
|
Python
|
blog_api/tests.py
|
maiconwa/crispy-eureka
|
8b3c575516582b71b71686c4aec04ce4e18745a1
|
[
"MIT"
] | null | null | null |
blog_api/tests.py
|
maiconwa/crispy-eureka
|
8b3c575516582b71b71686c4aec04ce4e18745a1
|
[
"MIT"
] | null | null | null |
blog_api/tests.py
|
maiconwa/crispy-eureka
|
8b3c575516582b71b71686c4aec04ce4e18745a1
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import User
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from blog.models import Post, Category
class PostTests(APITestCase):
def test_view_posts(self):
url = reverse('blog_api:listcreate')
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
    def test_create_post(self):
        # Category.objects.create_user does not exist on a regular model manager;
        # the fixture actually needed here is an auth user to act as the post author.
        self.test_user1 = User.objects.create_user(
            username='test_user1', password='123456789')
data = {"title": "new", "author": 1, "excerpt": "new", "content": "new"}
url = reverse('blog_api:listcreate')
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
| 35.75
| 80
| 0.664336
|
332aa372eeaa5a02980a86d3fdb56c7b529476de
| 6,340
|
py
|
Python
|
scripts/label_image.py
|
MaciejDurski/FaceRecognitionServ
|
4c297195ba8f1a353cfdcadef4284093a9c1577c
|
[
"Apache-2.0"
] | null | null | null |
scripts/label_image.py
|
MaciejDurski/FaceRecognitionServ
|
4c297195ba8f1a353cfdcadef4284093a9c1577c
|
[
"Apache-2.0"
] | null | null | null |
scripts/label_image.py
|
MaciejDurski/FaceRecognitionServ
|
4c297195ba8f1a353cfdcadef4284093a9c1577c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import time
import numpy as np
import tensorflow as tf
import shutil
import os
import subprocess
import shlex
from subprocess import Popen, PIPE
from sys import stdout, stdin, stderr
def load_graph(model_file):
graph = tf.Graph()
graph_def = tf.GraphDef()
with open(model_file, "rb") as f:
graph_def.ParseFromString(f.read())
with graph.as_default():
tf.import_graph_def(graph_def)
return graph
def read_tensor_from_image_file(file_name, input_height=299, input_width=299,
input_mean=0, input_std=255):
input_name = "file_reader"
output_name = "normalized"
file_reader = tf.read_file(file_name, input_name)
if file_name.endswith(".png"):
image_reader = tf.image.decode_png(file_reader, channels = 3,
name='png_reader')
elif file_name.endswith(".gif"):
image_reader = tf.squeeze(tf.image.decode_gif(file_reader,
name='gif_reader'))
elif file_name.endswith(".bmp"):
image_reader = tf.image.decode_bmp(file_reader, name='bmp_reader')
else:
image_reader = tf.image.decode_jpeg(file_reader, channels = 3,
name='jpeg_reader')
float_caster = tf.cast(image_reader, tf.float32)
  dims_expander = tf.expand_dims(float_caster, 0)
resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
sess = tf.Session()
result = sess.run(normalized)
return result
def load_labels(label_file):
label = []
proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()
for l in proto_as_ascii_lines:
label.append(l.rstrip())
return label
if __name__ == "__main__":
file_name = "tf_files/flower_photos/daisy/3475870145_685a19116d.jpg"
model_file = "tf_files/retrained_graph.pb"
label_file = "tf_files/retrained_labels.txt"
input_height = 224
input_width = 224
input_mean = 128
input_std = 128
input_layer = "input"
output_layer = "final_result"
parser = argparse.ArgumentParser()
parser.add_argument("--image", help="image to be processed")
parser.add_argument("--graph", help="graph/model to be executed")
parser.add_argument("--labels", help="name of file containing labels")
parser.add_argument("--input_height", type=int, help="input height")
parser.add_argument("--input_width", type=int, help="input width")
parser.add_argument("--input_mean", type=int, help="input mean")
parser.add_argument("--input_std", type=int, help="input std")
parser.add_argument("--input_layer", help="name of input layer")
parser.add_argument("--output_layer", help="name of output layer")
args = parser.parse_args()
if args.graph:
model_file = args.graph
if args.image:
file_name = args.image
if args.labels:
label_file = args.labels
if args.input_height:
input_height = args.input_height
if args.input_width:
input_width = args.input_width
if args.input_mean:
input_mean = args.input_mean
if args.input_std:
input_std = args.input_std
if args.input_layer:
input_layer = args.input_layer
if args.output_layer:
output_layer = args.output_layer
graph = load_graph(model_file)
t = read_tensor_from_image_file(file_name,
input_height=input_height,
input_width=input_width,
input_mean=input_mean,
input_std=input_std)
input_name = "import/" + input_layer
output_name = "import/" + output_layer
  input_operation = graph.get_operation_by_name(input_name)
  output_operation = graph.get_operation_by_name(output_name)
with tf.Session(graph=graph) as sess:
start = time.time()
results = sess.run(output_operation.outputs[0],
{input_operation.outputs[0]: t})
end=time.time()
results = np.squeeze(results)
top_k = results.argsort()[-1:][::-1]
labels = load_labels(label_file)
best_score=1
source='C:\\Users\\vrh\\Desktop\\staz\\tensorflow-for-poets-2\\tf_files\\photos\\'
dest='C:\\Users\\vrh\\Desktop\\staz\\tensorflow-for-poets-2\\tf_files\\face_photos\\'
for i in top_k:
best_score=results[i]
if best_score<0.95:
print("")
print('Not recognized')
#dest+=imie
#files = os.listdir(source)
#for f in files:
# shutil.move(source+f, dest)
#os.environ['IMAGE_SIZE']='224'
#os.environ['ARCHITECTURE']="mobilenet_0.50_"+os.environ.get('IMAGE_SIZE')
#os.system('python -m scripts.retrain \
# --bottleneck_dir=tf_files/bottlenecks \
#--how_many_training_steps=500 \
#--model_dir=tf_files/models/ \
#--summaries_dir=tf_files/training_summaries/'+os.environ.get('ARCHITECTURE')+' \
#--output_graph=tf_files/retrained_graph.pb \
#--output_labels=tf_files/retrained_labels.txt \
#--architecture="'+os.environ.get('ARCHITECTURE')+'" \
#--image_dir=tf_files/face_photos')
else:
# print('\nEvaluation time (1-image): {:.3f}s\n'.format(end-start))
template = "{};{:0.5f};"
for i in top_k:
print("")
if labels[i]=='maciej':
print(template.format(labels[i], results[i])+"0;0;10")
elif labels[i]=='filip':
print(template.format(labels[i], results[i])+"0;10;0")
else:
print('Not recognized')
| 32.849741
| 90
| 0.664353
|
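The script above is normally driven through argparse, but its helpers can also be reused directly; this sketch assumes load_graph, load_labels and read_tensor_from_image_file from the file are in scope, and the paths are placeholders for wherever the retrained graph actually lives:

import numpy as np
import tensorflow as tf

graph = load_graph("tf_files/retrained_graph.pb")
labels = load_labels("tf_files/retrained_labels.txt")
t = read_tensor_from_image_file("face.jpg", input_height=224, input_width=224,
                                input_mean=128, input_std=128)
with tf.Session(graph=graph) as sess:
    results = sess.run(graph.get_operation_by_name("import/final_result").outputs[0],
                       {graph.get_operation_by_name("import/input").outputs[0]: t})
print(labels[int(np.squeeze(results).argmax())])  # best-scoring label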
0d06c53b6c47d2001290579cd959cd8567bbdb67
| 3,919
|
py
|
Python
|
test/test_services.py
|
Birjot-Bala/OSRS-Discord-Bot
|
311d567e18ec586afd9101a263c185a78a7951f5
|
[
"MIT"
] | 3
|
2021-04-16T05:30:08.000Z
|
2022-02-17T14:39:25.000Z
|
test/test_services.py
|
Birjot-Bala/OSRS-Discord-Bot
|
311d567e18ec586afd9101a263c185a78a7951f5
|
[
"MIT"
] | 2
|
2022-02-02T15:47:38.000Z
|
2022-02-02T15:56:27.000Z
|
test/test_services.py
|
Birjot-Bala/OSRS-Discord-Bot
|
311d567e18ec586afd9101a263c185a78a7951f5
|
[
"MIT"
] | 1
|
2021-05-14T21:48:14.000Z
|
2021-05-14T21:48:14.000Z
|
# test_services.py
# contains tests for services.py
import pytest
import requests
import json
import osrs_discord_bot.services as se
from requests.exceptions import Timeout
class MockAPI:
def __init__(self, requests_mock):
self.mock_base_url = requests_mock.get('https://test.com')
self.mock_404_url = requests_mock.get('https://test.com/404',
text='Not Found', status_code=404)
self.mock_json_url = requests_mock.get('https://test.com/json',
json= {'abc': 'def'})
self.mock_text_url = requests_mock.get('https://test.com/text',
text='resp')
self.mock_timeout_url = requests_mock.get('https://test.com/timeout',
exc=Timeout)
self.mock_exchange_url = requests_mock.get(
'https://rsbuddy.com/exchange/summary.json',
json={
"4151":{"id":4151,"name":"Abyssal whip","members":True,
"sp":120001,"buy_average":2864609,"buy_quantity":12,
"sell_average":2859858,"sell_quantity":10,
"overall_average":2862450,"overall_quantity":22}
}
)
self.mock_wiki_price_url = requests_mock.get(
'https://oldschool.runescape.wiki/latest',
json={
"data":{"4151":{"high":2494002,"highTime":1615934068,
"low":2490000,"lowTime":1615934079}
}}
)
with open('test/wise_response.json') as json_file:
wise_response = json.load(json_file)
self.mock_tracker_url = requests_mock.get(
'https://wiseoldman.net/api/players/username/test/'
'gained?period=test',
json=wise_response
)
with open('test/hiscore_response.txt') as text_file:
hiscore_response = text_file.read()
self.mock_hiscore_url = requests_mock.get(
'https://secure.runescape.com/m=hiscore_oldschool/'
'index_lite.ws?player=test_user',
text=hiscore_response
)
@pytest.fixture
def mockAPI(requests_mock):
return MockAPI(requests_mock)
@pytest.mark.parametrize(
"item,result",[
("1321931",({}, False)),
("Blood Rune", ({'565':'Blood rune'}, True))
]
)
def test_search_items(item, result):
num = 1
assert se.search_items(item, num) == result
def test_search_price_wiki(mockAPI):
test_response = {'4151':{'name':'Abyssal whip', 'buy_price':2494002,
'sell_price':2490000, 'margin':4002}
}
test_itemDict = se.search_items('Abyssal whip',1)[0]
assert se.search_price_wiki(test_itemDict) == test_response
def test_search_price(mockAPI):
test_Response = {'4151':{'name':'Abyssal whip', 'buy_price':2864609,
'sell_price':2859858, 'margin':4751}
}
test_itemDict = se.search_items('Abyssal whip',1)[0]
assert se.search_price(test_itemDict) == test_Response
@pytest.mark.parametrize(
"chance,actions,message", [
('1/100', 100, r'63.40% chance of getting the drop within 100 actions.'),
('1/100', None, 'Please enter the number of actions.'),
('abc', None, 'Please enter the drop rate as a fraction '
'or decimal and the number of actions as an integer.')
]
)
def test_chance_message(chance, actions, message):
assert se.chance_message(chance, actions) == message
def test_price_message(mockAPI):
price_message = se.price_message('Abyssal whip')
assert isinstance(price_message, str) == True
def test_tracker_message(mockAPI):
tracker_message = se.tracker_message('test', 'test')
assert isinstance(tracker_message, str) == True
def test_hiscore_message(mockAPI):
hiscore_message = se.hiscore_message('All', 'test_user')
assert isinstance(hiscore_message, str) == True
def test_name_to_id():
assert isinstance(se.name_to_id('Abyssal whip'), str) == True
| 33.784483
| 81
| 0.634601
|
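The 63.40% figure asserted in test_chance_message above is just the complement of missing a 1/100 drop on every one of 100 attempts:

p = 1 - (1 - 1 / 100) ** 100
print(f"{p:.2%}")  # -> 63.40%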
1e2e32cbfce75a4f2c0a788b0876ab11abf6bb9c
| 12,306
|
py
|
Python
|
Lib/encodings/tis_620.py
|
sireliah/polish-python
|
605df4944c2d3bc25f8bf6964b274c0a0d297cc3
|
[
"PSF-2.0"
] | 1
|
2018-06-21T18:21:24.000Z
|
2018-06-21T18:21:24.000Z
|
Lib/encodings/tis_620.py
|
sireliah/polish-python
|
605df4944c2d3bc25f8bf6964b274c0a0d297cc3
|
[
"PSF-2.0"
] | null | null | null |
Lib/encodings/tis_620.py
|
sireliah/polish-python
|
605df4944c2d3bc25f8bf6964b274c0a0d297cc3
|
[
"PSF-2.0"
] | null | null | null |
""" Python Character Mapping Codec tis_620 generated z 'python-mappings/TIS-620.TXT' przy gencodec.py.
"""#"
zaimportuj codecs
### Codec APIs
klasa Codec(codecs.Codec):
def encode(self,input,errors='strict'):
zwróć codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
zwróć codecs.charmap_decode(input,errors,decoding_table)
klasa IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=Nieprawda):
zwróć codecs.charmap_encode(input,self.errors,encoding_table)[0]
klasa IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=Nieprawda):
zwróć codecs.charmap_decode(input,self.errors,decoding_table)[0]
klasa StreamWriter(Codec,codecs.StreamWriter):
dalej
klasa StreamReader(Codec,codecs.StreamReader):
dalej
### encodings module API
def getregentry():
zwróć codecs.CodecInfo(
name='tis-620',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\ufffe'
'\u0e01' # 0xA1 -> THAI CHARACTER KO KAI
'\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI
'\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT
'\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI
'\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON
'\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG
'\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU
'\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN
'\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING
'\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG
'\u0e0b' # 0xAB -> THAI CHARACTER SO SO
'\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE
'\u0e0d' # 0xAD -> THAI CHARACTER YO YING
'\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA
'\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK
'\u0e10' # 0xB0 -> THAI CHARACTER THO THAN
'\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO
'\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO
'\u0e13' # 0xB3 -> THAI CHARACTER NO NEN
'\u0e14' # 0xB4 -> THAI CHARACTER DO DEK
'\u0e15' # 0xB5 -> THAI CHARACTER TO TAO
'\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG
'\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN
'\u0e18' # 0xB8 -> THAI CHARACTER THO THONG
'\u0e19' # 0xB9 -> THAI CHARACTER NO NU
'\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI
'\u0e1b' # 0xBB -> THAI CHARACTER PO PLA
'\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG
'\u0e1d' # 0xBD -> THAI CHARACTER FO FA
'\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN
'\u0e1f' # 0xBF -> THAI CHARACTER FO FAN
'\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO
'\u0e21' # 0xC1 -> THAI CHARACTER MO MA
'\u0e22' # 0xC2 -> THAI CHARACTER YO YAK
'\u0e23' # 0xC3 -> THAI CHARACTER RO RUA
'\u0e24' # 0xC4 -> THAI CHARACTER RU
'\u0e25' # 0xC5 -> THAI CHARACTER LO LING
'\u0e26' # 0xC6 -> THAI CHARACTER LU
'\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN
'\u0e28' # 0xC8 -> THAI CHARACTER SO SALA
'\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI
'\u0e2a' # 0xCA -> THAI CHARACTER SO SUA
'\u0e2b' # 0xCB -> THAI CHARACTER HO HIP
'\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA
'\u0e2d' # 0xCD -> THAI CHARACTER O ANG
'\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK
'\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI
'\u0e30' # 0xD0 -> THAI CHARACTER SARA A
'\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT
'\u0e32' # 0xD2 -> THAI CHARACTER SARA AA
'\u0e33' # 0xD3 -> THAI CHARACTER SARA AM
'\u0e34' # 0xD4 -> THAI CHARACTER SARA I
'\u0e35' # 0xD5 -> THAI CHARACTER SARA II
'\u0e36' # 0xD6 -> THAI CHARACTER SARA UE
'\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE
'\u0e38' # 0xD8 -> THAI CHARACTER SARA U
'\u0e39' # 0xD9 -> THAI CHARACTER SARA UU
'\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT
'\u0e40' # 0xE0 -> THAI CHARACTER SARA E
'\u0e41' # 0xE1 -> THAI CHARACTER SARA AE
'\u0e42' # 0xE2 -> THAI CHARACTER SARA O
'\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN
'\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI
'\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO
'\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK
'\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU
'\u0e48' # 0xE8 -> THAI CHARACTER MAI EK
'\u0e49' # 0xE9 -> THAI CHARACTER MAI THO
'\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI
'\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA
'\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT
'\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT
'\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN
'\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN
'\u0e50' # 0xF0 -> THAI DIGIT ZERO
'\u0e51' # 0xF1 -> THAI DIGIT ONE
'\u0e52' # 0xF2 -> THAI DIGIT TWO
'\u0e53' # 0xF3 -> THAI DIGIT THREE
'\u0e54' # 0xF4 -> THAI DIGIT FOUR
'\u0e55' # 0xF5 -> THAI DIGIT FIVE
'\u0e56' # 0xF6 -> THAI DIGIT SIX
'\u0e57' # 0xF7 -> THAI DIGIT SEVEN
'\u0e58' # 0xF8 -> THAI DIGIT EIGHT
'\u0e59' # 0xF9 -> THAI DIGIT NINE
'\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU
'\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| 39.954545
| 102
| 0.51609
|
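A quick round trip through the stock CPython codec of the same name, matching the table above where 0xA1 decodes to THAI CHARACTER KO KAI:

assert b"\xa1".decode("tis-620") == "\u0e01"
assert "\u0e01".encode("tis-620") == b"\xa1"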
b35e2728abab075927f1cfc0f91272fa61fbd664
| 2,925
|
py
|
Python
|
data/task_scripts/main/task01008.py
|
aallaire91/phyre
|
ee882194c12bae5561c25ec65f95a7c0944f8129
|
[
"Apache-2.0"
] | 432
|
2019-08-15T15:45:43.000Z
|
2022-02-26T23:13:34.000Z
|
data/task_scripts/main/task01008.py
|
aallaire91/phyre
|
ee882194c12bae5561c25ec65f95a7c0944f8129
|
[
"Apache-2.0"
] | 38
|
2019-09-06T15:39:03.000Z
|
2022-03-12T00:11:25.000Z
|
data/task_scripts/main/task01008.py
|
aallaire91/phyre
|
ee882194c12bae5561c25ec65f95a7c0944f8129
|
[
"Apache-2.0"
] | 69
|
2019-08-16T02:08:41.000Z
|
2022-01-27T23:23:03.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import phyre.creator as creator_lib
import phyre.virtual_tools as vt
@creator_lib.define_task_template(
seed=range(1000),
version="1",
search_params=dict(max_search_tasks=300,
required_flags=['BALL:GOOD_STABLE'],
excluded_flags=['BALL:TRIVIAL'],
diversify_tier='ball'),
)
def build_task(C, seed):
rng = np.random.RandomState(seed=seed)
blockRange = [2, 6]
stackHeight = 3
tableHeight = [50, 200]
blockSize = [15, 40]
maxTabWid = 150
tableX = [10, 400]
flip_lr = rng.uniform(0, 1) < 0.5
bSize = rng.uniform(blockSize[0], blockSize[1])
tHeight = rng.uniform(tableHeight[0], tableHeight[1])
## Make the stacks
stackSizes = [rng.randint(blockRange[0], blockRange[1])]
lastSize = stackSizes[0]
for i in range(1, stackHeight):
lastSize = rng.randint(0, lastSize)
if lastSize == 0:
break
stackSizes.append(lastSize)
blockIdxs = []
for i in range(0, len(stackSizes)):
for j in range(0, stackSizes[i]):
blockIdxs.append(str(i) + '_' + str(j))
blockToHit = blockIdxs[rng.randint(0, len(blockIdxs))]
baseWidth = stackSizes[0] * bSize
tWidth = rng.uniform(baseWidth - bSize, maxTabWid)
tPos = rng.uniform(tableX[0], tableX[1])
floor = vt.add_box(C, [0, 0, vt.VT_SCALE, 10], False, flip_lr=flip_lr)
table = vt.add_box(C, [tPos, 10, tPos + tWidth, tHeight],
False,
flip_lr=flip_lr)
baseH = tHeight
for i in range(0, len(stackSizes)):
stackN = stackSizes[i]
stackWid = stackN * bSize
baseX = tPos + tWidth / 2 - stackWid / 2
for j in range(0, stackN):
blockExt = [baseX, baseH, baseX + bSize, baseH + bSize]
if str(i) + '_' + str(j) == blockToHit:
goal = True
else:
goal = False
blockID = vt.add_box(C, blockExt, True, flip_lr=flip_lr)
if goal:
goalBlock = blockID
baseX += bSize
baseH += bSize
C.update_task(body1=goalBlock,
body2=floor,
relationships=[C.SpatialRelationship.TOUCHING])
C.set_meta(C.SolutionTier.VIRTUAL_TOOLS)
| 32.865169
| 74
| 0.610256
|
9f3ab7511d249718b7b1578cf6d6f33f71ebc509
| 5,850
|
py
|
Python
|
Part1/HSI_LiDAR/HoustonDataset.py
|
efkandurakli/Graduation-Project1
|
fd2cba89929da2cef49ec67214b54c310b57ce01
|
[
"MIT"
] | 1
|
2019-12-18T08:16:55.000Z
|
2019-12-18T08:16:55.000Z
|
Part1/HSI_LiDAR/HoustonDataset.py
|
efkandurakli/Graduation-Project1
|
fd2cba89929da2cef49ec67214b54c310b57ce01
|
[
"MIT"
] | null | null | null |
Part1/HSI_LiDAR/HoustonDataset.py
|
efkandurakli/Graduation-Project1
|
fd2cba89929da2cef49ec67214b54c310b57ce01
|
[
"MIT"
] | null | null | null |
hsi_image_file_path = "drive/Undergraduate_Project/HoustonDataset/HSI_DATA.tif"
lidar_image_file_path = "drive/Undergraduate_Project/HoustonDataset/LiDAR_DATA.tif"
train_file_path = "drive/Undergraduate_Project/HoustonDataset/train.txt"
test_file_path = "drive/Undergraduate_Project/HoustonDataset/test.txt"
from osgeo import gdal
import numpy as np
from sklearn.decomposition import PCA
class Houston:
def __init__(self):
raster = gdal.Open(hsi_image_file_path)
self.hsi_raw_data = np.array(raster.ReadAsArray())
raster = gdal.Open(lidar_image_file_path)
self.lidar_raw_data = np.array(raster.ReadAsArray())
HEIGHT = self.hsi_raw_data.shape[1]
WIDTH = self.hsi_raw_data.shape[2]
self.train_pixels = self.get_pixels(train_file_path)
self.test_pixels = self.get_pixels(test_file_path)
hsi_train_data = []
hsi_test_data = []
lidar_train_data = []
lidar_test_data = []
train_labels = []
test_labels = []
for i in range(HEIGHT):
for j in range(WIDTH):
if self.train_pixels[i, j] != 0:
hsi_train_data.append(self.hsi_raw_data[:, i, j])
lidar_train_data.append(self.lidar_raw_data[i, j])
train_labels.append(self.train_pixels[i, j])
if self.test_pixels[i, j] != 0:
hsi_test_data.append(self.hsi_raw_data[:, i, j])
lidar_test_data.append(self.lidar_raw_data[i, j])
test_labels.append(self.test_pixels[i, j])
self.hsi_train_data = np.array(hsi_train_data)
self.hsi_test_data = np.array(hsi_test_data)
self.lidar_train_data = np.array(lidar_train_data)
self.lidar_test_data = np.array(lidar_test_data)
self.train_labels = np.array(train_labels)
self.test_labels = np.array(test_labels)
self.one_hot_train = self.convert_to_one_hot(self.train_labels)
self.one_hot_test = self.convert_to_one_hot(self.test_labels)
def get_hsi_data(self):
return self.hsi_raw_data
def get_lidar_data(self):
return self.lidar_raw_data
def get_hsi_train_data(self):
return self.hsi_train_data
def get_hsi_test_data(self):
return self.hsi_test_data
def get_lidar_train_data(self):
return self.lidar_train_data
def get_lidar_test_data(self):
return self.lidar_test_data
def get_train_labels(self):
return self.train_labels
def get_test_labels(self):
return self.test_labels
def get_train_as_one_hot(self):
return self.one_hot_train
def get_test_as_one_hot(self):
return self.one_hot_test
def get_pixels(self, filename):
file = open(filename)
triplets = file.read().split()
for i in range(0, len(triplets)):
triplets[i] = triplets[i].split(",")
array = np.array(triplets, dtype=int)
file.close()
return array
def get_train_pixels(self):
return self.train_pixels
def get_test_pixels(self):
return self.test_pixels
def convert_to_one_hot(self, vector, num_classes=None):
assert isinstance(vector, np.ndarray)
assert len(vector) > 0
vector = vector-1
if num_classes is None:
num_classes = np.max(vector) + 1
else:
assert num_classes > 0
assert num_classes >= np.max(vector)
result = np.zeros(shape=(len(vector), num_classes))
result[np.arange(len(vector)), vector] = 1
return result.astype(int)
def HSI_PCA(self, n_components=2):
NUM_BANDS = self.hsi_raw_data.shape[0]
HEIGHT = self.hsi_raw_data.shape[1]
WIDTH = self.hsi_raw_data.shape[2]
hsi_data_2d = self.hsi_raw_data.transpose(1,2,0).reshape((HEIGHT*WIDTH), NUM_BANDS)
pca = PCA(n_components=n_components)
principalComponents = pca.fit_transform(hsi_data_2d)
principalComponents = np.array(principalComponents).transpose(1, 0).reshape(n_components, HEIGHT, WIDTH)
return principalComponents
def get_patches(self, patch_size, Train=True, PCA=False, LiDAR=False, n_components=2):
if PCA:
image_data = self.HSI_PCA(n_components=n_components)
else:
image_data = self.hsi_raw_data
if LiDAR:
lidar_data = self.get_lidar_data()
image_data = np.concatenate([image_data, lidar_data[None, ...]], axis=0)
HEIGHT = image_data.shape[1]
WIDTH = image_data.shape[2]
offset = int(patch_size / 2)
train_patches = []
if Train:
data = self.train_pixels
else:
data = self.test_pixels
for i in range(HEIGHT):
for j in range(WIDTH):
if data[i, j] != 0:
row_low = max(0, i - offset)
row_high = min(HEIGHT - 1, i + offset)
if row_low == 0:
row_high = row_low + patch_size - 1
if row_high == HEIGHT - 1:
row_low = row_high - patch_size + 1
col_low = max(0, j - offset)
col_high = min(WIDTH - 1, j + offset)
if col_low == 0:
col_high = col_low + patch_size - 1
if col_high == WIDTH - 1:
col_low = col_high - patch_size + 1
train_patches.append(image_data[0:, row_low:row_high + 1, col_low:col_high + 1])
return np.array(train_patches)
| 33.238636
| 113
| 0.594188
|
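The one-hot helper in the class above expects 1-based labels (it subtracts 1 before encoding) and never touches self, so it can be checked without loading the .tif files, assuming the Houston class is importable:

import numpy as np

print(Houston.convert_to_one_hot(None, np.array([1, 3, 2])))
# [[1 0 0]
#  [0 0 1]
#  [0 1 0]]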
a186b9ba77962023c5b6396cec11cb317080eed6
| 14,812
|
py
|
Python
|
tests/gradients/test_gradient_transform.py
|
ral9000/pennylane
|
0afbd155d044730af546c6d90cef9d01f931632d
|
[
"Apache-2.0"
] | null | null | null |
tests/gradients/test_gradient_transform.py
|
ral9000/pennylane
|
0afbd155d044730af546c6d90cef9d01f931632d
|
[
"Apache-2.0"
] | null | null | null |
tests/gradients/test_gradient_transform.py
|
ral9000/pennylane
|
0afbd155d044730af546c6d90cef9d01f931632d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the gradients.gradient_transform module."""
import pytest
import pennylane as qml
from pennylane import numpy as np
from pennylane.gradients.gradient_transform import gradient_transform
class TestGradientTransformIntegration:
"""Test integration of the gradient transform decorator"""
def test_acting_on_qnodes(self, tol):
"""Test that a gradient transform acts on QNodes
correctly"""
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev)
def circuit(weights):
qml.RX(weights[0], wires=[0])
qml.RY(weights[1], wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0)), qml.var(qml.PauliX(1))
grad_fn = qml.gradients.param_shift(circuit)
w = np.array([0.543, -0.654], requires_grad=True)
res = grad_fn(w)
x, y = w
expected = np.array([[-np.sin(x), 0], [0, -2 * np.cos(y) * np.sin(y)]])
assert np.allclose(res, expected, atol=tol, rtol=0)
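        # For reference: the circuit prepares RX(x)|0> ⊗ RY(y)|0> followed by a
        # CNOT, so <Z_0> = cos(x) and Var[X_1] = 1 - sin(y)**2 = cos(y)**2; the
        # entries of expected are just the derivatives of those closed forms,
        # d<Z_0>/dx = -sin(x) and dVar[X_1]/dy = -2*cos(y)*sin(y).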
def test_decorator(self, tol):
"""Test that a gradient transform decorating a QNode
acts correctly"""
dev = qml.device("default.qubit", wires=2)
@qml.gradients.param_shift
@qml.qnode(dev)
def circuit(weights):
qml.RX(weights[0], wires=[0])
qml.RY(weights[1], wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0)), qml.var(qml.PauliX(1))
w = np.array([0.543, -0.654], requires_grad=True)
res = circuit(w)
x, y = w
expected = np.array([[-np.sin(x), 0], [0, -2 * np.cos(y) * np.sin(y)]])
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_passing_arguments(self, mocker, tol):
"""Test that a gradient transform correctly
passes arguments"""
dev = qml.device("default.qubit", wires=2)
spy = mocker.spy(qml.gradients.parameter_shift, "expval_param_shift")
@qml.qnode(dev)
def circuit(weights):
qml.RX(weights[0], wires=[0])
qml.RY(weights[1], wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0)), qml.var(qml.PauliX(1))
grad_fn = qml.gradients.param_shift(circuit, shift=np.pi / 4)
w = np.array([0.543, -0.654], requires_grad=True)
res = grad_fn(w)
x, y = w
expected = np.array([[-np.sin(x), 0], [0, -2 * np.cos(y) * np.sin(y)]])
assert np.allclose(res, expected, atol=tol, rtol=0)
assert spy.call_args[0][2] == np.pi / 4
def test_expansion(self, mocker, tol):
"""Test that a gradient transform correctly
expands gates with no gradient recipe"""
dev = qml.device("default.qubit", wires=2)
spy = mocker.spy(qml.gradients.parameter_shift, "expval_param_shift")
class NonDiffRXGate(qml.PhaseShift):
grad_method = None
@staticmethod
def decomposition(x, wires):
return [qml.RX(x, wires=wires)]
@qml.qnode(dev)
def circuit(weights):
NonDiffRXGate(weights[0], wires=[0])
qml.RY(weights[1], wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0)), qml.var(qml.PauliX(1))
grad_fn = qml.gradients.param_shift(circuit)
w = np.array([0.543, -0.654], requires_grad=True)
res = grad_fn(w)
x, y = w
expected = np.array([[-np.sin(x), 0], [0, -2 * np.cos(y) * np.sin(y)]])
assert np.allclose(res, expected, atol=tol, rtol=0)
assert spy.call_args[0][0].operations[0].name == "RX"
def test_permuted_arguments(self, tol):
"""Test that a gradient transform acts on QNodes
correctly when the QNode arguments are permuted"""
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev)
def circuit(weights):
qml.RX(weights[1], wires=[0])
qml.RY(weights[0], wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0)), qml.var(qml.PauliX(1))
w = np.array([-0.654, 0.543], requires_grad=True)
res = qml.gradients.param_shift(circuit)(w)
expected = qml.jacobian(circuit)(w)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_classical_processing_arguments(self, mocker, tol):
"""Test that a gradient transform acts on QNodes
correctly when the QNode arguments are classically processed"""
dev = qml.device("default.qubit", wires=2)
spy = mocker.spy(qml.transforms, "classical_jacobian")
@qml.qnode(dev)
def circuit(weights):
qml.RX(weights[0] ** 2, wires=[0])
qml.RY(weights[1], wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0))
w = np.array([0.543, -0.654], requires_grad=True)
res = qml.gradients.param_shift(circuit)(w)
classical_jac = spy.spy_return(w)
assert isinstance(classical_jac, np.ndarray)
assert np.allclose(classical_jac, np.array([[2 * w[0], 0], [0, 1]]))
x, y = w
expected = [-2 * x * np.sin(x ** 2), 0]
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_classical_processing_multiple_arguments(self, mocker, tol):
"""Test that a gradient transform acts on QNodes
correctly when multiple QNode arguments are classically processed"""
dev = qml.device("default.qubit", wires=2)
spy = mocker.spy(qml.transforms, "classical_jacobian")
@qml.qnode(dev)
def circuit(data, weights):
qml.RY(np.cos(data), wires=0)
qml.RX(weights[0] ** 2, wires=[0])
qml.RY(weights[1], wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0))
# set d as non-differentiable
d = np.array(0.56, requires_grad=False)
w = np.array([0.543, -0.654], requires_grad=True)
x, y = w
res = qml.gradients.param_shift(circuit)(d, w)
classical_jac = spy.spy_return(d, w)
assert np.allclose(classical_jac, np.array([[2 * w[0], 0], [0, 1]]).T)
expected = np.array([-2 * x * np.cos(np.cos(d)) * np.sin(x ** 2), 0])
assert np.allclose(res, expected, atol=tol, rtol=0)
# set d as differentiable
d = np.array(0.56, requires_grad=True)
w = np.array([0.543, -0.654], requires_grad=True)
res = qml.gradients.param_shift(circuit)(d, w)
classical_jac = spy.spy_return(d, w)
assert isinstance(classical_jac, tuple)
assert np.allclose(classical_jac[0], [-np.sin(d), 0, 0])
assert np.allclose(classical_jac[1], np.array([[0, 2 * w[0], 0], [0, 0, 1]]).T)
expected_dd = np.cos(x ** 2) * np.sin(d) * np.sin(np.cos(d))
expected_dw = np.array([-2 * x * np.cos(np.cos(d)) * np.sin(x ** 2), 0])
assert np.allclose(res[0], expected_dd, atol=tol, rtol=0)
assert np.allclose(res[1], expected_dw, atol=tol, rtol=0)
def test_advanced_classical_processing_arguments(self, tol):
"""Test that a gradient transform acts on QNodes
correctly when the QNode arguments are classically processed,
and the input weights and the output weights have weird shape."""
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev)
def circuit(weights):
qml.RX(weights[0, 0] ** 2, wires=[0])
qml.RY(weights[0, 1], wires=[1])
qml.CNOT(wires=[0, 1])
return qml.probs(wires=[0, 1])
w = np.array([[0.543, -0.654], [0.0, 0.0]], requires_grad=True)
res = qml.gradients.param_shift(circuit)(w)
assert res.shape == (4, 2, 2)
expected = qml.jacobian(circuit)(w)
assert np.allclose(res, expected, atol=tol, rtol=0)
# when executed with hybrid=False, only the quantum jacobian is returned
res = qml.gradients.param_shift(circuit, hybrid=False)(w)
assert res.shape == (4, 2)
@qml.qnode(dev)
def circuit(weights):
qml.RX(weights[0], wires=[0])
qml.RY(weights[1], wires=[1])
qml.CNOT(wires=[0, 1])
return qml.probs(wires=[0, 1])
w = np.array([0.543 ** 2, -0.654], requires_grad=True)
expected = qml.jacobian(circuit)(w)
assert np.allclose(res, expected, atol=tol, rtol=0)
@pytest.mark.parametrize("strategy", ["gradient", "device"])
def test_template_integration(self, strategy, tol):
"""Test that the gradient transform acts on QNodes
correctly when the QNode contains a template"""
dev = qml.device("default.qubit", wires=3)
@qml.beta.qnode(dev, expansion_strategy=strategy)
def circuit(weights):
qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1, 2])
return qml.probs(wires=[0, 1])
weights = np.ones([2, 3, 3], dtype=np.float64, requires_grad=True)
res = qml.gradients.param_shift(circuit)(weights)
assert res.shape == (4, 2, 3, 3)
expected = qml.jacobian(circuit)(weights)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_setting_shots(self):
"""Test that setting the number of shots works correctly for
a gradient transform"""
dev = qml.device("default.qubit", wires=1, shots=1000)
@qml.beta.qnode(dev)
def circuit(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0))
x = 0.543
# the gradient function can be called with different shot values
grad_fn = qml.gradients.param_shift(circuit)
assert grad_fn(x).shape == (1, 1)
assert grad_fn(x, shots=[(1, 1000)]).shape == (1000, 1)
# the original QNode is unaffected
assert circuit(x).shape == tuple()
assert circuit(x, shots=1000).shape == tuple()
def test_shots_error(self):
"""Raise an exception if shots is used within the QNode"""
dev = qml.device("default.qubit", wires=1, shots=1000)
@qml.beta.qnode(dev)
def circuit(x, shots):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0))
with pytest.raises(
ValueError, match="'shots' argument name is reserved for overriding the number of shots"
):
qml.gradients.param_shift(circuit)(0.2, shots=100)
class TestInterfaceIntegration:
"""Test that the gradient transforms are differentiable
using each interface"""
def test_autograd(self, tol):
"""Test that a gradient transform remains differentiable
with autograd"""
dev = qml.device("default.qubit", wires=2)
@qml.gradients.param_shift
@qml.qnode(dev)
def circuit(x):
qml.RY(x ** 2, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.var(qml.PauliX(1))
x = np.array(-0.654, requires_grad=True)
res = circuit(x)
expected = -4 * x * np.cos(x ** 2) * np.sin(x ** 2)
assert np.allclose(res, expected, atol=tol, rtol=0)
res = qml.grad(circuit)(x)
expected = -2 * (4 * x ** 2 * np.cos(2 * x ** 2) + np.sin(2 * x ** 2))
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_tf(self, tol):
"""Test that a gradient transform remains differentiable
with TF"""
tf = pytest.importorskip("tensorflow")
dev = qml.device("default.qubit", wires=2)
@qml.gradients.param_shift
@qml.qnode(dev, interface="tf", diff_method="parameter-shift")
def circuit(x):
qml.RY(x ** 2, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.var(qml.PauliX(1))
x_ = -0.654
x = tf.Variable(x_, dtype=tf.float64)
with tf.GradientTape() as tape:
res = circuit(x)
expected = -4 * x_ * np.cos(x_ ** 2) * np.sin(x_ ** 2)
assert np.allclose(res, expected, atol=tol, rtol=0)
res = tape.gradient(res, x)
expected = -2 * (4 * x_ ** 2 * np.cos(2 * x_ ** 2) + np.sin(2 * x_ ** 2))
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_torch(self, tol):
"""Test that a gradient transform remains differentiable
with PyTorch"""
torch = pytest.importorskip("torch")
dev = qml.device("default.qubit", wires=2)
@qml.gradients.param_shift
@qml.qnode(dev, interface="torch")
def circuit(x):
qml.RY(x, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.var(qml.PauliX(1))
x_ = -0.654
x = torch.tensor(x_, dtype=torch.float64, requires_grad=True)
res = circuit(x)[0]
expected = -2 * np.cos(x_) * np.sin(x_)
assert np.allclose(res.detach(), expected, atol=tol, rtol=0)
res.backward()
expected = -2 * np.cos(2 * x_)
assert np.allclose(x.grad.detach(), expected, atol=tol, rtol=0)
def test_jax(self, tol):
"""Test that a gradient transform remains differentiable
with JAX"""
jax = pytest.importorskip("jax")
jnp = jax.numpy
dev = qml.device("default.qubit", wires=2)
@qml.gradients.param_shift
@qml.qnode(dev, interface="jax")
def circuit(x):
qml.RY(x ** 2, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.var(qml.PauliX(1))
x = jnp.array(-0.654)
res = circuit(x)
expected = -4 * x * np.cos(x ** 2) * np.sin(x ** 2)
assert np.allclose(res, expected, atol=tol, rtol=0)
res = jax.grad(circuit)(x)
expected = -2 * (4 * x ** 2 * np.cos(2 * x ** 2) + np.sin(2 * x ** 2))
assert np.allclose(res, expected, atol=tol, rtol=0)
| 37.785714
| 101
| 0.570348
|
e122a8a285a84878389ab7f8ea78acb9e3337947
| 1,114
|
py
|
Python
|
wordpress/komand_wordpress/actions/delete_user/schema.py
|
xhennessy-r7/insightconnect-plugins
|
59268051313d67735b5dd3a30222eccb92aca8e9
|
[
"MIT"
] | null | null | null |
wordpress/komand_wordpress/actions/delete_user/schema.py
|
xhennessy-r7/insightconnect-plugins
|
59268051313d67735b5dd3a30222eccb92aca8e9
|
[
"MIT"
] | null | null | null |
wordpress/komand_wordpress/actions/delete_user/schema.py
|
xhennessy-r7/insightconnect-plugins
|
59268051313d67735b5dd3a30222eccb92aca8e9
|
[
"MIT"
] | null | null | null |
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
REASSIGNEE = "reassignee"
USERNAME = "username"
class Output:
SUCCESS = "success"
class DeleteUserInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"reassignee": {
"type": "string",
"title": "Reassignee",
"description": "Username to reassign posts to",
"order": 2
},
"username": {
"type": "string",
"title": "Username",
"description": "Username",
"order": 1
}
},
"required": [
"username",
"reassignee"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class DeleteUserOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"success": {
"type": "boolean",
"title": "User Deleted",
"description": "User Deleted",
"order": 1
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| 17.68254
| 57
| 0.552065
|
fad05997b7c8c8c83c22c1ddd185e7ade8e0adf0
| 2,250
|
py
|
Python
|
1335/Minimum Difficulty of a Job Schedule.py
|
cccccccccccccc/Myleetcode
|
fb3fa6df7c77feb2d252feea7f3507569e057c70
|
[
"Apache-2.0"
] | null | null | null |
1335/Minimum Difficulty of a Job Schedule.py
|
cccccccccccccc/Myleetcode
|
fb3fa6df7c77feb2d252feea7f3507569e057c70
|
[
"Apache-2.0"
] | null | null | null |
1335/Minimum Difficulty of a Job Schedule.py
|
cccccccccccccc/Myleetcode
|
fb3fa6df7c77feb2d252feea7f3507569e057c70
|
[
"Apache-2.0"
] | null | null | null |
"""
time complexity: O(d * n^2), where n is the number of jobs
this is a dynamic programming problem.
dp[i][j] is the minimum difficulty of scheduling jobs 1..j over days 1..i
the transition is
dp[i][j] = min(dp[i][j], dp[i-1][x-1] + maxd)
where maxd is the hardest job among jobs x..j, accumulated as
for x in range(j, i-1, -1):
    maxd = max(maxd, jobDifficulty[x-1])
dp[0][0] is initialised to 0
all other entries are padded with 10001
similar to problems 139 and 472
"""
from collections import defaultdict
from typing import List
class Solution:
def minDifficulty(self, jobDifficulty: List[int], d: int) -> int:
if len(jobDifficulty) < d:
return -1
job= len(jobDifficulty)
dp = [[10001 for _ in range(job+1)] for _ in range(d+1)]
dp[0][0] = 0
for i in range(1,d+1):
for j in range(i,job+1):
#for x in range(i-1,j):
maxd = 0
for x in range(j,i-1,-1):
maxd = max(maxd,jobDifficulty[x-1]) # x-1 is for padding
dp[i][j] = min(dp[i][j],(dp[i-1][x-1]+maxd)) # day 1...i-1 do job 1...x-1 day i do job x...j
return dp[d][job]
"""
from typing import List
from collections import defaultdict
class Solution1:
def _mindifficulty(self,jobDifficulty,dictdp,d,j):
if (d,j) not in dictdp.keys():
dictdp[(d,j)] = 10001
maxd=0
for x in range(j,d-1,-1):
maxd = max(maxd,jobDifficulty[x-1])
dictdp[(d,j)] = min(dictdp[(d,j)],self._mindifficulty(jobDifficulty,dictdp,d-1,x-1)+maxd)
return dictdp[(d,j)]
def minDifficulty(self, jobDifficulty: List[int], d: int) -> int:
if len(jobDifficulty) < d:
return -1
job= len(jobDifficulty)
dictdp = {}
for i in range(d+1):
dictdp[(0,i)] = 0
return self._mindifficulty(jobDifficulty,dictdp,d,job)
"""
A = Solution()  # Solution1 above is commented out inside the docstring, so instantiate Solution
j = [6, 5, 4, 3, 2, 1]
# d = 6
d = 2
print(A.minDifficulty(j, d))  # prints 7 for this input
#[11,111,22,222,33,333,44,444]
'''
cost: 10 20 30
index 1 2 3
max[x] => max{cost[x..3]}
max:
x=1
max1 = max(1, 2, 3)
x=2
max2 = max(2, 3)
max1, cost[1] =/=> max2
reverse:
max:
x=3
max3 = max(3)
x=2
max2 = max(cost[2], max3)
x=1
max1 = max(cost[1], max2)
'''
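# A minimal sketch of the reverse (suffix-max) idea outlined in the note above;
# this helper is illustrative only and is not part of the original solution.
def suffix_max(cost):
    """Return m where m[x] = max(cost[x:]), built right-to-left in O(n)."""
    m = [0] * len(cost)
    running = float('-inf')
    for x in range(len(cost) - 1, -1, -1):
        running = max(running, cost[x])
        m[x] = running
    return m
# suffix_max([10, 20, 30]) == [30, 30, 30]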
| 22.058824
| 115
| 0.563556
|
8f47e7904df551e83e8076c5348fe0de113a356b
| 991
|
py
|
Python
|
lib/pysgpp/extensions/datadriven/uq/learner/builder/InterpolantSpecificationDescriptor.py
|
valentjn/thesis
|
65a0eb7d5f7488aac93882959e81ac6b115a9ea8
|
[
"CC0-1.0"
] | 4
|
2022-01-15T19:50:36.000Z
|
2022-01-15T20:16:10.000Z
|
lib/pysgpp/extensions/datadriven/uq/learner/builder/InterpolantSpecificationDescriptor.py
|
valentjn/thesis
|
65a0eb7d5f7488aac93882959e81ac6b115a9ea8
|
[
"CC0-1.0"
] | null | null | null |
lib/pysgpp/extensions/datadriven/uq/learner/builder/InterpolantSpecificationDescriptor.py
|
valentjn/thesis
|
65a0eb7d5f7488aac93882959e81ac6b115a9ea8
|
[
"CC0-1.0"
] | null | null | null |
class InterpolantSpecificationDescriptor(object):
"""
    TrainingSpecification descriptor that helps to implement the fluent
    interface pattern in Python; it encapsulates the functionality concerning
    creation of the training specification.
"""
    def __init__(self, builder):
"""
Constructor
@param builder: LearnerBuilder which creates this Descriptor
"""
self._builder = builder
def __getattr__(self, attr):
"""
Overrides built-in method
if method called is not a object method of this Descriptor, most
probably it's a method of LearnerBuilder so it tries to call the
method from our builder
@param attr: String for method name
@return: Method calling in LearnerBuilder
"""
self._builder.getLearner().setSpecification(self.__specification)
return getattr(self._builder, attr)
def create(self):
"""
Nothing needs to be done
"""
return
| 30.96875
| 79
| 0.650858
|
e5f78392740479478176bf8dc6c2c6a5e36f6ed8
| 1,645
|
py
|
Python
|
tests/stories/convert-all.py
|
livingbio/web-stories-wp
|
94a6173abeb50c9c8e67fb979fa0a24791af5b3b
|
[
"Apache-2.0"
] | null | null | null |
tests/stories/convert-all.py
|
livingbio/web-stories-wp
|
94a6173abeb50c9c8e67fb979fa0a24791af5b3b
|
[
"Apache-2.0"
] | 59
|
2020-05-26T10:52:48.000Z
|
2020-06-18T06:09:52.000Z
|
tests/stories/convert-all.py
|
livingbio/web-stories-wp
|
94a6173abeb50c9c8e67fb979fa0a24791af5b3b
|
[
"Apache-2.0"
] | null | null | null |
import os
import glob
import json
import random
def samples():
return glob.glob("./tests/stories/*/*.json")
def load(ifilename):
with open(ifilename) as ifile:
data = json.load(ifile)
layout = None
for i, page in enumerate(data["pages"]):
if i == 0:
layout = "COVER"
elif page["headline"] and page["title"]:
layout = "NORMAL"
elif not page["headline"]:
if len(page["title"]) < 50:
layout = "QUOTE"
else:
layout = random.choice(["FOCUS", "NORMAL"])
else:
raise NotImplementedError()
page["layout"] = layout
return data
def convert():
for ifilename in samples():
        ofilename = ifilename.replace("stories/", "stories-out/")
        if ofilename.endswith(".json"):
            ofilename = ofilename[: -len(".json")]  # rstrip(".json") would strip characters, not the suffix
ofilename = os.path.realpath(ofilename)
data = load(ifilename)
test_filename = ifilename.replace("stories/", "stories-tmp/")
os.makedirs(os.path.dirname(test_filename), exist_ok=True)
with open(test_filename, "w") as ofile:
json.dump(data, ofile, indent=4, ensure_ascii=False)
print(ofilename)
os.makedirs(os.path.dirname(ofilename), exist_ok=True)
os.system(
f"npm run workflow:aiconvert -- {test_filename} template.json {ofilename}.json {ofilename}.html"
)
def data():
cache = {}
for ifilename in samples():
with open(ifilename) as ifile:
cache[ifilename] = json.load(ifile)
return data
if __name__ == "__main__":
convert()
| 24.552239
| 108
| 0.558663
|
41cfc7c970a4ac72b521849400e212ce25da1357
| 9,752
|
py
|
Python
|
tests/scripts/thread-cert/Cert_6_4_02_RealmLocal.py
|
dismirlian/openthread
|
726f793609e11d8f14358cd04385a72a86d1fe74
|
[
"BSD-3-Clause"
] | 2
|
2019-07-12T13:19:40.000Z
|
2019-07-15T13:39:04.000Z
|
tests/scripts/thread-cert/Cert_6_4_02_RealmLocal.py
|
dismirlian/openthread
|
726f793609e11d8f14358cd04385a72a86d1fe74
|
[
"BSD-3-Clause"
] | 14
|
2020-04-21T19:36:36.000Z
|
2021-01-15T01:39:59.000Z
|
tests/scripts/thread-cert/Cert_6_4_02_RealmLocal.py
|
dismirlian/openthread
|
726f793609e11d8f14358cd04385a72a86d1fe74
|
[
"BSD-3-Clause"
] | 1
|
2021-01-21T13:50:22.000Z
|
2021-01-21T13:50:22.000Z
|
#!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import copy
import config
import thread_cert
from pktverify.packet_verifier import PacketVerifier
LEADER = 1
ROUTER = 2
MTD = 3
FRAGMENTED_DATA_LEN = 256
# Test Purpose and Description:
# -----------------------------
# The purpose of this test case is to validate the Realm-Local addresses
# that the DUT configures.
#
# Test Topology:
# -------------
# Leader
# |
# Router
# |
# DUT
#
# DUT Types:
# ----------
# ED
# SED
class Cert_6_4_2_RealmLocal_Base(thread_cert.TestCase):
TOPOLOGY = {
LEADER: {
'name': 'LEADER',
'mode': 'rdn',
'panid': 0xface,
'allowlist': [ROUTER]
},
ROUTER: {
'name': 'ROUTER',
'mode': 'rdn',
'panid': 0xface,
'router_selection_jitter': 1,
'allowlist': [LEADER, MTD]
},
MTD: {
'name': 'DUT',
'is_mtd': True,
'panid': 0xface,
'timeout': config.DEFAULT_CHILD_TIMEOUT,
'allowlist': [ROUTER]
},
}
def test(self):
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ROUTER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
self.nodes[MTD].start()
self.simulator.go(5)
self.assertEqual(self.nodes[MTD].get_state(), 'child')
self.collect_ipaddrs()
self.collect_rloc16s()
dut_addr = self.nodes[MTD].get_ip6_address(config.ADDRESS_TYPE.ML_EID)
self.assertTrue(self.nodes[LEADER].\
ping(dut_addr,
size=FRAGMENTED_DATA_LEN))
self.simulator.go(1)
self.assertTrue(self.nodes[LEADER].\
ping(dut_addr))
self.simulator.go(1)
if self.TOPOLOGY[MTD]['mode'] == 'rn':
self.assertTrue(self.nodes[LEADER].\
ping(config.REALM_LOCAL_ALL_NODES_ADDRESS,
num_responses=2,
size=FRAGMENTED_DATA_LEN))
self.simulator.go(2)
self.assertTrue(self.nodes[LEADER].\
ping(config.REALM_LOCAL_ALL_NODES_ADDRESS,
num_responses=2))
self.simulator.go(2)
self.assertTrue(self.nodes[LEADER].\
ping(config.REALM_LOCAL_All_THREAD_NODES_MULTICAST_ADDRESS,
num_responses=2,
size=FRAGMENTED_DATA_LEN))
self.simulator.go(2)
self.assertTrue(self.nodes[LEADER].\
ping(config.REALM_LOCAL_All_THREAD_NODES_MULTICAST_ADDRESS,
num_responses=2))
self.simulator.go(2)
def verify(self, pv):
pkts = pv.pkts
pv.summary.show()
LEADER = pv.vars['LEADER']
LEADER_MLEID = pv.vars['LEADER_MLEID']
ROUTER = pv.vars['ROUTER']
ROUTER_MLEID = pv.vars['ROUTER_MLEID']
ROUTER_RLOC16 = pv.vars['ROUTER_RLOC16']
DUT = pv.vars['DUT']
DUT_MLEID = pv.vars['DUT_MLEID']
DUT_RLOC16 = pv.vars['DUT_RLOC16']
# Step 1: Ensure topology is formed correctly
pv.verify_attached('ROUTER', 'LEADER')
pv.verify_attached('DUT', 'ROUTER', 'MTD')
# Step 2: Leader sends a Fragmented ICMPv6 Echo Request to
# DUT's ML-EID
# The DUT MUST respond with an ICMPv6 Echo Reply
_pkt = pkts.filter_ping_request().\
filter_ipv6_src_dst(LEADER_MLEID, DUT_MLEID).\
filter(lambda p: p.icmpv6.data.len == FRAGMENTED_DATA_LEN).\
must_next()
pkts.filter_ping_reply(identifier=_pkt.icmpv6.echo.identifier).\
filter_ipv6_src_dst(DUT_MLEID, LEADER_MLEID).\
filter(lambda p: p.icmpv6.data.len == FRAGMENTED_DATA_LEN).\
must_next()
# Step 3: Leader sends an Unfragmented ICMPv6 Echo Request to
# DUT’s ML-EID
# The DUT MUST respond with an ICMPv6 Echo Reply
_pkt = pkts.filter_ping_request().\
filter_ipv6_src_dst(LEADER_MLEID, DUT_MLEID).\
must_next()
pkts.filter_ping_reply(identifier=_pkt.icmpv6.echo.identifier).\
filter_ipv6_src_dst(DUT_MLEID, LEADER_MLEID).\
must_next()
if self.TOPOLOGY[MTD]['mode'] == 'rn':
# Step 4: Leader sends a Fragmented ICMPv6 Echo Request to the
# Realm-Local All Nodes multicast address (FF03::1)
# The DUT MUST respond with an ICMPv6 Echo Reply
_pkt1 = pkts.filter_ping_request().\
filter_wpan_src64(LEADER).\
filter_RLANMA().\
filter(lambda p: p.icmpv6.data.len == FRAGMENTED_DATA_LEN).\
must_next()
with pkts.save_index():
pkts.filter_ping_reply(identifier=_pkt1.icmpv6.echo.identifier).\
filter_ipv6_src_dst(ROUTER_MLEID, LEADER_MLEID).\
filter(lambda p: p.icmpv6.data.len == FRAGMENTED_DATA_LEN).\
must_next()
pkts.filter_ping_request(identifier=_pkt1.icmpv6.echo.identifier).\
filter_wpan_src16_dst16(ROUTER_RLOC16, DUT_RLOC16).\
filter(lambda p: p.icmpv6.data.len == FRAGMENTED_DATA_LEN).\
must_not_next()
# Step 5: Leader sends an Unfragmented ICMPv6 Echo Request to the
# Realm-Local All Nodes multicast address (FF03::1)
# The DUT MUST respond with an ICMPv6 Echo Reply
_pkt2 = pkts.filter_ping_request().\
filter_wpan_src64(LEADER).\
filter_RLANMA().\
filter(lambda p: p.icmpv6.echo.sequence_number !=
_pkt1.icmpv6.echo.sequence_number
).\
must_next()
with pkts.save_index():
pkts.filter_ping_reply(identifier=_pkt2.icmpv6.echo.identifier).\
filter_ipv6_src_dst(ROUTER_MLEID, LEADER_MLEID).\
must_next()
pkts.filter_ping_request(identifier = _pkt2.icmpv6.echo.identifier).\
filter_wpan_src16_dst16(ROUTER_RLOC16, DUT_RLOC16).\
must_not_next()
# Step 6: Leader sends a Fragmented ICMPv6 Echo Request to the
# Realm-Local All Thread Nodes multicast address
# The DUT MUST respond with an ICMPv6 Echo Reply
_pkt = pkts.filter_ping_request().\
filter_wpan_src64(LEADER).\
filter_RLATNMA().\
filter(lambda p: p.icmpv6.data.len == FRAGMENTED_DATA_LEN).\
must_next()
pkts.filter_ping_reply(identifier=_pkt.icmpv6.echo.identifier).\
filter_wpan_src64(DUT).\
filter_ipv6_dst(LEADER_MLEID).\
filter(lambda p: p.icmpv6.data.len == FRAGMENTED_DATA_LEN).\
must_next()
# Step 7: Leader sends an Unfragmented ICMPv6 Echo Request to the
# Realm-Local All Thread Nodes multicast address
# The DUT MUST respond with an ICMPv6 Echo Reply
_pkt = pkts.filter_ping_request().\
filter_wpan_src64(LEADER).\
filter_RLATNMA().\
filter(lambda p: p.icmpv6.data.len != FRAGMENTED_DATA_LEN).\
must_next()
pkts.filter_ping_reply(identifier=_pkt.icmpv6.echo.identifier).\
filter_wpan_src64(DUT).\
filter_ipv6_dst(LEADER_MLEID).\
must_next()
class Cert_6_4_2_RealmLocal_ED(Cert_6_4_2_RealmLocal_Base):
TOPOLOGY = copy.deepcopy(Cert_6_4_2_RealmLocal_Base.TOPOLOGY)
TOPOLOGY[MTD]['mode'] = 'rn'
class Cert_6_4_2_RealmLocal_SED(Cert_6_4_2_RealmLocal_Base):
TOPOLOGY = copy.deepcopy(Cert_6_4_2_RealmLocal_Base.TOPOLOGY)
TOPOLOGY[MTD]['mode'] = '-'
del (Cert_6_4_2_RealmLocal_Base)
if __name__ == '__main__':
unittest.main()
| 38.243137
| 83
| 0.606337
|
b541e15fd1f8d39b62f9506e7fd65b62709a3c05
| 15,969
|
py
|
Python
|
references/cpsc2019/CPSC0436_qrs9079_hr9377/train_code/YOLO_1d_HR_4h_loss_ava.py
|
busyyang/torch_ecg
|
031d90a32b8a1e202364efe1e5a19a9ba1f0a726
|
[
"MIT"
] | null | null | null |
references/cpsc2019/CPSC0436_qrs9079_hr9377/train_code/YOLO_1d_HR_4h_loss_ava.py
|
busyyang/torch_ecg
|
031d90a32b8a1e202364efe1e5a19a9ba1f0a726
|
[
"MIT"
] | null | null | null |
references/cpsc2019/CPSC0436_qrs9079_hr9377/train_code/YOLO_1d_HR_4h_loss_ava.py
|
busyyang/torch_ecg
|
031d90a32b8a1e202364efe1e5a19a9ba1f0a726
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
import torch
import torch.nn.functional as F
import numpy as np
import math
# from torchsummary import summary
def layer_1(in_channel,out_channel):
layer = nn.Sequential(
nn.Conv1d(in_channel,out_channel,16,stride=2,padding=7),
nn.BatchNorm1d(out_channel),
nn.ReLU(True),
nn.MaxPool1d(2,2)
)
return layer
class GCModule(nn.Module):
"""
Global Context (GC) block
the difference with ref. [1,2] lies in the treatment of mid-channels of `channel_att`
References:
-----------
[1] Cao, Yue, et al. "Gcnet: Non-local networks meet squeeze-excitation networks and beyond." Proceedings of the IEEE International Conference on Computer Vision Workshops. 2019.
[2] https://github.com/xvjiarui/GCNet/blob/master/mmdet/ops/gcb/context_block.py
"""
def __init__(self,channels,reduction=16,mode='mul'):
super(GCModule,self).__init__()
self.mode = mode
mid_channels = channels // reduction
self.channel_att = nn.Sequential(
nn.Conv1d(channels, mid_channels, kernel_size=1),
nn.LayerNorm([mid_channels,1]),
nn.ReLU(True),
nn.Conv1d(mid_channels, channels, kernel_size=1),
)
self.conv_mask = nn.Conv1d(channels,1,kernel_size=1)
self.softmax = nn.Softmax(dim=2)
def sptial_att(self,x):
input_x = x.unsqueeze(1)
context = self.conv_mask(x)
context = self.softmax(context)
context = torch.matmul(input_x,context.unsqueeze(3))
return context.squeeze(1)
def forward(self, x):
context = self.sptial_att(x)
att = self.channel_att(context)
if self.mode == 'add':
return x + att
else:
return x * torch.sigmoid(att)
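# Shape-check sketch for GCModule (illustrative only, not part of the original file;
# `torch` is imported at the top of this module):
#   >>> gc = GCModule(channels=64, reduction=16)   # channel_att mid channels = 4
#   >>> x = torch.randn(2, 64, 100)                # (batch, channels, length)
#   >>> gc(x).shape                                # default 'mul' mode: x * sigmoid(att)
#   torch.Size([2, 64, 100])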
class Residual_block(nn.Module):
def __init__(self,in_channel,out_channel,stride=1,down=False,drop=True):
super(Residual_block,self).__init__()
self.down = down
self.do_drop = drop
if down:
self.conv1 = nn.Sequential(
nn.Conv1d(in_channel, out_channel, kernel_size=7, stride=stride, padding=3, dilation=1),
nn.BatchNorm1d(out_channel))
self.conv2 = nn.Sequential(
nn.Conv1d(out_channel, out_channel, kernel_size=7, stride=1, padding=3, dilation=1),
nn.BatchNorm1d(out_channel))
self.down_sample = nn.Sequential(nn.Conv1d(in_channel,out_channel,1,stride=2))
else:
self.conv1 = nn.Sequential(
nn.Conv1d(in_channel, out_channel, kernel_size=7, stride=stride, padding=6, dilation=2),
nn.BatchNorm1d(out_channel))
self.conv2 = nn.Sequential(
nn.Conv1d(out_channel, out_channel, kernel_size=7, stride=1, padding=6, dilation=2),
nn.BatchNorm1d(out_channel))
self.GC = GCModule(out_channel)
self.relu = nn.ReLU(True)
self.drop = nn.Dropout(0.3)
def forward(self, x):
identity = x
x = self.conv1(x)
x = self.relu(x)
x = self.drop(x)
x = self.conv2(x)
x = self.GC(x)
if self.down:
identity = self.down_sample(identity)
x += identity
x = self.relu(x)
if self.do_drop:
x = self.drop(x)
return x
def encoder(labels,region_num):
N = labels.shape[0]
target = torch.zeros(N,region_num, 2)
region_size = 1. / region_num
# for i in range(len(label)):
for i,label in enumerate(labels):
for l in range(len(label)):
if label[l] == 0:
continue
point = label[l]
start_p = int(np.floor(point / region_size))
target[i,start_p, 1] = 1
delta_p = (point / region_size) - start_p
target[i,start_p, 0] = delta_p
return target.cuda()
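# Worked example for encoder (illustrative comment): with region_num = 4 each region
# spans 0.25 of the signal; a normalised label at 0.3 falls in region 1
# (floor(0.3 / 0.25) = 1), so target[i, 1] = [0.2, 1.0] -- the offset inside the
# region followed by the confidence flag.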
class Yolo_layer(nn.Module):
def __init__(self):
super(Yolo_layer,self).__init__()
# self.bs = bs
self.mse_loss = nn.MSELoss(size_average=True).cuda()
self.bce_loss = nn.BCELoss(size_average=True).cuda()
def forward(self, x,target=None):
if target is not None:
region_num = x.size(1)
target = encoder(target,region_num)
            r_mask = target[:, :, 1] > 0  # mask of the regions that carry an annotation
all_r_pred = x[r_mask].view(-1, 2)
all_r_target = target[r_mask].view(-1, 2)
            point_loss = self.mse_loss(all_r_pred[:, 0],  # regress the point offset only for annotated regions
all_r_target[:, 0])
conf_loss = self.bce_loss(x[...,1],target[...,1])
return conf_loss+point_loss
else:
x = x.detach().cpu()
nR = x.size(1)
offset = torch.arange(nR).view(1, nR).type(torch.FloatTensor)
x[..., 0] += offset
x[..., 0] = (x[..., 0] / nR) * 5000
return x
class final_layer(nn.Module):
def __init__(self,in_ch):
super(final_layer,self).__init__()
self.fuse_conv = nn.Sequential(nn.Conv1d(in_ch,in_ch // 2,7,stride=1,padding=3),
nn.BatchNorm1d(in_ch // 2),
nn.ReLU())
self.conv_final = nn.Sequential(nn.Conv1d(in_ch // 2,2,1))
self.drop = nn.Dropout(0.3)
def forward(self, x):
x = self.fuse_conv(x)
x = self.conv_final(x)
x = F.sigmoid(x)
x = x.permute(0,2,1)
return x
class StageModule(nn.Module):
def __init__(self,stage, out_branches, c):
super(StageModule,self).__init__()
self.stage = stage
# self.num_blocks = num_blocks
self.out_branches = out_branches
self.branches = nn.ModuleList()
for i in range(self.stage):
w = c * (2**i)
branch = nn.Sequential(
Residual_block(w, w),
Residual_block(w, w),
Residual_block(w, w),
)
self.branches.append(branch)
self.fuse_layers = nn.ModuleList()
for i in range(self.out_branches):
self.fuse_layers.append(nn.ModuleList())
for j in range(self.stage):
if i == j :
self.fuse_layers[-1].append(nn.Sequential())
elif i < j :
if i == 0:
# self.fuse_layers[-1].append(nn.Upsample(size=313))
self.fuse_layers[-1].append(nn.Sequential(
nn.Conv1d(c * (2 ** j),c * (2 ** i),kernel_size=1, stride=1),
nn.BatchNorm1d(c * (2 ** i)),
nn.Upsample(size=625)
))
# elif i < j and i == 1:
elif i == 1:
# self.fuse_layers[-1].append(nn.Upsample(size=157))
self.fuse_layers[-1].append(nn.Sequential(
nn.Conv1d(c * (2 ** j), c * (2 ** i), kernel_size=1, stride=1),
nn.BatchNorm1d(c * (2 ** i)),
nn.Upsample(size=313)
))
elif i == 2:
self.fuse_layers[-1].append(nn.Sequential(
nn.Conv1d(c * (2 ** j), c * (2 ** i), kernel_size=1, stride=1),
nn.BatchNorm1d(c * (2 ** i)),
nn.Upsample(size=157)
))
elif i > j:
opts = []
if i == j+1:
opts.append(nn.Sequential(
nn.Conv1d(c * (2 ** j), c * (2 ** i), kernel_size=7, stride=2, padding=3),
nn.BatchNorm1d(c * (2 ** i)),
))
elif i == j+2:
opts.append(nn.Sequential(
nn.Conv1d(c * (2 ** j), c * (2 ** (j+1)), kernel_size=7, stride=2, padding=3),
nn.BatchNorm1d(c * (2 ** (j+1))),
nn.ReLU(True),
nn.Conv1d(c * (2 ** (j+1)), c * (2 ** (j + 2)), kernel_size=7, stride=2, padding=3),
nn.BatchNorm1d(c * (2 ** (j + 2))),
))
elif i == j+3:
opts.append(nn.Sequential(
nn.Conv1d(c * (2 ** j), c * (2 ** (j+1)), kernel_size=7, stride=2, padding=3),
nn.BatchNorm1d(c * (2 ** (j+1))),
nn.ReLU(True),
nn.Conv1d(c * (2 ** (j+1)), c * (2 ** (j + 2)), kernel_size=7, stride=2, padding=3),
nn.BatchNorm1d(c * (2 ** (j + 2))),
nn.ReLU(True),
nn.Conv1d(c * (2 ** (j + 2)), c * (2 ** (j + 3)), kernel_size=7, stride=2, padding=3),
nn.BatchNorm1d(c * (2 ** (j + 3))),
))
self.fuse_layers[-1].append(nn.Sequential(*opts))
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
assert len(self.branches) == len(x)
x = [branch(b) for branch, b in zip(self.branches, x)]
x_fused = []
for i in range(len(self.fuse_layers)):
for j in range(0, len(self.branches)):
if j == 0:
x_fused.append(self.fuse_layers[i][0](x[0]))
else:
x_fused[i] = x_fused[i] + self.fuse_layers[i][j](x[j])
for i in range(len(x_fused)):
x_fused[i] = self.relu(x_fused[i])
return x_fused
class Yolo_1d(nn.Module):
def __init__(self,c=256):
super(Yolo_1d,self).__init__()
self.layer_1 = layer_1(1,64)
self.layer_2 = self._make_layers(64,128,3)
self.transition1 = nn.ModuleList([
nn.Sequential(),
nn.Sequential(nn.Sequential( # Double Sequential to fit with official pretrained weights
nn.Conv1d(128, c * (2 ** 1), kernel_size=7, stride=2, padding=3, bias=False),
nn.BatchNorm1d(c * (2 ** 1)),
nn.ReLU(inplace=True),
)),
])
self.stage2 = nn.Sequential(StageModule(stage=2,out_branches=2,c=c))
self.transition2 = nn.ModuleList([
nn.Sequential(),
nn.Sequential(),
nn.Sequential(nn.Sequential(
nn.Conv1d(c * (2 ** 1), c * (2 ** 2), kernel_size=7, stride=2, padding=3, bias=False),
nn.BatchNorm1d(c * (2 ** 2)),
nn.ReLU(inplace=True),
)),
])
self.stage3 = nn.Sequential(StageModule(stage=3,out_branches=3,c=c),
StageModule(stage=3,out_branches=3,c=c))
self.transition3 = nn.ModuleList([
nn.Sequential(),
nn.Sequential(),
nn.Sequential(),
nn.Sequential(nn.Sequential( # Double Sequential to fit with official pretrained weights
nn.Conv1d(c * (2 ** 2), c * (2 ** 3), kernel_size=7, stride=2, padding=3, bias=False),
nn.BatchNorm1d(c * (2 ** 3)),
nn.ReLU(inplace=True),
)),
])
self.stage4 = nn.Sequential(StageModule(stage=4,out_branches=4,c=c))
self.conv_o5 = final_layer(in_ch=1024)
self.conv_o4 = final_layer(in_ch=512)
self.conv_o3 = final_layer(in_ch=256)
self.conv_o2 = final_layer(in_ch=128)
self.yolo = Yolo_layer()
def _make_layers(self,in_ch,out_ch,blocks):
layers = []
layers.append(Residual_block(in_ch,out_ch,stride=2,down=True))
for _ in range(1,blocks):
layers.append(Residual_block(out_ch,out_ch))
return nn.Sequential(*layers)
def forward(self, x, target=None):
is_training = target is not None
x = self.layer_1(x)
x = self.layer_2(x)
# x = self.layer_3(x)
x = [trans(x) for trans in self.transition1]
x = self.stage2(x)
x = [
self.transition2[0](x[0]),
self.transition2[1](x[1]),
self.transition2[2](x[-1])
]
x = self.stage3(x)
x = [
self.transition3[0](x[0]),
self.transition3[1](x[1]),
self.transition3[2](x[2]),
self.transition3[3](x[-1])
]
out2,out3,out4,out5 = self.stage4(x)
f5 = self.conv_o5(out5)
f4 = self.conv_o4(out4)
f3 = self.conv_o3(out3)
f2 = self.conv_o2(out2)
if is_training:
y5 = self.yolo(f5,target)
y4 = self.yolo(f4,target)
y3 = self.yolo(f3,target)
y2 = self.yolo(f2,target)
else:
y5 = self.yolo(f5)
y4 = self.yolo(f4)
y3 = self.yolo(f3)
y2 = self.yolo(f2)
out = [y5,y4,y3,y2]
# tt = torch.cat(out,1)
return sum(out) if is_training else torch.cat(out,1)
def cal_dis(self,sig1,sig2):
distance = torch.abs(sig1[0]-sig2[:,0])
return distance
def predict(self,out,conf_thr):
out = out.detach().cpu()
hr_ans = []
r_ans = []
for i_sig, pred in enumerate(out): ## nms
cut_idx = (pred[..., 0] >= 0.5 * 500) & (pred[..., 0] <= 9.5 * 500)
pred = pred[cut_idx]
conf_mask = pred[..., 1] > conf_thr
pred = pred[conf_mask]
if not pred.size(0):
hr_ans.append(math.nan)
r_ans.append(np.array([]))
continue
_, conf_sort_idex = torch.sort(pred[:, 1], descending=True)
pred = pred[conf_sort_idex]
max_pred = []
while pred.size(0):
max_pred.append(pred[0])
if len(pred) == 1:
break
dis = self.cal_dis(max_pred[-1], pred[1:])
pred = pred[1:][dis > 80]
max_pred = torch.cat(max_pred, 0).view(-1, 2)
_, point_sort_index = torch.sort(max_pred[:, 0])
max_pred = np.array(max_pred[point_sort_index])
r_peak = max_pred[:, 0]
r_hr = np.array([loc for loc in r_peak if (loc > 5.5 * 500 and loc < 5000 - 0.5 * 500)])
hr = round(60 * 500 / np.mean(np.diff(r_hr)))
hr_ans.append(hr)
r_ans.append(r_peak)
return r_ans, hr_ans
def _predict(self, out, conf_thr):
out = out.detach()
hr_ans = []
r_ans = []
for i_sig, pred in enumerate(out):
conf_mask = pred[..., 1] > conf_thr
pred = pred[conf_mask]
if not pred.size(0):
hr_ans.append(math.nan)
r_ans.append(np.array([]))
continue
_, conf_sort_idex = torch.sort(pred[:, 1], descending=True)
pred = pred[conf_sort_idex]
max_pred = []
while pred.size(0):
max_pred.append(pred[0])
if len(pred) == 1:
break
dis = self.cal_dis(max_pred[-1], pred[1:])
pred = pred[1:][dis > 100]
max_pred = torch.cat(max_pred, 0).view(-1, 2)
_, point_sort_index = torch.sort(max_pred[:, 0])
max_pred = np.array(max_pred[point_sort_index])
idx = (max_pred[:, 0] >= 0.5 * 500) & (max_pred[:, 0] <= 9.5 * 500)
qrs = max_pred[idx]
hr_ans.append(0)
r_ans.append(qrs)
return r_ans, hr_ans
# if __name__ == "__main__":
# t = torch.randn(1,1,5000).cuda()
# model = Yolo_1d(c=128).cuda()
# model = torch.nn.DataParallel(model, device_ids=[0, 1])
# # summary(model,(1,5000))
# tt = model(t)
# print(1)
| 35.966216
| 182
| 0.49809
|
301a165bb4f4ff1ba5e02fda7b4e2e6ffc34e481
| 1,205
|
py
|
Python
|
demo/deep_learning/base/data_loader_factory.py
|
jihuacao/Putil
|
b753fc94bea4cbda00f483681c55f0e9f54adef2
|
[
"Apache-2.0"
] | 1
|
2018-12-09T06:09:29.000Z
|
2018-12-09T06:09:29.000Z
|
demo/deep_learning/base/data_loader_factory.py
|
jihuacao/Putil
|
b753fc94bea4cbda00f483681c55f0e9f54adef2
|
[
"Apache-2.0"
] | null | null | null |
demo/deep_learning/base/data_loader_factory.py
|
jihuacao/Putil
|
b753fc94bea4cbda00f483681c55f0e9f54adef2
|
[
"Apache-2.0"
] | null | null | null |
# conding=utf-8
import Putil.base.logger as plog
logger = plog.PutilLogConfig('data_loader_factory').logger()
logger.setLevel(plog.DEBUG)
data_loader_factory_logger = logger.getChild('data_loader_factory')
data_loader_factory_logger.setLevel(plog.DEBUG)
from Putil.demo.deep_learning.base import data_loader as standard
from util import data_loader as project
def data_loader_factory(args, data_loader_source, data_loader_name, property_type='', **kwargs):
'''
    @brief generate the callable data loader object
    @note
    @param[in] args
        args.framework: currently only 'torch' is supported
'''
data_loader_factory_logger.info('use {} data loader'.format(args.framework))
if args.framework == 'torch':
pass
else:
raise NotImplementedError('data_loader of framework: {} is not implemented'.format(args.framework))
data_loader = '{0}.{1}'.format(data_loader_source, data_loader_name)
return eval('{}(args, property_type, **kwargs)'.format(data_loader))
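# Hypothetical usage sketch (the loader name below is illustrative, not from this repo):
#   data_loader_factory(args, 'standard', 'DefaultDataLoader')
# evaluates "standard.DefaultDataLoader(args, property_type, **kwargs)", i.e. the loader
# class is looked up by name in the standard or project module imported above.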
def data_loader_arg_factory(parser, source, name, property_type='', **kwargs):
arg = '{}.{}Arg'.format(source, name)
logger.info('data_loader_arg: {}'.format(arg))
return eval('{}(parser, property_type, **kwargs)'.format(arg))
| 38.870968
| 107
| 0.724481
|
fe3dc911cdb33f3f0ad3a65c0d7c7f9369edd80d
| 4,064
|
py
|
Python
|
src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/extras/__init__.py
|
SunguckLee/real-mongodb
|
fef0e44fafc6d3709a84101327e7d2f54dd18d88
|
[
"Apache-2.0"
] | 4
|
2018-02-06T01:53:12.000Z
|
2018-02-20T01:47:36.000Z
|
src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/extras/__init__.py
|
SunguckLee/real-mongodb
|
fef0e44fafc6d3709a84101327e7d2f54dd18d88
|
[
"Apache-2.0"
] | null | null | null |
src/third_party/wiredtiger/test/3rdparty/extras-0.0.3/extras/__init__.py
|
SunguckLee/real-mongodb
|
fef0e44fafc6d3709a84101327e7d2f54dd18d88
|
[
"Apache-2.0"
] | 3
|
2018-02-06T01:53:18.000Z
|
2021-07-28T09:48:15.000Z
|
# Copyright (c) 2010-2012 extras developers. See LICENSE for details.
"""Extensions to the Python standard library."""
import sys
__all__ = [
'safe_hasattr',
'try_import',
'try_imports',
]
# same format as sys.version_info: "A tuple containing the five components of
# the version number: major, minor, micro, releaselevel, and serial. All
# values except releaselevel are integers; the release level is 'alpha',
# 'beta', 'candidate', or 'final'. The version_info value corresponding to the
# Python version 2.0 is (2, 0, 0, 'final', 0)." Additionally we use a
# releaselevel of 'dev' for unreleased under-development code.
#
# If the releaselevel is 'alpha' then the major/minor/micro components are not
# established at this point, and setup.py will use a version of next-$(revno).
# If the releaselevel is 'final', then the tarball will be major.minor.micro.
# Otherwise it is major.minor.micro~$(revno).
__version__ = (0, 0, 3, 'final', 0)
def try_import(name, alternative=None, error_callback=None):
"""Attempt to import ``name``. If it fails, return ``alternative``.
When supporting multiple versions of Python or optional dependencies, it
is useful to be able to try to import a module.
:param name: The name of the object to import, e.g. ``os.path`` or
``os.path.join``.
:param alternative: The value to return if no module can be imported.
Defaults to None.
:param error_callback: If non-None, a callable that is passed the ImportError
when the module cannot be loaded.
"""
module_segments = name.split('.')
last_error = None
while module_segments:
module_name = '.'.join(module_segments)
try:
module = __import__(module_name)
except ImportError:
last_error = sys.exc_info()[1]
module_segments.pop()
continue
else:
break
else:
if last_error is not None and error_callback is not None:
error_callback(last_error)
return alternative
nonexistent = object()
for segment in name.split('.')[1:]:
module = getattr(module, segment, nonexistent)
if module is nonexistent:
if last_error is not None and error_callback is not None:
error_callback(last_error)
return alternative
return module
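# Hedged usage sketch for try_import (illustrative, not part of the original module):
#   >>> import os
#   >>> try_import('os.path.join') is os.path.join
#   True
#   >>> try_import('no_such_module_xyz') is None
#   True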
_RAISE_EXCEPTION = object()
def try_imports(module_names, alternative=_RAISE_EXCEPTION, error_callback=None):
"""Attempt to import modules.
Tries to import the first module in ``module_names``. If it can be
imported, we return it. If not, we go on to the second module and try
that. The process continues until we run out of modules to try. If none
of the modules can be imported, either raise an exception or return the
provided ``alternative`` value.
:param module_names: A sequence of module names to try to import.
:param alternative: The value to return if no module can be imported.
If unspecified, we raise an ImportError.
    :param error_callback: If non-None, called with the ImportError for *each*
        module that fails to load.
:raises ImportError: If none of the modules can be imported and no
alternative value was specified.
"""
module_names = list(module_names)
for module_name in module_names:
module = try_import(module_name, error_callback=error_callback)
if module:
return module
if alternative is _RAISE_EXCEPTION:
raise ImportError(
"Could not import any of: %s" % ', '.join(module_names))
return alternative
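# Hedged usage sketch for try_imports (illustrative): pick the first importable module.
#   >>> best_io = try_imports(['cStringIO', 'io'])   # io on Python 3, cStringIO on Python 2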
def safe_hasattr(obj, attr, _marker=object()):
"""Does 'obj' have an attribute 'attr'?
Use this rather than built-in hasattr, as the built-in swallows exceptions
in some versions of Python and behaves unpredictably with respect to
properties.
"""
return getattr(obj, attr, _marker) is not _marker
| 38.339623
| 82
| 0.663878
|
0cf2e63382c3656f8242a0afa93c269430e30a07
| 1,818
|
py
|
Python
|
nikola/plugins/command/orphans.py
|
asmeurer/nikola
|
ea1c651bfed0fd6337f1d22cf8dd99899722912c
|
[
"MIT"
] | null | null | null |
nikola/plugins/command/orphans.py
|
asmeurer/nikola
|
ea1c651bfed0fd6337f1d22cf8dd99899722912c
|
[
"MIT"
] | null | null | null |
nikola/plugins/command/orphans.py
|
asmeurer/nikola
|
ea1c651bfed0fd6337f1d22cf8dd99899722912c
|
[
"MIT"
] | 1
|
2021-07-07T11:32:42.000Z
|
2021-07-07T11:32:42.000Z
|
# -*- coding: utf-8 -*-
# Copyright © 2012-2020 Roberto Alsina, Chris Warrick and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""List all orphans."""
import os
from nikola.plugin_categories import Command
from nikola.plugins.command.check import real_scan_files
class CommandOrphans(Command):
"""List all orphans."""
name = "orphans"
doc_purpose = "list all orphans"
doc_description = """\
List all orphans, i.e. all files that are in the output directory,
but are not generated by Nikola.
Output contains filenames only (it is passable to `xargs rm` or the like)."""
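    # Example shell usage (hypothetical invocation, based on the description above):
    #   nikola orphans | xargs rm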
def _execute(self, options, args):
"""Run the orphans command."""
orphans = real_scan_files(self.site)[0]
print('\n'.join([p for p in orphans if not os.path.isdir(p)]))
| 36.36
| 77
| 0.738174
|
570e8abe2137f6d7e6130d535ca22bf2cca814e4
| 10,984
|
py
|
Python
|
deepcell_retinamask/layers/retinanet_test.py
|
vanvalenlab/deepcell-retinamask
|
c922d4d836e881270da8b43c420c60d365883639
|
[
"Apache-2.0"
] | null | null | null |
deepcell_retinamask/layers/retinanet_test.py
|
vanvalenlab/deepcell-retinamask
|
c922d4d836e881270da8b43c420c60d365883639
|
[
"Apache-2.0"
] | 2
|
2021-11-26T11:18:44.000Z
|
2022-01-21T11:29:41.000Z
|
deepcell_retinamask/layers/retinanet_test.py
|
vanvalenlab/deepcell-retinamask
|
c922d4d836e881270da8b43c420c60d365883639
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016-2021 The Van Valen Lab at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/deepcell-retinamask/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# vanvalenlab@gmail.com
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the retinanet layers"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
from tensorflow.keras import backend as K
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.keras.utils import custom_object_scope
from tensorflow.python.platform import test
from deepcell_retinamask import layers
@keras_parameterized.run_all_keras_modes
class TestAnchors(keras_parameterized.TestCase):
def test_anchors_2d(self):
with custom_object_scope({'Anchors': layers.Anchors}):
testing_utils.layer_test(
layers.Anchors,
kwargs={'size': 1, 'stride': 1,
'data_format': 'channels_last'},
input_shape=(3, 5, 6, 4))
testing_utils.layer_test(
layers.Anchors,
kwargs={'size': 1, 'stride': 1,
'data_format': 'channels_last'},
input_shape=(3, None, None, None))
testing_utils.layer_test(
layers.Anchors,
kwargs={'size': 1, 'stride': 1,
'data_format': 'channels_first'},
input_shape=(3, 5, 6, 4))
def test_simple(self):
# create simple Anchors layer
anchors_layer = layers.Anchors(
size=32,
stride=8,
ratios=np.array([1], K.floatx()),
scales=np.array([1], K.floatx()),
)
# create fake features input (only shape is used anyway)
features = np.zeros((1, 2, 2, 1024), dtype=K.floatx())
features = K.variable(features)
# call the Anchors layer
anchors = anchors_layer.call(features)
anchors = K.get_value(anchors)
# expected anchor values
expected = np.array([[
[-12, -12, 20, 20],
[-4, -12, 28, 20],
[-12, -4, 20, 28],
[-4, -4, 28, 28],
]], dtype=K.floatx())
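        # (Derivation note, not part of the original test: with stride 8 the 2x2
        # feature map has cell centres (4, 4), (12, 4), (4, 12) and (12, 12); each
        # size-32 anchor spans centre +/- 16, e.g. (4-16, 4-16, 4+16, 4+16) = (-12, -12, 20, 20).)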
# test anchor values
self.assertAllEqual(anchors, expected)
def test_mini_batch(self):
# create simple Anchors layer
anchors_layer = layers.Anchors(
size=32,
stride=8,
ratios=np.array([1], dtype=K.floatx()),
scales=np.array([1], dtype=K.floatx()),
)
# create fake features input with batch_size=2
features = np.zeros((2, 2, 2, 1024), dtype=K.floatx())
features = K.variable(features)
# call the Anchors layer
anchors = anchors_layer.call(features)
anchors = K.get_value(anchors)
# expected anchor values
expected = np.array([[
[-12, -12, 20, 20],
[-4, -12, 28, 20],
[-12, -4, 20, 28],
[-4, -4, 28, 28],
]], dtype=K.floatx())
expected = np.tile(expected, (2, 1, 1))
# test anchor values
self.assertAllEqual(anchors, expected)
@keras_parameterized.run_all_keras_modes
class TestRegressBoxes(keras_parameterized.TestCase):
def test_simple(self):
# create simple RegressBoxes layer
layer = layers.RegressBoxes()
# create input
anchors = np.array([[
[0, 0, 10, 10],
[50, 50, 100, 100],
[20, 20, 40, 40],
]], dtype=K.floatx())
anchors = K.variable(anchors)
regression = np.array([[
[0, 0, 0, 0],
[0.1, 0.1, 0, 0],
[0, 0, 0.1, 0.1],
]], dtype=K.floatx())
regression = K.variable(regression)
# compute output
computed_shape = layer.compute_output_shape(
[anchors.shape, regression.shape])
actual = layer.call([anchors, regression])
actual = K.get_value(actual)
self.assertEqual(actual.shape, computed_shape)
# compute expected output
expected = np.array([[
[0, 0, 10, 10],
[51, 51, 100, 100],
[20, 20, 40.4, 40.4],
]], dtype=K.floatx())
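        # (Derivation note, not part of the original test: the deltas are scaled by the
        # anchor size and a std of 0.2, consistent with test_mini_batch below, so
        # 0.1 * 0.2 * 50 = 1 shifts [50, 50, 100, 100] to [51, 51, 100, 100] and
        # 0.1 * 0.2 * 20 = 0.4 gives 40.4.)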
self.assertAllClose(actual, expected)
def test_mini_batch(self):
mean = [0, 0, 0, 0]
std = [0.2, 0.2, 0.2, 0.2]
# create simple RegressBoxes layer
layer = layers.RegressBoxes(mean=mean, std=std)
# create input
anchors = np.array([
[
[0, 0, 10, 10], # 1
[50, 50, 100, 100], # 2
[20, 20, 40, 40], # 3
],
[
[20, 20, 40, 40], # 3
[0, 0, 10, 10], # 1
[50, 50, 100, 100], # 2
],
], dtype=K.floatx())
anchors = K.variable(anchors)
regression = np.array([
[
[0, 0, 0, 0], # 1
[0.1, 0.1, 0, 0], # 2
[0, 0, 0.1, 0.1], # 3
],
[
[0, 0, 0.1, 0.1], # 3
[0, 0, 0, 0], # 1
[0.1, 0.1, 0, 0], # 2
],
], dtype=K.floatx())
regression = K.variable(regression)
# compute output
actual = layer.call([anchors, regression])
actual = K.get_value(actual)
# compute expected output
expected = np.array([
[
[0, 0, 10, 10], # 1
[51, 51, 100, 100], # 2
[20, 20, 40.4, 40.4], # 3
],
[
[20, 20, 40.4, 40.4], # 3
[0, 0, 10, 10], # 1
[51, 51, 100, 100], # 2
],
], dtype=K.floatx())
self.assertAllClose(actual, expected)
def test_invalid_input(self):
bad_mean = 'invalid_data_type'
bad_std = 'invalid_data_type'
with self.assertRaises(ValueError):
layers.RegressBoxes(mean=bad_mean, std=None)
with self.assertRaises(ValueError):
layers.RegressBoxes(mean=None, std=bad_std)
@keras_parameterized.run_all_keras_modes
class ClipBoxesTest(keras_parameterized.TestCase):
def test_simple(self):
img_h, img_w = np.random.randint(2, 5), np.random.randint(5, 9)
boxes = np.array([[
[9, 9, 9, 9],
[-1, -1, -1, -1],
[0, 0, img_w, img_h],
[0, 0, img_w + 1, img_h + 1],
[0, 0, img_w - 1, img_h - 1],
]], dtype='int')
boxes = K.variable(boxes)
# compute expected output
expected = np.array([[
[img_w - 1, img_h - 1, img_w - 1, img_h - 1],
[0, 0, 0, 0],
[0, 0, img_w - 1, img_h - 1],
[0, 0, img_w - 1, img_h - 1],
[0, 0, img_w - 1, img_h - 1],
]], dtype=K.floatx())
# test channels_last
# create input
image = K.variable(np.random.random((1, img_h, img_w, 3)))
# create simple ClipBoxes layer
layer = layers.ClipBoxes(data_format='channels_last')
# compute output
computed_shape = layer.compute_output_shape(
[image.shape, boxes.shape])
actual = layer.call([image, boxes])
actual = K.get_value(actual)
self.assertEqual(actual.shape, tuple(computed_shape))
self.assertAllClose(actual, expected)
# test channels_first
# create input
image = K.variable(np.random.random((1, 6, img_h, img_w)))
# create simple ClipBoxes layer
layer = layers.ClipBoxes(data_format='channels_first')
# compute output
computed_shape = layer.compute_output_shape(
[image.shape, boxes.shape])
actual = layer.call([image, boxes])
actual = K.get_value(actual)
self.assertEqual(actual.shape, tuple(computed_shape))
self.assertAllClose(actual, expected)
def test_simple_3d(self):
img_h, img_w = np.random.randint(2, 5), np.random.randint(5, 9)
boxes = np.array([[
[9, 9, 9, 9],
[-1, -1, -1, -1],
[0, 0, img_w, img_h],
[0, 0, img_w + 1, img_h + 1],
[0, 0, img_w - 1, img_h - 1],
]], dtype='int')
boxes = np.expand_dims(boxes, axis=0)
boxes = K.variable(boxes)
# compute expected output
expected = np.array([[
[img_w - 1, img_h - 1, img_w - 1, img_h - 1],
[0, 0, 0, 0],
[0, 0, img_w - 1, img_h - 1],
[0, 0, img_w - 1, img_h - 1],
[0, 0, img_w - 1, img_h - 1],
]], dtype=K.floatx())
expected = np.expand_dims(expected, axis=0)
# test channels_last
# create input
image = K.variable(np.random.random((1, 1, img_h, img_w, 3)))
# create simple ClipBoxes layer
layer = layers.ClipBoxes(data_format='channels_last')
# compute output
computed_shape = layer.compute_output_shape(
[image.shape, boxes.shape])
actual = layer.call([image, boxes])
actual = K.get_value(actual)
self.assertEqual(actual.shape, tuple(computed_shape))
self.assertAllClose(actual, expected)
# test channels_first
# create input
image = K.variable(np.random.random((1, 6, 1, img_h, img_w)))
# create simple ClipBoxes layer
layer = layers.ClipBoxes(data_format='channels_first')
# compute output
computed_shape = layer.compute_output_shape(
[image.shape, boxes.shape])
actual = layer.call([image, boxes])
actual = K.get_value(actual)
self.assertEqual(actual.shape, tuple(computed_shape))
self.assertAllClose(actual, expected)
if __name__ == '__main__':
test.main()
| 32.40118
| 80
| 0.545794
|
27e6f828d84ba7dfcac70775d74e8c8c60102f10
| 24,586
|
py
|
Python
|
visgraph/graphcore.py
|
6un9-h0-Dan/vivisect
|
b0f10cad6796c914df2d88fae56647a172b8c1b7
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-12-23T19:23:17.000Z
|
2020-12-23T19:23:17.000Z
|
visgraph/graphcore.py
|
6un9-h0-Dan/vivisect
|
b0f10cad6796c914df2d88fae56647a172b8c1b7
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
visgraph/graphcore.py
|
6un9-h0-Dan/vivisect
|
b0f10cad6796c914df2d88fae56647a172b8c1b7
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-12-23T19:23:58.000Z
|
2020-12-23T19:23:58.000Z
|
'''
Graphcore contains all the base graph manipulation objects.
'''
import os
import json
import itertools
import threading
import collections
from binascii import hexlify
from exc import *
import visgraph.pathcore as vg_pathcore
def guid(size=16):
return hexlify(os.urandom(size))
def zdict():
return collections.defaultdict(int)
def ldict():
return collections.defaultdict(list)
def pdict():
return collections.defaultdict(ldict)
class Graph:
'''
The base Graph object implements a simple nodes and edges graph.
Nodes -
        Nodes consist of a dictionary of properties about that node and
        a unique id which identifies the node in the current graph. From
        an API perspective, a node is a tuple of (nid, nodeprops).
Edges -
Edges are directional sets of a from node-id and to node-id and
a piece of arbitrary edge information.
'''
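    # Hedged usage sketch (illustrative comment; it mirrors how fromJson() and merge()
    # below drive the API, and is not part of the original file):
    #   g = Graph()
    #   g.addNode(nid='n1', nprops={'label': 'start'})
    #   g.addNode(nid='n2', nprops={'label': 'end'})
    #   g.addEdge(g.getNode('n1'), g.getNode('n2'), eprops={})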
def __init__(self):
self.wipeGraph()
self.formlock = threading.Lock()
def setMeta(self, mprop, mval):
self.metadata[mprop] = mval
def getMeta(self, mprop, default=None):
'''
Retrieve a value from the dictionary of metadata about this graph.
Example:
m = g.getMeta('made-up-property')
'''
return self.metadata.get(mprop, default)
def toJson(self):
'''
convert the graph to a json serializable
data set.
'''
graph = {'nodes': {}, 'edges':[]}
graph['nodes'] = {node[0]: node[1] for node in self.getNodes()}
graph['edges'] = [edge for edge in self.getEdges()]
return json.dumps(graph)
@classmethod
def fromJsonFd(cls, fd):
'''
instantiate a graph from a file descriptor
containing serialized json.
'''
g = cls()
g.fromJson(json.loads(fd.read()))
return g
@classmethod
def fromJsonBuf(cls, buf):
'''
instantiate a graph from a serialized json buffer
'''
g = cls()
g.fromJson(json.loads(buf))
return g
def fromJson(self, graph):
'''
load a json serializable graph
'''
for nid,nprops in graph['nodes'].items():
self.addNode(nid=nid, nprops=nprops)
for eid, n1, n2, eprops in graph['edges']:
node1 = (n1, graph['nodes'][n1])
node2 = (n2, graph['nodes'][n2])
self.addEdge(node1, node2, eid=eid, eprops=eprops)
def merge(self, graph):
'''
duplicate another graph's contents. subclasses may wish to modify this
function to duplicate more properties if present.
'''
self.wipeGraph()
self.metadata.update(graph.metadata)
self.formnodes.update(graph.formnodes)
for nid,nprops in graph.nodes.values():
self.addNode(nid=nid, nprops=nprops)
for eid, n1, n2, eprops in graph.edges.values():
node1 = graph.getNode(n1)
node2 = graph.getNode(n2)
self.addEdge(node1, node2, eid=eid, eprops=eprops)
def wipeGraph(self):
'''
Re-initialize the graph structures and start clean again.
'''
self.edges = {}
self.nodes = {}
self.metadata = {}
self.formnodes = {}
self.nodeprops = pdict() # nodeprops[key][value] = list of nodes
self.edgeprops = pdict() # edgeprops[key][value] = list of edges
self.edge_by_to = ldict()
self.edge_by_from = ldict()
def getEdges(self):
'''
Get the list of edges in the graph. Edges are defined
as (eid, n1, n2, eprops) tuples.
Example: for eid, n1, n2, eprops in g.getEdges():
'''
return list(self.edges.values())
def getEdge(self, eid):
'''
Get the (eid, n1, n2, eprops) tuple for the specified
edge.
Example: e = g.getEdge(eid)
'''
return self.edges.get(eid)
def getEdgeProps(self, eid):
'''
Retrieve the properties dictionary for the given edge id.
'''
edge = self.edges.get(eid)
if not edge:
raise Exception('Invalid edge id')
return edge[3]
def getEdgesByProp(self, prop, val=None):
'''
Retrieve all the edges with the specified prop (optionally value).
Example:
# my made up "score" property on edges...
for edge in g.getEdgesByProp("score",300):
print(edge)
'''
if val != None:
return self.edgeprops.get(prop,{}).get(val,[])
ret = []
[ ret.extend(v) for v in self.edgeprops.get(prop,{}).values() ]
return ret
def setEdgeProp(self, edge, prop, value):
'''
Set a key/value pair about a given edge.
Example: g.setEdgeProp(edge, 'awesomeness', 99)
'''
curval = edge[3].get(prop)
if curval == value:
return False
edge[3][prop] = value
try:
if curval != None:
curlist = self.edgeprops[prop][curval]
curlist.remove( edge )
self.edgeprops[prop][value].append(edge)
except TypeError:
pass
return True
def setNodeProp(self, node, prop, value):
'''
Store a piece of information (by prop:value) about a given node.
( value must *not* be None )
Example:
g.setNodeProp(node, 'Description', 'My Node Is Awesome!')
'''
if value == None:
raise Exception('graph prop values may not be None! %r' % (node,))
curval = node[1].get(prop)
if curval == value:
return False
node[1][prop] = value
try:
if curval != None:
curlist = self.nodeprops[prop][curval]
curlist.remove( node )
self.nodeprops[prop][value].append(node)
except TypeError:
pass # no value indexing for un-hashable values
return True
def getNodesByProp(self, prop, val=None):
'''
Retrieve a list of nodes with the given property (optionally value).
Example:
for node in g.getNodesByProp("awesome",1):
print(node)
'''
if val != None:
return self.nodeprops.get(prop,{}).get(val,[])
ret = []
[ ret.extend(v) for v in self.nodeprops.get(prop,{}).values() ]
return ret
def addNode(self, nid=None, nprops=None, **kwargs):
'''
Add a Node object to the graph. Returns the node. (nid,nprops)
Example: node = g.addNode()
- or -
node = g.addNode('woot', {'height':20, 'width':20})
NOTE: If nid is unspecified, it is considered an 'anonymous'
node and will have an ID automagically assigned.
'''
if nid == None:
nid = guid()
p = self.nodes.get(nid)
if p != None:
raise DuplicateNode(nid)
myprops = {}
myprops.update(kwargs)
if nprops != None:
myprops.update(nprops)
node = (nid,myprops)
self.nodes[nid] = node
for k,v in myprops.items():
try:
self.nodeprops[k][v].append(node)
except TypeError:
pass
return node
def formNode(self, prop, value, ctor=None):
'''
Retrieve or create a node with the given prop=value.
If no node with the given property exists, create a new
one and trigger the optional ctor function. This allows
uniq'd "primary property" nodes in the graph.
NOTE: this will *only* be deconflicted with other formNode
calls.
Example:
def fooctor(node):
g.setNodeProp(node,"thing",0)
node1 = g.formNode("foo","bar",ctor=fooctor)
...
node2 = g.formNode("foo","bar",ctor=fooctor)
# node2 is a ref to node1 and fooctor was called once.
'''
with self.formlock:
node = self.formnodes.get( (prop,value) )
if node != None:
return node
nid = guid()
node = (nid,{prop:value})
self.nodes[nid] = node
self.formnodes[ (prop,value) ] = node
self.nodeprops[prop][value].append(node)
# fire ctor with lock to prevent an un-initialized retrieve.
if ctor != None:
ctor(node)
return node
def delNodeProp(self, node, prop):
'''
Delete a property from a node.
Example:
g.delNodeProp(node,"foo")
'''
pval = node[1].pop(prop,None)
if pval != None:
vlist = self.nodeprops[prop][pval]
vlist.remove(node)
if not vlist:
self.nodeprops[prop].pop(pval,None)
return pval
def delNodesProps(self, props):
'''
Delete all listed properties from all nodes in the graph.
Example:
g.delNodesProps(('foo', 'bar'))
'''
for prop in props:
for node in self.getNodesByProp(prop):
self.delNodeProp(node, prop)
def delNode(self, node):
'''
Delete a node from the graph.
Example:
g.delNode(node)
'''
for edge in self.getRefsFrom(node)[:]:
self.delEdge(edge)
for edge in self.getRefsTo(node)[:]:
self.delEdge(edge)
[ self.delNodeProp(node, k) for k in node[1].keys() ]
return self.nodes.pop(node[0])
def getNode(self, nid):
'''
Return the dictionary of properties for the specified node id.
'''
return self.nodes.get(nid)
def getNodeProps(self, nid):
return self.nodes.get(nid)[1]
def getNodes(self):
'''
Return a list of (nid, nprops) tuples.
'''
return self.nodes.values()
def getNodeCount(self):
return len(self.nodes)
def isLeafNode(self, node):
'''
A node is a "leaf" node if he has no "outgoing" edges.
'''
return len(self.getRefsFrom(node)) == 0
def isRootNode(self, node):
'''
A node is a "root" node if he has no "incoming" edges.
'''
return len(self.getRefsTo(node)) == 0
def hasEdge(self, edgeid):
return self.edges.get(edgeid) != None
def hasNode(self, nid):
'''
Check if a given node is present within the graph.
Example: if g.hasNode('yermom'): print('woot')
'''
return self.getNode(nid) != None
def addEdgeByNids(self, n1, n2, eid=None, eprops=None, **kwargs):
node1 = self.getNode(n1)
node2 = self.getNode(n2)
return self.addEdge(node1, node2, eid=eid, eprops=eprops, **kwargs)
def addEdge(self, node1, node2, eid=None, eprops=None, **kwargs):
'''
Add an edge to the graph. Edges are directional.
Example: g.addEdge(node1, node2, eprops={'name':'Woot Edge'})
'''
if eprops == None:
eprops = {}
eprops.update(kwargs)
if eid == None:
eid = guid()
n1 = node1[0]
n2 = node2[0]
edge = (eid, n1, n2, eprops)
self.edges[eid] = edge
self.edge_by_to[n2].append(edge)
self.edge_by_from[n1].append(edge)
for k,v in eprops.items():
try:
self.edgeprops[k][v].append(edge)
except TypeError:
pass # no value indexes for unhashable types
return edge
def delEdge(self, edge):
'''
Delete an edge from the graph (by eid).
Example: g.delEdge(eid)
'''
eid,n1,n2,eprops = edge
[ self.delEdgeProp(edge,k) for k in eprops.keys() ]
self.edges.pop(eid)
self.edge_by_to[n2].remove(edge)
self.edge_by_from[n1].remove(edge)
def delEdgeByEid(self, eid):
edge = self.getEdge(eid)
return self.delEdge(edge)
def delEdgeProp(self, edge, prop):
'''
Delete a property (by name) from the given edge tuple.
'''
v = edge[3].pop(prop,None)
if v != None:
vlist = self.edgeprops[prop][v]
vlist.remove(edge)
if not vlist:
self.edgeprops[prop].pop(v,None)
return v
def getRefsFrom(self, node):
'''
Return a list of edges which originate with us.
Example: for eid, n1, n2, eprops in g.getRefsFrom(node):
'''
return self.edge_by_from.get(node[0],[])
def getRefsFromByNid(self, nid):
return self.edge_by_from.get(nid,[])
def getRefsTo(self, node):
'''
Return a list of edges which terminate at us.
Example: for eid, n1, n2, eprops in g.getRefsTo(node):
'''
return self.edge_by_to.get(node[0],[])
def getRefsToByNid(self, nid):
return self.edge_by_to.get(nid,[])
def getClusterGraphs(self):
'''
Return a list of the subgraph clusters (as graphs) contained
within this graph. A subgraph "cluster" is defined as a
group of interconnected nodes. This can be used to split
out grouped subgraphs from within a larger graph.
'''
ret = []
nodes = self.getNodes()
done = {}
while len(nodes):
nid,nprops = nodes.pop()
if done.get(nid):
continue
# Generate the cluster subgraph
todo = [ (nid, nprops), ]
g = Graph()
while len(todo):
gnid, gnprops = todo.pop()
done[gnid] = True
if not g.hasNode(gnid):
g.addNode(nid=gnid, nprops=gnprops)
for eid,n1,n2,eprops in self.getRefsFromByNid(gnid):
if not g.getNode(n2):
n2props = self.getNodeProps(n2)
g.addNode(nid=n2, nprops=n2props)
todo.append((n2, n2props))
if not g.getEdge(eid):
g.addEdgeByNids(n1, n2, eid=eid, eprops=eprops)
for eid,n1,n2,eprops in self.getRefsToByNid(gnid):
if not g.getNode(n1):
n1props = self.getNodeProps(n1)
g.addNode(nid=n1, nprops=n1props)
todo.append((n1, n1props))
if not g.getEdge(eid):
g.addEdgeByNids(n1, n2, eid=eid, eprops=eprops)
ret.append(g)
return ret
def pathSearchOne(self, *args, **kwargs):
for p in self.pathSearch(*args, **kwargs):
return p
def pathSearch(self, n1, n2=None, edgecb=None, tocb=None):
'''
Search for the shortest path from one node to another
with the option to filter based on edges using
edgecb. edgecb should be a function:
def myedgecb(graph, eid, n1, n2, depth)
which returns True if it's OK to traverse this node
in the search.
Additionally, n2 may be None and the caller may specify
tocb with a function such as:
def mytocb(graph, nid)
which must return True on finding the target node
Returns a list of edge ids...
'''
if n2 == None and tocb == None:
raise Exception('You must use either n2 or tocb!')
root = vg_pathcore.newPathNode(nid=n1, eid=None)
todo = [(root, 0),]
# FIXME make this a deque so it can be FIFO
while len(todo):
pnode,depth = todo.pop() # popleft()
ppnode, pkids, pprops = pnode
nid = pprops.get('nid')
for edge in self.getRefsFromByNid(nid):
eid, srcid, dstid, eprops = edge
if vg_pathcore.isPathLoop(pnode, 'nid', dstid):
continue
# Check if the callback is present and likes us...
if edgecb != None:
if not edgecb(self, edge, depth):
continue
# Are we the match?
match = False
if dstid == n2:
match = True
if tocb and tocb(self, dstid):
match = True
if match:
m = vg_pathcore.newPathNode(pnode, nid=dstid, eid=eid)
path = vg_pathcore.getPathToNode(m)
ret = []
for ppnode, pkids, pprops in path:
eid = pprops.get('eid')
if eid != None:
ret.append(eid)
yield ret
# Add the next set of choices to evaluate.
branch = vg_pathcore.newPathNode(pnode, nid=dstid, eid=eid)
todo.append((branch, depth+1))
#return []
def pathSearchFrom(self, n1, nodecb, edgecb=None):
'''
Search from the specified node (breadth first) until you
find a node where nodecb(graph, nid) == True. See
pathSearch for docs on edgecb...
'''
class HierGraph(Graph):
'''
An extension to the directed Graph class which facilitates the
idea of node "hierarchies" which begin with root nodes.
NOTE: rootnodes are designated by the presence of the "rootnode"
property.
'''
def __init__(self):
Graph.__init__(self)
def addHierRootNode(self,*args,**kwargs):
'''
This API is the same as Graph.addNode but will also
mark the newly created node as a rootnode.
'''
node = self.addNode(*args,**kwargs)
self.setNodeProp(node,'rootnode',True)
return node
def setHierRootNode(self, node):
return self.setNodeProp(node,'rootnode',True)
def getHierRootNodes(self):
'''
Get all the nodes in this graph which have been marked as root nodes
(meaning they have no parents)...
'''
return self.getNodesByProp('rootnode')
def getHierNodeWeights(self):
'''
Calculate the node weights for the given nodes in the hierarchical
graph based on the added "rootnode" nodes. This will return a dict
of { nid: weight, } key-pairs.
# NOTE: This will also set the 'weight' property on the nodes
'''
weights = {}
rootnodes = self.getHierRootNodes()
if not len(rootnodes):
raise Exception('getHierNodeWeights() with no root nodes!')
todo = [ (root[0], {}) for root in rootnodes ]
while len(todo):
nid,path = todo.pop()
cweight = weights.get(nid, 0)
weights[nid] = max(cweight, len(path))
path[nid] = True
for eid, n1, n2, eprops in self.getRefsFromByNid(nid):
if weights.get(n2, -1) >= len(path):
continue
if not path.get(n2):
todo.append((n2, dict(path)))
for nid,weight in weights.items():
node = self.getNode(nid)
self.setNodeProp(node,'weight',weight)
return weights
def getHierPathCount(self):
'''
Retrieve the total number of paths from the root nodes
to any "leaf" nodes which are reachable.
Example:
if g.getHierPathCount() > pathmax:
print('skipping huge graph!')
'''
# We must put all nodes into weight order
weights = self.getHierNodeWeights()
# root nodes get a beginning score of 1
pcounts = zdict()
for root in self.getHierRootNodes():
pcounts[root[0]] = 1
def weightcmp(n1node,n2node):
return cmp(n1node[1]['weight'],n2node[1]['weight'])
nodes = self.getNodes()
# Lets make the list of nodes *ordered* by weight
nodes.sort(cmp=weightcmp)
# Here's the magic... In *hierarchy order* each node
# gets the sum of the paths of his parents.
ret = 0
for node in nodes:
for eid, n1, n2, eprops in self.getRefsFrom(node):
# edge to node with == weight is a loop...
if weights[n1] == weights[n2]:
continue
pcounts[n2] += pcounts[n1]
return sum([ pcounts[n[0]] for n in nodes if self.isLeafNode(n) ])
def getHierPathsFrom(self, node, loopcnt=0, maxpath=None, maxlen=None):
'''
Retrieve a generator of lists of (node,edge) tuples which represent
the paths from the specified node to any terminating "leaf" nodes.
Options:
loopcnt - how many times may a single node be repeated in a path
maxpath - maximum number of paths to yield
maxlen - maximum "length" of a path ( trunc if too long )
NOTE: The last tuple in the list will have edge == None.
However, if the last element in the list represents a
truncated loop, the last tuple's "edge" field will be
filled in with the loop's edge.
Example:
for path in g.getHierPathsFrom():
for node,edge in path:
checkstuff(node,edge)
'''
cnt = 0
todo = [(node,[],[node[0],])] # [ node, path, nids ]
while todo:
# nids is a speed hack...
pnode,path,nids = todo.pop()
edges = self.getRefsFrom(pnode)
if len(edges) == 0: # leaf/root...
path.append( (pnode,None) )
yield path
cnt += 1
if maxpath != None and cnt >= maxpath:
return
continue
if maxlen and len(path) >= maxlen:
continue
for edge in edges:
# yep... copies...
# ( still faster than keeping/culling tree )
newpath = list(path)
newpath.append((pnode,edge))
etoid = edge[2]
if nids.count(etoid) > loopcnt:
yield newpath
cnt += 1
if maxpath != None and cnt >= maxpath:
return
continue
newnids = list(nids)
newnids.append(etoid)
nnode = self.getNode(etoid)
todo.append((nnode,newpath,newnids))
def getHierPathsThru(self, node, maxpath=None, maxlen=None):
'''
Retrieve a generator of paths from root-->leaf nodes in this
graph which must also traverse the specified node.
'''
cnt = 0
for pathto in self.getHierPathsTo(node, maxpath=maxpath, maxlen=maxlen):
for pathfrom in self.getHierPathsFrom(node, maxpath=maxpath, maxlen=maxlen):
yield pathto[:-1] + pathfrom
cnt += 1
if maxpath != None and cnt >= maxpath:
return
def getHierPathsTo(self, node, maxpath=None, maxlen=None):
'''
Retrieve a generator of lists of (node,edge) tuples which represent
the paths to specified node from any root nodes.
(See getHierPathsFrom for details )
'''
cnt = 0
todo = [(node,[(node,None)],[node[0],])] # [ node, path, nids ]
while todo:
# nids is a speed hack...
pnode,path,nids = todo.pop()
edges = self.getRefsTo(pnode)
if len(edges) == 0: # leaf/root...
path.reverse()
yield path
cnt += 1
if maxpath != None and cnt >= maxpath:
return
continue
if maxlen and len(path) >= maxlen:
continue
for edge in edges:
# yep... copies...
# ( still faster than keeping/culling tree )
etoid = edge[1]
nnode = self.getNode(etoid)
newpath = list(path)
newpath.append((nnode,edge))
if etoid in nids:
continue
newnids = list(nids)
newnids.append(etoid)
todo.append((nnode,newpath,newnids))
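A minimal usage sketch of the Graph class defined above (node ids, properties and the edge name here are invented for illustration; the calls follow the docstrings):
g = Graph()
n1 = g.addNode('a', {'kind': 'start'})
n2 = g.addNode('b', {'kind': 'end'})
edge = g.addEdge(n1, n2, eprops={'name': 'a-to-b'})

# property indexes let nodes and edges be queried back out
print(g.getNodesByProp('kind', 'end'))   # [('b', {'kind': 'end'})]
print(g.getRefsFrom(n1))                 # [(<generated eid>, 'a', 'b', {'name': 'a-to-b'})]

# pathSearch yields lists of edge ids between two node ids
for path in g.pathSearch('a', 'b'):
    print(path)                          # [edge[0]]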
| 28.958775
| 88
| 0.528675
|
86cdb88a17c8a80d189d809536aba06639c18fdb
| 2,871
|
py
|
Python
|
LYOCR/lyocr.py
|
lstoryzx/LYOCR
|
adf1239be3bebb4cbccac8034577ef03b89d4d76
|
[
"MIT"
] | 2
|
2021-03-12T06:50:53.000Z
|
2021-03-16T05:29:31.000Z
|
LYOCR/lyocr.py
|
lstoryzx/LYOCR
|
adf1239be3bebb4cbccac8034577ef03b89d4d76
|
[
"MIT"
] | 1
|
2021-03-12T08:09:05.000Z
|
2021-03-16T05:33:35.000Z
|
LYOCR/lyocr.py
|
lstoryzx/LYOCR
|
adf1239be3bebb4cbccac8034577ef03b89d4d76
|
[
"MIT"
] | 1
|
2021-09-25T07:20:38.000Z
|
2021-09-25T07:20:38.000Z
|
# -*- coding:utf8 -*-
import json
import tkinter as tk
from tkinter import Toplevel, ttk
from tkinter.constants import BOTH, CENTER, E, LEFT, NE, NW, RIGHT, W, X, Y
import apikeysetting as aks
import frame_one
import frame_two
import frame_three
import frame_qr
import os
# Initialize the configuration file
aks.initializejson()
# Folder used to store exported tables
if not ('Tables') in os.listdir():
os.mkdir("./Tables/")
# Read the configuration
with open('api.json', 'r') as f:
data = json.load(f)
text_value = int(data['text'])
translation_value = int(data['translation'])
table_value = int(data['table'])
math_value = int(data['math'])
# Initialize the main window
win = tk.Tk()
win.title('落叶OCR')
# Center the window on the screen
sw = win.winfo_screenwidth()
# screen width
sh = win.winfo_screenheight()
# screen height
ww = 800
wh = 500
x = (sw-ww) / 2
y = (sh-wh) / 2
win.geometry("%dx%d+%d+%d" %(ww,wh,x,y))
win.minsize(800,500)
win.iconbitmap('.\\logo.ico')
# Customize the ttk style
style = ttk.Style()
style.theme_create( "MyStyle", parent="xpnative", settings={"TNotebook": {"configure": {"tabmargins": [0, 0, 0, 0] } },"TNotebook.Tab": {"configure": {"padding": [79, 10],"font" : ('URW Gothic L', '11')},}})
style.theme_use("MyStyle")
# Initialize the four tabs
notebook = ttk.Notebook(win)
frameOne = tk.Frame()
frameTwo = tk.Frame()
frameThree = tk.Frame(bg='Ivory')
frameFour = tk.Frame()
notebook.add(frameOne, text='文字')
notebook.add(frameTwo, text='表格')
notebook.add(frameThree, text='公式')
notebook.add(frameFour, text='二维码')
notebook.pack(fill=tk.BOTH, expand=True)
# Text tab
frame_one.Frameoneset(frameOne,text_value,translation_value,win)
# Table tab
frame_two.Frametwoset(frameTwo,table_value,win)
# Formula tab
frame_three.Framethreeset(frameThree,math_value,win)
# QR code tab
frameqr = tk.Frame(frameFour,width=800,height=225,bg='Azure')
frameqr.pack(fill=X)
frame_qr.Frameqrset(frameqr,win)
#about
framesetting = tk.Frame(frameFour,width=800,height=200,)
framesetting.pack(fill=BOTH,expand=True)
framesetleft = tk.Frame(framesetting,width=400,height=200,)
framesetleft.pack(side=LEFT,fill=BOTH,expand=True)
framesetright = tk.Frame(framesetting,width=400,height=200,)
framesetright.pack(side=RIGHT,fill=BOTH,expand=True)
ocrlable = tk.Label(framesetleft,text='项目地址:',font=('仿宋', 15), width=10, height=2)
ocrlable.pack(padx=15,pady=15,anchor=NW)
github = tk.Label(framesetleft,text='Github: https://github.com/lstoryzx/LYOCR',width=50,height=2)
github.pack(anchor=NW,padx=5,pady=10)
gitee = tk.Label(framesetleft,text='Gitee: https://gitee.com/lstoryzx/lyocr',width=50,height=2)
gitee.pack(anchor=NW,padx=5,pady=10)
def apisetting():
api_set = tk.Toplevel()
api_set.title('API设置')
api_set.geometry('500x400')
api_set.resizable(0, 0)
api_set.iconbitmap('.\\logo.ico')
aks.Apiset(api_set)
apibutton = tk.Button(framesetright,text='API设置',font=('仿宋', 15), width=15, height=3,relief='groove',bg='Azure',activebackground='Azure',command=apisetting)
apibutton.pack(padx=20,pady=100)
win.mainloop()
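The api.json reads above index the four keys directly and will raise KeyError on a partial file; a slightly more defensive variant (sketch only, same key names, defaulting missing flags to 0) could look like this:
import json

def load_api_flags(path='api.json'):
    try:
        with open(path, 'r') as f:
            data = json.load(f)
    except (OSError, ValueError):
        data = {}
    # default every flag to 0 (disabled) when the key is missing or the file is unreadable
    return {k: int(data.get(k, 0)) for k in ('text', 'translation', 'table', 'math')}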
| 29.90625
| 207
| 0.724486
|
23b2bdeeda40279baf162ef144cdf7bc6d3016af
| 2,237
|
py
|
Python
|
PYTHON/projects/scraper/scrapy crawler py/quotetutorial/quotetutorial/pipelines.py
|
Zed-M/dzcode.talk
|
7da4e0e83aa58597f30a61384370c3ae41926894
|
[
"Unlicense"
] | 12
|
2019-04-22T10:16:54.000Z
|
2022-01-08T02:44:19.000Z
|
PYTHON/projects/scraper/scrapy crawler py/quotetutorial/quotetutorial/pipelines.py
|
Zed-M/dzcode.talk
|
7da4e0e83aa58597f30a61384370c3ae41926894
|
[
"Unlicense"
] | null | null | null |
PYTHON/projects/scraper/scrapy crawler py/quotetutorial/quotetutorial/pipelines.py
|
Zed-M/dzcode.talk
|
7da4e0e83aa58597f30a61384370c3ae41926894
|
[
"Unlicense"
] | 3
|
2019-05-14T11:24:42.000Z
|
2019-08-08T16:13:09.000Z
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import sqlite3
class QuotetutorialPipeline(object):
def process_item(self, item, spider):
return item
# -*- coding: utf-8 -*-
# # Define your item pipelines here
# #
# # Don't forget to add your pipeline to the ITEM_PIPELINES setting
# # See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# # extracted data => temporary item containers => Pipelines => mongodb/sqlite/mysql database
# # activate it in the settings.py where 'ITEM_PIPELINES = {}'
# import mysql.connector
# import pymongo
class QuotetutorialPipeline(object):
def __init__(self):
self.create_connection()
self.create_table()
def create_connection(self):
# sqlite3 connection
self.conn = sqlite3.connect("myquotes_tb")
self.curr = self.conn.cursor()
# mysql connection
# self.conn = mysql.connector.connect(
# host='localhost',
# user='root',
# passwd='',
# database='myquotes'
# )
# mongodb connection
# self.conn = pymongo.MongoClient(
# 'localhost',
# '27017'
# )
# db=self.conn("myquotes")
# self.collection = db["quotes_tb"]
# create table for mysql/sqlite3
def create_table(self):
self.curr.execute(""" DROP TABLE IF EXISTS myquotes_tb""")
self.curr.execute(""" create table myquotes_tb(
title text,
author text,
tags text
)""")
def process_item(self, item, spider):
# mongodb: just insert the item as a dict
# self.collection.insert(dict(item))
# mysql/sqlite3 db store data
self.store_db(item)
return item
def store_db(self, item):
# replace ? with %s when using mysql
self.curr.execute("""insert into myquotes_tb values(?, ?, ?) """, (
item['title'][0],
item['author'][0],
item['tags'][0]
))
self.conn.commit()
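As the header comment notes, the pipeline only runs once it is registered in settings.py; a typical entry (the priority value 300 is an arbitrary choice) would be:
# settings.py (sketch)
ITEM_PIPELINES = {
    'quotetutorial.pipelines.QuotetutorialPipeline': 300,
}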
| 28.316456
| 93
| 0.573983
|
2256bf24c6c2e4370450c9f74747468ee00e7dc9
| 430
|
py
|
Python
|
website/models.py
|
trianglefraternitymtu/slack-bridge
|
63f4cdc6cd13fbb96ca6734da98a5a2a4bc44709
|
[
"MIT"
] | 2
|
2018-09-22T03:10:40.000Z
|
2019-04-24T14:57:30.000Z
|
website/models.py
|
trianglefraternitymtu/slack-bridge
|
63f4cdc6cd13fbb96ca6734da98a5a2a4bc44709
|
[
"MIT"
] | 13
|
2017-05-08T12:27:46.000Z
|
2019-04-24T14:59:04.000Z
|
website/models.py
|
trianglefraternitymtu/slack-bridge
|
63f4cdc6cd13fbb96ca6734da98a5a2a4bc44709
|
[
"MIT"
] | null | null | null |
from django.db import models
class Team(models.Model):
team_id = models.CharField(max_length=10, primary_key=True)
bot_id = models.CharField(max_length=12)
app_access_token = models.CharField(max_length=128)
bot_access_token = models.CharField(max_length=128)
class SharedChannel(models.Model):
local_team = models.ForeignKey(Team, on_delete=models.CASCADE)
channel_id = models.CharField(max_length=12)
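A hypothetical ORM usage sketch for the two models above (all ids and tokens are invented placeholders):
team = Team.objects.create(
    team_id='T0000001',
    bot_id='B0000001',
    app_access_token='xoxp-placeholder',
    bot_access_token='xoxb-placeholder',
)
SharedChannel.objects.create(local_team=team, channel_id='C0000001')

# every channel shared by a given team
shared = SharedChannel.objects.filter(local_team=team)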
| 35.833333
| 66
| 0.772093
|
f4f46c1c875395e02704a415a6aa8c2eac476aed
| 145
|
py
|
Python
|
examples/docs_snippets_crag/docs_snippets_crag_tests/concepts_tests/io_management_tests/test_output_config.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | null | null | null |
examples/docs_snippets_crag/docs_snippets_crag_tests/concepts_tests/io_management_tests/test_output_config.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | null | null | null |
examples/docs_snippets_crag/docs_snippets_crag_tests/concepts_tests/io_management_tests/test_output_config.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | 1
|
2021-11-30T21:40:46.000Z
|
2021-11-30T21:40:46.000Z
|
from docs_snippets_crag.concepts.io_management.output_config import execute_with_config
def test_execute_pipeline():
execute_with_config()
| 24.166667
| 87
| 0.855172
|
e927808dfc0246fe6e535614f0f967b44e33050c
| 1,139
|
py
|
Python
|
src/main.py
|
sistemasnegros/pydownloader
|
2759d66ba2684460e751284c2f7be19cd34d5bb8
|
[
"MIT"
] | 1
|
2019-09-23T22:28:55.000Z
|
2019-09-23T22:28:55.000Z
|
src/main.py
|
sistemasnegros/pydownloader
|
2759d66ba2684460e751284c2f7be19cd34d5bb8
|
[
"MIT"
] | null | null | null |
src/main.py
|
sistemasnegros/pydownloader
|
2759d66ba2684460e751284c2f7be19cd34d5bb8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import logging
# load the .cfg config file
from lib_sysblack.lib_config import load_config
from lib_sysblack.lib_csv import parser_cvs
from constans import NAME_FILE_LOG_PATH, NAME_FILE_CONFIG_PATH, NAME_FILE_CSV_PATH
from generic import GenericConfig
from downloader import Downloader
from report import Report
class Main(GenericConfig):
"""docstring for Main"""
def __init__(self):
super(Main, self).__init__(load_config, NAME_FILE_CONFIG_PATH, NAME_FILE_LOG_PATH, NAME_FILE_CSV_PATH)
self.loading_args()
self.log_configuration()
# General variable
self.config = self.loading_file_config()
self.errors = []
self.fields_csv = self.config.get("GENERAL", "fields_csv").split(",")
list_links = parser_cvs(self.args.csv, self.fields_csv)
http = Downloader()
report = Report()
for data in list_links:
download = http.download(data["link"])
report.add_elements(download)
report.print_report()
logging.info("Script Completado.")
if __name__ == '__main__':
Main()
| 21.903846
| 110
| 0.681299
|
188f6738b2e74ff026a9a220f210c74a6ec5ed19
| 2,554
|
py
|
Python
|
star-wars-analysis/match_csv_yarn2/match_csv_yarn2.py
|
GeneralMisquoti/star-wars-prequels-dialogues
|
6d64bdb5e8a11badaf658ad21fc64459b574ce70
|
[
"MIT"
] | null | null | null |
star-wars-analysis/match_csv_yarn2/match_csv_yarn2.py
|
GeneralMisquoti/star-wars-prequels-dialogues
|
6d64bdb5e8a11badaf658ad21fc64459b574ce70
|
[
"MIT"
] | 1
|
2020-06-23T20:51:32.000Z
|
2020-06-24T10:20:29.000Z
|
star-wars-analysis/match_csv_yarn2/match_csv_yarn2.py
|
GeneralMisquoti/star-wars-prequels-dialogues
|
6d64bdb5e8a11badaf658ad21fc64459b574ce70
|
[
"MIT"
] | null | null | null |
import logging
from time import strftime
from pathlib import Path
from .utils.file_json import JsonFile
from .utils.file_csv import CsvFile
from .utils.overrides import Overrides, GlobalOverride
from .utils.gaps import DetectGaps
HERE = Path(__file__).parent
# Logging
LOGGING_DIR = HERE / "logs"
if not LOGGING_DIR.exists():
LOGGING_DIR.mkdir()
log_file = LOGGING_DIR / (strftime("%y_%m_%d_%H_%M_%S") + ".log")
file_handler = logging.FileHandler(
filename=log_file,
)
file_handler.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler(
)
stream_handler.setLevel(logging.ERROR)
logging.basicConfig(
handlers=[file_handler, stream_handler],
format='[%(asctime)s,%(msecs)d] %(levelname)s [%(filename)s:%(module)s:%(funcName)s:%(lineno)d] %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p'
)
module_logger = logging.getLogger(__name__)
module_logger.info('Starting this bitch.')
CSV_DATA_DIR = HERE.parent / "preprocessed"
YARN_DATA_DIR = HERE.parent.parent / "yarn" / "parsing_source" / "parsed_sources" / "hand_fixed"
OVERRIDES_DIR = HERE / "manual_overrides"
GLOBAL_OVERRIDE = GlobalOverride(OVERRIDES_DIR / "global.toml")
MOVIE_NAMES = ["phantom_menace", "attack_of_the_clones", "revenge_of_the_sith"]
CSV_FILES = [
CsvFile(
CSV_DATA_DIR / (f"{i+1:02}_" + movie_name + ".csv"),
Overrides(OVERRIDES_DIR / (movie_name + ".csv"), GLOBAL_OVERRIDE),
movie_index=i
) for i, movie_name in enumerate(MOVIE_NAMES)
]
JSON_FILES = [
JsonFile(
YARN_DATA_DIR / (movie_name + ".json"),
movie_index=i
) for i, movie_name in enumerate(MOVIE_NAMES)
]
ZIPPED_FILES = list(zip(CSV_FILES, JSON_FILES))
assert len(ZIPPED_FILES) != 0
for i, (csv, yarn) in enumerate(ZIPPED_FILES):
csv.find_matches(
yarn,
show_progress=True,
detect_gaps=DetectGaps(log_file.with_name(log_file.stem + f"_gaps_{i}.txt"), movie=yarn.movie)
)
OUTPUT_DIR = HERE / "output"
if not OUTPUT_DIR.exists():
OUTPUT_DIR.mkdir()
OUTPUT_FILES_PROD = [OUTPUT_DIR / (movie_name + ".csv") for movie_name in MOVIE_NAMES]
for out_file, (csv_file, yarn_file) in zip(OUTPUT_FILES_PROD, ZIPPED_FILES):
with out_file.open('w+', encoding='UTF-8', newline='') as file:
csv_file.write(file, test=False)
OUTPUT_FILES_TEST = [OUTPUT_DIR / (movie_name + ".test.csv") for movie_name in MOVIE_NAMES]
for out_file, (csv_file, yarn_file) in zip(OUTPUT_FILES_TEST, ZIPPED_FILES):
with out_file.open('w+', encoding='UTF-8', newline='') as file:
csv_file.write(file, test=True)
| 33.168831
| 113
| 0.713391
|
8c3c7550301dcd150ebce34f77ba0d20788df124
| 817
|
py
|
Python
|
src/evo/death.py
|
feagi/feagi
|
598abbe294b5d9cd7ff34861fa6568ba899b2ab8
|
[
"Apache-2.0"
] | 1
|
2022-03-17T08:27:11.000Z
|
2022-03-17T08:27:11.000Z
|
src/evo/death.py
|
feagi/feagi
|
598abbe294b5d9cd7ff34861fa6568ba899b2ab8
|
[
"Apache-2.0"
] | 1
|
2022-02-10T16:30:35.000Z
|
2022-02-10T16:33:21.000Z
|
src/evo/death.py
|
feagi/feagi
|
598abbe294b5d9cd7ff34861fa6568ba899b2ab8
|
[
"Apache-2.0"
] | 1
|
2022-02-07T22:15:54.000Z
|
2022-02-07T22:15:54.000Z
|
# Copyright 2016-2022 The FEAGI Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
def register():
"""
Document test
Args:
ABCD
Returns:
Something
"""
return
| 28.172414
| 80
| 0.634027
|
9539b4f4e523b4f9c0783bf61aa74131aff8569f
| 1,352
|
py
|
Python
|
test/test_closed_indices.py
|
yumimobi/graylog.py
|
3118f4a49c91c2cbbd660523b0ab99e56fbfd861
|
[
"Apache-2.0"
] | 10
|
2016-09-27T08:13:22.000Z
|
2018-09-04T13:15:42.000Z
|
test/test_closed_indices.py
|
yumimobi/graylog.py
|
3118f4a49c91c2cbbd660523b0ab99e56fbfd861
|
[
"Apache-2.0"
] | 1
|
2019-08-28T16:16:09.000Z
|
2019-08-28T16:16:09.000Z
|
test/test_closed_indices.py
|
yumimobi/graylog.py
|
3118f4a49c91c2cbbd660523b0ab99e56fbfd861
|
[
"Apache-2.0"
] | 5
|
2016-11-03T07:45:18.000Z
|
2021-08-19T14:21:49.000Z
|
# coding: utf-8
"""
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.1.1+01d50e5
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import graylog
from graylog.rest import ApiException
from graylog.models.closed_indices import ClosedIndices
class TestClosedIndices(unittest.TestCase):
""" ClosedIndices unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testClosedIndices(self):
"""
Test ClosedIndices
"""
model = graylog.models.closed_indices.ClosedIndices()
if __name__ == '__main__':
unittest.main()
| 25.509434
| 104
| 0.714497
|
b2e73d3cf9726ab3c8655037cc5e1928faa79dd2
| 2,452
|
py
|
Python
|
scripts/piazza_api/piazza.py
|
ee16b/courserobot
|
73eecffca1b1f4a5cdf1408035553d43fcee6c29
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
scripts/piazza_api/piazza.py
|
ee16b/courserobot
|
73eecffca1b1f4a5cdf1408035553d43fcee6c29
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
scripts/piazza_api/piazza.py
|
ee16b/courserobot
|
73eecffca1b1f4a5cdf1408035553d43fcee6c29
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
from .rpc import PiazzaRPC
from .network import Network
class Piazza(object):
"""Unofficial Client for Piazza's Internal API
:type piazza_rpc: :class:`PiazzaRPC`
"""
def __init__(self, piazza_rpc=None):
self._rpc_api = piazza_rpc if piazza_rpc else None
def user_login(self, email=None, password=None):
"""Login with email, password and get back a session cookie
:type email: str
:param email: The email used for authentication
:type password: str
:param password: The password used for authentication
"""
self._rpc_api = PiazzaRPC()
self._rpc_api.user_login(email=email, password=password)
def demo_login(self, auth=None, url=None):
"""Authenticate with a "Share Your Class" URL using a demo user.
You may provide either the entire ``url`` or simply the ``auth``
parameter.
:param url: Example - "https://piazza.com/demo_login?nid=hbj11a1gcvl1s6&auth=06c111b"
:param auth: Example - "06c111b"
"""
self._rpc_api = PiazzaRPC()
self._rpc_api.demo_login(auth=auth, url=url)
def network(self, network_id):
"""Returns :class:`Network` instance for ``network_id``
:type network_id: str
:param network_id: This is the ID of the network.
This can be found by visiting your class page
on Piazza's web UI and grabbing it from
https://piazza.com/class/{network_id}
"""
self._ensure_authenticated()
return Network(network_id, self._rpc_api.cookies)
def get_user_profile(self):
"""Get profile of the current user
:returns: Profile of currently authenticated user
:rtype: dict
"""
return self._rpc_api.get_user_profile()
def get_user_classes(self):
"""Get list of the current user's classes. This is a subset of
``get_user_profile``.
:returns: Classes of currently authenticated user
:rtype: list
"""
raw_classes = self.get_user_profile().get('all_classes').values()
classes = []
for rawc in raw_classes:
c = {k: rawc[k] for k in ['name', 'num', 'term']}
c['nid'] = rawc['id']
c['is_ta'] = rawc.get('is_ta', False)
classes.append(c)
return classes
def _ensure_authenticated(self):
self._rpc_api._check_authenticated()
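Putting the methods above together, a typical session might look like the following sketch (the credentials are placeholders; the network id is the one used in the demo_login docstring):
p = Piazza()
p.user_login(email='user@example.com', password='not-a-real-password')

profile = p.get_user_profile()
for c in p.get_user_classes():
    print(c['name'], c['nid'])

network = p.network('hbj11a1gcvl1s6')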
| 32.263158
| 93
| 0.619902
|
cd8f2cf908ee798cddf9d8fde67fe35ae5407dc4
| 19,963
|
py
|
Python
|
multiple_pseuodo.py
|
aliayub7/EEC
|
ffb65e6701f5316b69c1ef3c3c130f00b73a18da
|
[
"MIT"
] | 6
|
2021-05-25T03:21:07.000Z
|
2021-11-18T13:38:10.000Z
|
multiple_pseuodo.py
|
aliayub7/EEC
|
ffb65e6701f5316b69c1ef3c3c130f00b73a18da
|
[
"MIT"
] | null | null | null |
multiple_pseuodo.py
|
aliayub7/EEC
|
ffb65e6701f5316b69c1ef3c3c130f00b73a18da
|
[
"MIT"
] | 1
|
2021-05-25T12:07:49.000Z
|
2021-05-25T12:07:49.000Z
|
import numpy as np
import sys
import os
import time
import pickle
from PIL import Image
from copy import deepcopy
import random
from sklearn.model_selection import train_test_split
import json
#from multiprocessing import Pool as cpu_pool
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from models.resnet import resnet18
from models.resnet import resnet34
import torch.nn.functional as F
from get_incremental_data import getIncrementalData
from get_transformed_data_with_decay import getTransformedData
from get_previous_data import getPreviousData
#from get_transformed_data import getTransformedData
from my_models.new_shallow import auto_shallow
from training_functions import train_reconstruction
from training_functions import eval_reconstruction
from training_functions import get_embeddings
from training_functions import get_pseudoimages
from training_functions import train
from training_functions import eval_training
from training_functions import train_with_decay
from training_functions import eval_training_with_decay
from get_centroids import getCentroids
from Functions_new import get_pseudoSamples
from label_smoothing import LSR
seed=random.randint(0,10000)
np.random.seed(seed)
torch.manual_seed(seed)
random.seed(seed)
if __name__ == '__main__':
dataset_name = 'imagenet'
save_data = False
use_saved_images = False
path_to_previous = '/home/ali/Ali_Work/clean_autoencoder_based/Imagenet-50/previous_classes'
validation_based = False
if dataset_name == 'imagenet':
path_to_train = '/media/ali/860 Evo/ali/ILSVRC2012_Train'
path_to_test = '/media/ali/860 Evo/ali/ILSVRC2012_Test'
# incremental steps info
total_classes = 10
full_classes = 1000
limiter = 50
# Image transformation mean and std
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std = [0.229, 0.224, 0.225]
# hyperparameters
weight_decay = 5e-4
classify_lr = 0.1
reconstruction_lr = 0.001
reconstruction_epochs = 100
classification_epochs = 200
batch_size = 128
# for centroids
get_covariances = True
diag_covariances = True
clustering_type = 'k_means' # can use other clustering types from Functions_new
centroids_limit = 25000
centroid_finder = getCentroids(None,None,total_classes,seed=seed,get_covariances=get_covariances,diag_covariances=diag_covariances,centroids_limit=centroids_limit)
features_name = 'multiple_'+str(centroids_limit)
# autoencoders_set
auto_1 = auto_shallow(total_classes,seed=seed)
auto_2 = auto_shallow(total_classes,seed=seed)
auto_3 = auto_shallow(total_classes,seed=seed)
auto_4 = auto_shallow(total_classes,seed=seed)
auto_5 = auto_shallow(total_classes,seed=seed)
auto_1.cuda()
auto_2.cuda()
auto_3.cuda()
auto_4.cuda()
auto_5.cuda()
autoencoder_set = [auto_1,auto_2,auto_3,auto_4,auto_5]
#classify_net
classify_net = resnet18(total_classes)
# loss functions and optimizers
#loss_classify = nn.CrossEntropyLoss()
loss_classify = LSR()
loss_rec = nn.MSELoss()
# get incremental data
incremental_data_creator = getIncrementalData(path_to_train,path_to_test,full_classes=full_classes,seed=seed)
incremental_data_creator.incremental_data(total_classes=total_classes,limiter=limiter)
# define transforms
transforms_classification_train = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize(32),
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(15),
transforms.ToTensor(),
transforms.Normalize(imagenet_mean,imagenet_std)
])
transforms_classification_test = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize(32),
transforms.CenterCrop(32),
transforms.ToTensor(),
transforms.Normalize(imagenet_mean,imagenet_std)
])
transforms_reconstruction = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize(32),
transforms.CenterCrop(32),
transforms.ToTensor(),
transforms.Normalize(imagenet_mean,imagenet_std)
])
################################# INCREMENTAL LEARNING PHASE ##################################
complete_x_train = []
complete_y_train = []
complete_x_test = []
complete_y_test = []
complete_centroids = []
complete_covariances = []
complete_centroids_num = []
complete_samples = []
original_complete_centroids = []
original_complete_covariances = []
original_complete_centroids_num = []
Accus = []
full_classes = limiter
for increment in range(0,int(full_classes/total_classes)):
print ('This is increment number: ',increment)
# get data for the current increment
train_images_increment,train_labels_increment,test_images_increment,test_labels_increment = incremental_data_creator.incremental_data_per_increment(increment)
if increment==0:
previous_images = deepcopy(train_images_increment)
previous_labels = deepcopy(train_labels_increment)
else:
""" Regeneration of pseudo_images """
# first generate psuedo samples from centroids and covariances
previous_images = []
previous_labels = []
pack = []
for i in range(0,len(complete_centroids_num)):
# for filtering
#pack.append([complete_centroids[i],complete_covariances[i],[750 for x in range(0,len(complete_centroids_num[i]))],i,diag_covariances,total_classes,
#increment,seed])
pack.append([complete_centroids[i],complete_covariances[i],complete_centroids_num[i],i,diag_covariances,total_classes,
increment,seed])
previous_samples,_ = get_pseudoSamples(pack)
print ('pseudo_samples generated. Creating pseudo_images.')
print (' ')
temp_loss = LSR(reduction='none')
for i in range(0,len(complete_centroids)):
#samples_needed = 500 - len(complete_samples[i])
samples_needed = sum(complete_centroids_num[i])
# for filtering
#psuedo_images = psuedoImage_filtering(net,classify_net,temp_loss,previous_samples[i],[i for x in range(len(previous_samples[i]))],samples_needed,seed=seed)
# for no filtering
temp = np.array(previous_samples[i])
temp = torch.from_numpy(temp)
temp = temp.float()
psuedo_images,counter = get_pseudoimages(autoencoder_set[increment-1],temp,class_number=i,seed=seed)
previous_images.extend(psuedo_images)
previous_labels.extend([i for x in range(samples_needed)])
temp = np.array(complete_samples[i])
temp = torch.from_numpy(temp)
temp = temp.float()
psuedo_images,_ = get_pseudoimages(autoencoder_set[increment-1],temp,class_number=i,seed=seed,global_counter=counter)
previous_images.extend(list(psuedo_images))
previous_labels.extend([i for x in range(len(psuedo_images))])
print ('actual previous images',np.array(previous_images).shape)
print ('previous labels',np.array(previous_labels).shape)
#print ('previous ages',np.array(ages).shape)
# append new images
previous_images.extend(train_images_increment)
previous_labels.extend(train_labels_increment)
print ('train images',np.array(previous_images).shape)
print ('train labels',np.array(previous_labels).shape)
# complete x test update with new classes' test images
complete_x_test.extend(test_images_increment)
complete_y_test.extend(test_labels_increment)
if validation_based:
# Creating a validation split
x_train,x_test,y_train,y_test = train_test_split(previous_images,previous_labels,test_size=0.2,stratify=previous_labels)
else:
# otherwise just rename variables
x_train = previous_images
y_train = previous_labels
#x_test = complete_x_test
#y_test = complete_y_test
############################## Classifier Training ######################################
# get dataloaders
train_dataset_classification = getTransformedData(x_train,y_train,transform=transforms_classification_train,seed=seed)
test_dataset_classification = getTransformedData(complete_x_test,complete_y_test,transform=transforms_classification_test,seed=seed)
dataloaders_train_classification = torch.utils.data.DataLoader(train_dataset_classification,batch_size = batch_size,
shuffle=True, num_workers = 4)
dataloaders_test_classification = torch.utils.data.DataLoader(test_dataset_classification,batch_size = batch_size,
shuffle=False, num_workers = 4)
if validation_based:
val_dataset_classification = getTransformedData(x_test,y_test,transform=transforms_classification_test,seed=seed)
dataloaders_val_classification = torch.utils.data.DataLoader(val_dataset_classification,batch_size = batch_size,
shuffle=False, num_workers = 4)
# update classifier's fc layer and optimizer
classify_net.fc = nn.Linear(512,total_classes+(total_classes*increment))
optimizer = optim.SGD(classify_net.parameters(),lr=classify_lr,weight_decay=weight_decay,momentum=0.9)
train_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[60,120,160], gamma=0.2) #learning rate decay
classify_net = classify_net.cuda()
# for faster training times after the first increment
if increment>0:
classification_epochs = 45
train_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[37], gamma=0.1) #learning rate decay
# load the classifier from file if it has already been trained on the classes of this increment
classifier_path = './checkpoint/'+str(total_classes+(increment*total_classes))+"classes_"+dataset_name
if os.path.exists(classifier_path):
classify_net.load_state_dict(torch.load(classifier_path))
epoch_acc = eval_training_with_decay(classify_net,dataloaders_test_classification,loss_classify,seed=seed)
Accus.append(epoch_acc.cpu().numpy().tolist())
else:
since=time.time()
best_acc = 0.0
for epoch in range(0, classification_epochs):
classification_loss = train(classify_net,dataloaders_train_classification,optimizer,loss_classify,lambda_based=None,seed=seed)
print ('epoch:', epoch, ' classification loss:', classification_loss, ' learning rate:', optimizer.param_groups[0]['lr'])
train_scheduler.step(epoch)
if validation_based:
epoch_acc = eval_training_with_decay(classify_net,dataloaders_val_classification,loss_classify,seed=seed)
if epoch_acc>=best_acc:
best_acc = epoch_acc
best_model_wts = deepcopy(classify_net.state_dict())
print (' ')
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
if validation_based:
#print ('best_acc',best_acc)
classify_net.load_state_dict(best_model_wts)
epoch_acc = eval_training(classify_net,dataloaders_test_classification,loss_classify,seed=seed)
print ('test_acc',epoch_acc)
Accus.append(epoch_acc.cpu().numpy().tolist())
Accus.append(epoch_acc.cpu().numpy().tolist())
if validation_based:
torch.save(best_model_wts, "./checkpoint/"+str(total_classes+(increment*total_classes))+"classes_"+dataset_name)
else:
torch.save(classify_net.state_dict(),"./checkpoint/"+str(total_classes+(increment*total_classes))+"classes_"+dataset_name)
############################## Autoencoder Training ######################################
# get dataloaders
train_dataset_reconstruction = getTransformedData(train_images_increment,train_labels_increment,
transform=transforms_reconstruction,seed=seed)
test_dataset_reconstruction = getTransformedData(test_images_increment,test_labels_increment,transform=transforms_reconstruction,seed=seed)
dataloaders_train_reconstruction = torch.utils.data.DataLoader(train_dataset_reconstruction,batch_size = batch_size,
shuffle=True, num_workers = 4)
dataloaders_test_reconstruction = torch.utils.data.DataLoader(test_dataset_reconstruction,batch_size = batch_size,
shuffle=True, num_workers = 4)
for_embeddings_dataloader = torch.utils.data.DataLoader(train_dataset_reconstruction,batch_size = batch_size,
shuffle=False, num_workers = 4)
# no need to run autoencoder in the last increment
if increment < 4:
# path to load autoencoder from file if it has already been trained on the classes of this increment
autoencoder_path = './checkpoint/autoencoder_'+str(total_classes+(increment*total_classes))+"classes_"+dataset_name
if os.path.exists(autoencoder_path):
autoencoder_set[increment].load_state_dict(torch.load(autoencoder_path))
else:
optimizer_rec = optim.Adam(autoencoder_set[increment].parameters(), lr=reconstruction_lr, weight_decay=weight_decay)
train_scheduler_rec = optim.lr_scheduler.MultiStepLR(optimizer_rec, milestones=[50], gamma=0.1) #learning rate decay
since=time.time()
best_loss = 100.0
for epoch in range(1, reconstruction_epochs):
#reconstruction_loss = train_reconstruction(autoencoder_set[increment],dataloaders_train_reconstruction,
#optimizer_rec,loss_rec,lambda_based=True,classify_net=classify_net,seed=seed,epoch=epoch)
reconstruction_loss = train_reconstruction(autoencoder_set[increment],dataloaders_train_reconstruction,optimizer_rec,loss_rec,seed=seed,epoch=epoch)
print ('epoch:', epoch, ' reconstruction loss:', reconstruction_loss)
train_scheduler_rec.step(epoch)
"""
#test_loss = eval_reconstruction(net,dataloaders_test_reconstruction,loss_rec,seed=seed)
test_loss = eval_reconstruction(autoencoder_set[increment],dataloaders_test_reconstruction,loss_rec,seed=seed)
if test_loss<=best_loss:
best_loss = test_loss
#best_model_wts = deepcopy(net.state_dict())
best_model_wts = deepcopy(autoencoder_set[increment].state_dict())
"""
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print (' ')
#autoencoder_set[increment].load_state_dict(best_model_wts)
if validation_based:
torch.save(best_model_wts, "./checkpoint/autoencoder_"+str(total_classes+(increment*total_classes))+"classes_"+dataset_name)
else:
torch.save(autoencoder_set[increment].state_dict(),
"./checkpoint/autoencoder_"+str(total_classes+(increment*total_classes))+"classes_"+dataset_name)
# get embeddings from the trained autoencoder
embeddings = get_embeddings(autoencoder_set[increment],for_embeddings_dataloader,total_classes,seed=seed,increment=increment)
print ('embeddings',np.array(embeddings).shape)
distance_threshold = int(centroids_limit/((increment*total_classes)+total_classes))
# 1300*10 = 13000 => save all samples, no need to perform clustering
if distance_threshold>=1300:
temp = list(embeddings)
complete_samples.extend(temp)
original_complete_centroids.extend(temp)
complete_centroids = [[] for x in range(total_classes+(total_classes*increment))]
complete_centroids_num = [[] for x in range(total_classes+(total_classes*increment))]
complete_covariances = [[] for x in range(total_classes+(total_classes*increment))]
for i in range(0,len(temp)):
original_complete_centroids_num.append([1 for x in range(0,len(temp[i]))])
original_complete_covariances.append([[1.0 for x in range(0,len(temp[i][0]))] for y in range(0,len(temp[i]))])
else:
# initialize the centroid finder variable
centroid_finder.initialize(None,None,total_classes,increment=0,d_base=distance_threshold,get_covariances=get_covariances,
diag_covariances=diag_covariances,seed=seed,current_centroids=original_complete_centroids,
complete_covariances=original_complete_covariances,complete_centroids_num=original_complete_centroids_num,clustering_type=clustering_type,
centroids_limit=centroids_limit)
# find clusters
centroid_finder.without_validation(embeddings)
complete_centroids = centroid_finder.complete_centroids
complete_covariances = centroid_finder.complete_covariances
complete_centroids_num = centroid_finder.complete_centroids_num
original_complete_centroids = deepcopy(complete_centroids)
original_complete_covariances = deepcopy(complete_covariances)
original_complete_centroids_num = deepcopy(complete_centroids_num)
complete_samples = [[] for x in range(0,len(complete_centroids))]
# separate samples and centroids from the output by clustering
cur_num_cent = 0
for i in range(0,len(complete_centroids)):
sample_indices = [j for j,x in enumerate(complete_centroids_num[i]) if x==1]
temp = np.array(complete_centroids[i])
complete_samples[i] = temp[sample_indices]
sample_indices = [j for j,x in enumerate(complete_centroids_num[i]) if x!=1]
temp = np.array(complete_centroids[i])
complete_centroids[i] = temp[sample_indices]
temp = np.array(complete_centroids_num[i])
complete_centroids_num[i] = temp[sample_indices]
temp = np.array(complete_covariances[i])
complete_covariances[i] = temp[sample_indices]
print ('for class',i,'samples are',np.array(complete_samples[i]).shape)
print ('for class',i,'centroids are',np.array(complete_centroids[i]).shape)
cur_num_cent += len(complete_centroids[i])
print ('All accuracies yet', Accus)
experimental_data = dict()
experimental_data['seed'] = seed
experimental_data['acc'] = Accus
experimental_data['centroids_limit'] = centroids_limit
experimental_data['current_centroids'] = cur_num_cent
if save_data == True:
with open('data.json','r') as f:
data=json.load(f)
if features_name not in data:
data[features_name] = dict()
data[features_name][str(len(data[features_name])+1)] = experimental_data
with open('data.json', 'w') as fp:
json.dump(data, fp, indent=4, sort_keys=True)
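The rehearsal step above regenerates pseudo-samples by drawing around the stored centroids with (diagonal) covariances before decoding them into pseudo-images; the NumPy sketch below illustrates only that sampling idea and is not the repository's get_pseudoSamples implementation:
import numpy as np

def sample_from_centroids(centroids, diag_covariances, counts, seed=0):
    # One Gaussian per centroid; a diagonal covariance means independent per-dimension noise.
    rng = np.random.RandomState(seed)
    samples = []
    for mu, var, n in zip(centroids, diag_covariances, counts):
        mu = np.asarray(mu, dtype=np.float64)
        std = np.sqrt(np.asarray(var, dtype=np.float64))
        samples.append(mu + std * rng.randn(int(n), mu.shape[0]))
    return np.concatenate(samples, axis=0)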
| 50.796438
| 172
| 0.676502
|
23e031889660646a919a64f48d5b58321b03e7a0
| 638
|
py
|
Python
|
examples/venv10/bin/rst2html.py
|
OpenPrecincts/verification
|
83a4486729ddae20b48c81e8c230f41fc28b31ce
|
[
"MIT"
] | 6
|
2020-06-17T14:41:48.000Z
|
2022-01-31T03:48:14.000Z
|
examples/venv10/bin/rst2html.py
|
OpenPrecincts/verification
|
83a4486729ddae20b48c81e8c230f41fc28b31ce
|
[
"MIT"
] | null | null | null |
examples/venv10/bin/rst2html.py
|
OpenPrecincts/verification
|
83a4486729ddae20b48c81e8c230f41fc28b31ce
|
[
"MIT"
] | null | null | null |
#!/Users/baxterdemers/verification/examples/venv10/bin/python3
# $Id: rst2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML documents from standalone reStructuredText '
'sources. ' + default_description)
publish_cmdline(writer_name='html', description=description)
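publish_cmdline pulls the source and destination paths from the command line (e.g. rst2html.py source.rst output.html); the same call can be made programmatically by passing argv explicitly. The file names below are placeholders.
from docutils.core import publish_cmdline
publish_cmdline(writer_name='html', argv=['README.rst', 'README.html'])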
| 26.583333
| 78
| 0.744514
|
2d7732ed182eb878f86fdc4c5b0cf683870c6448
| 7,192
|
py
|
Python
|
fatf/utils/metrics/tests/test_metrics_metrics.py
|
perellonieto/fat-forensics
|
0fd975ec743c5f44fc29bb2a499a2c1067bdbeff
|
[
"BSD-3-Clause"
] | null | null | null |
fatf/utils/metrics/tests/test_metrics_metrics.py
|
perellonieto/fat-forensics
|
0fd975ec743c5f44fc29bb2a499a2c1067bdbeff
|
[
"BSD-3-Clause"
] | null | null | null |
fatf/utils/metrics/tests/test_metrics_metrics.py
|
perellonieto/fat-forensics
|
0fd975ec743c5f44fc29bb2a499a2c1067bdbeff
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Tests for the metric functions in :mod:`fatf.utils.metrics.metrics`.
"""
# Author: Kacper Sokol <k.sokol@bristol.ac.uk>
# License: new BSD
import numpy as np
import pytest
import fatf.utils.metrics.metrics as fumm
CMA = np.array([[1, 1, 1], [1, 2, 1], [1, 1, 1]])
CMA_BIN = np.array([[3, 11], [7, 5]])
def test_multiclass_true_positive_rate():
"""
Tests :func:`fatf.utils.metrics.metrics.multiclass_true_positive_rate`.
"""
mtpr_0 = fumm.multiclass_true_positive_rate(CMA, 0)
assert mtpr_0 == pytest.approx(0.333, abs=1e-3)
mtpr_1 = fumm.multiclass_true_positive_rate(CMA, 1)
assert mtpr_1 == 0.5
mtpr_2 = fumm.multiclass_true_positive_rate(CMA, 2)
assert mtpr_2 == pytest.approx(0.333, abs=1e-3)
def test_multiclass_true_negative_rate():
"""
Tests :func:`fatf.utils.metrics.metrics.multiclass_true_negative_rate`.
"""
type_error = 'The strict parameter has to be a boolean.'
with pytest.raises(TypeError) as exi:
fumm.multiclass_true_negative_rate(CMA, 0, 'one')
assert str(exi.value) == type_error
metric = pytest.approx(5 / 7)
mtpr_0_n = fumm.multiclass_true_negative_rate(CMA, 0)
assert mtpr_0_n == metric
mtpr_0_n = fumm.multiclass_true_negative_rate(CMA, 0, False)
assert mtpr_0_n == metric
#
metric = pytest.approx(4 / 6)
mtpr_1_n = fumm.multiclass_true_negative_rate(CMA, 1)
assert mtpr_1_n == metric
mtpr_1_n = fumm.multiclass_true_negative_rate(CMA, 1, False)
assert mtpr_1_n == metric
#
metric = pytest.approx(5 / 7)
mtpr_2_n = fumm.multiclass_true_negative_rate(CMA, 2)
assert mtpr_2_n == metric
mtpr_2_n = fumm.multiclass_true_negative_rate(CMA, 2, False)
assert mtpr_2_n == metric
metric = pytest.approx(3 / 7)
mtpr_0_p = fumm.multiclass_true_negative_rate(CMA, 0, True)
assert mtpr_0_p == metric
metric = pytest.approx(2 / 6)
mtpr_1_p = fumm.multiclass_true_negative_rate(CMA, 1, True)
assert mtpr_1_p == metric
metric = pytest.approx(3 / 7)
mtpr_2_p = fumm.multiclass_true_negative_rate(CMA, 2, True)
assert mtpr_2_p == metric
def test_multiclass_false_positive_rate():
"""
Tests :func:`fatf.utils.metrics.metrics.multiclass_false_positive_rate`.
"""
mtpr_0 = fumm.multiclass_false_positive_rate(CMA, 0)
assert mtpr_0 == pytest.approx(2 / 7, abs=1e-3)
mtpr_1 = fumm.multiclass_false_positive_rate(CMA, 1)
assert mtpr_1 == pytest.approx(2 / 6, abs=1e-3)
mtpr_2 = fumm.multiclass_false_positive_rate(CMA, 2)
assert mtpr_2 == pytest.approx(2 / 7, abs=1e-3)
def test_multiclass_false_negative_rate():
"""
Tests :func:`fatf.utils.metrics.metrics.multiclass_false_negative_rate`.
"""
mtpr_0 = fumm.multiclass_false_negative_rate(CMA, 0)
assert mtpr_0 == pytest.approx(2 / 3, abs=1e-3)
mtpr_1 = fumm.multiclass_false_negative_rate(CMA, 1)
assert mtpr_1 == pytest.approx(2 / 4, abs=1e-3)
mtpr_2 = fumm.multiclass_false_negative_rate(CMA, 2)
assert mtpr_2 == pytest.approx(2 / 3, abs=1e-3)
def test_true_positive_rate():
"""
Tests :func:`fatf.utils.metrics.metrics.true_positive_rate`.
"""
mtpr = fumm.true_positive_rate(CMA_BIN)
assert mtpr == pytest.approx(3 / 10, abs=1e-3)
def test_true_negative_rate():
"""
Tests :func:`fatf.utils.metrics.metrics.true_negative_rate`.
"""
mtpr = fumm.true_negative_rate(CMA_BIN)
assert mtpr == pytest.approx(5 / 16, abs=1e-3)
def test_false_negative_rate():
"""
Tests :func:`fatf.utils.metrics.metrics.false_negative_rate`.
"""
mtpr = fumm.false_negative_rate(CMA_BIN)
assert mtpr == pytest.approx(7 / 10, abs=1e-3)
def test_false_positive_rate():
"""
Tests :func:`fatf.utils.metrics.metrics.false_positive_rate`.
"""
mtpr = fumm.false_positive_rate(CMA_BIN)
assert mtpr == pytest.approx(11 / 16, abs=1e-3)
def test_multiclass_positive_predictive_value():
"""
:func:`fatf.utils.metrics.metrics.multiclass_positive_predictive_value`.
"""
mtpr_0 = fumm.multiclass_positive_predictive_value(CMA, 0)
assert mtpr_0 == pytest.approx(1 / 3, abs=1e-3)
mtpr_1 = fumm.multiclass_positive_predictive_value(CMA, 1)
assert mtpr_1 == pytest.approx(2 / 4, abs=1e-3)
mtpr_2 = fumm.multiclass_positive_predictive_value(CMA, 2)
assert mtpr_2 == pytest.approx(1 / 3, abs=1e-3)
def test_multiclass_negative_predictive_value():
"""
:func:`fatf.utils.metrics.metrics.multiclass_negative_predictive_value`.
"""
type_error = 'The strict parameter has to be a boolean.'
with pytest.raises(TypeError) as exi:
fumm.multiclass_negative_predictive_value(CMA, 0, 'one')
assert str(exi.value) == type_error
metric = pytest.approx(5 / 7)
mtpr_0_n = fumm.multiclass_negative_predictive_value(CMA, 0)
assert mtpr_0_n == metric
mtpr_0_n = fumm.multiclass_negative_predictive_value(CMA, 0, False)
assert mtpr_0_n == metric
#
metric = pytest.approx(4 / 6)
mtpr_1_n = fumm.multiclass_negative_predictive_value(CMA, 1)
assert mtpr_1_n == metric
mtpr_1_n = fumm.multiclass_negative_predictive_value(CMA, 1, False)
assert mtpr_1_n == metric
#
metric = pytest.approx(5 / 7)
mtpr_2_n = fumm.multiclass_negative_predictive_value(CMA, 2)
assert mtpr_2_n == metric
mtpr_2_n = fumm.multiclass_negative_predictive_value(CMA, 2, False)
assert mtpr_2_n == metric
metric = pytest.approx(3 / 7)
mtpr_0_p = fumm.multiclass_negative_predictive_value(CMA, 0, True)
assert mtpr_0_p == metric
metric = pytest.approx(2 / 6)
mtpr_1_p = fumm.multiclass_negative_predictive_value(CMA, 1, True)
assert mtpr_1_p == metric
metric = pytest.approx(3 / 7)
mtpr_2_p = fumm.multiclass_negative_predictive_value(CMA, 2, True)
assert mtpr_2_p == metric
def test_positive_predictive_value():
"""
:func:`fatf.utils.metrics.metrics.positive_predictive_value`.
"""
mtpr = fumm.positive_predictive_value(CMA_BIN)
assert mtpr == pytest.approx(3 / 14, abs=1e-3)
def test_negative_predictive_value():
"""
:func:`fatf.utils.metrics.metrics.negative_predictive_value`.
"""
mtpr = fumm.negative_predictive_value(CMA_BIN)
assert mtpr == pytest.approx(5 / 12, abs=1e-3)
def test_accuracy():
"""
Tests :func:`fatf.utils.metrics.metrics.accuracy` function.
"""
acc = fumm.accuracy(CMA)
assert acc == pytest.approx(4 / 10, abs=1e-3)
acc = fumm.accuracy(CMA_BIN)
assert acc == pytest.approx(8 / 26, abs=1e-3)
def test_multiclass_treatment():
"""
Tests :func:`fatf.utils.metrics.metrics.multiclass_treatment` function.
"""
mtpr_0 = fumm.multiclass_treatment(CMA, 0)
assert mtpr_0 == pytest.approx(2 / 6, abs=1e-3)
mtpr_1 = fumm.multiclass_treatment(CMA, 1)
assert mtpr_1 == pytest.approx(2 / 6, abs=1e-3)
mtpr_2 = fumm.multiclass_treatment(CMA, 2)
assert mtpr_2 == pytest.approx(2 / 6, abs=1e-3)
def test_treatment():
"""
Tests :func:`fatf.utils.metrics.metrics.treatment` function.
"""
mtpr = fumm.treatment(CMA_BIN)
assert mtpr == pytest.approx(11 / 18, abs=1e-3)
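As a cross-check on the expected values above, here is a minimal numpy-only sketch of the same arithmetic; it assumes the convention inferred from the tests themselves (rows of the confusion matrix hold predicted labels, columns hold true labels) rather than anything stated in the fatf documentation.
import numpy as np

cma_bin = np.array([[3, 11], [7, 5]])  # same binary confusion matrix as CMA_BIN above

tp, fp = cma_bin[0, 0], cma_bin[0, 1]  # predicted-positive row
fn, tn = cma_bin[1, 0], cma_bin[1, 1]  # predicted-negative row

assert tp / (tp + fn) == 3 / 10            # true positive rate (test_true_positive_rate)
assert tn / (tn + fp) == 5 / 16            # true negative rate (test_true_negative_rate)
assert fp / (fp + tn) == 11 / 16           # false positive rate
assert fn / (fn + tp) == 7 / 10            # false negative rate
assert tp / (tp + fp) == 3 / 14            # positive predictive value
assert tn / (tn + fn) == 5 / 12            # negative predictive value
assert (tp + tn) / cma_bin.sum() == 8 / 26  # accuracy (test_accuracy)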
| 33.142857
| 76
| 0.695217
|
9c13652a492fba32d7331efd9994b61a7c8648cb
| 1,027
|
py
|
Python
|
old_experiments/generate_test_files.py
|
weepingwillowben/reward-surfaces
|
f27211faf3784df3305972b7cad65002fd57d7bf
|
[
"MIT"
] | null | null | null |
old_experiments/generate_test_files.py
|
weepingwillowben/reward-surfaces
|
f27211faf3784df3305972b7cad65002fd57d7bf
|
[
"MIT"
] | null | null | null |
old_experiments/generate_test_files.py
|
weepingwillowben/reward-surfaces
|
f27211faf3784df3305972b7cad65002fd57d7bf
|
[
"MIT"
] | 2
|
2021-10-03T14:51:38.000Z
|
2021-11-10T02:54:26.000Z
|
from .extract_params import ParamLoader
from .generate_random_filter import generate_random_directions
import os
def generate_test_file(base_folder, outfile, x_window, y_window, xpos, ypos):
extension = "."+outfile.split(".")[-1]
base_paraloader = ParamLoader(os.path.join(base_folder, "base_params"+extension))
base_params = base_paraloader.get_params()
x_dir_params = ParamLoader(os.path.join(base_folder, "x_dir"+extension)).get_params()
y_dir_params = ParamLoader(os.path.join(base_folder, "y_dir"+extension)).get_params()
x_loc = (xpos/x_window)*2-1
y_loc = (ypos/y_window)*2-1
alt_params = [param + dir1*x_loc + dir2*y_loc for dir1,dir2,param in zip(x_dir_params,y_dir_params,base_params)]
for p, ap in zip(base_params, alt_params):
assert p.shape == ap.shape, f"{p.shape},{ap.shape}"
base_paraloader.set_params(alt_params)
base_paraloader.save(outfile)
if __name__ == "__main__":
    # Illustrative invocation only: the original call referenced an undefined
    # ``generate_test_files("trained_agents/trpo/LunarLander-v2.pkl", "antbullettest/")``
    # and did not supply the window-size or grid-position arguments.
    generate_test_file("antbullettest/", "antbullettest/test_params.pkl",
                       x_window=10, y_window=10, xpos=5, ypos=5)
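The heart of generate_test_file is a per-tensor linear interpolation between a base parameter set and two direction sets; the sketch below replays that arithmetic with plain numpy and made-up shapes, so ParamLoader and the saved parameter files are not needed for the illustration.
import numpy as np

x_window, y_window, xpos, ypos = 10, 10, 2, 7   # illustrative grid coordinates only
x_loc = (xpos / x_window) * 2 - 1               # maps [0, x_window] onto [-1, 1]
y_loc = (ypos / y_window) * 2 - 1

base_params = [np.zeros((4, 4)), np.zeros(3)]   # stand-ins for the loaded parameters
x_dir_params = [np.ones((4, 4)), np.ones(3)]
y_dir_params = [np.full((4, 4), 2.0), np.full(3, 2.0)]

alt_params = [param + dir1 * x_loc + dir2 * y_loc
              for dir1, dir2, param in zip(x_dir_params, y_dir_params, base_params)]
for p, ap in zip(base_params, alt_params):
    assert p.shape == ap.shape  # interpolation never changes tensor shapes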
| 44.652174
| 116
| 0.741967
|
a6f2db8b0a5afe9f71fe31118f784308b7a9e651
| 723
|
py
|
Python
|
pair-finder/pair_finder/pipeline/steps/download_pull_request_info.py
|
kingdido999/bugswarm
|
8ff2b3e71ca2598c354e8481c6b887cd5988816a
|
[
"BSD-3-Clause"
] | 18
|
2019-12-27T06:53:39.000Z
|
2022-03-03T03:05:35.000Z
|
pair-finder/pair_finder/pipeline/steps/download_pull_request_info.py
|
kingdido999/bugswarm
|
8ff2b3e71ca2598c354e8481c6b887cd5988816a
|
[
"BSD-3-Clause"
] | 13
|
2020-01-10T17:11:38.000Z
|
2021-12-13T20:34:38.000Z
|
pair-finder/pair_finder/pipeline/steps/download_pull_request_info.py
|
kingdido999/bugswarm
|
8ff2b3e71ca2598c354e8481c6b887cd5988816a
|
[
"BSD-3-Clause"
] | 10
|
2020-01-10T17:36:57.000Z
|
2021-09-13T19:51:43.000Z
|
from typing import Any
from typing import Dict
from typing import Optional
from bugswarm.common import log
from ...model.pull_request import PullRequest
from .step import Step
class DownloadPullRequestInfo(Step):
def process(self, data: Dict[str, PullRequest], context: dict) -> Optional[Any]:
repo = context['repo']
utils = context['utils']
log.info('Downloading pull request info.')
pull_requests = data
for pr_num, pr in pull_requests.items():
pr_info = utils.github.get_pr_info(repo, pr_num)
if pr_info is None:
continue
num_commits = pr_info['commits']
pr.num_commits = num_commits
return data
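A rough usage sketch, assuming the module and its bugswarm dependencies are importable and that Step subclasses take no constructor arguments; FakeGitHub, the SimpleNamespace pull-request records and the repo name are all stand-ins invented for the illustration.
# from pair_finder.pipeline.steps.download_pull_request_info import DownloadPullRequestInfo
from types import SimpleNamespace

class FakeGitHub:
    def get_pr_info(self, repo, pr_num):
        # Canned responses standing in for the real GitHub API wrapper.
        return {'commits': 3} if pr_num == 42 else None

step = DownloadPullRequestInfo()
pull_requests = {42: SimpleNamespace(num_commits=None), 7: SimpleNamespace(num_commits=None)}
context = {'repo': 'octocat/hello-world', 'utils': SimpleNamespace(github=FakeGitHub())}

result = step.process(pull_requests, context)
assert result[42].num_commits == 3     # filled in from the fake API response
assert result[7].num_commits is None   # left untouched because get_pr_info returned None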
| 25.821429
| 84
| 0.650069
|
6724691fdf7654f81dc976442eef90b6a6f1e062
| 599
|
py
|
Python
|
myApp.py
|
mounirboulwafa/Hello_Medium
|
98919e93c981c5b324b36efbc9d33216bb228df8
|
[
"Apache-2.0"
] | 2
|
2020-11-09T19:44:34.000Z
|
2021-05-29T05:12:57.000Z
|
myApp.py
|
mounirboulwafa/Hello_Medium
|
98919e93c981c5b324b36efbc9d33216bb228df8
|
[
"Apache-2.0"
] | null | null | null |
myApp.py
|
mounirboulwafa/Hello_Medium
|
98919e93c981c5b324b36efbc9d33216bb228df8
|
[
"Apache-2.0"
] | 1
|
2021-08-12T04:52:46.000Z
|
2021-08-12T04:52:46.000Z
|
import tkinter
from tkinter import messagebox
window = tkinter.Tk()
window.title(" My Python Application")
window.geometry("400x300")
window.iconbitmap(default='images\\myicon.ico')
window.resizable(0, 0)
def show_hello():
messagebox.showinfo('Hello Medium', ' Hello Medium')
myimage = tkinter.PhotoImage(file="images\\python_to_exe.png")
image = tkinter.Label(window, image=myimage, width=500, height=170)
image.pack()  # the original also called image.place(x=20, y=60), which this pack() call overrode anyway
spacer = tkinter.Label(window, height=2)  # empty label used as vertical spacing
spacer.pack()
button = tkinter.Button(text=" Hello ", command=show_hello)
button.pack()
window.mainloop()
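Judging from the bundled images\\python_to_exe.png artwork, this window presumably accompanies a "compile Python to an .exe" walkthrough. If so, a typical packaging command (an assumption, not something stated in the file) would be pyinstaller --onefile --windowed --icon=images\\myicon.ico myApp.py, and the images folder would still need to sit next to the executable at run time because the paths above are relative.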
| 23.038462
| 69
| 0.741235
|
7ed30214a3f6063d2c5b0f6f6aa64a04aba0c9a9
| 5,697
|
py
|
Python
|
starthinker/task/bigquery/run.py
|
quan/starthinker
|
4e392415d77affd4a3d91165d1141ab38efd3b8b
|
[
"Apache-2.0"
] | 1
|
2020-12-04T17:13:35.000Z
|
2020-12-04T17:13:35.000Z
|
starthinker/task/bigquery/run.py
|
hgrias/starthinker
|
b9ed33e23b4ffd72565a31ebb8a8041d346bfca2
|
[
"Apache-2.0"
] | null | null | null |
starthinker/task/bigquery/run.py
|
hgrias/starthinker
|
b9ed33e23b4ffd72565a31ebb8a8041d346bfca2
|
[
"Apache-2.0"
] | null | null | null |
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
from starthinker.util.project import project
from starthinker.util.csv import rows_to_type
from starthinker.util.sheets import sheets_clear
from starthinker.util.sheets import sheets_write
from starthinker.util.data import get_rows, put_rows
from starthinker.util.bigquery import query_to_table, query_to_view, storage_to_table, query_to_rows, rows_to_table, run_query, query_parameters
from starthinker.util.bigquery.functions import pearson_significance_test
@project.from_parameters
def bigquery():
if 'function' in project.task:
query = None
if project.task['function'] == 'pearson_significance_test':
query = pearson_significance_test()
if query:
run_query(project.task['auth'], project.id, query, False,
project.task['to']['dataset'])
elif 'run' in project.task and 'query' in project.task.get('run', {}):
if project.verbose:
print('QUERY', project.task['run']['query'])
run_query(
project.task['auth'],
project.id,
query_parameters(project.task['run']['query'],
project.task['run'].get('parameters')),
project.task['run'].get('legacy', True),
)
elif 'values' in project.task['from']:
rows = get_rows(project.task['auth'], project.task['from'])
rows_to_table(project.task['to'].get('auth', project.task['auth']),
project.id, project.task['to']['dataset'],
project.task['to']['table'], rows,
project.task.get('schema', []), 0)
elif 'query' in project.task['from']:
if 'table' in project.task['to']:
if project.verbose:
print('QUERY TO TABLE', project.task['to']['table'])
query_to_table(
project.task['auth'],
project.id,
project.task['to']['dataset'],
project.task['to']['table'],
query_parameters(project.task['from']['query'],
project.task['from'].get('parameters')),
disposition=project.task['write_disposition']
if 'write_disposition' in project.task else 'WRITE_TRUNCATE',
legacy=project.task['from'].get(
'legacy',
project.task['from'].get('useLegacySql',
True)), # DEPRECATED: useLegacySql,
target_project_id=project.task['to'].get('project_id', project.id))
elif 'sheet' in project.task['to']:
if project.verbose:
print('QUERY TO SHEET', project.task['to']['sheet'])
rows = query_to_rows(
project.task['auth'],
project.id,
project.task['from']['dataset'],
query_parameters(project.task['from']['query'],
project.task['from'].get('parameters')),
legacy=project.task['from'].get('legacy', True))
# makes sure types are correct in sheet
rows = rows_to_type(rows)
sheets_clear(project.task['to'].get('auth', project.task['auth']),
project.task['to']['sheet'], project.task['to']['tab'],
project.task['to'].get('range', 'A2'))
sheets_write(project.task['to'].get('auth', project.task['auth']),
project.task['to']['sheet'], project.task['to']['tab'],
project.task['to'].get('range', 'A2'), rows)
elif 'sftp' in project.task['to']:
rows = query_to_rows(
project.task['auth'],
project.id,
project.task['from']['dataset'],
query_parameters(project.task['from']['query'],
project.task['from'].get('parameters')),
legacy=project.task['from'].get('use_legacy_sql', True))
if rows:
if project.verbose:
print('QUERY TO SFTP')
put_rows(project.task['auth'], project.task['to'], rows)
else:
if project.verbose:
print('QUERY TO VIEW', project.task['to']['view'])
query_to_view(
project.task['auth'],
project.id,
project.task['to']['dataset'],
project.task['to']['view'],
query_parameters(project.task['from']['query'],
project.task['from'].get('parameters')),
project.task['from'].get(
'legacy',
project.task['from'].get('useLegacySql',
True)), # DEPRECATED: useLegacySql
project.task['to'].get('replace', False))
else:
if project.verbose:
print('STORAGE TO TABLE', project.task['to']['table'])
storage_to_table(
project.task['auth'], project.id, project.task['to']['dataset'],
project.task['to']['table'],
project.task['from']['bucket'] + ':' + project.task['from']['path'],
project.task.get('schema', []), project.task.get('skip_rows', 1),
project.task.get('structure', 'CSV'),
project.task.get('disposition', 'WRITE_TRUNCATE'))
if __name__ == '__main__':
bigquery()
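For orientation, a project.task dictionary shaped like the one below would fall through to the 'QUERY TO TABLE' branch of the dispatcher; only the key names are taken from the code above, while every value (auth mode, query text, dataset and table names) is a placeholder.
example_task = {
    'auth': 'service',
    'from': {
        'query': 'SELECT * FROM `project.dataset.source_table`',  # placeholder query
        'legacy': False,
    },
    'to': {
        'dataset': 'example_dataset',
        'table': 'example_table',
    },
    'write_disposition': 'WRITE_TRUNCATE',
}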
| 39.020548
| 144
| 0.579428
|
b0139cdddebeae1323f98d5a7945d810f770a406
| 319
|
py
|
Python
|
hydra_plugins/meta_blocks_experiment_searchpath_plugin/meta_blocks_experiment_searchpath_plugin.py
|
alshedivat/meta-blocks
|
6f6d93dfaab75766e8afdf9eb2fad17dc79218f2
|
[
"BSD-3-Clause"
] | 124
|
2020-04-10T00:55:19.000Z
|
2022-03-12T13:11:01.000Z
|
hydra_plugins/meta_blocks_experiment_searchpath_plugin/meta_blocks_experiment_searchpath_plugin.py
|
meteozay/meta-blocks
|
6f6d93dfaab75766e8afdf9eb2fad17dc79218f2
|
[
"BSD-3-Clause"
] | 2
|
2020-04-10T17:28:42.000Z
|
2020-05-12T16:07:38.000Z
|
hydra_plugins/meta_blocks_experiment_searchpath_plugin/meta_blocks_experiment_searchpath_plugin.py
|
meteozay/meta-blocks
|
6f6d93dfaab75766e8afdf9eb2fad17dc79218f2
|
[
"BSD-3-Clause"
] | 8
|
2020-04-11T04:40:47.000Z
|
2021-02-17T23:52:21.000Z
|
from hydra._internal.config_search_path import ConfigSearchPath
from hydra.plugins import SearchPathPlugin
class MetaBlocksExperimentSearchPathPlugin(SearchPathPlugin):
def manipulate_search_path(self, search_path: ConfigSearchPath):
search_path.append("meta-blocks", "pkg://meta_blocks.experiment.conf")
| 39.875
| 78
| 0.827586
|
b9572db192d357b8981ebc4eb1ae466dbd7a5b32
| 7,938
|
py
|
Python
|
docs/conf.py
|
davidwrq/medrecords
|
7dd91d3be9c8c3fe2c9a31906486b5041b30e526
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
davidwrq/medrecords
|
7dd91d3be9c8c3fe2c9a31906486b5041b30e526
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
davidwrq/medrecords
|
7dd91d3be9c8c3fe2c9a31906486b5041b30e526
|
[
"MIT"
] | null | null | null |
# medrecords documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "medrecords"
copyright = """2018, David Liencura"""
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.1"
# The full version, including alpha/beta/rc tags.
release = "0.1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "medrecordsdoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(
"index",
"medrecords.tex",
"medrecords Documentation",
"""David Liencura""",
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
"index",
"medrecords",
"medrecords Documentation",
["""David Liencura"""],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"medrecords",
"medrecords Documentation",
"""David Liencura""",
"medrecords",
"""Sistema de manejo para fichas medicas""",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 31.007813
| 80
| 0.695767
|
a0c15d493a58076760e4b51f3f3fd57e67b846a2
| 734
|
py
|
Python
|
web/migrations/0011_auto_20191120_2343.py
|
nonomal/oh-my-rss
|
68b9284e0acaf44ea389d675b71949177f9f3256
|
[
"MIT"
] | 270
|
2019-09-05T05:51:19.000Z
|
2022-03-12T18:26:13.000Z
|
web/migrations/0011_auto_20191120_2343.py
|
virtual-emperor/oh-my-rss
|
3e04899aba4dec27026f67e44193ca8f1eca616a
|
[
"MIT"
] | 6
|
2019-09-06T03:52:47.000Z
|
2021-04-10T06:21:14.000Z
|
web/migrations/0011_auto_20191120_2343.py
|
virtual-emperor/oh-my-rss
|
3e04899aba4dec27026f67e44193ca8f1eca616a
|
[
"MIT"
] | 37
|
2019-09-06T05:13:24.000Z
|
2022-01-21T08:05:33.000Z
|
# Generated by Django 2.2.4 on 2019-11-20 15:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('web', '0010_auto_20191104_2022'),
]
operations = [
migrations.AlterField(
model_name='article',
name='status',
field=models.CharField(choices=[('active', '激活'), ('close', '关闭,下线')], db_index=True, default='active', max_length=20, verbose_name='状态'),
),
migrations.AlterField(
model_name='site',
name='status',
field=models.CharField(choices=[('active', '激活'), ('close', '关闭,下线')], db_index=True, default='active', max_length=20, verbose_name='状态'),
),
]
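For context, the altered field corresponds to a model declaration along these lines; the surrounding model is abbreviated, the STATUS_CHOICES constant name is invented, and the field arguments are copied from the migration itself (the Site model carries the same field).
from django.db import models

STATUS_CHOICES = [('active', '激活'), ('close', '关闭,下线')]

class Article(models.Model):
    status = models.CharField(choices=STATUS_CHOICES, db_index=True, default='active',
                              max_length=20, verbose_name='状态')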
| 30.583333
| 150
| 0.585831
|
c70e08810f6ee4f8196a0f391c946a514268a0f8
| 460
|
py
|
Python
|
plotly/validators/scattergl/error_x/_color.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 12
|
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/validators/scattergl/error_x/_color.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 27
|
2020-04-28T21:23:12.000Z
|
2021-06-25T15:36:38.000Z
|
plotly/validators/scattergl/error_x/_color.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 6
|
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name='color', parent_name='scattergl.error_x', **kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'calc'),
role=kwargs.pop('role', 'style'),
**kwargs
)
| 28.75
| 76
| 0.632609
|
1d5239d11632ab497bcfb3f909b6f8167e1917fb
| 1,587
|
py
|
Python
|
src/utils/IdealGraphVisualizer/ServerCompiler/src/main/resources/com/sun/hotspot/igv/servercompiler/filters/extract-colors.py
|
1690296356/jdk
|
eaf668d1510c28d51e26c397b582b66ebdf7e263
|
[
"Apache-2.0"
] | null | null | null |
src/utils/IdealGraphVisualizer/ServerCompiler/src/main/resources/com/sun/hotspot/igv/servercompiler/filters/extract-colors.py
|
1690296356/jdk
|
eaf668d1510c28d51e26c397b582b66ebdf7e263
|
[
"Apache-2.0"
] | null | null | null |
src/utils/IdealGraphVisualizer/ServerCompiler/src/main/resources/com/sun/hotspot/igv/servercompiler/filters/extract-colors.py
|
1690296356/jdk
|
eaf668d1510c28d51e26c397b582b66ebdf7e263
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
#
# Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
import matplotlib.cm
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument('--steps', type=int, default=10)
parser.add_argument('--colormap', default='coolwarm')
args = parser.parse_args()
cmap = matplotlib.cm.get_cmap(args.colormap)
n = args.steps
for step in range(n + 1):
point = step / float(n)
rgb = tuple([int(round(c * 255)) for c in cmap(point)[0:3]])
hex = '#%02x%02x%02x' % rgb
print("var step" + str(step) + "Color" + " = java.awt.Color.decode(\"" + \
hex + "\");")
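A typical run is ./extract-colors.py --colormap coolwarm --steps 10 (both values are the defaults); it prints eleven declarations of the form var step0Color = java.awt.Color.decode("#..."); up to step10Color, apparently meant to be pasted into the IdealGraphVisualizer filter scripts this file sits next to. The concrete hex values depend on matplotlib's colormap data and are not reproduced here.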
| 37.785714
| 78
| 0.720227
|
232bc7f1cd9c126422394a8903d8a1d292e9440f
| 1,672
|
py
|
Python
|
tools/api/permissions_by_hand.py
|
yang-guangliang/android_guard
|
704fea36d796d5fbda2badf2237fc9dba74c3a96
|
[
"Apache-2.0"
] | 3
|
2015-10-23T13:36:07.000Z
|
2021-07-22T23:30:41.000Z
|
tools/api/permissions_by_hand.py
|
yang-guangliang/android_guard
|
704fea36d796d5fbda2badf2237fc9dba74c3a96
|
[
"Apache-2.0"
] | null | null | null |
tools/api/permissions_by_hand.py
|
yang-guangliang/android_guard
|
704fea36d796d5fbda2badf2237fc9dba74c3a96
|
[
"Apache-2.0"
] | 1
|
2015-10-31T08:38:25.000Z
|
2015-10-31T08:38:25.000Z
|
PERMISSIONS_BY_HAND = {
"SEND_SMS": {
"android.telephony.SmsManager": [
["F", "getDefault()", "static SmsManager"],
["F",
"sendDataMessage(java.lang.String, java.lang.String, short, bytecollections.deque(), PendingIntent, PendingIntent)",
"void"],
# [ "F", "sendMultipartTextMessage(String destinationAddress, String scAddress, ArrayList<String> parts, ArrayList<PendingIntent> sentIntents, ArrayList<PendingIntent> deliveryIntents", "void" ],
["F",
"sendTextMessage(java.lang.String, java.lang.String, java.lang.String, PendingIntent, PendingIntent)",
"void"],
],
"android.telephony.gsm.SmsManager": [
["F", "getDefault()", "static android.telephony.gsm.SmsManager"],
["F",
"sendDataMessage(java.lang.String, java.lang.String, short, bytecollections.deque(), PendingIntent, PendingIntent)",
"void"],
# [ "F", "sendMultipartTextMessage(String destinationAddress, String scAddress, ArrayList<String> parts, ArrayList<PendingIntent> sentIntents, ArrayList<PendingIntent> deliveryIntents", "void" ],
["F",
"sendTextMessage(java.lang.String, java.lang.String, java.lang.String, PendingIntent, PendingIntent)",
"void"],
],
},
"SET_WALLPAPER":
{"android.app.WallpaperManager": [
["F", "setBitmap(Bitmap)", "void"],
],},
"READ_CONTACTS": {
"android.provider.ContactsContract$CommonDataKinds$Phone": [
["C", "CONTENT_URI", "Uri"]
],
},
}
| 49.176471
| 230
| 0.587919
|